Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xen/scsifront: don't request a slot on the ring until request is ready

Instead of requesting a new slot on the ring to the backend early, do
so only after everything has been set up for the request to be sent.
This makes error handling easier, as we don't need to undo the request
id allocation and ring slot allocation.

Suggested-by: Jan Beulich <jbeulich@suse.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Signed-off-by: Juergen Gross <jgross@suse.com>

+83 -105
+83 -105
drivers/scsi/xen-scsifront.c
··· 79 79 struct vscsifrnt_shadow { 80 80 /* command between backend and frontend */ 81 81 unsigned char act; 82 + uint8_t nr_segments; 82 83 uint16_t rqid; 84 + uint16_t ref_rqid; 83 85 84 86 unsigned int nr_grants; /* number of grants in gref[] */ 85 87 struct scsiif_request_segment *sg; /* scatter/gather elements */ 88 + struct scsiif_request_segment seg[VSCSIIF_SG_TABLESIZE]; 86 89 87 90 /* Do reset or abort function. */ 88 91 wait_queue_head_t wq_reset; /* reset work queue */ ··· 175 172 scsifront_wake_up(info); 176 173 } 177 174 178 - static struct vscsiif_request *scsifront_pre_req(struct vscsifrnt_info *info) 175 + static int scsifront_do_request(struct vscsifrnt_info *info, 176 + struct vscsifrnt_shadow *shadow) 179 177 { 180 178 struct vscsiif_front_ring *ring = &(info->ring); 181 179 struct vscsiif_request *ring_req; 180 + struct scsi_cmnd *sc = shadow->sc; 182 181 uint32_t id; 182 + int i, notify; 183 + 184 + if (RING_FULL(&info->ring)) 185 + return -EBUSY; 183 186 184 187 id = scsifront_get_rqid(info); /* use id in response */ 185 188 if (id >= VSCSIIF_MAX_REQS) 186 - return NULL; 189 + return -EBUSY; 190 + 191 + info->shadow[id] = shadow; 192 + shadow->rqid = id; 187 193 188 194 ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt); 189 - 190 195 ring->req_prod_pvt++; 191 196 192 - ring_req->rqid = (uint16_t)id; 197 + ring_req->rqid = id; 198 + ring_req->act = shadow->act; 199 + ring_req->ref_rqid = shadow->ref_rqid; 200 + ring_req->nr_segments = shadow->nr_segments; 193 201 194 - return ring_req; 195 - } 202 + ring_req->id = sc->device->id; 203 + ring_req->lun = sc->device->lun; 204 + ring_req->channel = sc->device->channel; 205 + ring_req->cmd_len = sc->cmd_len; 196 206 197 - static void scsifront_do_request(struct vscsifrnt_info *info) 198 - { 199 - struct vscsiif_front_ring *ring = &(info->ring); 200 - int notify; 207 + BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE); 208 + 209 + memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len); 210 + 211 + 
ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction; 212 + ring_req->timeout_per_command = sc->request->timeout / HZ; 213 + 214 + for (i = 0; i < (shadow->nr_segments & ~VSCSIIF_SG_GRANT); i++) 215 + ring_req->seg[i] = shadow->seg[i]; 201 216 202 217 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify); 203 218 if (notify) 204 219 notify_remote_via_irq(info->irq); 220 + 221 + return 0; 205 222 } 206 223 207 - static void scsifront_gnttab_done(struct vscsifrnt_info *info, uint32_t id) 224 + static void scsifront_gnttab_done(struct vscsifrnt_info *info, 225 + struct vscsifrnt_shadow *shadow) 208 226 { 209 - struct vscsifrnt_shadow *s = info->shadow[id]; 210 227 int i; 211 228 212 - if (s->sc->sc_data_direction == DMA_NONE) 229 + if (shadow->sc->sc_data_direction == DMA_NONE) 213 230 return; 214 231 215 - for (i = 0; i < s->nr_grants; i++) { 216 - if (unlikely(gnttab_query_foreign_access(s->gref[i]) != 0)) { 232 + for (i = 0; i < shadow->nr_grants; i++) { 233 + if (unlikely(gnttab_query_foreign_access(shadow->gref[i]))) { 217 234 shost_printk(KERN_ALERT, info->host, KBUILD_MODNAME 218 235 "grant still in use by backend\n"); 219 236 BUG(); 220 237 } 221 - gnttab_end_foreign_access(s->gref[i], 0, 0UL); 238 + gnttab_end_foreign_access(shadow->gref[i], 0, 0UL); 222 239 } 223 240 224 - kfree(s->sg); 241 + kfree(shadow->sg); 225 242 } 226 243 227 244 static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info, 228 245 struct vscsiif_response *ring_rsp) 229 246 { 247 + struct vscsifrnt_shadow *shadow; 230 248 struct scsi_cmnd *sc; 231 249 uint32_t id; 232 250 uint8_t sense_len; 233 251 234 252 id = ring_rsp->rqid; 235 - sc = info->shadow[id]->sc; 253 + shadow = info->shadow[id]; 254 + sc = shadow->sc; 236 255 237 256 BUG_ON(sc == NULL); 238 257 239 - scsifront_gnttab_done(info, id); 258 + scsifront_gnttab_done(info, shadow); 240 259 scsifront_put_rqid(info, id); 241 260 242 261 sc->result = ring_rsp->rslt; ··· 391 366 392 367 static int map_data_for_request(struct 
vscsifrnt_info *info, 393 368 struct scsi_cmnd *sc, 394 - struct vscsiif_request *ring_req, 395 369 struct vscsifrnt_shadow *shadow) 396 370 { 397 371 grant_ref_t gref_head; ··· 403 379 struct scatterlist *sg; 404 380 struct scsiif_request_segment *seg; 405 381 406 - ring_req->nr_segments = 0; 407 382 if (sc->sc_data_direction == DMA_NONE || !data_len) 408 383 return 0; 409 384 ··· 421 398 if (!shadow->sg) 422 399 return -ENOMEM; 423 400 } 424 - seg = shadow->sg ? : ring_req->seg; 401 + seg = shadow->sg ? : shadow->seg; 425 402 426 403 err = gnttab_alloc_grant_references(seg_grants + data_grants, 427 404 &gref_head); ··· 446 423 info->dev->otherend_id, 447 424 xen_page_to_gfn(page), 1); 448 425 shadow->gref[ref_cnt] = ref; 449 - ring_req->seg[ref_cnt].gref = ref; 450 - ring_req->seg[ref_cnt].offset = (uint16_t)off; 451 - ring_req->seg[ref_cnt].length = (uint16_t)bytes; 426 + shadow->seg[ref_cnt].gref = ref; 427 + shadow->seg[ref_cnt].offset = (uint16_t)off; 428 + shadow->seg[ref_cnt].length = (uint16_t)bytes; 452 429 453 430 page++; 454 431 len -= bytes; ··· 496 473 } 497 474 498 475 if (seg_grants) 499 - ring_req->nr_segments = VSCSIIF_SG_GRANT | seg_grants; 476 + shadow->nr_segments = VSCSIIF_SG_GRANT | seg_grants; 500 477 else 501 - ring_req->nr_segments = (uint8_t)ref_cnt; 478 + shadow->nr_segments = (uint8_t)ref_cnt; 502 479 shadow->nr_grants = ref_cnt; 503 480 504 481 return 0; 505 - } 506 - 507 - static struct vscsiif_request *scsifront_command2ring( 508 - struct vscsifrnt_info *info, struct scsi_cmnd *sc, 509 - struct vscsifrnt_shadow *shadow) 510 - { 511 - struct vscsiif_request *ring_req; 512 - 513 - memset(shadow, 0, sizeof(*shadow)); 514 - 515 - ring_req = scsifront_pre_req(info); 516 - if (!ring_req) 517 - return NULL; 518 - 519 - info->shadow[ring_req->rqid] = shadow; 520 - shadow->rqid = ring_req->rqid; 521 - 522 - ring_req->id = sc->device->id; 523 - ring_req->lun = sc->device->lun; 524 - ring_req->channel = sc->device->channel; 525 - 
ring_req->cmd_len = sc->cmd_len; 526 - 527 - BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE); 528 - 529 - memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len); 530 - 531 - ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction; 532 - ring_req->timeout_per_command = sc->request->timeout / HZ; 533 - 534 - return ring_req; 535 482 } 536 483 537 484 static int scsifront_enter(struct vscsifrnt_info *info) ··· 529 536 struct scsi_cmnd *sc) 530 537 { 531 538 struct vscsifrnt_info *info = shost_priv(shost); 532 - struct vscsiif_request *ring_req; 533 539 struct vscsifrnt_shadow *shadow = scsi_cmd_priv(sc); 534 540 unsigned long flags; 535 541 int err; 536 - uint16_t rqid; 542 + 543 + sc->result = 0; 544 + memset(shadow, 0, sizeof(*shadow)); 545 + 546 + shadow->sc = sc; 547 + shadow->act = VSCSIIF_ACT_SCSI_CDB; 537 548 538 549 spin_lock_irqsave(shost->host_lock, flags); 539 550 if (scsifront_enter(info)) { 540 551 spin_unlock_irqrestore(shost->host_lock, flags); 541 552 return SCSI_MLQUEUE_HOST_BUSY; 542 553 } 543 - if (RING_FULL(&info->ring)) 544 - goto busy; 545 554 546 - ring_req = scsifront_command2ring(info, sc, shadow); 547 - if (!ring_req) 548 - goto busy; 549 - 550 - sc->result = 0; 551 - 552 - rqid = ring_req->rqid; 553 - ring_req->act = VSCSIIF_ACT_SCSI_CDB; 554 - 555 - shadow->sc = sc; 556 - shadow->act = VSCSIIF_ACT_SCSI_CDB; 557 - 558 - err = map_data_for_request(info, sc, ring_req, shadow); 555 + err = map_data_for_request(info, sc, shadow); 559 556 if (err < 0) { 560 557 pr_debug("%s: err %d\n", __func__, err); 561 - scsifront_put_rqid(info, rqid); 562 558 scsifront_return(info); 563 559 spin_unlock_irqrestore(shost->host_lock, flags); 564 560 if (err == -ENOMEM) ··· 557 575 return 0; 558 576 } 559 577 560 - scsifront_do_request(info); 578 + if (scsifront_do_request(info, shadow)) { 579 + scsifront_gnttab_done(info, shadow); 580 + goto busy; 581 + } 582 + 561 583 scsifront_return(info); 562 584 spin_unlock_irqrestore(shost->host_lock, flags); 563 585 ··· 584 
598 struct Scsi_Host *host = sc->device->host; 585 599 struct vscsifrnt_info *info = shost_priv(host); 586 600 struct vscsifrnt_shadow *shadow, *s = scsi_cmd_priv(sc); 587 - struct vscsiif_request *ring_req; 588 601 int err = 0; 589 602 590 - shadow = kmalloc(sizeof(*shadow), GFP_NOIO); 603 + shadow = kzalloc(sizeof(*shadow), GFP_NOIO); 591 604 if (!shadow) 592 605 return FAILED; 606 + 607 + shadow->act = act; 608 + shadow->rslt_reset = RSLT_RESET_WAITING; 609 + shadow->sc = sc; 610 + shadow->ref_rqid = s->rqid; 611 + init_waitqueue_head(&shadow->wq_reset); 593 612 594 613 spin_lock_irq(host->host_lock); 595 614 596 615 for (;;) { 597 - if (!RING_FULL(&info->ring)) { 598 - ring_req = scsifront_command2ring(info, sc, shadow); 599 - if (ring_req) 600 - break; 601 - } 602 - if (err || info->pause) { 603 - spin_unlock_irq(host->host_lock); 604 - kfree(shadow); 605 - return FAILED; 606 - } 616 + if (scsifront_enter(info)) 617 + goto fail; 618 + 619 + if (!scsifront_do_request(info, shadow)) 620 + break; 621 + 622 + scsifront_return(info); 623 + if (err) 624 + goto fail; 607 625 info->wait_ring_available = 1; 608 626 spin_unlock_irq(host->host_lock); 609 627 err = wait_event_interruptible(info->wq_sync, 610 628 !info->wait_ring_available); 611 629 spin_lock_irq(host->host_lock); 612 630 } 613 - 614 - if (scsifront_enter(info)) { 615 - spin_unlock_irq(host->host_lock); 616 - kfree(shadow); 617 - return FAILED; 618 - } 619 - 620 - ring_req->act = act; 621 - ring_req->ref_rqid = s->rqid; 622 - 623 - shadow->act = act; 624 - shadow->rslt_reset = RSLT_RESET_WAITING; 625 - init_waitqueue_head(&shadow->wq_reset); 626 - 627 - ring_req->nr_segments = 0; 628 - 629 - scsifront_do_request(info); 630 631 631 632 spin_unlock_irq(host->host_lock); 632 633 err = wait_event_interruptible(shadow->wq_reset, shadow->wait_reset); ··· 633 660 scsifront_return(info); 634 661 spin_unlock_irq(host->host_lock); 635 662 return err; 663 + 664 + fail: 665 + spin_unlock_irq(host->host_lock); 666 + 
kfree(shadow); 667 + return FAILED; 636 668 } 637 669 638 670 static int scsifront_eh_abort_handler(struct scsi_cmnd *sc)