[SCSI] SCSI core: fix leakage of scsi_cmnd's

From: Alan Stern <stern@rowland.harvard.edu>

This patch (as559b) adds a new routine, scsi_unprep_request, which
gets called every place a request is requeued. (That includes
scsi_queue_insert as well as scsi_requeue_command.) It also changes
scsi_kill_requests (reworked into a per-request scsi_kill_request) to make
it call __scsi_done with the result set to
DID_NO_CONNECT << 16. (I'm not sure if it's necessary to call
scsi_init_cmd_errh here; maybe you can check on that.) Finally, the
patch changes the return value from scsi_end_request, to avoid
returning a stale pointer in the case where the request was requeued.
Fortunately the return value is used in only one place, and the change
actually simplified it.
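
At bottom the new helper just undoes what the prep function did and releases
the command; this is condensed from the hunk below, with explanatory comments
added:

	static void scsi_unprep_request(struct request *req)
	{
		struct scsi_cmnd *cmd = req->special;

		/* Let the prep function rebuild the command on the next pass. */
		req->flags &= ~REQ_DONTPREP;
		/* REQ_SPECIAL requests get their scsi_request back; others are cleared. */
		req->special = (req->flags & REQ_SPECIAL) ? cmd->sc_request : NULL;

		/* Drop the data buffers and the scsi_cmnd itself -- the
		 * allocation that used to leak on every requeue. */
		scsi_release_buffers(cmd);
		scsi_put_command(cmd);
	}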

Signed-off-by: Alan Stern <stern@rowland.harvard.edu>

Rejections fixed up and
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>

drivers/scsi/scsi_lib.c | +71 -40
···
}

static void scsi_run_queue(struct request_queue *q);
+static void scsi_release_buffers(struct scsi_cmnd *cmd);
+
+/*
+ * Function:	scsi_unprep_request()
+ *
+ * Purpose:	Remove all preparation done for a request, including its
+ *		associated scsi_cmnd, so that it can be requeued.
+ *
+ * Arguments:	req	- request to unprepare
+ *
+ * Lock status:	Assumed that no locks are held upon entry.
+ *
+ * Returns:	Nothing.
+ */
+static void scsi_unprep_request(struct request *req)
+{
+	struct scsi_cmnd *cmd = req->special;
+
+	req->flags &= ~REQ_DONTPREP;
+	req->special = (req->flags & REQ_SPECIAL) ? cmd->sc_request : NULL;
+
+	scsi_release_buffers(cmd);
+	scsi_put_command(cmd);
+}

/*
 * Function:	scsi_queue_insert()
···
 *		commands.
 * Notes:	This could be called either from an interrupt context or a
 *		normal process context.
+ * Notes:	Upon return, cmd is a stale pointer.
 */
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct request_queue *q = device->request_queue;
+	struct request *req = cmd->request;
	unsigned long flags;

	SCSI_LOG_MLQUEUE(1,
···
	 * function.  The SCSI request function detects the blocked condition
	 * and plugs the queue appropriately.
	 */
+	scsi_unprep_request(req);
	spin_lock_irqsave(q->queue_lock, flags);
-	blk_requeue_request(q, cmd->request);
+	blk_requeue_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);
···
 *		I/O errors in the middle of the request, in which case
 *		we need to request the blocks that come after the bad
 *		sector.
+ * Notes:	Upon return, cmd is a stale pointer.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
+	struct request *req = cmd->request;
	unsigned long flags;

-	cmd->request->flags &= ~REQ_DONTPREP;
-
+	scsi_unprep_request(req);
	spin_lock_irqsave(q->queue_lock, flags);
-	blk_requeue_request(q, cmd->request);
+	blk_requeue_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);
···
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
- * Returns:     cmd if requeue done or required, NULL otherwise
+ * Returns:     cmd if requeue required, NULL otherwise.
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *		We are guaranteeing that the request queue will be goosed
 *		at some point during this call.
+ * Notes:	If cmd was requeued, upon return it will be a stale pointer.
 */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
					  int bytes, int requeue)
···
		if (!uptodate && blk_noretry_request(req))
			end_that_request_chunk(req, 0, leftover);
		else {
-			if (requeue)
+			if (requeue) {
				/*
				 * Bleah.  Leftovers again.  Stick the
				 * leftovers in the front of the
				 * queue, and goose the queue again.
				 */
				scsi_requeue_command(q, cmd);
-
+				cmd = NULL;
+			}
			return cmd;
		}
	}
···
		 * requeueing right here - we will requeue down below
		 * when we handle the bad sectors.
		 */
-		cmd = scsi_end_request(cmd, 1, good_bytes, result == 0);

		/*
-		 * If the command completed without error, then either finish off the
-		 * rest of the command, or start a new one.
+		 * If the command completed without error, then either
+		 * finish off the rest of the command, or start a new one.
		 */
-		if (result == 0 || cmd == NULL ) {
+		if (scsi_end_request(cmd, 1, good_bytes, result == 0) == NULL)
			return;
-		}
	}
	/*
	 * Now, if we were good little boys and girls, Santa left us a request
···
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
-				cmd = scsi_end_request(cmd, 0,
+				scsi_end_request(cmd, 0,
						this_count, 1);
				return;
			} else {
···
				scsi_requeue_command(q, cmd);
				result = 0;
			} else {
-				cmd = scsi_end_request(cmd, 0, this_count, 1);
+				scsi_end_request(cmd, 0, this_count, 1);
				return;
			}
			break;
···
				dev_printk(KERN_INFO,
					   &cmd->device->sdev_gendev,
					   "Device not ready.\n");
-			cmd = scsi_end_request(cmd, 0, this_count, 1);
+			scsi_end_request(cmd, 0, this_count, 1);
			return;
		case VOLUME_OVERFLOW:
			if (!(req->flags & REQ_QUIET)) {
···
				__scsi_print_command(cmd->data_cmnd);
				scsi_print_sense("", cmd);
			}
-			cmd = scsi_end_request(cmd, 0, block_bytes, 1);
+			scsi_end_request(cmd, 0, block_bytes, 1);
			return;
		default:
			break;
···
		block_bytes = req->hard_cur_sectors << 9;
		if (!block_bytes)
			block_bytes = req->data_len;
-		cmd = scsi_end_request(cmd, 0, block_bytes, 1);
+		scsi_end_request(cmd, 0, block_bytes, 1);
	}
}
EXPORT_SYMBOL(scsi_io_completion);
···
}

/*
- * Kill requests for a dead device
+ * Kill a request for a dead device
 */
-static void scsi_kill_requests(request_queue_t *q)
+static void scsi_kill_request(struct request *req, request_queue_t *q)
{
-	struct request *req;
+	struct scsi_cmnd *cmd = req->special;

-	while ((req = elv_next_request(q)) != NULL) {
-		blkdev_dequeue_request(req);
-		req->flags |= REQ_QUIET;
-		while (end_that_request_first(req, 0, req->nr_sectors))
-			;
-		end_that_request_last(req);
+	spin_unlock(q->queue_lock);
+	if (unlikely(cmd == NULL)) {
+		printk(KERN_CRIT "impossible request in %s.\n",
+		       __FUNCTION__);
+		BUG();
	}
+
+	scsi_init_cmd_errh(cmd);
+	cmd->result = DID_NO_CONNECT << 16;
+	atomic_inc(&cmd->device->iorequest_cnt);
+	__scsi_done(cmd);
+	spin_lock(q->queue_lock);
}

/*
···

	if (!sdev) {
		printk("scsi: killing requests for dead queue\n");
-		scsi_kill_requests(q);
+		while ((req = elv_next_request(q)) != NULL)
+			scsi_kill_request(req, q);
		return;
	}

···
			printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
			       sdev->host->host_no, sdev->id, sdev->lun);
			blkdev_dequeue_request(req);
-			req->flags |= REQ_QUIET;
-			while (end_that_request_first(req, 0, req->nr_sectors))
-				;
-			end_that_request_last(req);
+			scsi_kill_request(req, q);
			continue;
		}

···
		sdev->device_busy++;

		spin_unlock(q->queue_lock);
+		cmd = req->special;
+		if (unlikely(cmd == NULL)) {
+			printk(KERN_CRIT "impossible request in %s.\n"
+			       "please mail a stack trace to "
+			       "linux-scsi@vger.kernel.org",
+			       __FUNCTION__);
+			BUG();
+		}
		spin_lock(shost->host_lock);

		if (!scsi_host_queue_ready(q, shost, sdev))
···
		 * take the lock again.
		 */
		spin_unlock_irq(shost->host_lock);
-
-		cmd = req->special;
-		if (unlikely(cmd == NULL)) {
-			printk(KERN_CRIT "impossible request in %s.\n"
-			       "please mail a stack trace to "
-			       "linux-scsi@vger.kernel.org",
-			       __FUNCTION__);
-			BUG();
-		}

		/*
		 * Finally, initialize any error handling parameters, and set up
···
	 * cases (host limits or settings) should run the queue at some
	 * later time.
	 */
+	scsi_unprep_request(req);
	spin_lock_irq(q->queue_lock);
	blk_requeue_request(q, req);
	sdev->device_busy--;