[SCSI] SCSI core: fix leakage of scsi_cmnd's

From: Alan Stern <stern@rowland.harvard.edu>

This patch (as559b) adds a new routine, scsi_unprep_request, which
gets called every place a request is requeued. (That includes
scsi_queue_insert as well as scsi_requeue_command.) It also changes
scsi_kill_requests (now a per-request scsi_kill_request) to make it
call __scsi_done with result equal to
DID_NO_CONNECT << 16. (I'm not sure if it's necessary to call
scsi_init_cmd_errh here; maybe you can check on that.) Finally, the
patch changes the return value from scsi_end_request, to avoid
returning a stale pointer in the case where the request was requeued.
Fortunately the return value is used in only one place, and the change
actually simplified it.
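
To make the new ownership rule concrete, here is the requeue path as it
looks with the patch applied (a condensed sketch taken from the diff
below; scsi_queue_insert() follows the same pattern):

	/*
	 * Requeueing now releases the scsi_cmnd and its buffers first,
	 * so nothing leaks and the request is re-prepared from scratch
	 * the next time the queue runs.
	 */
	static void scsi_requeue_command(struct request_queue *q,
					 struct scsi_cmnd *cmd)
	{
		struct request *req = cmd->request;
		unsigned long flags;

		scsi_unprep_request(req);	/* cmd is stale after this */
		spin_lock_irqsave(q->queue_lock, flags);
		blk_requeue_request(q, req);
		spin_unlock_irqrestore(q->queue_lock, flags);

		scsi_run_queue(q);
	}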

Signed-off-by: Alan Stern <stern@rowland.harvard.edu>

Rejections fixed up and
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>

 drivers/scsi/scsi_lib.c | +71 -40
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
···
 }
 
 static void scsi_run_queue(struct request_queue *q);
+static void scsi_release_buffers(struct scsi_cmnd *cmd);
+
+/*
+ * Function:    scsi_unprep_request()
+ *
+ * Purpose:     Remove all preparation done for a request, including its
+ *              associated scsi_cmnd, so that it can be requeued.
+ *
+ * Arguments:   req     - request to unprepare
+ *
+ * Lock status: Assumed that no locks are held upon entry.
+ *
+ * Returns:     Nothing.
+ */
+static void scsi_unprep_request(struct request *req)
+{
+	struct scsi_cmnd *cmd = req->special;
+
+	req->flags &= ~REQ_DONTPREP;
+	req->special = (req->flags & REQ_SPECIAL) ? cmd->sc_request : NULL;
+
+	scsi_release_buffers(cmd);
+	scsi_put_command(cmd);
+}
 
 /*
  * Function:    scsi_queue_insert()
···
  *              commands.
  * Notes:       This could be called either from an interrupt context or a
  *              normal process context.
+ * Notes:       Upon return, cmd is a stale pointer.
  */
 int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
 {
 	struct Scsi_Host *host = cmd->device->host;
 	struct scsi_device *device = cmd->device;
 	struct request_queue *q = device->request_queue;
+	struct request *req = cmd->request;
 	unsigned long flags;
 
 	SCSI_LOG_MLQUEUE(1,
···
 	 * function.  The SCSI request function detects the blocked condition
 	 * and plugs the queue appropriately.
 	 */
+	scsi_unprep_request(req);
 	spin_lock_irqsave(q->queue_lock, flags);
-	blk_requeue_request(q, cmd->request);
+	blk_requeue_request(q, req);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
 	scsi_run_queue(q);
···
  *		I/O errors in the middle of the request, in which case
  *		we need to request the blocks that come after the bad
  *		sector.
+ * Notes:	Upon return, cmd is a stale pointer.
  */
 static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
 {
+	struct request *req = cmd->request;
 	unsigned long flags;
 
-	cmd->request->flags &= ~REQ_DONTPREP;
-
+	scsi_unprep_request(req);
 	spin_lock_irqsave(q->queue_lock, flags);
-	blk_requeue_request(q, cmd->request);
+	blk_requeue_request(q, req);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
 	scsi_run_queue(q);
···
  *
  * Lock status: Assumed that lock is not held upon entry.
  *
- * Returns:     cmd if requeue done or required, NULL otherwise
+ * Returns:     cmd if requeue required, NULL otherwise.
  *
  * Notes:       This is called for block device requests in order to
  *              mark some number of sectors as complete.
  *
  *		We are guaranteeing that the request queue will be goosed
  *		at some point during this call.
+ * Notes:	If cmd was requeued, upon return it will be a stale pointer.
  */
 static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
 					  int bytes, int requeue)
···
 		if (!uptodate && blk_noretry_request(req))
 			end_that_request_chunk(req, 0, leftover);
 		else {
-			if (requeue)
+			if (requeue) {
 				/*
 				 * Bleah.  Leftovers again.  Stick the
 				 * leftovers in the front of the
 				 * queue, and goose the queue again.
 				 */
 				scsi_requeue_command(q, cmd);
-
+				cmd = NULL;
+			}
 			return cmd;
 		}
 	}
···
 		 * requeueing right here - we will requeue down below
 		 * when we handle the bad sectors.
 		 */
-		cmd = scsi_end_request(cmd, 1, good_bytes, result == 0);
 
 		/*
-		 * If the command completed without error, then either finish off the
-		 * rest of the command, or start a new one.
+		 * If the command completed without error, then either
+		 * finish off the rest of the command, or start a new one.
 		 */
-		if (result == 0 || cmd == NULL ) {
+		if (scsi_end_request(cmd, 1, good_bytes, result == 0) == NULL)
 			return;
-		}
 	}
 	/*
 	 * Now, if we were good little boys and girls, Santa left us a request
···
 			 * and quietly refuse further access.
 			 */
 			cmd->device->changed = 1;
-			cmd = scsi_end_request(cmd, 0,
+			scsi_end_request(cmd, 0,
 					this_count, 1);
 			return;
 		} else {
···
 				scsi_requeue_command(q, cmd);
 				result = 0;
 			} else {
-				cmd = scsi_end_request(cmd, 0, this_count, 1);
+				scsi_end_request(cmd, 0, this_count, 1);
 				return;
 			}
 			break;
···
 					dev_printk(KERN_INFO,
 						   &cmd->device->sdev_gendev,
 						   "Device not ready.\n");
-			cmd = scsi_end_request(cmd, 0, this_count, 1);
+			scsi_end_request(cmd, 0, this_count, 1);
 			return;
 		case VOLUME_OVERFLOW:
 			if (!(req->flags & REQ_QUIET)) {
···
 				__scsi_print_command(cmd->data_cmnd);
 				scsi_print_sense("", cmd);
 			}
-			cmd = scsi_end_request(cmd, 0, block_bytes, 1);
+			scsi_end_request(cmd, 0, block_bytes, 1);
 			return;
 		default:
 			break;
···
 		block_bytes = req->hard_cur_sectors << 9;
 		if (!block_bytes)
 			block_bytes = req->data_len;
-		cmd = scsi_end_request(cmd, 0, block_bytes, 1);
+		scsi_end_request(cmd, 0, block_bytes, 1);
 	}
 }
 EXPORT_SYMBOL(scsi_io_completion);
···
 }
 
 /*
- * Kill requests for a dead device
+ * Kill a request for a dead device
  */
-static void scsi_kill_requests(request_queue_t *q)
+static void scsi_kill_request(struct request *req, request_queue_t *q)
 {
-	struct request *req;
+	struct scsi_cmnd *cmd = req->special;
 
-	while ((req = elv_next_request(q)) != NULL) {
-		blkdev_dequeue_request(req);
-		req->flags |= REQ_QUIET;
-		while (end_that_request_first(req, 0, req->nr_sectors))
-			;
-		end_that_request_last(req);
+	spin_unlock(q->queue_lock);
+	if (unlikely(cmd == NULL)) {
+		printk(KERN_CRIT "impossible request in %s.\n",
+				 __FUNCTION__);
+		BUG();
 	}
+
+	scsi_init_cmd_errh(cmd);
+	cmd->result = DID_NO_CONNECT << 16;
+	atomic_inc(&cmd->device->iorequest_cnt);
+	__scsi_done(cmd);
+	spin_lock(q->queue_lock);
 }
 
···
 
 	if (!sdev) {
 		printk("scsi: killing requests for dead queue\n");
-		scsi_kill_requests(q);
+		while ((req = elv_next_request(q)) != NULL)
+			scsi_kill_request(req, q);
 		return;
 	}
···
 			printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
 			       sdev->host->host_no, sdev->id, sdev->lun);
 			blkdev_dequeue_request(req);
-			req->flags |= REQ_QUIET;
-			while (end_that_request_first(req, 0, req->nr_sectors))
-				;
-			end_that_request_last(req);
+			scsi_kill_request(req, q);
 			continue;
 		}
···
 		sdev->device_busy++;
 
 		spin_unlock(q->queue_lock);
+		cmd = req->special;
+		if (unlikely(cmd == NULL)) {
+			printk(KERN_CRIT "impossible request in %s.\n"
+					 "please mail a stack trace to "
+					 "linux-scsi@vger.kernel.org",
+					 __FUNCTION__);
+			BUG();
+		}
 		spin_lock(shost->host_lock);
 
 		if (!scsi_host_queue_ready(q, shost, sdev))
···
 		 * take the lock again.
 		 */
 		spin_unlock_irq(shost->host_lock);
-
-		cmd = req->special;
-		if (unlikely(cmd == NULL)) {
-			printk(KERN_CRIT "impossible request in %s.\n"
-					 "please mail a stack trace to "
-					 "linux-scsi@vger.kernel.org",
-					 __FUNCTION__);
-			BUG();
-		}
 
 		/*
 		 * Finally, initialize any error handling parameters, and set up
···
 	 * cases (host limits or settings) should run the queue at some
 	 * later time.
 	 */
+	scsi_unprep_request(req);
 	spin_lock_irq(q->queue_lock);
 	blk_requeue_request(q, req);
 	sdev->device_busy--;
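
For reference, the resulting scsi_end_request() contract at its single
call site in scsi_io_completion() reduces to this (a sketch of the hunks
above, not additional code):

	/*
	 * A NULL return means the request either completed fully or was
	 * requeued; in both cases cmd is stale and must not be touched.
	 * A non-NULL return means leftover bytes remain and the caller
	 * still owns cmd (the bad-sector handling that follows).
	 */
	if (scsi_end_request(cmd, 1, good_bytes, result == 0) == NULL)
		return;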