Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

block,scsi: fixup blk_get_request dead queue scenarios

The blk_get_request function may fail in low-memory conditions or during
device removal (even if __GFP_WAIT is set). To distinguish between these
errors, modify the blk_get_request call stack to return the appropriate
ERR_PTR. Verify that all callers check the return status and consider
IS_ERR instead of a simple NULL pointer check.

For consistency, make a similar change to the blk_mq_alloc_request leg
of blk_get_request. It may fail if the queue is dead, or the caller was
unwilling to wait.

Signed-off-by: Joe Lawrence <joe.lawrence@stratus.com>
Acked-by: Jiri Kosina <jkosina@suse.cz> [for pktdvd]
Acked-by: Boaz Harrosh <bharrosh@panasas.com> [for osd]
Reviewed-by: Jeff Moyer <jmoyer@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>

Authored by Joe Lawrence and committed by Jens Axboe
a492f075 eb571eea

+55 -51
+17 -17
block/blk-core.c
··· 933 933 * Get a free request from @q. This function may fail under memory 934 934 * pressure or if @q is dead. 935 935 * 936 - * Must be callled with @q->queue_lock held and, 937 - * Returns %NULL on failure, with @q->queue_lock held. 938 - * Returns !%NULL on success, with @q->queue_lock *not held*. 936 + * Must be called with @q->queue_lock held and, 937 + * Returns ERR_PTR on failure, with @q->queue_lock held. 938 + * Returns request pointer on success, with @q->queue_lock *not held*. 939 939 */ 940 940 static struct request *__get_request(struct request_list *rl, int rw_flags, 941 941 struct bio *bio, gfp_t gfp_mask) ··· 949 949 int may_queue; 950 950 951 951 if (unlikely(blk_queue_dying(q))) 952 - return NULL; 952 + return ERR_PTR(-ENODEV); 953 953 954 954 may_queue = elv_may_queue(q, rw_flags); 955 955 if (may_queue == ELV_MQUEUE_NO) ··· 974 974 * process is not a "batcher", and not 975 975 * exempted by the IO scheduler 976 976 */ 977 - return NULL; 977 + return ERR_PTR(-ENOMEM); 978 978 } 979 979 } 980 980 } ··· 992 992 * allocated with any setting of ->nr_requests 993 993 */ 994 994 if (rl->count[is_sync] >= (3 * q->nr_requests / 2)) 995 - return NULL; 995 + return ERR_PTR(-ENOMEM); 996 996 997 997 q->nr_rqs[is_sync]++; 998 998 rl->count[is_sync]++; ··· 1097 1097 rq_starved: 1098 1098 if (unlikely(rl->count[is_sync] == 0)) 1099 1099 rl->starved[is_sync] = 1; 1100 - return NULL; 1100 + return ERR_PTR(-ENOMEM); 1101 1101 } 1102 1102 1103 1103 /** ··· 1110 1110 * Get a free request from @q. If %__GFP_WAIT is set in @gfp_mask, this 1111 1111 * function keeps retrying under memory pressure and fails iff @q is dead. 1112 1112 * 1113 - * Must be callled with @q->queue_lock held and, 1114 - * Returns %NULL on failure, with @q->queue_lock held. 1115 - * Returns !%NULL on success, with @q->queue_lock *not held*. 1113 + * Must be called with @q->queue_lock held and, 1114 + * Returns ERR_PTR on failure, with @q->queue_lock held. 
1115 + * Returns request pointer on success, with @q->queue_lock *not held*. 1116 1116 */ 1117 1117 static struct request *get_request(struct request_queue *q, int rw_flags, 1118 1118 struct bio *bio, gfp_t gfp_mask) ··· 1125 1125 rl = blk_get_rl(q, bio); /* transferred to @rq on success */ 1126 1126 retry: 1127 1127 rq = __get_request(rl, rw_flags, bio, gfp_mask); 1128 - if (rq) 1128 + if (!IS_ERR(rq)) 1129 1129 return rq; 1130 1130 1131 1131 if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dying(q))) { 1132 1132 blk_put_rl(rl); 1133 - return NULL; 1133 + return rq; 1134 1134 } 1135 1135 1136 1136 /* wait on @rl and retry */ ··· 1167 1167 1168 1168 spin_lock_irq(q->queue_lock); 1169 1169 rq = get_request(q, rw, NULL, gfp_mask); 1170 - if (!rq) 1170 + if (IS_ERR(rq)) 1171 1171 spin_unlock_irq(q->queue_lock); 1172 1172 /* q->queue_lock is unlocked at this point */ 1173 1173 ··· 1219 1219 { 1220 1220 struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask); 1221 1221 1222 - if (unlikely(!rq)) 1223 - return ERR_PTR(-ENOMEM); 1222 + if (IS_ERR(rq)) 1223 + return rq; 1224 1224 1225 1225 blk_rq_set_block_pc(rq); 1226 1226 ··· 1615 1615 * Returns with the queue unlocked. 1616 1616 */ 1617 1617 req = get_request(q, rw_flags, bio, GFP_NOIO); 1618 - if (unlikely(!req)) { 1619 - bio_endio(bio, -ENODEV); /* @q is dead */ 1618 + if (IS_ERR(req)) { 1619 + bio_endio(bio, PTR_ERR(req)); /* @q is dead */ 1620 1620 goto out_unlock; 1621 1621 } 1622 1622
+6 -2
block/blk-mq.c
··· 218 218 struct blk_mq_hw_ctx *hctx; 219 219 struct request *rq; 220 220 struct blk_mq_alloc_data alloc_data; 221 + int ret; 221 222 222 - if (blk_mq_queue_enter(q)) 223 - return NULL; 223 + ret = blk_mq_queue_enter(q); 224 + if (ret) 225 + return ERR_PTR(ret); 224 226 225 227 ctx = blk_mq_get_ctx(q); 226 228 hctx = q->mq_ops->map_queue(q, ctx->cpu); ··· 242 240 ctx = alloc_data.ctx; 243 241 } 244 242 blk_mq_put_ctx(ctx); 243 + if (!rq) 244 + return ERR_PTR(-EWOULDBLOCK); 245 245 return rq; 246 246 } 247 247 EXPORT_SYMBOL(blk_mq_alloc_request);
+4 -4
block/bsg.c
··· 270 270 * map scatter-gather elements separately and string them to request 271 271 */ 272 272 rq = blk_get_request(q, rw, GFP_KERNEL); 273 - if (!rq) 274 - return ERR_PTR(-ENOMEM); 273 + if (IS_ERR(rq)) 274 + return rq; 275 275 blk_rq_set_block_pc(rq); 276 276 277 277 ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm); ··· 285 285 } 286 286 287 287 next_rq = blk_get_request(q, READ, GFP_KERNEL); 288 - if (!next_rq) { 289 - ret = -ENOMEM; 288 + if (IS_ERR(next_rq)) { 289 + ret = PTR_ERR(next_rq); 290 290 goto out; 291 291 } 292 292 rq->next_rq = next_rq;
+6 -6
block/scsi_ioctl.c
··· 318 318 at_head = 1; 319 319 320 320 rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL); 321 - if (!rq) 322 - return -ENOMEM; 321 + if (IS_ERR(rq)) 322 + return PTR_ERR(rq); 323 323 blk_rq_set_block_pc(rq); 324 324 325 325 if (blk_fill_sghdr_rq(q, rq, hdr, mode)) { ··· 448 448 } 449 449 450 450 rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT); 451 - if (!rq) { 452 - err = -ENODEV; 451 + if (IS_ERR(rq)) { 452 + err = PTR_ERR(rq); 453 453 goto error_free_buffer; 454 454 } 455 455 ··· 539 539 int err; 540 540 541 541 rq = blk_get_request(q, WRITE, __GFP_WAIT); 542 - if (!rq) 543 - return -ENODEV; 542 + if (IS_ERR(rq)) 543 + return PTR_ERR(rq); 544 544 blk_rq_set_block_pc(rq); 545 545 rq->timeout = BLK_DEFAULT_SG_TIMEOUT; 546 546 rq->cmd[0] = cmd;
+2 -2
drivers/block/paride/pd.c
··· 722 722 int err = 0; 723 723 724 724 rq = blk_get_request(disk->gd->queue, READ, __GFP_WAIT); 725 - if (!rq) 726 - return -ENODEV; 725 + if (IS_ERR(rq)) 726 + return PTR_ERR(rq); 727 727 728 728 rq->cmd_type = REQ_TYPE_SPECIAL; 729 729 rq->special = func;
+2 -2
drivers/block/pktcdvd.c
··· 704 704 705 705 rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ? 706 706 WRITE : READ, __GFP_WAIT); 707 - if (!rq) 708 - return -ENODEV; 707 + if (IS_ERR(rq)) 708 + return PTR_ERR(rq); 709 709 blk_rq_set_block_pc(rq); 710 710 711 711 if (cgc->buflen) {
+1 -1
drivers/block/sx8.c
··· 568 568 return NULL; 569 569 570 570 rq = blk_get_request(host->oob_q, WRITE /* bogus */, GFP_KERNEL); 571 - if (!rq) { 571 + if (IS_ERR(rq)) { 572 572 spin_lock_irqsave(&host->lock, flags); 573 573 carm_put_request(host, crq); 574 574 spin_unlock_irqrestore(&host->lock, flags);
+2 -2
drivers/cdrom/cdrom.c
··· 2180 2180 len = nr * CD_FRAMESIZE_RAW; 2181 2181 2182 2182 rq = blk_get_request(q, READ, GFP_KERNEL); 2183 - if (!rq) { 2184 - ret = -ENOMEM; 2183 + if (IS_ERR(rq)) { 2184 + ret = PTR_ERR(rq); 2185 2185 break; 2186 2186 } 2187 2187 blk_rq_set_block_pc(rq);
+1 -1
drivers/ide/ide-park.c
··· 46 46 * timeout has expired, so power management will be reenabled. 47 47 */ 48 48 rq = blk_get_request(q, READ, GFP_NOWAIT); 49 - if (unlikely(!rq)) 49 + if (IS_ERR(rq)) 50 50 goto out; 51 51 52 52 rq->cmd[0] = REQ_UNPARK_HEADS;
+1 -1
drivers/scsi/device_handler/scsi_dh_alua.c
··· 115 115 116 116 rq = blk_get_request(q, rw, GFP_NOIO); 117 117 118 - if (!rq) { 118 + if (IS_ERR(rq)) { 119 119 sdev_printk(KERN_INFO, sdev, 120 120 "%s: blk_get_request failed\n", __func__); 121 121 return NULL;
+1 -1
drivers/scsi/device_handler/scsi_dh_emc.c
··· 275 275 276 276 rq = blk_get_request(sdev->request_queue, 277 277 (cmd != INQUIRY) ? WRITE : READ, GFP_NOIO); 278 - if (!rq) { 278 + if (IS_ERR(rq)) { 279 279 sdev_printk(KERN_INFO, sdev, "get_req: blk_get_request failed"); 280 280 return NULL; 281 281 }
+2 -2
drivers/scsi/device_handler/scsi_dh_hp_sw.c
··· 117 117 118 118 retry: 119 119 req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO); 120 - if (!req) 120 + if (IS_ERR(req)) 121 121 return SCSI_DH_RES_TEMP_UNAVAIL; 122 122 123 123 blk_rq_set_block_pc(req); ··· 247 247 struct request *req; 248 248 249 249 req = blk_get_request(h->sdev->request_queue, WRITE, GFP_ATOMIC); 250 - if (!req) 250 + if (IS_ERR(req)) 251 251 return SCSI_DH_RES_TEMP_UNAVAIL; 252 252 253 253 blk_rq_set_block_pc(req);
+1 -1
drivers/scsi/device_handler/scsi_dh_rdac.c
··· 274 274 275 275 rq = blk_get_request(q, rw, GFP_NOIO); 276 276 277 - if (!rq) { 277 + if (IS_ERR(rq)) { 278 278 sdev_printk(KERN_INFO, sdev, 279 279 "get_rdac_req: blk_get_request failed.\n"); 280 280 return NULL;
+2 -2
drivers/scsi/osd/osd_initiator.c
··· 1567 1567 struct request *req; 1568 1568 1569 1569 req = blk_get_request(q, has_write ? WRITE : READ, flags); 1570 - if (unlikely(!req)) 1571 - return ERR_PTR(-ENOMEM); 1570 + if (IS_ERR(req)) 1571 + return req; 1572 1572 1573 1573 blk_rq_set_block_pc(req); 1574 1574 return req;
+1 -1
drivers/scsi/osst.c
··· 362 362 int write = (data_direction == DMA_TO_DEVICE); 363 363 364 364 req = blk_get_request(SRpnt->stp->device->request_queue, write, GFP_KERNEL); 365 - if (!req) 365 + if (IS_ERR(req)) 366 366 return DRIVER_ERROR << 24; 367 367 368 368 blk_rq_set_block_pc(req);
+1 -1
drivers/scsi/scsi_error.c
··· 1960 1960 * request becomes available 1961 1961 */ 1962 1962 req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL); 1963 - if (!req) 1963 + if (IS_ERR(req)) 1964 1964 return; 1965 1965 1966 1966 blk_rq_set_block_pc(req);
+1 -1
drivers/scsi/scsi_lib.c
··· 221 221 int ret = DRIVER_ERROR << 24; 222 222 223 223 req = blk_get_request(sdev->request_queue, write, __GFP_WAIT); 224 - if (!req) 224 + if (IS_ERR(req)) 225 225 return ret; 226 226 blk_rq_set_block_pc(req); 227 227
+2 -2
drivers/scsi/sg.c
··· 1711 1711 } 1712 1712 1713 1713 rq = blk_get_request(q, rw, GFP_ATOMIC); 1714 - if (!rq) { 1714 + if (IS_ERR(rq)) { 1715 1715 kfree(long_cmdp); 1716 - return -ENOMEM; 1716 + return PTR_ERR(rq); 1717 1717 } 1718 1718 1719 1719 blk_rq_set_block_pc(rq);
+1 -1
drivers/scsi/st.c
··· 490 490 491 491 req = blk_get_request(SRpnt->stp->device->request_queue, write, 492 492 GFP_KERNEL); 493 - if (!req) 493 + if (IS_ERR(req)) 494 494 return DRIVER_ERROR << 24; 495 495 496 496 blk_rq_set_block_pc(req);
+1 -1
drivers/target/target_core_pscsi.c
··· 1050 1050 req = blk_get_request(pdv->pdv_sd->request_queue, 1051 1051 (data_direction == DMA_TO_DEVICE), 1052 1052 GFP_KERNEL); 1053 - if (!req) { 1053 + if (IS_ERR(req)) { 1054 1054 pr_err("PSCSI: blk_get_request() failed\n"); 1055 1055 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1056 1056 goto fail;