Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge patch series "scsi_debug: Some minor improvements"

John Garry <john.g.garry@oracle.com> says:

This series contains a bunch of minor improvements to the driver. I
have another bunch waiting with more major changes.

Most of the changes are quite straightforward, and the only patches of
note are as follows:

- Fix the command abort feature, enabled with host option
SDEBUG_OPT_CMD_ABORT.

- Drop driver count of queued commands per device.

- Add poll mode completions to statistics. We already have poll mode
callback call count, so maybe it was intentional to omit poll mode
from the statistics.

Link: https://lore.kernel.org/r/20230313093114.1498305-1-john.g.garry@oracle.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>

+78 -135
drivers/scsi/scsi_debug.c
··· 288 288 uuid_t lu_name; 289 289 struct sdebug_host_info *sdbg_host; 290 290 unsigned long uas_bm[1]; 291 - atomic_t num_in_q; 292 291 atomic_t stopped; /* 1: by SSU, 2: device start */ 293 292 bool used; 294 293 ··· 323 324 void *map_storep; /* provisioning map */ 324 325 }; 325 326 326 - #define to_sdebug_host(d) \ 327 + #define dev_to_sdebug_host(d) \ 327 328 container_of(d, struct sdebug_host_info, dev) 329 + 330 + #define shost_to_sdebug_host(shost) \ 331 + dev_to_sdebug_host(shost->dma_dev) 328 332 329 333 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1, 330 334 SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3}; ··· 4930 4928 struct sdebug_queue *sqp; 4931 4929 struct sdebug_queued_cmd *sqcp; 4932 4930 struct scsi_cmnd *scp; 4933 - struct sdebug_dev_info *devip; 4934 4931 4935 4932 if (unlikely(aborted)) 4936 4933 sd_dp->aborted = false; ··· 4954 4953 sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx); 4955 4954 return; 4956 4955 } 4957 - devip = (struct sdebug_dev_info *)scp->device->hostdata; 4958 - if (likely(devip)) 4959 - atomic_dec(&devip->num_in_q); 4960 - else 4961 - pr_err("devip=NULL\n"); 4956 + 4962 4957 if (unlikely(atomic_read(&retired_max_queue) > 0)) 4963 4958 retiring = 1; 4964 4959 ··· 4983 4986 spin_unlock_irqrestore(&sqp->qc_lock, iflags); 4984 4987 if (unlikely(aborted)) { 4985 4988 if (sdebug_verbose) 4986 - pr_info("bypassing scsi_done() due to aborted cmd\n"); 4989 + pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n"); 4990 + blk_abort_request(scsi_cmd_to_rq(scp)); 4987 4991 return; 4988 4992 } 4989 4993 scsi_done(scp); /* callback to mid level */ ··· 5150 5152 } else { 5151 5153 devip->zmodel = BLK_ZONED_NONE; 5152 5154 } 5153 - devip->sdbg_host = sdbg_host; 5154 5155 devip->create_ts = ktime_get_boottime(); 5155 5156 atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 
2 : 0)); 5156 5157 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list); ··· 5163 5166 struct sdebug_dev_info *open_devip = NULL; 5164 5167 struct sdebug_dev_info *devip; 5165 5168 5166 - sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host); 5167 - if (!sdbg_host) { 5168 - pr_err("Host info NULL\n"); 5169 - return NULL; 5170 - } 5169 + sdbg_host = shost_to_sdebug_host(sdev->host); 5171 5170 5172 5171 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) { 5173 5172 if ((devip->used) && (devip->channel == sdev->channel) && ··· 5187 5194 open_devip->target = sdev->id; 5188 5195 open_devip->lun = sdev->lun; 5189 5196 open_devip->sdbg_host = sdbg_host; 5190 - atomic_set(&open_devip->num_in_q, 0); 5191 5197 set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm); 5192 5198 open_devip->used = true; 5193 5199 return open_devip; ··· 5257 5265 enum sdeb_defer_type l_defer_t; 5258 5266 struct sdebug_queue *sqp; 5259 5267 struct sdebug_queued_cmd *sqcp; 5260 - struct sdebug_dev_info *devip; 5261 5268 struct sdebug_defer *sd_dp; 5262 5269 5263 5270 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) { ··· 5271 5280 if (cmnd != sqcp->a_cmnd) 5272 5281 continue; 5273 5282 /* found */ 5274 - devip = (struct sdebug_dev_info *) 5275 - cmnd->device->hostdata; 5276 - if (devip) 5277 - atomic_dec(&devip->num_in_q); 5278 5283 sqcp->a_cmnd = NULL; 5279 5284 sd_dp = sqcp->sd_dp; 5280 5285 if (sd_dp) { ··· 5297 5310 enum sdeb_defer_type l_defer_t; 5298 5311 struct sdebug_queue *sqp; 5299 5312 struct sdebug_queued_cmd *sqcp; 5300 - struct sdebug_dev_info *devip; 5301 5313 struct sdebug_defer *sd_dp; 5302 5314 5303 5315 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) { ··· 5306 5320 sqcp = &sqp->qc_arr[k]; 5307 5321 if (sqcp->a_cmnd == NULL) 5308 5322 continue; 5309 - devip = (struct sdebug_dev_info *) 5310 - sqcp->a_cmnd->device->hostdata; 5311 - if (devip) 5312 - atomic_dec(&devip->num_in_q); 5313 5323 sqcp->a_cmnd = NULL; 5314 5324 sd_dp = 
sqcp->sd_dp; 5315 5325 if (sd_dp) { ··· 5344 5362 bool ok; 5345 5363 5346 5364 ++num_aborts; 5347 - if (SCpnt) { 5348 - ok = stop_queued_cmnd(SCpnt); 5349 - if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts)) 5350 - sdev_printk(KERN_INFO, SCpnt->device, 5351 - "%s: command%s found\n", __func__, 5352 - ok ? "" : " not"); 5353 - } 5365 + 5366 + ok = stop_queued_cmnd(SCpnt); 5367 + if (SDEBUG_OPT_ALL_NOISE & sdebug_opts) 5368 + sdev_printk(KERN_INFO, SCpnt->device, 5369 + "%s: command%s found\n", __func__, 5370 + ok ? "" : " not"); 5371 + 5354 5372 return SUCCESS; 5355 5373 } 5356 5374 5357 5375 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt) 5358 5376 { 5359 - ++num_dev_resets; 5360 - if (SCpnt && SCpnt->device) { 5361 - struct scsi_device *sdp = SCpnt->device; 5362 - struct sdebug_dev_info *devip = 5363 - (struct sdebug_dev_info *)sdp->hostdata; 5377 + struct scsi_device *sdp = SCpnt->device; 5378 + struct sdebug_dev_info *devip = sdp->hostdata; 5364 5379 5365 - if (SDEBUG_OPT_ALL_NOISE & sdebug_opts) 5366 - sdev_printk(KERN_INFO, sdp, "%s\n", __func__); 5367 - if (devip) 5368 - set_bit(SDEBUG_UA_POR, devip->uas_bm); 5369 - } 5380 + ++num_dev_resets; 5381 + 5382 + if (SDEBUG_OPT_ALL_NOISE & sdebug_opts) 5383 + sdev_printk(KERN_INFO, sdp, "%s\n", __func__); 5384 + if (devip) 5385 + set_bit(SDEBUG_UA_POR, devip->uas_bm); 5386 + 5370 5387 return SUCCESS; 5371 5388 } 5372 5389 5373 5390 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt) 5374 5391 { 5375 - struct sdebug_host_info *sdbg_host; 5392 + struct scsi_device *sdp = SCpnt->device; 5393 + struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host); 5376 5394 struct sdebug_dev_info *devip; 5377 - struct scsi_device *sdp; 5378 - struct Scsi_Host *hp; 5379 5395 int k = 0; 5380 5396 5381 5397 ++num_target_resets; 5382 - if (!SCpnt) 5383 - goto lie; 5384 - sdp = SCpnt->device; 5385 - if (!sdp) 5386 - goto lie; 5387 5398 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts) 5388 5399 
sdev_printk(KERN_INFO, sdp, "%s\n", __func__); 5389 - hp = sdp->host; 5390 - if (!hp) 5391 - goto lie; 5392 - sdbg_host = *(struct sdebug_host_info **)shost_priv(hp); 5393 - if (sdbg_host) { 5394 - list_for_each_entry(devip, 5395 - &sdbg_host->dev_info_list, 5396 - dev_list) 5397 - if (devip->target == sdp->id) { 5398 - set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm); 5399 - ++k; 5400 - } 5400 + 5401 + list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) { 5402 + if (devip->target == sdp->id) { 5403 + set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm); 5404 + ++k; 5405 + } 5401 5406 } 5407 + 5402 5408 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts) 5403 5409 sdev_printk(KERN_INFO, sdp, 5404 5410 "%s: %d device(s) found in target\n", __func__, k); 5405 - lie: 5411 + 5406 5412 return SUCCESS; 5407 5413 } 5408 5414 5409 5415 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt) 5410 5416 { 5411 - struct sdebug_host_info *sdbg_host; 5417 + struct scsi_device *sdp = SCpnt->device; 5418 + struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host); 5412 5419 struct sdebug_dev_info *devip; 5413 - struct scsi_device *sdp; 5414 - struct Scsi_Host *hp; 5415 5420 int k = 0; 5416 5421 5417 5422 ++num_bus_resets; 5418 - if (!(SCpnt && SCpnt->device)) 5419 - goto lie; 5420 - sdp = SCpnt->device; 5423 + 5421 5424 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts) 5422 5425 sdev_printk(KERN_INFO, sdp, "%s\n", __func__); 5423 - hp = sdp->host; 5424 - if (hp) { 5425 - sdbg_host = *(struct sdebug_host_info **)shost_priv(hp); 5426 - if (sdbg_host) { 5427 - list_for_each_entry(devip, 5428 - &sdbg_host->dev_info_list, 5429 - dev_list) { 5430 - set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm); 5431 - ++k; 5432 - } 5433 - } 5426 + 5427 + list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) { 5428 + set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm); 5429 + ++k; 5434 5430 } 5431 + 5435 5432 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts) 5436 5433 sdev_printk(KERN_INFO, sdp, 5437 5434 "%s: %d 
device(s) found in host\n", __func__, k); 5438 - lie: 5439 5435 return SUCCESS; 5440 5436 } 5441 5437 ··· 5424 5464 int k = 0; 5425 5465 5426 5466 ++num_host_resets; 5427 - if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts)) 5467 + if (SDEBUG_OPT_ALL_NOISE & sdebug_opts) 5428 5468 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__); 5429 5469 spin_lock(&sdebug_host_list_lock); 5430 5470 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) { ··· 5549 5589 int delta_jiff, int ndelay) 5550 5590 { 5551 5591 bool new_sd_dp; 5552 - bool inject = false; 5553 5592 bool polled = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_POLLED; 5554 - int k, num_in_q, qdepth; 5593 + int k; 5555 5594 unsigned long iflags; 5556 5595 u64 ns_from_boot = 0; 5557 5596 struct sdebug_queue *sqp; ··· 5574 5615 spin_unlock_irqrestore(&sqp->qc_lock, iflags); 5575 5616 return SCSI_MLQUEUE_HOST_BUSY; 5576 5617 } 5577 - num_in_q = atomic_read(&devip->num_in_q); 5578 - qdepth = cmnd->device->queue_depth; 5579 - if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) { 5580 - if (scsi_result) { 5581 - spin_unlock_irqrestore(&sqp->qc_lock, iflags); 5582 - goto respond_in_thread; 5583 - } else 5584 - scsi_result = device_qfull_result; 5585 - } else if (unlikely(sdebug_every_nth && 5586 - (SDEBUG_OPT_RARE_TSF & sdebug_opts) && 5587 - (scsi_result == 0))) { 5618 + 5619 + if (unlikely(sdebug_every_nth && (SDEBUG_OPT_RARE_TSF & sdebug_opts) && 5620 + (scsi_result == 0))) { 5621 + int num_in_q = scsi_device_busy(sdp); 5622 + int qdepth = cmnd->device->queue_depth; 5623 + 5588 5624 if ((num_in_q == (qdepth - 1)) && 5589 5625 (atomic_inc_return(&sdebug_a_tsf) >= 5590 5626 abs(sdebug_every_nth))) { 5591 5627 atomic_set(&sdebug_a_tsf, 0); 5592 - inject = true; 5593 5628 scsi_result = device_qfull_result; 5629 + 5630 + if (unlikely(SDEBUG_OPT_Q_NOISE & sdebug_opts)) 5631 + sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, <inject> status: TASK SET FULL\n", 5632 + __func__, num_in_q); 5594 5633 } 5595 5634 
} 5596 5635 ··· 5604 5647 goto respond_in_thread; 5605 5648 } 5606 5649 set_bit(k, sqp->in_use_bm); 5607 - atomic_inc(&devip->num_in_q); 5608 5650 sqcp = &sqp->qc_arr[k]; 5609 5651 sqcp->a_cmnd = cmnd; 5610 5652 cmnd->host_scribble = (unsigned char *)sqcp; ··· 5613 5657 if (!sd_dp) { 5614 5658 sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC); 5615 5659 if (!sd_dp) { 5616 - atomic_dec(&devip->num_in_q); 5617 5660 clear_bit(k, sqp->in_use_bm); 5618 5661 return SCSI_MLQUEUE_HOST_BUSY; 5619 5662 } ··· 5672 5717 if (kt <= d) { /* elapsed duration >= kt */ 5673 5718 spin_lock_irqsave(&sqp->qc_lock, iflags); 5674 5719 sqcp->a_cmnd = NULL; 5675 - atomic_dec(&devip->num_in_q); 5676 5720 clear_bit(k, sqp->in_use_bm); 5677 5721 spin_unlock_irqrestore(&sqp->qc_lock, iflags); 5678 5722 if (new_sd_dp) ··· 5713 5759 sd_dp->issuing_cpu = raw_smp_processor_id(); 5714 5760 } else { /* jdelay < 0, use work queue */ 5715 5761 if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) && 5716 - atomic_read(&sdeb_inject_pending))) 5762 + atomic_read(&sdeb_inject_pending))) { 5717 5763 sd_dp->aborted = true; 5764 + atomic_set(&sdeb_inject_pending, 0); 5765 + sdev_printk(KERN_INFO, sdp, "abort request tag=%#x\n", 5766 + blk_mq_unique_tag_to_tag(get_tag(cmnd))); 5767 + } 5768 + 5718 5769 if (polled) { 5719 5770 sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot); 5720 5771 spin_lock_irqsave(&sqp->qc_lock, iflags); ··· 5744 5785 } 5745 5786 if (sdebug_statistics) 5746 5787 sd_dp->issuing_cpu = raw_smp_processor_id(); 5747 - if (unlikely(sd_dp->aborted)) { 5748 - sdev_printk(KERN_INFO, sdp, "abort request tag %d\n", 5749 - scsi_cmd_to_rq(cmnd)->tag); 5750 - blk_abort_request(scsi_cmd_to_rq(cmnd)); 5751 - atomic_set(&sdeb_inject_pending, 0); 5752 - sd_dp->aborted = false; 5753 - } 5754 5788 } 5755 - if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result)) 5756 - sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__, 5757 - num_in_q, (inject ? 
"<inject> " : ""), "status: TASK SET FULL"); 5789 + 5758 5790 return 0; 5759 5791 5760 5792 respond_in_thread: /* call back to mid-layer using invocation thread */ ··· 7115 7165 { 7116 7166 struct sdebug_host_info *sdbg_host; 7117 7167 7118 - sdbg_host = to_sdebug_host(dev); 7168 + sdbg_host = dev_to_sdebug_host(dev); 7119 7169 kfree(sdbg_host); 7120 7170 } 7121 7171 ··· 7350 7400 7351 7401 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth) 7352 7402 { 7353 - int num_in_q = 0; 7354 - struct sdebug_dev_info *devip; 7403 + struct sdebug_dev_info *devip = sdev->hostdata; 7404 + 7405 + if (!devip) 7406 + return -ENODEV; 7355 7407 7356 7408 block_unblock_all_queues(true); 7357 - devip = (struct sdebug_dev_info *)sdev->hostdata; 7358 - if (NULL == devip) { 7359 - block_unblock_all_queues(false); 7360 - return -ENODEV; 7361 - } 7362 - num_in_q = atomic_read(&devip->num_in_q); 7363 - 7364 7409 if (qdepth > SDEBUG_CANQUEUE) { 7365 7410 qdepth = SDEBUG_CANQUEUE; 7366 7411 pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__, ··· 7366 7421 if (qdepth != sdev->queue_depth) 7367 7422 scsi_change_queue_depth(sdev, qdepth); 7368 7423 7369 - if (SDEBUG_OPT_Q_NOISE & sdebug_opts) { 7370 - sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n", 7371 - __func__, qdepth, num_in_q); 7372 - } 7424 + if (SDEBUG_OPT_Q_NOISE & sdebug_opts) 7425 + sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d\n", __func__, qdepth); 7373 7426 block_unblock_all_queues(false); 7374 7427 return sdev->queue_depth; 7375 7428 } ··· 7469 7526 struct sdebug_queue *sqp; 7470 7527 struct sdebug_queued_cmd *sqcp; 7471 7528 struct scsi_cmnd *scp; 7472 - struct sdebug_dev_info *devip; 7473 7529 struct sdebug_defer *sd_dp; 7474 7530 7475 7531 sqp = sdebug_q_arr + queue_num; ··· 7506 7564 7507 7565 } else /* ignoring non REQ_POLLED requests */ 7508 7566 continue; 7509 - devip = (struct sdebug_dev_info *)scp->device->hostdata; 7510 - if (likely(devip)) 7511 - 
atomic_dec(&devip->num_in_q); 7512 - else 7513 - pr_err("devip=NULL from %s\n", __func__); 7514 7567 if (unlikely(atomic_read(&retired_max_queue) > 0)) 7515 7568 retiring = true; 7516 7569 ··· 7531 7594 } 7532 7595 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE); 7533 7596 spin_unlock_irqrestore(&sqp->qc_lock, iflags); 7597 + 7598 + if (sdebug_statistics) { 7599 + atomic_inc(&sdebug_completions); 7600 + if (raw_smp_processor_id() != sd_dp->issuing_cpu) 7601 + atomic_inc(&sdebug_miss_cpus); 7602 + } 7603 + 7534 7604 scsi_done(scp); /* callback to mid level */ 7535 7605 num_entries++; 7536 7606 spin_lock_irqsave(&sqp->qc_lock, iflags); ··· 7756 7812 struct Scsi_Host *hpnt; 7757 7813 int hprot; 7758 7814 7759 - sdbg_host = to_sdebug_host(dev); 7815 + sdbg_host = dev_to_sdebug_host(dev); 7760 7816 7761 7817 sdebug_driver_template.can_queue = sdebug_max_queue; 7762 7818 sdebug_driver_template.cmd_per_lun = sdebug_max_queue; 7763 7819 if (!sdebug_clustering) 7764 7820 sdebug_driver_template.dma_boundary = PAGE_SIZE - 1; 7765 7821 7766 - hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host)); 7822 + hpnt = scsi_host_alloc(&sdebug_driver_template, 0); 7767 7823 if (NULL == hpnt) { 7768 7824 pr_err("scsi_host_alloc failed\n"); 7769 7825 error = -ENODEV; ··· 7806 7862 hpnt->nr_maps = 3; 7807 7863 7808 7864 sdbg_host->shost = hpnt; 7809 - *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host; 7810 7865 if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id)) 7811 7866 hpnt->max_id = sdebug_num_tgts + 1; 7812 7867 else ··· 7879 7936 struct sdebug_host_info *sdbg_host; 7880 7937 struct sdebug_dev_info *sdbg_devinfo, *tmp; 7881 7938 7882 - sdbg_host = to_sdebug_host(dev); 7939 + sdbg_host = dev_to_sdebug_host(dev); 7883 7940 7884 7941 scsi_remove_host(sdbg_host->shost); 7885 7942