Merge tag 'block-6.19-20251208' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux

Pull block updates from Jens Axboe:
"Followup set of fixes and updates for block for the 6.19 merge window.

NVMe had some last-minute debates that led to dropping some patches
from that tree, which is why the initial PR didn't have NVMe included.
It's here now. This pull request contains:

- NVMe pull request via Keith:
- Subsystem usage cleanups (Max)
- Endpoint device fixes (Shin'ichiro)
- Debug statements (Gerd)
- FC fabrics cleanups and fixes (Daniel)
- Consistent alloc API usage (Israel)
- Code comment updates (Chu)
- Authentication retry fix (Justin)

- Fix a memory leak in the discard ioctl code when the task is
interrupted by a signal at just the wrong time

- Zoned write plugging fixes

- Add ioctls for reading persistent reservation keys and the held reservation

- Enable per-cpu bio caching by default

- Various little fixes and tweaks"

* tag 'block-6.19-20251208' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux: (27 commits)
nvme-fabrics: add ENOKEY to no retry criteria for authentication failures
nvme-auth: use kvfree() for memory allocated with kvcalloc()
nvmet-tcp: use kvcalloc for commands array
nvmet-rdma: use kvcalloc for commands and responses arrays
nvme: fix typo error in nvme target
nvmet-fc: use pr_* print macros instead of dev_*
nvmet-fcloop: remove unused lsdir member.
nvmet-fcloop: check all request and response have been processed
nvme-fc: check all request and response have been processed
block: fix memory leak in __blkdev_issue_zero_pages
block: fix comment for op_is_zone_mgmt() to include RESET_ALL
block: Clear BLK_ZONE_WPLUG_PLUGGED when aborting plugged BIOs
blk-mq: Abort suspend when wakeup events are pending
blk-mq: add blk_rq_nr_bvec() helper
block: add IOC_PR_READ_RESERVATION ioctl
block: add IOC_PR_READ_KEYS ioctl
nvme: reject invalid pr_read_keys() num_keys values
scsi: sd: reject invalid pr_read_keys() num_keys values
block: enable per-cpu bio cache by default
block: use bio_alloc_bioset for passthru IO by default
...

+278 -153
+12 -14
block/bio.c
··· 517 if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_vecs > 0)) 518 return NULL; 519 520 - if (opf & REQ_ALLOC_CACHE) { 521 - if (bs->cache && nr_vecs <= BIO_INLINE_VECS) { 522 - bio = bio_alloc_percpu_cache(bdev, nr_vecs, opf, 523 - gfp_mask, bs); 524 - if (bio) 525 - return bio; 526 - /* 527 - * No cached bio available, bio returned below marked with 528 - * REQ_ALLOC_CACHE to particpate in per-cpu alloc cache. 529 - */ 530 - } else { 531 - opf &= ~REQ_ALLOC_CACHE; 532 - } 533 - } 534 535 /* 536 * submit_bio_noacct() converts recursion to iteration; this means if
··· 517 if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_vecs > 0)) 518 return NULL; 519 520 + if (bs->cache && nr_vecs <= BIO_INLINE_VECS) { 521 + opf |= REQ_ALLOC_CACHE; 522 + bio = bio_alloc_percpu_cache(bdev, nr_vecs, opf, 523 + gfp_mask, bs); 524 + if (bio) 525 + return bio; 526 + /* 527 + * No cached bio available, bio returned below marked with 528 + * REQ_ALLOC_CACHE to participate in per-cpu alloc cache. 529 + */ 530 + } else 531 + opf &= ~REQ_ALLOC_CACHE; 532 533 /* 534 * submit_bio_noacct() converts recursion to iteration; this means if
+3 -3
block/blk-lib.c
··· 202 unsigned int nr_vecs = __blkdev_sectors_to_bio_pages(nr_sects); 203 struct bio *bio; 204 205 - bio = bio_alloc(bdev, nr_vecs, REQ_OP_WRITE, gfp_mask); 206 - bio->bi_iter.bi_sector = sector; 207 - 208 if ((flags & BLKDEV_ZERO_KILLABLE) && 209 fatal_signal_pending(current)) 210 break; 211 212 do { 213 unsigned int len;
··· 202 unsigned int nr_vecs = __blkdev_sectors_to_bio_pages(nr_sects); 203 struct bio *bio; 204 205 if ((flags & BLKDEV_ZERO_KILLABLE) && 206 fatal_signal_pending(current)) 207 break; 208 + 209 + bio = bio_alloc(bdev, nr_vecs, REQ_OP_WRITE, gfp_mask); 210 + bio->bi_iter.bi_sector = sector; 211 212 do { 213 unsigned int len;
+36 -54
block/blk-map.c
··· 37 return bmd; 38 } 39 40 /** 41 * bio_copy_from_iter - copy all pages from iov_iter to bio 42 * @bio: The &struct bio which describes the I/O as destination ··· 173 nr_pages = bio_max_segs(DIV_ROUND_UP(offset + len, PAGE_SIZE)); 174 175 ret = -ENOMEM; 176 - bio = bio_kmalloc(nr_pages, gfp_mask); 177 if (!bio) 178 goto out_bmd; 179 - bio_init_inline(bio, NULL, nr_pages, req_op(rq)); 180 181 if (map_data) { 182 nr_pages = 1U << map_data->page_order; ··· 251 cleanup: 252 if (!map_data) 253 bio_free_pages(bio); 254 - bio_uninit(bio); 255 - kfree(bio); 256 out_bmd: 257 kfree(bmd); 258 return ret; 259 - } 260 - 261 - static void blk_mq_map_bio_put(struct bio *bio) 262 - { 263 - if (bio->bi_opf & REQ_ALLOC_CACHE) { 264 - bio_put(bio); 265 - } else { 266 - bio_uninit(bio); 267 - kfree(bio); 268 - } 269 - } 270 - 271 - static struct bio *blk_rq_map_bio_alloc(struct request *rq, 272 - unsigned int nr_vecs, gfp_t gfp_mask) 273 - { 274 - struct block_device *bdev = rq->q->disk ? rq->q->disk->part0 : NULL; 275 - struct bio *bio; 276 - 277 - if (rq->cmd_flags & REQ_ALLOC_CACHE && (nr_vecs <= BIO_INLINE_VECS)) { 278 - bio = bio_alloc_bioset(bdev, nr_vecs, rq->cmd_flags, gfp_mask, 279 - &fs_bio_set); 280 - if (!bio) 281 - return NULL; 282 - } else { 283 - bio = bio_kmalloc(nr_vecs, gfp_mask); 284 - if (!bio) 285 - return NULL; 286 - bio_init_inline(bio, bdev, nr_vecs, req_op(rq)); 287 - } 288 - return bio; 289 } 290 291 static int bio_map_user_iov(struct request *rq, struct iov_iter *iter, ··· 305 static void bio_map_kern_endio(struct bio *bio) 306 { 307 bio_invalidate_vmalloc_pages(bio); 308 - bio_uninit(bio); 309 - kfree(bio); 310 } 311 312 - static struct bio *bio_map_kern(void *data, unsigned int len, enum req_op op, 313 gfp_t gfp_mask) 314 { 315 unsigned int nr_vecs = bio_add_max_vecs(data, len); 316 struct bio *bio; 317 318 - bio = bio_kmalloc(nr_vecs, gfp_mask); 319 if (!bio) 320 return ERR_PTR(-ENOMEM); 321 - bio_init_inline(bio, NULL, nr_vecs, op); 322 if (is_vmalloc_addr(data)) { 323 bio->bi_private = data; 324 if (!bio_add_vmalloc(bio, data, len)) { 325 - bio_uninit(bio); 326 - kfree(bio); 327 return ERR_PTR(-EINVAL); 328 } 329 } else { ··· 334 static void bio_copy_kern_endio(struct bio *bio) 335 { 336 bio_free_pages(bio); 337 - bio_uninit(bio); 338 - kfree(bio); 339 } 340 341 static void bio_copy_kern_endio_read(struct bio *bio) ··· 353 354 /** 355 * bio_copy_kern - copy kernel address into bio 356 * @data: pointer to buffer to copy 357 * @len: length in bytes 358 * @op: bio/request operation ··· 362 * copy the kernel address into a bio suitable for io to a block 363 * device. Returns an error pointer in case of error. 
364 */ 365 - static struct bio *bio_copy_kern(void *data, unsigned int len, enum req_op op, 366 gfp_t gfp_mask) 367 { 368 unsigned long kaddr = (unsigned long)data; 369 unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; 370 unsigned long start = kaddr >> PAGE_SHIFT; ··· 380 return ERR_PTR(-EINVAL); 381 382 nr_pages = end - start; 383 - bio = bio_kmalloc(nr_pages, gfp_mask); 384 if (!bio) 385 return ERR_PTR(-ENOMEM); 386 - bio_init_inline(bio, NULL, nr_pages, op); 387 388 while (len) { 389 struct page *page; ··· 416 417 cleanup: 418 bio_free_pages(bio); 419 - bio_uninit(bio); 420 - kfree(bio); 421 return ERR_PTR(-ENOMEM); 422 } 423 ··· 663 return -EINVAL; 664 665 if (!blk_rq_aligned(rq->q, addr, len) || object_is_on_stack(kbuf)) 666 - bio = bio_copy_kern(kbuf, len, req_op(rq), gfp_mask); 667 else 668 - bio = bio_map_kern(kbuf, len, req_op(rq), gfp_mask); 669 670 if (IS_ERR(bio)) 671 return PTR_ERR(bio); 672 673 ret = blk_rq_append_bio(rq, bio); 674 - if (unlikely(ret)) { 675 - bio_uninit(bio); 676 - kfree(bio); 677 - } 678 return ret; 679 } 680 EXPORT_SYMBOL(blk_rq_map_kern);
··· 37 return bmd; 38 } 39 40 + static inline void blk_mq_map_bio_put(struct bio *bio) 41 + { 42 + bio_put(bio); 43 + } 44 + 45 + static struct bio *blk_rq_map_bio_alloc(struct request *rq, 46 + unsigned int nr_vecs, gfp_t gfp_mask) 47 + { 48 + struct block_device *bdev = rq->q->disk ? rq->q->disk->part0 : NULL; 49 + struct bio *bio; 50 + 51 + bio = bio_alloc_bioset(bdev, nr_vecs, rq->cmd_flags, gfp_mask, 52 + &fs_bio_set); 53 + if (!bio) 54 + return NULL; 55 + 56 + return bio; 57 + } 58 + 59 /** 60 * bio_copy_from_iter - copy all pages from iov_iter to bio 61 * @bio: The &struct bio which describes the I/O as destination ··· 154 nr_pages = bio_max_segs(DIV_ROUND_UP(offset + len, PAGE_SIZE)); 155 156 ret = -ENOMEM; 157 + bio = blk_rq_map_bio_alloc(rq, nr_pages, gfp_mask); 158 if (!bio) 159 goto out_bmd; 160 161 if (map_data) { 162 nr_pages = 1U << map_data->page_order; ··· 233 cleanup: 234 if (!map_data) 235 bio_free_pages(bio); 236 + blk_mq_map_bio_put(bio); 237 out_bmd: 238 kfree(bmd); 239 return ret; 240 } 241 242 static int bio_map_user_iov(struct request *rq, struct iov_iter *iter, ··· 318 static void bio_map_kern_endio(struct bio *bio) 319 { 320 bio_invalidate_vmalloc_pages(bio); 321 + blk_mq_map_bio_put(bio); 322 } 323 324 + static struct bio *bio_map_kern(struct request *rq, void *data, unsigned int len, 325 gfp_t gfp_mask) 326 { 327 unsigned int nr_vecs = bio_add_max_vecs(data, len); 328 struct bio *bio; 329 330 + bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask); 331 if (!bio) 332 return ERR_PTR(-ENOMEM); 333 + 334 if (is_vmalloc_addr(data)) { 335 bio->bi_private = data; 336 if (!bio_add_vmalloc(bio, data, len)) { 337 + blk_mq_map_bio_put(bio); 338 return ERR_PTR(-EINVAL); 339 } 340 } else { ··· 349 static void bio_copy_kern_endio(struct bio *bio) 350 { 351 bio_free_pages(bio); 352 + blk_mq_map_bio_put(bio); 353 } 354 355 static void bio_copy_kern_endio_read(struct bio *bio) ··· 369 370 /** 371 * bio_copy_kern - copy kernel address into bio 372 + * @rq: request to fill 373 * @data: pointer to buffer to copy 374 * @len: length in bytes 375 * @op: bio/request operation ··· 377 * copy the kernel address into a bio suitable for io to a block 378 * device. Returns an error pointer in case of error. 379 */ 380 + static struct bio *bio_copy_kern(struct request *rq, void *data, unsigned int len, 381 gfp_t gfp_mask) 382 { 383 + enum req_op op = req_op(rq); 384 unsigned long kaddr = (unsigned long)data; 385 unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; 386 unsigned long start = kaddr >> PAGE_SHIFT; ··· 394 return ERR_PTR(-EINVAL); 395 396 nr_pages = end - start; 397 + bio = blk_rq_map_bio_alloc(rq, nr_pages, gfp_mask); 398 if (!bio) 399 return ERR_PTR(-ENOMEM); 400 401 while (len) { 402 struct page *page; ··· 431 432 cleanup: 433 bio_free_pages(bio); 434 + blk_mq_map_bio_put(bio); 435 return ERR_PTR(-ENOMEM); 436 } 437 ··· 679 return -EINVAL; 680 681 if (!blk_rq_aligned(rq->q, addr, len) || object_is_on_stack(kbuf)) 682 + bio = bio_copy_kern(rq, kbuf, len, gfp_mask); 683 else 684 + bio = bio_map_kern(rq, kbuf, len, gfp_mask); 685 686 if (IS_ERR(bio)) 687 return PTR_ERR(bio); 688 689 ret = blk_rq_append_bio(rq, bio); 690 + if (unlikely(ret)) 691 + blk_mq_map_bio_put(bio); 692 return ret; 693 } 694 EXPORT_SYMBOL(blk_rq_map_kern);
+16 -2
block/blk-mq.c
··· 23 #include <linux/cache.h> 24 #include <linux/sched/topology.h> 25 #include <linux/sched/signal.h> 26 #include <linux/delay.h> 27 #include <linux/crash_dump.h> 28 #include <linux/prefetch.h> ··· 3719 { 3720 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node, 3721 struct blk_mq_hw_ctx, cpuhp_online); 3722 3723 if (blk_mq_hctx_has_online_cpu(hctx, cpu)) 3724 return 0; ··· 3740 * frozen and there are no requests. 3741 */ 3742 if (percpu_ref_tryget(&hctx->queue->q_usage_counter)) { 3743 - while (blk_mq_hctx_has_requests(hctx)) 3744 msleep(5); 3745 percpu_ref_put(&hctx->queue->q_usage_counter); 3746 } 3747 3748 - return 0; 3749 } 3750 3751 /*
··· 23 #include <linux/cache.h> 24 #include <linux/sched/topology.h> 25 #include <linux/sched/signal.h> 26 + #include <linux/suspend.h> 27 #include <linux/delay.h> 28 #include <linux/crash_dump.h> 29 #include <linux/prefetch.h> ··· 3718 { 3719 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node, 3720 struct blk_mq_hw_ctx, cpuhp_online); 3721 + int ret = 0; 3722 3723 if (blk_mq_hctx_has_online_cpu(hctx, cpu)) 3724 return 0; ··· 3738 * frozen and there are no requests. 3739 */ 3740 if (percpu_ref_tryget(&hctx->queue->q_usage_counter)) { 3741 + while (blk_mq_hctx_has_requests(hctx)) { 3742 + /* 3743 + * The wakeup capable IRQ handler of block device is 3744 + * not called during suspend. Skip the loop by checking 3745 + * pm_wakeup_pending to prevent the deadlock and improve 3746 + * suspend latency. 3747 + */ 3748 + if (pm_wakeup_pending()) { 3749 + clear_bit(BLK_MQ_S_INACTIVE, &hctx->state); 3750 + ret = -EBUSY; 3751 + break; 3752 + } 3753 msleep(5); 3754 + } 3755 percpu_ref_put(&hctx->queue->q_usage_counter); 3756 } 3757 3758 + return ret; 3759 } 3760 3761 /*
+4
block/blk-zoned.c
··· 741 { 742 struct bio *bio; 743 744 if (bio_list_empty(&zwplug->bio_list)) 745 return; 746 ··· 750 zwplug->disk->disk_name, zwplug->zone_no); 751 while ((bio = bio_list_pop(&zwplug->bio_list))) 752 blk_zone_wplug_bio_io_error(zwplug, bio); 753 } 754 755 /*
··· 741 { 742 struct bio *bio; 743 744 + lockdep_assert_held(&zwplug->lock); 745 + 746 if (bio_list_empty(&zwplug->bio_list)) 747 return; 748 ··· 748 zwplug->disk->disk_name, zwplug->zone_no); 749 while ((bio = bio_list_pop(&zwplug->bio_list))) 750 blk_zone_wplug_bio_io_error(zwplug, bio); 751 + 752 + zwplug->flags &= ~BLK_ZONE_WPLUG_PLUGGED; 753 } 754 755 /*
-4
block/fops.c
··· 184 loff_t pos = iocb->ki_pos; 185 int ret = 0; 186 187 - if (iocb->ki_flags & IOCB_ALLOC_CACHE) 188 - opf |= REQ_ALLOC_CACHE; 189 bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL, 190 &blkdev_dio_pool); 191 dio = container_of(bio, struct blkdev_dio, bio); ··· 331 loff_t pos = iocb->ki_pos; 332 int ret = 0; 333 334 - if (iocb->ki_flags & IOCB_ALLOC_CACHE) 335 - opf |= REQ_ALLOC_CACHE; 336 bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL, 337 &blkdev_dio_pool); 338 dio = container_of(bio, struct blkdev_dio, bio);
··· 184 loff_t pos = iocb->ki_pos; 185 int ret = 0; 186 187 bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL, 188 &blkdev_dio_pool); 189 dio = container_of(bio, struct blkdev_dio, bio); ··· 333 loff_t pos = iocb->ki_pos; 334 int ret = 0; 335 336 bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL, 337 &blkdev_dio_pool); 338 dio = container_of(bio, struct blkdev_dio, bio);
+84
block/ioctl.c
··· 423 return ops->pr_clear(bdev, c.key); 424 } 425 426 static int blkdev_flushbuf(struct block_device *bdev, unsigned cmd, 427 unsigned long arg) 428 { ··· 725 return blkdev_pr_preempt(bdev, mode, argp, true); 726 case IOC_PR_CLEAR: 727 return blkdev_pr_clear(bdev, mode, argp); 728 default: 729 return blk_get_meta_cap(bdev, cmd, argp); 730 }
··· 423 return ops->pr_clear(bdev, c.key); 424 } 425 426 + static int blkdev_pr_read_keys(struct block_device *bdev, blk_mode_t mode, 427 + struct pr_read_keys __user *arg) 428 + { 429 + const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops; 430 + struct pr_keys *keys_info; 431 + struct pr_read_keys read_keys; 432 + u64 __user *keys_ptr; 433 + size_t keys_info_len; 434 + size_t keys_copy_len; 435 + int ret; 436 + 437 + if (!blkdev_pr_allowed(bdev, mode)) 438 + return -EPERM; 439 + if (!ops || !ops->pr_read_keys) 440 + return -EOPNOTSUPP; 441 + 442 + if (copy_from_user(&read_keys, arg, sizeof(read_keys))) 443 + return -EFAULT; 444 + 445 + keys_info_len = struct_size(keys_info, keys, read_keys.num_keys); 446 + if (keys_info_len == SIZE_MAX) 447 + return -EINVAL; 448 + 449 + keys_info = kzalloc(keys_info_len, GFP_KERNEL); 450 + if (!keys_info) 451 + return -ENOMEM; 452 + 453 + keys_info->num_keys = read_keys.num_keys; 454 + 455 + ret = ops->pr_read_keys(bdev, keys_info); 456 + if (ret) 457 + goto out; 458 + 459 + /* Copy out individual keys */ 460 + keys_ptr = u64_to_user_ptr(read_keys.keys_ptr); 461 + keys_copy_len = min(read_keys.num_keys, keys_info->num_keys) * 462 + sizeof(keys_info->keys[0]); 463 + 464 + if (copy_to_user(keys_ptr, keys_info->keys, keys_copy_len)) { 465 + ret = -EFAULT; 466 + goto out; 467 + } 468 + 469 + /* Copy out the arg struct */ 470 + read_keys.generation = keys_info->generation; 471 + read_keys.num_keys = keys_info->num_keys; 472 + 473 + if (copy_to_user(arg, &read_keys, sizeof(read_keys))) 474 + ret = -EFAULT; 475 + out: 476 + kfree(keys_info); 477 + return ret; 478 + } 479 + 480 + static int blkdev_pr_read_reservation(struct block_device *bdev, 481 + blk_mode_t mode, struct pr_read_reservation __user *arg) 482 + { 483 + const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops; 484 + struct pr_held_reservation rsv = {}; 485 + struct pr_read_reservation out = {}; 486 + int ret; 487 + 488 + if (!blkdev_pr_allowed(bdev, mode)) 489 + return -EPERM; 490 + if (!ops || !ops->pr_read_reservation) 491 + return -EOPNOTSUPP; 492 + 493 + ret = ops->pr_read_reservation(bdev, &rsv); 494 + if (ret) 495 + return ret; 496 + 497 + out.key = rsv.key; 498 + out.generation = rsv.generation; 499 + out.type = rsv.type; 500 + 501 + if (copy_to_user(arg, &out, sizeof(out))) 502 + return -EFAULT; 503 + return 0; 504 + } 505 + 506 static int blkdev_flushbuf(struct block_device *bdev, unsigned cmd, 507 unsigned long arg) 508 { ··· 645 return blkdev_pr_preempt(bdev, mode, argp, true); 646 case IOC_PR_CLEAR: 647 return blkdev_pr_clear(bdev, mode, argp); 648 + case IOC_PR_READ_KEYS: 649 + return blkdev_pr_read_keys(bdev, mode, argp); 650 + case IOC_PR_READ_RESERVATION: 651 + return blkdev_pr_read_reservation(bdev, mode, argp); 652 default: 653 return blk_get_meta_cap(bdev, cmd, argp); 654 }
+2 -3
drivers/block/loop.c
··· 348 struct file *file = lo->lo_backing_file; 349 struct bio_vec tmp; 350 unsigned int offset; 351 - int nr_bvec = 0; 352 int ret; 353 354 - rq_for_each_bvec(tmp, rq, rq_iter) 355 - nr_bvec++; 356 357 if (rq->bio != rq->biotail) { 358
··· 348 struct file *file = lo->lo_backing_file; 349 struct bio_vec tmp; 350 unsigned int offset; 351 + unsigned int nr_bvec; 352 int ret; 353 354 + nr_bvec = blk_rq_nr_bvec(rq); 355 356 if (rq->bio != rq->biotail) { 357
+2 -3
drivers/block/zloop.c
··· 394 struct bio_vec tmp; 395 unsigned long flags; 396 sector_t zone_end; 397 - int nr_bvec = 0; 398 int ret; 399 400 atomic_set(&cmd->ref, 2); ··· 487 spin_unlock_irqrestore(&zone->wp_lock, flags); 488 } 489 490 - rq_for_each_bvec(tmp, rq, rq_iter) 491 - nr_bvec++; 492 493 if (rq->bio != rq->biotail) { 494 struct bio_vec *bvec;
··· 394 struct bio_vec tmp; 395 unsigned long flags; 396 sector_t zone_end; 397 + unsigned int nr_bvec; 398 int ret; 399 400 atomic_set(&cmd->ref, 2); ··· 487 spin_unlock_irqrestore(&zone->wp_lock, flags); 488 } 489 490 + nr_bvec = blk_rq_nr_bvec(rq); 491 492 if (rq->bio != rq->biotail) { 493 struct bio_vec *bvec;
+1 -1
drivers/nvme/host/auth.c
··· 1122 if (ctrl->dhchap_ctxs) { 1123 for (i = 0; i < ctrl_max_dhchaps(ctrl); i++) 1124 nvme_auth_free_dhchap(&ctrl->dhchap_ctxs[i]); 1125 - kfree(ctrl->dhchap_ctxs); 1126 } 1127 if (ctrl->host_key) { 1128 nvme_auth_free_key(ctrl->host_key);
··· 1122 if (ctrl->dhchap_ctxs) { 1123 for (i = 0; i < ctrl_max_dhchaps(ctrl); i++) 1124 nvme_auth_free_dhchap(&ctrl->dhchap_ctxs[i]); 1125 + kvfree(ctrl->dhchap_ctxs); 1126 } 1127 if (ctrl->host_key) { 1128 nvme_auth_free_key(ctrl->host_key);
+1 -1
drivers/nvme/host/fabrics.c
··· 592 if (status > 0 && (status & NVME_STATUS_DNR)) 593 return false; 594 595 - if (status == -EKEYREJECTED) 596 return false; 597 598 if (ctrl->opts->max_reconnects == -1 ||
··· 592 if (status > 0 && (status & NVME_STATUS_DNR)) 593 return false; 594 595 + if (status == -EKEYREJECTED || status == -ENOKEY) 596 return false; 597 598 if (ctrl->opts->max_reconnects == -1 ||
+6 -2
drivers/nvme/host/fc.c
··· 520 521 WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED); 522 WARN_ON(!list_empty(&rport->ctrl_list)); 523 524 /* remove from lport list */ 525 spin_lock_irqsave(&nvme_fc_lock, flags); ··· 1470 { 1471 struct fcnvme_ls_disconnect_assoc_rqst *rqst = 1472 &lsop->rqstbuf->rq_dis_assoc; 1473 - struct nvme_fc_ctrl *ctrl, *ret = NULL; 1474 struct nvmefc_ls_rcv_op *oldls = NULL; 1475 u64 association_id = be64_to_cpu(rqst->associd.association_id); 1476 unsigned long flags; 1477 1478 spin_lock_irqsave(&rport->lock, flags); 1479 1480 - list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { 1481 if (!nvme_fc_ctrl_get(ctrl)) 1482 continue; 1483 spin_lock(&ctrl->lock); ··· 1490 if (ret) 1491 /* leave the ctrl get reference */ 1492 break; 1493 nvme_fc_ctrl_put(ctrl); 1494 } 1495 1496 spin_unlock_irqrestore(&rport->lock, flags);
··· 520 521 WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED); 522 WARN_ON(!list_empty(&rport->ctrl_list)); 523 + WARN_ON(!list_empty(&rport->ls_req_list)); 524 + WARN_ON(!list_empty(&rport->ls_rcv_list)); 525 526 /* remove from lport list */ 527 spin_lock_irqsave(&nvme_fc_lock, flags); ··· 1468 { 1469 struct fcnvme_ls_disconnect_assoc_rqst *rqst = 1470 &lsop->rqstbuf->rq_dis_assoc; 1471 + struct nvme_fc_ctrl *ctrl, *tmp, *ret = NULL; 1472 struct nvmefc_ls_rcv_op *oldls = NULL; 1473 u64 association_id = be64_to_cpu(rqst->associd.association_id); 1474 unsigned long flags; 1475 1476 spin_lock_irqsave(&rport->lock, flags); 1477 1478 + list_for_each_entry_safe(ctrl, tmp, &rport->ctrl_list, ctrl_list) { 1479 if (!nvme_fc_ctrl_get(ctrl)) 1480 continue; 1481 spin_lock(&ctrl->lock); ··· 1488 if (ret) 1489 /* leave the ctrl get reference */ 1490 break; 1491 + spin_unlock_irqrestore(&rport->lock, flags); 1492 nvme_fc_ctrl_put(ctrl); 1493 + spin_lock_irqsave(&rport->lock, flags); 1494 } 1495 1496 spin_unlock_irqrestore(&rport->lock, flags);
+1 -1
drivers/nvme/host/ioctl.c
··· 447 struct iov_iter iter; 448 struct iov_iter *map_iter = NULL; 449 struct request *req; 450 - blk_opf_t rq_flags = REQ_ALLOC_CACHE; 451 blk_mq_req_flags_t blk_flags = 0; 452 int ret; 453
··· 447 struct iov_iter iter; 448 struct iov_iter *map_iter = NULL; 449 struct request *req; 450 + blk_opf_t rq_flags = 0; 451 blk_mq_req_flags_t blk_flags = 0; 452 int ret; 453
+2
drivers/nvme/host/pci.c
··· 2984 pci_set_master(pdev); 2985 2986 if (readl(dev->bar + NVME_REG_CSTS) == -1) { 2987 result = -ENODEV; 2988 goto disable; 2989 } ··· 3610 nvme_uninit_ctrl(&dev->ctrl); 3611 out_put_ctrl: 3612 nvme_put_ctrl(&dev->ctrl); 3613 return result; 3614 } 3615
··· 2984 pci_set_master(pdev); 2985 2986 if (readl(dev->bar + NVME_REG_CSTS) == -1) { 2987 + dev_dbg(dev->ctrl.device, "reading CSTS register failed\n"); 2988 result = -ENODEV; 2989 goto disable; 2990 } ··· 3609 nvme_uninit_ctrl(&dev->ctrl); 3610 out_put_ctrl: 3611 nvme_put_ctrl(&dev->ctrl); 3612 + dev_err_probe(&pdev->dev, result, "probe failed\n"); 3613 return result; 3614 } 3615
+5 -1
drivers/nvme/host/pr.c
··· 228 static int nvme_pr_read_keys(struct block_device *bdev, 229 struct pr_keys *keys_info) 230 { 231 - u32 rse_len, num_keys = keys_info->num_keys; 232 struct nvme_reservation_status_ext *rse; 233 int ret, i; 234 bool eds; ··· 239 * enough to get enough keys to fill the return keys buffer. 240 */ 241 rse_len = struct_size(rse, regctl_eds, num_keys); 242 rse = kzalloc(rse_len, GFP_KERNEL); 243 if (!rse) 244 return -ENOMEM;
··· 228 static int nvme_pr_read_keys(struct block_device *bdev, 229 struct pr_keys *keys_info) 230 { 231 + size_t rse_len; 232 + u32 num_keys = keys_info->num_keys; 233 struct nvme_reservation_status_ext *rse; 234 int ret, i; 235 bool eds; ··· 238 * enough to get enough keys to fill the return keys buffer. 239 */ 240 rse_len = struct_size(rse, regctl_eds, num_keys); 241 + if (rse_len > U32_MAX) 242 + return -EINVAL; 243 + 244 rse = kzalloc(rse_len, GFP_KERNEL); 245 if (!rse) 246 return -ENOMEM;
+1 -1
drivers/nvme/target/admin-cmd.c
··· 708 709 /* 710 * We don't really have a practical limit on the number of abort 711 - * comands. But we don't do anything useful for abort either, so 712 * no point in allowing more abort commands than the spec requires. 713 */ 714 id->acl = 3;
··· 708 709 /* 710 * We don't really have a practical limit on the number of abort 711 + * commands. But we don't do anything useful for abort either, so 712 * no point in allowing more abort commands than the spec requires. 713 */ 714 id->acl = 3;
+10 -8
drivers/nvme/target/auth.c
··· 381 ret = crypto_shash_update(shash, buf, 1); 382 if (ret) 383 goto out; 384 - ret = crypto_shash_update(shash, ctrl->subsysnqn, 385 - strlen(ctrl->subsysnqn)); 386 if (ret) 387 goto out; 388 ret = crypto_shash_final(shash, response); ··· 429 } 430 431 transformed_key = nvme_auth_transform_key(ctrl->ctrl_key, 432 - ctrl->subsysnqn); 433 if (IS_ERR(transformed_key)) { 434 ret = PTR_ERR(transformed_key); 435 goto out_free_tfm; ··· 484 ret = crypto_shash_update(shash, "Controller", 10); 485 if (ret) 486 goto out; 487 - ret = crypto_shash_update(shash, ctrl->subsysnqn, 488 - strlen(ctrl->subsysnqn)); 489 if (ret) 490 goto out; 491 ret = crypto_shash_update(shash, buf, 1); ··· 575 return; 576 } 577 ret = nvme_auth_generate_digest(sq->ctrl->shash_id, psk, psk_len, 578 - sq->ctrl->subsysnqn, 579 sq->ctrl->hostnqn, &digest); 580 if (ret) { 581 pr_warn("%s: ctrl %d qid %d failed to generate digest, error %d\n", ··· 590 goto out_free_digest; 591 } 592 #ifdef CONFIG_NVME_TARGET_TCP_TLS 593 - tls_key = nvme_tls_psk_refresh(NULL, sq->ctrl->hostnqn, sq->ctrl->subsysnqn, 594 - sq->ctrl->shash_id, tls_psk, psk_len, digest); 595 if (IS_ERR(tls_key)) { 596 pr_warn("%s: ctrl %d qid %d failed to refresh key, error %ld\n", 597 __func__, sq->ctrl->cntlid, sq->qid, PTR_ERR(tls_key));
··· 381 ret = crypto_shash_update(shash, buf, 1); 382 if (ret) 383 goto out; 384 + ret = crypto_shash_update(shash, ctrl->subsys->subsysnqn, 385 + strlen(ctrl->subsys->subsysnqn)); 386 if (ret) 387 goto out; 388 ret = crypto_shash_final(shash, response); ··· 429 } 430 431 transformed_key = nvme_auth_transform_key(ctrl->ctrl_key, 432 + ctrl->subsys->subsysnqn); 433 if (IS_ERR(transformed_key)) { 434 ret = PTR_ERR(transformed_key); 435 goto out_free_tfm; ··· 484 ret = crypto_shash_update(shash, "Controller", 10); 485 if (ret) 486 goto out; 487 + ret = crypto_shash_update(shash, ctrl->subsys->subsysnqn, 488 + strlen(ctrl->subsys->subsysnqn)); 489 if (ret) 490 goto out; 491 ret = crypto_shash_update(shash, buf, 1); ··· 575 return; 576 } 577 ret = nvme_auth_generate_digest(sq->ctrl->shash_id, psk, psk_len, 578 + sq->ctrl->subsys->subsysnqn, 579 sq->ctrl->hostnqn, &digest); 580 if (ret) { 581 pr_warn("%s: ctrl %d qid %d failed to generate digest, error %d\n", ··· 590 goto out_free_digest; 591 } 592 #ifdef CONFIG_NVME_TARGET_TCP_TLS 593 + tls_key = nvme_tls_psk_refresh(NULL, sq->ctrl->hostnqn, 594 + sq->ctrl->subsys->subsysnqn, 595 + sq->ctrl->shash_id, tls_psk, psk_len, 596 + digest); 597 if (IS_ERR(tls_key)) { 598 pr_warn("%s: ctrl %d qid %d failed to refresh key, error %ld\n", 599 __func__, sq->ctrl->cntlid, sq->qid, PTR_ERR(tls_key));
+3 -2
drivers/nvme/target/core.c
··· 40 * - the nvmet_transports array 41 * 42 * When updating any of those lists/structures write lock should be obtained, 43 - * while when reading (popolating discovery log page or checking host-subsystem 44 * link) read lock is obtained to allow concurrent reads. 45 */ 46 DECLARE_RWSEM(nvmet_config_sem); ··· 1628 INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler); 1629 INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer); 1630 1631 - memcpy(ctrl->subsysnqn, args->subsysnqn, NVMF_NQN_SIZE); 1632 memcpy(ctrl->hostnqn, args->hostnqn, NVMF_NQN_SIZE); 1633 1634 kref_init(&ctrl->ref); ··· 1902 struct nvmet_subsys *subsys = 1903 container_of(ref, struct nvmet_subsys, ref); 1904 1905 WARN_ON_ONCE(!xa_empty(&subsys->namespaces)); 1906 1907 nvmet_debugfs_subsys_free(subsys);
··· 40 * - the nvmet_transports array 41 * 42 * When updating any of those lists/structures write lock should be obtained, 43 + * while when reading (populating discovery log page or checking host-subsystem 44 * link) read lock is obtained to allow concurrent reads. 45 */ 46 DECLARE_RWSEM(nvmet_config_sem); ··· 1628 INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler); 1629 INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer); 1630 1631 memcpy(ctrl->hostnqn, args->hostnqn, NVMF_NQN_SIZE); 1632 1633 kref_init(&ctrl->ref); ··· 1903 struct nvmet_subsys *subsys = 1904 container_of(ref, struct nvmet_subsys, ref); 1905 1906 + WARN_ON_ONCE(!list_empty(&subsys->ctrls)); 1907 + WARN_ON_ONCE(!list_empty(&subsys->hosts)); 1908 WARN_ON_ONCE(!xa_empty(&subsys->namespaces)); 1909 1910 nvmet_debugfs_subsys_free(subsys);
+21 -27
drivers/nvme/target/fc.c
··· 490 sizeof(*discon_rqst) + sizeof(*discon_acc) + 491 tgtport->ops->lsrqst_priv_sz), GFP_KERNEL); 492 if (!lsop) { 493 - dev_info(tgtport->dev, 494 - "{%d:%d} send Disconnect Association failed: ENOMEM\n", 495 tgtport->fc_target_port.port_num, assoc->a_id); 496 return; 497 } ··· 512 ret = nvmet_fc_send_ls_req_async(tgtport, lsop, 513 nvmet_fc_disconnect_assoc_done); 514 if (ret) { 515 - dev_info(tgtport->dev, 516 - "{%d:%d} XMT Disconnect Association failed: %d\n", 517 tgtport->fc_target_port.port_num, assoc->a_id, ret); 518 kfree(lsop); 519 } ··· 1185 if (oldls) 1186 nvmet_fc_xmt_ls_rsp(tgtport, oldls); 1187 ida_free(&tgtport->assoc_cnt, assoc->a_id); 1188 - dev_info(tgtport->dev, 1189 - "{%d:%d} Association freed\n", 1190 tgtport->fc_target_port.port_num, assoc->a_id); 1191 kfree(assoc); 1192 } ··· 1221 flush_workqueue(assoc->queues[i]->work_q); 1222 } 1223 1224 - dev_info(tgtport->dev, 1225 - "{%d:%d} Association deleted\n", 1226 tgtport->fc_target_port.port_num, assoc->a_id); 1227 1228 nvmet_fc_tgtport_put(tgtport); ··· 1712 } 1713 1714 if (ret) { 1715 - dev_err(tgtport->dev, 1716 - "Create Association LS failed: %s\n", 1717 - validation_errors[ret]); 1718 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, 1719 sizeof(*acc), rqst->w0.ls_cmd, 1720 FCNVME_RJT_RC_LOGIC, ··· 1726 atomic_set(&queue->connected, 1); 1727 queue->sqhd = 0; /* best place to init value */ 1728 1729 - dev_info(tgtport->dev, 1730 - "{%d:%d} Association created\n", 1731 tgtport->fc_target_port.port_num, iod->assoc->a_id); 1732 1733 /* format a response */ ··· 1804 } 1805 1806 if (ret) { 1807 - dev_err(tgtport->dev, 1808 - "Create Connection LS failed: %s\n", 1809 - validation_errors[ret]); 1810 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, 1811 sizeof(*acc), rqst->w0.ls_cmd, 1812 (ret == VERR_NO_ASSOC) ? ··· 1866 } 1867 1868 if (ret || !assoc) { 1869 - dev_err(tgtport->dev, 1870 - "Disconnect LS failed: %s\n", 1871 - validation_errors[ret]); 1872 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, 1873 sizeof(*acc), rqst->w0.ls_cmd, 1874 (ret == VERR_NO_ASSOC) ? ··· 1902 spin_unlock_irqrestore(&tgtport->lock, flags); 1903 1904 if (oldls) { 1905 - dev_info(tgtport->dev, 1906 - "{%d:%d} Multiple Disconnect Association LS's " 1907 "received\n", 1908 tgtport->fc_target_port.port_num, assoc->a_id); 1909 /* overwrite good response with bogus failure */ ··· 2045 struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf; 2046 2047 if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) { 2048 - dev_info(tgtport->dev, 2049 - "RCV %s LS failed: payload too large (%d)\n", 2050 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 2051 nvmefc_ls_names[w0->ls_cmd] : "", 2052 lsreqbuf_len); ··· 2054 } 2055 2056 if (!nvmet_fc_tgtport_get(tgtport)) { 2057 - dev_info(tgtport->dev, 2058 - "RCV %s LS failed: target deleting\n", 2059 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 2060 nvmefc_ls_names[w0->ls_cmd] : ""); 2061 return -ESHUTDOWN; ··· 2063 2064 iod = nvmet_fc_alloc_ls_iod(tgtport); 2065 if (!iod) { 2066 - dev_info(tgtport->dev, 2067 - "RCV %s LS failed: context allocation failed\n", 2068 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 2069 nvmefc_ls_names[w0->ls_cmd] : ""); 2070 nvmet_fc_tgtport_put(tgtport);
··· 490 sizeof(*discon_rqst) + sizeof(*discon_acc) + 491 tgtport->ops->lsrqst_priv_sz), GFP_KERNEL); 492 if (!lsop) { 493 + pr_info("{%d:%d}: send Disconnect Association failed: ENOMEM\n", 494 tgtport->fc_target_port.port_num, assoc->a_id); 495 return; 496 } ··· 513 ret = nvmet_fc_send_ls_req_async(tgtport, lsop, 514 nvmet_fc_disconnect_assoc_done); 515 if (ret) { 516 + pr_info("{%d:%d}: XMT Disconnect Association failed: %d\n", 517 tgtport->fc_target_port.port_num, assoc->a_id, ret); 518 kfree(lsop); 519 } ··· 1187 if (oldls) 1188 nvmet_fc_xmt_ls_rsp(tgtport, oldls); 1189 ida_free(&tgtport->assoc_cnt, assoc->a_id); 1190 + pr_info("{%d:%d}: Association freed\n", 1191 tgtport->fc_target_port.port_num, assoc->a_id); 1192 kfree(assoc); 1193 } ··· 1224 flush_workqueue(assoc->queues[i]->work_q); 1225 } 1226 1227 + pr_info("{%d:%d}: Association deleted\n", 1228 tgtport->fc_target_port.port_num, assoc->a_id); 1229 1230 nvmet_fc_tgtport_put(tgtport); ··· 1716 } 1717 1718 if (ret) { 1719 + pr_err("{%d}: Create Association LS failed: %s\n", 1720 + tgtport->fc_target_port.port_num, 1721 + validation_errors[ret]); 1722 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, 1723 sizeof(*acc), rqst->w0.ls_cmd, 1724 FCNVME_RJT_RC_LOGIC, ··· 1730 atomic_set(&queue->connected, 1); 1731 queue->sqhd = 0; /* best place to init value */ 1732 1733 + pr_info("{%d:%d}: Association created\n", 1734 tgtport->fc_target_port.port_num, iod->assoc->a_id); 1735 1736 /* format a response */ ··· 1809 } 1810 1811 if (ret) { 1812 + pr_err("{%d}: Create Connection LS failed: %s\n", 1813 + tgtport->fc_target_port.port_num, 1814 + validation_errors[ret]); 1815 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, 1816 sizeof(*acc), rqst->w0.ls_cmd, 1817 (ret == VERR_NO_ASSOC) ? ··· 1871 } 1872 1873 if (ret || !assoc) { 1874 + pr_err("{%d}: Disconnect LS failed: %s\n", 1875 + tgtport->fc_target_port.port_num, 1876 + validation_errors[ret]); 1877 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, 1878 sizeof(*acc), rqst->w0.ls_cmd, 1879 (ret == VERR_NO_ASSOC) ? ··· 1907 spin_unlock_irqrestore(&tgtport->lock, flags); 1908 1909 if (oldls) { 1910 + pr_info("{%d:%d}: Multiple Disconnect Association LS's " 1911 "received\n", 1912 tgtport->fc_target_port.port_num, assoc->a_id); 1913 /* overwrite good response with bogus failure */ ··· 2051 struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf; 2052 2053 if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) { 2054 + pr_info("{%d}: RCV %s LS failed: payload too large (%d)\n", 2055 + tgtport->fc_target_port.port_num, 2056 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 2057 nvmefc_ls_names[w0->ls_cmd] : "", 2058 lsreqbuf_len); ··· 2060 } 2061 2062 if (!nvmet_fc_tgtport_get(tgtport)) { 2063 + pr_info("{%d}: RCV %s LS failed: target deleting\n", 2064 + tgtport->fc_target_port.port_num, 2065 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 2066 nvmefc_ls_names[w0->ls_cmd] : ""); 2067 return -ESHUTDOWN; ··· 2069 2070 iod = nvmet_fc_alloc_ls_iod(tgtport); 2071 if (!iod) { 2072 + pr_info("{%d}: RCV %s LS failed: context allocation failed\n", 2073 + tgtport->fc_target_port.port_num, 2074 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 2075 nvmefc_ls_names[w0->ls_cmd] : ""); 2076 nvmet_fc_tgtport_put(tgtport);
+6 -3
drivers/nvme/target/fcloop.c
··· 254 struct fcloop_lsreq { 255 struct nvmefc_ls_req *lsreq; 256 struct nvmefc_ls_rsp ls_rsp; 257 - int lsdir; /* H2T or T2H */ 258 int status; 259 struct list_head ls_list; /* fcloop_rport->ls_list */ 260 }; ··· 1110 rport->nport->rport = NULL; 1111 spin_unlock_irqrestore(&fcloop_lock, flags); 1112 1113 - if (put_port) 1114 fcloop_nport_put(rport->nport); 1115 } 1116 1117 static void ··· 1131 tport->nport->tport = NULL; 1132 spin_unlock_irqrestore(&fcloop_lock, flags); 1133 1134 - if (put_port) 1135 fcloop_nport_put(tport->nport); 1136 } 1137 1138 #define FCLOOP_HW_QUEUES 4
··· 254 struct fcloop_lsreq { 255 struct nvmefc_ls_req *lsreq; 256 struct nvmefc_ls_rsp ls_rsp; 257 int status; 258 struct list_head ls_list; /* fcloop_rport->ls_list */ 259 }; ··· 1111 rport->nport->rport = NULL; 1112 spin_unlock_irqrestore(&fcloop_lock, flags); 1113 1114 + if (put_port) { 1115 + WARN_ON(!list_empty(&rport->ls_list)); 1116 fcloop_nport_put(rport->nport); 1117 + } 1118 } 1119 1120 static void ··· 1130 tport->nport->tport = NULL; 1131 spin_unlock_irqrestore(&fcloop_lock, flags); 1132 1133 + if (put_port) { 1134 + WARN_ON(!list_empty(&tport->ls_list)); 1135 fcloop_nport_put(tport->nport); 1136 + } 1137 } 1138 1139 #define FCLOOP_HW_QUEUES 4
-1
drivers/nvme/target/nvmet.h
··· 285 __le32 *changed_ns_list; 286 u32 nr_changed_ns; 287 288 - char subsysnqn[NVMF_NQN_FIELD_LEN]; 289 char hostnqn[NVMF_NQN_FIELD_LEN]; 290 291 struct device *p2p_client;
··· 285 __le32 *changed_ns_list; 286 u32 nr_changed_ns; 287 288 char hostnqn[NVMF_NQN_FIELD_LEN]; 289 290 struct device *p2p_client;
+1 -1
drivers/nvme/target/passthru.c
··· 150 * code path with duplicate ctrl subsysnqn. In order to prevent that we 151 * mask the passthru-ctrl subsysnqn with the target ctrl subsysnqn. 152 */ 153 - memcpy(id->subnqn, ctrl->subsysnqn, sizeof(id->subnqn)); 154 155 /* use fabric id-ctrl values */ 156 id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
··· 150 * code path with duplicate ctrl subsysnqn. In order to prevent that we 151 * mask the passthru-ctrl subsysnqn with the target ctrl subsysnqn. 152 */ 153 + memcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn)); 154 155 /* use fabric id-ctrl values */ 156 id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
+8 -6
drivers/nvme/target/pci-epf.c
··· 320 nvme_epf->dma_enabled = true; 321 322 dev_dbg(dev, "Using DMA RX channel %s, maximum segment size %u B\n", 323 - dma_chan_name(chan), 324 - dma_get_max_seg_size(dmaengine_get_dma_device(chan))); 325 326 dev_dbg(dev, "Using DMA TX channel %s, maximum segment size %u B\n", 327 - dma_chan_name(chan), 328 - dma_get_max_seg_size(dmaengine_get_dma_device(chan))); 329 330 return; 331 ··· 2327 return ret; 2328 } 2329 2330 /* Set device ID, class, etc. */ 2331 epf->header->vendorid = ctrl->tctrl->subsys->vendor_id; 2332 epf->header->subsys_vendor_id = ctrl->tctrl->subsys->subsys_vendor_id; ··· 2425 ret = nvmet_pci_epf_configure_bar(nvme_epf); 2426 if (ret) 2427 return ret; 2428 - 2429 - nvmet_pci_epf_init_dma(nvme_epf); 2430 2431 return 0; 2432 }
··· 320 nvme_epf->dma_enabled = true; 321 322 dev_dbg(dev, "Using DMA RX channel %s, maximum segment size %u B\n", 323 + dma_chan_name(nvme_epf->dma_rx_chan), 324 + dma_get_max_seg_size(dmaengine_get_dma_device(nvme_epf-> 325 + dma_rx_chan))); 326 327 dev_dbg(dev, "Using DMA TX channel %s, maximum segment size %u B\n", 328 + dma_chan_name(nvme_epf->dma_tx_chan), 329 + dma_get_max_seg_size(dmaengine_get_dma_device(nvme_epf-> 330 + dma_tx_chan))); 331 332 return; 333 ··· 2325 return ret; 2326 } 2327 2328 + nvmet_pci_epf_init_dma(nvme_epf); 2329 + 2330 /* Set device ID, class, etc. */ 2331 epf->header->vendorid = ctrl->tctrl->subsys->vendor_id; 2332 epf->header->subsys_vendor_id = ctrl->tctrl->subsys->subsys_vendor_id; ··· 2421 ret = nvmet_pci_epf_configure_bar(nvme_epf); 2422 if (ret) 2423 return ret; 2424 2425 return 0; 2426 }
+6 -6
drivers/nvme/target/rdma.c
··· 367 struct nvmet_rdma_cmd *cmds; 368 int ret = -EINVAL, i; 369 370 - cmds = kcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL); 371 if (!cmds) 372 goto out; 373 ··· 382 out_free: 383 while (--i >= 0) 384 nvmet_rdma_free_cmd(ndev, cmds + i, admin); 385 - kfree(cmds); 386 out: 387 return ERR_PTR(ret); 388 } ··· 394 395 for (i = 0; i < nr_cmds; i++) 396 nvmet_rdma_free_cmd(ndev, cmds + i, admin); 397 - kfree(cmds); 398 } 399 400 static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev, ··· 455 NUMA_NO_NODE, false, true)) 456 goto out; 457 458 - queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp), 459 GFP_KERNEL); 460 if (!queue->rsps) 461 goto out_free_sbitmap; ··· 473 out_free: 474 while (--i >= 0) 475 nvmet_rdma_free_rsp(ndev, &queue->rsps[i]); 476 - kfree(queue->rsps); 477 out_free_sbitmap: 478 sbitmap_free(&queue->rsp_tags); 479 out: ··· 487 488 for (i = 0; i < nr_rsps; i++) 489 nvmet_rdma_free_rsp(ndev, &queue->rsps[i]); 490 - kfree(queue->rsps); 491 sbitmap_free(&queue->rsp_tags); 492 } 493
··· 367 struct nvmet_rdma_cmd *cmds; 368 int ret = -EINVAL, i; 369 370 + cmds = kvcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL); 371 if (!cmds) 372 goto out; 373 ··· 382 out_free: 383 while (--i >= 0) 384 nvmet_rdma_free_cmd(ndev, cmds + i, admin); 385 + kvfree(cmds); 386 out: 387 return ERR_PTR(ret); 388 } ··· 394 395 for (i = 0; i < nr_cmds; i++) 396 nvmet_rdma_free_cmd(ndev, cmds + i, admin); 397 + kvfree(cmds); 398 } 399 400 static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev, ··· 455 NUMA_NO_NODE, false, true)) 456 goto out; 457 458 + queue->rsps = kvcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp), 459 GFP_KERNEL); 460 if (!queue->rsps) 461 goto out_free_sbitmap; ··· 473 out_free: 474 while (--i >= 0) 475 nvmet_rdma_free_rsp(ndev, &queue->rsps[i]); 476 + kvfree(queue->rsps); 477 out_free_sbitmap: 478 sbitmap_free(&queue->rsp_tags); 479 out: ··· 487 488 for (i = 0; i < nr_rsps; i++) 489 nvmet_rdma_free_rsp(ndev, &queue->rsps[i]); 490 + kvfree(queue->rsps); 491 sbitmap_free(&queue->rsp_tags); 492 } 493
+3 -3
drivers/nvme/target/tcp.c
··· 1484 struct nvmet_tcp_cmd *cmds; 1485 int i, ret = -EINVAL, nr_cmds = queue->nr_cmds; 1486 1487 - cmds = kcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL); 1488 if (!cmds) 1489 goto out; 1490 ··· 1500 out_free: 1501 while (--i >= 0) 1502 nvmet_tcp_free_cmd(cmds + i); 1503 - kfree(cmds); 1504 out: 1505 return ret; 1506 } ··· 1514 nvmet_tcp_free_cmd(cmds + i); 1515 1516 nvmet_tcp_free_cmd(&queue->connect); 1517 - kfree(cmds); 1518 } 1519 1520 static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
··· 1484 struct nvmet_tcp_cmd *cmds; 1485 int i, ret = -EINVAL, nr_cmds = queue->nr_cmds; 1486 1487 + cmds = kvcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL); 1488 if (!cmds) 1489 goto out; 1490 ··· 1500 out_free: 1501 while (--i >= 0) 1502 nvmet_tcp_free_cmd(cmds + i); 1503 + kvfree(cmds); 1504 out: 1505 return ret; 1506 } ··· 1514 nvmet_tcp_free_cmd(cmds + i); 1515 1516 nvmet_tcp_free_cmd(&queue->connect); 1517 + kvfree(cmds); 1518 } 1519 1520 static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
+11 -1
drivers/scsi/sd.c
··· 2004 { 2005 int result, i, data_offset, num_copy_keys; 2006 u32 num_keys = keys_info->num_keys; 2007 - int data_len = num_keys * 8 + 8; 2008 u8 *data; 2009 2010 data = kzalloc(data_len, GFP_KERNEL); 2011 if (!data)
··· 2004 { 2005 int result, i, data_offset, num_copy_keys; 2006 u32 num_keys = keys_info->num_keys; 2007 + int data_len; 2008 u8 *data; 2009 + 2010 + /* 2011 + * Each reservation key takes 8 bytes and there is an 8-byte header 2012 + * before the reservation key list. The total size must fit into the 2013 + * 16-bit ALLOCATION LENGTH field. 2014 + */ 2015 + if (check_mul_overflow(num_keys, 8, &data_len) || 2016 + check_add_overflow(data_len, 8, &data_len) || 2017 + data_len > USHRT_MAX) 2018 + return -EINVAL; 2019 2020 data = kzalloc(data_len, GFP_KERNEL); 2021 if (!data)
+18
include/linux/blk-mq.h
··· 1213 return max_t(unsigned short, rq->nr_phys_segments, 1); 1214 } 1215 1216 int __blk_rq_map_sg(struct request *rq, struct scatterlist *sglist, 1217 struct scatterlist **last_sg); 1218 static inline int blk_rq_map_sg(struct request *rq, struct scatterlist *sglist)
··· 1213 return max_t(unsigned short, rq->nr_phys_segments, 1); 1214 } 1215 1216 + /** 1217 + * blk_rq_nr_bvec - return number of bvecs in a request 1218 + * @rq: request to calculate bvecs for 1219 + * 1220 + * Returns the number of bvecs. 1221 + */ 1222 + static inline unsigned int blk_rq_nr_bvec(struct request *rq) 1223 + { 1224 + struct req_iterator rq_iter; 1225 + struct bio_vec bv; 1226 + unsigned int nr_bvec = 0; 1227 + 1228 + rq_for_each_bvec(bv, rq, rq_iter) 1229 + nr_bvec++; 1230 + 1231 + return nr_bvec; 1232 + } 1233 + 1234 int __blk_rq_map_sg(struct request *rq, struct scatterlist *sglist, 1235 struct scatterlist **last_sg); 1236 static inline int blk_rq_map_sg(struct request *rq, struct scatterlist *sglist)
+1 -4
include/linux/blk_types.h
··· 479 } 480 481 /* 482 - * Check if a bio or request operation is a zone management operation, with 483 - * the exception of REQ_OP_ZONE_RESET_ALL which is treated as a special case 484 - * due to its different handling in the block layer and device response in 485 - * case of command failure. 486 */ 487 static inline bool op_is_zone_mgmt(enum req_op op) 488 {
··· 479 } 480 481 /* 482 + * Check if a bio or request operation is a zone management operation. 483 */ 484 static inline bool op_is_zone_mgmt(enum req_op op) 485 {
+14
include/uapi/linux/pr.h
··· 56 __u32 __pad; 57 }; 58 59 #define PR_FL_IGNORE_KEY (1 << 0) /* ignore existing key */ 60 61 #define IOC_PR_REGISTER _IOW('p', 200, struct pr_registration) ··· 76 #define IOC_PR_PREEMPT _IOW('p', 203, struct pr_preempt) 77 #define IOC_PR_PREEMPT_ABORT _IOW('p', 204, struct pr_preempt) 78 #define IOC_PR_CLEAR _IOW('p', 205, struct pr_clear) 79 80 #endif /* _UAPI_PR_H */
··· 56 __u32 __pad; 57 }; 58 59 + struct pr_read_keys { 60 + __u32 generation; 61 + __u32 num_keys; 62 + __u64 keys_ptr; 63 + }; 64 + 65 + struct pr_read_reservation { 66 + __u64 key; 67 + __u32 generation; 68 + __u32 type; 69 + }; 70 + 71 #define PR_FL_IGNORE_KEY (1 << 0) /* ignore existing key */ 72 73 #define IOC_PR_REGISTER _IOW('p', 200, struct pr_registration) ··· 64 #define IOC_PR_PREEMPT _IOW('p', 203, struct pr_preempt) 65 #define IOC_PR_PREEMPT_ABORT _IOW('p', 204, struct pr_preempt) 66 #define IOC_PR_CLEAR _IOW('p', 205, struct pr_clear) 67 + #define IOC_PR_READ_KEYS _IOWR('p', 206, struct pr_read_keys) 68 + #define IOC_PR_READ_RESERVATION _IOR('p', 207, struct pr_read_reservation) 69 70 #endif /* _UAPI_PR_H */
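
For reference, a minimal userspace sketch of driving the two new read ioctls defined in the pr.h hunk above; /dev/sdX is a hypothetical PR-capable device and the key count of 16 is arbitrary, so treat this as an illustration of the uapi rather than a tested tool:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/types.h>
#include <linux/pr.h>

int main(void)
{
	/* Hypothetical PR-capable block device. */
	int fd = open("/dev/sdX", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/*
	 * Ask for up to 16 registered keys; the kernel writes back the
	 * device's actual key count and the PR generation.
	 */
	__u64 keys[16];
	struct pr_read_keys rk = {
		.num_keys = 16,
		.keys_ptr = (__u64)(uintptr_t)keys,
	};
	if (ioctl(fd, IOC_PR_READ_KEYS, &rk) == 0)
		printf("generation %u, %u registered keys\n",
		       rk.generation, rk.num_keys);

	/* Read the currently held reservation, if any. */
	struct pr_read_reservation rr = { 0 };
	if (ioctl(fd, IOC_PR_READ_RESERVATION, &rr) == 0)
		printf("reservation key 0x%llx type %u generation %u\n",
		       (unsigned long long)rr.key, rr.type, rr.generation);

	close(fd);
	return 0;
}

If the device has more registered keys than were requested, only the requested number are copied out, but num_keys is still updated to the full count, matching the min() copy in the blkdev_pr_read_keys() hunk above.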
-1
io_uring/rw.c
··· 855 ret = kiocb_set_rw_flags(kiocb, rw->flags, rw_type); 856 if (unlikely(ret)) 857 return ret; 858 - kiocb->ki_flags |= IOCB_ALLOC_CACHE; 859 860 /* 861 * If the file is marked O_NONBLOCK, still allow retry for it if it
··· 855 ret = kiocb_set_rw_flags(kiocb, rw->flags, rw_type); 856 if (unlikely(ret)) 857 return ret; 858 859 /* 860 * If the file is marked O_NONBLOCK, still allow retry for it if it