Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'block-6.18-20251016' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux

Pull block fixes from Jens Axboe:

- NVMe pull request via Keith:
    - iostats accounting fixed on multipath retries (Amit)
    - secure concatenation response fixup (Martin)
    - tls partial record fixup (Wilfred)

- Fix for a lockdep reported issue with the elevator lock and
blk group frozen operations

- Fix for a regression in this merge window, where updating
'nr_requests' would not do the right thing for queues with
shared tags

* tag 'block-6.18-20251016' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux:
nvme/tcp: handle tls partially sent records in write_space()
block: Remove elevator_lock usage from blkg_conf frozen operations
blk-mq: fix stale tag depth for shared sched tags in blk_mq_update_nr_requests()
nvme-auth: update sc_c in host response
nvme-multipath: Skip nr_active increments in RETRY disposition

+23 -17
+4 -9
block/blk-cgroup.c
```diff
@@ blkg_conf_open_bdev_frozen() — comment @@
 }
 /*
  * Similar to blkg_conf_open_bdev, but additionally freezes the queue,
- * acquires q->elevator_lock, and ensures the correct locking order
- * between q->elevator_lock and q->rq_qos_mutex.
+ * ensures the correct locking order between freeze queue and q->rq_qos_mutex.
  *
  * This function returns negative error on failure. On success it returns
  * memflags which must be saved and later passed to blkg_conf_exit_frozen
@@ blkg_conf_open_bdev_frozen() — body @@
	 * At this point, we haven’t started protecting anything related to QoS,
	 * so we release q->rq_qos_mutex here, which was first acquired in blkg_
	 * conf_open_bdev. Later, we re-acquire q->rq_qos_mutex after freezing
-	 * the queue and acquiring q->elevator_lock to maintain the correct
-	 * locking order.
+	 * the queue to maintain the correct locking order.
	 */
	mutex_unlock(&ctx->bdev->bd_queue->rq_qos_mutex);

	memflags = blk_mq_freeze_queue(ctx->bdev->bd_queue);
-	mutex_lock(&ctx->bdev->bd_queue->elevator_lock);
	mutex_lock(&ctx->bdev->bd_queue->rq_qos_mutex);

	return memflags;
@@ blkg_conf_exit_frozen() @@
 EXPORT_SYMBOL_GPL(blkg_conf_exit);

 /*
- * Similar to blkg_conf_exit, but also unfreezes the queue and releases
- * q->elevator_lock. Should be used when blkg_conf_open_bdev_frozen
- * is used to open the bdev.
+ * Similar to blkg_conf_exit, but also unfreezes the queue. Should be used
+ * when blkg_conf_open_bdev_frozen is used to open the bdev.
  */
 void blkg_conf_exit_frozen(struct blkg_conf_ctx *ctx, unsigned long memflags)
 {
	struct request_queue *q = ctx->bdev->bd_queue;

	blkg_conf_exit(ctx);
-	mutex_unlock(&q->elevator_lock);
	blk_mq_unfreeze_queue(q, memflags);
 }
```
+1 -1
block/blk-mq-sched.c
```diff
 	if (blk_mq_is_shared_tags(flags)) {
 		/* Shared tags are stored at index 0 in @et->tags. */
 		q->sched_shared_tags = et->tags[0];
-		blk_mq_tag_update_sched_shared_tags(q);
+		blk_mq_tag_update_sched_shared_tags(q, et->nr_requests);
 	}

 	queue_for_each_hw_ctx(q, hctx, i) {
```
+3 -2
block/blk-mq-tag.c
```diff
 	sbitmap_queue_resize(&tags->bitmap_tags, size - set->reserved_tags);
 }

-void blk_mq_tag_update_sched_shared_tags(struct request_queue *q)
+void blk_mq_tag_update_sched_shared_tags(struct request_queue *q,
+		unsigned int nr)
 {
 	sbitmap_queue_resize(&q->sched_shared_tags->bitmap_tags,
-			q->nr_requests - q->tag_set->reserved_tags);
+			nr - q->tag_set->reserved_tags);
 }

 /**
```
+1 -1
block/blk-mq.c
```diff
 	 * tags can't grow, see blk_mq_alloc_sched_tags().
 	 */
 	if (q->elevator)
-		blk_mq_tag_update_sched_shared_tags(q);
+		blk_mq_tag_update_sched_shared_tags(q, nr);
 	else
 		blk_mq_tag_resize_shared_tags(set, nr);
 } else if (!q->elevator) {
```
+2 -1
block/blk-mq.h
```diff
 void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags);
 void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set,
 		unsigned int size);
-void blk_mq_tag_update_sched_shared_tags(struct request_queue *q);
+void blk_mq_tag_update_sched_shared_tags(struct request_queue *q,
+		unsigned int nr);

 void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
 void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
```
+5 -1
drivers/nvme/host/auth.c
```diff
@@ struct nvme_dhchap_queue_context @@
 	u8 status;
 	u8 dhgroup_id;
 	u8 hash_id;
+	u8 sc_c;
 	size_t hash_len;
 	u8 c1[64];
 	u8 c2[64];
@@ negotiate-data setup @@
 	data->auth_protocol[0].dhchap.idlist[33] = NVME_AUTH_DHGROUP_4096;
 	data->auth_protocol[0].dhchap.idlist[34] = NVME_AUTH_DHGROUP_6144;
 	data->auth_protocol[0].dhchap.idlist[35] = NVME_AUTH_DHGROUP_8192;
+
+	chap->sc_c = data->sc_c;

 	return size;
 }
@@ host response hash @@
 	ret = crypto_shash_update(shash, buf, 2);
 	if (ret)
 		goto out;
-	memset(buf, 0, sizeof(buf));
+	*buf = chap->sc_c;
 	ret = crypto_shash_update(shash, buf, 1);
 	if (ret)
 		goto out;
@@ host response hash (continued) @@
 			strlen(ctrl->opts->host->nqn));
 	if (ret)
 		goto out;
+	memset(buf, 0, sizeof(buf));
 	ret = crypto_shash_update(shash, buf, 1);
 	if (ret)
 		goto out;
```
+4 -2
drivers/nvme/host/multipath.c
```diff
 	struct nvme_ns *ns = rq->q->queuedata;
 	struct gendisk *disk = ns->head->disk;

-	if (READ_ONCE(ns->head->subsys->iopolicy) == NVME_IOPOLICY_QD) {
+	if ((READ_ONCE(ns->head->subsys->iopolicy) == NVME_IOPOLICY_QD) &&
+	    !(nvme_req(rq)->flags & NVME_MPATH_CNT_ACTIVE)) {
 		atomic_inc(&ns->ctrl->nr_active);
 		nvme_req(rq)->flags |= NVME_MPATH_CNT_ACTIVE;
 	}

-	if (!blk_queue_io_stat(disk->queue) || blk_rq_is_passthrough(rq))
+	if (!blk_queue_io_stat(disk->queue) || blk_rq_is_passthrough(rq) ||
+	    (nvme_req(rq)->flags & NVME_MPATH_IO_STATS))
 		return;

 	nvme_req(rq)->flags |= NVME_MPATH_IO_STATS;
```
+3
drivers/nvme/host/tcp.c
```diff
 	queue = sk->sk_user_data;
 	if (likely(queue && sk_stream_is_writeable(sk))) {
 		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+		/* Ensure pending TLS partial records are retried */
+		if (nvme_tcp_queue_tls(queue))
+			queue->write_space(sk);
 		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
 	}
 	read_unlock_bh(&sk->sk_callback_lock);
```