Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'for-5.18/block-2022-04-01' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
"Either fixes or a few additions that got missed in the initial merge
window pull. In detail:

- List iterator fix to avoid leaking value post loop (Jakob)

- One-off fix in minor count (Christophe)

- Fix for a regression in how io priority setting works for an
exiting task (Jiri)

- Fix a regression in this merge window with blkg_free() being called
in an inappropriate context (Ming)

- Misc fixes (Ming, Tom)"

* tag 'for-5.18/block-2022-04-01' of git://git.kernel.dk/linux-block:
blk-wbt: remove wbt_track stub
block: use dedicated list iterator variable
block: Fix the maximum minor value is blk_alloc_ext_minor()
block: restore the old set_task_ioprio() behaviour wrt PF_EXITING
block: avoid calling blkg_free() in atomic context
lib/sbitmap: allocate sb->map via kvzalloc_node

Diffstat: 8 files changed, 46 insertions(+), 28 deletions(-)
block/blk-cgroup.c  (+22 -10)
···
 	return pol && test_bit(pol->plid, q->blkcg_pols);
 }
 
-/**
- * blkg_free - free a blkg
- * @blkg: blkg to free
- *
- * Free @blkg which may be partially allocated.
- */
-static void blkg_free(struct blkcg_gq *blkg)
+static void blkg_free_workfn(struct work_struct *work)
 {
+	struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
+					     free_work);
 	int i;
-
-	if (!blkg)
-		return;
 
 	for (i = 0; i < BLKCG_MAX_POLS; i++)
 		if (blkg->pd[i])
···
 	free_percpu(blkg->iostat_cpu);
 	percpu_ref_exit(&blkg->refcnt);
 	kfree(blkg);
+}
+
+/**
+ * blkg_free - free a blkg
+ * @blkg: blkg to free
+ *
+ * Free @blkg which may be partially allocated.
+ */
+static void blkg_free(struct blkcg_gq *blkg)
+{
+	if (!blkg)
+		return;
+
+	/*
+	 * Both ->pd_free_fn() and request queue's release handler may
+	 * sleep, so free us by scheduling one work func
+	 */
+	INIT_WORK(&blkg->free_work, blkg_free_workfn);
+	schedule_work(&blkg->free_work);
 }
 
 static void __blkg_release(struct rcu_head *rcu)
block/blk-ioc.c  (+1 -2)
···
 
 	task_lock(task);
 	if (task->flags & PF_EXITING) {
-		err = -ESRCH;
 		kmem_cache_free(iocontext_cachep, ioc);
 		goto out;
 	}
···
 		task->io_context->ioprio = ioprio;
 out:
 	task_unlock(task);
-	return err;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(set_task_ioprio);
block/blk-mq.c  (+16 -9)
···
 	return true;
 }
 
-static void blk_mq_elv_switch_back(struct list_head *head,
-		struct request_queue *q)
+static struct blk_mq_qe_pair *blk_lookup_qe_pair(struct list_head *head,
+		struct request_queue *q)
 {
 	struct blk_mq_qe_pair *qe;
-	struct elevator_type *t = NULL;
 
 	list_for_each_entry(qe, head, node)
-		if (qe->q == q) {
-			t = qe->type;
-			break;
-		}
+		if (qe->q == q)
+			return qe;
 
-	if (!t)
+	return NULL;
+}
+
+static void blk_mq_elv_switch_back(struct list_head *head,
+		struct request_queue *q)
+{
+	struct blk_mq_qe_pair *qe;
+	struct elevator_type *t;
+
+	qe = blk_lookup_qe_pair(head, q);
+	if (!qe)
 		return;
-
+	t = qe->type;
 	list_del(&qe->node);
 	kfree(qe);
block/blk-wbt.h  (-3)
···
 
 #else
 
-static inline void wbt_track(struct request *rq, enum wbt_flags flags)
-{
-}
 static inline int wbt_init(struct request_queue *q)
 {
 	return -EINVAL;
block/genhd.c  (+1 -1)
···
 {
 	int idx;
 
-	idx = ida_alloc_range(&ext_devt_ida, 0, NR_EXT_DEVT, GFP_KERNEL);
+	idx = ida_alloc_range(&ext_devt_ida, 0, NR_EXT_DEVT - 1, GFP_KERNEL);
 	if (idx == -ENOSPC)
 		return -EBUSY;
 	return idx;
include/linux/blk-cgroup.h  (+4 -1)
···
 
 	spinlock_t			async_bio_lock;
 	struct bio_list			async_bios;
-	struct work_struct		async_bio_work;
+	union {
+		struct work_struct	async_bio_work;
+		struct work_struct	free_work;
+	};
 
 	atomic_t			use_delay;
 	atomic64_t			delay_nsec;
include/linux/sbitmap.h  (+1 -1)
···
 static inline void sbitmap_free(struct sbitmap *sb)
 {
 	free_percpu(sb->alloc_hint);
-	kfree(sb->map);
+	kvfree(sb->map);
 	sb->map = NULL;
 }
lib/sbitmap.c  (+1 -1)
···
 		sb->alloc_hint = NULL;
 	}
 
-	sb->map = kcalloc_node(sb->map_nr, sizeof(*sb->map), flags, node);
+	sb->map = kvzalloc_node(sb->map_nr * sizeof(*sb->map), flags, node);
 	if (!sb->map) {
 		free_percpu(sb->alloc_hint);
 		return -ENOMEM;