Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

percpu_ref: add PERCPU_REF_INIT_* flags

With the recent addition of percpu_ref_reinit(), percpu_ref now can be
used as a persistent switch which can be turned on and off repeatedly
where turning off maps to killing the ref and waiting for it to drain;
however, there currently isn't a way to initialize a percpu_ref in its
off (killed and drained) state, which can be inconvenient for certain
persistent switch use cases.

Similarly, percpu_ref_switch_to_atomic/percpu() allow dynamic
selection of operation mode; however, currently a newly initialized
percpu_ref is always in percpu mode, making it impossible to avoid the
latency overhead of switching to atomic mode.

This patch adds @flags to percpu_ref_init() and implements the
following flags.

* PERCPU_REF_INIT_ATOMIC : start ref in atomic mode
* PERCPU_REF_INIT_DEAD : start ref killed and drained

These flags should be able to serve the above two use cases.

v2: target_core_tpg.c conversion was missing. Fixed.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Kent Overstreet <kmo@daterainc.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>

Tejun Heo 2aad2a86 f47ad457

+43 -13
+1 -1
block/blk-mq.c
··· 1796 1796 goto err_hctxs; 1797 1797 1798 1798 if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release, 1799 - GFP_KERNEL)) 1799 + 0, GFP_KERNEL)) 1800 1800 goto err_map; 1801 1801 1802 1802 setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
+1 -1
drivers/target/target_core_tpg.c
··· 819 819 { 820 820 int ret; 821 821 822 - ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 822 + ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0, 823 823 GFP_KERNEL); 824 824 if (ret < 0) 825 825 return ret;
+2 -2
fs/aio.c
··· 661 661 662 662 INIT_LIST_HEAD(&ctx->active_reqs); 663 663 664 - if (percpu_ref_init(&ctx->users, free_ioctx_users, GFP_KERNEL)) 664 + if (percpu_ref_init(&ctx->users, free_ioctx_users, 0, GFP_KERNEL)) 665 665 goto err; 666 666 667 - if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs, GFP_KERNEL)) 667 + if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs, 0, GFP_KERNEL)) 668 668 goto err; 669 669 670 670 ctx->cpu = alloc_percpu(struct kioctx_cpu);
+17 -1
include/linux/percpu-refcount.h
··· 63 63 __PERCPU_REF_FLAG_BITS = 2, 64 64 }; 65 65 66 + /* @flags for percpu_ref_init() */ 67 + enum { 68 + /* 69 + * Start w/ ref == 1 in atomic mode. Can be switched to percpu 70 + * operation using percpu_ref_switch_to_percpu(). 71 + */ 72 + PERCPU_REF_INIT_ATOMIC = 1 << 0, 73 + 74 + /* 75 + * Start dead w/ ref == 0 in atomic mode. Must be revived with 76 + * percpu_ref_reinit() before used. Implies INIT_ATOMIC. 77 + */ 78 + PERCPU_REF_INIT_DEAD = 1 << 1, 79 + }; 80 + 66 81 struct percpu_ref { 67 82 atomic_long_t count; 68 83 /* ··· 91 76 }; 92 77 93 78 int __must_check percpu_ref_init(struct percpu_ref *ref, 94 - percpu_ref_func_t *release, gfp_t gfp); 79 + percpu_ref_func_t *release, unsigned int flags, 80 + gfp_t gfp); 95 81 void percpu_ref_exit(struct percpu_ref *ref); 96 82 void percpu_ref_switch_to_atomic(struct percpu_ref *ref, 97 83 percpu_ref_func_t *confirm_switch);
+4 -3
kernel/cgroup.c
··· 1634 1634 goto out; 1635 1635 root_cgrp->id = ret; 1636 1636 1637 - ret = percpu_ref_init(&root_cgrp->self.refcnt, css_release, GFP_KERNEL); 1637 + ret = percpu_ref_init(&root_cgrp->self.refcnt, css_release, 0, 1638 + GFP_KERNEL); 1638 1639 if (ret) 1639 1640 goto out; 1640 1641 ··· 4511 4510 4512 4511 init_and_link_css(css, ss, cgrp); 4513 4512 4514 - err = percpu_ref_init(&css->refcnt, css_release, GFP_KERNEL); 4513 + err = percpu_ref_init(&css->refcnt, css_release, 0, GFP_KERNEL); 4515 4514 if (err) 4516 4515 goto err_free_css; 4517 4516 ··· 4584 4583 goto out_unlock; 4585 4584 } 4586 4585 4587 - ret = percpu_ref_init(&cgrp->self.refcnt, css_release, GFP_KERNEL); 4586 + ret = percpu_ref_init(&cgrp->self.refcnt, css_release, 0, GFP_KERNEL); 4588 4587 if (ret) 4589 4588 goto out_free_cgrp; 4590 4589
+18 -5
lib/percpu-refcount.c
··· 45 45 * percpu_ref_init - initialize a percpu refcount 46 46 * @ref: percpu_ref to initialize 47 47 * @release: function which will be called when refcount hits 0 48 + * @flags: PERCPU_REF_INIT_* flags 48 49 * @gfp: allocation mask to use 49 50 * 50 - * Initializes the refcount in single atomic counter mode with a refcount of 1; 51 - * analagous to atomic_long_set(ref, 1). 51 + * Initializes @ref. If @flags is zero, @ref starts in percpu mode with a 52 + * refcount of 1; analagous to atomic_long_set(ref, 1). See the 53 + * definitions of PERCPU_REF_INIT_* flags for flag behaviors. 52 54 * 53 55 * Note that @release must not sleep - it may potentially be called from RCU 54 56 * callback context by percpu_ref_kill(). 55 57 */ 56 58 int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release, 57 - gfp_t gfp) 59 + unsigned int flags, gfp_t gfp) 58 60 { 59 61 size_t align = max_t(size_t, 1 << __PERCPU_REF_FLAG_BITS, 60 62 __alignof__(unsigned long)); 61 - 62 - atomic_long_set(&ref->count, 1 + PERCPU_COUNT_BIAS); 63 + unsigned long start_count = 0; 63 64 64 65 ref->percpu_count_ptr = (unsigned long) 65 66 __alloc_percpu_gfp(sizeof(unsigned long), align, gfp); 66 67 if (!ref->percpu_count_ptr) 67 68 return -ENOMEM; 69 + 70 + if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD)) 71 + ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC; 72 + else 73 + start_count += PERCPU_COUNT_BIAS; 74 + 75 + if (flags & PERCPU_REF_INIT_DEAD) 76 + ref->percpu_count_ptr |= __PERCPU_REF_DEAD; 77 + else 78 + start_count++; 79 + 80 + atomic_long_set(&ref->count, start_count); 68 81 69 82 ref->release = release; 70 83 return 0;