
tools include: Adopt kernel's refcount.h

To aid in catching bugs when using atomics as a reference count.

This is a trimmed-down version with just what is used by tools/ at
this point.

After this, the patches submitted by Elena converting tools/ from
atomic_ to refcount_ methods can be applied and tested.
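
The conversion itself is mechanical: an atomic_t that is only ever used as a
reference count becomes a refcount_t, and the atomic_* calls on it become the
matching refcount_* calls. In diff form, for a hypothetical tools/ struct
(the names below are invented here, not taken from those patches), it looks
roughly like this:

 struct object {
-	atomic_t	refcnt;
+	refcount_t	refcnt;
 };

 static void object__get(struct object *obj)
 {
-	atomic_inc(&obj->refcnt);
+	refcount_inc(&obj->refcnt);
 }

 static void object__put(struct object *obj)
 {
-	if (obj && atomic_dec_and_test(&obj->refcnt))
+	if (obj && refcount_dec_and_test(&obj->refcnt))
 		free(obj);
 }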

To activate it, build perf with:

make DEBUG=1 -C tools/perf

Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Elena Reshetova <elena.reshetova@intel.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Wang Nan <wangnan0@huawei.com>
Link: http://lkml.kernel.org/n/tip-dqtxsumns9ov0l9r5x398f19@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

2 files changed, 152 insertions(+)

tools/include/linux/refcount.h (new file, 151 lines):

#ifndef _TOOLS_LINUX_REFCOUNT_H
#define _TOOLS_LINUX_REFCOUNT_H

/*
 * Variant of atomic_t specialized for reference counts.
 *
 * The interface matches the atomic_t interface (to aid in porting) but only
 * provides the few functions one should use for reference counting.
 *
 * It differs in that the counter saturates at UINT_MAX and will not move once
 * there. This avoids wrapping the counter and causing 'spurious'
 * use-after-free issues.
 *
 * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
 * and provide only what is strictly required for refcounts.
 *
 * The increments are fully relaxed; these will not provide ordering. The
 * rationale is that whatever is used to obtain the object we're increasing the
 * reference count on will provide the ordering. For locked data structures,
 * its the lock acquire, for RCU/lockless data structures its the dependent
 * load.
 *
 * Do note that inc_not_zero() provides a control dependency which will order
 * future stores against the inc, this ensures we'll never modify the object
 * if we did not in fact acquire a reference.
 *
 * The decrements will provide release order, such that all the prior loads and
 * stores will be issued before, it also provides a control dependency, which
 * will order us against the subsequent free().
 *
 * The control dependency is against the load of the cmpxchg (ll/sc) that
 * succeeded. This means the stores aren't fully ordered, but this is fine
 * because the 1->0 transition indicates no concurrency.
 *
 * Note that the allocator is responsible for ordering things between free()
 * and alloc().
 *
 */

#include <linux/atomic.h>
#include <linux/kernel.h>

#ifdef NDEBUG
#define REFCOUNT_WARN(cond, str) (void)(cond)
#define __refcount_check
#else
#define REFCOUNT_WARN(cond, str) BUG_ON(cond)
#define __refcount_check	__must_check
#endif

typedef struct refcount_struct {
	atomic_t refs;
} refcount_t;

#define REFCOUNT_INIT(n)	{ .refs = ATOMIC_INIT(n), }

static inline void refcount_set(refcount_t *r, unsigned int n)
{
	atomic_set(&r->refs, n);
}

static inline unsigned int refcount_read(const refcount_t *r)
{
	return atomic_read(&r->refs);
}

/*
 * Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering, it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 */
static inline __refcount_check
bool refcount_inc_not_zero(refcount_t *r)
{
	unsigned int old, new, val = atomic_read(&r->refs);

	for (;;) {
		new = val + 1;

		if (!val)
			return false;

		if (unlikely(!new))
			return true;

		old = atomic_cmpxchg_relaxed(&r->refs, val, new);
		if (old == val)
			break;

		val = old;
	}

	REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");

	return true;
}

/*
 * Similar to atomic_inc(), will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering, it is assumed the caller already has a
 * reference on the object, will WARN when this is not so.
 */
static inline void refcount_inc(refcount_t *r)
{
	REFCOUNT_WARN(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
}

/*
 * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
 * decrement when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 */
static inline __refcount_check
bool refcount_sub_and_test(unsigned int i, refcount_t *r)
{
	unsigned int old, new, val = atomic_read(&r->refs);

	for (;;) {
		if (unlikely(val == UINT_MAX))
			return false;

		new = val - i;
		if (new > val) {
			REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n");
			return false;
		}

		old = atomic_cmpxchg_release(&r->refs, val, new);
		if (old == val)
			break;

		val = old;
	}

	return !new;
}

static inline __refcount_check
bool refcount_dec_and_test(refcount_t *r)
{
	return refcount_sub_and_test(1, r);
}

#endif /* _ATOMIC_LINUX_REFCOUNT_H */
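
For context only (this snippet is not part of the patch; the object type and
helpers below are invented), this is the usage pattern the ordering comments
above describe: the creator starts the count at 1, lookups through shared data
take their reference with refcount_inc_not_zero(), and only the final
refcount_dec_and_test() allows the free():

#include <stdlib.h>

#include <linux/refcount.h>

struct object {
	refcount_t	refs;
	/* payload ... */
};

static struct object *object__new(void)
{
	struct object *obj = calloc(1, sizeof(*obj));

	if (obj)
		refcount_set(&obj->refs, 1);	/* creator holds the first reference */
	return obj;
}

/*
 * Grab a reference on an object reached through a shared structure; if the
 * count already dropped to zero the object is being torn down and must not
 * be touched.
 */
static struct object *object__get(struct object *obj)
{
	if (obj && !refcount_inc_not_zero(&obj->refs))
		return NULL;
	return obj;
}

/*
 * Drop a reference; the release ordering in refcount_dec_and_test() makes
 * sure all prior stores to the object are visible before the 1->0 transition
 * that licenses the free().
 */
static void object__put(struct object *obj)
{
	if (obj && refcount_dec_and_test(&obj->refs))
		free(obj);
}

Built with DEBUG=1, an object__get() after the count has already hit zero, or
a double object__put(), trips the REFCOUNT_WARN() checks instead of silently
wrapping the counter.
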
tools/perf/MANIFEST (1 insertion):

 tools/include/linux/poison.h
 tools/include/linux/rbtree.h
 tools/include/linux/rbtree_augmented.h
+tools/include/linux/refcount.h
 tools/include/linux/string.h
 tools/include/linux/stringify.h
 tools/include/linux/types.h