/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_U64_STATS_SYNC_H
#define _LINUX_U64_STATS_SYNC_H

/*
 * To properly implement 64-bit network statistics on 32-bit and 64-bit hosts,
 * we provide a synchronization point that is a no-op on 64-bit or UP kernels.
 *
 * Key points:
 * 1) Use a seqcount on 32-bit SMP, with low overhead.
 * 2) The whole thing is a no-op on 64-bit arches or UP kernels.
 * 3) The write side must ensure mutual exclusion, or one seqcount update
 *    could be lost, thus blocking readers forever.
 *    If this synchronization point is not a mutex but a spinlock,
 *    spin_lock_bh() or disable_bh():
 * 3.1) The write side must not sleep.
 * 3.2) The write side must not allow preemption.
 * 3.3) If applicable, interrupts must be disabled.
 *
 * 4) If a reader fetches several counters, there is no guarantee that they
 *    are consistent with one another (remember point 1: this is a no-op on
 *    64-bit arches anyway).
 *
 * 5) Readers are allowed to sleep or be preempted/interrupted: they perform
 *    pure reads. But if they have to fetch many values, it is better not to
 *    allow preemption/interruption, to avoid many retries.
 *
 * 6) If a counter might be written by an interrupt, readers should block
 *    interrupts. (On UP there is no seqcount_t protection, so a reader
 *    allowing interrupts could read partial values.)
 *
 * 7) For irq and softirq uses, readers can use the u64_stats_fetch_begin_irq()
 *    and u64_stats_fetch_retry_irq() helpers.
 *
 * Usage:
 *
 * The stats producer (writer) should use the following template, provided it
 * already has exclusive access to the counters (a lock is already taken, or
 * per-cpu data is used in a non-preemptible context):
 *
 *	spin_lock_bh(...) or other synchronization to get exclusive access
 *	...
 *	u64_stats_update_begin(&stats->syncp);
 *	u64_stats_add(&stats->bytes64, len);	// non-atomic operation
 *	u64_stats_inc(&stats->packets64);	// non-atomic operation
 *	u64_stats_update_end(&stats->syncp);
 *
 * A consumer (reader) should use the following template to get a consistent
 * snapshot of each variable (but no consistency guarantee across several of
 * them):
 *
 *	u64 tbytes, tpackets;
 *	unsigned int start;
 *
 *	do {
 *		start = u64_stats_fetch_begin(&stats->syncp);
 *		tbytes = u64_stats_read(&stats->bytes64);	// non-atomic operation
 *		tpackets = u64_stats_read(&stats->packets64);	// non-atomic operation
 *	} while (u64_stats_fetch_retry(&stats->syncp, start));
 *
 * See drivers/net/loopback.c for an example of use with per_cpu containers,
 * in BH-disabled context; a minimal sketch of that pattern follows.
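 *
 * The sketch below assumes a per-cpu stats container modelled on loopback;
 * the struct layout and the dev->lstats pointer are illustrative, not the
 * exact loopback.c code:
 *
 *	struct pcpu_lstats {
 *		u64_stats_t		packets;
 *		u64_stats_t		bytes;
 *		struct u64_stats_sync	syncp;
 *	};
 *
 *	// Writer, running on its own cpu with BHs disabled, so the per-cpu
 *	// data itself provides the required exclusion: no lock is needed.
 *	struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats);
 *
 *	u64_stats_update_begin(&lstats->syncp);
 *	u64_stats_add(&lstats->bytes, len);
 *	u64_stats_inc(&lstats->packets);
 *	u64_stats_update_end(&lstats->syncp);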
 */
#include <linux/seqlock.h>

struct u64_stats_sync {
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	seqcount_t	seq;
#endif
};

#if BITS_PER_LONG == 64
#include <asm/local64.h>

/* On 64-bit, a u64 load/store cannot tear, so no seqcount is needed. */
typedef struct {
	local64_t	v;
} u64_stats_t;

static inline u64 u64_stats_read(const u64_stats_t *p)
{
	return local64_read(&p->v);
}

static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
{
	local64_add(val, &p->v);
}

static inline void u64_stats_inc(u64_stats_t *p)
{
	local64_inc(&p->v);
}

#else

/* On 32-bit, a plain u64; the syncp seqcount makes reads consistent. */
typedef struct {
	u64		v;
} u64_stats_t;

static inline u64 u64_stats_read(const u64_stats_t *p)
{
	return p->v;
}

static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
{
	p->v += val;
}

static inline void u64_stats_inc(u64_stats_t *p)
{
	p->v++;
}
#endif

static inline void u64_stats_init(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	seqcount_init(&syncp->seq);
#endif
}

static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	write_seqcount_begin(&syncp->seq);
#endif
}

static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	write_seqcount_end(&syncp->seq);
#endif
}

static inline unsigned long
u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp)
{
	unsigned long flags = 0;

#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	local_irq_save(flags);
	write_seqcount_begin(&syncp->seq);
#endif
	return flags;
}

static inline void
u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp,
				unsigned long flags)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	write_seqcount_end(&syncp->seq);
	local_irq_restore(flags);
#endif
}

static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	return read_seqcount_begin(&syncp->seq);
#else
	return 0;
#endif
}

static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
	preempt_disable();
#endif
	return __u64_stats_fetch_begin(syncp);
}

static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
					   unsigned int start)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	return read_seqcount_retry(&syncp->seq, start);
#else
	return false;
#endif
}

static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
					 unsigned int start)
{
#if BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
	preempt_enable();
#endif
	return __u64_stats_fetch_retry(syncp, start);
}

/*
 * In case irq handlers can update u64 counters, readers can use the following
 * helpers:
 * - 32-bit SMP arches use seqcount protection, irq safe.
 * - 32-bit UP must disable irqs.
 * - 64-bit arches can atomically read u64 values, irq safe.
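 *
 * For example, a reader sketch mirroring the reader template above, but
 * using the irq-safe variants (same illustrative field names):
 *
 *	u64 tbytes;
 *	unsigned int start;
 *
 *	do {
 *		start = u64_stats_fetch_begin_irq(&stats->syncp);
 *		tbytes = u64_stats_read(&stats->bytes64);
 *	} while (u64_stats_fetch_retry_irq(&stats->syncp, start));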
 */
static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
	local_irq_disable();
#endif
	return __u64_stats_fetch_begin(syncp);
}

static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
					     unsigned int start)
{
#if BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
	local_irq_enable();
#endif
	return __u64_stats_fetch_retry(syncp, start);
}

#endif /* _LINUX_U64_STATS_SYNC_H */