/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H

/*
 * seqcount_t / seqlock_t - a reader-writer consistency mechanism with
 * lockless readers (read-only retry loops), and no writer starvation.
 *
 * See Documentation/locking/seqlock.rst
 *
 * Copyrights:
 * - Based on x86_64 vsyscall gettimeofday: Keith Owens, Andrea Arcangeli
 * - Sequence counters with associated locks, (C) 2020 Linutronix GmbH
 */

#include <linux/compiler.h>
#include <linux/kcsan-checks.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>

#include <asm/processor.h>

/*
 * The seqlock seqcount_t interface does not prescribe a precise sequence of
 * read begin/retry/end. For readers, typically there is a call to
 * read_seqcount_begin() and read_seqcount_retry(); however, there are more
 * esoteric cases which do not follow this pattern.
 *
 * As a consequence, we take the following best-effort approach for raw usage
 * via seqcount_t under KCSAN: upon beginning a seq-reader critical section,
 * pessimistically mark the next KCSAN_SEQLOCK_REGION_MAX memory accesses as
 * atomics; if there is a matching read_seqcount_retry() call, no following
 * memory operations are considered atomic. Usage of the seqlock_t interface
 * is not affected.
 */
#define KCSAN_SEQLOCK_REGION_MAX 1000

/*
 * Sequence counters (seqcount_t)
 *
 * This is the raw counting mechanism, without any writer protection.
 *
 * Write side critical sections must be serialized and non-preemptible.
 *
 * If readers can be invoked from hardirq or softirq contexts,
 * interrupts or bottom halves must also be respectively disabled before
 * entering the write section.
 *
 * This mechanism can't be used if the protected data contains pointers,
 * as the writer can invalidate a pointer that a reader is following.
 *
 * If the write serialization mechanism is one of the common kernel
 * locking primitives, use a sequence counter with associated lock
 * (seqcount_LOCKNAME_t) instead.
 *
 * If it's desired to automatically handle the sequence counter writer
 * serialization and non-preemptibility requirements, use a sequential
 * lock (seqlock_t) instead.
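 *
 * As a rough usage sketch only (foo_seq, foo_lock and the foo_* data are
 * hypothetical, not part of this header): a writer already serialized by
 * a raw_spinlock_t, paired with a lockless retry-loop reader::
 *
 *	static seqcount_t foo_seq = SEQCNT_ZERO(foo_seq);
 *	static DEFINE_RAW_SPINLOCK(foo_lock);
 *	static int foo_a, foo_b;
 *
 *	void foo_update(int a, int b)
 *	{
 *		raw_spin_lock(&foo_lock);	// serialized + non-preemptible
 *		write_seqcount_begin(&foo_seq);
 *		foo_a = a;
 *		foo_b = b;
 *		write_seqcount_end(&foo_seq);
 *		raw_spin_unlock(&foo_lock);
 *	}
 *
 *	void foo_read(int *a, int *b)
 *	{
 *		unsigned int seq;
 *
 *		do {
 *			seq = read_seqcount_begin(&foo_seq);
 *			*a = foo_a;
 *			*b = foo_b;
 *		} while (read_seqcount_retry(&foo_seq, seq));
 *	}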
 *
 * See Documentation/locking/seqlock.rst
 */
typedef struct seqcount {
	unsigned sequence;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} seqcount_t;

static inline void __seqcount_init(seqcount_t *s, const char *name,
				   struct lock_class_key *key)
{
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	lockdep_init_map(&s->dep_map, name, key, 0);
	s->sequence = 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

# define SEQCOUNT_DEP_MAP_INIT(lockname)	\
		.dep_map = { .name = #lockname }

/**
 * seqcount_init() - runtime initializer for seqcount_t
 * @s: Pointer to the seqcount_t instance
 */
# define seqcount_init(s)	\
	do {	\
		static struct lock_class_key __key;	\
		__seqcount_init((s), #s, &__key);	\
	} while (0)

static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
{
	seqcount_t *l = (seqcount_t *)s;
	unsigned long flags;

	local_irq_save(flags);
	seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
	seqcount_release(&l->dep_map, _RET_IP_);
	local_irq_restore(flags);
}

#else
# define SEQCOUNT_DEP_MAP_INIT(lockname)
# define seqcount_init(s) __seqcount_init(s, NULL, NULL)
# define seqcount_lockdep_reader_access(x)
#endif

/**
 * SEQCNT_ZERO() - static initializer for seqcount_t
 * @name: Name of the seqcount_t instance
 */
#define SEQCNT_ZERO(name) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(name) }

/*
 * Sequence counters with associated locks (seqcount_LOCKNAME_t)
 *
 * A sequence counter which associates the lock used for writer
 * serialization at initialization time. This enables lockdep to validate
 * that the write side critical section is properly serialized.
 *
 * For associated locks which do not implicitly disable preemption,
 * preemption protection is enforced in the write side function.
 *
 * Lockdep is never used for any of the raw write variants.
 *
 * See Documentation/locking/seqlock.rst
 */

/*
 * For PREEMPT_RT, seqcount_LOCKNAME_t write side critical sections cannot
 * disable preemption. It can lead to higher latencies, and the write side
 * sections will not be able to acquire locks which become sleeping locks
 * (e.g. spinlock_t).
 *
 * To remain preemptible while avoiding a possible livelock caused by the
 * reader preempting the writer, use a different technique: let the reader
 * detect if a seqcount_LOCKNAME_t writer is in progress. If that is the
 * case, acquire then release the associated LOCKNAME writer serialization
 * lock. This will allow any possibly-preempted writer to make progress
 * until the end of its writer serialization lock critical section.
 *
 * This lock-unlock technique must be implemented for all of PREEMPT_RT
 * sleeping locks. See Documentation/locking/locktypes.rst
 */
#if defined(CONFIG_LOCKDEP) || defined(CONFIG_PREEMPT_RT)
#define __SEQ_LOCK(expr)	expr
#else
#define __SEQ_LOCK(expr)
#endif

/*
 * typedef seqcount_LOCKNAME_t - sequence counter with LOCKNAME associated
 * @seqcount:	The real sequence counter
 * @lock:	Pointer to the associated lock
 *
 * A plain sequence counter with external writer synchronization by
 * LOCKNAME @lock. The lock is associated to the sequence counter in the
 * static initializer or init function. This enables lockdep to validate
 * that the write side critical section is properly serialized.
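 *
 * As a hedged illustration (the "foo" names are made up, not part of this
 * header), a seqcount_spinlock_t can be bound to its spinlock_t either
 * statically or at runtime::
 *
 *	static DEFINE_SPINLOCK(foo_lock);
 *	static seqcount_spinlock_t foo_seq =
 *		SEQCNT_SPINLOCK_ZERO(foo_seq, &foo_lock);
 *
 *	// or, for dynamically allocated objects:
 *	spin_lock_init(&foo->lock);
 *	seqcount_spinlock_init(&foo->seq, &foo->lock);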
 *
 * LOCKNAME:	raw_spinlock, spinlock, rwlock, mutex, or ww_mutex.
 */

/*
 * seqcount_LOCKNAME_init() - runtime initializer for seqcount_LOCKNAME_t
 * @s:		Pointer to the seqcount_LOCKNAME_t instance
 * @lock:	Pointer to the associated lock
 */

#define seqcount_LOCKNAME_init(s, _lock, lockname)	\
	do {	\
		seqcount_##lockname##_t *____s = (s);	\
		seqcount_init(&____s->seqcount);	\
		__SEQ_LOCK(____s->lock = (_lock));	\
	} while (0)

#define seqcount_raw_spinlock_init(s, lock)	seqcount_LOCKNAME_init(s, lock, raw_spinlock)
#define seqcount_spinlock_init(s, lock)		seqcount_LOCKNAME_init(s, lock, spinlock)
#define seqcount_rwlock_init(s, lock)		seqcount_LOCKNAME_init(s, lock, rwlock)
#define seqcount_mutex_init(s, lock)		seqcount_LOCKNAME_init(s, lock, mutex)
#define seqcount_ww_mutex_init(s, lock)		seqcount_LOCKNAME_init(s, lock, ww_mutex)

/*
 * SEQCOUNT_LOCKNAME()	- Instantiate seqcount_LOCKNAME_t and helpers
 * seqprop_LOCKNAME_*()	- Property accessors for seqcount_LOCKNAME_t
 *
 * @lockname:		"LOCKNAME" part of seqcount_LOCKNAME_t
 * @locktype:		LOCKNAME canonical C data type
 * @preemptible:	preemptibility of above locktype
 * @lockmember:		argument for lockdep_assert_held()
 * @lockbase:		associated lock release function (prefix only)
 * @lock_acquire:	associated lock acquisition function (full call)
 */
#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockmember, lockbase, lock_acquire) \
typedef struct seqcount_##lockname {	\
	seqcount_t		seqcount;	\
	__SEQ_LOCK(locktype	*lock);	\
} seqcount_##lockname##_t;	\
	\
static __always_inline seqcount_t *	\
__seqprop_##lockname##_ptr(seqcount_##lockname##_t *s)	\
{	\
	return &s->seqcount;	\
}	\
	\
static __always_inline unsigned	\
__seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s)	\
{	\
	unsigned seq = READ_ONCE(s->seqcount.sequence);	\
	\
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))	\
		return seq;	\
	\
	if (preemptible && unlikely(seq & 1)) {	\
		__SEQ_LOCK(lock_acquire);	\
		__SEQ_LOCK(lockbase##_unlock(s->lock));	\
	\
		/*	\
		 * Re-read the sequence counter since the (possibly	\
		 * preempted) writer made progress.	\
		 */	\
		seq = READ_ONCE(s->seqcount.sequence);	\
	}	\
	\
	return seq;	\
}	\
	\
static __always_inline bool	\
__seqprop_##lockname##_preemptible(const seqcount_##lockname##_t *s)	\
{	\
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))	\
		return preemptible;	\
	\
	/* PREEMPT_RT relies on the above LOCK+UNLOCK */	\
	return false;	\
}	\
	\
static __always_inline void	\
__seqprop_##lockname##_assert(const seqcount_##lockname##_t *s)	\
{	\
	__SEQ_LOCK(lockdep_assert_held(lockmember));	\
}

/*
 * __seqprop() for seqcount_t
 */

static inline seqcount_t *__seqprop_ptr(seqcount_t *s)
{
	return s;
}

static inline unsigned __seqprop_sequence(const seqcount_t *s)
{
	return READ_ONCE(s->sequence);
}

static inline bool __seqprop_preemptible(const seqcount_t *s)
{
	return false;
}

static inline void __seqprop_assert(const seqcount_t *s)
{
	lockdep_assert_preemption_disabled();
}

#define __SEQ_RT	IS_ENABLED(CONFIG_PREEMPT_RT)

SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t,  false,    s->lock,        raw_spin, raw_spin_lock(s->lock))
SEQCOUNT_LOCKNAME(spinlock,     spinlock_t,      __SEQ_RT, s->lock,        spin,     spin_lock(s->lock))
SEQCOUNT_LOCKNAME(rwlock,       rwlock_t,        __SEQ_RT, s->lock,        read,     read_lock(s->lock))
SEQCOUNT_LOCKNAME(mutex,        struct mutex,    true,     s->lock,        mutex,    mutex_lock(s->lock))
SEQCOUNT_LOCKNAME(ww_mutex,     struct ww_mutex, true,     &s->lock->base, ww_mutex, ww_mutex_lock(s->lock, NULL))

/*
 * SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t
 * @name:	Name of the seqcount_LOCKNAME_t instance
 * @lock:	Pointer to the associated LOCKNAME
 */

#define SEQCOUNT_LOCKNAME_ZERO(seq_name, assoc_lock) {	\
	.seqcount		= SEQCNT_ZERO(seq_name.seqcount),	\
	__SEQ_LOCK(.lock	= (assoc_lock))	\
}

#define SEQCNT_RAW_SPINLOCK_ZERO(name, lock)	SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define SEQCNT_SPINLOCK_ZERO(name, lock)	SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define SEQCNT_RWLOCK_ZERO(name, lock)		SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define SEQCNT_MUTEX_ZERO(name, lock)		SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define SEQCNT_WW_MUTEX_ZERO(name, lock)	SEQCOUNT_LOCKNAME_ZERO(name, lock)

#define __seqprop_case(s, lockname, prop)	\
	seqcount_##lockname##_t: __seqprop_##lockname##_##prop((void *)(s))

#define __seqprop(s, prop) _Generic(*(s),	\
	seqcount_t:		__seqprop_##prop((void *)(s)),	\
	__seqprop_case((s),	raw_spinlock,	prop),	\
	__seqprop_case((s),	spinlock,	prop),	\
	__seqprop_case((s),	rwlock,		prop),	\
	__seqprop_case((s),	mutex,		prop),	\
	__seqprop_case((s),	ww_mutex,	prop))

#define seqprop_ptr(s)			__seqprop(s, ptr)
#define seqprop_sequence(s)		__seqprop(s, sequence)
#define seqprop_preemptible(s)		__seqprop(s, preemptible)
#define seqprop_assert(s)		__seqprop(s, assert)

/**
 * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define __read_seqcount_begin(s)	\
({	\
	unsigned __seq;	\
	\
	while ((__seq = seqprop_sequence(s)) & 1)	\
		cpu_relax();	\
	\
	kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);	\
	__seq;	\
})

/**
 * raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define raw_read_seqcount_begin(s)	\
({	\
	unsigned _seq = __read_seqcount_begin(s);	\
	\
	smp_rmb();	\
	_seq;	\
})

/**
 * read_seqcount_begin() - begin a seqcount_t read critical section
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define read_seqcount_begin(s)	\
({	\
	seqcount_lockdep_reader_access(seqprop_ptr(s));	\
	raw_read_seqcount_begin(s);	\
})

/**
 * raw_read_seqcount() - read the raw seqcount_t counter value
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * raw_read_seqcount opens a read critical section of the given
 * seqcount_t, without any lockdep checking, and without checking or
 * masking the sequence counter LSB. Calling code is responsible for
 * handling that.
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define raw_read_seqcount(s)	\
({	\
	unsigned __seq = seqprop_sequence(s);	\
	\
	smp_rmb();	\
	kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);	\
	__seq;	\
})

/**
 * raw_seqcount_begin() - begin a seqcount_t read critical section w/o
 *                        lockdep and w/o counter stabilization
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * raw_seqcount_begin opens a read critical section of the given
 * seqcount_t. Unlike read_seqcount_begin(), this function will not wait
 * for the count to stabilize. If a writer is active when it begins, it
 * will fail the read_seqcount_retry() at the end of the read critical
 * section instead of stabilizing at the beginning of it.
 *
 * Use this only in special kernel hot paths where the read section is
 * small and has a high probability of success through other external
 * means. It will save a single branching instruction.
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define raw_seqcount_begin(s)	\
({	\
	/*	\
	 * If the counter is odd, let read_seqcount_retry() fail	\
	 * by decrementing the counter.	\
	 */	\
	raw_read_seqcount(s) & ~1;	\
})

/**
 * __read_seqcount_retry() - end a seqcount_t read section w/o barrier
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 * @start: count, from read_seqcount_begin()
 *
 * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
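 *
 * As an illustration only (this open-codes what read_seqcount_begin() and
 * read_seqcount_retry() already do; "foo_seq" is hypothetical), the
 * required ordering could be supplied explicitly like this::
 *
 *	unsigned int seq;
 *
 *	do {
 *		seq = __read_seqcount_begin(&foo_seq);
 *		smp_rmb();	// order the counter load before the data loads
 *
 *		// ... load the protected data ...
 *
 *		smp_rmb();	// order the data loads before the re-check
 *	} while (__read_seqcount_retry(&foo_seq, seq));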
 *
 * Return: true if a read section retry is required, else false
 */
#define __read_seqcount_retry(s, start)	\
	do___read_seqcount_retry(seqprop_ptr(s), start)

static inline int do___read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	kcsan_atomic_next(0);
	return unlikely(READ_ONCE(s->sequence) != start);
}

/**
 * read_seqcount_retry() - end a seqcount_t read critical section
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 * @start: count, from read_seqcount_begin()
 *
 * read_seqcount_retry closes the read critical section of given
 * seqcount_t. If the critical section was invalid, it must be ignored
 * (and typically retried).
 *
 * Return: true if a read section retry is required, else false
 */
#define read_seqcount_retry(s, start)	\
	do_read_seqcount_retry(seqprop_ptr(s), start)

static inline int do_read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	smp_rmb();
	return do___read_seqcount_retry(s, start);
}

/**
 * raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Context: check write_seqcount_begin()
 */
#define raw_write_seqcount_begin(s)	\
do {	\
	if (seqprop_preemptible(s))	\
		preempt_disable();	\
	\
	do_raw_write_seqcount_begin(seqprop_ptr(s));	\
} while (0)

static inline void do_raw_write_seqcount_begin(seqcount_t *s)
{
	kcsan_nestable_atomic_begin();
	s->sequence++;
	smp_wmb();
}

/**
 * raw_write_seqcount_end() - end a seqcount_t write section w/o lockdep
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Context: check write_seqcount_end()
 */
#define raw_write_seqcount_end(s)	\
do {	\
	do_raw_write_seqcount_end(seqprop_ptr(s));	\
	\
	if (seqprop_preemptible(s))	\
		preempt_enable();	\
} while (0)

static inline void do_raw_write_seqcount_end(seqcount_t *s)
{
	smp_wmb();
	s->sequence++;
	kcsan_nestable_atomic_end();
}

/**
 * write_seqcount_begin_nested() - start a seqcount_t write section with
 *                                 custom lockdep nesting level
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 * @subclass: lockdep nesting level
 *
 * See Documentation/locking/lockdep-design.rst
 * Context: check write_seqcount_begin()
 */
#define write_seqcount_begin_nested(s, subclass)	\
do {	\
	seqprop_assert(s);	\
	\
	if (seqprop_preemptible(s))	\
		preempt_disable();	\
	\
	do_write_seqcount_begin_nested(seqprop_ptr(s), subclass);	\
} while (0)

static inline void do_write_seqcount_begin_nested(seqcount_t *s, int subclass)
{
	do_raw_write_seqcount_begin(s);
	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
}

/**
 * write_seqcount_begin() - start a seqcount_t write side critical section
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Context: sequence counter write side sections must be serialized and
 * non-preemptible. Preemption will be automatically disabled if and
 * only if the seqcount write serialization lock is associated, and
 * preemptible. If readers can be invoked from hardirq or softirq
 * context, interrupts or bottom halves must be respectively disabled.
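 *
 * A hedged sketch (the names are hypothetical; foo_seq is assumed to be a
 * seqcount_spinlock_t associated with foo_lock): when readers can run in
 * hardirq context, the write side serializes and disables interrupts::
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&foo_lock, flags);
 *	write_seqcount_begin(&foo_seq);
 *	// ... update the protected data ...
 *	write_seqcount_end(&foo_seq);
 *	spin_unlock_irqrestore(&foo_lock, flags);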
 */
#define write_seqcount_begin(s)	\
do {	\
	seqprop_assert(s);	\
	\
	if (seqprop_preemptible(s))	\
		preempt_disable();	\
	\
	do_write_seqcount_begin(seqprop_ptr(s));	\
} while (0)

static inline void do_write_seqcount_begin(seqcount_t *s)
{
	do_write_seqcount_begin_nested(s, 0);
}

/**
 * write_seqcount_end() - end a seqcount_t write side critical section
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Context: Preemption will be automatically re-enabled if and only if
 * the seqcount write serialization lock is associated, and preemptible.
 */
#define write_seqcount_end(s)	\
do {	\
	do_write_seqcount_end(seqprop_ptr(s));	\
	\
	if (seqprop_preemptible(s))	\
		preempt_enable();	\
} while (0)

static inline void do_write_seqcount_end(seqcount_t *s)
{
	seqcount_release(&s->dep_map, _RET_IP_);
	do_raw_write_seqcount_end(s);
}

/**
 * raw_write_seqcount_barrier() - do a seqcount_t write barrier
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * This can be used to provide an ordering guarantee instead of the usual
 * consistency guarantee. It is one wmb cheaper, because it can collapse
 * the two back-to-back wmb()s.
 *
 * Note that writes surrounding the barrier should be declared atomic (e.g.
 * via WRITE_ONCE): a) to ensure the writes become visible to other threads
 * atomically, avoiding compiler optimizations; b) to document which writes are
 * meant to propagate to the reader critical section. This is necessary because
 * neither the writes before nor the writes after the barrier are enclosed in a
 * seq-writer critical section that would ensure readers are aware of ongoing
 * writes::
 *
 *	seqcount_t seq;
 *	bool X = true, Y = false;
 *
 *	void read(void)
 *	{
 *		bool x, y;
 *
 *		do {
 *			int s = read_seqcount_begin(&seq);
 *
 *			x = X; y = Y;
 *
 *		} while (read_seqcount_retry(&seq, s));
 *
 *		BUG_ON(!x && !y);
 *	}
 *
 *	void write(void)
 *	{
 *		WRITE_ONCE(Y, true);
 *
 *		raw_write_seqcount_barrier(&seq);
 *
 *		WRITE_ONCE(X, false);
 *	}
 */
#define raw_write_seqcount_barrier(s)	\
	do_raw_write_seqcount_barrier(seqprop_ptr(s))

static inline void do_raw_write_seqcount_barrier(seqcount_t *s)
{
	kcsan_nestable_atomic_begin();
	s->sequence++;
	smp_wmb();
	s->sequence++;
	kcsan_nestable_atomic_end();
}

/**
 * write_seqcount_invalidate() - invalidate in-progress seqcount_t read
 *                               side operations
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * After write_seqcount_invalidate, no seqcount_t read side operations
 * will complete successfully and see data older than this.
 */
#define write_seqcount_invalidate(s)	\
	do_write_seqcount_invalidate(seqprop_ptr(s))

static inline void do_write_seqcount_invalidate(seqcount_t *s)
{
	smp_wmb();
	kcsan_nestable_atomic_begin();
	s->sequence += 2;
	kcsan_nestable_atomic_end();
}

/*
 * Latch sequence counters (seqcount_latch_t)
 *
 * A sequence counter variant where the counter even/odd value is used to
 * switch between two copies of protected data. This allows the read path,
 * typically NMIs, to safely interrupt the write side critical section.
 *
 * As the write sections are fully preemptible, no special handling for
 * PREEMPT_RT is needed.
 */
typedef struct {
	seqcount_t seqcount;
} seqcount_latch_t;

/**
 * SEQCNT_LATCH_ZERO() - static initializer for seqcount_latch_t
 * @seq_name: Name of the seqcount_latch_t instance
 */
#define SEQCNT_LATCH_ZERO(seq_name) {	\
	.seqcount		= SEQCNT_ZERO(seq_name.seqcount),	\
}

/**
 * seqcount_latch_init() - runtime initializer for seqcount_latch_t
 * @s: Pointer to the seqcount_latch_t instance
 */
static inline void seqcount_latch_init(seqcount_latch_t *s)
{
	seqcount_init(&s->seqcount);
}

/**
 * raw_read_seqcount_latch() - pick even/odd latch data copy
 * @s: Pointer to seqcount_latch_t
 *
 * See raw_write_seqcount_latch() for details and a full reader/writer
 * usage example.
 *
 * Return: sequence counter raw value. Use the lowest bit as an index for
 * picking which data copy to read. The full counter must then be checked
 * with read_seqcount_latch_retry().
 */
static inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
{
	/*
	 * Pairs with the first smp_wmb() in raw_write_seqcount_latch().
	 * Due to the dependent load, a full smp_rmb() is not needed.
	 */
	return READ_ONCE(s->seqcount.sequence);
}

/**
 * read_seqcount_latch_retry() - end a seqcount_latch_t read section
 * @s:		Pointer to seqcount_latch_t
 * @start:	count, from raw_read_seqcount_latch()
 *
 * Return: true if a read section retry is required, else false
 */
static inline int
read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
{
	return read_seqcount_retry(&s->seqcount, start);
}

/**
 * raw_write_seqcount_latch() - redirect latch readers to even/odd copy
 * @s: Pointer to seqcount_latch_t
 *
 * The latch technique is a multiversion concurrency control method that allows
 * queries during non-atomic modifications. If you can guarantee queries never
 * interrupt the modification -- e.g. the concurrency is strictly between CPUs
 * -- you most likely do not need this.
 *
 * Where the traditional RCU/lockless data structures rely on atomic
 * modifications to ensure queries observe either the old or the new state,
 * the latch allows the same for non-atomic updates. The trade-off is doubling
 * the cost of storage; we have to maintain two copies of the entire data
 * structure.
 *
 * Very simply put: we first modify one copy and then the other. This ensures
 * there is always one copy in a stable state, ready to give us an answer.
 *
 * The basic form is a data structure like::
 *
 *	struct latch_struct {
 *		seqcount_latch_t	seq;
 *		struct data_struct	data[2];
 *	};
 *
 * Where a modification, which is assumed to be externally serialized, does the
 * following::
 *
 *	void latch_modify(struct latch_struct *latch, ...)
 *	{
 *		smp_wmb();	// Ensure that the last data[1] update is visible
 *		latch->seq.sequence++;
 *		smp_wmb();	// Ensure that the seqcount update is visible
 *
 *		modify(latch->data[0], ...);
 *
 *		smp_wmb();	// Ensure that the data[0] update is visible
 *		latch->seq.sequence++;
 *		smp_wmb();	// Ensure that the seqcount update is visible
 *
 *		modify(latch->data[1], ...);
 *	}
 *
 * The query will have a form like::
 *
 *	struct entry *latch_query(struct latch_struct *latch, ...)
 *	{
 *		struct entry *entry;
 *		unsigned seq, idx;
 *
 *		do {
 *			seq = raw_read_seqcount_latch(&latch->seq);
 *
 *			idx = seq & 0x01;
 *			entry = data_query(latch->data[idx], ...);
 *
 *			// This includes needed smp_rmb()
 *		} while (read_seqcount_latch_retry(&latch->seq, seq));
 *
 *		return entry;
 *	}
 *
 * So during the modification, queries are first redirected to data[1]. Then we
 * modify data[0]. When that is complete, we redirect queries back to data[0]
 * and we can modify data[1].
 *
 * NOTE:
 *
 *	The non-requirement for atomic modifications does _NOT_ include
 *	the publishing of new entries in the case where data is a dynamic
 *	data structure.
 *
 *	An iteration might start in data[0] and get suspended long enough
 *	to miss an entire modification sequence; once it resumes, it might
 *	observe the new entry.
 *
 * NOTE2:
 *
 *	When data is a dynamic data structure, one should use regular RCU
 *	patterns to manage the lifetimes of the objects within.
 */
static inline void raw_write_seqcount_latch(seqcount_latch_t *s)
{
	smp_wmb();	/* prior stores before incrementing "sequence" */
	s->seqcount.sequence++;
	smp_wmb();	/* increment "sequence" before following stores */
}

/*
 * Sequential locks (seqlock_t)
 *
 * Sequence counters with an embedded spinlock for writer serialization
 * and non-preemptibility.
 *
 * For more info, see:
 *    - Comments on top of seqcount_t
 *    - Documentation/locking/seqlock.rst
 */
typedef struct {
	/*
	 * Make sure that readers don't starve writers on PREEMPT_RT: use
	 * seqcount_spinlock_t instead of seqcount_t. Check __SEQ_LOCK().
	 */
	seqcount_spinlock_t seqcount;
	spinlock_t lock;
} seqlock_t;

#define __SEQLOCK_UNLOCKED(lockname)	\
	{	\
		.seqcount = SEQCNT_SPINLOCK_ZERO(lockname, &(lockname).lock),	\
		.lock =	__SPIN_LOCK_UNLOCKED(lockname)	\
	}

/**
 * seqlock_init() - dynamic initializer for seqlock_t
 * @sl: Pointer to the seqlock_t instance
 */
#define seqlock_init(sl)	\
	do {	\
		spin_lock_init(&(sl)->lock);	\
		seqcount_spinlock_init(&(sl)->seqcount, &(sl)->lock);	\
	} while (0)

/**
 * DEFINE_SEQLOCK(sl) - Define a statically allocated seqlock_t
 * @sl: Name of the seqlock_t instance
 */
#define DEFINE_SEQLOCK(sl)	\
	seqlock_t sl = __SEQLOCK_UNLOCKED(sl)

/**
 * read_seqbegin() - start a seqlock_t read side critical section
 * @sl: Pointer to seqlock_t
 *
 * Return: count, to be passed to read_seqretry()
 */
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
	unsigned ret = read_seqcount_begin(&sl->seqcount);

	kcsan_atomic_next(0);  /* non-raw usage, assume closing read_seqretry() */
	kcsan_flat_atomic_begin();
	return ret;
}

/**
 * read_seqretry() - end a seqlock_t read side section
 * @sl: Pointer to seqlock_t
 * @start: count, from read_seqbegin()
 *
 * read_seqretry closes the read side critical section of given seqlock_t.
 * If the critical section was invalid, it must be ignored (and typically
 * retried).
 *
 * Return: true if a read section retry is required, else false
 */
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
	/*
	 * Assume not nested: read_seqretry() may be called multiple times when
	 * completing read critical section.
	 */
	kcsan_flat_atomic_end();

	return read_seqcount_retry(&sl->seqcount, start);
}

/*
 * For all seqlock_t write side functions, use the internal
 * do_write_seqcount_begin() instead of generic write_seqcount_begin().
 * This way, no redundant lockdep_assert_held() checks are added.
 */

/**
 * write_seqlock() - start a seqlock_t write side critical section
 * @sl: Pointer to seqlock_t
 *
 * write_seqlock opens a write side critical section for the given
 * seqlock_t. It also implicitly acquires the spinlock_t embedded inside
 * that sequential lock. All seqlock_t write side sections are thus
 * automatically serialized and non-preemptible.
 *
 * Context: if the seqlock_t read section, or other write side critical
 * sections, can be invoked from hardirq or softirq contexts, use the
 * _irqsave or _bh variants of this function instead.
 */
static inline void write_seqlock(seqlock_t *sl)
{
	spin_lock(&sl->lock);
	do_write_seqcount_begin(&sl->seqcount.seqcount);
}

/**
 * write_sequnlock() - end a seqlock_t write side critical section
 * @sl: Pointer to seqlock_t
 *
 * write_sequnlock closes the (serialized and non-preemptible) write side
 * critical section of given seqlock_t.
 */
static inline void write_sequnlock(seqlock_t *sl)
{
	do_write_seqcount_end(&sl->seqcount.seqcount);
	spin_unlock(&sl->lock);
}

/**
 * write_seqlock_bh() - start a softirqs-disabled seqlock_t write section
 * @sl: Pointer to seqlock_t
 *
 * _bh variant of write_seqlock(). Use only if the read side section, or
 * other write side sections, can be invoked from softirq contexts.
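 *
 * A rough sketch (the "foo" seqlock and data are hypothetical): a process
 * context writer whose lockless readers may also run from softirq::
 *
 *	// writer (process context)
 *	write_seqlock_bh(&foo_seqlock);
 *	// ... update the protected data ...
 *	write_sequnlock_bh(&foo_seqlock);
 *
 *	// lockless reader (process context or softirq)
 *	unsigned int seq;
 *
 *	do {
 *		seq = read_seqbegin(&foo_seqlock);
 *		// ... read the protected data ...
 *	} while (read_seqretry(&foo_seqlock, seq));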
 */
static inline void write_seqlock_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
	do_write_seqcount_begin(&sl->seqcount.seqcount);
}

/**
 * write_sequnlock_bh() - end a softirqs-disabled seqlock_t write section
 * @sl: Pointer to seqlock_t
 *
 * write_sequnlock_bh closes the serialized, non-preemptible, and
 * softirqs-disabled seqlock_t write side critical section opened with
 * write_seqlock_bh().
 */
static inline void write_sequnlock_bh(seqlock_t *sl)
{
	do_write_seqcount_end(&sl->seqcount.seqcount);
	spin_unlock_bh(&sl->lock);
}

/**
 * write_seqlock_irq() - start a non-interruptible seqlock_t write section
 * @sl: Pointer to seqlock_t
 *
 * _irq variant of write_seqlock(). Use only if the read side section, or
 * other write sections, can be invoked from hardirq contexts.
 */
static inline void write_seqlock_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
	do_write_seqcount_begin(&sl->seqcount.seqcount);
}

/**
 * write_sequnlock_irq() - end a non-interruptible seqlock_t write section
 * @sl: Pointer to seqlock_t
 *
 * write_sequnlock_irq closes the serialized and non-interruptible
 * seqlock_t write side section opened with write_seqlock_irq().
 */
static inline void write_sequnlock_irq(seqlock_t *sl)
{
	do_write_seqcount_end(&sl->seqcount.seqcount);
	spin_unlock_irq(&sl->lock);
}

static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	do_write_seqcount_begin(&sl->seqcount.seqcount);
	return flags;
}

/**
 * write_seqlock_irqsave() - start a non-interruptible seqlock_t write
 *                           section
 * @lock:  Pointer to seqlock_t
 * @flags: Stack-allocated storage for saving caller's local interrupt
 *         state, to be passed to write_sequnlock_irqrestore().
 *
 * _irqsave variant of write_seqlock(). Use it only if the read side
 * section, or other write sections, can be invoked from hardirq context.
 */
#define write_seqlock_irqsave(lock, flags)	\
	do { flags = __write_seqlock_irqsave(lock); } while (0)

/**
 * write_sequnlock_irqrestore() - end non-interruptible seqlock_t write
 *                                section
 * @sl:    Pointer to seqlock_t
 * @flags: Caller's saved interrupt state, from write_seqlock_irqsave()
 *
 * write_sequnlock_irqrestore closes the serialized and non-interruptible
 * seqlock_t write section previously opened with write_seqlock_irqsave().
 */
static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
	do_write_seqcount_end(&sl->seqcount.seqcount);
	spin_unlock_irqrestore(&sl->lock, flags);
}

/**
 * read_seqlock_excl() - begin a seqlock_t locking reader section
 * @sl:	Pointer to seqlock_t
 *
 * read_seqlock_excl opens a seqlock_t locking reader critical section. A
 * locking reader exclusively locks out *both* other writers *and* other
 * locking readers, but it does not update the embedded sequence number.
 *
 * Locking readers act like a normal spin_lock()/spin_unlock().
 *
 * Context: if the seqlock_t write section, *or other read sections*, can
 * be invoked from hardirq or softirq contexts, use the _irqsave or _bh
 * variant of this function instead.
 *
 * The opened read section must be closed with read_sequnlock_excl().
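 *
 * A minimal sketch (hypothetical "foo_seqlock"): a reader that cannot
 * afford to retry, for example because it follows pointers inside the
 * protected data, takes the embedded lock instead of looping::
 *
 *	read_seqlock_excl(&foo_seqlock);
 *	// ... access the protected data; no retry loop is needed ...
 *	read_sequnlock_excl(&foo_seqlock);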
 */
static inline void read_seqlock_excl(seqlock_t *sl)
{
	spin_lock(&sl->lock);
}

/**
 * read_sequnlock_excl() - end a seqlock_t locking reader critical section
 * @sl: Pointer to seqlock_t
 */
static inline void read_sequnlock_excl(seqlock_t *sl)
{
	spin_unlock(&sl->lock);
}

/**
 * read_seqlock_excl_bh() - start a seqlock_t locking reader section with
 *			    softirqs disabled
 * @sl: Pointer to seqlock_t
 *
 * _bh variant of read_seqlock_excl(). Use this variant only if the
 * seqlock_t write side section, *or other read sections*, can be invoked
 * from softirq contexts.
 */
static inline void read_seqlock_excl_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
}

/**
 * read_sequnlock_excl_bh() - stop a seqlock_t softirq-disabled locking
 *			      reader section
 * @sl: Pointer to seqlock_t
 */
static inline void read_sequnlock_excl_bh(seqlock_t *sl)
{
	spin_unlock_bh(&sl->lock);
}

/**
 * read_seqlock_excl_irq() - start a non-interruptible seqlock_t locking
 *			     reader section
 * @sl: Pointer to seqlock_t
 *
 * _irq variant of read_seqlock_excl(). Use this only if the seqlock_t
 * write side section, *or other read sections*, can be invoked from a
 * hardirq context.
 */
static inline void read_seqlock_excl_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
}

/**
 * read_sequnlock_excl_irq() - end an interrupts-disabled seqlock_t
 *			       locking reader section
 * @sl: Pointer to seqlock_t
 */
static inline void read_sequnlock_excl_irq(seqlock_t *sl)
{
	spin_unlock_irq(&sl->lock);
}

static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	return flags;
}

/**
 * read_seqlock_excl_irqsave() - start a non-interruptible seqlock_t
 *				 locking reader section
 * @lock:  Pointer to seqlock_t
 * @flags: Stack-allocated storage for saving caller's local interrupt
 *         state, to be passed to read_sequnlock_excl_irqrestore().
 *
 * _irqsave variant of read_seqlock_excl(). Use this only if the seqlock_t
 * write side section, *or other read sections*, can be invoked from a
 * hardirq context.
 */
#define read_seqlock_excl_irqsave(lock, flags)	\
	do { flags = __read_seqlock_excl_irqsave(lock); } while (0)

/**
 * read_sequnlock_excl_irqrestore() - end non-interruptible seqlock_t
 *				      locking reader section
 * @sl:    Pointer to seqlock_t
 * @flags: Caller saved interrupt state, from read_seqlock_excl_irqsave()
 */
static inline void
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
{
	spin_unlock_irqrestore(&sl->lock, flags);
}

/**
 * read_seqbegin_or_lock() - begin a seqlock_t lockless or locking reader
 * @lock: Pointer to seqlock_t
 * @seq : Marker and return parameter. If the passed value is even, the
 *	  reader will become a *lockless* seqlock_t reader as in read_seqbegin().
 *	  If the passed value is odd, the reader will become a *locking* reader
 *	  as in read_seqlock_excl(). In the first call to this function, the
 *	  caller *must* initialize and pass an even value to @seq; this way, a
 *	  lockless read can be optimistically tried first.
 *
 * read_seqbegin_or_lock is an API designed to optimistically try a normal
 * lockless seqlock_t read section first. If an odd counter is found, the
 * lockless read trial has failed, and the next read iteration transforms
 * itself into a full seqlock_t locking reader.
 *
 * This is typically used to avoid seqlock_t lockless reader starvation
 * (too many retry loops) in the case of a sharp spike in write side
 * activity.
 *
 * Context: if the seqlock_t write section, *or other read sections*, can
 * be invoked from hardirq or softirq contexts, use the _irqsave or _bh
 * variant of this function instead.
 *
 * Check Documentation/locking/seqlock.rst for template example code.
 *
 * Return: the encountered sequence counter value, through the @seq
 * parameter, which is overloaded as a return parameter. This returned
 * value must be checked with need_seqretry(). If the read section needs to
 * be retried, this returned value must also be passed as the @seq
 * parameter of the next read_seqbegin_or_lock() iteration.
 */
static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
{
	if (!(*seq & 1))	/* Even */
		*seq = read_seqbegin(lock);
	else			/* Odd */
		read_seqlock_excl(lock);
}

/**
 * need_seqretry() - validate seqlock_t "locking or lockless" read section
 * @lock: Pointer to seqlock_t
 * @seq: sequence count, from read_seqbegin_or_lock()
 *
 * Return: true if a read section retry is required, false otherwise
 */
static inline int need_seqretry(seqlock_t *lock, int seq)
{
	return !(seq & 1) && read_seqretry(lock, seq);
}

/**
 * done_seqretry() - end seqlock_t "locking or lockless" reader section
 * @lock: Pointer to seqlock_t
 * @seq: count, from read_seqbegin_or_lock()
 *
 * done_seqretry finishes the seqlock_t read side critical section started
 * with read_seqbegin_or_lock() and validated by need_seqretry().
 */
static inline void done_seqretry(seqlock_t *lock, int seq)
{
	if (seq & 1)
		read_sequnlock_excl(lock);
}

/**
 * read_seqbegin_or_lock_irqsave() - begin a seqlock_t lockless reader, or
 *                                   a non-interruptible locking reader
 * @lock: Pointer to seqlock_t
 * @seq:  Marker and return parameter. Check read_seqbegin_or_lock().
 *
 * This is the _irqsave variant of read_seqbegin_or_lock(). Use it only if
 * the seqlock_t write section, *or other read sections*, can be invoked
 * from hardirq context.
 *
 * Note: Interrupts will be disabled only for "locking reader" mode.
 *
 * Return:
 *
 *   1. The saved local interrupts state in case of a locking reader, to
 *      be passed to done_seqretry_irqrestore().
 *
 *   2. The encountered sequence counter value, returned through @seq
 *      overloaded as a return parameter. Check read_seqbegin_or_lock().
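 *
 * A hedged template (hypothetical "foo_seqlock"; it mirrors the pattern
 * shown in Documentation/locking/seqlock.rst)::
 *
 *	int seq = 0;
 *	unsigned long flags;
 *
 *	do {
 *		flags = read_seqbegin_or_lock_irqsave(&foo_seqlock, &seq);
 *		// ... read the protected data ...
 *	} while (need_seqretry(&foo_seqlock, seq));
 *	done_seqretry_irqrestore(&foo_seqlock, seq, flags);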
 */
static inline unsigned long
read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
{
	unsigned long flags = 0;

	if (!(*seq & 1))	/* Even */
		*seq = read_seqbegin(lock);
	else			/* Odd */
		read_seqlock_excl_irqsave(lock, flags);

	return flags;
}

/**
 * done_seqretry_irqrestore() - end a seqlock_t lockless reader, or a
 *				non-interruptible locking reader section
 * @lock:  Pointer to seqlock_t
 * @seq:   Count, from read_seqbegin_or_lock_irqsave()
 * @flags: Caller's saved local interrupt state in case of a locking
 *	   reader, also from read_seqbegin_or_lock_irqsave()
 *
 * This is the _irqrestore variant of done_seqretry(). The read section
 * must've been opened with read_seqbegin_or_lock_irqsave(), and validated
 * by need_seqretry().
 */
static inline void
done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
{
	if (seq & 1)
		read_sequnlock_excl_irqrestore(lock, flags);
}
#endif /* __LINUX_SEQLOCK_H */