/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H

/*
 * seqcount_t / seqlock_t - a reader-writer consistency mechanism with
 * lockless readers (read-only retry loops), and no writer starvation.
 *
 * See Documentation/locking/seqlock.rst
 *
 * Copyrights:
 * - Based on x86_64 vsyscall gettimeofday: Keith Owens, Andrea Arcangeli
 * - Sequence counters with associated locks, (C) 2020 Linutronix GmbH
 */

#include <linux/compiler.h>
#include <linux/kcsan-checks.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/preempt.h>
#include <linux/seqlock_types.h>
#include <linux/spinlock.h>

#include <asm/processor.h>

/*
 * The seqlock seqcount_t interface does not prescribe a precise sequence of
 * read begin/retry/end. For readers, typically there is a call to
 * read_seqcount_begin() and read_seqcount_retry(), however, there are more
 * esoteric cases which do not follow this pattern.
 *
 * As a consequence, we take the following best-effort approach for raw usage
 * via seqcount_t under KCSAN: upon beginning a seq-reader critical section,
 * pessimistically mark the next KCSAN_SEQLOCK_REGION_MAX memory accesses as
 * atomics; if there is a matching read_seqcount_retry() call, no following
 * memory operations are considered atomic. Usage of the seqlock_t interface
 * is not affected.
 */
#define KCSAN_SEQLOCK_REGION_MAX 1000

static inline void __seqcount_init(seqcount_t *s, const char *name,
				   struct lock_class_key *key)
{
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	lockdep_init_map(&s->dep_map, name, key, 0);
	s->sequence = 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

# define SEQCOUNT_DEP_MAP_INIT(lockname)			\
		.dep_map = { .name = #lockname }

/**
 * seqcount_init() - runtime initializer for seqcount_t
 * @s: Pointer to the seqcount_t instance
 */
# define seqcount_init(s)					\
	do {							\
		static struct lock_class_key __key;		\
		__seqcount_init((s), #s, &__key);		\
	} while (0)

static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
{
	seqcount_t *l = (seqcount_t *)s;
	unsigned long flags;

	local_irq_save(flags);
	seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
	seqcount_release(&l->dep_map, _RET_IP_);
	local_irq_restore(flags);
}

#else
# define SEQCOUNT_DEP_MAP_INIT(lockname)
# define seqcount_init(s) __seqcount_init(s, NULL, NULL)
# define seqcount_lockdep_reader_access(x)
#endif

/**
 * SEQCNT_ZERO() - static initializer for seqcount_t
 * @name: Name of the seqcount_t instance
 */
#define SEQCNT_ZERO(name) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(name) }

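/*
 * Editor's sketch, not part of this header's API: the canonical round trip
 * with a plain seqcount_t. All names (hypo_stats, hypo_stats_update,
 * hypo_stats_sum) are hypothetical. Writers must be serialized externally,
 * here by a spinlock, which on !PREEMPT_RT also provides the required
 * non-preemptibility; the seqcount_spinlock_t variants below handle
 * PREEMPT_RT automatically and should be preferred for new code::
 *
 *	struct hypo_stats {
 *		spinlock_t lock;	// serializes writers
 *		seqcount_t seq;
 *		u64 a, b;
 *	};
 *
 *	void hypo_stats_update(struct hypo_stats *st, u64 a, u64 b)
 *	{
 *		spin_lock(&st->lock);
 *		write_seqcount_begin(&st->seq);
 *		st->a = a;
 *		st->b = b;
 *		write_seqcount_end(&st->seq);
 *		spin_unlock(&st->lock);
 *	}
 *
 *	u64 hypo_stats_sum(struct hypo_stats *st)
 *	{
 *		unsigned int seq;
 *		u64 sum;
 *
 *		do {
 *			seq = read_seqcount_begin(&st->seq);
 *			sum = st->a + st->b;
 *		} while (read_seqcount_retry(&st->seq, seq));
 *
 *		return sum;
 *	}
 */
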
/*
 * Sequence counters with associated locks (seqcount_LOCKNAME_t)
 *
 * A sequence counter which associates the lock used for writer
 * serialization at initialization time. This enables lockdep to validate
 * that the write side critical section is properly serialized.
 *
 * For associated locks which do not implicitly disable preemption,
 * preemption protection is enforced in the write side function.
 *
 * Lockdep is never used in any of the raw write variants.
 *
 * See Documentation/locking/seqlock.rst
 */

/*
 * typedef seqcount_LOCKNAME_t - sequence counter with LOCKNAME associated
 * @seqcount:	The real sequence counter
 * @lock:	Pointer to the associated lock
 *
 * A plain sequence counter with external writer synchronization by
 * LOCKNAME @lock. The lock is associated to the sequence counter in the
 * static initializer or init function. This enables lockdep to validate
 * that the write side critical section is properly serialized.
 *
 * LOCKNAME:	raw_spinlock, spinlock, rwlock or mutex
 */

/*
 * seqcount_LOCKNAME_init() - runtime initializer for seqcount_LOCKNAME_t
 * @s:		Pointer to the seqcount_LOCKNAME_t instance
 * @lock:	Pointer to the associated lock
 */

#define seqcount_LOCKNAME_init(s, _lock, lockname)		\
	do {							\
		seqcount_##lockname##_t *____s = (s);		\
		seqcount_init(&____s->seqcount);		\
		__SEQ_LOCK(____s->lock = (_lock));		\
	} while (0)

#define seqcount_raw_spinlock_init(s, lock)	seqcount_LOCKNAME_init(s, lock, raw_spinlock)
#define seqcount_spinlock_init(s, lock)		seqcount_LOCKNAME_init(s, lock, spinlock)
#define seqcount_rwlock_init(s, lock)		seqcount_LOCKNAME_init(s, lock, rwlock)
#define seqcount_mutex_init(s, lock)		seqcount_LOCKNAME_init(s, lock, mutex)

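/*
 * Editor's sketch (hypothetical struct and function names): associating the
 * writer serialization lock at init time. Once initialized this way, lockdep
 * can verify that every write side entry into d->seq holds d->lock::
 *
 *	struct hypo_data {
 *		spinlock_t		lock;
 *		seqcount_spinlock_t	seq;
 *		u64			value;
 *	};
 *
 *	void hypo_data_init(struct hypo_data *d)
 *	{
 *		spin_lock_init(&d->lock);
 *		seqcount_spinlock_init(&d->seq, &d->lock);
 *		d->value = 0;
 *	}
 */
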
/*
 * SEQCOUNT_LOCKNAME()	- Instantiate seqcount_LOCKNAME_t and helpers
 * seqprop_LOCKNAME_*()	- Property accessors for seqcount_LOCKNAME_t
 *
 * @lockname:		"LOCKNAME" part of seqcount_LOCKNAME_t
 * @locktype:		LOCKNAME canonical C data type
 * @preemptible:	preemptibility of above locktype
 * @lockbase:		prefix for associated lock/unlock
 */
#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockbase)	\
static __always_inline seqcount_t *					\
__seqprop_##lockname##_ptr(seqcount_##lockname##_t *s)			\
{									\
	return &s->seqcount;						\
}									\
									\
static __always_inline const seqcount_t *				\
__seqprop_##lockname##_const_ptr(const seqcount_##lockname##_t *s)	\
{									\
	return &s->seqcount;						\
}									\
									\
static __always_inline unsigned						\
__seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s)	\
{									\
	unsigned seq = smp_load_acquire(&s->seqcount.sequence);		\
									\
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))				\
		return seq;						\
									\
	if (preemptible && unlikely(seq & 1)) {				\
		__SEQ_LOCK(lockbase##_lock(s->lock));			\
		__SEQ_LOCK(lockbase##_unlock(s->lock));			\
									\
		/*							\
		 * Re-read the sequence counter since the (possibly	\
		 * preempted) writer made progress.			\
		 */							\
		seq = smp_load_acquire(&s->seqcount.sequence);		\
	}								\
									\
	return seq;							\
}									\
									\
static __always_inline bool						\
__seqprop_##lockname##_preemptible(const seqcount_##lockname##_t *s)	\
{									\
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))				\
		return preemptible;					\
									\
	/* PREEMPT_RT relies on the above LOCK+UNLOCK */		\
	return false;							\
}									\
									\
static __always_inline void						\
__seqprop_##lockname##_assert(const seqcount_##lockname##_t *s)		\
{									\
	__SEQ_LOCK(lockdep_assert_held(s->lock));			\
}

/*
 * __seqprop() for seqcount_t
 */

static inline seqcount_t *__seqprop_ptr(seqcount_t *s)
{
	return s;
}

static inline const seqcount_t *__seqprop_const_ptr(const seqcount_t *s)
{
	return s;
}

static inline unsigned __seqprop_sequence(const seqcount_t *s)
{
	return smp_load_acquire(&s->sequence);
}

static inline bool __seqprop_preemptible(const seqcount_t *s)
{
	return false;
}

static inline void __seqprop_assert(const seqcount_t *s)
{
	lockdep_assert_preemption_disabled();
}

#define __SEQ_RT	IS_ENABLED(CONFIG_PREEMPT_RT)

SEQCOUNT_LOCKNAME(raw_spinlock,	raw_spinlock_t,	false,		raw_spin)
SEQCOUNT_LOCKNAME(spinlock,	spinlock_t,	__SEQ_RT,	spin)
SEQCOUNT_LOCKNAME(rwlock,	rwlock_t,	__SEQ_RT,	read)
SEQCOUNT_LOCKNAME(mutex,	struct mutex,	true,		mutex)
#undef SEQCOUNT_LOCKNAME

/*
 * SEQCOUNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t
 * @name:	Name of the seqcount_LOCKNAME_t instance
 * @lock:	Pointer to the associated LOCKNAME
 */

#define SEQCOUNT_LOCKNAME_ZERO(seq_name, assoc_lock) {			\
	.seqcount		= SEQCNT_ZERO(seq_name.seqcount),	\
	__SEQ_LOCK(.lock	= (assoc_lock))				\
}

#define SEQCNT_RAW_SPINLOCK_ZERO(name, lock)	SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define SEQCNT_SPINLOCK_ZERO(name, lock)	SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define SEQCNT_RWLOCK_ZERO(name, lock)		SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define SEQCNT_MUTEX_ZERO(name, lock)		SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define SEQCNT_WW_MUTEX_ZERO(name, lock)	SEQCOUNT_LOCKNAME_ZERO(name, lock)

#define __seqprop_case(s, lockname, prop)				\
	seqcount_##lockname##_t: __seqprop_##lockname##_##prop

#define __seqprop(s, prop) _Generic(*(s),				\
	seqcount_t:		__seqprop_##prop,			\
	__seqprop_case((s),	raw_spinlock,	prop),			\
	__seqprop_case((s),	spinlock,	prop),			\
	__seqprop_case((s),	rwlock,		prop),			\
	__seqprop_case((s),	mutex,		prop))

#define seqprop_ptr(s)			__seqprop(s, ptr)(s)
#define seqprop_const_ptr(s)		__seqprop(s, const_ptr)(s)
#define seqprop_sequence(s)		__seqprop(s, sequence)(s)
#define seqprop_preemptible(s)		__seqprop(s, preemptible)(s)
#define seqprop_assert(s)		__seqprop(s, assert)(s)

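/*
 * Editor's sketch (hypothetical objects): the static counterpart of the
 * runtime initializers above, tying a file-scope seqcount_spinlock_t to its
 * lock at build time::
 *
 *	static DEFINE_SPINLOCK(hypo_lock);
 *	static seqcount_spinlock_t hypo_seq =
 *		SEQCNT_SPINLOCK_ZERO(hypo_seq, &hypo_lock);
 */
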
/**
 * __read_seqcount_begin() - begin a seqcount_t read section
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define __read_seqcount_begin(s)					\
({									\
	unsigned __seq;							\
									\
	while ((__seq = seqprop_sequence(s)) & 1)			\
		cpu_relax();						\
									\
	kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);			\
	__seq;								\
})

/**
 * raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define raw_read_seqcount_begin(s)	__read_seqcount_begin(s)

/**
 * read_seqcount_begin() - begin a seqcount_t read critical section
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define read_seqcount_begin(s)						\
({									\
	seqcount_lockdep_reader_access(seqprop_const_ptr(s));		\
	raw_read_seqcount_begin(s);					\
})

/**
 * raw_read_seqcount() - read the raw seqcount_t counter value
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * raw_read_seqcount opens a read critical section of the given
 * seqcount_t, without any lockdep checking, and without checking or
 * masking the sequence counter LSB. Calling code is responsible for
 * handling that.
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define raw_read_seqcount(s)						\
({									\
	unsigned __seq = seqprop_sequence(s);				\
									\
	kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);			\
	__seq;								\
})

/**
 * raw_seqcount_begin() - begin a seqcount_t read critical section w/o
 *                        lockdep and w/o counter stabilization
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * raw_seqcount_begin opens a read critical section of the given
 * seqcount_t. Unlike read_seqcount_begin(), this function will not wait
 * for the count to stabilize. If a writer is active when it begins, it
 * will fail the read_seqcount_retry() at the end of the read critical
 * section instead of stabilizing at the beginning of it.
 *
 * Use this only in special kernel hot paths where the read section is
 * small and has a high probability of success through other external
 * means. It will save a single branching instruction.
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define raw_seqcount_begin(s)						\
({									\
	/*								\
	 * If the counter is odd, let read_seqcount_retry() fail	\
	 * by decrementing the counter.					\
	 */								\
	raw_read_seqcount(s) & ~1;					\
})

/**
 * __read_seqcount_retry() - end a seqcount_t read section w/o barrier
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 * @start: count, from read_seqcount_begin()
 *
 * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 *
 * Return: true if a read section retry is required, else false
 */
#define __read_seqcount_retry(s, start)					\
	do___read_seqcount_retry(seqprop_const_ptr(s), start)

static inline int do___read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	kcsan_atomic_next(0);
	return unlikely(READ_ONCE(s->sequence) != start);
}

/**
 * read_seqcount_retry() - end a seqcount_t read critical section
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 * @start: count, from read_seqcount_begin()
 *
 * read_seqcount_retry closes the read critical section of given
 * seqcount_t. If the critical section was invalid, it must be ignored
 * (and typically retried).
 *
 * Return: true if a read section retry is required, else false
 */
#define read_seqcount_retry(s, start)					\
	do_read_seqcount_retry(seqprop_const_ptr(s), start)

static inline int do_read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	smp_rmb();
	return do___read_seqcount_retry(s, start);
}

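/*
 * Editor's sketch (continuing the hypothetical hypo_data example above):
 * the same begin/retry reader loop works unchanged for every
 * seqcount_LOCKNAME_t variant, courtesy of the seqprop_*() _Generic
 * dispatch::
 *
 *	u64 hypo_data_get(struct hypo_data *d)
 *	{
 *		unsigned int seq;
 *		u64 val;
 *
 *		do {
 *			seq = read_seqcount_begin(&d->seq);
 *			val = d->value;
 *		} while (read_seqcount_retry(&d->seq, seq));
 *
 *		return val;
 *	}
 */
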
/**
 * raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Context: check write_seqcount_begin()
 */
#define raw_write_seqcount_begin(s)					\
do {									\
	if (seqprop_preemptible(s))					\
		preempt_disable();					\
									\
	do_raw_write_seqcount_begin(seqprop_ptr(s));			\
} while (0)

static inline void do_raw_write_seqcount_begin(seqcount_t *s)
{
	kcsan_nestable_atomic_begin();
	s->sequence++;
	smp_wmb();
}

/**
 * raw_write_seqcount_end() - end a seqcount_t write section w/o lockdep
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Context: check write_seqcount_end()
 */
#define raw_write_seqcount_end(s)					\
do {									\
	do_raw_write_seqcount_end(seqprop_ptr(s));			\
									\
	if (seqprop_preemptible(s))					\
		preempt_enable();					\
} while (0)

static inline void do_raw_write_seqcount_end(seqcount_t *s)
{
	smp_wmb();
	s->sequence++;
	kcsan_nestable_atomic_end();
}

/**
 * write_seqcount_begin_nested() - start a seqcount_t write section with
 *                                 custom lockdep nesting level
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 * @subclass: lockdep nesting level
 *
 * See Documentation/locking/lockdep-design.rst
 * Context: check write_seqcount_begin()
 */
#define write_seqcount_begin_nested(s, subclass)			\
do {									\
	seqprop_assert(s);						\
									\
	if (seqprop_preemptible(s))					\
		preempt_disable();					\
									\
	do_write_seqcount_begin_nested(seqprop_ptr(s), subclass);	\
} while (0)

static inline void do_write_seqcount_begin_nested(seqcount_t *s, int subclass)
{
	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
	do_raw_write_seqcount_begin(s);
}

/**
 * write_seqcount_begin() - start a seqcount_t write side critical section
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Context: sequence counter write side sections must be serialized and
 * non-preemptible. Preemption will be automatically disabled if and
 * only if the seqcount write serialization lock is associated, and
 * preemptible. If readers can be invoked from hardirq or softirq
 * context, interrupts or bottom halves must be respectively disabled.
 */
#define write_seqcount_begin(s)						\
do {									\
	seqprop_assert(s);						\
									\
	if (seqprop_preemptible(s))					\
		preempt_disable();					\
									\
	do_write_seqcount_begin(seqprop_ptr(s));			\
} while (0)

static inline void do_write_seqcount_begin(seqcount_t *s)
{
	do_write_seqcount_begin_nested(s, 0);
}

/**
 * write_seqcount_end() - end a seqcount_t write side critical section
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Context: Preemption will be automatically re-enabled if and only if
 * the seqcount write serialization lock is associated, and preemptible.
 */
#define write_seqcount_end(s)						\
do {									\
	do_write_seqcount_end(seqprop_ptr(s));				\
									\
	if (seqprop_preemptible(s))					\
		preempt_enable();					\
} while (0)

static inline void do_write_seqcount_end(seqcount_t *s)
{
	seqcount_release(&s->dep_map, _RET_IP_);
	do_raw_write_seqcount_end(s);
}

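/*
 * Editor's sketch (hypothetical, continuing hypo_data): the matching write
 * side. The associated lock must be held; seqprop_assert() lets lockdep
 * verify that, and preemption is handled automatically for the preemptible
 * lock types::
 *
 *	void hypo_data_set(struct hypo_data *d, u64 val)
 *	{
 *		spin_lock(&d->lock);
 *		write_seqcount_begin(&d->seq);
 *		d->value = val;
 *		write_seqcount_end(&d->seq);
 *		spin_unlock(&d->lock);
 *	}
 */
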
/**
 * raw_write_seqcount_barrier() - do a seqcount_t write barrier
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * This can be used to provide an ordering guarantee instead of the usual
 * consistency guarantee. It is one wmb cheaper, because it can collapse
 * the two back-to-back wmb()s.
 *
 * Note that writes surrounding the barrier should be declared atomic (e.g.
 * via WRITE_ONCE): a) to ensure the writes become visible to other threads
 * atomically, avoiding compiler optimizations; b) to document which writes are
 * meant to propagate to the reader critical section. This is necessary because
 * neither writes before nor after the barrier are enclosed in a seq-writer
 * critical section that would ensure readers are aware of ongoing writes::
 *
 *	seqcount_t seq;
 *	bool X = true, Y = false;
 *
 *	void read(void)
 *	{
 *		bool x, y;
 *
 *		do {
 *			int s = read_seqcount_begin(&seq);
 *
 *			x = X; y = Y;
 *
 *		} while (read_seqcount_retry(&seq, s));
 *
 *		BUG_ON(!x && !y);
 *	}
 *
 *	void write(void)
 *	{
 *		WRITE_ONCE(Y, true);
 *
 *		raw_write_seqcount_barrier(&seq);
 *
 *		WRITE_ONCE(X, false);
 *	}
 */
#define raw_write_seqcount_barrier(s)					\
	do_raw_write_seqcount_barrier(seqprop_ptr(s))

static inline void do_raw_write_seqcount_barrier(seqcount_t *s)
{
	kcsan_nestable_atomic_begin();
	s->sequence++;
	smp_wmb();
	s->sequence++;
	kcsan_nestable_atomic_end();
}

/**
 * write_seqcount_invalidate() - invalidate in-progress seqcount_t read
 *                               side operations
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * After write_seqcount_invalidate, no seqcount_t read side operations
 * will complete successfully and see data older than this.
 */
#define write_seqcount_invalidate(s)					\
	do_write_seqcount_invalidate(seqprop_ptr(s))

static inline void do_write_seqcount_invalidate(seqcount_t *s)
{
	smp_wmb();
	kcsan_nestable_atomic_begin();
	s->sequence += 2;
	kcsan_nestable_atomic_end();
}

/*
 * Latch sequence counters (seqcount_latch_t)
 *
 * A sequence counter variant where the counter even/odd value is used to
 * switch between two copies of protected data. This allows the read path,
 * typically NMIs, to safely interrupt the write side critical section.
 *
 * As the write sections are fully preemptible, no special handling for
 * PREEMPT_RT is needed.
 */
typedef struct {
	seqcount_t seqcount;
} seqcount_latch_t;

/**
 * SEQCNT_LATCH_ZERO() - static initializer for seqcount_latch_t
 * @seq_name: Name of the seqcount_latch_t instance
 */
#define SEQCNT_LATCH_ZERO(seq_name) {					\
	.seqcount		= SEQCNT_ZERO(seq_name.seqcount),	\
}

/**
 * seqcount_latch_init() - runtime initializer for seqcount_latch_t
 * @s: Pointer to the seqcount_latch_t instance
 */
#define seqcount_latch_init(s) seqcount_init(&(s)->seqcount)

/**
 * raw_read_seqcount_latch() - pick even/odd latch data copy
 * @s: Pointer to seqcount_latch_t
 *
 * See raw_write_seqcount_latch() for details and a full reader/writer
 * usage example.
 *
 * Return: sequence counter raw value. Use the lowest bit as an index for
 * picking which data copy to read. The full counter must then be checked
 * with raw_read_seqcount_latch_retry().
 */
static __always_inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
{
	/*
	 * Pairs with the first smp_wmb() in raw_write_seqcount_latch().
	 * Due to the dependent load, a full smp_rmb() is not needed.
	 */
	return READ_ONCE(s->seqcount.sequence);
}

/**
 * read_seqcount_latch() - pick even/odd latch data copy
 * @s: Pointer to seqcount_latch_t
 *
 * See write_seqcount_latch() for details and a full reader/writer usage
 * example.
 *
 * Return: sequence counter raw value. Use the lowest bit as an index for
 * picking which data copy to read. The full counter must then be checked
 * with read_seqcount_latch_retry().
 */
static __always_inline unsigned read_seqcount_latch(const seqcount_latch_t *s)
{
	kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
	return raw_read_seqcount_latch(s);
}

/**
 * raw_read_seqcount_latch_retry() - end a seqcount_latch_t read section
 * @s:		Pointer to seqcount_latch_t
 * @start:	count, from raw_read_seqcount_latch()
 *
 * Return: true if a read section retry is required, else false
 */
static __always_inline int
raw_read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
{
	smp_rmb();
	return unlikely(READ_ONCE(s->seqcount.sequence) != start);
}

/**
 * read_seqcount_latch_retry() - end a seqcount_latch_t read section
 * @s:		Pointer to seqcount_latch_t
 * @start:	count, from read_seqcount_latch()
 *
 * Return: true if a read section retry is required, else false
 */
static __always_inline int
read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
{
	kcsan_atomic_next(0);
	return raw_read_seqcount_latch_retry(s, start);
}

/**
 * raw_write_seqcount_latch() - redirect latch readers to even/odd copy
 * @s: Pointer to seqcount_latch_t
 */
static __always_inline void raw_write_seqcount_latch(seqcount_latch_t *s)
{
	smp_wmb();	/* prior stores before incrementing "sequence" */
	s->seqcount.sequence++;
	smp_wmb();	/* increment "sequence" before following stores */
}

/**
 * write_seqcount_latch_begin() - redirect latch readers to odd copy
 * @s: Pointer to seqcount_latch_t
 *
 * The latch technique is a multiversion concurrency control method that allows
 * queries during non-atomic modifications. If you can guarantee queries never
 * interrupt the modification -- e.g. the concurrency is strictly between CPUs
 * -- you most likely do not need this.
 *
 * Where the traditional RCU/lockless data structures rely on atomic
 * modifications to ensure queries observe either the old or the new state the
 * latch allows the same for non-atomic updates. The trade-off is doubling the
 * cost of storage; we have to maintain two copies of the entire data
 * structure.
 *
 * Very simply put: we first modify one copy and then the other. This ensures
 * there is always one copy in a stable state, ready to give us an answer.
 *
 * The basic form is a data structure like::
 *
 *	struct latch_struct {
 *		seqcount_latch_t	seq;
 *		struct data_struct	data[2];
 *	};
 *
 * Where a modification, which is assumed to be externally serialized, does the
 * following::
 *
 *	void latch_modify(struct latch_struct *latch, ...)
 *	{
 *		write_seqcount_latch_begin(&latch->seq);
 *		modify(latch->data[0], ...);
 *		write_seqcount_latch(&latch->seq);
 *		modify(latch->data[1], ...);
 *		write_seqcount_latch_end(&latch->seq);
 *	}
 *
 * The query will have a form like::
 *
 *	struct entry *latch_query(struct latch_struct *latch, ...)
 *	{
 *		struct entry *entry;
 *		unsigned seq, idx;
 *
 *		do {
 *			seq = read_seqcount_latch(&latch->seq);
 *
 *			idx = seq & 0x01;
 *			entry = data_query(latch->data[idx], ...);
 *
 *			// This includes needed smp_rmb()
 *		} while (read_seqcount_latch_retry(&latch->seq, seq));
 *
 *		return entry;
 *	}
 *
 * So during the modification, queries are first redirected to data[1]. Then we
 * modify data[0]. When that is complete, we redirect queries back to data[0]
 * and we can modify data[1].
 *
 * NOTE:
 *
 *	The non-requirement for atomic modifications does _NOT_ include
 *	the publishing of new entries in the case where data is a dynamic
 *	data structure.
 *
 *	An iteration might start in data[0] and get suspended long enough
 *	to miss an entire modification sequence; once it resumes, it might
 *	observe the new entry.
 *
 * NOTE2:
 *
 *	When data is a dynamic data structure, one should use regular RCU
 *	patterns to manage the lifetimes of the objects within.
 */
static __always_inline void write_seqcount_latch_begin(seqcount_latch_t *s)
{
	kcsan_nestable_atomic_begin();
	raw_write_seqcount_latch(s);
}

/**
 * write_seqcount_latch() - redirect latch readers to even copy
 * @s: Pointer to seqcount_latch_t
 */
static __always_inline void write_seqcount_latch(seqcount_latch_t *s)
{
	raw_write_seqcount_latch(s);
}

/**
 * write_seqcount_latch_end() - end a seqcount_latch_t write section
 * @s: Pointer to seqcount_latch_t
 *
 * Marks the end of a seqcount_latch_t writer section, after all copies of the
 * latch-protected data have been updated.
 */
static __always_inline void write_seqcount_latch_end(seqcount_latch_t *s)
{
	kcsan_nestable_atomic_end();
}

#define __SEQLOCK_UNLOCKED(lockname)					\
	{								\
		.seqcount = SEQCNT_SPINLOCK_ZERO(lockname, &(lockname).lock), \
		.lock =	__SPIN_LOCK_UNLOCKED(lockname)			\
	}

/**
 * seqlock_init() - dynamic initializer for seqlock_t
 * @sl: Pointer to the seqlock_t instance
 */
#define seqlock_init(sl)						\
	do {								\
		spin_lock_init(&(sl)->lock);				\
		seqcount_spinlock_init(&(sl)->seqcount, &(sl)->lock);	\
	} while (0)

/**
 * DEFINE_SEQLOCK(sl) - Define a statically allocated seqlock_t
 * @sl: Name of the seqlock_t instance
 */
#define DEFINE_SEQLOCK(sl)						\
		seqlock_t sl = __SEQLOCK_UNLOCKED(sl)

/**
 * read_seqbegin() - start a seqlock_t read side critical section
 * @sl: Pointer to seqlock_t
 *
 * Return: count, to be passed to read_seqretry()
 */
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
	return read_seqcount_begin(&sl->seqcount);
}

/**
 * read_seqretry() - end a seqlock_t read side section
 * @sl: Pointer to seqlock_t
 * @start: count, from read_seqbegin()
 *
 * read_seqretry closes the read side critical section of given seqlock_t.
 * If the critical section was invalid, it must be ignored (and typically
 * retried).
 *
 * Return: true if a read section retry is required, else false
 */
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
	return read_seqcount_retry(&sl->seqcount, start);
}

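/*
 * Editor's sketch (hypothetical names): the classic seqlock_t lockless
 * reader built from the two functions above::
 *
 *	static DEFINE_SEQLOCK(hypo_seqlock);
 *	static u64 hypo_counter;
 *
 *	u64 hypo_read_counter(void)
 *	{
 *		unsigned int seq;
 *		u64 val;
 *
 *		do {
 *			seq = read_seqbegin(&hypo_seqlock);
 *			val = hypo_counter;
 *		} while (read_seqretry(&hypo_seqlock, seq));
 *
 *		return val;
 *	}
 */
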
/*
 * For all seqlock_t write side functions, use the internal
 * do_write_seqcount_begin() instead of generic write_seqcount_begin().
 * This way, no redundant lockdep_assert_held() checks are added.
 */

/**
 * write_seqlock() - start a seqlock_t write side critical section
 * @sl: Pointer to seqlock_t
 *
 * write_seqlock opens a write side critical section for the given
 * seqlock_t. It also implicitly acquires the spinlock_t embedded inside
 * that sequential lock. All seqlock_t write side sections are thus
 * automatically serialized and non-preemptible.
 *
 * Context: if the seqlock_t read section, or other write side critical
 * sections, can be invoked from hardirq or softirq contexts, use the
 * _irqsave or _bh variants of this function instead.
 */
static inline void write_seqlock(seqlock_t *sl)
{
	spin_lock(&sl->lock);
	do_write_seqcount_begin(&sl->seqcount.seqcount);
}

/**
 * write_sequnlock() - end a seqlock_t write side critical section
 * @sl: Pointer to seqlock_t
 *
 * write_sequnlock closes the (serialized and non-preemptible) write side
 * critical section of given seqlock_t.
 */
static inline void write_sequnlock(seqlock_t *sl)
{
	do_write_seqcount_end(&sl->seqcount.seqcount);
	spin_unlock(&sl->lock);
}

/**
 * write_seqlock_bh() - start a softirqs-disabled seqlock_t write section
 * @sl: Pointer to seqlock_t
 *
 * _bh variant of write_seqlock(). Use only if the read side section, or
 * other write side sections, can be invoked from softirq contexts.
 */
static inline void write_seqlock_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
	do_write_seqcount_begin(&sl->seqcount.seqcount);
}

/**
 * write_sequnlock_bh() - end a softirqs-disabled seqlock_t write section
 * @sl: Pointer to seqlock_t
 *
 * write_sequnlock_bh closes the serialized, non-preemptible, and
 * softirqs-disabled, seqlock_t write side critical section opened with
 * write_seqlock_bh().
 */
static inline void write_sequnlock_bh(seqlock_t *sl)
{
	do_write_seqcount_end(&sl->seqcount.seqcount);
	spin_unlock_bh(&sl->lock);
}

/**
 * write_seqlock_irq() - start a non-interruptible seqlock_t write section
 * @sl: Pointer to seqlock_t
 *
 * _irq variant of write_seqlock(). Use only if the read side section, or
 * other write sections, can be invoked from hardirq contexts.
 */
static inline void write_seqlock_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
	do_write_seqcount_begin(&sl->seqcount.seqcount);
}

/**
 * write_sequnlock_irq() - end a non-interruptible seqlock_t write section
 * @sl: Pointer to seqlock_t
 *
 * write_sequnlock_irq closes the serialized and non-interruptible
 * seqlock_t write side section opened with write_seqlock_irq().
 */
static inline void write_sequnlock_irq(seqlock_t *sl)
{
	do_write_seqcount_end(&sl->seqcount.seqcount);
	spin_unlock_irq(&sl->lock);
}

static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	do_write_seqcount_begin(&sl->seqcount.seqcount);
	return flags;
}

/**
 * write_seqlock_irqsave() - start a non-interruptible seqlock_t write
 *                           section
 * @lock:  Pointer to seqlock_t
 * @flags: Stack-allocated storage for saving caller's local interrupt
 *         state, to be passed to write_sequnlock_irqrestore().
 *
 * _irqsave variant of write_seqlock(). Use it only if the read side
 * section, or other write sections, can be invoked from hardirq context.
 */
#define write_seqlock_irqsave(lock, flags)				\
	do { flags = __write_seqlock_irqsave(lock); } while (0)

/**
 * write_sequnlock_irqrestore() - end non-interruptible seqlock_t write
 *                                section
 * @sl:    Pointer to seqlock_t
 * @flags: Caller's saved interrupt state, from write_seqlock_irqsave()
 *
 * write_sequnlock_irqrestore closes the serialized and non-interruptible
 * seqlock_t write section previously opened with write_seqlock_irqsave().
 */
static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
	do_write_seqcount_end(&sl->seqcount.seqcount);
	spin_unlock_irqrestore(&sl->lock, flags);
}

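/*
 * Editor's sketch (continuing the hypothetical seqlock_t example above):
 * the matching writer. No external lock is needed; write_seqlock() takes
 * the spinlock embedded in the seqlock_t itself::
 *
 *	void hypo_write_counter(u64 val)
 *	{
 *		write_seqlock(&hypo_seqlock);
 *		hypo_counter = val;
 *		write_sequnlock(&hypo_seqlock);
 *	}
 */
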
/**
 * read_seqlock_excl() - begin a seqlock_t locking reader section
 * @sl: Pointer to seqlock_t
 *
 * read_seqlock_excl opens a seqlock_t locking reader critical section. A
 * locking reader exclusively locks out *both* other writers *and* other
 * locking readers, but it does not update the embedded sequence number.
 *
 * Locking readers act like a normal spin_lock()/spin_unlock().
 *
 * Context: if the seqlock_t write section, *or other read sections*, can
 * be invoked from hardirq or softirq contexts, use the _irqsave or _bh
 * variant of this function instead.
 *
 * The opened read section must be closed with read_sequnlock_excl().
 */
static inline void read_seqlock_excl(seqlock_t *sl)
{
	spin_lock(&sl->lock);
}

/**
 * read_sequnlock_excl() - end a seqlock_t locking reader critical section
 * @sl: Pointer to seqlock_t
 */
static inline void read_sequnlock_excl(seqlock_t *sl)
{
	spin_unlock(&sl->lock);
}

/**
 * read_seqlock_excl_bh() - start a seqlock_t locking reader section with
 *                          softirqs disabled
 * @sl: Pointer to seqlock_t
 *
 * _bh variant of read_seqlock_excl(). Use this variant only if the
 * seqlock_t write side section, *or other read sections*, can be invoked
 * from softirq contexts.
 */
static inline void read_seqlock_excl_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
}

/**
 * read_sequnlock_excl_bh() - stop a seqlock_t softirq-disabled locking
 *                            reader section
 * @sl: Pointer to seqlock_t
 */
static inline void read_sequnlock_excl_bh(seqlock_t *sl)
{
	spin_unlock_bh(&sl->lock);
}

/**
 * read_seqlock_excl_irq() - start a non-interruptible seqlock_t locking
 *                           reader section
 * @sl: Pointer to seqlock_t
 *
 * _irq variant of read_seqlock_excl(). Use this only if the seqlock_t
 * write side section, *or other read sections*, can be invoked from a
 * hardirq context.
 */
static inline void read_seqlock_excl_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
}

/**
 * read_sequnlock_excl_irq() - end an interrupts-disabled seqlock_t
 *                             locking reader section
 * @sl: Pointer to seqlock_t
 */
static inline void read_sequnlock_excl_irq(seqlock_t *sl)
{
	spin_unlock_irq(&sl->lock);
}

static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	return flags;
}

/**
 * read_seqlock_excl_irqsave() - start a non-interruptible seqlock_t
 *                               locking reader section
 * @lock:  Pointer to seqlock_t
 * @flags: Stack-allocated storage for saving caller's local interrupt
 *         state, to be passed to read_sequnlock_excl_irqrestore().
 *
 * _irqsave variant of read_seqlock_excl(). Use this only if the seqlock_t
 * write side section, *or other read sections*, can be invoked from a
 * hardirq context.
 */
#define read_seqlock_excl_irqsave(lock, flags)				\
	do { flags = __read_seqlock_excl_irqsave(lock); } while (0)

/**
 * read_sequnlock_excl_irqrestore() - end non-interruptible seqlock_t
 *                                    locking reader section
 * @sl:    Pointer to seqlock_t
 * @flags: Caller saved interrupt state, from read_seqlock_excl_irqsave()
 */
static inline void
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
{
	spin_unlock_irqrestore(&sl->lock, flags);
}

/**
 * read_seqbegin_or_lock() - begin a seqlock_t lockless or locking reader
 * @lock: Pointer to seqlock_t
 * @seq:  Marker and return parameter. If the passed value is even, the
 *        reader will become a *lockless* seqlock_t reader as in
 *        read_seqbegin(). If the passed value is odd, the reader will
 *        become a *locking* reader as in read_seqlock_excl(). In the
 *        first call to this function, the caller *must* initialize and
 *        pass an even value to @seq; this way, a lockless read can be
 *        optimistically tried first.
 *
 * read_seqbegin_or_lock is an API designed to optimistically try a normal
 * lockless seqlock_t read section first. If an odd counter is found, the
 * lockless read trial has failed, and the next read iteration transforms
 * itself into a full seqlock_t locking reader.
 *
 * This is typically used to avoid seqlock_t lockless reader starvation
 * (too many retry loops) in the case of a sharp spike in write side
 * activity.
 *
 * Context: if the seqlock_t write section, *or other read sections*, can
 * be invoked from hardirq or softirq contexts, use the _irqsave or _bh
 * variant of this function instead.
 *
 * Check Documentation/locking/seqlock.rst for template example code.
 *
 * Return: the encountered sequence counter value, through the @seq
 * parameter, which is overloaded as a return parameter. This returned
 * value must be checked with need_seqretry(). If the read section needs
 * to be retried, this returned value must also be passed as the @seq
 * parameter of the next read_seqbegin_or_lock() iteration.
 */
static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
{
	if (!(*seq & 1))	/* Even */
		*seq = read_seqbegin(lock);
	else			/* Odd */
		read_seqlock_excl(lock);
}

/**
 * need_seqretry() - validate seqlock_t "locking or lockless" read section
 * @lock: Pointer to seqlock_t
 * @seq: sequence count, from read_seqbegin_or_lock()
 *
 * Return: true if a read section retry is required, false otherwise
 */
static inline int need_seqretry(seqlock_t *lock, int seq)
{
	return !(seq & 1) && read_seqretry(lock, seq);
}

/**
 * done_seqretry() - end seqlock_t "locking or lockless" reader section
 * @lock: Pointer to seqlock_t
 * @seq: count, from read_seqbegin_or_lock()
 *
 * done_seqretry finishes the seqlock_t read side critical section started
 * with read_seqbegin_or_lock() and validated by need_seqretry().
 */
static inline void done_seqretry(seqlock_t *lock, int seq)
{
	if (seq & 1)
		read_sequnlock_excl(lock);
}

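/*
 * Editor's sketch of one common usage pattern for the three helpers above;
 * see Documentation/locking/seqlock.rst for the canonical template. Data
 * names are hypothetical. This variant forces the second pass to take the
 * lock by passing an odd @seq on retry, so a single failed lockless pass
 * escalates to a locking reader::
 *
 *	u64 hypo_read_stable(void)
 *	{
 *		int seq = 0;	// even: first pass is lockless
 *		u64 val;
 *
 *	again:
 *		read_seqbegin_or_lock(&hypo_seqlock, &seq);
 *		val = hypo_counter;
 *		if (need_seqretry(&hypo_seqlock, seq)) {
 *			seq = 1;	// odd: retry as a locking reader
 *			goto again;
 *		}
 *		done_seqretry(&hypo_seqlock, seq);
 *
 *		return val;
 *	}
 */
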
/**
 * read_seqbegin_or_lock_irqsave() - begin a seqlock_t lockless reader, or
 *                                   a non-interruptible locking reader
 * @lock: Pointer to seqlock_t
 * @seq:  Marker and return parameter. Check read_seqbegin_or_lock().
 *
 * This is the _irqsave variant of read_seqbegin_or_lock(). Use it only if
 * the seqlock_t write section, *or other read sections*, can be invoked
 * from hardirq context.
 *
 * Note: Interrupts will be disabled only for "locking reader" mode.
 *
 * Return:
 *
 *   1. The saved local interrupts state in case of a locking reader, to
 *      be passed to done_seqretry_irqrestore().
 *
 *   2. The encountered sequence counter value, returned through @seq
 *      overloaded as a return parameter. Check read_seqbegin_or_lock().
 */
static inline unsigned long
read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
{
	unsigned long flags = 0;

	if (!(*seq & 1))	/* Even */
		*seq = read_seqbegin(lock);
	else			/* Odd */
		read_seqlock_excl_irqsave(lock, flags);

	return flags;
}

/**
 * done_seqretry_irqrestore() - end a seqlock_t lockless reader, or a
 *                              non-interruptible locking reader section
 * @lock:  Pointer to seqlock_t
 * @seq:   Count, from read_seqbegin_or_lock_irqsave()
 * @flags: Caller's saved local interrupt state in case of a locking
 *         reader, also from read_seqbegin_or_lock_irqsave()
 *
 * This is the _irqrestore variant of done_seqretry(). The read section
 * must've been opened with read_seqbegin_or_lock_irqsave(), and validated
 * by need_seqretry().
 */
static inline void
done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
{
	if (seq & 1)
		read_sequnlock_excl_irqrestore(lock, flags);
}
#endif /* __LINUX_SEQLOCK_H */