Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

tools/memory-model: Add model support for spin_is_locked()

This commit first adds a trivial macro for spin_is_locked() to
linux-kernel.def.

It also adds cat code for enumerating all possible matches of lock
write events (set LKW) with islocked events returning true (set RL,
for Read from Lock), and unlock write events (set UL) with islocked
events returning false (set RU, for Read from Unlock). Note that this
intentionally does not model uniprocessor kernels (CONFIG_SMP=n) built
with CONFIG_DEBUG_SPINLOCK=n, in which spin_is_locked() unconditionally
returns zero.

It also adds a pair of litmus tests demonstrating the minimal ordering
provided by spin_is_locked() in conjunction with spin_lock(). Will Deacon
noted that this minimal ordering happens on ARMv8:
https://lkml.kernel.org/r/20180226162426.GB17158@arm.com

Notice that herd7 installations strictly older than version 7.49
do not handle the new constructs.

Signed-off-by: Luc Maranget <luc.maranget@inria.fr>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Alan Stern <stern@rowland.harvard.edu>
Cc: Akira Yokosawa <akiyks@gmail.com>
Cc: Andrea Parri <parri.andrea@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Jade Alglave <j.alglave@ucl.ac.uk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Luc Maranget <Luc.Maranget@inria.fr>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Cc: linux-arch@vger.kernel.org
Link: http://lkml.kernel.org/r/1526340837-12222-10-git-send-email-paulmck@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by Luc Maranget; committed by Ingo Molnar.
15553dcb 2fb6ae16

+129 -4
+1
tools/memory-model/linux-kernel.def
··· 38 38 spin_lock(X) { __lock(X); } 39 39 spin_unlock(X) { __unlock(X); } 40 40 spin_trylock(X) __trylock(X) 41 + spin_is_locked(X) __islocked(X) 41 42 42 43 // RCU 43 44 rcu_read_lock() { __fence{rcu-lock}; }
+35
tools/memory-model/litmus-tests/MP+polockmbonce+poacquiresilsil.litmus
··· 1 + C MP+polockmbonce+poacquiresilsil 2 + 3 + (* 4 + * Result: Never 5 + * 6 + * Do spinlocks combined with smp_mb__after_spinlock() provide order 7 + * to outside observers using spin_is_locked() to sense the lock-held 8 + * state, ordered by acquire? Note that when the first spin_is_locked() 9 + * returns false and the second true, we know that the smp_load_acquire() 10 + * executed before the lock was acquired (loosely speaking). 11 + *) 12 + 13 + { 14 + } 15 + 16 + P0(spinlock_t *lo, int *x) 17 + { 18 + spin_lock(lo); 19 + smp_mb__after_spinlock(); 20 + WRITE_ONCE(*x, 1); 21 + spin_unlock(lo); 22 + } 23 + 24 + P1(spinlock_t *lo, int *x) 25 + { 26 + int r1; 27 + int r2; 28 + int r3; 29 + 30 + r1 = smp_load_acquire(x); 31 + r2 = spin_is_locked(lo); 32 + r3 = spin_is_locked(lo); 33 + } 34 + 35 + exists (1:r1=1 /\ 1:r2=0 /\ 1:r3=1)
+34
tools/memory-model/litmus-tests/MP+polockonce+poacquiresilsil.litmus
··· 1 + C MP+polockonce+poacquiresilsil 2 + 3 + (* 4 + * Result: Sometimes 5 + * 6 + * Do spinlocks provide order to outside observers using spin_is_locked() 7 + * to sense the lock-held state, ordered by acquire? Note that when the 8 + * first spin_is_locked() returns false and the second true, we know that 9 + * the smp_load_acquire() executed before the lock was acquired (loosely 10 + * speaking). 11 + *) 12 + 13 + { 14 + } 15 + 16 + P0(spinlock_t *lo, int *x) 17 + { 18 + spin_lock(lo); 19 + WRITE_ONCE(*x, 1); 20 + spin_unlock(lo); 21 + } 22 + 23 + P1(spinlock_t *lo, int *x) 24 + { 25 + int r1; 26 + int r2; 27 + int r3; 28 + 29 + r1 = smp_load_acquire(x); 30 + r2 = spin_is_locked(lo); 31 + r3 = spin_is_locked(lo); 32 + } 33 + 34 + exists (1:r1=1 /\ 1:r2=0 /\ 1:r3=1)
+10
tools/memory-model/litmus-tests/README
··· 63 63 MP+onceassign+derefonce.litmus 64 64 As below, but with rcu_assign_pointer() and an rcu_dereference(). 65 65 66 + MP+polockmbonce+poacquiresilsil.litmus 67 + Protect the access with a lock and an smp_mb__after_spinlock() 68 + in one process, and use an acquire load followed by a pair of 69 + spin_is_locked() calls in the other process. 70 + 71 + MP+polockonce+poacquiresilsil.litmus 72 + Protect the access with a lock in one process, and use an 73 + acquire load followed by a pair of spin_is_locked() calls 74 + in the other process. 75 + 66 76 MP+polocks.litmus 67 77 As below, but with the second access of the writer process 68 78 and the first access of reader process protected by a lock.
+49 -4
tools/memory-model/lock.cat
··· 5 5 *) 6 6 7 7 (* Generate coherence orders and handle lock operations *) 8 - 8 + (* 9 + * Warning, crashes with herd7 versions strictly before 7.48. 10 + * spin_islocked is functional from version 7.49. 11 + * 12 + *) 9 13 include "cross.cat" 10 14 11 15 (* From lock reads to their partner lock writes *) ··· 36 32 (* The final value of a spinlock should not be tested *) 37 33 flag ~empty [FW] ; loc ; [ALL-LOCKS] as lock-final 38 34 39 - 35 + (* 36 + * Backward compatibility 37 + *) 38 + let RL = try RL with emptyset (* defined herd7 >= 7.49 *) 39 + let RU = try RU with emptyset (* defined herd7 >= 7.49 *) 40 40 (* 41 41 * Put lock operations in their appropriate classes, but leave UL out of W 42 42 * until after the co relation has been generated. 43 43 *) 44 - let R = R | LKR | LF 44 + let R = R | LKR | LF | RL | RU 45 45 let W = W | LKW 46 46 47 47 let Release = Release | UL ··· 80 72 81 73 (* Generate all rf relations for LF events *) 82 74 with rfe-lf from cross(all-possible-rfe-lf) 83 - let rf = rf | rfi-lf | rfe-lf 84 75 76 + let rf-lf = rfe-lf | rfi-lf 77 + 78 + (* rf for RL events, ie islocked returning true, similar to LF above *) 79 + 80 + (* islocked returning true inside a critical section 81 + * must read from the opening lock 82 + *) 83 + let rfi-rl = ([LKW] ; po-loc ; [RL]) \ ([LKW] ; po-loc ; [UL] ; po-loc) 84 + 85 + (* islocked returning true outside critical sections can match any 86 + * external lock. 
87 + *) 88 + let all-possible-rfe-rl = 89 + let possible-rfe-lf r = 90 + let pair-to-relation p = p ++ 0 91 + in map pair-to-relation ((LKW * {r}) & loc & ext) 92 + in map possible-rfe-lf (RL \ range(rfi-rl)) 93 + 94 + with rfe-rl from cross(all-possible-rfe-rl) 95 + let rf-rl = rfe-rl | rfi-rl 96 + 97 + (* Read from unlock, ie islocked returning false, slightly different *) 98 + 99 + (* islocked returning false can read from the last po-previous unlock *) 100 + let rfi-ru = ([UL] ; po-loc ; [RU]) \ ([UL] ; po-loc ; [LKW] ; po-loc) 101 + 102 + (* any islocked returning false can read from any external unlock *) 103 + let all-possible-rfe-ru = 104 + let possible-rfe-ru r = 105 + let pair-to-relation p = p ++ 0 106 + in map pair-to-relation (((UL|IW) * {r}) & loc & ext) 107 + in map possible-rfe-ru RU 108 + 109 + with rfe-ru from cross(all-possible-rfe-ru) 110 + let rf-ru = rfe-ru | rfi-ru 111 + 112 + (* Final rf relation *) 113 + let rf = rf | rf-lf | rf-rl | rf-ru 85 114 86 115 (* Generate all co relations, including LKW events but not UL *) 87 116 let co0 = co0 | ([IW] ; loc ; [LKW]) |