A map with lock-free read concurrency using the left-right algorithm, written in Go

s/epoch/serial/ s/flush/commit/

authored by

Johannes Kohnen and committed by joh.dev e24354cd e970d17d

+21 -21
+10 -10
README.md
··· 47 47 48 48 The writer does synchronize any actions with a central mutex lock. Keys and 49 49 values are kept at most three times: Once for both arenas, plus any 50 - non-flushed value in the operations log. The writer is in control over how 51 - long that operations log is: It is truncated after each flush operation. The 52 - delay of each flush is dependent on the readers: They need to preemptively 53 - leave the arena to allow the writer to carry out the flush operation. 50 + non-committed value in the operations log. The writer is in control over how 51 + long that operations log is: It is truncated after each commit operation. The 52 + delay of each commit is dependent on the readers: They need to preemptively 53 + leave the arena to allow the writer to carry out the commit operation. 54 54 55 55 The "sign", which is in fact a simple pointer, is written by the writer and 56 56 read by the readers with atomic operations. The signaling of where the readers 57 - are is done with an unsigned integer counter (named "epoch") that is atomically 57 + are is done with an unsigned integer counter (named "serial") that is atomically 58 58 incremented by 1 when entering an arena and when leaving an arena. Thus, if a 59 - reader has entered an arena the epoch is odd, and even if the reader has left 60 - the arenas. The writer atomically reads each epoch and spins over every epoch 61 - that happened to be odd at that first read until all of those epochs differ 62 - from the recorded odd epoch. 50 + non-committed value in the operations log. 59 + reader has entered an arena the serial is odd, and even if the reader has left 60 + the arenas. The writer atomically reads each serial and spins over every serial 61 + that happened to be odd at that first read until all of those serials differ 62 + from the recorded odd serial. 
That ensures that every reader has at least once 63 63 read the pointer after the writer swapped it (and also handles the case of 64 - overflowed epochs). 64 + overflowed serials). 65 65 66 66 ### Performance 67 67
+10 -10
lrmap.go
··· 161 161 readers := make(map[*readHandlerInner[K, V]]uint64) 162 162 163 163 for rh := range m.readHandlers { 164 - if epoch := atomic.LoadUint64(&(rh.epoch)); epoch%2 == 1 { 165 - readers[rh] = epoch 164 + if serial := atomic.LoadUint64(&(rh.serial)); serial%2 == 1 { 165 + readers[rh] = serial 166 166 } 167 167 } 168 168 169 169 delay := time.Microsecond 170 170 171 171 for { 172 - for reader, epoch := range readers { 173 - if e := atomic.LoadUint64(&(reader.epoch)); e != epoch { 172 + for reader, serial := range readers { 173 + if s := atomic.LoadUint64(&(reader.serial)); s != serial { 174 174 delete(readers, reader) 175 175 } 176 176 } ··· 264 264 } 265 265 266 266 type readHandlerInner[K comparable, V any] struct { 267 - lrmap *LRMap[K, V] 268 - live arena[K, V] 269 - epoch uint64 267 + lrmap *LRMap[K, V] 268 + live arena[K, V] 269 + serial uint64 270 270 } 271 271 272 272 func (r *readHandlerInner[K, V]) enter() { ··· 274 274 panic("reader illegal state: must not Enter() twice") 275 275 } 276 276 277 - atomic.AddUint64(&r.epoch, 1) 277 + atomic.AddUint64(&r.serial, 1) 278 278 r.live = *r.lrmap.readMap.Load() 279 279 } 280 280 ··· 283 283 panic("reader illegal state: must not Leave() twice") 284 284 } 285 285 286 - atomic.AddUint64(&r.epoch, 1) 286 + atomic.AddUint64(&r.serial, 1) 287 287 } 288 288 289 289 func (r *readHandlerInner[K, V]) get(key K) V { ··· 318 318 } 319 319 320 320 func (r *readHandlerInner[K, V]) entered() bool { 321 - return r.epoch%2 == 1 321 + return r.serial%2 == 1 322 322 }
+1 -1
lrmap_test.go
··· 109 109 rh.Leave() 110 110 } 111 111 112 - func TestEpochOverflow(t *testing.T) { 112 + func TestSerialOverflow(t *testing.T) { 113 113 var maxEven uint64 = math.MaxUint64 - 1 114 114 t.Logf("maxEven(%d)", maxEven) 115 115 if maxEven%2 == 1 {