Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

lib/genalloc.c: make the avail variable an atomic_long_t

If the amount of resources allocated to a gen_pool exceeds 2^32 then the
avail atomic overflows and this causes problems when clients try to
borrow resources from the pool. This is only expected to be an issue on
64-bit systems.

Add the <linux/atomic.h> header to pull in the atomic_long* operations, so
that 32-bit systems continue to use the 32-bit atomic_t while 64-bit
systems can use the 64-bit atomic64_t.

Link: http://lkml.kernel.org/r/1509033843-25667-1-git-send-email-sbates@raithlin.com
Signed-off-by: Stephen Bates <sbates@raithlin.com>
Reviewed-by: Logan Gunthorpe <logang@deltatee.com>
Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Reviewed-by: Daniel Mentz <danielmentz@google.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Stephen Bates and committed by
Linus Torvalds
36a3d1dd e813a614

+7 -6
+2 -1
include/linux/genalloc.h
··· 32 32 33 33 #include <linux/types.h> 34 34 #include <linux/spinlock_types.h> 35 + #include <linux/atomic.h> 35 36 36 37 struct device; 37 38 struct device_node; ··· 72 71 */ 73 72 struct gen_pool_chunk { 74 73 struct list_head next_chunk; /* next chunk in pool */ 75 - atomic_t avail; 74 + atomic_long_t avail; 76 75 phys_addr_t phys_addr; /* physical starting address of memory chunk */ 77 76 unsigned long start_addr; /* start address of memory chunk */ 78 77 unsigned long end_addr; /* end address of memory chunk (inclusive) */
+5 -5
lib/genalloc.c
··· 194 194 chunk->phys_addr = phys; 195 195 chunk->start_addr = virt; 196 196 chunk->end_addr = virt + size - 1; 197 - atomic_set(&chunk->avail, size); 197 + atomic_long_set(&chunk->avail, size); 198 198 199 199 spin_lock(&pool->lock); 200 200 list_add_rcu(&chunk->next_chunk, &pool->chunks); ··· 304 304 nbits = (size + (1UL << order) - 1) >> order; 305 305 rcu_read_lock(); 306 306 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { 307 - if (size > atomic_read(&chunk->avail)) 307 + if (size > atomic_long_read(&chunk->avail)) 308 308 continue; 309 309 310 310 start_bit = 0; ··· 324 324 325 325 addr = chunk->start_addr + ((unsigned long)start_bit << order); 326 326 size = nbits << order; 327 - atomic_sub(size, &chunk->avail); 327 + atomic_long_sub(size, &chunk->avail); 328 328 break; 329 329 } 330 330 rcu_read_unlock(); ··· 390 390 remain = bitmap_clear_ll(chunk->bits, start_bit, nbits); 391 391 BUG_ON(remain); 392 392 size = nbits << order; 393 - atomic_add(size, &chunk->avail); 393 + atomic_long_add(size, &chunk->avail); 394 394 rcu_read_unlock(); 395 395 return; 396 396 } ··· 464 464 465 465 rcu_read_lock(); 466 466 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) 467 - avail += atomic_read(&chunk->avail); 467 + avail += atomic_long_read(&chunk->avail); 468 468 rcu_read_unlock(); 469 469 return avail; 470 470 }