Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

radix-tree: replace preallocated node array with linked list

Currently we use a per-cpu array to hold pointers to preallocated nodes.
Let's replace it with a linked list. On x86_64 this saves 256 bytes in the
per-cpu ELF section, which may translate into freeing up 2MB of memory for
NR_CPUS==8192.

[akpm@linux-foundation.org: fix comment, coding style]
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Kirill A. Shutemov and committed by Linus Torvalds
9d2a8da0 9cf79d11

+17 -11
lib/radix-tree.c
··· 65 65 */ 66 66 struct radix_tree_preload { 67 67 int nr; 68 - struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE]; 68 + /* nodes->private_data points to next preallocated node */ 69 + struct radix_tree_node *nodes; 69 70 }; 70 71 static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, }; 71 72 ··· 198 197 */ 199 198 rtp = this_cpu_ptr(&radix_tree_preloads); 200 199 if (rtp->nr) { 201 - ret = rtp->nodes[rtp->nr - 1]; 202 - rtp->nodes[rtp->nr - 1] = NULL; 200 + ret = rtp->nodes; 201 + rtp->nodes = ret->private_data; 202 + ret->private_data = NULL; 203 203 rtp->nr--; 204 204 } 205 205 /* ··· 259 257 260 258 preempt_disable(); 261 259 rtp = this_cpu_ptr(&radix_tree_preloads); 262 - while (rtp->nr < ARRAY_SIZE(rtp->nodes)) { 260 + while (rtp->nr < RADIX_TREE_PRELOAD_SIZE) { 263 261 preempt_enable(); 264 262 node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); 265 263 if (node == NULL) 266 264 goto out; 267 265 preempt_disable(); 268 266 rtp = this_cpu_ptr(&radix_tree_preloads); 269 - if (rtp->nr < ARRAY_SIZE(rtp->nodes)) 270 - rtp->nodes[rtp->nr++] = node; 271 - else 267 + if (rtp->nr < RADIX_TREE_PRELOAD_SIZE) { 268 + node->private_data = rtp->nodes; 269 + rtp->nodes = node; 270 + rtp->nr++; 271 + } else { 272 272 kmem_cache_free(radix_tree_node_cachep, node); 273 + } 273 274 } 274 275 ret = 0; 275 276 out: ··· 1468 1463 { 1469 1464 int cpu = (long)hcpu; 1470 1465 struct radix_tree_preload *rtp; 1466 + struct radix_tree_node *node; 1471 1467 1472 1468 /* Free per-cpu pool of perloaded nodes */ 1473 1469 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { 1474 1470 rtp = &per_cpu(radix_tree_preloads, cpu); 1475 1471 while (rtp->nr) { 1476 - kmem_cache_free(radix_tree_node_cachep, 1477 - rtp->nodes[rtp->nr-1]); 1478 - rtp->nodes[rtp->nr-1] = NULL; 1479 - rtp->nr--; 1472 + node = rtp->nodes; 1473 + rtp->nodes = node->private_data; 1474 + kmem_cache_free(radix_tree_node_cachep, node); 1475 + rtp->nr--; 1480 1476 } 1481 1477 } 1482 1478 
return NOTIFY_OK;