Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

vmalloc: switch to for_each_vmap_node() helper

There are places which can be updated easily to use the helper to iterate
over all vmap-nodes. This is what this patch does.

The aim is to improve readability and simplify the code.

[akpm@linux-foundation.org: fix build warning]
Link: https://lkml.kernel.org/r/20250408151549.77937-2-urezki@gmail.com
Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

authored by

Uladzislau Rezki (Sony) and committed by
Andrew Morton
ce906d76 43182550

+15 -25
+15 -25
mm/vmalloc.c
··· 1061 1061 { 1062 1062 unsigned long va_start_lowest; 1063 1063 struct vmap_node *vn; 1064 - int i; 1065 1064 1066 1065 repeat: 1067 - for (i = 0, va_start_lowest = 0; i < nr_vmap_nodes; i++) { 1068 - vn = &vmap_nodes[i]; 1066 + va_start_lowest = 0; 1069 1067 1068 + for_each_vmap_node(vn) { 1070 1069 spin_lock(&vn->busy.lock); 1071 1070 *va = __find_vmap_area_exceed_addr(addr, &vn->busy.root); 1072 1071 ··· 4962 4963 { 4963 4964 struct vmap_node *vn; 4964 4965 struct vmap_area *va; 4965 - int i; 4966 4966 4967 - for (i = 0; i < nr_vmap_nodes; i++) { 4968 - vn = &vmap_nodes[i]; 4969 - 4967 + for_each_vmap_node(vn) { 4970 4968 spin_lock(&vn->lazy.lock); 4971 4969 list_for_each_entry(va, &vn->lazy.head, list) { 4972 4970 seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n", ··· 4979 4983 struct vmap_node *vn; 4980 4984 struct vmap_area *va; 4981 4985 struct vm_struct *v; 4982 - int i; 4983 4986 4984 - for (i = 0; i < nr_vmap_nodes; i++) { 4985 - vn = &vmap_nodes[i]; 4986 - 4987 + for_each_vmap_node(vn) { 4987 4988 spin_lock(&vn->busy.lock); 4988 4989 list_for_each_entry(va, &vn->busy.head, list) { 4989 4990 if (!va->vm) { ··· 5101 5108 static void vmap_init_nodes(void) 5102 5109 { 5103 5110 struct vmap_node *vn; 5104 - int i, n; 5111 + int i; 5105 5112 5106 5113 #if BITS_PER_LONG == 64 5107 5114 /* ··· 5118 5125 * set of cores. Therefore a per-domain purging is supposed to 5119 5126 * be added as well as a per-domain balancing. 
5120 5127 */ 5121 - n = clamp_t(unsigned int, num_possible_cpus(), 1, 128); 5128 + int n = clamp_t(unsigned int, num_possible_cpus(), 1, 128); 5122 5129 5123 5130 if (n > 1) { 5124 5131 vn = kmalloc_array(n, sizeof(*vn), GFP_NOWAIT | __GFP_NOWARN); ··· 5133 5140 } 5134 5141 #endif 5135 5142 5136 - for (n = 0; n < nr_vmap_nodes; n++) { 5137 - vn = &vmap_nodes[n]; 5143 + for_each_vmap_node(vn) { 5138 5144 vn->busy.root = RB_ROOT; 5139 5145 INIT_LIST_HEAD(&vn->busy.head); 5140 5146 spin_lock_init(&vn->busy.lock); ··· 5154 5162 static unsigned long 5155 5163 vmap_node_shrink_count(struct shrinker *shrink, struct shrink_control *sc) 5156 5164 { 5157 - unsigned long count; 5165 + unsigned long count = 0; 5158 5166 struct vmap_node *vn; 5159 - int i, j; 5167 + int i; 5160 5168 5161 - for (count = 0, i = 0; i < nr_vmap_nodes; i++) { 5162 - vn = &vmap_nodes[i]; 5163 - 5164 - for (j = 0; j < MAX_VA_SIZE_PAGES; j++) 5165 - count += READ_ONCE(vn->pool[j].len); 5169 + for_each_vmap_node(vn) { 5170 + for (i = 0; i < MAX_VA_SIZE_PAGES; i++) 5171 + count += READ_ONCE(vn->pool[i].len); 5166 5172 } 5167 5173 5168 5174 return count ? count : SHRINK_EMPTY; ··· 5169 5179 static unsigned long 5170 5180 vmap_node_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) 5171 5181 { 5172 - int i; 5182 + struct vmap_node *vn; 5173 5183 5174 - for (i = 0; i < nr_vmap_nodes; i++) 5175 - decay_va_pool_node(&vmap_nodes[i], true); 5184 + for_each_vmap_node(vn) 5185 + decay_va_pool_node(vn, true); 5176 5186 5177 5187 return SHRINK_STOP; 5178 5188 }