Merge tag 'slab-for-6.18-rc1-hotfix' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab

Pull slab fix from Vlastimil Babka:
"A NULL pointer deref hotfix"

* tag 'slab-for-6.18-rc1-hotfix' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab:
slab: fix barn NULL pointer dereference on memoryless nodes

+51 -14
mm/slub.c
···
 	return s->node[node];
 }
 
-/* Get the barn of the current cpu's memory node */
+/*
+ * Get the barn of the current cpu's closest memory node. It may not exist on
+ * systems with memoryless nodes but without CONFIG_HAVE_MEMORYLESS_NODES
+ */
 static inline struct node_barn *get_barn(struct kmem_cache *s)
 {
-	return get_node(s, numa_mem_id())->barn;
+	struct kmem_cache_node *n = get_node(s, numa_mem_id());
+
+	if (!n)
+		return NULL;
+
+	return n->barn;
 }
 
 /*
···
 	}
 
 	barn = get_barn(s);
+	if (!barn) {
+		local_unlock(&s->cpu_sheaves->lock);
+		return NULL;
+	}
 
 	full = barn_replace_empty_sheaf(barn, pcs->main);
 
···
 	if (unlikely(pcs->main->size == 0)) {
 
 		struct slab_sheaf *full;
+		struct node_barn *barn;
 
 		if (pcs->spare && pcs->spare->size > 0) {
 			swap(pcs->main, pcs->spare);
 			goto do_alloc;
 		}
 
-		full = barn_replace_empty_sheaf(get_barn(s), pcs->main);
+		barn = get_barn(s);
+		if (!barn) {
+			local_unlock(&s->cpu_sheaves->lock);
+			return allocated;
+		}
+
+		full = barn_replace_empty_sheaf(barn, pcs->main);
 
 		if (full) {
 			stat(s, BARN_GET);
···
 {
 	struct slub_percpu_sheaves *pcs;
 	struct slab_sheaf *sheaf = NULL;
+	struct node_barn *barn;
 
 	if (unlikely(size > s->sheaf_capacity)) {
···
 		pcs->spare = NULL;
 		stat(s, SHEAF_PREFILL_FAST);
 	} else {
+		barn = get_barn(s);
+
 		stat(s, SHEAF_PREFILL_SLOW);
-		sheaf = barn_get_full_or_empty_sheaf(get_barn(s));
+		if (barn)
+			sheaf = barn_get_full_or_empty_sheaf(barn);
 		if (sheaf && sheaf->size)
 			stat(s, BARN_GET);
 		else
···
 	 * If the barn has too many full sheaves or we fail to refill the sheaf,
 	 * simply flush and free it.
 	 */
-	if (data_race(barn->nr_full) >= MAX_FULL_SHEAVES ||
+	if (!barn || data_race(barn->nr_full) >= MAX_FULL_SHEAVES ||
 	    refill_sheaf(s, sheaf, gfp)) {
 		sheaf_flush_unused(s, sheaf);
 		free_empty_sheaf(s, sheaf);
···
  * put the full sheaf there.
  */
 static void __pcs_install_empty_sheaf(struct kmem_cache *s,
-		struct slub_percpu_sheaves *pcs, struct slab_sheaf *empty)
+		struct slub_percpu_sheaves *pcs, struct slab_sheaf *empty,
+		struct node_barn *barn)
 {
-	struct node_barn *barn;
-
 	lockdep_assert_held(this_cpu_ptr(&s->cpu_sheaves->lock));
 
 	/* This is what we expect to find if nobody interrupted us. */
···
 		pcs->main = empty;
 		return;
 	}
-
-	barn = get_barn(s);
 
 	/*
 	 * Unlikely because if the main sheaf had space, we would have just
···
 	lockdep_assert_held(this_cpu_ptr(&s->cpu_sheaves->lock));
 
 	barn = get_barn(s);
+	if (!barn) {
+		local_unlock(&s->cpu_sheaves->lock);
+		return NULL;
+	}
+
 	put_fail = false;
 
 	if (!pcs->spare) {
···
 	}
 
 	pcs = this_cpu_ptr(s->cpu_sheaves);
-	__pcs_install_empty_sheaf(s, pcs, empty);
+	__pcs_install_empty_sheaf(s, pcs, empty, barn);
 
 	return pcs;
 }
···
 
 static void rcu_free_sheaf(struct rcu_head *head)
 {
+	struct kmem_cache_node *n;
 	struct slab_sheaf *sheaf;
-	struct node_barn *barn;
+	struct node_barn *barn = NULL;
 	struct kmem_cache *s;
 
 	sheaf = container_of(head, struct slab_sheaf, rcu_head);
···
 	 */
 	__rcu_free_sheaf_prepare(s, sheaf);
 
-	barn = get_node(s, sheaf->node)->barn;
+	n = get_node(s, sheaf->node);
+	if (!n)
+		goto flush;
+
+	barn = n->barn;
 
 	/* due to slab_free_hook() */
 	if (unlikely(sheaf->size == 0))
···
 		return;
 	}
 
+flush:
 	stat(s, BARN_PUT_FAIL);
 	sheaf_flush_unused(s, sheaf);
 
 empty:
-	if (data_race(barn->nr_empty) < MAX_EMPTY_SHEAVES) {
+	if (barn && data_race(barn->nr_empty) < MAX_EMPTY_SHEAVES) {
 		barn_put_empty_sheaf(barn, sheaf);
 		return;
 	}
···
 	}
 
 	barn = get_barn(s);
+	if (!barn) {
+		local_unlock(&s->cpu_sheaves->lock);
+		goto fail;
+	}
 
 	empty = barn_get_empty_sheaf(barn);
···
 		goto do_free;
 
 	barn = get_barn(s);
+	if (!barn)
+		goto no_empty;
 
 	if (!pcs->spare) {
 		empty = barn_get_empty_sheaf(barn);
···
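
The crux of the fix: per the new comment in get_barn(), numa_mem_id() can name a node
for which the cache has no kmem_cache_node (memoryless nodes on configs without
CONFIG_HAVE_MEMORYLESS_NODES), so the old get_barn() dereferenced a NULL pointer.
Below is a minimal userspace sketch of the same defensive pattern; the struct
layouts, the lookup helper, and the fallback caller are simplified stand-ins, not
the real SLUB code.

/*
 * Minimal model of the pattern fixed above: an accessor that used to assume a
 * per-node structure always exists now returns NULL, and callers fall back
 * instead of dereferencing. All types are stand-ins for kmem_cache,
 * kmem_cache_node and node_barn.
 */
#include <stdio.h>

#define MAX_NUMNODES 4

struct node_barn { int nr_full; };
struct cache_node { struct node_barn *barn; };
struct cache { struct cache_node *node[MAX_NUMNODES]; };

/*
 * Before the fix this was effectively s->node[nid]->barn, which crashes when
 * node[nid] is NULL (a memoryless node with no per-node structure allocated).
 */
static struct node_barn *get_barn(struct cache *s, int nid)
{
	struct cache_node *n = s->node[nid];

	if (!n)
		return NULL;

	return n->barn;
}

int main(void)
{
	static struct node_barn barn0 = { .nr_full = 2 };
	static struct cache_node cn0 = { .barn = &barn0 };
	struct cache s = { .node = { &cn0, NULL, NULL, NULL } };

	/* Node 1 models a memoryless node: no per-node structure, no barn. */
	for (int nid = 0; nid < 2; nid++) {
		struct node_barn *barn = get_barn(&s, nid);

		if (!barn) {
			printf("node %d: no barn, fall back to the slow path\n", nid);
			continue;
		}
		printf("node %d: barn has %d full sheaves\n", nid, barn->nr_full);
	}
	return 0;
}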