Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

book3s64/hash: Make kernel_map_linear_page() generic

Currently the kernel_map_linear_page() function assumes it is working on
the linear_map_hash_slots array. But since later patches need a
separate linear map array for kfence, make
kernel_map_linear_page() take a linear map array and lock as
function arguments.

This is needed to separate out kfence from debug_pagealloc
infrastructure.

Signed-off-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://patch.msgid.link/5b67df7b29e68d7c78d6fc1f42d41137299bac6b.1729271995.git.ritesh.list@gmail.com

authored by

Ritesh Harjani (IBM) and committed by
Michael Ellerman
685d942d 43919f41

+25 -22
+25 -22
arch/powerpc/mm/book3s64/hash_utils.c
··· 272 272 } 273 273 274 274 #ifdef CONFIG_DEBUG_PAGEALLOC 275 - static u8 *linear_map_hash_slots; 276 - static unsigned long linear_map_hash_count; 277 - static DEFINE_RAW_SPINLOCK(linear_map_hash_lock); 278 - 279 - static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi) 275 + static void kernel_map_linear_page(unsigned long vaddr, unsigned long idx, 276 + u8 *slots, raw_spinlock_t *lock) 280 277 { 281 278 unsigned long hash; 282 279 unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize); ··· 287 290 if (!vsid) 288 291 return; 289 292 290 - if (linear_map_hash_slots[lmi] & 0x80) 293 + if (slots[idx] & 0x80) 291 294 return; 292 295 293 296 ret = hpte_insert_repeating(hash, vpn, __pa(vaddr), mode, ··· 295 298 mmu_linear_psize, mmu_kernel_ssize); 296 299 297 300 BUG_ON (ret < 0); 298 - raw_spin_lock(&linear_map_hash_lock); 299 - BUG_ON(linear_map_hash_slots[lmi] & 0x80); 300 - linear_map_hash_slots[lmi] = ret | 0x80; 301 - raw_spin_unlock(&linear_map_hash_lock); 301 + raw_spin_lock(lock); 302 + BUG_ON(slots[idx] & 0x80); 303 + slots[idx] = ret | 0x80; 304 + raw_spin_unlock(lock); 302 305 } 303 306 304 - static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi) 307 + static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long idx, 308 + u8 *slots, raw_spinlock_t *lock) 305 309 { 306 - unsigned long hash, hidx, slot; 310 + unsigned long hash, hslot, slot; 307 311 unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize); 308 312 unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize); 309 313 310 314 hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize); 311 - raw_spin_lock(&linear_map_hash_lock); 312 - if (!(linear_map_hash_slots[lmi] & 0x80)) { 313 - raw_spin_unlock(&linear_map_hash_lock); 315 + raw_spin_lock(lock); 316 + if (!(slots[idx] & 0x80)) { 317 + raw_spin_unlock(lock); 314 318 return; 315 319 } 316 - hidx = linear_map_hash_slots[lmi] & 0x7f; 317 - linear_map_hash_slots[lmi] = 0; 318 - 
raw_spin_unlock(&linear_map_hash_lock); 319 - if (hidx & _PTEIDX_SECONDARY) 320 + hslot = slots[idx] & 0x7f; 321 + slots[idx] = 0; 322 + raw_spin_unlock(lock); 323 + if (hslot & _PTEIDX_SECONDARY) 320 324 hash = ~hash; 321 325 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 322 - slot += hidx & _PTEIDX_GROUP_IX; 326 + slot += hslot & _PTEIDX_GROUP_IX; 323 327 mmu_hash_ops.hpte_invalidate(slot, vpn, mmu_linear_psize, 324 328 mmu_linear_psize, 325 329 mmu_kernel_ssize, 0); 326 330 } 327 331 332 + static u8 *linear_map_hash_slots; 333 + static unsigned long linear_map_hash_count; 334 + static DEFINE_RAW_SPINLOCK(linear_map_hash_lock); 328 335 static inline void hash_debug_pagealloc_alloc_slots(void) 329 336 { 330 337 if (!debug_pagealloc_enabled()) ··· 363 362 if (lmi >= linear_map_hash_count) 364 363 continue; 365 364 if (enable) 366 - kernel_map_linear_page(vaddr, lmi); 365 + kernel_map_linear_page(vaddr, lmi, 366 + linear_map_hash_slots, &linear_map_hash_lock); 367 367 else 368 - kernel_unmap_linear_page(vaddr, lmi); 368 + kernel_unmap_linear_page(vaddr, lmi, 369 + linear_map_hash_slots, &linear_map_hash_lock); 369 370 } 370 371 local_irq_restore(flags); 371 372 return 0;