Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc/32s: Cleanup around PTE_FLAGS_OFFSET in hash_low.S

PTE_FLAGS_OFFSET is defined in asm/page_32.h and used only
in hash_low.S

And whether PTE_FLAGS_OFFSET is zero or not depends on CONFIG_PTE_64BIT

Instead of preprocessor tests like #if (PTE_FLAGS_OFFSET != 0), guard the
code with #ifdef CONFIG_PTE_64BIT directly.

Also move the definition of PTE_FLAGS_OFFSET into hash_low.S
directly, which improves readability.

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/f5bc21db7a33dab55924734e6060c2e9daed562e.1606247495.git.christophe.leroy@csgroup.eu

authored by

Christophe Leroy and committed by
Michael Ellerman
da481c4f fec6166b

+13 -16
-6
arch/powerpc/include/asm/page_32.h
··· 16 16 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES 17 17 #endif 18 18 19 - #ifdef CONFIG_PTE_64BIT 20 - #define PTE_FLAGS_OFFSET 4 /* offset of PTE flags, in bytes */ 21 - #else 22 - #define PTE_FLAGS_OFFSET 0 23 - #endif 24 - 25 19 #if defined(CONFIG_PPC_256K_PAGES) || \ 26 20 (defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)) 27 21 #define PTE_SHIFT (PAGE_SHIFT - PTE_T_LOG2 - 2) /* 1/4 of a page */
+13 -10
arch/powerpc/mm/book3s32/hash_low.S
··· 26 26 #include <asm/feature-fixups.h> 27 27 #include <asm/code-patching-asm.h> 28 28 29 + #ifdef CONFIG_PTE_64BIT 30 + #define PTE_FLAGS_OFFSET 4 /* offset of PTE flags, in bytes */ 31 + #else 32 + #define PTE_FLAGS_OFFSET 0 33 + #endif 34 + 29 35 /* 30 36 * Load a PTE into the hash table, if possible. 31 37 * The address is in r4, and r3 contains an access flag: ··· 94 88 rlwimi r8,r4,22,20,29 /* insert next 10 bits of address */ 95 89 #else 96 90 rlwimi r8,r4,23,20,28 /* compute pte address */ 91 + /* 92 + * If PTE_64BIT is set, the low word is the flags word; use that 93 + * word for locking since it contains all the interesting bits. 94 + */ 95 + addi r8,r8,PTE_FLAGS_OFFSET 97 96 #endif 98 97 99 98 /* ··· 106 95 * because almost always, there won't be a permission violation 107 96 * and there won't already be an HPTE, and thus we will have 108 97 * to update the PTE to set _PAGE_HASHPTE. -- paulus. 109 - * 110 - * If PTE_64BIT is set, the low word is the flags word; use that 111 - * word for locking since it contains all the interesting bits. 112 98 */ 113 - #if (PTE_FLAGS_OFFSET != 0) 114 - addi r8,r8,PTE_FLAGS_OFFSET 115 - #endif 116 99 .Lretry: 117 100 lwarx r6,0,r8 /* get linux-style pte, flag word */ 118 101 #ifdef CONFIG_PPC_KUAP ··· 494 489 rlwimi r5,r4,22,20,29 495 490 #else 496 491 rlwimi r5,r4,23,20,28 492 + addi r5,r5,PTE_FLAGS_OFFSET 497 493 #endif 498 - 1: lwz r0,PTE_FLAGS_OFFSET(r5) 494 + 1: lwz r0,0(r5) 499 495 cmpwi cr1,r6,1 500 496 andi. r0,r0,_PAGE_HASHPTE 501 497 bne 2f ··· 540 534 * already clear, we're done (for this pte). If not, 541 535 * clear it (atomically) and proceed. -- paulus. 542 536 */ 543 - #if (PTE_FLAGS_OFFSET != 0) 544 - addi r5,r5,PTE_FLAGS_OFFSET 545 - #endif 546 537 33: lwarx r8,0,r5 /* fetch the pte flags word */ 547 538 andi. r0,r8,_PAGE_HASHPTE 548 539 beq 8f /* done if HASHPTE is already clear */