Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc/mm: Convert virtual address to vpn

This patch converts different functions to take a virtual page number
instead of a virtual address. The virtual page number is the virtual
address shifted right by VPN_SHIFT (12) bits. This enables us to have
an address range of up to 76 bits.

Reviewed-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>

authored by

Aneesh Kumar K.V and committed by
Benjamin Herrenschmidt
5524a27d dcda287a

+324 -240
+1 -1
arch/powerpc/include/asm/kvm_book3s.h
··· 59 59 struct hlist_node list_vpte; 60 60 struct hlist_node list_vpte_long; 61 61 struct rcu_head rcu_head; 62 - u64 host_va; 62 + u64 host_vpn; 63 63 u64 pfn; 64 64 ulong slot; 65 65 struct kvmppc_pte pte;
+3 -3
arch/powerpc/include/asm/machdep.h
··· 34 34 char *name; 35 35 #ifdef CONFIG_PPC64 36 36 void (*hpte_invalidate)(unsigned long slot, 37 - unsigned long va, 37 + unsigned long vpn, 38 38 int psize, int ssize, 39 39 int local); 40 40 long (*hpte_updatepp)(unsigned long slot, 41 41 unsigned long newpp, 42 - unsigned long va, 42 + unsigned long vpn, 43 43 int psize, int ssize, 44 44 int local); 45 45 void (*hpte_updateboltedpp)(unsigned long newpp, 46 46 unsigned long ea, 47 47 int psize, int ssize); 48 48 long (*hpte_insert)(unsigned long hpte_group, 49 - unsigned long va, 49 + unsigned long vpn, 50 50 unsigned long prpn, 51 51 unsigned long rflags, 52 52 unsigned long vflags,
+61 -17
arch/powerpc/include/asm/mmu-hash64.h
··· 154 154 #define MMU_SEGSIZE_256M 0 155 155 #define MMU_SEGSIZE_1T 1 156 156 157 + /* 158 + * encode page number shift. 159 + * in order to fit the 78 bit va in a 64 bit variable we shift the va by 160 + * 12 bits. This enable us to address upto 76 bit va. 161 + * For hpt hash from a va we can ignore the page size bits of va and for 162 + * hpte encoding we ignore up to 23 bits of va. So ignoring lower 12 bits ensure 163 + * we work in all cases including 4k page size. 164 + */ 165 + #define VPN_SHIFT 12 157 166 158 167 #ifndef __ASSEMBLY__ 168 + 169 + static inline int segment_shift(int ssize) 170 + { 171 + if (ssize == MMU_SEGSIZE_256M) 172 + return SID_SHIFT; 173 + return SID_SHIFT_1T; 174 + } 159 175 160 176 /* 161 177 * The current system page and segment sizes ··· 196 180 extern int mmu_ci_restrictions; 197 181 198 182 /* 183 + * This computes the AVPN and B fields of the first dword of a HPTE, 184 + * for use when we want to match an existing PTE. The bottom 7 bits 185 + * of the returned value are zero. 186 + */ 187 + static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize, 188 + int ssize) 189 + { 190 + unsigned long v; 191 + /* 192 + * The AVA field omits the low-order 23 bits of the 78 bits VA. 
193 + * These bits are not needed in the PTE, because the 194 + * low-order b of these bits are part of the byte offset 195 + * into the virtual page and, if b < 23, the high-order 196 + * 23-b of these bits are always used in selecting the 197 + * PTEGs to be searched 198 + */ 199 + v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm); 200 + v <<= HPTE_V_AVPN_SHIFT; 201 + v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT; 202 + return v; 203 + } 204 + 205 + /* 199 206 * This function sets the AVPN and L fields of the HPTE appropriately 200 207 * for the page size 201 208 */ 202 - static inline unsigned long hpte_encode_v(unsigned long va, int psize, 203 - int ssize) 209 + static inline unsigned long hpte_encode_v(unsigned long vpn, 210 + int psize, int ssize) 204 211 { 205 212 unsigned long v; 206 - v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm); 207 - v <<= HPTE_V_AVPN_SHIFT; 213 + v = hpte_encode_avpn(vpn, psize, ssize); 208 214 if (psize != MMU_PAGE_4K) 209 215 v |= HPTE_V_LARGE; 210 - v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT; 211 216 return v; 212 217 } 213 218 ··· 253 216 } 254 217 255 218 /* 256 - * Build a VA given VSID, EA and segment size 219 + * Build a VPN_SHIFT bit shifted va given VSID, EA and segment size. 
257 220 */ 258 - static inline unsigned long hpt_va(unsigned long ea, unsigned long vsid, 259 - int ssize) 221 + static inline unsigned long hpt_vpn(unsigned long ea, 222 + unsigned long vsid, int ssize) 260 223 { 261 - if (ssize == MMU_SEGSIZE_256M) 262 - return (vsid << 28) | (ea & 0xfffffffUL); 263 - return (vsid << 40) | (ea & 0xffffffffffUL); 224 + unsigned long mask; 225 + int s_shift = segment_shift(ssize); 226 + 227 + mask = (1ul << (s_shift - VPN_SHIFT)) - 1; 228 + return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask); 264 229 } 265 230 266 231 /* 267 232 * This hashes a virtual address 268 233 */ 269 - 270 - static inline unsigned long hpt_hash(unsigned long va, unsigned int shift, 271 - int ssize) 234 + static inline unsigned long hpt_hash(unsigned long vpn, 235 + unsigned int shift, int ssize) 272 236 { 237 + int mask; 273 238 unsigned long hash, vsid; 274 239 240 + /* VPN_SHIFT can be atmost 12 */ 275 241 if (ssize == MMU_SEGSIZE_256M) { 276 - hash = (va >> 28) ^ ((va & 0x0fffffffUL) >> shift); 242 + mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1; 243 + hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^ 244 + ((vpn & mask) >> (shift - VPN_SHIFT)); 277 245 } else { 278 - vsid = va >> 40; 279 - hash = vsid ^ (vsid << 25) ^ ((va & 0xffffffffffUL) >> shift); 246 + mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1; 247 + vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT); 248 + hash = vsid ^ (vsid << 25) ^ 249 + ((vpn & mask) >> (shift - VPN_SHIFT)) ; 280 250 } 281 251 return hash & 0x7fffffffffUL; 282 252 }
+10 -8
arch/powerpc/include/asm/pte-hash64-64k.h
··· 58 58 /* Trick: we set __end to va + 64k, which happens works for 59 59 * a 16M page as well as we want only one iteration 60 60 */ 61 - #define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \ 62 - do { \ 63 - unsigned long __end = va + PAGE_SIZE; \ 64 - unsigned __split = (psize == MMU_PAGE_4K || \ 65 - psize == MMU_PAGE_64K_AP); \ 66 - shift = mmu_psize_defs[psize].shift; \ 67 - for (index = 0; va < __end; index++, va += (1L << shift)) { \ 68 - if (!__split || __rpte_sub_valid(rpte, index)) do { \ 61 + #define pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) \ 62 + do { \ 63 + unsigned long __end = vpn + (1UL << (PAGE_SHIFT - VPN_SHIFT)); \ 64 + unsigned __split = (psize == MMU_PAGE_4K || \ 65 + psize == MMU_PAGE_64K_AP); \ 66 + shift = mmu_psize_defs[psize].shift; \ 67 + for (index = 0; vpn < __end; index++, \ 68 + vpn += (1L << (shift - VPN_SHIFT))) { \ 69 + if (!__split || __rpte_sub_valid(rpte, index)) \ 70 + do { 69 71 70 72 #define pte_iterate_hashed_end() } while(0); } } while(0) 71 73
+2 -2
arch/powerpc/include/asm/tlbflush.h
··· 95 95 unsigned long index; 96 96 struct mm_struct *mm; 97 97 real_pte_t pte[PPC64_TLB_BATCH_NR]; 98 - unsigned long vaddr[PPC64_TLB_BATCH_NR]; 98 + unsigned long vpn[PPC64_TLB_BATCH_NR]; 99 99 unsigned int psize; 100 100 int ssize; 101 101 }; ··· 127 127 #define arch_flush_lazy_mmu_mode() do {} while (0) 128 128 129 129 130 - extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize, 130 + extern void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, 131 131 int ssize, int local); 132 132 extern void flush_hash_range(unsigned long number, int local); 133 133
+4 -4
arch/powerpc/kvm/book3s_32_mmu_host.c
··· 141 141 int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) 142 142 { 143 143 pfn_t hpaddr; 144 - u64 va; 144 + u64 vpn; 145 145 u64 vsid; 146 146 struct kvmppc_sid_map *map; 147 147 volatile u32 *pteg; ··· 173 173 BUG_ON(!map); 174 174 175 175 vsid = map->host_vsid; 176 - va = (vsid << SID_SHIFT) | (eaddr & ~ESID_MASK); 176 + vpn = (vsid << (SID_SHIFT - VPN_SHIFT)) | ((eaddr & ~ESID_MASK) >> VPN_SHIFT) 177 177 178 178 next_pteg: 179 179 if (rr == 16) { ··· 244 244 dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n", 245 245 orig_pte->may_write ? 'w' : '-', 246 246 orig_pte->may_execute ? 'x' : '-', 247 - orig_pte->eaddr, (ulong)pteg, va, 247 + orig_pte->eaddr, (ulong)pteg, vpn, 248 248 orig_pte->vpage, hpaddr); 249 249 250 250 pte->slot = (ulong)&pteg[rr]; 251 - pte->host_va = va; 251 + pte->host_vpn = vpn; 252 252 pte->pte = *orig_pte; 253 253 pte->pfn = hpaddr >> PAGE_SHIFT; 254 254
+10 -7
arch/powerpc/kvm/book3s_64_mmu_host.c
··· 33 33 34 34 void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) 35 35 { 36 - ppc_md.hpte_invalidate(pte->slot, pte->host_va, 36 + ppc_md.hpte_invalidate(pte->slot, pte->host_vpn, 37 37 MMU_PAGE_4K, MMU_SEGSIZE_256M, 38 38 false); 39 39 } ··· 80 80 81 81 int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) 82 82 { 83 + unsigned long vpn; 83 84 pfn_t hpaddr; 84 - ulong hash, hpteg, va; 85 + ulong hash, hpteg; 85 86 u64 vsid; 86 87 int ret; 87 88 int rflags = 0x192; ··· 118 117 } 119 118 120 119 vsid = map->host_vsid; 121 - va = hpt_va(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M); 120 + vpn = hpt_vpn(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M); 122 121 123 122 if (!orig_pte->may_write) 124 123 rflags |= HPTE_R_PP; ··· 130 129 else 131 130 kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT); 132 131 133 - hash = hpt_hash(va, PTE_SIZE, MMU_SEGSIZE_256M); 132 + hash = hpt_hash(vpn, PTE_SIZE, MMU_SEGSIZE_256M); 134 133 135 134 map_again: 136 135 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); ··· 142 141 goto out; 143 142 } 144 143 145 - ret = ppc_md.hpte_insert(hpteg, va, hpaddr, rflags, vflags, MMU_PAGE_4K, MMU_SEGSIZE_256M); 144 + ret = ppc_md.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags, 145 + MMU_PAGE_4K, MMU_SEGSIZE_256M); 146 146 147 147 if (ret < 0) { 148 148 /* If we couldn't map a primary PTE, try a secondary */ ··· 154 152 } else { 155 153 struct hpte_cache *pte = kvmppc_mmu_hpte_cache_next(vcpu); 156 154 157 - trace_kvm_book3s_64_mmu_map(rflags, hpteg, va, hpaddr, orig_pte); 155 + trace_kvm_book3s_64_mmu_map(rflags, hpteg, 156 + vpn, hpaddr, orig_pte); 158 157 159 158 /* The ppc_md code may give us a secondary entry even though we 160 159 asked for a primary. Fix up. */ ··· 165 162 } 166 163 167 164 pte->slot = hpteg + (ret & 7); 168 - pte->host_va = va; 165 + pte->host_vpn = vpn; 169 166 pte->pte = *orig_pte; 170 167 pte->pfn = hpaddr >> PAGE_SHIFT; 171 168
+7 -7
arch/powerpc/kvm/trace.h
··· 189 189 TP_ARGS(pte), 190 190 191 191 TP_STRUCT__entry( 192 - __field( u64, host_va ) 192 + __field( u64, host_vpn ) 193 193 __field( u64, pfn ) 194 194 __field( ulong, eaddr ) 195 195 __field( u64, vpage ) ··· 198 198 ), 199 199 200 200 TP_fast_assign( 201 - __entry->host_va = pte->host_va; 201 + __entry->host_vpn = pte->host_vpn; 202 202 __entry->pfn = pte->pfn; 203 203 __entry->eaddr = pte->pte.eaddr; 204 204 __entry->vpage = pte->pte.vpage; ··· 208 208 (pte->pte.may_execute ? 0x1 : 0); 209 209 ), 210 210 211 - TP_printk("Map: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]", 212 - __entry->host_va, __entry->pfn, __entry->eaddr, 211 + TP_printk("Map: hvpn=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]", 212 + __entry->host_vpn, __entry->pfn, __entry->eaddr, 213 213 __entry->vpage, __entry->raddr, __entry->flags) 214 214 ); 215 215 ··· 218 218 TP_ARGS(pte), 219 219 220 220 TP_STRUCT__entry( 221 - __field( u64, host_va ) 221 + __field( u64, host_vpn ) 222 222 __field( u64, pfn ) 223 223 __field( ulong, eaddr ) 224 224 __field( u64, vpage ) ··· 227 227 ), 228 228 229 229 TP_fast_assign( 230 - __entry->host_va = pte->host_va; 230 + __entry->host_vpn = pte->host_vpn; 231 231 __entry->pfn = pte->pfn; 232 232 __entry->eaddr = pte->pte.eaddr; 233 233 __entry->vpage = pte->pte.vpage; ··· 238 238 ), 239 239 240 240 TP_printk("Flush: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]", 241 - __entry->host_va, __entry->pfn, __entry->eaddr, 241 + __entry->host_vpn, __entry->pfn, __entry->eaddr, 242 242 __entry->vpage, __entry->raddr, __entry->flags) 243 243 ); 244 244
+60 -37
arch/powerpc/mm/hash_low_64.S
··· 63 63 /* Save non-volatile registers. 64 64 * r31 will hold "old PTE" 65 65 * r30 is "new PTE" 66 - * r29 is "va" 66 + * r29 is vpn 67 67 * r28 is a hash value 68 68 * r27 is hashtab mask (maybe dynamic patched instead ?) 69 69 */ ··· 111 111 cmpdi r9,0 /* check segment size */ 112 112 bne 3f 113 113 END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) 114 - /* Calc va and put it in r29 */ 115 - rldicr r29,r5,28,63-28 116 - rldicl r3,r3,0,36 117 - or r29,r3,r29 114 + /* Calc vpn and put it in r29 */ 115 + sldi r29,r5,SID_SHIFT - VPN_SHIFT 116 + rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT) 117 + or r29,r28,r29 118 118 119 119 /* Calculate hash value for primary slot and store it in r28 */ 120 120 rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */ ··· 122 122 xor r28,r5,r0 123 123 b 4f 124 124 125 - 3: /* Calc VA and hash in r29 and r28 for 1T segment */ 126 - sldi r29,r5,40 /* vsid << 40 */ 127 - clrldi r3,r3,24 /* ea & 0xffffffffff */ 125 + 3: /* Calc vpn and put it in r29 */ 126 + sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT 127 + rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT) 128 + or r29,r28,r29 129 + 130 + /* 131 + * calculate hash value for primary slot and 132 + * store it in r28 for 1T segment 133 + */ 128 134 rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */ 129 135 clrldi r5,r5,40 /* vsid & 0xffffff */ 130 136 rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */ 131 137 xor r28,r28,r5 132 - or r29,r3,r29 /* VA */ 133 138 xor r28,r28,r0 /* hash */ 134 139 135 140 /* Convert linux PTE bits into HW equivalents */ ··· 190 185 191 186 /* Call ppc_md.hpte_insert */ 192 187 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ 193 - mr r4,r29 /* Retrieve va */ 188 + mr r4,r29 /* Retrieve vpn */ 194 189 li r7,0 /* !bolted, !secondary */ 195 190 li r8,MMU_PAGE_4K /* page size */ 196 191 ld r9,STK_PARAM(R9)(r1) /* segment size */ ··· 213 208 214 209 /* Call ppc_md.hpte_insert */ 215 210 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ 216 - mr r4,r29 
/* Retrieve va */ 211 + mr r4,r29 /* Retrieve vpn */ 217 212 li r7,HPTE_V_SECONDARY /* !bolted, secondary */ 218 213 li r8,MMU_PAGE_4K /* page size */ 219 214 ld r9,STK_PARAM(R9)(r1) /* segment size */ ··· 283 278 add r3,r0,r3 /* add slot idx */ 284 279 285 280 /* Call ppc_md.hpte_updatepp */ 286 - mr r5,r29 /* va */ 281 + mr r5,r29 /* vpn */ 287 282 li r6,MMU_PAGE_4K /* page size */ 288 283 ld r7,STK_PARAM(R9)(r1) /* segment size */ 289 284 ld r8,STK_PARAM(R8)(r1) /* get "local" param */ ··· 344 339 /* Save non-volatile registers. 345 340 * r31 will hold "old PTE" 346 341 * r30 is "new PTE" 347 - * r29 is "va" 342 + * r29 is vpn 348 343 * r28 is a hash value 349 344 * r27 is hashtab mask (maybe dynamic patched instead ?) 350 345 * r26 is the hidx mask ··· 399 394 cmpdi r9,0 /* check segment size */ 400 395 bne 3f 401 396 END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) 402 - /* Calc va and put it in r29 */ 403 - rldicr r29,r5,28,63-28 /* r29 = (vsid << 28) */ 404 - rldicl r3,r3,0,36 /* r3 = (ea & 0x0fffffff) */ 405 - or r29,r3,r29 /* r29 = va */ 397 + /* Calc vpn and put it in r29 */ 398 + sldi r29,r5,SID_SHIFT - VPN_SHIFT 399 + /* 400 + * clrldi r3,r3,64 - SID_SHIFT --> ea & 0xfffffff 401 + * srdi r28,r3,VPN_SHIFT 402 + */ 403 + rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT) 404 + or r29,r28,r29 406 405 407 406 /* Calculate hash value for primary slot and store it in r28 */ 408 407 rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */ ··· 414 405 xor r28,r5,r0 415 406 b 4f 416 407 417 - 3: /* Calc VA and hash in r29 and r28 for 1T segment */ 418 - sldi r29,r5,40 /* vsid << 40 */ 419 - clrldi r3,r3,24 /* ea & 0xffffffffff */ 408 + 3: /* Calc vpn and put it in r29 */ 409 + sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT 410 + /* 411 + * clrldi r3,r3,64 - SID_SHIFT_1T --> ea & 0xffffffffff 412 + * srdi r28,r3,VPN_SHIFT 413 + */ 414 + rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT) 415 + or r29,r28,r29 416 + 417 + /* 418 + * Calculate hash value for primary 
slot and 419 + * store it in r28 for 1T segment 420 + */ 420 421 rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */ 421 422 clrldi r5,r5,40 /* vsid & 0xffffff */ 422 423 rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */ 423 424 xor r28,r28,r5 424 - or r29,r3,r29 /* VA */ 425 425 xor r28,r28,r0 /* hash */ 426 426 427 427 /* Convert linux PTE bits into HW equivalents */ ··· 506 488 507 489 /* Call ppc_md.hpte_insert */ 508 490 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ 509 - mr r4,r29 /* Retrieve va */ 491 + mr r4,r29 /* Retrieve vpn */ 510 492 li r7,0 /* !bolted, !secondary */ 511 493 li r8,MMU_PAGE_4K /* page size */ 512 494 ld r9,STK_PARAM(R9)(r1) /* segment size */ ··· 533 515 534 516 /* Call ppc_md.hpte_insert */ 535 517 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ 536 - mr r4,r29 /* Retrieve va */ 518 + mr r4,r29 /* Retrieve vpn */ 537 519 li r7,HPTE_V_SECONDARY /* !bolted, secondary */ 538 520 li r8,MMU_PAGE_4K /* page size */ 539 521 ld r9,STK_PARAM(R9)(r1) /* segment size */ ··· 565 547 * useless now that the segment has been switched to 4k pages. 566 548 */ 567 549 htab_inval_old_hpte: 568 - mr r3,r29 /* virtual addr */ 550 + mr r3,r29 /* vpn */ 569 551 mr r4,r31 /* PTE.pte */ 570 552 li r5,0 /* PTE.hidx */ 571 553 li r6,MMU_PAGE_64K /* psize */ ··· 638 620 add r3,r0,r3 /* add slot idx */ 639 621 640 622 /* Call ppc_md.hpte_updatepp */ 641 - mr r5,r29 /* va */ 623 + mr r5,r29 /* vpn */ 642 624 li r6,MMU_PAGE_4K /* page size */ 643 625 ld r7,STK_PARAM(R9)(r1) /* segment size */ 644 626 ld r8,STK_PARAM(R8)(r1) /* get "local" param */ ··· 694 676 /* Save non-volatile registers. 695 677 * r31 will hold "old PTE" 696 678 * r30 is "new PTE" 697 - * r29 is "va" 679 + * r29 is vpn 698 680 * r28 is a hash value 699 681 * r27 is hashtab mask (maybe dynamic patched instead ?) 
700 682 */ ··· 747 729 cmpdi r9,0 /* check segment size */ 748 730 bne 3f 749 731 END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) 750 - /* Calc va and put it in r29 */ 751 - rldicr r29,r5,28,63-28 752 - rldicl r3,r3,0,36 753 - or r29,r3,r29 732 + /* Calc vpn and put it in r29 */ 733 + sldi r29,r5,SID_SHIFT - VPN_SHIFT 734 + rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT) 735 + or r29,r28,r29 754 736 755 737 /* Calculate hash value for primary slot and store it in r28 */ 756 738 rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */ ··· 758 740 xor r28,r5,r0 759 741 b 4f 760 742 761 - 3: /* Calc VA and hash in r29 and r28 for 1T segment */ 762 - sldi r29,r5,40 /* vsid << 40 */ 763 - clrldi r3,r3,24 /* ea & 0xffffffffff */ 743 + 3: /* Calc vpn and put it in r29 */ 744 + sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT 745 + rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT) 746 + or r29,r28,r29 747 + 748 + /* 749 + * calculate hash value for primary slot and 750 + * store it in r28 for 1T segment 751 + */ 764 752 rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */ 765 753 clrldi r5,r5,40 /* vsid & 0xffffff */ 766 754 rldicl r0,r3,64-16,40 /* (ea >> 16) & 0xffffff */ 767 755 xor r28,r28,r5 768 - or r29,r3,r29 /* VA */ 769 756 xor r28,r28,r0 /* hash */ 770 757 771 758 /* Convert linux PTE bits into HW equivalents */ ··· 829 806 830 807 /* Call ppc_md.hpte_insert */ 831 808 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ 832 - mr r4,r29 /* Retrieve va */ 809 + mr r4,r29 /* Retrieve vpn */ 833 810 li r7,0 /* !bolted, !secondary */ 834 811 li r8,MMU_PAGE_64K 835 812 ld r9,STK_PARAM(R9)(r1) /* segment size */ ··· 852 829 853 830 /* Call ppc_md.hpte_insert */ 854 831 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ 855 - mr r4,r29 /* Retrieve va */ 832 + mr r4,r29 /* Retrieve vpn */ 856 833 li r7,HPTE_V_SECONDARY /* !bolted, secondary */ 857 834 li r8,MMU_PAGE_64K 858 835 ld r9,STK_PARAM(R9)(r1) /* segment size */ ··· 922 899 add r3,r0,r3 /* add slot idx */ 923 900 
924 901 /* Call ppc_md.hpte_updatepp */ 925 - mr r5,r29 /* va */ 902 + mr r5,r29 /* vpn */ 926 903 li r6,MMU_PAGE_64K 927 904 ld r7,STK_PARAM(R9)(r1) /* segment size */ 928 905 ld r8,STK_PARAM(R8)(r1) /* get "local" param */
+72 -49
arch/powerpc/mm/hash_native_64.c
··· 39 39 40 40 DEFINE_RAW_SPINLOCK(native_tlbie_lock); 41 41 42 - static inline void __tlbie(unsigned long va, int psize, int ssize) 42 + static inline void __tlbie(unsigned long vpn, int psize, int ssize) 43 43 { 44 + unsigned long va; 44 45 unsigned int penc; 45 46 46 - /* clear top 16 bits, non SLS segment */ 47 + /* 48 + * We need 14 to 65 bits of va for a tlibe of 4K page 49 + * With vpn we ignore the lower VPN_SHIFT bits already. 50 + * And top two bits are already ignored because we can 51 + * only accomadate 76 bits in a 64 bit vpn with a VPN_SHIFT 52 + * of 12. 53 + */ 54 + va = vpn << VPN_SHIFT; 55 + /* 56 + * clear top 16 bits of 64bit va, non SLS segment 57 + * Older versions of the architecture (2.02 and earler) require the 58 + * masking of the top 16 bits. 59 + */ 47 60 va &= ~(0xffffULL << 48); 48 61 49 62 switch (psize) { 50 63 case MMU_PAGE_4K: 51 - va &= ~0xffful; 52 64 va |= ssize << 8; 53 65 asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2) 54 66 : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206) 55 67 : "memory"); 56 68 break; 57 69 default: 70 + /* We need 14 to 14 + i bits of va */ 58 71 penc = mmu_psize_defs[psize].penc; 59 72 va &= ~((1ul << mmu_psize_defs[psize].shift) - 1); 60 73 va |= penc << 12; ··· 80 67 } 81 68 } 82 69 83 - static inline void __tlbiel(unsigned long va, int psize, int ssize) 70 + static inline void __tlbiel(unsigned long vpn, int psize, int ssize) 84 71 { 72 + unsigned long va; 85 73 unsigned int penc; 86 74 87 - /* clear top 16 bits, non SLS segment */ 75 + /* VPN_SHIFT can be atmost 12 */ 76 + va = vpn << VPN_SHIFT; 77 + /* 78 + * clear top 16 bits of 64 bit va, non SLS segment 79 + * Older versions of the architecture (2.02 and earler) require the 80 + * masking of the top 16 bits. 
81 + */ 88 82 va &= ~(0xffffULL << 48); 89 83 90 84 switch (psize) { 91 85 case MMU_PAGE_4K: 92 - va &= ~0xffful; 93 86 va |= ssize << 8; 94 87 asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)" 95 88 : : "r"(va) : "memory"); 96 89 break; 97 90 default: 91 + /* We need 14 to 14 + i bits of va */ 98 92 penc = mmu_psize_defs[psize].penc; 99 93 va &= ~((1ul << mmu_psize_defs[psize].shift) - 1); 100 94 va |= penc << 12; ··· 114 94 115 95 } 116 96 117 - static inline void tlbie(unsigned long va, int psize, int ssize, int local) 97 + static inline void tlbie(unsigned long vpn, int psize, int ssize, int local) 118 98 { 119 99 unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL); 120 100 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); ··· 125 105 raw_spin_lock(&native_tlbie_lock); 126 106 asm volatile("ptesync": : :"memory"); 127 107 if (use_local) { 128 - __tlbiel(va, psize, ssize); 108 + __tlbiel(vpn, psize, ssize); 129 109 asm volatile("ptesync": : :"memory"); 130 110 } else { 131 - __tlbie(va, psize, ssize); 111 + __tlbie(vpn, psize, ssize); 132 112 asm volatile("eieio; tlbsync; ptesync": : :"memory"); 133 113 } 134 114 if (lock_tlbie && !use_local) ··· 154 134 clear_bit_unlock(HPTE_LOCK_BIT, word); 155 135 } 156 136 157 - static long native_hpte_insert(unsigned long hpte_group, unsigned long va, 137 + static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn, 158 138 unsigned long pa, unsigned long rflags, 159 139 unsigned long vflags, int psize, int ssize) 160 140 { ··· 163 143 int i; 164 144 165 145 if (!(vflags & HPTE_V_BOLTED)) { 166 - DBG_LOW(" insert(group=%lx, va=%016lx, pa=%016lx," 146 + DBG_LOW(" insert(group=%lx, vpn=%016lx, pa=%016lx," 167 147 " rflags=%lx, vflags=%lx, psize=%d)\n", 168 - hpte_group, va, pa, rflags, vflags, psize); 148 + hpte_group, vpn, pa, rflags, vflags, psize); 169 149 } 170 150 171 151 for (i = 0; i < HPTES_PER_GROUP; i++) { ··· 183 163 if (i == HPTES_PER_GROUP) 184 164 return -1; 185 165 
186 - hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID; 166 + hpte_v = hpte_encode_v(vpn, psize, ssize) | vflags | HPTE_V_VALID; 187 167 hpte_r = hpte_encode_r(pa, psize) | rflags; 188 168 189 169 if (!(vflags & HPTE_V_BOLTED)) { ··· 245 225 } 246 226 247 227 static long native_hpte_updatepp(unsigned long slot, unsigned long newpp, 248 - unsigned long va, int psize, int ssize, 228 + unsigned long vpn, int psize, int ssize, 249 229 int local) 250 230 { 251 231 struct hash_pte *hptep = htab_address + slot; 252 232 unsigned long hpte_v, want_v; 253 233 int ret = 0; 254 234 255 - want_v = hpte_encode_v(va, psize, ssize); 235 + want_v = hpte_encode_v(vpn, psize, ssize); 256 236 257 - DBG_LOW(" update(va=%016lx, avpnv=%016lx, hash=%016lx, newpp=%x)", 258 - va, want_v & HPTE_V_AVPN, slot, newpp); 237 + DBG_LOW(" update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)", 238 + vpn, want_v & HPTE_V_AVPN, slot, newpp); 259 239 260 240 native_lock_hpte(hptep); 261 241 ··· 274 254 native_unlock_hpte(hptep); 275 255 276 256 /* Ensure it is out of the tlb too. 
*/ 277 - tlbie(va, psize, ssize, local); 257 + tlbie(vpn, psize, ssize, local); 278 258 279 259 return ret; 280 260 } 281 261 282 - static long native_hpte_find(unsigned long va, int psize, int ssize) 262 + static long native_hpte_find(unsigned long vpn, int psize, int ssize) 283 263 { 284 264 struct hash_pte *hptep; 285 265 unsigned long hash; ··· 287 267 long slot; 288 268 unsigned long want_v, hpte_v; 289 269 290 - hash = hpt_hash(va, mmu_psize_defs[psize].shift, ssize); 291 - want_v = hpte_encode_v(va, psize, ssize); 270 + hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize); 271 + want_v = hpte_encode_v(vpn, psize, ssize); 292 272 293 273 /* Bolted mappings are only ever in the primary group */ 294 274 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; ··· 315 295 static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea, 316 296 int psize, int ssize) 317 297 { 318 - unsigned long vsid, va; 298 + unsigned long vpn; 299 + unsigned long vsid; 319 300 long slot; 320 301 struct hash_pte *hptep; 321 302 322 303 vsid = get_kernel_vsid(ea, ssize); 323 - va = hpt_va(ea, vsid, ssize); 304 + vpn = hpt_vpn(ea, vsid, ssize); 324 305 325 - slot = native_hpte_find(va, psize, ssize); 306 + slot = native_hpte_find(vpn, psize, ssize); 326 307 if (slot == -1) 327 308 panic("could not find page to bolt\n"); 328 309 hptep = htab_address + slot; ··· 333 312 (newpp & (HPTE_R_PP | HPTE_R_N)); 334 313 335 314 /* Ensure it is out of the tlb too. 
*/ 336 - tlbie(va, psize, ssize, 0); 315 + tlbie(vpn, psize, ssize, 0); 337 316 } 338 317 339 - static void native_hpte_invalidate(unsigned long slot, unsigned long va, 318 + static void native_hpte_invalidate(unsigned long slot, unsigned long vpn, 340 319 int psize, int ssize, int local) 341 320 { 342 321 struct hash_pte *hptep = htab_address + slot; ··· 346 325 347 326 local_irq_save(flags); 348 327 349 - DBG_LOW(" invalidate(va=%016lx, hash: %x)\n", va, slot); 328 + DBG_LOW(" invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot); 350 329 351 - want_v = hpte_encode_v(va, psize, ssize); 330 + want_v = hpte_encode_v(vpn, psize, ssize); 352 331 native_lock_hpte(hptep); 353 332 hpte_v = hptep->v; 354 333 ··· 360 339 hptep->v = 0; 361 340 362 341 /* Invalidate the TLB */ 363 - tlbie(va, psize, ssize, local); 342 + tlbie(vpn, psize, ssize, local); 364 343 365 344 local_irq_restore(flags); 366 345 } ··· 370 349 #define LP_MASK(i) ((0xFF >> (i)) << LP_SHIFT) 371 350 372 351 static void hpte_decode(struct hash_pte *hpte, unsigned long slot, 373 - int *psize, int *ssize, unsigned long *va) 352 + int *psize, int *ssize, unsigned long *vpn) 374 353 { 375 354 unsigned long avpn, pteg, vpi; 376 355 unsigned long hpte_r = hpte->r; ··· 420 399 vpi = (vsid ^ pteg) & htab_hash_mask; 421 400 seg_off |= vpi << shift; 422 401 } 423 - *va = vsid << SID_SHIFT | seg_off; 402 + *vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT; 424 403 case MMU_SEGSIZE_1T: 425 404 /* We only have 40 - 23 bits of seg_off in avpn */ 426 405 seg_off = (avpn & 0x1ffff) << 23; ··· 429 408 vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask; 430 409 seg_off |= vpi << shift; 431 410 } 432 - *va = vsid << SID_SHIFT_1T | seg_off; 411 + *vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT; 433 412 default: 434 - *va = size = 0; 413 + *vpn = size = 0; 435 414 } 436 415 *psize = size; 437 416 } ··· 446 425 */ 447 426 static void native_hpte_clear(void) 448 427 { 428 + unsigned long vpn = 0; 449 
429 unsigned long slot, slots, flags; 450 430 struct hash_pte *hptep = htab_address; 451 - unsigned long hpte_v, va; 431 + unsigned long hpte_v; 452 432 unsigned long pteg_count; 453 433 int psize, ssize; 454 434 ··· 477 455 * already hold the native_tlbie_lock. 478 456 */ 479 457 if (hpte_v & HPTE_V_VALID) { 480 - hpte_decode(hptep, slot, &psize, &ssize, &va); 458 + hpte_decode(hptep, slot, &psize, &ssize, &vpn); 481 459 hptep->v = 0; 482 - __tlbie(va, psize, ssize); 460 + __tlbie(vpn, psize, ssize); 483 461 } 484 462 } 485 463 ··· 494 472 */ 495 473 static void native_flush_hash_range(unsigned long number, int local) 496 474 { 497 - unsigned long va, hash, index, hidx, shift, slot; 475 + unsigned long vpn; 476 + unsigned long hash, index, hidx, shift, slot; 498 477 struct hash_pte *hptep; 499 478 unsigned long hpte_v; 500 479 unsigned long want_v; ··· 509 486 local_irq_save(flags); 510 487 511 488 for (i = 0; i < number; i++) { 512 - va = batch->vaddr[i]; 489 + vpn = batch->vpn[i]; 513 490 pte = batch->pte[i]; 514 491 515 - pte_iterate_hashed_subpages(pte, psize, va, index, shift) { 516 - hash = hpt_hash(va, shift, ssize); 492 + pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) { 493 + hash = hpt_hash(vpn, shift, ssize); 517 494 hidx = __rpte_to_hidx(pte, index); 518 495 if (hidx & _PTEIDX_SECONDARY) 519 496 hash = ~hash; 520 497 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 521 498 slot += hidx & _PTEIDX_GROUP_IX; 522 499 hptep = htab_address + slot; 523 - want_v = hpte_encode_v(va, psize, ssize); 500 + want_v = hpte_encode_v(vpn, psize, ssize); 524 501 native_lock_hpte(hptep); 525 502 hpte_v = hptep->v; 526 503 if (!HPTE_V_COMPARE(hpte_v, want_v) || ··· 535 512 mmu_psize_defs[psize].tlbiel && local) { 536 513 asm volatile("ptesync":::"memory"); 537 514 for (i = 0; i < number; i++) { 538 - va = batch->vaddr[i]; 515 + vpn = batch->vpn[i]; 539 516 pte = batch->pte[i]; 540 517 541 - pte_iterate_hashed_subpages(pte, psize, va, index, 542 - shift) { 543 
- __tlbiel(va, psize, ssize); 518 + pte_iterate_hashed_subpages(pte, psize, 519 + vpn, index, shift) { 520 + __tlbiel(vpn, psize, ssize); 544 521 } pte_iterate_hashed_end(); 545 522 } 546 523 asm volatile("ptesync":::"memory"); ··· 552 529 553 530 asm volatile("ptesync":::"memory"); 554 531 for (i = 0; i < number; i++) { 555 - va = batch->vaddr[i]; 532 + vpn = batch->vpn[i]; 556 533 pte = batch->pte[i]; 557 534 558 - pte_iterate_hashed_subpages(pte, psize, va, index, 559 - shift) { 560 - __tlbie(va, psize, ssize); 535 + pte_iterate_hashed_subpages(pte, psize, 536 + vpn, index, shift) { 537 + __tlbie(vpn, psize, ssize); 561 538 } pte_iterate_hashed_end(); 562 539 } 563 540 asm volatile("eieio; tlbsync; ptesync":::"memory");
+15 -15
arch/powerpc/mm/hash_utils_64.c
··· 191 191 vaddr += step, paddr += step) { 192 192 unsigned long hash, hpteg; 193 193 unsigned long vsid = get_kernel_vsid(vaddr, ssize); 194 - unsigned long va = hpt_va(vaddr, vsid, ssize); 194 + unsigned long vpn = hpt_vpn(vaddr, vsid, ssize); 195 195 unsigned long tprot = prot; 196 196 197 197 /* Make kernel text executable */ 198 198 if (overlaps_kernel_text(vaddr, vaddr + step)) 199 199 tprot &= ~HPTE_R_N; 200 200 201 - hash = hpt_hash(va, shift, ssize); 201 + hash = hpt_hash(vpn, shift, ssize); 202 202 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); 203 203 204 204 BUG_ON(!ppc_md.hpte_insert); 205 - ret = ppc_md.hpte_insert(hpteg, va, paddr, tprot, 205 + ret = ppc_md.hpte_insert(hpteg, vpn, paddr, tprot, 206 206 HPTE_V_BOLTED, psize, ssize); 207 207 208 208 if (ret < 0) ··· 1152 1152 /* WARNING: This is called from hash_low_64.S, if you change this prototype, 1153 1153 * do not forget to update the assembly call site ! 1154 1154 */ 1155 - void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int ssize, 1155 + void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize, 1156 1156 int local) 1157 1157 { 1158 1158 unsigned long hash, index, shift, hidx, slot; 1159 1159 1160 - DBG_LOW("flush_hash_page(va=%016lx)\n", va); 1161 - pte_iterate_hashed_subpages(pte, psize, va, index, shift) { 1162 - hash = hpt_hash(va, shift, ssize); 1160 + DBG_LOW("flush_hash_page(vpn=%016lx)\n", vpn); 1161 + pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) { 1162 + hash = hpt_hash(vpn, shift, ssize); 1163 1163 hidx = __rpte_to_hidx(pte, index); 1164 1164 if (hidx & _PTEIDX_SECONDARY) 1165 1165 hash = ~hash; 1166 1166 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 1167 1167 slot += hidx & _PTEIDX_GROUP_IX; 1168 1168 DBG_LOW(" sub %ld: hash=%lx, hidx=%lx\n", index, slot, hidx); 1169 - ppc_md.hpte_invalidate(slot, va, psize, ssize, local); 1169 + ppc_md.hpte_invalidate(slot, vpn, psize, ssize, local); 1170 1170 } pte_iterate_hashed_end(); 
1171 1171 } 1172 1172 ··· 1180 1180 &__get_cpu_var(ppc64_tlb_batch); 1181 1181 1182 1182 for (i = 0; i < number; i++) 1183 - flush_hash_page(batch->vaddr[i], batch->pte[i], 1183 + flush_hash_page(batch->vpn[i], batch->pte[i], 1184 1184 batch->psize, batch->ssize, local); 1185 1185 } 1186 1186 } ··· 1207 1207 { 1208 1208 unsigned long hash, hpteg; 1209 1209 unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize); 1210 - unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize); 1210 + unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize); 1211 1211 unsigned long mode = htab_convert_pte_flags(PAGE_KERNEL); 1212 1212 int ret; 1213 1213 1214 - hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize); 1214 + hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize); 1215 1215 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); 1216 1216 1217 - ret = ppc_md.hpte_insert(hpteg, va, __pa(vaddr), 1217 + ret = ppc_md.hpte_insert(hpteg, vpn, __pa(vaddr), 1218 1218 mode, HPTE_V_BOLTED, 1219 1219 mmu_linear_psize, mmu_kernel_ssize); 1220 1220 BUG_ON (ret < 0); ··· 1228 1228 { 1229 1229 unsigned long hash, hidx, slot; 1230 1230 unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize); 1231 - unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize); 1231 + unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize); 1232 1232 1233 - hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize); 1233 + hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize); 1234 1234 spin_lock(&linear_map_hash_lock); 1235 1235 BUG_ON(!(linear_map_hash_slots[lmi] & 0x80)); 1236 1236 hidx = linear_map_hash_slots[lmi] & 0x7f; ··· 1240 1240 hash = ~hash; 1241 1241 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 1242 1242 slot += hidx & _PTEIDX_GROUP_IX; 1243 - ppc_md.hpte_invalidate(slot, va, mmu_linear_psize, mmu_kernel_ssize, 0); 1243 + ppc_md.hpte_invalidate(slot, vpn, mmu_linear_psize, mmu_kernel_ssize, 0); 1244 1244 } 1245 1245 1246 1246 void kernel_map_pages(struct page *page, int numpages, int enable)
+8 -7
arch/powerpc/mm/hugetlbpage-hash64.c
··· 18 18 pte_t *ptep, unsigned long trap, int local, int ssize, 19 19 unsigned int shift, unsigned int mmu_psize) 20 20 { 21 + unsigned long vpn; 21 22 unsigned long old_pte, new_pte; 22 - unsigned long va, rflags, pa, sz; 23 + unsigned long rflags, pa, sz; 23 24 long slot; 24 25 25 26 BUG_ON(shift != mmu_psize_defs[mmu_psize].shift); 26 27 27 28 /* Search the Linux page table for a match with va */ 28 - va = hpt_va(ea, vsid, ssize); 29 + vpn = hpt_vpn(ea, vsid, ssize); 29 30 30 31 /* At this point, we have a pte (old_pte) which can be used to build 31 32 * or update an HPTE. There are 2 cases: ··· 70 69 /* There MIGHT be an HPTE for this pte */ 71 70 unsigned long hash, slot; 72 71 73 - hash = hpt_hash(va, shift, ssize); 72 + hash = hpt_hash(vpn, shift, ssize); 74 73 if (old_pte & _PAGE_F_SECOND) 75 74 hash = ~hash; 76 75 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 77 76 slot += (old_pte & _PAGE_F_GIX) >> 12; 78 77 79 - if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_psize, 78 + if (ppc_md.hpte_updatepp(slot, rflags, vpn, mmu_psize, 80 79 ssize, local) == -1) 81 80 old_pte &= ~_PAGE_HPTEFLAGS; 82 81 } 83 82 84 83 if (likely(!(old_pte & _PAGE_HASHPTE))) { 85 - unsigned long hash = hpt_hash(va, shift, ssize); 84 + unsigned long hash = hpt_hash(vpn, shift, ssize); 86 85 unsigned long hpte_group; 87 86 88 87 pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT; ··· 102 101 _PAGE_COHERENT | _PAGE_GUARDED)); 103 102 104 103 /* Insert into the hash table, primary slot */ 105 - slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0, 104 + slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0, 106 105 mmu_psize, ssize); 107 106 108 107 /* Primary is full, try the secondary */ 109 108 if (unlikely(slot == -1)) { 110 109 hpte_group = ((~hash & htab_hash_mask) * 111 110 HPTES_PER_GROUP) & ~0x7UL; 112 - slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 111 + slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 113 112 HPTE_V_SECONDARY, 114 113 mmu_psize, ssize); 115 114 
if (slot == -1) {
+6 -5
arch/powerpc/mm/tlb_hash64.c
··· 42 42 void hpte_need_flush(struct mm_struct *mm, unsigned long addr, 43 43 pte_t *ptep, unsigned long pte, int huge) 44 44 { 45 + unsigned long vpn; 45 46 struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch); 46 - unsigned long vsid, vaddr; 47 + unsigned long vsid; 47 48 unsigned int psize; 48 49 int ssize; 49 50 real_pte_t rpte; ··· 87 86 vsid = get_kernel_vsid(addr, mmu_kernel_ssize); 88 87 ssize = mmu_kernel_ssize; 89 88 } 90 - vaddr = hpt_va(addr, vsid, ssize); 89 + vpn = hpt_vpn(addr, vsid, ssize); 91 90 rpte = __real_pte(__pte(pte), ptep); 92 91 93 92 /* ··· 97 96 * and decide to use local invalidates instead... 98 97 */ 99 98 if (!batch->active) { 100 - flush_hash_page(vaddr, rpte, psize, ssize, 0); 99 + flush_hash_page(vpn, rpte, psize, ssize, 0); 101 100 put_cpu_var(ppc64_tlb_batch); 102 101 return; 103 102 } ··· 123 122 batch->ssize = ssize; 124 123 } 125 124 batch->pte[i] = rpte; 126 - batch->vaddr[i] = vaddr; 125 + batch->vpn[i] = vpn; 127 126 batch->index = ++i; 128 127 if (i >= PPC64_TLB_BATCH_NR) 129 128 __flush_tlb_pending(batch); ··· 147 146 if (cpumask_equal(mm_cpumask(batch->mm), tmp)) 148 147 local = 1; 149 148 if (i == 1) 150 - flush_hash_page(batch->vaddr[0], batch->pte[0], 149 + flush_hash_page(batch->vpn[0], batch->pte[0], 151 150 batch->psize, batch->ssize, local); 152 151 else 153 152 flush_hash_range(i, local);
+23 -22
arch/powerpc/platforms/cell/beat_htab.c
··· 88 88 } 89 89 90 90 static long beat_lpar_hpte_insert(unsigned long hpte_group, 91 - unsigned long va, unsigned long pa, 91 + unsigned long vpn, unsigned long pa, 92 92 unsigned long rflags, unsigned long vflags, 93 93 int psize, int ssize) 94 94 { ··· 103 103 "rflags=%lx, vflags=%lx, psize=%d)\n", 104 - hpte_group, va, pa, rflags, vflags, psize); 104 + hpte_group, vpn, pa, rflags, vflags, psize); 105 105 106 - hpte_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M) | 106 + hpte_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M) | 107 107 vflags | HPTE_V_VALID; 108 108 hpte_r = hpte_encode_r(pa, psize) | rflags; 109 109 ··· 184 184 */ 185 185 static long beat_lpar_hpte_updatepp(unsigned long slot, 186 186 unsigned long newpp, 187 - unsigned long va, 187 + unsigned long vpn, 188 188 int psize, int ssize, int local) 189 189 { 190 190 unsigned long lpar_rc; 191 191 u64 dummy0, dummy1; 192 192 unsigned long want_v; 193 193 194 - want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M); 194 + want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M); 195 195 196 196 DBG_LOW(" update: " 197 197 "avpnv=%016lx, slot=%016lx, psize: %d, newpp %016lx ... 
", ··· 220 220 return 0; 221 221 } 222 222 223 - static long beat_lpar_hpte_find(unsigned long va, int psize) 223 + static long beat_lpar_hpte_find(unsigned long vpn, int psize) 224 224 { 225 225 unsigned long hash; 226 226 unsigned long i, j; 227 227 long slot; 228 228 unsigned long want_v, hpte_v; 229 229 230 - hash = hpt_hash(va, mmu_psize_defs[psize].shift, MMU_SEGSIZE_256M); 231 - want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M); 230 + hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, MMU_SEGSIZE_256M); 231 + want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M); 232 232 233 233 for (j = 0; j < 2; j++) { 234 234 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; ··· 255 255 unsigned long ea, 256 256 int psize, int ssize) 257 257 { 258 - unsigned long lpar_rc, slot, vsid, va; 258 + unsigned long vpn; 259 + unsigned long lpar_rc, slot, vsid; 259 260 u64 dummy0, dummy1; 260 261 261 262 vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M); 262 - va = hpt_va(ea, vsid, MMU_SEGSIZE_256M); 263 + vpn = hpt_vpn(ea, vsid, MMU_SEGSIZE_256M); 263 264 264 265 raw_spin_lock(&beat_htab_lock); 265 - slot = beat_lpar_hpte_find(va, psize); 266 + slot = beat_lpar_hpte_find(vpn, psize); 266 267 BUG_ON(slot == -1); 267 268 268 269 lpar_rc = beat_write_htab_entry(0, slot, 0, newpp, 0, 7, ··· 273 272 BUG_ON(lpar_rc != 0); 274 273 } 275 274 276 - static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long va, 275 + static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn, 277 276 int psize, int ssize, int local) 278 277 { 279 278 unsigned long want_v; ··· 283 282 284 283 DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n", 285 284 slot, va, psize, local); 286 - want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M); 285 + want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M); 287 286 288 287 raw_spin_lock_irqsave(&beat_htab_lock, flags); 289 288 dummy1 = beat_lpar_hpte_getword0(slot); ··· 312 311 } 313 312 314 313 static long 
beat_lpar_hpte_insert_v3(unsigned long hpte_group, 315 - unsigned long va, unsigned long pa, 314 + unsigned long vpn, unsigned long pa, 316 315 unsigned long rflags, unsigned long vflags, 317 316 int psize, int ssize) 318 317 { ··· 323 322 return -1; 324 323 325 324 if (!(vflags & HPTE_V_BOLTED)) 326 - DBG_LOW("hpte_insert(group=%lx, va=%016lx, pa=%016lx, " 325 + DBG_LOW("hpte_insert(group=%lx, vpn=%016lx, pa=%016lx, " 327 326 "rflags=%lx, vflags=%lx, psize=%d)\n", 328 - hpte_group, va, pa, rflags, vflags, psize); 327 + hpte_group, vpn, pa, rflags, vflags, psize); 329 328 330 - hpte_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M) | 329 + hpte_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M) | 331 330 vflags | HPTE_V_VALID; 332 331 hpte_r = hpte_encode_r(pa, psize) | rflags; 333 332 ··· 365 364 */ 366 365 static long beat_lpar_hpte_updatepp_v3(unsigned long slot, 367 366 unsigned long newpp, 368 - unsigned long va, 367 + unsigned long vpn, 369 368 int psize, int ssize, int local) 370 369 { 371 370 unsigned long lpar_rc; 372 371 unsigned long want_v; 373 372 unsigned long pss; 374 373 375 - want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M); 374 + want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M); 376 375 pss = (psize == MMU_PAGE_4K) ? 
-1UL : mmu_psize_defs[psize].penc; 377 376 378 377 DBG_LOW(" update: " ··· 393 392 return 0; 394 393 } 395 394 396 - static void beat_lpar_hpte_invalidate_v3(unsigned long slot, unsigned long va, 395 + static void beat_lpar_hpte_invalidate_v3(unsigned long slot, unsigned long vpn, 397 396 int psize, int ssize, int local) 398 397 { 399 398 unsigned long want_v; 400 399 unsigned long lpar_rc; 401 400 unsigned long pss; 402 401 403 - DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n", 404 - slot, va, psize, local); 405 - want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M); 402 + DBG_LOW(" inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n", 403 + slot, vpn, psize, local); 404 + want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M); 406 405 pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc; 407 406 408 407 lpar_rc = beat_invalidate_htab_entry3(0, slot, want_v, pss);
+11 -11
arch/powerpc/platforms/ps3/htab.c
··· 43 43 44 44 static DEFINE_SPINLOCK(ps3_htab_lock); 45 45 46 - static long ps3_hpte_insert(unsigned long hpte_group, unsigned long va, 46 + static long ps3_hpte_insert(unsigned long hpte_group, unsigned long vpn, 47 47 unsigned long pa, unsigned long rflags, unsigned long vflags, 48 48 int psize, int ssize) 49 49 { ··· 61 61 */ 62 62 vflags &= ~HPTE_V_SECONDARY; 63 63 64 - hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID; 64 + hpte_v = hpte_encode_v(vpn, psize, ssize) | vflags | HPTE_V_VALID; 65 65 hpte_r = hpte_encode_r(ps3_mm_phys_to_lpar(pa), psize) | rflags; 66 66 67 67 spin_lock_irqsave(&ps3_htab_lock, flags); ··· 75 75 76 76 if (result) { 77 77 /* all entries bolted !*/ 78 - pr_info("%s:result=%d va=%lx pa=%lx ix=%lx v=%llx r=%llx\n", 79 - __func__, result, va, pa, hpte_group, hpte_v, hpte_r); 78 + pr_info("%s:result=%d vpn=%lx pa=%lx ix=%lx v=%llx r=%llx\n", 79 + __func__, result, vpn, pa, hpte_group, hpte_v, hpte_r); 80 80 BUG(); 81 81 } 82 82 ··· 107 107 } 108 108 109 109 static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp, 110 - unsigned long va, int psize, int ssize, int local) 110 + unsigned long vpn, int psize, int ssize, int local) 111 111 { 112 112 int result; 113 113 u64 hpte_v, want_v, hpte_rs; ··· 115 115 unsigned long flags; 116 116 long ret; 117 117 118 - want_v = hpte_encode_v(va, psize, ssize); 118 + want_v = hpte_encode_v(vpn, psize, ssize); 119 119 120 120 spin_lock_irqsave(&ps3_htab_lock, flags); 121 121 ··· 125 125 &hpte_rs); 126 126 127 127 if (result) { 128 - pr_info("%s: res=%d read va=%lx slot=%lx psize=%d\n", 129 - __func__, result, va, slot, psize); 128 + pr_info("%s: res=%d read vpn=%lx slot=%lx psize=%d\n", 129 + __func__, result, vpn, slot, psize); 130 130 BUG(); 131 131 } 132 132 ··· 159 159 panic("ps3_hpte_updateboltedpp() not implemented"); 160 160 } 161 161 162 - static void ps3_hpte_invalidate(unsigned long slot, unsigned long va, 162 + static void ps3_hpte_invalidate(unsigned long slot, 
unsigned long vpn, 163 163 int psize, int ssize, int local) 164 164 { 165 165 unsigned long flags; ··· 170 170 result = lv1_write_htab_entry(PS3_LPAR_VAS_ID_CURRENT, slot, 0, 0); 171 171 172 172 if (result) { 173 - pr_info("%s: res=%d va=%lx slot=%lx psize=%d\n", 174 - __func__, result, va, slot, psize); 173 + pr_info("%s: res=%d vpn=%lx slot=%lx psize=%d\n", 174 + __func__, result, vpn, slot, psize); 175 175 BUG(); 176 176 } 177 177
+31 -45
arch/powerpc/platforms/pseries/lpar.c
··· 107 107 } 108 108 109 109 static long pSeries_lpar_hpte_insert(unsigned long hpte_group, 110 - unsigned long va, unsigned long pa, 111 - unsigned long rflags, unsigned long vflags, 112 - int psize, int ssize) 110 + unsigned long vpn, unsigned long pa, 111 + unsigned long rflags, unsigned long vflags, 112 + int psize, int ssize) 113 113 { 114 114 unsigned long lpar_rc; 115 115 unsigned long flags; ··· 117 117 unsigned long hpte_v, hpte_r; 118 118 119 119 if (!(vflags & HPTE_V_BOLTED)) 120 - pr_devel("hpte_insert(group=%lx, va=%016lx, pa=%016lx, " 121 - "rflags=%lx, vflags=%lx, psize=%d)\n", 122 - hpte_group, va, pa, rflags, vflags, psize); 120 + pr_devel("hpte_insert(group=%lx, vpn=%016lx, " 121 + "pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n", 122 + hpte_group, vpn, pa, rflags, vflags, psize); 123 123 124 - hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID; 124 + hpte_v = hpte_encode_v(vpn, psize, ssize) | vflags | HPTE_V_VALID; 125 125 hpte_r = hpte_encode_r(pa, psize) | rflags; 126 126 127 127 if (!(vflags & HPTE_V_BOLTED)) ··· 226 226 } 227 227 228 228 /* 229 - * This computes the AVPN and B fields of the first dword of a HPTE, 230 - * for use when we want to match an existing PTE. The bottom 7 bits 231 - * of the returned value are zero. 232 - */ 233 - static inline unsigned long hpte_encode_avpn(unsigned long va, int psize, 234 - int ssize) 235 - { 236 - unsigned long v; 237 - 238 - v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm); 239 - v <<= HPTE_V_AVPN_SHIFT; 240 - v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT; 241 - return v; 242 - } 243 - 244 - /* 245 229 * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and 246 230 * the low 3 bits of flags happen to line up. So no transform is needed. 
247 231 * We can probably optimize here and assume the high bits of newpp are ··· 233 249 */ 234 250 static long pSeries_lpar_hpte_updatepp(unsigned long slot, 235 251 unsigned long newpp, 236 - unsigned long va, 252 + unsigned long vpn, 237 253 int psize, int ssize, int local) 238 254 { 239 255 unsigned long lpar_rc; 240 256 unsigned long flags = (newpp & 7) | H_AVPN; 241 257 unsigned long want_v; 242 258 243 - want_v = hpte_encode_avpn(va, psize, ssize); 259 + want_v = hpte_encode_avpn(vpn, psize, ssize); 244 260 245 261 pr_devel(" update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...", 246 262 want_v, slot, flags, psize); ··· 278 294 return dword0; 279 295 } 280 296 281 - static long pSeries_lpar_hpte_find(unsigned long va, int psize, int ssize) 297 + static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize) 282 298 { 283 299 unsigned long hash; 284 300 unsigned long i; 285 301 long slot; 286 302 unsigned long want_v, hpte_v; 287 303 288 - hash = hpt_hash(va, mmu_psize_defs[psize].shift, ssize); 289 - want_v = hpte_encode_avpn(va, psize, ssize); 304 + hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize); 305 + want_v = hpte_encode_avpn(vpn, psize, ssize); 290 306 291 307 /* Bolted entries are always in the primary group */ 292 308 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; ··· 306 322 unsigned long ea, 307 323 int psize, int ssize) 308 324 { 309 - unsigned long lpar_rc, slot, vsid, va, flags; 325 + unsigned long vpn; 326 + unsigned long lpar_rc, slot, vsid, flags; 310 327 311 328 vsid = get_kernel_vsid(ea, ssize); 312 - va = hpt_va(ea, vsid, ssize); 329 + vpn = hpt_vpn(ea, vsid, ssize); 313 330 314 - slot = pSeries_lpar_hpte_find(va, psize, ssize); 331 + slot = pSeries_lpar_hpte_find(vpn, psize, ssize); 315 332 BUG_ON(slot == -1); 316 333 317 334 flags = newpp & 7; ··· 321 336 BUG_ON(lpar_rc != H_SUCCESS); 322 337 } 323 338 324 - static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va, 339 + static void 
pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn, 325 340 int psize, int ssize, int local) 326 341 { 327 342 unsigned long want_v; 328 343 unsigned long lpar_rc; 329 344 unsigned long dummy1, dummy2; 330 345 331 - pr_devel(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n", 332 - slot, va, psize, local); 346 + pr_devel(" inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n", 347 + slot, vpn, psize, local); 333 348 334 - want_v = hpte_encode_avpn(va, psize, ssize); 349 + want_v = hpte_encode_avpn(vpn, psize, ssize); 335 350 lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2); 336 351 if (lpar_rc == H_NOT_FOUND) 337 352 return; ··· 342 357 static void pSeries_lpar_hpte_removebolted(unsigned long ea, 343 358 int psize, int ssize) 344 359 { 345 - unsigned long slot, vsid, va; 360 + unsigned long vpn; 361 + unsigned long slot, vsid; 346 362 347 363 vsid = get_kernel_vsid(ea, ssize); 348 - va = hpt_va(ea, vsid, ssize); 364 + vpn = hpt_vpn(ea, vsid, ssize); 349 365 350 - slot = pSeries_lpar_hpte_find(va, psize, ssize); 366 + slot = pSeries_lpar_hpte_find(vpn, psize, ssize); 351 367 BUG_ON(slot == -1); 352 368 353 - pSeries_lpar_hpte_invalidate(slot, va, psize, ssize, 0); 369 + pSeries_lpar_hpte_invalidate(slot, vpn, psize, ssize, 0); 354 370 } 355 371 356 372 /* Flag bits for H_BULK_REMOVE */ ··· 367 381 */ 368 382 static void pSeries_lpar_flush_hash_range(unsigned long number, int local) 369 383 { 384 + unsigned long vpn; 370 385 unsigned long i, pix, rc; 371 386 unsigned long flags = 0; 372 387 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); 373 388 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); 374 389 unsigned long param[9]; 375 - unsigned long va; 376 390 unsigned long hash, index, shift, hidx, slot; 377 391 real_pte_t pte; 378 392 int psize, ssize; ··· 384 398 ssize = batch->ssize; 385 399 pix = 0; 386 400 for (i = 0; i < number; i++) { 387 - va = batch->vaddr[i]; 401 + vpn = batch->vpn[i]; 388 402 
pte = batch->pte[i]; 389 - pte_iterate_hashed_subpages(pte, psize, va, index, shift) { 390 - hash = hpt_hash(va, shift, ssize); 403 + pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) { 404 + hash = hpt_hash(vpn, shift, ssize); 391 405 hidx = __rpte_to_hidx(pte, index); 392 406 if (hidx & _PTEIDX_SECONDARY) 393 407 hash = ~hash; 394 408 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 395 409 slot += hidx & _PTEIDX_GROUP_IX; 396 410 if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) { 397 - pSeries_lpar_hpte_invalidate(slot, va, psize, 411 + pSeries_lpar_hpte_invalidate(slot, vpn, psize, 398 412 ssize, local); 399 413 } else { 400 414 param[pix] = HBR_REQUEST | HBR_AVPN | slot; 401 - param[pix+1] = hpte_encode_avpn(va, psize, 415 + param[pix+1] = hpte_encode_avpn(vpn, psize, 402 416 ssize); 403 417 pix += 2; 404 418 if (pix == 8) {