[ARM] Unuse another Linux PTE bit

L_PTE_ASID is not really required to be stored in every PTE, since we
can identify it via the address passed to set_pte_at(). So, create
set_pte_ext() which takes the address of the PTE to set, the Linux
PTE value, and the additional CPU PTE bits which aren't encoded in
the Linux PTE value.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

Authored and committed by Russell King (commit ad1ae2fe, parent f06b97ff).

+84 -84
+1 -1
arch/arm/mm/consistent.c
··· 238 238 * x86 does not mark the pages reserved... 239 239 */ 240 240 SetPageReserved(page); 241 - set_pte(pte, mk_pte(page, prot)); 241 + set_pte_ext(pte, mk_pte(page, prot), 0); 242 242 page++; 243 243 pte++; 244 244 off++;
+1 -1
arch/arm/mm/copypage-v4mc.c
··· 71 71 { 72 72 spin_lock(&minicache_lock); 73 73 74 - set_pte(TOP_PTE(0xffff8000), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot)); 74 + set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot), 0); 75 75 flush_tlb_kernel_page(0xffff8000); 76 76 77 77 mc_copy_user_page((void *)0xffff8000, kto);
+3 -3
arch/arm/mm/copypage-v6.c
··· 70 70 */ 71 71 spin_lock(&v6_lock); 72 72 73 - set_pte(TOP_PTE(from_address) + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, PAGE_KERNEL)); 74 - set_pte(TOP_PTE(to_address) + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, PAGE_KERNEL)); 73 + set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, PAGE_KERNEL), 0); 74 + set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, PAGE_KERNEL), 0); 75 75 76 76 from = from_address + (offset << PAGE_SHIFT); 77 77 to = to_address + (offset << PAGE_SHIFT); ··· 110 110 */ 111 111 spin_lock(&v6_lock); 112 112 113 - set_pte(TOP_PTE(to_address) + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, PAGE_KERNEL)); 113 + set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, PAGE_KERNEL), 0); 114 114 flush_tlb_kernel_page(to); 115 115 clear_page((void *)to); 116 116
+1 -1
arch/arm/mm/copypage-xscale.c
··· 93 93 { 94 94 spin_lock(&minicache_lock); 95 95 96 - set_pte(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot)); 96 + set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot), 0); 97 97 flush_tlb_kernel_page(COPYPAGE_MINICACHE); 98 98 99 99 mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
+1 -1
arch/arm/mm/fault-armv.c
··· 61 61 if (pte_present(entry) && pte_val(entry) & shared_pte_mask) { 62 62 flush_cache_page(vma, address, pte_pfn(entry)); 63 63 pte_val(entry) &= ~shared_pte_mask; 64 - set_pte(pte, entry); 64 + set_pte_at(vma->vm_mm, address, pte, entry); 65 65 flush_tlb_page(vma, address); 66 66 ret = 1; 67 67 }
+1 -1
arch/arm/mm/flush.c
··· 26 26 unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT); 27 27 const int zero = 0; 28 28 29 - set_pte(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL)); 29 + set_pte_ext(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL), 0); 30 30 flush_tlb_kernel_page(to); 31 31 32 32 asm( "mcrr p15, 0, %1, %0, c14\n"
+2 -2
arch/arm/mm/ioremap.c
··· 40 40 41 41 static inline void 42 42 remap_area_pte(pte_t * pte, unsigned long address, unsigned long size, 43 - unsigned long phys_addr, pgprot_t pgprot) 43 + unsigned long phys_addr, pgprot_t prot) 44 44 { 45 45 unsigned long end; 46 46 ··· 53 53 if (!pte_none(*pte)) 54 54 goto bad; 55 55 56 - set_pte(pte, pfn_pte(phys_addr >> PAGE_SHIFT, pgprot)); 56 + set_pte_ext(pte, pfn_pte(phys_addr >> PAGE_SHIFT, prot), 0); 57 57 address += PAGE_SIZE; 58 58 phys_addr += PAGE_SIZE; 59 59 pte++;
+1 -7
arch/arm/mm/mmu.c
··· 294 294 mem_types[MT_DEVICE].prot_pte |= L_PTE_BUFFERABLE; 295 295 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED; 296 296 297 - /* 298 - * User pages need to be mapped with the ASID 299 - * (iow, non-global) 300 - */ 301 - user_pgprot |= L_PTE_ASID; 302 - 303 297 #ifdef CONFIG_SMP 304 298 /* 305 299 * Mark memory with the "shared" attribute for SMP systems ··· 402 408 } 403 409 ptep = pte_offset_kernel(pmdp, virt); 404 410 405 - set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot)); 411 + set_pte_ext(ptep, pfn_pte(phys >> PAGE_SHIFT, prot), 0); 406 412 } 407 413 408 414 /*
+1 -1
arch/arm/mm/pgd.c
··· 57 57 58 58 init_pmd = pmd_offset(init_pgd, 0); 59 59 init_pte = pte_offset_map_nested(init_pmd, 0); 60 - set_pte(new_pte, *init_pte); 60 + set_pte_ext(new_pte, *init_pte, 0); 61 61 pte_unmap_nested(init_pte); 62 62 pte_unmap(new_pte); 63 63 }
+2 -2
arch/arm/mm/proc-arm1020.S
··· 397 397 * Set a PTE and flush it out 398 398 */ 399 399 .align 5 400 - ENTRY(cpu_arm1020_set_pte) 400 + ENTRY(cpu_arm1020_set_pte_ext) 401 401 #ifdef CONFIG_MMU 402 402 str r1, [r0], #-2048 @ linux version 403 403 ··· 477 477 .word cpu_arm1020_do_idle 478 478 .word cpu_arm1020_dcache_clean_area 479 479 .word cpu_arm1020_switch_mm 480 - .word cpu_arm1020_set_pte 480 + .word cpu_arm1020_set_pte_ext 481 481 .size arm1020_processor_functions, . - arm1020_processor_functions 482 482 483 483 .section ".rodata"
+2 -2
arch/arm/mm/proc-arm1020e.S
··· 381 381 * Set a PTE and flush it out 382 382 */ 383 383 .align 5 384 - ENTRY(cpu_arm1020e_set_pte) 384 + ENTRY(cpu_arm1020e_set_pte_ext) 385 385 #ifdef CONFIG_MMU 386 386 str r1, [r0], #-2048 @ linux version 387 387 ··· 458 458 .word cpu_arm1020e_do_idle 459 459 .word cpu_arm1020e_dcache_clean_area 460 460 .word cpu_arm1020e_switch_mm 461 - .word cpu_arm1020e_set_pte 461 + .word cpu_arm1020e_set_pte_ext 462 462 .size arm1020e_processor_functions, . - arm1020e_processor_functions 463 463 464 464 .section ".rodata"
+3 -3
arch/arm/mm/proc-arm1022.S
··· 358 358 mov pc, lr 359 359 360 360 /* 361 - * cpu_arm1022_set_pte(ptep, pte) 361 + * cpu_arm1022_set_pte_ext(ptep, pte, ext) 362 362 * 363 363 * Set a PTE and flush it out 364 364 */ 365 365 .align 5 366 - ENTRY(cpu_arm1022_set_pte) 366 + ENTRY(cpu_arm1022_set_pte_ext) 367 367 #ifdef CONFIG_MMU 368 368 str r1, [r0], #-2048 @ linux version 369 369 ··· 441 441 .word cpu_arm1022_do_idle 442 442 .word cpu_arm1022_dcache_clean_area 443 443 .word cpu_arm1022_switch_mm 444 - .word cpu_arm1022_set_pte 444 + .word cpu_arm1022_set_pte_ext 445 445 .size arm1022_processor_functions, . - arm1022_processor_functions 446 446 447 447 .section ".rodata"
+3 -3
arch/arm/mm/proc-arm1026.S
··· 347 347 mov pc, lr 348 348 349 349 /* 350 - * cpu_arm1026_set_pte(ptep, pte) 350 + * cpu_arm1026_set_pte_ext(ptep, pte, ext) 351 351 * 352 352 * Set a PTE and flush it out 353 353 */ 354 354 .align 5 355 - ENTRY(cpu_arm1026_set_pte) 355 + ENTRY(cpu_arm1026_set_pte_ext) 356 356 #ifdef CONFIG_MMU 357 357 str r1, [r0], #-2048 @ linux version 358 358 ··· 436 436 .word cpu_arm1026_do_idle 437 437 .word cpu_arm1026_dcache_clean_area 438 438 .word cpu_arm1026_switch_mm 439 - .word cpu_arm1026_set_pte 439 + .word cpu_arm1026_set_pte_ext 440 440 .size arm1026_processor_functions, . - arm1026_processor_functions 441 441 442 442 .section .rodata
+5 -5
arch/arm/mm/proc-arm6_7.S
··· 209 209 mov pc, lr 210 210 211 211 /* 212 - * Function: arm6_7_set_pte(pte_t *ptep, pte_t pte) 212 + * Function: arm6_7_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext) 213 213 * Params : r0 = Address to set 214 214 * : r1 = value to set 215 215 * Purpose : Set a PTE and flush it out of any WB cache 216 216 */ 217 217 .align 5 218 - ENTRY(cpu_arm6_set_pte) 219 - ENTRY(cpu_arm7_set_pte) 218 + ENTRY(cpu_arm6_set_pte_ext) 219 + ENTRY(cpu_arm7_set_pte_ext) 220 220 #ifdef CONFIG_MMU 221 221 str r1, [r0], #-2048 @ linux version 222 222 ··· 299 299 .word cpu_arm6_do_idle 300 300 .word cpu_arm6_dcache_clean_area 301 301 .word cpu_arm6_switch_mm 302 - .word cpu_arm6_set_pte 302 + .word cpu_arm6_set_pte_ext 303 303 .size arm6_processor_functions, . - arm6_processor_functions 304 304 305 305 /* ··· 315 315 .word cpu_arm7_do_idle 316 316 .word cpu_arm7_dcache_clean_area 317 317 .word cpu_arm7_switch_mm 318 - .word cpu_arm7_set_pte 318 + .word cpu_arm7_set_pte_ext 319 319 .size arm7_processor_functions, . - arm7_processor_functions 320 320 321 321 .section ".rodata"
+3 -3
arch/arm/mm/proc-arm720.S
··· 88 88 mov pc, lr 89 89 90 90 /* 91 - * Function: arm720_set_pte(pte_t *ptep, pte_t pte) 91 + * Function: arm720_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext) 92 92 * Params : r0 = Address to set 93 93 * : r1 = value to set 94 94 * Purpose : Set a PTE and flush it out of any WB cache 95 95 */ 96 96 .align 5 97 - ENTRY(cpu_arm720_set_pte) 97 + ENTRY(cpu_arm720_set_pte_ext) 98 98 #ifdef CONFIG_MMU 99 99 str r1, [r0], #-2048 @ linux version 100 100 ··· 204 204 .word cpu_arm720_do_idle 205 205 .word cpu_arm720_dcache_clean_area 206 206 .word cpu_arm720_switch_mm 207 - .word cpu_arm720_set_pte 207 + .word cpu_arm720_set_pte_ext 208 208 .size arm720_processor_functions, . - arm720_processor_functions 209 209 210 210 .section ".rodata"
+3 -3
arch/arm/mm/proc-arm920.S
··· 344 344 mov pc, lr 345 345 346 346 /* 347 - * cpu_arm920_set_pte(ptep, pte) 347 + * cpu_arm920_set_pte(ptep, pte, ext) 348 348 * 349 349 * Set a PTE and flush it out 350 350 */ 351 351 .align 5 352 - ENTRY(cpu_arm920_set_pte) 352 + ENTRY(cpu_arm920_set_pte_ext) 353 353 #ifdef CONFIG_MMU 354 354 str r1, [r0], #-2048 @ linux version 355 355 ··· 423 423 .word cpu_arm920_do_idle 424 424 .word cpu_arm920_dcache_clean_area 425 425 .word cpu_arm920_switch_mm 426 - .word cpu_arm920_set_pte 426 + .word cpu_arm920_set_pte_ext 427 427 .size arm920_processor_functions, . - arm920_processor_functions 428 428 429 429 .section ".rodata"
+3 -3
arch/arm/mm/proc-arm922.S
··· 348 348 mov pc, lr 349 349 350 350 /* 351 - * cpu_arm922_set_pte(ptep, pte) 351 + * cpu_arm922_set_pte_ext(ptep, pte, ext) 352 352 * 353 353 * Set a PTE and flush it out 354 354 */ 355 355 .align 5 356 - ENTRY(cpu_arm922_set_pte) 356 + ENTRY(cpu_arm922_set_pte_ext) 357 357 #ifdef CONFIG_MMU 358 358 str r1, [r0], #-2048 @ linux version 359 359 ··· 427 427 .word cpu_arm922_do_idle 428 428 .word cpu_arm922_dcache_clean_area 429 429 .word cpu_arm922_switch_mm 430 - .word cpu_arm922_set_pte 430 + .word cpu_arm922_set_pte_ext 431 431 .size arm922_processor_functions, . - arm922_processor_functions 432 432 433 433 .section ".rodata"
+3 -3
arch/arm/mm/proc-arm925.S
··· 391 391 mov pc, lr 392 392 393 393 /* 394 - * cpu_arm925_set_pte(ptep, pte) 394 + * cpu_arm925_set_pte_ext(ptep, pte, ext) 395 395 * 396 396 * Set a PTE and flush it out 397 397 */ 398 398 .align 5 399 - ENTRY(cpu_arm925_set_pte) 399 + ENTRY(cpu_arm925_set_pte_ext) 400 400 #ifdef CONFIG_MMU 401 401 str r1, [r0], #-2048 @ linux version 402 402 ··· 490 490 .word cpu_arm925_do_idle 491 491 .word cpu_arm925_dcache_clean_area 492 492 .word cpu_arm925_switch_mm 493 - .word cpu_arm925_set_pte 493 + .word cpu_arm925_set_pte_ext 494 494 .size arm925_processor_functions, . - arm925_processor_functions 495 495 496 496 .section ".rodata"
+3 -3
arch/arm/mm/proc-arm926.S
··· 348 348 mov pc, lr 349 349 350 350 /* 351 - * cpu_arm926_set_pte(ptep, pte) 351 + * cpu_arm926_set_pte_ext(ptep, pte, ext) 352 352 * 353 353 * Set a PTE and flush it out 354 354 */ 355 355 .align 5 356 - ENTRY(cpu_arm926_set_pte) 356 + ENTRY(cpu_arm926_set_pte_ext) 357 357 #ifdef CONFIG_MMU 358 358 str r1, [r0], #-2048 @ linux version 359 359 ··· 439 439 .word cpu_arm926_do_idle 440 440 .word cpu_arm926_dcache_clean_area 441 441 .word cpu_arm926_switch_mm 442 - .word cpu_arm926_set_pte 442 + .word cpu_arm926_set_pte_ext 443 443 .size arm926_processor_functions, . - arm926_processor_functions 444 444 445 445 .section ".rodata"
+3 -3
arch/arm/mm/proc-sa110.S
··· 146 146 #endif 147 147 148 148 /* 149 - * cpu_sa110_set_pte(ptep, pte) 149 + * cpu_sa110_set_pte_ext(ptep, pte, ext) 150 150 * 151 151 * Set a PTE and flush it out 152 152 */ 153 153 .align 5 154 - ENTRY(cpu_sa110_set_pte) 154 + ENTRY(cpu_sa110_set_pte_ext) 155 155 #ifdef CONFIG_MMU 156 156 str r1, [r0], #-2048 @ linux version 157 157 ··· 222 222 .word cpu_sa110_do_idle 223 223 .word cpu_sa110_dcache_clean_area 224 224 .word cpu_sa110_switch_mm 225 - .word cpu_sa110_set_pte 225 + .word cpu_sa110_set_pte_ext 226 226 .size sa110_processor_functions, . - sa110_processor_functions 227 227 228 228 .section ".rodata"
+3 -3
arch/arm/mm/proc-sa1100.S
··· 159 159 #endif 160 160 161 161 /* 162 - * cpu_sa1100_set_pte(ptep, pte) 162 + * cpu_sa1100_set_pte_ext(ptep, pte, ext) 163 163 * 164 164 * Set a PTE and flush it out 165 165 */ 166 166 .align 5 167 - ENTRY(cpu_sa1100_set_pte) 167 + ENTRY(cpu_sa1100_set_pte_ext) 168 168 #ifdef CONFIG_MMU 169 169 str r1, [r0], #-2048 @ linux version 170 170 ··· 237 237 .word cpu_sa1100_do_idle 238 238 .word cpu_sa1100_dcache_clean_area 239 239 .word cpu_sa1100_switch_mm 240 - .word cpu_sa1100_set_pte 240 + .word cpu_sa1100_set_pte_ext 241 241 .size sa1100_processor_functions, . - sa1100_processor_functions 242 242 243 243 .section ".rodata"
+1 -1
arch/arm/mm/proc-syms.c
··· 17 17 18 18 #ifndef MULTI_CPU 19 19 EXPORT_SYMBOL(cpu_dcache_clean_area); 20 - EXPORT_SYMBOL(cpu_set_pte); 20 + EXPORT_SYMBOL(cpu_set_pte_ext); 21 21 #else 22 22 EXPORT_SYMBOL(processor); 23 23 #endif
+16 -14
arch/arm/mm/proc-v6.S
··· 103 103 mov pc, lr 104 104 105 105 /* 106 - * cpu_v6_set_pte(ptep, pte) 106 + * cpu_v6_set_pte_ext(ptep, pte, ext) 107 107 * 108 108 * Set a level 2 translation table entry. 109 109 * 110 110 * - ptep - pointer to level 2 translation table entry 111 111 * (hardware version is stored at -1024 bytes) 112 112 * - pte - PTE value to store 113 + * - ext - value for extended PTE bits 113 114 * 114 115 * Permissions: 115 116 * YUWD APX AP1 AP0 SVC User ··· 122 121 * 11x0 0 1 0 r/w r/o 123 122 * 1111 0 1 1 r/w r/w 124 123 */ 125 - ENTRY(cpu_v6_set_pte) 124 + ENTRY(cpu_v6_set_pte_ext) 126 125 #ifdef CONFIG_MMU 127 126 str r1, [r0], #-2048 @ linux version 128 127 129 - bic r2, r1, #0x000003f0 130 - bic r2, r2, #0x00000003 131 - orr r2, r2, #PTE_EXT_AP0 | 2 128 + bic r3, r1, #0x000003f0 129 + bic r3, r3, #0x00000003 130 + orr r3, r3, r2 131 + orr r3, r3, #PTE_EXT_AP0 | 2 132 132 133 133 tst r1, #L_PTE_WRITE 134 134 tstne r1, #L_PTE_DIRTY 135 - orreq r2, r2, #PTE_EXT_APX 135 + orreq r3, r3, #PTE_EXT_APX 136 136 137 137 tst r1, #L_PTE_USER 138 - orrne r2, r2, #PTE_EXT_AP1 139 - tstne r2, #PTE_EXT_APX 140 - bicne r2, r2, #PTE_EXT_APX | PTE_EXT_AP0 138 + orrne r3, r3, #PTE_EXT_AP1 139 + tstne r3, #PTE_EXT_APX 140 + bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0 141 141 142 142 tst r1, #L_PTE_YOUNG 143 - biceq r2, r2, #PTE_EXT_APX | PTE_EXT_AP_MASK 143 + biceq r3, r3, #PTE_EXT_APX | PTE_EXT_AP_MASK 144 144 145 145 tst r1, #L_PTE_EXEC 146 - orreq r2, r2, #PTE_EXT_XN 146 + orreq r3, r3, #PTE_EXT_XN 147 147 148 148 tst r1, #L_PTE_PRESENT 149 - moveq r2, #0 149 + moveq r3, #0 150 150 151 - str r2, [r0] 151 + str r3, [r0] 152 152 mcr p15, 0, r0, c7, c10, 1 @ flush_pte 153 153 #endif 154 154 mov pc, lr ··· 235 233 .word cpu_v6_do_idle 236 234 .word cpu_v6_dcache_clean_area 237 235 .word cpu_v6_switch_mm 238 - .word cpu_v6_set_pte 236 + .word cpu_v6_set_pte_ext 239 237 .size v6_processor_functions, . - v6_processor_functions 240 238 241 239 .type cpu_arch_name, #object
+3 -3
arch/arm/mm/proc-xsc3.S
··· 357 357 cpwait_ret lr, ip 358 358 359 359 /* 360 - * cpu_xsc3_set_pte(ptep, pte) 360 + * cpu_xsc3_set_pte_ext(ptep, pte, ext) 361 361 * 362 362 * Set a PTE and flush it out 363 363 * 364 364 */ 365 365 .align 5 366 - ENTRY(cpu_xsc3_set_pte) 366 + ENTRY(cpu_xsc3_set_pte_ext) 367 367 str r1, [r0], #-2048 @ linux version 368 368 369 369 bic r2, r1, #0xff0 @ Keep C, B bits ··· 457 457 .word cpu_xsc3_do_idle 458 458 .word cpu_xsc3_dcache_clean_area 459 459 .word cpu_xsc3_switch_mm 460 - .word cpu_xsc3_set_pte 460 + .word cpu_xsc3_set_pte_ext 461 461 .size xsc3_processor_functions, . - xsc3_processor_functions 462 462 463 463 .section ".rodata"
+3 -3
arch/arm/mm/proc-xscale.S
··· 421 421 cpwait_ret lr, ip 422 422 423 423 /* 424 - * cpu_xscale_set_pte(ptep, pte) 424 + * cpu_xscale_set_pte_ext(ptep, pte, ext) 425 425 * 426 426 * Set a PTE and flush it out 427 427 * 428 428 * Errata 40: must set memory to write-through for user read-only pages. 429 429 */ 430 430 .align 5 431 - ENTRY(cpu_xscale_set_pte) 431 + ENTRY(cpu_xscale_set_pte_ext) 432 432 str r1, [r0], #-2048 @ linux version 433 433 434 434 bic r2, r1, #0xff0 ··· 529 529 .word cpu_xscale_do_idle 530 530 .word cpu_xscale_dcache_clean_area 531 531 .word cpu_xscale_switch_mm 532 - .word cpu_xscale_set_pte 532 + .word cpu_xscale_set_pte_ext 533 533 .size xscale_processor_functions, . - xscale_processor_functions 534 534 535 535 .section ".rodata"
+4 -3
include/asm-arm/cpu-multi32.h
··· 50 50 */ 51 51 void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *mm); 52 52 /* 53 - * Set a PTE 53 + * Set a possibly extended PTE. Non-extended PTEs should 54 + * ignore 'ext'. 54 55 */ 55 - void (*set_pte)(pte_t *ptep, pte_t pte); 56 + void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext); 56 57 } processor; 57 58 58 59 #define cpu_proc_init() processor._proc_init() ··· 61 60 #define cpu_reset(addr) processor.reset(addr) 62 61 #define cpu_do_idle() processor._do_idle() 63 62 #define cpu_dcache_clean_area(addr,sz) processor.dcache_clean_area(addr,sz) 64 - #define cpu_set_pte(ptep, pte) processor.set_pte(ptep, pte) 63 + #define cpu_set_pte_ext(ptep,pte,ext) processor.set_pte_ext(ptep,pte,ext) 65 64 #define cpu_do_switch_mm(pgd,mm) processor.switch_mm(pgd,mm)
+2 -2
include/asm-arm/cpu-single.h
··· 28 28 #define cpu_do_idle __cpu_fn(CPU_NAME,_do_idle) 29 29 #define cpu_dcache_clean_area __cpu_fn(CPU_NAME,_dcache_clean_area) 30 30 #define cpu_do_switch_mm __cpu_fn(CPU_NAME,_switch_mm) 31 - #define cpu_set_pte __cpu_fn(CPU_NAME,_set_pte) 31 + #define cpu_set_pte_ext __cpu_fn(CPU_NAME,_set_pte_ext) 32 32 33 33 #include <asm/page.h> 34 34 ··· 40 40 extern int cpu_do_idle(void); 41 41 extern void cpu_dcache_clean_area(void *, int); 42 42 extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm); 43 - extern void cpu_set_pte(pte_t *ptep, pte_t pte); 43 + extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext); 44 44 extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
+7 -4
include/asm-arm/pgtable.h
··· 21 21 22 22 #include <asm/memory.h> 23 23 #include <asm/arch/vmalloc.h> 24 + #include <asm/pgtable-hwdef.h> 24 25 25 26 /* 26 27 * Just any arbitrary offset to the start of the vmalloc VM area: the ··· 171 170 #define L_PTE_EXEC (1 << 6) 172 171 #define L_PTE_DIRTY (1 << 7) 173 172 #define L_PTE_SHARED (1 << 10) /* shared(v6), coherent(xsc3) */ 174 - #define L_PTE_ASID (1 << 11) /* non-global (use ASID, v6) */ 175 173 176 174 #ifndef __ASSEMBLY__ 177 175 ··· 228 228 #define pfn_pte(pfn,prot) (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))) 229 229 230 230 #define pte_none(pte) (!pte_val(pte)) 231 - #define pte_clear(mm,addr,ptep) set_pte_at((mm),(addr),(ptep), __pte(0)) 231 + #define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0) 232 232 #define pte_page(pte) (pfn_to_page(pte_pfn(pte))) 233 233 #define pte_offset_kernel(dir,addr) (pmd_page_vaddr(*(dir)) + __pte_index(addr)) 234 234 #define pte_offset_map(dir,addr) (pmd_page_vaddr(*(dir)) + __pte_index(addr)) ··· 236 236 #define pte_unmap(pte) do { } while (0) 237 237 #define pte_unmap_nested(pte) do { } while (0) 238 238 239 - #define set_pte(ptep, pte) cpu_set_pte(ptep,pte) 240 - #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) 239 + #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext) 240 + 241 + #define set_pte_at(mm,addr,ptep,pteval) do { \ 242 + set_pte_ext(ptep, pteval, (addr) >= PAGE_OFFSET ? 0 : PTE_EXT_NG); \ 243 + } while (0) 241 244 242 245 /* 243 246 * The following only work if pte_present() is true.