[PARISC] Further work for multiple page sizes

More work towards supporting multiple page sizes on 64-bit. Convert
several places that assumed 64-bit implies 3-level page tables into
tests of PT_NLEVELS. Also some BUG() to BUG_ON() conversions and some
assembler cleanups.

Signed-off-by: Helge Deller <deller@parisc-linux.org>
Signed-off-by: Kyle McMartin <kyle@parisc-linux.org>

Authored by Helge Deller, committed by Kyle McMartin (2fd83038, d668da80)

+198 -102
+31
arch/parisc/Kconfig
···
 	  enable this option otherwise. The 64bit kernel is significantly bigger
 	  and slower than the 32bit one.
 
+choice
+	prompt "Kernel page size"
+	default PARISC_PAGE_SIZE_4KB  if !64BIT
+	default PARISC_PAGE_SIZE_4KB  if 64BIT
+#	default PARISC_PAGE_SIZE_16KB if 64BIT
+
+config PARISC_PAGE_SIZE_4KB
+	bool "4KB"
+	help
+	  This lets you select the page size of the kernel.  For best
+	  performance, a page size of 16KB is recommended.  For best
+	  compatibility with 32bit applications, a page size of 4KB should be
+	  selected (the vast majority of 32bit binaries work perfectly fine
+	  with a larger page size).
+
+	  4KB		For best 32bit compatibility
+	  16KB		For best performance
+	  64KB		For best performance, might give more overhead.
+
+	  If you don't know what to do, choose 4KB.
+
+config PARISC_PAGE_SIZE_16KB
+	bool "16KB (EXPERIMENTAL)"
+	depends on PA8X00 && EXPERIMENTAL
+
+config PARISC_PAGE_SIZE_64KB
+	bool "64KB (EXPERIMENTAL)"
+	depends on PA8X00 && EXPERIMENTAL
+
+endchoice
+
 config SMP
 	bool "Symmetric multi-processing support"
 	---help---
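Since this is a Kconfig choice, exactly one of the three symbols ends up set in the final .config, and page.h (below) turns that into PAGE_SHIFT. An illustrative .config fragment after taking the default:

    CONFIG_PARISC_PAGE_SIZE_4KB=y
    # CONFIG_PARISC_PAGE_SIZE_16KB is not set
    # CONFIG_PARISC_PAGE_SIZE_64KB is not set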
+3
arch/parisc/kernel/asm-offsets.c
···
 	DEFINE(ASM_PGD_ENTRY_SIZE, PGD_ENTRY_SIZE);
 	DEFINE(ASM_PMD_ENTRY_SIZE, PMD_ENTRY_SIZE);
 	DEFINE(ASM_PTE_ENTRY_SIZE, PTE_ENTRY_SIZE);
+	DEFINE(ASM_PFN_PTE_SHIFT, PFN_PTE_SHIFT);
 	DEFINE(ASM_PT_INITIAL, PT_INITIAL);
 	DEFINE(ASM_PAGE_SIZE, PAGE_SIZE);
+	DEFINE(ASM_PAGE_SIZE_DIV64, PAGE_SIZE/64);
+	DEFINE(ASM_PAGE_SIZE_DIV128, PAGE_SIZE/128);
 	BLANK();
 	DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip));
 	DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space));
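asm-offsets.c is the usual bridge for getting C constants into assembly: each DEFINE() is compiled, and the build machinery scrapes the evaluated constant out of the compiler's assembly output into asm-offsets.h, where .S files can use it as a plain immediate. A minimal sketch of the mechanism, assuming the era's standard DEFINE idiom:

    /* the compiler evaluates "val" and embeds the result in its asm
     * output; a build script then turns each "->SYM value" marker into
     * "#define SYM value" in the generated asm-offsets.h */
    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))

ASM_PAGE_SIZE_DIV64 and ASM_PAGE_SIZE_DIV128 exist so the page copy/clear loops in pacache.S can load their iteration counts with a single ldi, whatever page size is configured.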
+22 -14
arch/parisc/kernel/entry.S
···
 	 * all ILP32 processes and all the kernel for machines with
 	 * under 4GB of memory) */
 	.macro		L3_ptep pgd,pte,index,va,fault
+#if PT_NLEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
 	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
 	copy		%r0,\pte
-	extrd,u,*=	\va,31,32,%r0
+	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
 	ldw,s		\index(\pgd),\pgd
-	extrd,u,*=	\va,31,32,%r0
+	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
 	bb,>=,n		\pgd,_PxD_PRESENT_BIT,\fault
-	extrd,u,*=	\va,31,32,%r0
+	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
 	shld		\pgd,PxD_VALUE_SHIFT,\index
-	extrd,u,*=	\va,31,32,%r0
+	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
 	copy		\index,\pgd
-	extrd,u,*<>	\va,31,32,%r0
+	extrd,u,*<>	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
 	ldo		ASM_PGD_PMD_OFFSET(\pgd),\pgd
+#endif
 	L2_ptep		\pgd,\pte,\index,\va,\fault
 	.endm
···
 	extrd,u,*=	\pte,_PAGE_GATEWAY_BIT+32,1,%r0
 	depd		%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */
 
-	/* Get rid of prot bits and convert to page addr for iitlbt and idtlbt */
+	/* Enforce uncacheable pages.
+	 * This should ONLY be use for MMIO on PA 2.0 machines.
+	 * Memory/DMA is cache coherent on all PA2.0 machines we support
+	 * (that means T-class is NOT supported) and the memory controllers
+	 * on most of those machines only handles cache transactions.
+	 */
+	extrd,u,*=	\pte,_PAGE_NO_CACHE_BIT+32,1,%r0
+	depi		1,12,1,\prot
 
-	depd		%r0,63,PAGE_SHIFT,\pte
-	extrd,s		\pte,(63-PAGE_SHIFT)+(63-58),64-PAGE_SHIFT,\pte
+	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
+	extrd,u		\pte,(63-ASM_PFN_PTE_SHIFT)+(63-58),64-PAGE_SHIFT,\pte
+	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,63-58,\pte
 	.endm
 
 	/* Identical macro to make_insert_tlb above, except it
···
 
 	/* Get rid of prot bits and convert to page addr for iitlba */
 
-	depi		0,31,PAGE_SHIFT,\pte
+	depi		_PAGE_SIZE_ENCODING_DEFAULT,31,ASM_PFN_PTE_SHIFT,\pte
 	extru		\pte,24,25,\pte
-
 	.endm
···
 	 */
 
 	/* adjust isr/ior. */
-
-	extrd,u		%r16,63,7,%r1	/* get high bits from isr for ior */
-	depd		%r1,31,7,%r17	/* deposit them into ior */
-	depdi		0,63,7,%r16	/* clear them from isr */
+	extrd,u		%r16,63,SPACEID_SHIFT,%r1  /* get high bits from isr for ior */
+	depd		%r1,31,SPACEID_SHIFT,%r17  /* deposit them into ior */
+	depdi		0,63,SPACEID_SHIFT,%r16    /* clear them from isr */
 #endif
 	STREG		%r16, PT_ISR(%r29)
 	STREG		%r17, PT_IOR(%r29)
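The interesting part of the make_insert_tlb rewrite is its last two instructions, which turn a PTE into the operand format iitlbt/idtlbt expect: the PFN is extracted (dropping the protection bits below PFN_PTE_SHIFT) and the PA 2.0 page size encoding is deposited into the low bits. A C model of what that pair computes, illustrative only and assuming the 4 kB configuration (PAGE_SHIFT == PFN_PTE_SHIFT == 12, _PAGE_SIZE_ENCODING_DEFAULT == _PAGE_SIZE_ENCODING_4K == 0); note PA-RISC numbers bits MSB-first:

    #include <stdint.h>

    static uint64_t pte_to_tlb_format(uint64_t pte)
    {
            /* extrd,u \pte,56,52 : right-justify the 52-bit field ending
             * at MSB-numbered bit 56, i.e. shift right by 63-56 = 7 */
            uint64_t v = (pte >> 7) & ((1ULL << 52) - 1);
            /* depdi 0,63,5 : deposit the size encoding into the low 5
             * bits, overwriting what remains of the protection bits */
            v = (v & ~0x1fULL) | 0x0;   /* _PAGE_SIZE_ENCODING_4K */
            return v;                   /* == (pfn << 5) | encoding */
    }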
+8 -7
arch/parisc/kernel/head.S
···
 	mtctl		%r4,%cr24	/* Initialize kernel root pointer */
 	mtctl		%r4,%cr25	/* Initialize user root pointer */
 
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
 	/* Set pmd in pgd */
 	load32		PA(pmd0),%r5
 	shrd		%r5,PxD_VALUE_SHIFT,%r3
-	ldo		(PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
+	ldo		(PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
 	stw		%r3,ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4)
 	ldo		ASM_PMD_ENTRY*ASM_PMD_ENTRY_SIZE(%r5),%r4
 #else
 	/* 2-level page table, so pmd == pgd */
-	ldo		ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4),%r4
+	ldo		ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4),%r4
 #endif
 
 	/* Fill in pmd with enough pte directories */
···
 	stw		%r3,0(%r4)
 	ldo		(ASM_PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3
 	addib,>		-1,%r1,1b
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
 	ldo		ASM_PMD_ENTRY_SIZE(%r4),%r4
 #else
 	ldo		ASM_PGD_ENTRY_SIZE(%r4),%r4
···
 
 	/* Now initialize the PTEs themselves */
-	ldo		_PAGE_KERNEL(%r0),%r3 /* Hardwired 0 phys addr start */
+	ldo		0+_PAGE_KERNEL(%r0),%r3 /* Hardwired 0 phys addr start */
+	ldi		(1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
 	load32		PA(pg0),%r1
 
 $pgt_fill_loop:
 	STREGM		%r3,ASM_PTE_ENTRY_SIZE(%r1)
-	ldo		ASM_PAGE_SIZE(%r3),%r3
-	bb,>=		%r3,31-KERNEL_INITIAL_ORDER,$pgt_fill_loop
+	ldo		(1<<PFN_PTE_SHIFT)(%r3),%r3	/* add one PFN */
+	addib,>		-1,%r11,$pgt_fill_loop
 	nop
 
 	/* Load the return address...er...crash 'n burn */
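The old $pgt_fill_loop advanced %r3 by ASM_PAGE_SIZE and stopped when the value reached 1 << KERNEL_INITIAL_ORDER, which only works while the PTE value is the physical address itself. With the PFN now stored at PFN_PTE_SHIFT, the loop instead counts PFNs down in %r11. As C, roughly (illustrative; the function name is hypothetical and the constants are the 64-bit, 4 kB case: KERNEL_INITIAL_ORDER = 24, PAGE_SHIFT = PFN_PTE_SHIFT = 12):

    /* fill the initial page table: one PTE per page of the initially
     * mapped kernel memory, starting at physical address 0 */
    static void fill_initial_ptes(unsigned long *ptep, unsigned long pte_kernel)
    {
            unsigned long pte = pte_kernel;      /* PFN 0 + kernel prot bits */
            unsigned long n = 1UL << (24 - 12);  /* PFN count, as in the new ldi */

            while (n--) {
                    *ptep++ = pte;               /* STREGM */
                    pte += 1UL << 12;            /* ldo: add one PFN */
            }
    }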
+5 -5
arch/parisc/kernel/init_task.c
···
 	__attribute__((aligned(128))) __attribute__((__section__(".data.init_task"))) =
 		{ INIT_THREAD_INFO(init_task) };
 
-#ifdef __LP64__
+#if PT_NLEVELS == 3
 /* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout
  * with the first pmd adjacent to the pgd and below it. gcc doesn't actually
  * guarantee that global objects will be laid out in memory in the same order
  * as the order of declaration, so put these in different sections and use
  * the linker script to order them. */
-pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((aligned(PAGE_SIZE))) __attribute__ ((__section__ (".data.vm0.pmd"))) = { {0}, };
-
+pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data.vm0.pmd"), aligned(PAGE_SIZE)));
 #endif
-pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((aligned(PAGE_SIZE))) __attribute__ ((__section__ (".data.vm0.pgd"))) = { {0}, };
-pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((aligned(PAGE_SIZE))) __attribute__ ((__section__ (".data.vm0.pte"))) = { {0}, };
+
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data.vm0.pgd"), aligned(PAGE_SIZE)));
+pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data.vm0.pte"), aligned(PAGE_SIZE)));
 
 /*
  * Initial task structure.
+12 -13
arch/parisc/kernel/pacache.S
···
 	 */
 
 	/* pcxt_ssm_bug - relied upon translation! PA 2.0 Arch. F-4 and F-5 */
-	rsm	PSW_SM_I, %r19		/* save I-bit state */
+	rsm		PSW_SM_I, %r19	/* save I-bit state */
 	load32		PA(1f), %r1
 	nop
 	nop
···
 	rfi
 	nop
 
-1:      ldil		L%PA(cache_info), %r1
-	ldo		R%PA(cache_info)(%r1), %r1
+1:      load32		PA(cache_info), %r1
 
 	/* Flush Instruction Tlb */
···
 	.entry
 
 	mtsp		%r0, %sr1
-	ldil		L%cache_info, %r1
-	ldo		R%cache_info(%r1), %r1
+	load32		cache_info, %r1
 
 	/* Flush Instruction Cache */
···
 	.entry
 
 	mtsp		%r0, %sr1
-	ldil		L%cache_info, %r1
-	ldo		R%cache_info(%r1), %r1
+	load32		cache_info, %r1
 
 	/* Flush Data Cache */
···
 	 */
 
 	ldd		0(%r25), %r19
-	ldi		32, %r1			/* PAGE_SIZE/128 == 32 */
+	ldi		ASM_PAGE_SIZE_DIV128, %r1
+
 	ldw		64(%r25), %r0		/* prefetch 1 cacheline ahead */
 	ldw		128(%r25), %r0		/* prefetch 2 */
···
 	 * use ldd/std on a 32 bit kernel.
 	 */
 	ldw		0(%r25), %r19
-	ldi		64, %r1		/* PAGE_SIZE/64 == 64 */
+	ldi		ASM_PAGE_SIZE_DIV64, %r1
 
 1:
 	ldw		4(%r25), %r20
···
 	sub		%r25, %r1, %r23		/* move physical addr into non shadowed reg */
 
 	ldil		L%(TMPALIAS_MAP_START), %r28
+	/* FIXME for different page sizes != 4k */
 #ifdef CONFIG_64BIT
 	extrd,u		%r26,56,32, %r26	/* convert phys addr to tlb insert format */
 	extrd,u		%r23,56,32, %r23	/* convert phys addr to tlb insert format */
···
 #ifdef CONFIG_64BIT
 #if (TMPALIAS_MAP_START >= 0x80000000)
 	depdi		0, 31,32, %r28		/* clear any sign extension */
+	/* FIXME: page size dependend */
 #endif
 	extrd,u		%r26, 56,32, %r26	/* convert phys addr to tlb insert format */
 	depd		%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
···
 	pdtlb		0(%r28)
 
 #ifdef CONFIG_64BIT
-	ldi		32, %r1			/* PAGE_SIZE/128 == 32 */
+	ldi		ASM_PAGE_SIZE_DIV128, %r1
 
 	/* PREFETCH (Write) has not (yet) been proven to help here */
-	/* #define	PREFETCHW_OP	ldd	256(%0), %r0 */
+	/* #define	PREFETCHW_OP	ldd	256(%0), %r0 */
 
 1:	std		%r0, 0(%r28)
 	std		%r0, 8(%r28)
···
 	ldo		128(%r28), %r28
 
 #else	/* ! CONFIG_64BIT */
-
-	ldi		64, %r1			/* PAGE_SIZE/64 == 64 */
+	ldi		ASM_PAGE_SIZE_DIV64, %r1
 
 1:
 	stw		%r0, 0(%r28)
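The hardcoded ldi counts were only correct for 4 kB pages: each 64-bit clear/copy iteration handles 128 bytes and each 32-bit iteration 64 bytes, so the iteration counts must scale with the configured page size (worked out here for the three choices):

    page size    PAGE_SIZE/128 (64-bit)    PAGE_SIZE/64 (32-bit)
      4 kB               32                       64     (the old ldi values)
     16 kB              128                      256
     64 kB              512                     1024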
+5 -5
arch/parisc/kernel/syscall.S
···
 	 * pointers.
 	 */
 
-	.align 4096
+	.align ASM_PAGE_SIZE
 linux_gateway_page:
 
 	/* ADDRESS 0x00 to 0xb0 = 176 bytes / 4 bytes per insn = 44 insns */
···
 end_compare_and_swap:
 
 	/* Make sure nothing else is placed on this page */
-	.align 4096
+	.align ASM_PAGE_SIZE
 	.export end_linux_gateway_page
 end_linux_gateway_page:
···
 	.section .rodata,"a"
 
-	.align 4096
+	.align ASM_PAGE_SIZE
 	/* Light-weight-syscall table */
 	/* Start of lws table. */
 	.export lws_table
···
 	LWS_ENTRY(compare_and_swap64)	/* 1 - ELF64 Atomic compare and swap */
 	/* End of lws table */
 
-	.align 4096
+	.align ASM_PAGE_SIZE
 	.export sys_call_table
 .Lsys_call_table:
 sys_call_table:
 #include "syscall_table.S"
 
 #ifdef CONFIG_64BIT
-	.align 4096
+	.align ASM_PAGE_SIZE
 	.export sys_call_table64
 .Lsys_call_table64:
 sys_call_table64:
+32 -22
arch/parisc/kernel/vmlinux.lds.S
···
  * Copyright (C) 2000 Michael Ang <mang with subcarrier.org>
  * Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
  * Copyright (C) 2003 James Bottomley <jejb with parisc-linux.org>
+ * Copyright (C) 2006 Helge Deller <deller@gmx.de>
  *
  *
  * This program is free software; you can redistribute it and/or modify
···
 /* needed for the processor specific cache alignment size */
 #include <asm/cache.h>
 #include <asm/page.h>
+#include <asm/asm-offsets.h>
 
 /* ld script to make hppa Linux kernel */
 #ifndef CONFIG_64BIT
···
 	RODATA
 
 	/* writeable */
-	. = ALIGN(4096);		/* Make sure this is page aligned so
+	. = ALIGN(ASM_PAGE_SIZE);	/* Make sure this is page aligned so
 					   that we can properly leave these
 					   as writable */
 	data_start = .;
···
 	__start___unwind = .;	/* unwind info */
 	.PARISC.unwind : { *(.PARISC.unwind) }
 	__stop___unwind = .;
 
+	/* rarely changed data like cpu maps */
+	. = ALIGN(16);
+	.data.read_mostly : { *(.data.read_mostly) }
+
+	. = ALIGN(L1_CACHE_BYTES);
 	.data : {			/* Data */
 		*(.data)
-		*(.data.vm0.pmd)
-		*(.data.vm0.pgd)
-		*(.data.vm0.pte)
 		CONSTRUCTORS
 	}
-
-	. = ALIGN(4096);
-	/* nosave data is really only used for software suspend...it's here
-	 * just in case we ever implement it */
-	__nosave_begin = .;
-	.data_nosave : { *(.data.nosave) }
-	. = ALIGN(4096);
-	__nosave_end = .;
 
 	. = ALIGN(L1_CACHE_BYTES);
 	.data.cacheline_aligned : { *(.data.cacheline_aligned) }
···
 	. = ALIGN(16);
 	.data.lock_aligned : { *(.data.lock_aligned) }
 
-	/* rarely changed data like cpu maps */
-	. = ALIGN(16);
-	.data.read_mostly : { *(.data.read_mostly) }
+	. = ALIGN(ASM_PAGE_SIZE);
+	/* nosave data is really only used for software suspend...it's here
+	 * just in case we ever implement it */
+	__nosave_begin = .;
+	.data_nosave : { *(.data.nosave) }
+	. = ALIGN(ASM_PAGE_SIZE);
+	__nosave_end = .;
 
 	_edata = .;			/* End of data section */
 
+	__bss_start = .;		/* BSS */
+	/* page table entries need to be PAGE_SIZE aligned */
+	. = ALIGN(ASM_PAGE_SIZE);
+	.data.vmpages : {
+		*(.data.vm0.pmd)
+		*(.data.vm0.pgd)
+		*(.data.vm0.pte)
+	}
+	.bss : { *(.bss) *(COMMON) }
+	__bss_stop = .;
+
+
+	/* assembler code expects init_task to be 16k aligned */
 	. = ALIGN(16384);		/* init_task */
 	.data.init_task : { *(.data.init_task) }
···
 	.dlt : { *(.dlt) }
 #endif
 
+	/* reserve space for interrupt stack by aligning __init* to 16k */
 	. = ALIGN(16384);
 	__init_begin = .;
 	.init.text : {
···
 	   from .altinstructions and .eh_frame */
 	.exit.text : { *(.exit.text) }
 	.exit.data : { *(.exit.data) }
-	. = ALIGN(4096);
+	. = ALIGN(ASM_PAGE_SIZE);
 	__initramfs_start = .;
 	.init.ramfs : { *(.init.ramfs) }
 	__initramfs_end = .;
···
 	__per_cpu_start = .;
 	.data.percpu : { *(.data.percpu) }
 	__per_cpu_end = .;
-	. = ALIGN(4096);
+	. = ALIGN(ASM_PAGE_SIZE);
 	__init_end = .;
 	/* freed after init ends here */
 
-	__bss_start = .;		/* BSS */
-	.bss : { *(.bss) *(COMMON) }
-	__bss_stop = .;
-
 	_end = . ;
 
 	/* Sections to be discarded */
+15 -13
arch/parisc/mm/init.c
···
  *  changed by Philipp Rumpf
  *  Copyright 1999 Philipp Rumpf (prumpf@tux.org)
  *  Copyright 2004 Randolph Chung (tausq@debian.org)
+ *  Copyright 2006 Helge Deller (deller@gmx.de)
  *
  */
···
 void free_initmem(void)
 {
-	unsigned long addr;
+	unsigned long addr, init_begin, init_end;
 
 	printk(KERN_INFO "Freeing unused kernel memory: ");
 
 #ifdef CONFIG_DEBUG_KERNEL
···
 	local_irq_enable();
 #endif
 
-	addr = (unsigned long)(&__init_begin);
-	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
+	/* align __init_begin and __init_end to page size,
+	   ignoring linker script where we might have tried to save RAM */
+	init_begin = PAGE_ALIGN((unsigned long)(&__init_begin));
+	init_end = PAGE_ALIGN((unsigned long)(&__init_end));
+	for (addr = init_begin; addr < init_end; addr += PAGE_SIZE) {
 		ClearPageReserved(virt_to_page(addr));
 		init_page_count(virt_to_page(addr));
 		free_page(addr);
···
 	/* set up a new led state on systems shipped LED State panel */
 	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
 
-	printk("%luk freed\n", (unsigned long)(&__init_end - &__init_begin) >> 10);
+	printk("%luk freed\n", (init_end - init_begin) >> 10);
 }
···
 	 * Map the fault vector writable so we can
 	 * write the HPMC checksum.
 	 */
+#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
 	if (address >= ro_start && address < ro_end
 			&& address != fv_addr
 			&& address != gw_addr)
 	    pte = __mk_pte(address, PAGE_KERNEL_RO);
 	else
+#endif
 	    pte = __mk_pte(address, pgprot);
 
 	if (address >= end_paddr)
···
 			flush_tlb_all();	/* flush_tlb_all() calls recycle_sids() */
 			spin_lock(&sid_lock);
 		}
-		if (free_space_ids == 0)
-			BUG();
+		BUG_ON(free_space_ids == 0);
 	}
 
 	free_space_ids--;
···
 	spin_lock(&sid_lock);
 
-	if (*dirty_space_offset & (1L << index))
-	    BUG(); /* attempt to free space id twice */
+	BUG_ON(*dirty_space_offset & (1L << index)); /* attempt to free space id twice */
 
 	*dirty_space_offset |= (1L << index);
 	dirty_space_ids++;
···
 static unsigned long recycle_ndirty;
 static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
-static unsigned int recycle_inuse = 0;
+static unsigned int recycle_inuse;
 
 void flush_tlb_all(void)
 {
···
 	do_recycle = 0;
 	spin_lock(&sid_lock);
 	if (dirty_space_ids > RECYCLE_THRESHOLD) {
-	    if (recycle_inuse) {
-		BUG();  /* FIXME: Use a semaphore/wait queue here */
-	    }
+	    BUG_ON(recycle_inuse);  /* FIXME: Use a semaphore/wait queue here */
 	    get_dirty_sids(&recycle_ndirty,recycle_dirty_array);
 	    recycle_inuse++;
 	    do_recycle++;
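The conversions lean on two standard kernel helpers; their definitions in this era are roughly (simplified, quoted from memory):

    /* BUG_ON() folds the test into the assertion */
    #define BUG_ON(condition)  do { if (unlikely(condition)) BUG(); } while (0)

    /* PAGE_ALIGN() rounds an address up to the next page boundary */
    #define PAGE_ALIGN(addr)   (((addr) + PAGE_SIZE - 1) & PAGE_MASK)

The PAGE_ALIGN() in free_initmem() matters once the linker script only guarantees 16 kB alignment for __init_begin/__init_end: with a larger page size the boundaries may fall inside a page, so both are rounded to page granularity before freeing.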
+21 -4
include/asm-parisc/page.h
···
 #ifndef _PARISC_PAGE_H
 #define _PARISC_PAGE_H
 
-/* PAGE_SHIFT determines the page size */
-#define PAGE_SHIFT	12
-#define PAGE_SIZE	(1UL << PAGE_SHIFT)
-#define PAGE_MASK	(~(PAGE_SIZE-1))
+#if !defined(__KERNEL__)
+/* this is for userspace applications (4k page size) */
+# define PAGE_SHIFT	12	/* 4k */
+# define PAGE_SIZE	(1UL << PAGE_SHIFT)
+# define PAGE_MASK	(~(PAGE_SIZE-1))
+#endif
+
 
 #ifdef __KERNEL__
 #include <linux/config.h>
+
+#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
+# define PAGE_SHIFT	12	/* 4k */
+#elif defined(CONFIG_PARISC_PAGE_SIZE_16KB)
+# define PAGE_SHIFT	14	/* 16k */
+#elif defined(CONFIG_PARISC_PAGE_SIZE_64KB)
+# define PAGE_SHIFT	16	/* 64k */
+#else
+# error "unknown default kernel page size"
+#endif
+#define PAGE_SIZE	(1UL << PAGE_SHIFT)
+#define PAGE_MASK	(~(PAGE_SIZE-1))
+
+
 #ifndef __ASSEMBLY__
 
 #include <asm/types.h>
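Note the userspace branch keeps PAGE_SHIFT at 12 unconditionally: the exported header cannot know how the running kernel was configured. A userspace program that actually cares should ask the kernel at runtime rather than trust the header constant, e.g.:

    #include <stdio.h>
    #include <unistd.h>

    /* prints the page size of the running kernel, which may be 4k, 16k
     * or 64k regardless of what the compile-time header said */
    int main(void)
    {
            printf("page size: %ld\n", sysconf(_SC_PAGESIZE));
            return 0;
    }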
+44 -19
include/asm-parisc/pgtable.h
···
 #define  ISTACK_SIZE  32768 /* Interrupt Stack Size */
 #define  ISTACK_ORDER 3
 
-/* This is the size of the initially mapped kernel memory (i.e. currently
- * 0 to 1<<23 == 8MB */
+/* This is the size of the initially mapped kernel memory */
 #ifdef CONFIG_64BIT
-#define KERNEL_INITIAL_ORDER	24
+#define KERNEL_INITIAL_ORDER	24	/* 0 to 1<<24 = 16MB */
 #else
-#define KERNEL_INITIAL_ORDER	23
+#define KERNEL_INITIAL_ORDER	23	/* 0 to 1<<23 = 8MB */
 #endif
 #define KERNEL_INITIAL_SIZE	(1 << KERNEL_INITIAL_ORDER)
 
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB)
 #define PT_NLEVELS	3
 #define PGD_ORDER	1 /* Number of pages per pgd */
 #define PMD_ORDER	1 /* Number of pages per pmd */
···
 #define MAX_ADDRBITS	(PGDIR_SHIFT + BITS_PER_PGD)
 #define MAX_ADDRESS	(1UL << MAX_ADDRBITS)
 
-#define SPACEID_SHIFT (MAX_ADDRBITS - 32)
+#define SPACEID_SHIFT	(MAX_ADDRBITS - 32)
 
 /* This calculates the number of initial pages we need for the initial
  * page tables */
-#define PT_INITIAL	(1 << (KERNEL_INITIAL_ORDER - PMD_SHIFT))
+#if (KERNEL_INITIAL_ORDER) >= (PMD_SHIFT)
+# define PT_INITIAL	(1 << (KERNEL_INITIAL_ORDER - PMD_SHIFT))
+#else
+# define PT_INITIAL	(1)  /* all initial PTEs fit into one page */
+#endif
 
 /*
  * pgd entries used up by user/kernel:
···
  * to zero */
 #define PTE_SHIFT	xlate_pabit(_PAGE_USER_BIT)
 
+/* PFN_PTE_SHIFT defines the shift of a PTE value to access the PFN field */
+#define PFN_PTE_SHIFT	12
+
+
 /* this is how many bits may be used by the file functions */
 #define PTE_FILE_MAX_BITS	(BITS_PER_LONG - PTE_SHIFT)
···
 /* The pgd/pmd contains a ptr (in phys addr space); since all pgds/pmds
  * are page-aligned, we don't care about the PAGE_OFFSET bits, except
  * for a few meta-information bits, so we shift the address to be
- * able to effectively address 40-bits of physical address space. */
+ * able to effectively address 40/42/44-bits of physical address space
+ * depending on 4k/16k/64k PAGE_SIZE */
 #define _PxD_PRESENT_BIT   31
 #define _PxD_ATTACHED_BIT  30
 #define _PxD_VALID_BIT     29
···
 #define PxD_FLAG_VALID     (1 << xlate_pabit(_PxD_VALID_BIT))
 #define PxD_FLAG_MASK     (0xf)
 #define PxD_FLAG_SHIFT    (4)
-#define PxD_VALUE_SHIFT   (8)
+#define PxD_VALUE_SHIFT   (8)	/* (PAGE_SHIFT-PxD_FLAG_SHIFT) */
 
 #ifndef __ASSEMBLY__
···
 #define __S110	PAGE_RWX
 #define __S111	PAGE_RWX
 
+
 extern pgd_t swapper_pg_dir[]; /* declared in init_task.c */
 
 /* initial page tables for 0-8MB for kernel */
···
 #define pgd_flag(x)	(pgd_val(x) & PxD_FLAG_MASK)
 #define pgd_address(x)	((unsigned long)(pgd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
 
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
 /* The first entry of the permanent pmd is not there if it contains
  * the gateway marker */
 #define pmd_none(x)	(!pmd_val(x) || pmd_flag(x) == PxD_FLAG_ATTACHED)
···
 #define pmd_bad(x)	(!(pmd_flag(x) & PxD_FLAG_VALID))
 #define pmd_present(x)	(pmd_flag(x) & PxD_FLAG_PRESENT)
 static inline void pmd_clear(pmd_t *pmd) {
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
 	if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
 		/* This is the entry pointing to the permanent pmd
 		 * attached to the pgd; cannot clear it */
···
 #define pgd_bad(x)	(!(pgd_flag(x) & PxD_FLAG_VALID))
 #define pgd_present(x)	(pgd_flag(x) & PxD_FLAG_PRESENT)
 static inline void pgd_clear(pgd_t *pgd) {
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
 	if(pgd_flag(*pgd) & PxD_FLAG_ATTACHED)
 		/* This is the permanent pmd attached to the pgd; cannot
 		 * free it */
···
 ({									\
 	pte_t __pte;							\
 									\
-	pte_val(__pte) = ((addr)+pgprot_val(pgprot));			\
+	pte_val(__pte) = ((((addr)>>PAGE_SHIFT)<<PFN_PTE_SHIFT) + pgprot_val(pgprot));	\
 									\
 	__pte;								\
 })
···
 static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
 {
 	pte_t pte;
-	pte_val(pte) = (pfn << PAGE_SHIFT) | pgprot_val(pgprot);
+	pte_val(pte) = (pfn << PFN_PTE_SHIFT) | pgprot_val(pgprot);
 	return pte;
 }
-
-/* This takes a physical page address that is used by the remapping functions */
-#define mk_pte_phys(physpage, pgprot) \
-({ pte_t __pte; pte_val(__pte) = physpage + pgprot_val(pgprot); __pte; })
 
 extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 { pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }
 
 /* Permanent address of a page. On parisc we don't have highmem. */
 
-#define pte_pfn(x)	(pte_val(x) >> PAGE_SHIFT)
+#define pte_pfn(x)	(pte_val(x) >> PFN_PTE_SHIFT)
 
 #define pte_page(pte)	(pfn_to_page(pte_pfn(pte)))
···
 #define pte_same(A,B)	(pte_val(A) == pte_val(B))
 
 #endif /* !__ASSEMBLY__ */
+
+
+/* TLB page size encoding - see table 3-1 in parisc20.pdf */
+#define _PAGE_SIZE_ENCODING_4K		0
+#define _PAGE_SIZE_ENCODING_16K		1
+#define _PAGE_SIZE_ENCODING_64K		2
+#define _PAGE_SIZE_ENCODING_256K	3
+#define _PAGE_SIZE_ENCODING_1M		4
+#define _PAGE_SIZE_ENCODING_4M		5
+#define _PAGE_SIZE_ENCODING_16M		6
+#define _PAGE_SIZE_ENCODING_64M		7
+
+#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
+# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_4K
+#elif defined(CONFIG_PARISC_PAGE_SIZE_16KB)
+# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_16K
+#elif defined(CONFIG_PARISC_PAGE_SIZE_64KB)
+# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_64K
+#endif
+
 
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
 		remap_pfn_range(vma, vaddr, pfn, size, prot)
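The 40/42/44-bit figure in the updated PxD comment follows from reading PxD_VALUE_SHIFT as PAGE_SHIFT - PxD_FLAG_SHIFT (the parenthetical added above; the constant itself still reads 8, matching the 4 kB case): a pgd/pmd entry is a 32-bit word whose value is shifted left by that amount to form a physical address, so the reachable physical address space works out to

     4 kB pages:  32 + (12 - 4) = 40 bits
    16 kB pages:  32 + (14 - 4) = 42 bits
    64 kB pages:  32 + (16 - 4) = 44 bits

The _PAGE_SIZE_ENCODING_* values map straight onto the PA 2.0 TLB size field referenced by the new depdi in entry.S, with _PAGE_SIZE_ENCODING_DEFAULT selected once at build time from the Kconfig choice.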