Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Pull rationalise-regions into release branch

Tony Luck 32905802 bcdd3a91

+50 -47
+1 -1
arch/ia64/kernel/sys_ia64.c
··· 35 35 return -ENOMEM; 36 36 37 37 #ifdef CONFIG_HUGETLB_PAGE 38 - if (REGION_NUMBER(addr) == REGION_HPAGE) 38 + if (REGION_NUMBER(addr) == RGN_HPAGE) 39 39 addr = 0; 40 40 #endif 41 41 if (!addr)
+4 -4
arch/ia64/mm/hugetlbpage.c
··· 76 76 return -EINVAL; 77 77 if (addr & ~HPAGE_MASK) 78 78 return -EINVAL; 79 - if (REGION_NUMBER(addr) != REGION_HPAGE) 79 + if (REGION_NUMBER(addr) != RGN_HPAGE) 80 80 return -EINVAL; 81 81 82 82 return 0; ··· 87 87 struct page *page; 88 88 pte_t *ptep; 89 89 90 - if (REGION_NUMBER(addr) != REGION_HPAGE) 90 + if (REGION_NUMBER(addr) != RGN_HPAGE) 91 91 return ERR_PTR(-EINVAL); 92 92 93 93 ptep = huge_pte_offset(mm, addr); ··· 142 142 return -ENOMEM; 143 143 if (len & ~HPAGE_MASK) 144 144 return -EINVAL; 145 - /* This code assumes that REGION_HPAGE != 0. */ 146 - if ((REGION_NUMBER(addr) != REGION_HPAGE) || (addr & (HPAGE_SIZE - 1))) 145 + /* This code assumes that RGN_HPAGE != 0. */ 146 + if ((REGION_NUMBER(addr) != RGN_HPAGE) || (addr & (HPAGE_SIZE - 1))) 147 147 addr = HPAGE_REGION_BASE; 148 148 else 149 149 addr = ALIGN(addr, HPAGE_SIZE);
+1 -1
include/asm-ia64/io.h
··· 23 23 #define __SLOW_DOWN_IO do { } while (0) 24 24 #define SLOW_DOWN_IO do { } while (0) 25 25 26 - #define __IA64_UNCACHED_OFFSET 0xc000000000000000UL /* region 6 */ 26 + #define __IA64_UNCACHED_OFFSET RGN_BASE(RGN_UNCACHED) 27 27 28 28 /* 29 29 * The legacy I/O space defined by the ia64 architecture supports only 65536 ports, but
+6 -1
include/asm-ia64/mmu_context.h
··· 19 19 20 20 #define ia64_rid(ctx,addr) (((ctx) << 3) | (addr >> 61)) 21 21 22 + # include <asm/page.h> 22 23 # ifndef __ASSEMBLY__ 23 24 24 25 #include <linux/compiler.h> ··· 123 122 unsigned long rid_incr = 0; 124 123 unsigned long rr0, rr1, rr2, rr3, rr4, old_rr4; 125 124 126 - old_rr4 = ia64_get_rr(0x8000000000000000UL); 125 + old_rr4 = ia64_get_rr(RGN_BASE(RGN_HPAGE)); 127 126 rid = context << 3; /* make space for encoding the region number */ 128 127 rid_incr = 1 << 8; 129 128 ··· 135 134 rr4 = rr0 + 4*rid_incr; 136 135 #ifdef CONFIG_HUGETLB_PAGE 137 136 rr4 = (rr4 & (~(0xfcUL))) | (old_rr4 & 0xfc); 137 + 138 + # if RGN_HPAGE != 4 139 + # error "reload_context assumes RGN_HPAGE is 4" 140 + # endif 138 141 #endif 139 142 140 143 ia64_set_rr(0x0000000000000000UL, rr0);
+18 -9
include/asm-ia64/page.h
··· 13 13 #include <asm/types.h> 14 14 15 15 /* 16 + * The top three bits of an IA64 address are its Region Number. 17 + * Different regions are assigned to different purposes. 18 + */ 19 + #define RGN_SHIFT (61) 20 + #define RGN_BASE(r) (__IA64_UL_CONST(r)<<RGN_SHIFT) 21 + #define RGN_BITS (RGN_BASE(-1)) 22 + 23 + #define RGN_KERNEL 7 /* Identity mapped region */ 24 + #define RGN_UNCACHED 6 /* Identity mapped I/O region */ 25 + #define RGN_GATE 5 /* Gate page, Kernel text, etc */ 26 + #define RGN_HPAGE 4 /* For Huge TLB pages */ 27 + 28 + /* 16 29 * PAGE_SHIFT determines the actual kernel page size. 17 30 */ 18 31 #if defined(CONFIG_IA64_PAGE_SIZE_4KB) ··· 49 36 50 37 #define RGN_MAP_LIMIT ((1UL << (4*PAGE_SHIFT - 12)) - PAGE_SIZE) /* per region addr limit */ 51 38 39 + 52 40 #ifdef CONFIG_HUGETLB_PAGE 53 - # define REGION_HPAGE (4UL) /* note: this is hardcoded in reload_context()!*/ 54 - # define REGION_SHIFT 61 55 - # define HPAGE_REGION_BASE (REGION_HPAGE << REGION_SHIFT) 41 + # define HPAGE_REGION_BASE RGN_BASE(RGN_HPAGE) 56 42 # define HPAGE_SHIFT hpage_shift 57 43 # define HPAGE_SHIFT_DEFAULT 28 /* check ia64 SDM for architecture supported size */ 58 44 # define HPAGE_SIZE (__IA64_UL_CONST(1) << HPAGE_SHIFT) ··· 142 130 #define REGION_NUMBER(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg;}) 143 131 #define REGION_OFFSET(x) ({ia64_va _v; _v.l = (long) (x); _v.f.off;}) 144 132 145 - #define REGION_SIZE REGION_NUMBER(1) 146 - #define REGION_KERNEL 7 147 - 148 133 #ifdef CONFIG_HUGETLB_PAGE 149 134 # define htlbpage_to_page(x) (((unsigned long) REGION_NUMBER(x) << 61) \ 150 135 | (REGION_OFFSET(x) >> (HPAGE_SHIFT-PAGE_SHIFT))) 151 136 # define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) 152 137 # define is_hugepage_only_range(mm, addr, len) \ 153 138 (REGION_NUMBER(addr) == RGN_HPAGE && \ 154 139 REGION_NUMBER((addr)+(len)-1) == RGN_HPAGE) 155 140 extern unsigned int hpage_shift;
156 141 #endif 157 142 ··· 206 197 # define __pgprot(x) (x) 207 198 #endif /* !STRICT_MM_TYPECHECKS */ 208 199 209 - #define PAGE_OFFSET __IA64_UL_CONST(0xe000000000000000) 200 + #define PAGE_OFFSET RGN_BASE(RGN_KERNEL) 210 201 211 202 #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \ 212 203 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC | \
+5 -8
include/asm-ia64/pgtable.h
··· 204 204 #define set_pte(ptep, pteval) (*(ptep) = (pteval)) 205 205 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) 206 206 207 - #define RGN_SIZE (1UL << 61) 208 - #define RGN_KERNEL 7 209 - 210 - #define VMALLOC_START 0xa000000200000000UL 207 + #define VMALLOC_START (RGN_BASE(RGN_GATE) + 0x200000000UL) 211 208 #ifdef CONFIG_VIRTUAL_MEM_MAP 212 - # define VMALLOC_END_INIT (0xa000000000000000UL + (1UL << (4*PAGE_SHIFT - 9))) 209 + # define VMALLOC_END_INIT (RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9))) 213 210 # define VMALLOC_END vmalloc_end 214 211 extern unsigned long vmalloc_end; 215 212 #else 216 - # define VMALLOC_END (0xa000000000000000UL + (1UL << (4*PAGE_SHIFT - 9))) 213 + # define VMALLOC_END (RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9))) 217 214 #endif 218 215 219 216 /* fs/proc/kcore.c */ 220 - #define kc_vaddr_to_offset(v) ((v) - 0xa000000000000000UL) 221 - #define kc_offset_to_vaddr(o) ((o) + 0xa000000000000000UL) 217 + #define kc_vaddr_to_offset(v) ((v) - RGN_BASE(RGN_GATE)) 218 + #define kc_offset_to_vaddr(o) ((o) + RGN_BASE(RGN_GATE)) 222 219 223 220 /* 224 221 * Conversion functions: convert page frame number (pfn) and a protection value to a page
+12 -21
include/asm-ia64/sn/addrs.h
··· 65 65 66 66 #define NASID_MASK ((u64)NASID_BITMASK << NASID_SHIFT) 67 67 #define AS_MASK ((u64)AS_BITMASK << AS_SHIFT) 68 - #define REGION_BITS 0xe000000000000000UL 69 68 70 69 71 70 /* ··· 78 79 #define AS_CAC_SPACE (AS_CAC_VAL << AS_SHIFT) 79 80 80 81 81 - /* 82 - * Base addresses for various address ranges. 83 - */ 84 - #define CACHED 0xe000000000000000UL 85 - #define UNCACHED 0xc000000000000000UL 86 - #define UNCACHED_PHYS 0x8000000000000000UL 87 - 88 - 89 82 /* 90 83 * Virtual Mode Local & Global MMR space. 91 84 */ 92 85 #define SH1_LOCAL_MMR_OFFSET 0x8000000000UL 93 86 #define SH2_LOCAL_MMR_OFFSET 0x0200000000UL 94 87 #define LOCAL_MMR_OFFSET (is_shub2() ? SH2_LOCAL_MMR_OFFSET : SH1_LOCAL_MMR_OFFSET) 95 88 #define LOCAL_MMR_SPACE (__IA64_UNCACHED_OFFSET | LOCAL_MMR_OFFSET) 96 89 #define LOCAL_PHYS_MMR_SPACE (RGN_BASE(RGN_HPAGE) | LOCAL_MMR_OFFSET) 97 90 98 91 #define SH1_GLOBAL_MMR_OFFSET 0x0800000000UL 99 92 #define SH2_GLOBAL_MMR_OFFSET 0x0300000000UL 100 93 #define GLOBAL_MMR_OFFSET (is_shub2() ? SH2_GLOBAL_MMR_OFFSET : SH1_GLOBAL_MMR_OFFSET) 101 94 #define GLOBAL_MMR_SPACE (__IA64_UNCACHED_OFFSET | GLOBAL_MMR_OFFSET) 102 95 103 96 /* 104 97 * Physical mode addresses 105 98 */ 106 99 #define GLOBAL_PHYS_MMR_SPACE (RGN_BASE(RGN_HPAGE) | GLOBAL_MMR_OFFSET) 107 100 108 101 109 102 /* 110 103 * Clear region & AS bits. 111 104 */ 112 - #define TO_PHYS_MASK (~(REGION_BITS | AS_MASK)) 105 + #define TO_PHYS_MASK (~(RGN_BITS | AS_MASK)) 113 106 114 107 115 108 /* ··· 126 135 /* 127 136 * general address defines 128 137 */ 129 - #define CAC_BASE (CACHED | AS_CAC_SPACE) 130 - #define AMO_BASE (UNCACHED | AS_AMO_SPACE) 131 - #define AMO_PHYS_BASE (UNCACHED_PHYS | AS_AMO_SPACE) 132 - #define GET_BASE (CACHED | AS_GET_SPACE) 138 + #define CAC_BASE (PAGE_OFFSET | AS_CAC_SPACE) 139 + #define AMO_BASE (__IA64_UNCACHED_OFFSET | AS_AMO_SPACE) 140 + #define AMO_PHYS_BASE (RGN_BASE(RGN_HPAGE) | AS_AMO_SPACE) 141 + #define GET_BASE (PAGE_OFFSET | AS_GET_SPACE) 133 142 134 143 /* 135 144 * Convert Memory addresses between various addressing modes. ··· 174 183 /* 175 184 * Macros to test for address type. 176 185 */ 177 186 #define IS_AMO_ADDRESS(x) (((u64)(x) & (RGN_BITS | AS_MASK)) == AMO_BASE) 178 187 #define IS_AMO_PHYS_ADDRESS(x) (((u64)(x) & (RGN_BITS | AS_MASK)) == AMO_PHYS_BASE) 179 188 180 189 181 190 /* ··· 190 199 #define TIO_SWIN_BASE(n, w) (TIO_IO_BASE(n) + \ 191 200 ((u64) (w) << TIO_SWIN_SIZE_BITS)) 192 201 #define NODE_IO_BASE(n) (GLOBAL_MMR_SPACE | NASID_SPACE(n)) 193 - #define TIO_IO_BASE(n) (UNCACHED | NASID_SPACE(n)) 202 + #define TIO_IO_BASE(n) (__IA64_UNCACHED_OFFSET | NASID_SPACE(n)) 194 203 #define BWIN_SIZE (1UL << BWIN_SIZE_BITS) 195 204 #define NODE_BWIN_BASE0(n) (NODE_IO_BASE(n) + BWIN_SIZE) 196 205 #define NODE_BWIN_BASE(n, w) (NODE_BWIN_BASE0(n) + ((u64) (w) << BWIN_SIZE_BITS))
+3 -2
include/asm-ia64/system.h
··· 19 19 #include <asm/pal.h> 20 20 #include <asm/percpu.h> 21 21 22 - #define GATE_ADDR __IA64_UL_CONST(0xa000000000000000) 22 + #define GATE_ADDR RGN_BASE(RGN_GATE) 23 + 23 24 /* 24 25 * 0xa000000000000000+2*PERCPU_PAGE_SIZE 25 26 * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page) 26 27 */ 27 - #define KERNEL_START __IA64_UL_CONST(0xa000000100000000) 28 + #define KERNEL_START (GATE_ADDR+0x100000000) 28 29 #define PERCPU_ADDR (-PERCPU_PAGE_SIZE) 29 30 30 31 #ifndef __ASSEMBLY__