Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6:
[SPARC64]: exec PT_DTRACE
[SPARC64]: Use shorter list_splice_init() for brevity.
[SPARC64]: Remove most limitations to kernel image size.

9 files changed: +98 -168
+1 -2
arch/sparc64/kernel/ds.c
before:
···
	LIST_HEAD(todo);

	spin_lock_irqsave(&ds_lock, flags);
-	list_splice(&ds_work_list, &todo);
-	INIT_LIST_HEAD(&ds_work_list);
	spin_unlock_irqrestore(&ds_lock, flags);

	list_for_each_entry_safe(qp, tmp, &todo, list) {

after:
···
	LIST_HEAD(todo);

	spin_lock_irqsave(&ds_lock, flags);
+	list_splice_init(&ds_work_list, &todo);
	spin_unlock_irqrestore(&ds_lock, flags);

	list_for_each_entry_safe(qp, tmp, &todo, list) {
+6 -2
arch/sparc64/kernel/head.S
before:
···
	/* Leave arg2 as-is, prom_mmu_ihandle_cache */
	mov	-1, %l3
	stx	%l3, [%sp + 2047 + 128 + 0x28]	! arg3: mode (-1 default)
-	sethi	%hi(8 * 1024 * 1024), %l3
-	stx	%l3, [%sp + 2047 + 128 + 0x30]	! arg4: size (8MB)
	sethi	%hi(KERNBASE), %l3
	stx	%l3, [%sp + 2047 + 128 + 0x38]	! arg5: vaddr (KERNBASE)
	stx	%g0, [%sp + 2047 + 128 + 0x40]	! arg6: empty

after:
···
	/* Leave arg2 as-is, prom_mmu_ihandle_cache */
	mov	-1, %l3
	stx	%l3, [%sp + 2047 + 128 + 0x28]	! arg3: mode (-1 default)
+	/* 4MB align the kernel image size. */
+	set	(_end - KERNBASE), %l3
+	set	((4 * 1024 * 1024) - 1), %l4
+	add	%l3, %l4, %l3
+	andn	%l3, %l4, %l3
+	stx	%l3, [%sp + 2047 + 128 + 0x30]	! arg4: roundup(ksize, 4MB)
	sethi	%hi(KERNBASE), %l3
	stx	%l3, [%sp + 2047 + 128 + 0x38]	! arg5: vaddr (KERNBASE)
	stx	%g0, [%sp + 2047 + 128 + 0x40]	! arg6: empty
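The new head.S code passes roundup(kernel image size, 4MB) to OBP instead of a hard-coded 8MB. A minimal C sketch of the same add-and-mask round-up; the image size used here is made up:

#include <stdio.h>

int main(void)
{
	/* Hypothetical image size standing in for (_end - KERNBASE). */
	unsigned long kernel_size = 5 * 1024 * 1024 + 123;
	unsigned long mask = (4UL * 1024 * 1024) - 1;

	/* Add (4MB - 1), then clear the low 22 bits: roundup(ksize, 4MB). */
	unsigned long arg4 = (kernel_size + mask) & ~mask;

	printf("OBP map size argument: %lu MB\n", arg4 >> 20);	/* prints 8 */
	return 0;
}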
-3
arch/sparc64/kernel/process.c
before:
···
	current_thread_info()->xfsr[0] = 0;
	current_thread_info()->fpsaved[0] = 0;
	regs->tstate &= ~TSTATE_PEF;
-	task_lock(current);
-	current->ptrace &= ~PT_DTRACE;
-	task_unlock(current);
	}
out:
	return error;

after:
···
	current_thread_info()->xfsr[0] = 0;
	current_thread_info()->fpsaved[0] = 0;
	regs->tstate &= ~TSTATE_PEF;
	}
out:
	return error;
+9 -8
arch/sparc64/kernel/smp.c
before:
···
{
	extern unsigned long sparc64_ttable_tl0;
	extern unsigned long kern_locked_tte_data;
-	extern int bigkernel;
	struct hvtramp_descr *hdesc;
	unsigned long trampoline_ra;
	struct trap_per_cpu *tb;
	u64 tte_vaddr, tte_data;
	unsigned long hv_err;

-	hdesc = kzalloc(sizeof(*hdesc), GFP_KERNEL);
	if (!hdesc) {
		printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
		       "hvtramp_descr.\n");
···
	}

	hdesc->cpu = cpu;
-	hdesc->num_mappings = (bigkernel ? 2 : 1);

	tb = &trap_block[cpu];
	tb->hdesc = hdesc;
···
	tte_vaddr = (unsigned long) KERNBASE;
	tte_data = kern_locked_tte_data;

-	hdesc->maps[0].vaddr = tte_vaddr;
-	hdesc->maps[0].tte = tte_data;
-	if (bigkernel) {
		tte_vaddr += 0x400000;
		tte_data += 0x400000;
-		hdesc->maps[1].vaddr = tte_vaddr;
-		hdesc->maps[1].tte = tte_data;
	}

	trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);

after:
···
{
	extern unsigned long sparc64_ttable_tl0;
	extern unsigned long kern_locked_tte_data;
	struct hvtramp_descr *hdesc;
	unsigned long trampoline_ra;
	struct trap_per_cpu *tb;
	u64 tte_vaddr, tte_data;
	unsigned long hv_err;
+	int i;

+	hdesc = kzalloc(sizeof(*hdesc) +
+			(sizeof(struct hvtramp_mapping) *
+			 num_kernel_image_mappings - 1),
+			GFP_KERNEL);
	if (!hdesc) {
		printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
		       "hvtramp_descr.\n");
···
	}

	hdesc->cpu = cpu;
+	hdesc->num_mappings = num_kernel_image_mappings;

	tb = &trap_block[cpu];
	tb->hdesc = hdesc;
···
	tte_vaddr = (unsigned long) KERNBASE;
	tte_data = kern_locked_tte_data;

+	for (i = 0; i < hdesc->num_mappings; i++) {
+		hdesc->maps[i].vaddr = tte_vaddr;
+		hdesc->maps[i].tte = tte_data;
		tte_vaddr += 0x400000;
		tte_data += 0x400000;
	}

	trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);
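hvtramp_descr ends with a one-element maps[] array (see hvtramp.h below), so the kzalloc() above sizes the buffer for num_kernel_image_mappings entries. A minimal C sketch of that trailing-array sizing; the struct and function names are made-up stand-ins, not the kernel's:

#include <stdlib.h>

/* Made-up stand-ins for hvtramp_mapping / hvtramp_descr. */
struct mapping { unsigned long vaddr, tte; };
struct descr {
	unsigned int	num_mappings;
	struct mapping	maps[1];	/* trailing array, sized at allocation time */
};

static struct descr *alloc_descr(unsigned int num_mappings)
{
	/* maps[1] already holds one element, so add space for the rest. */
	struct descr *d = calloc(1, sizeof(*d) +
				 sizeof(struct mapping) * (num_mappings - 1));
	if (d)
		d->num_mappings = num_mappings;
	return d;
}

int main(void)
{
	struct descr *d = alloc_descr(3);	/* e.g. three 4MB mappings */
	free(d);
	return 0;
}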
-3
arch/sparc64/kernel/sys_sparc32.c
before:
···
	current_thread_info()->xfsr[0] = 0;
	current_thread_info()->fpsaved[0] = 0;
	regs->tstate &= ~TSTATE_PEF;
-	task_lock(current);
-	current->ptrace &= ~PT_DTRACE;
-	task_unlock(current);
	}
out:
	return error;

after:
···
	current_thread_info()->xfsr[0] = 0;
	current_thread_info()->fpsaved[0] = 0;
	regs->tstate &= ~TSTATE_PEF;
	}
out:
	return error;
+65 -125
arch/sparc64/kernel/trampoline.S
before:
···
	wr	%g2, 0, %tick_cmpr

	/* Call OBP by hand to lock KERNBASE into i/d tlbs.
-	 * We lock 2 consequetive entries if we are 'bigkernel'.
	 */
	sethi	%hi(prom_entry_lock), %g2
1:	ldstub	[%g2 + %lo(prom_entry_lock)], %g1
···
	add	%l2, -(192 + 128), %sp
	flushw

	sethi	%hi(call_method), %g2
	or	%g2, %lo(call_method), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x00]
···
	sethi	%hi(prom_mmu_ihandle_cache), %g2
	lduw	[%g2 + %lo(prom_mmu_ihandle_cache)], %g2
	stx	%g2, [%sp + 2047 + 128 + 0x20]
-	sethi	%hi(KERNBASE), %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x28]
-	sethi	%hi(kern_locked_tte_data), %g2
-	ldx	[%g2 + %lo(kern_locked_tte_data)], %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x30]

-	mov	15, %g2
-	BRANCH_IF_ANY_CHEETAH(g1,g5,1f)

-	mov	63, %g2
-1:
	stx	%g2, [%sp + 2047 + 128 + 0x38]
	sethi	%hi(p1275buf), %g2
	or	%g2, %lo(p1275buf), %g2
	ldx	[%g2 + 0x08], %o1
	call	%o1
	add	%sp, (2047 + 128), %o0

-	sethi	%hi(bigkernel), %g2
-	lduw	[%g2 + %lo(bigkernel)], %g2
-	brz,pt	%g2, do_dtlb
-	nop
-
-	sethi	%hi(call_method), %g2
-	or	%g2, %lo(call_method), %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x00]
-	mov	5, %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x08]
-	mov	1, %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x10]
-	sethi	%hi(itlb_load), %g2
-	or	%g2, %lo(itlb_load), %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x18]
-	sethi	%hi(prom_mmu_ihandle_cache), %g2
-	lduw	[%g2 + %lo(prom_mmu_ihandle_cache)], %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x20]
-	sethi	%hi(KERNBASE + 0x400000), %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x28]
-	sethi	%hi(kern_locked_tte_data), %g2
-	ldx	[%g2 + %lo(kern_locked_tte_data)], %g2
-	sethi	%hi(0x400000), %g1
-	add	%g2, %g1, %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x30]
-
-	mov	14, %g2
-	BRANCH_IF_ANY_CHEETAH(g1,g5,1f)
-
-	mov	62, %g2
-1:
-	stx	%g2, [%sp + 2047 + 128 + 0x38]
-	sethi	%hi(p1275buf), %g2
-	or	%g2, %lo(p1275buf), %g2
-	ldx	[%g2 + 0x08], %o1
-	call	%o1
-	add	%sp, (2047 + 128), %o0
-
-do_dtlb:
	sethi	%hi(call_method), %g2
	or	%g2, %lo(call_method), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x00]
···
	sethi	%hi(prom_mmu_ihandle_cache), %g2
	lduw	[%g2 + %lo(prom_mmu_ihandle_cache)], %g2
	stx	%g2, [%sp + 2047 + 128 + 0x20]
-	sethi	%hi(KERNBASE), %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x28]
-	sethi	%hi(kern_locked_tte_data), %g2
-	ldx	[%g2 + %lo(kern_locked_tte_data)], %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x30]

-	mov	15, %g2
-	BRANCH_IF_ANY_CHEETAH(g1,g5,1f)

-	mov	63, %g2
-1:

	stx	%g2, [%sp + 2047 + 128 + 0x38]
	sethi	%hi(p1275buf), %g2
	or	%g2, %lo(p1275buf), %g2
	ldx	[%g2 + 0x08], %o1
	call	%o1
	add	%sp, (2047 + 128), %o0

-	sethi	%hi(bigkernel), %g2
-	lduw	[%g2 + %lo(bigkernel)], %g2
-	brz,pt	%g2, do_unlock
	nop

-	sethi	%hi(call_method), %g2
-	or	%g2, %lo(call_method), %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x00]
-	mov	5, %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x08]
-	mov	1, %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x10]
-	sethi	%hi(dtlb_load), %g2
-	or	%g2, %lo(dtlb_load), %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x18]
-	sethi	%hi(prom_mmu_ihandle_cache), %g2
-	lduw	[%g2 + %lo(prom_mmu_ihandle_cache)], %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x20]
-	sethi	%hi(KERNBASE + 0x400000), %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x28]
-	sethi	%hi(kern_locked_tte_data), %g2
-	ldx	[%g2 + %lo(kern_locked_tte_data)], %g2
-	sethi	%hi(0x400000), %g1
-	add	%g2, %g1, %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x30]
-
-	mov	14, %g2
-	BRANCH_IF_ANY_CHEETAH(g1,g5,1f)
-
-	mov	62, %g2
-1:
-
-	stx	%g2, [%sp + 2047 + 128 + 0x38]
-	sethi	%hi(p1275buf), %g2
-	or	%g2, %lo(p1275buf), %g2
-	ldx	[%g2 + 0x08], %o1
-	call	%o1
-	add	%sp, (2047 + 128), %o0
-
-do_unlock:
	sethi	%hi(prom_entry_lock), %g2
	stb	%g0, [%g2 + %lo(prom_entry_lock)]
	membar	#StoreStore | #StoreLoad
···
	nop

niagara_lock_tlb:
	mov	HV_FAST_MMU_MAP_PERM_ADDR, %o5
-	sethi	%hi(KERNBASE), %o0
	clr	%o1
-	sethi	%hi(kern_locked_tte_data), %o2
-	ldx	[%o2 + %lo(kern_locked_tte_data)], %o2
	mov	HV_MMU_IMMU, %o3
	ta	HV_FAST_TRAP

	mov	HV_FAST_MMU_MAP_PERM_ADDR, %o5
-	sethi	%hi(KERNBASE), %o0
	clr	%o1
-	sethi	%hi(kern_locked_tte_data), %o2
-	ldx	[%o2 + %lo(kern_locked_tte_data)], %o2
	mov	HV_MMU_DMMU, %o3
	ta	HV_FAST_TRAP

-	sethi	%hi(bigkernel), %g2
-	lduw	[%g2 + %lo(bigkernel)], %g2
-	brz,pt	%g2, after_lock_tlb
	nop
-
-	mov	HV_FAST_MMU_MAP_PERM_ADDR, %o5
-	sethi	%hi(KERNBASE + 0x400000), %o0
-	clr	%o1
-	sethi	%hi(kern_locked_tte_data), %o2
-	ldx	[%o2 + %lo(kern_locked_tte_data)], %o2
-	sethi	%hi(0x400000), %o3
-	add	%o2, %o3, %o2
-	mov	HV_MMU_IMMU, %o3
-	ta	HV_FAST_TRAP
-
-	mov	HV_FAST_MMU_MAP_PERM_ADDR, %o5
-	sethi	%hi(KERNBASE + 0x400000), %o0
-	clr	%o1
-	sethi	%hi(kern_locked_tte_data), %o2
-	ldx	[%o2 + %lo(kern_locked_tte_data)], %o2
-	sethi	%hi(0x400000), %o3
-	add	%o2, %o3, %o2
-	mov	HV_MMU_DMMU, %o3
-	ta	HV_FAST_TRAP

after_lock_tlb:
	wrpr	%g0, (PSTATE_PRIV | PSTATE_PEF), %pstate

after:
···
	wr	%g2, 0, %tick_cmpr

	/* Call OBP by hand to lock KERNBASE into i/d tlbs.
+	 * We lock 'num_kernel_image_mappings' consequetive entries.
	 */
	sethi	%hi(prom_entry_lock), %g2
1:	ldstub	[%g2 + %lo(prom_entry_lock)], %g1
···
	add	%l2, -(192 + 128), %sp
	flushw

+	/* Setup the loop variables:
+	 * %l3: VADDR base
+	 * %l4: TTE base
+	 * %l5: Loop iterator, iterates from 0 to 'num_kernel_image_mappings'
+	 * %l6: Number of TTE entries to map
+	 * %l7: Highest TTE entry number, we count down
+	 */
+	sethi	%hi(KERNBASE), %l3
+	sethi	%hi(kern_locked_tte_data), %l4
+	ldx	[%l4 + %lo(kern_locked_tte_data)], %l4
+	clr	%l5
+	sethi	%hi(num_kernel_image_mappings), %l6
+	lduw	[%l6 + %lo(num_kernel_image_mappings)], %l6
+	add	%l6, 1, %l6
+
+	mov	15, %l7
+	BRANCH_IF_ANY_CHEETAH(g1,g5,2f)
+
+	mov	63, %l7
+2:
+
+3:
+	/* Lock into I-MMU */
	sethi	%hi(call_method), %g2
	or	%g2, %lo(call_method), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x00]
···
	sethi	%hi(prom_mmu_ihandle_cache), %g2
	lduw	[%g2 + %lo(prom_mmu_ihandle_cache)], %g2
	stx	%g2, [%sp + 2047 + 128 + 0x20]

+	/* Each TTE maps 4MB, convert index to offset. */
+	sllx	%l5, 22, %g1

+	add	%l3, %g1, %g2
+	stx	%g2, [%sp + 2047 + 128 + 0x28]	! VADDR
+	add	%l4, %g1, %g2
+	stx	%g2, [%sp + 2047 + 128 + 0x30]	! TTE
+
+	/* TTE index is highest minus loop index. */
+	sub	%l7, %l5, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x38]
+
	sethi	%hi(p1275buf), %g2
	or	%g2, %lo(p1275buf), %g2
	ldx	[%g2 + 0x08], %o1
	call	%o1
	add	%sp, (2047 + 128), %o0

+	/* Lock into D-MMU */
	sethi	%hi(call_method), %g2
	or	%g2, %lo(call_method), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x00]
···
	sethi	%hi(prom_mmu_ihandle_cache), %g2
	lduw	[%g2 + %lo(prom_mmu_ihandle_cache)], %g2
	stx	%g2, [%sp + 2047 + 128 + 0x20]

+	/* Each TTE maps 4MB, convert index to offset. */
+	sllx	%l5, 22, %g1

+	add	%l3, %g1, %g2
+	stx	%g2, [%sp + 2047 + 128 + 0x28]	! VADDR
+	add	%l4, %g1, %g2
+	stx	%g2, [%sp + 2047 + 128 + 0x30]	! TTE

+	/* TTE index is highest minus loop index. */
+	sub	%l7, %l5, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x38]
+
	sethi	%hi(p1275buf), %g2
	or	%g2, %lo(p1275buf), %g2
	ldx	[%g2 + 0x08], %o1
	call	%o1
	add	%sp, (2047 + 128), %o0

+	add	%l5, 1, %l5
+	cmp	%l5, %l6
+	bne,pt	%xcc, 3b
	nop

	sethi	%hi(prom_entry_lock), %g2
	stb	%g0, [%g2 + %lo(prom_entry_lock)]
	membar	#StoreStore | #StoreLoad
···
	nop

niagara_lock_tlb:
+	sethi	%hi(KERNBASE), %l3
+	sethi	%hi(kern_locked_tte_data), %l4
+	ldx	[%l4 + %lo(kern_locked_tte_data)], %l4
+	clr	%l5
+	sethi	%hi(num_kernel_image_mappings), %l6
+	lduw	[%l6 + %lo(num_kernel_image_mappings)], %l6
+	add	%l6, 1, %l6
+
+1:
	mov	HV_FAST_MMU_MAP_PERM_ADDR, %o5
+	sllx	%l5, 22, %g2
+	add	%l3, %g2, %o0
	clr	%o1
+	add	%l4, %g2, %o2
	mov	HV_MMU_IMMU, %o3
	ta	HV_FAST_TRAP

	mov	HV_FAST_MMU_MAP_PERM_ADDR, %o5
+	sllx	%l5, 22, %g2
+	add	%l3, %g2, %o0
	clr	%o1
+	add	%l4, %g2, %o2
	mov	HV_MMU_DMMU, %o3
	ta	HV_FAST_TRAP

+	add	%l5, 1, %l5
+	cmp	%l5, %l6
+	bne,pt	%xcc, 1b
	nop

after_lock_tlb:
	wrpr	%g0, (PSTATE_PRIV | PSTATE_PEF), %pstate
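Both trampoline loops derive everything from the iteration counter: each step advances the virtual address and TTE data by 4MB (index << 22) and, for the OBP path, picks the TLB entry counting down from the highest locked slot. A small C sketch of that per-iteration arithmetic, using made-up base values:

#include <stdio.h>

int main(void)
{
	unsigned long kernbase = 0x400000;	/* sparc64 KERNBASE */
	unsigned long tte_base = 0x12345678;	/* made-up locked TTE data */
	int highest = 63;			/* 15 on cheetah, 63 otherwise */
	int num_mappings = 3;			/* made-up mapping count */

	for (int i = 0; i < num_mappings; i++) {
		unsigned long off = (unsigned long)i << 22;	/* 4MB per TTE */
		printf("entry %2d: vaddr %#lx tte %#lx\n",
		       highest - i, kernbase + off, tte_base + off);
	}
	return 0;
}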
+14 -24
arch/sparc64/mm/init.c
before:
···
unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
unsigned long sparc64_kern_sec_context __read_mostly;

-int bigkernel = 0;

#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
···
static void __init remap_kernel(void)
{
	unsigned long phys_page, tte_vaddr, tte_data;
-	int tlb_ent = sparc64_highest_locked_tlbent();

	tte_vaddr = (unsigned long) KERNBASE;
	phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
···

	/* Now lock us into the TLBs via Hypervisor or OBP. */
	if (tlb_type == hypervisor) {
-		hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
-		hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
-		if (bigkernel) {
-			tte_vaddr += 0x400000;
-			tte_data += 0x400000;
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
		}
	} else {
-		prom_dtlb_load(tlb_ent, tte_data, tte_vaddr);
-		prom_itlb_load(tlb_ent, tte_data, tte_vaddr);
-		if (bigkernel) {
-			tlb_ent -= 1;
-			prom_dtlb_load(tlb_ent,
-				       tte_data + 0x400000,
-				       tte_vaddr + 0x400000);
-			prom_itlb_load(tlb_ent,
-				       tte_data + 0x400000,
-				       tte_vaddr + 0x400000);
		}
-		sparc64_highest_unlocked_tlb_ent = tlb_ent - 1;
	}
	if (tlb_type == cheetah_plus) {
		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
···
	shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);

	real_end = (unsigned long)_end;
-	if ((real_end > ((unsigned long)KERNBASE + 0x400000)))
-		bigkernel = 1;
-	if ((real_end > ((unsigned long)KERNBASE + 0x800000))) {
-		prom_printf("paging_init: Kernel > 8MB, too large.\n");
-		prom_halt();
-	}

	/* Set kernel pgd to upper alias so physical page computations
	 * work.

after:
···
unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
unsigned long sparc64_kern_sec_context __read_mostly;

+int num_kernel_image_mappings;

#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
···
static void __init remap_kernel(void)
{
	unsigned long phys_page, tte_vaddr, tte_data;
+	int i, tlb_ent = sparc64_highest_locked_tlbent();

	tte_vaddr = (unsigned long) KERNBASE;
	phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
···

	/* Now lock us into the TLBs via Hypervisor or OBP. */
	if (tlb_type == hypervisor) {
+		for (i = 0; i < num_kernel_image_mappings; i++) {
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
+			tte_vaddr += 0x400000;
+			tte_data += 0x400000;
		}
	} else {
+		for (i = 0; i < num_kernel_image_mappings; i++) {
+			prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
+			prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
+			tte_vaddr += 0x400000;
+			tte_data += 0x400000;
		}
+		sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
	}
	if (tlb_type == cheetah_plus) {
		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
···
	shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);

	real_end = (unsigned long)_end;
+	num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22);
+	printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
+	       num_kernel_image_mappings);

	/* Set kernel pgd to upper alias so physical page computations
	 * work.
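num_kernel_image_mappings is simply the kernel image size divided by 4MB, rounded up. A small C sketch of the same DIV_ROUND_UP computation on a few made-up image sizes:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* Made-up image sizes: 3MB, 4MB and 9MB. */
	unsigned long sizes[] = { 3UL << 20, 4UL << 20, 9UL << 20 };

	for (int i = 0; i < 3; i++)
		printf("%lu MB image -> %lu locked 4MB mappings\n",
		       sizes[i] >> 20, DIV_ROUND_UP(sizes[i], 1UL << 22));
	/* Prints 1, 1 and 3 mappings respectively. */
	return 0;
}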
+1 -1
include/asm-sparc64/hvtramp.h
before:
···
	__u64		fault_info_va;
	__u64		fault_info_pa;
	__u64		thread_reg;
-	struct hvtramp_mapping	maps[2];
};

extern void hv_cpu_startup(unsigned long hvdescr_pa);

after:
···
	__u64		fault_info_va;
	__u64		fault_info_pa;
	__u64		thread_reg;
+	struct hvtramp_mapping	maps[1];
};

extern void hv_cpu_startup(unsigned long hvdescr_pa);
+2
include/asm-sparc64/spitfire.h
before:
···
	 SPITFIRE_HIGHEST_LOCKED_TLBENT : \
	 CHEETAH_HIGHEST_LOCKED_TLBENT)

/* The data cache is write through, so this just invalidates the
 * specified line.
 */

after:
···
	 SPITFIRE_HIGHEST_LOCKED_TLBENT : \
	 CHEETAH_HIGHEST_LOCKED_TLBENT)

+extern int num_kernel_image_mappings;
+
/* The data cache is write through, so this just invalidates the
 * specified line.
 */