Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc

Pull sparc fixes from David Miller:
"Several fixes here, mostly having to due with either build errors or
memory corruptions depending upon whether you have THP enabled or not"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc:
sparc: remove unused wp_works_ok macro
sparc32: Export vac_cache_size to fix build error
sparc64: Fix memory corruption when THP is enabled
sparc64: Fix kernel panic due to erroneous #ifdef surrounding pmd_write()
arch/sparc: Avoid DCTI Couples
sparc64: kern_addr_valid regression
sparc64: Add support for 2G hugepages
sparc64: Fix size check in huge_pte_alloc

20 files changed, 47 insertions(+), 25 deletions(-)
+2 -1
arch/sparc/include/asm/page_64.h
···

 #define HPAGE_SHIFT		23
 #define REAL_HPAGE_SHIFT	22
+#define HPAGE_2GB_SHIFT		31
 #define HPAGE_256MB_SHIFT	28
 #define HPAGE_64K_SHIFT		16
 #define REAL_HPAGE_SIZE		(_AC(1,UL) << REAL_HPAGE_SHIFT)
···
 #define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 #define REAL_HPAGE_PER_HPAGE	(_AC(1,UL) << (HPAGE_SHIFT - REAL_HPAGE_SHIFT))
-#define HUGE_MAX_HSTATE		3
+#define HUGE_MAX_HSTATE		4
 #endif

 #ifndef __ASSEMBLY__
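The new shift gives sparc64 a fourth huge page size (64K, 8M, 256MB, and now 2GB), which is why HUGE_MAX_HSTATE grows from 3 to 4. A minimal standalone sketch of that arithmetic, using only the shift values from this header:

#include <stdio.h>

int main(void)
{
	/* HPAGE_64K_SHIFT, HPAGE_SHIFT, HPAGE_256MB_SHIFT, HPAGE_2GB_SHIFT */
	const unsigned int shifts[] = { 16, 23, 28, 31 };
	const unsigned int n = sizeof(shifts) / sizeof(shifts[0]);

	for (unsigned int i = 0; i < n; i++)
		printf("shift %2u -> %10lu bytes\n", shifts[i], 1UL << shifts[i]);

	/* Four supported sizes, hence HUGE_MAX_HSTATE = 4. */
	return 0;
}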
+8 -7
arch/sparc/include/asm/pgtable_64.h
···
 	return pte_pfn(pte);
 }

+#define __HAVE_ARCH_PMD_WRITE
+static inline unsigned long pmd_write(pmd_t pmd)
+{
+	pte_t pte = __pte(pmd_val(pmd));
+
+	return pte_write(pte);
+}
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static inline unsigned long pmd_dirty(pmd_t pmd)
 {
···
 	pte_t pte = __pte(pmd_val(pmd));

 	return pte_young(pte);
-}
-
-static inline unsigned long pmd_write(pmd_t pmd)
-{
-	pte_t pte = __pte(pmd_val(pmd));
-
-	return pte_write(pte);
 }

 static inline unsigned long pmd_trans_huge(pmd_t pmd)
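The panic fixed here ("sparc64: Fix kernel panic due to erroneous #ifdef surrounding pmd_write()") came from pmd_write() being compiled out along with THP even though generic mm code calls it unconditionally. A standalone sketch of the hazard and the fixed placement; the pmd_t layout and permission bit below are illustrative stand-ins, not the kernel's:

#include <stdio.h>

typedef struct { unsigned long val; } pmd_t;	/* illustrative stand-in */
#define _PAGE_WRITE	0x1UL			/* illustrative write bit */

/* Fixed placement: defined unconditionally, outside the THP #ifdef,
 * so callers that exist in every configuration still link and behave. */
static inline unsigned long pmd_write(pmd_t pmd)
{
	return pmd.val & _PAGE_WRITE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* Only genuinely THP-specific helpers belong inside this block. */
#endif

int main(void)
{
	pmd_t pmd = { _PAGE_WRITE };
	printf("writable: %d\n", pmd_write(pmd) != 0);
	return 0;
}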
-6
arch/sparc/include/asm/processor_32.h
···
 #include <asm/signal.h>
 #include <asm/page.h>

-/*
- * The sparc has no problems with write protection
- */
-#define wp_works_ok 1
-#define wp_works_ok__is_a_macro /* for versions in ksyms.c */
-
 /* Whee, this is STACK_TOP + PAGE_SIZE and the lowest kernel address too...
  * That one page is used to protect kernel from intruders, so that
  * we can make our access_ok test faster
-4
arch/sparc/include/asm/processor_64.h
···
 #include <asm/ptrace.h>
 #include <asm/page.h>

-/* The sparc has no problems with write protection */
-#define wp_works_ok 1
-#define wp_works_ok__is_a_macro /* for versions in ksyms.c */
-
 /*
  * User lives in his very own context, and cannot reference us. Note
  * that TASK_SIZE is a misnomer, it really gives maximum user virtual
+4
arch/sparc/kernel/head_64.S
(The nop additions in this and the assembly files below implement "arch/sparc: Avoid DCTI Couples": ba,a annuls its delay slot, but the instruction fetched after it can itself be a control transfer, and back-to-back delayed control transfer instructions form a DCTI couple, deprecated on modern SPARC processors. Padding each annulled branch with a nop keeps a second CTI out of that slot.)

···
 	andn	%g1, PSTATE_AM, %g1
 	wrpr	%g1, 0x0, %pstate
 	ba,a,pt	%xcc, 1f
+	 nop

 	.globl	prom_finddev_name, prom_chosen_path, prom_root_node
 	.globl	prom_getprop_name, prom_mmu_name, prom_peer_name
···
 	 nop

 	ba,a,pt	%xcc, 80f
+	 nop
 niagara4_patch:
 	call	niagara4_patch_copyops
 	 nop
···
 	 nop

 	ba,a,pt	%xcc, 80f
+	 nop

 niagara2_patch:
 	call	niagara2_patch_copyops
···
 	 nop

 	ba,a,pt	%xcc, 80f
+	 nop

 niagara_patch:
 	call	niagara_patch_copyops
+1
arch/sparc/kernel/misctrap.S
···
 	call	handle_stdfmna
 	 add	%sp, PTREGS_OFF, %o0
 	ba,a,pt	%xcc, rtrap
+	 nop
 	.size	do_stdfmna,.-do_stdfmna

 	.type	breakpoint_trap,#function
+1
arch/sparc/kernel/rtrap_64.S
···
 	bne,pt	%xcc, user_rtt_fill_32bit
 	 wrpr	%g1, %cwp
 	ba,a,pt	%xcc, user_rtt_fill_64bit
+	 nop

 user_rtt_fill_fixup_dax:
 	ba,pt	%xcc, user_rtt_fill_fixup_common
+1
arch/sparc/kernel/spiterrs.S
···
 	 rd	%pc, %g7

 	ba,a,pt	%xcc, 2f
+	 nop

 1:	ba,pt	%xcc, etrap_irq
 	 rd	%pc, %g7
+1
arch/sparc/kernel/sun4v_tlb_miss.S
···
 	call	sun4v_do_mna
 	 add	%sp, PTREGS_OFF, %o0
 	ba,a,pt	%xcc, rtrap
+	 nop

 	/* Privileged Action.  */
 sun4v_privact:
+1
arch/sparc/kernel/urtt_fill.S
···
 	call	sun4v_data_access_exception
 	 nop
 	ba,a,pt	%xcc, rtrap
+	 nop

 1:	call	spitfire_data_access_exception
 	 nop
+2
arch/sparc/kernel/winfixup.S
···
 	call	sun4v_data_access_exception
 	 nop
 	ba,a,pt	%xcc, rtrap
+	 nop
 1:	call	spitfire_data_access_exception
 	 nop
 	ba,a,pt	%xcc, rtrap
+	 nop
+4
arch/sparc/lib/NG2memcpy.S
···
 	blu		170f
 	 nop
 	ba,a,pt		%xcc, 180f
+	 nop

 4:	/* 32 <= low bits < 48 */
 	blu		150f
 	 nop
 	ba,a,pt		%xcc, 160f
+	 nop
 5:	/* 0 < low bits < 32 */
 	blu,a		6f
 	 cmp		%g2, 8
···
 	blu		130f
 	 nop
 	ba,a,pt		%xcc, 140f
+	 nop
 6:	/* 0 < low bits < 16 */
 	bgeu		120f
 	 nop
···
 	brz,pt		%o2, 85f
 	 sub		%o0, %o1, GLOBAL_SPARE
 	ba,a,pt		%XCC, 90f
+	 nop

 	.align		64
 75:	/* 16 < len <= 64 */
+1
arch/sparc/lib/NG4memcpy.S
···
 	bne,pt		%icc, 1b
 	 EX_ST(STORE(stb, %g1, %o0 - 0x01), NG4_retl_o2_plus_1)
 	ba,a,pt		%icc, .Lexit
+	 nop
 	.size		FUNC_NAME, .-FUNC_NAME
+1
arch/sparc/lib/NG4memset.S
···
 	bne,pt		%icc, 1b
 	 add		%o0, 0x30, %o0
 	ba,a,pt		%icc, .Lpostloop
+	 nop
 	.size		NG4bzero,.-NG4bzero
+1
arch/sparc/lib/NGmemcpy.S
···
 	brz,pt		%i2, 85f
 	 sub		%o0, %i1, %i3
 	ba,a,pt		%XCC, 90f
+	 nop

 	.align		64
 70:	/* 16 < len <= 64 */
+8 -1
arch/sparc/mm/hugetlbpage.c
···
 	pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V;

 	switch (shift) {
+	case HPAGE_2GB_SHIFT:
+		hugepage_size = _PAGE_SZ2GB_4V;
+		pte_val(entry) |= _PAGE_PMD_HUGE;
+		break;
 	case HPAGE_256MB_SHIFT:
 		hugepage_size = _PAGE_SZ256MB_4V;
 		pte_val(entry) |= _PAGE_PMD_HUGE;
···
 	unsigned int shift;

 	switch (tte_szbits) {
+	case _PAGE_SZ2GB_4V:
+		shift = HPAGE_2GB_SHIFT;
+		break;
 	case _PAGE_SZ256MB_4V:
 		shift = HPAGE_256MB_SHIFT;
 		break;
···
 	if (!pmd)
 		return NULL;

-	if (sz == PMD_SHIFT)
+	if (sz >= PMD_SIZE)
 		pte = (pte_t *)pmd;
 	else
 		pte = pte_alloc_map(mm, pmd, addr);
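The last hunk is "sparc64: Fix size check in huge_pte_alloc": the old condition compared sz, a size in bytes, against PMD_SHIFT, a bit count, so it could never match. A standalone sketch of the bug, assuming sparc64's PMD_SHIFT of 23 (a PMD maps 8MB):

#include <stdio.h>

#define PMD_SHIFT	23
#define PMD_SIZE	(1UL << PMD_SHIFT)	/* 8MB */

int main(void)
{
	unsigned long sz = 1UL << 23;	/* requesting an 8MB huge page */

	/* Old, broken test: bytes vs. bit count, never true. */
	printf("sz == PMD_SHIFT: %d\n", sz == PMD_SHIFT);	/* prints 0 */

	/* Fixed test: PMD-sized or larger (8MB, 2GB) pages use the PMD. */
	printf("sz >= PMD_SIZE:  %d\n", sz >= PMD_SIZE);	/* prints 1 */
	return 0;
}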
+5 -1
arch/sparc/mm/init_64.c
···
 	hugepage_shift = ilog2(hugepage_size);

 	switch (hugepage_shift) {
+	case HPAGE_2GB_SHIFT:
+		hv_pgsz_mask = HV_PGSZ_MASK_2GB;
+		hv_pgsz_idx = HV_PGSZ_IDX_2GB;
+		break;
 	case HPAGE_256MB_SHIFT:
 		hv_pgsz_mask = HV_PGSZ_MASK_256MB;
 		hv_pgsz_idx = HV_PGSZ_IDX_256MB;
···
 	if ((long)addr < 0L) {
 		unsigned long pa = __pa(addr);

-		if ((addr >> max_phys_bits) != 0UL)
+		if ((pa >> max_phys_bits) != 0UL)
 			return false;

 		return pfn_valid(pa >> PAGE_SHIFT);
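The second hunk is the kern_addr_valid regression fix: kernel virtual addresses always have bits set above max_phys_bits, so shifting addr instead of pa made the check reject every kernel address. A standalone sketch; the PAGE_OFFSET and max_phys_bits values here are assumptions for the demo, not the kernel's exact layout:

#include <stdio.h>

#define MAX_PHYS_BITS	49UL
#define PAGE_OFFSET	0xfff8000000000000UL	/* illustrative */

int main(void)
{
	unsigned long addr = PAGE_OFFSET + 0x12345000UL; /* a kernel VA */
	unsigned long pa   = addr - PAGE_OFFSET;	 /* its PA (sketch) */

	/* Old test: high VA bits are always set, so this always rejected. */
	printf("old rejects: %d\n", (addr >> MAX_PHYS_BITS) != 0UL); /* 1 */

	/* Fixed test: bound the physical address instead. */
	printf("new rejects: %d\n", (pa >> MAX_PHYS_BITS) != 0UL);   /* 0 */
	return 0;
}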
+1
arch/sparc/mm/srmmu.c
···
 enum mbus_module srmmu_modtype;
 static unsigned int hwbug_bitmask;
 int vac_cache_size;
+EXPORT_SYMBOL(vac_cache_size);
 int vac_line_size;

 extern struct resource sparc_iomap;
+3 -3
arch/sparc/mm/tlb.c
···
 	if (pte_val(*pte) & _PAGE_VALID) {
 		bool exec = pte_exec(*pte);

-		tlb_batch_add_one(mm, vaddr, exec, false);
+		tlb_batch_add_one(mm, vaddr, exec, PAGE_SHIFT);
 	}
 	pte++;
 	vaddr += PAGE_SIZE;
···
 		pte_t orig_pte = __pte(pmd_val(orig));
 		bool exec = pte_exec(orig_pte);

-		tlb_batch_add_one(mm, addr, exec, true);
+		tlb_batch_add_one(mm, addr, exec, REAL_HPAGE_SHIFT);
 		tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
-				  true);
+				  REAL_HPAGE_SHIFT);
 	} else {
 		tlb_batch_pmd_scan(mm, addr, orig);
 	}
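With 64K, 8M, 256MB, and 2GB pages all possible, a bool "huge" flag no longer tells the flush path which translation size to invalidate, so tlb_batch_add_one() now takes the page-size shift. A simplified stand-in for the changed call convention (not the kernel's implementation):

#include <stdio.h>

#define PAGE_SHIFT		13	/* sparc64 base page */
#define REAL_HPAGE_SHIFT	22	/* 4MB TTE actually programmed */

/* Simplified stand-in: recording the shift lets the eventual flush
 * invalidate exactly the right translation size. */
static void tlb_batch_add_one(unsigned long vaddr, int exec,
			      unsigned int hugepage_shift)
{
	(void)exec;
	printf("flush va 0x%lx as a %lu-byte mapping\n",
	       vaddr, 1UL << hugepage_shift);
}

int main(void)
{
	tlb_batch_add_one(0x100000UL, 0, PAGE_SHIFT);
	tlb_batch_add_one(0x800000UL, 0, REAL_HPAGE_SHIFT);
	return 0;
}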
+2 -2
arch/sparc/mm/tsb.c
···

 	spin_lock_irqsave(&mm->context.lock, flags);

-	if (tb->hugepage_shift < HPAGE_SHIFT) {
+	if (tb->hugepage_shift < REAL_HPAGE_SHIFT) {
 		base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
 		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
···

 	spin_lock_irqsave(&mm->context.lock, flags);

-	if (hugepage_shift < HPAGE_SHIFT) {
+	if (hugepage_shift < REAL_HPAGE_SHIFT) {
 		base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
 		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
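This appears to be the heart of "sparc64: Fix memory corruption when THP is enabled": an 8MB huge page is installed as two 4MB REAL_HPAGE TTEs that live in the huge TSB, so a flush arriving with shift REAL_HPAGE_SHIFT (22) must target the huge TSB; the old test against HPAGE_SHIFT (23) routed it to the base TSB, leaving stale huge-TSB entries behind. A minimal sketch of the routing decision:

#include <stdio.h>

#define PAGE_SHIFT		13
#define REAL_HPAGE_SHIFT	22	/* 4MB TTEs backing an 8MB page */

/* Route a flush to the TSB that actually holds entries of this size. */
static const char *tsb_for(unsigned int hugepage_shift)
{
	return hugepage_shift < REAL_HPAGE_SHIFT ? "base TSB" : "huge TSB";
}

int main(void)
{
	/* THP flushes carry REAL_HPAGE_SHIFT; the old test against
	 * HPAGE_SHIFT (23) sent them to the base TSB by mistake. */
	printf("shift %d -> %s\n", REAL_HPAGE_SHIFT, tsb_for(REAL_HPAGE_SHIFT));
	printf("shift %d -> %s\n", PAGE_SHIFT, tsb_for(PAGE_SHIFT));
	return 0;
}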