Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc

Pull sparc fixes from David Miller:
"Several fixes here, mostly having to due with either build errors or
memory corruptions depending upon whether you have THP enabled or not"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc:
sparc: remove unused wp_works_ok macro
sparc32: Export vac_cache_size to fix build error
sparc64: Fix memory corruption when THP is enabled
sparc64: Fix kernel panic due to erroneous #ifdef surrounding pmd_write()
arch/sparc: Avoid DCTI Couples
sparc64: kern_addr_valid regression
sparc64: Add support for 2G hugepages
sparc64: Fix size check in huge_pte_alloc

+47 -25
+2 -1
arch/sparc/include/asm/page_64.h
···
 #define HPAGE_SHIFT		23
 #define REAL_HPAGE_SHIFT	22
+#define HPAGE_2GB_SHIFT		31
 #define HPAGE_256MB_SHIFT	28
 #define HPAGE_64K_SHIFT		16
 #define REAL_HPAGE_SIZE		(_AC(1,UL) << REAL_HPAGE_SHIFT)
···
 #define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 #define REAL_HPAGE_PER_HPAGE	(_AC(1,UL) << (HPAGE_SHIFT - REAL_HPAGE_SHIFT))
-#define HUGE_MAX_HSTATE		3
+#define HUGE_MAX_HSTATE		4
 #endif

 #ifndef __ASSEMBLY__
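For context: the new HPAGE_2GB_SHIFT of 31 gives 1UL << 31 = 2GB, and with
64KB, 8MB, and 256MB already supported that makes four hugepage sizes, hence
HUGE_MAX_HSTATE moving from 3 to 4. A minimal standalone check of that
arithmetic (illustrative only, not part of the commit):

	#include <stdio.h>

	/* The four sparc64 hugepage shifts after this pull (from page_64.h). */
	static const unsigned int shifts[] = { 16, 23, 28, 31 };

	int main(void)
	{
		/* Four sizes total, hence HUGE_MAX_HSTATE = 4. */
		for (unsigned int i = 0; i < 4; i++)
			printf("shift %2u -> %lu KB\n", shifts[i],
			       (1UL << shifts[i]) >> 10);
		return 0;
	}

This prints 64, 8192, 262144, and 2097152 KB (2GB).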
+8 -7
arch/sparc/include/asm/pgtable_64.h
···
 	return pte_pfn(pte);
 }

+#define __HAVE_ARCH_PMD_WRITE
+static inline unsigned long pmd_write(pmd_t pmd)
+{
+	pte_t pte = __pte(pmd_val(pmd));
+
+	return pte_write(pte);
+}
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static inline unsigned long pmd_dirty(pmd_t pmd)
 {
···
 	pte_t pte = __pte(pmd_val(pmd));

 	return pte_young(pte);
-}
-
-static inline unsigned long pmd_write(pmd_t pmd)
-{
-	pte_t pte = __pte(pmd_val(pmd));
-
-	return pte_write(pte);
 }

 static inline unsigned long pmd_trans_huge(pmd_t pmd)
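The move matters because 8MB hugetlb pages are PMD-level mappings on sparc64,
so generic mm code can end up calling pmd_write() even in a build with
CONFIG_TRANSPARENT_HUGEPAGE disabled. With the definition hidden behind the
THP #ifdef, such configs picked up the generic fallback instead, which is, as
I read it, the erroneous #ifdef the commit title refers to; defining
__HAVE_ARCH_PMD_WRITE alongside it tells the generic headers that the
architecture supplies its own implementation.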
-6
arch/sparc/include/asm/processor_32.h
···
 #include <asm/signal.h>
 #include <asm/page.h>

-/*
- * The sparc has no problems with write protection
- */
-#define wp_works_ok 1
-#define wp_works_ok__is_a_macro /* for versions in ksyms.c */
-
 /* Whee, this is STACK_TOP + PAGE_SIZE and the lowest kernel address too...
  * That one page is used to protect kernel from intruders, so that
  * we can make our access_ok test faster
-4
arch/sparc/include/asm/processor_64.h
···
 #include <asm/ptrace.h>
 #include <asm/page.h>

-/* The sparc has no problems with write protection */
-#define wp_works_ok 1
-#define wp_works_ok__is_a_macro /* for versions in ksyms.c */
-
 /*
  * User lives in his very own context, and cannot reference us. Note
  * that TASK_SIZE is a misnomer, it really gives maximum user virtual
+4
arch/sparc/kernel/head_64.S
···
 	andn	%g1, PSTATE_AM, %g1
 	wrpr	%g1, 0x0, %pstate
 	ba,a,pt	%xcc, 1f
+	 nop

 	.globl	prom_finddev_name, prom_chosen_path, prom_root_node
 	.globl	prom_getprop_name, prom_mmu_name, prom_peer_name
···
 	nop

 	ba,a,pt	%xcc, 80f
+	 nop
niagara4_patch:
 	call	niagara4_patch_copyops
 	nop
···
 	nop

 	ba,a,pt	%xcc, 80f
+	 nop

niagara2_patch:
 	call	niagara2_patch_copyops
···
 	nop

 	ba,a,pt	%xcc, 80f
+	 nop

niagara_patch:
 	call	niagara_patch_copyops
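All of the nop additions in this and the assembler files below come from the
"Avoid DCTI Couples" change. A DCTI couple is a delayed control transfer
instruction sitting in the delay slot of another DCTI; the Oracle SPARC
Architecture deprecates the pattern because future implementations may execute
it slowly or not as the programmer expects. ba,a,pt annuls its delay slot, but
the instruction that follows it in memory still occupies that slot
architecturally, so when the next instruction happens to be another branch
(for example, the first instruction after a label), an unintended DCTI couple
results. Padding each ba,a with a nop guarantees the delay slot is harmless.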
+1
arch/sparc/kernel/misctrap.S
···
 	call	handle_stdfmna
 	 add	%sp, PTREGS_OFF, %o0
 	ba,a,pt	%xcc, rtrap
+	 nop
 	.size	do_stdfmna,.-do_stdfmna

 	.type	breakpoint_trap,#function
+1
arch/sparc/kernel/rtrap_64.S
···
 	bne,pt	%xcc, user_rtt_fill_32bit
 	 wrpr	%g1, %cwp
 	ba,a,pt	%xcc, user_rtt_fill_64bit
+	 nop

user_rtt_fill_fixup_dax:
 	ba,pt	%xcc, user_rtt_fill_fixup_common
+1
arch/sparc/kernel/spiterrs.S
···
 	 rd	%pc, %g7

 	ba,a,pt	%xcc, 2f
+	 nop

1:	ba,pt	%xcc, etrap_irq
 	 rd	%pc, %g7
+1
arch/sparc/kernel/sun4v_tlb_miss.S
···
 	call	sun4v_do_mna
 	 add	%sp, PTREGS_OFF, %o0
 	ba,a,pt	%xcc, rtrap
+	 nop

 	/* Privileged Action.  */
sun4v_privact:
+1
arch/sparc/kernel/urtt_fill.S
···
 	call	sun4v_data_access_exception
 	 nop
 	ba,a,pt	%xcc, rtrap
+	 nop

1:	call	spitfire_data_access_exception
 	 nop
+2
arch/sparc/kernel/winfixup.S
···
 	call	sun4v_data_access_exception
 	 nop
 	ba,a,pt	%xcc, rtrap
+	 nop
1:	call	spitfire_data_access_exception
 	 nop
 	ba,a,pt	%xcc, rtrap
+	 nop
+4
arch/sparc/lib/NG2memcpy.S
···
 	blu	170f
 	 nop
 	ba,a,pt	%xcc, 180f
+	 nop

4:	/* 32 <= low bits < 48 */
 	blu	150f
 	 nop
 	ba,a,pt	%xcc, 160f
+	 nop
5:	/* 0 < low bits < 32 */
 	blu,a	6f
 	 cmp	%g2, 8
···
 	blu	130f
 	 nop
 	ba,a,pt	%xcc, 140f
+	 nop
6:	/* 0 < low bits < 16 */
 	bgeu	120f
 	 nop
···
 	brz,pt	%o2, 85f
 	 sub	%o0, %o1, GLOBAL_SPARE
 	ba,a,pt	%XCC, 90f
+	 nop

 	.align	64
75: /* 16 < len <= 64 */
+1
arch/sparc/lib/NG4memcpy.S
···
 	bne,pt	%icc, 1b
 	EX_ST(STORE(stb, %g1, %o0 - 0x01), NG4_retl_o2_plus_1)
 	ba,a,pt	%icc, .Lexit
+	 nop
 	.size	FUNC_NAME, .-FUNC_NAME
+1
arch/sparc/lib/NG4memset.S
···
 	bne,pt	%icc, 1b
 	 add	%o0, 0x30, %o0
 	ba,a,pt	%icc, .Lpostloop
+	 nop
 	.size	NG4bzero,.-NG4bzero
+1
arch/sparc/lib/NGmemcpy.S
···
 	brz,pt	%i2, 85f
 	 sub	%o0, %i1, %i3
 	ba,a,pt	%XCC, 90f
+	 nop

 	.align	64
70: /* 16 < len <= 64 */
+8 -1
arch/sparc/mm/hugetlbpage.c
···
 	pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V;

 	switch (shift) {
+	case HPAGE_2GB_SHIFT:
+		hugepage_size = _PAGE_SZ2GB_4V;
+		pte_val(entry) |= _PAGE_PMD_HUGE;
+		break;
 	case HPAGE_256MB_SHIFT:
 		hugepage_size = _PAGE_SZ256MB_4V;
 		pte_val(entry) |= _PAGE_PMD_HUGE;
···
 	unsigned int shift;

 	switch (tte_szbits) {
+	case _PAGE_SZ2GB_4V:
+		shift = HPAGE_2GB_SHIFT;
+		break;
 	case _PAGE_SZ256MB_4V:
 		shift = HPAGE_256MB_SHIFT;
 		break;
···
 	if (!pmd)
 		return NULL;

-	if (sz == PMD_SHIFT)
+	if (sz >= PMD_SIZE)
 		pte = (pte_t *)pmd;
 	else
 		pte = pte_alloc_map(mm, pmd, addr);
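The huge_pte_alloc() change at the bottom fixes two problems at once: sz is a
size in bytes while PMD_SHIFT is a bit count, so the old equality could never
match, and with 2GB pages sz can now exceed PMD_SIZE, making >= the right test
for "mapped at PMD level or above". A standalone illustration of the
arithmetic (userspace sketch assuming sparc64's 8KB PAGE_SIZE; not part of the
commit):

	#include <stdio.h>

	#define PAGE_SHIFT 13					/* sparc64: 8KB pages */
	#define PMD_SHIFT  (PAGE_SHIFT + (PAGE_SHIFT - 3))	/* 23 */
	#define PMD_SIZE   (1UL << PMD_SHIFT)			/* 8MB */

	int main(void)
	{
		/* hugepage sizes: 64KB, 8MB, 256MB, 2GB */
		const unsigned long sz[] = { 1UL << 16, 1UL << 23,
					     1UL << 28, 1UL << 31 };
		for (int i = 0; i < 4; i++)
			printf("sz=%10lu  old (sz==PMD_SHIFT): %d  new (sz>=PMD_SIZE): %d\n",
			       sz[i], sz[i] == PMD_SHIFT, sz[i] >= PMD_SIZE);
		return 0;
	}

The old test is false for every size; the new one selects the PMD itself for
the 8MB, 256MB, and 2GB cases and falls through to pte_alloc_map() for 64KB.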
+5 -1
arch/sparc/mm/init_64.c
···
 	hugepage_shift = ilog2(hugepage_size);

 	switch (hugepage_shift) {
+	case HPAGE_2GB_SHIFT:
+		hv_pgsz_mask = HV_PGSZ_MASK_2GB;
+		hv_pgsz_idx = HV_PGSZ_IDX_2GB;
+		break;
 	case HPAGE_256MB_SHIFT:
 		hv_pgsz_mask = HV_PGSZ_MASK_256MB;
 		hv_pgsz_idx = HV_PGSZ_IDX_256MB;
···
 	if ((long)addr < 0L) {
 		unsigned long pa = __pa(addr);

-		if ((addr >> max_phys_bits) != 0UL)
+		if ((pa >> max_phys_bits) != 0UL)
 			return false;

 		return pfn_valid(pa >> PAGE_SHIFT);
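The kern_addr_valid() hunk restores the intended test: (long)addr < 0 means
the address has its high bits set, so addr >> max_phys_bits is always nonzero
and the old check rejected every such address; it is the physical address that
must fit within max_phys_bits. A quick illustration (the values below are made
up; the real max_phys_bits is probed at boot):

	#include <stdio.h>

	int main(void)
	{
		const unsigned long max_phys_bits = 49;		 /* assumed */
		const unsigned long addr = 0xfffff80000400000UL; /* sample kernel VA */
		const unsigned long pa   = 0x0000000000400000UL; /* sample __pa(addr) */

		printf("addr >> max_phys_bits = %#lx  (never zero)\n",
		       addr >> max_phys_bits);
		printf("pa   >> max_phys_bits = %#lx  (the intended check)\n",
		       pa >> max_phys_bits);
		return 0;
	}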
+1
arch/sparc/mm/srmmu.c
···
 enum mbus_module srmmu_modtype;
 static unsigned int hwbug_bitmask;
 int vac_cache_size;
+EXPORT_SYMBOL(vac_cache_size);
 int vac_line_size;

 extern struct resource sparc_iomap;
+3 -3
arch/sparc/mm/tlb.c
···
 		if (pte_val(*pte) & _PAGE_VALID) {
 			bool exec = pte_exec(*pte);

-			tlb_batch_add_one(mm, vaddr, exec, false);
+			tlb_batch_add_one(mm, vaddr, exec, PAGE_SHIFT);
 		}
 		pte++;
 		vaddr += PAGE_SIZE;
···
 			pte_t orig_pte = __pte(pmd_val(orig));
 			bool exec = pte_exec(orig_pte);

-			tlb_batch_add_one(mm, addr, exec, true);
+			tlb_batch_add_one(mm, addr, exec, REAL_HPAGE_SHIFT);
 			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
-					  true);
+					  REAL_HPAGE_SHIFT);
 		} else {
 			tlb_batch_pmd_scan(mm, addr, orig);
 		}
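tlb_batch_add_one() now takes the page-size shift instead of a bool huge flag,
so the flush path knows the actual granularity. For THP mappings the shift
passed is REAL_HPAGE_SHIFT (22, i.e. 4MB) rather than HPAGE_SHIFT (23, i.e.
8MB), because each 8MB HPAGE is installed in hardware as REAL_HPAGE_PER_HPAGE
= 2 adjacent 4MB TTEs; that is also why the second call flushes
addr + REAL_HPAGE_SIZE.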
+2 -2
arch/sparc/mm/tsb.c
···

 	spin_lock_irqsave(&mm->context.lock, flags);

-	if (tb->hugepage_shift < HPAGE_SHIFT) {
+	if (tb->hugepage_shift < REAL_HPAGE_SHIFT) {
 		base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
 		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
···

 	spin_lock_irqsave(&mm->context.lock, flags);

-	if (hugepage_shift < HPAGE_SHIFT) {
+	if (hugepage_shift < REAL_HPAGE_SHIFT) {
 		base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
 		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
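With shifts now flowing through the flush path, these comparisons pick which
TSB to scrub: anything smaller than REAL_HPAGE_SHIFT (including 64KB
hugepages) lives in the base TSB, anything at or above it in the huge TSB.
The old < HPAGE_SHIFT test misclassified the REAL_HPAGE_SHIFT (4MB) flushes
that THP generates, sending them to the base TSB and leaving stale huge-TSB
entries behind, which appears to be the memory corruption the branch
description refers to.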