Merge tag 'arm64-stable' of git://git.kernel.org/pub/scm/linux/kernel/git/cmarinas/linux-aarch64

Pull ARM64 fixes from Catalin Marinas:
- Remove preempt_count modifications in the arm64 IRQ handling code
since that's already dealt with in generic irq_enter/irq_exit
- PTE_PROT_NONE bit moved higher up to avoid overlapping with the
hardware bits (for PROT_NONE mappings which are pte_present)
- Big-endian fixes for ptrace support
- Unmasking of asynchronous aborts while running in the kernel
- pgprot_writecombine() change to create Normal NonCacheable memory
rather than Device GRE

* tag 'arm64-stable' of git://git.kernel.org/pub/scm/linux/kernel/git/cmarinas/linux-aarch64:
arm64: Move PTE_PROT_NONE higher up
arm64: Use Normal NonCacheable memory for writecombine
arm64: debug: make aarch32 bkpt checking endian clean
arm64: ptrace: fix compat registers get/set to be endian clean
arm64: Unmask asynchronous aborts when in kernel mode
arm64: dts: Reserve the memory used for secondary CPU release address
arm64: let the core code deal with preempt_count

Changed files
+67 -66
+2
arch/arm64/boot/dts/foundation-v8.dts
···
 
 /dts-v1/;
 
+/memreserve/ 0x80000000 0x00010000;
+
 / {
 	model = "Foundation-v8A";
 	compatible = "arm,foundation-aarch64", "arm,vexpress";
+3
arch/arm64/include/asm/irqflags.h
···
 #define local_fiq_enable()	asm("msr	daifclr, #1" : : : "memory")
 #define local_fiq_disable()	asm("msr	daifset, #1" : : : "memory")
 
+#define local_async_enable()	asm("msr	daifclr, #4" : : : "memory")
+#define local_async_disable()	asm("msr	daifset, #4" : : : "memory")
+
 /*
  * Save the current interrupt enable state.
  */
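For background (not part of the patch): the immediate in "msr daifset/daifclr" is a 4-bit mask over the PSTATE.{D,A,I,F} exception-mask bits, which is why #1 above toggles FIQs and #4 toggles asynchronous aborts (SError). A sketch of the mapping as C constants; the names here are made up for illustration:

  /* DAIFSet/DAIFClr immediate bit positions (AArch64). */
  #define DAIF_BIT_FIQ  (1 << 0)  /* F: FIQ mask           -> msr daif{set,clr}, #1 */
  #define DAIF_BIT_IRQ  (1 << 1)  /* I: IRQ mask           -> msr daif{set,clr}, #2 */
  #define DAIF_BIT_ABT  (1 << 2)  /* A: SError/async abort -> msr daif{set,clr}, #4 */
  #define DAIF_BIT_DBG  (1 << 3)  /* D: debug exceptions   -> msr daif{set,clr}, #8 */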
+18 -15
arch/arm64/include/asm/pgtable.h
···
  * Software defined PTE bits definition.
  */
 #define PTE_VALID		(_AT(pteval_t, 1) << 0)
-#define PTE_PROT_NONE		(_AT(pteval_t, 1) << 2)	/* only when !PTE_VALID */
-#define PTE_FILE		(_AT(pteval_t, 1) << 3)	/* only when !pte_present() */
+#define PTE_FILE		(_AT(pteval_t, 1) << 2)	/* only when !pte_present() */
 #define PTE_DIRTY		(_AT(pteval_t, 1) << 55)
 #define PTE_SPECIAL		(_AT(pteval_t, 1) << 56)
+/* bit 57 for PMD_SECT_SPLITTING */
+#define PTE_PROT_NONE		(_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */
 
 /*
  * VMALLOC and SPARSEMEM_VMEMMAP ranges.
···
 #define pgprot_noncached(prot) \
 	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE))
 #define pgprot_writecombine(prot) \
-	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_GRE))
+	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
 #define pgprot_dmacoherent(prot) \
 	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
 #define __HAVE_PHYS_MEM_ACCESS_PROT
···
 
 /*
  * Encode and decode a swap entry:
- *	bits 0, 2:	present (must both be zero)
- *	bit 3:		PTE_FILE
- *	bits 4-8:	swap type
- *	bits 9-63:	swap offset
+ *	bits 0-1:	present (must be zero)
+ *	bit 2:		PTE_FILE
+ *	bits 3-8:	swap type
+ *	bits 9-57:	swap offset
  */
-#define __SWP_TYPE_SHIFT	4
+#define __SWP_TYPE_SHIFT	3
 #define __SWP_TYPE_BITS		6
+#define __SWP_OFFSET_BITS	49
 #define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
 #define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
+#define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)
 
 #define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
-#define __swp_offset(x)		((x).val >> __SWP_OFFSET_SHIFT)
+#define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
 #define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
 
 #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
···
 
 /*
  * Encode and decode a file entry:
- *	bits 0, 2:	present (must both be zero)
- *	bit 3:		PTE_FILE
- *	bits 4-63:	file offset / PAGE_SIZE
+ *	bits 0-1:	present (must be zero)
+ *	bit 2:		PTE_FILE
+ *	bits 3-57:	file offset / PAGE_SIZE
  */
 #define pte_file(pte)		(pte_val(pte) & PTE_FILE)
-#define pte_to_pgoff(x)		(pte_val(x) >> 4)
-#define pgoff_to_pte(x)		__pte(((x) << 4) | PTE_FILE)
+#define pte_to_pgoff(x)		(pte_val(x) >> 3)
+#define pgoff_to_pte(x)		__pte(((x) << 3) | PTE_FILE)
 
-#define PTE_FILE_MAX_BITS	60
+#define PTE_FILE_MAX_BITS	55
 
 extern int kern_addr_valid(unsigned long addr);
 
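A standalone userspace sketch of the new layout (illustration only, not kernel code; the macro and function names below mirror the header but are local to the example). It checks that type and offset round-trip through the new shifts and that the encoding leaves bit 0 (PTE_VALID) and the relocated bit 58 (PTE_PROT_NONE) clear:

  #include <assert.h>
  #include <stdint.h>
  #include <stdio.h>

  #define SWP_TYPE_SHIFT    3
  #define SWP_TYPE_BITS     6
  #define SWP_OFFSET_BITS   49
  #define SWP_TYPE_MASK     ((1 << SWP_TYPE_BITS) - 1)
  #define SWP_OFFSET_SHIFT  (SWP_TYPE_BITS + SWP_TYPE_SHIFT)
  #define SWP_OFFSET_MASK   ((1UL << SWP_OFFSET_BITS) - 1)

  /* Mirror of __swp_entry(): pack swap type and offset into a pte value. */
  static uint64_t swp_entry(unsigned int type, uint64_t offset)
  {
          return ((uint64_t)type << SWP_TYPE_SHIFT) | (offset << SWP_OFFSET_SHIFT);
  }

  int main(void)
  {
          uint64_t e = swp_entry(5, 0x123456789abUL);

          /* Type and offset must round-trip through the new masks... */
          assert(((e >> SWP_TYPE_SHIFT) & SWP_TYPE_MASK) == 5);
          assert(((e >> SWP_OFFSET_SHIFT) & SWP_OFFSET_MASK) == 0x123456789abUL);
          /* ...and must leave PTE_VALID (bit 0) and PTE_PROT_NONE (bit 58) clear. */
          assert(!(e & 1) && !(e & (1UL << 58)));
          printf("swap entry layout round-trips\n");
          return 0;
  }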
+12 -8
arch/arm64/kernel/debug-monitors.c
···
 int aarch32_break_handler(struct pt_regs *regs)
 {
 	siginfo_t info;
-	unsigned int instr;
+	u32 arm_instr;
+	u16 thumb_instr;
 	bool bp = false;
 	void __user *pc = (void __user *)instruction_pointer(regs);
 
···
 
 	if (compat_thumb_mode(regs)) {
 		/* get 16-bit Thumb instruction */
-		get_user(instr, (u16 __user *)pc);
-		if (instr == AARCH32_BREAK_THUMB2_LO) {
+		get_user(thumb_instr, (u16 __user *)pc);
+		thumb_instr = le16_to_cpu(thumb_instr);
+		if (thumb_instr == AARCH32_BREAK_THUMB2_LO) {
 			/* get second half of 32-bit Thumb-2 instruction */
-			get_user(instr, (u16 __user *)(pc + 2));
-			bp = instr == AARCH32_BREAK_THUMB2_HI;
+			get_user(thumb_instr, (u16 __user *)(pc + 2));
+			thumb_instr = le16_to_cpu(thumb_instr);
+			bp = thumb_instr == AARCH32_BREAK_THUMB2_HI;
 		} else {
-			bp = instr == AARCH32_BREAK_THUMB;
+			bp = thumb_instr == AARCH32_BREAK_THUMB;
 		}
 	} else {
 		/* 32-bit ARM instruction */
-		get_user(instr, (u32 __user *)pc);
-		bp = (instr & ~0xf0000000) == AARCH32_BREAK_ARM;
+		get_user(arm_instr, (u32 __user *)pc);
+		arm_instr = le32_to_cpu(arm_instr);
+		bp = (arm_instr & ~0xf0000000) == AARCH32_BREAK_ARM;
 	}
 
 	if (!bp)
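Background on the swabs: A32/T32 instructions are always stored little-endian in memory, while get_user() performs a native-endian load, so a big-endian kernel sees the fetched halfword byte-swapped. A userspace sketch of the same idea (the helper name and the 0xde01 value, matching the AARCH32_BREAK_THUMB encoding checked above, are assumptions of this example):

  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  /* Stand-in for le16_to_cpu(): byte swap on BE hosts, no-op on LE hosts. */
  static uint16_t le16_to_cpu_sketch(uint16_t v)
  {
  #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
          return (uint16_t)((v >> 8) | (v << 8));
  #else
          return v;
  #endif
  }

  int main(void)
  {
          /* Breakpoint instruction bytes as they sit in memory (always LE). */
          const unsigned char mem[2] = { 0x01, 0xde };
          uint16_t raw;

          memcpy(&raw, mem, sizeof(raw));  /* native-endian load, like get_user() */
          printf("decoded: %#x\n", le16_to_cpu_sketch(raw));  /* 0xde01 on any host */
          return 0;
  }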
+7 -22
arch/arm64/kernel/entry.S
···
 #ifdef CONFIG_TRACE_IRQFLAGS
 	bl	trace_hardirqs_off
 #endif
+
+	irq_handler
+
 #ifdef CONFIG_PREEMPT
 	get_thread_info tsk
-	ldr	w24, [tsk, #TI_PREEMPT]		// get preempt count
-	add	w0, w24, #1			// increment it
-	str	w0, [tsk, #TI_PREEMPT]
-#endif
-	irq_handler
-#ifdef CONFIG_PREEMPT
-	str	w24, [tsk, #TI_PREEMPT]		// restore preempt count
+	ldr	w24, [tsk, #TI_PREEMPT]		// get preempt count
 	cbnz	w24, 1f				// preempt count != 0
 	ldr	x0, [tsk, #TI_FLAGS]		// get flags
 	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
···
 #ifdef CONFIG_TRACE_IRQFLAGS
 	bl	trace_hardirqs_off
 #endif
-	get_thread_info tsk
-#ifdef CONFIG_PREEMPT
-	ldr	w24, [tsk, #TI_PREEMPT]		// get preempt count
-	add	w23, w24, #1			// increment it
-	str	w23, [tsk, #TI_PREEMPT]
-#endif
+
 	irq_handler
-#ifdef CONFIG_PREEMPT
-	ldr	w0, [tsk, #TI_PREEMPT]
-	str	w24, [tsk, #TI_PREEMPT]
-	cmp	w0, w23
-	b.eq	1f
-	mov	x1, #0
-	str	x1, [x1]			// BUG
-1:
-#endif
+	get_thread_info tsk
+
 #ifdef CONFIG_TRACE_IRQFLAGS
 	bl	trace_hardirqs_on
 #endif
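What makes the hand-rolled accounting removable (a model, not kernel code): the generic core already brackets every handler with irq_enter()/irq_exit(), which add and remove HARDIRQ_OFFSET on preempt_count themselves, so the assembly only needs to read the final count before its TIF_NEED_RESCHED check. A self-contained sketch of that flow:

  #include <stdio.h>

  #define HARDIRQ_OFFSET  (1 << 16)        /* hardirq field used by the core */

  static int preempt_count;

  static void irq_enter(void) { preempt_count += HARDIRQ_OFFSET; }
  static void irq_exit(void)  { preempt_count -= HARDIRQ_OFFSET; }

  /* Shape of the generic path that irq_handler now relies on. */
  static void handle_irq(void)
  {
          irq_enter();
          /* ... dispatch the interrupt ... */
          irq_exit();
  }

  int main(void)
  {
          handle_irq();
          /* Back at the el1_irq tail only a load is needed; a second
           * add/str pair in entry.S would just double-count. */
          printf("preempt_count after IRQ: %d\n", preempt_count);
          return 0;
  }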
+19 -21
arch/arm64/kernel/ptrace.c
···
 
 	for (i = 0; i < num_regs; ++i) {
 		unsigned int idx = start + i;
-		void *reg;
+		compat_ulong_t reg;
 
 		switch (idx) {
 		case 15:
-			reg = (void *)&task_pt_regs(target)->pc;
+			reg = task_pt_regs(target)->pc;
 			break;
 		case 16:
-			reg = (void *)&task_pt_regs(target)->pstate;
+			reg = task_pt_regs(target)->pstate;
 			break;
 		case 17:
-			reg = (void *)&task_pt_regs(target)->orig_x0;
+			reg = task_pt_regs(target)->orig_x0;
 			break;
 		default:
-			reg = (void *)&task_pt_regs(target)->regs[idx];
+			reg = task_pt_regs(target)->regs[idx];
 		}
 
-		ret = copy_to_user(ubuf, reg, sizeof(compat_ulong_t));
-
+		ret = copy_to_user(ubuf, &reg, sizeof(reg));
 		if (ret)
 			break;
-		else
-			ubuf += sizeof(compat_ulong_t);
+
+		ubuf += sizeof(reg);
 	}
 
 	return ret;
···
 
 	for (i = 0; i < num_regs; ++i) {
 		unsigned int idx = start + i;
-		void *reg;
+		compat_ulong_t reg;
+
+		ret = copy_from_user(&reg, ubuf, sizeof(reg));
+		if (ret)
+			return ret;
+
+		ubuf += sizeof(reg);
 
 		switch (idx) {
 		case 15:
-			reg = (void *)&newregs.pc;
+			newregs.pc = reg;
 			break;
 		case 16:
-			reg = (void *)&newregs.pstate;
+			newregs.pstate = reg;
 			break;
 		case 17:
-			reg = (void *)&newregs.orig_x0;
+			newregs.orig_x0 = reg;
 			break;
 		default:
-			reg = (void *)&newregs.regs[idx];
+			newregs.regs[idx] = reg;
 		}
 
-		ret = copy_from_user(reg, ubuf, sizeof(compat_ulong_t));
-
-		if (ret)
-			goto out;
-		else
-			ubuf += sizeof(compat_ulong_t);
 	}
 
 	if (valid_user_regs(&newregs.user_regs))
···
 	else
 		ret = -EINVAL;
 
-out:
 	return ret;
 }
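The bug in miniature (userspace sketch with a made-up value): copying sizeof(compat_ulong_t) bytes from the address of a 64-bit pt_regs field hands back the wrong half on a big-endian kernel, whereas narrowing by value is endian clean:

  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
          uint64_t x0 = 0xdeadbeefULL;    /* stands in for a 64-bit pt_regs slot */
          uint32_t out;

          /* Old pattern: raw byte copy from &x0 picks up the *high* word
           * on a big-endian kernel (there it would print 0). */
          memcpy(&out, &x0, sizeof(out));
          printf("byte copy:  %#x\n", out);

          /* New pattern: narrow by value, correct on either endianness. */
          out = (uint32_t)x0;
          printf("value copy: %#x\n", out);
          return 0;
  }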
+5
arch/arm64/kernel/setup.c
···
 
 void __init setup_arch(char **cmdline_p)
 {
+	/*
+	 * Unmask asynchronous aborts early to catch possible system errors.
+	 */
+	local_async_enable();
+
 	setup_processor();
 
 	setup_machine_fdt(__fdt_pointer);
+1
arch/arm64/kernel/smp.c
···
 
 	local_irq_enable();
 	local_fiq_enable();
+	local_async_enable();
 
 	/*
 	 * OK, it's off to the idle thread for us