Merge branch 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull core fixes from Ingo Molnar:
"This fixes a particularly thorny munmap() bug with MPX, plus fixes a
host build environment assumption in objtool"

* 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
objtool: Allow AR to be overridden with HOSTAR
x86/mpx, mm/core: Fix recursive munmap() corruption

9 files changed, 27 insertions(+), 26 deletions(-)
arch/powerpc/include/asm/mmu_context.h (-1)

···
 extern void arch_exit_mmap(struct mm_struct *mm);
 
 static inline void arch_unmap(struct mm_struct *mm,
-			      struct vm_area_struct *vma,
 			      unsigned long start, unsigned long end)
 {
 	if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
···
arch/um/include/asm/mmu_context.h (-1)

···
 }
 extern void arch_exit_mmap(struct mm_struct *mm);
 static inline void arch_unmap(struct mm_struct *mm,
-			      struct vm_area_struct *vma,
 			      unsigned long start, unsigned long end)
 {
 }
···
arch/unicore32/include/asm/mmu_context.h (-1)

···
 }
 
 static inline void arch_unmap(struct mm_struct *mm,
-			      struct vm_area_struct *vma,
 			      unsigned long start, unsigned long end)
 {
 }
···
arch/x86/include/asm/mmu_context.h (+3 -3)

···
 	mpx_mm_init(mm);
 }
 
-static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
-			      unsigned long start, unsigned long end)
+static inline void arch_unmap(struct mm_struct *mm, unsigned long start,
+			      unsigned long end)
 {
 	/*
 	 * mpx_notify_unmap() goes and reads a rarely-hot
···
 	 * consistently wrong.
 	 */
 	if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
-		mpx_notify_unmap(mm, vma, start, end);
+		mpx_notify_unmap(mm, start, end);
 }
 
 /*
···
arch/x86/include/asm/mpx.h (+8 -7)

···
 };
 
 #ifdef CONFIG_X86_INTEL_MPX
-int mpx_fault_info(struct mpx_fault_info *info, struct pt_regs *regs);
-int mpx_handle_bd_fault(void);
+
+extern int mpx_fault_info(struct mpx_fault_info *info, struct pt_regs *regs);
+extern int mpx_handle_bd_fault(void);
+
 static inline int kernel_managing_mpx_tables(struct mm_struct *mm)
 {
 	return (mm->context.bd_addr != MPX_INVALID_BOUNDS_DIR);
 }
+
 static inline void mpx_mm_init(struct mm_struct *mm)
 {
 	/*
···
 	 */
 	mm->context.bd_addr = MPX_INVALID_BOUNDS_DIR;
 }
-void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
-		      unsigned long start, unsigned long end);
 
-unsigned long mpx_unmapped_area_check(unsigned long addr, unsigned long len,
-				      unsigned long flags);
+extern void mpx_notify_unmap(struct mm_struct *mm, unsigned long start, unsigned long end);
+extern unsigned long mpx_unmapped_area_check(unsigned long addr, unsigned long len, unsigned long flags);
+
 #else
 static inline int mpx_fault_info(struct mpx_fault_info *info, struct pt_regs *regs)
 {
···
 {
 }
 static inline void mpx_notify_unmap(struct mm_struct *mm,
-				    struct vm_area_struct *vma,
 				    unsigned long start, unsigned long end)
 {
 }
···
arch/x86/mm/mpx.c (+6 -4)

···
  * the virtual address region start...end have already been split if
  * necessary, and the 'vma' is the first vma in this range (start -> end).
  */
-void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
-		unsigned long start, unsigned long end)
+void mpx_notify_unmap(struct mm_struct *mm, unsigned long start,
+		      unsigned long end)
 {
+	struct vm_area_struct *vma;
 	int ret;
 
 	/*
···
 	 * which should not occur normally. Being strict about it here
 	 * helps ensure that we do not have an exploitable stack overflow.
 	 */
-	do {
+	vma = find_vma(mm, start);
+	while (vma && vma->vm_start < end) {
 		if (vma->vm_flags & VM_MPX)
 			return;
 		vma = vma->vm_next;
-	} while (vma && vma->vm_start < end);
+	}
 
 	ret = mpx_unmap_tables(mm, start, end);
 	if (ret)
···
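Two details of the rewritten mpx_notify_unmap() are worth calling out. Because __do_munmap() (below) now invokes arch_unmap() before it has even looked up a VMA, the caller can no longer pass in the first VMA of the range; the function re-derives it with find_vma(). And where the old do/while dereferenced vma before testing it, which was valid only under the caller-guarantees-a-VMA contract, the new find_vma()-plus-while shape also tolerates a range containing no VMAs at all. A minimal userspace sketch of that loop-shape difference (toy list type and addresses, not kernel code):

#include <stdio.h>

/* Toy stand-in for the sorted VMA list; only 'start' matters here. */
struct node {
	unsigned long start;
	struct node *next;
};

/* Rough analogue of find_vma(): first node at or above 'addr'. */
static struct node *find_first(struct node *head, unsigned long addr)
{
	while (head && head->start < addr)
		head = head->next;
	return head;
}

int main(void)
{
	struct node c = { 0x5000, NULL };
	struct node b = { 0x3000, &c };
	struct node a = { 0x1000, &b };
	struct node *n;

	/* Safe even when [0x6000, 0x7000) is empty: the body never runs,
	 * where a do/while would have dereferenced a stale cursor. */
	for (n = find_first(&a, 0x6000); n && n->start < 0x7000; n = n->next)
		printf("visit %#lx\n", n->start);

	/* Visits 0x3000 and 0x5000 for [0x2000, 0x6000). */
	for (n = find_first(&a, 0x2000); n && n->start < 0x6000; n = n->next)
		printf("visit %#lx\n", n->start);

	return 0;
}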
include/asm-generic/mm_hooks.h (-1)

···
 }
 
 static inline void arch_unmap(struct mm_struct *mm,
-			      struct vm_area_struct *vma,
 			      unsigned long start, unsigned long end)
 {
 }
···
mm/mmap.c (+8 -7)

···
 		return -EINVAL;
 
 	len = PAGE_ALIGN(len);
+	end = start + len;
 	if (len == 0)
 		return -EINVAL;
 
+	/*
+	 * arch_unmap() might do unmaps itself.  It must be called
+	 * and finish any rbtree manipulation before this code
+	 * runs and also starts to manipulate the rbtree.
+	 */
+	arch_unmap(mm, start, end);
+
 	/* Find the first overlapping VMA */
 	vma = find_vma(mm, start);
···
 	/* we have start < vma->vm_end */
 
 	/* if it doesn't overlap, we have nothing.. */
-	end = start + len;
 	if (vma->vm_start >= end)
 		return 0;
···
 	/* Detach vmas from rbtree */
 	detach_vmas_to_be_unmapped(mm, vma, prev, end);
-
-	/*
-	 * mpx unmap needs to be called with mmap_sem held for write.
-	 * It is safe to call it before unmap_region().
-	 */
-	arch_unmap(mm, vma, start, end);
 
 	if (downgrade)
 		downgrade_write(&mm->mmap_sem);
···
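This hunk is the heart of the MPX fix. mpx_notify_unmap() can perform unmaps of its own to free bounds tables, so calling arch_unmap() after detach_vmas_to_be_unmapped() allowed a nested munmap() to walk and modify an rbtree that was already mid-surgery; hoisting the call to before any rbtree manipulation lets a recursive unmap run to completion against a consistent tree. A toy userspace model of that ordering rule, with a sorted linked list standing in for the rbtree and purely hypothetical addresses:

#include <stdio.h>
#include <stdlib.h>

struct vma {
	unsigned long start, end;
	struct vma *next;
};

static struct vma *mm;		/* head of the mock address space */

static void do_munmap(unsigned long start, unsigned long end);

/* Stand-in for arch_unmap(): on the pretend "MPX" path it unmaps a
 * second range (the bounds table backing the first). */
static void arch_unmap_hook(unsigned long start, unsigned long end)
{
	if (start == 0x1000)			/* pretend 0x1000 has a bounds table */
		do_munmap(0x8000, 0x9000);	/* nested unmap runs to completion */
}

static void do_munmap(unsigned long start, unsigned long end)
{
	struct vma **pp = &mm;

	/* The fix: let the hook finish all of its own unmapping before we
	 * touch the list, so no nested call ever sees it half-modified. */
	arch_unmap_hook(start, end);

	while (*pp) {
		struct vma *v = *pp;

		if (v->start < end && start < v->end) {
			*pp = v->next;		/* detach */
			printf("unmapped [%#lx, %#lx)\n", v->start, v->end);
			free(v);
		} else {
			pp = &v->next;
		}
	}
}

static void map(unsigned long start, unsigned long end)
{
	struct vma *v = malloc(sizeof(*v));
	struct vma **pp = &mm;

	v->start = start;
	v->end = end;
	while (*pp && (*pp)->start < start)	/* keep the list sorted */
		pp = &(*pp)->next;
	v->next = *pp;
	*pp = v;
}

int main(void)
{
	map(0x1000, 0x2000);	/* "application" mapping */
	map(0x8000, 0x9000);	/* its mock bounds table */
	do_munmap(0x1000, 0x2000);
	return 0;
}

The arch_unmap() signature change threaded through the headers above falls out of the same move: at the new call site no VMA has been looked up yet, so there is no vma pointer to hand down.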
tools/objtool/Makefile (+2 -1)

···
 endif
 
 # always use the host compiler
+HOSTAR ?= ar
 HOSTCC ?= gcc
 HOSTLD ?= ld
+AR = $(HOSTAR)
 CC = $(HOSTCC)
 LD = $(HOSTLD)
-AR = ar
 
 ifeq ($(srctree),)
 srctree := $(patsubst %/,%,$(dir $(CURDIR)))
···
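objtool runs on the build host even during a cross-compile, which is why its Makefile selects host tools; AR was the one holdout hard-coded to ar while HOSTCC and HOSTLD were already overridable. With HOSTAR, a host environment whose archiver is not plain ar can override it the same way, for example (toolchain names purely illustrative):

    make HOSTAR=llvm-ar HOSTCC=clang HOSTLD=ld.lld

And because the assignment uses ?=, a HOSTAR already set in the environment is honored without any command-line override.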