Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'fixes' of git://git.linaro.org/people/rmk/linux-arm

Pull two ARM fixes from Russell King:
"It's been fairly quiet with the fixes. Just two this time. One fixes
a long-standing problem with KALLSYMS needing an additional pass, and
the other sorts a problem with the vmalloc space interacting with
static IO mappings."

* 'fixes' of git://git.linaro.org/people/rmk/linux-arm:
ARM: 7438/1: fill possible PMD empty section gaps
ARM: 7428/1: Prevent KALLSYM size mismatch on ARM.
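
The PMD fix (7438/1) hinges on the classic 2-level ARM page-table geometry:
a Linux PMD spans 2MB but is built from two consecutive 1MB hardware section
entries, so create_mapping() can populate one half of a PMD and leave the
other half looking free. A standalone sketch of the hazard (the constants
mirror the 2-level ARM values; the mapping address is invented):

#include <stdio.h>

#define SECTION_SIZE	0x00100000UL		/* 1MB hardware section */
#define PMD_SIZE	(2 * SECTION_SIZE)	/* one Linux PMD = two sections */
#define PMD_MASK	(~(PMD_SIZE - 1))

int main(void)
{
	/* invented 1MB static mapping starting on an odd 1MB boundary */
	unsigned long map = 0xf8100000UL;
	unsigned long pmd_base = map & PMD_MASK;

	printf("PMD covers     %#lx..%#lx\n", pmd_base, pmd_base + PMD_SIZE - 1);
	printf("mapping uses   %#lx..%#lx\n", map, map + SECTION_SIZE - 1);
	printf("half left free %#lx..%#lx\n", pmd_base, pmd_base + SECTION_SIZE - 1);
	return 0;
}

A later ioremap() or vmalloc() could claim that apparently free 1MB and try
to install page-table mappings in a PMD already carrying a section entry,
which is the corruption the dummy vm entries in the patch below guard against.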

 arch/arm/kernel/vmlinux.lds.S |  2 ++
 arch/arm/mm/mmu.c             | 74 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 76 insertions(+)

diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -183,7 +183,9 @@
 	}
 #endif
 
+#ifdef CONFIG_SMP
 	PERCPU_SECTION(L1_CACHE_BYTES)
+#endif
 
 #ifdef CONFIG_XIP_KERNEL
 	__data_loc = ALIGN(4);		/* location in binary */
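
The KALLSYMS fix is small but subtle. PERCPU_SECTION unconditionally emits a
.data..percpu output section, yet on uniprocessor builds per-CPU variables
live in plain .data, so the section stays empty; its leftover alignment
could end up sized differently between the extra link passes that kallsyms
requires, tripping the long-standing size mismatch. A conceptual sketch of
the SMP/UP split (simplified stand-ins, not the kernel's actual
percpu-defs.h definitions):

/* With CONFIG_SMP, per-CPU variables are grouped in .data..percpu so each
 * CPU can get its own copy at boot; on UP a single copy in plain .data
 * suffices and no dedicated output section is needed at all. */
#ifdef CONFIG_SMP
#define DEFINE_PER_CPU(type, name) \
	__attribute__((section(".data..percpu"))) type name
#else
#define DEFINE_PER_CPU(type, name) type name
#endif

DEFINE_PER_CPU(unsigned long, example_counter);	/* invented variable */

Guarding PERCPU_SECTION with CONFIG_SMP therefore drops the empty section
entirely instead of leaving its padding to perturb the symbol layout.
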
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -791,6 +791,79 @@
 	}
 }
 
+#ifndef CONFIG_ARM_LPAE
+
+/*
+ * The Linux PMD is made of two consecutive section entries covering 2MB
+ * (see definition in include/asm/pgtable-2level.h). However a call to
+ * create_mapping() may optimize static mappings by using individual
+ * 1MB section mappings. This leaves the actual PMD potentially half
+ * initialized if the top or bottom section entry isn't used, leaving it
+ * open to problems if a subsequent ioremap() or vmalloc() tries to use
+ * the virtual space left free by that unused section entry.
+ *
+ * Let's avoid the issue by inserting dummy vm entries covering the unused
+ * PMD halves once the static mappings are in place.
+ */
+
+static void __init pmd_empty_section_gap(unsigned long addr)
+{
+	struct vm_struct *vm;
+
+	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
+	vm->addr = (void *)addr;
+	vm->size = SECTION_SIZE;
+	vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+	vm->caller = pmd_empty_section_gap;
+	vm_area_add_early(vm);
+}
+
+static void __init fill_pmd_gaps(void)
+{
+	struct vm_struct *vm;
+	unsigned long addr, next = 0;
+	pmd_t *pmd;
+
+	/* we're still single threaded hence no lock needed here */
+	for (vm = vmlist; vm; vm = vm->next) {
+		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+			continue;
+		addr = (unsigned long)vm->addr;
+		if (addr < next)
+			continue;
+
+		/*
+		 * Check if this vm starts on an odd section boundary.
+		 * If so and the first section entry for this PMD is free
+		 * then we block the corresponding virtual address.
+		 */
+		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
+			pmd = pmd_off_k(addr);
+			if (pmd_none(*pmd))
+				pmd_empty_section_gap(addr & PMD_MASK);
+		}
+
+		/*
+		 * Then check if this vm ends on an odd section boundary.
+		 * If so and the second section entry for this PMD is empty
+		 * then we block the corresponding virtual address.
+		 */
+		addr += vm->size;
+		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
+			pmd = pmd_off_k(addr) + 1;
+			if (pmd_none(*pmd))
+				pmd_empty_section_gap(addr);
+		}
+
+		/* no need to look at any vm entry until we hit the next PMD */
+		next = (addr + PMD_SIZE - 1) & PMD_MASK;
+	}
+}
+
+#else
+#define fill_pmd_gaps() do { } while (0)
+#endif
+
 static void * __initdata vmalloc_min =
 	(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
 
@@ -1072,6 +1145,7 @@
 	 */
 	if (mdesc->map_io)
 		mdesc->map_io();
+	fill_pmd_gaps();
 
 	/*
 	 * Finally flush the caches and tlb to ensure that we're in a
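
To see what the two boundary tests in fill_pmd_gaps() do, here is the same
arithmetic pulled out into a standalone user-space sketch (SECTION_SIZE,
PMD_SIZE and PMD_MASK are local stand-ins for the kernel definitions, and
both sample mappings are invented):

#include <stdio.h>

#define SECTION_SIZE	0x00100000UL
#define PMD_SIZE	(2 * SECTION_SIZE)
#define PMD_MASK	(~(PMD_SIZE - 1))

/* same predicates as fill_pmd_gaps(), applied to one mapping */
static void check(unsigned long start, unsigned long size)
{
	unsigned long end = start + size;

	/* starts on an odd section boundary: lower PMD half is unused */
	if ((start & ~PMD_MASK) == SECTION_SIZE)
		printf("reserve %#lx (gap below mapping)\n", start & PMD_MASK);

	/* ends on an odd section boundary: upper PMD half is unused */
	if ((end & ~PMD_MASK) == SECTION_SIZE)
		printf("reserve %#lx (gap above mapping)\n", end);
}

int main(void)
{
	check(0xf8100000UL, SECTION_SIZE);	/* odd start, even end */
	check(0xfe000000UL, SECTION_SIZE);	/* even start, odd end */
	return 0;
}

The first mapping triggers the "gap below" case, so a dummy vm entry blocks
the lower half of its PMD; the second triggers the "gap above" case. In the
kernel these reservations are registered with vm_area_add_early() as part of
devicemaps_init(), right after mdesc->map_io(), so vmalloc() and ioremap()
are never handed either half-PMD hole.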