Merge branch 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm

Pull ARM fixes from Russell King:
- add the new bpf syscall to ARM
- drop a redundant return statement in __iommu_alloc_remap()
- fix a performance issue noticed by Thomas Petazzoni with
  kmap_atomic()
- fix an issue with the L2 cache OF parsing code which caused it to
  incorrectly print warnings on each boot, and make the warning text
  more consistent with the rest of the code

* 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm:
ARM: 8180/1: mm: implement no-highmem fast path in kmap_atomic_pfn()
ARM: 8183/1: l2c: Improve l2c310_of_parse() error message
ARM: 8181/1: Drop extra return statement
ARM: 8182/1: l2c: Make l2x0_cache_size_of_parse() return 'int'
ARM: enable bpf syscall

Changed files:

 arch/arm/include/uapi/asm/unistd.h |  1 +
 arch/arm/kernel/calls.S            |  1 +
 arch/arm/mm/cache-l2x0.c           | 26 ++++++++++++++++++--------
 arch/arm/mm/dma-mapping.c          |  1 -
 arch/arm/mm/highmem.c              |  3 +++
 5 files changed, 23 insertions(+), 9 deletions(-)
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -412,6 +412,7 @@
 #define __NR_seccomp (__NR_SYSCALL_BASE+383)
 #define __NR_getrandom (__NR_SYSCALL_BASE+384)
 #define __NR_memfd_create (__NR_SYSCALL_BASE+385)
+#define __NR_bpf (__NR_SYSCALL_BASE+386)
 
 /*
  * The following SWIs are ARM private.
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -395,6 +395,7 @@
 		CALL(sys_seccomp)
 		CALL(sys_getrandom)
 /* 385 */	CALL(sys_memfd_create)
+		CALL(sys_bpf)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
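
The new entry can be sanity-checked from userspace with a minimal probe like the sketch below. This is not part of the merge; the fallback definition of __NR_bpf mirrors the value added above, and passing an obviously invalid command avoids building a real BPF attribute: ENOSYS means the syscall is absent, anything else means it is wired up.

    /* Hypothetical userspace probe, not part of this merge: checks
     * whether bpf(2) is wired up. An invalid command is enough to
     * distinguish ENOSYS (missing) from EINVAL (present). */
    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    #ifndef __NR_bpf
    #define __NR_bpf 386	/* ARM EABI value introduced above */
    #endif

    int main(void)
    {
    	long ret = syscall(__NR_bpf, -1, NULL, 0);

    	if (ret == -1 && errno == ENOSYS)
    		printf("bpf(2) missing: %s\n", strerror(errno));
    	else
    		printf("bpf(2) present (ret=%ld, errno=%d)\n", ret, errno);
    	return 0;
    }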
+18 -8
arch/arm/mm/cache-l2x0.c
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -956,7 +956,7 @@
  * @associativity: variable to return the calculated associativity in
  * @max_way_size: the maximum size in bytes for the cache ways
  */
-static void __init l2x0_cache_size_of_parse(const struct device_node *np,
+static int __init l2x0_cache_size_of_parse(const struct device_node *np,
 					    u32 *aux_val, u32 *aux_mask,
 					    u32 *associativity,
 					    u32 max_way_size)
@@ -974,7 +974,7 @@
 	of_property_read_u32(np, "cache-line-size", &line_size);
 
 	if (!cache_size || !sets)
-		return;
+		return -ENODEV;
 
 	/* All these l2 caches have the same line = block size actually */
 	if (!line_size) {
@@ -1009,7 +1009,7 @@
 
 	if (way_size > max_way_size) {
 		pr_err("L2C OF: set size %dKB is too large\n", way_size);
-		return;
+		return -EINVAL;
 	}
 
 	pr_info("L2C OF: override cache size: %d bytes (%dKB)\n",
@@ -1027,7 +1027,7 @@
 	if (way_size_bits < 1 || way_size_bits > 6) {
 		pr_err("L2C OF: cache way size illegal: %dKB is not mapped\n",
 		       way_size);
-		return;
+		return -EINVAL;
 	}
 
 	mask |= L2C_AUX_CTRL_WAY_SIZE_MASK;
@@ -1036,6 +1036,8 @@
 	*aux_val &= ~mask;
 	*aux_val |= val;
 	*aux_mask &= ~mask;
+
+	return 0;
 }
 
 static void __init l2x0_of_parse(const struct device_node *np,
@@ -1048,6 +1050,7 @@
 	u32 dirty = 0;
 	u32 val = 0, mask = 0;
 	u32 assoc;
+	int ret;
 
 	of_property_read_u32(np, "arm,tag-latency", &tag);
 	if (tag) {
@@ -1071,7 +1074,10 @@
 		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
 	}
 
-	l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_256K);
+	ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_256K);
+	if (ret)
+		return;
+
 	if (assoc > 8) {
 		pr_err("l2x0 of: cache setting yield too high associativity\n");
 		pr_err("l2x0 of: %d calculated, max 8\n", assoc);
@@ -1131,6 +1137,7 @@
 	u32 tag[3] = { 0, 0, 0 };
 	u32 filter[2] = { 0, 0 };
 	u32 assoc;
+	int ret;
 
 	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
 	if (tag[0] && tag[1] && tag[2])
@@ -1159,7 +1166,10 @@
 			       l2x0_base + L310_ADDR_FILTER_START);
 	}
 
-	l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K);
+	ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K);
+	if (ret)
+		return;
+
 	switch (assoc) {
 	case 16:
 		*aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
@@ -1174,8 +1184,8 @@
 		*aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
 		break;
 	default:
-		pr_err("PL310 OF: cache setting yield illegal associativity\n");
-		pr_err("PL310 OF: %d calculated, only 8 and 16 legal\n", assoc);
+		pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n",
+		       assoc);
 		break;
 	}
 }
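
The practical effect of the int return: a silent early return previously left the caller's assoc variable uninitialized, so the associativity checks compared garbage against 8 or 16 and warned on every boot. A standalone sketch of the pattern, with parse_assoc() standing in for l2x0_cache_size_of_parse() (names are illustrative, not the kernel functions):

    /* Distinguishing "property absent" (-ENODEV) from "property
     * invalid" (-EINVAL) lets the caller bail out before it reads
     * the still-uninitialized result. */
    #include <errno.h>
    #include <stdio.h>

    static int parse_assoc(const unsigned *cache_size, unsigned *assoc)
    {
    	if (!cache_size)
    		return -ENODEV;	/* nothing to parse: not an error */
    	if (*cache_size == 0)
    		return -EINVAL;	/* present but malformed */
    	*assoc = 8;
    	return 0;
    }

    int main(void)
    {
    	unsigned assoc;		/* uninitialized, as in the caller */
    	int ret = parse_assoc(NULL, &assoc);

    	if (ret)		/* without this check, assoc is garbage */
    		return 0;
    	printf("assoc = %u\n", assoc);
    	return 0;
    }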
-1
arch/arm/mm/dma-mapping.c
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1198,7 +1198,6 @@
 {
 	return dma_common_pages_remap(pages, size,
 			VM_ARM_DMA_CONSISTENT | VM_USERMAP, prot, caller);
-	return NULL;
 }
 
 /*
+3
arch/arm/mm/highmem.c
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -127,8 +127,11 @@
 {
 	unsigned long vaddr;
 	int idx, type;
+	struct page *page = pfn_to_page(pfn);
 
 	pagefault_disable();
+	if (!PageHighMem(page))
+		return page_address(page);
 
 	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR * smp_processor_id();
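
For context on why the early return is a fast path: lowmem pages sit in the kernel's permanent linear mapping, so their virtual address can be computed directly, while only highmem pages need a temporary fixmap slot and the TLB maintenance that goes with it. A userspace-compilable model of that control flow, with invented names standing in for pfn_to_page()/PageHighMem()/page_address():

    /* Illustrative model only: LOWMEM_PFNS and linear_map are
     * stand-ins for the kernel's linear mapping of lowmem. Pages
     * below the boundary resolve immediately (the new fast path);
     * anything else would pay for a temporary mapping. */
    #include <stdio.h>

    #define LOWMEM_PFNS 1024UL
    static char linear_map[LOWMEM_PFNS][64];	/* stand-in "pages" */

    static void *map_pfn(unsigned long pfn)
    {
    	if (pfn < LOWMEM_PFNS)		/* fast path: already mapped */
    		return linear_map[pfn];
    	/* slow path: reserve a slot, set the PTE, flush the TLB */
    	printf("pfn %lu: temporary mapping needed\n", pfn);
    	return NULL;
    }

    int main(void)
    {
    	printf("pfn 10 -> %p (direct)\n", map_pfn(10));
    	map_pfn(5000);
    	return 0;
    }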