Merge branch 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm

Pull ARM fixes from Russell King:
- add the new bpf syscall to ARM.
- drop a redundant return statement in __iommu_alloc_remap()
- fix a performance issue noticed by Thomas Petazzoni with
  kmap_atomic().
- fix an issue with the L2 cache OF parsing code which caused it to
  incorrectly print warnings on each boot, and make the warning text
  more consistent with the rest of the code

* 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm:
ARM: 8180/1: mm: implement no-highmem fast path in kmap_atomic_pfn()
ARM: 8183/1: l2c: Improve l2c310_of_parse() error message
ARM: 8181/1: Drop extra return statement
ARM: 8182/1: l2c: Make l2x0_cache_size_of_parse() return 'int'
ARM: enable bpf syscall

Changed files (+23 -9):

arch/arm/include/uapi/asm/unistd.h (+1)
···
 #define __NR_seccomp			(__NR_SYSCALL_BASE+383)
 #define __NR_getrandom			(__NR_SYSCALL_BASE+384)
 #define __NR_memfd_create		(__NR_SYSCALL_BASE+385)
+#define __NR_bpf			(__NR_SYSCALL_BASE+386)
 
 /*
  * The following SWIs are ARM private.

arch/arm/kernel/calls.S (+1)
···
 		CALL(sys_seccomp)
 		CALL(sys_getrandom)
 /* 385 */	CALL(sys_memfd_create)
+		CALL(sys_bpf)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
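
With the number in unistd.h and the entry in the call table above, bpf(2) becomes reachable from ARM userspace. The program below is an illustrative sketch, not part of this pull: it loads a trivial "return 0" socket-filter program via the raw syscall number, using only the UAPI definitions from <linux/bpf.h>. It assumes installed kernel headers that already define __NR_bpf, and on kernels of this vintage loading typically requires CAP_SYS_ADMIN.

/*
 * Illustrative only: load a minimal "return 0" eBPF socket filter
 * through the newly wired-up bpf() syscall.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

int main(void)
{
	struct bpf_insn prog[] = {
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = 0, .imm = 0 },	/* r0 = 0 */
		{ .code = BPF_JMP | BPF_EXIT },						/* return r0 */
	};
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns = (__u64)(unsigned long)prog;
	attr.insn_cnt = sizeof(prog) / sizeof(prog[0]);
	attr.license = (__u64)(unsigned long)"GPL";

	/* __NR_bpf is the number added in unistd.h above */
	fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
	if (fd < 0)
		perror("bpf(BPF_PROG_LOAD)");
	else
		printf("BPF program loaded, fd=%d\n", fd);

	return fd < 0 ? 1 : 0;
}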

arch/arm/mm/cache-l2x0.c (+18 -8)
···
  * @associativity: variable to return the calculated associativity in
  * @max_way_size: the maximum size in bytes for the cache ways
  */
-static void __init l2x0_cache_size_of_parse(const struct device_node *np,
+static int __init l2x0_cache_size_of_parse(const struct device_node *np,
 					    u32 *aux_val, u32 *aux_mask,
 					    u32 *associativity,
 					    u32 max_way_size)
···
 	of_property_read_u32(np, "cache-line-size", &line_size);
 
 	if (!cache_size || !sets)
-		return;
+		return -ENODEV;
 
 	/* All these l2 caches have the same line = block size actually */
 	if (!line_size) {
···
 
 	if (way_size > max_way_size) {
 		pr_err("L2C OF: set size %dKB is too large\n", way_size);
-		return;
+		return -EINVAL;
 	}
 
 	pr_info("L2C OF: override cache size: %d bytes (%dKB)\n",
···
 	if (way_size_bits < 1 || way_size_bits > 6) {
 		pr_err("L2C OF: cache way size illegal: %dKB is not mapped\n",
 		       way_size);
-		return;
+		return -EINVAL;
 	}
 
 	mask |= L2C_AUX_CTRL_WAY_SIZE_MASK;
···
 	*aux_val &= ~mask;
 	*aux_val |= val;
 	*aux_mask &= ~mask;
+
+	return 0;
 }
 
 static void __init l2x0_of_parse(const struct device_node *np,
···
 	u32 dirty = 0;
 	u32 val = 0, mask = 0;
 	u32 assoc;
+	int ret;
 
 	of_property_read_u32(np, "arm,tag-latency", &tag);
 	if (tag) {
···
 		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
 	}
 
-	l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_256K);
+	ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_256K);
+	if (ret)
+		return;
+
 	if (assoc > 8) {
 		pr_err("l2x0 of: cache setting yield too high associativity\n");
 		pr_err("l2x0 of: %d calculated, max 8\n", assoc);
···
 	u32 tag[3] = { 0, 0, 0 };
 	u32 filter[2] = { 0, 0 };
 	u32 assoc;
+	int ret;
 
 	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
 	if (tag[0] && tag[1] && tag[2])
···
 			       l2x0_base + L310_ADDR_FILTER_START);
 	}
 
-	l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K);
+	ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K);
+	if (ret)
+		return;
+
 	switch (assoc) {
 	case 16:
 		*aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
···
 		*aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
 		break;
 	default:
-		pr_err("PL310 OF: cache setting yield illegal associativity\n");
-		pr_err("PL310 OF: %d calculated, only 8 and 16 legal\n", assoc);
+		pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n",
+		       assoc);
 		break;
 	}

arch/arm/mm/dma-mapping.c (-1)
···
 {
 	return dma_common_pages_remap(pages, size,
 			VM_ARM_DMA_CONSISTENT | VM_USERMAP, prot, caller);
-	return NULL;
 }
 
 /*

arch/arm/mm/highmem.c (+3)
···
 {
 	unsigned long vaddr;
 	int idx, type;
+	struct page *page = pfn_to_page(pfn);
 
 	pagefault_disable();
+	if (!PageHighMem(page))
+		return page_address(page);
 
 	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR * smp_processor_id();
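
For context on the kmap_atomic_pfn() change: a lowmem page already has a permanent kernel mapping, so the function can return page_address() directly instead of consuming a fixmap slot and doing a local TLB flush on every call. The sketch below shows a hypothetical caller pattern that benefits; the helper name and its use are illustrative, not taken from the patch. kunmap_atomic() already handles addresses outside the fixmap region, so the map/unmap pairing stays balanced.

/*
 * Illustrative caller, not part of the patch: copy data out of an
 * arbitrary pfn.  With the fast path above, lowmem pfns no longer go
 * through a fixmap slot plus TLB flush on every map/unmap cycle.
 */
#include <linux/highmem.h>
#include <linux/string.h>

static void copy_from_pfn(void *dst, unsigned long pfn, size_t len)
{
	void *src = kmap_atomic_pfn(pfn);	/* returns page_address() for lowmem */

	memcpy(dst, src, len);
	kunmap_atomic(src);			/* no fixmap teardown needed for lowmem */
}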