Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'locks-3.20' of git://git.samba.org/jlayton/linux into for-3.20

Christoph's block pnfs patches have some minor dependencies on these
lock patches.

+3755 -2123
+1
.mailmap
···
 Greg Kroah-Hartman <greg@kroah.com>
 Henk Vergonet <Henk.Vergonet@gmail.com>
 Henrik Kretzschmar <henne@nachtwindheim.de>
+Henrik Rydberg <rydberg@bitmath.org>
 Herbert Xu <herbert@gondor.apana.org.au>
 Jacob Shin <Jacob.Shin@amd.com>
 James Bottomley <jejb@mulgrave.(none)>
+2
Documentation/networking/ip-sysctl.txt
···
 route/max_size - INTEGER
     Maximum number of routes allowed in the kernel. Increase
     this when using large numbers of interfaces and/or routes.
+    From linux kernel 3.6 onwards, this is deprecated for ipv4
+    as route cache is no longer used.
 
 neigh/default/gc_thresh1 - INTEGER
     Minimum number of entries to keep. Garbage collector will not
+12 -37
Documentation/target/tcm_mod_builder.py
···
     buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
     buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
     buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
-    buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
-    buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
-    buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
     buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
     buf += " .sess_get_initiator_sid = NULL,\n"
     buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
···
     buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
     buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
     buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
-    buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
+    buf += " .aborted_task = " + fabric_mod_name + "_aborted_task,\n"
     buf += " /*\n"
     buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
     buf += " */\n"
···
     buf += " /*\n"
     buf += " * Register the top level struct config_item_type with TCM core\n"
     buf += " */\n"
-    buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
+    buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name + "\");\n"
     buf += " if (IS_ERR(fabric)) {\n"
     buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
     buf += " return PTR_ERR(fabric);\n"
···
     if re.search('get_fabric_name', fo):
         buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
         buf += "{\n"
-        buf += " return \"" + fabric_mod_name[4:] + "\";\n"
+        buf += " return \"" + fabric_mod_name + "\";\n"
         buf += "}\n\n"
         bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
         continue
···
         buf += "}\n\n"
         bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
 
-    if re.search('stop_session\)\(', fo):
-        buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
-        buf += "{\n"
-        buf += " return;\n"
-        buf += "}\n\n"
-        bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
-
-    if re.search('fall_back_to_erl0\)\(', fo):
-        buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
-        buf += "{\n"
-        buf += " return;\n"
-        buf += "}\n\n"
-        bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
-
-    if re.search('sess_logged_in\)\(', fo):
-        buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
-        buf += "{\n"
-        buf += " return 0;\n"
-        buf += "}\n\n"
-        bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
-
     if re.search('sess_get_index\)\(', fo):
         buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
         buf += "{\n"
···
         bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
 
     if re.search('queue_tm_rsp\)\(', fo):
-        buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
+        buf += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
         buf += "{\n"
-        buf += " return 0;\n"
+        buf += " return;\n"
         buf += "}\n\n"
-        bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
+        bufi += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
 
-    if re.search('is_state_remove\)\(', fo):
-        buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
+    if re.search('aborted_task\)\(', fo):
+        buf += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *se_cmd)\n"
         buf += "{\n"
-        buf += " return 0;\n"
+        buf += " return;\n"
         buf += "}\n\n"
-        bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
-
+        bufi += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *);\n"
 
     ret = p.write(buf)
     if ret:
···
     tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
     tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
 
-    input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
+    input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Makefile..? [yes,no]: ")
     if input == "yes" or input == "y":
         tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
 
-    input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
+    input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Kconfig..? [yes,no]: ")
     if input == "yes" or input == "y":
         tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
 
+13 -2
Documentation/thermal/cpu-cooling-api.txt
···
 
 Written by Amit Daniel Kachhap <amit.kachhap@linaro.org>
 
-Updated: 12 May 2012
+Updated: 6 Jan 2015
 
 Copyright (c) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
 
···
 
 clip_cpus: cpumask of cpus where the frequency constraints will happen.
 
-1.1.2 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
+1.1.2 struct thermal_cooling_device *of_cpufreq_cooling_register(
+	struct device_node *np, const struct cpumask *clip_cpus)
+
+This interface function registers the cpufreq cooling device with
+the name "thermal-cpufreq-%x" linking it with a device tree node, in
+order to bind it via the thermal DT code. This api can support multiple
+instances of cpufreq cooling devices.
+
+np: pointer to the cooling device device tree node
+clip_cpus: cpumask of cpus where the frequency constraints will happen.
+
+1.1.3 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
 
 This interface function unregisters the "thermal-cpufreq-%x" cooling device.
 
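[Editor's sketch] The DT-aware registration path documented above is typically called from a driver's probe routine. Below is a minimal, hedged usage sketch; the wrapper names, the choice of cpu_present_mask, and the surrounding driver context are illustrative assumptions, not part of this commit. Only of_cpufreq_cooling_register() and cpufreq_cooling_unregister() come from the documented API:

    /* Illustrative only: register/unregister a DT-bound cpufreq cooling device. */
    #include <linux/cpu_cooling.h>
    #include <linux/cpumask.h>
    #include <linux/err.h>
    #include <linux/of.h>

    static struct thermal_cooling_device *example_cdev; /* hypothetical driver state */

    /* np: device tree node of the CPU whose frequency will be clipped */
    static int example_register_cpu_cooling(struct device_node *np)
    {
        example_cdev = of_cpufreq_cooling_register(np, cpu_present_mask);
        if (IS_ERR(example_cdev))
            return PTR_ERR(example_cdev); /* e.g. -ENOMEM, or probe-defer in later kernels */
        return 0;
    }

    static void example_unregister_cpu_cooling(void)
    {
        cpufreq_cooling_unregister(example_cdev);
    }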
+19 -8
MAINTAINERS
···
 F: drivers/char/apm-emulation.c
 
 APPLE BCM5974 MULTITOUCH DRIVER
-M: Henrik Rydberg <rydberg@euromail.se>
+M: Henrik Rydberg <rydberg@bitmath.org>
 L: linux-input@vger.kernel.org
-S: Maintained
+S: Odd fixes
 F: drivers/input/mouse/bcm5974.c
 
 APPLE SMC DRIVER
-M: Henrik Rydberg <rydberg@euromail.se>
+M: Henrik Rydberg <rydberg@bitmath.org>
 L: lm-sensors@lm-sensors.org
-S: Maintained
+S: Odd fixes
 F: drivers/hwmon/applesmc.c
 
 APPLETALK NETWORK LAYER
···
 BTRFS FILE SYSTEM
 M: Chris Mason <clm@fb.com>
 M: Josef Bacik <jbacik@fb.com>
+M: David Sterba <dsterba@suse.cz>
 L: linux-btrfs@vger.kernel.org
 W: http://btrfs.wiki.kernel.org/
 Q: http://patchwork.kernel.org/project/linux-btrfs/list/
···
 F: drivers/scsi/ipr.*
 
 IBM Power Virtual Ethernet Device Driver
-M: Santiago Leon <santil@linux.vnet.ibm.com>
+M: Thomas Falcon <tlfalcon@linux.vnet.ibm.com>
 L: netdev@vger.kernel.org
 S: Supported
 F: drivers/net/ethernet/ibm/ibmveth.*
···
 F: include/linux/input/
 
 INPUT MULTITOUCH (MT) PROTOCOL
-M: Henrik Rydberg <rydberg@euromail.se>
+M: Henrik Rydberg <rydberg@bitmath.org>
 L: linux-input@vger.kernel.org
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/rydberg/input-mt.git
-S: Maintained
+S: Odd fixes
 F: Documentation/input/multi-touch-protocol.txt
 F: drivers/input/input-mt.c
 K: \b(ABS|SYN)_MT_
···
 W: www.open-iscsi.org
 Q: http://patchwork.kernel.org/project/linux-rdma/list/
 F: drivers/infiniband/ulp/iser/
+
+ISCSI EXTENSIONS FOR RDMA (ISER) TARGET
+M: Sagi Grimberg <sagig@mellanox.com>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master
+L: linux-rdma@vger.kernel.org
+L: target-devel@vger.kernel.org
+S: Supported
+W: http://www.linux-iscsi.org
+F: drivers/infiniband/ulp/isert
 
 ISDN SUBSYSTEM
 M: Karsten Keil <isdn@linux-pingi.de>
···
 TI BANDGAP AND THERMAL DRIVER
 M: Eduardo Valentin <edubezval@gmail.com>
 L: linux-pm@vger.kernel.org
-S: Supported
+L: linux-omap@vger.kernel.org
+S: Maintained
 F: drivers/thermal/ti-soc-thermal/
 
 TI CLOCK DRIVER
+2 -1
Makefile
···
 VERSION = 3
 PATCHLEVEL = 19
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Diseased Newt
 
 # *DOCUMENTATION*
···
 # Needed to be compatible with the O= option
 LINUXINCLUDE := \
         -I$(srctree)/arch/$(hdr-arch)/include \
+        -Iarch/$(hdr-arch)/include/generated/uapi \
         -Iarch/$(hdr-arch)/include/generated \
         $(if $(KBUILD_SRC), -I$(srctree)/include) \
         -Iinclude \
+15
arch/arm/boot/dts/imx6sx-sdb.dts
···
     pinctrl-0 = <&pinctrl_enet1>;
     phy-supply = <&reg_enet_3v3>;
     phy-mode = "rgmii";
+    phy-handle = <&ethphy1>;
     status = "okay";
+
+    mdio {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        ethphy1: ethernet-phy@0 {
+            reg = <0>;
+        };
+
+        ethphy2: ethernet-phy@1 {
+            reg = <1>;
+        };
+    };
 };
 
 &fec2 {
     pinctrl-names = "default";
     pinctrl-0 = <&pinctrl_enet2>;
     phy-mode = "rgmii";
+    phy-handle = <&ethphy2>;
     status = "okay";
 };
 
+15
arch/arm/boot/dts/vf610-twr.dts
···
 
 &fec0 {
     phy-mode = "rmii";
+    phy-handle = <&ethphy0>;
     pinctrl-names = "default";
     pinctrl-0 = <&pinctrl_fec0>;
     status = "okay";
+
+    mdio {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        ethphy0: ethernet-phy@0 {
+            reg = <0>;
+        };
+
+        ethphy1: ethernet-phy@1 {
+            reg = <1>;
+        };
+    };
 };
 
 &fec1 {
     phy-mode = "rmii";
+    phy-handle = <&ethphy1>;
     pinctrl-names = "default";
     pinctrl-0 = <&pinctrl_fec1>;
     status = "okay";
+1
arch/arm/include/uapi/asm/unistd.h
···
 #define __NR_getrandom (__NR_SYSCALL_BASE+384)
 #define __NR_memfd_create (__NR_SYSCALL_BASE+385)
 #define __NR_bpf (__NR_SYSCALL_BASE+386)
+#define __NR_execveat (__NR_SYSCALL_BASE+387)
 
 /*
  * The following SWIs are ARM private.
+1
arch/arm/kernel/calls.S
···
         CALL(sys_getrandom)
 /* 385 */ CALL(sys_memfd_create)
         CALL(sys_bpf)
+        CALL(sys_execveat)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
+8
arch/arm/kernel/perf_regs.c
···
 {
     return PERF_SAMPLE_REGS_ABI_32;
 }
+
+void perf_get_regs_user(struct perf_regs *regs_user,
+                        struct pt_regs *regs,
+                        struct pt_regs *regs_user_copy)
+{
+    regs_user->regs = task_pt_regs(current);
+    regs_user->abi = perf_reg_abi(current);
+}
+2 -7
arch/arm/mm/dump.c
···
     static const char units[] = "KMGTPE";
     u64 prot = val & pg_level[level].mask;
 
-    if (addr < USER_PGTABLES_CEILING)
-        return;
-
     if (!st->level) {
         st->level = level;
         st->current_prot = prot;
···
     pgd_t *pgd = swapper_pg_dir;
     struct pg_state st;
     unsigned long addr;
-    unsigned i, pgdoff = USER_PGTABLES_CEILING / PGDIR_SIZE;
+    unsigned i;
 
     memset(&st, 0, sizeof(st));
     st.seq = m;
     st.marker = address_markers;
 
-    pgd += pgdoff;
-
-    for (i = pgdoff; i < PTRS_PER_PGD; i++, pgd++) {
+    for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
         addr = i * PGDIR_SIZE;
         if (!pgd_none(*pgd)) {
             walk_pud(&st, pgd, addr);
+2 -2
arch/arm/mm/init.c
···
         .start = (unsigned long)_stext,
         .end = (unsigned long)__init_begin,
 #ifdef CONFIG_ARM_LPAE
-        .mask = ~PMD_SECT_RDONLY,
-        .prot = PMD_SECT_RDONLY,
+        .mask = ~L_PMD_SECT_RDONLY,
+        .prot = L_PMD_SECT_RDONLY,
 #else
         .mask = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
         .prot = PMD_SECT_APX | PMD_SECT_AP_WRITE,
+2 -2
arch/arm/mm/mmu.c
···
 static void __init map_lowmem(void)
 {
     struct memblock_region *reg;
-    unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
-    unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+    phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
+    phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
 
     /* Map all the lowmem memory banks. */
     for_each_memblock(memory, reg) {
+1
arch/arm64/include/asm/arch_timer.h
···
 
 #include <asm/barrier.h>
 
+#include <linux/bug.h>
 #include <linux/init.h>
 #include <linux/types.h>
 
+5
arch/arm64/include/asm/cpu.h
···
     u64 reg_id_aa64pfr0;
     u64 reg_id_aa64pfr1;
 
+    u32 reg_id_dfr0;
     u32 reg_id_isar0;
     u32 reg_id_isar1;
     u32 reg_id_isar2;
···
     u32 reg_id_mmfr3;
     u32 reg_id_pfr0;
     u32 reg_id_pfr1;
+
+    u32 reg_mvfr0;
+    u32 reg_mvfr1;
+    u32 reg_mvfr2;
 };
 
 DECLARE_PER_CPU(struct cpuinfo_arm64, cpu_data);
+2
arch/arm64/include/asm/kvm_emulate.h
···
 static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 {
     vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
+    if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
+        vcpu->arch.hcr_el2 &= ~HCR_RW;
 }
 
 static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
+1 -3
arch/arm64/include/asm/processor.h
···
 
 #include <asm/fpsimd.h>
 #include <asm/hw_breakpoint.h>
+#include <asm/pgtable-hwdef.h>
 #include <asm/ptrace.h>
 #include <asm/types.h>
 
···
 
 /* Free all resources held by a thread. */
 extern void release_thread(struct task_struct *);
-
-/* Prepare to copy thread state - unlazy all lazy status */
-#define prepare_to_copy(tsk) do { } while (0)
 
 unsigned long get_wchan(struct task_struct *p);
 
+1 -1
arch/arm64/include/asm/unistd.h
···
 #define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2)
 #define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5)
 
-#define __NR_compat_syscalls 386
+#define __NR_compat_syscalls 387
 #endif
 
 #define __ARCH_WANT_SYS_CLONE
+10
arch/arm64/kernel/cpuinfo.c
···
      * If we have AArch32, we care about 32-bit features for compat. These
      * registers should be RES0 otherwise.
      */
+    diff |= CHECK(id_dfr0, boot, cur, cpu);
     diff |= CHECK(id_isar0, boot, cur, cpu);
     diff |= CHECK(id_isar1, boot, cur, cpu);
     diff |= CHECK(id_isar2, boot, cur, cpu);
···
     diff |= CHECK(id_mmfr3, boot, cur, cpu);
     diff |= CHECK(id_pfr0, boot, cur, cpu);
     diff |= CHECK(id_pfr1, boot, cur, cpu);
+
+    diff |= CHECK(mvfr0, boot, cur, cpu);
+    diff |= CHECK(mvfr1, boot, cur, cpu);
+    diff |= CHECK(mvfr2, boot, cur, cpu);
 
     /*
      * Mismatched CPU features are a recipe for disaster. Don't even
···
     info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
     info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);
 
+    info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
     info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
     info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
     info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
···
     info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1);
     info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
     info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
+
+    info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
+    info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
+    info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
 
     cpuinfo_detect_icache_policy(info);
 
+1 -1
arch/arm64/kernel/efi.c
···
 
     /* boot time idmap_pg_dir is incomplete, so fill in missing parts */
     efi_setup_idmap();
+    early_memunmap(memmap.map, memmap.map_end - memmap.map);
 }
 
 static int __init remap_region(efi_memory_desc_t *md, void **new)
···
     }
 
     mapsize = memmap.map_end - memmap.map;
-    early_memunmap(memmap.map, mapsize);
 
     if (efi_runtime_disabled()) {
         pr_info("EFI runtime services will be disabled.\n");
+1
arch/arm64/kernel/module.c
···
 #include <linux/mm.h>
 #include <linux/moduleloader.h>
 #include <linux/vmalloc.h>
+#include <asm/alternative.h>
 #include <asm/insn.h>
 #include <asm/sections.h>
 
+8
arch/arm64/kernel/perf_regs.c
···
     else
         return PERF_SAMPLE_REGS_ABI_64;
 }
+
+void perf_get_regs_user(struct perf_regs *regs_user,
+                        struct pt_regs *regs,
+                        struct pt_regs *regs_user_copy)
+{
+    regs_user->regs = task_pt_regs(current);
+    regs_user->abi = perf_reg_abi(current);
+}
+1
arch/arm64/kernel/setup.c
···
     request_standard_resources();
 
     efi_idmap_init();
+    early_ioremap_reset();
 
     unflatten_device_tree();
 
+1
arch/arm64/kernel/smp_spin_table.c
···
 #include <asm/cacheflush.h>
 #include <asm/cpu_ops.h>
 #include <asm/cputype.h>
+#include <asm/io.h>
 #include <asm/smp_plat.h>
 
 extern void secondary_holding_pen(void);
+1
arch/arm64/kvm/hyp.S
···
      * Instead, we invalidate Stage-2 for this IPA, and the
      * whole of Stage-1. Weep...
      */
+    lsr x1, x1, #12
     tlbi ipas2e1is, x1
     /*
      * We have to ensure completion of the invalidation at Stage-2,
-1
arch/arm64/kvm/reset.c
···
         if (!cpu_has_32bit_el1())
             return -EINVAL;
         cpu_reset = &default_regs_reset32;
-        vcpu->arch.hcr_el2 &= ~HCR_RW;
     } else {
         cpu_reset = &default_regs_reset;
     }
+1
arch/blackfin/mach-bf533/boards/stamp.c
···
  */
 
 #include <linux/device.h>
+#include <linux/delay.h>
 #include <linux/platform_device.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
+4 -5
arch/ia64/kernel/acpi.c
···
 }
 
 /* wrapper to silence section mismatch warning */
-int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
+int __ref acpi_map_cpu(acpi_handle handle, int physid, int *pcpu)
 {
     return _acpi_map_lsapic(handle, physid, pcpu);
 }
-EXPORT_SYMBOL(acpi_map_lsapic);
+EXPORT_SYMBOL(acpi_map_cpu);
 
-int acpi_unmap_lsapic(int cpu)
+int acpi_unmap_cpu(int cpu)
 {
     ia64_cpu_to_sapicid[cpu] = -1;
     set_cpu_present(cpu, false);
···
 
     return (0);
 }
-
-EXPORT_SYMBOL(acpi_unmap_lsapic);
+EXPORT_SYMBOL(acpi_unmap_cpu);
 #endif /* CONFIG_ACPI_HOTPLUG_CPU */
 
 #ifdef CONFIG_ACPI_NUMA
+1 -1
arch/m68k/include/asm/unistd.h
···
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls 355
+#define NR_syscalls 356
 
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_OLD_STAT
+1
arch/m68k/include/uapi/asm/unistd.h
···
 #define __NR_getrandom 352
 #define __NR_memfd_create 353
 #define __NR_bpf 354
+#define __NR_execveat 355
 
 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */
+1
arch/m68k/kernel/syscalltable.S
···
     .long sys_getrandom
     .long sys_memfd_create
     .long sys_bpf
+    .long sys_execveat /* 355 */
 
+7 -6
arch/powerpc/include/asm/thread_info.h
···
 #define THREAD_SIZE (1 << THREAD_SHIFT)
 
 #ifdef CONFIG_PPC64
-#define CURRENT_THREAD_INFO(dest, sp) clrrdi dest, sp, THREAD_SHIFT
+#define CURRENT_THREAD_INFO(dest, sp) stringify_in_c(clrrdi dest, sp, THREAD_SHIFT)
 #else
-#define CURRENT_THREAD_INFO(dest, sp) rlwinm dest, sp, 0, 0, 31-THREAD_SHIFT
+#define CURRENT_THREAD_INFO(dest, sp) stringify_in_c(rlwinm dest, sp, 0, 0, 31-THREAD_SHIFT)
 #endif
 
 #ifndef __ASSEMBLY__
···
 #define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)
 
 /* how to get the thread information struct from C */
-register unsigned long __current_r1 asm("r1");
 static inline struct thread_info *current_thread_info(void)
 {
-    /* gcc4, at least, is smart enough to turn this into a single
-     * rlwinm for ppc32 and clrrdi for ppc64 */
-    return (struct thread_info *)(__current_r1 & ~(THREAD_SIZE-1));
+    unsigned long val;
+
+    asm (CURRENT_THREAD_INFO(%0,1) : "=r" (val));
+
+    return (struct thread_info *)val;
 }
 
 #endif /* __ASSEMBLY__ */
-1
arch/powerpc/platforms/powernv/opal-wrappers.S
···
     b 1f;                                              \
     END_FTR_SECTION(0, 1);                             \
     ld r12,opal_tracepoint_refcount@toc(r2);           \
-    std r12,32(r1);                                    \
     cmpdi r12,0;                                       \
     bne- LABEL;                                        \
 1:
+1 -1
arch/s390/hypfs/hypfs_vm.c
···
 struct dbfs_d2fc_hdr {
     u64 len;        /* Length of d2fc buffer without header */
     u16 version;    /* Version of header */
-    char tod_ext[16];   /* TOD clock for d2fc */
+    char tod_ext[STORE_CLOCK_EXT_SIZE];   /* TOD clock for d2fc */
     u64 count;      /* Number of VM guests in d2fc buffer */
     char reserved[30];
 } __attribute__ ((packed));
+1 -1
arch/s390/include/asm/irqflags.h
···
 
 static inline notrace unsigned long arch_local_save_flags(void)
 {
-    return __arch_local_irq_stosm(0x00);
+    return __arch_local_irq_stnsm(0xff);
 }
 
 static inline notrace unsigned long arch_local_irq_save(void)
+6 -4
arch/s390/include/asm/timex.h
···
     set_clock_comparator(S390_lowcore.clock_comparator);
 }
 
-#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
+#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
+#define STORE_CLOCK_EXT_SIZE 16 /* stcke writes 16 bytes */
 
 typedef unsigned long long cycles_t;
 
-static inline void get_tod_clock_ext(char clk[16])
+static inline void get_tod_clock_ext(char *clk)
 {
-    typedef struct { char _[sizeof(clk)]; } addrtype;
+    typedef struct { char _[STORE_CLOCK_EXT_SIZE]; } addrtype;
 
     asm volatile("stcke %0" : "=Q" (*(addrtype *) clk) : : "cc");
 }
 
 static inline unsigned long long get_tod_clock(void)
 {
-    unsigned char clk[16];
+    unsigned char clk[STORE_CLOCK_EXT_SIZE];
+
     get_tod_clock_ext(clk);
     return *((unsigned long long *)&clk[1]);
 }
+2 -1
arch/s390/include/uapi/asm/unistd.h
···
 #define __NR_bpf 351
 #define __NR_s390_pci_mmio_write 352
 #define __NR_s390_pci_mmio_read 353
-#define NR_syscalls 354
+#define __NR_execveat 354
+#define NR_syscalls 355
 
 /*
  * There are some system calls that are not present on 64 bit, some
+1
arch/s390/kernel/syscalls.S
···
 SYSCALL(sys_bpf,sys_bpf,compat_sys_bpf)
 SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_write,compat_sys_s390_pci_mmio_write)
 SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_read,compat_sys_s390_pci_mmio_read)
+SYSCALL(sys_execveat,sys_execveat,compat_sys_execveat)
+60 -9
arch/s390/kernel/uprobes.c
···
     return false;
 }
 
+static int check_per_event(unsigned short cause, unsigned long control,
+                           struct pt_regs *regs)
+{
+    if (!(regs->psw.mask & PSW_MASK_PER))
+        return 0;
+    /* user space single step */
+    if (control == 0)
+        return 1;
+    /* over indication for storage alteration */
+    if ((control & 0x20200000) && (cause & 0x2000))
+        return 1;
+    if (cause & 0x8000) {
+        /* all branches */
+        if ((control & 0x80800000) == 0x80000000)
+            return 1;
+        /* branch into selected range */
+        if (((control & 0x80800000) == 0x80800000) &&
+            regs->psw.addr >= current->thread.per_user.start &&
+            regs->psw.addr <= current->thread.per_user.end)
+            return 1;
+    }
+    return 0;
+}
+
 int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
 {
     int fixup = probe_get_fixup_type(auprobe->insn);
···
         if (regs->psw.addr - utask->xol_vaddr == ilen)
             regs->psw.addr = utask->vaddr + ilen;
     }
-    /* If per tracing was active generate trap */
-    if (regs->psw.mask & PSW_MASK_PER)
-        do_per_trap(regs);
+    if (check_per_event(current->thread.per_event.cause,
+                        current->thread.per_user.control, regs)) {
+        /* fix per address */
+        current->thread.per_event.address = utask->vaddr;
+        /* trigger per event */
+        set_pt_regs_flag(regs, PIF_PER_TRAP);
+    }
     return 0;
 }
···
     clear_thread_flag(TIF_UPROBE_SINGLESTEP);
     regs->int_code = auprobe->saved_int_code;
     regs->psw.addr = current->utask->vaddr;
+    current->thread.per_event.address = current->utask->vaddr;
 }
 
 unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
···
     __rc;                                           \
 })
 
-#define emu_store_ril(ptr, input)                   \
+#define emu_store_ril(regs, ptr, input)             \
 ({                                                  \
     unsigned int mask = sizeof(*(ptr)) - 1;         \
+    __typeof__(ptr) __ptr = (ptr);                  \
     int __rc = 0;                                   \
                                                     \
     if (!test_facility(34))                         \
         __rc = EMU_ILLEGAL_OP;                      \
-    else if ((u64 __force)ptr & mask)               \
+    else if ((u64 __force)__ptr & mask)             \
         __rc = EMU_SPECIFICATION;                   \
-    else if (put_user(*(input), ptr))               \
+    else if (put_user(*(input), __ptr))             \
         __rc = EMU_ADDRESSING;                      \
+    if (__rc == 0)                                  \
+        sim_stor_event(regs, __ptr, mask + 1);      \
     __rc;                                           \
 })
···
         s32 s32[2];
         s16 s16[4];
     };
+
+/*
+ * If user per registers are setup to trace storage alterations and an
+ * emulated store took place on a fitting address a user trap is generated.
+ */
+static void sim_stor_event(struct pt_regs *regs, void *addr, int len)
+{
+    if (!(regs->psw.mask & PSW_MASK_PER))
+        return;
+    if (!(current->thread.per_user.control & PER_EVENT_STORE))
+        return;
+    if ((void *)current->thread.per_user.start > (addr + len))
+        return;
+    if ((void *)current->thread.per_user.end < addr)
+        return;
+    current->thread.per_event.address = regs->psw.addr;
+    current->thread.per_event.cause = PER_EVENT_STORE >> 16;
+    set_pt_regs_flag(regs, PIF_PER_TRAP);
+}
 
 /*
  * pc relative instructions are emulated, since parameters may not be
···
         rc = emu_load_ril((u32 __user *)uptr, &rx->u64);
         break;
     case 0x07: /* sthrl */
-        rc = emu_store_ril((u16 __user *)uptr, &rx->u16[3]);
+        rc = emu_store_ril(regs, (u16 __user *)uptr, &rx->u16[3]);
         break;
     case 0x0b: /* stgrl */
-        rc = emu_store_ril((u64 __user *)uptr, &rx->u64);
+        rc = emu_store_ril(regs, (u64 __user *)uptr, &rx->u64);
         break;
     case 0x0f: /* strl */
-        rc = emu_store_ril((u32 __user *)uptr, &rx->u32[1]);
+        rc = emu_store_ril(regs, (u32 __user *)uptr, &rx->u32[1]);
         break;
     }
     break;
-2
arch/s390/kernel/vtime.c
···
     struct thread_info *ti = task_thread_info(tsk);
     u64 timer, system;
 
-    WARN_ON_ONCE(!irqs_disabled());
-
     timer = S390_lowcore.last_update_timer;
     S390_lowcore.last_update_timer = get_vtimer();
     S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
+3 -2
arch/s390/mm/pgtable.c
···
 static unsigned long __gmap_segment_gaddr(unsigned long *entry)
 {
     struct page *page;
-    unsigned long offset;
+    unsigned long offset, mask;
 
     offset = (unsigned long) entry / sizeof(unsigned long);
     offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
-    page = pmd_to_page((pmd_t *) entry);
+    mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
+    page = virt_to_page((void *)((unsigned long) entry & mask));
     return page->index + offset;
 }
 
+4 -4
arch/s390/net/bpf_jit_comp.c
···
         EMIT4_DISP(0x88500000, K);
         break;
     case BPF_ALU | BPF_NEG: /* A = -A */
-        /* lnr %r5,%r5 */
-        EMIT2(0x1155);
+        /* lcr %r5,%r5 */
+        EMIT2(0x1355);
         break;
     case BPF_JMP | BPF_JA: /* ip += K */
         offset = addrs[i + K] + jit->start - jit->prg;
···
 xbranch: /* Emit compare if the branch targets are different */
         if (filter->jt != filter->jf) {
             jit->seen |= SEEN_XREG;
-            /* cr %r5,%r12 */
-            EMIT2(0x195c);
+            /* clr %r5,%r12 */
+            EMIT2(0x155c);
         }
         goto branch;
     case BPF_JMP | BPF_JSET | BPF_X: /* ip += (A & X) ? jt : jf */
+1
arch/x86/boot/Makefile
···
 $(obj)/cpustr.h: $(obj)/mkcpustr FORCE
     $(call if_changed,cpustr)
 endif
+clean-files += cpustr.h
 
 # ---------------------------------------------------------------------------
 
+1 -1
arch/x86/crypto/Makefile
···
 
 obj-$(CONFIG_CRYPTO_CRC32C_INTEL) += crc32c-intel.o
 obj-$(CONFIG_CRYPTO_SHA1_SSSE3) += sha1-ssse3.o
-obj-$(CONFIG_CRYPTO_SHA1_MB) += sha-mb/
 obj-$(CONFIG_CRYPTO_CRC32_PCLMUL) += crc32-pclmul.o
 obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o
 obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o
···
 ifeq ($(avx2_supported),yes)
     obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64) += camellia-aesni-avx2.o
     obj-$(CONFIG_CRYPTO_SERPENT_AVX2_X86_64) += serpent-avx2.o
+    obj-$(CONFIG_CRYPTO_SHA1_MB) += sha-mb/
 endif
 
 aes-i586-y := aes-i586-asm_32.o aes_glue.o
+35 -11
arch/x86/crypto/aes_ctrby8_avx-x86_64.S
···
 
     .if (klen == KEY_128)
         .if (load_keys)
-            vmovdqa 3*16(p_keys), xkeyA
+            vmovdqa 3*16(p_keys), xkey4
         .endif
     .else
         vmovdqa 3*16(p_keys), xkeyA
···
     add $(16*by), p_in
 
     .if (klen == KEY_128)
-        vmovdqa 4*16(p_keys), xkey4
+        vmovdqa 4*16(p_keys), xkeyB
     .else
         .if (load_keys)
             vmovdqa 4*16(p_keys), xkey4
···
     .set i, 0
     .rept by
         club XDATA, i
-        vaesenc xkeyA, var_xdata, var_xdata /* key 3 */
+        /* key 3 */
+        .if (klen == KEY_128)
+            vaesenc xkey4, var_xdata, var_xdata
+        .else
+            vaesenc xkeyA, var_xdata, var_xdata
+        .endif
         .set i, (i +1)
     .endr
 
···
     .set i, 0
     .rept by
         club XDATA, i
-        vaesenc xkey4, var_xdata, var_xdata /* key 4 */
+        /* key 4 */
+        .if (klen == KEY_128)
+            vaesenc xkeyB, var_xdata, var_xdata
+        .else
+            vaesenc xkey4, var_xdata, var_xdata
+        .endif
         .set i, (i +1)
     .endr
 
     .if (klen == KEY_128)
         .if (load_keys)
-            vmovdqa 6*16(p_keys), xkeyB
+            vmovdqa 6*16(p_keys), xkey8
         .endif
     .else
         vmovdqa 6*16(p_keys), xkeyB
···
     .set i, 0
     .rept by
         club XDATA, i
-        vaesenc xkeyB, var_xdata, var_xdata /* key 6 */
+        /* key 6 */
+        .if (klen == KEY_128)
+            vaesenc xkey8, var_xdata, var_xdata
+        .else
+            vaesenc xkeyB, var_xdata, var_xdata
+        .endif
         .set i, (i +1)
     .endr
 
     .if (klen == KEY_128)
-        vmovdqa 8*16(p_keys), xkey8
+        vmovdqa 8*16(p_keys), xkeyB
     .else
         .if (load_keys)
             vmovdqa 8*16(p_keys), xkey8
···
 
     .if (klen == KEY_128)
         .if (load_keys)
-            vmovdqa 9*16(p_keys), xkeyA
+            vmovdqa 9*16(p_keys), xkey12
         .endif
     .else
         vmovdqa 9*16(p_keys), xkeyA
···
     .set i, 0
     .rept by
         club XDATA, i
-        vaesenc xkey8, var_xdata, var_xdata /* key 8 */
+        /* key 8 */
+        .if (klen == KEY_128)
+            vaesenc xkeyB, var_xdata, var_xdata
+        .else
+            vaesenc xkey8, var_xdata, var_xdata
+        .endif
         .set i, (i +1)
     .endr
 
···
     .set i, 0
     .rept by
         club XDATA, i
-        vaesenc xkeyA, var_xdata, var_xdata /* key 9 */
+        /* key 9 */
+        .if (klen == KEY_128)
+            vaesenc xkey12, var_xdata, var_xdata
+        .else
+            vaesenc xkeyA, var_xdata, var_xdata
+        .endif
         .set i, (i +1)
     .endr
 
···
     /* main body of aes ctr load */
 
 .macro do_aes_ctrmain key_len
-
     cmp $16, num_bytes
     jb .Ldo_return2\key_len
 
+4 -2
arch/x86/include/asm/vgtod.h
···
 
     /*
      * Load per CPU data from GDT. LSL is faster than RDTSCP and
-     * works on all CPUs.
+     * works on all CPUs. This is volatile so that it orders
+     * correctly wrt barrier() and to keep gcc from cleverly
+     * hoisting it out of the calling function.
      */
-    asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
+    asm volatile ("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
 
     return p;
 }
+4 -5
arch/x86/kernel/acpi/boot.c
···
 }
 
 /* wrapper to silence section mismatch warning */
-int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
+int __ref acpi_map_cpu(acpi_handle handle, int physid, int *pcpu)
 {
     return _acpi_map_lsapic(handle, physid, pcpu);
 }
-EXPORT_SYMBOL(acpi_map_lsapic);
+EXPORT_SYMBOL(acpi_map_cpu);
 
-int acpi_unmap_lsapic(int cpu)
+int acpi_unmap_cpu(int cpu)
 {
 #ifdef CONFIG_ACPI_NUMA
     set_apicid_to_node(per_cpu(x86_cpu_to_apicid, cpu), NUMA_NO_NODE);
···
 
     return (0);
 }
-
-EXPORT_SYMBOL(acpi_unmap_lsapic);
+EXPORT_SYMBOL(acpi_unmap_cpu);
 #endif /* CONFIG_ACPI_HOTPLUG_CPU */
 
 int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
+1
arch/x86/kernel/cpu/Makefile
···
 $(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.sh FORCE
     $(call if_changed,mkcapflags)
 endif
+clean-files += capflags.c
+1 -1
arch/x86/kernel/cpu/mkcapflags.sh
···
     # If the /* comment */ starts with a quote string, grab that.
     VALUE="$(echo "$i" | sed -n 's@.*/\* *\("[^"]*"\).*\*/@\1@p')"
     [ -z "$VALUE" ] && VALUE="\"$NAME\""
-    [ "$VALUE" == '""' ] && continue
+    [ "$VALUE" = '""' ] && continue
 
     # Name is uppercase, VALUE is all lowercase
     VALUE="$(echo "$VALUE" | tr A-Z a-z)"
+1 -1
arch/x86/kernel/cpu/perf_event_intel_uncore.h
···
 #define UNCORE_PCI_DEV_TYPE(data) ((data >> 8) & 0xff)
 #define UNCORE_PCI_DEV_IDX(data) (data & 0xff)
 #define UNCORE_EXTRA_PCI_DEV 0xff
-#define UNCORE_EXTRA_PCI_DEV_MAX 2
+#define UNCORE_EXTRA_PCI_DEV_MAX 3
 
 /* support up to 8 sockets */
 #define UNCORE_SOCKET_MAX 8
+17
arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
···
 enum {
     SNBEP_PCI_QPI_PORT0_FILTER,
     SNBEP_PCI_QPI_PORT1_FILTER,
+    HSWEP_PCI_PCU_3,
 };
 
 static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
···
 {
     if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
         hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
+
+    /* Detect 6-8 core systems with only two SBOXes */
+    if (uncore_extra_pci_dev[0][HSWEP_PCI_PCU_3]) {
+        u32 capid4;
+
+        pci_read_config_dword(uncore_extra_pci_dev[0][HSWEP_PCI_PCU_3],
+                              0x94, &capid4);
+        if (((capid4 >> 6) & 0x3) == 0)
+            hswep_uncore_sbox.num_boxes = 2;
+    }
+
     uncore_msr_uncores = hswep_msr_uncores;
 }
···
         PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
         .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
                                            SNBEP_PCI_QPI_PORT1_FILTER),
+    },
+    { /* PCU.3 (for Capability registers) */
+        PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0),
+        .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
+                                           HSWEP_PCI_PCU_3),
     },
     { /* end: all zeroes */ }
 };
+90
arch/x86/kernel/perf_regs.c
···
 {
     return PERF_SAMPLE_REGS_ABI_32;
 }
+
+void perf_get_regs_user(struct perf_regs *regs_user,
+                        struct pt_regs *regs,
+                        struct pt_regs *regs_user_copy)
+{
+    regs_user->regs = task_pt_regs(current);
+    regs_user->abi = perf_reg_abi(current);
+}
 #else /* CONFIG_X86_64 */
 #define REG_NOSUPPORT ((1ULL << PERF_REG_X86_DS) | \
                        (1ULL << PERF_REG_X86_ES) | \
···
         return PERF_SAMPLE_REGS_ABI_32;
     else
         return PERF_SAMPLE_REGS_ABI_64;
+}
+
+void perf_get_regs_user(struct perf_regs *regs_user,
+                        struct pt_regs *regs,
+                        struct pt_regs *regs_user_copy)
+{
+    struct pt_regs *user_regs = task_pt_regs(current);
+
+    /*
+     * If we're in an NMI that interrupted task_pt_regs setup, then
+     * we can't sample user regs at all. This check isn't really
+     * sufficient, though, as we could be in an NMI inside an interrupt
+     * that happened during task_pt_regs setup.
+     */
+    if (regs->sp > (unsigned long)&user_regs->r11 &&
+        regs->sp <= (unsigned long)(user_regs + 1)) {
+        regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
+        regs_user->regs = NULL;
+        return;
+    }
+
+    /*
+     * RIP, flags, and the argument registers are usually saved.
+     * orig_ax is probably okay, too.
+     */
+    regs_user_copy->ip = user_regs->ip;
+    regs_user_copy->cx = user_regs->cx;
+    regs_user_copy->dx = user_regs->dx;
+    regs_user_copy->si = user_regs->si;
+    regs_user_copy->di = user_regs->di;
+    regs_user_copy->r8 = user_regs->r8;
+    regs_user_copy->r9 = user_regs->r9;
+    regs_user_copy->r10 = user_regs->r10;
+    regs_user_copy->r11 = user_regs->r11;
+    regs_user_copy->orig_ax = user_regs->orig_ax;
+    regs_user_copy->flags = user_regs->flags;
+
+    /*
+     * Don't even try to report the "rest" regs.
+     */
+    regs_user_copy->bx = -1;
+    regs_user_copy->bp = -1;
+    regs_user_copy->r12 = -1;
+    regs_user_copy->r13 = -1;
+    regs_user_copy->r14 = -1;
+    regs_user_copy->r15 = -1;
+
+    /*
+     * For this to be at all useful, we need a reasonable guess for
+     * sp and the ABI. Be careful: we're in NMI context, and we're
+     * considering current to be the current task, so we should
+     * be careful not to look at any other percpu variables that might
+     * change during context switches.
+     */
+    if (IS_ENABLED(CONFIG_IA32_EMULATION) &&
+        task_thread_info(current)->status & TS_COMPAT) {
+        /* Easy case: we're in a compat syscall. */
+        regs_user->abi = PERF_SAMPLE_REGS_ABI_32;
+        regs_user_copy->sp = user_regs->sp;
+        regs_user_copy->cs = user_regs->cs;
+        regs_user_copy->ss = user_regs->ss;
+    } else if (user_regs->orig_ax != -1) {
+        /*
+         * We're probably in a 64-bit syscall.
+         * Warning: this code is severely racy. At least it's better
+         * than just blindly copying user_regs.
+         */
+        regs_user->abi = PERF_SAMPLE_REGS_ABI_64;
+        regs_user_copy->sp = this_cpu_read(old_rsp);
+        regs_user_copy->cs = __USER_CS;
+        regs_user_copy->ss = __USER_DS;
+        regs_user_copy->cx = -1; /* usually contains garbage */
+    } else {
+        /* We're probably in an interrupt or exception. */
+        regs_user->abi = user_64bit_mode(user_regs) ?
+            PERF_SAMPLE_REGS_ABI_64 : PERF_SAMPLE_REGS_ABI_32;
+        regs_user_copy->sp = user_regs->sp;
+        regs_user_copy->cs = user_regs->cs;
+        regs_user_copy->ss = user_regs->ss;
+    }
+
+    regs_user->regs = regs_user_copy;
 }
 #endif /* CONFIG_X86_32 */
+1 -1
arch/x86/lib/insn.c
···
 
 /* Verify next sizeof(t) bytes can be on the same instruction */
 #define validate_next(t, insn, n) \
-    ((insn)->next_byte + sizeof(t) + n < (insn)->end_kaddr)
+    ((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)
 
 #define __get_next(t, insn) \
     ({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
+17 -20
arch/x86/mm/init.c
···
 static unsigned long __init get_new_step_size(unsigned long step_size)
 {
     /*
-     * Explain why we shift by 5 and why we don't have to worry about
-     * 'step_size << 5' overflowing:
-     *
-     * initial mapped size is PMD_SIZE (2M).
+     * Initial mapped size is PMD_SIZE (2M).
      * We can not set step_size to be PUD_SIZE (1G) yet.
      * In worse case, when we cross the 1G boundary, and
      * PG_LEVEL_2M is not set, we will need 1+1+512 pages (2M + 8k)
-     * to map 1G range with PTE. Use 5 as shift for now.
+     * to map 1G range with PTE. Hence we use one less than the
+     * difference of page table level shifts.
      *
-     * Don't need to worry about overflow, on 32bit, when step_size
-     * is 0, round_down() returns 0 for start, and that turns it
-     * into 0x100000000ULL.
+     * Don't need to worry about overflow in the top-down case, on 32bit,
+     * when step_size is 0, round_down() returns 0 for start, and that
+     * turns it into 0x100000000ULL.
+     * In the bottom-up case, round_up(x, 0) returns 0 though too, which
+     * needs to be taken into consideration by the code below.
      */
-    return step_size << 5;
+    return step_size << (PMD_SHIFT - PAGE_SHIFT - 1);
 }
 
 /**
···
     unsigned long step_size;
     unsigned long addr;
     unsigned long mapped_ram_size = 0;
-    unsigned long new_mapped_ram_size;
 
     /* xen has big range in reserved near end of ram, skip it at first.*/
     addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE);
···
             start = map_start;
         } else
             start = map_start;
-        new_mapped_ram_size = init_range_memory_mapping(start,
+        mapped_ram_size += init_range_memory_mapping(start,
                             last_start);
         last_start = start;
         min_pfn_mapped = last_start >> PAGE_SHIFT;
-        /* only increase step_size after big range get mapped */
-        if (new_mapped_ram_size > mapped_ram_size)
+        if (mapped_ram_size >= step_size)
             step_size = get_new_step_size(step_size);
-        mapped_ram_size += new_mapped_ram_size;
     }
 
     if (real_end < map_end)
···
 static void __init memory_map_bottom_up(unsigned long map_start,
                                         unsigned long map_end)
 {
-    unsigned long next, new_mapped_ram_size, start;
+    unsigned long next, start;
     unsigned long mapped_ram_size = 0;
     /* step_size need to be small so pgt_buf from BRK could cover it */
     unsigned long step_size = PMD_SIZE;
···
      * for page table.
      */
     while (start < map_end) {
-        if (map_end - start > step_size) {
+        if (step_size && map_end - start > step_size) {
             next = round_up(start + 1, step_size);
             if (next > map_end)
                 next = map_end;
-        } else
+        } else {
             next = map_end;
+        }
 
-        new_mapped_ram_size = init_range_memory_mapping(start, next);
+        mapped_ram_size += init_range_memory_mapping(start, next);
         start = next;
 
-        if (new_mapped_ram_size > mapped_ram_size)
+        if (mapped_ram_size >= step_size)
             step_size = get_new_step_size(step_size);
-        mapped_ram_size += new_mapped_ram_size;
     }
 }
 
+29 -16
arch/x86/vdso/vma.c
···
 
 struct linux_binprm;
 
-/* Put the vdso above the (randomized) stack with another randomized offset.
-   This way there is no hole in the middle of address space.
-   To save memory make sure it is still in the same PTE as the stack top.
-   This doesn't give that many random bits.
-
-   Only used for the 64-bit and x32 vdsos. */
+/*
+ * Put the vdso above the (randomized) stack with another randomized
+ * offset. This way there is no hole in the middle of address space.
+ * To save memory make sure it is still in the same PTE as the stack
+ * top. This doesn't give that many random bits.
+ *
+ * Note that this algorithm is imperfect: the distribution of the vdso
+ * start address within a PMD is biased toward the end.
+ *
+ * Only used for the 64-bit and x32 vdsos.
+ */
 static unsigned long vdso_addr(unsigned long start, unsigned len)
 {
 #ifdef CONFIG_X86_32
···
 #else
     unsigned long addr, end;
     unsigned offset;
-    end = (start + PMD_SIZE - 1) & PMD_MASK;
+
+    /*
+     * Round up the start address. It can start out unaligned as a result
+     * of stack start randomization.
+     */
+    start = PAGE_ALIGN(start);
+
+    /* Round the lowest possible end address up to a PMD boundary. */
+    end = (start + len + PMD_SIZE - 1) & PMD_MASK;
     if (end >= TASK_SIZE_MAX)
         end = TASK_SIZE_MAX;
     end -= len;
-    /* This loses some more bits than a modulo, but is cheaper */
-    offset = get_random_int() & (PTRS_PER_PTE - 1);
-    addr = start + (offset << PAGE_SHIFT);
-    if (addr >= end)
-        addr = end;
+
+    if (end > start) {
+        offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
+        addr = start + (offset << PAGE_SHIFT);
+    } else {
+        addr = start;
+    }
 
     /*
-     * page-align it here so that get_unmapped_area doesn't
-     * align it wrongfully again to the next page. addr can come in 4K
-     * unaligned here as a result of stack start randomization.
+     * Forcibly align the final address in case we have a hardware
+     * issue that requires alignment for performance reasons.
      */
-    addr = PAGE_ALIGN(addr);
     addr = align_vdso_addr(addr);
 
     return addr;
+21 -1
arch/x86/xen/enlighten.c
···
 #include <xen/interface/physdev.h>
 #include <xen/interface/vcpu.h>
 #include <xen/interface/memory.h>
+#include <xen/interface/nmi.h>
 #include <xen/interface/xen-mca.h>
 #include <xen/features.h>
 #include <xen/page.h>
···
 #include <asm/reboot.h>
 #include <asm/stackprotector.h>
 #include <asm/hypervisor.h>
+#include <asm/mach_traps.h>
 #include <asm/mwait.h>
 #include <asm/pci_x86.h>
 #include <asm/pat.h>
···
     .emergency_restart = xen_emergency_restart,
 };
 
+static unsigned char xen_get_nmi_reason(void)
+{
+    unsigned char reason = 0;
+
+    /* Construct a value which looks like it came from port 0x61. */
+    if (test_bit(_XEN_NMIREASON_io_error,
+                 &HYPERVISOR_shared_info->arch.nmi_reason))
+        reason |= NMI_REASON_IOCHK;
+    if (test_bit(_XEN_NMIREASON_pci_serr,
+                 &HYPERVISOR_shared_info->arch.nmi_reason))
+        reason |= NMI_REASON_SERR;
+
+    return reason;
+}
+
 static void __init xen_boot_params_init_edd(void)
 {
 #if IS_ENABLED(CONFIG_EDD)
···
     pv_info = xen_info;
     pv_init_ops = xen_init_ops;
     pv_apic_ops = xen_apic_ops;
-    if (!xen_pvh_domain())
+    if (!xen_pvh_domain()) {
         pv_cpu_ops = xen_cpu_ops;
+
+        x86_platform.get_nmi_reason = xen_get_nmi_reason;
+    }
 
     if (xen_feature(XENFEAT_auto_translated_physmap))
         x86_init.resources.memory_setup = xen_auto_xlated_memory_setup;
+10 -10
arch/x86/xen/p2m.c
···
     return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
 }
 
-/* Only to be called in case of a race for a page just allocated! */
-static void free_p2m_page(void *p)
+static void __ref free_p2m_page(void *p)
 {
-    BUG_ON(!slab_is_available());
+    if (unlikely(!slab_is_available())) {
+        free_bootmem((unsigned long)p, PAGE_SIZE);
+        return;
+    }
+
     free_page((unsigned long)p);
 }
 
···
             p2m_missing_pte : p2m_identity_pte;
         for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
             pmdp = populate_extra_pmd(
-                (unsigned long)(p2m + pfn + i * PTRS_PER_PTE));
+                (unsigned long)(p2m + pfn) + i * PMD_SIZE);
             set_pmd(pmdp, __pmd(__pa(ptep) | _KERNPG_TABLE));
         }
     }
···
  * a new pmd is to replace p2m_missing_pte or p2m_identity_pte by a individual
  * pmd. In case of PAE/x86-32 there are multiple pmds to allocate!
  */
-static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg)
+static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *pte_pg)
 {
     pte_t *ptechk;
-    pte_t *pteret = ptep;
     pte_t *pte_newpg[PMDS_PER_MID_PAGE];
     pmd_t *pmdp;
     unsigned int level;
···
         if (ptechk == pte_pg) {
             set_pmd(pmdp,
                     __pmd(__pa(pte_newpg[i]) | _KERNPG_TABLE));
-            if (vaddr == (addr & ~(PMD_SIZE - 1)))
-                pteret = pte_offset_kernel(pmdp, addr);
             pte_newpg[i] = NULL;
         }
···
         vaddr += PMD_SIZE;
     }
 
-    return pteret;
+    return lookup_address(addr, &level);
 }
 
 /*
···
 
     if (pte_pg == p2m_missing_pte || pte_pg == p2m_identity_pte) {
         /* PMD level is missing, allocate a new one */
-        ptep = alloc_p2m_pmd(addr, ptep, pte_pg);
+        ptep = alloc_p2m_pmd(addr, pte_pg);
         if (!ptep)
             return false;
     }
+20 -22
arch/x86/xen/setup.c
···
 unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
 {
     int i;
-    unsigned long addr = PFN_PHYS(pfn);
+    phys_addr_t addr = PFN_PHYS(pfn);
 
     for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
         if (addr >= xen_extra_mem[i].start &&
···
     int i;
 
     for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
+        if (!xen_extra_mem[i].size)
+            continue;
         pfn_s = PFN_DOWN(xen_extra_mem[i].start);
         pfn_e = PFN_UP(xen_extra_mem[i].start + xen_extra_mem[i].size);
         for (pfn = pfn_s; pfn < pfn_e; pfn++)
···
  * as a fallback if the remapping fails.
  */
 static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
-    unsigned long end_pfn, unsigned long nr_pages, unsigned long *identity,
-    unsigned long *released)
+    unsigned long end_pfn, unsigned long nr_pages, unsigned long *released)
 {
-    unsigned long len = 0;
     unsigned long pfn, end;
     int ret;
 
     WARN_ON(start_pfn > end_pfn);
 
+    /* Release pages first. */
     end = min(end_pfn, nr_pages);
     for (pfn = start_pfn; pfn < end; pfn++) {
         unsigned long mfn = pfn_to_mfn(pfn);
···
         WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
 
         if (ret == 1) {
+            (*released)++;
             if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
                 break;
-            len++;
         } else
             break;
     }
 
-    /* Need to release pages first */
-    *released += len;
-    *identity += set_phys_range_identity(start_pfn, end_pfn);
+    set_phys_range_identity(start_pfn, end_pfn);
 }
 
···
     }
 
     /* Update kernel mapping, but not for highmem. */
-    if ((pfn << PAGE_SHIFT) >= __pa(high_memory))
+    if (pfn >= PFN_UP(__pa(high_memory - 1)))
         return;
 
     if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
···
     unsigned long ident_pfn_iter, remap_pfn_iter;
     unsigned long ident_end_pfn = start_pfn + size;
     unsigned long left = size;
-    unsigned long ident_cnt = 0;
     unsigned int i, chunk;
 
     WARN_ON(size == 0);
···
         xen_remap_mfn = mfn;
 
         /* Set identity map */
-        ident_cnt += set_phys_range_identity(ident_pfn_iter,
-            ident_pfn_iter + chunk);
+        set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk);
 
         left -= chunk;
     }
···
 static unsigned long __init xen_set_identity_and_remap_chunk(
     const struct e820entry *list, size_t map_size, unsigned long start_pfn,
     unsigned long end_pfn, unsigned long nr_pages, unsigned long remap_pfn,
-    unsigned long *identity, unsigned long *released)
+    unsigned long *released, unsigned long *remapped)
 {
     unsigned long pfn;
     unsigned long i = 0;
···
         /* Do not remap pages beyond the current allocation */
         if (cur_pfn >= nr_pages) {
             /* Identity map remaining pages */
-            *identity += set_phys_range_identity(cur_pfn,
-                cur_pfn + size);
+            set_phys_range_identity(cur_pfn, cur_pfn + size);
             break;
         }
         if (cur_pfn + size > nr_pages)
···
         if (!remap_range_size) {
             pr_warning("Unable to find available pfn range, not remapping identity pages\n");
             xen_set_identity_and_release_chunk(cur_pfn,
-                cur_pfn + left, nr_pages, identity, released);
+                cur_pfn + left, nr_pages, released);
             break;
         }
         /* Adjust size to fit in current e820 RAM region */
···
         /* Update variables to reflect new mappings. */
         i += size;
         remap_pfn += size;
-        *identity += size;
+        *remapped += size;
     }
 
     /*
···
 
 static void __init xen_set_identity_and_remap(
     const struct e820entry *list, size_t map_size, unsigned long nr_pages,
-    unsigned long *released)
+    unsigned long *released, unsigned long *remapped)
 {
     phys_addr_t start = 0;
-    unsigned long identity = 0;
     unsigned long last_pfn = nr_pages;
     const struct e820entry *entry;
     unsigned long num_released = 0;
+    unsigned long num_remapped = 0;
     int i;
 
     /*
···
             last_pfn = xen_set_identity_and_remap_chunk(
                 list, map_size, start_pfn,
                 end_pfn, nr_pages, last_pfn,
-                &identity, &num_released);
+                &num_released, &num_remapped);
             start = end;
         }
     }
 
     *released = num_released;
+    *remapped = num_remapped;
 
-    pr_info("Set %ld page(s) to 1-1 mapping\n", identity);
     pr_info("Released %ld page(s)\n", num_released);
 }
···
     struct xen_memory_map memmap;
     unsigned long max_pages;
     unsigned long extra_pages = 0;
+    unsigned long remapped_pages;
     int i;
     int op;
···
      * underlying RAM.
      */
     xen_set_identity_and_remap(map, memmap.nr_entries, max_pfn,
-                               &xen_released_pages);
+                               &xen_released_pages, &remapped_pages);
 
     extra_pages += xen_released_pages;
+    extra_pages += remapped_pages;
 
     /*
      * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
+5 -13
arch/x86/xen/time.c
···
 
 struct xen_clock_event_device {
     struct clock_event_device evt;
-    char *name;
+    char name[16];
 };
 static DEFINE_PER_CPU(struct xen_clock_event_device, xen_clock_events) = { .evt.irq = -1 };
 
···
     if (evt->irq >= 0) {
         unbind_from_irqhandler(evt->irq, NULL);
         evt->irq = -1;
-        kfree(per_cpu(xen_clock_events, cpu).name);
-        per_cpu(xen_clock_events, cpu).name = NULL;
     }
 }
 
 void xen_setup_timer(int cpu)
 {
-    char *name;
-    struct clock_event_device *evt;
+    struct xen_clock_event_device *xevt = &per_cpu(xen_clock_events, cpu);
+    struct clock_event_device *evt = &xevt->evt;
     int irq;
 
-    evt = &per_cpu(xen_clock_events, cpu).evt;
     WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu);
     if (evt->irq >= 0)
         xen_teardown_timer(cpu);
 
     printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);
 
-    name = kasprintf(GFP_KERNEL, "timer%d", cpu);
-    if (!name)
-        name = "<timer kasprintf failed>";
+    snprintf(xevt->name, sizeof(xevt->name), "timer%d", cpu);
 
     irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
                                   IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER|
                                   IRQF_FORCE_RESUME|IRQF_EARLY_RESUME,
-                                  name, NULL);
+                                  xevt->name, NULL);
     (void)xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX);
 
     memcpy(evt, xen_clockevent, sizeof(*evt));
 
     evt->cpumask = cpumask_of(cpu);
     evt->irq = irq;
-    per_cpu(xen_clock_events, cpu).name = name;
 }
 
 
 void xen_setup_cpu_clockevents(void)
 {
-    BUG_ON(preemptible());
-
     clockevents_register_device(this_cpu_ptr(&xen_clock_events.evt));
 }
 
+20 -1
block/blk-core.c
··· 473 473 } 474 474 EXPORT_SYMBOL_GPL(blk_queue_bypass_end); 475 475 476 + void blk_set_queue_dying(struct request_queue *q) 477 + { 478 + queue_flag_set_unlocked(QUEUE_FLAG_DYING, q); 479 + 480 + if (q->mq_ops) 481 + blk_mq_wake_waiters(q); 482 + else { 483 + struct request_list *rl; 484 + 485 + blk_queue_for_each_rl(rl, q) { 486 + if (rl->rq_pool) { 487 + wake_up(&rl->wait[BLK_RW_SYNC]); 488 + wake_up(&rl->wait[BLK_RW_ASYNC]); 489 + } 490 + } 491 + } 492 + } 493 + EXPORT_SYMBOL_GPL(blk_set_queue_dying); 494 + 476 495 /** 477 496 * blk_cleanup_queue - shutdown a request queue 478 497 * @q: request queue to shutdown ··· 505 486 506 487 /* mark @q DYING, no new request or merges will be allowed afterwards */ 507 488 mutex_lock(&q->sysfs_lock); 508 - queue_flag_set_unlocked(QUEUE_FLAG_DYING, q); 489 + blk_set_queue_dying(q); 509 490 spin_lock_irq(lock); 510 491 511 492 /*
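blk_set_queue_dying() factors the DYING transition out of blk_cleanup_queue() and wakes anyone sleeping for a tag (blk-mq) or a request_list slot (legacy path), so submitters observe the flag instead of blocking forever. Since it is exported, a driver that detects a dead device could, hypothetically, call it directly from its error path; a sketch with assumed names:

	static void mydev_mark_dead(struct mydev *md)
	{
		/* Wakes rl->wait / tag waiters; new allocations fail fast. */
		blk_set_queue_dying(md->queue);
		/* Normal teardown still goes through blk_cleanup_queue(). */
	}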
+10 -4
block/blk-mq-tag.c
··· 68 68 } 69 69 70 70 /* 71 - * Wakeup all potentially sleeping on normal (non-reserved) tags 71 + * Wakeup all potentially sleeping on tags 72 72 */ 73 - static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags) 73 + void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve) 74 74 { 75 75 struct blk_mq_bitmap_tags *bt; 76 76 int i, wake_index; ··· 84 84 wake_up(&bs->wait); 85 85 86 86 wake_index = bt_index_inc(wake_index); 87 + } 88 + 89 + if (include_reserve) { 90 + bt = &tags->breserved_tags; 91 + if (waitqueue_active(&bt->bs[0].wait)) 92 + wake_up(&bt->bs[0].wait); 87 93 } 88 94 } 89 95 ··· 106 100 107 101 atomic_dec(&tags->active_queues); 108 102 109 - blk_mq_tag_wakeup_all(tags); 103 + blk_mq_tag_wakeup_all(tags, false); 110 104 } 111 105 112 106 /* ··· 590 584 * static and should never need resizing. 591 585 */ 592 586 bt_update_count(&tags->bitmap_tags, tdepth); 593 - blk_mq_tag_wakeup_all(tags); 587 + blk_mq_tag_wakeup_all(tags, false); 594 588 return 0; 595 589 } 596 590
+1
block/blk-mq-tag.h
··· 54 54 extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page); 55 55 extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag); 56 56 extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth); 57 + extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool); 57 58 58 59 enum { 59 60 BLK_MQ_TAG_CACHE_MIN = 1,
+69 -6
block/blk-mq.c
··· 107 107 wake_up_all(&q->mq_freeze_wq); 108 108 } 109 109 110 - static void blk_mq_freeze_queue_start(struct request_queue *q) 110 + void blk_mq_freeze_queue_start(struct request_queue *q) 111 111 { 112 112 bool freeze; 113 113 ··· 120 120 blk_mq_run_queues(q, false); 121 121 } 122 122 } 123 + EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start); 123 124 124 125 static void blk_mq_freeze_queue_wait(struct request_queue *q) 125 126 { ··· 137 136 blk_mq_freeze_queue_wait(q); 138 137 } 139 138 140 - static void blk_mq_unfreeze_queue(struct request_queue *q) 139 + void blk_mq_unfreeze_queue(struct request_queue *q) 141 140 { 142 141 bool wake; 143 142 ··· 149 148 percpu_ref_reinit(&q->mq_usage_counter); 150 149 wake_up_all(&q->mq_freeze_wq); 151 150 } 151 + } 152 + EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue); 153 + 154 + void blk_mq_wake_waiters(struct request_queue *q) 155 + { 156 + struct blk_mq_hw_ctx *hctx; 157 + unsigned int i; 158 + 159 + queue_for_each_hw_ctx(q, hctx, i) 160 + if (blk_mq_hw_queue_mapped(hctx)) 161 + blk_mq_tag_wakeup_all(hctx->tags, true); 162 + 163 + /* 164 + * If we are called because the queue has now been marked as 165 + * dying, we need to ensure that processes currently waiting on 166 + * the queue are notified as well. 167 + */ 168 + wake_up_all(&q->mq_freeze_wq); 152 169 } 153 170 154 171 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx) ··· 277 258 ctx = alloc_data.ctx; 278 259 } 279 260 blk_mq_put_ctx(ctx); 280 - if (!rq) 261 + if (!rq) { 262 + blk_mq_queue_exit(q); 281 263 return ERR_PTR(-EWOULDBLOCK); 264 + } 282 265 return rq; 283 266 } 284 267 EXPORT_SYMBOL(blk_mq_alloc_request); ··· 404 383 } 405 384 EXPORT_SYMBOL(blk_mq_complete_request); 406 385 386 + int blk_mq_request_started(struct request *rq) 387 + { 388 + return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags); 389 + } 390 + EXPORT_SYMBOL_GPL(blk_mq_request_started); 391 + 407 392 void blk_mq_start_request(struct request *rq) 408 393 { 409 394 struct request_queue *q = rq->q; ··· 527 500 } 528 501 EXPORT_SYMBOL(blk_mq_add_to_requeue_list); 529 502 503 + void blk_mq_cancel_requeue_work(struct request_queue *q) 504 + { 505 + cancel_work_sync(&q->requeue_work); 506 + } 507 + EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work); 508 + 530 509 void blk_mq_kick_requeue_list(struct request_queue *q) 531 510 { 532 511 kblockd_schedule_work(&q->requeue_work); 533 512 } 534 513 EXPORT_SYMBOL(blk_mq_kick_requeue_list); 514 + 515 + void blk_mq_abort_requeue_list(struct request_queue *q) 516 + { 517 + unsigned long flags; 518 + LIST_HEAD(rq_list); 519 + 520 + spin_lock_irqsave(&q->requeue_lock, flags); 521 + list_splice_init(&q->requeue_list, &rq_list); 522 + spin_unlock_irqrestore(&q->requeue_lock, flags); 523 + 524 + while (!list_empty(&rq_list)) { 525 + struct request *rq; 526 + 527 + rq = list_first_entry(&rq_list, struct request, queuelist); 528 + list_del_init(&rq->queuelist); 529 + rq->errors = -EIO; 530 + blk_mq_end_request(rq, rq->errors); 531 + } 532 + } 533 + EXPORT_SYMBOL(blk_mq_abort_requeue_list); 535 534 536 535 static inline bool is_flush_request(struct request *rq, 537 536 struct blk_flush_queue *fq, unsigned int tag) ··· 619 566 break; 620 567 } 621 568 } 622 - 569 + 623 570 static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx, 624 571 struct request *rq, void *priv, bool reserved) 625 572 { 626 573 struct blk_mq_timeout_data *data = priv; 627 574 628 - if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) 575 + if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) { 576 + /* 577 + * If a request wasn't 
started before the queue was 578 + * marked dying, kill it here or it'll go unnoticed. 579 + */ 580 + if (unlikely(blk_queue_dying(rq->q))) { 581 + rq->errors = -EIO; 582 + blk_mq_complete_request(rq); 583 + } 584 + return; 585 + } 586 + if (rq->cmd_flags & REQ_NO_TIMEOUT) 629 587 return; 630 588 631 589 if (time_after_eq(jiffies, rq->deadline)) { ··· 1665 1601 hctx->queue = q; 1666 1602 hctx->queue_num = hctx_idx; 1667 1603 hctx->flags = set->flags; 1668 - hctx->cmd_size = set->cmd_size; 1669 1604 1670 1605 blk_mq_init_cpu_notifier(&hctx->cpu_notifier, 1671 1606 blk_mq_hctx_notify, hctx);
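Several of the helpers exported here (blk_mq_freeze_queue_start, blk_mq_unfreeze_queue, blk_mq_wake_waiters, blk_mq_cancel_requeue_work, blk_mq_abort_requeue_list) exist so a driver can quiesce and tear down a queue in a defined order. A condensed sketch of the reset-style sequence they enable, modeled on the nvme usage later in this series; the ordering is illustrative, not a fixed API contract:

	blk_mq_freeze_queue_start(q);		/* fail/block new entrants */
	blk_mq_cancel_requeue_work(q);		/* stop the requeue worker */
	blk_mq_stop_hw_queues(q);		/* halt dispatch to hardware */

	/* ... reset or remove the controller ... */

	blk_mq_start_stopped_hw_queues(q, true);
	blk_mq_unfreeze_queue(q);		/* reinit usage counter, wake waiters */
	blk_mq_kick_requeue_list(q);		/* flush anything requeued meanwhile */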
+1
block/blk-mq.h
··· 32 32 void blk_mq_clone_flush_request(struct request *flush_rq, 33 33 struct request *orig_rq); 34 34 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr); 35 + void blk_mq_wake_waiters(struct request_queue *q); 35 36 36 37 /* 37 38 * CPU hotplug helpers
+3
block/blk-timeout.c
··· 190 190 struct request_queue *q = req->q; 191 191 unsigned long expiry; 192 192 193 + if (req->cmd_flags & REQ_NO_TIMEOUT) 194 + return; 195 + 193 196 /* blk-mq has its own handler, so we don't need ->rq_timed_out_fn */ 194 197 if (!q->mq_ops && !q->rq_timed_out_fn) 195 198 return;
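REQ_NO_TIMEOUT exempts a request from the timeout machinery in both blk_add_timer() above and the blk-mq expiry scan, for commands that legitimately stay outstanding indefinitely, such as nvme's async event request in the nvme-core.c hunk below. Sketch of the submission side (the rw/gfp/reserved arguments here are illustrative):

	struct request *req;

	req = blk_mq_alloc_request(q, WRITE, GFP_KERNEL, false);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->cmd_flags |= REQ_NO_TIMEOUT;	/* never expired by blk_add_timer() */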
+4 -2
drivers/Makefile
··· 50 50 obj-y += tty/ 51 51 obj-y += char/ 52 52 53 - # gpu/ comes after char for AGP vs DRM startup 53 + # iommu/ comes before gpu as gpu are using iommu controllers 54 + obj-$(CONFIG_IOMMU_SUPPORT) += iommu/ 55 + 56 + # gpu/ comes after char for AGP vs DRM startup and after iommu 54 57 obj-y += gpu/ 55 58 56 59 obj-$(CONFIG_CONNECTOR) += connector/ ··· 144 141 145 142 obj-$(CONFIG_MAILBOX) += mailbox/ 146 143 obj-$(CONFIG_HWSPINLOCK) += hwspinlock/ 147 - obj-$(CONFIG_IOMMU_SUPPORT) += iommu/ 148 144 obj-$(CONFIG_REMOTEPROC) += remoteproc/ 149 145 obj-$(CONFIG_RPMSG) += rpmsg/ 150 146
+14 -11
drivers/acpi/acpi_processor.c
··· 170 170 acpi_status status; 171 171 int ret; 172 172 173 - if (pr->apic_id == -1) 173 + if (pr->phys_id == -1) 174 174 return -ENODEV; 175 175 176 176 status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta); ··· 180 180 cpu_maps_update_begin(); 181 181 cpu_hotplug_begin(); 182 182 183 - ret = acpi_map_lsapic(pr->handle, pr->apic_id, &pr->id); 183 + ret = acpi_map_cpu(pr->handle, pr->phys_id, &pr->id); 184 184 if (ret) 185 185 goto out; 186 186 187 187 ret = arch_register_cpu(pr->id); 188 188 if (ret) { 189 - acpi_unmap_lsapic(pr->id); 189 + acpi_unmap_cpu(pr->id); 190 190 goto out; 191 191 } 192 192 ··· 215 215 union acpi_object object = { 0 }; 216 216 struct acpi_buffer buffer = { sizeof(union acpi_object), &object }; 217 217 struct acpi_processor *pr = acpi_driver_data(device); 218 - int apic_id, cpu_index, device_declaration = 0; 218 + int phys_id, cpu_index, device_declaration = 0; 219 219 acpi_status status = AE_OK; 220 220 static int cpu0_initialized; 221 221 unsigned long long value; ··· 262 262 pr->acpi_id = value; 263 263 } 264 264 265 - apic_id = acpi_get_apicid(pr->handle, device_declaration, pr->acpi_id); 266 - if (apic_id < 0) 267 - acpi_handle_debug(pr->handle, "failed to get CPU APIC ID.\n"); 268 - pr->apic_id = apic_id; 265 + phys_id = acpi_get_phys_id(pr->handle, device_declaration, pr->acpi_id); 266 + if (phys_id < 0) 267 + acpi_handle_debug(pr->handle, "failed to get CPU physical ID.\n"); 268 + pr->phys_id = phys_id; 269 269 270 - cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id); 270 + cpu_index = acpi_map_cpuid(pr->phys_id, pr->acpi_id); 271 271 if (!cpu0_initialized && !acpi_has_cpu_in_madt()) { 272 272 cpu0_initialized = 1; 273 - /* Handle UP system running SMP kernel, with no LAPIC in MADT */ 273 + /* 274 + * Handle UP system running SMP kernel, with no CPU 275 + * entry in MADT 276 + */ 274 277 if ((cpu_index == -1) && (num_online_cpus() == 1)) 275 278 cpu_index = 0; 276 279 } ··· 461 458 462 459 /* Remove the CPU. */ 463 460 arch_unregister_cpu(pr->id); 464 - acpi_unmap_lsapic(pr->id); 461 + acpi_unmap_cpu(pr->id); 465 462 466 463 cpu_hotplug_done(); 467 464 cpu_maps_update_done();
+1 -1
drivers/acpi/device_pm.c
··· 257 257 258 258 device->power.state = ACPI_STATE_UNKNOWN; 259 259 if (!acpi_device_is_present(device)) 260 - return 0; 260 + return -ENXIO; 261 261 262 262 result = acpi_device_get_power(device, &state); 263 263 if (result)
+7 -4
drivers/acpi/int340x_thermal.c
··· 14 14 15 15 #include "internal.h" 16 16 17 - #define DO_ENUMERATION 0x01 17 + #define INT3401_DEVICE 0X01 18 18 static const struct acpi_device_id int340x_thermal_device_ids[] = { 19 - {"INT3400", DO_ENUMERATION }, 20 - {"INT3401"}, 19 + {"INT3400"}, 20 + {"INT3401", INT3401_DEVICE}, 21 21 {"INT3402"}, 22 22 {"INT3403"}, 23 23 {"INT3404"}, ··· 34 34 const struct acpi_device_id *id) 35 35 { 36 36 #if defined(CONFIG_INT340X_THERMAL) || defined(CONFIG_INT340X_THERMAL_MODULE) 37 - if (id->driver_data == DO_ENUMERATION) 37 + acpi_create_platform_device(adev); 38 + #elif defined(INTEL_SOC_DTS_THERMAL) || defined(INTEL_SOC_DTS_THERMAL_MODULE) 39 + /* Intel SoC DTS thermal driver needs INT3401 to set IRQ descriptor */ 40 + if (id->driver_data == INT3401_DEVICE) 38 41 acpi_create_platform_device(adev); 39 42 #endif 40 43 return 1;
+28 -28
drivers/acpi/processor_core.c
··· 69 69 unsigned long madt_end, entry; 70 70 static struct acpi_table_madt *madt; 71 71 static int read_madt; 72 - int apic_id = -1; 72 + int phys_id = -1; /* CPU hardware ID */ 73 73 74 74 if (!read_madt) { 75 75 if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0, ··· 79 79 } 80 80 81 81 if (!madt) 82 - return apic_id; 82 + return phys_id; 83 83 84 84 entry = (unsigned long)madt; 85 85 madt_end = entry + madt->header.length; ··· 91 91 struct acpi_subtable_header *header = 92 92 (struct acpi_subtable_header *)entry; 93 93 if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) { 94 - if (!map_lapic_id(header, acpi_id, &apic_id)) 94 + if (!map_lapic_id(header, acpi_id, &phys_id)) 95 95 break; 96 96 } else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) { 97 - if (!map_x2apic_id(header, type, acpi_id, &apic_id)) 97 + if (!map_x2apic_id(header, type, acpi_id, &phys_id)) 98 98 break; 99 99 } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) { 100 - if (!map_lsapic_id(header, type, acpi_id, &apic_id)) 100 + if (!map_lsapic_id(header, type, acpi_id, &phys_id)) 101 101 break; 102 102 } 103 103 entry += header->length; 104 104 } 105 - return apic_id; 105 + return phys_id; 106 106 } 107 107 108 108 static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id) ··· 110 110 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 111 111 union acpi_object *obj; 112 112 struct acpi_subtable_header *header; 113 - int apic_id = -1; 113 + int phys_id = -1; 114 114 115 115 if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer))) 116 116 goto exit; ··· 126 126 127 127 header = (struct acpi_subtable_header *)obj->buffer.pointer; 128 128 if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) 129 - map_lapic_id(header, acpi_id, &apic_id); 129 + map_lapic_id(header, acpi_id, &phys_id); 130 130 else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) 131 - map_lsapic_id(header, type, acpi_id, &apic_id); 131 + map_lsapic_id(header, type, acpi_id, &phys_id); 132 132 else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) 133 - map_x2apic_id(header, type, acpi_id, &apic_id); 133 + map_x2apic_id(header, type, acpi_id, &phys_id); 134 134 135 135 exit: 136 136 kfree(buffer.pointer); 137 - return apic_id; 137 + return phys_id; 138 138 } 139 139 140 - int acpi_get_apicid(acpi_handle handle, int type, u32 acpi_id) 140 + int acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id) 141 141 { 142 - int apic_id; 142 + int phys_id; 143 143 144 - apic_id = map_mat_entry(handle, type, acpi_id); 145 - if (apic_id == -1) 146 - apic_id = map_madt_entry(type, acpi_id); 144 + phys_id = map_mat_entry(handle, type, acpi_id); 145 + if (phys_id == -1) 146 + phys_id = map_madt_entry(type, acpi_id); 147 147 148 - return apic_id; 148 + return phys_id; 149 149 } 150 150 151 - int acpi_map_cpuid(int apic_id, u32 acpi_id) 151 + int acpi_map_cpuid(int phys_id, u32 acpi_id) 152 152 { 153 153 #ifdef CONFIG_SMP 154 154 int i; 155 155 #endif 156 156 157 - if (apic_id == -1) { 157 + if (phys_id == -1) { 158 158 /* 159 159 * On UP processor, there is no _MAT or MADT table. 160 - * So above apic_id is always set to -1. 160 + * So above phys_id is always set to -1. 161 161 * 162 162 * BIOS may define multiple CPU handles even for UP processor. 163 163 * For example, ··· 170 170 * Processor (CPU3, 0x03, 0x00000410, 0x06) {} 171 171 * } 172 172 * 173 - * Ignores apic_id and always returns 0 for the processor 173 + * Ignores phys_id and always returns 0 for the processor 174 174 * handle with acpi id 0 if nr_cpu_ids is 1. 
175 175 * This should be the case if SMP tables are not found. 176 176 * Return -1 for other CPU's handle. ··· 178 178 if (nr_cpu_ids <= 1 && acpi_id == 0) 179 179 return acpi_id; 180 180 else 181 - return apic_id; 181 + return phys_id; 182 182 } 183 183 184 184 #ifdef CONFIG_SMP 185 185 for_each_possible_cpu(i) { 186 - if (cpu_physical_id(i) == apic_id) 186 + if (cpu_physical_id(i) == phys_id) 187 187 return i; 188 188 } 189 189 #else 190 190 /* In UP kernel, only processor 0 is valid */ 191 - if (apic_id == 0) 192 - return apic_id; 191 + if (phys_id == 0) 192 + return phys_id; 193 193 #endif 194 194 return -1; 195 195 } 196 196 197 197 int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id) 198 198 { 199 - int apic_id; 199 + int phys_id; 200 200 201 - apic_id = acpi_get_apicid(handle, type, acpi_id); 201 + phys_id = acpi_get_phys_id(handle, type, acpi_id); 202 202 203 - return acpi_map_cpuid(apic_id, acpi_id); 203 + return acpi_map_cpuid(phys_id, acpi_id); 204 204 } 205 205 EXPORT_SYMBOL_GPL(acpi_get_cpuid);
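The apic_id -> phys_id rename makes the lookup pipeline architecture-neutral: phys_id is an APIC ID on x86 but a SAPIC or other hardware ID on the remaining MADT-bearing platforms. The two-step resolution the renamed helpers implement is exactly what acpi_get_cpuid() does above; restated as a sketch:

	/* ACPI processor object -> hardware CPU id -> logical CPU number. */
	int phys_id = acpi_get_phys_id(handle, type, acpi_id);	/* _MAT first, then MADT */
	int cpu     = acpi_map_cpuid(phys_id, acpi_id);		/* -1 if not mappable */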
+8 -5
drivers/acpi/scan.c
··· 1001 1001 if (device->wakeup.flags.valid) 1002 1002 acpi_power_resources_list_free(&device->wakeup.resources); 1003 1003 1004 - if (!device->flags.power_manageable) 1004 + if (!device->power.flags.power_resources) 1005 1005 return; 1006 1006 1007 1007 for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) { ··· 1744 1744 device->power.flags.power_resources) 1745 1745 device->power.states[ACPI_STATE_D3_COLD].flags.os_accessible = 1; 1746 1746 1747 - if (acpi_bus_init_power(device)) { 1748 - acpi_free_power_resources_lists(device); 1747 + if (acpi_bus_init_power(device)) 1749 1748 device->flags.power_manageable = 0; 1750 - } 1751 1749 } 1752 1750 1753 1751 static void acpi_bus_get_flags(struct acpi_device *device) ··· 2369 2371 /* Skip devices that are not present. */ 2370 2372 if (!acpi_device_is_present(device)) { 2371 2373 device->flags.visited = false; 2374 + device->flags.power_manageable = 0; 2372 2375 return; 2373 2376 } 2374 2377 if (device->handler) 2375 2378 goto ok; 2376 2379 2377 2380 if (!device->flags.initialized) { 2378 - acpi_bus_update_power(device, NULL); 2381 + device->flags.power_manageable = 2382 + device->power.states[ACPI_STATE_D0].flags.valid; 2383 + if (acpi_bus_init_power(device)) 2384 + device->flags.power_manageable = 0; 2385 + 2379 2386 device->flags.initialized = true; 2380 2387 } 2381 2388 device->flags.visited = false;
+10
drivers/acpi/video.c
··· 522 522 DMI_MATCH(DMI_PRODUCT_NAME, "370R4E/370R4V/370R5E/3570RE/370R5V"), 523 523 }, 524 524 }, 525 + 526 + { 527 + /* https://bugzilla.redhat.com/show_bug.cgi?id=1163574 */ 528 + .callback = video_disable_native_backlight, 529 + .ident = "Dell XPS15 L521X", 530 + .matches = { 531 + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 532 + DMI_MATCH(DMI_PRODUCT_NAME, "XPS L521X"), 533 + }, 534 + }, 525 535 {} 526 536 }; 527 537
+1 -1
drivers/block/null_blk.c
··· 530 530 goto out_cleanup_queues; 531 531 532 532 nullb->q = blk_mq_init_queue(&nullb->tag_set); 533 - if (!nullb->q) { 533 + if (IS_ERR(nullb->q)) { 534 534 rv = -ENOMEM; 535 535 goto out_cleanup_tags; 536 536 }
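blk_mq_init_queue() now reports failure with ERR_PTR() rather than NULL, so a bare !q test would miss real errors; the same IS_ERR() conversion appears in nvme-core.c and virtio_blk.c below. A caller could also, hypothetically, propagate the encoded errno instead of the fixed -ENOMEM kept here:

	q = blk_mq_init_queue(&tag_set);
	if (IS_ERR(q))
		return PTR_ERR(q);	/* preserve the real errno */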
+126 -49
drivers/block/nvme-core.c
··· 215 215 cmd->fn = handler; 216 216 cmd->ctx = ctx; 217 217 cmd->aborted = 0; 218 + blk_mq_start_request(blk_mq_rq_from_pdu(cmd)); 218 219 } 219 220 220 221 /* Special values must be less than 0x1000 */ ··· 432 431 if (unlikely(status)) { 433 432 if (!(status & NVME_SC_DNR || blk_noretry_request(req)) 434 433 && (jiffies - req->start_time) < req->timeout) { 434 + unsigned long flags; 435 + 435 436 blk_mq_requeue_request(req); 436 - blk_mq_kick_requeue_list(req->q); 437 + spin_lock_irqsave(req->q->queue_lock, flags); 438 + if (!blk_queue_stopped(req->q)) 439 + blk_mq_kick_requeue_list(req->q); 440 + spin_unlock_irqrestore(req->q->queue_lock, flags); 437 441 return; 438 442 } 439 443 req->errors = nvme_error_status(status); ··· 670 664 } 671 665 } 672 666 673 - blk_mq_start_request(req); 674 - 675 667 nvme_set_info(cmd, iod, req_completion); 676 668 spin_lock_irq(&nvmeq->q_lock); 677 669 if (req->cmd_flags & REQ_DISCARD) ··· 839 835 if (IS_ERR(req)) 840 836 return PTR_ERR(req); 841 837 838 + req->cmd_flags |= REQ_NO_TIMEOUT; 842 839 cmd_info = blk_mq_rq_to_pdu(req); 843 840 nvme_set_info(cmd_info, req, async_req_completion); 844 841 ··· 1021 1016 struct nvme_command cmd; 1022 1017 1023 1018 if (!nvmeq->qid || cmd_rq->aborted) { 1019 + unsigned long flags; 1020 + 1021 + spin_lock_irqsave(&dev_list_lock, flags); 1024 1022 if (work_busy(&dev->reset_work)) 1025 - return; 1023 + goto out; 1026 1024 list_del_init(&dev->node); 1027 1025 dev_warn(&dev->pci_dev->dev, 1028 1026 "I/O %d QID %d timeout, reset controller\n", 1029 1027 req->tag, nvmeq->qid); 1030 1028 dev->reset_workfn = nvme_reset_failed_dev; 1031 1029 queue_work(nvme_workq, &dev->reset_work); 1030 + out: 1031 + spin_unlock_irqrestore(&dev_list_lock, flags); 1032 1032 return; 1033 1033 } 1034 1034 ··· 1074 1064 void *ctx; 1075 1065 nvme_completion_fn fn; 1076 1066 struct nvme_cmd_info *cmd; 1077 - static struct nvme_completion cqe = { 1078 - .status = cpu_to_le16(NVME_SC_ABORT_REQ << 1), 1079 - }; 1067 + struct nvme_completion cqe; 1068 + 1069 + if (!blk_mq_request_started(req)) 1070 + return; 1080 1071 1081 1072 cmd = blk_mq_rq_to_pdu(req); 1082 1073 1083 1074 if (cmd->ctx == CMD_CTX_CANCELLED) 1084 1075 return; 1076 + 1077 + if (blk_queue_dying(req->q)) 1078 + cqe.status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1); 1079 + else 1080 + cqe.status = cpu_to_le16(NVME_SC_ABORT_REQ << 1); 1081 + 1085 1082 1086 1083 dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n", 1087 1084 req->tag, nvmeq->qid); ··· 1101 1084 struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req); 1102 1085 struct nvme_queue *nvmeq = cmd->nvmeq; 1103 1086 1104 - dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag, 1105 - nvmeq->qid); 1106 - if (nvmeq->dev->initialized) 1107 - nvme_abort_req(req); 1108 - 1109 1087 /* 1110 1088 * The aborted req will be completed on receiving the abort req. 1111 1089 * We enable the timer again. If hit twice, it'll cause a device reset, 1112 1090 * as the device then is in a faulty state. 1113 1091 */ 1114 - return BLK_EH_RESET_TIMER; 1092 + int ret = BLK_EH_RESET_TIMER; 1093 + 1094 + dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag, 1095 + nvmeq->qid); 1096 + 1097 + spin_lock_irq(&nvmeq->q_lock); 1098 + if (!nvmeq->dev->initialized) { 1099 + /* 1100 + * Force cancelled command frees the request, which requires we 1101 + * return BLK_EH_NOT_HANDLED. 
1102 + */ 1103 + nvme_cancel_queue_ios(nvmeq->hctx, req, nvmeq, reserved); 1104 + ret = BLK_EH_NOT_HANDLED; 1105 + } else 1106 + nvme_abort_req(req); 1107 + spin_unlock_irq(&nvmeq->q_lock); 1108 + 1109 + return ret; 1115 1110 } 1116 1111 1117 1112 static void nvme_free_queue(struct nvme_queue *nvmeq) ··· 1160 1131 */ 1161 1132 static int nvme_suspend_queue(struct nvme_queue *nvmeq) 1162 1133 { 1163 - int vector = nvmeq->dev->entry[nvmeq->cq_vector].vector; 1134 + int vector; 1164 1135 1165 1136 spin_lock_irq(&nvmeq->q_lock); 1137 + if (nvmeq->cq_vector == -1) { 1138 + spin_unlock_irq(&nvmeq->q_lock); 1139 + return 1; 1140 + } 1141 + vector = nvmeq->dev->entry[nvmeq->cq_vector].vector; 1166 1142 nvmeq->dev->online_queues--; 1143 + nvmeq->cq_vector = -1; 1167 1144 spin_unlock_irq(&nvmeq->q_lock); 1168 1145 1169 1146 irq_set_affinity_hint(vector, NULL); ··· 1204 1169 adapter_delete_sq(dev, qid); 1205 1170 adapter_delete_cq(dev, qid); 1206 1171 } 1172 + if (!qid && dev->admin_q) 1173 + blk_mq_freeze_queue_start(dev->admin_q); 1207 1174 nvme_clear_queue(nvmeq); 1208 1175 } 1209 1176 1210 1177 static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, 1211 - int depth, int vector) 1178 + int depth) 1212 1179 { 1213 1180 struct device *dmadev = &dev->pci_dev->dev; 1214 1181 struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq), GFP_KERNEL); ··· 1236 1199 nvmeq->cq_phase = 1; 1237 1200 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; 1238 1201 nvmeq->q_depth = depth; 1239 - nvmeq->cq_vector = vector; 1240 1202 nvmeq->qid = qid; 1241 1203 dev->queue_count++; 1242 1204 dev->queues[qid] = nvmeq; ··· 1280 1244 struct nvme_dev *dev = nvmeq->dev; 1281 1245 int result; 1282 1246 1247 + nvmeq->cq_vector = qid - 1; 1283 1248 result = adapter_alloc_cq(dev, qid, nvmeq); 1284 1249 if (result < 0) 1285 1250 return result; ··· 1392 1355 .timeout = nvme_timeout, 1393 1356 }; 1394 1357 1358 + static void nvme_dev_remove_admin(struct nvme_dev *dev) 1359 + { 1360 + if (dev->admin_q && !blk_queue_dying(dev->admin_q)) { 1361 + blk_cleanup_queue(dev->admin_q); 1362 + blk_mq_free_tag_set(&dev->admin_tagset); 1363 + } 1364 + } 1365 + 1395 1366 static int nvme_alloc_admin_tags(struct nvme_dev *dev) 1396 1367 { 1397 1368 if (!dev->admin_q) { ··· 1415 1370 return -ENOMEM; 1416 1371 1417 1372 dev->admin_q = blk_mq_init_queue(&dev->admin_tagset); 1418 - if (!dev->admin_q) { 1373 + if (IS_ERR(dev->admin_q)) { 1419 1374 blk_mq_free_tag_set(&dev->admin_tagset); 1420 1375 return -ENOMEM; 1421 1376 } 1422 - } 1377 + if (!blk_get_queue(dev->admin_q)) { 1378 + nvme_dev_remove_admin(dev); 1379 + return -ENODEV; 1380 + } 1381 + } else 1382 + blk_mq_unfreeze_queue(dev->admin_q); 1423 1383 1424 1384 return 0; 1425 - } 1426 - 1427 - static void nvme_free_admin_tags(struct nvme_dev *dev) 1428 - { 1429 - if (dev->admin_q) 1430 - blk_mq_free_tag_set(&dev->admin_tagset); 1431 1385 } 1432 1386 1433 1387 static int nvme_configure_admin_queue(struct nvme_dev *dev) ··· 1460 1416 1461 1417 nvmeq = dev->queues[0]; 1462 1418 if (!nvmeq) { 1463 - nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH, 0); 1419 + nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH); 1464 1420 if (!nvmeq) 1465 1421 return -ENOMEM; 1466 1422 } ··· 1483 1439 if (result) 1484 1440 goto free_nvmeq; 1485 1441 1486 - result = nvme_alloc_admin_tags(dev); 1442 + nvmeq->cq_vector = 0; 1443 + result = queue_request_irq(dev, nvmeq, nvmeq->irqname); 1487 1444 if (result) 1488 1445 goto free_nvmeq; 1489 1446 1490 - result = queue_request_irq(dev, nvmeq, nvmeq->irqname); 1491 - if 
(result) 1492 - goto free_tags; 1493 - 1494 1447 return result; 1495 1448 1496 - free_tags: 1497 - nvme_free_admin_tags(dev); 1498 1449 free_nvmeq: 1499 1450 nvme_free_queues(dev, 0); 1500 1451 return result; ··· 1983 1944 unsigned i; 1984 1945 1985 1946 for (i = dev->queue_count; i <= dev->max_qid; i++) 1986 - if (!nvme_alloc_queue(dev, i, dev->q_depth, i - 1)) 1947 + if (!nvme_alloc_queue(dev, i, dev->q_depth)) 1987 1948 break; 1988 1949 1989 1950 for (i = dev->online_queues; i <= dev->queue_count - 1; i++) ··· 2274 2235 break; 2275 2236 if (!schedule_timeout(ADMIN_TIMEOUT) || 2276 2237 fatal_signal_pending(current)) { 2238 + /* 2239 + * Disable the controller first since we can't trust it 2240 + * at this point, but leave the admin queue enabled 2241 + * until all queue deletion requests are flushed. 2242 + * FIXME: This may take a while if there are more h/w 2243 + * queues than admin tags. 2244 + */ 2277 2245 set_current_state(TASK_RUNNING); 2278 - 2279 2246 nvme_disable_ctrl(dev, readq(&dev->bar->cap)); 2280 - nvme_disable_queue(dev, 0); 2281 - 2282 - send_sig(SIGKILL, dq->worker->task, 1); 2247 + nvme_clear_queue(dev->queues[0]); 2283 2248 flush_kthread_worker(dq->worker); 2249 + nvme_disable_queue(dev, 0); 2284 2250 return; 2285 2251 } 2286 2252 } ··· 2362 2318 { 2363 2319 struct nvme_queue *nvmeq = container_of(work, struct nvme_queue, 2364 2320 cmdinfo.work); 2365 - allow_signal(SIGKILL); 2366 2321 if (nvme_delete_sq(nvmeq)) 2367 2322 nvme_del_queue_end(nvmeq); 2368 2323 } ··· 2419 2376 kthread_stop(tmp); 2420 2377 } 2421 2378 2379 + static void nvme_freeze_queues(struct nvme_dev *dev) 2380 + { 2381 + struct nvme_ns *ns; 2382 + 2383 + list_for_each_entry(ns, &dev->namespaces, list) { 2384 + blk_mq_freeze_queue_start(ns->queue); 2385 + 2386 + spin_lock(ns->queue->queue_lock); 2387 + queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue); 2388 + spin_unlock(ns->queue->queue_lock); 2389 + 2390 + blk_mq_cancel_requeue_work(ns->queue); 2391 + blk_mq_stop_hw_queues(ns->queue); 2392 + } 2393 + } 2394 + 2395 + static void nvme_unfreeze_queues(struct nvme_dev *dev) 2396 + { 2397 + struct nvme_ns *ns; 2398 + 2399 + list_for_each_entry(ns, &dev->namespaces, list) { 2400 + queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue); 2401 + blk_mq_unfreeze_queue(ns->queue); 2402 + blk_mq_start_stopped_hw_queues(ns->queue, true); 2403 + blk_mq_kick_requeue_list(ns->queue); 2404 + } 2405 + } 2406 + 2422 2407 static void nvme_dev_shutdown(struct nvme_dev *dev) 2423 2408 { 2424 2409 int i; ··· 2455 2384 dev->initialized = 0; 2456 2385 nvme_dev_list_remove(dev); 2457 2386 2458 - if (dev->bar) 2387 + if (dev->bar) { 2388 + nvme_freeze_queues(dev); 2459 2389 csts = readl(&dev->bar->csts); 2390 + } 2460 2391 if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) { 2461 2392 for (i = dev->queue_count - 1; i >= 0; i--) { 2462 2393 struct nvme_queue *nvmeq = dev->queues[i]; ··· 2473 2400 nvme_dev_unmap(dev); 2474 2401 } 2475 2402 2476 - static void nvme_dev_remove_admin(struct nvme_dev *dev) 2477 - { 2478 - if (dev->admin_q && !blk_queue_dying(dev->admin_q)) 2479 - blk_cleanup_queue(dev->admin_q); 2480 - } 2481 - 2482 2403 static void nvme_dev_remove(struct nvme_dev *dev) 2483 2404 { 2484 2405 struct nvme_ns *ns; ··· 2480 2413 list_for_each_entry(ns, &dev->namespaces, list) { 2481 2414 if (ns->disk->flags & GENHD_FL_UP) 2482 2415 del_gendisk(ns->disk); 2483 - if (!blk_queue_dying(ns->queue)) 2416 + if (!blk_queue_dying(ns->queue)) { 2417 + blk_mq_abort_requeue_list(ns->queue); 2484 2418 blk_cleanup_queue(ns->queue); 
2419 + } 2485 2420 } 2486 2421 } 2487 2422 ··· 2564 2495 nvme_free_namespaces(dev); 2565 2496 nvme_release_instance(dev); 2566 2497 blk_mq_free_tag_set(&dev->tagset); 2498 + blk_put_queue(dev->admin_q); 2567 2499 kfree(dev->queues); 2568 2500 kfree(dev->entry); 2569 2501 kfree(dev); ··· 2661 2591 } 2662 2592 2663 2593 nvme_init_queue(dev->queues[0], 0); 2594 + result = nvme_alloc_admin_tags(dev); 2595 + if (result) 2596 + goto disable; 2664 2597 2665 2598 result = nvme_setup_io_queues(dev); 2666 2599 if (result) 2667 - goto disable; 2600 + goto free_tags; 2668 2601 2669 2602 nvme_set_irq_hints(dev); 2670 2603 2671 2604 return result; 2672 2605 2606 + free_tags: 2607 + nvme_dev_remove_admin(dev); 2673 2608 disable: 2674 2609 nvme_disable_queue(dev, 0); 2675 2610 nvme_dev_list_remove(dev); ··· 2714 2639 dev->reset_workfn = nvme_remove_disks; 2715 2640 queue_work(nvme_workq, &dev->reset_work); 2716 2641 spin_unlock(&dev_list_lock); 2642 + } else { 2643 + nvme_unfreeze_queues(dev); 2644 + nvme_set_irq_hints(dev); 2717 2645 } 2718 2646 dev->initialized = 1; 2719 2647 return 0; ··· 2854 2776 pci_set_drvdata(pdev, NULL); 2855 2777 flush_work(&dev->reset_work); 2856 2778 misc_deregister(&dev->miscdev); 2857 - nvme_dev_remove(dev); 2858 2779 nvme_dev_shutdown(dev); 2780 + nvme_dev_remove(dev); 2859 2781 nvme_dev_remove_admin(dev); 2860 2782 nvme_free_queues(dev, 0); 2861 - nvme_free_admin_tags(dev); 2862 2783 nvme_release_prp_pools(dev); 2863 2784 kref_put(&dev->kref, nvme_free_dev); 2864 2785 }
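The timeout-handler rework above leans on a core contract worth spelling out: a blk-mq ->timeout handler that completes the request itself must return BLK_EH_NOT_HANDLED so the block layer does not touch the request again, while BLK_EH_RESET_TIMER re-arms the timer and leaves completion to a later event (here, the abort completing). A reduced sketch with hypothetical device helpers:

	static enum blk_eh_timer_return my_timeout(struct request *req, bool reserved)
	{
		struct my_dev *dev = req->q->queuedata;

		if (!dev->alive) {
			/* We complete it -> core must not requeue/retime it. */
			req->errors = -EIO;
			blk_mq_complete_request(req);
			return BLK_EH_NOT_HANDLED;
		}

		my_send_abort(dev, req);	/* completion arrives asynchronously */
		return BLK_EH_RESET_TIMER;
	}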
+1 -1
drivers/block/virtio_blk.c
··· 638 638 goto out_put_disk; 639 639 640 640 q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set); 641 - if (!q) { 641 + if (IS_ERR(q)) { 642 642 err = -ENOMEM; 643 643 goto out_free_tags; 644 644 }
+2 -1
drivers/char/ipmi/ipmi_ssif.c
··· 969 969 970 970 do_gettimeofday(&t); 971 971 pr_info("**Enqueue %02x %02x: %ld.%6.6ld\n", 972 - msg->data[0], msg->data[1], t.tv_sec, t.tv_usec); 972 + msg->data[0], msg->data[1], 973 + (long) t.tv_sec, (long) t.tv_usec); 973 974 } 974 975 } 975 976
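The added casts fix a printk format mismatch: tv_sec and tv_usec are time_t and suseconds_t, whose widths vary by architecture, so %ld without a cast warns or misprints on some builds. Minimal illustration:

	struct timeval t;

	do_gettimeofday(&t);
	pr_info("%ld.%6.6ld\n", (long)t.tv_sec, (long)t.tv_usec);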
+67 -89
drivers/gpio/gpio-dln2.c
··· 47 47 48 48 #define DLN2_GPIO_MAX_PINS 32 49 49 50 - struct dln2_irq_work { 51 - struct work_struct work; 52 - struct dln2_gpio *dln2; 53 - int pin; 54 - int type; 55 - }; 56 - 57 50 struct dln2_gpio { 58 51 struct platform_device *pdev; 59 52 struct gpio_chip gpio; ··· 57 64 */ 58 65 DECLARE_BITMAP(output_enabled, DLN2_GPIO_MAX_PINS); 59 66 60 - DECLARE_BITMAP(irqs_masked, DLN2_GPIO_MAX_PINS); 61 - DECLARE_BITMAP(irqs_enabled, DLN2_GPIO_MAX_PINS); 62 - DECLARE_BITMAP(irqs_pending, DLN2_GPIO_MAX_PINS); 63 - struct dln2_irq_work *irq_work; 67 + /* active IRQs - not synced to hardware */ 68 + DECLARE_BITMAP(unmasked_irqs, DLN2_GPIO_MAX_PINS); 69 + /* active IRQS - synced to hardware */ 70 + DECLARE_BITMAP(enabled_irqs, DLN2_GPIO_MAX_PINS); 71 + int irq_type[DLN2_GPIO_MAX_PINS]; 72 + struct mutex irq_lock; 64 73 }; 65 74 66 75 struct dln2_gpio_pin { ··· 136 141 return !!ret; 137 142 } 138 143 139 - static void dln2_gpio_pin_set_out_val(struct dln2_gpio *dln2, 140 - unsigned int pin, int value) 144 + static int dln2_gpio_pin_set_out_val(struct dln2_gpio *dln2, 145 + unsigned int pin, int value) 141 146 { 142 147 struct dln2_gpio_pin_val req = { 143 148 .pin = cpu_to_le16(pin), 144 149 .value = value, 145 150 }; 146 151 147 - dln2_transfer_tx(dln2->pdev, DLN2_GPIO_PIN_SET_OUT_VAL, &req, 148 - sizeof(req)); 152 + return dln2_transfer_tx(dln2->pdev, DLN2_GPIO_PIN_SET_OUT_VAL, &req, 153 + sizeof(req)); 149 154 } 150 155 151 156 #define DLN2_GPIO_DIRECTION_IN 0 ··· 262 267 static int dln2_gpio_direction_output(struct gpio_chip *chip, unsigned offset, 263 268 int value) 264 269 { 270 + struct dln2_gpio *dln2 = container_of(chip, struct dln2_gpio, gpio); 271 + int ret; 272 + 273 + ret = dln2_gpio_pin_set_out_val(dln2, offset, value); 274 + if (ret < 0) 275 + return ret; 276 + 265 277 return dln2_gpio_set_direction(chip, offset, DLN2_GPIO_DIRECTION_OUT); 266 278 } 267 279 ··· 299 297 &req, sizeof(req)); 300 298 } 301 299 302 - static void dln2_irq_work(struct work_struct *w) 303 - { 304 - struct dln2_irq_work *iw = container_of(w, struct dln2_irq_work, work); 305 - struct dln2_gpio *dln2 = iw->dln2; 306 - u8 type = iw->type & DLN2_GPIO_EVENT_MASK; 307 - 308 - if (test_bit(iw->pin, dln2->irqs_enabled)) 309 - dln2_gpio_set_event_cfg(dln2, iw->pin, type, 0); 310 - else 311 - dln2_gpio_set_event_cfg(dln2, iw->pin, DLN2_GPIO_EVENT_NONE, 0); 312 - } 313 - 314 - static void dln2_irq_enable(struct irq_data *irqd) 300 + static void dln2_irq_unmask(struct irq_data *irqd) 315 301 { 316 302 struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd); 317 303 struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio); 318 304 int pin = irqd_to_hwirq(irqd); 319 305 320 - set_bit(pin, dln2->irqs_enabled); 321 - schedule_work(&dln2->irq_work[pin].work); 322 - } 323 - 324 - static void dln2_irq_disable(struct irq_data *irqd) 325 - { 326 - struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd); 327 - struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio); 328 - int pin = irqd_to_hwirq(irqd); 329 - 330 - clear_bit(pin, dln2->irqs_enabled); 331 - schedule_work(&dln2->irq_work[pin].work); 306 + set_bit(pin, dln2->unmasked_irqs); 332 307 } 333 308 334 309 static void dln2_irq_mask(struct irq_data *irqd) ··· 314 335 struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio); 315 336 int pin = irqd_to_hwirq(irqd); 316 337 317 - set_bit(pin, dln2->irqs_masked); 318 - } 319 - 320 - static void dln2_irq_unmask(struct irq_data *irqd) 321 - { 322 - struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd); 
323 - struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio); 324 - struct device *dev = dln2->gpio.dev; 325 - int pin = irqd_to_hwirq(irqd); 326 - 327 - if (test_and_clear_bit(pin, dln2->irqs_pending)) { 328 - int irq; 329 - 330 - irq = irq_find_mapping(dln2->gpio.irqdomain, pin); 331 - if (!irq) { 332 - dev_err(dev, "pin %d not mapped to IRQ\n", pin); 333 - return; 334 - } 335 - 336 - generic_handle_irq(irq); 337 - } 338 + clear_bit(pin, dln2->unmasked_irqs); 338 339 } 339 340 340 341 static int dln2_irq_set_type(struct irq_data *irqd, unsigned type) ··· 325 366 326 367 switch (type) { 327 368 case IRQ_TYPE_LEVEL_HIGH: 328 - dln2->irq_work[pin].type = DLN2_GPIO_EVENT_LVL_HIGH; 369 + dln2->irq_type[pin] = DLN2_GPIO_EVENT_LVL_HIGH; 329 370 break; 330 371 case IRQ_TYPE_LEVEL_LOW: 331 - dln2->irq_work[pin].type = DLN2_GPIO_EVENT_LVL_LOW; 372 + dln2->irq_type[pin] = DLN2_GPIO_EVENT_LVL_LOW; 332 373 break; 333 374 case IRQ_TYPE_EDGE_BOTH: 334 - dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE; 375 + dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE; 335 376 break; 336 377 case IRQ_TYPE_EDGE_RISING: 337 - dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE_RISING; 378 + dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE_RISING; 338 379 break; 339 380 case IRQ_TYPE_EDGE_FALLING: 340 - dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE_FALLING; 381 + dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE_FALLING; 341 382 break; 342 383 default: 343 384 return -EINVAL; ··· 346 387 return 0; 347 388 } 348 389 390 + static void dln2_irq_bus_lock(struct irq_data *irqd) 391 + { 392 + struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd); 393 + struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio); 394 + 395 + mutex_lock(&dln2->irq_lock); 396 + } 397 + 398 + static void dln2_irq_bus_unlock(struct irq_data *irqd) 399 + { 400 + struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd); 401 + struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio); 402 + int pin = irqd_to_hwirq(irqd); 403 + int enabled, unmasked; 404 + unsigned type; 405 + int ret; 406 + 407 + enabled = test_bit(pin, dln2->enabled_irqs); 408 + unmasked = test_bit(pin, dln2->unmasked_irqs); 409 + 410 + if (enabled != unmasked) { 411 + if (unmasked) { 412 + type = dln2->irq_type[pin] & DLN2_GPIO_EVENT_MASK; 413 + set_bit(pin, dln2->enabled_irqs); 414 + } else { 415 + type = DLN2_GPIO_EVENT_NONE; 416 + clear_bit(pin, dln2->enabled_irqs); 417 + } 418 + 419 + ret = dln2_gpio_set_event_cfg(dln2, pin, type, 0); 420 + if (ret) 421 + dev_err(dln2->gpio.dev, "failed to set event\n"); 422 + } 423 + 424 + mutex_unlock(&dln2->irq_lock); 425 + } 426 + 349 427 static struct irq_chip dln2_gpio_irqchip = { 350 428 .name = "dln2-irq", 351 - .irq_enable = dln2_irq_enable, 352 - .irq_disable = dln2_irq_disable, 353 429 .irq_mask = dln2_irq_mask, 354 430 .irq_unmask = dln2_irq_unmask, 355 431 .irq_set_type = dln2_irq_set_type, 432 + .irq_bus_lock = dln2_irq_bus_lock, 433 + .irq_bus_sync_unlock = dln2_irq_bus_unlock, 356 434 }; 357 435 358 436 static void dln2_gpio_event(struct platform_device *pdev, u16 echo, ··· 421 425 return; 422 426 } 423 427 424 - if (!test_bit(pin, dln2->irqs_enabled)) 425 - return; 426 - if (test_bit(pin, dln2->irqs_masked)) { 427 - set_bit(pin, dln2->irqs_pending); 428 - return; 429 - } 430 - 431 - switch (dln2->irq_work[pin].type) { 428 + switch (dln2->irq_type[pin]) { 432 429 case DLN2_GPIO_EVENT_CHANGE_RISING: 433 430 if (event->value) 434 431 generic_handle_irq(irq); ··· 440 451 struct dln2_gpio *dln2; 441 452 struct 
device *dev = &pdev->dev; 442 453 int pins; 443 - int i, ret; 454 + int ret; 444 455 445 456 pins = dln2_gpio_get_pin_count(pdev); 446 457 if (pins < 0) { ··· 456 467 if (!dln2) 457 468 return -ENOMEM; 458 469 459 - dln2->irq_work = devm_kcalloc(&pdev->dev, pins, 460 - sizeof(struct dln2_irq_work), GFP_KERNEL); 461 - if (!dln2->irq_work) 462 - return -ENOMEM; 463 - for (i = 0; i < pins; i++) { 464 - INIT_WORK(&dln2->irq_work[i].work, dln2_irq_work); 465 - dln2->irq_work[i].pin = i; 466 - dln2->irq_work[i].dln2 = dln2; 467 - } 470 + mutex_init(&dln2->irq_lock); 468 471 469 472 dln2->pdev = pdev; 470 473 ··· 510 529 static int dln2_gpio_remove(struct platform_device *pdev) 511 530 { 512 531 struct dln2_gpio *dln2 = platform_get_drvdata(pdev); 513 - int i; 514 532 515 533 dln2_unregister_event_cb(pdev, DLN2_GPIO_CONDITION_MET_EV); 516 - for (i = 0; i < dln2->gpio.ngpio; i++) 517 - flush_work(&dln2->irq_work[i].work); 518 534 gpiochip_remove(&dln2->gpio); 519 535 520 536 return 0;
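The dln2 rework follows the standard slow-bus IRQ chip pattern: irq_mask/irq_unmask may run in atomic context, so they only flip in-memory bitmaps, and the sleeping transfer that syncs hardware happens once, from irq_bus_sync_unlock(), bracketed by irq_bus_lock(). A skeleton of the pattern with assumed driver types and helpers:

	static void chip_irq_bus_lock(struct irq_data *d)
	{
		struct my_chip *c = irq_data_get_irq_chip_data(d);

		mutex_lock(&c->irq_lock);
	}

	static void chip_irq_bus_sync_unlock(struct irq_data *d)
	{
		struct my_chip *c = irq_data_get_irq_chip_data(d);
		int pin = irqd_to_hwirq(d);

		if (!!test_bit(pin, c->unmasked) != !!test_bit(pin, c->enabled))
			my_chip_sync_pin(c, pin);	/* may sleep: bus transfer */

		mutex_unlock(&c->irq_lock);
	}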
+2 -1
drivers/gpio/gpio-grgpio.c
··· 441 441 err = gpiochip_add(gc); 442 442 if (err) { 443 443 dev_err(&ofdev->dev, "Could not add gpiochip\n"); 444 - irq_domain_remove(priv->domain); 444 + if (priv->domain) 445 + irq_domain_remove(priv->domain); 445 446 return err; 446 447 } 447 448
+1 -1
drivers/gpu/drm/Makefile
··· 37 37 obj-$(CONFIG_DRM_TTM) += ttm/ 38 38 obj-$(CONFIG_DRM_TDFX) += tdfx/ 39 39 obj-$(CONFIG_DRM_R128) += r128/ 40 + obj-$(CONFIG_HSA_AMD) += amd/amdkfd/ 40 41 obj-$(CONFIG_DRM_RADEON)+= radeon/ 41 42 obj-$(CONFIG_DRM_MGA) += mga/ 42 43 obj-$(CONFIG_DRM_I810) += i810/ ··· 68 67 obj-y += i2c/ 69 68 obj-y += panel/ 70 69 obj-y += bridge/ 71 - obj-$(CONFIG_HSA_AMD) += amd/amdkfd/
+169 -149
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
··· 31 31 #include <uapi/linux/kfd_ioctl.h> 32 32 #include <linux/time.h> 33 33 #include <linux/mm.h> 34 - #include <linux/uaccess.h> 35 34 #include <uapi/asm-generic/mman-common.h> 36 35 #include <asm/processor.h> 37 36 #include "kfd_priv.h" ··· 126 127 return 0; 127 128 } 128 129 129 - static long kfd_ioctl_get_version(struct file *filep, struct kfd_process *p, 130 - void __user *arg) 130 + static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p, 131 + void *data) 131 132 { 132 - struct kfd_ioctl_get_version_args args; 133 + struct kfd_ioctl_get_version_args *args = data; 133 134 int err = 0; 134 135 135 - args.major_version = KFD_IOCTL_MAJOR_VERSION; 136 - args.minor_version = KFD_IOCTL_MINOR_VERSION; 137 - 138 - if (copy_to_user(arg, &args, sizeof(args))) 139 - err = -EFAULT; 136 + args->major_version = KFD_IOCTL_MAJOR_VERSION; 137 + args->minor_version = KFD_IOCTL_MINOR_VERSION; 140 138 141 139 return err; 142 140 } ··· 217 221 return 0; 218 222 } 219 223 220 - static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, 221 - void __user *arg) 224 + static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, 225 + void *data) 222 226 { 223 - struct kfd_ioctl_create_queue_args args; 227 + struct kfd_ioctl_create_queue_args *args = data; 224 228 struct kfd_dev *dev; 225 229 int err = 0; 226 230 unsigned int queue_id; ··· 229 233 230 234 memset(&q_properties, 0, sizeof(struct queue_properties)); 231 235 232 - if (copy_from_user(&args, arg, sizeof(args))) 233 - return -EFAULT; 234 - 235 236 pr_debug("kfd: creating queue ioctl\n"); 236 237 237 - err = set_queue_properties_from_user(&q_properties, &args); 238 + err = set_queue_properties_from_user(&q_properties, args); 238 239 if (err) 239 240 return err; 240 241 241 - dev = kfd_device_by_id(args.gpu_id); 242 + dev = kfd_device_by_id(args->gpu_id); 242 243 if (dev == NULL) 243 244 return -EINVAL; 244 245 ··· 243 250 244 251 pdd = kfd_bind_process_to_device(dev, p); 245 252 if (IS_ERR(pdd)) { 246 - err = PTR_ERR(pdd); 253 + err = -ESRCH; 247 254 goto err_bind_process; 248 255 } 249 256 ··· 256 263 if (err != 0) 257 264 goto err_create_queue; 258 265 259 - args.queue_id = queue_id; 266 + args->queue_id = queue_id; 260 267 261 268 /* Return gpu_id as doorbell offset for mmap usage */ 262 - args.doorbell_offset = args.gpu_id << PAGE_SHIFT; 263 - 264 - if (copy_to_user(arg, &args, sizeof(args))) { 265 - err = -EFAULT; 266 - goto err_copy_args_out; 267 - } 269 + args->doorbell_offset = args->gpu_id << PAGE_SHIFT; 268 270 269 271 mutex_unlock(&p->mutex); 270 272 271 - pr_debug("kfd: queue id %d was created successfully\n", args.queue_id); 273 + pr_debug("kfd: queue id %d was created successfully\n", args->queue_id); 272 274 273 275 pr_debug("ring buffer address == 0x%016llX\n", 274 - args.ring_base_address); 276 + args->ring_base_address); 275 277 276 278 pr_debug("read ptr address == 0x%016llX\n", 277 - args.read_pointer_address); 279 + args->read_pointer_address); 278 280 279 281 pr_debug("write ptr address == 0x%016llX\n", 280 - args.write_pointer_address); 282 + args->write_pointer_address); 281 283 282 284 return 0; 283 285 284 - err_copy_args_out: 285 - pqm_destroy_queue(&p->pqm, queue_id); 286 286 err_create_queue: 287 287 err_bind_process: 288 288 mutex_unlock(&p->mutex); ··· 283 297 } 284 298 285 299 static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p, 286 - void __user *arg) 300 + void *data) 287 301 { 288 302 int retval; 289 - struct 
kfd_ioctl_destroy_queue_args args; 290 - 291 - if (copy_from_user(&args, arg, sizeof(args))) 292 - return -EFAULT; 303 + struct kfd_ioctl_destroy_queue_args *args = data; 293 304 294 305 pr_debug("kfd: destroying queue id %d for PASID %d\n", 295 - args.queue_id, 306 + args->queue_id, 296 307 p->pasid); 297 308 298 309 mutex_lock(&p->mutex); 299 310 300 - retval = pqm_destroy_queue(&p->pqm, args.queue_id); 311 + retval = pqm_destroy_queue(&p->pqm, args->queue_id); 301 312 302 313 mutex_unlock(&p->mutex); 303 314 return retval; 304 315 } 305 316 306 317 static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p, 307 - void __user *arg) 318 + void *data) 308 319 { 309 320 int retval; 310 - struct kfd_ioctl_update_queue_args args; 321 + struct kfd_ioctl_update_queue_args *args = data; 311 322 struct queue_properties properties; 312 323 313 - if (copy_from_user(&args, arg, sizeof(args))) 314 - return -EFAULT; 315 - 316 - if (args.queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) { 324 + if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) { 317 325 pr_err("kfd: queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n"); 318 326 return -EINVAL; 319 327 } 320 328 321 - if (args.queue_priority > KFD_MAX_QUEUE_PRIORITY) { 329 + if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) { 322 330 pr_err("kfd: queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n"); 323 331 return -EINVAL; 324 332 } 325 333 326 - if ((args.ring_base_address) && 334 + if ((args->ring_base_address) && 327 335 (!access_ok(VERIFY_WRITE, 328 - (const void __user *) args.ring_base_address, 336 + (const void __user *) args->ring_base_address, 329 337 sizeof(uint64_t)))) { 330 338 pr_err("kfd: can't access ring base address\n"); 331 339 return -EFAULT; 332 340 } 333 341 334 - if (!is_power_of_2(args.ring_size) && (args.ring_size != 0)) { 342 + if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) { 335 343 pr_err("kfd: ring size must be a power of 2 or 0\n"); 336 344 return -EINVAL; 337 345 } 338 346 339 - properties.queue_address = args.ring_base_address; 340 - properties.queue_size = args.ring_size; 341 - properties.queue_percent = args.queue_percentage; 342 - properties.priority = args.queue_priority; 347 + properties.queue_address = args->ring_base_address; 348 + properties.queue_size = args->ring_size; 349 + properties.queue_percent = args->queue_percentage; 350 + properties.priority = args->queue_priority; 343 351 344 352 pr_debug("kfd: updating queue id %d for PASID %d\n", 345 - args.queue_id, p->pasid); 353 + args->queue_id, p->pasid); 346 354 347 355 mutex_lock(&p->mutex); 348 356 349 - retval = pqm_update_queue(&p->pqm, args.queue_id, &properties); 357 + retval = pqm_update_queue(&p->pqm, args->queue_id, &properties); 350 358 351 359 mutex_unlock(&p->mutex); 352 360 353 361 return retval; 354 362 } 355 363 356 - static long kfd_ioctl_set_memory_policy(struct file *filep, 357 - struct kfd_process *p, void __user *arg) 364 + static int kfd_ioctl_set_memory_policy(struct file *filep, 365 + struct kfd_process *p, void *data) 358 366 { 359 - struct kfd_ioctl_set_memory_policy_args args; 367 + struct kfd_ioctl_set_memory_policy_args *args = data; 360 368 struct kfd_dev *dev; 361 369 int err = 0; 362 370 struct kfd_process_device *pdd; 363 371 enum cache_policy default_policy, alternate_policy; 364 372 365 - if (copy_from_user(&args, arg, sizeof(args))) 366 - return -EFAULT; 367 - 368 - if (args.default_policy != KFD_IOC_CACHE_POLICY_COHERENT 369 - && args.default_policy != 
KFD_IOC_CACHE_POLICY_NONCOHERENT) { 373 + if (args->default_policy != KFD_IOC_CACHE_POLICY_COHERENT 374 + && args->default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) { 370 375 return -EINVAL; 371 376 } 372 377 373 - if (args.alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT 374 - && args.alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) { 378 + if (args->alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT 379 + && args->alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) { 375 380 return -EINVAL; 376 381 } 377 382 378 - dev = kfd_device_by_id(args.gpu_id); 383 + dev = kfd_device_by_id(args->gpu_id); 379 384 if (dev == NULL) 380 385 return -EINVAL; 381 386 ··· 374 397 375 398 pdd = kfd_bind_process_to_device(dev, p); 376 399 if (IS_ERR(pdd)) { 377 - err = PTR_ERR(pdd); 400 + err = -ESRCH; 378 401 goto out; 379 402 } 380 403 381 - default_policy = (args.default_policy == KFD_IOC_CACHE_POLICY_COHERENT) 404 + default_policy = (args->default_policy == KFD_IOC_CACHE_POLICY_COHERENT) 382 405 ? cache_policy_coherent : cache_policy_noncoherent; 383 406 384 407 alternate_policy = 385 - (args.alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT) 408 + (args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT) 386 409 ? cache_policy_coherent : cache_policy_noncoherent; 387 410 388 411 if (!dev->dqm->set_cache_memory_policy(dev->dqm, 389 412 &pdd->qpd, 390 413 default_policy, 391 414 alternate_policy, 392 - (void __user *)args.alternate_aperture_base, 393 - args.alternate_aperture_size)) 415 + (void __user *)args->alternate_aperture_base, 416 + args->alternate_aperture_size)) 394 417 err = -EINVAL; 395 418 396 419 out: ··· 399 422 return err; 400 423 } 401 424 402 - static long kfd_ioctl_get_clock_counters(struct file *filep, 403 - struct kfd_process *p, void __user *arg) 425 + static int kfd_ioctl_get_clock_counters(struct file *filep, 426 + struct kfd_process *p, void *data) 404 427 { 405 - struct kfd_ioctl_get_clock_counters_args args; 428 + struct kfd_ioctl_get_clock_counters_args *args = data; 406 429 struct kfd_dev *dev; 407 430 struct timespec time; 408 431 409 - if (copy_from_user(&args, arg, sizeof(args))) 410 - return -EFAULT; 411 - 412 - dev = kfd_device_by_id(args.gpu_id); 432 + dev = kfd_device_by_id(args->gpu_id); 413 433 if (dev == NULL) 414 434 return -EINVAL; 415 435 416 436 /* Reading GPU clock counter from KGD */ 417 - args.gpu_clock_counter = kfd2kgd->get_gpu_clock_counter(dev->kgd); 437 + args->gpu_clock_counter = kfd2kgd->get_gpu_clock_counter(dev->kgd); 418 438 419 439 /* No access to rdtsc. 
Using raw monotonic time */ 420 440 getrawmonotonic(&time); 421 - args.cpu_clock_counter = (uint64_t)timespec_to_ns(&time); 441 + args->cpu_clock_counter = (uint64_t)timespec_to_ns(&time); 422 442 423 443 get_monotonic_boottime(&time); 424 - args.system_clock_counter = (uint64_t)timespec_to_ns(&time); 444 + args->system_clock_counter = (uint64_t)timespec_to_ns(&time); 425 445 426 446 /* Since the counter is in nano-seconds we use 1GHz frequency */ 427 - args.system_clock_freq = 1000000000; 428 - 429 - if (copy_to_user(arg, &args, sizeof(args))) 430 - return -EFAULT; 447 + args->system_clock_freq = 1000000000; 431 448 432 449 return 0; 433 450 } 434 451 435 452 436 453 static int kfd_ioctl_get_process_apertures(struct file *filp, 437 - struct kfd_process *p, void __user *arg) 454 + struct kfd_process *p, void *data) 438 455 { 439 - struct kfd_ioctl_get_process_apertures_args args; 456 + struct kfd_ioctl_get_process_apertures_args *args = data; 440 457 struct kfd_process_device_apertures *pAperture; 441 458 struct kfd_process_device *pdd; 442 459 443 460 dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid); 444 461 445 - if (copy_from_user(&args, arg, sizeof(args))) 446 - return -EFAULT; 447 - 448 - args.num_of_nodes = 0; 462 + args->num_of_nodes = 0; 449 463 450 464 mutex_lock(&p->mutex); 451 465 ··· 445 477 /* Run over all pdd of the process */ 446 478 pdd = kfd_get_first_process_device_data(p); 447 479 do { 448 - pAperture = &args.process_apertures[args.num_of_nodes]; 480 + pAperture = 481 + &args->process_apertures[args->num_of_nodes]; 449 482 pAperture->gpu_id = pdd->dev->id; 450 483 pAperture->lds_base = pdd->lds_base; 451 484 pAperture->lds_limit = pdd->lds_limit; ··· 456 487 pAperture->scratch_limit = pdd->scratch_limit; 457 488 458 489 dev_dbg(kfd_device, 459 - "node id %u\n", args.num_of_nodes); 490 + "node id %u\n", args->num_of_nodes); 460 491 dev_dbg(kfd_device, 461 492 "gpu id %u\n", pdd->dev->id); 462 493 dev_dbg(kfd_device, ··· 472 503 dev_dbg(kfd_device, 473 504 "scratch_limit %llX\n", pdd->scratch_limit); 474 505 475 - args.num_of_nodes++; 506 + args->num_of_nodes++; 476 507 } while ((pdd = kfd_get_next_process_device_data(p, pdd)) != NULL && 477 - (args.num_of_nodes < NUM_OF_SUPPORTED_GPUS)); 508 + (args->num_of_nodes < NUM_OF_SUPPORTED_GPUS)); 478 509 } 479 510 480 511 mutex_unlock(&p->mutex); 481 512 482 - if (copy_to_user(arg, &args, sizeof(args))) 483 - return -EFAULT; 484 - 485 513 return 0; 486 514 } 515 + 516 + #define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \ 517 + [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl} 518 + 519 + /** Ioctl table */ 520 + static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = { 521 + AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_VERSION, 522 + kfd_ioctl_get_version, 0), 523 + 524 + AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_QUEUE, 525 + kfd_ioctl_create_queue, 0), 526 + 527 + AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_QUEUE, 528 + kfd_ioctl_destroy_queue, 0), 529 + 530 + AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_MEMORY_POLICY, 531 + kfd_ioctl_set_memory_policy, 0), 532 + 533 + AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_CLOCK_COUNTERS, 534 + kfd_ioctl_get_clock_counters, 0), 535 + 536 + AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES, 537 + kfd_ioctl_get_process_apertures, 0), 538 + 539 + AMDKFD_IOCTL_DEF(AMDKFD_IOC_UPDATE_QUEUE, 540 + kfd_ioctl_update_queue, 0), 541 + }; 542 + 543 + #define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls) 487 544 488 545 static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long 
arg) 489 546 { 490 547 struct kfd_process *process; 491 - long err = -EINVAL; 548 + amdkfd_ioctl_t *func; 549 + const struct amdkfd_ioctl_desc *ioctl = NULL; 550 + unsigned int nr = _IOC_NR(cmd); 551 + char stack_kdata[128]; 552 + char *kdata = NULL; 553 + unsigned int usize, asize; 554 + int retcode = -EINVAL; 492 555 493 - dev_dbg(kfd_device, 494 - "ioctl cmd 0x%x (#%d), arg 0x%lx\n", 495 - cmd, _IOC_NR(cmd), arg); 556 + if (nr >= AMDKFD_CORE_IOCTL_COUNT) 557 + goto err_i1; 558 + 559 + if ((nr >= AMDKFD_COMMAND_START) && (nr < AMDKFD_COMMAND_END)) { 560 + u32 amdkfd_size; 561 + 562 + ioctl = &amdkfd_ioctls[nr]; 563 + 564 + amdkfd_size = _IOC_SIZE(ioctl->cmd); 565 + usize = asize = _IOC_SIZE(cmd); 566 + if (amdkfd_size > asize) 567 + asize = amdkfd_size; 568 + 569 + cmd = ioctl->cmd; 570 + } else 571 + goto err_i1; 572 + 573 + dev_dbg(kfd_device, "ioctl cmd 0x%x (#%d), arg 0x%lx\n", cmd, nr, arg); 496 574 497 575 process = kfd_get_process(current); 498 - if (IS_ERR(process)) 499 - return PTR_ERR(process); 500 - 501 - switch (cmd) { 502 - case KFD_IOC_GET_VERSION: 503 - err = kfd_ioctl_get_version(filep, process, (void __user *)arg); 504 - break; 505 - case KFD_IOC_CREATE_QUEUE: 506 - err = kfd_ioctl_create_queue(filep, process, 507 - (void __user *)arg); 508 - break; 509 - 510 - case KFD_IOC_DESTROY_QUEUE: 511 - err = kfd_ioctl_destroy_queue(filep, process, 512 - (void __user *)arg); 513 - break; 514 - 515 - case KFD_IOC_SET_MEMORY_POLICY: 516 - err = kfd_ioctl_set_memory_policy(filep, process, 517 - (void __user *)arg); 518 - break; 519 - 520 - case KFD_IOC_GET_CLOCK_COUNTERS: 521 - err = kfd_ioctl_get_clock_counters(filep, process, 522 - (void __user *)arg); 523 - break; 524 - 525 - case KFD_IOC_GET_PROCESS_APERTURES: 526 - err = kfd_ioctl_get_process_apertures(filep, process, 527 - (void __user *)arg); 528 - break; 529 - 530 - case KFD_IOC_UPDATE_QUEUE: 531 - err = kfd_ioctl_update_queue(filep, process, 532 - (void __user *)arg); 533 - break; 534 - 535 - default: 536 - dev_err(kfd_device, 537 - "unknown ioctl cmd 0x%x, arg 0x%lx)\n", 538 - cmd, arg); 539 - err = -EINVAL; 540 - break; 576 + if (IS_ERR(process)) { 577 + dev_dbg(kfd_device, "no process\n"); 578 + goto err_i1; 541 579 } 542 580 543 - if (err < 0) 544 - dev_err(kfd_device, 545 - "ioctl error %ld for ioctl cmd 0x%x (#%d)\n", 546 - err, cmd, _IOC_NR(cmd)); 581 + /* Do not trust userspace, use our own definition */ 582 + func = ioctl->func; 547 583 548 - return err; 584 + if (unlikely(!func)) { 585 + dev_dbg(kfd_device, "no function\n"); 586 + retcode = -EINVAL; 587 + goto err_i1; 588 + } 589 + 590 + if (cmd & (IOC_IN | IOC_OUT)) { 591 + if (asize <= sizeof(stack_kdata)) { 592 + kdata = stack_kdata; 593 + } else { 594 + kdata = kmalloc(asize, GFP_KERNEL); 595 + if (!kdata) { 596 + retcode = -ENOMEM; 597 + goto err_i1; 598 + } 599 + } 600 + if (asize > usize) 601 + memset(kdata + usize, 0, asize - usize); 602 + } 603 + 604 + if (cmd & IOC_IN) { 605 + if (copy_from_user(kdata, (void __user *)arg, usize) != 0) { 606 + retcode = -EFAULT; 607 + goto err_i1; 608 + } 609 + } else if (cmd & IOC_OUT) { 610 + memset(kdata, 0, usize); 611 + } 612 + 613 + retcode = func(filep, process, kdata); 614 + 615 + if (cmd & IOC_OUT) 616 + if (copy_to_user((void __user *)arg, kdata, usize) != 0) 617 + retcode = -EFAULT; 618 + 619 + err_i1: 620 + if (!ioctl) 621 + dev_dbg(kfd_device, "invalid ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n", 622 + task_pid_nr(current), cmd, nr); 623 + 624 + if (kdata != stack_kdata) 625 + kfree(kdata); 626 + 627 + if 
(retcode) 628 + dev_dbg(kfd_device, "ret = %d\n", retcode); 629 + 630 + return retcode; 549 631 } 550 632 551 633 static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
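The chardev rewrite replaces per-handler copy_from_user/copy_to_user pairs with one table-driven dispatcher: the ioctl number indexes amdkfd_ioctls[], the dispatcher performs the single copy in and out sized from _IOC_SIZE(), and every handler just reads and writes a kernel-space args struct, so no handler can forget a copy. The control flow, stripped of the error handling and return-value checks shown in full above:

	unsigned int nr = _IOC_NR(cmd);
	const struct amdkfd_ioctl_desc *ioctl = &amdkfd_ioctls[nr];

	if (cmd & IOC_IN)
		copy_from_user(kdata, (void __user *)arg, _IOC_SIZE(cmd));

	retcode = ioctl->func(filep, process, kdata);	/* e.g. kfd_ioctl_create_queue */

	if (cmd & IOC_OUT)
		copy_to_user((void __user *)arg, kdata, _IOC_SIZE(cmd));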
+26 -2
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
··· 161 161 { 162 162 int bit = qpd->vmid - KFD_VMID_START_OFFSET; 163 163 164 + /* Release the vmid mapping */ 165 + set_pasid_vmid_mapping(dqm, 0, qpd->vmid); 166 + 164 167 set_bit(bit, (unsigned long *)&dqm->vmid_bitmap); 165 168 qpd->vmid = 0; 166 169 q->properties.vmid = 0; ··· 275 272 return retval; 276 273 } 277 274 275 + pr_debug("kfd: loading mqd to hqd on pipe (%d) queue (%d)\n", 276 + q->pipe, 277 + q->queue); 278 + 279 + retval = mqd->load_mqd(mqd, q->mqd, q->pipe, 280 + q->queue, q->properties.write_ptr); 281 + if (retval != 0) { 282 + deallocate_hqd(dqm, q); 283 + mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj); 284 + return retval; 285 + } 286 + 278 287 return 0; 279 288 } 280 289 ··· 335 320 { 336 321 int retval; 337 322 struct mqd_manager *mqd; 323 + bool prev_active = false; 338 324 339 325 BUG_ON(!dqm || !q || !q->mqd); 340 326 ··· 346 330 return -ENOMEM; 347 331 } 348 332 349 - retval = mqd->update_mqd(mqd, q->mqd, &q->properties); 350 333 if (q->properties.is_active == true) 334 + prev_active = true; 335 + 336 + /* 337 + * 338 + * check active state vs. the previous state 339 + * and modify counter accordingly 340 + */ 341 + retval = mqd->update_mqd(mqd, q->mqd, &q->properties); 342 + if ((q->properties.is_active == true) && (prev_active == false)) 351 343 dqm->queue_count++; 352 - else 344 + else if ((q->properties.is_active == false) && (prev_active == true)) 353 345 dqm->queue_count--; 354 346 355 347 if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
··· 184 184 uint32_t queue_id) 185 185 { 186 186 187 - return kfd2kgd->hqd_is_occupies(mm->dev->kgd, queue_address, 187 + return kfd2kgd->hqd_is_occupied(mm->dev->kgd, queue_address, 188 188 pipe_id, queue_id); 189 189 190 190 }
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
··· 32 32 { 33 33 pasid_limit = max_num_of_processes; 34 34 35 - pasid_bitmap = kzalloc(BITS_TO_LONGS(pasid_limit), GFP_KERNEL); 35 + pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL); 36 36 if (!pasid_bitmap) 37 37 return -ENOMEM; 38 38
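The kfd_pasid.c one-liner above is an undersized-allocation fix, not a cleanup: BITS_TO_LONGS(n) yields a count of longs, so kzalloc(BITS_TO_LONGS(pasid_limit), GFP_KERNEL) allocated one byte per needed long, an 8x shortfall on 64-bit. The element size has to be supplied explicitly whenever a bitmap is allocated this way:

    /* n bits need BITS_TO_LONGS(n) longs, not BITS_TO_LONGS(n) bytes */
    unsigned long *map = kcalloc(BITS_TO_LONGS(n), sizeof(long),
                                 GFP_KERNEL);
    if (!map)
            return -ENOMEM;
    set_bit(0, map);        /* the bit helpers index in longs */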
+18
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
··· 463 463 bool is_32bit_user_mode; 464 464 }; 465 465 466 + /** 467 + * Ioctl function type. 468 + * 469 + * \param filep pointer to file structure. 470 + * \param p amdkfd process pointer. 471 + * \param data pointer to arg that was copied from user. 472 + */ 473 + typedef int amdkfd_ioctl_t(struct file *filep, struct kfd_process *p, 474 + void *data); 475 + 476 + struct amdkfd_ioctl_desc { 477 + unsigned int cmd; 478 + int flags; 479 + amdkfd_ioctl_t *func; 480 + unsigned int cmd_drv; 481 + const char *name; 482 + }; 483 + 466 484 void kfd_process_create_wq(void); 467 485 void kfd_process_destroy_wq(void); 468 486 struct kfd_process *kfd_create_process(const struct task_struct *);
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
··· 921 921 uint32_t i = 0; 922 922 923 923 list_for_each_entry(dev, &topology_device_list, list) { 924 - ret = kfd_build_sysfs_node_entry(dev, 0); 924 + ret = kfd_build_sysfs_node_entry(dev, i); 925 925 if (ret < 0) 926 926 return ret; 927 927 i++;
+1 -1
drivers/gpu/drm/amd/include/kgd_kfd_interface.h
··· 183 183 int (*hqd_load)(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, 184 184 uint32_t queue_id, uint32_t __user *wptr); 185 185 186 - bool (*hqd_is_occupies)(struct kgd_dev *kgd, uint64_t queue_address, 186 + bool (*hqd_is_occupied)(struct kgd_dev *kgd, uint64_t queue_address, 187 187 uint32_t pipe_id, uint32_t queue_id); 188 188 189 189 int (*hqd_destroy)(struct kgd_dev *kgd, uint32_t reset_type,
-2
drivers/gpu/drm/i915/i915_drv.h
··· 1756 1756 */ 1757 1757 struct workqueue_struct *dp_wq; 1758 1758 1759 - uint32_t bios_vgacntr; 1760 - 1761 1759 /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */ 1762 1760 struct { 1763 1761 int (*do_execbuf)(struct drm_device *dev, struct drm_file *file,
+7 -1
drivers/gpu/drm/i915/i915_gem.c
··· 1048 1048 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, 1049 1049 struct drm_file *file) 1050 1050 { 1051 + struct drm_i915_private *dev_priv = dev->dev_private; 1051 1052 struct drm_i915_gem_pwrite *args = data; 1052 1053 struct drm_i915_gem_object *obj; 1053 1054 int ret; ··· 1068 1067 return -EFAULT; 1069 1068 } 1070 1069 1070 + intel_runtime_pm_get(dev_priv); 1071 + 1071 1072 ret = i915_mutex_lock_interruptible(dev); 1072 1073 if (ret) 1073 - return ret; 1074 + goto put_rpm; 1074 1075 1075 1076 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); 1076 1077 if (&obj->base == NULL) { ··· 1124 1121 drm_gem_object_unreference(&obj->base); 1125 1122 unlock: 1126 1123 mutex_unlock(&dev->struct_mutex); 1124 + put_rpm: 1125 + intel_runtime_pm_put(dev_priv); 1126 + 1127 1127 return ret; 1128 1128 } 1129 1129
+2 -4
drivers/gpu/drm/i915/i915_irq.c
··· 3725 3725 if ((iir & flip_pending) == 0) 3726 3726 goto check_page_flip; 3727 3727 3728 - intel_prepare_page_flip(dev, plane); 3729 - 3730 3728 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3731 3729 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3732 3730 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence ··· 3734 3736 if (I915_READ16(ISR) & flip_pending) 3735 3737 goto check_page_flip; 3736 3738 3739 + intel_prepare_page_flip(dev, plane); 3737 3740 intel_finish_page_flip(dev, pipe); 3738 3741 return true; 3739 3742 ··· 3906 3907 if ((iir & flip_pending) == 0) 3907 3908 goto check_page_flip; 3908 3909 3909 - intel_prepare_page_flip(dev, plane); 3910 - 3911 3910 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3912 3911 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3913 3912 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence ··· 3915 3918 if (I915_READ(ISR) & flip_pending) 3916 3919 goto check_page_flip; 3917 3920 3921 + intel_prepare_page_flip(dev, plane); 3918 3922 intel_finish_page_flip(dev, pipe); 3919 3923 return true; 3920 3924
+1 -7
drivers/gpu/drm/i915/intel_display.c
··· 13057 13057 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); 13058 13058 udelay(300); 13059 13059 13060 - /* 13061 - * Fujitsu-Siemens Lifebook S6010 (830) has problems resuming 13062 - * from S3 without preserving (some of?) the other bits. 13063 - */ 13064 - I915_WRITE(vga_reg, dev_priv->bios_vgacntr | VGA_DISP_DISABLE); 13060 + I915_WRITE(vga_reg, VGA_DISP_DISABLE); 13065 13061 POSTING_READ(vga_reg); 13066 13062 } 13067 13063 ··· 13142 13146 13143 13147 intel_shared_dpll_init(dev); 13144 13148 13145 - /* save the BIOS value before clobbering it */ 13146 - dev_priv->bios_vgacntr = I915_READ(i915_vgacntrl_reg(dev)); 13147 13149 /* Just disable it once at startup */ 13148 13150 i915_disable_vga(dev); 13149 13151 intel_setup_outputs(dev);
-27
drivers/gpu/drm/i915/intel_runtime_pm.c
··· 615 615 vlv_power_sequencer_reset(dev_priv); 616 616 } 617 617 618 - static void check_power_well_state(struct drm_i915_private *dev_priv, 619 - struct i915_power_well *power_well) 620 - { 621 - bool enabled = power_well->ops->is_enabled(dev_priv, power_well); 622 - 623 - if (power_well->always_on || !i915.disable_power_well) { 624 - if (!enabled) 625 - goto mismatch; 626 - 627 - return; 628 - } 629 - 630 - if (enabled != (power_well->count > 0)) 631 - goto mismatch; 632 - 633 - return; 634 - 635 - mismatch: 636 - WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n", 637 - power_well->name, power_well->always_on, enabled, 638 - power_well->count, i915.disable_power_well); 639 - } 640 - 641 618 /** 642 619 * intel_display_power_get - grab a power domain reference 643 620 * @dev_priv: i915 device instance ··· 646 669 power_well->ops->enable(dev_priv, power_well); 647 670 power_well->hw_enabled = true; 648 671 } 649 - 650 - check_power_well_state(dev_priv, power_well); 651 672 } 652 673 653 674 power_domains->domain_use_count[domain]++; ··· 684 709 power_well->hw_enabled = false; 685 710 power_well->ops->disable(dev_priv, power_well); 686 711 } 687 - 688 - check_power_well_state(dev_priv, power_well); 689 712 } 690 713 691 714 mutex_unlock(&power_domains->lock);
+2 -2
drivers/gpu/drm/nouveau/core/core/event.c
··· 26 26 void 27 27 nvkm_event_put(struct nvkm_event *event, u32 types, int index) 28 28 { 29 - BUG_ON(!spin_is_locked(&event->refs_lock)); 29 + assert_spin_locked(&event->refs_lock); 30 30 while (types) { 31 31 int type = __ffs(types); types &= ~(1 << type); 32 32 if (--event->refs[index * event->types_nr + type] == 0) { ··· 39 39 void 40 40 nvkm_event_get(struct nvkm_event *event, u32 types, int index) 41 41 { 42 - BUG_ON(!spin_is_locked(&event->refs_lock)); 42 + assert_spin_locked(&event->refs_lock); 43 43 while (types) { 44 44 int type = __ffs(types); types &= ~(1 << type); 45 45 if (++event->refs[index * event->types_nr + type] == 1) {
+1 -1
drivers/gpu/drm/nouveau/core/core/notify.c
··· 98 98 struct nvkm_event *event = notify->event; 99 99 unsigned long flags; 100 100 101 - BUG_ON(!spin_is_locked(&event->list_lock)); 101 + assert_spin_locked(&event->list_lock); 102 102 BUG_ON(size != notify->size); 103 103 104 104 spin_lock_irqsave(&event->refs_lock, flags);
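The two nouveau hunks above fix the same portability bug: when the kernel is built with !CONFIG_SMP and !CONFIG_DEBUG_SPINLOCK, spinlocks compile away and spin_is_locked() is hardwired to return 0, so BUG_ON(!spin_is_locked(...)) fires even though the lock is held. assert_spin_locked() expresses the intended invariant and is safe in every configuration:

    BUG_ON(!spin_is_locked(&ev->lock)); /* fires on UP, lock held or not */
    assert_spin_locked(&ev->lock);      /* correct everywhere */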
+33
drivers/gpu/drm/nouveau/core/engine/device/nve0.c
··· 249 249 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; 250 250 device->oclass[NVDEV_ENGINE_PERFMON] = &nvf0_perfmon_oclass; 251 251 break; 252 + case 0x106: 253 + device->cname = "GK208B"; 254 + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 255 + device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass; 256 + device->oclass[NVDEV_SUBDEV_I2C ] = nve0_i2c_oclass; 257 + device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass; 258 + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass; 259 + device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; 260 + device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 261 + device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass; 262 + device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass; 263 + device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; 264 + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 265 + device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass; 266 + device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass; 267 + device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; 268 + device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; 269 + device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 270 + device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 271 + device->oclass[NVDEV_SUBDEV_PWR ] = nv108_pwr_oclass; 272 + device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 273 + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass; 274 + device->oclass[NVDEV_ENGINE_FIFO ] = nv108_fifo_oclass; 275 + device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; 276 + device->oclass[NVDEV_ENGINE_GR ] = nv108_graph_oclass; 277 + device->oclass[NVDEV_ENGINE_DISP ] = nvf0_disp_oclass; 278 + device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass; 279 + device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass; 280 + device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass; 281 + device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass; 282 + device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass; 283 + device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; 284 + break; 252 285 case 0x108: 253 286 device->cname = "GK208"; 254 287 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+4 -2
drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c
··· 44 44 pramin_fini(void *data) 45 45 { 46 46 struct priv *priv = data; 47 - nv_wr32(priv->bios, 0x001700, priv->bar0); 48 - kfree(priv); 47 + if (priv) { 48 + nv_wr32(priv->bios, 0x001700, priv->bar0); 49 + kfree(priv); 50 + } 49 51 } 50 52 51 53 static void *
+51 -14
drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c
··· 24 24 25 25 #include "nv50.h" 26 26 27 + struct nvaa_ram_priv { 28 + struct nouveau_ram base; 29 + u64 poller_base; 30 + }; 31 + 27 32 static int 28 33 nvaa_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 29 34 struct nouveau_oclass *oclass, void *data, u32 datasize, 30 35 struct nouveau_object **pobject) 31 36 { 32 - const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */ 33 - const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */ 37 + u32 rsvd_head = ( 256 * 1024); /* vga memory */ 38 + u32 rsvd_tail = (1024 * 1024); /* vbios etc */ 34 39 struct nouveau_fb *pfb = nouveau_fb(parent); 35 - struct nouveau_ram *ram; 40 + struct nvaa_ram_priv *priv; 36 41 int ret; 37 42 38 - ret = nouveau_ram_create(parent, engine, oclass, &ram); 39 - *pobject = nv_object(ram); 43 + ret = nouveau_ram_create(parent, engine, oclass, &priv); 44 + *pobject = nv_object(priv); 40 45 if (ret) 41 46 return ret; 42 47 43 - ram->size = nv_rd32(pfb, 0x10020c); 44 - ram->size = (ram->size & 0xffffff00) | ((ram->size & 0x000000ff) << 32); 48 + priv->base.type = NV_MEM_TYPE_STOLEN; 49 + priv->base.stolen = (u64)nv_rd32(pfb, 0x100e10) << 12; 50 + priv->base.size = (u64)nv_rd32(pfb, 0x100e14) << 12; 45 51 46 - ret = nouveau_mm_init(&pfb->vram, rsvd_head, (ram->size >> 12) - 47 - (rsvd_head + rsvd_tail), 1); 52 + rsvd_tail += 0x1000; 53 + priv->poller_base = priv->base.size - rsvd_tail; 54 + 55 + ret = nouveau_mm_init(&pfb->vram, rsvd_head >> 12, 56 + (priv->base.size - (rsvd_head + rsvd_tail)) >> 12, 57 + 1); 48 58 if (ret) 49 59 return ret; 50 60 51 - ram->type = NV_MEM_TYPE_STOLEN; 52 - ram->stolen = (u64)nv_rd32(pfb, 0x100e10) << 12; 53 - ram->get = nv50_ram_get; 54 - ram->put = nv50_ram_put; 61 + priv->base.get = nv50_ram_get; 62 + priv->base.put = nv50_ram_put; 63 + return 0; 64 + } 65 + 66 + static int 67 + nvaa_ram_init(struct nouveau_object *object) 68 + { 69 + struct nouveau_fb *pfb = nouveau_fb(object); 70 + struct nvaa_ram_priv *priv = (void *)object; 71 + int ret; 72 + u64 dniso, hostnb, flush; 73 + 74 + ret = nouveau_ram_init(&priv->base); 75 + if (ret) 76 + return ret; 77 + 78 + dniso = ((priv->base.size - (priv->poller_base + 0x00)) >> 5) - 1; 79 + hostnb = ((priv->base.size - (priv->poller_base + 0x20)) >> 5) - 1; 80 + flush = ((priv->base.size - (priv->poller_base + 0x40)) >> 5) - 1; 81 + 82 + /* Enable NISO poller for various clients and set their associated 83 + * read address, only for MCP77/78 and MCP79/7A. (fd#25701) 84 + */ 85 + nv_wr32(pfb, 0x100c18, dniso); 86 + nv_mask(pfb, 0x100c14, 0x00000000, 0x00000001); 87 + nv_wr32(pfb, 0x100c1c, hostnb); 88 + nv_mask(pfb, 0x100c14, 0x00000000, 0x00000002); 89 + nv_wr32(pfb, 0x100c24, flush); 90 + nv_mask(pfb, 0x100c14, 0x00000000, 0x00010000); 91 + 55 92 return 0; 56 93 } 57 94 ··· 97 60 .ofuncs = &(struct nouveau_ofuncs) { 98 61 .ctor = nvaa_ram_ctor, 99 62 .dtor = _nouveau_ram_dtor, 100 - .init = _nouveau_ram_init, 63 + .init = nvaa_ram_init, 101 64 .fini = _nouveau_ram_fini, 102 65 }, 103 66 };
-8
drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
··· 24 24 25 25 #include "nv04.h" 26 26 27 - static void 28 - nv4c_mc_msi_rearm(struct nouveau_mc *pmc) 29 - { 30 - struct nv04_mc_priv *priv = (void *)pmc; 31 - nv_wr08(priv, 0x088050, 0xff); 32 - } 33 - 34 27 struct nouveau_oclass * 35 28 nv4c_mc_oclass = &(struct nouveau_mc_oclass) { 36 29 .base.handle = NV_SUBDEV(MC, 0x4c), ··· 34 41 .fini = _nouveau_mc_fini, 35 42 }, 36 43 .intr = nv04_mc_intr, 37 - .msi_rearm = nv4c_mc_msi_rearm, 38 44 }.base;
+3 -1
drivers/gpu/drm/nouveau/nouveau_bo.c
··· 1572 1572 * so use the DMA API for them. 1573 1573 */ 1574 1574 if (!nv_device_is_cpu_coherent(device) && 1575 - ttm->caching_state == tt_uncached) 1575 + ttm->caching_state == tt_uncached) { 1576 1576 ttm_dma_unpopulate(ttm_dma, dev->dev); 1577 + return; 1578 + } 1577 1579 1578 1580 #if __OS_HAS_AGP 1579 1581 if (drm->agp.stat == ENABLED) {
+31 -6
drivers/gpu/drm/nouveau/nouveau_gem.c
··· 36 36 nouveau_gem_object_del(struct drm_gem_object *gem) 37 37 { 38 38 struct nouveau_bo *nvbo = nouveau_gem_object(gem); 39 + struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); 39 40 struct ttm_buffer_object *bo = &nvbo->bo; 41 + struct device *dev = drm->dev->dev; 42 + int ret; 43 + 44 + ret = pm_runtime_get_sync(dev); 45 + if (WARN_ON(ret < 0 && ret != -EACCES)) 46 + return; 40 47 41 48 if (gem->import_attach) 42 49 drm_prime_gem_destroy(gem, nvbo->bo.sg); ··· 53 46 /* reset filp so nouveau_bo_del_ttm() can test for it */ 54 47 gem->filp = NULL; 55 48 ttm_bo_unref(&bo); 49 + 50 + pm_runtime_mark_last_busy(dev); 51 + pm_runtime_put_autosuspend(dev); 56 52 } 57 53 58 54 int ··· 63 53 { 64 54 struct nouveau_cli *cli = nouveau_cli(file_priv); 65 55 struct nouveau_bo *nvbo = nouveau_gem_object(gem); 56 + struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); 66 57 struct nouveau_vma *vma; 58 + struct device *dev = drm->dev->dev; 67 59 int ret; 68 60 69 61 if (!cli->vm) ··· 83 71 goto out; 84 72 } 85 73 86 - ret = nouveau_bo_vma_add(nvbo, cli->vm, vma); 87 - if (ret) { 88 - kfree(vma); 74 + ret = pm_runtime_get_sync(dev); 75 + if (ret < 0 && ret != -EACCES) 89 76 goto out; 90 - } 77 + 78 + ret = nouveau_bo_vma_add(nvbo, cli->vm, vma); 79 + if (ret) 80 + kfree(vma); 81 + 82 + pm_runtime_mark_last_busy(dev); 83 + pm_runtime_put_autosuspend(dev); 91 84 } else { 92 85 vma->refcount++; 93 86 } ··· 146 129 { 147 130 struct nouveau_cli *cli = nouveau_cli(file_priv); 148 131 struct nouveau_bo *nvbo = nouveau_gem_object(gem); 132 + struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); 133 + struct device *dev = drm->dev->dev; 149 134 struct nouveau_vma *vma; 150 135 int ret; 151 136 ··· 160 141 161 142 vma = nouveau_bo_vma_find(nvbo, cli->vm); 162 143 if (vma) { 163 - if (--vma->refcount == 0) 164 - nouveau_gem_object_unmap(nvbo, vma); 144 + if (--vma->refcount == 0) { 145 + ret = pm_runtime_get_sync(dev); 146 + if (!WARN_ON(ret < 0 && ret != -EACCES)) { 147 + nouveau_gem_object_unmap(nvbo, vma); 148 + pm_runtime_mark_last_busy(dev); 149 + pm_runtime_put_autosuspend(dev); 150 + } 151 + } 165 152 } 166 153 ttm_bo_unreserve(&nvbo->bo); 167 154 }
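The nouveau_gem hunks above wrap VM map and unmap in runtime-PM references so the GPU is powered while its page tables are modified. pm_runtime_get_sync() returns -EACCES when runtime PM is disabled for the device (meaning it is permanently powered), which is why that value is tolerated rather than treated as failure. The bracket in isolation, as a sketch:

    ret = pm_runtime_get_sync(dev);
    if (ret < 0 && ret != -EACCES)  /* -EACCES: runtime PM disabled */
            return ret;

    /* ... touch the hardware ... */

    pm_runtime_mark_last_busy(dev); /* restart the autosuspend timer */
    pm_runtime_put_autosuspend(dev);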
+4 -4
drivers/gpu/drm/radeon/atombios_crtc.c
··· 1851 1851 return pll; 1852 1852 } 1853 1853 /* otherwise, pick one of the plls */ 1854 - if ((rdev->family == CHIP_KAVERI) || 1855 - (rdev->family == CHIP_KABINI) || 1854 + if ((rdev->family == CHIP_KABINI) || 1856 1855 (rdev->family == CHIP_MULLINS)) { 1857 - /* KB/KV/ML has PPLL1 and PPLL2 */ 1856 + /* KB/ML has PPLL1 and PPLL2 */ 1858 1857 pll_in_use = radeon_get_pll_use_mask(crtc); 1859 1858 if (!(pll_in_use & (1 << ATOM_PPLL2))) 1860 1859 return ATOM_PPLL2; ··· 1862 1863 DRM_ERROR("unable to allocate a PPLL\n"); 1863 1864 return ATOM_PPLL_INVALID; 1864 1865 } else { 1865 - /* CI has PPLL0, PPLL1, and PPLL2 */ 1866 + /* CI/KV has PPLL0, PPLL1, and PPLL2 */ 1866 1867 pll_in_use = radeon_get_pll_use_mask(crtc); 1867 1868 if (!(pll_in_use & (1 << ATOM_PPLL2))) 1868 1869 return ATOM_PPLL2; ··· 2154 2155 case ATOM_PPLL0: 2155 2156 /* disable the ppll */ 2156 2157 if ((rdev->family == CHIP_ARUBA) || 2158 + (rdev->family == CHIP_KAVERI) || 2157 2159 (rdev->family == CHIP_BONAIRE) || 2158 2160 (rdev->family == CHIP_HAWAII)) 2159 2161 atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
+4
drivers/gpu/drm/radeon/atombios_dp.c
··· 492 492 struct radeon_connector_atom_dig *dig_connector; 493 493 int dp_clock; 494 494 495 + if ((mode->clock > 340000) && 496 + (!radeon_connector_is_dp12_capable(connector))) 497 + return MODE_CLOCK_HIGH; 498 + 495 499 if (!radeon_connector->con_priv) 496 500 return MODE_CLOCK_HIGH; 497 501 dig_connector = radeon_connector->con_priv;
+2
drivers/gpu/drm/radeon/cikd.h
··· 2156 2156 #define ATC_VM_APERTURE1_HIGH_ADDR 0x330Cu 2157 2157 #define ATC_VM_APERTURE1_LOW_ADDR 0x3304u 2158 2158 2159 + #define IH_VMID_0_LUT 0x3D40u 2160 + 2159 2161 #endif
+1 -1
drivers/gpu/drm/radeon/dce3_1_afmt.c
··· 103 103 } 104 104 105 105 sad_count = drm_edid_to_sad(radeon_connector->edid, &sads); 106 - if (sad_count < 0) { 106 + if (sad_count <= 0) { 107 107 DRM_ERROR("Couldn't read SADs: %d\n", sad_count); 108 108 return; 109 109 }
+4 -6
drivers/gpu/drm/radeon/kv_dpm.c
··· 2745 2745 pi->enable_auto_thermal_throttling = true; 2746 2746 pi->disable_nb_ps3_in_battery = false; 2747 2747 if (radeon_bapm == -1) { 2748 - /* There are stability issues reported on with 2749 - * bapm enabled on an asrock system. 2750 - */ 2751 - if (rdev->pdev->subsystem_vendor == 0x1849) 2752 - pi->bapm_enable = false; 2753 - else 2748 + /* only enable bapm on KB, ML by default */ 2749 + if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) 2754 2750 pi->bapm_enable = true; 2751 + else 2752 + pi->bapm_enable = false; 2755 2753 } else if (radeon_bapm == 0) { 2756 2754 pi->bapm_enable = false; 2757 2755 } else {
+20 -3
drivers/gpu/drm/radeon/radeon_kfd.c
··· 72 72 static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, 73 73 uint32_t queue_id, uint32_t __user *wptr); 74 74 75 - static bool kgd_hqd_is_occupies(struct kgd_dev *kgd, uint64_t queue_address, 75 + static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, 76 76 uint32_t pipe_id, uint32_t queue_id); 77 77 78 78 static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type, ··· 92 92 .init_memory = kgd_init_memory, 93 93 .init_pipeline = kgd_init_pipeline, 94 94 .hqd_load = kgd_hqd_load, 95 - .hqd_is_occupies = kgd_hqd_is_occupies, 95 + .hqd_is_occupied = kgd_hqd_is_occupied, 96 96 .hqd_destroy = kgd_hqd_destroy, 97 97 .get_fw_version = get_fw_version 98 98 }; ··· 101 101 102 102 bool radeon_kfd_init(void) 103 103 { 104 + #if defined(CONFIG_HSA_AMD_MODULE) 104 105 bool (*kgd2kfd_init_p)(unsigned, const struct kfd2kgd_calls*, 105 106 const struct kgd2kfd_calls**); 106 107 ··· 118 117 } 119 118 120 119 return true; 120 + #elif defined(CONFIG_HSA_AMD) 121 + if (!kgd2kfd_init(KFD_INTERFACE_VERSION, &kfd2kgd, &kgd2kfd)) { 122 + kgd2kfd = NULL; 123 + 124 + return false; 125 + } 126 + 127 + return true; 128 + #else 129 + return false; 130 + #endif 121 131 } 122 132 123 133 void radeon_kfd_fini(void) ··· 390 378 cpu_relax(); 391 379 write_register(kgd, ATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid); 392 380 381 + /* Mapping vmid to pasid also for IH block */ 382 + write_register(kgd, IH_VMID_0_LUT + vmid * sizeof(uint32_t), 383 + pasid_mapping); 384 + 393 385 return 0; 394 386 } 395 387 ··· 533 517 return 0; 534 518 } 535 519 536 - static bool kgd_hqd_is_occupies(struct kgd_dev *kgd, uint64_t queue_address, 520 + static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, 537 521 uint32_t pipe_id, uint32_t queue_id) 538 522 { 539 523 uint32_t act; ··· 572 556 if (timeout == 0) { 573 557 pr_err("kfd: cp queue preemption time out (%dms)\n", 574 558 temp); 559 + release_queue(kgd); 575 560 return -ETIME; 576 561 } 577 562 msleep(20);
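The #if ladder added to radeon_kfd_init() above covers the three ways a tristate Kconfig symbol can resolve. For a tristate FOO, kbuild defines CONFIG_FOO when FOO=y, CONFIG_FOO_MODULE when FOO=m, and neither when FOO=n, so code that must behave differently for a modular consumer follows this pattern:

    #if defined(CONFIG_FOO_MODULE)
            /* FOO=m: resolve the entry point at runtime, e.g. via
             * symbol_request(), since the module may not be loaded yet */
    #elif defined(CONFIG_FOO)
            /* FOO=y: the builtin entry point can be called directly */
    #else
            /* FOO=n: feature compiled out */
            return false;
    #endif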
+1 -1
drivers/gpu/drm/radeon/radeon_state.c
··· 1703 1703 u32 format; 1704 1704 u32 *buffer; 1705 1705 const u8 __user *data; 1706 - int size, dwords, tex_width, blit_width, spitch; 1706 + unsigned int size, dwords, tex_width, blit_width, spitch; 1707 1707 u32 height; 1708 1708 int i; 1709 1709 u32 texpitch, microtile;
+2 -1
drivers/hid/Kconfig
··· 27 27 28 28 config HID_BATTERY_STRENGTH 29 29 bool "Battery level reporting for HID devices" 30 - depends on HID && POWER_SUPPLY && HID = POWER_SUPPLY 30 + depends on HID 31 + select POWER_SUPPLY 31 32 default n 32 33 ---help--- 33 34 This option adds support of reporting battery strength (for HID devices
+1
drivers/hid/hid-core.c
··· 1805 1805 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) }, 1806 1806 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) }, 1807 1807 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X) }, 1808 + { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) }, 1808 1809 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) }, 1809 1810 { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) }, 1810 1811 { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) },
+1
drivers/hid/hid-ids.h
··· 526 526 #define USB_DEVICE_ID_KYE_GPEN_560 0x5003 527 527 #define USB_DEVICE_ID_KYE_EASYPEN_I405X 0x5010 528 528 #define USB_DEVICE_ID_KYE_MOUSEPEN_I608X 0x5011 529 + #define USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2 0x501a 529 530 #define USB_DEVICE_ID_KYE_EASYPEN_M610X 0x5013 530 531 531 532 #define USB_VENDOR_ID_LABTEC 0x1020
+3
drivers/hid/hid-input.c
··· 312 312 USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI), 313 313 HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE }, 314 314 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, 315 + USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO), 316 + HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE }, 317 + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, 315 318 USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI), 316 319 HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE }, 317 320 {}
+4
drivers/hid/hid-kye.c
··· 323 323 } 324 324 break; 325 325 case USB_DEVICE_ID_KYE_MOUSEPEN_I608X: 326 + case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2: 326 327 if (*rsize == MOUSEPEN_I608X_RDESC_ORIG_SIZE) { 327 328 rdesc = mousepen_i608x_rdesc_fixed; 328 329 *rsize = sizeof(mousepen_i608x_rdesc_fixed); ··· 416 415 switch (id->product) { 417 416 case USB_DEVICE_ID_KYE_EASYPEN_I405X: 418 417 case USB_DEVICE_ID_KYE_MOUSEPEN_I608X: 418 + case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2: 419 419 case USB_DEVICE_ID_KYE_EASYPEN_M610X: 420 420 ret = kye_tablet_enable(hdev); 421 421 if (ret) { ··· 447 445 USB_DEVICE_ID_KYE_EASYPEN_I405X) }, 448 446 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, 449 447 USB_DEVICE_ID_KYE_MOUSEPEN_I608X) }, 448 + { HID_USB_DEVICE(USB_VENDOR_ID_KYE, 449 + USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) }, 450 450 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, 451 451 USB_DEVICE_ID_KYE_EASYPEN_M610X) }, 452 452 { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
+15 -1
drivers/hid/hid-logitech-dj.c
··· 962 962 963 963 switch (data[0]) { 964 964 case REPORT_ID_DJ_SHORT: 965 + if (size != DJREPORT_SHORT_LENGTH) { 966 + dev_err(&hdev->dev, "DJ report of bad size (%d)", size); 967 + return false; 968 + } 965 969 return logi_dj_dj_event(hdev, report, data, size); 966 970 case REPORT_ID_HIDPP_SHORT: 967 - /* intentional fallthrough */ 971 + if (size != HIDPP_REPORT_SHORT_LENGTH) { 972 + dev_err(&hdev->dev, 973 + "Short HID++ report of bad size (%d)", size); 974 + return false; 975 + } 976 + return logi_dj_hidpp_event(hdev, report, data, size); 968 977 case REPORT_ID_HIDPP_LONG: 978 + if (size != HIDPP_REPORT_LONG_LENGTH) { 979 + dev_err(&hdev->dev, 980 + "Long HID++ report of bad size (%d)", size); 981 + return false; 982 + } 969 983 return logi_dj_hidpp_event(hdev, report, data, size); 970 984 } 971 985
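The dispatcher above used to let REPORT_ID_HIDPP_SHORT fall through to the long-report handler with no length check at all; since these buffers arrive straight from the device, every report ID now validates its exact expected length before any field is read. The general shape, with illustrative names:

    /* never index data[] beyond `size`; the device chose both */
    switch (data[0]) {
    case MY_REPORT_ID:
            if (size != MY_REPORT_LENGTH) {
                    dev_err(&hdev->dev, "report of bad size (%d)", size);
                    return false;
            }
            return my_handle_report(hdev, data, size);
    }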
+41
drivers/hid/hid-logitech-hidpp.c
··· 282 282 (report->rap.sub_id == 0x41); 283 283 } 284 284 285 + /** 286 + * hidpp_prefix_name() prefixes the current given name with "Logitech ". 287 + */ 288 + static void hidpp_prefix_name(char **name, int name_length) 289 + { 290 + #define PREFIX_LENGTH 9 /* "Logitech " */ 291 + 292 + int new_length; 293 + char *new_name; 294 + 295 + if (name_length > PREFIX_LENGTH && 296 + strncmp(*name, "Logitech ", PREFIX_LENGTH) == 0) 297 + /* The prefix has is already in the name */ 298 + return; 299 + 300 + new_length = PREFIX_LENGTH + name_length; 301 + new_name = kzalloc(new_length, GFP_KERNEL); 302 + if (!new_name) 303 + return; 304 + 305 + snprintf(new_name, new_length, "Logitech %s", *name); 306 + 307 + kfree(*name); 308 + 309 + *name = new_name; 310 + } 311 + 285 312 /* -------------------------------------------------------------------------- */ 286 313 /* HIDP++ 1.0 commands */ 287 314 /* -------------------------------------------------------------------------- */ ··· 348 321 return NULL; 349 322 350 323 memcpy(name, &response.rap.params[2], len); 324 + 325 + /* include the terminating '\0' */ 326 + hidpp_prefix_name(&name, len + 1); 327 + 351 328 return name; 352 329 } 353 330 ··· 528 497 } 529 498 index += ret; 530 499 } 500 + 501 + /* include the terminating '\0' */ 502 + hidpp_prefix_name(&name, __name_length + 1); 531 503 532 504 return name; 533 505 } ··· 828 794 829 795 switch (data[0]) { 830 796 case 0x02: 797 + if (size < 2) { 798 + hid_err(hdev, "Received HID report of bad size (%d)", 799 + size); 800 + return 1; 801 + } 831 802 if (hidpp->quirks & HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS) { 832 803 input_event(wd->input, EV_KEY, BTN_LEFT, 833 804 !!(data[1] & 0x01)); 834 805 input_event(wd->input, EV_KEY, BTN_RIGHT, 835 806 !!(data[1] & 0x02)); 836 807 input_sync(wd->input); 808 + return 0; 837 809 } else { 838 810 if (size < 21) 839 811 return 1; 840 812 return wtp_mouse_raw_xy_event(hidpp, &data[7]); 841 813 } 842 814 case REPORT_ID_HIDPP_LONG: 815 + /* size is already checked in hidpp_raw_event. */ 843 816 if ((report->fap.feature_index != wd->mt_feature_index) || 844 817 (report->fap.funcindex_clientid != EVENT_TOUCHPAD_RAW_XY)) 845 818 return 1;
+6 -2
drivers/hid/hid-roccat-pyra.c
··· 35 35 static void profile_activated(struct pyra_device *pyra, 36 36 unsigned int new_profile) 37 37 { 38 + if (new_profile >= ARRAY_SIZE(pyra->profile_settings)) 39 + return; 38 40 pyra->actual_profile = new_profile; 39 41 pyra->actual_cpi = pyra->profile_settings[pyra->actual_profile].y_cpi; 40 42 } ··· 259 257 if (off != 0 || count != PYRA_SIZE_SETTINGS) 260 258 return -EINVAL; 261 259 262 - mutex_lock(&pyra->pyra_lock); 263 - 264 260 settings = (struct pyra_settings const *)buf; 261 + if (settings->startup_profile >= ARRAY_SIZE(pyra->profile_settings)) 262 + return -EINVAL; 263 + 264 + mutex_lock(&pyra->pyra_lock); 265 265 266 266 retval = pyra_set_settings(usb_dev, settings); 267 267 if (retval) {
-5
drivers/hid/i2c-hid/i2c-hid.c
··· 706 706 707 707 static void i2c_hid_stop(struct hid_device *hid) 708 708 { 709 - struct i2c_client *client = hid->driver_data; 710 - struct i2c_hid *ihid = i2c_get_clientdata(client); 711 - 712 709 hid->claimed = 0; 713 - 714 - i2c_hid_free_buffers(ihid); 715 710 } 716 711 717 712 static int i2c_hid_open(struct hid_device *hid)
+1
drivers/hid/usbhid/hid-quirks.c
··· 124 124 { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT }, 125 125 { USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS }, 126 126 { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT }, 127 + { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2, HID_QUIRK_MULTI_INPUT }, 127 128 { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT }, 128 129 { USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS }, 129 130 { USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD, HID_QUIRK_NO_INIT_REPORTS },
+4 -8
drivers/iommu/intel-iommu.c
··· 4029 4029 if (action != BUS_NOTIFY_REMOVED_DEVICE) 4030 4030 return 0; 4031 4031 4032 - /* 4033 - * If the device is still attached to a device driver we can't 4034 - * tear down the domain yet as DMA mappings may still be in use. 4035 - * Wait for the BUS_NOTIFY_UNBOUND_DRIVER event to do that. 4036 - */ 4037 - if (action == BUS_NOTIFY_DEL_DEVICE && dev->driver != NULL) 4038 - return 0; 4039 - 4040 4032 domain = find_domain(dev); 4041 4033 if (!domain) 4042 4034 return 0; ··· 4420 4428 domain_remove_one_dev_info(old_domain, dev); 4421 4429 else 4422 4430 domain_remove_dev_info(old_domain); 4431 + 4432 + if (!domain_type_is_vm_or_si(old_domain) && 4433 + list_empty(&old_domain->devices)) 4434 + domain_exit(old_domain); 4423 4435 } 4424 4436 } 4425 4437
+3 -3
drivers/iommu/ipmmu-vmsa.c
··· 558 558 559 559 static u64 ipmmu_page_prot(unsigned int prot, u64 type) 560 560 { 561 - u64 pgprot = ARM_VMSA_PTE_XN | ARM_VMSA_PTE_nG | ARM_VMSA_PTE_AF 561 + u64 pgprot = ARM_VMSA_PTE_nG | ARM_VMSA_PTE_AF 562 562 | ARM_VMSA_PTE_SH_IS | ARM_VMSA_PTE_AP_UNPRIV 563 563 | ARM_VMSA_PTE_NS | type; 564 564 ··· 568 568 if (prot & IOMMU_CACHE) 569 569 pgprot |= IMMAIR_ATTR_IDX_WBRWA << ARM_VMSA_PTE_ATTRINDX_SHIFT; 570 570 571 - if (prot & IOMMU_EXEC) 572 - pgprot &= ~ARM_VMSA_PTE_XN; 571 + if (prot & IOMMU_NOEXEC) 572 + pgprot |= ARM_VMSA_PTE_XN; 573 573 else if (!(prot & (IOMMU_READ | IOMMU_WRITE))) 574 574 /* If no access create a faulting entry to avoid TLB fills. */ 575 575 pgprot &= ~ARM_VMSA_PTE_PAGE;
-1
drivers/iommu/rockchip-iommu.c
··· 1009 1009 .remove = rk_iommu_remove, 1010 1010 .driver = { 1011 1011 .name = "rk_iommu", 1012 - .owner = THIS_MODULE, 1013 1012 .of_match_table = of_match_ptr(rk_iommu_dt_ids), 1014 1013 }, 1015 1014 };
+1 -1
drivers/isdn/hardware/eicon/message.c
··· 4880 4880 byte SS_Ind[] = "\x05\x02\x00\x02\x00\x00"; /* Hold_Ind struct*/ 4881 4881 byte CF_Ind[] = "\x09\x02\x00\x06\x00\x00\x00\x00\x00\x00"; 4882 4882 byte Interr_Err_Ind[] = "\x0a\x02\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"; 4883 - byte CONF_Ind[] = "\x09\x16\x00\x06\x00\x00\0x00\0x00\0x00\0x00"; 4883 + byte CONF_Ind[] = "\x09\x16\x00\x06\x00\x00\x00\x00\x00\x00"; 4884 4884 byte force_mt_info = false; 4885 4885 byte dir; 4886 4886 dword d;
+6 -6
drivers/leds/leds-netxbig.c
··· 330 330 led_dat->sata = 0; 331 331 led_dat->cdev.brightness = LED_OFF; 332 332 led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME; 333 - /* 334 - * If available, expose the SATA activity blink capability through 335 - * a "sata" sysfs attribute. 336 - */ 337 - if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE) 338 - led_dat->cdev.groups = netxbig_led_groups; 339 333 led_dat->mode_addr = template->mode_addr; 340 334 led_dat->mode_val = template->mode_val; 341 335 led_dat->bright_addr = template->bright_addr; 342 336 led_dat->bright_max = (1 << pdata->gpio_ext->num_data) - 1; 343 337 led_dat->timer = pdata->timer; 344 338 led_dat->num_timer = pdata->num_timer; 339 + /* 340 + * If available, expose the SATA activity blink capability through 341 + * a "sata" sysfs attribute. 342 + */ 343 + if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE) 344 + led_dat->cdev.groups = netxbig_led_groups; 345 345 346 346 return led_classdev_register(&pdev->dev, &led_dat->cdev); 347 347 }
+63 -19
drivers/misc/cxl/context.c
··· 100 100 return 0; 101 101 } 102 102 103 + static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 104 + { 105 + struct cxl_context *ctx = vma->vm_file->private_data; 106 + unsigned long address = (unsigned long)vmf->virtual_address; 107 + u64 area, offset; 108 + 109 + offset = vmf->pgoff << PAGE_SHIFT; 110 + 111 + pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n", 112 + __func__, ctx->pe, address, offset); 113 + 114 + if (ctx->afu->current_mode == CXL_MODE_DEDICATED) { 115 + area = ctx->afu->psn_phys; 116 + if (offset > ctx->afu->adapter->ps_size) 117 + return VM_FAULT_SIGBUS; 118 + } else { 119 + area = ctx->psn_phys; 120 + if (offset > ctx->psn_size) 121 + return VM_FAULT_SIGBUS; 122 + } 123 + 124 + mutex_lock(&ctx->status_mutex); 125 + 126 + if (ctx->status != STARTED) { 127 + mutex_unlock(&ctx->status_mutex); 128 + pr_devel("%s: Context not started, failing problem state access\n", __func__); 129 + return VM_FAULT_SIGBUS; 130 + } 131 + 132 + vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT); 133 + 134 + mutex_unlock(&ctx->status_mutex); 135 + 136 + return VM_FAULT_NOPAGE; 137 + } 138 + 139 + static const struct vm_operations_struct cxl_mmap_vmops = { 140 + .fault = cxl_mmap_fault, 141 + }; 142 + 103 143 /* 104 144 * Map a per-context mmio space into the given vma. 105 145 */ ··· 148 108 u64 len = vma->vm_end - vma->vm_start; 149 109 len = min(len, ctx->psn_size); 150 110 151 - if (ctx->afu->current_mode == CXL_MODE_DEDICATED) { 152 - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 153 - return vm_iomap_memory(vma, ctx->afu->psn_phys, ctx->afu->adapter->ps_size); 154 - } 111 + if (ctx->afu->current_mode != CXL_MODE_DEDICATED) { 112 + /* make sure there is a valid per process space for this AFU */ 113 + if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) { 114 + pr_devel("AFU doesn't support mmio space\n"); 115 + return -EINVAL; 116 + } 155 117 156 - /* make sure there is a valid per process space for this AFU */ 157 - if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) { 158 - pr_devel("AFU doesn't support mmio space\n"); 159 - return -EINVAL; 118 + /* Can't mmap until the AFU is enabled */ 119 + if (!ctx->afu->enabled) 120 + return -EBUSY; 160 121 } 161 - 162 - /* Can't mmap until the AFU is enabled */ 163 - if (!ctx->afu->enabled) 164 - return -EBUSY; 165 122 166 123 pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__, 167 124 ctx->psn_phys, ctx->pe , ctx->master); 168 125 126 + vma->vm_flags |= VM_IO | VM_PFNMAP; 169 127 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 170 - return vm_iomap_memory(vma, ctx->psn_phys, len); 128 + vma->vm_ops = &cxl_mmap_vmops; 129 + return 0; 171 130 } 172 131 173 132 /* ··· 189 150 afu_release_irqs(ctx); 190 151 flush_work(&ctx->fault_work); /* Only needed for dedicated process */ 191 152 wake_up_all(&ctx->wq); 192 - 193 - /* Release Problem State Area mapping */ 194 - mutex_lock(&ctx->mapping_lock); 195 - if (ctx->mapping) 196 - unmap_mapping_range(ctx->mapping, 0, 0, 1); 197 - mutex_unlock(&ctx->mapping_lock); 198 153 } 199 154 200 155 /* ··· 217 184 * created and torn down after the IDR removed 218 185 */ 219 186 __detach_context(ctx); 187 + 188 + /* 189 + * We are force detaching - remove any active PSA mappings so 190 + * userspace cannot interfere with the card if it comes back. 191 + * Easiest way to exercise this is to unbind and rebind the 192 + * driver via sysfs while it is in use. 
193 + */ 194 + mutex_lock(&ctx->mapping_lock); 195 + if (ctx->mapping) 196 + unmap_mapping_range(ctx->mapping, 0, 0, 1); 197 + mutex_unlock(&ctx->mapping_lock); 220 198 } 221 199 mutex_unlock(&afu->contexts_lock); 222 200 }
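The cxl/context.c change above swaps a whole-range vm_iomap_memory() at mmap() time for a fault handler, so each access can be validated against the context's current state, and stale mappings can later be revoked with unmap_mapping_range() when a context is force-detached. A skeleton of the technique using the 3.19-era fault signature; region_base, region_size and device_ready() are placeholders, not cxl symbols:

    static int my_mmap_fault(struct vm_area_struct *vma,
                             struct vm_fault *vmf)
    {
            unsigned long addr = (unsigned long)vmf->virtual_address;
            u64 offset = vmf->pgoff << PAGE_SHIFT;

            if (offset > region_size || !device_ready())
                    return VM_FAULT_SIGBUS;

            vm_insert_pfn(vma, addr, (region_base + offset) >> PAGE_SHIFT);
            return VM_FAULT_NOPAGE;
    }

    static const struct vm_operations_struct my_vmops = {
            .fault = my_mmap_fault,
    };

    /* in the mmap() handler: */
    vma->vm_flags |= VM_IO | VM_PFNMAP;  /* required for vm_insert_pfn() */
    vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
    vma->vm_ops = &my_vmops;
    return 0;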
+8 -6
drivers/misc/cxl/file.c
··· 140 140 141 141 pr_devel("%s: pe: %i\n", __func__, ctx->pe); 142 142 143 - mutex_lock(&ctx->status_mutex); 144 - if (ctx->status != OPENED) { 145 - rc = -EIO; 146 - goto out; 147 - } 148 - 143 + /* Do this outside the status_mutex to avoid a circular dependency with 144 + * the locking in cxl_mmap_fault() */ 149 145 if (copy_from_user(&work, uwork, 150 146 sizeof(struct cxl_ioctl_start_work))) { 151 147 rc = -EFAULT; 148 + goto out; 149 + } 150 + 151 + mutex_lock(&ctx->status_mutex); 152 + if (ctx->status != OPENED) { 153 + rc = -EIO; 152 154 goto out; 153 155 } 154 156
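The reordering in cxl/file.c above follows directly from that new fault handler: copy_from_user() can fault, and if the ioctl argument happens to live in one of these mmaps, resolving the fault takes status_mutex, so holding that mutex across the copy is a self-deadlock. The rule of thumb:

    /* copy_from_user() may fault into our own .fault handler;
     * never hold a lock that handler takes while copying */
    if (copy_from_user(&work, uwork, sizeof(work)))
            return -EFAULT;

    mutex_lock(&ctx->status_mutex);
    /* ... validate state, consume `work` ... */
    mutex_unlock(&ctx->status_mutex);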
+2
drivers/mmc/host/sdhci-acpi.c
··· 247 247 { "INT33BB" , "3" , &sdhci_acpi_slot_int_sd }, 248 248 { "INT33C6" , NULL, &sdhci_acpi_slot_int_sdio }, 249 249 { "INT3436" , NULL, &sdhci_acpi_slot_int_sdio }, 250 + { "INT344D" , NULL, &sdhci_acpi_slot_int_sdio }, 250 251 { "PNP0D40" }, 251 252 { }, 252 253 }; ··· 258 257 { "INT33BB" }, 259 258 { "INT33C6" }, 260 259 { "INT3436" }, 260 + { "INT344D" }, 261 261 { "PNP0D40" }, 262 262 { }, 263 263 };
+25
drivers/mmc/host/sdhci-pci.c
··· 993 993 .subdevice = PCI_ANY_ID, 994 994 .driver_data = (kernel_ulong_t)&sdhci_intel_mrfl_mmc, 995 995 }, 996 + 997 + { 998 + .vendor = PCI_VENDOR_ID_INTEL, 999 + .device = PCI_DEVICE_ID_INTEL_SPT_EMMC, 1000 + .subvendor = PCI_ANY_ID, 1001 + .subdevice = PCI_ANY_ID, 1002 + .driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc, 1003 + }, 1004 + 1005 + { 1006 + .vendor = PCI_VENDOR_ID_INTEL, 1007 + .device = PCI_DEVICE_ID_INTEL_SPT_SDIO, 1008 + .subvendor = PCI_ANY_ID, 1009 + .subdevice = PCI_ANY_ID, 1010 + .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio, 1011 + }, 1012 + 1013 + { 1014 + .vendor = PCI_VENDOR_ID_INTEL, 1015 + .device = PCI_DEVICE_ID_INTEL_SPT_SD, 1016 + .subvendor = PCI_ANY_ID, 1017 + .subdevice = PCI_ANY_ID, 1018 + .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd, 1019 + }, 1020 + 996 1021 { 997 1022 .vendor = PCI_VENDOR_ID_O2, 998 1023 .device = PCI_DEVICE_ID_O2_8120,
+3
drivers/mmc/host/sdhci-pci.h
··· 21 21 #define PCI_DEVICE_ID_INTEL_CLV_EMMC0 0x08e5 22 22 #define PCI_DEVICE_ID_INTEL_CLV_EMMC1 0x08e6 23 23 #define PCI_DEVICE_ID_INTEL_QRK_SD 0x08A7 24 + #define PCI_DEVICE_ID_INTEL_SPT_EMMC 0x9d2b 25 + #define PCI_DEVICE_ID_INTEL_SPT_SDIO 0x9d2c 26 + #define PCI_DEVICE_ID_INTEL_SPT_SD 0x9d2d 24 27 25 28 /* 26 29 * PCI registers
+7 -8
drivers/mmc/host/sdhci-pxav3.c
··· 300 300 if (IS_ERR(host)) 301 301 return PTR_ERR(host); 302 302 303 - if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) { 304 - ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info()); 305 - if (ret < 0) 306 - goto err_mbus_win; 307 - } 308 - 309 - 310 303 pltfm_host = sdhci_priv(host); 311 304 pltfm_host->priv = pxa; 312 305 ··· 317 324 pxa->clk_core = devm_clk_get(dev, "core"); 318 325 if (!IS_ERR(pxa->clk_core)) 319 326 clk_prepare_enable(pxa->clk_core); 327 + 328 + if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) { 329 + ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info()); 330 + if (ret < 0) 331 + goto err_mbus_win; 332 + } 320 333 321 334 /* enable 1/8V DDR capable */ 322 335 host->mmc->caps |= MMC_CAP_1_8V_DDR; ··· 395 396 pm_runtime_disable(&pdev->dev); 396 397 err_of_parse: 397 398 err_cd_req: 399 + err_mbus_win: 398 400 clk_disable_unprepare(pxa->clk_io); 399 401 if (!IS_ERR(pxa->clk_core)) 400 402 clk_disable_unprepare(pxa->clk_core); 401 403 err_clk_get: 402 - err_mbus_win: 403 404 sdhci_pltfm_free(pdev); 404 405 return ret; 405 406 }
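The sdhci-pxav3 reshuffle above is about ordering: the mbus window configuration now runs only after the clocks are enabled, and the error labels were reordered so teardown releases resources in reverse order of acquisition (the enic hunk further down fixes the same class of error-path bug). The canonical goto-unwind shape, as a sketch:

    ret = acquire_a();
    if (ret)
            goto err_a;

    ret = acquire_b();      /* depends on a */
    if (ret)
            goto err_b;

    return 0;

    err_b:
            release_a();    /* b failed: undo everything acquired before it */
    err_a:
            return ret;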
+54 -26
drivers/mmc/host/sdhci.c
··· 259 259 260 260 del_timer_sync(&host->tuning_timer); 261 261 host->flags &= ~SDHCI_NEEDS_RETUNING; 262 - host->mmc->max_blk_count = 263 - (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535; 264 262 } 265 263 sdhci_enable_card_detection(host); 266 264 } ··· 1271 1273 spin_unlock_irq(&host->lock); 1272 1274 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); 1273 1275 spin_lock_irq(&host->lock); 1276 + 1277 + if (mode != MMC_POWER_OFF) 1278 + sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL); 1279 + else 1280 + sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 1281 + 1274 1282 return; 1275 1283 } 1276 1284 ··· 1357 1353 1358 1354 sdhci_runtime_pm_get(host); 1359 1355 1356 + present = mmc_gpio_get_cd(host->mmc); 1357 + 1360 1358 spin_lock_irqsave(&host->lock, flags); 1361 1359 1362 1360 WARN_ON(host->mrq != NULL); ··· 1387 1381 * zero: cd-gpio is used, and card is removed 1388 1382 * one: cd-gpio is used, and card is present 1389 1383 */ 1390 - present = mmc_gpio_get_cd(host->mmc); 1391 1384 if (present < 0) { 1392 1385 /* If polling, assume that the card is always present. */ 1393 1386 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ··· 1885 1880 return !(present_state & SDHCI_DATA_LVL_MASK); 1886 1881 } 1887 1882 1883 + static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios) 1884 + { 1885 + struct sdhci_host *host = mmc_priv(mmc); 1886 + unsigned long flags; 1887 + 1888 + spin_lock_irqsave(&host->lock, flags); 1889 + host->flags |= SDHCI_HS400_TUNING; 1890 + spin_unlock_irqrestore(&host->lock, flags); 1891 + 1892 + return 0; 1893 + } 1894 + 1888 1895 static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) 1889 1896 { 1890 1897 struct sdhci_host *host = mmc_priv(mmc); ··· 1904 1887 int tuning_loop_counter = MAX_TUNING_LOOP; 1905 1888 int err = 0; 1906 1889 unsigned long flags; 1890 + unsigned int tuning_count = 0; 1891 + bool hs400_tuning; 1907 1892 1908 1893 sdhci_runtime_pm_get(host); 1909 1894 spin_lock_irqsave(&host->lock, flags); 1895 + 1896 + hs400_tuning = host->flags & SDHCI_HS400_TUNING; 1897 + host->flags &= ~SDHCI_HS400_TUNING; 1898 + 1899 + if (host->tuning_mode == SDHCI_TUNING_MODE_1) 1900 + tuning_count = host->tuning_count; 1910 1901 1911 1902 /* 1912 1903 * The Host Controller needs tuning only in case of SDR104 mode ··· 1924 1899 * tuning function has to be executed. 1925 1900 */ 1926 1901 switch (host->timing) { 1902 + /* HS400 tuning is done in HS200 mode */ 1927 1903 case MMC_TIMING_MMC_HS400: 1904 + err = -EINVAL; 1905 + goto out_unlock; 1906 + 1928 1907 case MMC_TIMING_MMC_HS200: 1908 + /* 1909 + * Periodic re-tuning for HS400 is not expected to be needed, so 1910 + * disable it here. 1911 + */ 1912 + if (hs400_tuning) 1913 + tuning_count = 0; 1914 + break; 1915 + 1929 1916 case MMC_TIMING_UHS_SDR104: 1930 1917 break; 1931 1918 ··· 1948 1911 /* FALLTHROUGH */ 1949 1912 1950 1913 default: 1951 - spin_unlock_irqrestore(&host->lock, flags); 1952 - sdhci_runtime_pm_put(host); 1953 - return 0; 1914 + goto out_unlock; 1954 1915 } 1955 1916 1956 1917 if (host->ops->platform_execute_tuning) { ··· 2072 2037 } 2073 2038 2074 2039 out: 2075 - /* 2076 - * If this is the very first time we are here, we start the retuning 2077 - * timer. Since only during the first time, SDHCI_NEEDS_RETUNING 2078 - * flag won't be set, we check this condition before actually starting 2079 - * the timer. 
2080 - */ 2081 - if (!(host->flags & SDHCI_NEEDS_RETUNING) && host->tuning_count && 2082 - (host->tuning_mode == SDHCI_TUNING_MODE_1)) { 2040 + host->flags &= ~SDHCI_NEEDS_RETUNING; 2041 + 2042 + if (tuning_count) { 2083 2043 host->flags |= SDHCI_USING_RETUNING_TIMER; 2084 - mod_timer(&host->tuning_timer, jiffies + 2085 - host->tuning_count * HZ); 2086 - /* Tuning mode 1 limits the maximum data length to 4MB */ 2087 - mmc->max_blk_count = (4 * 1024 * 1024) / mmc->max_blk_size; 2088 - } else if (host->flags & SDHCI_USING_RETUNING_TIMER) { 2089 - host->flags &= ~SDHCI_NEEDS_RETUNING; 2090 - /* Reload the new initial value for timer */ 2091 - mod_timer(&host->tuning_timer, jiffies + 2092 - host->tuning_count * HZ); 2044 + mod_timer(&host->tuning_timer, jiffies + tuning_count * HZ); 2093 2045 } 2094 2046 2095 2047 /* ··· 2092 2070 2093 2071 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 2094 2072 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 2073 + out_unlock: 2095 2074 spin_unlock_irqrestore(&host->lock, flags); 2096 2075 sdhci_runtime_pm_put(host); 2097 2076 ··· 2133 2110 { 2134 2111 struct sdhci_host *host = mmc_priv(mmc); 2135 2112 unsigned long flags; 2113 + int present; 2136 2114 2137 2115 /* First check if client has provided their own card event */ 2138 2116 if (host->ops->card_event) 2139 2117 host->ops->card_event(host); 2140 2118 2119 + present = sdhci_do_get_cd(host); 2120 + 2141 2121 spin_lock_irqsave(&host->lock, flags); 2142 2122 2143 2123 /* Check host->mrq first in case we are runtime suspended */ 2144 - if (host->mrq && !sdhci_do_get_cd(host)) { 2124 + if (host->mrq && !present) { 2145 2125 pr_err("%s: Card removed during transfer!\n", 2146 2126 mmc_hostname(host->mmc)); 2147 2127 pr_err("%s: Resetting controller.\n", ··· 2168 2142 .hw_reset = sdhci_hw_reset, 2169 2143 .enable_sdio_irq = sdhci_enable_sdio_irq, 2170 2144 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch, 2145 + .prepare_hs400_tuning = sdhci_prepare_hs400_tuning, 2171 2146 .execute_tuning = sdhci_execute_tuning, 2172 2147 .card_event = sdhci_card_event, 2173 2148 .card_busy = sdhci_card_busy, ··· 3287 3260 mmc->max_segs = SDHCI_MAX_SEGS; 3288 3261 3289 3262 /* 3290 - * Maximum number of sectors in one transfer. Limited by DMA boundary 3291 - * size (512KiB). 3263 + * Maximum number of sectors in one transfer. Limited by SDMA boundary 3264 + * size (512KiB). Note some tuning modes impose a 4MiB limit, but this 3265 + * is less anyway. 3292 3266 */ 3293 3267 mmc->max_req_size = 524288; 3294 3268
+3 -1
drivers/net/ethernet/allwinner/sun4i-emac.c
··· 850 850 } 851 851 852 852 db->clk = devm_clk_get(&pdev->dev, NULL); 853 - if (IS_ERR(db->clk)) 853 + if (IS_ERR(db->clk)) { 854 + ret = PTR_ERR(db->clk); 854 855 goto out; 856 + } 855 857 856 858 clk_prepare_enable(db->clk); 857 859
+6 -9
drivers/net/ethernet/altera/altera_tse_main.c
··· 1170 1170 init_error: 1171 1171 free_skbufs(dev); 1172 1172 alloc_skbuf_error: 1173 - if (priv->phydev) { 1174 - phy_disconnect(priv->phydev); 1175 - priv->phydev = NULL; 1176 - } 1177 1173 phy_error: 1178 1174 return ret; 1179 1175 } ··· 1182 1186 int ret; 1183 1187 unsigned long int flags; 1184 1188 1185 - /* Stop and disconnect the PHY */ 1186 - if (priv->phydev) { 1189 + /* Stop the PHY */ 1190 + if (priv->phydev) 1187 1191 phy_stop(priv->phydev); 1188 - phy_disconnect(priv->phydev); 1189 - priv->phydev = NULL; 1190 - } 1191 1192 1192 1193 netif_stop_queue(dev); 1193 1194 napi_disable(&priv->napi); ··· 1518 1525 static int altera_tse_remove(struct platform_device *pdev) 1519 1526 { 1520 1527 struct net_device *ndev = platform_get_drvdata(pdev); 1528 + struct altera_tse_private *priv = netdev_priv(ndev); 1529 + 1530 + if (priv->phydev) 1531 + phy_disconnect(priv->phydev); 1521 1532 1522 1533 platform_set_drvdata(pdev, NULL); 1523 1534 altera_tse_mdio_destroy(ndev);
+13 -11
drivers/net/ethernet/atheros/alx/main.c
··· 184 184 schedule_work(&alx->reset_wk); 185 185 } 186 186 187 - static bool alx_clean_rx_irq(struct alx_priv *alx, int budget) 187 + static int alx_clean_rx_irq(struct alx_priv *alx, int budget) 188 188 { 189 189 struct alx_rx_queue *rxq = &alx->rxq; 190 190 struct alx_rrd *rrd; 191 191 struct alx_buffer *rxb; 192 192 struct sk_buff *skb; 193 193 u16 length, rfd_cleaned = 0; 194 + int work = 0; 194 195 195 - while (budget > 0) { 196 + while (work < budget) { 196 197 rrd = &rxq->rrd[rxq->rrd_read_idx]; 197 198 if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT))) 198 199 break; ··· 204 203 ALX_GET_FIELD(le32_to_cpu(rrd->word0), 205 204 RRD_NOR) != 1) { 206 205 alx_schedule_reset(alx); 207 - return 0; 206 + return work; 208 207 } 209 208 210 209 rxb = &rxq->bufs[rxq->read_idx]; ··· 244 243 } 245 244 246 245 napi_gro_receive(&alx->napi, skb); 247 - budget--; 246 + work++; 248 247 249 248 next_pkt: 250 249 if (++rxq->read_idx == alx->rx_ringsz) ··· 259 258 if (rfd_cleaned) 260 259 alx_refill_rx_ring(alx, GFP_ATOMIC); 261 260 262 - return budget > 0; 261 + return work; 263 262 } 264 263 265 264 static int alx_poll(struct napi_struct *napi, int budget) 266 265 { 267 266 struct alx_priv *alx = container_of(napi, struct alx_priv, napi); 268 267 struct alx_hw *hw = &alx->hw; 269 - bool complete = true; 270 268 unsigned long flags; 269 + bool tx_complete; 270 + int work; 271 271 272 - complete = alx_clean_tx_irq(alx) && 273 - alx_clean_rx_irq(alx, budget); 272 + tx_complete = alx_clean_tx_irq(alx); 273 + work = alx_clean_rx_irq(alx, budget); 274 274 275 - if (!complete) 276 - return 1; 275 + if (!tx_complete || work == budget) 276 + return budget; 277 277 278 278 napi_complete(&alx->napi); 279 279 ··· 286 284 287 285 alx_post_write(hw); 288 286 289 - return 0; 287 + return work; 290 288 } 291 289 292 290 static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
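The alx rework above fixes a misuse of the NAPI contract: poll() must return the number of rx packets it actually consumed, never more than budget. Returning budget (without calling napi_complete()) keeps the device in polling mode; completing and returning less re-arms interrupts. The canonical shape, which the dnet.c hunk further down also converges on; the my_* helpers are illustrative:

    static int my_poll(struct napi_struct *napi, int budget)
    {
            struct my_priv *priv = container_of(napi, struct my_priv,
                                                napi);
            bool tx_done = my_clean_tx(priv);
            int work = my_clean_rx(priv, budget);   /* always <= budget */

            if (!tx_done || work == budget)
                    return budget;                  /* keep polling */

            napi_complete(napi);
            my_enable_rx_irq(priv);                 /* illustrative */
            return work;
    }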
+20 -3
drivers/net/ethernet/broadcom/tg3.c
··· 7413 7413 } 7414 7414 7415 7415 static void tg3_irq_quiesce(struct tg3 *tp) 7416 + __releases(tp->lock) 7417 + __acquires(tp->lock) 7416 7418 { 7417 7419 int i; 7418 7420 ··· 7423 7421 tp->irq_sync = 1; 7424 7422 smp_mb(); 7425 7423 7424 + spin_unlock_bh(&tp->lock); 7425 + 7426 7426 for (i = 0; i < tp->irq_cnt; i++) 7427 7427 synchronize_irq(tp->napi[i].irq_vec); 7428 + 7429 + spin_lock_bh(&tp->lock); 7428 7430 } 7429 7431 7430 7432 /* Fully shutdown all tg3 driver activity elsewhere in the system. ··· 9024 9018 9025 9019 /* tp->lock is held. */ 9026 9020 static int tg3_chip_reset(struct tg3 *tp) 9021 + __releases(tp->lock) 9022 + __acquires(tp->lock) 9027 9023 { 9028 9024 u32 val; 9029 9025 void (*write_op)(struct tg3 *, u32, u32); ··· 9081 9073 } 9082 9074 smp_mb(); 9083 9075 9076 + tg3_full_unlock(tp); 9077 + 9084 9078 for (i = 0; i < tp->irq_cnt; i++) 9085 9079 synchronize_irq(tp->napi[i].irq_vec); 9080 + 9081 + tg3_full_lock(tp, 0); 9086 9082 9087 9083 if (tg3_asic_rev(tp) == ASIC_REV_57780) { 9088 9084 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; ··· 10915 10903 { 10916 10904 struct tg3 *tp = (struct tg3 *) __opaque; 10917 10905 10918 - if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) 10919 - goto restart_timer; 10920 - 10921 10906 spin_lock(&tp->lock); 10907 + 10908 + if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) { 10909 + spin_unlock(&tp->lock); 10910 + goto restart_timer; 10911 + } 10922 10912 10923 10913 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 10924 10914 tg3_flag(tp, 57765_CLASS)) ··· 11115 11101 struct tg3 *tp = container_of(work, struct tg3, reset_task); 11116 11102 int err; 11117 11103 11104 + rtnl_lock(); 11118 11105 tg3_full_lock(tp, 0); 11119 11106 11120 11107 if (!netif_running(tp->dev)) { 11121 11108 tg3_flag_clear(tp, RESET_TASK_PENDING); 11122 11109 tg3_full_unlock(tp); 11110 + rtnl_unlock(); 11123 11111 return; 11124 11112 } 11125 11113 ··· 11154 11138 tg3_phy_start(tp); 11155 11139 11156 11140 tg3_flag_clear(tp, RESET_TASK_PENDING); 11141 + rtnl_unlock(); 11157 11142 } 11158 11143 11159 11144 static int tg3_request_irq(struct tg3 *tp, int irq_num)
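Most of the tg3 hunks above chase one bug class: synchronize_irq() sleeps until in-flight handlers complete, so invoking it while holding tp->lock (which the interrupt paths also contend for) risks deadlock; the lock is therefore dropped around the wait, with sparse __releases()/__acquires() annotations documenting the window. The reset_task hunk additionally takes the RTNL to serialize against device state changes. Condensed to its essence:

    static void my_irq_quiesce(struct my_dev *dev)
            __releases(dev->lock)
            __acquires(dev->lock)
    {
            spin_unlock_bh(&dev->lock);
            synchronize_irq(dev->irq);      /* may sleep */
            spin_lock_bh(&dev->lock);
    }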
+5 -5
drivers/net/ethernet/cadence/at91_ether.c
··· 340 340 res = PTR_ERR(lp->pclk); 341 341 goto err_free_dev; 342 342 } 343 - clk_enable(lp->pclk); 343 + clk_prepare_enable(lp->pclk); 344 344 345 345 lp->hclk = ERR_PTR(-ENOENT); 346 346 lp->tx_clk = ERR_PTR(-ENOENT); ··· 406 406 err_out_unregister_netdev: 407 407 unregister_netdev(dev); 408 408 err_disable_clock: 409 - clk_disable(lp->pclk); 409 + clk_disable_unprepare(lp->pclk); 410 410 err_free_dev: 411 411 free_netdev(dev); 412 412 return res; ··· 424 424 kfree(lp->mii_bus->irq); 425 425 mdiobus_free(lp->mii_bus); 426 426 unregister_netdev(dev); 427 - clk_disable(lp->pclk); 427 + clk_disable_unprepare(lp->pclk); 428 428 free_netdev(dev); 429 429 430 430 return 0; ··· 440 440 netif_stop_queue(net_dev); 441 441 netif_device_detach(net_dev); 442 442 443 - clk_disable(lp->pclk); 443 + clk_disable_unprepare(lp->pclk); 444 444 } 445 445 return 0; 446 446 } ··· 451 451 struct macb *lp = netdev_priv(net_dev); 452 452 453 453 if (netif_running(net_dev)) { 454 - clk_enable(lp->pclk); 454 + clk_prepare_enable(lp->pclk); 455 455 456 456 netif_device_attach(net_dev); 457 457 netif_start_queue(net_dev);
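The at91_ether conversion above matters under the common clock framework: a clock must be prepared (a step that may sleep) before it can be enabled (an atomic step), and calling clk_enable() on an unprepared clock is an error. The combined helpers keep the pairing obvious:

    ret = clk_prepare_enable(lp->pclk);  /* clk_prepare() + clk_enable() */
    if (ret)
            return ret;
    /* ... */
    clk_disable_unprepare(lp->pclk);     /* clk_disable() + clk_unprepare() */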
+1 -1
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
··· 2430 2430 */ 2431 2431 n10g = 0; 2432 2432 for_each_port(adapter, pidx) 2433 - n10g += is_10g_port(&adap2pinfo(adapter, pidx)->link_cfg); 2433 + n10g += is_x_10g_port(&adap2pinfo(adapter, pidx)->link_cfg); 2434 2434 2435 2435 /* 2436 2436 * We default to 1 queue per non-10G port and up to # of cores queues
+2
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
··· 323 323 return v; 324 324 325 325 v = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype); 326 + pi->mdio_addr = (v & FW_PORT_CMD_MDIOCAP_F) ? 327 + FW_PORT_CMD_MDIOADDR_G(v) : -1; 326 328 pi->port_type = FW_PORT_CMD_PTYPE_G(v); 327 329 pi->mod_type = FW_PORT_MOD_TYPE_NA; 328 330
+4 -2
drivers/net/ethernet/cisco/enic/enic_main.c
··· 1616 1616 if (vnic_rq_desc_used(&enic->rq[i]) == 0) { 1617 1617 netdev_err(netdev, "Unable to alloc receive buffers\n"); 1618 1618 err = -ENOMEM; 1619 - goto err_out_notify_unset; 1619 + goto err_out_free_rq; 1620 1620 } 1621 1621 } 1622 1622 ··· 1649 1649 1650 1650 return 0; 1651 1651 1652 - err_out_notify_unset: 1652 + err_out_free_rq: 1653 + for (i = 0; i < enic->rq_count; i++) 1654 + vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); 1653 1655 enic_dev_notify_unset(enic); 1654 1656 err_out_free_intr: 1655 1657 enic_free_intr(enic);
+5 -13
drivers/net/ethernet/dnet.c
··· 398 398 * break out of while loop if there are no more 399 399 * packets waiting 400 400 */ 401 - if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16)) { 402 - napi_complete(napi); 403 - int_enable = dnet_readl(bp, INTR_ENB); 404 - int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF; 405 - dnet_writel(bp, int_enable, INTR_ENB); 406 - return 0; 407 - } 401 + if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16)) 402 + break; 408 403 409 404 cmd_word = dnet_readl(bp, RX_LEN_FIFO); 410 405 pkt_len = cmd_word & 0xFFFF; ··· 428 433 "size %u.\n", dev->name, pkt_len); 429 434 } 430 435 431 - budget -= npackets; 432 - 433 436 if (npackets < budget) { 434 437 /* We processed all packets available. Tell NAPI it can 435 - * stop polling then re-enable rx interrupts */ 438 + * stop polling then re-enable rx interrupts. 439 + */ 436 440 napi_complete(napi); 437 441 int_enable = dnet_readl(bp, INTR_ENB); 438 442 int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF; 439 443 dnet_writel(bp, int_enable, INTR_ENB); 440 - return 0; 441 444 } 442 445 443 - /* There are still packets waiting */ 444 - return 1; 446 + return npackets; 445 447 } 446 448 447 449 static irqreturn_t dnet_interrupt(int irq, void *dev_id)
+2
drivers/net/ethernet/freescale/fec.h
··· 424 424 * (40ns * 6). 425 425 */ 426 426 #define FEC_QUIRK_BUG_CAPTURE (1 << 10) 427 + /* Controller has only one MDIO bus */ 428 + #define FEC_QUIRK_SINGLE_MDIO (1 << 11) 427 429 428 430 struct fec_enet_priv_tx_q { 429 431 int index;
+6 -4
drivers/net/ethernet/freescale/fec_main.c
··· 91 91 .driver_data = 0, 92 92 }, { 93 93 .name = "imx28-fec", 94 - .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME, 94 + .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME | 95 + FEC_QUIRK_SINGLE_MDIO, 95 96 }, { 96 97 .name = "imx6q-fec", 97 98 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | ··· 1938 1937 int err = -ENXIO, i; 1939 1938 1940 1939 /* 1941 - * The dual fec interfaces are not equivalent with enet-mac. 1940 + * The i.MX28 dual fec interfaces are not equal. 1942 1941 * Here are the differences: 1943 1942 * 1944 1943 * - fec0 supports MII & RMII modes while fec1 only supports RMII ··· 1953 1952 * mdio interface in board design, and need to be configured by 1954 1953 * fec0 mii_bus. 1955 1954 */ 1956 - if ((fep->quirks & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) { 1955 + if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) { 1957 1956 /* fec1 uses fec0 mii_bus */ 1958 1957 if (mii_cnt && fec0_mii_bus) { 1959 1958 fep->mii_bus = fec0_mii_bus; ··· 2016 2015 mii_cnt++; 2017 2016 2018 2017 /* save fec0 mii_bus */ 2019 - if (fep->quirks & FEC_QUIRK_ENET_MAC) 2018 + if (fep->quirks & FEC_QUIRK_SINGLE_MDIO) 2020 2019 fec0_mii_bus = fep->mii_bus; 2021 2020 2022 2021 return 0; ··· 3130 3129 pdev->id_entry = of_id->data; 3131 3130 fep->quirks = pdev->id_entry->driver_data; 3132 3131 3132 + fep->netdev = ndev; 3133 3133 fep->num_rx_queues = num_rx_qs; 3134 3134 fep->num_tx_queues = num_tx_qs; 3135 3135
+11
drivers/net/ethernet/intel/Kconfig
··· 281 281 282 282 If unsure, say N. 283 283 284 + config I40E_FCOE 285 + bool "Fibre Channel over Ethernet (FCoE)" 286 + default n 287 + depends on I40E && DCB && FCOE 288 + ---help--- 289 + Say Y here if you want to use Fibre Channel over Ethernet (FCoE) 290 + in the driver. This will create new netdev for exclusive FCoE 291 + use with XL710 FCoE offloads enabled. 292 + 293 + If unsure, say N. 294 + 284 295 config I40EVF 285 296 tristate "Intel(R) XL710 X710 Virtual Function Ethernet support" 286 297 depends on PCI_MSI
+1 -1
drivers/net/ethernet/intel/e100.c
··· 1543 1543 mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr); 1544 1544 } else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) && 1545 1545 (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) && 1546 - !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) { 1546 + (nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) { 1547 1547 /* enable/disable MDI/MDI-X auto-switching. */ 1548 1548 mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, 1549 1549 nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
+1 -1
drivers/net/ethernet/intel/i40e/Makefile
··· 44 44 i40e_virtchnl_pf.o 45 45 46 46 i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o 47 - i40e-$(CONFIG_FCOE:m=y) += i40e_fcoe.o 47 + i40e-$(CONFIG_I40E_FCOE) += i40e_fcoe.o
+3 -1
drivers/net/ethernet/intel/i40e/i40e_debugfs.c
··· 829 829 if (desc_n >= ring->count || desc_n < 0) { 830 830 dev_info(&pf->pdev->dev, 831 831 "descriptor %d not found\n", desc_n); 832 - return; 832 + goto out; 833 833 } 834 834 if (!is_rx_ring) { 835 835 txd = I40E_TX_DESC(ring, desc_n); ··· 855 855 } else { 856 856 dev_info(&pf->pdev->dev, "dump desc rx/tx <vsi_seid> <ring_id> [<desc_n>]\n"); 857 857 } 858 + 859 + out: 858 860 kfree(ring); 859 861 } 860 862
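The debugfs change is a leak repair: ring is allocated earlier in the function, so the descriptor-range check may no longer return directly; every exit now funnels through a label that frees it. The idiom in miniature, with a hypothetical dump function:

#include <linux/device.h>
#include <linux/slab.h>

static void dump_desc(struct device *dev, int desc_n, int count)
{
        void *ring = kmalloc(256, GFP_KERNEL); /* stand-in for the ring copy */

        if (!ring)
                return;

        if (desc_n >= count || desc_n < 0) {
                dev_info(dev, "descriptor %d not found\n", desc_n);
                goto out;       /* was: return, which leaked 'ring' */
        }

        /* ... format and print the descriptor ... */

out:
        kfree(ring);
}

A single cleanup label after the first allocation is the usual kernel defence against exactly this class of early-return leak.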
+2 -2
drivers/net/ethernet/intel/i40e/i40e_osdep.h
··· 78 78 } while (0) 79 79 80 80 typedef enum i40e_status_code i40e_status; 81 - #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) 81 + #ifdef CONFIG_I40E_FCOE 82 82 #define I40E_FCOE 83 - #endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */ 83 + #endif 84 84 #endif /* _I40E_OSDEP_H_ */
+72 -32
drivers/net/ethernet/intel/i40e/i40e_txrx.c
··· 658 658 return le32_to_cpu(*(volatile __le32 *)head); 659 659 } 660 660 661 + #define WB_STRIDE 0x3 662 + 661 663 /** 662 664 * i40e_clean_tx_irq - Reclaim resources after transmit completes 663 665 * @tx_ring: tx ring to clean ··· 761 759 tx_ring->q_vector->tx.total_bytes += total_bytes; 762 760 tx_ring->q_vector->tx.total_packets += total_packets; 763 761 762 + /* check to see if there are any non-cache aligned descriptors 763 + * waiting to be written back, and kick the hardware to force 764 + * them to be written back in case of napi polling 765 + */ 766 + if (budget && 767 + !((i & WB_STRIDE) == WB_STRIDE) && 768 + !test_bit(__I40E_DOWN, &tx_ring->vsi->state) && 769 + (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) 770 + tx_ring->arm_wb = true; 771 + else 772 + tx_ring->arm_wb = false; 773 + 764 774 if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) { 765 775 /* schedule immediate reset if we believe we hung */ 766 776 dev_info(tx_ring->dev, "Detected Tx Unit Hang\n" ··· 791 777 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); 792 778 793 779 dev_info(tx_ring->dev, 794 - "tx hang detected on queue %d, resetting adapter\n", 780 + "tx hang detected on queue %d, reset requested\n", 795 781 tx_ring->queue_index); 796 782 797 - tx_ring->netdev->netdev_ops->ndo_tx_timeout(tx_ring->netdev); 783 + /* do not fire the reset immediately, wait for the stack to 784 + * decide we are truly stuck, also prevents every queue from 785 + * simultaneously requesting a reset 786 + */ 798 787 799 - /* the adapter is about to reset, no point in enabling stuff */ 800 - return true; 788 + /* the adapter is about to reset, no point in enabling polling */ 789 + budget = 1; 801 790 } 802 791 803 792 netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev, ··· 823 806 } 824 807 } 825 808 826 - return budget > 0; 809 + return !!budget; 810 + } 811 + 812 + /** 813 + * i40e_force_wb - Arm hardware to do a wb on noncache aligned descriptors 814 + * @vsi: the VSI we care about 815 + * @q_vector: the vector on which to force writeback 816 + * 817 + **/ 818 + static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) 819 + { 820 + u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK | 821 + I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK | 822 + I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK 823 + /* allow 00 to be written to the index */; 824 + 825 + wr32(&vsi->back->hw, 826 + I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1), 827 + val); 827 828 } 828 829 829 830 /** ··· 1325 1290 * so the total length of IPv4 header is IHL*4 bytes 1326 1291 * The UDP_0 bit *may* bet set if the *inner* header is UDP 1327 1292 */ 1328 - if (ipv4_tunnel && 1329 - (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) && 1330 - !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) { 1293 + if (ipv4_tunnel) { 1331 1294 skb->transport_header = skb->mac_header + 1332 1295 sizeof(struct ethhdr) + 1333 1296 (ip_hdr(skb)->ihl * 4); ··· 1335 1302 skb->protocol == htons(ETH_P_8021AD)) 1336 1303 ? 
VLAN_HLEN : 0; 1337 1304 1338 - rx_udp_csum = udp_csum(skb); 1339 - iph = ip_hdr(skb); 1340 - csum = csum_tcpudp_magic( 1341 - iph->saddr, iph->daddr, 1342 - (skb->len - skb_transport_offset(skb)), 1343 - IPPROTO_UDP, rx_udp_csum); 1305 + if ((ip_hdr(skb)->protocol == IPPROTO_UDP) && 1306 + (udp_hdr(skb)->check != 0)) { 1307 + rx_udp_csum = udp_csum(skb); 1308 + iph = ip_hdr(skb); 1309 + csum = csum_tcpudp_magic( 1310 + iph->saddr, iph->daddr, 1311 + (skb->len - skb_transport_offset(skb)), 1312 + IPPROTO_UDP, rx_udp_csum); 1344 1313 1345 - if (udp_hdr(skb)->check != csum) 1346 - goto checksum_fail; 1314 + if (udp_hdr(skb)->check != csum) 1315 + goto checksum_fail; 1316 + 1317 + } /* else its GRE and so no outer UDP header */ 1347 1318 } 1348 1319 1349 1320 skb->ip_summed = CHECKSUM_UNNECESSARY; ··· 1618 1581 struct i40e_vsi *vsi = q_vector->vsi; 1619 1582 struct i40e_ring *ring; 1620 1583 bool clean_complete = true; 1584 + bool arm_wb = false; 1621 1585 int budget_per_ring; 1622 1586 1623 1587 if (test_bit(__I40E_DOWN, &vsi->state)) { ··· 1629 1591 /* Since the actual Tx work is minimal, we can give the Tx a larger 1630 1592 * budget and be more aggressive about cleaning up the Tx descriptors. 1631 1593 */ 1632 - i40e_for_each_ring(ring, q_vector->tx) 1594 + i40e_for_each_ring(ring, q_vector->tx) { 1633 1595 clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit); 1596 + arm_wb |= ring->arm_wb; 1597 + } 1634 1598 1635 1599 /* We attempt to distribute budget to each Rx queue fairly, but don't 1636 1600 * allow the budget to go below 1 because that would exit polling early. ··· 1643 1603 clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring); 1644 1604 1645 1605 /* If work not completed, return budget and polling will return */ 1646 - if (!clean_complete) 1606 + if (!clean_complete) { 1607 + if (arm_wb) 1608 + i40e_force_wb(vsi, q_vector); 1647 1609 return budget; 1610 + } 1648 1611 1649 1612 /* Work is done so exit the polling mode and re-enable the interrupt */ 1650 1613 napi_complete(napi); ··· 1883 1840 if (err < 0) 1884 1841 return err; 1885 1842 1886 - if (protocol == htons(ETH_P_IP)) { 1887 - iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); 1843 + iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); 1844 + ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb); 1845 + 1846 + if (iph->version == 4) { 1888 1847 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); 1889 1848 iph->tot_len = 0; 1890 1849 iph->check = 0; 1891 1850 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 1892 1851 0, IPPROTO_TCP, 0); 1893 - } else if (skb_is_gso_v6(skb)) { 1894 - 1895 - ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) 1896 - : ipv6_hdr(skb); 1852 + } else if (ipv6h->version == 6) { 1897 1853 tcph = skb->encapsulation ? 
inner_tcp_hdr(skb) : tcp_hdr(skb); 1898 1854 ipv6h->payload_len = 0; 1899 1855 tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, ··· 1988 1946 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; 1989 1947 } 1990 1948 } else if (tx_flags & I40E_TX_FLAGS_IPV6) { 1991 - if (tx_flags & I40E_TX_FLAGS_TSO) { 1992 - *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; 1949 + *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; 1950 + if (tx_flags & I40E_TX_FLAGS_TSO) 1993 1951 ip_hdr(skb)->check = 0; 1994 - } else { 1995 - *cd_tunneling |= 1996 - I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; 1997 - } 1998 1952 } 1999 1953 2000 1954 /* Now set the ctx descriptor fields */ ··· 2000 1962 ((skb_inner_network_offset(skb) - 2001 1963 skb_transport_offset(skb)) >> 1) << 2002 1964 I40E_TXD_CTX_QW0_NATLEN_SHIFT; 2003 - 1965 + if (this_ip_hdr->version == 6) { 1966 + tx_flags &= ~I40E_TX_FLAGS_IPV4; 1967 + tx_flags |= I40E_TX_FLAGS_IPV6; 1968 + } 2004 1969 } else { 2005 1970 network_hdr_len = skb_network_header_len(skb); 2006 1971 this_ip_hdr = ip_hdr(skb); ··· 2239 2198 /* Place RS bit on last descriptor of any packet that spans across the 2240 2199 * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline. 2241 2200 */ 2242 - #define WB_STRIDE 0x3 2243 2201 if (((i & WB_STRIDE) != WB_STRIDE) && 2244 2202 (first <= &tx_ring->tx_bi[i]) && 2245 2203 (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
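The largest piece of the i40e diff teaches the TX clean path to notice descriptors stranded short of a full write-back stride (WB_STRIDE, four descriptors per 64-byte cacheline) and, when a NAPI poll ends with work outstanding, to fire a software interrupt that forces the hardware to flush them. The arming decision reduces to a small predicate, sketched here with flattened parameters rather than the driver's ring and VSI structures:

#include <linux/types.h>

#define WB_STRIDE 0x3   /* descriptors per 64B write-back cacheline */

/* Arm a forced write-back only when the clean loop stopped early
 * (budget left over), the ring is still up, unused descriptors show
 * work is pending, and the next clean index is not already sitting
 * on a stride boundary (where hardware writes back on its own). */
static bool should_arm_wb(unsigned int next_to_clean, int budget,
                          bool ring_down, bool ring_idle)
{
        if (!budget || ring_down || ring_idle)
                return false;
        return (next_to_clean & WB_STRIDE) != WB_STRIDE;
}

i40e_napi_poll() then ORs arm_wb across the vector's TX rings and calls i40e_force_wb() only on the not-clean-complete path, so the extra software interrupt is paid for only when descriptors would otherwise linger.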
+1
drivers/net/ethernet/intel/i40e/i40e_txrx.h
··· 241 241 unsigned long last_rx_timestamp; 242 242 243 243 bool ring_active; /* is ring online or not */ 244 + bool arm_wb; /* do something to arm write back */ 244 245 245 246 /* stats structs */ 246 247 struct i40e_queue_stats stats;
+1 -1
drivers/net/ethernet/intel/igb/e1000_82575.c
··· 1125 1125 u32 swmask = mask; 1126 1126 u32 fwmask = mask << 16; 1127 1127 s32 ret_val = 0; 1128 - s32 i = 0, timeout = 200; /* FIXME: find real value to use here */ 1128 + s32 i = 0, timeout = 200; 1129 1129 1130 1130 while (i < timeout) { 1131 1131 if (igb_get_hw_semaphore(hw)) {
+4 -9
drivers/net/ethernet/mellanox/mlx4/main.c
··· 1829 1829 err = mlx4_dev_cap(dev, &dev_cap); 1830 1830 if (err) { 1831 1831 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); 1832 - goto err_stop_fw; 1832 + return err; 1833 1833 } 1834 1834 1835 1835 choose_steering_mode(dev, &dev_cap); ··· 1860 1860 &init_hca); 1861 1861 if ((long long) icm_size < 0) { 1862 1862 err = icm_size; 1863 - goto err_stop_fw; 1863 + return err; 1864 1864 } 1865 1865 1866 1866 dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; ··· 1874 1874 1875 1875 err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); 1876 1876 if (err) 1877 - goto err_stop_fw; 1877 + return err; 1878 1878 1879 1879 err = mlx4_INIT_HCA(dev, &init_hca); 1880 1880 if (err) { ··· 1886 1886 err = mlx4_query_func(dev, &dev_cap); 1887 1887 if (err < 0) { 1888 1888 mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n"); 1889 - goto err_stop_fw; 1889 + goto err_close; 1890 1890 } else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) { 1891 1891 dev->caps.num_eqs = dev_cap.max_eqs; 1892 1892 dev->caps.reserved_eqs = dev_cap.reserved_eqs; ··· 2006 2006 if (!mlx4_is_slave(dev)) 2007 2007 mlx4_free_icms(dev); 2008 2008 2009 - err_stop_fw: 2010 - if (!mlx4_is_slave(dev)) { 2011 - mlx4_UNMAP_FA(dev); 2012 - mlx4_free_icm(dev, priv->fw.fw_icm, 0); 2013 - } 2014 2009 return err; 2015 2010 } 2016 2011
+5 -4
drivers/net/ethernet/mellanox/mlx4/mr.c
··· 584 584 void mlx4_mr_rereg_mem_cleanup(struct mlx4_dev *dev, struct mlx4_mr *mr) 585 585 { 586 586 mlx4_mtt_cleanup(dev, &mr->mtt); 587 + mr->mtt.order = -1; 587 588 } 588 589 EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_cleanup); 589 590 ··· 594 593 { 595 594 int err; 596 595 597 - mpt_entry->start = cpu_to_be64(iova); 598 - mpt_entry->length = cpu_to_be64(size); 599 - mpt_entry->entity_size = cpu_to_be32(page_shift); 600 - 601 596 err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); 602 597 if (err) 603 598 return err; 599 + 600 + mpt_entry->start = cpu_to_be64(mr->iova); 601 + mpt_entry->length = cpu_to_be64(mr->size); 602 + mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift); 604 603 605 604 mpt_entry->pd_flags &= cpu_to_be32(MLX4_MPT_PD_MASK | 606 605 MLX4_MPT_PD_FLAG_EN_INV);
+3 -1
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
··· 4033 4033 (void)pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 4034 4034 mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd), 4035 4035 &mgp->cmd_bus, GFP_KERNEL); 4036 - if (mgp->cmd == NULL) 4036 + if (!mgp->cmd) { 4037 + status = -ENOMEM; 4037 4038 goto abort_with_enabled; 4039 + } 4038 4040 4039 4041 mgp->board_span = pci_resource_len(pdev, 0); 4040 4042 mgp->iomem_base = pci_resource_start(pdev, 0);
+3 -5
drivers/net/ethernet/qlogic/qla3xxx.c
··· 146 146 { 147 147 int i = 0; 148 148 149 - while (i < 10) { 150 - if (i) 151 - ssleep(1); 152 - 149 + do { 153 150 if (ql_sem_lock(qdev, 154 151 QL_DRVR_SEM_MASK, 155 152 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) ··· 155 158 "driver lock acquired\n"); 156 159 return 1; 157 160 } 158 - } 161 + ssleep(1); 162 + } while (++i < 10); 159 163 160 164 netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n"); 161 165 return 0;
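Recasting the qla3xxx wait as a do/while makes the first lock attempt immediate and moves the one-second sleep between attempts instead of before them. The bounded-retry shape, with a hypothetical try_lock() standing in for ql_sem_lock():

#include <linux/delay.h>
#include <linux/types.h>

static bool try_lock(void);     /* hypothetical: one acquisition attempt */

static int acquire_with_retries(void)
{
        int i = 0;

        do {
                if (try_lock())
                        return 1;       /* may succeed on the very first pass */
                ssleep(1);              /* sleep only after a failed attempt */
        } while (++i < 10);

        return 0;       /* ten attempts, one second apart: give up */
}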
+1
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
··· 2605 2605 } else { 2606 2606 dev_err(&pdev->dev, 2607 2607 "%s: failed. Please Reboot\n", __func__); 2608 + err = -ENODEV; 2608 2609 goto err_out_free_hw; 2609 2610 } 2610 2611
+8 -1
drivers/net/ethernet/renesas/sh_eth.c
··· 473 473 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | 474 474 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | 475 475 EESR_ECI, 476 + .fdr_value = 0x00000f0f, 476 477 477 478 .apr = 1, 478 479 .mpr = 1, ··· 496 495 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | 497 496 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | 498 497 EESR_ECI, 498 + .fdr_value = 0x00000f0f, 499 499 500 500 .apr = 1, 501 501 .mpr = 1, ··· 537 535 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | 538 536 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | 539 537 EESR_ECI, 538 + 539 + .trscer_err_mask = DESC_I_RINT8, 540 540 541 541 .apr = 1, 542 542 .mpr = 1, ··· 860 856 861 857 if (!cd->eesr_err_check) 862 858 cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK; 859 + 860 + if (!cd->trscer_err_mask) 861 + cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK; 863 862 } 864 863 865 864 static int sh_eth_check_reset(struct net_device *ndev) ··· 1301 1294 /* Frame recv control (enable multiple-packets per rx irq) */ 1302 1295 sh_eth_write(ndev, RMCR_RNC, RMCR); 1303 1296 1304 - sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER); 1297 + sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER); 1305 1298 1306 1299 if (mdp->cd->bculr) 1307 1300 sh_eth_write(ndev, 0x800, BCULR); /* Burst sycle set */
+5
drivers/net/ethernet/renesas/sh_eth.h
··· 369 369 DESC_I_RINT1 = 0x0001, 370 370 }; 371 371 372 + #define DEFAULT_TRSCER_ERR_MASK (DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2) 373 + 372 374 /* RPADIR */ 373 375 enum RPADIR_BIT { 374 376 RPADIR_PADS1 = 0x20000, RPADIR_PADS0 = 0x10000, ··· 471 469 /* interrupt checking mask */ 472 470 unsigned long tx_check; 473 471 unsigned long eesr_err_check; 472 + 473 + /* Error mask */ 474 + unsigned long trscer_err_mask; 474 475 475 476 /* hardware features */ 476 477 unsigned long irq_flags; /* IRQ configuration flags */
+17 -13
drivers/net/ethernet/ti/cpsw.c
··· 610 610 611 611 /* Clear all mcast from ALE */ 612 612 cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS << 613 - priv->host_port); 613 + priv->host_port, -1); 614 614 615 615 /* Flood All Unicast Packets to Host port */ 616 616 cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1); ··· 634 634 static void cpsw_ndo_set_rx_mode(struct net_device *ndev) 635 635 { 636 636 struct cpsw_priv *priv = netdev_priv(ndev); 637 + int vid; 638 + 639 + if (priv->data.dual_emac) 640 + vid = priv->slaves[priv->emac_port].port_vlan; 641 + else 642 + vid = priv->data.default_vlan; 637 643 638 644 if (ndev->flags & IFF_PROMISC) { 639 645 /* Enable promiscuous mode */ ··· 655 649 cpsw_ale_set_allmulti(priv->ale, priv->ndev->flags & IFF_ALLMULTI); 656 650 657 651 /* Clear all mcast from ALE */ 658 - cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port); 652 + cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port, 653 + vid); 659 654 660 655 if (!netdev_mc_empty(ndev)) { 661 656 struct netdev_hw_addr *ha; ··· 764 757 static irqreturn_t cpsw_interrupt(int irq, void *dev_id) 765 758 { 766 759 struct cpsw_priv *priv = dev_id; 760 + int value = irq - priv->irqs_table[0]; 761 + 762 + /* NOTICE: Ending IRQ here. The trick with the 'value' variable above 763 + * is to make sure we will always write the correct value to the EOI 764 + * register. Namely 0 for RX_THRESH Interrupt, 1 for RX Interrupt, 2 765 + * for TX Interrupt and 3 for MISC Interrupt. 766 + */ 767 + cpdma_ctlr_eoi(priv->dma, value); 767 768 768 769 cpsw_intr_disable(priv); 769 770 if (priv->irq_enabled == true) { ··· 801 786 int num_tx, num_rx; 802 787 803 788 num_tx = cpdma_chan_process(priv->txch, 128); 804 - if (num_tx) 805 - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); 806 789 807 790 num_rx = cpdma_chan_process(priv->rxch, budget); 808 791 if (num_rx < budget) { ··· 808 795 809 796 napi_complete(napi); 810 797 cpsw_intr_enable(priv); 811 - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); 812 798 prim_cpsw = cpsw_get_slave_priv(priv, 0); 813 799 if (prim_cpsw->irq_enabled == false) { 814 800 prim_cpsw->irq_enabled = true; ··· 1322 1310 napi_enable(&priv->napi); 1323 1311 cpdma_ctlr_start(priv->dma); 1324 1312 cpsw_intr_enable(priv); 1325 - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); 1326 - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); 1327 1313 1328 1314 prim_cpsw = cpsw_get_slave_priv(priv, 0); 1329 1315 if (prim_cpsw->irq_enabled == false) { ··· 1588 1578 cpdma_chan_start(priv->txch); 1589 1579 cpdma_ctlr_int_ctrl(priv->dma, true); 1590 1580 cpsw_intr_enable(priv); 1591 - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); 1592 - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); 1593 - 1594 1581 } 1595 1582 1596 1583 static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) ··· 1627 1620 cpsw_interrupt(ndev->irq, priv); 1628 1621 cpdma_ctlr_int_ctrl(priv->dma, true); 1629 1622 cpsw_intr_enable(priv); 1630 - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); 1631 - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); 1632 - 1633 1623 } 1634 1624 #endif 1635 1625
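Issuing the EOI at the top of the hard-IRQ handler, with the code computed as the IRQ's offset into irqs_table[], works because the four lines were requested consecutively in hardware order: 0 RX_THRESH, 1 RX, 2 TX, 3 MISC. A sketch of the mapping (mydrv_* names hypothetical):

#include <linux/interrupt.h>

struct mydrv_priv {
        int irqs_table[4];      /* Linux IRQ numbers, in hardware order */
        /* ... */
};

static void mydrv_write_eoi(struct mydrv_priv *priv, int code); /* hypothetical */

static irqreturn_t mydrv_interrupt(int irq, void *dev_id)
{
        struct mydrv_priv *priv = dev_id;

        /* The offset into the table doubles as the EOI encoding, so one
         * handler can acknowledge whichever line actually fired. */
        mydrv_write_eoi(priv, irq - priv->irqs_table[0]);

        /* ... mask the sources and schedule NAPI ... */
        return IRQ_HANDLED;
}

Acknowledging here rather than at the end of the poll loop is what lets the diff delete the cpdma_ctlr_eoi() calls scattered through the open, poll, and timeout paths.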
+9 -1
drivers/net/ethernet/ti/cpsw_ale.c
··· 234 234 cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE); 235 235 } 236 236 237 - int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask) 237 + int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid) 238 238 { 239 239 u32 ale_entry[ALE_ENTRY_WORDS]; 240 240 int ret, idx; ··· 243 243 cpsw_ale_read(ale, idx, ale_entry); 244 244 ret = cpsw_ale_get_entry_type(ale_entry); 245 245 if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR) 246 + continue; 247 + 248 + /* if the vid passed is -1, remove all multicast entries from 249 + * the table irrespective of vlan id; if a valid vlan id is 250 + * passed, remove only multicast entries added for that vlan id 251 + * and skip entries whose vlan id doesn't match. 252 + */ 253 + if (vid != -1 && cpsw_ale_get_vlan_id(ale_entry) != vid) 246 254 continue; 247 255 248 256 if (cpsw_ale_get_mcast(ale_entry)) {
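With the new argument, -1 acts as a wildcard VLAN: cpsw_ndo_set_rx_mode() passes the slave's port_vlan in dual-EMAC mode or the default VLAN otherwise, so a flush on one port no longer wipes the other VLAN's multicast entries. A sketch of the caller side, assuming only the fields shown in the cpsw diff above:

/* Pick the VLAN whose multicast entries should be flushed. */
static int flush_vid(struct cpsw_priv *priv)
{
        if (priv->data.dual_emac)
                return priv->slaves[priv->emac_port].port_vlan;
        return priv->data.default_vlan;
}

/* Usage:
 *   cpsw_ale_flush_multicast(priv->ale, port_mask, flush_vid(priv));
 * or pass -1 to flush multicast entries across every VLAN. */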
+1 -1
drivers/net/ethernet/ti/cpsw_ale.h
··· 92 92 93 93 int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout); 94 94 int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask); 95 - int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask); 95 + int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid); 96 96 int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, 97 97 int flags, u16 vid); 98 98 int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
+2
drivers/net/ethernet/xilinx/ll_temac_main.c
··· 1043 1043 lp->regs = of_iomap(op->dev.of_node, 0); 1044 1044 if (!lp->regs) { 1045 1045 dev_err(&op->dev, "could not map temac regs.\n"); 1046 + rc = -ENOMEM; 1046 1047 goto nodev; 1047 1048 } 1048 1049 ··· 1063 1062 np = of_parse_phandle(op->dev.of_node, "llink-connected", 0); 1064 1063 if (!np) { 1065 1064 dev_err(&op->dev, "could not find DMA node\n"); 1065 + rc = -ENODEV; 1066 1066 goto err_iounmap; 1067 1067 } 1068 1068
+2
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
··· 1501 1501 lp->regs = of_iomap(op->dev.of_node, 0); 1502 1502 if (!lp->regs) { 1503 1503 dev_err(&op->dev, "could not map Axi Ethernet regs.\n"); 1504 + ret = -ENOMEM; 1504 1505 goto nodev; 1505 1506 } 1506 1507 /* Setup checksum offload, but default to off if not specified */ ··· 1564 1563 np = of_parse_phandle(op->dev.of_node, "axistream-connected", 0); 1565 1564 if (!np) { 1566 1565 dev_err(&op->dev, "could not find DMA node\n"); 1566 + ret = -ENODEV; 1567 1567 goto err_iounmap; 1568 1568 } 1569 1569 lp->dma_regs = of_iomap(np, 0);
+1
drivers/net/ethernet/xilinx/xilinx_emaclite.c
··· 1109 1109 res = platform_get_resource(ofdev, IORESOURCE_IRQ, 0); 1110 1110 if (!res) { 1111 1111 dev_err(dev, "no IRQ found\n"); 1112 + rc = -ENXIO; 1112 1113 goto error; 1113 1114 } 1114 1115
+14 -2
drivers/net/team/team.c
··· 629 629 static void team_notify_peers_work(struct work_struct *work) 630 630 { 631 631 struct team *team; 632 + int val; 632 633 633 634 team = container_of(work, struct team, notify_peers.dw.work); 634 635 ··· 637 636 schedule_delayed_work(&team->notify_peers.dw, 0); 638 637 return; 639 638 } 639 + val = atomic_dec_if_positive(&team->notify_peers.count_pending); 640 + if (val < 0) { 641 + rtnl_unlock(); 642 + return; 643 + } 640 644 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev); 641 645 rtnl_unlock(); 642 - if (!atomic_dec_and_test(&team->notify_peers.count_pending)) 646 + if (val) 643 647 schedule_delayed_work(&team->notify_peers.dw, 644 648 msecs_to_jiffies(team->notify_peers.interval)); 645 649 } ··· 675 669 static void team_mcast_rejoin_work(struct work_struct *work) 676 670 { 677 671 struct team *team; 672 + int val; 678 673 679 674 team = container_of(work, struct team, mcast_rejoin.dw.work); 680 675 ··· 683 676 schedule_delayed_work(&team->mcast_rejoin.dw, 0); 684 677 return; 685 678 } 679 + val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending); 680 + if (val < 0) { 681 + rtnl_unlock(); 682 + return; 683 + } 686 684 call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev); 687 685 rtnl_unlock(); 688 - if (!atomic_dec_and_test(&team->mcast_rejoin.count_pending)) 686 + if (val) 689 687 schedule_delayed_work(&team->mcast_rejoin.dw, 690 688 msecs_to_jiffies(team->mcast_rejoin.interval)); 691 689 }
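Switching both work handlers to atomic_dec_if_positive() closes a rearm race: if count_pending is reset while the delayed work is already queued, the handler now sees a negative return and bails out instead of underflowing the counter and rescheduling forever. The claim/remaining pattern in isolation:

#include <linux/atomic.h>

/* Returns true when this invocation owns one notification round;
 * *remaining tells the caller whether to reschedule afterwards. */
static bool claim_round(atomic_t *count_pending, int *remaining)
{
        int val = atomic_dec_if_positive(count_pending);

        if (val < 0)
                return false;   /* counter already drained or cancelled */
        *remaining = val;       /* > 0 means more rounds after notifying */
        return true;
}

atomic_dec_if_positive() never takes the counter below zero, which is exactly the property atomic_dec_and_test() lacked here.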
+1 -1
drivers/net/usb/kaweth.c
··· 1276 1276 awd.done = 0; 1277 1277 1278 1278 urb->context = &awd; 1279 - status = usb_submit_urb(urb, GFP_NOIO); 1279 + status = usb_submit_urb(urb, GFP_ATOMIC); 1280 1280 if (status) { 1281 1281 // something went wrong 1282 1282 usb_free_urb(urb);
+7 -3
drivers/net/usb/qmi_wwan.c
··· 56 56 /* default ethernet address used by the modem */ 57 57 static const u8 default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3}; 58 58 59 + static const u8 buggy_fw_addr[ETH_ALEN] = {0x00, 0xa0, 0xc6, 0x00, 0x00, 0x00}; 60 + 59 61 /* Make up an ethernet header if the packet doesn't have one. 60 62 * 61 63 * A firmware bug common among several devices causes them to send raw ··· 334 332 usb_driver_release_interface(driver, info->data); 335 333 } 336 334 337 - /* Never use the same address on both ends of the link, even 338 - * if the buggy firmware told us to. 335 + /* Never use the same address on both ends of the link, even if the 336 + * buggy firmware told us to. Or, if the device is assigned the 337 + * well-known buggy firmware MAC address, replace it with a random one. 339 338 */ 340 - if (ether_addr_equal(dev->net->dev_addr, default_modem_addr)) 339 + if (ether_addr_equal(dev->net->dev_addr, default_modem_addr) || 340 + ether_addr_equal(dev->net->dev_addr, buggy_fw_addr)) 341 341 eth_hw_addr_random(dev->net); 342 342 343 343 /* make MAC addr easily distinguishable from an IP header */
+17
drivers/net/usb/r8152.c
··· 1897 1897 netif_wake_queue(netdev); 1898 1898 } 1899 1899 1900 + static netdev_features_t 1901 + rtl8152_features_check(struct sk_buff *skb, struct net_device *dev, 1902 + netdev_features_t features) 1903 + { 1904 + u32 mss = skb_shinfo(skb)->gso_size; 1905 + int max_offset = mss ? GTTCPHO_MAX : TCPHO_MAX; 1906 + int offset = skb_transport_offset(skb); 1907 + 1908 + if ((mss || skb->ip_summed == CHECKSUM_PARTIAL) && offset > max_offset) 1909 + features &= ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK); 1910 + else if ((skb->len + sizeof(struct tx_desc)) > agg_buf_sz) 1911 + features &= ~NETIF_F_GSO_MASK; 1912 + 1913 + return features; 1914 + } 1915 + 1900 1916 static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb, 1901 1917 struct net_device *netdev) 1902 1918 { ··· 3722 3706 .ndo_set_mac_address = rtl8152_set_mac_address, 3723 3707 .ndo_change_mtu = rtl8152_change_mtu, 3724 3708 .ndo_validate_addr = eth_validate_addr, 3709 + .ndo_features_check = rtl8152_features_check, 3725 3710 }; 3726 3711 3727 3712 static void r8152b_get_version(struct r8152 *tp)
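.ndo_features_check lets a driver veto offloads per packet: here, checksum and GSO bits are dropped when the transport header sits beyond what the hardware can address, and GSO alone when the frame plus its tx descriptor would overflow the aggregation buffer. The shape of such a hook, with a hypothetical device limit:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define MY_HW_MAX_HDR_OFFSET    127     /* hypothetical addressing limit */

static netdev_features_t
mydrv_features_check(struct sk_buff *skb, struct net_device *dev,
                     netdev_features_t features)
{
        /* Per-skb decision: strip offload bits the hardware cannot
         * honour for *this* packet; the stack then checksums or
         * segments in software for this frame only. */
        if (skb_transport_offset(skb) > MY_HW_MAX_HDR_OFFSET)
                features &= ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);

        return features;
}

Clearing bits per packet beats disabling the offloads globally: every conforming frame still gets the fast path.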
+3 -3
drivers/net/wireless/iwlwifi/iwl-7000.c
··· 69 69 #include "iwl-agn-hw.h" 70 70 71 71 /* Highest firmware API version supported */ 72 - #define IWL7260_UCODE_API_MAX 10 73 - #define IWL3160_UCODE_API_MAX 10 72 + #define IWL7260_UCODE_API_MAX 12 73 + #define IWL3160_UCODE_API_MAX 12 74 74 75 75 /* Oldest version we won't warn about */ 76 76 #define IWL7260_UCODE_API_OK 10 ··· 105 105 #define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode" 106 106 107 107 #define IWL7265D_FW_PRE "iwlwifi-7265D-" 108 - #define IWL7265D_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode" 108 + #define IWL7265D_MODULE_FIRMWARE(api) IWL7265D_FW_PRE __stringify(api) ".ucode" 109 109 110 110 #define NVM_HW_SECTION_NUM_FAMILY_7000 0 111 111
+1 -1
drivers/net/wireless/iwlwifi/iwl-8000.c
··· 69 69 #include "iwl-agn-hw.h" 70 70 71 71 /* Highest firmware API version supported */ 72 - #define IWL8000_UCODE_API_MAX 10 72 + #define IWL8000_UCODE_API_MAX 12 73 73 74 74 /* Oldest version we won't warn about */ 75 75 #define IWL8000_UCODE_API_OK 10
+4
drivers/net/wireless/iwlwifi/iwl-fw-file.h
··· 243 243 * @IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF: ucode supports disabling dummy notif. 244 244 * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time 245 245 * longer than the passive one, which is essential for fragmented scan. 246 + * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command, 247 + * regardless of the band or the number of the probes. FW will calculate 248 + * the actual dwell time. 246 249 */ 247 250 enum iwl_ucode_tlv_api { 248 251 IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID = BIT(0), ··· 256 253 IWL_UCODE_TLV_API_LMAC_SCAN = BIT(6), 257 254 IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF = BIT(7), 258 255 IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8), 256 + IWL_UCODE_TLV_API_BASIC_DWELL = BIT(13), 259 257 }; 260 258 261 259 /**
+2
drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
··· 672 672 * @IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED: all passive scans will be fragmented 673 673 * @IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED: insert WFA vendor-specific TPC report 674 674 * and DS parameter set IEs into probe requests. 675 + * @IWL_MVM_LMAC_SCAN_FLAG_MATCH: Send match found notification on matches 675 676 */ 676 677 enum iwl_mvm_lmac_scan_flags { 677 678 IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL = BIT(0), ··· 682 681 IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS = BIT(4), 683 682 IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED = BIT(5), 684 683 IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED = BIT(6), 684 + IWL_MVM_LMAC_SCAN_FLAG_MATCH = BIT(9), 685 685 }; 686 686 687 687 enum iwl_scan_priority {
+14 -5
drivers/net/wireless/iwlwifi/mvm/scan.c
··· 171 171 * already included in the probe template, so we need to set only 172 172 * req->n_ssids - 1 bits in addition to the first bit. 173 173 */ 174 - static u16 iwl_mvm_get_active_dwell(enum ieee80211_band band, int n_ssids) 174 + static u16 iwl_mvm_get_active_dwell(struct iwl_mvm *mvm, 175 + enum ieee80211_band band, int n_ssids) 175 176 { 177 + if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL) 178 + return 10; 176 179 if (band == IEEE80211_BAND_2GHZ) 177 180 return 20 + 3 * (n_ssids + 1); 178 181 return 10 + 2 * (n_ssids + 1); 179 182 } 180 183 181 - static u16 iwl_mvm_get_passive_dwell(enum ieee80211_band band) 184 + static u16 iwl_mvm_get_passive_dwell(struct iwl_mvm *mvm, 185 + enum ieee80211_band band) 182 186 { 187 + if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL) 188 + return 110; 183 189 return band == IEEE80211_BAND_2GHZ ? 100 + 20 : 100 + 10; 184 190 } 185 191 ··· 337 331 */ 338 332 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { 339 333 u32 passive_dwell = 340 - iwl_mvm_get_passive_dwell(IEEE80211_BAND_2GHZ); 334 + iwl_mvm_get_passive_dwell(mvm, 335 + IEEE80211_BAND_2GHZ); 341 336 params->max_out_time = passive_dwell; 342 337 } else { 343 338 params->passive_fragmented = true; ··· 355 348 params->dwell[band].passive = frag_passive_dwell; 356 349 else 357 350 params->dwell[band].passive = 358 - iwl_mvm_get_passive_dwell(band); 359 - params->dwell[band].active = iwl_mvm_get_active_dwell(band, 351 + iwl_mvm_get_passive_dwell(mvm, band); 352 + params->dwell[band].active = iwl_mvm_get_active_dwell(mvm, band, 360 353 n_ssids); 361 354 } 362 355 } ··· 1455 1448 1456 1449 if (iwl_mvm_scan_pass_all(mvm, req)) 1457 1450 flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL; 1451 + else 1452 + flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH; 1458 1453 1459 1454 if (req->n_ssids == 1 && req->ssids[0].ssid_len != 0) 1460 1455 flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
+6 -2
drivers/net/wireless/iwlwifi/mvm/tx.c
··· 108 108 tx_flags &= ~TX_CMD_FLG_SEQ_CTL; 109 109 } 110 110 111 - /* tid_tspec will default to 0 = BE when QOS isn't enabled */ 112 - ac = tid_to_mac80211_ac[tx_cmd->tid_tspec]; 111 + /* Default to 0 (BE) when tid_spec is set to IWL_TID_NON_QOS */ 112 + if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT) 113 + ac = tid_to_mac80211_ac[tx_cmd->tid_tspec]; 114 + else 115 + ac = tid_to_mac80211_ac[0]; 116 + 113 117 tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) << 114 118 TX_CMD_FLG_BT_PRIO_POS; 115 119
+1 -1
drivers/net/wireless/iwlwifi/mvm/utils.c
··· 665 665 if (num_of_ant(mvm->fw->valid_rx_ant) == 1) 666 666 return false; 667 667 668 - if (!mvm->cfg->rx_with_siso_diversity) 668 + if (mvm->cfg->rx_with_siso_diversity) 669 669 return false; 670 670 671 671 ieee80211_iterate_active_interfaces_atomic(
+3 -1
drivers/net/wireless/iwlwifi/pcie/drv.c
··· 527 527 else if (cfg == &iwl7265_n_cfg) 528 528 cfg_7265d = &iwl7265d_n_cfg; 529 529 if (cfg_7265d && 530 - (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) 530 + (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) { 531 531 cfg = cfg_7265d; 532 + iwl_trans->cfg = cfg_7265d; 533 + } 532 534 #endif 533 535 534 536 pci_set_drvdata(pdev, iwl_trans);
+25 -9
drivers/net/wireless/rtlwifi/pci.c
··· 666 666 } 667 667 668 668 static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw, 669 - u8 *entry, int rxring_idx, int desc_idx) 669 + struct sk_buff *new_skb, u8 *entry, 670 + int rxring_idx, int desc_idx) 670 671 { 671 672 struct rtl_priv *rtlpriv = rtl_priv(hw); 672 673 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); ··· 675 674 u8 tmp_one = 1; 676 675 struct sk_buff *skb; 677 676 677 + if (likely(new_skb)) { 678 + skb = new_skb; 679 + goto remap; 680 + } 678 681 skb = dev_alloc_skb(rtlpci->rxbuffersize); 679 682 if (!skb) 680 683 return 0; 681 - rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb; 682 684 685 + remap: 683 686 /* just set skb->cb to mapping addr for pci_unmap_single use */ 684 687 *((dma_addr_t *)skb->cb) = 685 688 pci_map_single(rtlpci->pdev, skb_tail_pointer(skb), ··· 691 686 bufferaddress = *((dma_addr_t *)skb->cb); 692 687 if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress)) 693 688 return 0; 689 + rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb; 694 690 if (rtlpriv->use_new_trx_flow) { 695 691 rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false, 696 692 HW_DESC_RX_PREPARE, ··· 787 781 /*rx pkt */ 788 782 struct sk_buff *skb = rtlpci->rx_ring[rxring_idx].rx_buf[ 789 783 rtlpci->rx_ring[rxring_idx].idx]; 784 + struct sk_buff *new_skb; 790 785 791 786 if (rtlpriv->use_new_trx_flow) { 792 787 rx_remained_cnt = ··· 814 807 pci_unmap_single(rtlpci->pdev, *((dma_addr_t *)skb->cb), 815 808 rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE); 816 809 810 + /* get a new skb - if fail, old one will be reused */ 811 + new_skb = dev_alloc_skb(rtlpci->rxbuffersize); 812 + if (unlikely(!new_skb)) { 813 + pr_err("Allocation of new skb failed in %s\n", 814 + __func__); 815 + goto no_new; 816 + } 817 817 if (rtlpriv->use_new_trx_flow) { 818 818 buffer_desc = 819 819 &rtlpci->rx_ring[rxring_idx].buffer_desc ··· 925 911 schedule_work(&rtlpriv->works.lps_change_work); 926 912 } 927 913 end: 914 + skb = new_skb; 915 + no_new: 928 916 if (rtlpriv->use_new_trx_flow) { 929 - _rtl_pci_init_one_rxdesc(hw, (u8 *)buffer_desc, 917 + _rtl_pci_init_one_rxdesc(hw, skb, (u8 *)buffer_desc, 930 918 rxring_idx, 931 - rtlpci->rx_ring[rxring_idx].idx); 932 - } else { 933 - _rtl_pci_init_one_rxdesc(hw, (u8 *)pdesc, rxring_idx, 934 919 rtlpci->rx_ring[rxring_idx].idx); 935 - 920 + } else { 921 + _rtl_pci_init_one_rxdesc(hw, skb, (u8 *)pdesc, 922 + rxring_idx, 923 + rtlpci->rx_ring[rxring_idx].idx); 936 924 if (rtlpci->rx_ring[rxring_idx].idx == 937 925 rtlpci->rxringcount - 1) 938 926 rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, ··· 1323 1307 rtlpci->rx_ring[rxring_idx].idx = 0; 1324 1308 for (i = 0; i < rtlpci->rxringcount; i++) { 1325 1309 entry = &rtlpci->rx_ring[rxring_idx].buffer_desc[i]; 1326 - if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry, 1310 + if (!_rtl_pci_init_one_rxdesc(hw, NULL, (u8 *)entry, 1327 1311 rxring_idx, i)) 1328 1312 return -ENOMEM; 1329 1313 } ··· 1348 1332 1349 1333 for (i = 0; i < rtlpci->rxringcount; i++) { 1350 1334 entry = &rtlpci->rx_ring[rxring_idx].desc[i]; 1351 - if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry, 1335 + if (!_rtl_pci_init_one_rxdesc(hw, NULL, (u8 *)entry, 1352 1336 rxring_idx, i)) 1353 1337 return -ENOMEM; 1354 1338 }
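Allocating the replacement skb before the old one leaves the ring is what makes this RX path safe against allocation failure: on failure the driver jumps past the indication code and remaps the old buffer, dropping one packet but never emptying a descriptor. The recycle decision, with DMA mapping elided for brevity:

#include <linux/skbuff.h>

/* Refill one RX slot. Returns the skb to indicate up the stack, or
 * NULL when allocation failed and the old buffer was recycled. */
static struct sk_buff *refill_slot(struct sk_buff **slot, unsigned int size)
{
        struct sk_buff *old = *slot;
        struct sk_buff *new_skb = dev_alloc_skb(size);

        if (unlikely(!new_skb))
                return NULL;    /* keep 'old' in the ring; drop this packet */

        *slot = new_skb;        /* ring owns the fresh buffer */
        return old;             /* caller processes and frees this one */
}

The _rtl_pci_init_one_rxdesc() change supports the same scheme: handed a non-NULL skb it skips allocation and just remaps, so initial ring fill and per-packet refill share one code path.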
+1
drivers/net/xen-netback/xenbus.c
··· 737 737 } 738 738 739 739 queue->remaining_credit = credit_bytes; 740 + queue->credit_usec = credit_usec; 740 741 741 742 err = connect_rings(be, queue); 742 743 if (err) {
+42 -29
drivers/net/xen-netfront.c
··· 88 88 #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3) 89 89 90 90 struct netfront_stats { 91 - u64 rx_packets; 92 - u64 tx_packets; 93 - u64 rx_bytes; 94 - u64 tx_bytes; 91 + u64 packets; 92 + u64 bytes; 95 93 struct u64_stats_sync syncp; 96 94 }; 97 95 ··· 158 160 struct netfront_queue *queues; 159 161 160 162 /* Statistics */ 161 - struct netfront_stats __percpu *stats; 163 + struct netfront_stats __percpu *rx_stats; 164 + struct netfront_stats __percpu *tx_stats; 162 165 163 166 atomic_t rx_gso_checksum_fixup; 164 167 }; ··· 564 565 { 565 566 unsigned short id; 566 567 struct netfront_info *np = netdev_priv(dev); 567 - struct netfront_stats *stats = this_cpu_ptr(np->stats); 568 + struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats); 568 569 struct xen_netif_tx_request *tx; 569 570 char *data = skb->data; 570 571 RING_IDX i; ··· 671 672 if (notify) 672 673 notify_remote_via_irq(queue->tx_irq); 673 674 674 - u64_stats_update_begin(&stats->syncp); 675 - stats->tx_bytes += skb->len; 676 - stats->tx_packets++; 677 - u64_stats_update_end(&stats->syncp); 675 + u64_stats_update_begin(&tx_stats->syncp); 676 + tx_stats->bytes += skb->len; 677 + tx_stats->packets++; 678 + u64_stats_update_end(&tx_stats->syncp); 678 679 679 680 /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */ 680 681 xennet_tx_buf_gc(queue); ··· 930 931 static int handle_incoming_queue(struct netfront_queue *queue, 931 932 struct sk_buff_head *rxq) 932 933 { 933 - struct netfront_stats *stats = this_cpu_ptr(queue->info->stats); 934 + struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats); 934 935 int packets_dropped = 0; 935 936 struct sk_buff *skb; 936 937 ··· 951 952 continue; 952 953 } 953 954 954 - u64_stats_update_begin(&stats->syncp); 955 - stats->rx_packets++; 956 - stats->rx_bytes += skb->len; 957 - u64_stats_update_end(&stats->syncp); 955 + u64_stats_update_begin(&rx_stats->syncp); 956 + rx_stats->packets++; 957 + rx_stats->bytes += skb->len; 958 + u64_stats_update_end(&rx_stats->syncp); 958 959 959 960 /* Pass it up. 
*/ 960 961 napi_gro_receive(&queue->napi, skb); ··· 1078 1079 int cpu; 1079 1080 1080 1081 for_each_possible_cpu(cpu) { 1081 - struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu); 1082 + struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu); 1083 + struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu); 1082 1084 u64 rx_packets, rx_bytes, tx_packets, tx_bytes; 1083 1085 unsigned int start; 1084 1086 1085 1087 do { 1086 - start = u64_stats_fetch_begin_irq(&stats->syncp); 1088 + start = u64_stats_fetch_begin_irq(&tx_stats->syncp); 1089 + tx_packets = tx_stats->packets; 1090 + tx_bytes = tx_stats->bytes; 1091 + } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start)); 1087 1092 1088 - rx_packets = stats->rx_packets; 1089 - tx_packets = stats->tx_packets; 1090 - rx_bytes = stats->rx_bytes; 1091 - tx_bytes = stats->tx_bytes; 1092 - } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); 1093 + do { 1094 + start = u64_stats_fetch_begin_irq(&rx_stats->syncp); 1095 + rx_packets = rx_stats->packets; 1096 + rx_bytes = rx_stats->bytes; 1097 + } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start)); 1093 1098 1094 1099 tot->rx_packets += rx_packets; 1095 1100 tot->tx_packets += tx_packets; ··· 1278 1275 #endif 1279 1276 }; 1280 1277 1278 + static void xennet_free_netdev(struct net_device *netdev) 1279 + { 1280 + struct netfront_info *np = netdev_priv(netdev); 1281 + 1282 + free_percpu(np->rx_stats); 1283 + free_percpu(np->tx_stats); 1284 + free_netdev(netdev); 1285 + } 1286 + 1281 1287 static struct net_device *xennet_create_dev(struct xenbus_device *dev) 1282 1288 { 1283 1289 int err; ··· 1307 1295 np->queues = NULL; 1308 1296 1309 1297 err = -ENOMEM; 1310 - np->stats = netdev_alloc_pcpu_stats(struct netfront_stats); 1311 - if (np->stats == NULL) 1298 + np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats); 1299 + if (np->rx_stats == NULL) 1300 + goto exit; 1301 + np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats); 1302 + if (np->tx_stats == NULL) 1312 1303 goto exit; 1313 1304 1314 1305 netdev->netdev_ops = &xennet_netdev_ops; ··· 1342 1327 return netdev; 1343 1328 1344 1329 exit: 1345 - free_netdev(netdev); 1330 + xennet_free_netdev(netdev); 1346 1331 return ERR_PTR(err); 1347 1332 } 1348 1333 ··· 1384 1369 return 0; 1385 1370 1386 1371 fail: 1387 - free_netdev(netdev); 1372 + xennet_free_netdev(netdev); 1388 1373 dev_set_drvdata(&dev->dev, NULL); 1389 1374 return err; 1390 1375 } ··· 2204 2189 info->queues = NULL; 2205 2190 } 2206 2191 2207 - free_percpu(info->stats); 2208 - 2209 - free_netdev(info->netdev); 2192 + xennet_free_netdev(info->netdev); 2210 2193 2211 2194 return 0; 2212 2195 }
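Splitting the per-cpu counters into independent rx and tx structs means the RX softirq and the TX path no longer share one u64_stats_sync sequence counter; since the two can update stats concurrently, a shared syncp risked writer-vs-writer corruption on 32-bit. Readers now run the fetch/retry loop once per struct, as in this canonical sketch:

#include <linux/u64_stats_sync.h>

struct dir_stats {
        u64 packets;
        u64 bytes;
        struct u64_stats_sync syncp;
};

static void read_dir_stats(const struct dir_stats *s,
                           u64 *packets, u64 *bytes)
{
        unsigned int start;

        /* Retry until no writer interleaved with our two reads. */
        do {
                start = u64_stats_fetch_begin_irq(&s->syncp);
                *packets = s->packets;
                *bytes = s->bytes;
        } while (u64_stats_fetch_retry_irq(&s->syncp, start));
}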
+54 -3
drivers/pinctrl/pinctrl-rockchip.c
··· 89 89 * @reg_pull: optional separate register for additional pull settings 90 90 * @clk: clock of the gpio bank 91 91 * @irq: interrupt of the gpio bank 92 + * @saved_enables: Saved content of GPIO_INTEN at suspend time. 92 93 * @pin_base: first pin number 93 94 * @nr_pins: number of pins in this bank 94 95 * @name: name of the bank ··· 108 107 struct regmap *regmap_pull; 109 108 struct clk *clk; 110 109 int irq; 110 + u32 saved_enables; 111 111 u32 pin_base; 112 112 u8 nr_pins; 113 113 char *name; ··· 1545 1543 return 0; 1546 1544 } 1547 1545 1546 + static void rockchip_irq_suspend(struct irq_data *d) 1547 + { 1548 + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 1549 + struct rockchip_pin_bank *bank = gc->private; 1550 + 1551 + bank->saved_enables = irq_reg_readl(gc, GPIO_INTEN); 1552 + irq_reg_writel(gc, gc->wake_active, GPIO_INTEN); 1553 + } 1554 + 1555 + static void rockchip_irq_resume(struct irq_data *d) 1556 + { 1557 + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 1558 + struct rockchip_pin_bank *bank = gc->private; 1559 + 1560 + irq_reg_writel(gc, bank->saved_enables, GPIO_INTEN); 1561 + } 1562 + 1563 + static void rockchip_irq_disable(struct irq_data *d) 1564 + { 1565 + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 1566 + u32 val; 1567 + 1568 + irq_gc_lock(gc); 1569 + 1570 + val = irq_reg_readl(gc, GPIO_INTEN); 1571 + val &= ~d->mask; 1572 + irq_reg_writel(gc, val, GPIO_INTEN); 1573 + 1574 + irq_gc_unlock(gc); 1575 + } 1576 + 1577 + static void rockchip_irq_enable(struct irq_data *d) 1578 + { 1579 + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 1580 + u32 val; 1581 + 1582 + irq_gc_lock(gc); 1583 + 1584 + val = irq_reg_readl(gc, GPIO_INTEN); 1585 + val |= d->mask; 1586 + irq_reg_writel(gc, val, GPIO_INTEN); 1587 + 1588 + irq_gc_unlock(gc); 1589 + } 1590 + 1548 1591 static int rockchip_interrupts_register(struct platform_device *pdev, 1549 1592 struct rockchip_pinctrl *info) 1550 1593 { ··· 1628 1581 gc = irq_get_domain_generic_chip(bank->domain, 0); 1629 1582 gc->reg_base = bank->reg_base; 1630 1583 gc->private = bank; 1631 - gc->chip_types[0].regs.mask = GPIO_INTEN; 1584 + gc->chip_types[0].regs.mask = GPIO_INTMASK; 1632 1585 gc->chip_types[0].regs.ack = GPIO_PORTS_EOI; 1633 1586 gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit; 1634 - gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit; 1635 - gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit; 1587 + gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit; 1588 + gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit; 1589 + gc->chip_types[0].chip.irq_enable = rockchip_irq_enable; 1590 + gc->chip_types[0].chip.irq_disable = rockchip_irq_disable; 1636 1591 gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake; 1592 + gc->chip_types[0].chip.irq_suspend = rockchip_irq_suspend; 1593 + gc->chip_types[0].chip.irq_resume = rockchip_irq_resume; 1637 1594 gc->chip_types[0].chip.irq_set_type = rockchip_irq_set_type; 1638 1595 gc->wake_enabled = IRQ_MSK(bank->nr_pins); 1639 1596
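Two independent fixes land in the rockchip driver: mask/unmask now drive GPIO_INTMASK, leaving GPIO_INTEN to the new enable/disable callbacks (a masked-but-enabled line can still latch and wake), and across suspend only gc->wake_active is programmed into GPIO_INTEN after saving the previous contents. The save/restore half distilled, with my_bank hypothetical and the register offset a placeholder:

#include <linux/irq.h>
#include <linux/types.h>

#define GPIO_INTEN 0x30 /* example offset; the driver defines the real one */

struct my_bank {
        u32 saved_enables;
};

static void my_irq_suspend(struct irq_data *d)
{
        struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
        struct my_bank *bank = gc->private;

        /* Remember the full enable set, then arm only wake sources. */
        bank->saved_enables = irq_reg_readl(gc, GPIO_INTEN);
        irq_reg_writel(gc, gc->wake_active, GPIO_INTEN);
}

static void my_irq_resume(struct irq_data *d)
{
        struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
        struct my_bank *bank = gc->private;

        irq_reg_writel(gc, bank->saved_enables, GPIO_INTEN);
}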
+4 -1
drivers/pinctrl/pinctrl-st.c
··· 1012 1012 struct seq_file *s, unsigned pin_id) 1013 1013 { 1014 1014 unsigned long config; 1015 - st_pinconf_get(pctldev, pin_id, &config); 1016 1015 1016 + mutex_unlock(&pctldev->mutex); 1017 + st_pinconf_get(pctldev, pin_id, &config); 1018 + mutex_lock(&pctldev->mutex); 1017 1019 seq_printf(s, "[OE:%ld,PU:%ld,OD:%ld]\n" 1018 1020 "\t\t[retime:%ld,invclk:%ld,clknotdat:%ld," 1019 1021 "de:%ld,rt-clk:%ld,rt-delay:%ld]", ··· 1445 1443 1446 1444 static struct irq_chip st_gpio_irqchip = { 1447 1445 .name = "GPIO", 1446 + .irq_disable = st_gpio_irq_mask, 1448 1447 .irq_mask = st_gpio_irq_mask, 1449 1448 .irq_unmask = st_gpio_irq_unmask, 1450 1449 .irq_set_type = st_gpio_irq_set_type,
+7 -3
drivers/s390/crypto/ap_bus.c
··· 1163 1163 */ 1164 1164 static inline int ap_test_config_domain(unsigned int domain) 1165 1165 { 1166 - if (!ap_configuration) 1167 - return 1; 1168 - return ap_test_config(ap_configuration->aqm, domain); 1166 + if (!ap_configuration) /* QCI not supported */ 1167 + if (domain < 16) 1168 + return 1; /* then domains 0...15 are configured */ 1169 + else 1170 + return 0; 1171 + else 1172 + return ap_test_config(ap_configuration->aqm, domain); 1169 1173 } 1170 1174 1171 1175 /**
+3 -1
drivers/scsi/qla2xxx/qla_os.c
··· 734 734 * Return target busy if we've received a non-zero retry_delay_timer 735 735 * in a FCP_RSP. 736 736 */ 737 - if (time_after(jiffies, fcport->retry_delay_timestamp)) 737 + if (fcport->retry_delay_timestamp == 0) { 738 + /* retry delay not set */ 739 + } else if (time_after(jiffies, fcport->retry_delay_timestamp)) 738 740 fcport->retry_delay_timestamp = 0; 739 741 else 740 742 goto qc24_target_busy;
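The qla2xxx check adds a sentinel test: retry_delay_timestamp == 0 means "no delay requested", and it must be handled before time_after(), since a zero timestamp compared against a wrapped jiffies value could otherwise look like a live target-busy window. The decision as a helper:

#include <linux/jiffies.h>
#include <linux/types.h>

/* Returns true if the command should be failed with "target busy". */
static bool in_retry_delay(unsigned long *stamp)
{
        if (*stamp == 0)
                return false;           /* no delay armed */
        if (time_after(jiffies, *stamp)) {
                *stamp = 0;             /* window expired: disarm it */
                return false;
        }
        return true;                    /* still inside the delay window */
}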
+6 -6
drivers/target/iscsi/iscsi_target.c
··· 2027 2027 goto reject; 2028 2028 } 2029 2029 if (!strncmp("=All", text_ptr, 4)) { 2030 - cmd->cmd_flags |= IFC_SENDTARGETS_ALL; 2030 + cmd->cmd_flags |= ICF_SENDTARGETS_ALL; 2031 2031 } else if (!strncmp("=iqn.", text_ptr, 5) || 2032 2032 !strncmp("=eui.", text_ptr, 5)) { 2033 - cmd->cmd_flags |= IFC_SENDTARGETS_SINGLE; 2033 + cmd->cmd_flags |= ICF_SENDTARGETS_SINGLE; 2034 2034 } else { 2035 2035 pr_err("Unable to locate valid SendTargets=%s value\n", text_ptr); 2036 2036 goto reject; ··· 3415 3415 return -ENOMEM; 3416 3416 } 3417 3417 /* 3418 - * Locate pointer to iqn./eui. string for IFC_SENDTARGETS_SINGLE 3418 + * Locate pointer to iqn./eui. string for ICF_SENDTARGETS_SINGLE 3419 3419 * explicit case.. 3420 3420 */ 3421 - if (cmd->cmd_flags & IFC_SENDTARGETS_SINGLE) { 3421 + if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) { 3422 3422 text_ptr = strchr(text_in, '='); 3423 3423 if (!text_ptr) { 3424 3424 pr_err("Unable to locate '=' string in text_in:" ··· 3434 3434 3435 3435 spin_lock(&tiqn_lock); 3436 3436 list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) { 3437 - if ((cmd->cmd_flags & IFC_SENDTARGETS_SINGLE) && 3437 + if ((cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) && 3438 3438 strcmp(tiqn->tiqn, text_ptr)) { 3439 3439 continue; 3440 3440 } ··· 3512 3512 if (end_of_buf) 3513 3513 break; 3514 3514 3515 - if (cmd->cmd_flags & IFC_SENDTARGETS_SINGLE) 3515 + if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) 3516 3516 break; 3517 3517 } 3518 3518 spin_unlock(&tiqn_lock);
+2 -2
drivers/target/iscsi/iscsi_target_core.h
··· 135 135 ICF_CONTIG_MEMORY = 0x00000020, 136 136 ICF_ATTACHED_TO_RQUEUE = 0x00000040, 137 137 ICF_OOO_CMDSN = 0x00000080, 138 - IFC_SENDTARGETS_ALL = 0x00000100, 139 - IFC_SENDTARGETS_SINGLE = 0x00000200, 138 + ICF_SENDTARGETS_ALL = 0x00000100, 139 + ICF_SENDTARGETS_SINGLE = 0x00000200, 140 140 }; 141 141 142 142 /* struct iscsi_cmd->i_state */
+4 -50
drivers/target/target_core_device.c
··· 1103 1103 } 1104 1104 EXPORT_SYMBOL(se_dev_set_queue_depth); 1105 1105 1106 - int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors) 1107 - { 1108 - int block_size = dev->dev_attrib.block_size; 1109 - 1110 - if (dev->export_count) { 1111 - pr_err("dev[%p]: Unable to change SE Device" 1112 - " fabric_max_sectors while export_count is %d\n", 1113 - dev, dev->export_count); 1114 - return -EINVAL; 1115 - } 1116 - if (!fabric_max_sectors) { 1117 - pr_err("dev[%p]: Illegal ZERO value for" 1118 - " fabric_max_sectors\n", dev); 1119 - return -EINVAL; 1120 - } 1121 - if (fabric_max_sectors < DA_STATUS_MAX_SECTORS_MIN) { 1122 - pr_err("dev[%p]: Passed fabric_max_sectors: %u less than" 1123 - " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, fabric_max_sectors, 1124 - DA_STATUS_MAX_SECTORS_MIN); 1125 - return -EINVAL; 1126 - } 1127 - if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) { 1128 - pr_err("dev[%p]: Passed fabric_max_sectors: %u" 1129 - " greater than DA_STATUS_MAX_SECTORS_MAX:" 1130 - " %u\n", dev, fabric_max_sectors, 1131 - DA_STATUS_MAX_SECTORS_MAX); 1132 - return -EINVAL; 1133 - } 1134 - /* 1135 - * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks() 1136 - */ 1137 - if (!block_size) { 1138 - block_size = 512; 1139 - pr_warn("Defaulting to 512 for zero block_size\n"); 1140 - } 1141 - fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors, 1142 - block_size); 1143 - 1144 - dev->dev_attrib.fabric_max_sectors = fabric_max_sectors; 1145 - pr_debug("dev[%p]: SE Device max_sectors changed to %u\n", 1146 - dev, fabric_max_sectors); 1147 - return 0; 1148 - } 1149 - EXPORT_SYMBOL(se_dev_set_fabric_max_sectors); 1150 - 1151 1106 int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) 1152 1107 { 1153 1108 if (dev->export_count) { ··· 1111 1156 dev, dev->export_count); 1112 1157 return -EINVAL; 1113 1158 } 1114 - if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) { 1159 + if (optimal_sectors > dev->dev_attrib.hw_max_sectors) { 1115 1160 pr_err("dev[%p]: Passed optimal_sectors %u cannot be" 1116 - " greater than fabric_max_sectors: %u\n", dev, 1117 - optimal_sectors, dev->dev_attrib.fabric_max_sectors); 1161 + " greater than hw_max_sectors: %u\n", dev, 1162 + optimal_sectors, dev->dev_attrib.hw_max_sectors); 1118 1163 return -EINVAL; 1119 1164 } 1120 1165 ··· 1508 1553 dev->dev_attrib.unmap_granularity_alignment = 1509 1554 DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT; 1510 1555 dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN; 1511 - dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS; 1512 - dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS; 1513 1556 1514 1557 xcopy_lun = &dev->xcopy_lun; 1515 1558 xcopy_lun->lun_se_dev = dev; ··· 1548 1595 dev->dev_attrib.hw_max_sectors = 1549 1596 se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors, 1550 1597 dev->dev_attrib.hw_block_size); 1598 + dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors; 1551 1599 1552 1600 dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX); 1553 1601 dev->creation_time = get_jiffies_64();
+10 -2
drivers/target/target_core_file.c
··· 621 621 struct fd_prot fd_prot; 622 622 sense_reason_t rc; 623 623 int ret = 0; 624 - 624 + /* 625 + * We are currently limited by the number of iovecs (2048) per 626 + * single vfs_[writev,readv] call. 627 + */ 628 + if (cmd->data_length > FD_MAX_BYTES) { 629 + pr_err("FILEIO: Not able to process I/O of %u bytes due to " 630 + "FD_MAX_BYTES: %u iovec count limitation\n", 631 + cmd->data_length, FD_MAX_BYTES); 632 + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 633 + } 625 634 /* 626 635 * Call vectorized fileio functions to map struct scatterlist 627 636 * physical memory addresses to struct iovec virtual memory. ··· 968 959 &fileio_dev_attrib_hw_block_size.attr, 969 960 &fileio_dev_attrib_block_size.attr, 970 961 &fileio_dev_attrib_hw_max_sectors.attr, 971 - &fileio_dev_attrib_fabric_max_sectors.attr, 972 962 &fileio_dev_attrib_optimal_sectors.attr, 973 963 &fileio_dev_attrib_hw_queue_depth.attr, 974 964 &fileio_dev_attrib_queue_depth.attr,
+1 -2
drivers/target/target_core_iblock.c
··· 124 124 q = bdev_get_queue(bd); 125 125 126 126 dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd); 127 - dev->dev_attrib.hw_max_sectors = UINT_MAX; 127 + dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q); 128 128 dev->dev_attrib.hw_queue_depth = q->nr_requests; 129 129 130 130 /* ··· 883 883 &iblock_dev_attrib_hw_block_size.attr, 884 884 &iblock_dev_attrib_block_size.attr, 885 885 &iblock_dev_attrib_hw_max_sectors.attr, 886 - &iblock_dev_attrib_fabric_max_sectors.attr, 887 886 &iblock_dev_attrib_optimal_sectors.attr, 888 887 &iblock_dev_attrib_hw_queue_depth.attr, 889 888 &iblock_dev_attrib_queue_depth.attr,
+12
drivers/target/target_core_pr.c
··· 528 528 529 529 return 0; 530 530 } 531 + } else if (we && registered_nexus) { 532 + /* 533 + * Reads are allowed for Write Exclusive locks 534 + * from all registrants. 535 + */ 536 + if (cmd->data_direction == DMA_FROM_DEVICE) { 537 + pr_debug("Allowing READ CDB: 0x%02x for %s" 538 + " reservation\n", cdb[0], 539 + core_scsi3_pr_dump_type(pr_reg_type)); 540 + 541 + return 0; 542 + } 531 543 } 532 544 pr_debug("%s Conflict for %sregistered nexus %s CDB: 0x%2x" 533 545 " for %s reservation\n", transport_dump_cmd_direction(cmd),
-1
drivers/target/target_core_rd.c
··· 657 657 &rd_mcp_dev_attrib_hw_block_size.attr, 658 658 &rd_mcp_dev_attrib_block_size.attr, 659 659 &rd_mcp_dev_attrib_hw_max_sectors.attr, 660 - &rd_mcp_dev_attrib_fabric_max_sectors.attr, 661 660 &rd_mcp_dev_attrib_optimal_sectors.attr, 662 661 &rd_mcp_dev_attrib_hw_queue_depth.attr, 663 662 &rd_mcp_dev_attrib_queue_depth.attr,
-15
drivers/target/target_core_sbc.c
··· 953 953 954 954 if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { 955 955 unsigned long long end_lba; 956 - 957 - if (sectors > dev->dev_attrib.fabric_max_sectors) { 958 - printk_ratelimited(KERN_ERR "SCSI OP %02xh with too" 959 - " big sectors %u exceeds fabric_max_sectors:" 960 - " %u\n", cdb[0], sectors, 961 - dev->dev_attrib.fabric_max_sectors); 962 - return TCM_INVALID_CDB_FIELD; 963 - } 964 - if (sectors > dev->dev_attrib.hw_max_sectors) { 965 - printk_ratelimited(KERN_ERR "SCSI OP %02xh with too" 966 - " big sectors %u exceeds backend hw_max_sectors:" 967 - " %u\n", cdb[0], sectors, 968 - dev->dev_attrib.hw_max_sectors); 969 - return TCM_INVALID_CDB_FIELD; 970 - } 971 956 check_lba: 972 957 end_lba = dev->transport->get_blocks(dev) + 1; 973 958 if (cmd->t_task_lba + sectors > end_lba) {
+1 -4
drivers/target/target_core_spc.c
··· 505 505 spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) 506 506 { 507 507 struct se_device *dev = cmd->se_dev; 508 - u32 max_sectors; 509 508 int have_tp = 0; 510 509 int opt, min; 511 510 ··· 538 539 /* 539 540 * Set MAXIMUM TRANSFER LENGTH 540 541 */ 541 - max_sectors = min(dev->dev_attrib.fabric_max_sectors, 542 - dev->dev_attrib.hw_max_sectors); 543 - put_unaligned_be32(max_sectors, &buf[8]); 542 + put_unaligned_be32(dev->dev_attrib.hw_max_sectors, &buf[8]); 544 543 545 544 /* 546 545 * Set OPTIMAL TRANSFER LENGTH
-1
drivers/target/target_core_user.c
··· 1118 1118 &tcmu_dev_attrib_hw_block_size.attr, 1119 1119 &tcmu_dev_attrib_block_size.attr, 1120 1120 &tcmu_dev_attrib_hw_max_sectors.attr, 1121 - &tcmu_dev_attrib_fabric_max_sectors.attr, 1122 1121 &tcmu_dev_attrib_optimal_sectors.attr, 1123 1122 &tcmu_dev_attrib_hw_queue_depth.attr, 1124 1123 &tcmu_dev_attrib_queue_depth.attr,
+2
drivers/thermal/imx_thermal.c
··· 608 608 regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_MEASURE_TEMP); 609 609 regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_POWER_DOWN); 610 610 data->mode = THERMAL_DEVICE_DISABLED; 611 + clk_disable_unprepare(data->thermal_clk); 611 612 612 613 return 0; 613 614 } ··· 618 617 struct imx_thermal_data *data = dev_get_drvdata(dev); 619 618 struct regmap *map = data->tempmon; 620 619 620 + clk_prepare_enable(data->thermal_clk); 621 621 /* Enabled thermal sensor after resume */ 622 622 regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN); 623 623 regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP);
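The imx_thermal suspend path previously left the sensor clock running; clk_disable_unprepare() in suspend is now paired with clk_prepare_enable() in resume, keeping the clock's prepare and enable refcounts balanced across a suspend cycle. The pairing in a generic dev_pm_ops sketch (my_* names hypothetical):

#include <linux/clk.h>
#include <linux/device.h>

struct my_data {
        struct clk *thermal_clk;
};

static int my_suspend(struct device *dev)
{
        struct my_data *data = dev_get_drvdata(dev);

        /* quiesce the sensor first, then gate its clock */
        clk_disable_unprepare(data->thermal_clk);
        return 0;
}

static int my_resume(struct device *dev)
{
        struct my_data *data = dev_get_drvdata(dev);

        /* ungate the clock before any sensor register access */
        return clk_prepare_enable(data->thermal_clk);
}

Every clk_disable_unprepare() needs exactly one matching clk_prepare_enable(); an unbalanced pair either leaves the clock running and wasting power, or underflows the refcount with a warning.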
+4 -12
drivers/thermal/int340x_thermal/acpi_thermal_rel.c
··· 119 119 continue; 120 120 121 121 result = acpi_bus_get_device(trt->source, &adev); 122 - if (!result) 123 - acpi_create_platform_device(adev); 124 - else 122 + if (result) 125 123 pr_warn("Failed to get source ACPI device\n"); 126 124 127 125 result = acpi_bus_get_device(trt->target, &adev); 128 - if (!result) 129 - acpi_create_platform_device(adev); 130 - else 126 + if (result) 131 127 pr_warn("Failed to get target ACPI device\n"); 132 128 } 133 129 ··· 202 206 203 207 if (art->source) { 204 208 result = acpi_bus_get_device(art->source, &adev); 205 - if (!result) 206 - acpi_create_platform_device(adev); 207 - else 209 + if (result) 208 210 pr_warn("Failed to get source ACPI device\n"); 209 211 } 210 212 if (art->target) { 211 213 result = acpi_bus_get_device(art->target, &adev); 212 - if (!result) 213 - acpi_create_platform_device(adev); 214 - else 214 + if (result) 215 215 pr_warn("Failed to get source ACPI device\n"); 216 216 } 217 217 }
+2
drivers/thermal/int340x_thermal/processor_thermal_device.c
··· 130 130 int ret; 131 131 132 132 adev = ACPI_COMPANION(dev); 133 + if (!adev) 134 + return -ENODEV; 133 135 134 136 status = acpi_evaluate_object(adev->handle, "PPCC", NULL, &buf); 135 137 if (ACPI_FAILURE(status))
+1 -1
drivers/thermal/of-thermal.c
··· 149 149 * 150 150 * Return: pointer to trip points table, NULL otherwise 151 151 */ 152 - const struct thermal_trip * const 152 + const struct thermal_trip * 153 153 of_thermal_get_trip_points(struct thermal_zone_device *tz) 154 154 { 155 155 struct __thermal_zone *data = tz->devdata;
+11 -6
drivers/thermal/rcar_thermal.c
··· 63 63 struct mutex lock; 64 64 struct list_head list; 65 65 int id; 66 - int ctemp; 66 + u32 ctemp; 67 67 }; 68 68 69 69 #define rcar_thermal_for_each_priv(pos, common) \ ··· 145 145 { 146 146 struct device *dev = rcar_priv_to_dev(priv); 147 147 int i; 148 - int ctemp, old, new; 148 + u32 ctemp, old, new; 149 149 int ret = -EINVAL; 150 150 151 151 mutex_lock(&priv->lock); ··· 372 372 int i; 373 373 int ret = -ENODEV; 374 374 int idle = IDLE_INTERVAL; 375 + u32 enr_bits = 0; 375 376 376 377 common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL); 377 378 if (!common) ··· 391 390 392 391 /* 393 392 * platform has IRQ support. 394 - * Then, drier use common register 393 + * Then, driver uses common registers 395 394 */ 396 395 397 396 ret = devm_request_irq(dev, irq->start, rcar_thermal_irq, 0, ··· 408 407 common->base = devm_ioremap_resource(dev, res); 409 408 if (IS_ERR(common->base)) 410 409 return PTR_ERR(common->base); 411 410 - 412 - /* enable temperature comparison */ 413 - rcar_thermal_common_write(common, ENR, 0x00030303); 414 410 415 411 idle = 0; /* polling delay is not needed */ 416 412 } ··· 450 452 rcar_thermal_irq_enable(priv); 451 453 452 454 list_move_tail(&priv->list, &common->head); 455 + 456 + /* update ENR bits */ 457 + enr_bits |= 3 << (i * 8); 453 458 } 459 + 460 + /* enable temperature comparison */ 461 + if (irq) 462 + rcar_thermal_common_write(common, ENR, enr_bits); 454 463 455 464 platform_set_drvdata(pdev, common); 456 465
+2 -2
drivers/thermal/thermal_core.h
··· 91 91 void of_thermal_destroy_zones(void); 92 92 int of_thermal_get_ntrips(struct thermal_zone_device *); 93 93 bool of_thermal_is_trip_valid(struct thermal_zone_device *, int); 94 - const struct thermal_trip * const 94 + const struct thermal_trip * 95 95 of_thermal_get_trip_points(struct thermal_zone_device *); 96 96 #else 97 97 static inline int of_parse_thermal_zones(void) { return 0; } ··· 105 105 { 106 106 return 0; 107 107 } 108 - static inline const struct thermal_trip * const 108 + static inline const struct thermal_trip * 109 109 of_thermal_get_trip_points(struct thermal_zone_device *tz) 110 110 { 111 111 return NULL;
+1 -3
drivers/vfio/pci/vfio_pci.c
··· 840 840 841 841 static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 842 842 { 843 - u8 type; 844 843 struct vfio_pci_device *vdev; 845 844 struct iommu_group *group; 846 845 int ret; 847 846 848 - pci_read_config_byte(pdev, PCI_HEADER_TYPE, &type); 849 - if ((type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL) 847 + if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL) 850 848 return -EINVAL; 851 849 852 850 group = iommu_group_get(&pdev->dev);
+1 -1
drivers/vhost/net.c
··· 538 538 ++headcount; 539 539 seg += in; 540 540 } 541 - heads[headcount - 1].len = cpu_to_vhost32(vq, len - datalen); 541 + heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen); 542 542 *iovcount = seg; 543 543 if (unlikely(log)) 544 544 *log_num = nlogs;
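A note on the sign convention in this one-character fix: judging from the surrounding get_rx_bufs() logic, datalen starts at the packet size and has each buffer's length subtracted as headcount grows, so it is zero or negative by the time the loop exits. The last descriptor therefore carries len + datalen bytes (the final buffer's size minus the overshoot); the old len - datalen overstated it. A minimal userspace sketch of the arithmetic, with made-up buffer sizes:

#include <assert.h>
#include <stdio.h>

/* Sketch of the tail-length arithmetic in get_rx_bufs(): datalen starts
 * at the packet size and has each buffer's length subtracted, so it is
 * <= 0 once enough buffers are gathered. Buffer sizes are invented. */
int main(void)
{
	int datalen = 5000;			/* packet bytes still unplaced */
	int bufs[] = { 2048, 2048, 2048 };
	int len = 0, headcount = 0, last_len;

	while (datalen > 0) {
		len = bufs[headcount++];
		datalen -= len;
	}
	/* datalen is now -1144: the last buffer is only partly used */
	last_len = len + datalen;		/* 2048 + (-1144) = 904 */
	assert(last_len == 5000 - 2 * 2048);
	printf("last descriptor carries %d bytes\n", last_len);
	return 0;
}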
+21 -3
drivers/vhost/scsi.c
··· 911 911 return 0; 912 912 } 913 913 914 + static int vhost_scsi_to_tcm_attr(int attr) 915 + { 916 + switch (attr) { 917 + case VIRTIO_SCSI_S_SIMPLE: 918 + return TCM_SIMPLE_TAG; 919 + case VIRTIO_SCSI_S_ORDERED: 920 + return TCM_ORDERED_TAG; 921 + case VIRTIO_SCSI_S_HEAD: 922 + return TCM_HEAD_TAG; 923 + case VIRTIO_SCSI_S_ACA: 924 + return TCM_ACA_TAG; 925 + default: 926 + break; 927 + } 928 + return TCM_SIMPLE_TAG; 929 + } 930 + 914 931 static void tcm_vhost_submission_work(struct work_struct *work) 915 932 { 916 933 struct tcm_vhost_cmd *cmd = ··· 953 936 rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess, 954 937 cmd->tvc_cdb, &cmd->tvc_sense_buf[0], 955 938 cmd->tvc_lun, cmd->tvc_exp_data_len, 956 - cmd->tvc_task_attr, cmd->tvc_data_direction, 957 - TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count, 958 - NULL, 0, sg_prot_ptr, cmd->tvc_prot_sgl_count); 939 + vhost_scsi_to_tcm_attr(cmd->tvc_task_attr), 940 + cmd->tvc_data_direction, TARGET_SCF_ACK_KREF, 941 + sg_ptr, cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr, 942 + cmd->tvc_prot_sgl_count); 959 943 if (rc < 0) { 960 944 transport_send_check_condition_and_sense(se_cmd, 961 945 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
+5 -3
drivers/video/fbdev/broadsheetfb.c
··· 636 636 err = broadsheet_spiflash_read_range(par, start_sector_addr, 637 637 data_start_addr, sector_buffer); 638 638 if (err) 639 - return err; 639 + goto out; 640 640 } 641 641 642 642 /* now we copy our data into the right place in the sector buffer */ ··· 657 657 err = broadsheet_spiflash_read_range(par, tail_start_addr, 658 658 tail_len, sector_buffer + tail_start_addr); 659 659 if (err) 660 - return err; 660 + goto out; 661 661 } 662 662 663 663 /* if we got here we have the full sector that we want to rewrite. */ ··· 665 665 /* first erase the sector */ 666 666 err = broadsheet_spiflash_erase_sector(par, start_sector_addr); 667 667 if (err) 668 - return err; 668 + goto out; 669 669 670 670 /* now write it */ 671 671 err = broadsheet_spiflash_write_sector(par, start_sector_addr, 672 672 sector_buffer, sector_size); 673 + out: 674 + kfree(sector_buffer); 673 675 return err; 674 676 } 675 677
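The broadsheetfb change is a single-exit cleanup conversion: the early returns skipped the kfree() of the sector buffer allocated earlier in the function (the allocation sits outside this hunk), so every failure now funnels through the out: label. A compilable userspace sketch of the idiom, with illustrative names:

#include <stdlib.h>
#include <string.h>

/* Sketch of the single-exit cleanup idiom adopted above: one buffer,
 * one out: label, every error path funnels through it so the free()
 * (kfree() in the driver) cannot be skipped. Names are illustrative. */
static int rewrite_sector(const void *data, size_t len, size_t sector_size)
{
	char *sector_buffer;
	int err = 0;

	sector_buffer = malloc(sector_size);
	if (!sector_buffer)
		return -1;		/* nothing to clean up yet */

	if (len > sector_size) {	/* stand-in for a failed read/erase */
		err = -1;
		goto out;
	}
	memcpy(sector_buffer, data, len);
	/* ... erase and rewrite the sector from sector_buffer ... */
out:
	free(sector_buffer);
	return err;
}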
+1 -1
drivers/video/fbdev/simplefb.c
··· 402 402 if (ret) 403 403 return ret; 404 404 405 - if (IS_ENABLED(CONFIG_OF) && of_chosen) { 405 + if (IS_ENABLED(CONFIG_OF_ADDRESS) && of_chosen) { 406 406 for_each_child_of_node(of_chosen, np) { 407 407 if (of_device_is_compatible(np, "simple-framebuffer")) 408 408 of_platform_device_create(np, NULL, NULL);
+1 -9
drivers/virtio/virtio_pci_common.c
··· 282 282 283 283 vp_free_vectors(vdev); 284 284 kfree(vp_dev->vqs); 285 + vp_dev->vqs = NULL; 285 286 } 286 287 287 288 static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs, ··· 420 419 } 421 420 } 422 421 return 0; 423 - } 424 - 425 - void virtio_pci_release_dev(struct device *_d) 426 - { 427 - /* 428 - * No need for a release method as we allocate/free 429 - * all devices together with the pci devices. 430 - * Provide an empty one to avoid getting a warning from core. 431 - */ 432 422 } 433 423 434 424 #ifdef CONFIG_PM_SLEEP
-1
drivers/virtio/virtio_pci_common.h
··· 126 126 * - ignore the affinity request if we're using INTX 127 127 */ 128 128 int vp_set_vq_affinity(struct virtqueue *vq, int cpu); 129 - void virtio_pci_release_dev(struct device *); 130 129 131 130 int virtio_pci_legacy_probe(struct pci_dev *pci_dev, 132 131 const struct pci_device_id *id);
+11 -1
drivers/virtio/virtio_pci_legacy.c
··· 211 211 .set_vq_affinity = vp_set_vq_affinity, 212 212 }; 213 213 214 + static void virtio_pci_release_dev(struct device *_d) 215 + { 216 + struct virtio_device *vdev = dev_to_virtio(_d); 217 + struct virtio_pci_device *vp_dev = to_vp_device(vdev); 218 + 219 + /* As struct device is a kobject, it's not safe to 220 + * free the memory (including the reference counter itself) 221 + * until its release callback. */ 222 + kfree(vp_dev); 223 + } 224 + 214 225 /* the PCI probing function */ 215 226 int virtio_pci_legacy_probe(struct pci_dev *pci_dev, 216 227 const struct pci_device_id *id) ··· 313 302 pci_iounmap(pci_dev, vp_dev->ioaddr); 314 303 pci_release_regions(pci_dev); 315 304 pci_disable_device(pci_dev); 316 - kfree(vp_dev); 317 305 }
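Moving the kfree() from the remove path into the struct device release callback follows the kobject lifetime rule the new comment cites: memory that embeds a refcounted object may only be freed once the last reference is dropped, which is exactly when ->release runs. A rough userspace analogue of that pattern (names and types are stand-ins):

#include <stdatomic.h>
#include <stdlib.h>

/* Userspace sketch of the kobject lifetime rule: the object embeds its
 * own refcount, so it may only be freed from the release callback that
 * runs when the count reaches zero, never from the unregister path,
 * which can race with remaining reference holders. */
struct obj {
	atomic_int refs;
	void (*release)(struct obj *);
};

static void obj_get(struct obj *o)
{
	atomic_fetch_add(&o->refs, 1);
}

static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->refs, 1) == 1)
		o->release(o);		/* last reference gone */
}

static void obj_release(struct obj *o)
{
	free(o);			/* safe only here */
}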
+10 -3
fs/btrfs/backref.c
··· 1552 1552 { 1553 1553 int ret; 1554 1554 int type; 1555 - struct btrfs_tree_block_info *info; 1556 1555 struct btrfs_extent_inline_ref *eiref; 1557 1556 1558 1557 if (*ptr == (unsigned long)-1) ··· 1572 1573 } 1573 1574 1574 1575 /* we can treat both ref types equally here */ 1575 - info = (struct btrfs_tree_block_info *)(ei + 1); 1576 1576 *out_root = btrfs_extent_inline_ref_offset(eb, eiref); 1577 - *out_level = btrfs_tree_block_level(eb, info); 1577 + 1578 + if (key->type == BTRFS_EXTENT_ITEM_KEY) { 1579 + struct btrfs_tree_block_info *info; 1580 + 1581 + info = (struct btrfs_tree_block_info *)(ei + 1); 1582 + *out_level = btrfs_tree_block_level(eb, info); 1583 + } else { 1584 + ASSERT(key->type == BTRFS_METADATA_ITEM_KEY); 1585 + *out_level = (u8)key->offset; 1586 + } 1578 1587 1579 1588 if (ret == 1) 1580 1589 *ptr = (unsigned long)-1;
+8
fs/btrfs/delayed-inode.c
··· 1857 1857 { 1858 1858 struct btrfs_delayed_node *delayed_node; 1859 1859 1860 + /* 1861 + * we don't do delayed inode updates during log recovery because it 1862 + * leads to enospc problems. This means we also can't do 1863 + * delayed inode refs 1864 + */ 1865 + if (BTRFS_I(inode)->root->fs_info->log_root_recovering) 1866 + return -EAGAIN; 1867 + 1860 1868 delayed_node = btrfs_get_or_create_delayed_node(inode); 1861 1869 if (IS_ERR(delayed_node)) 1862 1870 return PTR_ERR(delayed_node);
+6 -6
fs/btrfs/extent-tree.c
··· 3139 3139 struct extent_buffer *leaf; 3140 3140 3141 3141 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1); 3142 - if (ret < 0) 3142 + if (ret) { 3143 + if (ret > 0) 3144 + ret = -ENOENT; 3143 3145 goto fail; 3144 - BUG_ON(ret); /* Corruption */ 3146 + } 3145 3147 3146 3148 leaf = path->nodes[0]; 3147 3149 bi = btrfs_item_ptr_offset(leaf, path->slots[0]); ··· 3151 3149 btrfs_mark_buffer_dirty(leaf); 3152 3150 btrfs_release_path(path); 3153 3151 fail: 3154 - if (ret) { 3152 + if (ret) 3155 3153 btrfs_abort_transaction(trans, root, ret); 3156 - return ret; 3157 - } 3158 - return 0; 3154 + return ret; 3159 3155 3160 3156 } 3161 3157
+3 -1
fs/btrfs/inode.c
··· 6255 6255 6256 6256 out_fail: 6257 6257 btrfs_end_transaction(trans, root); 6258 - if (drop_on_err) 6258 + if (drop_on_err) { 6259 + inode_dec_link_count(inode); 6259 6260 iput(inode); 6261 + } 6260 6262 btrfs_balance_delayed_items(root); 6261 6263 btrfs_btree_balance_dirty(root); 6262 6264 return err;
+1 -1
fs/btrfs/scrub.c
··· 2607 2607 ret = scrub_pages_for_parity(sparity, logical, l, physical, dev, 2608 2608 flags, gen, mirror_num, 2609 2609 have_csum ? csum : NULL); 2610 - skip: 2611 2610 if (ret) 2612 2611 return ret; 2612 + skip: 2613 2613 len -= l; 2614 2614 logical += l; 2615 2615 physical += l;
+1 -1
fs/ceph/addr.c
··· 1416 1416 } 1417 1417 } 1418 1418 1419 - dout("fill_inline_data %p %llx.%llx len %lu locked_page %p\n", 1419 + dout("fill_inline_data %p %llx.%llx len %zu locked_page %p\n", 1420 1420 inode, ceph_vinop(inode), len, locked_page); 1421 1421 1422 1422 if (len > 0) {
+32 -32
fs/ceph/locks.c
··· 239 239 return err; 240 240 } 241 241 242 - /** 243 - * Must be called with lock_flocks() already held. Fills in the passed 244 - * counter variables, so you can prepare pagelist metadata before calling 245 - * ceph_encode_locks. 242 + /* 243 + * Fills in the passed counter variables, so you can prepare pagelist metadata 244 + * before calling ceph_encode_locks. 246 245 */ 247 246 void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count) 248 247 { 249 - struct file_lock *lock; 248 + struct file_lock_context *ctx; 250 249 251 250 *fcntl_count = 0; 252 251 *flock_count = 0; 253 252 254 - for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) { 255 - if (lock->fl_flags & FL_POSIX) 256 - ++(*fcntl_count); 257 - else if (lock->fl_flags & FL_FLOCK) 258 - ++(*flock_count); 253 + ctx = inode->i_flctx; 254 + if (ctx) { 255 + *fcntl_count = ctx->flc_posix_cnt; 256 + *flock_count = ctx->flc_flock_cnt; 259 257 } 260 258 dout("counted %d flock locks and %d fcntl locks", 261 259 *flock_count, *fcntl_count); ··· 269 271 int num_fcntl_locks, int num_flock_locks) 270 272 { 271 273 struct file_lock *lock; 274 + struct file_lock_context *ctx = inode->i_flctx; 272 275 int err = 0; 273 276 int seen_fcntl = 0; 274 277 int seen_flock = 0; ··· 278 279 dout("encoding %d flock and %d fcntl locks", num_flock_locks, 279 280 num_fcntl_locks); 280 281 281 - for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) { 282 - if (lock->fl_flags & FL_POSIX) { 283 - ++seen_fcntl; 284 - if (seen_fcntl > num_fcntl_locks) { 285 - err = -ENOSPC; 286 - goto fail; 287 - } 288 - err = lock_to_ceph_filelock(lock, &flocks[l]); 289 - if (err) 290 - goto fail; 291 - ++l; 282 + if (!ctx) 283 + return 0; 284 + 285 + spin_lock(&ctx->flc_lock); 286 + list_for_each_entry(lock, &ctx->flc_posix, fl_list) { 287 + ++seen_fcntl; 288 + if (seen_fcntl > num_fcntl_locks) { 289 + err = -ENOSPC; 290 + goto fail; 292 291 } 292 + err = lock_to_ceph_filelock(lock, &flocks[l]); 293 + if (err) 294 + goto fail; 295 + ++l; 293 296 } 294 - for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) { 295 - if (lock->fl_flags & FL_FLOCK) { 296 - ++seen_flock; 297 - if (seen_flock > num_flock_locks) { 298 - err = -ENOSPC; 299 - goto fail; 300 - } 301 - err = lock_to_ceph_filelock(lock, &flocks[l]); 302 - if (err) 303 - goto fail; 304 - ++l; 297 + list_for_each_entry(lock, &ctx->flc_flock, fl_list) { 298 + ++seen_flock; 299 + if (seen_flock > num_flock_locks) { 300 + err = -ENOSPC; 301 + goto fail; 305 302 } 303 + err = lock_to_ceph_filelock(lock, &flocks[l]); 304 + if (err) 305 + goto fail; 306 + ++l; 306 307 } 307 308 fail: 309 + spin_unlock(&ctx->flc_lock); 308 310 return err; 309 311 } 310 312
-4
fs/ceph/mds_client.c
··· 2700 2700 struct ceph_filelock *flocks; 2701 2701 2702 2702 encode_again: 2703 - spin_lock(&inode->i_lock); 2704 2703 ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks); 2705 - spin_unlock(&inode->i_lock); 2706 2704 flocks = kmalloc((num_fcntl_locks+num_flock_locks) * 2707 2705 sizeof(struct ceph_filelock), GFP_NOFS); 2708 2706 if (!flocks) { 2709 2707 err = -ENOMEM; 2710 2708 goto out_free; 2711 2709 } 2712 - spin_lock(&inode->i_lock); 2713 2710 err = ceph_encode_locks_to_buffer(inode, flocks, 2714 2711 num_fcntl_locks, 2715 2712 num_flock_locks); 2716 - spin_unlock(&inode->i_lock); 2717 2713 if (err) { 2718 2714 kfree(flocks); 2719 2715 if (err == -ENOSPC)
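With the i_lock acquisitions dropped here, the count and the encode each serialize internally on ctx->flc_lock, so locks can appear between the two calls; the encode_again loop covers that window because ceph_encode_locks_to_buffer() returns -ENOSPC when it runs out of preallocated slots. A sketch of that count/allocate/encode retry shape, with hypothetical stand-in helpers:

#include <errno.h>
#include <stdlib.h>

/* Sketch of the count -> allocate -> encode retry in the reconnect path.
 * count_items()/encode_items() are hypothetical stand-ins; the real
 * encoder returns -ENOSPC when locks were added after the count. */
static int snapshot_items(int (*count_items)(void),
			  int (*encode_items)(void *buf, int n),
			  size_t item_size, void **bufp)
{
	void *buf;
	int n, err;

again:
	n = count_items();
	buf = malloc((size_t)n * item_size);
	if (!buf)
		return -ENOMEM;
	err = encode_items(buf, n);
	if (err == -ENOSPC) {		/* raced with a new lock: redo */
		free(buf);
		goto again;
	}
	if (err) {
		free(buf);
		return err;
	}
	*bufp = buf;			/* caller owns the buffer */
	return 0;
}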
+11 -23
fs/cifs/file.c
··· 1109 1109 return rc; 1110 1110 } 1111 1111 1112 - /* copied from fs/locks.c with a name change */ 1113 - #define cifs_for_each_lock(inode, lockp) \ 1114 - for (lockp = &inode->i_flock; *lockp != NULL; \ 1115 - lockp = &(*lockp)->fl_next) 1116 - 1117 1112 struct lock_to_push { 1118 1113 struct list_head llist; 1119 1114 __u64 offset; ··· 1123 1128 { 1124 1129 struct inode *inode = cfile->dentry->d_inode; 1125 1130 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); 1126 - struct file_lock *flock, **before; 1127 - unsigned int count = 0, i = 0; 1131 + struct file_lock *flock; 1132 + struct file_lock_context *flctx = inode->i_flctx; 1133 + unsigned int i; 1128 1134 int rc = 0, xid, type; 1129 1135 struct list_head locks_to_send, *el; 1130 1136 struct lock_to_push *lck, *tmp; ··· 1133 1137 1134 1138 xid = get_xid(); 1135 1139 1136 - spin_lock(&inode->i_lock); 1137 - cifs_for_each_lock(inode, before) { 1138 - if ((*before)->fl_flags & FL_POSIX) 1139 - count++; 1140 - } 1141 - spin_unlock(&inode->i_lock); 1140 + if (!flctx) 1141 + goto out; 1142 1142 1143 1143 INIT_LIST_HEAD(&locks_to_send); 1144 1144 1145 1145 /* 1146 - * Allocating count locks is enough because no FL_POSIX locks can be 1147 - * added to the list while we are holding cinode->lock_sem that 1146 + * Allocating flc_posix_cnt locks is enough because no FL_POSIX locks 1147 + * can be added to the list while we are holding cinode->lock_sem that 1148 1148 * protects locking operations of this inode. 1149 1149 */ 1150 - for (; i < count; i++) { 1150 + for (i = 0; i < flctx->flc_posix_cnt; i++) { 1151 1151 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL); 1152 1152 if (!lck) { 1153 1153 rc = -ENOMEM; ··· 1153 1161 } 1154 1162 1155 1163 el = locks_to_send.next; 1156 - spin_lock(&inode->i_lock); 1157 - cifs_for_each_lock(inode, before) { 1158 - flock = *before; 1159 - if ((flock->fl_flags & FL_POSIX) == 0) 1160 - continue; 1164 + spin_lock(&flctx->flc_lock); 1165 + list_for_each_entry(flock, &flctx->flc_posix, fl_list) { 1161 1166 if (el == &locks_to_send) { 1162 1167 /* 1163 1168 * The list ended. We don't have enough allocated ··· 1174 1185 lck->length = length; 1175 1186 lck->type = type; 1176 1187 lck->offset = flock->fl_start; 1177 - el = el->next; 1178 1188 } 1179 - spin_unlock(&inode->i_lock); 1189 + spin_unlock(&flctx->flc_lock); 1180 1190 1181 1191 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) { 1182 1192 int stored_rc;
+2 -2
fs/ext4/extents.c
··· 5166 5166 5167 5167 /* fallback to generic here if not in extents fmt */ 5168 5168 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) 5169 - return __generic_block_fiemap(inode, fieinfo, start, len, 5170 - ext4_get_block); 5169 + return generic_block_fiemap(inode, fieinfo, start, len, 5170 + ext4_get_block); 5171 5171 5172 5172 if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS)) 5173 5173 return -EBADR;
+116 -108
fs/ext4/file.c
··· 273 273 * we determine this extent as a data or a hole according to whether the 274 274 * page cache has data or not. 275 275 */ 276 - static int ext4_find_unwritten_pgoff(struct inode *inode, int whence, 277 - loff_t endoff, loff_t *offset) 276 + static int ext4_find_unwritten_pgoff(struct inode *inode, 277 + int whence, 278 + struct ext4_map_blocks *map, 279 + loff_t *offset) 278 280 { 279 281 struct pagevec pvec; 282 + unsigned int blkbits; 280 283 pgoff_t index; 281 284 pgoff_t end; 285 + loff_t endoff; 282 286 loff_t startoff; 283 287 loff_t lastoff; 284 288 int found = 0; 285 289 290 + blkbits = inode->i_sb->s_blocksize_bits; 286 291 startoff = *offset; 287 292 lastoff = startoff; 288 - 293 + endoff = (loff_t)(map->m_lblk + map->m_len) << blkbits; 289 294 290 295 index = startoff >> PAGE_CACHE_SHIFT; 291 296 end = endoff >> PAGE_CACHE_SHIFT; ··· 408 403 static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize) 409 404 { 410 405 struct inode *inode = file->f_mapping->host; 411 - struct fiemap_extent_info fie; 412 - struct fiemap_extent ext[2]; 413 - loff_t next; 414 - int i, ret = 0; 406 + struct ext4_map_blocks map; 407 + struct extent_status es; 408 + ext4_lblk_t start, last, end; 409 + loff_t dataoff, isize; 410 + int blkbits; 411 + int ret = 0; 415 412 416 413 mutex_lock(&inode->i_mutex); 417 - if (offset >= inode->i_size) { 414 + 415 + isize = i_size_read(inode); 416 + if (offset >= isize) { 418 417 mutex_unlock(&inode->i_mutex); 419 418 return -ENXIO; 420 419 } 421 - fie.fi_flags = 0; 422 - fie.fi_extents_max = 2; 423 - fie.fi_extents_start = (struct fiemap_extent __user *) &ext; 424 - while (1) { 425 - mm_segment_t old_fs = get_fs(); 426 420 427 - fie.fi_extents_mapped = 0; 428 - memset(ext, 0, sizeof(*ext) * fie.fi_extents_max); 421 + blkbits = inode->i_sb->s_blocksize_bits; 422 + start = offset >> blkbits; 423 + last = start; 424 + end = isize >> blkbits; 425 + dataoff = offset; 429 426 430 - set_fs(get_ds()); 431 - ret = ext4_fiemap(inode, &fie, offset, maxsize - offset); 432 - set_fs(old_fs); 433 - if (ret) 434 - break; 435 - 436 - /* No extents found, EOF */ 437 - if (!fie.fi_extents_mapped) { 438 - ret = -ENXIO; 427 + do { 428 + map.m_lblk = last; 429 + map.m_len = end - last + 1; 430 + ret = ext4_map_blocks(NULL, inode, &map, 0); 431 + if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) { 432 + if (last != start) 433 + dataoff = (loff_t)last << blkbits; 439 434 break; 440 435 } 441 - for (i = 0; i < fie.fi_extents_mapped; i++) { 442 - next = (loff_t)(ext[i].fe_length + ext[i].fe_logical); 443 436 444 - if (offset < (loff_t)ext[i].fe_logical) 445 - offset = (loff_t)ext[i].fe_logical; 446 - /* 447 - * If extent is not unwritten, then it contains valid 448 - * data, mapped or delayed. 449 - */ 450 - if (!(ext[i].fe_flags & FIEMAP_EXTENT_UNWRITTEN)) 451 - goto out; 452 - 453 - /* 454 - * If there is a unwritten extent at this offset, 455 - * it will be as a data or a hole according to page 456 - * cache that has data or not. 457 - */ 458 - if (ext4_find_unwritten_pgoff(inode, SEEK_DATA, 459 - next, &offset)) 460 - goto out; 461 - 462 - if (ext[i].fe_flags & FIEMAP_EXTENT_LAST) { 463 - ret = -ENXIO; 464 - goto out; 465 - } 466 - offset = next; 437 + /* 438 + * If there is a delay extent at this offset, 439 + * it will be as a data. 
440 + */ 441 + ext4_es_find_delayed_extent_range(inode, last, last, &es); 442 + if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) { 443 + if (last != start) 444 + dataoff = (loff_t)last << blkbits; 445 + break; 467 446 } 468 - } 469 - if (offset > inode->i_size) 470 - offset = inode->i_size; 471 - out: 447 + 448 + /* 449 + * If there is a unwritten extent at this offset, 450 + * it will be as a data or a hole according to page 451 + * cache that has data or not. 452 + */ 453 + if (map.m_flags & EXT4_MAP_UNWRITTEN) { 454 + int unwritten; 455 + unwritten = ext4_find_unwritten_pgoff(inode, SEEK_DATA, 456 + &map, &dataoff); 457 + if (unwritten) 458 + break; 459 + } 460 + 461 + last++; 462 + dataoff = (loff_t)last << blkbits; 463 + } while (last <= end); 464 + 472 465 mutex_unlock(&inode->i_mutex); 473 - if (ret) 474 - return ret; 475 466 476 - return vfs_setpos(file, offset, maxsize); 467 + if (dataoff > isize) 468 + return -ENXIO; 469 + 470 + return vfs_setpos(file, dataoff, maxsize); 477 471 } 478 472 479 473 /* 480 - * ext4_seek_hole() retrieves the offset for SEEK_HOLE 474 + * ext4_seek_hole() retrieves the offset for SEEK_HOLE. 481 475 */ 482 476 static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize) 483 477 { 484 478 struct inode *inode = file->f_mapping->host; 485 - struct fiemap_extent_info fie; 486 - struct fiemap_extent ext[2]; 487 - loff_t next; 488 - int i, ret = 0; 479 + struct ext4_map_blocks map; 480 + struct extent_status es; 481 + ext4_lblk_t start, last, end; 482 + loff_t holeoff, isize; 483 + int blkbits; 484 + int ret = 0; 489 485 490 486 mutex_lock(&inode->i_mutex); 491 - if (offset >= inode->i_size) { 487 + 488 + isize = i_size_read(inode); 489 + if (offset >= isize) { 492 490 mutex_unlock(&inode->i_mutex); 493 491 return -ENXIO; 494 492 } 495 493 496 - fie.fi_flags = 0; 497 - fie.fi_extents_max = 2; 498 - fie.fi_extents_start = (struct fiemap_extent __user *)&ext; 499 - while (1) { 500 - mm_segment_t old_fs = get_fs(); 494 + blkbits = inode->i_sb->s_blocksize_bits; 495 + start = offset >> blkbits; 496 + last = start; 497 + end = isize >> blkbits; 498 + holeoff = offset; 501 499 502 - fie.fi_extents_mapped = 0; 503 - memset(ext, 0, sizeof(*ext)); 500 + do { 501 + map.m_lblk = last; 502 + map.m_len = end - last + 1; 503 + ret = ext4_map_blocks(NULL, inode, &map, 0); 504 + if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) { 505 + last += ret; 506 + holeoff = (loff_t)last << blkbits; 507 + continue; 508 + } 504 509 505 - set_fs(get_ds()); 506 - ret = ext4_fiemap(inode, &fie, offset, maxsize - offset); 507 - set_fs(old_fs); 508 - if (ret) 509 - break; 510 + /* 511 + * If there is a delay extent at this offset, 512 + * we will skip this extent. 513 + */ 514 + ext4_es_find_delayed_extent_range(inode, last, last, &es); 515 + if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) { 516 + last = es.es_lblk + es.es_len; 517 + holeoff = (loff_t)last << blkbits; 518 + continue; 519 + } 510 520 511 - /* No extents found */ 512 - if (!fie.fi_extents_mapped) 513 - break; 514 - 515 - for (i = 0; i < fie.fi_extents_mapped; i++) { 516 - next = (loff_t)(ext[i].fe_logical + ext[i].fe_length); 517 - /* 518 - * If extent is not unwritten, then it contains valid 519 - * data, mapped or delayed. 
520 - */ 521 - if (!(ext[i].fe_flags & FIEMAP_EXTENT_UNWRITTEN)) { 522 - if (offset < (loff_t)ext[i].fe_logical) 523 - goto out; 524 - offset = next; 521 + /* 522 + * If there is a unwritten extent at this offset, 523 + * it will be as a data or a hole according to page 524 + * cache that has data or not. 525 + */ 526 + if (map.m_flags & EXT4_MAP_UNWRITTEN) { 527 + int unwritten; 528 + unwritten = ext4_find_unwritten_pgoff(inode, SEEK_HOLE, 529 + &map, &holeoff); 530 + if (!unwritten) { 531 + last += ret; 532 + holeoff = (loff_t)last << blkbits; 525 533 continue; 526 534 } 527 - /* 528 - * If there is a unwritten extent at this offset, 529 - * it will be as a data or a hole according to page 530 - * cache that has data or not. 531 - */ 532 - if (ext4_find_unwritten_pgoff(inode, SEEK_HOLE, 533 - next, &offset)) 534 - goto out; 535 - 536 - offset = next; 537 - if (ext[i].fe_flags & FIEMAP_EXTENT_LAST) 538 - goto out; 539 535 } 540 - } 541 - if (offset > inode->i_size) 542 - offset = inode->i_size; 543 - out: 544 - mutex_unlock(&inode->i_mutex); 545 - if (ret) 546 - return ret; 547 536 548 - return vfs_setpos(file, offset, maxsize); 537 + /* find a hole */ 538 + break; 539 + } while (last <= end); 540 + 541 + mutex_unlock(&inode->i_mutex); 542 + 543 + if (holeoff > isize) 544 + holeoff = isize; 545 + 546 + return vfs_setpos(file, holeoff, maxsize); 549 547 } 550 548 551 549 /*
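What these loops implement is the behavior userspace observes through lseek(2): SEEK_DATA skips holes (and unwritten extents with no page-cache data), while SEEK_HOLE reports the first gap at or after the offset. A small self-contained demo of that contract; the path and sizes are arbitrary, and on filesystems without SEEK_HOLE support the whole file is reported as data:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Demonstrates the SEEK_DATA/SEEK_HOLE semantics the code above
 * implements: on a sparse file, SEEK_HOLE from 0 lands at the end of
 * the written data, SEEK_DATA finds the first extent at/after 0. */
int main(void)
{
	int fd = open("/tmp/sparse-demo", O_RDWR | O_CREAT | O_TRUNC, 0600);
	off_t hole, data;

	if (fd < 0)
		return 1;
	pwrite(fd, "x", 1, 0);			/* data at offset 0 */
	ftruncate(fd, 1 << 20);			/* hole up to 1 MiB */

	hole = lseek(fd, 0, SEEK_HOLE);		/* first hole after the data */
	data = lseek(fd, 0, SEEK_DATA);		/* first data at/after 0 */
	printf("data at %lld, hole at %lld\n",
	       (long long)data, (long long)hole);
	close(fd);
	return 0;
}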
+12 -12
fs/ext4/resize.c
··· 24 24 return -EPERM; 25 25 26 26 /* 27 + * If we are not using the primary superblock/GDT copy don't resize, 28 + * because the user tools have no way of handling this. Probably a 29 + * bad time to do it anyways. 30 + */ 31 + if (EXT4_SB(sb)->s_sbh->b_blocknr != 32 + le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) { 33 + ext4_warning(sb, "won't resize using backup superblock at %llu", 34 + (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr); 35 + return -EPERM; 36 + } 37 + 38 + /* 27 39 * We are not allowed to do online-resizing on a filesystem mounted 28 40 * with error, because it can destroy the filesystem easily. 29 41 */ ··· 769 757 printk(KERN_DEBUG 770 758 "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n", 771 759 gdb_num); 772 - 773 - /* 774 - * If we are not using the primary superblock/GDT copy don't resize, 775 - * because the user tools have no way of handling this. Probably a 776 - * bad time to do it anyways. 777 - */ 778 - if (EXT4_SB(sb)->s_sbh->b_blocknr != 779 - le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) { 780 - ext4_warning(sb, "won't resize using backup superblock at %llu", 781 - (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr); 782 - return -EPERM; 783 - } 784 760 785 761 gdb_bh = sb_bread(sb, gdblock); 786 762 if (!gdb_bh)
+1 -1
fs/ext4/super.c
··· 3482 3482 if (EXT4_HAS_RO_COMPAT_FEATURE(sb, 3483 3483 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) && 3484 3484 EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) 3485 - ext4_warning(sb, KERN_INFO "metadata_csum and uninit_bg are " 3485 + ext4_warning(sb, "metadata_csum and uninit_bg are " 3486 3486 "redundant flags; please run fsck."); 3487 3487 3488 3488 /* Check for a known checksum algorithm */
+3 -2
fs/fcntl.c
··· 740 740 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY 741 741 * is defined as O_NONBLOCK on some platforms and not on others. 742 742 */ 743 - BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32( 743 + BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32( 744 744 O_RDONLY | O_WRONLY | O_RDWR | 745 745 O_CREAT | O_EXCL | O_NOCTTY | 746 746 O_TRUNC | O_APPEND | /* O_NONBLOCK | */ 747 747 __O_SYNC | O_DSYNC | FASYNC | 748 748 O_DIRECT | O_LARGEFILE | O_DIRECTORY | 749 749 O_NOFOLLOW | O_NOATIME | O_CLOEXEC | 750 - __FMODE_EXEC | O_PATH | __O_TMPFILE 750 + __FMODE_EXEC | O_PATH | __O_TMPFILE | 751 + __FMODE_NONOTIFY 751 752 )); 752 753 753 754 fasync_cache = kmem_cache_create("fasync_cache",
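The BUILD_BUG_ON adjusted here is a compile-time popcount check: OR every O_* flag together and the number of set bits must equal the number of flags (21 now that __FMODE_NONOTIFY is counted, minus one because O_RDONLY is zero), so any two flags sharing a bit break the build. A userspace analogue of the trick, with invented flag values; it relies on GCC/Clang folding __builtin_popcount of a constant:

/* Userspace analogue of the HWEIGHT32 check: if two flags ever share a
 * bit, the popcount of their OR drops below the flag count and the
 * build fails. Flag values here are invented for illustration. */
#define FLAG_A 0x01
#define FLAG_B 0x02
#define FLAG_C 0x04

#define ALL_FLAGS (FLAG_A | FLAG_B | FLAG_C)

_Static_assert(__builtin_popcount(ALL_FLAGS) == 3,
	       "flag bits overlap or the flag count is stale");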
+49 -2
fs/fuse/dev.c
··· 131 131 req->in.h.pid = current->pid; 132 132 } 133 133 134 + void fuse_set_initialized(struct fuse_conn *fc) 135 + { 136 + /* Make sure stores before this are seen on another CPU */ 137 + smp_wmb(); 138 + fc->initialized = 1; 139 + } 140 + 134 141 static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background) 135 142 { 136 143 return !fc->initialized || (for_background && fc->blocked); ··· 162 155 if (intr) 163 156 goto out; 164 157 } 158 + /* Matches smp_wmb() in fuse_set_initialized() */ 159 + smp_rmb(); 165 160 166 161 err = -ENOTCONN; 167 162 if (!fc->connected) ··· 262 253 263 254 atomic_inc(&fc->num_waiting); 264 255 wait_event(fc->blocked_waitq, fc->initialized); 256 + /* Matches smp_wmb() in fuse_set_initialized() */ 257 + smp_rmb(); 265 258 req = fuse_request_alloc(0); 266 259 if (!req) 267 260 req = get_reserved_req(fc, file); ··· 522 511 } 523 512 EXPORT_SYMBOL_GPL(fuse_request_send); 524 513 514 + static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args) 515 + { 516 + if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS) 517 + args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE; 518 + 519 + if (fc->minor < 9) { 520 + switch (args->in.h.opcode) { 521 + case FUSE_LOOKUP: 522 + case FUSE_CREATE: 523 + case FUSE_MKNOD: 524 + case FUSE_MKDIR: 525 + case FUSE_SYMLINK: 526 + case FUSE_LINK: 527 + args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE; 528 + break; 529 + case FUSE_GETATTR: 530 + case FUSE_SETATTR: 531 + args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE; 532 + break; 533 + } 534 + } 535 + if (fc->minor < 12) { 536 + switch (args->in.h.opcode) { 537 + case FUSE_CREATE: 538 + args->in.args[0].size = sizeof(struct fuse_open_in); 539 + break; 540 + case FUSE_MKNOD: 541 + args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE; 542 + break; 543 + } 544 + } 545 + } 546 + 525 547 ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args) 526 548 { 527 549 struct fuse_req *req; ··· 563 519 req = fuse_get_req(fc, 0); 564 520 if (IS_ERR(req)) 565 521 return PTR_ERR(req); 522 + 523 + /* Needs to be done after fuse_get_req() so that fc->minor is valid */ 524 + fuse_adjust_compat(fc, args); 566 525 567 526 req->in.h.opcode = args->in.h.opcode; 568 527 req->in.h.nodeid = args->in.h.nodeid; ··· 2174 2127 if (fc->connected) { 2175 2128 fc->connected = 0; 2176 2129 fc->blocked = 0; 2177 - fc->initialized = 1; 2130 + fuse_set_initialized(fc); 2178 2131 end_io_requests(fc); 2179 2132 end_queued_requests(fc); 2180 2133 end_polls(fc); ··· 2193 2146 spin_lock(&fc->lock); 2194 2147 fc->connected = 0; 2195 2148 fc->blocked = 0; 2196 - fc->initialized = 1; 2149 + fuse_set_initialized(fc); 2197 2150 end_queued_requests(fc); 2198 2151 end_polls(fc); 2199 2152 wake_up_all(&fc->blocked_waitq);
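fuse_set_initialized() is a publish operation: the smp_wmb() orders every store made during setup before the write of fc->initialized, and the smp_rmb() on each reader path orders subsequent loads after the flag check. The same pairing expressed with C11 release/acquire atomics, as a rough userspace sketch:

#include <stdatomic.h>
#include <stdbool.h>

/* Userspace sketch of the publish pattern behind fuse_set_initialized():
 * a release store on the flag orders the earlier stores, an acquire
 * load on the reader side orders the later loads; the C11 analogue of
 * smp_wmb()/smp_rmb() paired around a flag. */
struct conn {
	int max_write;			/* set up before publishing */
	atomic_bool initialized;
};

static void conn_set_initialized(struct conn *c)
{
	/* all stores above become visible before the flag flips */
	atomic_store_explicit(&c->initialized, true, memory_order_release);
}

static bool conn_ready(struct conn *c)
{
	/* loads after this see the fully initialized fields */
	return atomic_load_explicit(&c->initialized, memory_order_acquire);
}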
+7 -24
fs/fuse/dir.c
··· 156 156 args->in.args[0].size = name->len + 1; 157 157 args->in.args[0].value = name->name; 158 158 args->out.numargs = 1; 159 - if (fc->minor < 9) 160 - args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE; 161 - else 162 - args->out.args[0].size = sizeof(struct fuse_entry_out); 159 + args->out.args[0].size = sizeof(struct fuse_entry_out); 163 160 args->out.args[0].value = outarg; 164 161 } 165 162 ··· 419 422 args.in.h.opcode = FUSE_CREATE; 420 423 args.in.h.nodeid = get_node_id(dir); 421 424 args.in.numargs = 2; 422 - args.in.args[0].size = fc->minor < 12 ? sizeof(struct fuse_open_in) : 423 - sizeof(inarg); 425 + args.in.args[0].size = sizeof(inarg); 424 426 args.in.args[0].value = &inarg; 425 427 args.in.args[1].size = entry->d_name.len + 1; 426 428 args.in.args[1].value = entry->d_name.name; 427 429 args.out.numargs = 2; 428 - if (fc->minor < 9) 429 - args.out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE; 430 - else 431 - args.out.args[0].size = sizeof(outentry); 430 + args.out.args[0].size = sizeof(outentry); 432 431 args.out.args[0].value = &outentry; 433 432 args.out.args[1].size = sizeof(outopen); 434 433 args.out.args[1].value = &outopen; ··· 532 539 memset(&outarg, 0, sizeof(outarg)); 533 540 args->in.h.nodeid = get_node_id(dir); 534 541 args->out.numargs = 1; 535 - if (fc->minor < 9) 536 - args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE; 537 - else 538 - args->out.args[0].size = sizeof(outarg); 542 + args->out.args[0].size = sizeof(outarg); 539 543 args->out.args[0].value = &outarg; 540 544 err = fuse_simple_request(fc, args); 541 545 if (err) ··· 582 592 inarg.umask = current_umask(); 583 593 args.in.h.opcode = FUSE_MKNOD; 584 594 args.in.numargs = 2; 585 - args.in.args[0].size = fc->minor < 12 ? FUSE_COMPAT_MKNOD_IN_SIZE : 586 - sizeof(inarg); 595 + args.in.args[0].size = sizeof(inarg); 587 596 args.in.args[0].value = &inarg; 588 597 args.in.args[1].size = entry->d_name.len + 1; 589 598 args.in.args[1].value = entry->d_name.name; ··· 888 899 args.in.args[0].size = sizeof(inarg); 889 900 args.in.args[0].value = &inarg; 890 901 args.out.numargs = 1; 891 - if (fc->minor < 9) 892 - args.out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE; 893 - else 894 - args.out.args[0].size = sizeof(outarg); 902 + args.out.args[0].size = sizeof(outarg); 895 903 args.out.args[0].value = &outarg; 896 904 err = fuse_simple_request(fc, &args); 897 905 if (!err) { ··· 1560 1574 args->in.args[0].size = sizeof(*inarg_p); 1561 1575 args->in.args[0].value = inarg_p; 1562 1576 args->out.numargs = 1; 1563 - if (fc->minor < 9) 1564 - args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE; 1565 - else 1566 - args->out.args[0].size = sizeof(*outarg_p); 1577 + args->out.args[0].size = sizeof(*outarg_p); 1567 1578 args->out.args[0].value = outarg_p; 1568 1579 } 1569 1580
+2
fs/fuse/fuse_i.h
··· 906 906 int fuse_do_setattr(struct inode *inode, struct iattr *attr, 907 907 struct file *file); 908 908 909 + void fuse_set_initialized(struct fuse_conn *fc); 910 + 909 911 #endif /* _FS_FUSE_I_H */
+2 -3
fs/fuse/inode.c
··· 424 424 args.in.h.opcode = FUSE_STATFS; 425 425 args.in.h.nodeid = get_node_id(dentry->d_inode); 426 426 args.out.numargs = 1; 427 - args.out.args[0].size = 428 - fc->minor < 4 ? FUSE_COMPAT_STATFS_SIZE : sizeof(outarg); 427 + args.out.args[0].size = sizeof(outarg); 429 428 args.out.args[0].value = &outarg; 430 429 err = fuse_simple_request(fc, &args); 431 430 if (!err) ··· 897 898 fc->max_write = max_t(unsigned, 4096, fc->max_write); 898 899 fc->conn_init = 1; 899 900 } 900 - fc->initialized = 1; 901 + fuse_set_initialized(fc); 901 902 wake_up_all(&fc->blocked_waitq); 902 903 } 903 904
+2 -1
fs/inode.c
··· 194 194 #ifdef CONFIG_FSNOTIFY 195 195 inode->i_fsnotify_mask = 0; 196 196 #endif 197 - 197 + inode->i_flctx = NULL; 198 198 this_cpu_inc(nr_inodes); 199 199 200 200 return 0; ··· 237 237 BUG_ON(inode_has_buffers(inode)); 238 238 security_inode_free(inode); 239 239 fsnotify_inode_delete(inode); 240 + locks_free_lock_context(inode->i_flctx); 240 241 if (!inode->i_nlink) { 241 242 WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0); 242 243 atomic_long_dec(&inode->i_sb->s_remove_count);
+16 -10
fs/lockd/svcsubs.c
··· 164 164 { 165 165 struct inode *inode = nlmsvc_file_inode(file); 166 166 struct file_lock *fl; 167 + struct file_lock_context *flctx = inode->i_flctx; 167 168 struct nlm_host *lockhost; 168 169 170 + if (!flctx || list_empty_careful(&flctx->flc_posix)) 171 + return 0; 169 172 again: 170 173 file->f_locks = 0; 171 - spin_lock(&inode->i_lock); 172 - for (fl = inode->i_flock; fl; fl = fl->fl_next) { 174 + spin_lock(&flctx->flc_lock); 175 + list_for_each_entry(fl, &flctx->flc_posix, fl_list) { 173 176 if (fl->fl_lmops != &nlmsvc_lock_operations) 174 177 continue; 175 178 ··· 183 180 if (match(lockhost, host)) { 184 181 struct file_lock lock = *fl; 185 182 186 - spin_unlock(&inode->i_lock); 183 + spin_unlock(&flctx->flc_lock); 187 184 lock.fl_type = F_UNLCK; 188 185 lock.fl_start = 0; 189 186 lock.fl_end = OFFSET_MAX; ··· 195 192 goto again; 196 193 } 197 194 } 198 - spin_unlock(&inode->i_lock); 195 + spin_unlock(&flctx->flc_lock); 199 196 200 197 return 0; 201 198 } ··· 226 223 { 227 224 struct inode *inode = nlmsvc_file_inode(file); 228 225 struct file_lock *fl; 226 + struct file_lock_context *flctx = inode->i_flctx; 229 227 230 228 if (file->f_count || !list_empty(&file->f_blocks) || file->f_shares) 231 229 return 1; 232 230 233 - spin_lock(&inode->i_lock); 234 - for (fl = inode->i_flock; fl; fl = fl->fl_next) { 235 - if (fl->fl_lmops == &nlmsvc_lock_operations) { 236 - spin_unlock(&inode->i_lock); 237 - return 1; 231 + if (flctx && !list_empty_careful(&flctx->flc_posix)) { 232 + spin_lock(&flctx->flc_lock); 233 + list_for_each_entry(fl, &flctx->flc_posix, fl_list) { 234 + if (fl->fl_lmops == &nlmsvc_lock_operations) { 235 + spin_unlock(&flctx->flc_lock); 236 + return 1; 237 + } 238 238 } 239 + spin_unlock(&flctx->flc_lock); 239 240 } 240 - spin_unlock(&inode->i_lock); 241 241 file->f_locks = 0; 242 242 return 0; 243 243 }
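Both lockd hunks follow the access pattern this series establishes everywhere: an unlocked list_empty_careful() fast path on the per-type list, then a full walk under flc_lock. A simplified userspace rendering of that shape; the types and field names below are stand-ins, not the kernel's:

#include <pthread.h>
#include <stddef.h>

/* Simplified rendering of the traversal shape used above: a cheap
 * unlocked emptiness check, then a walk of the per-type list under the
 * context spinlock. The types are stand-ins for file_lock_context. */
struct lock_entry {
	struct lock_entry *next;
	int owner;
};

struct lock_ctx {
	pthread_spinlock_t lock;	/* plays the role of flc_lock */
	struct lock_entry *posix;	/* flc_posix */
};

static int ctx_owner_holds_lock(struct lock_ctx *ctx, int owner)
{
	struct lock_entry *e;
	int found = 0;

	if (!ctx || !ctx->posix)	/* unlocked fast path */
		return 0;

	pthread_spin_lock(&ctx->lock);
	for (e = ctx->posix; e; e = e->next) {
		if (e->owner == owner) {
			found = 1;
			break;
		}
	}
	pthread_spin_unlock(&ctx->lock);
	return found;
}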
+307 -262
fs/locks.c
··· 157 157 int leases_enable = 1; 158 158 int lease_break_time = 45; 159 159 160 - #define for_each_lock(inode, lockp) \ 161 - for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next) 162 - 163 160 /* 164 161 * The global file_lock_list is only used for displaying /proc/locks, so we 165 162 * keep a list on each CPU, with each list protected by its own spinlock via 166 163 * the file_lock_lglock. Note that alterations to the list also require that 167 - * the relevant i_lock is held. 164 + * the relevant flc_lock is held. 168 165 */ 169 166 DEFINE_STATIC_LGLOCK(file_lock_lglock); 170 167 static DEFINE_PER_CPU(struct hlist_head, file_lock_list); ··· 189 192 * contrast to those that are acting as records of acquired locks). 190 193 * 191 194 * Note that when we acquire this lock in order to change the above fields, 192 - * we often hold the i_lock as well. In certain cases, when reading the fields 195 + * we often hold the flc_lock as well. In certain cases, when reading the fields 193 196 * protected by this lock, we can skip acquiring it iff we already hold the 194 - * i_lock. 197 + * flc_lock. 195 198 * 196 199 * In particular, adding an entry to the fl_block list requires that you hold 197 - * both the i_lock and the blocked_lock_lock (acquired in that order). Deleting 198 - * an entry from the list however only requires the file_lock_lock. 200 + * both the flc_lock and the blocked_lock_lock (acquired in that order). 201 + * Deleting an entry from the list however only requires the blocked_lock_lock. 199 202 */ 200 203 static DEFINE_SPINLOCK(blocked_lock_lock); 201 204 205 + static struct kmem_cache *flctx_cache __read_mostly; 202 206 static struct kmem_cache *filelock_cache __read_mostly; 207 + 208 + static struct file_lock_context * 209 + locks_get_lock_context(struct inode *inode) 210 + { 211 + struct file_lock_context *new; 212 + 213 + if (likely(inode->i_flctx)) 214 + goto out; 215 + 216 + new = kmem_cache_alloc(flctx_cache, GFP_KERNEL); 217 + if (!new) 218 + goto out; 219 + 220 + spin_lock_init(&new->flc_lock); 221 + INIT_LIST_HEAD(&new->flc_flock); 222 + INIT_LIST_HEAD(&new->flc_posix); 223 + INIT_LIST_HEAD(&new->flc_lease); 224 + 225 + /* 226 + * Assign the pointer if it's not already assigned. If it is, then 227 + * free the context we just allocated.
228 + */ 229 + spin_lock(&inode->i_lock); 230 + if (likely(!inode->i_flctx)) { 231 + inode->i_flctx = new; 232 + new = NULL; 233 + } 234 + spin_unlock(&inode->i_lock); 235 + 236 + if (new) 237 + kmem_cache_free(flctx_cache, new); 238 + out: 239 + return inode->i_flctx; 240 + } 241 + 242 + void 243 + locks_free_lock_context(struct file_lock_context *ctx) 244 + { 245 + if (ctx) { 246 + WARN_ON_ONCE(!list_empty(&ctx->flc_flock)); 247 + WARN_ON_ONCE(!list_empty(&ctx->flc_posix)); 248 + WARN_ON_ONCE(!list_empty(&ctx->flc_lease)); 249 + kmem_cache_free(flctx_cache, ctx); 250 + } 251 + } 203 252 204 253 static void locks_init_lock_heads(struct file_lock *fl) 205 254 { 206 255 INIT_HLIST_NODE(&fl->fl_link); 256 + INIT_LIST_HEAD(&fl->fl_list); 207 257 INIT_LIST_HEAD(&fl->fl_block); 208 258 init_waitqueue_head(&fl->fl_wait); 209 259 } ··· 287 243 void locks_free_lock(struct file_lock *fl) 288 244 { 289 245 BUG_ON(waitqueue_active(&fl->fl_wait)); 246 + BUG_ON(!list_empty(&fl->fl_list)); 290 247 BUG_ON(!list_empty(&fl->fl_block)); 291 248 BUG_ON(!hlist_unhashed(&fl->fl_link)); 292 249 ··· 302 257 struct file_lock *fl; 303 258 304 259 while (!list_empty(dispose)) { 305 - fl = list_first_entry(dispose, struct file_lock, fl_block); 306 - list_del_init(&fl->fl_block); 260 + fl = list_first_entry(dispose, struct file_lock, fl_list); 261 + list_del_init(&fl->fl_list); 307 262 locks_free_lock(fl); 308 263 } 309 264 } ··· 558 513 return fl1->fl_owner == fl2->fl_owner; 559 514 } 560 515 561 - /* Must be called with the i_lock held! */ 516 + /* Must be called with the flc_lock held! */ 562 517 static void locks_insert_global_locks(struct file_lock *fl) 563 518 { 564 519 lg_local_lock(&file_lock_lglock); ··· 567 522 lg_local_unlock(&file_lock_lglock); 568 523 } 569 524 570 - /* Must be called with the i_lock held! */ 525 + /* Must be called with the flc_lock held! */ 571 526 static void locks_delete_global_locks(struct file_lock *fl) 572 527 { 573 528 /* 574 529 * Avoid taking lock if already unhashed. This is safe since this check 575 - * is done while holding the i_lock, and new insertions into the list 530 + * is done while holding the flc_lock, and new insertions into the list 576 531 * also require that it be held. 577 532 */ 578 533 if (hlist_unhashed(&fl->fl_link)) ··· 624 579 * the order they blocked. The documentation doesn't require this but 625 580 * it seems like the reasonable thing to do. 626 581 * 627 - * Must be called with both the i_lock and blocked_lock_lock held. The fl_block 628 - * list itself is protected by the blocked_lock_lock, but by ensuring that the 629 - * i_lock is also held on insertions we can avoid taking the blocked_lock_lock 630 - * in some cases when we see that the fl_block list is empty. 582 + * Must be called with both the flc_lock and blocked_lock_lock held. The 583 + * fl_block list itself is protected by the blocked_lock_lock, but by ensuring 584 + * that the flc_lock is also held on insertions we can avoid taking the 585 + * blocked_lock_lock in some cases when we see that the fl_block list is empty. 631 586 */ 632 587 static void __locks_insert_block(struct file_lock *blocker, 633 588 struct file_lock *waiter) ··· 639 594 locks_insert_global_blocked(waiter); 640 595 } 641 596 642 - /* Must be called with i_lock held. */ 597 + /* Must be called with flc_lock held. */ 643 598 static void locks_insert_block(struct file_lock *blocker, 644 599 struct file_lock *waiter) 645 600 { ··· 651 606 /* 652 607 * Wake up processes blocked waiting for blocker. 
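locks_get_lock_context() above is allocate-then-publish lazy initialization: allocate outside any lock, assign under i_lock only if the pointer is still NULL, and free the allocation when another thread won the race. A compilable userspace rendering of the same shape (a compare-and-swap could replace the lock, at the cost of a fuller memory-ordering argument):

#include <pthread.h>
#include <stdlib.h>

/* Compilable rendering of the allocate-then-publish lazy init above.
 * The unlocked read mirrors the kernel's likely(inode->i_flctx) fast
 * path; strict C11 would want an atomic load there. */
struct ctx { int dummy; };

static pthread_mutex_t publish_lock = PTHREAD_MUTEX_INITIALIZER;
static struct ctx *shared_ctx;

static struct ctx *get_ctx(void)
{
	struct ctx *new;

	if (shared_ctx)			/* common case: already set up */
		return shared_ctx;

	new = malloc(sizeof(*new));	/* allocate outside the lock */
	if (!new)
		return NULL;

	pthread_mutex_lock(&publish_lock);
	if (!shared_ctx) {
		shared_ctx = new;	/* we won the race */
		new = NULL;
	}
	pthread_mutex_unlock(&publish_lock);

	free(new);			/* frees the loser's copy, or NULL */
	return shared_ctx;
}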
653 608 * 654 - * Must be called with the inode->i_lock held! 609 + * Must be called with the inode->flc_lock held! 655 610 */ 656 611 static void locks_wake_up_blocks(struct file_lock *blocker) 657 612 { 658 613 /* 659 614 * Avoid taking global lock if list is empty. This is safe since new 660 - * blocked requests are only added to the list under the i_lock, and 661 - * the i_lock is always held here. Note that removal from the fl_block 662 - * list does not require the i_lock, so we must recheck list_empty() 615 + * blocked requests are only added to the list under the flc_lock, and 616 + * the flc_lock is always held here. Note that removal from the fl_block 617 + * list does not require the flc_lock, so we must recheck list_empty() 663 618 * after acquiring the blocked_lock_lock. 664 619 */ 665 620 if (list_empty(&blocker->fl_block)) ··· 680 635 spin_unlock(&blocked_lock_lock); 681 636 } 682 637 683 - /* Insert file lock fl into an inode's lock list at the position indicated 684 - * by pos. At the same time add the lock to the global file lock list. 685 - * 686 - * Must be called with the i_lock held! 687 - */ 688 - static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl) 638 + static void 639 + locks_insert_lock_ctx(struct file_lock *fl, int *counter, 640 + struct list_head *before) 689 641 { 690 642 fl->fl_nspid = get_pid(task_tgid(current)); 691 - 692 - /* insert into file's list */ 693 - fl->fl_next = *pos; 694 - *pos = fl; 695 - 643 + list_add_tail(&fl->fl_list, before); 644 + ++*counter; 696 645 locks_insert_global_locks(fl); 697 646 } 698 647 699 - /** 700 - * locks_delete_lock - Delete a lock and then free it. 701 - * @thisfl_p: pointer that points to the fl_next field of the previous 702 - * inode->i_flock list entry 703 - * 704 - * Unlink a lock from all lists and free the namespace reference, but don't 705 - * free it yet. Wake up processes that are blocked waiting for this lock and 706 - * notify the FS that the lock has been cleared. 707 - * 708 - * Must be called with the i_lock held! 709 - */ 710 - static void locks_unlink_lock(struct file_lock **thisfl_p) 648 + static void 649 + locks_unlink_lock_ctx(struct file_lock *fl, int *counter) 711 650 { 712 - struct file_lock *fl = *thisfl_p; 713 - 714 651 locks_delete_global_locks(fl); 715 - 716 - *thisfl_p = fl->fl_next; 717 - fl->fl_next = NULL; 718 - 652 + list_del_init(&fl->fl_list); 653 + --*counter; 719 654 if (fl->fl_nspid) { 720 655 put_pid(fl->fl_nspid); 721 656 fl->fl_nspid = NULL; 722 657 } 723 - 724 658 locks_wake_up_blocks(fl); 725 659 } 726 660 727 - /* 728 - * Unlink a lock from all lists and free it. 729 - * 730 - * Must be called with i_lock held! 
731 - */ 732 - static void locks_delete_lock(struct file_lock **thisfl_p, 733 - struct list_head *dispose) 661 + static void 662 + locks_delete_lock_ctx(struct file_lock *fl, int *counter, 663 + struct list_head *dispose) 734 664 { 735 - struct file_lock *fl = *thisfl_p; 736 - 737 - locks_unlink_lock(thisfl_p); 665 + locks_unlink_lock_ctx(fl, counter); 738 666 if (dispose) 739 - list_add(&fl->fl_block, dispose); 667 + list_add(&fl->fl_list, dispose); 740 668 else 741 669 locks_free_lock(fl); 742 670 } ··· 764 746 posix_test_lock(struct file *filp, struct file_lock *fl) 765 747 { 766 748 struct file_lock *cfl; 749 + struct file_lock_context *ctx; 767 750 struct inode *inode = file_inode(filp); 768 751 769 - spin_lock(&inode->i_lock); 770 - for (cfl = file_inode(filp)->i_flock; cfl; cfl = cfl->fl_next) { 771 - if (!IS_POSIX(cfl)) 772 - continue; 773 - if (posix_locks_conflict(fl, cfl)) 774 - break; 775 - } 776 - if (cfl) { 777 - locks_copy_conflock(fl, cfl); 778 - if (cfl->fl_nspid) 779 - fl->fl_pid = pid_vnr(cfl->fl_nspid); 780 - } else 752 + ctx = inode->i_flctx; 753 + if (!ctx || list_empty_careful(&ctx->flc_posix)) { 781 754 fl->fl_type = F_UNLCK; 782 - spin_unlock(&inode->i_lock); 755 + return; 756 + } 757 + 758 + spin_lock(&ctx->flc_lock); 759 + list_for_each_entry(cfl, &ctx->flc_posix, fl_list) { 760 + if (posix_locks_conflict(fl, cfl)) { 761 + locks_copy_conflock(fl, cfl); 762 + if (cfl->fl_nspid) 763 + fl->fl_pid = pid_vnr(cfl->fl_nspid); 764 + goto out; 765 + } 766 + } 767 + fl->fl_type = F_UNLCK; 768 + out: 769 + spin_unlock(&ctx->flc_lock); 783 770 return; 784 771 } 785 772 EXPORT_SYMBOL(posix_test_lock); ··· 868 845 static int flock_lock_file(struct file *filp, struct file_lock *request) 869 846 { 870 847 struct file_lock *new_fl = NULL; 871 - struct file_lock **before; 872 - struct inode * inode = file_inode(filp); 848 + struct file_lock *fl; 849 + struct file_lock_context *ctx; 850 + struct inode *inode = file_inode(filp); 873 851 int error = 0; 874 - int found = 0; 852 + bool found = false; 875 853 LIST_HEAD(dispose); 854 + 855 + ctx = locks_get_lock_context(inode); 856 + if (!ctx) 857 + return -ENOMEM; 876 858 877 859 if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) { 878 860 new_fl = locks_alloc_lock(); ··· 885 857 return -ENOMEM; 886 858 } 887 859 888 - spin_lock(&inode->i_lock); 860 + spin_lock(&ctx->flc_lock); 889 861 if (request->fl_flags & FL_ACCESS) 890 862 goto find_conflict; 891 863 892 - for_each_lock(inode, before) { 893 - struct file_lock *fl = *before; 894 - if (IS_POSIX(fl)) 895 - break; 896 - if (IS_LEASE(fl)) 897 - continue; 864 + list_for_each_entry(fl, &ctx->flc_flock, fl_list) { 898 865 if (filp != fl->fl_file) 899 866 continue; 900 867 if (request->fl_type == fl->fl_type) 901 868 goto out; 902 - found = 1; 903 - locks_delete_lock(before, &dispose); 869 + found = true; 870 + locks_delete_lock_ctx(fl, &ctx->flc_flock_cnt, &dispose); 904 871 break; 905 872 } 906 873 ··· 910 887 * give it the opportunity to lock the file. 
911 888 */ 912 889 if (found) { 913 - spin_unlock(&inode->i_lock); 890 + spin_unlock(&ctx->flc_lock); 914 891 cond_resched(); 915 - spin_lock(&inode->i_lock); 892 + spin_lock(&ctx->flc_lock); 916 893 } 917 894 918 895 find_conflict: 919 - for_each_lock(inode, before) { 920 - struct file_lock *fl = *before; 921 - if (IS_POSIX(fl)) 922 - break; 923 - if (IS_LEASE(fl)) 924 - continue; 896 + list_for_each_entry(fl, &ctx->flc_flock, fl_list) { 925 897 if (!flock_locks_conflict(request, fl)) 926 898 continue; 927 899 error = -EAGAIN; ··· 929 911 if (request->fl_flags & FL_ACCESS) 930 912 goto out; 931 913 locks_copy_lock(new_fl, request); 932 - locks_insert_lock(before, new_fl); 914 + locks_insert_lock_ctx(new_fl, &ctx->flc_flock_cnt, &ctx->flc_flock); 933 915 new_fl = NULL; 934 916 error = 0; 935 917 936 918 out: 937 - spin_unlock(&inode->i_lock); 919 + spin_unlock(&ctx->flc_lock); 938 920 if (new_fl) 939 921 locks_free_lock(new_fl); 940 922 locks_dispose_list(&dispose); ··· 943 925 944 926 static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock) 945 927 { 946 - struct file_lock *fl; 928 + struct file_lock *fl, *tmp; 947 929 struct file_lock *new_fl = NULL; 948 930 struct file_lock *new_fl2 = NULL; 949 931 struct file_lock *left = NULL; 950 932 struct file_lock *right = NULL; 951 - struct file_lock **before; 933 + struct file_lock_context *ctx; 952 934 int error; 953 935 bool added = false; 954 936 LIST_HEAD(dispose); 937 + 938 + ctx = locks_get_lock_context(inode); 939 + if (!ctx) 940 + return -ENOMEM; 955 941 956 942 /* 957 943 * We may need two file_lock structures for this operation, ··· 970 948 new_fl2 = locks_alloc_lock(); 971 949 } 972 950 973 - spin_lock(&inode->i_lock); 951 + spin_lock(&ctx->flc_lock); 974 952 /* 975 953 * New lock request. Walk all POSIX locks and look for conflicts. If 976 954 * there are any, either return error or put the request on the 977 955 * blocker's list of waiters and the global blocked_hash. 978 956 */ 979 957 if (request->fl_type != F_UNLCK) { 980 - for_each_lock(inode, before) { 981 - fl = *before; 958 + list_for_each_entry(fl, &ctx->flc_posix, fl_list) { 982 959 if (!IS_POSIX(fl)) 983 960 continue; 984 961 if (!posix_locks_conflict(request, fl)) ··· 1007 986 if (request->fl_flags & FL_ACCESS) 1008 987 goto out; 1009 988 1010 - /* 1011 - * Find the first old lock with the same owner as the new lock. 1012 - */ 1013 - 1014 - before = &inode->i_flock; 1015 - 1016 - /* First skip locks owned by other processes. */ 1017 - while ((fl = *before) && (!IS_POSIX(fl) || 1018 - !posix_same_owner(request, fl))) { 1019 - before = &fl->fl_next; 989 + /* Find the first old lock with the same owner as the new lock */ 990 + list_for_each_entry(fl, &ctx->flc_posix, fl_list) { 991 + if (posix_same_owner(request, fl)) 992 + break; 1020 993 } 1021 994 1022 995 /* Process locks with this owner. */ 1023 - while ((fl = *before) && posix_same_owner(request, fl)) { 1024 - /* Detect adjacent or overlapping regions (if same lock type) 1025 - */ 996 + list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, fl_list) { 997 + if (!posix_same_owner(request, fl)) 998 + break; 999 + 1000 + /* Detect adjacent or overlapping regions (if same lock type) */ 1026 1001 if (request->fl_type == fl->fl_type) { 1027 1002 /* In all comparisons of start vs end, use 1028 1003 * "start - 1" rather than "end + 1". If end 1029 1004 * is OFFSET_MAX, end + 1 will become negative. 
1030 1005 */ 1031 1006 if (fl->fl_end < request->fl_start - 1) 1032 - goto next_lock; 1007 + continue; 1033 1008 /* If the next lock in the list has entirely bigger 1034 1009 * addresses than the new one, insert the lock here. 1035 1010 */ ··· 1046 1029 else 1047 1030 request->fl_end = fl->fl_end; 1048 1031 if (added) { 1049 - locks_delete_lock(before, &dispose); 1032 + locks_delete_lock_ctx(fl, &ctx->flc_posix_cnt, 1033 + &dispose); 1050 1034 continue; 1051 1035 } 1052 1036 request = fl; 1053 1037 added = true; 1054 - } 1055 - else { 1038 + } else { 1056 1039 /* Processing for different lock types is a bit 1057 1040 * more complex. 1058 1041 */ 1059 1042 if (fl->fl_end < request->fl_start) 1060 - goto next_lock; 1043 + continue; 1061 1044 if (fl->fl_start > request->fl_end) 1062 1045 break; 1063 1046 if (request->fl_type == F_UNLCK) ··· 1076 1059 * one (This may happen several times). 1077 1060 */ 1078 1061 if (added) { 1079 - locks_delete_lock(before, &dispose); 1062 + locks_delete_lock_ctx(fl, 1063 + &ctx->flc_posix_cnt, &dispose); 1080 1064 continue; 1081 1065 } 1082 1066 /* ··· 1093 1075 locks_copy_lock(new_fl, request); 1094 1076 request = new_fl; 1095 1077 new_fl = NULL; 1096 - locks_delete_lock(before, &dispose); 1097 - locks_insert_lock(before, request); 1078 + locks_insert_lock_ctx(request, 1079 + &ctx->flc_posix_cnt, &fl->fl_list); 1080 + locks_delete_lock_ctx(fl, 1081 + &ctx->flc_posix_cnt, &dispose); 1098 1082 added = true; 1099 1083 } 1100 1084 } 1101 - /* Go on to next lock. 1102 - */ 1103 - next_lock: 1104 - before = &fl->fl_next; 1105 1085 } 1106 1086 1107 1087 /* ··· 1124 1108 goto out; 1125 1109 } 1126 1110 locks_copy_lock(new_fl, request); 1127 - locks_insert_lock(before, new_fl); 1111 + locks_insert_lock_ctx(new_fl, &ctx->flc_posix_cnt, 1112 + &fl->fl_list); 1128 1113 new_fl = NULL; 1129 1114 } 1130 1115 if (right) { ··· 1136 1119 left = new_fl2; 1137 1120 new_fl2 = NULL; 1138 1121 locks_copy_lock(left, right); 1139 - locks_insert_lock(before, left); 1122 + locks_insert_lock_ctx(left, &ctx->flc_posix_cnt, 1123 + &fl->fl_list); 1140 1124 } 1141 1125 right->fl_start = request->fl_end + 1; 1142 1126 locks_wake_up_blocks(right); ··· 1147 1129 locks_wake_up_blocks(left); 1148 1130 } 1149 1131 out: 1150 - spin_unlock(&inode->i_lock); 1132 + spin_unlock(&ctx->flc_lock); 1151 1133 /* 1152 1134 * Free any unused locks. 1153 1135 */ ··· 1217 1199 */ 1218 1200 int locks_mandatory_locked(struct file *file) 1219 1201 { 1202 + int ret; 1220 1203 struct inode *inode = file_inode(file); 1204 + struct file_lock_context *ctx; 1221 1205 struct file_lock *fl; 1206 + 1207 + ctx = inode->i_flctx; 1208 + if (!ctx || list_empty_careful(&ctx->flc_posix)) 1209 + return 0; 1222 1210 1223 1211 /* 1224 1212 * Search the lock list for this inode for any POSIX locks. 1225 1213 */ 1226 - spin_lock(&inode->i_lock); 1227 - for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) { 1228 - if (!IS_POSIX(fl)) 1229 - continue; 1214 + spin_lock(&ctx->flc_lock); 1215 + ret = 0; 1216 + list_for_each_entry(fl, &ctx->flc_posix, fl_list) { 1230 1217 if (fl->fl_owner != current->files && 1231 - fl->fl_owner != file) 1218 + fl->fl_owner != file) { 1219 + ret = -EAGAIN; 1232 1220 break; 1221 + } 1233 1222 } 1234 - spin_unlock(&inode->i_lock); 1235 - return fl ? 
-EAGAIN : 0; 1223 + spin_unlock(&ctx->flc_lock); 1224 + return ret; 1236 1225 } 1237 1226 1238 1227 /** ··· 1319 1294 } 1320 1295 1321 1296 /* We already had a lease on this file; just change its type */ 1322 - int lease_modify(struct file_lock **before, int arg, struct list_head *dispose) 1297 + int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose) 1323 1298 { 1324 - struct file_lock *fl = *before; 1299 + struct file_lock_context *flctx; 1325 1300 int error = assign_type(fl, arg); 1326 1301 1327 1302 if (error) ··· 1331 1306 if (arg == F_UNLCK) { 1332 1307 struct file *filp = fl->fl_file; 1333 1308 1309 + flctx = file_inode(filp)->i_flctx; 1334 1310 f_delown(filp); 1335 1311 filp->f_owner.signum = 0; 1336 1312 fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync); ··· 1339 1313 printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync); 1340 1314 fl->fl_fasync = NULL; 1341 1315 } 1342 - locks_delete_lock(before, dispose); 1316 + locks_delete_lock_ctx(fl, &flctx->flc_lease_cnt, dispose); 1343 1317 } 1344 1318 return 0; 1345 1319 } ··· 1355 1329 1356 1330 static void time_out_leases(struct inode *inode, struct list_head *dispose) 1357 1331 { 1358 - struct file_lock **before; 1359 - struct file_lock *fl; 1332 + struct file_lock_context *ctx = inode->i_flctx; 1333 + struct file_lock *fl, *tmp; 1360 1334 1361 - lockdep_assert_held(&inode->i_lock); 1335 + lockdep_assert_held(&ctx->flc_lock); 1362 1336 1363 - before = &inode->i_flock; 1364 - while ((fl = *before) && IS_LEASE(fl) && lease_breaking(fl)) { 1337 + list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) { 1365 1338 trace_time_out_leases(inode, fl); 1366 1339 if (past_time(fl->fl_downgrade_time)) 1367 - lease_modify(before, F_RDLCK, dispose); 1340 + lease_modify(fl, F_RDLCK, dispose); 1368 1341 if (past_time(fl->fl_break_time)) 1369 - lease_modify(before, F_UNLCK, dispose); 1370 - if (fl == *before) /* lease_modify may have freed fl */ 1371 - before = &fl->fl_next; 1342 + lease_modify(fl, F_UNLCK, dispose); 1372 1343 } 1373 1344 } 1374 1345 ··· 1379 1356 static bool 1380 1357 any_leases_conflict(struct inode *inode, struct file_lock *breaker) 1381 1358 { 1359 + struct file_lock_context *ctx = inode->i_flctx; 1382 1360 struct file_lock *fl; 1383 1361 1384 - lockdep_assert_held(&inode->i_lock); 1362 + lockdep_assert_held(&ctx->flc_lock); 1385 1363 1386 - for (fl = inode->i_flock ; fl && IS_LEASE(fl); fl = fl->fl_next) { 1364 + list_for_each_entry(fl, &ctx->flc_lease, fl_list) { 1387 1365 if (leases_conflict(fl, breaker)) 1388 1366 return true; 1389 1367 } ··· 1408 1384 { 1409 1385 int error = 0; 1410 1386 struct file_lock *new_fl; 1411 - struct file_lock *fl, **before; 1387 + struct file_lock_context *ctx = inode->i_flctx; 1388 + struct file_lock *fl; 1412 1389 unsigned long break_time; 1413 1390 int want_write = (mode & O_ACCMODE) != O_RDONLY; 1414 1391 LIST_HEAD(dispose); ··· 1419 1394 return PTR_ERR(new_fl); 1420 1395 new_fl->fl_flags = type; 1421 1396 1422 - spin_lock(&inode->i_lock); 1397 + /* typically we will check that ctx is non-NULL before calling */ 1398 + if (!ctx) { 1399 + WARN_ON_ONCE(1); 1400 + return error; 1401 + } 1402 + 1403 + spin_lock(&ctx->flc_lock); 1423 1404 1424 1405 time_out_leases(inode, &dispose); 1425 1406 ··· 1439 1408 break_time++; /* so that 0 means no break time */ 1440 1409 } 1441 1410 1442 - for (before = &inode->i_flock; 1443 - ((fl = *before) != NULL) && IS_LEASE(fl); 1444 - before = &fl->fl_next) { 1411 + list_for_each_entry(fl, &ctx->flc_lease, fl_list) { 1445 1412 if 
(!leases_conflict(fl, new_fl)) 1446 1413 continue; 1447 1414 if (want_write) { ··· 1448 1419 fl->fl_flags |= FL_UNLOCK_PENDING; 1449 1420 fl->fl_break_time = break_time; 1450 1421 } else { 1451 - if (lease_breaking(inode->i_flock)) 1422 + if (lease_breaking(fl)) 1452 1423 continue; 1453 1424 fl->fl_flags |= FL_DOWNGRADE_PENDING; 1454 1425 fl->fl_downgrade_time = break_time; 1455 1426 } 1456 1427 if (fl->fl_lmops->lm_break(fl)) 1457 - locks_delete_lock(before, &dispose); 1428 + locks_delete_lock_ctx(fl, &ctx->flc_lease_cnt, 1429 + &dispose); 1458 1430 } 1459 1431 1460 - fl = inode->i_flock; 1461 - if (!fl || !IS_LEASE(fl)) 1432 + if (list_empty(&ctx->flc_lease)) 1462 1433 goto out; 1463 1434 1464 1435 if (mode & O_NONBLOCK) { ··· 1468 1439 } 1469 1440 1470 1441 restart: 1471 - break_time = inode->i_flock->fl_break_time; 1442 + fl = list_first_entry(&ctx->flc_lease, struct file_lock, fl_list); 1443 + break_time = fl->fl_break_time; 1472 1444 if (break_time != 0) 1473 1445 break_time -= jiffies; 1474 1446 if (break_time == 0) 1475 1447 break_time++; 1476 - locks_insert_block(inode->i_flock, new_fl); 1448 + locks_insert_block(fl, new_fl); 1477 1449 trace_break_lease_block(inode, new_fl); 1478 - spin_unlock(&inode->i_lock); 1450 + spin_unlock(&ctx->flc_lock); 1479 1451 locks_dispose_list(&dispose); 1480 1452 error = wait_event_interruptible_timeout(new_fl->fl_wait, 1481 1453 !new_fl->fl_next, break_time); 1482 - spin_lock(&inode->i_lock); 1454 + spin_lock(&ctx->flc_lock); 1483 1455 trace_break_lease_unblock(inode, new_fl); 1484 1456 locks_delete_block(new_fl); 1485 1457 if (error >= 0) { ··· 1492 1462 time_out_leases(inode, &dispose); 1493 1463 if (any_leases_conflict(inode, new_fl)) 1494 1464 goto restart; 1495 - 1496 1465 error = 0; 1497 1466 } 1498 - 1499 1467 out: 1500 - spin_unlock(&inode->i_lock); 1468 + spin_unlock(&ctx->flc_lock); 1501 1469 locks_dispose_list(&dispose); 1502 1470 locks_free_lock(new_fl); 1503 1471 return error; ··· 1515 1487 void lease_get_mtime(struct inode *inode, struct timespec *time) 1516 1488 { 1517 1489 bool has_lease = false; 1518 - struct file_lock *flock; 1490 + struct file_lock_context *ctx = inode->i_flctx; 1491 + struct file_lock *fl; 1519 1492 1520 - if (inode->i_flock) { 1521 - spin_lock(&inode->i_lock); 1522 - flock = inode->i_flock; 1523 - if (flock && IS_LEASE(flock) && (flock->fl_type == F_WRLCK)) 1524 - has_lease = true; 1525 - spin_unlock(&inode->i_lock); 1493 + if (ctx && !list_empty_careful(&ctx->flc_lease)) { 1494 + spin_lock(&ctx->flc_lock); 1495 + if (!list_empty(&ctx->flc_lease)) { 1496 + fl = list_first_entry(&ctx->flc_lease, 1497 + struct file_lock, fl_list); 1498 + if (fl->fl_type == F_WRLCK) 1499 + has_lease = true; 1500 + } 1501 + spin_unlock(&ctx->flc_lock); 1526 1502 } 1527 1503 1528 1504 if (has_lease) ··· 1564 1532 { 1565 1533 struct file_lock *fl; 1566 1534 struct inode *inode = file_inode(filp); 1535 + struct file_lock_context *ctx = inode->i_flctx; 1567 1536 int type = F_UNLCK; 1568 1537 LIST_HEAD(dispose); 1569 1538 1570 - spin_lock(&inode->i_lock); 1571 - time_out_leases(file_inode(filp), &dispose); 1572 - for (fl = file_inode(filp)->i_flock; fl && IS_LEASE(fl); 1573 - fl = fl->fl_next) { 1574 - if (fl->fl_file == filp) { 1539 + if (ctx && !list_empty_careful(&ctx->flc_lease)) { 1540 + spin_lock(&ctx->flc_lock); 1541 + time_out_leases(file_inode(filp), &dispose); 1542 + list_for_each_entry(fl, &ctx->flc_lease, fl_list) { 1543 + if (fl->fl_file != filp) 1544 + continue; 1575 1545 type = target_leasetype(fl); 1576 1546 break; 1577 
1547 } 1548 + spin_unlock(&ctx->flc_lock); 1549 + locks_dispose_list(&dispose); 1578 1550 } 1579 - spin_unlock(&inode->i_lock); 1580 - locks_dispose_list(&dispose); 1581 1551 return type; 1582 1552 } 1583 1553 ··· 1612 1578 static int 1613 1579 generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **priv) 1614 1580 { 1615 - struct file_lock *fl, **before, **my_before = NULL, *lease; 1581 + struct file_lock *fl, *my_fl = NULL, *lease; 1616 1582 struct dentry *dentry = filp->f_path.dentry; 1617 1583 struct inode *inode = dentry->d_inode; 1584 + struct file_lock_context *ctx; 1618 1585 bool is_deleg = (*flp)->fl_flags & FL_DELEG; 1619 1586 int error; 1620 1587 LIST_HEAD(dispose); 1621 1588 1622 1589 lease = *flp; 1623 1590 trace_generic_add_lease(inode, lease); 1591 + 1592 + ctx = locks_get_lock_context(inode); 1593 + if (!ctx) 1594 + return -ENOMEM; 1624 1595 1625 1596 /* 1626 1597 * In the delegation case we need mutual exclusion with ··· 1645 1606 return -EINVAL; 1646 1607 } 1647 1608 1648 - spin_lock(&inode->i_lock); 1609 + spin_lock(&ctx->flc_lock); 1649 1610 time_out_leases(inode, &dispose); 1650 1611 error = check_conflicting_open(dentry, arg); 1651 1612 if (error) ··· 1660 1621 * except for this filp. 1661 1622 */ 1662 1623 error = -EAGAIN; 1663 - for (before = &inode->i_flock; 1664 - ((fl = *before) != NULL) && IS_LEASE(fl); 1665 - before = &fl->fl_next) { 1624 + list_for_each_entry(fl, &ctx->flc_lease, fl_list) { 1666 1625 if (fl->fl_file == filp) { 1667 - my_before = before; 1626 + my_fl = fl; 1668 1627 continue; 1669 1628 } 1629 + 1670 1630 /* 1671 1631 * No exclusive leases if someone else has a lease on 1672 1632 * this file: ··· 1680 1642 goto out; 1681 1643 } 1682 1644 1683 - if (my_before != NULL) { 1684 - lease = *my_before; 1685 - error = lease->fl_lmops->lm_change(my_before, arg, &dispose); 1645 + if (my_fl != NULL) { 1646 + error = lease->fl_lmops->lm_change(my_fl, arg, &dispose); 1686 1647 if (error) 1687 1648 goto out; 1688 1649 goto out_setup; ··· 1691 1654 if (!leases_enable) 1692 1655 goto out; 1693 1656 1694 - locks_insert_lock(before, lease); 1657 + locks_insert_lock_ctx(lease, &ctx->flc_lease_cnt, &ctx->flc_lease); 1695 1658 /* 1696 1659 * The check in break_lease() is lockless. 
It's possible for another 1697 1660 * open to race in after we did the earlier check for a conflicting ··· 1703 1666 */ 1704 1667 smp_mb(); 1705 1668 error = check_conflicting_open(dentry, arg); 1706 - if (error) 1707 - goto out_unlink; 1669 + if (error) { 1670 + locks_unlink_lock_ctx(lease, &ctx->flc_lease_cnt); 1671 + goto out; 1672 + } 1708 1673 1709 1674 out_setup: 1710 1675 if (lease->fl_lmops->lm_setup) 1711 1676 lease->fl_lmops->lm_setup(lease, priv); 1712 1677 out: 1713 - spin_unlock(&inode->i_lock); 1678 + spin_unlock(&ctx->flc_lock); 1714 1679 locks_dispose_list(&dispose); 1715 1680 if (is_deleg) 1716 1681 mutex_unlock(&inode->i_mutex); 1717 - if (!error && !my_before) 1682 + if (!error && !my_fl) 1718 1683 *flp = NULL; 1719 1684 return error; 1720 - out_unlink: 1721 - locks_unlink_lock(before); 1722 - goto out; 1723 1685 } 1724 1686 1725 1687 static int generic_delete_lease(struct file *filp) 1726 1688 { 1727 1689 int error = -EAGAIN; 1728 - struct file_lock *fl, **before; 1690 + struct file_lock *fl, *victim = NULL; 1729 1691 struct dentry *dentry = filp->f_path.dentry; 1730 1692 struct inode *inode = dentry->d_inode; 1693 + struct file_lock_context *ctx = inode->i_flctx; 1731 1694 LIST_HEAD(dispose); 1732 1695 1733 - spin_lock(&inode->i_lock); 1734 - time_out_leases(inode, &dispose); 1735 - for (before = &inode->i_flock; 1736 - ((fl = *before) != NULL) && IS_LEASE(fl); 1737 - before = &fl->fl_next) { 1738 - if (fl->fl_file == filp) 1696 + if (!ctx) { 1697 + trace_generic_delete_lease(inode, NULL); 1698 + return error; 1699 + } 1700 + 1701 + spin_lock(&ctx->flc_lock); 1702 + list_for_each_entry(fl, &ctx->flc_lease, fl_list) { 1703 + if (fl->fl_file == filp) { 1704 + victim = fl; 1739 1705 break; 1706 + } 1740 1707 } 1741 1708 trace_generic_delete_lease(inode, fl); 1742 - if (fl) 1743 - error = fl->fl_lmops->lm_change(before, F_UNLCK, &dispose); 1744 - spin_unlock(&inode->i_lock); 1709 + if (victim) 1710 + error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose); 1711 + spin_unlock(&ctx->flc_lock); 1745 1712 locks_dispose_list(&dispose); 1746 1713 return error; 1747 1714 } ··· 2212 2171 */ 2213 2172 /* 2214 2173 * we need that spin_lock here - it prevents reordering between 2215 - * update of inode->i_flock and check for it done in close(). 2174 + * update of i_flctx->flc_posix and check for it done in close(). 2216 2175 * rcu_read_lock() wouldn't do. 2217 2176 */ 2218 2177 spin_lock(&current->files->file_lock); ··· 2372 2331 void locks_remove_posix(struct file *filp, fl_owner_t owner) 2373 2332 { 2374 2333 struct file_lock lock; 2334 + struct file_lock_context *ctx = file_inode(filp)->i_flctx; 2375 2335 2376 2336 /* 2377 2337 * If there are no locks held on this file, we don't need to call 2378 2338 * posix_lock_file(). Another process could be setting a lock on this 2379 2339 * file at the same time, but we wouldn't remove that lock anyway. 
2380 2340 */ 2381 - if (!file_inode(filp)->i_flock) 2341 + if (!ctx || list_empty(&ctx->flc_posix)) 2382 2342 return; 2383 2343 2384 2344 lock.fl_type = F_UNLCK; ··· 2400 2358 2401 2359 EXPORT_SYMBOL(locks_remove_posix); 2402 2360 2361 + /* The i_flctx must be valid when calling into here */ 2362 + static void 2363 + locks_remove_flock(struct file *filp) 2364 + { 2365 + struct file_lock fl = { 2366 + .fl_owner = filp, 2367 + .fl_pid = current->tgid, 2368 + .fl_file = filp, 2369 + .fl_flags = FL_FLOCK, 2370 + .fl_type = F_UNLCK, 2371 + .fl_end = OFFSET_MAX, 2372 + }; 2373 + struct file_lock_context *flctx = file_inode(filp)->i_flctx; 2374 + 2375 + if (list_empty(&flctx->flc_flock)) 2376 + return; 2377 + 2378 + if (filp->f_op->flock) 2379 + filp->f_op->flock(filp, F_SETLKW, &fl); 2380 + else 2381 + flock_lock_file(filp, &fl); 2382 + 2383 + if (fl.fl_ops && fl.fl_ops->fl_release_private) 2384 + fl.fl_ops->fl_release_private(&fl); 2385 + } 2386 + 2387 + /* The i_flctx must be valid when calling into here */ 2388 + static void 2389 + locks_remove_lease(struct file *filp) 2390 + { 2391 + struct inode *inode = file_inode(filp); 2392 + struct file_lock_context *ctx = inode->i_flctx; 2393 + struct file_lock *fl, *tmp; 2394 + LIST_HEAD(dispose); 2395 + 2396 + if (list_empty(&ctx->flc_lease)) 2397 + return; 2398 + 2399 + spin_lock(&ctx->flc_lock); 2400 + list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) 2401 + lease_modify(fl, F_UNLCK, &dispose); 2402 + spin_unlock(&ctx->flc_lock); 2403 + locks_dispose_list(&dispose); 2404 + } 2405 + 2403 2406 /* 2404 2407 * This function is called on the last close of an open file. 2405 2408 */ 2406 2409 void locks_remove_file(struct file *filp) 2407 2410 { 2408 - struct inode * inode = file_inode(filp); 2409 - struct file_lock *fl; 2410 - struct file_lock **before; 2411 - LIST_HEAD(dispose); 2412 - 2413 - if (!inode->i_flock) 2411 + if (!file_inode(filp)->i_flctx) 2414 2412 return; 2415 2413 2414 + /* remove any OFD locks */ 2416 2415 locks_remove_posix(filp, filp); 2417 2416 2418 - if (filp->f_op->flock) { 2419 - struct file_lock fl = { 2420 - .fl_owner = filp, 2421 - .fl_pid = current->tgid, 2422 - .fl_file = filp, 2423 - .fl_flags = FL_FLOCK, 2424 - .fl_type = F_UNLCK, 2425 - .fl_end = OFFSET_MAX, 2426 - }; 2427 - filp->f_op->flock(filp, F_SETLKW, &fl); 2428 - if (fl.fl_ops && fl.fl_ops->fl_release_private) 2429 - fl.fl_ops->fl_release_private(&fl); 2430 - } 2417 + /* remove flock locks */ 2418 + locks_remove_flock(filp); 2431 2419 2432 - spin_lock(&inode->i_lock); 2433 - before = &inode->i_flock; 2434 - 2435 - while ((fl = *before) != NULL) { 2436 - if (fl->fl_file == filp) { 2437 - if (IS_LEASE(fl)) { 2438 - lease_modify(before, F_UNLCK, &dispose); 2439 - continue; 2440 - } 2441 - 2442 - /* 2443 - * There's a leftover lock on the list of a type that 2444 - * we didn't expect to see. Most likely a classic 2445 - * POSIX lock that ended up not getting released 2446 - * properly, or that raced onto the list somehow. Log 2447 - * some info about it and then just remove it from 2448 - * the list. 
2449 - */ 2450 - WARN(!IS_FLOCK(fl), 2451 - "leftover lock: dev=%u:%u ino=%lu type=%hhd flags=0x%x start=%lld end=%lld\n", 2452 - MAJOR(inode->i_sb->s_dev), 2453 - MINOR(inode->i_sb->s_dev), inode->i_ino, 2454 - fl->fl_type, fl->fl_flags, 2455 - fl->fl_start, fl->fl_end); 2456 - 2457 - locks_delete_lock(before, &dispose); 2458 - continue; 2459 - } 2460 - before = &fl->fl_next; 2461 - } 2462 - spin_unlock(&inode->i_lock); 2463 - locks_dispose_list(&dispose); 2420 + /* remove any leases */ 2421 + locks_remove_lease(filp); 2464 2422 } 2465 2423 2466 2424 /** ··· 2662 2620 static int __init filelock_init(void) 2663 2621 { 2664 2622 int i; 2623 + 2624 + flctx_cache = kmem_cache_create("file_lock_ctx", 2625 + sizeof(struct file_lock_context), 0, SLAB_PANIC, NULL); 2665 2626 2666 2627 filelock_cache = kmem_cache_create("file_lock_cache", 2667 2628 sizeof(struct file_lock), 0, SLAB_PANIC, NULL);
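The fs/locks.c hunks above carry the core of this series: the single inode->i_flock list, which mixed leases, flock and POSIX locks under inode->i_lock, becomes a per-inode struct file_lock_context with one dedicated list per lock type, guarded by its own ctx->flc_lock. A minimal sketch of the traversal pattern the rest of the series converts callers to; the wrapper function is illustrative only, but it uses nothing beyond the fields this commit introduces:

#include <linux/fs.h>
#include <linux/list.h>

/* Illustrative: count the POSIX locks held by @filp on its inode. */
static int count_posix_locks(struct file *filp)
{
	struct file_lock_context *ctx = file_inode(filp)->i_flctx;
	struct file_lock *fl;
	int n = 0;

	/* lockless fast path: most inodes never grow a lock context */
	if (!ctx || list_empty_careful(&ctx->flc_posix))
		return 0;

	spin_lock(&ctx->flc_lock);
	list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
		/* everything on flc_posix is FL_POSIX by construction, so
		 * the old IS_POSIX()/IS_LEASE() filtering disappears */
		if (fl->fl_file == filp)
			n++;
	}
	spin_unlock(&ctx->flc_lock);
	return n;
}

The nfsd check_for_locks() hunk below follows almost exactly this shape.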
+14 -9
fs/nfs/delegation.c
··· 85 85 { 86 86 struct inode *inode = state->inode; 87 87 struct file_lock *fl; 88 + struct file_lock_context *flctx = inode->i_flctx; 89 + struct list_head *list; 88 90 int status = 0; 89 91 90 - if (inode->i_flock == NULL) 92 + if (flctx == NULL) 91 93 goto out; 92 94 93 - /* Protect inode->i_flock using the i_lock */ 94 - spin_lock(&inode->i_lock); 95 - for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) { 96 - if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK))) 97 - continue; 95 + list = &flctx->flc_posix; 96 + spin_lock(&flctx->flc_lock); 97 + restart: 98 + list_for_each_entry(fl, list, fl_list) { 98 99 if (nfs_file_open_context(fl->fl_file) != ctx) 99 100 continue; 100 - spin_unlock(&inode->i_lock); 101 + spin_unlock(&flctx->flc_lock); 101 102 status = nfs4_lock_delegation_recall(fl, state, stateid); 102 103 if (status < 0) 103 104 goto out; 104 - spin_lock(&inode->i_lock); 105 + spin_lock(&flctx->flc_lock); 105 106 } 106 - spin_unlock(&inode->i_lock); 107 + if (list == &flctx->flc_posix) { 108 + list = &flctx->flc_flock; 109 + goto restart; 110 + } 111 + spin_unlock(&flctx->flc_lock); 107 112 out: 108 113 return status; 109 114 }
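nfs_delegation_claim_locks() now has to visit two lists instead of filtering one mixed list, and it does so with a single loop that swaps the list head and jumps back to restart. Note that flc_lock is dropped around nfs4_lock_delegation_recall(), which can sleep, so the walk relies on the current entry staying on the list across the unlock, just as the old i_lock-based loop did. The idiom, reduced to a sketch with a caller-supplied callback standing in for the real work:

static void walk_byte_range_locks(struct file_lock_context *flctx,
				  void (*cb)(struct file_lock *))
{
	struct list_head *list = &flctx->flc_posix;
	struct file_lock *fl;

	spin_lock(&flctx->flc_lock);
restart:
	list_for_each_entry(fl, list, fl_list) {
		spin_unlock(&flctx->flc_lock);	/* cb may sleep */
		cb(fl);
		spin_lock(&flctx->flc_lock);
	}
	if (list == &flctx->flc_posix) {
		list = &flctx->flc_flock;	/* second pass: flock locks */
		goto restart;
	}
	spin_unlock(&flctx->flc_lock);
}

fs/nfs/nfs4state.c below repeats the same two-pass walk for lock recovery.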
+38 -32
fs/nfs/nfs4state.c
··· 1366 1366 struct nfs_inode *nfsi = NFS_I(inode); 1367 1367 struct file_lock *fl; 1368 1368 int status = 0; 1369 + struct file_lock_context *flctx = inode->i_flctx; 1370 + struct list_head *list; 1369 1371 1370 - if (inode->i_flock == NULL) 1372 + if (flctx == NULL) 1371 1373 return 0; 1374 + 1375 + list = &flctx->flc_posix; 1372 1376 1373 1377 /* Guard against delegation returns and new lock/unlock calls */ 1374 1378 down_write(&nfsi->rwsem); 1375 - /* Protect inode->i_flock using the BKL */ 1376 - spin_lock(&inode->i_lock); 1377 - for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) { 1378 - if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK))) 1379 - continue; 1379 + spin_lock(&flctx->flc_lock); 1380 + restart: 1381 + list_for_each_entry(fl, list, fl_list) { 1380 1382 if (nfs_file_open_context(fl->fl_file)->state != state) 1381 1383 continue; 1382 - spin_unlock(&inode->i_lock); 1384 + spin_unlock(&flctx->flc_lock); 1383 1385 status = ops->recover_lock(state, fl); 1384 1386 switch (status) { 1385 - case 0: 1386 - break; 1387 - case -ESTALE: 1388 - case -NFS4ERR_ADMIN_REVOKED: 1389 - case -NFS4ERR_STALE_STATEID: 1390 - case -NFS4ERR_BAD_STATEID: 1391 - case -NFS4ERR_EXPIRED: 1392 - case -NFS4ERR_NO_GRACE: 1393 - case -NFS4ERR_STALE_CLIENTID: 1394 - case -NFS4ERR_BADSESSION: 1395 - case -NFS4ERR_BADSLOT: 1396 - case -NFS4ERR_BAD_HIGH_SLOT: 1397 - case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 1398 - goto out; 1399 - default: 1400 - printk(KERN_ERR "NFS: %s: unhandled error %d\n", 1401 - __func__, status); 1402 - case -ENOMEM: 1403 - case -NFS4ERR_DENIED: 1404 - case -NFS4ERR_RECLAIM_BAD: 1405 - case -NFS4ERR_RECLAIM_CONFLICT: 1406 - /* kill_proc(fl->fl_pid, SIGLOST, 1); */ 1407 - status = 0; 1387 + case 0: 1388 + break; 1389 + case -ESTALE: 1390 + case -NFS4ERR_ADMIN_REVOKED: 1391 + case -NFS4ERR_STALE_STATEID: 1392 + case -NFS4ERR_BAD_STATEID: 1393 + case -NFS4ERR_EXPIRED: 1394 + case -NFS4ERR_NO_GRACE: 1395 + case -NFS4ERR_STALE_CLIENTID: 1396 + case -NFS4ERR_BADSESSION: 1397 + case -NFS4ERR_BADSLOT: 1398 + case -NFS4ERR_BAD_HIGH_SLOT: 1399 + case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 1400 + goto out; 1401 + default: 1402 + pr_err("NFS: %s: unhandled error %d\n", 1403 + __func__, status); 1404 + case -ENOMEM: 1405 + case -NFS4ERR_DENIED: 1406 + case -NFS4ERR_RECLAIM_BAD: 1407 + case -NFS4ERR_RECLAIM_CONFLICT: 1408 + /* kill_proc(fl->fl_pid, SIGLOST, 1); */ 1409 + status = 0; 1408 1410 } 1409 - spin_lock(&inode->i_lock); 1411 + spin_lock(&flctx->flc_lock); 1410 1412 } 1411 - spin_unlock(&inode->i_lock); 1413 + if (list == &flctx->flc_posix) { 1414 + list = &flctx->flc_flock; 1415 + goto restart; 1416 + } 1417 + spin_unlock(&flctx->flc_lock); 1412 1418 out: 1413 1419 up_write(&nfsi->rwsem); 1414 1420 return status;
+5 -1
fs/nfs/pagelist.c
··· 826 826 struct nfs_pageio_descriptor *pgio) 827 827 { 828 828 size_t size; 829 + struct file_lock_context *flctx; 829 830 830 831 if (prev) { 831 832 if (!nfs_match_open_context(req->wb_context, prev->wb_context)) 832 833 return false; 833 - if (req->wb_context->dentry->d_inode->i_flock != NULL && 834 + flctx = req->wb_context->dentry->d_inode->i_flctx; 835 + if (flctx != NULL && 836 + !(list_empty_careful(&flctx->flc_posix) && 837 + list_empty_careful(&flctx->flc_flock)) && 834 838 !nfs_match_lock_context(req->wb_lock_context, 835 839 prev->wb_lock_context)) 836 840 return false;
+35 -6
fs/nfs/write.c
··· 1091 1091 { 1092 1092 struct nfs_open_context *ctx = nfs_file_open_context(file); 1093 1093 struct nfs_lock_context *l_ctx; 1094 + struct file_lock_context *flctx = file_inode(file)->i_flctx; 1094 1095 struct nfs_page *req; 1095 1096 int do_flush, status; 1096 1097 /* ··· 1110 1109 do_flush = req->wb_page != page || req->wb_context != ctx; 1111 1110 /* for now, flush if more than 1 request in page_group */ 1112 1111 do_flush |= req->wb_this_page != req; 1113 - if (l_ctx && ctx->dentry->d_inode->i_flock != NULL) { 1112 + if (l_ctx && flctx && 1113 + !(list_empty_careful(&flctx->flc_posix) && 1114 + list_empty_careful(&flctx->flc_flock))) { 1114 1115 do_flush |= l_ctx->lockowner.l_owner != current->files 1115 1116 || l_ctx->lockowner.l_pid != current->tgid; 1116 1117 } ··· 1173 1170 return PageUptodate(page) != 0; 1174 1171 } 1175 1172 1173 + static bool 1174 + is_whole_file_wrlock(struct file_lock *fl) 1175 + { 1176 + return fl->fl_start == 0 && fl->fl_end == OFFSET_MAX && 1177 + fl->fl_type == F_WRLCK; 1178 + } 1179 + 1176 1180 /* If we know the page is up to date, and we're not using byte range locks (or 1177 1181 * if we have the whole file locked for writing), it may be more efficient to 1178 1182 * extend the write to cover the entire page in order to avoid fragmentation ··· 1190 1180 */ 1191 1181 static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode) 1192 1182 { 1183 + int ret; 1184 + struct file_lock_context *flctx = inode->i_flctx; 1185 + struct file_lock *fl; 1186 + 1193 1187 if (file->f_flags & O_DSYNC) 1194 1188 return 0; 1195 1189 if (!nfs_write_pageuptodate(page, inode)) 1196 1190 return 0; 1197 1191 if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE)) 1198 1192 return 1; 1199 - if (inode->i_flock == NULL || (inode->i_flock->fl_start == 0 && 1200 - inode->i_flock->fl_end == OFFSET_MAX && 1201 - inode->i_flock->fl_type != F_RDLCK)) 1202 - return 1; 1203 - return 0; 1193 + if (!flctx || (list_empty_careful(&flctx->flc_flock) && 1194 + list_empty_careful(&flctx->flc_posix))) 1195 + return 0; 1196 + 1197 + /* Check to see if there are whole file write locks */ 1198 + ret = 0; 1199 + spin_lock(&flctx->flc_lock); 1200 + if (!list_empty(&flctx->flc_posix)) { 1201 + fl = list_first_entry(&flctx->flc_posix, struct file_lock, 1202 + fl_list); 1203 + if (is_whole_file_wrlock(fl)) 1204 + ret = 1; 1205 + } else if (!list_empty(&flctx->flc_flock)) { 1206 + fl = list_first_entry(&flctx->flc_flock, struct file_lock, 1207 + fl_list); 1208 + if (fl->fl_type == F_WRLCK) 1209 + ret = 1; 1210 + } 1211 + spin_unlock(&flctx->flc_lock); 1212 + return ret; 1204 1213 } 1205 1214 1206 1215 /*
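nfs_can_extend_write() used to peek at inode->i_flock directly; it now takes flc_lock and inspects the first entry of flc_posix or flc_flock, with the new is_whole_file_wrlock() helper making the "whole file, write" test explicit. For reference, the userspace lock that passes that test is the classic lock-everything fcntl call; a hypothetical example (l_len == 0 means "through EOF", which the kernel records as fl_end == OFFSET_MAX):

#include <fcntl.h>
#include <unistd.h>

int lock_whole_file(int fd)
{
	struct flock fl = {
		.l_type   = F_WRLCK,	/* write lock...          */
		.l_whence = SEEK_SET,
		.l_start  = 0,		/* ...from offset zero... */
		.l_len    = 0,		/* ...to EOF (OFFSET_MAX) */
	};
	return fcntl(fd, F_SETLKW, &fl);
}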
+13 -8
fs/nfsd/nfs4state.c
··· 3472 3472 } 3473 3473 3474 3474 static int 3475 - nfsd_change_deleg_cb(struct file_lock **onlist, int arg, struct list_head *dispose) 3475 + nfsd_change_deleg_cb(struct file_lock *onlist, int arg, 3476 + struct list_head *dispose) 3476 3477 { 3477 3478 if (arg & F_UNLCK) 3478 3479 return lease_modify(onlist, arg, dispose); ··· 5552 5551 static bool 5553 5552 check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner) 5554 5553 { 5555 - struct file_lock **flpp; 5554 + struct file_lock *fl; 5556 5555 int status = false; 5557 5556 struct file *filp = find_any_file(fp); 5558 5557 struct inode *inode; 5558 + struct file_lock_context *flctx; 5559 5559 5560 5560 if (!filp) { 5561 5561 /* Any valid lock stateid should have some sort of access */ ··· 5565 5563 } 5566 5564 5567 5565 inode = file_inode(filp); 5566 + flctx = inode->i_flctx; 5568 5567 5569 - spin_lock(&inode->i_lock); 5570 - for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) { 5571 - if ((*flpp)->fl_owner == (fl_owner_t)lowner) { 5572 - status = true; 5573 - break; 5568 + if (flctx && !list_empty_careful(&flctx->flc_posix)) { 5569 + spin_lock(&flctx->flc_lock); 5570 + list_for_each_entry(fl, &flctx->flc_posix, fl_list) { 5571 + if (fl->fl_owner == (fl_owner_t)lowner) { 5572 + status = true; 5573 + break; 5574 + } 5574 5575 } 5576 + spin_unlock(&flctx->flc_lock); 5575 5577 } 5576 - spin_unlock(&inode->i_lock); 5577 5578 fput(filp); 5578 5579 return status; 5579 5580 }
+5 -5
fs/notify/fanotify/fanotify_user.c
··· 259 259 struct fsnotify_event *kevent; 260 260 char __user *start; 261 261 int ret; 262 - DEFINE_WAIT(wait); 262 + DEFINE_WAIT_FUNC(wait, woken_wake_function); 263 263 264 264 start = buf; 265 265 group = file->private_data; 266 266 267 267 pr_debug("%s: group=%p\n", __func__, group); 268 268 269 + add_wait_queue(&group->notification_waitq, &wait); 269 270 while (1) { 270 - prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE); 271 - 272 271 mutex_lock(&group->notification_mutex); 273 272 kevent = get_one_event(group, count); 274 273 mutex_unlock(&group->notification_mutex); ··· 288 289 289 290 if (start != buf) 290 291 break; 291 - schedule(); 292 + 293 + wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); 292 294 continue; 293 295 } 294 296 ··· 318 318 buf += ret; 319 319 count -= ret; 320 320 } 321 + remove_wait_queue(&group->notification_waitq, &wait); 321 322 322 - finish_wait(&group->notification_waitq, &wait); 323 323 if (start != buf && ret != -EFAULT) 324 324 ret = buf - start; 325 325 return ret;
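The fanotify read path drops the prepare_to_wait()/schedule()/finish_wait() triple in favour of wait_woken(), which closes the window where a wakeup arriving between the event check and schedule() could be missed. The general shape of the new idiom, with waitq and condition() as placeholders:

DEFINE_WAIT_FUNC(wait, woken_wake_function);

add_wait_queue(&waitq, &wait);
while (!condition()) {
	if (signal_pending(current))
		break;
	/* wait_woken() tests WQ_FLAG_WOKEN itself, so a wakeup that
	 * lands after condition() but before this call is not lost */
	wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
remove_wait_queue(&waitq, &wait);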
+1 -4
fs/ocfs2/dlm/dlmrecovery.c
··· 2023 2023 dlm_lockres_drop_inflight_ref(dlm, res); 2024 2024 spin_unlock(&res->spinlock); 2025 2025 2026 - if (ret < 0) { 2026 + if (ret < 0) 2027 2027 mlog_errno(ret); 2028 - if (newlock) 2029 - dlm_lock_put(newlock); 2030 - } 2031 2028 2032 2029 return ret; 2033 2030 }
+35 -8
fs/ocfs2/namei.c
··· 94 94 struct inode *inode, 95 95 const char *symname); 96 96 97 + static int ocfs2_double_lock(struct ocfs2_super *osb, 98 + struct buffer_head **bh1, 99 + struct inode *inode1, 100 + struct buffer_head **bh2, 101 + struct inode *inode2, 102 + int rename); 103 + 104 + static void ocfs2_double_unlock(struct inode *inode1, struct inode *inode2); 97 105 /* An orphan dir name is an 8 byte value, printed as a hex string */ 98 106 #define OCFS2_ORPHAN_NAMELEN ((int)(2 * sizeof(u64))) 99 107 ··· 686 678 { 687 679 handle_t *handle; 688 680 struct inode *inode = old_dentry->d_inode; 681 + struct inode *old_dir = old_dentry->d_parent->d_inode; 689 682 int err; 690 683 struct buffer_head *fe_bh = NULL; 684 + struct buffer_head *old_dir_bh = NULL; 691 685 struct buffer_head *parent_fe_bh = NULL; 692 686 struct ocfs2_dinode *fe = NULL; 693 687 struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); ··· 706 696 707 697 dquot_initialize(dir); 708 698 709 - err = ocfs2_inode_lock_nested(dir, &parent_fe_bh, 1, OI_LS_PARENT); 699 + err = ocfs2_double_lock(osb, &old_dir_bh, old_dir, 700 + &parent_fe_bh, dir, 0); 710 701 if (err < 0) { 711 702 if (err != -ENOENT) 712 703 mlog_errno(err); 713 704 return err; 705 + } 706 + 707 + /* make sure both dirs have bhs 708 + * get an extra ref on old_dir_bh if old==new */ 709 + if (!parent_fe_bh) { 710 + if (old_dir_bh) { 711 + parent_fe_bh = old_dir_bh; 712 + get_bh(parent_fe_bh); 713 + } else { 714 + mlog(ML_ERROR, "%s: no old_dir_bh!\n", osb->uuid_str); 715 + err = -EIO; 716 + goto out; 717 + } 714 718 } 715 719 716 720 if (!dir->i_nlink) { ··· 732 708 goto out; 733 709 } 734 710 735 - err = ocfs2_lookup_ino_from_name(dir, old_dentry->d_name.name, 711 + err = ocfs2_lookup_ino_from_name(old_dir, old_dentry->d_name.name, 736 712 old_dentry->d_name.len, &old_de_ino); 737 713 if (err) { 738 714 err = -ENOENT; ··· 825 801 ocfs2_inode_unlock(inode, 1); 826 802 827 803 out: 828 - ocfs2_inode_unlock(dir, 1); 804 + ocfs2_double_unlock(old_dir, dir); 829 805 830 806 brelse(fe_bh); 831 807 brelse(parent_fe_bh); 808 + brelse(old_dir_bh); 832 809 833 810 ocfs2_free_dir_lookup_result(&lookup); 834 811 ··· 1097 1072 } 1098 1073 1099 1074 /* 1100 - * The only place this should be used is rename! 1075 + * The only place this should be used is rename and link! 1101 1076 * if they have the same id, then the 1st one is the only one locked. 1102 1077 */ 1103 1078 static int ocfs2_double_lock(struct ocfs2_super *osb, 1104 1079 struct buffer_head **bh1, 1105 1080 struct inode *inode1, 1106 1081 struct buffer_head **bh2, 1107 - struct inode *inode2) 1082 + struct inode *inode2, 1083 + int rename) 1108 1084 { 1109 1085 int status; 1110 1086 int inode1_is_ancestor, inode2_is_ancestor; ··· 1153 1127 } 1154 1128 /* lock id2 */ 1155 1129 status = ocfs2_inode_lock_nested(inode2, bh2, 1, 1156 - OI_LS_RENAME1); 1130 + rename == 1 ? OI_LS_RENAME1 : OI_LS_PARENT); 1157 1131 if (status < 0) { 1158 1132 if (status != -ENOENT) 1159 1133 mlog_errno(status); ··· 1162 1136 } 1163 1137 1164 1138 /* lock id1 */ 1165 - status = ocfs2_inode_lock_nested(inode1, bh1, 1, OI_LS_RENAME2); 1139 + status = ocfs2_inode_lock_nested(inode1, bh1, 1, 1140 + rename == 1 ? OI_LS_RENAME2 : OI_LS_PARENT); 1166 1141 if (status < 0) { 1167 1142 /* 1168 1143 * An error return must mean that no cluster locks ··· 1279 1252 1280 1253 /* if old and new are the same, this'll just do one lock. 
*/ 1281 1254 status = ocfs2_double_lock(osb, &old_dir_bh, old_dir, 1282 - &new_dir_bh, new_dir); 1255 + &new_dir_bh, new_dir, 1); 1283 1256 if (status < 0) { 1284 1257 mlog_errno(status); 1285 1258 goto bail;
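ocfs2_link() previously cluster-locked only the new parent; it now locks both directories through ocfs2_double_lock(), which takes the two inode locks in a stable order (the full function orders by ancestry and inode id) so concurrent link and rename cannot deadlock ABBA-style. The new rename argument only selects the lockdep annotation, as the two call sites in the hunks above show:

/* link: both parents use the ordinary parent lock subclass */
err = ocfs2_double_lock(osb, &old_dir_bh, old_dir,
			&parent_fe_bh, dir, 0);

/* rename: keeps the dedicated OI_LS_RENAME1/OI_LS_RENAME2 subclasses */
status = ocfs2_double_lock(osb, &old_dir_bh, old_dir,
			   &new_dir_bh, new_dir, 1);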
+1 -1
fs/read_write.c
··· 358 358 return retval; 359 359 } 360 360 361 - if (unlikely(inode->i_flock && mandatory_lock(inode))) { 361 + if (unlikely(inode->i_flctx && mandatory_lock(inode))) { 362 362 retval = locks_mandatory_area( 363 363 read_write == READ ? FLOCK_VERIFY_READ : FLOCK_VERIFY_WRITE, 364 364 inode, file, pos, count);
+4 -4
include/acpi/processor.h
··· 196 196 struct acpi_processor { 197 197 acpi_handle handle; 198 198 u32 acpi_id; 199 - u32 apic_id; 200 - u32 id; 199 + u32 phys_id; /* CPU hardware ID such as APIC ID for x86 */ 200 + u32 id; /* CPU logical ID allocated by OS */ 201 201 u32 pblk; 202 202 int performance_platform_limit; 203 203 int throttling_platform_limit; ··· 310 310 #endif /* CONFIG_CPU_FREQ */ 311 311 312 312 /* in processor_core.c */ 313 - int acpi_get_apicid(acpi_handle, int type, u32 acpi_id); 314 - int acpi_map_cpuid(int apic_id, u32 acpi_id); 313 + int acpi_get_phys_id(acpi_handle, int type, u32 acpi_id); 314 + int acpi_map_cpuid(int phys_id, u32 acpi_id); 315 315 int acpi_get_cpuid(acpi_handle, int type, u32 acpi_id); 316 316 317 317 /* in processor_pdc.c */
+6 -2
include/asm-generic/tlb.h
··· 136 136 137 137 static inline void __tlb_reset_range(struct mmu_gather *tlb) 138 138 { 139 - tlb->start = TASK_SIZE; 140 - tlb->end = 0; 139 + if (tlb->fullmm) { 140 + tlb->start = tlb->end = ~0; 141 + } else { 142 + tlb->start = TASK_SIZE; 143 + tlb->end = 0; 144 + } 141 145 } 142 146 143 147 /*
+2 -2
include/linux/acpi.h
··· 147 147 148 148 #ifdef CONFIG_ACPI_HOTPLUG_CPU 149 149 /* Arch dependent functions for cpu hotplug support */ 150 - int acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu); 151 - int acpi_unmap_lsapic(int cpu); 150 + int acpi_map_cpu(acpi_handle handle, int physid, int *pcpu); 151 + int acpi_unmap_cpu(int cpu); 152 152 #endif /* CONFIG_ACPI_HOTPLUG_CPU */ 153 153 154 154 int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base);
+6 -2
include/linux/blk-mq.h
··· 34 34 unsigned long flags; /* BLK_MQ_F_* flags */ 35 35 36 36 struct request_queue *queue; 37 - unsigned int queue_num; 38 37 struct blk_flush_queue *fq; 39 38 40 39 void *driver_data; ··· 53 54 unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER]; 54 55 55 56 unsigned int numa_node; 56 - unsigned int cmd_size; /* per-request extra data */ 57 + unsigned int queue_num; 57 58 58 59 atomic_t nr_active; 59 60 ··· 194 195 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index); 195 196 struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int); 196 197 198 + int blk_mq_request_started(struct request *rq); 197 199 void blk_mq_start_request(struct request *rq); 198 200 void blk_mq_end_request(struct request *rq, int error); 199 201 void __blk_mq_end_request(struct request *rq, int error); 200 202 201 203 void blk_mq_requeue_request(struct request *rq); 202 204 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head); 205 + void blk_mq_cancel_requeue_work(struct request_queue *q); 203 206 void blk_mq_kick_requeue_list(struct request_queue *q); 207 + void blk_mq_abort_requeue_list(struct request_queue *q); 204 208 void blk_mq_complete_request(struct request *rq); 205 209 206 210 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); ··· 214 212 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); 215 213 void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn, 216 214 void *priv); 215 + void blk_mq_unfreeze_queue(struct request_queue *q); 216 + void blk_mq_freeze_queue_start(struct request_queue *q); 217 217 218 218 /* 219 219 * Driver command data is immediately after the request. So subtract request
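blk-mq gains several entry points a driver can use to quiesce a queue during error recovery: blk_mq_request_started() to probe a request's state, blk_mq_cancel_requeue_work() and blk_mq_abort_requeue_list() to deal with pending requeues, and the now-exported freeze pair. How they compose is not shown in this header; the following reset-path sketch is an assumption about intended usage, not code from the patch:

/* hypothetical driver reset path */
static void my_driver_reset(struct request_queue *q)
{
	blk_mq_freeze_queue_start(q);	/* stop new requests from entering */
	blk_mq_cancel_requeue_work(q);	/* keep requeues from racing the reset */
	blk_mq_abort_requeue_list(q);	/* complete queued requeues with an error */
	/* ...reinitialize the hardware... */
	blk_mq_unfreeze_queue(q);	/* resume normal operation */
}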
+2
include/linux/blk_types.h
··· 190 190 __REQ_PM, /* runtime pm request */ 191 191 __REQ_HASHED, /* on IO scheduler merge hash */ 192 192 __REQ_MQ_INFLIGHT, /* track inflight for MQ */ 193 + __REQ_NO_TIMEOUT, /* requests may never expire */ 193 194 __REQ_NR_BITS, /* stops here */ 194 195 }; 195 196 ··· 244 243 #define REQ_PM (1ULL << __REQ_PM) 245 244 #define REQ_HASHED (1ULL << __REQ_HASHED) 246 245 #define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT) 246 + #define REQ_NO_TIMEOUT (1ULL << __REQ_NO_TIMEOUT) 247 247 248 248 #endif /* __LINUX_BLK_TYPES_H */
+2 -2
include/linux/ceph/osd_client.h
··· 87 87 struct ceph_osd_data osd_data; 88 88 } extent; 89 89 struct { 90 - __le32 name_len; 91 - __le32 value_len; 90 + u32 name_len; 91 + u32 value_len; 92 92 __u8 cmp_op; /* CEPH_OSD_CMPXATTR_OP_* */ 93 93 __u8 cmp_mode; /* CEPH_OSD_CMPXATTR_MODE_* */ 94 94 struct ceph_osd_data osd_data;
+6 -6
include/linux/compiler.h
··· 215 215 } 216 216 } 217 217 218 - static __always_inline void __assign_once_size(volatile void *p, void *res, int size) 218 + static __always_inline void __write_once_size(volatile void *p, void *res, int size) 219 219 { 220 220 switch (size) { 221 221 case 1: *(volatile __u8 *)p = *(__u8 *)res; break; ··· 235 235 /* 236 236 * Prevent the compiler from merging or refetching reads or writes. The 237 237 * compiler is also forbidden from reordering successive instances of 238 - * READ_ONCE, ASSIGN_ONCE and ACCESS_ONCE (see below), but only when the 238 + * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the 239 239 * compiler is aware of some particular ordering. One way to make the 240 240 * compiler aware of ordering is to put the two invocations of READ_ONCE, 241 - * ASSIGN_ONCE or ACCESS_ONCE() in different C statements. 241 + * WRITE_ONCE or ACCESS_ONCE() in different C statements. 242 242 * 243 243 * In contrast to ACCESS_ONCE these two macros will also work on aggregate 244 244 * data types like structs or unions. If the size of the accessed data 245 245 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits) 246 - * READ_ONCE() and ASSIGN_ONCE() will fall back to memcpy and print a 246 + * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a 247 247 * compile-time warning. 248 248 * 249 249 * Their two major use cases are: (1) Mediating communication between ··· 257 257 #define READ_ONCE(x) \ 258 258 ({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; }) 259 259 260 - #define ASSIGN_ONCE(val, x) \ 261 - ({ typeof(x) __val; __val = val; __assign_once_size(&x, &__val, sizeof(__val)); __val; }) 260 + #define WRITE_ONCE(x, val) \ 261 + ({ typeof(x) __val; __val = val; __write_once_size(&x, &__val, sizeof(__val)); __val; }) 262 262 263 263 #endif /* __KERNEL__ */ 264 264
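ASSIGN_ONCE(val, x) is renamed to WRITE_ONCE(x, val); beyond the name, the argument order flips so the destination comes first, mirroring READ_ONCE(). A minimal use, with a hypothetical flag shared between two contexts:

static int ready;	/* illustrative shared variable */

void publisher(void)
{
	/* old spelling: ASSIGN_ONCE(1, ready); */
	WRITE_ONCE(ready, 1);	/* destination first, like READ_ONCE() */
}

int watcher(void)
{
	/* the compiler may not merge, refetch or reorder this access */
	return READ_ONCE(ready);
}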
+37 -17
include/linux/fs.h
··· 135 135 #define FMODE_CAN_WRITE ((__force fmode_t)0x40000) 136 136 137 137 /* File was opened by fanotify and shouldn't generate fanotify events */ 138 - #define FMODE_NONOTIFY ((__force fmode_t)0x1000000) 138 + #define FMODE_NONOTIFY ((__force fmode_t)0x4000000) 139 139 140 140 /* 141 141 * Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector ··· 625 625 atomic_t i_readcount; /* struct files open RO */ 626 626 #endif 627 627 const struct file_operations *i_fop; /* former ->i_op->default_file_ops */ 628 - struct file_lock *i_flock; 628 + struct file_lock_context *i_flctx; 629 629 struct address_space i_data; 630 630 struct list_head i_devices; 631 631 union { ··· 885 885 /* legacy typedef, should eventually be removed */ 886 886 typedef void *fl_owner_t; 887 887 888 + struct file_lock; 889 + 888 890 struct file_lock_operations { 889 891 void (*fl_copy_lock)(struct file_lock *, struct file_lock *); 890 892 void (*fl_release_private)(struct file_lock *); ··· 900 898 void (*lm_notify)(struct file_lock *); /* unblock callback */ 901 899 int (*lm_grant)(struct file_lock *, int); 902 900 bool (*lm_break)(struct file_lock *); 903 - int (*lm_change)(struct file_lock **, int, struct list_head *); 901 + int (*lm_change)(struct file_lock *, int, struct list_head *); 904 902 void (*lm_setup)(struct file_lock *, void **); 905 903 }; ··· 925 923 * FIXME: should we create a separate "struct lock_request" to help distinguish 926 924 * these two uses? 927 925 * 928 - * The i_flock list is ordered by: 926 + * The various i_flctx lists are ordered by: 929 927 * 930 - * 1) lock type -- FL_LEASEs first, then FL_FLOCK, and finally FL_POSIX 931 - * 2) lock owner 932 - * 3) lock range start 933 - * 4) lock range end 928 + * 1) lock owner 929 + * 2) lock range start 930 + * 3) lock range end 934 931 * 935 932 * Obviously, the last two criteria only matter for POSIX locks. 
936 933 */ 937 934 struct file_lock { 938 935 struct file_lock *fl_next; /* singly linked list for this inode */ 936 + struct list_head fl_list; /* link into file_lock_context */ 939 937 struct hlist_node fl_link; /* node in global lists */ 940 938 struct list_head fl_block; /* circular list of blocked processes */ 941 939 fl_owner_t fl_owner; ··· 966 964 } fl_u; 967 965 }; 968 966 967 + struct file_lock_context { 968 + spinlock_t flc_lock; 969 + struct list_head flc_flock; 970 + struct list_head flc_posix; 971 + struct list_head flc_lease; 972 + int flc_flock_cnt; 973 + int flc_posix_cnt; 974 + int flc_lease_cnt; 975 + }; 976 + 969 977 /* The following constant reflects the upper bound of the file/locking space */ 970 978 #ifndef OFFSET_MAX 971 979 #define INT_LIMIT(x) (~((x)1 << (sizeof(x)*8 - 1))) ··· 1002 990 extern int fcntl_getlease(struct file *filp); 1003 991 1004 992 /* fs/locks.c */ 993 + void locks_free_lock_context(struct file_lock_context *ctx); 1005 994 void locks_free_lock(struct file_lock *fl); 1006 995 extern void locks_init_lock(struct file_lock *); 1007 996 extern struct file_lock * locks_alloc_lock(void); ··· 1023 1010 extern void lease_get_mtime(struct inode *, struct timespec *time); 1024 1011 extern int generic_setlease(struct file *, long, struct file_lock **, void **priv); 1025 1012 extern int vfs_setlease(struct file *, long, struct file_lock **, void **); 1026 - extern int lease_modify(struct file_lock **, int, struct list_head *); 1013 + extern int lease_modify(struct file_lock *, int, struct list_head *); 1027 1014 #else /* !CONFIG_FILE_LOCKING */ 1028 1015 static inline int fcntl_getlk(struct file *file, unsigned int cmd, 1029 1016 struct flock __user *user) ··· 1058 1045 static inline int fcntl_getlease(struct file *filp) 1059 1046 { 1060 1047 return F_UNLCK; 1048 + } 1049 + 1050 + static inline void 1051 + locks_free_lock_context(struct file_lock_context *ctx) 1052 + { 1061 1053 } 1062 1054 1063 1055 static inline void locks_init_lock(struct file_lock *fl) ··· 1155 1137 return -EINVAL; 1156 1138 } 1157 1139 1158 - static inline int lease_modify(struct file_lock **before, int arg, 1140 + static inline int lease_modify(struct file_lock *fl, int arg, 1159 1141 struct list_head *dispose) 1160 1142 { 1161 1143 return -EINVAL; ··· 1977 1959 struct file *filp, 1978 1960 loff_t size) 1979 1961 { 1980 - if (inode->i_flock && mandatory_lock(inode)) 1962 + if (inode->i_flctx && mandatory_lock(inode)) 1981 1963 return locks_mandatory_area( 1982 1964 FLOCK_VERIFY_WRITE, inode, filp, 1983 1965 size < inode->i_size ? size : inode->i_size, ··· 1991 1973 { 1992 1974 /* 1993 1975 * Since this check is lockless, we must ensure that any refcounts 1994 - * taken are done before checking inode->i_flock. Otherwise, we could 1995 - * end up racing with tasks trying to set a new lease on this file. 1976 + * taken are done before checking i_flctx->flc_lease. Otherwise, we 1977 + * could end up racing with tasks trying to set a new lease on this 1978 + * file. 1996 1979 */ 1997 1980 smp_mb(); 1998 - if (inode->i_flock) 1981 + if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease)) 1999 1982 return __break_lease(inode, mode, FL_LEASE); 2000 1983 return 0; 2001 1984 } ··· 2005 1986 { 2006 1987 /* 2007 1988 * Since this check is lockless, we must ensure that any refcounts 2008 - * taken are done before checking inode->i_flock. Otherwise, we could 2009 - * end up racing with tasks trying to set a new lease on this file. 
1989 + * taken are done before checking i_flctx->flc_lease. Otherwise, we 1990 + * could end up racing with tasks trying to set a new lease on this 1991 + * file. 2010 1992 */ 2011 1993 smp_mb(); 2012 - if (inode->i_flock) 1994 + if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease)) 2013 1995 return __break_lease(inode, mode, FL_DELEG); 2014 1996 return 0; 2015 1997 }
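The break_lease()/break_deleg() fast path stays lockless, so its smp_mb() pairs with the one in generic_add_lease() (fs/locks.c hunk above). Summarizing the two racing sequences already shown, in comment form:

/* lease setter (generic_add_lease)     opener (break_lease caller)
 *
 * locks_insert_lock_ctx(lease, ...);   <take open/fcntl references>
 * smp_mb();                            smp_mb();
 * check_conflicting_open(...);         if (i_flctx && !list_empty_careful(
 *                                              &i_flctx->flc_lease))
 *                                              __break_lease(...);
 *
 * Whichever side runs second is guaranteed to see the other side's
 * store, so a conflicting open can never slip past both checks.
 */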
+53 -9
include/linux/kdb.h
··· 13 13 * Copyright (C) 2009 Jason Wessel <jason.wessel@windriver.com> 14 14 */ 15 15 16 + /* Shifted versions of the command enable bits are be used if the command 17 + * has no arguments (see kdb_check_flags). This allows commands, such as 18 + * go, to have different permissions depending upon whether it is called 19 + * with an argument. 20 + */ 21 + #define KDB_ENABLE_NO_ARGS_SHIFT 10 22 + 16 23 typedef enum { 17 - KDB_REPEAT_NONE = 0, /* Do not repeat this command */ 18 - KDB_REPEAT_NO_ARGS, /* Repeat the command without arguments */ 19 - KDB_REPEAT_WITH_ARGS, /* Repeat the command including its arguments */ 20 - } kdb_repeat_t; 24 + KDB_ENABLE_ALL = (1 << 0), /* Enable everything */ 25 + KDB_ENABLE_MEM_READ = (1 << 1), 26 + KDB_ENABLE_MEM_WRITE = (1 << 2), 27 + KDB_ENABLE_REG_READ = (1 << 3), 28 + KDB_ENABLE_REG_WRITE = (1 << 4), 29 + KDB_ENABLE_INSPECT = (1 << 5), 30 + KDB_ENABLE_FLOW_CTRL = (1 << 6), 31 + KDB_ENABLE_SIGNAL = (1 << 7), 32 + KDB_ENABLE_REBOOT = (1 << 8), 33 + /* User exposed values stop here, all remaining flags are 34 + * exclusively used to describe a commands behaviour. 35 + */ 36 + 37 + KDB_ENABLE_ALWAYS_SAFE = (1 << 9), 38 + KDB_ENABLE_MASK = (1 << KDB_ENABLE_NO_ARGS_SHIFT) - 1, 39 + 40 + KDB_ENABLE_ALL_NO_ARGS = KDB_ENABLE_ALL << KDB_ENABLE_NO_ARGS_SHIFT, 41 + KDB_ENABLE_MEM_READ_NO_ARGS = KDB_ENABLE_MEM_READ 42 + << KDB_ENABLE_NO_ARGS_SHIFT, 43 + KDB_ENABLE_MEM_WRITE_NO_ARGS = KDB_ENABLE_MEM_WRITE 44 + << KDB_ENABLE_NO_ARGS_SHIFT, 45 + KDB_ENABLE_REG_READ_NO_ARGS = KDB_ENABLE_REG_READ 46 + << KDB_ENABLE_NO_ARGS_SHIFT, 47 + KDB_ENABLE_REG_WRITE_NO_ARGS = KDB_ENABLE_REG_WRITE 48 + << KDB_ENABLE_NO_ARGS_SHIFT, 49 + KDB_ENABLE_INSPECT_NO_ARGS = KDB_ENABLE_INSPECT 50 + << KDB_ENABLE_NO_ARGS_SHIFT, 51 + KDB_ENABLE_FLOW_CTRL_NO_ARGS = KDB_ENABLE_FLOW_CTRL 52 + << KDB_ENABLE_NO_ARGS_SHIFT, 53 + KDB_ENABLE_SIGNAL_NO_ARGS = KDB_ENABLE_SIGNAL 54 + << KDB_ENABLE_NO_ARGS_SHIFT, 55 + KDB_ENABLE_REBOOT_NO_ARGS = KDB_ENABLE_REBOOT 56 + << KDB_ENABLE_NO_ARGS_SHIFT, 57 + KDB_ENABLE_ALWAYS_SAFE_NO_ARGS = KDB_ENABLE_ALWAYS_SAFE 58 + << KDB_ENABLE_NO_ARGS_SHIFT, 59 + KDB_ENABLE_MASK_NO_ARGS = KDB_ENABLE_MASK << KDB_ENABLE_NO_ARGS_SHIFT, 60 + 61 + KDB_REPEAT_NO_ARGS = 0x40000000, /* Repeat the command w/o arguments */ 62 + KDB_REPEAT_WITH_ARGS = 0x80000000, /* Repeat the command with args */ 63 + } kdb_cmdflags_t; 21 64 22 65 typedef int (*kdb_func_t)(int, const char **); 23 66 ··· 105 62 #define KDB_BADLENGTH (-19) 106 63 #define KDB_NOBP (-20) 107 64 #define KDB_BADADDR (-21) 65 + #define KDB_NOPERM (-22) 108 66 109 67 /* 110 68 * kdb_diemsg ··· 190 146 191 147 /* Dynamic kdb shell command registration */ 192 148 extern int kdb_register(char *, kdb_func_t, char *, char *, short); 193 - extern int kdb_register_repeat(char *, kdb_func_t, char *, char *, 194 - short, kdb_repeat_t); 149 + extern int kdb_register_flags(char *, kdb_func_t, char *, char *, 150 + short, kdb_cmdflags_t); 195 151 extern int kdb_unregister(char *); 196 152 #else /* ! CONFIG_KGDB_KDB */ 197 153 static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) 
{ return 0; } 198 154 static inline void kdb_init(int level) {} 199 155 static inline int kdb_register(char *cmd, kdb_func_t func, char *usage, 200 156 char *help, short minlen) { return 0; } 201 - static inline int kdb_register_repeat(char *cmd, kdb_func_t func, char *usage, 202 - char *help, short minlen, 203 - kdb_repeat_t repeat) { return 0; } 157 + static inline int kdb_register_flags(char *cmd, kdb_func_t func, char *usage, 158 + char *help, short minlen, 159 + kdb_cmdflags_t flags) { return 0; } 204 160 static inline int kdb_unregister(char *cmd) { return 0; } 205 161 #endif /* CONFIG_KGDB_KDB */ 206 162 enum {
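kdb_register_repeat() becomes kdb_register_flags(), and the old three-value kdb_repeat_t is absorbed into kdb_cmdflags_t alongside the new permission bits, so a single flags word now encodes both repeat behaviour and the permission class a command needs. A representative registration under the new API (the specific command and flags are illustrative, matching the style of the kdb_bp.c hunk below):

kdb_register_flags("md", kdb_md, "<vaddr>",
		   "Display Memory Contents", 1,
		   KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS);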
+1 -1
include/linux/mm.h
··· 1952 1952 #if VM_GROWSUP 1953 1953 extern int expand_upwards(struct vm_area_struct *vma, unsigned long address); 1954 1954 #else 1955 - #define expand_upwards(vma, address) do { } while (0) 1955 + #define expand_upwards(vma, address) (0) 1956 1956 #endif 1957 1957 1958 1958 /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
+1
include/linux/mmc/sdhci.h
··· 137 137 #define SDHCI_SDR104_NEEDS_TUNING (1<<10) /* SDR104/HS200 needs tuning */ 138 138 #define SDHCI_USING_RETUNING_TIMER (1<<11) /* Host is using a retuning timer for the card */ 139 139 #define SDHCI_USE_64_BIT_DMA (1<<12) /* Use 64-bit DMA */ 140 + #define SDHCI_HS400_TUNING (1<<13) /* Tuning for HS400 */ 140 141 141 142 unsigned int version; /* SDHCI spec. version */ 142 143
+3 -3
include/linux/netdevice.h
··· 852 852 * 3. Update dev->stats asynchronously and atomically, and define 853 853 * neither operation. 854 854 * 855 - * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16t vid); 855 + * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid); 856 856 * If device support VLAN filtering this function is called when a 857 857 * VLAN id is registered. 858 858 * 859 - * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid); 859 + * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid); 860 860 * If device support VLAN filtering this function is called when a 861 861 * VLAN id is unregistered. 862 862 * ··· 2085 2085 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list) 2086 2086 #define for_each_netdev_in_bond_rcu(bond, slave) \ 2087 2087 for_each_netdev_rcu(&init_net, slave) \ 2088 - if (netdev_master_upper_dev_get_rcu(slave) == bond) 2088 + if (netdev_master_upper_dev_get_rcu(slave) == (bond)) 2089 2089 #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list) 2090 2090 2091 2091 static inline struct net_device *next_net_device(struct net_device *dev)
+7 -5
include/linux/perf_event.h
··· 79 79 struct perf_branch_entry entries[0]; 80 80 }; 81 81 82 - struct perf_regs { 83 - __u64 abi; 84 - struct pt_regs *regs; 85 - }; 86 - 87 82 struct task_struct; 88 83 89 84 /* ··· 605 610 u32 reserved; 606 611 } cpu_entry; 607 612 struct perf_callchain_entry *callchain; 613 + 614 + /* 615 + * regs_user may point to task_pt_regs or to regs_user_copy, depending 616 + * on arch details. 617 + */ 608 618 struct perf_regs regs_user; 619 + struct pt_regs regs_user_copy; 620 + 609 621 struct perf_regs regs_intr; 610 622 u64 stack_user_size; 611 623 } ____cacheline_aligned;
+16
include/linux/perf_regs.h
··· 1 1 #ifndef _LINUX_PERF_REGS_H 2 2 #define _LINUX_PERF_REGS_H 3 3 4 + struct perf_regs { 5 + __u64 abi; 6 + struct pt_regs *regs; 7 + }; 8 + 4 9 #ifdef CONFIG_HAVE_PERF_REGS 5 10 #include <asm/perf_regs.h> 6 11 u64 perf_reg_value(struct pt_regs *regs, int idx); 7 12 int perf_reg_validate(u64 mask); 8 13 u64 perf_reg_abi(struct task_struct *task); 14 + void perf_get_regs_user(struct perf_regs *regs_user, 15 + struct pt_regs *regs, 16 + struct pt_regs *regs_user_copy); 9 17 #else 10 18 static inline u64 perf_reg_value(struct pt_regs *regs, int idx) 11 19 { ··· 28 20 static inline u64 perf_reg_abi(struct task_struct *task) 29 21 { 30 22 return PERF_SAMPLE_REGS_ABI_NONE; 23 + } 24 + 25 + static inline void perf_get_regs_user(struct perf_regs *regs_user, 26 + struct pt_regs *regs, 27 + struct pt_regs *regs_user_copy) 28 + { 29 + regs_user->regs = task_pt_regs(current); 30 + regs_user->abi = perf_reg_abi(current); 31 31 } 32 32 #endif /* CONFIG_HAVE_PERF_REGS */ 33 33 #endif /* _LINUX_PERF_REGS_H */
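perf_get_regs_user() lets an architecture report user-space register state even when the sampling interrupt arrived in kernel mode, using the new regs_user_copy scratch area in perf_sample_data (perf_event.h hunk above) to assemble a partial pt_regs. The stub above is the no-CONFIG_HAVE_PERF_REGS fallback; a sketch of how the core sampling code is presumably expected to dispatch (assumed caller shape, not part of this hunk):

static void perf_sample_regs_user(struct perf_regs *regs_user,
				  struct pt_regs *regs,
				  struct pt_regs *regs_user_copy)
{
	if (user_mode(regs)) {
		/* interrupted user space: the trapped regs are the answer */
		regs_user->abi  = perf_reg_abi(current);
		regs_user->regs = regs;
	} else if (current->mm) {
		/* kernel-mode sample for a user task: let the arch rebuild
		 * what it can into the caller's scratch pt_regs */
		perf_get_regs_user(regs_user, regs, regs_user_copy);
	} else {
		regs_user->abi  = PERF_SAMPLE_REGS_ABI_NONE;
		regs_user->regs = NULL;
	}
}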
+10
include/linux/rmap.h
··· 37 37 atomic_t refcount; 38 38 39 39 /* 40 + * Count of child anon_vmas and VMAs which points to this anon_vma. 41 + * 42 + * This counter is used for making decision about reusing anon_vma 43 + * instead of forking new one. See comments in function anon_vma_clone. 44 + */ 45 + unsigned degree; 46 + 47 + struct anon_vma *parent; /* Parent of this anon_vma */ 48 + 49 + /* 40 50 * NOTE: the LSB of the rb_root.rb_node is set by 41 51 * mm_take_all_locks() _after_ taking the above lock. So the 42 52 * rb_root must only be read/written after taking the above lock
-1
include/linux/writeback.h
··· 177 177 struct writeback_control *wbc, writepage_t writepage, 178 178 void *data); 179 179 int do_writepages(struct address_space *mapping, struct writeback_control *wbc); 180 - void set_page_dirty_balance(struct page *page); 181 180 void writeback_set_ratelimit(void); 182 181 void tag_pages_for_writeback(struct address_space *mapping, 183 182 pgoff_t start, pgoff_t end);
+2 -5
include/net/mac80211.h
··· 1270 1270 * 1271 1271 * @IEEE80211_KEY_FLAG_GENERATE_IV: This flag should be set by the 1272 1272 * driver to indicate that it requires IV generation for this 1273 - * particular key. Setting this flag does not necessarily mean that SKBs 1274 - * will have sufficient tailroom for ICV or MIC. 1273 + * particular key. 1275 1274 * @IEEE80211_KEY_FLAG_GENERATE_MMIC: This flag should be set by 1276 1275 * the driver for a TKIP key if it requires Michael MIC 1277 1276 * generation in software. ··· 1282 1283 * @IEEE80211_KEY_FLAG_PUT_IV_SPACE: This flag should be set by the driver 1283 1284 * if space should be prepared for the IV, but the IV 1284 1285 * itself should not be generated. Do not set together with 1285 - * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key. Setting this flag does 1286 - * not necessarily mean that SKBs will have sufficient tailroom for ICV or 1287 - * MIC. 1286 + * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key. 1288 1287 * @IEEE80211_KEY_FLAG_RX_MGMT: This key will be used to decrypt received 1289 1288 * management frames. The flag can help drivers that have a hardware 1290 1289 * crypto implementation that doesn't deal with management frames
-1
include/target/target_core_backend.h
··· 135 135 int se_dev_set_emulate_rest_reord(struct se_device *dev, int); 136 136 int se_dev_set_queue_depth(struct se_device *, u32); 137 137 int se_dev_set_max_sectors(struct se_device *, u32); 138 - int se_dev_set_fabric_max_sectors(struct se_device *, u32); 139 138 int se_dev_set_optimal_sectors(struct se_device *, u32); 140 139 int se_dev_set_block_size(struct se_device *, u32); 141 140
-2
include/target/target_core_backend_configfs.h
··· 98 98 TB_DEV_ATTR(_backend, block_size, S_IRUGO | S_IWUSR); \ 99 99 DEF_TB_DEV_ATTRIB_RO(_backend, hw_max_sectors); \ 100 100 TB_DEV_ATTR_RO(_backend, hw_max_sectors); \ 101 - DEF_TB_DEV_ATTRIB(_backend, fabric_max_sectors); \ 102 - TB_DEV_ATTR(_backend, fabric_max_sectors, S_IRUGO | S_IWUSR); \ 103 101 DEF_TB_DEV_ATTRIB(_backend, optimal_sectors); \ 104 102 TB_DEV_ATTR(_backend, optimal_sectors, S_IRUGO | S_IWUSR); \ 105 103 DEF_TB_DEV_ATTRIB_RO(_backend, hw_queue_depth); \
-3
include/target/target_core_base.h
··· 77 77 #define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT 0 78 78 /* Default max_write_same_len, disabled by default */ 79 79 #define DA_MAX_WRITE_SAME_LEN 0 80 - /* Default max transfer length */ 81 - #define DA_FABRIC_MAX_SECTORS 8192 82 80 /* Use a model alias based on the configfs backend device name */ 83 81 #define DA_EMULATE_MODEL_ALIAS 0 84 82 /* Emulation for Direct Page Out */ ··· 692 694 u32 hw_block_size; 693 695 u32 block_size; 694 696 u32 hw_max_sectors; 695 - u32 fabric_max_sectors; 696 697 u32 optimal_sectors; 697 698 u32 hw_queue_depth; 698 699 u32 queue_depth;
+1 -1
include/uapi/asm-generic/fcntl.h
··· 5 5 6 6 /* 7 7 * FMODE_EXEC is 0x20 8 - * FMODE_NONOTIFY is 0x1000000 8 + * FMODE_NONOTIFY is 0x4000000 9 9 * These cannot be used by userspace O_* until internal and external open 10 10 * flags are split. 11 11 * -Eric Paris
+22 -15
include/uapi/linux/kfd_ioctl.h
··· 128 128 uint32_t pad; 129 129 }; 130 130 131 - #define KFD_IOC_MAGIC 'K' 131 + #define AMDKFD_IOCTL_BASE 'K' 132 + #define AMDKFD_IO(nr) _IO(AMDKFD_IOCTL_BASE, nr) 133 + #define AMDKFD_IOR(nr, type) _IOR(AMDKFD_IOCTL_BASE, nr, type) 134 + #define AMDKFD_IOW(nr, type) _IOW(AMDKFD_IOCTL_BASE, nr, type) 135 + #define AMDKFD_IOWR(nr, type) _IOWR(AMDKFD_IOCTL_BASE, nr, type) 132 136 133 - #define KFD_IOC_GET_VERSION \ 134 - _IOR(KFD_IOC_MAGIC, 1, struct kfd_ioctl_get_version_args) 137 + #define AMDKFD_IOC_GET_VERSION \ 138 + AMDKFD_IOR(0x01, struct kfd_ioctl_get_version_args) 135 139 136 - #define KFD_IOC_CREATE_QUEUE \ 137 - _IOWR(KFD_IOC_MAGIC, 2, struct kfd_ioctl_create_queue_args) 140 + #define AMDKFD_IOC_CREATE_QUEUE \ 141 + AMDKFD_IOWR(0x02, struct kfd_ioctl_create_queue_args) 138 142 139 - #define KFD_IOC_DESTROY_QUEUE \ 140 - _IOWR(KFD_IOC_MAGIC, 3, struct kfd_ioctl_destroy_queue_args) 143 + #define AMDKFD_IOC_DESTROY_QUEUE \ 144 + AMDKFD_IOWR(0x03, struct kfd_ioctl_destroy_queue_args) 141 145 142 - #define KFD_IOC_SET_MEMORY_POLICY \ 143 - _IOW(KFD_IOC_MAGIC, 4, struct kfd_ioctl_set_memory_policy_args) 146 + #define AMDKFD_IOC_SET_MEMORY_POLICY \ 147 + AMDKFD_IOW(0x04, struct kfd_ioctl_set_memory_policy_args) 144 148 145 - #define KFD_IOC_GET_CLOCK_COUNTERS \ 146 - _IOWR(KFD_IOC_MAGIC, 5, struct kfd_ioctl_get_clock_counters_args) 149 + #define AMDKFD_IOC_GET_CLOCK_COUNTERS \ 150 + AMDKFD_IOWR(0x05, struct kfd_ioctl_get_clock_counters_args) 147 151 148 - #define KFD_IOC_GET_PROCESS_APERTURES \ 149 - _IOR(KFD_IOC_MAGIC, 6, struct kfd_ioctl_get_process_apertures_args) 152 + #define AMDKFD_IOC_GET_PROCESS_APERTURES \ 153 + AMDKFD_IOR(0x06, struct kfd_ioctl_get_process_apertures_args) 150 154 151 - #define KFD_IOC_UPDATE_QUEUE \ 152 - _IOW(KFD_IOC_MAGIC, 7, struct kfd_ioctl_update_queue_args) 155 + #define AMDKFD_IOC_UPDATE_QUEUE \ 156 + AMDKFD_IOW(0x07, struct kfd_ioctl_update_queue_args) 157 + 158 + #define AMDKFD_COMMAND_START 0x01 159 + #define AMDKFD_COMMAND_END 0x08 153 160 154 161 #endif
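The amdkfd ioctls are renamed from KFD_IOC_* to AMDKFD_IOC_* and renumbered through the AMDKFD_IO{,R,W,WR} wrappers, with AMDKFD_COMMAND_START/END bracketing the valid range so the driver can bounds-check incoming commands. A hypothetical user-space probe against the new numbering (the field names come from the existing kfd_ioctl_get_version_args; /dev/kfd is the amdkfd device node):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

int main(void)
{
	struct kfd_ioctl_get_version_args args = {0};
	int fd = open("/dev/kfd", O_RDWR);

	if (fd < 0 || ioctl(fd, AMDKFD_IOC_GET_VERSION, &args) < 0) {
		perror("AMDKFD_IOC_GET_VERSION");
		return 1;
	}
	printf("KFD interface %u.%u\n", args.major_version,
	       args.minor_version);
	return 0;
}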
+4
include/uapi/linux/openvswitch.h
··· 174 174 OVS_PACKET_ATTR_USERDATA, /* OVS_ACTION_ATTR_USERSPACE arg. */ 175 175 OVS_PACKET_ATTR_EGRESS_TUN_KEY, /* Nested OVS_TUNNEL_KEY_ATTR_* 176 176 attributes. */ 177 + OVS_PACKET_ATTR_UNUSED1, 178 + OVS_PACKET_ATTR_UNUSED2, 179 + OVS_PACKET_ATTR_PROBE, /* Packet operation is a feature probe, 180 + error logging should be suppressed. */ 177 181 __OVS_PACKET_ATTR_MAX 178 182 }; 179 183
+51
include/xen/interface/nmi.h
··· 1 + /****************************************************************************** 2 + * nmi.h 3 + * 4 + * NMI callback registration and reason codes. 5 + * 6 + * Copyright (c) 2005, Keir Fraser <keir@xensource.com> 7 + */ 8 + 9 + #ifndef __XEN_PUBLIC_NMI_H__ 10 + #define __XEN_PUBLIC_NMI_H__ 11 + 12 + #include <xen/interface/xen.h> 13 + 14 + /* 15 + * NMI reason codes: 16 + * Currently these are x86-specific, stored in arch_shared_info.nmi_reason. 17 + */ 18 + /* I/O-check error reported via ISA port 0x61, bit 6. */ 19 + #define _XEN_NMIREASON_io_error 0 20 + #define XEN_NMIREASON_io_error (1UL << _XEN_NMIREASON_io_error) 21 + /* PCI SERR reported via ISA port 0x61, bit 7. */ 22 + #define _XEN_NMIREASON_pci_serr 1 23 + #define XEN_NMIREASON_pci_serr (1UL << _XEN_NMIREASON_pci_serr) 24 + /* Unknown hardware-generated NMI. */ 25 + #define _XEN_NMIREASON_unknown 2 26 + #define XEN_NMIREASON_unknown (1UL << _XEN_NMIREASON_unknown) 27 + 28 + /* 29 + * long nmi_op(unsigned int cmd, void *arg) 30 + * NB. All ops return zero on success, else a negative error code. 31 + */ 32 + 33 + /* 34 + * Register NMI callback for this (calling) VCPU. Currently this only makes 35 + * sense for domain 0, vcpu 0. All other callers will be returned EINVAL. 36 + * arg == pointer to xennmi_callback structure. 37 + */ 38 + #define XENNMI_register_callback 0 39 + struct xennmi_callback { 40 + unsigned long handler_address; 41 + unsigned long pad; 42 + }; 43 + DEFINE_GUEST_HANDLE_STRUCT(xennmi_callback); 44 + 45 + /* 46 + * Deregister NMI callback for this (calling) VCPU. 47 + * arg == NULL. 48 + */ 49 + #define XENNMI_unregister_callback 1 50 + 51 + #endif /* __XEN_PUBLIC_NMI_H__ */
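The new header only defines the ABI; actually wiring up an NMI callback takes a single nmi_op hypercall pointing the hypervisor at an entry stub. A minimal sketch, assuming the arch-side HYPERVISOR_nmi_op() wrapper and a hypothetical xen_nmi_entry stub, neither of which lives in this header:

#include <xen/interface/nmi.h>

static int register_xen_nmi_handler(void)
{
	struct xennmi_callback cb = {
		.handler_address = (unsigned long)xen_nmi_entry, /* assumed stub */
		.pad = 0,
	};

	/* only dom0 vcpu0 may register; other callers get EINVAL back */
	return HYPERVISOR_nmi_op(XENNMI_register_callback, &cb);
}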
+28 -24
kernel/debug/debug_core.c
··· 27 27 * version 2. This program is licensed "as is" without any warranty of any 28 28 * kind, whether express or implied. 29 29 */ 30 + 31 + #define pr_fmt(fmt) "KGDB: " fmt 32 + 30 33 #include <linux/pid_namespace.h> 31 34 #include <linux/clocksource.h> 32 35 #include <linux/serial_core.h> ··· 199 196 return err; 200 197 err = kgdb_arch_remove_breakpoint(&tmp); 201 198 if (err) 202 - printk(KERN_ERR "KGDB: Critical breakpoint error, kernel " 203 - "memory destroyed at: %lx", addr); 199 + pr_err("Critical breakpoint error, kernel memory destroyed at: %lx\n", 200 + addr); 204 201 return err; 205 202 } 206 203 ··· 259 256 error = kgdb_arch_set_breakpoint(&kgdb_break[i]); 260 257 if (error) { 261 258 ret = error; 262 - printk(KERN_INFO "KGDB: BP install failed: %lx", 263 - kgdb_break[i].bpt_addr); 259 + pr_info("BP install failed: %lx\n", 260 + kgdb_break[i].bpt_addr); 264 261 continue; 265 262 } 266 263 ··· 322 319 continue; 323 320 error = kgdb_arch_remove_breakpoint(&kgdb_break[i]); 324 321 if (error) { 325 - printk(KERN_INFO "KGDB: BP remove failed: %lx\n", 326 - kgdb_break[i].bpt_addr); 322 + pr_info("BP remove failed: %lx\n", 323 + kgdb_break[i].bpt_addr); 327 324 ret = error; 328 325 } 329 326 ··· 370 367 goto setundefined; 371 368 error = kgdb_arch_remove_breakpoint(&kgdb_break[i]); 372 369 if (error) 373 - printk(KERN_ERR "KGDB: breakpoint remove failed: %lx\n", 370 + pr_err("breakpoint remove failed: %lx\n", 374 371 kgdb_break[i].bpt_addr); 375 372 setundefined: 376 373 kgdb_break[i].state = BP_UNDEFINED; ··· 403 400 if (print_wait) { 404 401 #ifdef CONFIG_KGDB_KDB 405 402 if (!dbg_kdb_mode) 406 - printk(KERN_CRIT "KGDB: waiting... or $3#33 for KDB\n"); 403 + pr_crit("waiting... or $3#33 for KDB\n"); 407 404 #else 408 - printk(KERN_CRIT "KGDB: Waiting for remote debugger\n"); 405 + pr_crit("Waiting for remote debugger\n"); 409 406 #endif 410 407 } 411 408 return 1; ··· 433 430 exception_level = 0; 434 431 kgdb_skipexception(ks->ex_vector, ks->linux_regs); 435 432 dbg_activate_sw_breakpoints(); 436 - printk(KERN_CRIT "KGDB: re-enter error: breakpoint removed %lx\n", 437 - addr); 433 + pr_crit("re-enter error: breakpoint removed %lx\n", addr); 438 434 WARN_ON_ONCE(1); 439 435 440 436 return 1; ··· 446 444 panic("Recursive entry to debugger"); 447 445 } 448 446 449 - printk(KERN_CRIT "KGDB: re-enter exception: ALL breakpoints killed\n"); 447 + pr_crit("re-enter exception: ALL breakpoints killed\n"); 450 448 #ifdef CONFIG_KGDB_KDB 451 449 /* Allow kdb to debug itself one level */ 452 450 return 0; ··· 473 471 int cpu; 474 472 int trace_on = 0; 475 473 int online_cpus = num_online_cpus(); 474 + u64 time_left; 476 475 477 476 kgdb_info[ks->cpu].enter_kgdb++; 478 477 kgdb_info[ks->cpu].exception_state |= exception_state; ··· 598 595 /* 599 596 * Wait for the other CPUs to be notified and be waiting for us: 600 597 */ 601 - while (kgdb_do_roundup && (atomic_read(&masters_in_kgdb) + 602 - atomic_read(&slaves_in_kgdb)) != online_cpus) 598 + time_left = loops_per_jiffy * HZ; 599 + while (kgdb_do_roundup && --time_left && 600 + (atomic_read(&masters_in_kgdb) + atomic_read(&slaves_in_kgdb)) != 601 + online_cpus) 603 602 cpu_relax(); 603 + if (!time_left) 604 + pr_crit("KGDB: Timed out waiting for secondary CPUs.\n"); 604 605 605 606 /* 606 607 * At this point the primary processor is completely ··· 802 795 static void sysrq_handle_dbg(int key) 803 796 { 804 797 if (!dbg_io_ops) { 805 - printk(KERN_CRIT "ERROR: No KGDB I/O module available\n"); 798 + pr_crit("ERROR: No KGDB I/O module 
available\n"); 806 799 return; 807 800 } 808 801 if (!kgdb_connected) { 809 802 #ifdef CONFIG_KGDB_KDB 810 803 if (!dbg_kdb_mode) 811 - printk(KERN_CRIT "KGDB or $3#33 for KDB\n"); 804 + pr_crit("KGDB or $3#33 for KDB\n"); 812 805 #else 813 - printk(KERN_CRIT "Entering KGDB\n"); 806 + pr_crit("Entering KGDB\n"); 814 807 #endif 815 808 } 816 809 ··· 952 945 { 953 946 kgdb_break_asap = 0; 954 947 955 - printk(KERN_CRIT "kgdb: Waiting for connection from remote gdb...\n"); 948 + pr_crit("Waiting for connection from remote gdb...\n"); 956 949 kgdb_breakpoint(); 957 950 } 958 951 ··· 971 964 if (dbg_io_ops) { 972 965 spin_unlock(&kgdb_registration_lock); 973 966 974 - printk(KERN_ERR "kgdb: Another I/O driver is already " 975 - "registered with KGDB.\n"); 967 + pr_err("Another I/O driver is already registered with KGDB\n"); 976 968 return -EBUSY; 977 969 } 978 970 ··· 987 981 988 982 spin_unlock(&kgdb_registration_lock); 989 983 990 - printk(KERN_INFO "kgdb: Registered I/O driver %s.\n", 991 - new_dbg_io_ops->name); 984 + pr_info("Registered I/O driver %s\n", new_dbg_io_ops->name); 992 985 993 986 /* Arm KGDB now. */ 994 987 kgdb_register_callbacks(); ··· 1022 1017 1023 1018 spin_unlock(&kgdb_registration_lock); 1024 1019 1025 - printk(KERN_INFO 1026 - "kgdb: Unregistered I/O driver %s, debugger disabled.\n", 1020 + pr_info("Unregistered I/O driver %s, debugger disabled\n", 1027 1021 old_dbg_io_ops->name); 1028 1022 } 1029 1023 EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
+21 -14
kernel/debug/kdb/kdb_bp.c
··· 531 531 for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++) 532 532 bp->bp_free = 1; 533 533 534 - kdb_register_repeat("bp", kdb_bp, "[<vaddr>]", 535 - "Set/Display breakpoints", 0, KDB_REPEAT_NO_ARGS); 536 - kdb_register_repeat("bl", kdb_bp, "[<vaddr>]", 537 - "Display breakpoints", 0, KDB_REPEAT_NO_ARGS); 534 + kdb_register_flags("bp", kdb_bp, "[<vaddr>]", 535 + "Set/Display breakpoints", 0, 536 + KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS); 537 + kdb_register_flags("bl", kdb_bp, "[<vaddr>]", 538 + "Display breakpoints", 0, 539 + KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS); 538 540 if (arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT) 539 - kdb_register_repeat("bph", kdb_bp, "[<vaddr>]", 540 - "[datar [length]|dataw [length]] Set hw brk", 0, KDB_REPEAT_NO_ARGS); 541 - kdb_register_repeat("bc", kdb_bc, "<bpnum>", 542 - "Clear Breakpoint", 0, KDB_REPEAT_NONE); 543 - kdb_register_repeat("be", kdb_bc, "<bpnum>", 544 - "Enable Breakpoint", 0, KDB_REPEAT_NONE); 545 - kdb_register_repeat("bd", kdb_bc, "<bpnum>", 546 - "Disable Breakpoint", 0, KDB_REPEAT_NONE); 541 + kdb_register_flags("bph", kdb_bp, "[<vaddr>]", 542 + "[datar [length]|dataw [length]] Set hw brk", 0, 543 + KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS); 544 + kdb_register_flags("bc", kdb_bc, "<bpnum>", 545 + "Clear Breakpoint", 0, 546 + KDB_ENABLE_FLOW_CTRL); 547 + kdb_register_flags("be", kdb_bc, "<bpnum>", 548 + "Enable Breakpoint", 0, 549 + KDB_ENABLE_FLOW_CTRL); 550 + kdb_register_flags("bd", kdb_bc, "<bpnum>", 551 + "Disable Breakpoint", 0, 552 + KDB_ENABLE_FLOW_CTRL); 547 553 548 - kdb_register_repeat("ss", kdb_ss, "", 549 - "Single Step", 1, KDB_REPEAT_NO_ARGS); 554 + kdb_register_flags("ss", kdb_ss, "", 555 + "Single Step", 1, 556 + KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS); 550 557 /* 551 558 * Architecture dependent initialization. 552 559 */
+4
kernel/debug/kdb/kdb_debugger.c
··· 129 129 ks->pass_exception = 1; 130 130 KDB_FLAG_SET(CATASTROPHIC); 131 131 } 132 + /* set CATASTROPHIC if the system contains unresponsive processors */ 133 + for_each_online_cpu(i) 134 + if (!kgdb_info[i].enter_kgdb) 135 + KDB_FLAG_SET(CATASTROPHIC); 132 136 if (KDB_STATE(SSBPT) && reason == KDB_REASON_SSTEP) { 133 137 KDB_STATE_CLEAR(SSBPT); 134 138 KDB_STATE_CLEAR(DOING_SS);
+170 -95
kernel/debug/kdb/kdb_main.c
··· 12 12 */ 13 13 14 14 #include <linux/ctype.h> 15 + #include <linux/types.h> 15 16 #include <linux/string.h> 16 17 #include <linux/kernel.h> 17 18 #include <linux/kmsg_dump.h>
··· 24 23 #include <linux/vmalloc.h> 25 24 #include <linux/atomic.h> 26 25 #include <linux/module.h> 26 + #include <linux/moduleparam.h> 27 27 #include <linux/mm.h> 28 28 #include <linux/init.h> 29 29 #include <linux/kallsyms.h>
··· 43 41 #include <linux/uaccess.h> 44 42 #include <linux/slab.h> 45 43 #include "kdb_private.h" 44 + 45 + #undef MODULE_PARAM_PREFIX 46 + #define MODULE_PARAM_PREFIX "kdb." 47 + 48 + static int kdb_cmd_enabled = CONFIG_KDB_DEFAULT_ENABLE; 49 + module_param_named(cmd_enable, kdb_cmd_enabled, int, 0600); 46 50 47 51 #define GREP_LEN 256 48 52 char kdb_grep_string[GREP_LEN];
··· 129 121 KDBMSG(BADLENGTH, "Invalid length field"), 130 122 KDBMSG(NOBP, "No Breakpoint exists"), 131 123 KDBMSG(BADADDR, "Invalid address"), 124 + KDBMSG(NOPERM, "Permission denied"), 132 125 }; 133 126 #undef KDBMSG 134 127
··· 194 185 p = krp->p; 195 186 #endif 196 187 return p; 188 + } 189 + 190 + /* 191 + * Check whether the flags of the current command and the permissions 192 + * of the kdb console allow a command to be run. 193 + */ 194 + static inline bool kdb_check_flags(kdb_cmdflags_t flags, int permissions, 195 + bool no_args) 196 + { 197 + /* permissions comes from userspace so needs massaging slightly */ 198 + permissions &= KDB_ENABLE_MASK; 199 + permissions |= KDB_ENABLE_ALWAYS_SAFE; 200 + 201 + /* some commands change group when launched with no arguments */ 202 + if (no_args) 203 + permissions |= permissions << KDB_ENABLE_NO_ARGS_SHIFT; 204 + 205 + flags |= KDB_ENABLE_ALL; 206 + 207 + return permissions & flags; 197 208 } 198 209 199 210 /*
··· 505 476 kdb_symtab_t symtab; 506 477 507 478 /* 479 + * If the enable flags prohibit both arbitrary memory access 480 + * and flow control then there are no reasonable grounds to 481 + * provide symbol lookup. 482 + */ 483 + if (!kdb_check_flags(KDB_ENABLE_MEM_READ | KDB_ENABLE_FLOW_CTRL, 484 + kdb_cmd_enabled, false)) 485 + return KDB_NOPERM; 486 + 487 + /* 508 488 * Process arguments which follow the following syntax: 509 489 * 510 490 * symbol | numeric-address [+/- numeric-offset]
··· 679 641 if (!s->count) 680 642 s->usable = 0; 681 643 if (s->usable) 682 - kdb_register(s->name, kdb_exec_defcmd, 683 - s->usage, s->help, 0); 644 + /* macros are always safe because when executed each 645 + * internal command re-enters kdb_parse() and is 646 + * safety checked individually.
647 + */ 648 + kdb_register_flags(s->name, kdb_exec_defcmd, s->usage, 649 + s->help, 0, 650 + KDB_ENABLE_ALWAYS_SAFE); 684 651 return 0; 685 652 } 686 653 if (!s->usable) ··· 1046 1003 1047 1004 if (i < kdb_max_commands) { 1048 1005 int result; 1006 + 1007 + if (!kdb_check_flags(tp->cmd_flags, kdb_cmd_enabled, argc <= 1)) 1008 + return KDB_NOPERM; 1009 + 1049 1010 KDB_STATE_SET(CMD); 1050 1011 result = (*tp->cmd_func)(argc-1, (const char **)argv); 1051 1012 if (result && ignore_errors && result > KDB_CMD_GO) 1052 1013 result = 0; 1053 1014 KDB_STATE_CLEAR(CMD); 1054 - switch (tp->cmd_repeat) { 1055 - case KDB_REPEAT_NONE: 1056 - argc = 0; 1057 - if (argv[0]) 1058 - *(argv[0]) = '\0'; 1059 - break; 1060 - case KDB_REPEAT_NO_ARGS: 1061 - argc = 1; 1062 - if (argv[1]) 1063 - *(argv[1]) = '\0'; 1064 - break; 1065 - case KDB_REPEAT_WITH_ARGS: 1066 - break; 1067 - } 1015 + 1016 + if (tp->cmd_flags & KDB_REPEAT_WITH_ARGS) 1017 + return result; 1018 + 1019 + argc = tp->cmd_flags & KDB_REPEAT_NO_ARGS ? 1 : 0; 1020 + if (argv[argc]) 1021 + *(argv[argc]) = '\0'; 1068 1022 return result; 1069 1023 } 1070 1024 ··· 1961 1921 */ 1962 1922 static int kdb_sr(int argc, const char **argv) 1963 1923 { 1924 + bool check_mask = 1925 + !kdb_check_flags(KDB_ENABLE_ALL, kdb_cmd_enabled, false); 1926 + 1964 1927 if (argc != 1) 1965 1928 return KDB_ARGCOUNT; 1929 + 1966 1930 kdb_trap_printk++; 1967 - __handle_sysrq(*argv[1], false); 1931 + __handle_sysrq(*argv[1], check_mask); 1968 1932 kdb_trap_printk--; 1969 1933 1970 1934 return 0; ··· 2201 2157 for (start_cpu = -1, i = 0; i < NR_CPUS; i++) { 2202 2158 if (!cpu_online(i)) { 2203 2159 state = 'F'; /* cpu is offline */ 2160 + } else if (!kgdb_info[i].enter_kgdb) { 2161 + state = 'D'; /* cpu is online but unresponsive */ 2204 2162 } else { 2205 2163 state = ' '; /* cpu is responding to kdb */ 2206 2164 if (kdb_task_state_char(KDB_TSK(i)) == 'I') ··· 2256 2210 /* 2257 2211 * Validate cpunum 2258 2212 */ 2259 - if ((cpunum > NR_CPUS) || !cpu_online(cpunum)) 2213 + if ((cpunum > NR_CPUS) || !kgdb_info[cpunum].enter_kgdb) 2260 2214 return KDB_BADCPUNUM; 2261 2215 2262 2216 dbg_switch_cpu = cpunum; ··· 2420 2374 if (KDB_FLAG(CMD_INTERRUPT)) 2421 2375 return 0; 2422 2376 if (!kt->cmd_name) 2377 + continue; 2378 + if (!kdb_check_flags(kt->cmd_flags, kdb_cmd_enabled, true)) 2423 2379 continue; 2424 2380 if (strlen(kt->cmd_usage) > 20) 2425 2381 space = "\n "; ··· 2677 2629 } 2678 2630 2679 2631 /* 2680 - * kdb_register_repeat - This function is used to register a kernel 2632 + * kdb_register_flags - This function is used to register a kernel 2681 2633 * debugger command. 2682 2634 * Inputs: 2683 2635 * cmd Command name ··· 2689 2641 * zero for success, one if a duplicate command. 
2690 2642 */ 2691 2643 #define kdb_command_extend 50 /* arbitrary */ 2692 - int kdb_register_repeat(char *cmd, 2693 - kdb_func_t func, 2694 - char *usage, 2695 - char *help, 2696 - short minlen, 2697 - kdb_repeat_t repeat) 2644 + int kdb_register_flags(char *cmd, 2645 + kdb_func_t func, 2646 + char *usage, 2647 + char *help, 2648 + short minlen, 2649 + kdb_cmdflags_t flags) 2698 2650 { 2699 2651 int i; 2700 2652 kdbtab_t *kp;
··· 2742 2694 kp->cmd_func = func; 2743 2695 kp->cmd_usage = usage; 2744 2696 kp->cmd_help = help; 2745 - kp->cmd_flags = 0; 2746 2697 kp->cmd_minlen = minlen; 2747 - kp->cmd_repeat = repeat; 2698 + kp->cmd_flags = flags; 2748 2699 2749 2700 return 0; 2750 2701 } 2751 - EXPORT_SYMBOL_GPL(kdb_register_repeat); 2702 + EXPORT_SYMBOL_GPL(kdb_register_flags); 2752 2703 2753 2704 2754 2705 /* 2755 2706 * kdb_register - Compatibility register function for commands that do 2756 2707 * not need to specify a repeat state. Equivalent to 2757 - * kdb_register_repeat with KDB_REPEAT_NONE. 2708 + * kdb_register_flags with flags set to 0. 2758 2709 * Inputs: 2759 2710 * cmd Command name 2760 2711 * func Function to execute the command
··· 2768 2721 char *help, 2769 2722 short minlen) 2770 2723 { 2771 - return kdb_register_repeat(cmd, func, usage, help, minlen, 2772 - KDB_REPEAT_NONE); 2724 + return kdb_register_flags(cmd, func, usage, help, minlen, 0); 2773 2725 } 2774 2726 EXPORT_SYMBOL_GPL(kdb_register); 2775 2727
··· 2810 2764 for_each_kdbcmd(kp, i) 2811 2765 kp->cmd_name = NULL; 2812 2766 2813 - kdb_register_repeat("md", kdb_md, "<vaddr>", 2767 + kdb_register_flags("md", kdb_md, "<vaddr>", 2814 2768 "Display Memory Contents, also mdWcN, e.g. md8c1", 1, 2815 - KDB_REPEAT_NO_ARGS); 2816 - kdb_register_repeat("mdr", kdb_md, "<vaddr> <bytes>", 2817 - "Display Raw Memory", 0, KDB_REPEAT_NO_ARGS); 2818 - kdb_register_repeat("mdp", kdb_md, "<paddr> <bytes>", 2819 - "Display Physical Memory", 0, KDB_REPEAT_NO_ARGS); 2820 - kdb_register_repeat("mds", kdb_md, "<vaddr>", 2821 - "Display Memory Symbolically", 0, KDB_REPEAT_NO_ARGS); 2822 - kdb_register_repeat("mm", kdb_mm, "<vaddr> <contents>", 2823 - "Modify Memory Contents", 0, KDB_REPEAT_NO_ARGS); 2824 - kdb_register_repeat("go", kdb_go, "[<vaddr>]", 2825 - "Continue Execution", 1, KDB_REPEAT_NONE); 2826 - kdb_register_repeat("rd", kdb_rd, "", 2827 - "Display Registers", 0, KDB_REPEAT_NONE); 2828 - kdb_register_repeat("rm", kdb_rm, "<reg> <contents>", 2829 - "Modify Registers", 0, KDB_REPEAT_NONE); 2830 - kdb_register_repeat("ef", kdb_ef, "<vaddr>", 2831 - "Display exception frame", 0, KDB_REPEAT_NONE); 2832 - kdb_register_repeat("bt", kdb_bt, "[<vaddr>]", 2833 - "Stack traceback", 1, KDB_REPEAT_NONE); 2834 - kdb_register_repeat("btp", kdb_bt, "<pid>", 2835 - "Display stack for process <pid>", 0, KDB_REPEAT_NONE); 2836 - kdb_register_repeat("bta", kdb_bt, "[D|R|S|T|C|Z|E|U|I|M|A]", 2837 - "Backtrace all processes matching state flag", 0, KDB_REPEAT_NONE); 2838 - kdb_register_repeat("btc", kdb_bt, "", 2839 - "Backtrace current process on each cpu", 0, KDB_REPEAT_NONE); 2840 - kdb_register_repeat("btt", kdb_bt, "<vaddr>", 2769 + KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS); 2770 + kdb_register_flags("mdr", kdb_md, "<vaddr> <bytes>", 2771 + "Display Raw Memory", 0, 2772 + KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS); 2773 + kdb_register_flags("mdp", kdb_md, "<paddr> <bytes>", 2774 + "Display Physical Memory", 0, 2775 + KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS); 2776 + kdb_register_flags("mds", kdb_md, "<vaddr>", 2777 + "Display Memory Symbolically", 0, 2778 + KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS); 2779 + kdb_register_flags("mm", kdb_mm, "<vaddr> <contents>", 2780 + "Modify Memory Contents", 0, 2781 + KDB_ENABLE_MEM_WRITE | KDB_REPEAT_NO_ARGS); 2782 + kdb_register_flags("go", kdb_go, "[<vaddr>]", 2783 + "Continue Execution", 1, 2784 + KDB_ENABLE_REG_WRITE | KDB_ENABLE_ALWAYS_SAFE_NO_ARGS); 2785 + kdb_register_flags("rd", kdb_rd, "", 2786 + "Display Registers", 0, 2787 + KDB_ENABLE_REG_READ); 2788 + kdb_register_flags("rm", kdb_rm, "<reg> <contents>", 2789 + "Modify Registers", 0, 2790 + KDB_ENABLE_REG_WRITE); 2791 + kdb_register_flags("ef", kdb_ef, "<vaddr>", 2792 + "Display exception frame", 0, 2793 + KDB_ENABLE_MEM_READ); 2794 + kdb_register_flags("bt", kdb_bt, "[<vaddr>]", 2795 + "Stack traceback", 1, 2796 + KDB_ENABLE_MEM_READ | KDB_ENABLE_INSPECT_NO_ARGS); 2797 + kdb_register_flags("btp", kdb_bt, "<pid>", 2798 + "Display stack for process <pid>", 0, 2799 + KDB_ENABLE_INSPECT); 2800 + kdb_register_flags("bta", kdb_bt, "[D|R|S|T|C|Z|E|U|I|M|A]", 2801 + "Backtrace all processes matching state flag", 0, 2802 + KDB_ENABLE_INSPECT); 2803 + kdb_register_flags("btc", kdb_bt, "", 2804 + "Backtrace current process on each cpu", 0, 2805 + KDB_ENABLE_INSPECT); 2806 + kdb_register_flags("btt", kdb_bt, "<vaddr>", 2841 2807 "Backtrace process given its struct task address", 0, 2842 - KDB_REPEAT_NONE); 2843 - kdb_register_repeat("env", kdb_env, "", 2844 - "Show environment variables", 0, KDB_REPEAT_NONE); 2845 - kdb_register_repeat("set", kdb_set, "", 2846 - "Set environment variables", 0, KDB_REPEAT_NONE); 2847 - kdb_register_repeat("help", kdb_help, "", 2848 - "Display Help Message", 1, KDB_REPEAT_NONE); 2849 - kdb_register_repeat("?", kdb_help, "", 2850 - "Display Help Message", 0, KDB_REPEAT_NONE); 2851 - kdb_register_repeat("cpu", kdb_cpu, "<cpunum>", 2852 - "Switch to new cpu", 0, KDB_REPEAT_NONE); 2853 - kdb_register_repeat("kgdb", kdb_kgdb, "", 2854 - "Enter kgdb mode", 0, KDB_REPEAT_NONE); 2855 - kdb_register_repeat("ps", kdb_ps, "[<flags>|A]", 2856 - "Display active task list", 0, KDB_REPEAT_NONE); 2857 - kdb_register_repeat("pid", kdb_pid, "<pidnum>", 2858 - "Switch to another task", 0, KDB_REPEAT_NONE); 2859 - kdb_register_repeat("reboot", kdb_reboot, "", 2860 - "Reboot the machine immediately", 0, KDB_REPEAT_NONE); 2808 + KDB_ENABLE_MEM_READ | KDB_ENABLE_INSPECT_NO_ARGS); 2809 + kdb_register_flags("env", kdb_env, "", 2810 + "Show environment variables", 0, 2811 + KDB_ENABLE_ALWAYS_SAFE); 2812 + kdb_register_flags("set", kdb_set, "", 2813 + "Set environment variables", 0, 2814 + KDB_ENABLE_ALWAYS_SAFE); 2815 + kdb_register_flags("help", kdb_help, "", 2816 + "Display Help Message", 1, 2817 + KDB_ENABLE_ALWAYS_SAFE); 2818 + kdb_register_flags("?", kdb_help, "", 2819 + "Display Help Message", 0, 2820 + KDB_ENABLE_ALWAYS_SAFE); 2821 + kdb_register_flags("cpu", kdb_cpu, "<cpunum>", 2822 + "Switch to new cpu", 0, 2823 + KDB_ENABLE_ALWAYS_SAFE_NO_ARGS); 2824 + kdb_register_flags("kgdb", kdb_kgdb, "", 2825 + "Enter kgdb mode", 0, 0); 2826 + kdb_register_flags("ps", kdb_ps, "[<flags>|A]", 2827 + "Display active task list", 0, 2828 + KDB_ENABLE_INSPECT); 2829 + kdb_register_flags("pid", kdb_pid, "<pidnum>", 2830 + "Switch to another task", 0, 2831 + KDB_ENABLE_INSPECT); 2832 + kdb_register_flags("reboot", kdb_reboot, "", 2833 + "Reboot the machine immediately", 0, 2834 + KDB_ENABLE_REBOOT); 2861 2835 #if defined(CONFIG_MODULES) 2862 - kdb_register_repeat("lsmod", kdb_lsmod, "", 2863 - "List loaded kernel modules", 0, KDB_REPEAT_NONE); 2836 + kdb_register_flags("lsmod", kdb_lsmod, "", 2837 + "List loaded kernel modules", 0, 2838 + KDB_ENABLE_INSPECT); 2864 2839 #endif 2865 2840 #if defined(CONFIG_MAGIC_SYSRQ) 2866 - kdb_register_repeat("sr", kdb_sr, "<key>", 2867 - "Magic SysRq key", 0, KDB_REPEAT_NONE); 2841 + kdb_register_flags("sr", kdb_sr, "<key>", 2842 + "Magic SysRq key", 0, 2843 + KDB_ENABLE_ALWAYS_SAFE); 2868 2844 #endif 2869 2845 #if defined(CONFIG_PRINTK) 2870 - kdb_register_repeat("dmesg", kdb_dmesg, "[lines]", 2871 - "Display syslog buffer", 0, KDB_REPEAT_NONE); 2846 + kdb_register_flags("dmesg", kdb_dmesg, "[lines]", 2847 + "Display syslog buffer", 0, 2848 + KDB_ENABLE_ALWAYS_SAFE); 2872 2849 #endif 2873 2850 if (arch_kgdb_ops.enable_nmi) { 2874 - kdb_register_repeat("disable_nmi", kdb_disable_nmi, "", 2875 - "Disable NMI entry to KDB", 0, KDB_REPEAT_NONE); 2851 + kdb_register_flags("disable_nmi", kdb_disable_nmi, "", 2852 + "Disable NMI entry to KDB", 0, 2853 + KDB_ENABLE_ALWAYS_SAFE); 2876 2854 } 2877 - kdb_register_repeat("defcmd", kdb_defcmd, "name \"usage\" \"help\"", 2878 - "Define a set of commands, down to endefcmd", 0, KDB_REPEAT_NONE); 2879 - kdb_register_repeat("kill", kdb_kill, "<-signal> <pid>", 2880 - "Send a signal to a process", 0, KDB_REPEAT_NONE); 2881 - kdb_register_repeat("summary", kdb_summary, "", 2882 - "Summarize the system", 4, KDB_REPEAT_NONE); 2883 - kdb_register_repeat("per_cpu", kdb_per_cpu, "<sym> [<bytes>] [<cpu>]", 2884 - "Display per_cpu variables", 3, KDB_REPEAT_NONE); 2885 - kdb_register_repeat("grephelp", kdb_grep_help, "", 2886 - "Display help on | grep", 0, KDB_REPEAT_NONE); 2855 + kdb_register_flags("defcmd", kdb_defcmd, "name \"usage\" \"help\"", 2856 + "Define a set of commands, down to endefcmd", 0, 2857 + KDB_ENABLE_ALWAYS_SAFE); 2858 + kdb_register_flags("kill", kdb_kill, "<-signal> <pid>", 2859 + "Send a signal to a process", 0, 2860 + KDB_ENABLE_SIGNAL); 2861 + kdb_register_flags("summary", kdb_summary, "", 2862 + "Summarize the system", 4, 2863 + KDB_ENABLE_ALWAYS_SAFE); 2864 + kdb_register_flags("per_cpu", kdb_per_cpu, "<sym> [<bytes>] [<cpu>]", 2865 + "Display per_cpu variables", 3, 2866 + KDB_ENABLE_MEM_READ); 2867 + kdb_register_flags("grephelp", kdb_grep_help, "", 2868 + "Display help on | grep", 0, 2869 + KDB_ENABLE_ALWAYS_SAFE); 2887 2870 } 2888 2871 2889 2872 /* Execute any commands defined in kdb_cmds. */
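To make the new permission check concrete, here is a userspace re-implementation of kdb_check_flags() from the hunk above, using the bit values documented in the KDB_DEFAULT_ENABLE help text added by this series (lib/Kconfig.kgdb, below); the position of KDB_ENABLE_ALWAYS_SAFE and the NO_ARGS shift are assumptions, not quoted from the patch:

#include <stdbool.h>
#include <stdio.h>

#define KDB_ENABLE_ALL           0x0001  /* cmd_enable=1: everything */
#define KDB_ENABLE_MEM_READ      0x0002
#define KDB_ENABLE_REG_WRITE     0x0010
#define KDB_ENABLE_INSPECT       0x0020
#define KDB_ENABLE_ALWAYS_SAFE   0x0200  /* assumption: bit 9 */
#define KDB_ENABLE_NO_ARGS_SHIFT 10      /* assumption */
#define KDB_ENABLE_MASK          ((1 << KDB_ENABLE_NO_ARGS_SHIFT) - 1)

static bool kdb_check_flags(unsigned int flags, int permissions, bool no_args)
{
    permissions &= KDB_ENABLE_MASK;
    permissions |= KDB_ENABLE_ALWAYS_SAFE;
    if (no_args)                /* no-arg invocations may be a milder class */
        permissions |= permissions << KDB_ENABLE_NO_ARGS_SHIFT;
    flags |= KDB_ENABLE_ALL;    /* every command matches "enable all" */
    return permissions & flags;
}

int main(void)
{
    int cmd_enable = KDB_ENABLE_MEM_READ | KDB_ENABLE_INSPECT;  /* 0x22 */

    printf("md:   %d\n", kdb_check_flags(KDB_ENABLE_MEM_READ, cmd_enable, false));   /* 1 */
    printf("rm:   %d\n", kdb_check_flags(KDB_ENABLE_REG_WRITE, cmd_enable, false));  /* 0 */
    printf("help: %d\n", kdb_check_flags(KDB_ENABLE_ALWAYS_SAFE, cmd_enable, false));/* 1 */
    return 0;
}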
+1 -2
kernel/debug/kdb/kdb_private.h
··· 172 172 kdb_func_t cmd_func; /* Function to execute command */ 173 173 char *cmd_usage; /* Usage String for this command */ 174 174 char *cmd_help; /* Help message for this command */ 175 - short cmd_flags; /* Parsing flags */ 176 175 short cmd_minlen; /* Minimum legal # command 177 176 * chars required */ 178 - kdb_repeat_t cmd_repeat; /* Does command auto repeat on enter? */ 177 + kdb_cmdflags_t cmd_flags; /* Command behaviour flags */ 179 178 } kdbtab_t; 180 179 181 180 extern int kdb_bt(int, const char **); /* KDB display back trace */
+8 -11
kernel/events/core.c
··· 4461 4461 } 4462 4462 4463 4463 static void perf_sample_regs_user(struct perf_regs *regs_user, 4464 - struct pt_regs *regs) 4464 + struct pt_regs *regs, 4465 + struct pt_regs *regs_user_copy) 4465 4466 { 4466 - if (!user_mode(regs)) { 4467 - if (current->mm) 4468 - regs = task_pt_regs(current); 4469 - else 4470 - regs = NULL; 4471 - } 4472 - 4473 - if (regs) { 4474 - regs_user->abi = perf_reg_abi(current); 4467 + if (user_mode(regs)) { 4468 + regs_user->abi = perf_reg_abi(current); 4475 4469 regs_user->regs = regs; 4470 + } else if (current->mm) { 4471 + perf_get_regs_user(regs_user, regs, regs_user_copy); 4476 4472 } else { 4477 4473 regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE; 4478 4474 regs_user->regs = NULL; ··· 4947 4951 } 4948 4952 4949 4953 if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER)) 4950 - perf_sample_regs_user(&data->regs_user, regs); 4954 + perf_sample_regs_user(&data->regs_user, regs, 4955 + &data->regs_user_copy); 4951 4956 4952 4957 if (sample_type & PERF_SAMPLE_REGS_USER) { 4953 4958 /* regs dump ABI info */
+9 -3
kernel/exit.c
··· 1287 1287 static int wait_consider_task(struct wait_opts *wo, int ptrace, 1288 1288 struct task_struct *p) 1289 1289 { 1290 + /* 1291 + * We can race with wait_task_zombie() from another thread. 1292 + * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition 1293 + * can't confuse the checks below. 1294 + */ 1295 + int exit_state = ACCESS_ONCE(p->exit_state); 1290 1296 int ret; 1291 1297 1292 - if (unlikely(p->exit_state == EXIT_DEAD)) 1298 + if (unlikely(exit_state == EXIT_DEAD)) 1293 1299 return 0; 1294 1300 1295 1301 ret = eligible_child(wo, p); ··· 1316 1310 return 0; 1317 1311 } 1318 1312 1319 - if (unlikely(p->exit_state == EXIT_TRACE)) { 1313 + if (unlikely(exit_state == EXIT_TRACE)) { 1320 1314 /* 1321 1315 * ptrace == 0 means we are the natural parent. In this case 1322 1316 * we should clear notask_error, debugger will notify us. ··· 1343 1337 } 1344 1338 1345 1339 /* slay zombie? */ 1346 - if (p->exit_state == EXIT_ZOMBIE) { 1340 + if (exit_state == EXIT_ZOMBIE) { 1347 1341 /* we don't reap group leaders with subthreads */ 1348 1342 if (!delay_group_leader(p)) { 1349 1343 /*
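The wait_consider_task() fix reads p->exit_state exactly once and then tests the snapshot, so a concurrent EXIT_ZOMBIE to EXIT_DEAD/EXIT_TRACE transition cannot make successive checks disagree. A minimal userspace sketch of the pattern (ACCESS_ONCE modelled with a volatile cast; the states here are arbitrary stand-ins):

#include <stdio.h>

#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

enum { EXIT_NONE, EXIT_ZOMBIE, EXIT_TRACE, EXIT_DEAD };

static int shared_exit_state = EXIT_ZOMBIE;  /* may change under us */

static void check(void)
{
    int exit_state = ACCESS_ONCE(shared_exit_state);  /* one load */

    /* Every branch tests the snapshot, so the function sees one
     * consistent value even if another thread rewrites the field
     * between the comparisons. */
    if (exit_state == EXIT_DEAD)
        printf("dead: ignore\n");
    else if (exit_state == EXIT_TRACE)
        printf("trace path\n");
    else if (exit_state == EXIT_ZOMBIE)
        printf("zombie path\n");
}

int main(void)
{
    check();
    return 0;
}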
+1 -1
kernel/locking/mutex-debug.c
··· 80 80 DEBUG_LOCKS_WARN_ON(lock->owner != current); 81 81 82 82 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next); 83 - mutex_clear_owner(lock); 84 83 } 85 84 86 85 /* 87 86 * __mutex_slowpath_needs_to_unlock() is explicitly 0 for debug 88 87 * mutexes so that we can do it here after we've verified state. 89 88 */ 89 + mutex_clear_owner(lock); 90 90 atomic_set(&lock->count, 1); 91 91 } 92 92
+6 -9
kernel/sched/core.c
··· 7113 7113 #ifdef CONFIG_RT_GROUP_SCHED 7114 7114 alloc_size += 2 * nr_cpu_ids * sizeof(void **); 7115 7115 #endif 7116 - #ifdef CONFIG_CPUMASK_OFFSTACK 7117 - alloc_size += num_possible_cpus() * cpumask_size(); 7118 - #endif 7119 7116 if (alloc_size) { 7120 7117 ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT); 7121 7118 ··· 7132 7135 ptr += nr_cpu_ids * sizeof(void **); 7133 7136 7134 7137 #endif /* CONFIG_RT_GROUP_SCHED */ 7135 - #ifdef CONFIG_CPUMASK_OFFSTACK 7136 - for_each_possible_cpu(i) { 7137 - per_cpu(load_balance_mask, i) = (void *)ptr; 7138 - ptr += cpumask_size(); 7139 - } 7140 - #endif /* CONFIG_CPUMASK_OFFSTACK */ 7141 7138 } 7139 + #ifdef CONFIG_CPUMASK_OFFSTACK 7140 + for_each_possible_cpu(i) { 7141 + per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node( 7142 + cpumask_size(), GFP_KERNEL, cpu_to_node(i)); 7143 + } 7144 + #endif /* CONFIG_CPUMASK_OFFSTACK */ 7142 7145 7143 7146 init_rt_bandwidth(&def_rt_bandwidth, 7144 7147 global_rt_period(), global_rt_runtime());
+4 -21
kernel/sched/deadline.c
··· 570 570 static 571 571 int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se) 572 572 { 573 - int dmiss = dl_time_before(dl_se->deadline, rq_clock(rq)); 574 - int rorun = dl_se->runtime <= 0; 575 - 576 - if (!rorun && !dmiss) 577 - return 0; 578 - 579 - /* 580 - * If we are beyond our current deadline and we are still 581 - * executing, then we have already used some of the runtime of 582 - * the next instance. Thus, if we do not account that, we are 583 - * stealing bandwidth from the system at each deadline miss! 584 - */ 585 - if (dmiss) { 586 - dl_se->runtime = rorun ? dl_se->runtime : 0; 587 - dl_se->runtime -= rq_clock(rq) - dl_se->deadline; 588 - } 589 - 590 - return 1; 573 + return (dl_se->runtime <= 0); 591 574 } 592 575 593 576 extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq); ··· 809 826 * parameters of the task might need updating. Otherwise, 810 827 * we want a replenishment of its runtime. 811 828 */ 812 - if (!dl_se->dl_new && flags & ENQUEUE_REPLENISH) 813 - replenish_dl_entity(dl_se, pi_se); 814 - else 829 + if (dl_se->dl_new || flags & ENQUEUE_WAKEUP) 815 830 update_dl_entity(dl_se, pi_se); 831 + else if (flags & ENQUEUE_REPLENISH) 832 + replenish_dl_entity(dl_se, pi_se); 816 833 817 834 __enqueue_dl_entity(dl_se); 818 835 }
+5 -1
kernel/sched/fair.c
··· 4005 4005 4006 4006 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) 4007 4007 { 4008 + /* init_cfs_bandwidth() was not called */ 4009 + if (!cfs_b->throttled_cfs_rq.next) 4010 + return; 4011 + 4008 4012 hrtimer_cancel(&cfs_b->period_timer); 4009 4013 hrtimer_cancel(&cfs_b->slack_timer); 4010 4014 } ··· 4428 4424 * wl = S * s'_i; see (2) 4429 4425 */ 4430 4426 if (W > 0 && w < W) 4431 - wl = (w * tg->shares) / W; 4427 + wl = (w * (long)tg->shares) / W; 4432 4428 else 4433 4429 wl = tg->shares; 4434 4430
+2 -2
kernel/trace/trace_kdb.c
··· 132 132 133 133 static __init int kdb_ftrace_register(void) 134 134 { 135 - kdb_register_repeat("ftdump", kdb_ftdump, "[skip_#lines] [cpu]", 136 - "Dump ftrace log", 0, KDB_REPEAT_NONE); 135 + kdb_register_flags("ftdump", kdb_ftdump, "[skip_#lines] [cpu]", 136 + "Dump ftrace log", 0, KDB_ENABLE_ALWAYS_SAFE); 137 137 return 0; 138 138 } 139 139
+25
lib/Kconfig.kgdb
··· 73 73 help 74 74 KDB frontend for kernel 75 75 76 + config KDB_DEFAULT_ENABLE 77 + hex "KDB: Select kdb command functions to be enabled by default" 78 + depends on KGDB_KDB 79 + default 0x1 80 + help 81 + Specifies which kdb commands are enabled by default. This may 82 + be set to 1 or 0 to enable all commands or disable almost all 83 + commands. 84 + 85 + Alternatively the following bitmask applies: 86 + 87 + 0x0002 - allow arbitrary reads from memory and symbol lookup 88 + 0x0004 - allow arbitrary writes to memory 89 + 0x0008 - allow current register state to be inspected 90 + 0x0010 - allow current register state to be modified 91 + 0x0020 - allow passive inspection (backtrace, process list, lsmod) 92 + 0x0040 - allow flow control management (breakpoint, single step) 93 + 0x0080 - enable signalling of processes 94 + 0x0100 - allow machine to be rebooted 95 + 96 + The config option merely sets the default at boot time. Either 97 + issuing 'echo X > /sys/module/kdb/parameters/cmd_enable' or 98 + setting the kdb.cmd_enable=X kernel command line option will 99 + override the default settings. 100 + 76 101 config KDB_KEYBOARD 77 102 bool "KGDB_KDB: keyboard as input device" 78 103 depends on VT && KGDB_KDB
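A worked example of the mask arithmetic (illustrative pairing, not part of the patch): 0x0002 | 0x0020 = 0x0022, so booting with kdb.cmd_enable=0x22, or later issuing 'echo 0x22 > /sys/module/kdb/parameters/cmd_enable', permits memory reads and passive inspection (md, bt, ps and friends) while commands that need memory writes (mm), flow control (bp, ss) or reboot fail with the new "Permission denied" message.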
+1
lib/assoc_array.c
··· 11 11 * 2 of the Licence, or (at your option) any later version. 12 12 */ 13 13 //#define DEBUG 14 + #include <linux/rcupdate.h> 14 15 #include <linux/slab.h> 15 16 #include <linux/err.h> 16 17 #include <linux/assoc_array_priv.h>
-9
mm/Kconfig.debug
··· 14 14 depends on !KMEMCHECK 15 15 select PAGE_EXTENSION 16 16 select PAGE_POISONING if !ARCH_SUPPORTS_DEBUG_PAGEALLOC 17 - select PAGE_GUARD if ARCH_SUPPORTS_DEBUG_PAGEALLOC 18 17 ---help--- 19 18 Unmap pages from the kernel linear mapping after free_pages(). 20 19 This results in a large slowdown, but helps to find certain types ··· 26 27 that would result in incorrect warnings of memory corruption after 27 28 a resume because free pages are not saved to the suspend image. 28 29 29 - config WANT_PAGE_DEBUG_FLAGS 30 - bool 31 - 32 30 config PAGE_POISONING 33 31 bool 34 - select WANT_PAGE_DEBUG_FLAGS 35 - 36 - config PAGE_GUARD 37 - bool 38 - select WANT_PAGE_DEBUG_FLAGS
+4 -13
mm/memcontrol.c
··· 3043 3043 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) { 3044 3044 mem_cgroup_swap_statistics(from, false); 3045 3045 mem_cgroup_swap_statistics(to, true); 3046 - /* 3047 - * This function is only called from task migration context now. 3048 - * It postpones page_counter and refcount handling till the end 3049 - * of task migration(mem_cgroup_clear_mc()) for performance 3050 - * improvement. But we cannot postpone css_get(to) because if 3051 - * the process that has been moved to @to does swap-in, the 3052 - * refcount of @to might be decreased to 0. 3053 - * 3054 - * We are in attach() phase, so the cgroup is guaranteed to be 3055 - * alive, so we can just call css_get(). 3056 - */ 3057 - css_get(&to->css); 3058 3046 return 0; 3059 3047 } 3060 3048 return -EINVAL; ··· 4667 4679 if (parent_css == NULL) { 4668 4680 root_mem_cgroup = memcg; 4669 4681 page_counter_init(&memcg->memory, NULL); 4682 + memcg->soft_limit = PAGE_COUNTER_MAX; 4670 4683 page_counter_init(&memcg->memsw, NULL); 4671 4684 page_counter_init(&memcg->kmem, NULL); 4672 4685 } ··· 4713 4724 4714 4725 if (parent->use_hierarchy) { 4715 4726 page_counter_init(&memcg->memory, &parent->memory); 4727 + memcg->soft_limit = PAGE_COUNTER_MAX; 4716 4728 page_counter_init(&memcg->memsw, &parent->memsw); 4717 4729 page_counter_init(&memcg->kmem, &parent->kmem); 4718 4730 ··· 4723 4733 */ 4724 4734 } else { 4725 4735 page_counter_init(&memcg->memory, NULL); 4736 + memcg->soft_limit = PAGE_COUNTER_MAX; 4726 4737 page_counter_init(&memcg->memsw, NULL); 4727 4738 page_counter_init(&memcg->kmem, NULL); 4728 4739 /* ··· 4798 4807 mem_cgroup_resize_limit(memcg, PAGE_COUNTER_MAX); 4799 4808 mem_cgroup_resize_memsw_limit(memcg, PAGE_COUNTER_MAX); 4800 4809 memcg_update_kmem_limit(memcg, PAGE_COUNTER_MAX); 4801 - memcg->soft_limit = 0; 4810 + memcg->soft_limit = PAGE_COUNTER_MAX; 4802 4811 } 4803 4812 4804 4813 #ifdef CONFIG_MMU
+23 -16
mm/memory.c
··· 235 235 236 236 static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb) 237 237 { 238 + if (!tlb->end) 239 + return; 240 + 238 241 tlb_flush(tlb); 239 242 mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end); 240 243 #ifdef CONFIG_HAVE_RCU_TABLE_FREE ··· 250 247 { 251 248 struct mmu_gather_batch *batch; 252 249 253 - for (batch = &tlb->local; batch; batch = batch->next) { 250 + for (batch = &tlb->local; batch && batch->nr; batch = batch->next) { 254 251 free_pages_and_swap_cache(batch->pages, batch->nr); 255 252 batch->nr = 0; 256 253 } ··· 259 256 260 257 void tlb_flush_mmu(struct mmu_gather *tlb) 261 258 { 262 - if (!tlb->end) 263 - return; 264 - 265 259 tlb_flush_mmu_tlbonly(tlb); 266 260 tlb_flush_mmu_free(tlb); 267 261 } ··· 2137 2137 if (!dirty_page) 2138 2138 return ret; 2139 2139 2140 - /* 2141 - * Yes, Virginia, this is actually required to prevent a race 2142 - * with clear_page_dirty_for_io() from clearing the page dirty 2143 - * bit after it clear all dirty ptes, but before a racing 2144 - * do_wp_page installs a dirty pte. 2145 - * 2146 - * do_shared_fault is protected similarly. 2147 - */ 2148 2140 if (!page_mkwrite) { 2149 - wait_on_page_locked(dirty_page); 2150 - set_page_dirty_balance(dirty_page); 2141 + struct address_space *mapping; 2142 + int dirtied; 2143 + 2144 + lock_page(dirty_page); 2145 + dirtied = set_page_dirty(dirty_page); 2146 + VM_BUG_ON_PAGE(PageAnon(dirty_page), dirty_page); 2147 + mapping = dirty_page->mapping; 2148 + unlock_page(dirty_page); 2149 + 2150 + if (dirtied && mapping) { 2151 + /* 2152 + * Some device drivers do not set page.mapping 2153 + * but still dirty their pages 2154 + */ 2155 + balance_dirty_pages_ratelimited(mapping); 2156 + } 2157 + 2151 2158 /* file_update_time outside page_lock */ 2152 2159 if (vma->vm_file) 2153 2160 file_update_time(vma->vm_file); ··· 2600 2593 if (prev && prev->vm_end == address) 2601 2594 return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM; 2602 2595 2603 - expand_downwards(vma, address - PAGE_SIZE); 2596 + return expand_downwards(vma, address - PAGE_SIZE); 2604 2597 } 2605 2598 if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) { 2606 2599 struct vm_area_struct *next = vma->vm_next; ··· 2609 2602 if (next && next->vm_start == address + PAGE_SIZE) 2610 2603 return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM; 2611 2604 2612 - expand_upwards(vma, address + PAGE_SIZE); 2605 + return expand_upwards(vma, address + PAGE_SIZE); 2613 2606 } 2614 2607 return 0; 2615 2608 }
+10 -5
mm/mmap.c
··· 778 778 if (exporter && exporter->anon_vma && !importer->anon_vma) { 779 779 int error; 780 780 781 - error = anon_vma_clone(importer, exporter); 782 - if (error) 783 - return error; 784 781 importer->anon_vma = exporter->anon_vma; 782 + error = anon_vma_clone(importer, exporter); 783 + if (error) { 784 + importer->anon_vma = NULL; 785 + return error; 786 + } 785 787 } 786 788 } 787 789 ··· 2101 2099 { 2102 2100 struct mm_struct *mm = vma->vm_mm; 2103 2101 struct rlimit *rlim = current->signal->rlim; 2104 - unsigned long new_start; 2102 + unsigned long new_start, actual_size; 2105 2103 2106 2104 /* address space limit tests */ 2107 2105 if (!may_expand_vm(mm, grow)) 2108 2106 return -ENOMEM; 2109 2107 2110 2108 /* Stack limit test */ 2111 - if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur)) 2109 + actual_size = size; 2110 + if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN))) 2111 + actual_size -= PAGE_SIZE; 2112 + if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur)) 2112 2113 return -ENOMEM; 2113 2114 2114 2115 /* mlock limit tests */
+12 -31
mm/page-writeback.c
··· 1541 1541 bdi_start_background_writeback(bdi); 1542 1542 } 1543 1543 1544 - void set_page_dirty_balance(struct page *page) 1545 - { 1546 - if (set_page_dirty(page)) { 1547 - struct address_space *mapping = page_mapping(page); 1548 - 1549 - if (mapping) 1550 - balance_dirty_pages_ratelimited(mapping); 1551 - } 1552 - } 1553 - 1554 1544 static DEFINE_PER_CPU(int, bdp_ratelimits); 1555 1545 1556 1546 /* ··· 2113 2123 * page dirty in that case, but not all the buffers. This is a "bottom-up" 2114 2124 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying. 2115 2125 * 2116 - * Most callers have locked the page, which pins the address_space in memory. 2117 - * But zap_pte_range() does not lock the page, however in that case the 2118 - * mapping is pinned by the vma's ->vm_file reference. 2119 - * 2120 - * We take care to handle the case where the page was truncated from the 2121 - * mapping by re-checking page_mapping() inside tree_lock. 2126 + * The caller must ensure this doesn't race with truncation. Most will simply 2127 + * hold the page lock, but e.g. zap_pte_range() calls with the page mapped and 2128 + * the pte lock held, which also locks out truncation. 2122 2129 */ 2123 2130 int __set_page_dirty_nobuffers(struct page *page) 2124 2131 { 2125 2132 if (!TestSetPageDirty(page)) { 2126 2133 struct address_space *mapping = page_mapping(page); 2127 - struct address_space *mapping2; 2128 2134 unsigned long flags; 2129 2135 2130 2136 if (!mapping) 2131 2137 return 1; 2132 2138 2133 2139 spin_lock_irqsave(&mapping->tree_lock, flags); 2134 - mapping2 = page_mapping(page); 2135 - if (mapping2) { /* Race with truncate? */ 2136 - BUG_ON(mapping2 != mapping); 2137 - WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page)); 2138 - account_page_dirtied(page, mapping); 2139 - radix_tree_tag_set(&mapping->page_tree, 2140 - page_index(page), PAGECACHE_TAG_DIRTY); 2141 - } 2140 + BUG_ON(page_mapping(page) != mapping); 2141 + WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page)); 2142 + account_page_dirtied(page, mapping); 2143 + radix_tree_tag_set(&mapping->page_tree, page_index(page), 2144 + PAGECACHE_TAG_DIRTY); 2142 2145 spin_unlock_irqrestore(&mapping->tree_lock, flags); 2143 2146 if (mapping->host) { 2144 2147 /* !PageAnon && !swapper_space */ ··· 2288 2305 /* 2289 2306 * We carefully synchronise fault handlers against 2290 2307 * installing a dirty pte and marking the page dirty 2291 - * at this point. We do this by having them hold the 2292 - * page lock at some point after installing their 2293 - * pte, but before marking the page dirty. 2294 - * Pages are always locked coming in here, so we get 2295 - * the desired exclusion. See mm/memory.c:do_wp_page() 2296 - * for more comments. 2308 + * at this point. We do this by having them hold the 2309 + * page lock while dirtying the page, and pages are 2310 + * always locked coming in here, so we get the desired 2311 + * exclusion. 2297 2312 */ 2298 2313 if (TestClearPageDirty(page)) { 2299 2314 dec_zone_page_state(page, NR_FILE_DIRTY);
+41 -1
mm/rmap.c
··· 72 72 anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL); 73 73 if (anon_vma) { 74 74 atomic_set(&anon_vma->refcount, 1); 75 + anon_vma->degree = 1; /* Reference for first vma */ 76 + anon_vma->parent = anon_vma; 75 77 /* 76 78 * Initialise the anon_vma root to point to itself. If called 77 79 * from fork, the root will be reset to the parents anon_vma.
··· 190 188 if (likely(!vma->anon_vma)) { 191 189 vma->anon_vma = anon_vma; 192 190 anon_vma_chain_link(vma, avc, anon_vma); 191 + /* vma reference or self-parent link for new root */ 192 + anon_vma->degree++; 193 193 allocated = NULL; 194 194 avc = NULL; 195 195 }
··· 240 236 /* 241 237 * Attach the anon_vmas from src to dst. 242 238 * Returns 0 on success, -ENOMEM on failure. 239 + * 240 + * If dst->anon_vma is NULL this function tries to find and reuse an existing 241 + * anon_vma which has no vmas and only one child anon_vma. This prevents 242 + * degradation of the anon_vma hierarchy to an endless linear chain in case of 243 + * a constantly forking task. On the other hand, an anon_vma with more than one 244 + * child isn't reused even if there was no alive vma, thus the rmap walker has a 245 + * good chance of avoiding scanning the whole hierarchy when it searches where a 246 + * page is mapped. 243 247 */ 244 248 int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) 245 249 {
··· 268 256 anon_vma = pavc->anon_vma; 269 257 root = lock_anon_vma_root(root, anon_vma); 270 258 anon_vma_chain_link(dst, avc, anon_vma); 259 + 260 + /* 261 + * Reuse the existing anon_vma if its degree is lower than two, 262 + * which means it has no vma and only one anon_vma child. 263 + * 264 + * Do not choose the parent anon_vma, otherwise the first child 265 + * will always reuse it. The root anon_vma is never reused: 266 + * it has a self-parent reference and at least one child. 267 + */ 268 + if (!dst->anon_vma && anon_vma != src->anon_vma && 269 + anon_vma->degree < 2) 270 + dst->anon_vma = anon_vma; 271 271 } 272 + if (dst->anon_vma) 273 + dst->anon_vma->degree++; 272 274 unlock_anon_vma_root(root); 273 275 return 0; 274 276
··· 306 280 if (!pvma->anon_vma) 307 281 return 0; 308 282 283 + /* Drop inherited anon_vma, we'll reuse existing or allocate new. */ 284 + vma->anon_vma = NULL; 285 + 309 286 /* 310 287 * First, attach the new VMA to the parent VMA's anon_vmas, 311 288 * so rmap can find non-COWed pages in child processes.
··· 316 287 error = anon_vma_clone(vma, pvma); 317 288 if (error) 318 289 return error; 290 + 291 + /* An existing anon_vma has been reused, all done then. */ 292 + if (vma->anon_vma) 293 + return 0; 319 294 320 295 /* Then add our own anon_vma. */ 321 296 anon_vma = anon_vma_alloc();
··· 334 301 * lock any of the anon_vmas in this anon_vma tree. 335 302 */ 336 303 anon_vma->root = pvma->anon_vma->root; 304 + anon_vma->parent = pvma->anon_vma; 337 305 /* 338 306 * With refcounts, an anon_vma can stay around longer than the 339 307 * process it belongs to. The root anon_vma needs to be pinned until
··· 345 311 vma->anon_vma = anon_vma; 346 312 anon_vma_lock_write(anon_vma); 347 313 anon_vma_chain_link(vma, avc, anon_vma); 314 + anon_vma->parent->degree++; 348 315 anon_vma_unlock_write(anon_vma); 349 316 350 317 return 0;
··· 376 341 * Leave empty anon_vmas on the list - we'll need 377 342 * to free them outside the lock.
378 343 */ 379 - if (RB_EMPTY_ROOT(&anon_vma->rb_root)) 344 + if (RB_EMPTY_ROOT(&anon_vma->rb_root)) { 345 + anon_vma->parent->degree--; 380 346 continue; 347 + } 381 348 382 349 list_del(&avc->same_vma); 383 350 anon_vma_chain_free(avc); 384 351 } 352 + if (vma->anon_vma) 353 + vma->anon_vma->degree--; 385 354 unlock_anon_vma_root(root); 386 355 387 356 /* ··· 396 357 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { 397 358 struct anon_vma *anon_vma = avc->anon_vma; 398 359 360 + BUG_ON(anon_vma->degree); 399 361 put_anon_vma(anon_vma); 400 362 401 363 list_del(&avc->same_vma);
+13 -11
mm/vmscan.c
··· 2921 2921 return false; 2922 2922 2923 2923 /* 2924 - * There is a potential race between when kswapd checks its watermarks 2925 - * and a process gets throttled. There is also a potential race if 2926 - * processes get throttled, kswapd wakes, a large process exits therby 2927 - * balancing the zones that causes kswapd to miss a wakeup. If kswapd 2928 - * is going to sleep, no process should be sleeping on pfmemalloc_wait 2929 - * so wake them now if necessary. If necessary, processes will wake 2930 - * kswapd and get throttled again 2924 + * The throttled processes are normally woken up in balance_pgdat() as 2925 + * soon as pfmemalloc_watermark_ok() is true. But there is a potential 2926 + * race between when kswapd checks the watermarks and a process gets 2927 + * throttled. There is also a potential race if processes get 2928 + * throttled, kswapd wakes, a large process exits thereby balancing the 2929 + * zones, which causes kswapd to exit balance_pgdat() before reaching 2930 + * the wake up checks. If kswapd is going to sleep, no process should 2931 + * be sleeping on pfmemalloc_wait, so wake them now if necessary. If 2932 + * the wake up is premature, processes will wake kswapd and get 2933 + * throttled again. The difference from wake ups in balance_pgdat() is 2934 + * that here we are under prepare_to_wait(). 2931 2935 */ 2932 - if (waitqueue_active(&pgdat->pfmemalloc_wait)) { 2933 - wake_up(&pgdat->pfmemalloc_wait); 2934 - return false; 2935 - } 2936 + if (waitqueue_active(&pgdat->pfmemalloc_wait)) 2937 + wake_up_all(&pgdat->pfmemalloc_wait); 2936 2938 2937 2939 return pgdat_balanced(pgdat, order, classzone_idx); 2938 2940 }
+7 -4
net/batman-adv/multicast.c
··· 685 685 if (orig_initialized) 686 686 atomic_dec(&bat_priv->mcast.num_disabled); 687 687 orig->capabilities |= BATADV_ORIG_CAPA_HAS_MCAST; 688 - /* If mcast support is being switched off increase the disabled 689 - * mcast node counter. 688 + /* If mcast support is being switched off or if this is an initial 689 + * OGM without mcast support then increase the disabled mcast 690 + * node counter. 690 691 */ 691 692 } else if (!orig_mcast_enabled && 692 - orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST) { 693 + (orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST || 694 + !orig_initialized)) { 693 695 atomic_inc(&bat_priv->mcast.num_disabled); 694 696 orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_MCAST; 695 697 } ··· 740 738 { 741 739 struct batadv_priv *bat_priv = orig->bat_priv; 742 740 743 - if (!(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST)) 741 + if (!(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST) && 742 + orig->capa_initialized & BATADV_ORIG_CAPA_HAS_MCAST) 744 743 atomic_dec(&bat_priv->mcast.num_disabled); 745 744 746 745 batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS);
+1 -1
net/batman-adv/network-coding.c
··· 133 133 if (!bat_priv->nc.decoding_hash) 134 134 goto err; 135 135 136 - batadv_hash_set_lock_class(bat_priv->nc.coding_hash, 136 + batadv_hash_set_lock_class(bat_priv->nc.decoding_hash, 137 137 &batadv_nc_decoding_hash_lock_class_key); 138 138 139 139 INIT_DELAYED_WORK(&bat_priv->nc.work, batadv_nc_worker);
+4 -3
net/batman-adv/originator.c
··· 570 570 571 571 batadv_frag_purge_orig(orig_node, NULL); 572 572 573 - batadv_tt_global_del_orig(orig_node->bat_priv, orig_node, -1, 574 - "originator timed out"); 575 - 576 573 if (orig_node->bat_priv->bat_algo_ops->bat_orig_free) 577 574 orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node); 578 575 ··· 675 678 atomic_set(&orig_node->last_ttvn, 0); 676 679 orig_node->tt_buff = NULL; 677 680 orig_node->tt_buff_len = 0; 681 + orig_node->last_seen = jiffies; 678 682 reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS); 679 683 orig_node->bcast_seqno_reset = reset_time; 680 684 #ifdef CONFIG_BATMAN_ADV_MCAST ··· 975 977 if (batadv_purge_orig_node(bat_priv, orig_node)) { 976 978 batadv_gw_node_delete(bat_priv, orig_node); 977 979 hlist_del_rcu(&orig_node->hash_entry); 980 + batadv_tt_global_del_orig(orig_node->bat_priv, 981 + orig_node, -1, 982 + "originator timed out"); 978 983 batadv_orig_node_free_ref(orig_node); 979 984 continue; 980 985 }
+4 -2
net/batman-adv/routing.c
··· 443 443 444 444 router = batadv_orig_router_get(orig_node, recv_if); 445 445 446 + if (!router) 447 + return router; 448 + 446 449 /* only consider bonding for recv_if == BATADV_IF_DEFAULT (first hop) 447 450 * and if activated. 448 451 */ 449 - if (recv_if == BATADV_IF_DEFAULT || !atomic_read(&bat_priv->bonding) || 450 - !router) 452 + if (!(recv_if == BATADV_IF_DEFAULT && atomic_read(&bat_priv->bonding))) 451 453 return router; 452 454 453 455 /* bonding: loop through the list of possible routers found
+2 -1
net/bridge/br_input.c
··· 154 154 dst = NULL; 155 155 156 156 if (is_broadcast_ether_addr(dest)) { 157 - if (p->flags & BR_PROXYARP && 157 + if (IS_ENABLED(CONFIG_INET) && 158 + p->flags & BR_PROXYARP && 158 159 skb->protocol == htons(ETH_P_ARP)) 159 160 br_do_proxy_arp(skb, br, vid); 160 161
+1 -1
net/ceph/auth_x.c
··· 676 676 int ret; 677 677 char tmp_enc[40]; 678 678 __le32 tmp[5] = { 679 - 16u, msg->hdr.crc, msg->footer.front_crc, 679 + cpu_to_le32(16), msg->hdr.crc, msg->footer.front_crc, 680 680 msg->footer.middle_crc, msg->footer.data_crc, 681 681 }; 682 682 ret = ceph_x_encrypt(&au->session_key, &tmp, sizeof(tmp),
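The fix matters only on big-endian hosts: a __le32 slot must hold the little-endian byte pattern regardless of CPU order. A self-contained sketch with a minimal cpu_to_le32() (the kernel's helper, re-implemented here purely for illustration):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Minimal re-implementation: store v's bytes least-significant
 * first, whatever the host byte order is. */
static uint32_t cpu_to_le32(uint32_t v)
{
    uint8_t b[4] = { v & 0xff, (v >> 8) & 0xff,
                     (v >> 16) & 0xff, (v >> 24) & 0xff };
    uint32_t out;

    memcpy(&out, b, sizeof(out));
    return out;
}

int main(void)
{
    uint32_t wire = cpu_to_le32(16);
    const uint8_t *p = (const uint8_t *)&wire;

    /* Always 10 00 00 00 on the wire. A bare `16u` would be laid
     * out as 00 00 00 10 by a big-endian CPU, corrupting the
     * signed payload. */
    printf("%02x %02x %02x %02x\n", p[0], p[1], p[2], p[3]);
    return 0;
}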
+1 -1
net/ceph/mon_client.c
··· 717 717 if (src_len != sizeof(u32) + dst_len) 718 718 return -EINVAL; 719 719 720 - buf_len = le32_to_cpu(*(u32 *)src); 720 + buf_len = le32_to_cpu(*(__le32 *)src); 721 721 if (buf_len != dst_len) 722 722 return -EINVAL; 723 723
+44
net/core/neighbour.c
··· 2043 2043 case NDTPA_BASE_REACHABLE_TIME: 2044 2044 NEIGH_VAR_SET(p, BASE_REACHABLE_TIME, 2045 2045 nla_get_msecs(tbp[i])); 2046 + /* update reachable_time as well, otherwise, the change will 2047 + * only be effective after the next time neigh_periodic_work 2048 + * decides to recompute it (can be multiple minutes) 2049 + */ 2050 + p->reachable_time = 2051 + neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME)); 2046 2052 break; 2047 2053 case NDTPA_GC_STALETIME: 2048 2054 NEIGH_VAR_SET(p, GC_STALETIME, ··· 2927 2921 return ret; 2928 2922 } 2929 2923 2924 + static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write, 2925 + void __user *buffer, 2926 + size_t *lenp, loff_t *ppos) 2927 + { 2928 + struct neigh_parms *p = ctl->extra2; 2929 + int ret; 2930 + 2931 + if (strcmp(ctl->procname, "base_reachable_time") == 0) 2932 + ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos); 2933 + else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0) 2934 + ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos); 2935 + else 2936 + ret = -1; 2937 + 2938 + if (write && ret == 0) { 2939 + /* update reachable_time as well, otherwise, the change will 2940 + * only be effective after the next time neigh_periodic_work 2941 + * decides to recompute it 2942 + */ 2943 + p->reachable_time = 2944 + neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME)); 2945 + } 2946 + return ret; 2947 + } 2948 + 2930 2949 #define NEIGH_PARMS_DATA_OFFSET(index) \ 2931 2950 (&((struct neigh_parms *) 0)->data[index]) 2932 2951 ··· 3078 3047 t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler; 3079 3048 /* ReachableTime (in milliseconds) */ 3080 3049 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler; 3050 + } else { 3051 + /* Those handlers will update p->reachable_time after 3052 + * base_reachable_time(_ms) is set to ensure the new timer starts being 3053 + * applied after the next neighbour update instead of waiting for 3054 + * neigh_periodic_work to update its value (can be multiple minutes) 3055 + * So any handler that replaces them should do this as well 3056 + */ 3057 + /* ReachableTime */ 3058 + t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = 3059 + neigh_proc_base_reachable_time; 3060 + /* ReachableTime (in milliseconds) */ 3061 + t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = 3062 + neigh_proc_base_reachable_time; 3081 3063 } 3082 3064 3083 3065 /* Don't export sysctls to unprivileged users */
+4 -4
net/ipv4/netfilter/nft_redir_ipv4.c
··· 27 27 28 28 memset(&mr, 0, sizeof(mr)); 29 29 if (priv->sreg_proto_min) { 30 - mr.range[0].min.all = (__force __be16) 31 - data[priv->sreg_proto_min].data[0]; 32 - mr.range[0].max.all = (__force __be16) 33 - data[priv->sreg_proto_max].data[0]; 30 + mr.range[0].min.all = 31 + *(__be16 *)&data[priv->sreg_proto_min].data[0]; 32 + mr.range[0].max.all = 33 + *(__be16 *)&data[priv->sreg_proto_max].data[0]; 34 34 mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED; 35 35 } 36 36
+2 -2
net/ipv4/tcp_output.c
··· 2019 2019 if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) 2020 2020 break; 2021 2021 2022 - if (tso_segs == 1) { 2022 + if (tso_segs == 1 || !max_segs) { 2023 2023 if (unlikely(!tcp_nagle_test(tp, skb, mss_now, 2024 2024 (tcp_skb_is_last(sk, skb) ? 2025 2025 nonagle : TCP_NAGLE_PUSH)))) ··· 2032 2032 } 2033 2033 2034 2034 limit = mss_now; 2035 - if (tso_segs > 1 && !tcp_urg_mode(tp)) 2035 + if (tso_segs > 1 && max_segs && !tcp_urg_mode(tp)) 2036 2036 limit = tcp_mss_split_point(sk, skb, mss_now, 2037 2037 min_t(unsigned int, 2038 2038 cwnd_quota,
+4 -4
net/ipv6/netfilter/nft_redir_ipv6.c
··· 27 27 28 28 memset(&range, 0, sizeof(range)); 29 29 if (priv->sreg_proto_min) { 30 - range.min_proto.all = (__force __be16) 31 - data[priv->sreg_proto_min].data[0]; 32 - range.max_proto.all = (__force __be16) 33 - data[priv->sreg_proto_max].data[0]; 30 + range.min_proto.all = 31 + *(__be16 *)&data[priv->sreg_proto_min].data[0]; 32 + range.max_proto.all = 33 + *(__be16 *)&data[priv->sreg_proto_max].data[0]; 34 34 range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED; 35 35 } 36 36
+9 -3
net/mac80211/key.c
··· 140 140 if (!ret) { 141 141 key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE; 142 142 143 - if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) 143 + if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) || 144 + (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) || 145 + (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))) 144 146 sdata->crypto_tx_tailroom_needed_cnt--; 145 147 146 148 WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) && ··· 190 188 sta = key->sta; 191 189 sdata = key->sdata; 192 190 193 - if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) 191 + if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) || 192 + (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) || 193 + (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))) 194 194 increment_tailroom_need_count(sdata); 195 195 196 196 ret = drv_set_key(key->local, DISABLE_KEY, sdata, ··· 888 884 if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { 889 885 key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; 890 886 891 - if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) 887 + if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) || 888 + (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) || 889 + (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))) 892 890 increment_tailroom_need_count(key->sdata); 893 891 } 894 892
+5 -5
net/netfilter/ipvs/ip_vs_ftp.c
··· 183 183 struct nf_conn *ct; 184 184 struct net *net; 185 185 186 + *diff = 0; 187 + 186 188 #ifdef CONFIG_IP_VS_IPV6 187 189 /* This application helper doesn't work with IPv6 yet, 188 190 * so turn this into a no-op for IPv6 packets ··· 192 190 if (cp->af == AF_INET6) 193 191 return 1; 194 192 #endif 195 - 196 - *diff = 0; 197 193 198 194 /* Only useful for established sessions */ 199 195 if (cp->state != IP_VS_TCP_S_ESTABLISHED) ··· 322 322 struct ip_vs_conn *n_cp; 323 323 struct net *net; 324 324 325 + /* no diff required for incoming packets */ 326 + *diff = 0; 327 + 325 328 #ifdef CONFIG_IP_VS_IPV6 326 329 /* This application helper doesn't work with IPv6 yet, 327 330 * so turn this into a no-op for IPv6 packets ··· 332 329 if (cp->af == AF_INET6) 333 330 return 1; 334 331 #endif 335 - 336 - /* no diff required for incoming packets */ 337 - *diff = 0; 338 332 339 333 /* Only useful for established sessions */ 340 334 if (cp->state != IP_VS_TCP_S_ESTABLISHED)
+9 -11
net/netfilter/nf_conntrack_core.c
··· 611 611 */ 612 612 NF_CT_ASSERT(!nf_ct_is_confirmed(ct)); 613 613 pr_debug("Confirming conntrack %p\n", ct); 614 - /* We have to check the DYING flag inside the lock to prevent 615 - a race against nf_ct_get_next_corpse() possibly called from 616 - user context, else we insert an already 'dead' hash, blocking 617 - further use of that particular connection -JM */ 614 + /* We have to check the DYING flag after unlink to prevent 615 + * a race against nf_ct_get_next_corpse() possibly called from 616 + * user context, else we insert an already 'dead' hash, blocking 617 + * further use of that particular connection -JM. 618 + */ 619 + nf_ct_del_from_dying_or_unconfirmed_list(ct); 618 620 619 - if (unlikely(nf_ct_is_dying(ct))) { 620 - nf_conntrack_double_unlock(hash, reply_hash); 621 - local_bh_enable(); 622 - return NF_ACCEPT; 623 - } 621 + if (unlikely(nf_ct_is_dying(ct))) 622 + goto out; 624 623 625 624 /* See if there's one in the list already, including reverse: 626 625 NAT could have grabbed it without realizing, since we're ··· 634 635 &h->tuple) && 635 636 zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h))) 636 637 goto out; 637 - 638 - nf_ct_del_from_dying_or_unconfirmed_list(ct); 639 638 640 639 /* Timer relative to confirmation time, not original 641 640 setting time, otherwise we'd get timer wrap in ··· 670 673 return NF_ACCEPT; 671 674 672 675 out: 676 + nf_ct_add_to_dying_list(ct); 673 677 nf_conntrack_double_unlock(hash, reply_hash); 674 678 NF_CT_STAT_INC(net, insert_failed); 675 679 local_bh_enable();
+9 -5
net/netfilter/nf_tables_api.c
··· 713 713 struct nft_chain *chain, *nc; 714 714 struct nft_set *set, *ns; 715 715 716 - list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) { 716 + list_for_each_entry(chain, &ctx->table->chains, list) { 717 717 ctx->chain = chain; 718 718 719 719 err = nft_delrule_by_chain(ctx); 720 - if (err < 0) 721 - goto out; 722 - 723 - err = nft_delchain(ctx); 724 720 if (err < 0) 725 721 goto out; 726 722 } ··· 727 731 continue; 728 732 729 733 err = nft_delset(ctx, set); 734 + if (err < 0) 735 + goto out; 736 + } 737 + 738 + list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) { 739 + ctx->chain = chain; 740 + 741 + err = nft_delchain(ctx); 730 742 if (err < 0) 731 743 goto out; 732 744 }
+3 -2
net/netfilter/nfnetlink.c
··· 321 321 nlh = nlmsg_hdr(skb); 322 322 err = 0; 323 323 324 - if (nlh->nlmsg_len < NLMSG_HDRLEN) { 324 + if (nlmsg_len(nlh) < sizeof(struct nfgenmsg) || 325 + skb->len < nlh->nlmsg_len) { 325 326 err = -EINVAL; 326 327 goto ack; 327 328 } ··· 470 469 int type; 471 470 472 471 if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX) 473 - return -EINVAL; 472 + return 0; 474 473 475 474 type = nfnl_group2type[group]; 476 475
+4 -4
net/netfilter/nft_nat.c
··· 65 65 } 66 66 67 67 if (priv->sreg_proto_min) { 68 - range.min_proto.all = (__force __be16) 69 - data[priv->sreg_proto_min].data[0]; 70 - range.max_proto.all = (__force __be16) 71 - data[priv->sreg_proto_max].data[0]; 68 + range.min_proto.all = 69 + *(__be16 *)&data[priv->sreg_proto_min].data[0]; 70 + range.max_proto.all = 71 + *(__be16 *)&data[priv->sreg_proto_max].data[0]; 72 72 range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED; 73 73 } 74 74
+2 -1
net/openvswitch/datapath.c
··· 524 524 struct vport *input_vport; 525 525 int len; 526 526 int err; 527 - bool log = !a[OVS_FLOW_ATTR_PROBE]; 527 + bool log = !a[OVS_PACKET_ATTR_PROBE]; 528 528 529 529 err = -EINVAL; 530 530 if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] || ··· 610 610 [OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN }, 611 611 [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED }, 612 612 [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED }, 613 + [OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG }, 613 614 }; 614 615 615 616 static const struct genl_ops dp_packet_genl_ops[] = {
+3 -2
net/openvswitch/flow.c
··· 70 70 { 71 71 struct flow_stats *stats; 72 72 int node = numa_node_id(); 73 + int len = skb->len + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0); 73 74 74 75 stats = rcu_dereference(flow->stats[node]); 75 76 ··· 106 105 if (likely(new_stats)) { 107 106 new_stats->used = jiffies; 108 107 new_stats->packet_count = 1; 109 - new_stats->byte_count = skb->len; 108 + new_stats->byte_count = len; 110 109 new_stats->tcp_flags = tcp_flags; 111 110 spin_lock_init(&new_stats->lock); 112 111 ··· 121 120 122 121 stats->used = jiffies; 123 122 stats->packet_count++; 124 - stats->byte_count += skb->len; 123 + stats->byte_count += len; 125 124 stats->tcp_flags |= tcp_flags; 126 125 unlock: 127 126 spin_unlock(&stats->lock);
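The statistics now count the VLAN tag that hardware acceleration keeps out of skb->len. A trivial standalone model of the accounting (VLAN_HLEN is the real 4-byte 802.1Q tag size; the frame lengths are made up):

#include <stdbool.h>
#include <stdio.h>

#define VLAN_HLEN 4  /* 802.1Q tag: TPID + TCI */

/* With VLAN offload the tag lives in skb metadata, so the on-wire
 * frame is VLAN_HLEN longer than skb->len reports. */
static unsigned int counted_len(unsigned int skb_len, bool vlan_tag_present)
{
    return skb_len + (vlan_tag_present ? VLAN_HLEN : 0);
}

int main(void)
{
    printf("untagged: %u\n", counted_len(60, false));  /* 60 */
    printf("tagged:   %u\n", counted_len(60, true));   /* 64 */
    return 0;
}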
+1 -1
net/openvswitch/vport.c
··· 480 480 stats = this_cpu_ptr(vport->percpu_stats); 481 481 u64_stats_update_begin(&stats->syncp); 482 482 stats->rx_packets++; 483 - stats->rx_bytes += skb->len; 483 + stats->rx_bytes += skb->len + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0); 484 484 u64_stats_update_end(&stats->syncp); 485 485 486 486 OVS_CB(skb)->input_vport = vport;
+1 -1
net/packet/af_packet.c
··· 2517 2517 err = -EINVAL; 2518 2518 if (sock->type == SOCK_DGRAM) { 2519 2519 offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len); 2520 - if (unlikely(offset) < 0) 2520 + if (unlikely(offset < 0)) 2521 2521 goto out_free; 2522 2522 } else { 2523 2523 if (ll_header_truncated(dev, len))
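The one-character bug here is a classic: unlikely() normalises its argument to 0 or 1, so `unlikely(offset) < 0` can never be true and the error path was unreachable. A standalone demonstration with the usual macro definition:

#include <stdio.h>

#define unlikely(x) __builtin_expect(!!(x), 0)

int main(void)
{
    int offset = -14;  /* e.g. a failing header-build return value */

    /* buggy form: !!(-14) == 1, and 1 < 0 is always false */
    printf("unlikely(offset) < 0 -> %d\n", unlikely(offset) < 0);
    /* fixed form: the comparison happens before the hint */
    printf("unlikely(offset < 0) -> %d\n", unlikely(offset < 0));
    return 0;
}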
+3 -2
net/tipc/bcast.c
··· 220 220 struct sk_buff *skb; 221 221 222 222 skb_queue_walk(&bcl->outqueue, skb) { 223 - if (more(buf_seqno(skb), after)) 223 + if (more(buf_seqno(skb), after)) { 224 + tipc_link_retransmit(bcl, skb, mod(to - after)); 224 225 break; 226 + } 225 227 } 226 - tipc_link_retransmit(bcl, skb, mod(to - after)); 227 228 } 228 229 229 230 /**
+8 -8
scripts/Makefile.clean
··· 42 42 43 43 __clean-files := $(filter-out $(no-clean-files), $(__clean-files)) 44 44 45 - # as clean-files is given relative to the current directory, this adds 46 - # a $(obj) prefix, except for absolute paths 45 + # clean-files is given relative to the current directory, unless it 46 + # starts with $(objtree)/ (which means "./", so do not add "./" unless 47 + # you want to delete a file from the toplevel object directory). 47 48 48 49 __clean-files := $(wildcard \ 49 - $(addprefix $(obj)/, $(filter-out /%, $(__clean-files))) \ 50 - $(filter /%, $(__clean-files))) 50 + $(addprefix $(obj)/, $(filter-out $(objtree)/%, $(__clean-files))) \ 51 + $(filter $(objtree)/%, $(__clean-files))) 51 52 52 - # as clean-dirs is given relative to the current directory, this adds 53 - # a $(obj) prefix, except for absolute paths 53 + # same as clean-files 54 54 55 55 __clean-dirs := $(wildcard \ 56 - $(addprefix $(obj)/, $(filter-out /%, $(clean-dirs))) \ 57 - $(filter /%, $(clean-dirs))) 56 + $(addprefix $(obj)/, $(filter-out $(objtree)/%, $(clean-dirs))) \ 57 + $(filter $(objtree)/%, $(clean-dirs))) 58 58 59 59 # ========================================================================== 60 60
+2 -2
security/keys/gc.c
··· 148 148 if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) 149 149 atomic_dec(&key->user->nikeys); 150 150 151 - key_user_put(key->user); 152 - 153 151 /* now throw away the key memory */ 154 152 if (key->type->destroy) 155 153 key->type->destroy(key); 154 + 155 + key_user_put(key->user); 156 156 157 157 kfree(key->description); 158 158
+1 -1
sound/firewire/fireworks/fireworks_transaction.c
··· 124 124 spin_lock_irq(&efw->lock); 125 125 126 126 t = (struct snd_efw_transaction *)data; 127 - length = min_t(size_t, t->length * sizeof(t->length), length); 127 + length = min_t(size_t, be32_to_cpu(t->length) * sizeof(u32), length); 128 128 129 129 if (efw->push_ptr < efw->pull_ptr) 130 130 capacity = (unsigned int)(efw->pull_ptr - efw->push_ptr);
+2
sound/pci/hda/patch_hdmi.c
··· 3353 3353 { .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch }, 3354 3354 { .id = 0x10de0070, .name = "GPU 70 HDMI/DP", .patch = patch_nvhdmi }, 3355 3355 { .id = 0x10de0071, .name = "GPU 71 HDMI/DP", .patch = patch_nvhdmi }, 3356 + { .id = 0x10de0072, .name = "GPU 72 HDMI/DP", .patch = patch_nvhdmi }, 3356 3357 { .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch }, 3357 3358 { .id = 0x11069f80, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi }, 3358 3359 { .id = 0x11069f81, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi }, ··· 3414 3413 MODULE_ALIAS("snd-hda-codec-id:10de0067"); 3415 3414 MODULE_ALIAS("snd-hda-codec-id:10de0070"); 3416 3415 MODULE_ALIAS("snd-hda-codec-id:10de0071"); 3416 + MODULE_ALIAS("snd-hda-codec-id:10de0072"); 3417 3417 MODULE_ALIAS("snd-hda-codec-id:10de8001"); 3418 3418 MODULE_ALIAS("snd-hda-codec-id:11069f80"); 3419 3419 MODULE_ALIAS("snd-hda-codec-id:11069f81");
+2 -2
sound/pci/hda/patch_sigmatel.c
··· 568 568 spec->gpio_mask; 569 569 } 570 570 if (get_int_hint(codec, "gpio_dir", &spec->gpio_dir)) 571 - spec->gpio_mask &= spec->gpio_mask; 572 - if (get_int_hint(codec, "gpio_data", &spec->gpio_data)) 573 571 spec->gpio_dir &= spec->gpio_mask; 572 + if (get_int_hint(codec, "gpio_data", &spec->gpio_data)) 573 + spec->gpio_data &= spec->gpio_mask; 574 574 if (get_int_hint(codec, "eapd_mask", &spec->eapd_mask)) 575 575 spec->eapd_mask &= spec->gpio_mask; 576 576 if (get_int_hint(codec, "gpio_mute", &spec->gpio_mute))
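The patch_sigmatel.c hunk untangles a copy-paste slip: the gpio_dir hint clamped gpio_mask against itself (a no-op) and the gpio_data hint clamped gpio_dir, leaving gpio_data unmasked. The intended invariant, in a stripped-down illustrative sketch:

#include <stdio.h>

struct gpio_spec { unsigned int mask, dir, data; };

int main(void)
{
        struct gpio_spec spec = { .mask = 0x0f, .dir = 0xff, .data = 0x3c };

        /* Broken (copy-paste): "gpio_mask &= gpio_mask" did nothing, and the
           gpio_data hint clamped gpio_dir instead of gpio_data. */

        spec.dir  &= spec.mask;         /* fixed: each field is clamped ... */
        spec.data &= spec.mask;         /* ... against the shared pin mask  */
        printf("dir=0x%02x data=0x%02x\n", spec.dir, spec.data);
        return 0;
}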
+1 -1
sound/usb/caiaq/audio.c
··· 816 816 return -EINVAL; 817 817 } 818 818 819 - if (cdev->n_streams < 2) { 819 + if (cdev->n_streams < 1) { 820 820 dev_err(dev, "bogus number of streams: %d\n", cdev->n_streams); 821 821 return -EINVAL; 822 822 }
+2 -2
tools/lib/lockdep/preload.c
··· 317 317 * 318 318 * TODO: Hook into free() and add that check there as well. 319 319 */ 320 - debug_check_no_locks_freed(mutex, mutex + sizeof(*mutex)); 320 + debug_check_no_locks_freed(mutex, sizeof(*mutex)); 321 321 __del_lock(__get_lock(mutex)); 322 322 return ll_pthread_mutex_destroy(mutex); 323 323 } ··· 341 341 { 342 342 try_init_preload(); 343 343 344 - debug_check_no_locks_freed(rwlock, rwlock + sizeof(*rwlock)); 344 + debug_check_no_locks_freed(rwlock, sizeof(*rwlock)); 345 345 __del_lock(__get_lock(rwlock)); 346 346 return ll_pthread_rwlock_destroy(rwlock); 347 347 }
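Both lockdep-preload fixes are the same signature mix-up: debug_check_no_locks_freed() takes a start address and a length in bytes, but the old calls passed an end pointer — and one computed with pointer arithmetic that scales by the object size again. A sketch with check_region() as a stand-in mirroring the (from, len) prototype:

#include <stdio.h>
#include <pthread.h>

/* Stand-in mirroring debug_check_no_locks_freed(from, len): a start
   address plus a byte count, not a (start, end) pair. */
static void check_region(const void *from, unsigned long len)
{
        printf("checking %lu bytes starting at %p\n", len, from);
}

int main(void)
{
        pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

        /* The broken calls passed "mutex + sizeof(*mutex)" -- an end pointer,
           further scaled by sizeof(*mutex) because the arithmetic is done on
           a typed pointer. */
        check_region(&m, sizeof(m));    /* fixed: plain byte count */
        return 0;
}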
+1 -1
tools/perf/builtin-annotate.c
··· 232 232 if (nr_samples > 0) { 233 233 total_nr_samples += nr_samples; 234 234 hists__collapse_resort(hists, NULL); 235 - hists__output_resort(hists); 235 + hists__output_resort(hists, NULL); 236 236 237 237 if (symbol_conf.event_group && 238 238 !perf_evsel__is_group_leader(pos))
+45 -1
tools/perf/builtin-diff.c
··· 545 545 return __hist_entry__cmp_compute(p_left, p_right, c); 546 546 } 547 547 548 + static int64_t 549 + hist_entry__cmp_nop(struct hist_entry *left __maybe_unused, 550 + struct hist_entry *right __maybe_unused) 551 + { 552 + return 0; 553 + } 554 + 555 + static int64_t 556 + hist_entry__cmp_baseline(struct hist_entry *left, struct hist_entry *right) 557 + { 558 + if (sort_compute) 559 + return 0; 560 + 561 + if (left->stat.period == right->stat.period) 562 + return 0; 563 + return left->stat.period > right->stat.period ? 1 : -1; 564 + } 565 + 566 + static int64_t 567 + hist_entry__cmp_delta(struct hist_entry *left, struct hist_entry *right) 568 + { 569 + return hist_entry__cmp_compute(right, left, COMPUTE_DELTA); 570 + } 571 + 572 + static int64_t 573 + hist_entry__cmp_ratio(struct hist_entry *left, struct hist_entry *right) 574 + { 575 + return hist_entry__cmp_compute(right, left, COMPUTE_RATIO); 576 + } 577 + 578 + static int64_t 579 + hist_entry__cmp_wdiff(struct hist_entry *left, struct hist_entry *right) 580 + { 581 + return hist_entry__cmp_compute(right, left, COMPUTE_WEIGHTED_DIFF); 582 + } 583 + 548 584 static void insert_hist_entry_by_compute(struct rb_root *root, 549 585 struct hist_entry *he, 550 586 int c) ··· 641 605 hists__precompute(hists); 642 606 hists__compute_resort(hists); 643 607 } else { 644 - hists__output_resort(hists); 608 + hists__output_resort(hists, NULL); 645 609 } 646 610 647 611 hists__fprintf(hists, true, 0, 0, 0, stdout); ··· 1074 1038 fmt->header = hpp__header; 1075 1039 fmt->width = hpp__width; 1076 1040 fmt->entry = hpp__entry_global; 1041 + fmt->cmp = hist_entry__cmp_nop; 1042 + fmt->collapse = hist_entry__cmp_nop; 1077 1043 1078 1044 /* TODO more colors */ 1079 1045 switch (idx) { 1080 1046 case PERF_HPP_DIFF__BASELINE: 1081 1047 fmt->color = hpp__color_baseline; 1048 + fmt->sort = hist_entry__cmp_baseline; 1082 1049 break; 1083 1050 case PERF_HPP_DIFF__DELTA: 1084 1051 fmt->color = hpp__color_delta; 1052 + fmt->sort = hist_entry__cmp_delta; 1085 1053 break; 1086 1054 case PERF_HPP_DIFF__RATIO: 1087 1055 fmt->color = hpp__color_ratio; 1056 + fmt->sort = hist_entry__cmp_ratio; 1088 1057 break; 1089 1058 case PERF_HPP_DIFF__WEIGHTED_DIFF: 1090 1059 fmt->color = hpp__color_wdiff; 1060 + fmt->sort = hist_entry__cmp_wdiff; 1091 1061 break; 1092 1062 default: 1063 + fmt->sort = hist_entry__cmp_nop; 1093 1064 break; 1094 1065 } 1095 1066 1096 1067 init_header(d, dfmt); 1097 1068 perf_hpp__column_register(fmt); 1069 + perf_hpp__register_sort_field(fmt); 1098 1070 } 1099 1071 1100 1072 static void ui_init(void)
+10 -3
tools/perf/builtin-list.c
··· 19 19 int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused) 20 20 { 21 21 int i; 22 - const struct option list_options[] = { 22 + bool raw_dump = false; 23 + struct option list_options[] = { 24 + OPT_BOOLEAN(0, "raw-dump", &raw_dump, "Dump raw events"), 23 25 OPT_END() 24 26 }; 25 27 const char * const list_usage[] = { ··· 29 27 NULL 30 28 }; 31 29 30 + set_option_flag(list_options, 0, "raw-dump", PARSE_OPT_HIDDEN); 31 + 32 32 argc = parse_options(argc, argv, list_options, list_usage, 33 33 PARSE_OPT_STOP_AT_NON_OPTION); 34 34 35 35 setup_pager(); 36 + 37 + if (raw_dump) { 38 + print_events(NULL, true); 39 + return 0; 40 + } 36 41 37 42 if (argc == 0) { 38 43 print_events(NULL, false); ··· 62 53 print_hwcache_events(NULL, false); 63 54 else if (strcmp(argv[i], "pmu") == 0) 64 55 print_pmu_events(NULL, false); 65 - else if (strcmp(argv[i], "--raw-dump") == 0) 66 - print_events(NULL, true); 67 56 else { 68 57 char *sep = strchr(argv[i], ':'), *s; 69 58 int sep_idx;
+22 -2
tools/perf/builtin-report.c
··· 457 457 ui_progress__finish(); 458 458 } 459 459 460 + static void report__output_resort(struct report *rep) 461 + { 462 + struct ui_progress prog; 463 + struct perf_evsel *pos; 464 + 465 + ui_progress__init(&prog, rep->nr_entries, "Sorting events for output..."); 466 + 467 + evlist__for_each(rep->session->evlist, pos) 468 + hists__output_resort(evsel__hists(pos), &prog); 469 + 470 + ui_progress__finish(); 471 + } 472 + 460 473 static int __cmd_report(struct report *rep) 461 474 { 462 475 int ret; ··· 518 505 if (session_done()) 519 506 return 0; 520 507 508 + /* 509 + * recalculate number of entries after collapsing since it 510 + * might be changed during the collapse phase. 511 + */ 512 + rep->nr_entries = 0; 513 + evlist__for_each(session->evlist, pos) 514 + rep->nr_entries += evsel__hists(pos)->nr_entries; 515 + 521 516 if (rep->nr_entries == 0) { 522 517 ui__error("The %s file has no samples!\n", file->path); 523 518 return 0; 524 519 } 525 520 526 - evlist__for_each(session->evlist, pos) 527 - hists__output_resort(evsel__hists(pos)); 521 + report__output_resort(rep); 528 522 529 523 return report__browse_hists(rep); 530 524 }
+2 -2
tools/perf/builtin-top.c
··· 285 285 } 286 286 287 287 hists__collapse_resort(hists, NULL); 288 - hists__output_resort(hists); 288 + hists__output_resort(hists, NULL); 289 289 290 290 hists__output_recalc_col_len(hists, top->print_entries - printed); 291 291 putchar('\n'); ··· 554 554 } 555 555 556 556 hists__collapse_resort(hists, NULL); 557 - hists__output_resort(hists); 557 + hists__output_resort(hists, NULL); 558 558 } 559 559 560 560 static void *display_thread_tui(void *arg)
+34 -34
tools/perf/tests/hists_cumulate.c
··· 187 187 * function since TEST_ASSERT_VAL() returns in case of failure. 188 188 */ 189 189 hists__collapse_resort(hists, NULL); 190 - hists__output_resort(hists); 190 + hists__output_resort(hists, NULL); 191 191 192 192 if (verbose > 2) { 193 193 pr_info("use callchain: %d, cumulate callchain: %d\n", ··· 454 454 * 30.00% 10.00% perf perf [.] cmd_record 455 455 * 20.00% 0.00% bash libc [.] malloc 456 456 * 10.00% 10.00% bash [kernel] [k] page_fault 457 - * 10.00% 10.00% perf [kernel] [k] schedule 458 - * 10.00% 0.00% perf [kernel] [k] sys_perf_event_open 459 - * 10.00% 10.00% perf [kernel] [k] page_fault 460 - * 10.00% 10.00% perf libc [.] free 461 - * 10.00% 10.00% perf libc [.] malloc 462 457 * 10.00% 10.00% bash bash [.] xmalloc 458 + * 10.00% 10.00% perf [kernel] [k] page_fault 459 + * 10.00% 10.00% perf libc [.] malloc 460 + * 10.00% 10.00% perf [kernel] [k] schedule 461 + * 10.00% 10.00% perf libc [.] free 462 + * 10.00% 0.00% perf [kernel] [k] sys_perf_event_open 463 463 */ 464 464 struct result expected[] = { 465 465 { 7000, 2000, "perf", "perf", "main" }, ··· 468 468 { 3000, 1000, "perf", "perf", "cmd_record" }, 469 469 { 2000, 0, "bash", "libc", "malloc" }, 470 470 { 1000, 1000, "bash", "[kernel]", "page_fault" }, 471 - { 1000, 1000, "perf", "[kernel]", "schedule" }, 472 - { 1000, 0, "perf", "[kernel]", "sys_perf_event_open" }, 471 + { 1000, 1000, "bash", "bash", "xmalloc" }, 473 472 { 1000, 1000, "perf", "[kernel]", "page_fault" }, 473 + { 1000, 1000, "perf", "[kernel]", "schedule" }, 474 474 { 1000, 1000, "perf", "libc", "free" }, 475 475 { 1000, 1000, "perf", "libc", "malloc" }, 476 - { 1000, 1000, "bash", "bash", "xmalloc" }, 476 + { 1000, 0, "perf", "[kernel]", "sys_perf_event_open" }, 477 477 }; 478 478 479 479 symbol_conf.use_callchain = false; ··· 537 537 * malloc 538 538 * main 539 539 * 540 - * 10.00% 10.00% perf [kernel] [k] schedule 540 + * 10.00% 10.00% bash bash [.] xmalloc 541 541 * | 542 - * --- schedule 543 - * run_command 542 + * --- xmalloc 543 + * malloc 544 + * xmalloc <--- NOTE: there's a cycle 545 + * malloc 546 + * xmalloc 544 547 * main 545 548 * 546 549 * 10.00% 0.00% perf [kernel] [k] sys_perf_event_open ··· 556 553 * | 557 554 * --- page_fault 558 555 * sys_perf_event_open 556 + * run_command 557 + * main 558 + * 559 + * 10.00% 10.00% perf [kernel] [k] schedule 560 + * | 561 + * --- schedule 559 562 * run_command 560 563 * main 561 564 * ··· 579 570 * run_command 580 571 * main 581 572 * 582 - * 10.00% 10.00% bash bash [.] xmalloc 583 - * | 584 - * --- xmalloc 585 - * malloc 586 - * xmalloc <--- NOTE: there's a cycle 587 - * malloc 588 - * xmalloc 589 - * main 590 - * 591 573 */ 592 574 struct result expected[] = { 593 575 { 7000, 2000, "perf", "perf", "main" }, ··· 587 587 { 3000, 1000, "perf", "perf", "cmd_record" }, 588 588 { 2000, 0, "bash", "libc", "malloc" }, 589 589 { 1000, 1000, "bash", "[kernel]", "page_fault" }, 590 - { 1000, 1000, "perf", "[kernel]", "schedule" }, 590 + { 1000, 1000, "bash", "bash", "xmalloc" }, 591 591 { 1000, 0, "perf", "[kernel]", "sys_perf_event_open" }, 592 592 { 1000, 1000, "perf", "[kernel]", "page_fault" }, 593 + { 1000, 1000, "perf", "[kernel]", "schedule" }, 593 594 { 1000, 1000, "perf", "libc", "free" }, 594 595 { 1000, 1000, "perf", "libc", "malloc" }, 595 - { 1000, 1000, "bash", "bash", "xmalloc" }, 596 596 }; 597 597 struct callchain_result expected_callchain[] = { 598 598 { ··· 622 622 { "bash", "main" }, }, 623 623 }, 624 624 { 625 - 3, { { "[kernel]", "schedule" }, 626 - { "perf", "run_command" }, 627 - { "perf", "main" }, }, 625 + 6, { { "bash", "xmalloc" }, 626 + { "libc", "malloc" }, 627 + { "bash", "xmalloc" }, 628 + { "libc", "malloc" }, 629 + { "bash", "xmalloc" }, 630 + { "bash", "main" }, }, 628 631 }, 629 632 { 630 633 3, { { "[kernel]", "sys_perf_event_open" }, ··· 637 634 { 638 635 4, { { "[kernel]", "page_fault" }, 639 636 { "[kernel]", "sys_perf_event_open" }, 637 + { "perf", "run_command" }, 638 + { "perf", "main" }, }, 639 + }, 640 + { 641 + 3, { { "[kernel]", "schedule" }, 640 642 { "perf", "run_command" }, 641 643 { "perf", "main" }, }, 642 644 }, ··· 656 648 { "perf", "cmd_record" }, 657 649 { "perf", "run_command" }, 658 650 { "perf", "main" }, }, 659 - }, 660 - { 661 - 6, { { "bash", "xmalloc" }, 662 - { "libc", "malloc" }, 663 - { "bash", "xmalloc" }, 664 - { "libc", "malloc" }, 665 - { "bash", "xmalloc" }, 666 - { "bash", "main" }, }, 667 651 }, 668 652 }; 669 653
+1 -1
tools/perf/tests/hists_filter.c
··· 138 138 struct hists *hists = evsel__hists(evsel); 139 139 140 140 hists__collapse_resort(hists, NULL); 141 - hists__output_resort(hists); 141 + hists__output_resort(hists, NULL); 142 142 143 143 if (verbose > 2) { 144 144 pr_info("Normal histogram\n");
+5 -5
tools/perf/tests/hists_output.c
··· 152 152 goto out; 153 153 154 154 hists__collapse_resort(hists, NULL); 155 - hists__output_resort(hists); 155 + hists__output_resort(hists, NULL); 156 156 157 157 if (verbose > 2) { 158 158 pr_info("[fields = %s, sort = %s]\n", field_order, sort_order); ··· 252 252 goto out; 253 253 254 254 hists__collapse_resort(hists, NULL); 255 - hists__output_resort(hists); 255 + hists__output_resort(hists, NULL); 256 256 257 257 if (verbose > 2) { 258 258 pr_info("[fields = %s, sort = %s]\n", field_order, sort_order); ··· 306 306 goto out; 307 307 308 308 hists__collapse_resort(hists, NULL); 309 - hists__output_resort(hists); 309 + hists__output_resort(hists, NULL); 310 310 311 311 if (verbose > 2) { 312 312 pr_info("[fields = %s, sort = %s]\n", field_order, sort_order); ··· 384 384 goto out; 385 385 386 386 hists__collapse_resort(hists, NULL); 387 - hists__output_resort(hists); 387 + hists__output_resort(hists, NULL); 388 388 389 389 if (verbose > 2) { 390 390 pr_info("[fields = %s, sort = %s]\n", field_order, sort_order); ··· 487 487 goto out; 488 488 489 489 hists__collapse_resort(hists, NULL); 490 - hists__output_resort(hists); 490 + hists__output_resort(hists, NULL); 491 491 492 492 if (verbose > 2) { 493 493 pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
+1 -1
tools/perf/ui/browsers/hists.c
··· 550 550 bool need_percent; 551 551 552 552 node = rb_first(root); 553 - need_percent = !!rb_next(node); 553 + need_percent = node && rb_next(node); 554 554 555 555 while (node) { 556 556 struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node);
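The hists-browser fix is a classic NULL-deref guard: rb_first() returns NULL for an empty tree, and "!!rb_next(node)" dereferences that NULL, while "node && rb_next(node)" short-circuits. In miniature (the rb_next here is a stand-in, not the kernel rbtree helper):

#include <stdio.h>
#include <stddef.h>

struct rb_node { struct rb_node *link; };

/* Stand-in for rb_next(); like the real helper it dereferences its argument. */
static struct rb_node *rb_next(struct rb_node *n)
{
        return n->link;
}

int main(void)
{
        struct rb_node *node = NULL;    /* rb_first() on an empty tree */
        int need_percent;

        /* Broken: need_percent = !!rb_next(node) dereferences NULL here. */
        need_percent = node && rb_next(node);   /* fixed: && short-circuits */
        printf("need_percent = %d\n", need_percent);
        return 0;
}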
+3
tools/perf/ui/hist.c
··· 204 204 if (ret) 205 205 return ret; 206 206 207 + if (a->thread != b->thread || !symbol_conf.use_callchain) 208 + return 0; 209 + 207 210 ret = b->callchain->max_depth - a->callchain->max_depth; 208 211 } 209 212 return ret;
+24 -2
tools/perf/ui/tui/setup.c
··· 1 1 #include <signal.h> 2 2 #include <stdbool.h> 3 + #ifdef HAVE_BACKTRACE_SUPPORT 4 + #include <execinfo.h> 5 + #endif 3 6 4 7 #include "../../util/cache.h" 5 8 #include "../../util/debug.h" ··· 91 88 return SLkp_getkey(); 92 89 } 93 90 91 + #ifdef HAVE_BACKTRACE_SUPPORT 92 + static void ui__signal_backtrace(int sig) 93 + { 94 + void *stackdump[32]; 95 + size_t size; 96 + 97 + ui__exit(false); 98 + psignal(sig, "perf"); 99 + 100 + printf("-------- backtrace --------\n"); 101 + size = backtrace(stackdump, ARRAY_SIZE(stackdump)); 102 + backtrace_symbols_fd(stackdump, size, STDOUT_FILENO); 103 + 104 + exit(0); 105 + } 106 + #else 107 + # define ui__signal_backtrace ui__signal 108 + #endif 109 + 94 110 static void ui__signal(int sig) 95 111 { 96 112 ui__exit(false); ··· 144 122 ui_browser__init(); 145 123 tui_progress__init(); 146 124 147 - signal(SIGSEGV, ui__signal); 148 - signal(SIGFPE, ui__signal); 125 + signal(SIGSEGV, ui__signal_backtrace); 126 + signal(SIGFPE, ui__signal_backtrace); 149 127 signal(SIGINT, ui__signal); 150 128 signal(SIGQUIT, ui__signal); 151 129 signal(SIGTERM, ui__signal);
+30
tools/perf/util/callchain.c
··· 841 841 842 842 return bf; 843 843 } 844 + 845 + static void free_callchain_node(struct callchain_node *node) 846 + { 847 + struct callchain_list *list, *tmp; 848 + struct callchain_node *child; 849 + struct rb_node *n; 850 + 851 + list_for_each_entry_safe(list, tmp, &node->val, list) { 852 + list_del(&list->list); 853 + free(list); 854 + } 855 + 856 + n = rb_first(&node->rb_root_in); 857 + while (n) { 858 + child = container_of(n, struct callchain_node, rb_node_in); 859 + n = rb_next(n); 860 + rb_erase(&child->rb_node_in, &node->rb_root_in); 861 + 862 + free_callchain_node(child); 863 + free(child); 864 + } 865 + } 866 + 867 + void free_callchain(struct callchain_root *root) 868 + { 869 + if (!symbol_conf.use_callchain) 870 + return; 871 + 872 + free_callchain_node(&root->node); 873 + }
+2
tools/perf/util/callchain.h
··· 198 198 char *callchain_list__sym_name(struct callchain_list *cl, 199 199 char *bf, size_t bfsize, bool show_dso); 200 200 201 + void free_callchain(struct callchain_root *root); 202 + 201 203 #endif /* __PERF_CALLCHAIN_H */
+14 -4
tools/perf/util/hist.c
··· 6 6 #include "evlist.h" 7 7 #include "evsel.h" 8 8 #include "annotate.h" 9 + #include "ui/progress.h" 9 10 #include <math.h> 10 11 11 12 static bool hists__filter_entry_by_dso(struct hists *hists, ··· 304 303 size_t callchain_size = 0; 305 304 struct hist_entry *he; 306 305 307 - if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain) 306 + if (symbol_conf.use_callchain) 308 307 callchain_size = sizeof(struct callchain_root); 309 308 310 309 he = zalloc(sizeof(*he) + callchain_size); ··· 737 736 iter->he = he; 738 737 he_cache[iter->curr++] = he; 739 738 740 - callchain_append(he->callchain, &callchain_cursor, sample->period); 739 + hist_entry__append_callchain(he, sample); 741 740 742 741 /* 743 742 * We need to re-initialize the cursor since callchain_append() ··· 810 809 iter->he = he; 811 810 he_cache[iter->curr++] = he; 812 811 813 - callchain_append(he->callchain, &cursor, sample->period); 812 + if (symbol_conf.use_callchain) 813 + callchain_append(he->callchain, &cursor, sample->period); 814 814 return 0; 815 815 } 816 816 ··· 947 945 zfree(&he->mem_info); 948 946 zfree(&he->stat_acc); 949 947 free_srcline(he->srcline); 948 + free_callchain(he->callchain); 950 949 free(he); 951 950 } 952 951 ··· 990 987 else 991 988 p = &(*p)->rb_right; 992 989 } 990 + hists->nr_entries++; 993 991 994 992 rb_link_node(&he->rb_node_in, parent, p); 995 993 rb_insert_color(&he->rb_node_in, root); ··· 1028 1024 if (!sort__need_collapse) 1029 1025 return; 1030 1026 1027 + hists->nr_entries = 0; 1028 + 1031 1029 root = hists__get_rotate_entries_in(hists); 1030 + 1032 1031 next = rb_first(root); 1033 1032 1034 1033 while (next) { ··· 1126 1119 rb_insert_color(&he->rb_node, entries); 1127 1120 } 1128 1121 1129 - void hists__output_resort(struct hists *hists) 1122 + void hists__output_resort(struct hists *hists, struct ui_progress *prog) 1130 1123 { 1131 1124 struct rb_root *root; 1132 1125 struct rb_node *next; ··· 1155 1148 1156 1149 if (!n->filtered) 1157 1150 hists__calc_col_len(hists, n); 1151 + 1152 + if (prog) 1153 + ui_progress__update(prog, 1); 1158 1154 } 1159 1155 } 1160 1156
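Among the util/hist.c changes, note the allocation idiom being tightened: the callchain root is not a separate allocation but trailing space tacked onto the hist_entry itself — zalloc(sizeof(*he) + callchain_size) — now reserved only when callchains are actually in use and torn down via the new free_callchain(). A sketch of that single-allocation pattern with illustrative types (perf itself reaches the trailing space through a zero-length last member):

#include <stdio.h>
#include <stdlib.h>

struct callchain_root { int depth; };

struct entry {
        long period;
        /* optional callchain_root lives in trailing space past the struct */
};

int main(void)
{
        int use_callchain = 1;
        size_t extra = use_callchain ? sizeof(struct callchain_root) : 0;

        /* One zeroed allocation covers the entry plus the optional root. */
        struct entry *he = calloc(1, sizeof(*he) + extra);
        if (!he)
                return 1;

        if (use_callchain) {
                struct callchain_root *chain = (struct callchain_root *)(he + 1);
                chain->depth = 3;
                printf("entry %p, trailing callchain %p\n",
                       (void *)he, (void *)chain);
        }
        free(he);               /* one free() releases both */
        return 0;
}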
+1 -1
tools/perf/util/hist.h
··· 121 121 struct hists *hists); 122 122 void hist_entry__free(struct hist_entry *); 123 123 124 - void hists__output_resort(struct hists *hists); 124 + void hists__output_resort(struct hists *hists, struct ui_progress *prog); 125 125 void hists__collapse_resort(struct hists *hists, struct ui_progress *prog); 126 126 127 127 void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel);
+4 -2
tools/perf/util/probe-event.c
··· 495 495 } 496 496 497 497 if (ntevs == 0) { /* No error but failed to find probe point. */ 498 - pr_warning("Probe point '%s' not found.\n", 498 + pr_warning("Probe point '%s' not found in debuginfo.\n", 499 499 synthesize_perf_probe_point(&pev->point)); 500 - return -ENOENT; 500 + if (need_dwarf) 501 + return -ENOENT; 502 + return 0; 501 503 } 502 504 /* Error path : ntevs < 0 */ 503 505 pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs);
+17 -1
tools/perf/util/probe-finder.c
··· 989 989 int ret = 0; 990 990 991 991 #if _ELFUTILS_PREREQ(0, 142) 992 + Elf *elf; 993 + GElf_Ehdr ehdr; 994 + GElf_Shdr shdr; 995 + 992 996 /* Get the call frame information from this dwarf */ 993 - pf->cfi = dwarf_getcfi_elf(dwarf_getelf(dbg->dbg)); 997 + elf = dwarf_getelf(dbg->dbg); 998 + if (elf == NULL) 999 + return -EINVAL; 1000 + 1001 + if (gelf_getehdr(elf, &ehdr) == NULL) 1002 + return -EINVAL; 1003 + 1004 + if (elf_section_by_name(elf, &ehdr, &shdr, ".eh_frame", NULL) && 1005 + shdr.sh_type == SHT_PROGBITS) { 1006 + pf->cfi = dwarf_getcfi_elf(elf); 1007 + } else { 1008 + pf->cfi = dwarf_getcfi(dbg->dbg); 1009 + } 994 1010 #endif 995 1011 996 1012 off = 0;
+13 -6
tools/testing/selftests/exec/execveat.c
··· 62 62 } 63 63 64 64 static int check_execveat_invoked_rc(int fd, const char *path, int flags, 65 - int expected_rc) 65 + int expected_rc, int expected_rc2) 66 66 { 67 67 int status; 68 68 int rc; ··· 98 98 child, status); 99 99 return 1; 100 100 } 101 - if (WEXITSTATUS(status) != expected_rc) { 102 - printf("[FAIL] (child %d exited with %d not %d)\n", 103 - child, WEXITSTATUS(status), expected_rc); 101 + if ((WEXITSTATUS(status) != expected_rc) && 102 + (WEXITSTATUS(status) != expected_rc2)) { 103 + printf("[FAIL] (child %d exited with %d not %d nor %d)\n", 104 + child, WEXITSTATUS(status), expected_rc, expected_rc2); 104 105 return 1; 105 106 } 106 107 printf("[OK]\n"); ··· 110 109 111 110 static int check_execveat(int fd, const char *path, int flags) 112 111 { 113 - return check_execveat_invoked_rc(fd, path, flags, 99); 112 + return check_execveat_invoked_rc(fd, path, flags, 99, 99); 114 113 } 115 114 116 115 static char *concat(const char *left, const char *right) ··· 193 192 * Execute as a long pathname relative to ".". If this is a script, 194 193 * the interpreter will launch but fail to open the script because its 195 194 * name ("/dev/fd/5/xxx....") is bigger than PATH_MAX. 195 + * 196 + * The failure code is usually 127 (POSIX: "If a command is not found, 197 + * the exit status shall be 127."), but some systems give 126 (POSIX: 198 + * "If the command name is found, but it is not an executable utility, 199 + * the exit status shall be 126."), so allow either. 196 200 */ 197 201 if (is_script) 198 - fail += check_execveat_invoked_rc(dot_dfd, longpath, 0, 127); 202 + fail += check_execveat_invoked_rc(dot_dfd, longpath, 0, 203 + 127, 126); 199 204 else 200 205 fail += check_execveat(dot_dfd, longpath, 0); 201 206
+1 -2
tools/testing/selftests/mqueue/mq_perf_tests.c
··· 536 536 { 537 537 struct mq_attr attr; 538 538 char *option, *next_option; 539 - int i, cpu; 539 + int i, cpu, rc; 540 540 struct sigaction sa; 541 541 poptContext popt_context; 542 - char rc; 543 542 void *retval; 544 543 545 544 main_thread = pthread_self();
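The mq_perf_tests change widens rc from char to int: popt's option processing returns negative int codes, and on ABIs where plain char is unsigned (ARM, s390, PowerPC Linux) a negative return stored in a char can never compare negative again. A compact illustration (get_next_opt is a stand-in for the popt call):

#include <stdio.h>

/* Stand-in for an API returning a negative int status, e.g. poptGetNextOpt(). */
static int get_next_opt(void)
{
        return -1;      /* "done" / error indicator */
}

int main(void)
{
        char narrow = get_next_opt();   /* the old "char rc" */
        int rc = get_next_opt();        /* the fix */

        /* Where plain char is unsigned, -1 stored in a char reads back as
           255, so "narrow < 0" is always false on those ABIs. */
        printf("char rc < 0: %s\n", narrow < 0 ? "yes" : "no (the bug)");
        printf("int  rc < 0: %s\n", rc < 0 ? "yes" : "no");
        return 0;
}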
+1 -1
tools/testing/selftests/vm/Makefile
··· 7 7 8 8 all: $(BINARIES) 9 9 %: %.c 10 - $(CC) $(CFLAGS) -o $@ $^ 10 + $(CC) $(CFLAGS) -o $@ $^ -lrt 11 11 12 12 run_tests: all 13 13 @/bin/sh ./run_vmtests || (echo "vmtests: [FAIL]"; exit 1)