Merge tag 'mvebu-fixes-4.12-1' of git://git.infradead.org/linux-mvebu into fixes

mvebu fixes for 4.12

Fix the interrupt description of the crypto nodes in the device trees
of the Armada 7K/8K SoCs
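
As a trimmed illustration taken from the armada-cp110-master/slave dtsi
diffs further down: the first interrupt specifier of the EIP197 crypto
nodes drops an invalid combination of edge and level trigger flags and,
since a GIC SPI can only use one trigger type, now simply requests a
level-high interrupt (remaining entries unchanged and omitted here):

    before: interrupts = <GIC_SPI 34 (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH)>, ...
    after:  interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>, ...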

* tag 'mvebu-fixes-4.12-1' of git://git.infradead.org/linux-mvebu: (316 commits)
arm64: marvell: dts: fix interrupts in 7k/8k crypto nodes
+ Linux 4.12-rc2

Signed-off-by: Olof Johansson <olof@lixom.net>

+3191 -1432
-31
Documentation/devicetree/bindings/staging/ion/hi6220-ion.txt
··· 1 - Hi6220 SoC ION 2 - =================================================================== 3 - Required properties: 4 - - compatible : "hisilicon,hi6220-ion" 5 - - list of the ION heaps 6 - - heap name : maybe heap_sys_user@0 7 - - heap id : id should be unique in the system. 8 - - heap base : base ddr address of the heap,0 means that 9 - it is dynamic. 10 - - heap size : memory size and 0 means it is dynamic. 11 - - heap type : the heap type of the heap, please also 12 - see the define in ion.h(drivers/staging/android/uapi/ion.h) 13 - ------------------------------------------------------------------- 14 - Example: 15 - hi6220-ion { 16 - compatible = "hisilicon,hi6220-ion"; 17 - heap_sys_user@0 { 18 - heap-name = "sys_user"; 19 - heap-id = <0x0>; 20 - heap-base = <0x0>; 21 - heap-size = <0x0>; 22 - heap-type = "ion_system"; 23 - }; 24 - heap_sys_contig@0 { 25 - heap-name = "sys_contig"; 26 - heap-id = <0x1>; 27 - heap-base = <0x0>; 28 - heap-size = <0x0>; 29 - heap-type = "ion_system_contig"; 30 - }; 31 - };
···
+2 -4
Documentation/usb/typec.rst
··· 114 registering/unregistering cables and their plugs: 115 116 .. kernel-doc:: drivers/usb/typec/typec.c 117 - :functions: typec_register_cable typec_unregister_cable typec_register_plug 118 - typec_unregister_plug 119 120 The class will provide a handle to struct typec_cable and struct typec_plug if 121 the registration is successful, or NULL if it isn't. ··· 136 APIs to report it to the class: 137 138 .. kernel-doc:: drivers/usb/typec/typec.c 139 - :functions: typec_set_data_role typec_set_pwr_role typec_set_vconn_role 140 - typec_set_pwr_opmode 141 142 Alternate Modes 143 ~~~~~~~~~~~~~~~
··· 114 registering/unregistering cables and their plugs: 115 116 .. kernel-doc:: drivers/usb/typec/typec.c 117 + :functions: typec_register_cable typec_unregister_cable typec_register_plug typec_unregister_plug 118 119 The class will provide a handle to struct typec_cable and struct typec_plug if 120 the registration is successful, or NULL if it isn't. ··· 137 APIs to report it to the class: 138 139 .. kernel-doc:: drivers/usb/typec/typec.c 140 + :functions: typec_set_data_role typec_set_pwr_role typec_set_vconn_role typec_set_pwr_opmode 141 142 Alternate Modes 143 ~~~~~~~~~~~~~~~
+1 -1
Documentation/watchdog/watchdog-parameters.txt
··· 117 ------------------------------------------------- 118 iTCO_wdt: 119 heartbeat: Watchdog heartbeat in seconds. 120 - (2<heartbeat<39 (TCO v1) or 613 (TCO v2), default=30) 121 nowayout: Watchdog cannot be stopped once started 122 (default=kernel config parameter) 123 -------------------------------------------------
··· 117 ------------------------------------------------- 118 iTCO_wdt: 119 heartbeat: Watchdog heartbeat in seconds. 120 + (5<=heartbeat<=74 (TCO v1) or 1226 (TCO v2), default=30) 121 nowayout: Watchdog cannot be stopped once started 122 (default=kernel config parameter) 123 -------------------------------------------------
+9 -6
MAINTAINERS
··· 846 M: Sumit Semwal <sumit.semwal@linaro.org> 847 L: devel@driverdev.osuosl.org 848 S: Supported 849 - F: Documentation/devicetree/bindings/staging/ion/ 850 F: drivers/staging/android/ion 851 F: drivers/staging/android/uapi/ion.h 852 F: drivers/staging/android/uapi/ion_test.h ··· 3112 F: drivers/net/ieee802154/cc2520.c 3113 F: include/linux/spi/cc2520.h 3114 F: Documentation/devicetree/bindings/net/ieee802154/cc2520.txt 3115 3116 CEC FRAMEWORK 3117 M: Hans Verkuil <hans.verkuil@cisco.com> ··· 5700 M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 5701 S: Maintained 5702 F: drivers/staging/greybus/ 5703 - L: greybus-dev@lists.linaro.org 5704 5705 GREYBUS AUDIO PROTOCOLS DRIVERS 5706 M: Vaibhav Agarwal <vaibhav.sr@gmail.com> ··· 9558 9559 OSD LIBRARY and FILESYSTEM 9560 M: Boaz Harrosh <ooo@electrozaur.com> 9561 - M: Benny Halevy <bhalevy@primarydata.com> 9562 - L: osd-dev@open-osd.org 9563 - W: http://open-osd.org 9564 - T: git git://git.open-osd.org/open-osd.git 9565 S: Maintained 9566 F: drivers/scsi/osd/ 9567 F: include/scsi/osd_*
··· 846 M: Sumit Semwal <sumit.semwal@linaro.org> 847 L: devel@driverdev.osuosl.org 848 S: Supported 849 F: drivers/staging/android/ion 850 F: drivers/staging/android/uapi/ion.h 851 F: drivers/staging/android/uapi/ion_test.h ··· 3113 F: drivers/net/ieee802154/cc2520.c 3114 F: include/linux/spi/cc2520.h 3115 F: Documentation/devicetree/bindings/net/ieee802154/cc2520.txt 3116 + 3117 + CCREE ARM TRUSTZONE CRYPTOCELL 700 REE DRIVER 3118 + M: Gilad Ben-Yossef <gilad@benyossef.com> 3119 + L: linux-crypto@vger.kernel.org 3120 + L: driverdev-devel@linuxdriverproject.org 3121 + S: Supported 3122 + F: drivers/staging/ccree/ 3123 + W: https://developer.arm.com/products/system-ip/trustzone-cryptocell/cryptocell-700-family 3124 3125 CEC FRAMEWORK 3126 M: Hans Verkuil <hans.verkuil@cisco.com> ··· 5693 M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 5694 S: Maintained 5695 F: drivers/staging/greybus/ 5696 + L: greybus-dev@lists.linaro.org (moderated for non-subscribers) 5697 5698 GREYBUS AUDIO PROTOCOLS DRIVERS 5699 M: Vaibhav Agarwal <vaibhav.sr@gmail.com> ··· 9551 9552 OSD LIBRARY and FILESYSTEM 9553 M: Boaz Harrosh <ooo@electrozaur.com> 9554 S: Maintained 9555 F: drivers/scsi/osd/ 9556 F: include/scsi/osd_*
+2 -2
Makefile
··· 1 VERSION = 4 2 PATCHLEVEL = 12 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc1 5 NAME = Fearless Coyote 6 7 # *DOCUMENTATION* ··· 1172 PHONY += headers_check 1173 headers_check: headers_install 1174 $(Q)$(MAKE) $(hdr-inst)=include/uapi HDRCHECK=1 1175 - $(Q)$(MAKE) $(hdr-inst)=arch/$(hdr-arch)/include/uapi/ $(hdr-dst) HDRCHECK=1 1176 1177 # --------------------------------------------------------------------------- 1178 # Kernel selftest
··· 1 VERSION = 4 2 PATCHLEVEL = 12 3 SUBLEVEL = 0 4 + EXTRAVERSION = -rc2 5 NAME = Fearless Coyote 6 7 # *DOCUMENTATION* ··· 1172 PHONY += headers_check 1173 headers_check: headers_install 1174 $(Q)$(MAKE) $(hdr-inst)=include/uapi HDRCHECK=1 1175 + $(Q)$(MAKE) $(hdr-inst)=arch/$(hdr-arch)/include/uapi $(hdr-dst) HDRCHECK=1 1176 1177 # --------------------------------------------------------------------------- 1178 # Kernel selftest
+4 -2
arch/alpha/kernel/osf_sys.c
··· 1201 if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur))) 1202 return -EFAULT; 1203 1204 - err = 0; 1205 - err |= put_user(status, ustatus); 1206 err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec); 1207 err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec); 1208 err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec);
··· 1201 if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur))) 1202 return -EFAULT; 1203 1204 + err = put_user(status, ustatus); 1205 + if (ret < 0) 1206 + return err ? err : ret; 1207 + 1208 err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec); 1209 err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec); 1210 err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec);
+2 -1
arch/arm/include/asm/kvm_coproc.h
··· 31 int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run); 32 int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run); 33 int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run); 34 - int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run); 35 int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run); 36 int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run); 37
··· 31 int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run); 32 int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run); 33 int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run); 34 + int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run); 35 + int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run); 36 int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run); 37 int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run); 38
+74 -32
arch/arm/kvm/coproc.c
··· 32 #include <asm/vfp.h> 33 #include "../vfp/vfpinstr.h" 34 35 #include "trace.h" 36 #include "coproc.h" 37 ··· 107 } 108 109 int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run) 110 - { 111 - kvm_inject_undefined(vcpu); 112 - return 1; 113 - } 114 - 115 - int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run) 116 { 117 kvm_inject_undefined(vcpu); 118 return 1; ··· 279 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for 280 * all PM registers, which doesn't crash the guest kernel at least. 281 */ 282 - static bool pm_fake(struct kvm_vcpu *vcpu, 283 const struct coproc_params *p, 284 const struct coproc_reg *r) 285 { ··· 289 return read_zero(vcpu, p); 290 } 291 292 - #define access_pmcr pm_fake 293 - #define access_pmcntenset pm_fake 294 - #define access_pmcntenclr pm_fake 295 - #define access_pmovsr pm_fake 296 - #define access_pmselr pm_fake 297 - #define access_pmceid0 pm_fake 298 - #define access_pmceid1 pm_fake 299 - #define access_pmccntr pm_fake 300 - #define access_pmxevtyper pm_fake 301 - #define access_pmxevcntr pm_fake 302 - #define access_pmuserenr pm_fake 303 - #define access_pmintenset pm_fake 304 - #define access_pmintenclr pm_fake 305 306 /* Architected CP15 registers. 307 * CRn denotes the primary register number, but is copied to the CRm in the ··· 527 return 1; 528 } 529 530 - /** 531 - * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access 532 - * @vcpu: The VCPU pointer 533 - * @run: The kvm_run struct 534 - */ 535 - int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run) 536 { 537 struct coproc_params params; 538 ··· 541 params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf; 542 params.CRm = 0; 543 544 return emulate_cp15(vcpu, &params); 545 } 546 547 static void reset_coproc_regs(struct kvm_vcpu *vcpu, ··· 583 table[i].reset(vcpu, &table[i]); 584 } 585 586 - /** 587 - * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access 588 - * @vcpu: The VCPU pointer 589 - * @run: The kvm_run struct 590 - */ 591 - int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run) 592 { 593 struct coproc_params params; 594 ··· 597 params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7; 598 params.Rt2 = 0; 599 600 return emulate_cp15(vcpu, &params); 601 } 602 603 /******************************************************************************
··· 32 #include <asm/vfp.h> 33 #include "../vfp/vfpinstr.h" 34 35 + #define CREATE_TRACE_POINTS 36 #include "trace.h" 37 #include "coproc.h" 38 ··· 106 } 107 108 int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run) 109 { 110 kvm_inject_undefined(vcpu); 111 return 1; ··· 284 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for 285 * all PM registers, which doesn't crash the guest kernel at least. 286 */ 287 + static bool trap_raz_wi(struct kvm_vcpu *vcpu, 288 const struct coproc_params *p, 289 const struct coproc_reg *r) 290 { ··· 294 return read_zero(vcpu, p); 295 } 296 297 + #define access_pmcr trap_raz_wi 298 + #define access_pmcntenset trap_raz_wi 299 + #define access_pmcntenclr trap_raz_wi 300 + #define access_pmovsr trap_raz_wi 301 + #define access_pmselr trap_raz_wi 302 + #define access_pmceid0 trap_raz_wi 303 + #define access_pmceid1 trap_raz_wi 304 + #define access_pmccntr trap_raz_wi 305 + #define access_pmxevtyper trap_raz_wi 306 + #define access_pmxevcntr trap_raz_wi 307 + #define access_pmuserenr trap_raz_wi 308 + #define access_pmintenset trap_raz_wi 309 + #define access_pmintenclr trap_raz_wi 310 311 /* Architected CP15 registers. 312 * CRn denotes the primary register number, but is copied to the CRm in the ··· 532 return 1; 533 } 534 535 + static struct coproc_params decode_64bit_hsr(struct kvm_vcpu *vcpu) 536 { 537 struct coproc_params params; 538 ··· 551 params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf; 552 params.CRm = 0; 553 554 + return params; 555 + } 556 + 557 + /** 558 + * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access 559 + * @vcpu: The VCPU pointer 560 + * @run: The kvm_run struct 561 + */ 562 + int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run) 563 + { 564 + struct coproc_params params = decode_64bit_hsr(vcpu); 565 + 566 return emulate_cp15(vcpu, &params); 567 + } 568 + 569 + /** 570 + * kvm_handle_cp14_64 -- handles a mrrc/mcrr trap on a guest CP14 access 571 + * @vcpu: The VCPU pointer 572 + * @run: The kvm_run struct 573 + */ 574 + int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run) 575 + { 576 + struct coproc_params params = decode_64bit_hsr(vcpu); 577 + 578 + /* raz_wi cp14 */ 579 + trap_raz_wi(vcpu, &params, NULL); 580 + 581 + /* handled */ 582 + kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); 583 + return 1; 584 } 585 586 static void reset_coproc_regs(struct kvm_vcpu *vcpu, ··· 564 table[i].reset(vcpu, &table[i]); 565 } 566 567 + static struct coproc_params decode_32bit_hsr(struct kvm_vcpu *vcpu) 568 { 569 struct coproc_params params; 570 ··· 583 params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7; 584 params.Rt2 = 0; 585 586 + return params; 587 + } 588 + 589 + /** 590 + * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access 591 + * @vcpu: The VCPU pointer 592 + * @run: The kvm_run struct 593 + */ 594 + int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run) 595 + { 596 + struct coproc_params params = decode_32bit_hsr(vcpu); 597 return emulate_cp15(vcpu, &params); 598 + } 599 + 600 + /** 601 + * kvm_handle_cp14_32 -- handles a mrc/mcr trap on a guest CP14 access 602 + * @vcpu: The VCPU pointer 603 + * @run: The kvm_run struct 604 + */ 605 + int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run) 606 + { 607 + struct coproc_params params = decode_32bit_hsr(vcpu); 608 + 609 + /* raz_wi cp14 */ 610 + trap_raz_wi(vcpu, &params, NULL); 611 + 612 + /* handled */ 613 + kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); 614 + return 1; 615 } 616 617 /******************************************************************************
+2 -2
arch/arm/kvm/handle_exit.c
··· 95 [HSR_EC_WFI] = kvm_handle_wfx, 96 [HSR_EC_CP15_32] = kvm_handle_cp15_32, 97 [HSR_EC_CP15_64] = kvm_handle_cp15_64, 98 - [HSR_EC_CP14_MR] = kvm_handle_cp14_access, 99 [HSR_EC_CP14_LS] = kvm_handle_cp14_load_store, 100 - [HSR_EC_CP14_64] = kvm_handle_cp14_access, 101 [HSR_EC_CP_0_13] = kvm_handle_cp_0_13_access, 102 [HSR_EC_CP10_ID] = kvm_handle_cp10_id, 103 [HSR_EC_HVC] = handle_hvc,
··· 95 [HSR_EC_WFI] = kvm_handle_wfx, 96 [HSR_EC_CP15_32] = kvm_handle_cp15_32, 97 [HSR_EC_CP15_64] = kvm_handle_cp15_64, 98 + [HSR_EC_CP14_MR] = kvm_handle_cp14_32, 99 [HSR_EC_CP14_LS] = kvm_handle_cp14_load_store, 100 + [HSR_EC_CP14_64] = kvm_handle_cp14_64, 101 [HSR_EC_CP_0_13] = kvm_handle_cp_0_13_access, 102 [HSR_EC_CP10_ID] = kvm_handle_cp10_id, 103 [HSR_EC_HVC] = handle_hvc,
+2
arch/arm/kvm/hyp/Makefile
··· 2 # Makefile for Kernel-based Virtual Machine module, HYP part 3 # 4 5 KVM=../../../../virt/kvm 6 7 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
··· 2 # Makefile for Kernel-based Virtual Machine module, HYP part 3 # 4 5 + ccflags-y += -fno-stack-protector 6 + 7 KVM=../../../../virt/kvm 8 9 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
+3 -1
arch/arm/kvm/hyp/switch.c
··· 48 write_sysreg(HSTR_T(15), HSTR); 49 write_sysreg(HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11), HCPTR); 50 val = read_sysreg(HDCR); 51 - write_sysreg(val | HDCR_TPM | HDCR_TPMCR, HDCR); 52 } 53 54 static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
··· 48 write_sysreg(HSTR_T(15), HSTR); 49 write_sysreg(HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11), HCPTR); 50 val = read_sysreg(HDCR); 51 + val |= HDCR_TPM | HDCR_TPMCR; /* trap performance monitors */ 52 + val |= HDCR_TDRA | HDCR_TDOSA | HDCR_TDA; /* trap debug regs */ 53 + write_sysreg(val, HDCR); 54 } 55 56 static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
+4 -4
arch/arm/kvm/trace.h
··· 1 - #if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ) 2 - #define _TRACE_KVM_H 3 4 #include <linux/tracepoint.h> 5 ··· 74 __entry->vcpu_pc, __entry->r0, __entry->imm) 75 ); 76 77 - #endif /* _TRACE_KVM_H */ 78 79 #undef TRACE_INCLUDE_PATH 80 - #define TRACE_INCLUDE_PATH arch/arm/kvm 81 #undef TRACE_INCLUDE_FILE 82 #define TRACE_INCLUDE_FILE trace 83
··· 1 + #if !defined(_TRACE_ARM_KVM_H) || defined(TRACE_HEADER_MULTI_READ) 2 + #define _TRACE_ARM_KVM_H 3 4 #include <linux/tracepoint.h> 5 ··· 74 __entry->vcpu_pc, __entry->r0, __entry->imm) 75 ); 76 77 + #endif /* _TRACE_ARM_KVM_H */ 78 79 #undef TRACE_INCLUDE_PATH 80 + #define TRACE_INCLUDE_PATH . 81 #undef TRACE_INCLUDE_FILE 82 #define TRACE_INCLUDE_FILE trace 83
+1 -2
arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
··· 231 cpm_crypto: crypto@800000 { 232 compatible = "inside-secure,safexcel-eip197"; 233 reg = <0x800000 0x200000>; 234 - interrupts = <GIC_SPI 34 (IRQ_TYPE_EDGE_RISING 235 - | IRQ_TYPE_LEVEL_HIGH)>, 236 <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>, 237 <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>, 238 <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
··· 231 cpm_crypto: crypto@800000 { 232 compatible = "inside-secure,safexcel-eip197"; 233 reg = <0x800000 0x200000>; 234 + interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>, 235 <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>, 236 <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>, 237 <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
+1 -2
arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
··· 221 cps_crypto: crypto@800000 { 222 compatible = "inside-secure,safexcel-eip197"; 223 reg = <0x800000 0x200000>; 224 - interrupts = <GIC_SPI 34 (IRQ_TYPE_EDGE_RISING 225 - | IRQ_TYPE_LEVEL_HIGH)>, 226 <GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>, 227 <GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>, 228 <GIC_SPI 280 IRQ_TYPE_LEVEL_HIGH>,
··· 221 cps_crypto: crypto@800000 { 222 compatible = "inside-secure,safexcel-eip197"; 223 reg = <0x800000 0x200000>; 224 + interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>, 225 <GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>, 226 <GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>, 227 <GIC_SPI 280 IRQ_TYPE_LEVEL_HIGH>,
-1
arch/arm64/include/asm/atomic_ll_sc.h
··· 264 " st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n" \ 265 " cbnz %w[tmp], 1b\n" \ 266 " " #mb "\n" \ 267 - " mov %" #w "[oldval], %" #w "[old]\n" \ 268 "2:" \ 269 : [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \ 270 [v] "+Q" (*(unsigned long *)ptr) \
··· 264 " st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n" \ 265 " cbnz %w[tmp], 1b\n" \ 266 " " #mb "\n" \ 267 "2:" \ 268 : [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \ 269 [v] "+Q" (*(unsigned long *)ptr) \
+10 -2
arch/arm64/include/asm/cpufeature.h
··· 115 116 extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); 117 extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS]; 118 119 bool this_cpu_has_cap(unsigned int cap); 120 ··· 125 } 126 127 /* System capability check for constant caps */ 128 - static inline bool cpus_have_const_cap(int num) 129 { 130 if (num >= ARM64_NCAPS) 131 return false; ··· 139 return test_bit(num, cpu_hwcaps); 140 } 141 142 static inline void cpus_set_cap(unsigned int num) 143 { 144 if (num >= ARM64_NCAPS) { ··· 154 num, ARM64_NCAPS); 155 } else { 156 __set_bit(num, cpu_hwcaps); 157 - static_branch_enable(&cpu_hwcap_keys[num]); 158 } 159 } 160
··· 115 116 extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); 117 extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS]; 118 + extern struct static_key_false arm64_const_caps_ready; 119 120 bool this_cpu_has_cap(unsigned int cap); 121 ··· 124 } 125 126 /* System capability check for constant caps */ 127 + static inline bool __cpus_have_const_cap(int num) 128 { 129 if (num >= ARM64_NCAPS) 130 return false; ··· 138 return test_bit(num, cpu_hwcaps); 139 } 140 141 + static inline bool cpus_have_const_cap(int num) 142 + { 143 + if (static_branch_likely(&arm64_const_caps_ready)) 144 + return __cpus_have_const_cap(num); 145 + else 146 + return cpus_have_cap(num); 147 + } 148 + 149 static inline void cpus_set_cap(unsigned int num) 150 { 151 if (num >= ARM64_NCAPS) { ··· 145 num, ARM64_NCAPS); 146 } else { 147 __set_bit(num, cpu_hwcaps); 148 } 149 } 150
+6 -2
arch/arm64/include/asm/kvm_host.h
··· 24 25 #include <linux/types.h> 26 #include <linux/kvm_types.h> 27 #include <asm/kvm.h> 28 #include <asm/kvm_asm.h> 29 #include <asm/kvm_mmio.h> ··· 356 unsigned long vector_ptr) 357 { 358 /* 359 - * Call initialization code, and switch to the full blown 360 - * HYP code. 361 */ 362 __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr); 363 } 364
··· 24 25 #include <linux/types.h> 26 #include <linux/kvm_types.h> 27 + #include <asm/cpufeature.h> 28 #include <asm/kvm.h> 29 #include <asm/kvm_asm.h> 30 #include <asm/kvm_mmio.h> ··· 355 unsigned long vector_ptr) 356 { 357 /* 358 + * Call initialization code, and switch to the full blown HYP code. 359 + * If the cpucaps haven't been finalized yet, something has gone very 360 + * wrong, and hyp will crash and burn when it uses any 361 + * cpus_have_const_cap() wrapper. 362 */ 363 + BUG_ON(!static_branch_likely(&arm64_const_caps_ready)); 364 __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr); 365 } 366
+21 -2
arch/arm64/kernel/cpufeature.c
··· 985 */ 986 void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps) 987 { 988 - for (; caps->matches; caps++) 989 - if (caps->enable && cpus_have_cap(caps->capability)) 990 /* 991 * Use stop_machine() as it schedules the work allowing 992 * us to modify PSTATE, instead of on_each_cpu() which ··· 1002 * we return. 1003 */ 1004 stop_machine(caps->enable, NULL, cpu_online_mask); 1005 } 1006 1007 /* ··· 1106 enable_cpu_capabilities(arm64_features); 1107 } 1108 1109 /* 1110 * Check if the current CPU has a given feature capability. 1111 * Should be called from non-preemptible context. ··· 1149 /* Set the CPU feature capabilies */ 1150 setup_feature_capabilities(); 1151 enable_errata_workarounds(); 1152 setup_elf_hwcaps(arm64_elf_hwcaps); 1153 1154 if (system_supports_32bit_el0())
··· 985 */ 986 void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps) 987 { 988 + for (; caps->matches; caps++) { 989 + unsigned int num = caps->capability; 990 + 991 + if (!cpus_have_cap(num)) 992 + continue; 993 + 994 + /* Ensure cpus_have_const_cap(num) works */ 995 + static_branch_enable(&cpu_hwcap_keys[num]); 996 + 997 + if (caps->enable) { 998 /* 999 * Use stop_machine() as it schedules the work allowing 1000 * us to modify PSTATE, instead of on_each_cpu() which ··· 994 * we return. 995 */ 996 stop_machine(caps->enable, NULL, cpu_online_mask); 997 + } 998 + } 999 } 1000 1001 /* ··· 1096 enable_cpu_capabilities(arm64_features); 1097 } 1098 1099 + DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready); 1100 + EXPORT_SYMBOL(arm64_const_caps_ready); 1101 + 1102 + static void __init mark_const_caps_ready(void) 1103 + { 1104 + static_branch_enable(&arm64_const_caps_ready); 1105 + } 1106 + 1107 /* 1108 * Check if the current CPU has a given feature capability. 1109 * Should be called from non-preemptible context. ··· 1131 /* Set the CPU feature capabilies */ 1132 setup_feature_capabilities(); 1133 enable_errata_workarounds(); 1134 + mark_const_caps_ready(); 1135 setup_elf_hwcaps(arm64_elf_hwcaps); 1136 1137 if (system_supports_32bit_el0())
+16 -7
arch/arm64/kernel/perf_event.c
··· 877 878 if (attr->exclude_idle) 879 return -EPERM; 880 - if (is_kernel_in_hyp_mode() && 881 - attr->exclude_kernel != attr->exclude_hv) 882 - return -EINVAL; 883 if (attr->exclude_user) 884 config_base |= ARMV8_PMU_EXCLUDE_EL0; 885 - if (!is_kernel_in_hyp_mode() && attr->exclude_kernel) 886 - config_base |= ARMV8_PMU_EXCLUDE_EL1; 887 - if (!attr->exclude_hv) 888 - config_base |= ARMV8_PMU_INCLUDE_EL2; 889 890 /* 891 * Install the filter into config_base as this is used to
··· 877 878 if (attr->exclude_idle) 879 return -EPERM; 880 + 881 + /* 882 + * If we're running in hyp mode, then we *are* the hypervisor. 883 + * Therefore we ignore exclude_hv in this configuration, since 884 + * there's no hypervisor to sample anyway. This is consistent 885 + * with other architectures (x86 and Power). 886 + */ 887 + if (is_kernel_in_hyp_mode()) { 888 + if (!attr->exclude_kernel) 889 + config_base |= ARMV8_PMU_INCLUDE_EL2; 890 + } else { 891 + if (attr->exclude_kernel) 892 + config_base |= ARMV8_PMU_EXCLUDE_EL1; 893 + if (!attr->exclude_hv) 894 + config_base |= ARMV8_PMU_INCLUDE_EL2; 895 + } 896 if (attr->exclude_user) 897 config_base |= ARMV8_PMU_EXCLUDE_EL0; 898 899 /* 900 * Install the filter into config_base as this is used to
+2
arch/arm64/kvm/hyp/Makefile
··· 2 # Makefile for Kernel-based Virtual Machine module, HYP part 3 # 4 5 KVM=../../../../virt/kvm 6 7 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
··· 2 # Makefile for Kernel-based Virtual Machine module, HYP part 3 # 4 5 + ccflags-y += -fno-stack-protector 6 + 7 KVM=../../../../virt/kvm 8 9 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
+3 -2
arch/arm64/net/bpf_jit_comp.c
··· 253 */ 254 off = offsetof(struct bpf_array, ptrs); 255 emit_a64_mov_i64(tmp, off, ctx); 256 - emit(A64_LDR64(tmp, r2, tmp), ctx); 257 - emit(A64_LDR64(prg, tmp, r3), ctx); 258 emit(A64_CBZ(1, prg, jmp_offset), ctx); 259 260 /* goto *(prog->bpf_func + prologue_size); */
··· 253 */ 254 off = offsetof(struct bpf_array, ptrs); 255 emit_a64_mov_i64(tmp, off, ctx); 256 + emit(A64_ADD(1, tmp, r2, tmp), ctx); 257 + emit(A64_LSL(1, prg, r3, 3), ctx); 258 + emit(A64_LDR64(prg, tmp, prg), ctx); 259 emit(A64_CBZ(1, prg, jmp_offset), ctx); 260 261 /* goto *(prog->bpf_func + prologue_size); */
+4
arch/powerpc/include/asm/module.h
··· 14 #include <asm-generic/module.h> 15 16 17 #ifndef __powerpc64__ 18 /* 19 * Thanks to Paul M for explaining this.
··· 14 #include <asm-generic/module.h> 15 16 17 + #ifdef CC_USING_MPROFILE_KERNEL 18 + #define MODULE_ARCH_VERMAGIC "mprofile-kernel" 19 + #endif 20 + 21 #ifndef __powerpc64__ 22 /* 23 * Thanks to Paul M for explaining this.
+12
arch/powerpc/include/asm/page.h
··· 132 #define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT) 133 #define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr)) 134 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) 135 #define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr)) 136 137 /* 138 * On Book-E parts we need __va to parse the device tree and we can't
··· 132 #define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT) 133 #define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr)) 134 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) 135 + 136 + #ifdef CONFIG_PPC_BOOK3S_64 137 + /* 138 + * On hash the vmalloc and other regions alias to the kernel region when passed 139 + * through __pa(), which virt_to_pfn() uses. That means virt_addr_valid() can 140 + * return true for some vmalloc addresses, which is incorrect. So explicitly 141 + * check that the address is in the kernel region. 142 + */ 143 + #define virt_addr_valid(kaddr) (REGION_ID(kaddr) == KERNEL_REGION_ID && \ 144 + pfn_valid(virt_to_pfn(kaddr))) 145 + #else 146 #define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr)) 147 + #endif 148 149 /* 150 * On Book-E parts we need __va to parse the device tree and we can't
+1 -1
arch/powerpc/kernel/idle_book3s.S
··· 416 * which needs to be restored from the stack. 417 */ 418 li r3, 1 419 - stb r0,PACA_NAPSTATELOST(r13) 420 blr 421 422 /*
··· 416 * which needs to be restored from the stack. 417 */ 418 li r3, 1 419 + stb r3,PACA_NAPSTATELOST(r13) 420 blr 421 422 /*
+2 -1
arch/powerpc/kernel/kprobes.c
··· 305 save_previous_kprobe(kcb); 306 set_current_kprobe(p, regs, kcb); 307 kprobes_inc_nmissed_count(p); 308 - prepare_singlestep(p, regs); 309 kcb->kprobe_status = KPROBE_REENTER; 310 if (p->ainsn.boostable >= 0) { 311 ret = try_to_emulate(p, regs); 312 313 if (ret > 0) { 314 restore_previous_kprobe(kcb); 315 return 1; 316 } 317 } 318 return 1; 319 } else { 320 if (*addr != BREAKPOINT_INSTRUCTION) {
··· 305 save_previous_kprobe(kcb); 306 set_current_kprobe(p, regs, kcb); 307 kprobes_inc_nmissed_count(p); 308 kcb->kprobe_status = KPROBE_REENTER; 309 if (p->ainsn.boostable >= 0) { 310 ret = try_to_emulate(p, regs); 311 312 if (ret > 0) { 313 restore_previous_kprobe(kcb); 314 + preempt_enable_no_resched(); 315 return 1; 316 } 317 } 318 + prepare_singlestep(p, regs); 319 return 1; 320 } else { 321 if (*addr != BREAKPOINT_INSTRUCTION) {
+19
arch/powerpc/kernel/process.c
··· 864 if (!MSR_TM_SUSPENDED(mfmsr())) 865 return; 866 867 giveup_all(container_of(thr, struct task_struct, thread)); 868 869 tm_reclaim(thr, thr->ckpt_regs.msr, cause);
··· 864 if (!MSR_TM_SUSPENDED(mfmsr())) 865 return; 866 867 + /* 868 + * If we are in a transaction and FP is off then we can't have 869 + * used FP inside that transaction. Hence the checkpointed 870 + * state is the same as the live state. We need to copy the 871 + * live state to the checkpointed state so that when the 872 + * transaction is restored, the checkpointed state is correct 873 + * and the aborted transaction sees the correct state. We use 874 + * ckpt_regs.msr here as that's what tm_reclaim will use to 875 + * determine if it's going to write the checkpointed state or 876 + * not. So either this will write the checkpointed registers, 877 + * or reclaim will. Similarly for VMX. 878 + */ 879 + if ((thr->ckpt_regs.msr & MSR_FP) == 0) 880 + memcpy(&thr->ckfp_state, &thr->fp_state, 881 + sizeof(struct thread_fp_state)); 882 + if ((thr->ckpt_regs.msr & MSR_VEC) == 0) 883 + memcpy(&thr->ckvr_state, &thr->vr_state, 884 + sizeof(struct thread_vr_state)); 885 + 886 giveup_all(container_of(thr, struct task_struct, thread)); 887 888 tm_reclaim(thr, thr->ckpt_regs.msr, cause);
+1 -1
arch/powerpc/kvm/Kconfig
··· 67 select KVM_BOOK3S_64_HANDLER 68 select KVM 69 select KVM_BOOK3S_PR_POSSIBLE if !KVM_BOOK3S_HV_POSSIBLE 70 - select SPAPR_TCE_IOMMU if IOMMU_SUPPORT 71 ---help--- 72 Support running unmodified book3s_64 and book3s_32 guest kernels 73 in virtual machines on book3s_64 host processors.
··· 67 select KVM_BOOK3S_64_HANDLER 68 select KVM 69 select KVM_BOOK3S_PR_POSSIBLE if !KVM_BOOK3S_HV_POSSIBLE 70 + select SPAPR_TCE_IOMMU if IOMMU_SUPPORT && (PPC_SERIES || PPC_POWERNV) 71 ---help--- 72 Support running unmodified book3s_64 and book3s_32 guest kernels 73 in virtual machines on book3s_64 host processors.
+2 -2
arch/powerpc/kvm/Makefile
··· 46 e500_emulate.o 47 kvm-objs-$(CONFIG_KVM_E500MC) := $(kvm-e500mc-objs) 48 49 - kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) := \ 50 book3s_64_vio_hv.o 51 52 kvm-pr-y := \ ··· 90 book3s_xics.o 91 92 kvm-book3s_64-objs-$(CONFIG_KVM_XIVE) += book3s_xive.o 93 94 kvm-book3s_64-module-objs := \ 95 $(common-objs-y) \ 96 book3s.o \ 97 - book3s_64_vio.o \ 98 book3s_rtas.o \ 99 $(kvm-book3s_64-objs-y) 100
··· 46 e500_emulate.o 47 kvm-objs-$(CONFIG_KVM_E500MC) := $(kvm-e500mc-objs) 48 49 + kvm-book3s_64-builtin-objs-$(CONFIG_SPAPR_TCE_IOMMU) := \ 50 book3s_64_vio_hv.o 51 52 kvm-pr-y := \ ··· 90 book3s_xics.o 91 92 kvm-book3s_64-objs-$(CONFIG_KVM_XIVE) += book3s_xive.o 93 + kvm-book3s_64-objs-$(CONFIG_SPAPR_TCE_IOMMU) += book3s_64_vio.o 94 95 kvm-book3s_64-module-objs := \ 96 $(common-objs-y) \ 97 book3s.o \ 98 book3s_rtas.o \ 99 $(kvm-book3s_64-objs-y) 100
+13
arch/powerpc/kvm/book3s_64_vio_hv.c
··· 301 /* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */ 302 /* liobn, ioba, tce); */ 303 304 stt = kvmppc_find_table(vcpu->kvm, liobn); 305 if (!stt) 306 return H_TOO_HARD; ··· 384 unsigned long *rmap = NULL; 385 bool prereg = false; 386 struct kvmppc_spapr_tce_iommu_table *stit; 387 388 stt = kvmppc_find_table(vcpu->kvm, liobn); 389 if (!stt) ··· 499 long i, ret; 500 struct kvmppc_spapr_tce_iommu_table *stit; 501 502 stt = kvmppc_find_table(vcpu->kvm, liobn); 503 if (!stt) 504 return H_TOO_HARD; ··· 539 return H_SUCCESS; 540 } 541 542 long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn, 543 unsigned long ioba) 544 {
··· 301 /* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */ 302 /* liobn, ioba, tce); */ 303 304 + /* For radix, we might be in virtual mode, so punt */ 305 + if (kvm_is_radix(vcpu->kvm)) 306 + return H_TOO_HARD; 307 + 308 stt = kvmppc_find_table(vcpu->kvm, liobn); 309 if (!stt) 310 return H_TOO_HARD; ··· 380 unsigned long *rmap = NULL; 381 bool prereg = false; 382 struct kvmppc_spapr_tce_iommu_table *stit; 383 + 384 + /* For radix, we might be in virtual mode, so punt */ 385 + if (kvm_is_radix(vcpu->kvm)) 386 + return H_TOO_HARD; 387 388 stt = kvmppc_find_table(vcpu->kvm, liobn); 389 if (!stt) ··· 491 long i, ret; 492 struct kvmppc_spapr_tce_iommu_table *stit; 493 494 + /* For radix, we might be in virtual mode, so punt */ 495 + if (kvm_is_radix(vcpu->kvm)) 496 + return H_TOO_HARD; 497 + 498 stt = kvmppc_find_table(vcpu->kvm, liobn); 499 if (!stt) 500 return H_TOO_HARD; ··· 527 return H_SUCCESS; 528 } 529 530 + /* This can be called in either virtual mode or real mode */ 531 long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn, 532 unsigned long ioba) 533 {
+8 -1
arch/powerpc/kvm/book3s_hv_builtin.c
··· 207 208 long kvmppc_h_random(struct kvm_vcpu *vcpu) 209 { 210 - if (powernv_get_random_real_mode(&vcpu->arch.gpr[4])) 211 return H_SUCCESS; 212 213 return H_HARDWARE;
··· 207 208 long kvmppc_h_random(struct kvm_vcpu *vcpu) 209 { 210 + int r; 211 + 212 + /* Only need to do the expensive mfmsr() on radix */ 213 + if (kvm_is_radix(vcpu->kvm) && (mfmsr() & MSR_IR)) 214 + r = powernv_get_random_long(&vcpu->arch.gpr[4]); 215 + else 216 + r = powernv_get_random_real_mode(&vcpu->arch.gpr[4]); 217 + if (r) 218 return H_SUCCESS; 219 220 return H_HARDWARE;
+58 -22
arch/powerpc/kvm/book3s_pr_papr.c
··· 50 pteg_addr = get_pteg_addr(vcpu, pte_index); 51 52 mutex_lock(&vcpu->kvm->arch.hpt_mutex); 53 - copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg)); 54 hpte = pteg; 55 56 ret = H_PTEG_FULL; ··· 73 hpte[0] = cpu_to_be64(kvmppc_get_gpr(vcpu, 6)); 74 hpte[1] = cpu_to_be64(kvmppc_get_gpr(vcpu, 7)); 75 pteg_addr += i * HPTE_SIZE; 76 - copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE); 77 kvmppc_set_gpr(vcpu, 4, pte_index | i); 78 ret = H_SUCCESS; 79 ··· 97 98 pteg = get_pteg_addr(vcpu, pte_index); 99 mutex_lock(&vcpu->kvm->arch.hpt_mutex); 100 - copy_from_user(pte, (void __user *)pteg, sizeof(pte)); 101 pte[0] = be64_to_cpu((__force __be64)pte[0]); 102 pte[1] = be64_to_cpu((__force __be64)pte[1]); 103 ··· 109 ((flags & H_ANDCOND) && (pte[0] & avpn) != 0)) 110 goto done; 111 112 - copy_to_user((void __user *)pteg, &v, sizeof(v)); 113 114 rb = compute_tlbie_rb(pte[0], pte[1], pte_index); 115 vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); ··· 179 } 180 181 pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX); 182 - copy_from_user(pte, (void __user *)pteg, sizeof(pte)); 183 pte[0] = be64_to_cpu((__force __be64)pte[0]); 184 pte[1] = be64_to_cpu((__force __be64)pte[1]); 185 ··· 195 tsh |= H_BULK_REMOVE_NOT_FOUND; 196 } else { 197 /* Splat the pteg in (userland) hpt */ 198 - copy_to_user((void __user *)pteg, &v, sizeof(v)); 199 200 rb = compute_tlbie_rb(pte[0], pte[1], 201 tsh & H_BULK_REMOVE_PTEX); ··· 225 226 pteg = get_pteg_addr(vcpu, pte_index); 227 mutex_lock(&vcpu->kvm->arch.hpt_mutex); 228 - copy_from_user(pte, (void __user *)pteg, sizeof(pte)); 229 pte[0] = be64_to_cpu((__force __be64)pte[0]); 230 pte[1] = be64_to_cpu((__force __be64)pte[1]); 231 ··· 250 vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); 251 pte[0] = (__force u64)cpu_to_be64(pte[0]); 252 pte[1] = (__force u64)cpu_to_be64(pte[1]); 253 - copy_to_user((void __user *)pteg, pte, sizeof(pte)); 254 ret = H_SUCCESS; 255 256 done: 257 mutex_unlock(&vcpu->kvm->arch.hpt_mutex); 258 kvmppc_set_gpr(vcpu, 3, ret); 259 260 - return EMULATE_DONE; 261 - } 262 - 263 - static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu) 264 - { 265 - unsigned long liobn = kvmppc_get_gpr(vcpu, 4); 266 - unsigned long ioba = kvmppc_get_gpr(vcpu, 5); 267 - unsigned long tce = kvmppc_get_gpr(vcpu, 6); 268 - long rc; 269 - 270 - rc = kvmppc_h_put_tce(vcpu, liobn, ioba, tce); 271 - if (rc == H_TOO_HARD) 272 - return EMULATE_FAIL; 273 - kvmppc_set_gpr(vcpu, 3, rc); 274 return EMULATE_DONE; 275 } 276 ··· 278 long rc; 279 280 rc = kvmppc_h_logical_ci_store(vcpu); 281 if (rc == H_TOO_HARD) 282 return EMULATE_FAIL; 283 kvmppc_set_gpr(vcpu, 3, rc); ··· 329 kvmppc_set_gpr(vcpu, 3, rc); 330 return EMULATE_DONE; 331 } 332 333 static int kvmppc_h_pr_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd) 334 {
··· 50 pteg_addr = get_pteg_addr(vcpu, pte_index); 51 52 mutex_lock(&vcpu->kvm->arch.hpt_mutex); 53 + ret = H_FUNCTION; 54 + if (copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg))) 55 + goto done; 56 hpte = pteg; 57 58 ret = H_PTEG_FULL; ··· 71 hpte[0] = cpu_to_be64(kvmppc_get_gpr(vcpu, 6)); 72 hpte[1] = cpu_to_be64(kvmppc_get_gpr(vcpu, 7)); 73 pteg_addr += i * HPTE_SIZE; 74 + ret = H_FUNCTION; 75 + if (copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE)) 76 + goto done; 77 kvmppc_set_gpr(vcpu, 4, pte_index | i); 78 ret = H_SUCCESS; 79 ··· 93 94 pteg = get_pteg_addr(vcpu, pte_index); 95 mutex_lock(&vcpu->kvm->arch.hpt_mutex); 96 + ret = H_FUNCTION; 97 + if (copy_from_user(pte, (void __user *)pteg, sizeof(pte))) 98 + goto done; 99 pte[0] = be64_to_cpu((__force __be64)pte[0]); 100 pte[1] = be64_to_cpu((__force __be64)pte[1]); 101 ··· 103 ((flags & H_ANDCOND) && (pte[0] & avpn) != 0)) 104 goto done; 105 106 + ret = H_FUNCTION; 107 + if (copy_to_user((void __user *)pteg, &v, sizeof(v))) 108 + goto done; 109 110 rb = compute_tlbie_rb(pte[0], pte[1], pte_index); 111 vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); ··· 171 } 172 173 pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX); 174 + if (copy_from_user(pte, (void __user *)pteg, sizeof(pte))) { 175 + ret = H_FUNCTION; 176 + break; 177 + } 178 pte[0] = be64_to_cpu((__force __be64)pte[0]); 179 pte[1] = be64_to_cpu((__force __be64)pte[1]); 180 ··· 184 tsh |= H_BULK_REMOVE_NOT_FOUND; 185 } else { 186 /* Splat the pteg in (userland) hpt */ 187 + if (copy_to_user((void __user *)pteg, &v, sizeof(v))) { 188 + ret = H_FUNCTION; 189 + break; 190 + } 191 192 rb = compute_tlbie_rb(pte[0], pte[1], 193 tsh & H_BULK_REMOVE_PTEX); ··· 211 212 pteg = get_pteg_addr(vcpu, pte_index); 213 mutex_lock(&vcpu->kvm->arch.hpt_mutex); 214 + ret = H_FUNCTION; 215 + if (copy_from_user(pte, (void __user *)pteg, sizeof(pte))) 216 + goto done; 217 pte[0] = be64_to_cpu((__force __be64)pte[0]); 218 pte[1] = be64_to_cpu((__force __be64)pte[1]); 219 ··· 234 vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); 235 pte[0] = (__force u64)cpu_to_be64(pte[0]); 236 pte[1] = (__force u64)cpu_to_be64(pte[1]); 237 + ret = H_FUNCTION; 238 + if (copy_to_user((void __user *)pteg, pte, sizeof(pte))) 239 + goto done; 240 ret = H_SUCCESS; 241 242 done: 243 mutex_unlock(&vcpu->kvm->arch.hpt_mutex); 244 kvmppc_set_gpr(vcpu, 3, ret); 245 246 return EMULATE_DONE; 247 } 248 ··· 274 long rc; 275 276 rc = kvmppc_h_logical_ci_store(vcpu); 277 + if (rc == H_TOO_HARD) 278 + return EMULATE_FAIL; 279 + kvmppc_set_gpr(vcpu, 3, rc); 280 + return EMULATE_DONE; 281 + } 282 + 283 + #ifdef CONFIG_SPAPR_TCE_IOMMU 284 + static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu) 285 + { 286 + unsigned long liobn = kvmppc_get_gpr(vcpu, 4); 287 + unsigned long ioba = kvmppc_get_gpr(vcpu, 5); 288 + unsigned long tce = kvmppc_get_gpr(vcpu, 6); 289 + long rc; 290 + 291 + rc = kvmppc_h_put_tce(vcpu, liobn, ioba, tce); 292 if (rc == H_TOO_HARD) 293 return EMULATE_FAIL; 294 kvmppc_set_gpr(vcpu, 3, rc); ··· 310 kvmppc_set_gpr(vcpu, 3, rc); 311 return EMULATE_DONE; 312 } 313 + 314 + #else /* CONFIG_SPAPR_TCE_IOMMU */ 315 + static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu) 316 + { 317 + return EMULATE_FAIL; 318 + } 319 + 320 + static int kvmppc_h_pr_put_tce_indirect(struct kvm_vcpu *vcpu) 321 + { 322 + return EMULATE_FAIL; 323 + } 324 + 325 + static int kvmppc_h_pr_stuff_tce(struct kvm_vcpu *vcpu) 326 + { 327 + return EMULATE_FAIL; 328 + } 329 + #endif /* CONFIG_SPAPR_TCE_IOMMU */ 330 331 static int kvmppc_h_pr_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd) 332 {
+3 -1
arch/powerpc/kvm/powerpc.c
··· 1749 r = kvm_vm_ioctl_enable_cap(kvm, &cap); 1750 break; 1751 } 1752 - #ifdef CONFIG_PPC_BOOK3S_64 1753 case KVM_CREATE_SPAPR_TCE_64: { 1754 struct kvm_create_spapr_tce_64 create_tce_64; 1755 ··· 1780 r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64); 1781 goto out; 1782 } 1783 case KVM_PPC_GET_SMMU_INFO: { 1784 struct kvm_ppc_smmu_info info; 1785 struct kvm *kvm = filp->private_data;
··· 1749 r = kvm_vm_ioctl_enable_cap(kvm, &cap); 1750 break; 1751 } 1752 + #ifdef CONFIG_SPAPR_TCE_IOMMU 1753 case KVM_CREATE_SPAPR_TCE_64: { 1754 struct kvm_create_spapr_tce_64 create_tce_64; 1755 ··· 1780 r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64); 1781 goto out; 1782 } 1783 + #endif 1784 + #ifdef CONFIG_PPC_BOOK3S_64 1785 case KVM_PPC_GET_SMMU_INFO: { 1786 struct kvm_ppc_smmu_info info; 1787 struct kvm *kvm = filp->private_data;
+4 -3
arch/powerpc/mm/dump_linuxpagetables.c
··· 16 */ 17 #include <linux/debugfs.h> 18 #include <linux/fs.h> 19 #include <linux/io.h> 20 #include <linux/mm.h> 21 #include <linux/sched.h> ··· 392 393 for (i = 0; i < PTRS_PER_PMD; i++, pmd++) { 394 addr = start + i * PMD_SIZE; 395 - if (!pmd_none(*pmd)) 396 /* pmd exists */ 397 walk_pte(st, pmd, addr); 398 else ··· 408 409 for (i = 0; i < PTRS_PER_PUD; i++, pud++) { 410 addr = start + i * PUD_SIZE; 411 - if (!pud_none(*pud)) 412 /* pud exists */ 413 walk_pmd(st, pud, addr); 414 else ··· 428 */ 429 for (i = 0; i < PTRS_PER_PGD; i++, pgd++) { 430 addr = KERN_VIRT_START + i * PGDIR_SIZE; 431 - if (!pgd_none(*pgd)) 432 /* pgd exists */ 433 walk_pud(st, pgd, addr); 434 else
··· 16 */ 17 #include <linux/debugfs.h> 18 #include <linux/fs.h> 19 + #include <linux/hugetlb.h> 20 #include <linux/io.h> 21 #include <linux/mm.h> 22 #include <linux/sched.h> ··· 391 392 for (i = 0; i < PTRS_PER_PMD; i++, pmd++) { 393 addr = start + i * PMD_SIZE; 394 + if (!pmd_none(*pmd) && !pmd_huge(*pmd)) 395 /* pmd exists */ 396 walk_pte(st, pmd, addr); 397 else ··· 407 408 for (i = 0; i < PTRS_PER_PUD; i++, pud++) { 409 addr = start + i * PUD_SIZE; 410 + if (!pud_none(*pud) && !pud_huge(*pud)) 411 /* pud exists */ 412 walk_pmd(st, pud, addr); 413 else ··· 427 */ 428 for (i = 0; i < PTRS_PER_PGD; i++, pgd++) { 429 addr = KERN_VIRT_START + i * PGDIR_SIZE; 430 + if (!pgd_none(*pgd) && !pgd_huge(*pgd)) 431 /* pgd exists */ 432 walk_pud(st, pgd, addr); 433 else
+2 -1
arch/s390/include/asm/debug.h
··· 10 #include <linux/spinlock.h> 11 #include <linux/kernel.h> 12 #include <linux/time.h> 13 #include <uapi/asm/debug.h> 14 15 #define DEBUG_MAX_LEVEL 6 /* debug levels range from 0 to 6 */ ··· 32 typedef struct debug_info { 33 struct debug_info* next; 34 struct debug_info* prev; 35 - atomic_t ref_count; 36 spinlock_t lock; 37 int level; 38 int nr_areas;
··· 10 #include <linux/spinlock.h> 11 #include <linux/kernel.h> 12 #include <linux/time.h> 13 + #include <linux/refcount.h> 14 #include <uapi/asm/debug.h> 15 16 #define DEBUG_MAX_LEVEL 6 /* debug levels range from 0 to 6 */ ··· 31 typedef struct debug_info { 32 struct debug_info* next; 33 struct debug_info* prev; 34 + refcount_t ref_count; 35 spinlock_t lock; 36 int level; 37 int nr_areas;
+2
arch/s390/include/asm/dis.h
··· 40 return ((((int) code + 64) >> 7) + 1) << 1; 41 } 42 43 void show_code(struct pt_regs *regs); 44 void print_fn_code(unsigned char *code, unsigned long len); 45 int insn_to_mnemonic(unsigned char *instruction, char *buf, unsigned int len);
··· 40 return ((((int) code + 64) >> 7) + 1) << 1; 41 } 42 43 + struct pt_regs; 44 + 45 void show_code(struct pt_regs *regs); 46 void print_fn_code(unsigned char *code, unsigned long len); 47 int insn_to_mnemonic(unsigned char *instruction, char *buf, unsigned int len);
+10 -10
arch/s390/include/asm/kprobes.h
··· 27 * 2005-Dec Used as a template for s390 by Mike Grundy 28 * <grundym@us.ibm.com> 29 */ 30 #include <asm-generic/kprobes.h> 31 32 #define BREAKPOINT_INSTRUCTION 0x0002 33 34 #ifdef CONFIG_KPROBES 35 - #include <linux/types.h> 36 #include <linux/ptrace.h> 37 #include <linux/percpu.h> 38 #include <linux/sched/task_stack.h> ··· 64 #define kretprobe_blacklist_size 0 65 66 #define KPROBE_SWAP_INST 0x10 67 - 68 - #define FIXUP_PSW_NORMAL 0x08 69 - #define FIXUP_BRANCH_NOT_TAKEN 0x04 70 - #define FIXUP_RETURN_REGISTER 0x02 71 - #define FIXUP_NOT_REQUIRED 0x01 72 73 /* Architecture specific copy of original instruction */ 74 struct arch_specific_insn { ··· 93 int kprobe_fault_handler(struct pt_regs *regs, int trapnr); 94 int kprobe_exceptions_notify(struct notifier_block *self, 95 unsigned long val, void *data); 96 - 97 - int probe_is_prohibited_opcode(u16 *insn); 98 - int probe_get_fixup_type(u16 *insn); 99 - int probe_is_insn_relative_long(u16 *insn); 100 101 #define flush_insn_slot(p) do { } while (0) 102
··· 27 * 2005-Dec Used as a template for s390 by Mike Grundy 28 * <grundym@us.ibm.com> 29 */ 30 + #include <linux/types.h> 31 #include <asm-generic/kprobes.h> 32 33 #define BREAKPOINT_INSTRUCTION 0x0002 34 35 + #define FIXUP_PSW_NORMAL 0x08 36 + #define FIXUP_BRANCH_NOT_TAKEN 0x04 37 + #define FIXUP_RETURN_REGISTER 0x02 38 + #define FIXUP_NOT_REQUIRED 0x01 39 + 40 + int probe_is_prohibited_opcode(u16 *insn); 41 + int probe_get_fixup_type(u16 *insn); 42 + int probe_is_insn_relative_long(u16 *insn); 43 + 44 #ifdef CONFIG_KPROBES 45 #include <linux/ptrace.h> 46 #include <linux/percpu.h> 47 #include <linux/sched/task_stack.h> ··· 55 #define kretprobe_blacklist_size 0 56 57 #define KPROBE_SWAP_INST 0x10 58 59 /* Architecture specific copy of original instruction */ 60 struct arch_specific_insn { ··· 89 int kprobe_fault_handler(struct pt_regs *regs, int trapnr); 90 int kprobe_exceptions_notify(struct notifier_block *self, 91 unsigned long val, void *data); 92 93 #define flush_insn_slot(p) do { } while (0) 94
+1 -1
arch/s390/include/asm/sysinfo.h
··· 146 * Returns the maximum nesting level supported by the cpu topology code. 147 * The current maximum level is 4 which is the drawer level. 148 */ 149 - static inline int topology_mnest_limit(void) 150 { 151 return min(topology_max_mnest, 4); 152 }
··· 146 * Returns the maximum nesting level supported by the cpu topology code. 147 * The current maximum level is 4 which is the drawer level. 148 */ 149 + static inline unsigned char topology_mnest_limit(void) 150 { 151 return min(topology_max_mnest, 4); 152 }
+4 -4
arch/s390/kernel/debug.c
··· 277 memset(rc->views, 0, DEBUG_MAX_VIEWS * sizeof(struct debug_view *)); 278 memset(rc->debugfs_entries, 0 ,DEBUG_MAX_VIEWS * 279 sizeof(struct dentry*)); 280 - atomic_set(&(rc->ref_count), 0); 281 282 return rc; 283 ··· 361 debug_area_last = rc; 362 rc->next = NULL; 363 364 - debug_info_get(rc); 365 out: 366 return rc; 367 } ··· 416 debug_info_get(debug_info_t * db_info) 417 { 418 if (db_info) 419 - atomic_inc(&db_info->ref_count); 420 } 421 422 /* ··· 431 432 if (!db_info) 433 return; 434 - if (atomic_dec_and_test(&db_info->ref_count)) { 435 for (i = 0; i < DEBUG_MAX_VIEWS; i++) { 436 if (!db_info->views[i]) 437 continue;
··· 277 memset(rc->views, 0, DEBUG_MAX_VIEWS * sizeof(struct debug_view *)); 278 memset(rc->debugfs_entries, 0 ,DEBUG_MAX_VIEWS * 279 sizeof(struct dentry*)); 280 + refcount_set(&(rc->ref_count), 0); 281 282 return rc; 283 ··· 361 debug_area_last = rc; 362 rc->next = NULL; 363 364 + refcount_set(&rc->ref_count, 1); 365 out: 366 return rc; 367 } ··· 416 debug_info_get(debug_info_t * db_info) 417 { 418 if (db_info) 419 + refcount_inc(&db_info->ref_count); 420 } 421 422 /* ··· 431 432 if (!db_info) 433 return; 434 + if (refcount_dec_and_test(&db_info->ref_count)) { 435 for (i = 0; i < DEBUG_MAX_VIEWS; i++) { 436 if (!db_info->views[i]) 437 continue;
+18 -3
arch/s390/kernel/entry.S
··· 312 lg %r14,__LC_VDSO_PER_CPU 313 lmg %r0,%r10,__PT_R0(%r11) 314 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) 315 stpt __LC_EXIT_TIMER 316 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER 317 lmg %r11,%r15,__PT_R11(%r11) ··· 624 lg %r14,__LC_VDSO_PER_CPU 625 lmg %r0,%r10,__PT_R0(%r11) 626 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) 627 stpt __LC_EXIT_TIMER 628 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER 629 lmg %r11,%r15,__PT_R11(%r11) ··· 1176 br %r14 1177 1178 .Lcleanup_sysc_restore: 1179 clg %r9,BASED(.Lcleanup_sysc_restore_insn) 1180 je 0f 1181 lg %r9,24(%r11) # get saved pointer to pt_regs 1182 mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) 1183 mvc 0(64,%r11),__PT_R8(%r9) 1184 lmg %r0,%r7,__PT_R0(%r9) 1185 - 0: lmg %r8,%r9,__LC_RETURN_PSW 1186 br %r14 1187 .Lcleanup_sysc_restore_insn: 1188 .quad .Lsysc_done - 4 1189 1190 .Lcleanup_io_tif: ··· 1200 br %r14 1201 1202 .Lcleanup_io_restore: 1203 clg %r9,BASED(.Lcleanup_io_restore_insn) 1204 - je 0f 1205 lg %r9,24(%r11) # get saved r11 pointer to pt_regs 1206 mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) 1207 mvc 0(64,%r11),__PT_R8(%r9) 1208 lmg %r0,%r7,__PT_R0(%r9) 1209 - 0: lmg %r8,%r9,__LC_RETURN_PSW 1210 br %r14 1211 .Lcleanup_io_restore_insn: 1212 .quad .Lio_done - 4 1213 1214 .Lcleanup_idle:
··· 312 lg %r14,__LC_VDSO_PER_CPU 313 lmg %r0,%r10,__PT_R0(%r11) 314 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) 315 + .Lsysc_exit_timer: 316 stpt __LC_EXIT_TIMER 317 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER 318 lmg %r11,%r15,__PT_R11(%r11) ··· 623 lg %r14,__LC_VDSO_PER_CPU 624 lmg %r0,%r10,__PT_R0(%r11) 625 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) 626 + .Lio_exit_timer: 627 stpt __LC_EXIT_TIMER 628 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER 629 lmg %r11,%r15,__PT_R11(%r11) ··· 1174 br %r14 1175 1176 .Lcleanup_sysc_restore: 1177 + # check if stpt has been executed 1178 clg %r9,BASED(.Lcleanup_sysc_restore_insn) 1179 + jh 0f 1180 + mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER 1181 + cghi %r11,__LC_SAVE_AREA_ASYNC 1182 je 0f 1183 + mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER 1184 + 0: clg %r9,BASED(.Lcleanup_sysc_restore_insn+8) 1185 + je 1f 1186 lg %r9,24(%r11) # get saved pointer to pt_regs 1187 mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) 1188 mvc 0(64,%r11),__PT_R8(%r9) 1189 lmg %r0,%r7,__PT_R0(%r9) 1190 + 1: lmg %r8,%r9,__LC_RETURN_PSW 1191 br %r14 1192 .Lcleanup_sysc_restore_insn: 1193 + .quad .Lsysc_exit_timer 1194 .quad .Lsysc_done - 4 1195 1196 .Lcleanup_io_tif: ··· 1190 br %r14 1191 1192 .Lcleanup_io_restore: 1193 + # check if stpt has been executed 1194 clg %r9,BASED(.Lcleanup_io_restore_insn) 1195 + jh 0f 1196 + mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER 1197 + 0: clg %r9,BASED(.Lcleanup_io_restore_insn+8) 1198 + je 1f 1199 lg %r9,24(%r11) # get saved r11 pointer to pt_regs 1200 mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) 1201 mvc 0(64,%r11),__PT_R8(%r9) 1202 lmg %r0,%r7,__PT_R0(%r9) 1203 + 1: lmg %r8,%r9,__LC_RETURN_PSW 1204 br %r14 1205 .Lcleanup_io_restore_insn: 1206 + .quad .Lio_exit_timer 1207 .quad .Lio_done - 4 1208 1209 .Lcleanup_idle:
+4
arch/s390/kernel/ftrace.c
··· 173 return 0; 174 } 175 176 static int __init ftrace_plt_init(void) 177 { 178 unsigned int *ip; ··· 192 return 0; 193 } 194 device_initcall(ftrace_plt_init); 195 196 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 197 /*
··· 173 return 0; 174 } 175 176 + #ifdef CONFIG_MODULES 177 + 178 static int __init ftrace_plt_init(void) 179 { 180 unsigned int *ip; ··· 190 return 0; 191 } 192 device_initcall(ftrace_plt_init); 193 + 194 + #endif /* CONFIG_MODULES */ 195 196 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 197 /*
+7 -1
arch/s390/kernel/vmlinux.lds.S
··· 31 { 32 . = 0x00000000; 33 .text : { 34 - _text = .; /* Text and read-only data */ 35 HEAD_TEXT 36 TEXT_TEXT 37 SCHED_TEXT 38 CPUIDLE_TEXT
··· 31 { 32 . = 0x00000000; 33 .text : { 34 + /* Text and read-only data */ 35 HEAD_TEXT 36 + /* 37 + * E.g. perf doesn't like symbols starting at address zero, 38 + * therefore skip the initial PSW and channel program located 39 + * at address zero and let _text start at 0x200. 40 + */ 41 + _text = 0x200; 42 TEXT_TEXT 43 SCHED_TEXT 44 CPUIDLE_TEXT
+1
arch/s390/lib/probes.c
··· 4 * Copyright IBM Corp. 2014 5 */ 6 7 #include <asm/kprobes.h> 8 #include <asm/dis.h> 9
··· 4 * Copyright IBM Corp. 2014 5 */ 6 7 + #include <linux/errno.h> 8 #include <asm/kprobes.h> 9 #include <asm/dis.h> 10
+2 -2
arch/s390/lib/uaccess.c
··· 337 return 0; 338 done = 0; 339 do { 340 - offset = (size_t)src & ~PAGE_MASK; 341 - len = min(size - done, PAGE_SIZE - offset); 342 if (copy_from_user(dst, src, len)) 343 return -EFAULT; 344 len_str = strnlen(dst, len);
··· 337 return 0; 338 done = 0; 339 do { 340 + offset = (size_t)src & (L1_CACHE_BYTES - 1); 341 + len = min(size - done, L1_CACHE_BYTES - offset); 342 if (copy_from_user(dst, src, len)) 343 return -EFAULT; 344 len_str = strnlen(dst, len);
+4 -2
arch/sparc/include/asm/hugetlb.h
··· 24 static inline int prepare_hugepage_range(struct file *file, 25 unsigned long addr, unsigned long len) 26 { 27 - if (len & ~HPAGE_MASK) 28 return -EINVAL; 29 - if (addr & ~HPAGE_MASK) 30 return -EINVAL; 31 return 0; 32 }
··· 24 static inline int prepare_hugepage_range(struct file *file, 25 unsigned long addr, unsigned long len) 26 { 27 + struct hstate *h = hstate_file(file); 28 + 29 + if (len & ~huge_page_mask(h)) 30 return -EINVAL; 31 + if (addr & ~huge_page_mask(h)) 32 return -EINVAL; 33 return 0; 34 }
+2 -2
arch/sparc/include/asm/pgtable_32.h
··· 91 * ZERO_PAGE is a global shared page that is always zero: used 92 * for zero-mapped memory areas etc.. 93 */ 94 - extern unsigned long empty_zero_page; 95 96 - #define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page)) 97 98 /* 99 * In general all page table modifications should use the V8 atomic
··· 91 * ZERO_PAGE is a global shared page that is always zero: used 92 * for zero-mapped memory areas etc.. 93 */ 94 + extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; 95 96 + #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) 97 98 /* 99 * In general all page table modifications should use the V8 atomic
+1 -1
arch/sparc/include/asm/setup.h
··· 16 */ 17 extern unsigned char boot_cpu_id; 18 19 - extern unsigned long empty_zero_page; 20 21 extern int serial_console; 22 static inline int con_is_present(void)
··· 16 */ 17 extern unsigned char boot_cpu_id; 18 19 + extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; 20 21 extern int serial_console; 22 static inline int con_is_present(void)
+7 -8
arch/sparc/kernel/ftrace.c
··· 130 if (unlikely(atomic_read(&current->tracing_graph_pause))) 131 return parent + 8UL; 132 133 if (ftrace_push_return_trace(parent, self_addr, &trace.depth, 134 frame_pointer, NULL) == -EBUSY) 135 return parent + 8UL; 136 - 137 - trace.func = self_addr; 138 - 139 - /* Only trace if the calling function expects to */ 140 - if (!ftrace_graph_entry(&trace)) { 141 - current->curr_ret_stack--; 142 - return parent + 8UL; 143 - } 144 145 return return_hooker; 146 }
··· 130 if (unlikely(atomic_read(&current->tracing_graph_pause))) 131 return parent + 8UL; 132 133 + trace.func = self_addr; 134 + trace.depth = current->curr_ret_stack + 1; 135 + 136 + /* Only trace if the calling function expects to */ 137 + if (!ftrace_graph_entry(&trace)) 138 + return parent + 8UL; 139 + 140 if (ftrace_push_return_trace(parent, self_addr, &trace.depth, 141 frame_pointer, NULL) == -EBUSY) 142 return parent + 8UL; 143 144 return return_hooker; 145 }
+1 -1
arch/sparc/mm/init_32.c
··· 290 291 292 /* Saves us work later. */ 293 - memset((void *)&empty_zero_page, 0, PAGE_SIZE); 294 295 i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5); 296 i += 1;
··· 290 291 292 /* Saves us work later. */ 293 + memset((void *)empty_zero_page, 0, PAGE_SIZE); 294 295 i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5); 296 i += 1;
+1 -1
arch/x86/include/asm/kvm_host.h
··· 43 #define KVM_PRIVATE_MEM_SLOTS 3 44 #define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS) 45 46 - #define KVM_HALT_POLL_NS_DEFAULT 400000 47 48 #define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS 49
··· 43 #define KVM_PRIVATE_MEM_SLOTS 3 44 #define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS) 45 46 + #define KVM_HALT_POLL_NS_DEFAULT 200000 47 48 #define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS 49
+6 -5
arch/x86/include/asm/uaccess.h
··· 319 #define __get_user_asm_u64(x, ptr, retval, errret) \ 320 ({ \ 321 __typeof__(ptr) __ptr = (ptr); \ 322 - asm volatile(ASM_STAC "\n" \ 323 "1: movl %2,%%eax\n" \ 324 "2: movl %3,%%edx\n" \ 325 - "3: " ASM_CLAC "\n" \ 326 ".section .fixup,\"ax\"\n" \ 327 "4: mov %4,%0\n" \ 328 " xorl %%eax,%%eax\n" \ ··· 331 ".previous\n" \ 332 _ASM_EXTABLE(1b, 4b) \ 333 _ASM_EXTABLE(2b, 4b) \ 334 - : "=r" (retval), "=A"(x) \ 335 : "m" (__m(__ptr)), "m" __m(((u32 *)(__ptr)) + 1), \ 336 "i" (errret), "0" (retval)); \ 337 }) ··· 703 #define unsafe_put_user(x, ptr, err_label) \ 704 do { \ 705 int __pu_err; \ 706 - __put_user_size((x), (ptr), sizeof(*(ptr)), __pu_err, -EFAULT); \ 707 if (unlikely(__pu_err)) goto err_label; \ 708 } while (0) 709 710 #define unsafe_get_user(x, ptr, err_label) \ 711 do { \ 712 int __gu_err; \ 713 - unsigned long __gu_val; \ 714 __get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT); \ 715 (x) = (__force __typeof__(*(ptr)))__gu_val; \ 716 if (unlikely(__gu_err)) goto err_label; \
··· 319 #define __get_user_asm_u64(x, ptr, retval, errret) \ 320 ({ \ 321 __typeof__(ptr) __ptr = (ptr); \ 322 + asm volatile("\n" \ 323 "1: movl %2,%%eax\n" \ 324 "2: movl %3,%%edx\n" \ 325 + "3:\n" \ 326 ".section .fixup,\"ax\"\n" \ 327 "4: mov %4,%0\n" \ 328 " xorl %%eax,%%eax\n" \ ··· 331 ".previous\n" \ 332 _ASM_EXTABLE(1b, 4b) \ 333 _ASM_EXTABLE(2b, 4b) \ 334 + : "=r" (retval), "=&A"(x) \ 335 : "m" (__m(__ptr)), "m" __m(((u32 *)(__ptr)) + 1), \ 336 "i" (errret), "0" (retval)); \ 337 }) ··· 703 #define unsafe_put_user(x, ptr, err_label) \ 704 do { \ 705 int __pu_err; \ 706 + __typeof__(*(ptr)) __pu_val = (x); \ 707 + __put_user_size(__pu_val, (ptr), sizeof(*(ptr)), __pu_err, -EFAULT); \ 708 if (unlikely(__pu_err)) goto err_label; \ 709 } while (0) 710 711 #define unsafe_get_user(x, ptr, err_label) \ 712 do { \ 713 int __gu_err; \ 714 + __inttype(*(ptr)) __gu_val; \ 715 __get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT); \ 716 (x) = (__force __typeof__(*(ptr)))__gu_val; \ 717 if (unlikely(__gu_err)) goto err_label; \
+1
arch/x86/kernel/fpu/init.c
··· 90 * Boot time FPU feature detection code: 91 */ 92 unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu; 93 94 static void __init fpu__init_system_mxcsr(void) 95 {
··· 90 * Boot time FPU feature detection code: 91 */ 92 unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu; 93 + EXPORT_SYMBOL_GPL(mxcsr_feature_mask); 94 95 static void __init fpu__init_system_mxcsr(void) 96 {
+1 -1
arch/x86/kvm/emulate.c
··· 4173 4174 static int check_svme(struct x86_emulate_ctxt *ctxt) 4175 { 4176 - u64 efer; 4177 4178 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); 4179
··· 4173 4174 static int check_svme(struct x86_emulate_ctxt *ctxt) 4175 { 4176 + u64 efer = 0; 4177 4178 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); 4179
+21 -14
arch/x86/kvm/paging_tmpl.h
··· 283 pt_element_t pte; 284 pt_element_t __user *uninitialized_var(ptep_user); 285 gfn_t table_gfn; 286 - unsigned index, pt_access, pte_access, accessed_dirty, pte_pkey; 287 unsigned nested_access; 288 gpa_t pte_gpa; 289 bool have_ad; 290 int offset; 291 const int write_fault = access & PFERR_WRITE_MASK; 292 const int user_fault = access & PFERR_USER_MASK; 293 const int fetch_fault = access & PFERR_FETCH_MASK; ··· 304 have_ad = PT_HAVE_ACCESSED_DIRTY(mmu); 305 306 #if PTTYPE == 64 307 if (walker->level == PT32E_ROOT_LEVEL) { 308 pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3); 309 trace_kvm_mmu_paging_element(pte, walker->level); ··· 316 walker->max_level = walker->level; 317 ASSERT(!(is_long_mode(vcpu) && !is_pae(vcpu))); 318 319 - accessed_dirty = have_ad ? PT_GUEST_ACCESSED_MASK : 0; 320 - 321 /* 322 * FIXME: on Intel processors, loads of the PDPTE registers for PAE paging 323 * by the MOV to CR instruction are treated as reads and do not cause the ··· 323 */ 324 nested_access = (have_ad ? PFERR_WRITE_MASK : 0) | PFERR_USER_MASK; 325 326 - pt_access = pte_access = ACC_ALL; 327 ++walker->level; 328 329 do { 330 gfn_t real_gfn; 331 unsigned long host_addr; 332 333 - pt_access &= pte_access; 334 --walker->level; 335 336 index = PT_INDEX(addr, walker->level); ··· 372 373 trace_kvm_mmu_paging_element(pte, walker->level); 374 375 if (unlikely(!FNAME(is_present_gpte)(pte))) 376 goto error; 377 ··· 386 goto error; 387 } 388 389 - accessed_dirty &= pte; 390 - pte_access = pt_access & FNAME(gpte_access)(vcpu, pte); 391 - 392 walker->ptes[walker->level - 1] = pte; 393 } while (!is_last_gpte(mmu, walker->level, pte)); 394 395 pte_pkey = FNAME(gpte_pkeys)(vcpu, pte); 396 - errcode = permission_fault(vcpu, mmu, pte_access, pte_pkey, access); 397 if (unlikely(errcode)) 398 goto error; 399 ··· 412 walker->gfn = real_gpa >> PAGE_SHIFT; 413 414 if (!write_fault) 415 - FNAME(protect_clean_gpte)(mmu, &pte_access, pte); 416 else 417 /* 418 * On a write fault, fold the dirty bit into accessed_dirty. ··· 430 goto retry_walk; 431 } 432 433 - walker->pt_access = pt_access; 434 - walker->pte_access = pte_access; 435 pgprintk("%s: pte %llx pte_access %x pt_access %x\n", 436 - __func__, (u64)pte, pte_access, pt_access); 437 return 1; 438 439 error: ··· 459 */ 460 if (!(errcode & PFERR_RSVD_MASK)) { 461 vcpu->arch.exit_qualification &= 0x187; 462 - vcpu->arch.exit_qualification |= ((pt_access & pte) & 0x7) << 3; 463 } 464 #endif 465 walker->fault.address = addr;
··· 283 pt_element_t pte; 284 pt_element_t __user *uninitialized_var(ptep_user); 285 gfn_t table_gfn; 286 + u64 pt_access, pte_access; 287 + unsigned index, accessed_dirty, pte_pkey; 288 unsigned nested_access; 289 gpa_t pte_gpa; 290 bool have_ad; 291 int offset; 292 + u64 walk_nx_mask = 0; 293 const int write_fault = access & PFERR_WRITE_MASK; 294 const int user_fault = access & PFERR_USER_MASK; 295 const int fetch_fault = access & PFERR_FETCH_MASK; ··· 302 have_ad = PT_HAVE_ACCESSED_DIRTY(mmu); 303 304 #if PTTYPE == 64 305 + walk_nx_mask = 1ULL << PT64_NX_SHIFT; 306 if (walker->level == PT32E_ROOT_LEVEL) { 307 pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3); 308 trace_kvm_mmu_paging_element(pte, walker->level); ··· 313 walker->max_level = walker->level; 314 ASSERT(!(is_long_mode(vcpu) && !is_pae(vcpu))); 315 316 /* 317 * FIXME: on Intel processors, loads of the PDPTE registers for PAE paging 318 * by the MOV to CR instruction are treated as reads and do not cause the ··· 322 */ 323 nested_access = (have_ad ? PFERR_WRITE_MASK : 0) | PFERR_USER_MASK; 324 325 + pte_access = ~0; 326 ++walker->level; 327 328 do { 329 gfn_t real_gfn; 330 unsigned long host_addr; 331 332 + pt_access = pte_access; 333 --walker->level; 334 335 index = PT_INDEX(addr, walker->level); ··· 371 372 trace_kvm_mmu_paging_element(pte, walker->level); 373 374 + /* 375 + * Inverting the NX it lets us AND it like other 376 + * permission bits. 377 + */ 378 + pte_access = pt_access & (pte ^ walk_nx_mask); 379 + 380 if (unlikely(!FNAME(is_present_gpte)(pte))) 381 goto error; 382 ··· 379 goto error; 380 } 381 382 walker->ptes[walker->level - 1] = pte; 383 } while (!is_last_gpte(mmu, walker->level, pte)); 384 385 pte_pkey = FNAME(gpte_pkeys)(vcpu, pte); 386 + accessed_dirty = have_ad ? pte_access & PT_GUEST_ACCESSED_MASK : 0; 387 + 388 + /* Convert to ACC_*_MASK flags for struct guest_walker. */ 389 + walker->pt_access = FNAME(gpte_access)(vcpu, pt_access ^ walk_nx_mask); 390 + walker->pte_access = FNAME(gpte_access)(vcpu, pte_access ^ walk_nx_mask); 391 + errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access); 392 if (unlikely(errcode)) 393 goto error; 394 ··· 403 walker->gfn = real_gpa >> PAGE_SHIFT; 404 405 if (!write_fault) 406 + FNAME(protect_clean_gpte)(mmu, &walker->pte_access, pte); 407 else 408 /* 409 * On a write fault, fold the dirty bit into accessed_dirty. ··· 421 goto retry_walk; 422 } 423 424 pgprintk("%s: pte %llx pte_access %x pt_access %x\n", 425 + __func__, (u64)pte, walker->pte_access, walker->pt_access); 426 return 1; 427 428 error: ··· 452 */ 453 if (!(errcode & PFERR_RSVD_MASK)) { 454 vcpu->arch.exit_qualification &= 0x187; 455 + vcpu->arch.exit_qualification |= (pte_access & 0x7) << 3; 456 } 457 #endif 458 walker->fault.address = addr;
+1 -1
arch/x86/kvm/pmu_intel.c
··· 294 ((u64)1 << edx.split.bit_width_fixed) - 1; 295 } 296 297 - pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) | 298 (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED); 299 pmu->global_ctrl_mask = ~pmu->global_ctrl; 300
··· 294 ((u64)1 << edx.split.bit_width_fixed) - 1; 295 } 296 297 + pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) | 298 (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED); 299 pmu->global_ctrl_mask = ~pmu->global_ctrl; 300
+2 -1
arch/x86/kvm/svm.c
··· 1272 1273 } 1274 1275 - static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu, int index) 1276 { 1277 u64 *avic_physical_id_table; 1278 struct kvm_arch *vm_data = &vcpu->kvm->arch;
··· 1272 1273 } 1274 1275 + static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu, 1276 + unsigned int index) 1277 { 1278 u64 *avic_physical_id_table; 1279 struct kvm_arch *vm_data = &vcpu->kvm->arch;
+2 -2
arch/x86/kvm/vmx.c
··· 6504 enable_ept_ad_bits = 0; 6505 } 6506 6507 - if (!cpu_has_vmx_ept_ad_bits()) 6508 enable_ept_ad_bits = 0; 6509 6510 if (!cpu_has_vmx_unrestricted_guest()) ··· 11213 if (!nested_cpu_has_pml(vmcs12)) 11214 return 0; 11215 11216 - if (vmcs12->guest_pml_index > PML_ENTITY_NUM) { 11217 vmx->nested.pml_full = true; 11218 return 1; 11219 }
··· 6504 enable_ept_ad_bits = 0; 6505 } 6506 6507 + if (!cpu_has_vmx_ept_ad_bits() || !enable_ept) 6508 enable_ept_ad_bits = 0; 6509 6510 if (!cpu_has_vmx_unrestricted_guest()) ··· 11213 if (!nested_cpu_has_pml(vmcs12)) 11214 return 0; 11215 11216 + if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) { 11217 vmx->nested.pml_full = true; 11218 return 1; 11219 }
+33 -12
arch/x86/kvm/x86.c
··· 1763 { 1764 struct kvm_arch *ka = &kvm->arch; 1765 struct pvclock_vcpu_time_info hv_clock; 1766 1767 spin_lock(&ka->pvclock_gtod_sync_lock); 1768 if (!ka->use_master_clock) { ··· 1775 hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset; 1776 spin_unlock(&ka->pvclock_gtod_sync_lock); 1777 1778 kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL, 1779 &hv_clock.tsc_shift, 1780 &hv_clock.tsc_to_system_mul); 1781 - return __pvclock_read_cycles(&hv_clock, rdtsc()); 1782 } 1783 1784 static void kvm_setup_pvclock_page(struct kvm_vcpu *v) ··· 3296 } 3297 } 3298 3299 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, 3300 struct kvm_xsave *guest_xsave) 3301 { 3302 u64 xstate_bv = 3303 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)]; 3304 3305 if (boot_cpu_has(X86_FEATURE_XSAVE)) { 3306 /* ··· 3311 * CPUID leaf 0xD, index 0, EDX:EAX. This is for compatibility 3312 * with old userspace. 3313 */ 3314 - if (xstate_bv & ~kvm_supported_xcr0()) 3315 return -EINVAL; 3316 load_xsave(vcpu, (u8 *)guest_xsave->region); 3317 } else { 3318 - if (xstate_bv & ~XFEATURE_MASK_FPSSE) 3319 return -EINVAL; 3320 memcpy(&vcpu->arch.guest_fpu.state.fxsave, 3321 guest_xsave->region, sizeof(struct fxregs_state)); ··· 4831 4832 static int kernel_pio(struct kvm_vcpu *vcpu, void *pd) 4833 { 4834 - /* TODO: String I/O for in kernel device */ 4835 - int r; 4836 4837 - if (vcpu->arch.pio.in) 4838 - r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port, 4839 - vcpu->arch.pio.size, pd); 4840 - else 4841 - r = kvm_io_bus_write(vcpu, KVM_PIO_BUS, 4842 - vcpu->arch.pio.port, vcpu->arch.pio.size, 4843 - pd); 4844 return r; 4845 } 4846 ··· 4881 4882 if (vcpu->arch.pio.count) 4883 goto data_avail; 4884 4885 ret = emulator_pio_in_out(vcpu, size, port, val, count, true); 4886 if (ret) { ··· 5067 5068 if (var.unusable) { 5069 memset(desc, 0, sizeof(*desc)); 5070 return false; 5071 } 5072
··· 1763 { 1764 struct kvm_arch *ka = &kvm->arch; 1765 struct pvclock_vcpu_time_info hv_clock; 1766 + u64 ret; 1767 1768 spin_lock(&ka->pvclock_gtod_sync_lock); 1769 if (!ka->use_master_clock) { ··· 1774 hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset; 1775 spin_unlock(&ka->pvclock_gtod_sync_lock); 1776 1777 + /* both __this_cpu_read() and rdtsc() should be on the same cpu */ 1778 + get_cpu(); 1779 + 1780 kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL, 1781 &hv_clock.tsc_shift, 1782 &hv_clock.tsc_to_system_mul); 1783 + ret = __pvclock_read_cycles(&hv_clock, rdtsc()); 1784 + 1785 + put_cpu(); 1786 + 1787 + return ret; 1788 } 1789 1790 static void kvm_setup_pvclock_page(struct kvm_vcpu *v) ··· 3288 } 3289 } 3290 3291 + #define XSAVE_MXCSR_OFFSET 24 3292 + 3293 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, 3294 struct kvm_xsave *guest_xsave) 3295 { 3296 u64 xstate_bv = 3297 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)]; 3298 + u32 mxcsr = *(u32 *)&guest_xsave->region[XSAVE_MXCSR_OFFSET / sizeof(u32)]; 3299 3300 if (boot_cpu_has(X86_FEATURE_XSAVE)) { 3301 /* ··· 3300 * CPUID leaf 0xD, index 0, EDX:EAX. This is for compatibility 3301 * with old userspace. 3302 */ 3303 + if (xstate_bv & ~kvm_supported_xcr0() || 3304 + mxcsr & ~mxcsr_feature_mask) 3305 return -EINVAL; 3306 load_xsave(vcpu, (u8 *)guest_xsave->region); 3307 } else { 3308 + if (xstate_bv & ~XFEATURE_MASK_FPSSE || 3309 + mxcsr & ~mxcsr_feature_mask) 3310 return -EINVAL; 3311 memcpy(&vcpu->arch.guest_fpu.state.fxsave, 3312 guest_xsave->region, sizeof(struct fxregs_state)); ··· 4818 4819 static int kernel_pio(struct kvm_vcpu *vcpu, void *pd) 4820 { 4821 + int r = 0, i; 4822 4823 + for (i = 0; i < vcpu->arch.pio.count; i++) { 4824 + if (vcpu->arch.pio.in) 4825 + r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port, 4826 + vcpu->arch.pio.size, pd); 4827 + else 4828 + r = kvm_io_bus_write(vcpu, KVM_PIO_BUS, 4829 + vcpu->arch.pio.port, vcpu->arch.pio.size, 4830 + pd); 4831 + if (r) 4832 + break; 4833 + pd += vcpu->arch.pio.size; 4834 + } 4835 return r; 4836 } 4837 ··· 4864 4865 if (vcpu->arch.pio.count) 4866 goto data_avail; 4867 + 4868 + memset(vcpu->arch.pio_data, 0, size * count); 4869 4870 ret = emulator_pio_in_out(vcpu, size, port, val, count, true); 4871 if (ret) { ··· 5048 5049 if (var.unusable) { 5050 memset(desc, 0, sizeof(*desc)); 5051 + if (base3) 5052 + *base3 = 0; 5053 return false; 5054 } 5055
+4 -11
arch/x86/xen/enlighten_pv.c
··· 142 struct xen_extraversion extra; 143 HYPERVISOR_xen_version(XENVER_extraversion, &extra); 144 145 - pr_info("Booting paravirtualized kernel %son %s\n", 146 - xen_feature(XENFEAT_auto_translated_physmap) ? 147 - "with PVH extensions " : "", pv_info.name); 148 printk(KERN_INFO "Xen version: %d.%d%s%s\n", 149 version >> 16, version & 0xffff, extra.extraversion, 150 xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : ""); ··· 955 956 void xen_setup_shared_info(void) 957 { 958 - if (!xen_feature(XENFEAT_auto_translated_physmap)) { 959 - set_fixmap(FIX_PARAVIRT_BOOTMAP, 960 - xen_start_info->shared_info); 961 962 - HYPERVISOR_shared_info = 963 - (struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP); 964 - } else 965 - HYPERVISOR_shared_info = 966 - (struct shared_info *)__va(xen_start_info->shared_info); 967 968 #ifndef CONFIG_SMP 969 /* In UP this is as good a place as any to set up shared info */
··· 142 struct xen_extraversion extra; 143 HYPERVISOR_xen_version(XENVER_extraversion, &extra); 144 145 + pr_info("Booting paravirtualized kernel on %s\n", pv_info.name); 146 printk(KERN_INFO "Xen version: %d.%d%s%s\n", 147 version >> 16, version & 0xffff, extra.extraversion, 148 xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : ""); ··· 957 958 void xen_setup_shared_info(void) 959 { 960 + set_fixmap(FIX_PARAVIRT_BOOTMAP, xen_start_info->shared_info); 961 962 + HYPERVISOR_shared_info = 963 + (struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP); 964 965 #ifndef CONFIG_SMP 966 /* In UP this is as good a place as any to set up shared info */
+1 -1
arch/x86/xen/mmu.c
··· 42 } 43 EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine); 44 45 - void xen_flush_tlb_all(void) 46 { 47 struct mmuext_op *op; 48 struct multicall_space mcs;
··· 42 } 43 EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine); 44 45 + static void xen_flush_tlb_all(void) 46 { 47 struct mmuext_op *op; 48 struct multicall_space mcs;
+36 -62
arch/x86/xen/mmu_pv.c
··· 355 pteval_t flags = val & PTE_FLAGS_MASK; 356 unsigned long mfn; 357 358 - if (!xen_feature(XENFEAT_auto_translated_physmap)) 359 - mfn = __pfn_to_mfn(pfn); 360 - else 361 - mfn = pfn; 362 /* 363 * If there's no mfn for the pfn, then just create an 364 * empty non-present pte. Unfortunately this loses ··· 644 /* The limit is the last byte to be touched */ 645 limit--; 646 BUG_ON(limit >= FIXADDR_TOP); 647 - 648 - if (xen_feature(XENFEAT_auto_translated_physmap)) 649 - return 0; 650 651 /* 652 * 64-bit has a great big hole in the middle of the address ··· 1284 1285 static void __init xen_pagetable_p2m_setup(void) 1286 { 1287 - if (xen_feature(XENFEAT_auto_translated_physmap)) 1288 - return; 1289 - 1290 xen_vmalloc_p2m_tree(); 1291 1292 #ifdef CONFIG_X86_64 ··· 1306 xen_build_mfn_list_list(); 1307 1308 /* Remap memory freed due to conflicts with E820 map */ 1309 - if (!xen_feature(XENFEAT_auto_translated_physmap)) 1310 - xen_remap_memory(); 1311 1312 xen_setup_shared_info(); 1313 } ··· 1916 /* Zap identity mapping */ 1917 init_level4_pgt[0] = __pgd(0); 1918 1919 - if (!xen_feature(XENFEAT_auto_translated_physmap)) { 1920 - /* Pre-constructed entries are in pfn, so convert to mfn */ 1921 - /* L4[272] -> level3_ident_pgt 1922 - * L4[511] -> level3_kernel_pgt */ 1923 - convert_pfn_mfn(init_level4_pgt); 1924 1925 - /* L3_i[0] -> level2_ident_pgt */ 1926 - convert_pfn_mfn(level3_ident_pgt); 1927 - /* L3_k[510] -> level2_kernel_pgt 1928 - * L3_k[511] -> level2_fixmap_pgt */ 1929 - convert_pfn_mfn(level3_kernel_pgt); 1930 1931 - /* L3_k[511][506] -> level1_fixmap_pgt */ 1932 - convert_pfn_mfn(level2_fixmap_pgt); 1933 - } 1934 /* We get [511][511] and have Xen's version of level2_kernel_pgt */ 1935 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd); 1936 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud); ··· 1952 if (i && i < pgd_index(__START_KERNEL_map)) 1953 init_level4_pgt[i] = ((pgd_t *)xen_start_info->pt_base)[i]; 1954 1955 - if (!xen_feature(XENFEAT_auto_translated_physmap)) { 1956 - /* Make pagetable pieces RO */ 1957 - set_page_prot(init_level4_pgt, PAGE_KERNEL_RO); 1958 - set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO); 1959 - set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO); 1960 - set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO); 1961 - set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO); 1962 - set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO); 1963 - set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO); 1964 - set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO); 1965 1966 - /* Pin down new L4 */ 1967 - pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, 1968 - PFN_DOWN(__pa_symbol(init_level4_pgt))); 1969 1970 - /* Unpin Xen-provided one */ 1971 - pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); 1972 1973 - /* 1974 - * At this stage there can be no user pgd, and no page 1975 - * structure to attach it to, so make sure we just set kernel 1976 - * pgd. 1977 - */ 1978 - xen_mc_batch(); 1979 - __xen_write_cr3(true, __pa(init_level4_pgt)); 1980 - xen_mc_issue(PARAVIRT_LAZY_CPU); 1981 - } else 1982 - native_write_cr3(__pa(init_level4_pgt)); 1983 1984 /* We can't that easily rip out L3 and L2, as the Xen pagetables are 1985 * set out this way: [L4], [L1], [L2], [L3], [L1], [L1] ... for ··· 2389 2390 static void __init xen_post_allocator_init(void) 2391 { 2392 - if (xen_feature(XENFEAT_auto_translated_physmap)) 2393 - return; 2394 - 2395 pv_mmu_ops.set_pte = xen_set_pte; 2396 pv_mmu_ops.set_pmd = xen_set_pmd; 2397 pv_mmu_ops.set_pud = xen_set_pud; ··· 2493 void __init xen_init_mmu_ops(void) 2494 { 2495 x86_init.paging.pagetable_init = xen_pagetable_init; 2496 - 2497 - if (xen_feature(XENFEAT_auto_translated_physmap)) 2498 - return; 2499 2500 pv_mmu_ops = xen_mmu_ops; 2501 ··· 2630 * this function are redundant and can be ignored. 2631 */ 2632 2633 - if (xen_feature(XENFEAT_auto_translated_physmap)) 2634 - return 0; 2635 - 2636 if (unlikely(order > MAX_CONTIG_ORDER)) 2637 return -ENOMEM; 2638 ··· 2665 unsigned long flags; 2666 int success; 2667 unsigned long vstart; 2668 - 2669 - if (xen_feature(XENFEAT_auto_translated_physmap)) 2670 - return; 2671 2672 if (unlikely(order > MAX_CONTIG_ORDER)) 2673 return;
··· 355 pteval_t flags = val & PTE_FLAGS_MASK; 356 unsigned long mfn; 357 358 + mfn = __pfn_to_mfn(pfn); 359 + 360 /* 361 * If there's no mfn for the pfn, then just create an 362 * empty non-present pte. Unfortunately this loses ··· 646 /* The limit is the last byte to be touched */ 647 limit--; 648 BUG_ON(limit >= FIXADDR_TOP); 649 650 /* 651 * 64-bit has a great big hole in the middle of the address ··· 1289 1290 static void __init xen_pagetable_p2m_setup(void) 1291 { 1292 xen_vmalloc_p2m_tree(); 1293 1294 #ifdef CONFIG_X86_64 ··· 1314 xen_build_mfn_list_list(); 1315 1316 /* Remap memory freed due to conflicts with E820 map */ 1317 + xen_remap_memory(); 1318 1319 xen_setup_shared_info(); 1320 } ··· 1925 /* Zap identity mapping */ 1926 init_level4_pgt[0] = __pgd(0); 1927 1928 + /* Pre-constructed entries are in pfn, so convert to mfn */ 1929 + /* L4[272] -> level3_ident_pgt */ 1930 + /* L4[511] -> level3_kernel_pgt */ 1931 + convert_pfn_mfn(init_level4_pgt); 1932 1933 + /* L3_i[0] -> level2_ident_pgt */ 1934 + convert_pfn_mfn(level3_ident_pgt); 1935 + /* L3_k[510] -> level2_kernel_pgt */ 1936 + /* L3_k[511] -> level2_fixmap_pgt */ 1937 + convert_pfn_mfn(level3_kernel_pgt); 1938 1939 + /* L3_k[511][506] -> level1_fixmap_pgt */ 1940 + convert_pfn_mfn(level2_fixmap_pgt); 1941 + 1942 /* We get [511][511] and have Xen's version of level2_kernel_pgt */ 1943 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd); 1944 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud); ··· 1962 if (i && i < pgd_index(__START_KERNEL_map)) 1963 init_level4_pgt[i] = ((pgd_t *)xen_start_info->pt_base)[i]; 1964 1965 + /* Make pagetable pieces RO */ 1966 + set_page_prot(init_level4_pgt, PAGE_KERNEL_RO); 1967 + set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO); 1968 + set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO); 1969 + set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO); 1970 + set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO); 1971 + set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO); 1972 + set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO); 1973 + set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO); 1974 1975 + /* Pin down new L4 */ 1976 + pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, 1977 + PFN_DOWN(__pa_symbol(init_level4_pgt))); 1978 1979 + /* Unpin Xen-provided one */ 1980 + pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); 1981 1982 + /* 1983 + * At this stage there can be no user pgd, and no page structure to 1984 + * attach it to, so make sure we just set kernel pgd. 1985 + */ 1986 + xen_mc_batch(); 1987 + __xen_write_cr3(true, __pa(init_level4_pgt)); 1988 + xen_mc_issue(PARAVIRT_LAZY_CPU); 1989 1990 /* We can't that easily rip out L3 and L2, as the Xen pagetables are 1991 * set out this way: [L4], [L1], [L2], [L3], [L1], [L1] ... for ··· 2403 2404 static void __init xen_post_allocator_init(void) 2405 { 2406 pv_mmu_ops.set_pte = xen_set_pte; 2407 pv_mmu_ops.set_pmd = xen_set_pmd; 2408 pv_mmu_ops.set_pud = xen_set_pud; ··· 2510 void __init xen_init_mmu_ops(void) 2511 { 2512 x86_init.paging.pagetable_init = xen_pagetable_init; 2513 2514 pv_mmu_ops = xen_mmu_ops; 2515 ··· 2650 * this function are redundant and can be ignored. 2651 */ 2652 2653 if (unlikely(order > MAX_CONTIG_ORDER)) 2654 return -ENOMEM; 2655 ··· 2688 unsigned long flags; 2689 int success; 2690 unsigned long vstart; 2691 2692 if (unlikely(order > MAX_CONTIG_ORDER)) 2693 return;
+15 -12
drivers/block/drbd/drbd_req.c
··· 315 } 316 317 /* still holds resource->req_lock */ 318 - static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put) 319 { 320 struct drbd_device *device = req->device; 321 D_ASSERT(device, m || (req->rq_state & RQ_POSTPONED)); 322 323 if (!atomic_sub_and_test(put, &req->completion_ref)) 324 - return 0; 325 326 drbd_req_complete(req, m); 327 328 if (req->rq_state & RQ_POSTPONED) { 329 /* don't destroy the req object just yet, 330 * but queue it for retry */ 331 drbd_restart_request(req); 332 - return 0; 333 } 334 335 - return 1; 336 } 337 338 static void set_if_null_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req) ··· 527 if (req->i.waiting) 528 wake_up(&device->misc_wait); 529 530 - if (c_put) { 531 - if (drbd_req_put_completion_ref(req, m, c_put)) 532 - kref_put(&req->kref, drbd_req_destroy); 533 - } else { 534 - kref_put(&req->kref, drbd_req_destroy); 535 - } 536 } 537 538 static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req) ··· 1370 } 1371 1372 out: 1373 - if (drbd_req_put_completion_ref(req, &m, 1)) 1374 - kref_put(&req->kref, drbd_req_destroy); 1375 spin_unlock_irq(&resource->req_lock); 1376 1377 /* Even though above is a kref_put(), this is safe.
··· 315 } 316 317 /* still holds resource->req_lock */ 318 + static void drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put) 319 { 320 struct drbd_device *device = req->device; 321 D_ASSERT(device, m || (req->rq_state & RQ_POSTPONED)); 322 323 + if (!put) 324 + return; 325 + 326 if (!atomic_sub_and_test(put, &req->completion_ref)) 327 + return; 328 329 drbd_req_complete(req, m); 330 + 331 + /* local completion may still come in later, 332 + * we need to keep the req object around. */ 333 + if (req->rq_state & RQ_LOCAL_ABORTED) 334 + return; 335 336 if (req->rq_state & RQ_POSTPONED) { 337 /* don't destroy the req object just yet, 338 * but queue it for retry */ 339 drbd_restart_request(req); 340 + return; 341 } 342 343 + kref_put(&req->kref, drbd_req_destroy); 344 } 345 346 static void set_if_null_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req) ··· 519 if (req->i.waiting) 520 wake_up(&device->misc_wait); 521 522 + drbd_req_put_completion_ref(req, m, c_put); 523 + kref_put(&req->kref, drbd_req_destroy); 524 } 525 526 static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req) ··· 1366 } 1367 1368 out: 1369 + drbd_req_put_completion_ref(req, &m, 1); 1370 spin_unlock_irq(&resource->req_lock); 1371 1372 /* Even though above is a kref_put(), this is safe.
+5 -3
drivers/block/xen-blkback/xenbus.c
··· 504 505 dev_set_drvdata(&dev->dev, NULL); 506 507 - if (be->blkif) 508 xen_blkif_disconnect(be->blkif); 509 510 - /* Put the reference we set in xen_blkif_alloc(). */ 511 - xen_blkif_put(be->blkif); 512 kfree(be->mode); 513 kfree(be); 514 return 0;
··· 504 505 dev_set_drvdata(&dev->dev, NULL); 506 507 + if (be->blkif) { 508 xen_blkif_disconnect(be->blkif); 509 510 + /* Put the reference we set in xen_blkif_alloc(). */ 511 + xen_blkif_put(be->blkif); 512 + } 513 + 514 kfree(be->mode); 515 kfree(be); 516 return 0;
+5 -1
drivers/char/lp.c
··· 859 } else if (!strcmp(str, "auto")) { 860 parport_nr[0] = LP_PARPORT_AUTO; 861 } else if (!strcmp(str, "none")) { 862 - parport_nr[parport_ptr++] = LP_PARPORT_NONE; 863 } else if (!strcmp(str, "reset")) { 864 reset = 1; 865 }
··· 859 } else if (!strcmp(str, "auto")) { 860 parport_nr[0] = LP_PARPORT_AUTO; 861 } else if (!strcmp(str, "none")) { 862 + if (parport_ptr < LP_NO) 863 + parport_nr[parport_ptr++] = LP_PARPORT_NONE; 864 + else 865 + printk(KERN_INFO "lp: too many ports, %s ignored.\n", 866 + str); 867 } else if (!strcmp(str, "reset")) { 868 reset = 1; 869 }
+5
drivers/char/mem.c
··· 340 static int mmap_mem(struct file *file, struct vm_area_struct *vma) 341 { 342 size_t size = vma->vm_end - vma->vm_start; 343 344 if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size)) 345 return -EINVAL;
··· 340 static int mmap_mem(struct file *file, struct vm_area_struct *vma) 341 { 342 size_t size = vma->vm_end - vma->vm_start; 343 + phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT; 344 + 345 + /* It's illegal to wrap around the end of the physical address space. */ 346 + if (offset + (phys_addr_t)size < offset) 347 + return -EINVAL; 348 349 if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size)) 350 return -EINVAL;
+2
drivers/dax/super.c
··· 44 } 45 EXPORT_SYMBOL_GPL(dax_read_unlock); 46 47 int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size, 48 pgoff_t *pgoff) 49 { ··· 113 return 0; 114 } 115 EXPORT_SYMBOL_GPL(__bdev_dax_supported); 116 117 /** 118 * struct dax_device - anchor object for dax services
··· 44 } 45 EXPORT_SYMBOL_GPL(dax_read_unlock); 46 47 + #ifdef CONFIG_BLOCK 48 int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size, 49 pgoff_t *pgoff) 50 { ··· 112 return 0; 113 } 114 EXPORT_SYMBOL_GPL(__bdev_dax_supported); 115 + #endif 116 117 /** 118 * struct dax_device - anchor object for dax services
+19 -21
drivers/edac/amd64_edac.c
··· 782 783 static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl) 784 { 785 - u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases; 786 - int dimm, size0, size1; 787 788 edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl); 789 790 for (dimm = 0; dimm < 4; dimm++) { 791 size0 = 0; 792 793 - if (dcsb[dimm*2] & DCSB_CS_ENABLE) 794 - size0 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, dimm); 795 796 size1 = 0; 797 - if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE) 798 - size1 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, dimm); 799 800 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n", 801 - dimm * 2, size0, 802 - dimm * 2 + 1, size1); 803 } 804 } 805 ··· 2758 * encompasses 2759 * 2760 */ 2761 - static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr) 2762 { 2763 - u32 cs_mode, nr_pages; 2764 u32 dbam = dct ? pvt->dbam1 : pvt->dbam0; 2765 2766 2767 - /* 2768 - * The math on this doesn't look right on the surface because x/2*4 can 2769 - * be simplified to x*2 but this expression makes use of the fact that 2770 - * it is integral math where 1/2=0. This intermediate value becomes the 2771 - * number of bits to shift the DBAM register to extract the proper CSROW 2772 - * field. 2773 - */ 2774 - cs_mode = DBAM_DIMM(csrow_nr / 2, dbam); 2775 2776 - nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, (csrow_nr / 2)) 2777 - << (20 - PAGE_SHIFT); 2778 2779 edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n", 2780 - csrow_nr, dct, cs_mode); 2781 edac_dbg(0, "nr_pages/channel: %u\n", nr_pages); 2782 2783 return nr_pages;
··· 782 783 static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl) 784 { 785 + int dimm, size0, size1, cs0, cs1; 786 787 edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl); 788 789 for (dimm = 0; dimm < 4; dimm++) { 790 size0 = 0; 791 + cs0 = dimm * 2; 792 793 + if (csrow_enabled(cs0, ctrl, pvt)) 794 + size0 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, cs0); 795 796 size1 = 0; 797 + cs1 = dimm * 2 + 1; 798 + 799 + if (csrow_enabled(cs1, ctrl, pvt)) 800 + size1 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, cs1); 801 802 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n", 803 + cs0, size0, 804 + cs1, size1); 805 } 806 } 807 ··· 2756 * encompasses 2757 * 2758 */ 2759 + static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig) 2760 { 2761 u32 dbam = dct ? pvt->dbam1 : pvt->dbam0; 2762 + int csrow_nr = csrow_nr_orig; 2763 + u32 cs_mode, nr_pages; 2764 2765 + if (!pvt->umc) 2766 + csrow_nr >>= 1; 2767 2768 + cs_mode = DBAM_DIMM(csrow_nr, dbam); 2769 2770 + nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr); 2771 + nr_pages <<= 20 - PAGE_SHIFT; 2772 2773 edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n", 2774 + csrow_nr_orig, dct, cs_mode); 2775 edac_dbg(0, "nr_pages/channel: %u\n", nr_pages); 2776 2777 return nr_pages;
+3 -9
drivers/firmware/efi/efi-pstore.c
··· 155 * efi_pstore_sysfs_entry_iter 156 * 157 * @record: pstore record to pass to callback 158 - * @pos: entry to begin iterating from 159 * 160 * You MUST call efivar_enter_iter_begin() before this function, and 161 * efivar_entry_iter_end() afterwards. 162 * 163 - * It is possible to begin iteration from an arbitrary entry within 164 - * the list by passing @pos. @pos is updated on return to point to 165 - * the next entry of the last one passed to efi_pstore_read_func(). 166 - * To begin iterating from the beginning of the list @pos must be %NULL. 167 */ 168 - static int efi_pstore_sysfs_entry_iter(struct pstore_record *record, 169 - struct efivar_entry **pos) 170 { 171 struct efivar_entry *entry, *n; 172 struct list_head *head = &efivar_sysfs_list; 173 int size = 0; ··· 213 */ 214 static ssize_t efi_pstore_read(struct pstore_record *record) 215 { 216 - struct efivar_entry *entry = (struct efivar_entry *)record->psi->data; 217 ssize_t size; 218 219 record->buf = kzalloc(EFIVARS_DATA_SIZE_MAX, GFP_KERNEL); ··· 223 size = -EINTR; 224 goto out; 225 } 226 - size = efi_pstore_sysfs_entry_iter(record, &entry); 227 efivar_entry_iter_end(); 228 229 out:
··· 155 * efi_pstore_sysfs_entry_iter 156 * 157 * @record: pstore record to pass to callback 158 * 159 * You MUST call efivar_enter_iter_begin() before this function, and 160 * efivar_entry_iter_end() afterwards. 161 * 162 */ 163 + static int efi_pstore_sysfs_entry_iter(struct pstore_record *record) 164 { 165 + struct efivar_entry **pos = (struct efivar_entry **)&record->psi->data; 166 struct efivar_entry *entry, *n; 167 struct list_head *head = &efivar_sysfs_list; 168 int size = 0; ··· 218 */ 219 static ssize_t efi_pstore_read(struct pstore_record *record) 220 { 221 ssize_t size; 222 223 record->buf = kzalloc(EFIVARS_DATA_SIZE_MAX, GFP_KERNEL); ··· 229 size = -EINTR; 230 goto out; 231 } 232 + size = efi_pstore_sysfs_entry_iter(record); 233 efivar_entry_iter_end(); 234 235 out:
+15 -6
drivers/firmware/google/vpd.c
··· 116 return VPD_OK; 117 118 info = kzalloc(sizeof(*info), GFP_KERNEL); 119 - info->key = kzalloc(key_len + 1, GFP_KERNEL); 120 - if (!info->key) 121 return -ENOMEM; 122 123 memcpy(info->key, key, key_len); 124 ··· 139 list_add_tail(&info->list, &sec->attribs); 140 141 ret = sysfs_create_bin_file(sec->kobj, &info->bin_attr); 142 - if (ret) { 143 - kfree(info->key); 144 - return ret; 145 - } 146 147 return 0; 148 } 149 150 static void vpd_section_attrib_destroy(struct vpd_section *sec)
··· 116 return VPD_OK; 117 118 info = kzalloc(sizeof(*info), GFP_KERNEL); 119 + if (!info) 120 return -ENOMEM; 121 + info->key = kzalloc(key_len + 1, GFP_KERNEL); 122 + if (!info->key) { 123 + ret = -ENOMEM; 124 + goto free_info; 125 + } 126 127 memcpy(info->key, key, key_len); 128 ··· 135 list_add_tail(&info->list, &sec->attribs); 136 137 ret = sysfs_create_bin_file(sec->kobj, &info->bin_attr); 138 + if (ret) 139 + goto free_info_key; 140 141 return 0; 142 + 143 + free_info_key: 144 + kfree(info->key); 145 + free_info: 146 + kfree(info); 147 + 148 + return ret; 149 } 150 151 static void vpd_section_attrib_destroy(struct vpd_section *sec)
+32 -17
drivers/gpu/drm/arm/hdlcd_crtc.c
··· 10 */ 11 12 #include <drm/drmP.h> 13 #include <drm/drm_atomic_helper.h> 14 #include <drm/drm_crtc.h> 15 #include <drm/drm_crtc_helper.h> ··· 227 static int hdlcd_plane_atomic_check(struct drm_plane *plane, 228 struct drm_plane_state *state) 229 { 230 - u32 src_w, src_h; 231 232 - src_w = state->src_w >> 16; 233 - src_h = state->src_h >> 16; 234 - 235 - /* we can't do any scaling of the plane source */ 236 - if ((src_w != state->crtc_w) || (src_h != state->crtc_h)) 237 return -EINVAL; 238 239 - return 0; 240 } 241 242 static void hdlcd_plane_atomic_update(struct drm_plane *plane, ··· 262 struct drm_framebuffer *fb = plane->state->fb; 263 struct hdlcd_drm_private *hdlcd; 264 struct drm_gem_cma_object *gem; 265 - u32 src_w, src_h, dest_w, dest_h; 266 dma_addr_t scanout_start; 267 268 if (!fb) 269 return; 270 271 - src_w = plane->state->src_w >> 16; 272 - src_h = plane->state->src_h >> 16; 273 - dest_w = plane->state->crtc_w; 274 - dest_h = plane->state->crtc_h; 275 gem = drm_fb_cma_get_gem_obj(fb, 0); 276 scanout_start = gem->paddr + fb->offsets[0] + 277 - plane->state->crtc_y * fb->pitches[0] + 278 - plane->state->crtc_x * 279 - fb->format->cpp[0]; 280 281 hdlcd = plane->dev->dev_private; 282 hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, fb->pitches[0]); ··· 322 formats, ARRAY_SIZE(formats), 323 DRM_PLANE_TYPE_PRIMARY, NULL); 324 if (ret) { 325 - devm_kfree(drm->dev, plane); 326 return ERR_PTR(ret); 327 } 328 ··· 345 &hdlcd_crtc_funcs, NULL); 346 if (ret) { 347 hdlcd_plane_destroy(primary); 348 - devm_kfree(drm->dev, primary); 349 return ret; 350 } 351
··· 10 */ 11 12 #include <drm/drmP.h> 13 + #include <drm/drm_atomic.h> 14 #include <drm/drm_atomic_helper.h> 15 #include <drm/drm_crtc.h> 16 #include <drm/drm_crtc_helper.h> ··· 226 static int hdlcd_plane_atomic_check(struct drm_plane *plane, 227 struct drm_plane_state *state) 228 { 229 + struct drm_rect clip = { 0 }; 230 + struct drm_crtc_state *crtc_state; 231 + u32 src_h = state->src_h >> 16; 232 233 + /* only the HDLCD_REG_FB_LINE_COUNT register has a limit */ 234 + if (src_h >= HDLCD_MAX_YRES) { 235 + DRM_DEBUG_KMS("Invalid source width: %d\n", src_h); 236 return -EINVAL; 237 + } 238 239 + if (!state->fb || !state->crtc) 240 + return 0; 241 + 242 + crtc_state = drm_atomic_get_existing_crtc_state(state->state, 243 + state->crtc); 244 + if (!crtc_state) { 245 + DRM_DEBUG_KMS("Invalid crtc state\n"); 246 + return -EINVAL; 247 + } 248 + 249 + clip.x2 = crtc_state->adjusted_mode.hdisplay; 250 + clip.y2 = crtc_state->adjusted_mode.vdisplay; 251 + 252 + return drm_plane_helper_check_state(state, &clip, 253 + DRM_PLANE_HELPER_NO_SCALING, 254 + DRM_PLANE_HELPER_NO_SCALING, 255 + false, true); 256 } 257 258 static void hdlcd_plane_atomic_update(struct drm_plane *plane, ··· 244 struct drm_framebuffer *fb = plane->state->fb; 245 struct hdlcd_drm_private *hdlcd; 246 struct drm_gem_cma_object *gem; 247 + u32 src_x, src_y, dest_h; 248 dma_addr_t scanout_start; 249 250 if (!fb) 251 return; 252 253 + src_x = plane->state->src.x1 >> 16; 254 + src_y = plane->state->src.y1 >> 16; 255 + dest_h = drm_rect_height(&plane->state->dst); 256 gem = drm_fb_cma_get_gem_obj(fb, 0); 257 + 258 scanout_start = gem->paddr + fb->offsets[0] + 259 + src_y * fb->pitches[0] + 260 + src_x * fb->format->cpp[0]; 261 262 hdlcd = plane->dev->dev_private; 263 hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, fb->pitches[0]); ··· 305 formats, ARRAY_SIZE(formats), 306 DRM_PLANE_TYPE_PRIMARY, NULL); 307 if (ret) { 308 return ERR_PTR(ret); 309 } 310 ··· 329 &hdlcd_crtc_funcs, NULL); 330 if (ret) { 331 hdlcd_plane_destroy(primary); 332 return ret; 333 } 334
+12 -20
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
··· 152 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 153 }; 154 155 - static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, 156 - const struct device_node *np) 157 { 158 struct atmel_hlcdc_dc *dc = dev->dev_private; 159 struct atmel_hlcdc_rgb_output *output; 160 struct drm_panel *panel; 161 struct drm_bridge *bridge; 162 int ret; 163 164 output = devm_kzalloc(dev->dev, sizeof(*output), GFP_KERNEL); 165 if (!output) ··· 180 return ret; 181 182 output->encoder.possible_crtcs = 0x1; 183 - 184 - ret = drm_of_find_panel_or_bridge(np, 0, 0, &panel, &bridge); 185 - if (ret) 186 - return ret; 187 188 if (panel) { 189 output->connector.dpms = DRM_MODE_DPMS_OFF; ··· 220 221 int atmel_hlcdc_create_outputs(struct drm_device *dev) 222 { 223 - struct device_node *remote; 224 - int ret = -ENODEV; 225 - int endpoint = 0; 226 227 - while (true) { 228 - /* Loop thru possible multiple connections to the output */ 229 - remote = of_graph_get_remote_node(dev->dev->of_node, 0, 230 - endpoint++); 231 - if (!remote) 232 - break; 233 234 - ret = atmel_hlcdc_attach_endpoint(dev, remote); 235 - of_node_put(remote); 236 - if (ret) 237 - return ret; 238 - } 239 240 return ret; 241 }
··· 152 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 153 }; 154 155 + static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, int endpoint) 156 { 157 struct atmel_hlcdc_dc *dc = dev->dev_private; 158 struct atmel_hlcdc_rgb_output *output; 159 struct drm_panel *panel; 160 struct drm_bridge *bridge; 161 int ret; 162 + 163 + ret = drm_of_find_panel_or_bridge(dev->dev->of_node, 0, endpoint, 164 + &panel, &bridge); 165 + if (ret) 166 + return ret; 167 168 output = devm_kzalloc(dev->dev, sizeof(*output), GFP_KERNEL); 169 if (!output) ··· 176 return ret; 177 178 output->encoder.possible_crtcs = 0x1; 179 180 if (panel) { 181 output->connector.dpms = DRM_MODE_DPMS_OFF; ··· 220 221 int atmel_hlcdc_create_outputs(struct drm_device *dev) 222 { 223 + int endpoint, ret = 0; 224 225 + for (endpoint = 0; !ret; endpoint++) 226 + ret = atmel_hlcdc_attach_endpoint(dev, endpoint); 227 228 + /* At least one device was successfully attached.*/ 229 + if (ret == -ENODEV && endpoint) 230 + return 0; 231 232 return ret; 233 }
+3 -1
drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
··· 44 45 /* initially, until copy_from_user() and bo lookup succeeds: */ 46 submit->nr_bos = 0; 47 48 ww_acquire_init(&submit->ticket, &reservation_ww_class); 49 } ··· 295 } 296 297 ww_acquire_fini(&submit->ticket); 298 - dma_fence_put(submit->fence); 299 kfree(submit); 300 } 301
··· 44 45 /* initially, until copy_from_user() and bo lookup succeeds: */ 46 submit->nr_bos = 0; 47 + submit->fence = NULL; 48 49 ww_acquire_init(&submit->ticket, &reservation_ww_class); 50 } ··· 294 } 295 296 ww_acquire_fini(&submit->ticket); 297 + if (submit->fence) 298 + dma_fence_put(submit->fence); 299 kfree(submit); 300 } 301
+1 -1
drivers/gpu/drm/i915/gvt/handlers.c
··· 1244 mode = vgpu_vreg(vgpu, offset); 1245 1246 if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA)) { 1247 - WARN_ONCE(1, "VM(%d): iGVT-g doesn't supporte GuC\n", 1248 vgpu->id); 1249 return 0; 1250 }
··· 1244 mode = vgpu_vreg(vgpu, offset); 1245 1246 if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA)) { 1247 + WARN_ONCE(1, "VM(%d): iGVT-g doesn't support GuC\n", 1248 vgpu->id); 1249 return 0; 1250 }
+3
drivers/gpu/drm/i915/gvt/render.c
··· 340 } else 341 v = mmio->value; 342 343 I915_WRITE(mmio->reg, v); 344 POSTING_READ(mmio->reg); 345
··· 340 } else 341 v = mmio->value; 342 343 + if (mmio->in_context) 344 + continue; 345 + 346 I915_WRITE(mmio->reg, v); 347 POSTING_READ(mmio->reg); 348
+6 -2
drivers/gpu/drm/i915/gvt/sched_policy.c
··· 129 struct vgpu_sched_data *vgpu_data; 130 ktime_t cur_time; 131 132 - /* no target to schedule */ 133 - if (!scheduler->next_vgpu) 134 return; 135 136 /* 137 * after the flag is set, workload dispatch thread will
··· 129 struct vgpu_sched_data *vgpu_data; 130 ktime_t cur_time; 131 132 + /* no need to schedule if next_vgpu is the same with current_vgpu, 133 + * let scheduler chose next_vgpu again by setting it to NULL. 134 + */ 135 + if (scheduler->next_vgpu == scheduler->current_vgpu) { 136 + scheduler->next_vgpu = NULL; 137 return; 138 + } 139 140 /* 141 * after the flag is set, workload dispatch thread will
+8 -4
drivers/gpu/drm/i915/i915_gem_gtt.c
··· 195 u32 pte_flags; 196 int ret; 197 198 - ret = vma->vm->allocate_va_range(vma->vm, vma->node.start, vma->size); 199 - if (ret) 200 - return ret; 201 202 vma->pages = vma->obj->mm.pages; 203 ··· 2309 if (flags & I915_VMA_LOCAL_BIND) { 2310 struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt; 2311 2312 - if (appgtt->base.allocate_va_range) { 2313 ret = appgtt->base.allocate_va_range(&appgtt->base, 2314 vma->node.start, 2315 vma->node.size);
··· 195 u32 pte_flags; 196 int ret; 197 198 + if (!(vma->flags & I915_VMA_LOCAL_BIND)) { 199 + ret = vma->vm->allocate_va_range(vma->vm, vma->node.start, 200 + vma->size); 201 + if (ret) 202 + return ret; 203 + } 204 205 vma->pages = vma->obj->mm.pages; 206 ··· 2306 if (flags & I915_VMA_LOCAL_BIND) { 2307 struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt; 2308 2309 + if (!(vma->flags & I915_VMA_LOCAL_BIND) && 2310 + appgtt->base.allocate_va_range) { 2311 ret = appgtt->base.allocate_va_range(&appgtt->base, 2312 vma->node.start, 2313 vma->node.size);
+7 -3
drivers/gpu/drm/i915/i915_reg.h
··· 3051 #define CLKCFG_FSB_667 (3 << 0) /* hrawclk 166 */ 3052 #define CLKCFG_FSB_800 (2 << 0) /* hrawclk 200 */ 3053 #define CLKCFG_FSB_1067 (6 << 0) /* hrawclk 266 */ 3054 #define CLKCFG_FSB_1333 (7 << 0) /* hrawclk 333 */ 3055 - /* Note, below two are guess */ 3056 - #define CLKCFG_FSB_1600 (4 << 0) /* hrawclk 400 */ 3057 - #define CLKCFG_FSB_1600_ALT (0 << 0) /* hrawclk 400 */ 3058 #define CLKCFG_FSB_MASK (7 << 0) 3059 #define CLKCFG_MEM_533 (1 << 4) 3060 #define CLKCFG_MEM_667 (2 << 4)
··· 3051 #define CLKCFG_FSB_667 (3 << 0) /* hrawclk 166 */ 3052 #define CLKCFG_FSB_800 (2 << 0) /* hrawclk 200 */ 3053 #define CLKCFG_FSB_1067 (6 << 0) /* hrawclk 266 */ 3054 + #define CLKCFG_FSB_1067_ALT (0 << 0) /* hrawclk 266 */ 3055 #define CLKCFG_FSB_1333 (7 << 0) /* hrawclk 333 */ 3056 + /* 3057 + * Note that on at least on ELK the below value is reported for both 3058 + * 333 and 400 MHz BIOS FSB setting, but given that the gmch datasheet 3059 + * lists only 200/266/333 MHz FSB as supported let's decode it as 333 MHz. 3060 + */ 3061 + #define CLKCFG_FSB_1333_ALT (4 << 0) /* hrawclk 333 */ 3062 #define CLKCFG_FSB_MASK (7 << 0) 3063 #define CLKCFG_MEM_533 (1 << 4) 3064 #define CLKCFG_MEM_667 (2 << 4)
+2 -4
drivers/gpu/drm/i915/intel_cdclk.c
··· 1798 case CLKCFG_FSB_800: 1799 return 200000; 1800 case CLKCFG_FSB_1067: 1801 return 266667; 1802 case CLKCFG_FSB_1333: 1803 return 333333; 1804 - /* these two are just a guess; one of them might be right */ 1805 - case CLKCFG_FSB_1600: 1806 - case CLKCFG_FSB_1600_ALT: 1807 - return 400000; 1808 default: 1809 return 133333; 1810 }
··· 1798 case CLKCFG_FSB_800: 1799 return 200000; 1800 case CLKCFG_FSB_1067: 1801 + case CLKCFG_FSB_1067_ALT: 1802 return 266667; 1803 case CLKCFG_FSB_1333: 1804 + case CLKCFG_FSB_1333_ALT: 1805 return 333333; 1806 default: 1807 return 133333; 1808 }
+3 -4
drivers/gpu/drm/i915/intel_dsi.c
··· 410 val |= (ULPS_STATE_ENTER | DEVICE_READY); 411 I915_WRITE(MIPI_DEVICE_READY(port), val); 412 413 - /* Wait for ULPS Not active */ 414 if (intel_wait_for_register(dev_priv, 415 - MIPI_CTRL(port), GLK_ULPS_NOT_ACTIVE, 416 - GLK_ULPS_NOT_ACTIVE, 20)) 417 - DRM_ERROR("ULPS is still active\n"); 418 419 /* Exit ULPS */ 420 val = I915_READ(MIPI_DEVICE_READY(port));
··· 410 val |= (ULPS_STATE_ENTER | DEVICE_READY); 411 I915_WRITE(MIPI_DEVICE_READY(port), val); 412 413 + /* Wait for ULPS active */ 414 if (intel_wait_for_register(dev_priv, 415 + MIPI_CTRL(port), GLK_ULPS_NOT_ACTIVE, 0, 20)) 416 + DRM_ERROR("ULPS not active\n"); 417 418 /* Exit ULPS */ 419 val = I915_READ(MIPI_DEVICE_READY(port));
+5
drivers/gpu/drm/i915/intel_lpe_audio.c
··· 63 #include <linux/acpi.h> 64 #include <linux/device.h> 65 #include <linux/pci.h> 66 67 #include "i915_drv.h" 68 #include <linux/delay.h> ··· 121 } 122 123 kfree(rsc); 124 125 return platdev; 126
··· 63 #include <linux/acpi.h> 64 #include <linux/device.h> 65 #include <linux/pci.h> 66 + #include <linux/pm_runtime.h> 67 68 #include "i915_drv.h" 69 #include <linux/delay.h> ··· 120 } 121 122 kfree(rsc); 123 + 124 + pm_runtime_forbid(&platdev->dev); 125 + pm_runtime_set_active(&platdev->dev); 126 + pm_runtime_enable(&platdev->dev); 127 128 return platdev; 129
+2 -4
drivers/gpu/drm/nouveau/nouveau_display.c
··· 360 pm_runtime_get_sync(drm->dev->dev); 361 362 drm_helper_hpd_irq_event(drm->dev); 363 364 pm_runtime_mark_last_busy(drm->dev->dev); 365 pm_runtime_put_sync(drm->dev->dev); ··· 414 ret = disp->init(dev); 415 if (ret) 416 return ret; 417 - 418 - /* enable polling for external displays */ 419 - if (!dev->mode_config.poll_enabled) 420 - drm_kms_helper_poll_enable(dev); 421 422 /* enable hotplug interrupts */ 423 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
··· 360 pm_runtime_get_sync(drm->dev->dev); 361 362 drm_helper_hpd_irq_event(drm->dev); 363 + /* enable polling for external displays */ 364 + drm_kms_helper_poll_enable(drm->dev); 365 366 pm_runtime_mark_last_busy(drm->dev->dev); 367 pm_runtime_put_sync(drm->dev->dev); ··· 412 ret = disp->init(dev); 413 if (ret) 414 return ret; 415 416 /* enable hotplug interrupts */ 417 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+3 -3
drivers/gpu/drm/nouveau/nouveau_drm.c
··· 502 pm_runtime_allow(dev->dev); 503 pm_runtime_mark_last_busy(dev->dev); 504 pm_runtime_put(dev->dev); 505 } 506 return 0; 507 ··· 776 pci_set_master(pdev); 777 778 ret = nouveau_do_resume(drm_dev, true); 779 - 780 - if (!drm_dev->mode_config.poll_enabled) 781 - drm_kms_helper_poll_enable(drm_dev); 782 783 /* do magic */ 784 nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
··· 502 pm_runtime_allow(dev->dev); 503 pm_runtime_mark_last_busy(dev->dev); 504 pm_runtime_put(dev->dev); 505 + } else { 506 + /* enable polling for external displays */ 507 + drm_kms_helper_poll_enable(dev); 508 } 509 return 0; 510 ··· 773 pci_set_master(pdev); 774 775 ret = nouveau_do_resume(drm_dev, true); 776 777 /* do magic */ 778 nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
+2 -1
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
··· 148 case NVKM_MEM_TARGET_NCOH: target = 3; break; 149 default: 150 WARN_ON(1); 151 - return; 152 } 153 154 nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) | ··· 160 & 0x00100000), 161 msecs_to_jiffies(2000)) == 0) 162 nvkm_error(subdev, "runlist %d update timeout\n", runl); 163 mutex_unlock(&subdev->mutex); 164 } 165
··· 148 case NVKM_MEM_TARGET_NCOH: target = 3; break; 149 default: 150 WARN_ON(1); 151 + goto unlock; 152 } 153 154 nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) | ··· 160 & 0x00100000), 161 msecs_to_jiffies(2000)) == 0) 162 nvkm_error(subdev, "runlist %d update timeout\n", runl); 163 + unlock: 164 mutex_unlock(&subdev->mutex); 165 } 166
+3 -1
drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
··· 116 ret = nvkm_firmware_get(subdev->device, f, &sig); 117 if (ret) 118 goto free_data; 119 img->sig = kmemdup(sig->data, sig->size, GFP_KERNEL); 120 if (!img->sig) { 121 ret = -ENOMEM; ··· 127 img->ucode_data = ls_ucode_img_build(bl, code, data, 128 &img->ucode_desc); 129 if (IS_ERR(img->ucode_data)) { 130 ret = PTR_ERR(img->ucode_data); 131 - goto free_data; 132 } 133 img->ucode_size = img->ucode_desc.image_size; 134
··· 116 ret = nvkm_firmware_get(subdev->device, f, &sig); 117 if (ret) 118 goto free_data; 119 + 120 img->sig = kmemdup(sig->data, sig->size, GFP_KERNEL); 121 if (!img->sig) { 122 ret = -ENOMEM; ··· 126 img->ucode_data = ls_ucode_img_build(bl, code, data, 127 &img->ucode_desc); 128 if (IS_ERR(img->ucode_data)) { 129 + kfree(img->sig); 130 ret = PTR_ERR(img->ucode_data); 131 + goto free_sig; 132 } 133 img->ucode_size = img->ucode_desc.image_size; 134
+1
drivers/gpu/host1x/Kconfig
··· 1 config TEGRA_HOST1X 2 tristate "NVIDIA Tegra host1x driver" 3 depends on ARCH_TEGRA || (ARM && COMPILE_TEST) 4 help 5 Driver for the NVIDIA Tegra host1x hardware. 6
··· 1 config TEGRA_HOST1X 2 tristate "NVIDIA Tegra host1x driver" 3 depends on ARCH_TEGRA || (ARM && COMPILE_TEST) 4 + select IOMMU_IOVA if IOMMU_SUPPORT 5 help 6 Driver for the NVIDIA Tegra host1x hardware. 7
+14
drivers/hwmon/coretemp.c
··· 605 struct platform_data *pdata; 606 607 /* 608 * CPUID.06H.EAX[0] indicates whether the CPU has thermal 609 * sensors. We check this bit only, all the early CPUs 610 * without thermal sensors will be filtered out. ··· 660 struct platform_data *pd; 661 struct temp_data *tdata; 662 int indx, target; 663 664 /* If the physical CPU device does not exist, just return */ 665 if (!pdev)
··· 605 struct platform_data *pdata; 606 607 /* 608 + * Don't execute this on resume as the offline callback did 609 + * not get executed on suspend. 610 + */ 611 + if (cpuhp_tasks_frozen) 612 + return 0; 613 + 614 + /* 615 * CPUID.06H.EAX[0] indicates whether the CPU has thermal 616 * sensors. We check this bit only, all the early CPUs 617 * without thermal sensors will be filtered out. ··· 653 struct platform_data *pd; 654 struct temp_data *tdata; 655 int indx, target; 656 + 657 + /* 658 + * Don't execute this on suspend as the device remove locks 659 + * up the machine. 660 + */ 661 + if (cpuhp_tasks_frozen) 662 + return 0; 663 664 /* If the physical CPU device does not exist, just return */ 665 if (!pdev)
+10 -8
drivers/i2c/busses/i2c-designware-platdrv.c
··· 96 struct dw_i2c_dev *dev = platform_get_drvdata(pdev); 97 acpi_handle handle = ACPI_HANDLE(&pdev->dev); 98 const struct acpi_device_id *id; 99 struct acpi_device *adev; 100 const char *uid; 101 ··· 108 * Try to get SDA hold time and *CNT values from an ACPI method for 109 * selected speed modes. 110 */ 111 switch (dev->clk_freq) { 112 case 100000: 113 - dw_i2c_acpi_params(pdev, "SSCN", &dev->ss_hcnt, &dev->ss_lcnt, 114 - &dev->sda_hold_time); 115 break; 116 case 1000000: 117 - dw_i2c_acpi_params(pdev, "FPCN", &dev->fp_hcnt, &dev->fp_lcnt, 118 - &dev->sda_hold_time); 119 break; 120 case 3400000: 121 - dw_i2c_acpi_params(pdev, "HSCN", &dev->hs_hcnt, &dev->hs_lcnt, 122 - &dev->sda_hold_time); 123 break; 124 case 400000: 125 default: 126 - dw_i2c_acpi_params(pdev, "FMCN", &dev->fs_hcnt, &dev->fs_lcnt, 127 - &dev->sda_hold_time); 128 break; 129 } 130
··· 96 struct dw_i2c_dev *dev = platform_get_drvdata(pdev); 97 acpi_handle handle = ACPI_HANDLE(&pdev->dev); 98 const struct acpi_device_id *id; 99 + u32 ss_ht, fp_ht, hs_ht, fs_ht; 100 struct acpi_device *adev; 101 const char *uid; 102 ··· 107 * Try to get SDA hold time and *CNT values from an ACPI method for 108 * selected speed modes. 109 */ 110 + dw_i2c_acpi_params(pdev, "SSCN", &dev->ss_hcnt, &dev->ss_lcnt, &ss_ht); 111 + dw_i2c_acpi_params(pdev, "FPCN", &dev->fp_hcnt, &dev->fp_lcnt, &fp_ht); 112 + dw_i2c_acpi_params(pdev, "HSCN", &dev->hs_hcnt, &dev->hs_lcnt, &hs_ht); 113 + dw_i2c_acpi_params(pdev, "FMCN", &dev->fs_hcnt, &dev->fs_lcnt, &fs_ht); 114 + 115 switch (dev->clk_freq) { 116 case 100000: 117 + dev->sda_hold_time = ss_ht; 118 break; 119 case 1000000: 120 + dev->sda_hold_time = fp_ht; 121 break; 122 case 3400000: 123 + dev->sda_hold_time = hs_ht; 124 break; 125 case 400000: 126 default: 127 + dev->sda_hold_time = fs_ht; 128 break; 129 } 130
+3 -3
drivers/i2c/busses/i2c-mv64xxx.c
··· 819 rc = -EINVAL; 820 goto out; 821 } 822 - drv_data->irq = irq_of_parse_and_map(np, 0); 823 824 drv_data->rstc = devm_reset_control_get_optional(dev, NULL); 825 if (IS_ERR(drv_data->rstc)) { ··· 901 if (!IS_ERR(drv_data->clk)) 902 clk_prepare_enable(drv_data->clk); 903 904 if (pdata) { 905 drv_data->freq_m = pdata->freq_m; 906 drv_data->freq_n = pdata->freq_n; 907 - drv_data->irq = platform_get_irq(pd, 0); 908 drv_data->adapter.timeout = msecs_to_jiffies(pdata->timeout); 909 drv_data->offload_enabled = false; 910 memcpy(&drv_data->reg_offsets, &mv64xxx_i2c_regs_mv64xxx, sizeof(drv_data->reg_offsets)); ··· 915 goto exit_clk; 916 } 917 if (drv_data->irq < 0) { 918 - rc = -ENXIO; 919 goto exit_reset; 920 } 921
··· 819 rc = -EINVAL; 820 goto out; 821 } 822 823 drv_data->rstc = devm_reset_control_get_optional(dev, NULL); 824 if (IS_ERR(drv_data->rstc)) { ··· 902 if (!IS_ERR(drv_data->clk)) 903 clk_prepare_enable(drv_data->clk); 904 905 + drv_data->irq = platform_get_irq(pd, 0); 906 + 907 if (pdata) { 908 drv_data->freq_m = pdata->freq_m; 909 drv_data->freq_n = pdata->freq_n; 910 drv_data->adapter.timeout = msecs_to_jiffies(pdata->timeout); 911 drv_data->offload_enabled = false; 912 memcpy(&drv_data->reg_offsets, &mv64xxx_i2c_regs_mv64xxx, sizeof(drv_data->reg_offsets)); ··· 915 goto exit_clk; 916 } 917 if (drv_data->irq < 0) { 918 + rc = drv_data->irq; 919 goto exit_reset; 920 } 921
+1
drivers/i2c/busses/i2c-xgene-slimpro.c
··· 416 adapter->class = I2C_CLASS_HWMON; 417 adapter->dev.parent = &pdev->dev; 418 adapter->dev.of_node = pdev->dev.of_node; 419 i2c_set_adapdata(adapter, ctx); 420 rc = i2c_add_adapter(adapter); 421 if (rc) {
··· 416 adapter->class = I2C_CLASS_HWMON; 417 adapter->dev.parent = &pdev->dev; 418 adapter->dev.of_node = pdev->dev.of_node; 419 + ACPI_COMPANION_SET(&adapter->dev, ACPI_COMPANION(&pdev->dev)); 420 i2c_set_adapdata(adapter, ctx); 421 rc = i2c_add_adapter(adapter); 422 if (rc) {
+16 -10
drivers/i2c/i2c-mux.c
··· 395 if (force_nr) { 396 priv->adap.nr = force_nr; 397 ret = i2c_add_numbered_adapter(&priv->adap); 398 - dev_err(&parent->dev, 399 - "failed to add mux-adapter %u as bus %u (error=%d)\n", 400 - chan_id, force_nr, ret); 401 } else { 402 ret = i2c_add_adapter(&priv->adap); 403 - dev_err(&parent->dev, 404 - "failed to add mux-adapter %u (error=%d)\n", 405 - chan_id, ret); 406 - } 407 - if (ret < 0) { 408 - kfree(priv); 409 - return ret; 410 } 411 412 WARN(sysfs_create_link(&priv->adap.dev.kobj, &muxc->dev->kobj, ··· 424 425 muxc->adapter[muxc->num_adapters++] = &priv->adap; 426 return 0; 427 } 428 EXPORT_SYMBOL_GPL(i2c_mux_add_adapter); 429
··· 395 if (force_nr) { 396 priv->adap.nr = force_nr; 397 ret = i2c_add_numbered_adapter(&priv->adap); 398 + if (ret < 0) { 399 + dev_err(&parent->dev, 400 + "failed to add mux-adapter %u as bus %u (error=%d)\n", 401 + chan_id, force_nr, ret); 402 + goto err_free_priv; 403 + } 404 } else { 405 ret = i2c_add_adapter(&priv->adap); 406 + if (ret < 0) { 407 + dev_err(&parent->dev, 408 + "failed to add mux-adapter %u (error=%d)\n", 409 + chan_id, ret); 410 + goto err_free_priv; 411 + } 412 } 413 414 WARN(sysfs_create_link(&priv->adap.dev.kobj, &muxc->dev->kobj, ··· 422 423 muxc->adapter[muxc->num_adapters++] = &priv->adap; 424 return 0; 425 + 426 + err_free_priv: 427 + kfree(priv); 428 + return ret; 429 } 430 EXPORT_SYMBOL_GPL(i2c_mux_add_adapter); 431
+14 -7
drivers/i2c/muxes/i2c-mux-reg.c
··· 196 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 197 mux->data.reg_size = resource_size(res); 198 mux->data.reg = devm_ioremap_resource(&pdev->dev, res); 199 - if (IS_ERR(mux->data.reg)) 200 - return PTR_ERR(mux->data.reg); 201 } 202 203 if (mux->data.reg_size != 4 && mux->data.reg_size != 2 && 204 mux->data.reg_size != 1) { 205 dev_err(&pdev->dev, "Invalid register size\n"); 206 - return -EINVAL; 207 } 208 209 muxc = i2c_mux_alloc(parent, &pdev->dev, mux->data.n_values, 0, 0, 210 i2c_mux_reg_select, NULL); 211 - if (!muxc) 212 - return -ENOMEM; 213 muxc->priv = mux; 214 215 platform_set_drvdata(pdev, muxc); ··· 228 229 ret = i2c_mux_add_adapter(muxc, nr, mux->data.values[i], class); 230 if (ret) 231 - goto add_adapter_failed; 232 } 233 234 dev_dbg(&pdev->dev, "%d port mux on %s adapter\n", ··· 236 237 return 0; 238 239 - add_adapter_failed: 240 i2c_mux_del_adapters(muxc); 241 242 return ret; 243 }
··· 196 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 197 mux->data.reg_size = resource_size(res); 198 mux->data.reg = devm_ioremap_resource(&pdev->dev, res); 199 + if (IS_ERR(mux->data.reg)) { 200 + ret = PTR_ERR(mux->data.reg); 201 + goto err_put_parent; 202 + } 203 } 204 205 if (mux->data.reg_size != 4 && mux->data.reg_size != 2 && 206 mux->data.reg_size != 1) { 207 dev_err(&pdev->dev, "Invalid register size\n"); 208 + ret = -EINVAL; 209 + goto err_put_parent; 210 } 211 212 muxc = i2c_mux_alloc(parent, &pdev->dev, mux->data.n_values, 0, 0, 213 i2c_mux_reg_select, NULL); 214 + if (!muxc) { 215 + ret = -ENOMEM; 216 + goto err_put_parent; 217 + } 218 muxc->priv = mux; 219 220 platform_set_drvdata(pdev, muxc); ··· 223 224 ret = i2c_mux_add_adapter(muxc, nr, mux->data.values[i], class); 225 if (ret) 226 + goto err_del_mux_adapters; 227 } 228 229 dev_dbg(&pdev->dev, "%d port mux on %s adapter\n", ··· 231 232 return 0; 233 234 + err_del_mux_adapters: 235 i2c_mux_del_adapters(muxc); 236 + err_put_parent: 237 + i2c_put_adapter(parent); 238 239 return ret; 240 }
+8 -5
drivers/iommu/dma-iommu.c
··· 396 dma_addr_t iova, size_t size) 397 { 398 struct iova_domain *iovad = &cookie->iovad; 399 - unsigned long shift = iova_shift(iovad); 400 401 /* The MSI case is only ever cleaning up its most recent allocation */ 402 if (cookie->type == IOMMU_DMA_MSI_COOKIE) 403 cookie->msi_iova -= size; 404 else 405 - free_iova_fast(iovad, iova >> shift, size >> shift); 406 } 407 408 static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr, ··· 617 { 618 struct iommu_domain *domain = iommu_get_domain_for_dev(dev); 619 struct iommu_dma_cookie *cookie = domain->iova_cookie; 620 - struct iova_domain *iovad = &cookie->iovad; 621 - size_t iova_off = iova_offset(iovad, phys); 622 dma_addr_t iova; 623 624 - size = iova_align(iovad, size + iova_off); 625 iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev); 626 if (!iova) 627 return DMA_ERROR_CODE;
··· 396 dma_addr_t iova, size_t size) 397 { 398 struct iova_domain *iovad = &cookie->iovad; 399 400 /* The MSI case is only ever cleaning up its most recent allocation */ 401 if (cookie->type == IOMMU_DMA_MSI_COOKIE) 402 cookie->msi_iova -= size; 403 else 404 + free_iova_fast(iovad, iova_pfn(iovad, iova), 405 + size >> iova_shift(iovad)); 406 } 407 408 static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr, ··· 617 { 618 struct iommu_domain *domain = iommu_get_domain_for_dev(dev); 619 struct iommu_dma_cookie *cookie = domain->iova_cookie; 620 + size_t iova_off = 0; 621 dma_addr_t iova; 622 623 + if (cookie->type == IOMMU_DMA_IOVA_COOKIE) { 624 + iova_off = iova_offset(&cookie->iovad, phys); 625 + size = iova_align(&cookie->iovad, size + iova_off); 626 + } 627 + 628 iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev); 629 if (!iova) 630 return DMA_ERROR_CODE;
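The dma-iommu.c fix frees by IOVA page-frame number and only applies the granule alignment when a real IOVA domain (not an MSI cookie) backs the mapping. A toy, userspace-only model of the granule arithmetic those helpers perform, assuming a 4 KiB power-of-two granule; the real iova_pfn()/iova_offset()/iova_align() operate on struct iova_domain, which is not reproduced here:

    /* Toy model of IOVA granule arithmetic, assumed 4 KiB granule. */
    #include <stdio.h>
    #include <stdint.h>

    #define GRANULE 4096ULL

    static unsigned shift(void)           { return 12; }              /* log2(GRANULE) */
    static uint64_t pfn(uint64_t iova)    { return iova >> shift(); }
    static uint64_t offset(uint64_t iova) { return iova & (GRANULE - 1); }
    static uint64_t align(uint64_t size)  { return (size + GRANULE - 1) & ~(GRANULE - 1); }

    int main(void)
    {
        uint64_t phys = 0x1234567;        /* arbitrary physical address */
        uint64_t size = 0x2300;
        uint64_t off    = offset(phys);
        uint64_t mapped = align(size + off);   /* what gets mapped, then freed */

        printf("pfn=%llu off=%llu mapped=%llu pages=%llu\n",
               (unsigned long long)pfn(phys), (unsigned long long)off,
               (unsigned long long)mapped, (unsigned long long)(mapped >> shift()));
        return 0;
    }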
+4 -1
drivers/iommu/intel-iommu.c
··· 2055 if (context_copied(context)) { 2056 u16 did_old = context_domain_id(context); 2057 2058 - if (did_old >= 0 && did_old < cap_ndoms(iommu->cap)) 2059 iommu->flush.flush_context(iommu, did_old, 2060 (((u16)bus) << 8) | devfn, 2061 DMA_CCMD_MASK_NOBIT, 2062 DMA_CCMD_DEVICE_INVL); 2063 } 2064 2065 pgd = domain->pgd;
··· 2055 if (context_copied(context)) { 2056 u16 did_old = context_domain_id(context); 2057 2058 + if (did_old >= 0 && did_old < cap_ndoms(iommu->cap)) { 2059 iommu->flush.flush_context(iommu, did_old, 2060 (((u16)bus) << 8) | devfn, 2061 DMA_CCMD_MASK_NOBIT, 2062 DMA_CCMD_DEVICE_INVL); 2063 + iommu->flush.flush_iotlb(iommu, did_old, 0, 0, 2064 + DMA_TLB_DSI_FLUSH); 2065 + } 2066 } 2067 2068 pgd = domain->pgd;
+1
drivers/iommu/mtk_iommu_v1.c
··· 18 #include <linux/clk.h> 19 #include <linux/component.h> 20 #include <linux/device.h> 21 #include <linux/dma-iommu.h> 22 #include <linux/err.h> 23 #include <linux/interrupt.h>
··· 18 #include <linux/clk.h> 19 #include <linux/component.h> 20 #include <linux/device.h> 21 + #include <linux/dma-mapping.h> 22 #include <linux/dma-iommu.h> 23 #include <linux/err.h> 24 #include <linux/interrupt.h>
+10 -7
drivers/irqchip/irq-mbigen.c
··· 106 static inline void get_mbigen_clear_reg(irq_hw_number_t hwirq, 107 u32 *mask, u32 *addr) 108 { 109 - unsigned int ofst; 110 - 111 - hwirq -= RESERVED_IRQ_PER_MBIGEN_CHIP; 112 - ofst = hwirq / 32 * 4; 113 114 *mask = 1 << (hwirq % 32); 115 *addr = ofst + REG_MBIGEN_CLEAR_OFFSET; ··· 334 mgn_chip->pdev = pdev; 335 336 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 337 - mgn_chip->base = devm_ioremap_resource(&pdev->dev, res); 338 - if (IS_ERR(mgn_chip->base)) 339 - return PTR_ERR(mgn_chip->base); 340 341 if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) 342 err = mbigen_of_create_domain(pdev, mgn_chip);
··· 106 static inline void get_mbigen_clear_reg(irq_hw_number_t hwirq, 107 u32 *mask, u32 *addr) 108 { 109 + unsigned int ofst = (hwirq / 32) * 4; 110 111 *mask = 1 << (hwirq % 32); 112 *addr = ofst + REG_MBIGEN_CLEAR_OFFSET; ··· 337 mgn_chip->pdev = pdev; 338 339 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 340 + if (!res) 341 + return -EINVAL; 342 + 343 + mgn_chip->base = devm_ioremap(&pdev->dev, res->start, 344 + resource_size(res)); 345 + if (!mgn_chip->base) { 346 + dev_err(&pdev->dev, "failed to ioremap %pR\n", res); 347 + return -ENOMEM; 348 + } 349 350 if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) 351 err = mbigen_of_create_domain(pdev, mgn_chip);
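The simplified get_mbigen_clear_reg() above boils down to locating the 32-bit register word and bit for a hardware IRQ. A standalone sketch of that arithmetic; CLEAR_BASE is a placeholder for REG_MBIGEN_CLEAR_OFFSET, whose value does not appear in this hunk:

    /* 32 interrupts per 32-bit register, 4 bytes per register. */
    #include <stdio.h>

    #define CLEAR_BASE 0x0  /* placeholder for REG_MBIGEN_CLEAR_OFFSET */

    static void clear_reg(unsigned int hwirq, unsigned int *mask, unsigned int *addr)
    {
        unsigned int ofst = (hwirq / 32) * 4;   /* byte offset of the word */

        *mask = 1u << (hwirq % 32);             /* bit within that word */
        *addr = ofst + CLEAR_BASE;
    }

    int main(void)
    {
        unsigned int mask, addr;

        clear_reg(70, &mask, &addr);            /* hwirq 70 -> word 2, bit 6 */
        printf("addr=0x%x mask=0x%x\n", addr, mask);
        return 0;
    }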
+8 -8
drivers/md/dm-bufio.c
··· 218 * Buffers are freed after this timeout 219 */ 220 static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS; 221 - static unsigned dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES; 222 223 static unsigned long dm_bufio_peak_allocated; 224 static unsigned long dm_bufio_allocated_kmem_cache; ··· 1558 return true; 1559 } 1560 1561 - static unsigned get_retain_buffers(struct dm_bufio_client *c) 1562 { 1563 - unsigned retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes); 1564 - return retain_bytes / c->block_size; 1565 } 1566 1567 static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan, ··· 1571 struct dm_buffer *b, *tmp; 1572 unsigned long freed = 0; 1573 unsigned long count = nr_to_scan; 1574 - unsigned retain_target = get_retain_buffers(c); 1575 1576 for (l = 0; l < LIST_SIZE; l++) { 1577 list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) { ··· 1794 static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz) 1795 { 1796 struct dm_buffer *b, *tmp; 1797 - unsigned retain_target = get_retain_buffers(c); 1798 - unsigned count; 1799 LIST_HEAD(write_list); 1800 1801 dm_bufio_lock(c); ··· 1955 module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR); 1956 MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds"); 1957 1958 - module_param_named(retain_bytes, dm_bufio_retain_bytes, uint, S_IRUGO | S_IWUSR); 1959 MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory"); 1960 1961 module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
··· 218 * Buffers are freed after this timeout 219 */ 220 static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS; 221 + static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES; 222 223 static unsigned long dm_bufio_peak_allocated; 224 static unsigned long dm_bufio_allocated_kmem_cache; ··· 1558 return true; 1559 } 1560 1561 + static unsigned long get_retain_buffers(struct dm_bufio_client *c) 1562 { 1563 + unsigned long retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes); 1564 + return retain_bytes >> (c->sectors_per_block_bits + SECTOR_SHIFT); 1565 } 1566 1567 static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan, ··· 1571 struct dm_buffer *b, *tmp; 1572 unsigned long freed = 0; 1573 unsigned long count = nr_to_scan; 1574 + unsigned long retain_target = get_retain_buffers(c); 1575 1576 for (l = 0; l < LIST_SIZE; l++) { 1577 list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) { ··· 1794 static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz) 1795 { 1796 struct dm_buffer *b, *tmp; 1797 + unsigned long retain_target = get_retain_buffers(c); 1798 + unsigned long count; 1799 LIST_HEAD(write_list); 1800 1801 dm_bufio_lock(c); ··· 1955 module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR); 1956 MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds"); 1957 1958 + module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, S_IRUGO | S_IWUSR); 1959 MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory"); 1960 1961 module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
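The dm-bufio change widens retain_bytes to unsigned long and derives the buffer count with a shift instead of a 32-bit division. A quick standalone check of that arithmetic, on the assumption (taken from dm-bufio) that the block size in bytes equals 1 << (sectors_per_block_bits + SECTOR_SHIFT) with SECTOR_SHIFT = 9:

    #include <stdio.h>
    #include <stdint.h>

    #define SECTOR_SHIFT 9

    int main(void)
    {
        /* 8 GiB would not fit in a 32-bit unsigned, hence the wider type. */
        uint64_t retain_bytes = 8ULL * 1024 * 1024 * 1024;
        unsigned sectors_per_block_bits = 3;   /* 8 sectors -> 4 KiB blocks */
        uint64_t block_size = 1ULL << (sectors_per_block_bits + SECTOR_SHIFT);

        uint64_t by_division = retain_bytes / block_size;
        uint64_t by_shift    = retain_bytes >> (sectors_per_block_bits + SECTOR_SHIFT);

        printf("block_size=%llu buffers=%llu shift_gives=%llu\n",
               (unsigned long long)block_size,
               (unsigned long long)by_division,
               (unsigned long long)by_shift);
        return 0;
    }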
+5
drivers/md/dm-cache-background-tracker.c
··· 33 { 34 struct background_tracker *b = kmalloc(sizeof(*b), GFP_KERNEL); 35 36 b->max_work = max_work; 37 atomic_set(&b->pending_promotes, 0); 38 atomic_set(&b->pending_writebacks, 0);
··· 33 { 34 struct background_tracker *b = kmalloc(sizeof(*b), GFP_KERNEL); 35 36 + if (!b) { 37 + DMERR("couldn't create background_tracker"); 38 + return NULL; 39 + } 40 + 41 b->max_work = max_work; 42 atomic_set(&b->pending_promotes, 0); 43 atomic_set(&b->pending_writebacks, 0);
+12 -19
drivers/md/dm-cache-policy-smq.c
··· 1120 * Cache entries may not be populated. So we cannot rely on the 1121 * size of the clean queue. 1122 */ 1123 - unsigned nr_clean; 1124 - 1125 if (idle) { 1126 /* 1127 * We'd like to clean everything. ··· 1127 return q_size(&mq->dirty) == 0u; 1128 } 1129 1130 - nr_clean = from_cblock(mq->cache_size) - q_size(&mq->dirty); 1131 - return (nr_clean + btracker_nr_writebacks_queued(mq->bg_work)) >= 1132 - percent_to_target(mq, CLEAN_TARGET); 1133 } 1134 1135 - static bool free_target_met(struct smq_policy *mq, bool idle) 1136 { 1137 unsigned nr_free; 1138 - 1139 - if (!idle) 1140 - return true; 1141 1142 nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated; 1143 return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >= ··· 1186 if (unlikely(WARN_ON_ONCE(!mq->migrations_allowed))) 1187 return; 1188 1189 - e = q_peek(&mq->clean, mq->clean.nr_levels, true); 1190 if (!e) { 1191 - if (!clean_target_met(mq, false)) 1192 queue_writeback(mq); 1193 return; 1194 } ··· 1216 * We always claim to be 'idle' to ensure some demotions happen 1217 * with continuous loads. 1218 */ 1219 - if (!free_target_met(mq, true)) 1220 queue_demotion(mq); 1221 return; 1222 } ··· 1417 spin_lock_irqsave(&mq->lock, flags); 1418 r = btracker_issue(mq->bg_work, result); 1419 if (r == -ENODATA) { 1420 - /* find some writeback work to do */ 1421 - if (mq->migrations_allowed && !free_target_met(mq, idle)) 1422 - queue_demotion(mq); 1423 - 1424 - else if (!clean_target_met(mq, idle)) 1425 queue_writeback(mq); 1426 - 1427 - r = btracker_issue(mq->bg_work, result); 1428 } 1429 spin_unlock_irqrestore(&mq->lock, flags); 1430 ··· 1444 clear_pending(mq, e); 1445 if (success) { 1446 e->oblock = work->oblock; 1447 push(mq, e); 1448 // h, q, a 1449 } else {
··· 1120 * Cache entries may not be populated. So we cannot rely on the 1121 * size of the clean queue. 1122 */ 1123 if (idle) { 1124 /* 1125 * We'd like to clean everything. ··· 1129 return q_size(&mq->dirty) == 0u; 1130 } 1131 1132 + /* 1133 + * If we're busy we don't worry about cleaning at all. 1134 + */ 1135 + return true; 1136 } 1137 1138 + static bool free_target_met(struct smq_policy *mq) 1139 { 1140 unsigned nr_free; 1141 1142 nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated; 1143 return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >= ··· 1190 if (unlikely(WARN_ON_ONCE(!mq->migrations_allowed))) 1191 return; 1192 1193 + e = q_peek(&mq->clean, mq->clean.nr_levels / 2, true); 1194 if (!e) { 1195 + if (!clean_target_met(mq, true)) 1196 queue_writeback(mq); 1197 return; 1198 } ··· 1220 * We always claim to be 'idle' to ensure some demotions happen 1221 * with continuous loads. 1222 */ 1223 + if (!free_target_met(mq)) 1224 queue_demotion(mq); 1225 return; 1226 } ··· 1421 spin_lock_irqsave(&mq->lock, flags); 1422 r = btracker_issue(mq->bg_work, result); 1423 if (r == -ENODATA) { 1424 + if (!clean_target_met(mq, idle)) { 1425 queue_writeback(mq); 1426 + r = btracker_issue(mq->bg_work, result); 1427 + } 1428 } 1429 spin_unlock_irqrestore(&mq->lock, flags); 1430 ··· 1452 clear_pending(mq, e); 1453 if (success) { 1454 e->oblock = work->oblock; 1455 + e->level = NR_CACHE_LEVELS - 1; 1456 push(mq, e); 1457 // h, q, a 1458 } else {
+13 -14
drivers/md/dm-cache-target.c
··· 94 95 static void __iot_io_end(struct io_tracker *iot, sector_t len) 96 { 97 iot->in_flight -= len; 98 if (!iot->in_flight) 99 iot->idle_time = jiffies; ··· 477 spinlock_t invalidation_lock; 478 struct list_head invalidation_requests; 479 480 - struct io_tracker origin_tracker; 481 482 struct work_struct commit_ws; 483 struct batcher committer; ··· 904 905 static bool accountable_bio(struct cache *cache, struct bio *bio) 906 { 907 - return ((bio->bi_bdev == cache->origin_dev->bdev) && 908 - bio_op(bio) != REQ_OP_DISCARD); 909 } 910 911 static void accounted_begin(struct cache *cache, struct bio *bio) ··· 914 915 if (accountable_bio(cache, bio)) { 916 pb->len = bio_sectors(bio); 917 - iot_io_begin(&cache->origin_tracker, pb->len); 918 } 919 } 920 ··· 923 size_t pb_data_size = get_per_bio_data_size(cache); 924 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); 925 926 - iot_io_end(&cache->origin_tracker, pb->len); 927 } 928 929 static void accounted_request(struct cache *cache, struct bio *bio) ··· 1718 1719 enum busy { 1720 IDLE, 1721 - MODERATE, 1722 BUSY 1723 }; 1724 1725 static enum busy spare_migration_bandwidth(struct cache *cache) 1726 { 1727 - bool idle = iot_idle_for(&cache->origin_tracker, HZ); 1728 sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) * 1729 cache->sectors_per_block; 1730 1731 - if (current_volume <= cache->migration_threshold) 1732 - return idle ? IDLE : MODERATE; 1733 else 1734 - return idle ? MODERATE : BUSY; 1735 } 1736 1737 static void inc_hit_counter(struct cache *cache, struct bio *bio) ··· 2046 2047 for (;;) { 2048 b = spare_migration_bandwidth(cache); 2049 - if (b == BUSY) 2050 - break; 2051 2052 r = policy_get_background_work(cache->policy, b == IDLE, &op); 2053 if (r == -ENODATA) ··· 2716 2717 batcher_init(&cache->committer, commit_op, cache, 2718 issue_op, cache, cache->wq); 2719 - iot_init(&cache->origin_tracker); 2720 2721 init_rwsem(&cache->background_work_lock); 2722 prevent_background_work(cache); ··· 2940 2941 cancel_delayed_work(&cache->waker); 2942 flush_workqueue(cache->wq); 2943 - WARN_ON(cache->origin_tracker.in_flight); 2944 2945 /* 2946 * If it's a flush suspend there won't be any deferred bios, so this
··· 94 95 static void __iot_io_end(struct io_tracker *iot, sector_t len) 96 { 97 + if (!len) 98 + return; 99 + 100 iot->in_flight -= len; 101 if (!iot->in_flight) 102 iot->idle_time = jiffies; ··· 474 spinlock_t invalidation_lock; 475 struct list_head invalidation_requests; 476 477 + struct io_tracker tracker; 478 479 struct work_struct commit_ws; 480 struct batcher committer; ··· 901 902 static bool accountable_bio(struct cache *cache, struct bio *bio) 903 { 904 + return bio_op(bio) != REQ_OP_DISCARD; 905 } 906 907 static void accounted_begin(struct cache *cache, struct bio *bio) ··· 912 913 if (accountable_bio(cache, bio)) { 914 pb->len = bio_sectors(bio); 915 + iot_io_begin(&cache->tracker, pb->len); 916 } 917 } 918 ··· 921 size_t pb_data_size = get_per_bio_data_size(cache); 922 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); 923 924 + iot_io_end(&cache->tracker, pb->len); 925 } 926 927 static void accounted_request(struct cache *cache, struct bio *bio) ··· 1716 1717 enum busy { 1718 IDLE, 1719 BUSY 1720 }; 1721 1722 static enum busy spare_migration_bandwidth(struct cache *cache) 1723 { 1724 + bool idle = iot_idle_for(&cache->tracker, HZ); 1725 sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) * 1726 cache->sectors_per_block; 1727 1728 + if (idle && current_volume <= cache->migration_threshold) 1729 + return IDLE; 1730 else 1731 + return BUSY; 1732 } 1733 1734 static void inc_hit_counter(struct cache *cache, struct bio *bio) ··· 2045 2046 for (;;) { 2047 b = spare_migration_bandwidth(cache); 2048 2049 r = policy_get_background_work(cache->policy, b == IDLE, &op); 2050 if (r == -ENODATA) ··· 2717 2718 batcher_init(&cache->committer, commit_op, cache, 2719 issue_op, cache, cache->wq); 2720 + iot_init(&cache->tracker); 2721 2722 init_rwsem(&cache->background_work_lock); 2723 prevent_background_work(cache); ··· 2941 2942 cancel_delayed_work(&cache->waker); 2943 flush_workqueue(cache->wq); 2944 + WARN_ON(cache->tracker.in_flight); 2945 2946 /* 2947 * If it's a flush suspend there won't be any deferred bios, so this
+11 -8
drivers/md/dm-mpath.c
··· 447 * it has been invoked. 448 */ 449 #define dm_report_EIO(m) \ 450 - ({ \ 451 struct mapped_device *md = dm_table_get_md((m)->ti->table); \ 452 \ 453 pr_debug("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d\n", \ ··· 455 test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags), \ 456 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags), \ 457 dm_noflush_suspending((m)->ti)); \ 458 - -EIO; \ 459 - }) 460 461 /* 462 * Map cloned requests (request-based multipath) ··· 480 if (!pgpath) { 481 if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) 482 return DM_MAPIO_DELAY_REQUEUE; 483 - return dm_report_EIO(m); /* Failed */ 484 } else if (test_bit(MPATHF_QUEUE_IO, &m->flags) || 485 test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) { 486 if (pg_init_all_paths(m)) ··· 558 if (!pgpath) { 559 if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) 560 return DM_MAPIO_REQUEUE; 561 - return dm_report_EIO(m); 562 } 563 564 mpio->pgpath = pgpath; ··· 1494 if (atomic_read(&m->nr_valid_paths) == 0 && 1495 !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) { 1496 if (error == -EIO) 1497 - error = dm_report_EIO(m); 1498 /* complete with the original error */ 1499 r = DM_ENDIO_DONE; 1500 } ··· 1525 fail_path(mpio->pgpath); 1526 1527 if (atomic_read(&m->nr_valid_paths) == 0 && 1528 - !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) 1529 - return dm_report_EIO(m); 1530 1531 /* Queue for the daemon to resubmit */ 1532 dm_bio_restore(get_bio_details_from_bio(clone), clone);
··· 447 * it has been invoked. 448 */ 449 #define dm_report_EIO(m) \ 450 + do { \ 451 struct mapped_device *md = dm_table_get_md((m)->ti->table); \ 452 \ 453 pr_debug("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d\n", \ ··· 455 test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags), \ 456 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags), \ 457 dm_noflush_suspending((m)->ti)); \ 458 + } while (0) 459 460 /* 461 * Map cloned requests (request-based multipath) ··· 481 if (!pgpath) { 482 if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) 483 return DM_MAPIO_DELAY_REQUEUE; 484 + dm_report_EIO(m); /* Failed */ 485 + return DM_MAPIO_KILL; 486 } else if (test_bit(MPATHF_QUEUE_IO, &m->flags) || 487 test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) { 488 if (pg_init_all_paths(m)) ··· 558 if (!pgpath) { 559 if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) 560 return DM_MAPIO_REQUEUE; 561 + dm_report_EIO(m); 562 + return -EIO; 563 } 564 565 mpio->pgpath = pgpath; ··· 1493 if (atomic_read(&m->nr_valid_paths) == 0 && 1494 !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) { 1495 if (error == -EIO) 1496 + dm_report_EIO(m); 1497 /* complete with the original error */ 1498 r = DM_ENDIO_DONE; 1499 } ··· 1524 fail_path(mpio->pgpath); 1525 1526 if (atomic_read(&m->nr_valid_paths) == 0 && 1527 + !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) { 1528 + dm_report_EIO(m); 1529 + return -EIO; 1530 + } 1531 1532 /* Queue for the daemon to resubmit */ 1533 dm_bio_restore(get_bio_details_from_bio(clone), clone);
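With the -EIO value moved out of it, dm_report_EIO() becomes a multi-statement macro and is wrapped in do { ... } while (0). A tiny, purely illustrative example (unrelated to the dm code) of why that wrapper matters when such a macro sits under an unbraced if/else:

    #include <stdio.h>

    /* A brace-block macro like this breaks under 'if (x) MACRO(); else ...':
     * the trailing ';' ends the if early and orphans the else. */
    #define REPORT_BAD(x)   { printf("first %d\n", (x)); printf("second %d\n", (x)); }

    #define REPORT_GOOD(x)  do { \
            printf("first %d\n", (x)); \
            printf("second %d\n", (x)); \
        } while (0)

    int main(void)
    {
        int err = 1;

        if (err)
            REPORT_GOOD(err);   /* expands to one statement, ';' is fine */
        else
            printf("no error\n");
        return 0;
    }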
+1
drivers/md/dm-rq.c
··· 507 case DM_MAPIO_KILL: 508 /* The target wants to complete the I/O */ 509 dm_kill_unmapped_request(rq, -EIO); 510 default: 511 DMWARN("unimplemented target map return value: %d", r); 512 BUG();
··· 507 case DM_MAPIO_KILL: 508 /* The target wants to complete the I/O */ 509 dm_kill_unmapped_request(rq, -EIO); 510 + break; 511 default: 512 DMWARN("unimplemented target map return value: %d", r); 513 BUG();
+2 -2
drivers/md/dm-thin-metadata.c
··· 484 if (r < 0) 485 return r; 486 487 - r = save_sm_roots(pmd); 488 if (r < 0) 489 return r; 490 491 - r = dm_tm_pre_commit(pmd->tm); 492 if (r < 0) 493 return r; 494
··· 484 if (r < 0) 485 return r; 486 487 + r = dm_tm_pre_commit(pmd->tm); 488 if (r < 0) 489 return r; 490 491 + r = save_sm_roots(pmd); 492 if (r < 0) 493 return r; 494
+8 -12
drivers/md/md.c
··· 8022 * may proceed without blocking. It is important to call this before 8023 * attempting a GFP_KERNEL allocation while holding the mddev lock. 8024 * Must be called with mddev_lock held. 8025 - * 8026 - * In the ->external case MD_SB_CHANGE_PENDING can not be cleared until mddev->lock 8027 - * is dropped, so return -EAGAIN after notifying userspace. 8028 */ 8029 - int md_allow_write(struct mddev *mddev) 8030 { 8031 if (!mddev->pers) 8032 - return 0; 8033 if (mddev->ro) 8034 - return 0; 8035 if (!mddev->pers->sync_request) 8036 - return 0; 8037 8038 spin_lock(&mddev->lock); 8039 if (mddev->in_sync) { ··· 8043 spin_unlock(&mddev->lock); 8044 md_update_sb(mddev, 0); 8045 sysfs_notify_dirent_safe(mddev->sysfs_state); 8046 } else 8047 spin_unlock(&mddev->lock); 8048 - 8049 - if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) 8050 - return -EAGAIN; 8051 - else 8052 - return 0; 8053 } 8054 EXPORT_SYMBOL_GPL(md_allow_write); 8055
··· 8022 * may proceed without blocking. It is important to call this before 8023 * attempting a GFP_KERNEL allocation while holding the mddev lock. 8024 * Must be called with mddev_lock held. 8025 */ 8026 + void md_allow_write(struct mddev *mddev) 8027 { 8028 if (!mddev->pers) 8029 + return; 8030 if (mddev->ro) 8031 + return; 8032 if (!mddev->pers->sync_request) 8033 + return; 8034 8035 spin_lock(&mddev->lock); 8036 if (mddev->in_sync) { ··· 8046 spin_unlock(&mddev->lock); 8047 md_update_sb(mddev, 0); 8048 sysfs_notify_dirent_safe(mddev->sysfs_state); 8049 + /* wait for the dirty state to be recorded in the metadata */ 8050 + wait_event(mddev->sb_wait, 8051 + !test_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags) && 8052 + !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); 8053 } else 8054 spin_unlock(&mddev->lock); 8055 } 8056 EXPORT_SYMBOL_GPL(md_allow_write); 8057
+1 -1
drivers/md/md.h
··· 665 bool metadata_op); 666 extern void md_do_sync(struct md_thread *thread); 667 extern void md_new_event(struct mddev *mddev); 668 - extern int md_allow_write(struct mddev *mddev); 669 extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev); 670 extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors); 671 extern int md_check_no_bitmap(struct mddev *mddev);
··· 665 bool metadata_op); 666 extern void md_do_sync(struct md_thread *thread); 667 extern void md_new_event(struct mddev *mddev); 668 + extern void md_allow_write(struct mddev *mddev); 669 extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev); 670 extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors); 671 extern int md_check_no_bitmap(struct mddev *mddev);
+14 -1
drivers/md/persistent-data/dm-space-map-disk.c
··· 142 143 static int sm_disk_dec_block(struct dm_space_map *sm, dm_block_t b) 144 { 145 enum allocation_event ev; 146 struct sm_disk *smd = container_of(sm, struct sm_disk, sm); 147 148 - return sm_ll_dec(&smd->ll, b, &ev); 149 } 150 151 static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)
··· 142 143 static int sm_disk_dec_block(struct dm_space_map *sm, dm_block_t b) 144 { 145 + int r; 146 + uint32_t old_count; 147 enum allocation_event ev; 148 struct sm_disk *smd = container_of(sm, struct sm_disk, sm); 149 150 + r = sm_ll_dec(&smd->ll, b, &ev); 151 + if (!r && (ev == SM_FREE)) { 152 + /* 153 + * It's only free if it's also free in the last 154 + * transaction. 155 + */ 156 + r = sm_ll_lookup(&smd->old_ll, b, &old_count); 157 + if (!r && !old_count) 158 + smd->nr_allocated_this_transaction--; 159 + } 160 + 161 + return r; 162 } 163 164 static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)
+102 -14
drivers/md/raid0.c
··· 385 blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors); 386 blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors); 387 blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors); 388 - blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors); 389 390 blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9); 391 blk_queue_io_opt(mddev->queue, ··· 459 } 460 } 461 462 static void raid0_make_request(struct mddev *mddev, struct bio *bio) 463 { 464 struct strip_zone *zone; ··· 559 560 if (unlikely(bio->bi_opf & REQ_PREFLUSH)) { 561 md_flush_request(mddev, bio); 562 return; 563 } 564 ··· 592 bio->bi_iter.bi_sector = sector + zone->dev_start + 593 tmp_dev->data_offset; 594 595 - if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && 596 - !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) { 597 - /* Just ignore it */ 598 - bio_endio(bio); 599 - } else { 600 - if (mddev->gendisk) 601 - trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), 602 - bio, disk_devt(mddev->gendisk), 603 - bio_sector); 604 - mddev_check_writesame(mddev, bio); 605 - mddev_check_write_zeroes(mddev, bio); 606 - generic_make_request(bio); 607 - } 608 } 609 610 static void raid0_status(struct seq_file *seq, struct mddev *mddev)
··· 385 blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors); 386 blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors); 387 blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors); 388 + blk_queue_max_discard_sectors(mddev->queue, UINT_MAX); 389 390 blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9); 391 blk_queue_io_opt(mddev->queue, ··· 459 } 460 } 461 462 + static void raid0_handle_discard(struct mddev *mddev, struct bio *bio) 463 + { 464 + struct r0conf *conf = mddev->private; 465 + struct strip_zone *zone; 466 + sector_t start = bio->bi_iter.bi_sector; 467 + sector_t end; 468 + unsigned int stripe_size; 469 + sector_t first_stripe_index, last_stripe_index; 470 + sector_t start_disk_offset; 471 + unsigned int start_disk_index; 472 + sector_t end_disk_offset; 473 + unsigned int end_disk_index; 474 + unsigned int disk; 475 + 476 + zone = find_zone(conf, &start); 477 + 478 + if (bio_end_sector(bio) > zone->zone_end) { 479 + struct bio *split = bio_split(bio, 480 + zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO, 481 + mddev->bio_set); 482 + bio_chain(split, bio); 483 + generic_make_request(bio); 484 + bio = split; 485 + end = zone->zone_end; 486 + } else 487 + end = bio_end_sector(bio); 488 + 489 + if (zone != conf->strip_zone) 490 + end = end - zone[-1].zone_end; 491 + 492 + /* Now start and end is the offset in zone */ 493 + stripe_size = zone->nb_dev * mddev->chunk_sectors; 494 + 495 + first_stripe_index = start; 496 + sector_div(first_stripe_index, stripe_size); 497 + last_stripe_index = end; 498 + sector_div(last_stripe_index, stripe_size); 499 + 500 + start_disk_index = (int)(start - first_stripe_index * stripe_size) / 501 + mddev->chunk_sectors; 502 + start_disk_offset = ((int)(start - first_stripe_index * stripe_size) % 503 + mddev->chunk_sectors) + 504 + first_stripe_index * mddev->chunk_sectors; 505 + end_disk_index = (int)(end - last_stripe_index * stripe_size) / 506 + mddev->chunk_sectors; 507 + end_disk_offset = ((int)(end - last_stripe_index * stripe_size) % 508 + mddev->chunk_sectors) + 509 + last_stripe_index * mddev->chunk_sectors; 510 + 511 + for (disk = 0; disk < zone->nb_dev; disk++) { 512 + sector_t dev_start, dev_end; 513 + struct bio *discard_bio = NULL; 514 + struct md_rdev *rdev; 515 + 516 + if (disk < start_disk_index) 517 + dev_start = (first_stripe_index + 1) * 518 + mddev->chunk_sectors; 519 + else if (disk > start_disk_index) 520 + dev_start = first_stripe_index * mddev->chunk_sectors; 521 + else 522 + dev_start = start_disk_offset; 523 + 524 + if (disk < end_disk_index) 525 + dev_end = (last_stripe_index + 1) * mddev->chunk_sectors; 526 + else if (disk > end_disk_index) 527 + dev_end = last_stripe_index * mddev->chunk_sectors; 528 + else 529 + dev_end = end_disk_offset; 530 + 531 + if (dev_end <= dev_start) 532 + continue; 533 + 534 + rdev = conf->devlist[(zone - conf->strip_zone) * 535 + conf->strip_zone[0].nb_dev + disk]; 536 + if (__blkdev_issue_discard(rdev->bdev, 537 + dev_start + zone->dev_start + rdev->data_offset, 538 + dev_end - dev_start, GFP_NOIO, 0, &discard_bio) || 539 + !discard_bio) 540 + continue; 541 + bio_chain(discard_bio, bio); 542 + if (mddev->gendisk) 543 + trace_block_bio_remap(bdev_get_queue(rdev->bdev), 544 + discard_bio, disk_devt(mddev->gendisk), 545 + bio->bi_iter.bi_sector); 546 + generic_make_request(discard_bio); 547 + } 548 + bio_endio(bio); 549 + } 550 + 551 static void raid0_make_request(struct mddev *mddev, struct bio *bio) 552 { 553 struct strip_zone *zone; ··· 470 471 if (unlikely(bio->bi_opf & REQ_PREFLUSH)) { 472 md_flush_request(mddev, bio); 473 + return; 474 + } 475 + 476 + if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) { 477 + raid0_handle_discard(mddev, bio); 478 return; 479 } 480 ··· 498 bio->bi_iter.bi_sector = sector + zone->dev_start + 499 tmp_dev->data_offset; 500 501 + if (mddev->gendisk) 502 + trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), 503 + bio, disk_devt(mddev->gendisk), 504 + bio_sector); 505 + mddev_check_writesame(mddev, bio); 506 + mddev_check_write_zeroes(mddev, bio); 507 + generic_make_request(bio); 508 } 509 510 static void raid0_status(struct seq_file *seq, struct mddev *mddev)
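The new raid0_handle_discard() above splits one large discard into a per-device range across the chunk columns of a zone. A single-zone, userspace sketch of the same index/offset arithmetic with made-up geometry (4 disks, 128-sector chunks), showing where a discard of zone sectors [300, 1500) lands on each member:

    #include <stdio.h>

    int main(void)
    {
        const unsigned long long chunk = 128, nb_dev = 4;
        const unsigned long long stripe_size = nb_dev * chunk;
        unsigned long long start = 300, end = 1500;   /* offsets within the zone */
        unsigned long long disk;

        unsigned long long first_stripe = start / stripe_size;
        unsigned long long last_stripe  = end / stripe_size;
        unsigned long long start_disk = (start - first_stripe * stripe_size) / chunk;
        unsigned long long start_off  = (start - first_stripe * stripe_size) % chunk
                                        + first_stripe * chunk;
        unsigned long long end_disk   = (end - last_stripe * stripe_size) / chunk;
        unsigned long long end_off    = (end - last_stripe * stripe_size) % chunk
                                        + last_stripe * chunk;

        for (disk = 0; disk < nb_dev; disk++) {
            unsigned long long dev_start, dev_end;

            if (disk < start_disk)
                dev_start = (first_stripe + 1) * chunk;
            else if (disk > start_disk)
                dev_start = first_stripe * chunk;
            else
                dev_start = start_off;

            if (disk < end_disk)
                dev_end = (last_stripe + 1) * chunk;
            else if (disk > end_disk)
                dev_end = last_stripe * chunk;
            else
                dev_end = end_off;

            if (dev_end <= dev_start)
                continue;               /* nothing to discard on this member */
            printf("disk %llu: discard member sectors [%llu, %llu)\n",
                   disk, dev_start, dev_end);
        }
        return 0;
    }

Under those made-up parameters, disks 0 and 1 get [128, 384), disk 2 gets [44, 384) and disk 3 gets [0, 348), which is what the formulas in the hunk compute.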
+10 -11
drivers/md/raid1.c
··· 666 break; 667 } 668 continue; 669 - } else 670 best_good_sectors = sectors; 671 672 if (best_disk >= 0) 673 /* At least two disks to choose from so failfast is OK */ ··· 1532 plug = container_of(cb, struct raid1_plug_cb, cb); 1533 else 1534 plug = NULL; 1535 - spin_lock_irqsave(&conf->device_lock, flags); 1536 if (plug) { 1537 bio_list_add(&plug->pending, mbio); 1538 plug->pending_cnt++; 1539 } else { 1540 bio_list_add(&conf->pending_bio_list, mbio); 1541 conf->pending_count++; 1542 - } 1543 - spin_unlock_irqrestore(&conf->device_lock, flags); 1544 - if (!plug) 1545 md_wakeup_thread(mddev->thread); 1546 } 1547 1548 r1_bio_write_done(r1_bio); ··· 3199 struct r1conf *conf = mddev->private; 3200 int cnt, raid_disks; 3201 unsigned long flags; 3202 - int d, d2, err; 3203 3204 /* Cannot change chunk_size, layout, or level */ 3205 if (mddev->chunk_sectors != mddev->new_chunk_sectors || ··· 3211 return -EINVAL; 3212 } 3213 3214 - if (!mddev_is_clustered(mddev)) { 3215 - err = md_allow_write(mddev); 3216 - if (err) 3217 - return err; 3218 - } 3219 3220 raid_disks = mddev->raid_disks + mddev->delta_disks; 3221
··· 666 break; 667 } 668 continue; 669 + } else { 670 + if ((sectors > best_good_sectors) && (best_disk >= 0)) 671 + best_disk = -1; 672 best_good_sectors = sectors; 673 + } 674 675 if (best_disk >= 0) 676 /* At least two disks to choose from so failfast is OK */ ··· 1529 plug = container_of(cb, struct raid1_plug_cb, cb); 1530 else 1531 plug = NULL; 1532 if (plug) { 1533 bio_list_add(&plug->pending, mbio); 1534 plug->pending_cnt++; 1535 } else { 1536 + spin_lock_irqsave(&conf->device_lock, flags); 1537 bio_list_add(&conf->pending_bio_list, mbio); 1538 conf->pending_count++; 1539 + spin_unlock_irqrestore(&conf->device_lock, flags); 1540 md_wakeup_thread(mddev->thread); 1541 + } 1542 } 1543 1544 r1_bio_write_done(r1_bio); ··· 3197 struct r1conf *conf = mddev->private; 3198 int cnt, raid_disks; 3199 unsigned long flags; 3200 + int d, d2; 3201 3202 /* Cannot change chunk_size, layout, or level */ 3203 if (mddev->chunk_sectors != mddev->new_chunk_sectors || ··· 3209 return -EINVAL; 3210 } 3211 3212 + if (!mddev_is_clustered(mddev)) 3213 + md_allow_write(mddev); 3214 3215 raid_disks = mddev->raid_disks + mddev->delta_disks; 3216
+3 -4
drivers/md/raid10.c
··· 1282 plug = container_of(cb, struct raid10_plug_cb, cb); 1283 else 1284 plug = NULL; 1285 - spin_lock_irqsave(&conf->device_lock, flags); 1286 if (plug) { 1287 bio_list_add(&plug->pending, mbio); 1288 plug->pending_cnt++; 1289 } else { 1290 bio_list_add(&conf->pending_bio_list, mbio); 1291 conf->pending_count++; 1292 - } 1293 - spin_unlock_irqrestore(&conf->device_lock, flags); 1294 - if (!plug) 1295 md_wakeup_thread(mddev->thread); 1296 } 1297 1298 static void raid10_write_request(struct mddev *mddev, struct bio *bio,
··· 1282 plug = container_of(cb, struct raid10_plug_cb, cb); 1283 else 1284 plug = NULL; 1285 if (plug) { 1286 bio_list_add(&plug->pending, mbio); 1287 plug->pending_cnt++; 1288 } else { 1289 + spin_lock_irqsave(&conf->device_lock, flags); 1290 bio_list_add(&conf->pending_bio_list, mbio); 1291 conf->pending_count++; 1292 + spin_unlock_irqrestore(&conf->device_lock, flags); 1293 md_wakeup_thread(mddev->thread); 1294 + } 1295 } 1296 1297 static void raid10_write_request(struct mddev *mddev, struct bio *bio,
+35 -12
drivers/md/raid5-cache.c
··· 24 #include "md.h" 25 #include "raid5.h" 26 #include "bitmap.h" 27 28 /* 29 * metadata/data stored in disk with 4k size unit (a block) regardless ··· 623 __r5l_set_io_unit_state(io, IO_UNIT_IO_START); 624 spin_unlock_irqrestore(&log->io_list_lock, flags); 625 626 if (io->has_flush) 627 io->current_bio->bi_opf |= REQ_PREFLUSH; 628 if (io->has_fua) 629 io->current_bio->bi_opf |= REQ_FUA; 630 submit_bio(io->current_bio); 631 - 632 - if (!io->split_bio) 633 - return; 634 - 635 - if (io->has_flush) 636 - io->split_bio->bi_opf |= REQ_PREFLUSH; 637 - if (io->has_fua) 638 - io->split_bio->bi_opf |= REQ_FUA; 639 - submit_bio(io->split_bio); 640 } 641 642 /* deferred io_unit will be dispatched here */ ··· 681 return; 682 pr_info("md/raid:%s: Disabling writeback cache for degraded array.\n", 683 mdname(mddev)); 684 mddev_suspend(mddev); 685 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH; 686 mddev_resume(mddev); ··· 2637 * When run in degraded mode, array is set to write-through mode. 2638 * This check helps drain pending write safely in the transition to 2639 * write-through mode. 2640 */ 2641 - if (s->failed) { 2642 r5c_make_stripe_write_out(sh); 2643 return -EAGAIN; 2644 } ··· 2844 } 2845 2846 r5l_append_flush_payload(log, sh->sector); 2847 } 2848 2849 int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh) ··· 2995 return ret; 2996 } 2997 2998 - void r5c_update_on_rdev_error(struct mddev *mddev) 2999 { 3000 struct r5conf *conf = mddev->private; 3001 struct r5l_log *log = conf->log; ··· 3003 if (!log) 3004 return; 3005 3006 - if (raid5_calc_degraded(conf) > 0 && 3007 conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK) 3008 schedule_work(&log->disable_writeback_work); 3009 }
··· 24 #include "md.h" 25 #include "raid5.h" 26 #include "bitmap.h" 27 + #include "raid5-log.h" 28 29 /* 30 * metadata/data stored in disk with 4k size unit (a block) regardless ··· 622 __r5l_set_io_unit_state(io, IO_UNIT_IO_START); 623 spin_unlock_irqrestore(&log->io_list_lock, flags); 624 625 + /* 626 + * In case of journal device failures, submit_bio will get error 627 + * and calls endio, then active stripes will continue write 628 + * process. Therefore, it is not necessary to check Faulty bit 629 + * of journal device here. 630 + * 631 + * We can't check split_bio after current_bio is submitted. If 632 + * io->split_bio is null, after current_bio is submitted, current_bio 633 + * might already be completed and the io_unit is freed. We submit 634 + * split_bio first to avoid the issue. 635 + */ 636 + if (io->split_bio) { 637 + if (io->has_flush) 638 + io->split_bio->bi_opf |= REQ_PREFLUSH; 639 + if (io->has_fua) 640 + io->split_bio->bi_opf |= REQ_FUA; 641 + submit_bio(io->split_bio); 642 + } 643 + 644 if (io->has_flush) 645 io->current_bio->bi_opf |= REQ_PREFLUSH; 646 if (io->has_fua) 647 io->current_bio->bi_opf |= REQ_FUA; 648 submit_bio(io->current_bio); 649 } 650 651 /* deferred io_unit will be dispatched here */ ··· 670 return; 671 pr_info("md/raid:%s: Disabling writeback cache for degraded array.\n", 672 mdname(mddev)); 673 + 674 + /* wait superblock change before suspend */ 675 + wait_event(mddev->sb_wait, 676 + !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); 677 + 678 mddev_suspend(mddev); 679 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH; 680 mddev_resume(mddev); ··· 2621 * When run in degraded mode, array is set to write-through mode. 2622 * This check helps drain pending write safely in the transition to 2623 * write-through mode. 2624 + * 2625 + * When a stripe is syncing, the write is also handled in write 2626 + * through mode. 2627 */ 2628 + if (s->failed || test_bit(STRIPE_SYNCING, &sh->state)) { 2629 r5c_make_stripe_write_out(sh); 2630 return -EAGAIN; 2631 } ··· 2825 } 2826 2827 r5l_append_flush_payload(log, sh->sector); 2828 + /* stripe is flused to raid disks, we can do resync now */ 2829 + if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) 2830 + set_bit(STRIPE_HANDLE, &sh->state); 2831 } 2832 2833 int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh) ··· 2973 return ret; 2974 } 2975 2976 + void r5c_update_on_rdev_error(struct mddev *mddev, struct md_rdev *rdev) 2977 { 2978 struct r5conf *conf = mddev->private; 2979 struct r5l_log *log = conf->log; ··· 2981 if (!log) 2982 return; 2983 2984 + if ((raid5_calc_degraded(conf) > 0 || 2985 + test_bit(Journal, &rdev->flags)) && 2986 conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK) 2987 schedule_work(&log->disable_writeback_work); 2988 }
+2 -1
drivers/md/raid5-log.h
··· 28 extern void r5c_check_stripe_cache_usage(struct r5conf *conf); 29 extern void r5c_check_cached_full_stripe(struct r5conf *conf); 30 extern struct md_sysfs_entry r5c_journal_mode; 31 - extern void r5c_update_on_rdev_error(struct mddev *mddev); 32 extern bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect); 33 34 extern struct dma_async_tx_descriptor *
··· 28 extern void r5c_check_stripe_cache_usage(struct r5conf *conf); 29 extern void r5c_check_cached_full_stripe(struct r5conf *conf); 30 extern struct md_sysfs_entry r5c_journal_mode; 31 + extern void r5c_update_on_rdev_error(struct mddev *mddev, 32 + struct md_rdev *rdev); 33 extern bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect); 34 35 extern struct dma_async_tx_descriptor *
+48 -31
drivers/md/raid5.c
··· 103 static inline void lock_all_device_hash_locks_irq(struct r5conf *conf) 104 { 105 int i; 106 - local_irq_disable(); 107 - spin_lock(conf->hash_locks); 108 for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++) 109 spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks); 110 spin_lock(&conf->device_lock); ··· 113 { 114 int i; 115 spin_unlock(&conf->device_lock); 116 - for (i = NR_STRIPE_HASH_LOCKS; i; i--) 117 - spin_unlock(conf->hash_locks + i - 1); 118 - local_irq_enable(); 119 } 120 121 /* Find first data disk in a raid6 stripe */ ··· 233 if (test_bit(R5_InJournal, &sh->dev[i].flags)) 234 injournal++; 235 /* 236 - * When quiesce in r5c write back, set STRIPE_HANDLE for stripes with 237 - * data in journal, so they are not released to cached lists 238 */ 239 - if (conf->quiesce && r5c_is_writeback(conf->log) && 240 - !test_bit(STRIPE_HANDLE, &sh->state) && injournal != 0) { 241 if (test_bit(STRIPE_R5C_CACHING, &sh->state)) 242 r5c_make_stripe_write_out(sh); 243 set_bit(STRIPE_HANDLE, &sh->state); ··· 717 718 static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2) 719 { 720 - local_irq_disable(); 721 if (sh1 > sh2) { 722 - spin_lock(&sh2->stripe_lock); 723 spin_lock_nested(&sh1->stripe_lock, 1); 724 } else { 725 - spin_lock(&sh1->stripe_lock); 726 spin_lock_nested(&sh2->stripe_lock, 1); 727 } 728 } ··· 729 static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2) 730 { 731 spin_unlock(&sh1->stripe_lock); 732 - spin_unlock(&sh2->stripe_lock); 733 - local_irq_enable(); 734 } 735 736 /* Only freshly new full stripe normal write stripe can be added to a batch list */ ··· 2313 struct stripe_head *osh, *nsh; 2314 LIST_HEAD(newstripes); 2315 struct disk_info *ndisks; 2316 - int err; 2317 struct kmem_cache *sc; 2318 int i; 2319 int hash, cnt; 2320 2321 - err = md_allow_write(conf->mddev); 2322 - if (err) 2323 - return err; 2324 2325 /* Step 1 */ 2326 sc = kmem_cache_create(conf->cache_name[1-conf->active_name], ··· 2693 bdevname(rdev->bdev, b), 2694 mdname(mddev), 2695 conf->raid_disks - mddev->degraded); 2696 - r5c_update_on_rdev_error(mddev); 2697 } 2698 2699 /* ··· 3054 * When LOG_CRITICAL, stripes with injournal == 0 will be sent to 3055 * no_space_stripes list. 3056 * 3057 */ 3058 static inline bool delay_towrite(struct r5conf *conf, 3059 struct r5dev *dev, ··· 3071 /* case 2 above */ 3072 if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) && 3073 s->injournal > 0) 3074 return true; 3075 return false; 3076 } ··· 4660 4661 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) { 4662 spin_lock(&sh->stripe_lock); 4663 - /* Cannot process 'sync' concurrently with 'discard' */ 4664 - if (!test_bit(STRIPE_DISCARD, &sh->state) && 4665 test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { 4666 set_bit(STRIPE_SYNCING, &sh->state); 4667 clear_bit(STRIPE_INSYNC, &sh->state); ··· 4713 " to_write=%d failed=%d failed_num=%d,%d\n", 4714 s.locked, s.uptodate, s.to_read, s.to_write, s.failed, 4715 s.failed_num[0], s.failed_num[1]); 4716 - /* check if the array has lost more than max_degraded devices and, 4717 * if so, some requests might need to be failed. 
4718 */ 4719 - if (s.failed > conf->max_degraded || s.log_failed) { 4720 sh->check_state = 0; 4721 sh->reconstruct_state = 0; 4722 break_stripe_batch_list(sh, 0); ··· 5294 struct stripe_head *sh, *tmp; 5295 struct list_head *handle_list = NULL; 5296 struct r5worker_group *wg; 5297 - bool second_try = !r5c_is_writeback(conf->log); 5298 - bool try_loprio = test_bit(R5C_LOG_TIGHT, &conf->cache_state); 5299 5300 again: 5301 wg = NULL; ··· 6332 raid5_set_cache_size(struct mddev *mddev, int size) 6333 { 6334 struct r5conf *conf = mddev->private; 6335 - int err; 6336 6337 if (size <= 16 || size > 32768) 6338 return -EINVAL; ··· 6343 ; 6344 mutex_unlock(&conf->cache_size_mutex); 6345 6346 - 6347 - err = md_allow_write(mddev); 6348 - if (err) 6349 - return err; 6350 6351 mutex_lock(&conf->cache_size_mutex); 6352 while (size > conf->max_nr_stripes) ··· 7545 * neilb: there is no locking about new writes here, 7546 * so this cannot be safe. 7547 */ 7548 - if (atomic_read(&conf->active_stripes)) { 7549 return -EBUSY; 7550 } 7551 log_exit(conf);
··· 103 static inline void lock_all_device_hash_locks_irq(struct r5conf *conf) 104 { 105 int i; 106 + spin_lock_irq(conf->hash_locks); 107 for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++) 108 spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks); 109 spin_lock(&conf->device_lock); ··· 114 { 115 int i; 116 spin_unlock(&conf->device_lock); 117 + for (i = NR_STRIPE_HASH_LOCKS - 1; i; i--) 118 + spin_unlock(conf->hash_locks + i); 119 + spin_unlock_irq(conf->hash_locks); 120 } 121 122 /* Find first data disk in a raid6 stripe */ ··· 234 if (test_bit(R5_InJournal, &sh->dev[i].flags)) 235 injournal++; 236 /* 237 + * In the following cases, the stripe cannot be released to cached 238 + * lists. Therefore, we make the stripe write out and set 239 + * STRIPE_HANDLE: 240 + * 1. when quiesce in r5c write back; 241 + * 2. when resync is requested fot the stripe. 242 */ 243 + if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) || 244 + (conf->quiesce && r5c_is_writeback(conf->log) && 245 + !test_bit(STRIPE_HANDLE, &sh->state) && injournal != 0)) { 246 if (test_bit(STRIPE_R5C_CACHING, &sh->state)) 247 r5c_make_stripe_write_out(sh); 248 set_bit(STRIPE_HANDLE, &sh->state); ··· 714 715 static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2) 716 { 717 if (sh1 > sh2) { 718 + spin_lock_irq(&sh2->stripe_lock); 719 spin_lock_nested(&sh1->stripe_lock, 1); 720 } else { 721 + spin_lock_irq(&sh1->stripe_lock); 722 spin_lock_nested(&sh2->stripe_lock, 1); 723 } 724 } ··· 727 static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2) 728 { 729 spin_unlock(&sh1->stripe_lock); 730 + spin_unlock_irq(&sh2->stripe_lock); 731 } 732 733 /* Only freshly new full stripe normal write stripe can be added to a batch list */ ··· 2312 struct stripe_head *osh, *nsh; 2313 LIST_HEAD(newstripes); 2314 struct disk_info *ndisks; 2315 + int err = 0; 2316 struct kmem_cache *sc; 2317 int i; 2318 int hash, cnt; 2319 2320 + md_allow_write(conf->mddev); 2321 2322 /* Step 1 */ 2323 sc = kmem_cache_create(conf->cache_name[1-conf->active_name], ··· 2694 bdevname(rdev->bdev, b), 2695 mdname(mddev), 2696 conf->raid_disks - mddev->degraded); 2697 + r5c_update_on_rdev_error(mddev, rdev); 2698 } 2699 2700 /* ··· 3055 * When LOG_CRITICAL, stripes with injournal == 0 will be sent to 3056 * no_space_stripes list. 3057 * 3058 + * 3. during journal failure 3059 + * In journal failure, we try to flush all cached data to raid disks 3060 + * based on data in stripe cache. The array is read-only to upper 3061 + * layers, so we would skip all pending writes. 3062 + * 3063 */ 3064 static inline bool delay_towrite(struct r5conf *conf, 3065 struct r5dev *dev, ··· 3067 /* case 2 above */ 3068 if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) && 3069 s->injournal > 0) 3070 + return true; 3071 + /* case 3 above */ 3072 + if (s->log_failed && s->injournal) 3073 return true; 3074 return false; 3075 } ··· 4653 4654 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) { 4655 spin_lock(&sh->stripe_lock); 4656 + /* 4657 + * Cannot process 'sync' concurrently with 'discard'. 4658 + * Flush data in r5cache before 'sync'. 
4659 + */ 4660 + if (!test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) && 4661 + !test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state) && 4662 + !test_bit(STRIPE_DISCARD, &sh->state) && 4663 test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { 4664 set_bit(STRIPE_SYNCING, &sh->state); 4665 clear_bit(STRIPE_INSYNC, &sh->state); ··· 4701 " to_write=%d failed=%d failed_num=%d,%d\n", 4702 s.locked, s.uptodate, s.to_read, s.to_write, s.failed, 4703 s.failed_num[0], s.failed_num[1]); 4704 + /* 4705 + * check if the array has lost more than max_degraded devices and, 4706 * if so, some requests might need to be failed. 4707 + * 4708 + * When journal device failed (log_failed), we will only process 4709 + * the stripe if there is data need write to raid disks 4710 */ 4711 + if (s.failed > conf->max_degraded || 4712 + (s.log_failed && s.injournal == 0)) { 4713 sh->check_state = 0; 4714 sh->reconstruct_state = 0; 4715 break_stripe_batch_list(sh, 0); ··· 5277 struct stripe_head *sh, *tmp; 5278 struct list_head *handle_list = NULL; 5279 struct r5worker_group *wg; 5280 + bool second_try = !r5c_is_writeback(conf->log) && 5281 + !r5l_log_disk_error(conf); 5282 + bool try_loprio = test_bit(R5C_LOG_TIGHT, &conf->cache_state) || 5283 + r5l_log_disk_error(conf); 5284 5285 again: 5286 wg = NULL; ··· 6313 raid5_set_cache_size(struct mddev *mddev, int size) 6314 { 6315 struct r5conf *conf = mddev->private; 6316 6317 if (size <= 16 || size > 32768) 6318 return -EINVAL; ··· 6325 ; 6326 mutex_unlock(&conf->cache_size_mutex); 6327 6328 + md_allow_write(mddev); 6329 6330 mutex_lock(&conf->cache_size_mutex); 6331 while (size > conf->max_nr_stripes) ··· 7530 * neilb: there is no locking about new writes here, 7531 * so this cannot be safe. 7532 */ 7533 + if (atomic_read(&conf->active_stripes) || 7534 + atomic_read(&conf->r5c_cached_full_stripes) || 7535 + atomic_read(&conf->r5c_cached_partial_stripes)) { 7536 return -EBUSY; 7537 } 7538 log_exit(conf);
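Among the raid5.c changes, lock_two_stripes() keeps its rule of taking the two stripe locks in a fixed (pointer) order while moving the irq disabling onto the outer spin_lock_irq(). A userspace pthread sketch of the ordering rule only; the interrupt disabling has no direct analogue here:

    /* Always take the lower-addressed lock first so two threads locking
     * the same pair of objects cannot deadlock. */
    #include <pthread.h>
    #include <stdio.h>

    struct stripe { pthread_mutex_t lock; int data; };

    static void lock_two(struct stripe *a, struct stripe *b)
    {
        if (a > b) {                 /* compare addresses, not contents */
            pthread_mutex_lock(&b->lock);
            pthread_mutex_lock(&a->lock);
        } else {
            pthread_mutex_lock(&a->lock);
            pthread_mutex_lock(&b->lock);
        }
    }

    static void unlock_two(struct stripe *a, struct stripe *b)
    {
        pthread_mutex_unlock(&a->lock);
        pthread_mutex_unlock(&b->lock);
    }

    int main(void)
    {
        struct stripe s1 = { PTHREAD_MUTEX_INITIALIZER, 1 };
        struct stripe s2 = { PTHREAD_MUTEX_INITIALIZER, 2 };

        lock_two(&s1, &s2);          /* same order regardless of argument order */
        s1.data += s2.data;
        unlock_two(&s1, &s2);
        printf("%d\n", s1.data);
        return 0;
    }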
+1
drivers/misc/Kconfig
··· 492 493 config PCI_ENDPOINT_TEST 494 depends on PCI 495 tristate "PCI Endpoint Test driver" 496 ---help--- 497 Enable this configuration option to enable the host side test driver
··· 492 493 config PCI_ENDPOINT_TEST 494 depends on PCI 495 + select CRC32 496 tristate "PCI Endpoint Test driver" 497 ---help--- 498 Enable this configuration option to enable the host side test driver
+3
drivers/net/dsa/mv88e6xxx/chip.c
··· 849 mv88e6xxx_g1_stats_read(chip, reg, &low); 850 if (s->sizeof_stat == 8) 851 mv88e6xxx_g1_stats_read(chip, reg + 1, &high); 852 } 853 value = (((u64)high) << 16) | low; 854 return value;
··· 849 mv88e6xxx_g1_stats_read(chip, reg, &low); 850 if (s->sizeof_stat == 8) 851 mv88e6xxx_g1_stats_read(chip, reg + 1, &high); 852 + break; 853 + default: 854 + return UINT64_MAX; 855 } 856 value = (((u64)high) << 16) | low; 857 return value;
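The chip.c fix adds the break that stops the 8-byte-stat case from falling through, plus a default arm returning UINT64_MAX for unknown sizes. A small, made-up illustration of that bug class (the real function reads switch registers, which is not modelled here):

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t read_stat(int sizeof_stat)
    {
        uint32_t low = 0, high = 0;

        switch (sizeof_stat) {
        case 4:
            low = 0x1234;       /* stand-in for the 32-bit counter read */
            break;              /* without this we would also run the
                                 * 64-bit path below and set 'high' */
        case 8:
            low = 0x1234;
            high = 0xffff;      /* stand-in for the extra high-word read */
            break;
        default:
            return UINT64_MAX;  /* unknown stat size */
        }
        return ((uint64_t)high << 16) | low;
    }

    int main(void)
    {
        printf("4-byte stat: %llu\n", (unsigned long long)read_stat(4));
        printf("unknown size: %llu\n", (unsigned long long)read_stat(2));
        return 0;
    }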
+1 -12
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
··· 200 static int hw_atl_a0_hw_offload_set(struct aq_hw_s *self, 201 struct aq_nic_cfg_s *aq_nic_cfg) 202 { 203 - int err = 0; 204 - 205 /* TX checksums offloads*/ 206 tpo_ipv4header_crc_offload_en_set(self, 1); 207 tpo_tcp_udp_crc_offload_en_set(self, 1); 208 - if (err < 0) 209 - goto err_exit; 210 211 /* RX checksums offloads*/ 212 rpo_ipv4header_crc_offload_en_set(self, 1); 213 rpo_tcp_udp_crc_offload_en_set(self, 1); 214 - if (err < 0) 215 - goto err_exit; 216 217 /* LSO offloads*/ 218 tdm_large_send_offload_en_set(self, 0xFFFFFFFFU); 219 - if (err < 0) 220 - goto err_exit; 221 222 - err = aq_hw_err_from_flags(self); 223 - 224 - err_exit: 225 - return err; 226 } 227 228 static int hw_atl_a0_hw_init_tx_path(struct aq_hw_s *self)
··· 200 static int hw_atl_a0_hw_offload_set(struct aq_hw_s *self, 201 struct aq_nic_cfg_s *aq_nic_cfg) 202 { 203 /* TX checksums offloads*/ 204 tpo_ipv4header_crc_offload_en_set(self, 1); 205 tpo_tcp_udp_crc_offload_en_set(self, 1); 206 207 /* RX checksums offloads*/ 208 rpo_ipv4header_crc_offload_en_set(self, 1); 209 rpo_tcp_udp_crc_offload_en_set(self, 1); 210 211 /* LSO offloads*/ 212 tdm_large_send_offload_en_set(self, 0xFFFFFFFFU); 213 214 + return aq_hw_err_from_flags(self); 215 } 216 217 static int hw_atl_a0_hw_init_tx_path(struct aq_hw_s *self)
+1 -11
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
··· 200 static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self, 201 struct aq_nic_cfg_s *aq_nic_cfg) 202 { 203 - int err = 0; 204 unsigned int i; 205 206 /* TX checksums offloads*/ 207 tpo_ipv4header_crc_offload_en_set(self, 1); 208 tpo_tcp_udp_crc_offload_en_set(self, 1); 209 - if (err < 0) 210 - goto err_exit; 211 212 /* RX checksums offloads*/ 213 rpo_ipv4header_crc_offload_en_set(self, 1); 214 rpo_tcp_udp_crc_offload_en_set(self, 1); 215 - if (err < 0) 216 - goto err_exit; 217 218 /* LSO offloads*/ 219 tdm_large_send_offload_en_set(self, 0xFFFFFFFFU); 220 - if (err < 0) 221 - goto err_exit; 222 223 /* LRO offloads */ 224 { ··· 238 239 rpo_lro_en_set(self, aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U); 240 } 241 - err = aq_hw_err_from_flags(self); 242 - 243 - err_exit: 244 - return err; 245 } 246 247 static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
··· 200 static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self, 201 struct aq_nic_cfg_s *aq_nic_cfg) 202 { 203 unsigned int i; 204 205 /* TX checksums offloads*/ 206 tpo_ipv4header_crc_offload_en_set(self, 1); 207 tpo_tcp_udp_crc_offload_en_set(self, 1); 208 209 /* RX checksums offloads*/ 210 rpo_ipv4header_crc_offload_en_set(self, 1); 211 rpo_tcp_udp_crc_offload_en_set(self, 1); 212 213 /* LSO offloads*/ 214 tdm_large_send_offload_en_set(self, 0xFFFFFFFFU); 215 216 /* LRO offloads */ 217 { ··· 245 246 rpo_lro_en_set(self, aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U); 247 } 248 + return aq_hw_err_from_flags(self); 249 } 250 251 static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
+1 -2
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 7630 dev->min_mtu = ETH_ZLEN; 7631 dev->max_mtu = BNXT_MAX_MTU; 7632 7633 - bnxt_dcb_init(bp); 7634 - 7635 #ifdef CONFIG_BNXT_SRIOV 7636 init_waitqueue_head(&bp->sriov_cfg_wait); 7637 #endif ··· 7667 bnxt_hwrm_func_qcfg(bp); 7668 bnxt_hwrm_port_led_qcaps(bp); 7669 bnxt_ethtool_init(bp); 7670 7671 bnxt_set_rx_skb_mode(bp, false); 7672 bnxt_set_tpa_flags(bp);
··· 7630 dev->min_mtu = ETH_ZLEN; 7631 dev->max_mtu = BNXT_MAX_MTU; 7632 7633 #ifdef CONFIG_BNXT_SRIOV 7634 init_waitqueue_head(&bp->sriov_cfg_wait); 7635 #endif ··· 7669 bnxt_hwrm_func_qcfg(bp); 7670 bnxt_hwrm_port_led_qcaps(bp); 7671 bnxt_ethtool_init(bp); 7672 + bnxt_dcb_init(bp); 7673 7674 bnxt_set_rx_skb_mode(bp, false); 7675 bnxt_set_tpa_flags(bp);
+4 -2
drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
··· 553 if ((mode & DCB_CAP_DCBX_VER_CEE) || !(mode & DCB_CAP_DCBX_VER_IEEE)) 554 return 1; 555 556 - if ((mode & DCB_CAP_DCBX_HOST) && BNXT_VF(bp)) 557 - return 1; 558 559 if (mode == bp->dcbx_cap) 560 return 0;
··· 553 if ((mode & DCB_CAP_DCBX_VER_CEE) || !(mode & DCB_CAP_DCBX_VER_IEEE)) 554 return 1; 555 556 + if (mode & DCB_CAP_DCBX_HOST) { 557 + if (BNXT_VF(bp) || (bp->flags & BNXT_FLAG_FW_LLDP_AGENT)) 558 + return 1; 559 + } 560 561 if (mode == bp->dcbx_cap) 562 return 0;
+3 -3
drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
··· 37 38 #define T4FW_VERSION_MAJOR 0x01 39 #define T4FW_VERSION_MINOR 0x10 40 - #define T4FW_VERSION_MICRO 0x21 41 #define T4FW_VERSION_BUILD 0x00 42 43 #define T4FW_MIN_VERSION_MAJOR 0x01 ··· 46 47 #define T5FW_VERSION_MAJOR 0x01 48 #define T5FW_VERSION_MINOR 0x10 49 - #define T5FW_VERSION_MICRO 0x21 50 #define T5FW_VERSION_BUILD 0x00 51 52 #define T5FW_MIN_VERSION_MAJOR 0x00 ··· 55 56 #define T6FW_VERSION_MAJOR 0x01 57 #define T6FW_VERSION_MINOR 0x10 58 - #define T6FW_VERSION_MICRO 0x21 59 #define T6FW_VERSION_BUILD 0x00 60 61 #define T6FW_MIN_VERSION_MAJOR 0x00
··· 37 38 #define T4FW_VERSION_MAJOR 0x01 39 #define T4FW_VERSION_MINOR 0x10 40 + #define T4FW_VERSION_MICRO 0x2B 41 #define T4FW_VERSION_BUILD 0x00 42 43 #define T4FW_MIN_VERSION_MAJOR 0x01 ··· 46 47 #define T5FW_VERSION_MAJOR 0x01 48 #define T5FW_VERSION_MINOR 0x10 49 + #define T5FW_VERSION_MICRO 0x2B 50 #define T5FW_VERSION_BUILD 0x00 51 52 #define T5FW_MIN_VERSION_MAJOR 0x00 ··· 55 56 #define T6FW_VERSION_MAJOR 0x01 57 #define T6FW_VERSION_MINOR 0x10 58 + #define T6FW_VERSION_MICRO 0x2B 59 #define T6FW_VERSION_BUILD 0x00 60 61 #define T6FW_MIN_VERSION_MAJOR 0x00
+7
drivers/net/ethernet/faraday/ftmac100.c
··· 1174 return 0; 1175 } 1176 1177 static struct platform_driver ftmac100_driver = { 1178 .probe = ftmac100_probe, 1179 .remove = ftmac100_remove, 1180 .driver = { 1181 .name = DRV_NAME, 1182 }, 1183 }; 1184 ··· 1208 MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>"); 1209 MODULE_DESCRIPTION("FTMAC100 driver"); 1210 MODULE_LICENSE("GPL");
··· 1174 return 0; 1175 } 1176 1177 + static const struct of_device_id ftmac100_of_ids[] = { 1178 + { .compatible = "andestech,atmac100" }, 1179 + { } 1180 + }; 1181 + 1182 static struct platform_driver ftmac100_driver = { 1183 .probe = ftmac100_probe, 1184 .remove = ftmac100_remove, 1185 .driver = { 1186 .name = DRV_NAME, 1187 + .of_match_table = ftmac100_of_ids 1188 }, 1189 }; 1190 ··· 1202 MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>"); 1203 MODULE_DESCRIPTION("FTMAC100 driver"); 1204 MODULE_LICENSE("GPL"); 1205 + MODULE_DEVICE_TABLE(of, ftmac100_of_ids);
+4 -6
drivers/net/ethernet/mellanox/mlx4/main.c
··· 2862 int port = 0; 2863 2864 if (msi_x) { 2865 - int nreq = dev->caps.num_ports * num_online_cpus() + 1; 2866 - 2867 - nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, 2868 - nreq); 2869 - if (nreq > MAX_MSIX) 2870 - nreq = MAX_MSIX; 2871 2872 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); 2873 if (!entries)
··· 2862 int port = 0; 2863 2864 if (msi_x) { 2865 + int nreq = min3(dev->caps.num_ports * 2866 + (int)num_online_cpus() + 1, 2867 + dev->caps.num_eqs - dev->caps.reserved_eqs, 2868 + MAX_MSIX); 2869 2870 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); 2871 if (!entries)
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/Kconfig
··· 13 14 config MLX5_CORE_EN 15 bool "Mellanox Technologies ConnectX-4 Ethernet support" 16 - depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE 17 depends on IPV6=y || IPV6=n || MLX5_CORE=m 18 imply PTP_1588_CLOCK 19 default n
··· 13 14 config MLX5_CORE_EN 15 bool "Mellanox Technologies ConnectX-4 Ethernet support" 16 + depends on NETDEVICES && ETHERNET && INET && PCI && MLX5_CORE 17 depends on IPV6=y || IPV6=n || MLX5_CORE=m 18 imply PTP_1588_CLOCK 19 default n
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en.h
··· 1003 void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv); 1004 void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt); 1005 1006 - int mlx5e_create_ttc_table(struct mlx5e_priv *priv, u32 underlay_qpn); 1007 void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv); 1008 1009 int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
··· 1003 void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv); 1004 void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt); 1005 1006 + int mlx5e_create_ttc_table(struct mlx5e_priv *priv); 1007 void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv); 1008 1009 int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
+6 -3
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
··· 794 ptys2ethtool_supported_port(link_ksettings, eth_proto_cap); 795 ptys2ethtool_supported_link(supported, eth_proto_cap); 796 ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause); 797 - ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Asym_Pause); 798 } 799 800 static void get_advertising(u32 eth_proto_cap, u8 tx_pause, ··· 803 unsigned long *advertising = link_ksettings->link_modes.advertising; 804 805 ptys2ethtool_adver_link(advertising, eth_proto_cap); 806 - if (tx_pause) 807 ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause); 808 if (tx_pause ^ rx_pause) 809 ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Asym_Pause); ··· 848 struct mlx5e_priv *priv = netdev_priv(netdev); 849 struct mlx5_core_dev *mdev = priv->mdev; 850 u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0}; 851 u32 eth_proto_cap; 852 u32 eth_proto_admin; 853 u32 eth_proto_lp; ··· 872 an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin); 873 an_status = MLX5_GET(ptys_reg, out, an_status); 874 875 ethtool_link_ksettings_zero_link_mode(link_ksettings, supported); 876 ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); 877 878 get_supported(eth_proto_cap, link_ksettings); 879 - get_advertising(eth_proto_admin, 0, 0, link_ksettings); 880 get_speed_duplex(netdev, eth_proto_oper, link_ksettings); 881 882 eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
··· 794 ptys2ethtool_supported_port(link_ksettings, eth_proto_cap); 795 ptys2ethtool_supported_link(supported, eth_proto_cap); 796 ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause); 797 } 798 799 static void get_advertising(u32 eth_proto_cap, u8 tx_pause, ··· 804 unsigned long *advertising = link_ksettings->link_modes.advertising; 805 806 ptys2ethtool_adver_link(advertising, eth_proto_cap); 807 + if (rx_pause) 808 ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause); 809 if (tx_pause ^ rx_pause) 810 ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Asym_Pause); ··· 849 struct mlx5e_priv *priv = netdev_priv(netdev); 850 struct mlx5_core_dev *mdev = priv->mdev; 851 u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0}; 852 + u32 rx_pause = 0; 853 + u32 tx_pause = 0; 854 u32 eth_proto_cap; 855 u32 eth_proto_admin; 856 u32 eth_proto_lp; ··· 871 an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin); 872 an_status = MLX5_GET(ptys_reg, out, an_status); 873 874 + mlx5_query_port_pause(mdev, &rx_pause, &tx_pause); 875 + 876 ethtool_link_ksettings_zero_link_mode(link_ksettings, supported); 877 ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); 878 879 get_supported(eth_proto_cap, link_ksettings); 880 + get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings); 881 get_speed_duplex(netdev, eth_proto_oper, link_ksettings); 882 883 eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
+2 -3
drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
··· 800 mlx5e_destroy_flow_table(&ttc->ft); 801 } 802 803 - int mlx5e_create_ttc_table(struct mlx5e_priv *priv, u32 underlay_qpn) 804 { 805 struct mlx5e_ttc_table *ttc = &priv->fs.ttc; 806 struct mlx5_flow_table_attr ft_attr = {}; ··· 810 ft_attr.max_fte = MLX5E_TTC_TABLE_SIZE; 811 ft_attr.level = MLX5E_TTC_FT_LEVEL; 812 ft_attr.prio = MLX5E_NIC_PRIO; 813 - ft_attr.underlay_qpn = underlay_qpn; 814 815 ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr); 816 if (IS_ERR(ft->t)) { ··· 1146 priv->netdev->hw_features &= ~NETIF_F_NTUPLE; 1147 } 1148 1149 - err = mlx5e_create_ttc_table(priv, 0); 1150 if (err) { 1151 netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n", 1152 err);
··· 800 mlx5e_destroy_flow_table(&ttc->ft); 801 } 802 803 + int mlx5e_create_ttc_table(struct mlx5e_priv *priv) 804 { 805 struct mlx5e_ttc_table *ttc = &priv->fs.ttc; 806 struct mlx5_flow_table_attr ft_attr = {}; ··· 810 ft_attr.max_fte = MLX5E_TTC_TABLE_SIZE; 811 ft_attr.level = MLX5E_TTC_FT_LEVEL; 812 ft_attr.prio = MLX5E_NIC_PRIO; 813 814 ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr); 815 if (IS_ERR(ft->t)) { ··· 1147 priv->netdev->hw_features &= ~NETIF_F_NTUPLE; 1148 } 1149 1150 + err = mlx5e_create_ttc_table(priv); 1151 if (err) { 1152 netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n", 1153 err);
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 2976 new_channels.params = priv->channels.params; 2977 new_channels.params.num_tc = tc ? tc : 1; 2978 2979 - if (test_bit(MLX5E_STATE_OPENED, &priv->state)) { 2980 priv->channels.params = new_channels.params; 2981 goto out; 2982 }
··· 2976 new_channels.params = priv->channels.params; 2977 new_channels.params.num_tc = tc ? tc : 1; 2978 2979 + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { 2980 priv->channels.params = new_channels.params; 2981 goto out; 2982 }
+3 -6
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
··· 40 #include "eswitch.h" 41 42 int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, 43 - struct mlx5_flow_table *ft) 44 { 45 u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0}; 46 u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0}; 47 48 if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) && 49 - ft->underlay_qpn == 0) 50 return 0; 51 52 MLX5_SET(set_flow_table_root_in, in, opcode, 53 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT); 54 MLX5_SET(set_flow_table_root_in, in, table_type, ft->type); 55 MLX5_SET(set_flow_table_root_in, in, table_id, ft->id); 56 if (ft->vport) { 57 MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport); 58 MLX5_SET(set_flow_table_root_in, in, other_vport, 1); 59 } 60 - 61 - if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) && 62 - ft->underlay_qpn != 0) 63 - MLX5_SET(set_flow_table_root_in, in, underlay_qpn, ft->underlay_qpn); 64 65 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); 66 }
··· 40 #include "eswitch.h" 41 42 int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, 43 + struct mlx5_flow_table *ft, u32 underlay_qpn) 44 { 45 u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0}; 46 u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0}; 47 48 if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) && 49 + underlay_qpn == 0) 50 return 0; 51 52 MLX5_SET(set_flow_table_root_in, in, opcode, 53 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT); 54 MLX5_SET(set_flow_table_root_in, in, table_type, ft->type); 55 MLX5_SET(set_flow_table_root_in, in, table_id, ft->id); 56 + MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn); 57 if (ft->vport) { 58 MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport); 59 MLX5_SET(set_flow_table_root_in, in, other_vport, 1); 60 } 61 62 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); 63 }
+2 -1
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
··· 71 unsigned int index); 72 73 int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, 74 - struct mlx5_flow_table *ft); 75 76 int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id); 77 int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id);
··· 71 unsigned int index); 72 73 int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, 74 + struct mlx5_flow_table *ft, 75 + u32 underlay_qpn); 76 77 int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id); 78 int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id);
+21 -4
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
··· 650 if (ft->level >= min_level) 651 return 0; 652 653 - err = mlx5_cmd_update_root_ft(root->dev, ft); 654 if (err) 655 mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n", 656 ft->id); ··· 817 err = -ENOMEM; 818 goto unlock_root; 819 } 820 - 821 - ft->underlay_qpn = ft_attr->underlay_qpn; 822 823 tree_init_node(&ft->node, 1, del_flow_table); 824 log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0; ··· 1487 1488 new_root_ft = find_next_ft(ft); 1489 if (new_root_ft) { 1490 - int err = mlx5_cmd_update_root_ft(root->dev, new_root_ft); 1491 1492 if (err) { 1493 mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n", ··· 2061 mlx5_cleanup_fs(dev); 2062 return err; 2063 }
··· 650 if (ft->level >= min_level) 651 return 0; 652 653 + err = mlx5_cmd_update_root_ft(root->dev, ft, root->underlay_qpn); 654 if (err) 655 mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n", 656 ft->id); ··· 817 err = -ENOMEM; 818 goto unlock_root; 819 } 820 821 tree_init_node(&ft->node, 1, del_flow_table); 822 log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0; ··· 1489 1490 new_root_ft = find_next_ft(ft); 1491 if (new_root_ft) { 1492 + int err = mlx5_cmd_update_root_ft(root->dev, new_root_ft, 1493 + root->underlay_qpn); 1494 1495 if (err) { 1496 mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n", ··· 2062 mlx5_cleanup_fs(dev); 2063 return err; 2064 } 2065 + 2066 + int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn) 2067 + { 2068 + struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns; 2069 + 2070 + root->underlay_qpn = underlay_qpn; 2071 + return 0; 2072 + } 2073 + EXPORT_SYMBOL(mlx5_fs_add_rx_underlay_qpn); 2074 + 2075 + int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn) 2076 + { 2077 + struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns; 2078 + 2079 + root->underlay_qpn = 0; 2080 + return 0; 2081 + } 2082 + EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
··· 118 /* FWD rules that point on this flow table */ 119 struct list_head fwd_rules; 120 u32 flags; 121 - u32 underlay_qpn; 122 }; 123 124 struct mlx5_fc_cache { ··· 194 struct mlx5_flow_table *root_ft; 195 /* Should be held when chaining flow tables */ 196 struct mutex chain_lock; 197 }; 198 199 int mlx5_init_fc_stats(struct mlx5_core_dev *dev);
··· 118 /* FWD rules that point on this flow table */ 119 struct list_head fwd_rules; 120 u32 flags; 121 }; 122 123 struct mlx5_fc_cache { ··· 195 struct mlx5_flow_table *root_ft; 196 /* Should be held when chaining flow tables */ 197 struct mutex chain_lock; 198 + u32 underlay_qpn; 199 }; 200 201 int mlx5_init_fc_stats(struct mlx5_core_dev *dev);
+9 -2
drivers/net/ethernet/mellanox/mlx5/core/ipoib.c
··· 66 67 mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev)); 68 69 mutex_init(&priv->state_lock); 70 71 netdev->hw_features |= NETIF_F_SG; ··· 160 161 static void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp) 162 { 163 mlx5_core_destroy_qp(mdev, qp); 164 } 165 ··· 175 mlx5_core_warn(priv->mdev, "create underlay QP failed, %d\n", err); 176 return err; 177 } 178 179 err = mlx5e_create_tis(priv->mdev, 0 /* tc */, ipriv->qp.qpn, &priv->tisn[0]); 180 if (err) { ··· 197 198 static int mlx5i_create_flow_steering(struct mlx5e_priv *priv) 199 { 200 - struct mlx5i_priv *ipriv = priv->ppriv; 201 int err; 202 203 priv->fs.ns = mlx5_get_flow_namespace(priv->mdev, ··· 212 priv->netdev->hw_features &= ~NETIF_F_NTUPLE; 213 } 214 215 - err = mlx5e_create_ttc_table(priv, ipriv->qp.qpn); 216 if (err) { 217 netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n", 218 err);
··· 66 67 mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev)); 68 69 + /* Override RQ params as IPoIB supports only LINKED LIST RQ for now */ 70 + mlx5e_set_rq_type_params(mdev, &priv->channels.params, MLX5_WQ_TYPE_LINKED_LIST); 71 + priv->channels.params.lro_en = false; 72 + 73 mutex_init(&priv->state_lock); 74 75 netdev->hw_features |= NETIF_F_SG; ··· 156 157 static void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp) 158 { 159 + mlx5_fs_remove_rx_underlay_qpn(mdev, qp->qpn); 160 + 161 mlx5_core_destroy_qp(mdev, qp); 162 } 163 ··· 169 mlx5_core_warn(priv->mdev, "create underlay QP failed, %d\n", err); 170 return err; 171 } 172 + 173 + mlx5_fs_add_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn); 174 175 err = mlx5e_create_tis(priv->mdev, 0 /* tc */, ipriv->qp.qpn, &priv->tisn[0]); 176 if (err) { ··· 189 190 static int mlx5i_create_flow_steering(struct mlx5e_priv *priv) 191 { 192 int err; 193 194 priv->fs.ns = mlx5_get_flow_namespace(priv->mdev, ··· 205 priv->netdev->hw_features &= ~NETIF_F_NTUPLE; 206 } 207 208 + err = mlx5e_create_ttc_table(priv); 209 if (err) { 210 netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n", 211 err);
+2 -1
drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
··· 199 200 entry->counter_valid = false; 201 entry->counter = 0; 202 if (!counters_enabled) 203 return 0; 204 205 - entry->index = mlxsw_sp_rif_index(rif); 206 err = mlxsw_sp_rif_counter_value_get(mlxsw_sp, rif, 207 MLXSW_SP_RIF_COUNTER_EGRESS, 208 &cnt);
··· 199 200 entry->counter_valid = false; 201 entry->counter = 0; 202 + entry->index = mlxsw_sp_rif_index(rif); 203 + 204 if (!counters_enabled) 205 return 0; 206 207 err = mlxsw_sp_rif_counter_value_get(mlxsw_sp, rif, 208 MLXSW_SP_RIF_COUNTER_EGRESS, 209 &cnt);
+3
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
··· 206 { 207 unsigned int *p_counter_index; 208 209 p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir); 210 if (WARN_ON(!p_counter_index)) 211 return;
··· 206 { 207 unsigned int *p_counter_index; 208 209 + if (!mlxsw_sp_rif_counter_valid_get(rif, dir)) 210 + return; 211 + 212 p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir); 213 if (WARN_ON(!p_counter_index)) 214 return;
+2 -4
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
··· 1497 err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, 1498 adding, true); 1499 if (err) { 1500 - if (net_ratelimit()) 1501 - netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n"); 1502 return; 1503 } 1504 ··· 1557 err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid, 1558 adding, true); 1559 if (err) { 1560 - if (net_ratelimit()) 1561 - netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n"); 1562 return; 1563 } 1564
··· 1497 err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, 1498 adding, true); 1499 if (err) { 1500 + dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n"); 1501 return; 1502 } 1503 ··· 1558 err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid, 1559 adding, true); 1560 if (err) { 1561 + dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n"); 1562 return; 1563 } 1564
+1 -1
drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
··· 247 cmd.req.arg3 = 0; 248 249 if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE) 250 - netxen_issue_cmd(adapter, &cmd); 251 252 if (rcode != NX_RCODE_SUCCESS) 253 return -EIO;
··· 247 cmd.req.arg3 = 0; 248 249 if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE) 250 + rcode = netxen_issue_cmd(adapter, &cmd); 251 252 if (rcode != NX_RCODE_SUCCESS) 253 return -EIO;
+1 -1
drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
··· 983 memset(&camline, 0, sizeof(union gft_cam_line_union)); 984 qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 985 camline.cam_line_mapped.camline); 986 - memset(&ramline, 0, sizeof(union gft_cam_line_union)); 987 988 for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++) { 989 u32 hw_addr = PRS_REG_GFT_PROFILE_MASK_RAM;
··· 983 memset(&camline, 0, sizeof(union gft_cam_line_union)); 984 qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 985 camline.cam_line_mapped.camline); 986 + memset(&ramline, 0, sizeof(ramline)); 987 988 for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++) { 989 u32 hw_addr = PRS_REG_GFT_PROFILE_MASK_RAM;
+2 -2
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
··· 37 38 #define _QLCNIC_LINUX_MAJOR 5 39 #define _QLCNIC_LINUX_MINOR 3 40 - #define _QLCNIC_LINUX_SUBVERSION 65 41 - #define QLCNIC_LINUX_VERSIONID "5.3.65" 42 #define QLCNIC_DRV_IDC_VER 0x01 43 #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ 44 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
··· 37 38 #define _QLCNIC_LINUX_MAJOR 5 39 #define _QLCNIC_LINUX_MINOR 3 40 + #define _QLCNIC_LINUX_SUBVERSION 66 41 + #define QLCNIC_LINUX_VERSIONID "5.3.66" 42 #define QLCNIC_DRV_IDC_VER 0x01 43 #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ 44 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
+34
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
··· 3168 return 0; 3169 } 3170 3171 int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter) 3172 { 3173 u8 pci_func;
··· 3168 return 0; 3169 } 3170 3171 + void qlcnic_83xx_get_port_type(struct qlcnic_adapter *adapter) 3172 + { 3173 + struct qlcnic_hardware_context *ahw = adapter->ahw; 3174 + struct qlcnic_cmd_args cmd; 3175 + u32 config; 3176 + int err; 3177 + 3178 + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_STATUS); 3179 + if (err) 3180 + return; 3181 + 3182 + err = qlcnic_issue_cmd(adapter, &cmd); 3183 + if (err) { 3184 + dev_info(&adapter->pdev->dev, 3185 + "Get Link Status Command failed: 0x%x\n", err); 3186 + goto out; 3187 + } else { 3188 + config = cmd.rsp.arg[3]; 3189 + 3190 + switch (QLC_83XX_SFP_MODULE_TYPE(config)) { 3191 + case QLC_83XX_MODULE_FIBRE_1000BASE_SX: 3192 + case QLC_83XX_MODULE_FIBRE_1000BASE_LX: 3193 + case QLC_83XX_MODULE_FIBRE_1000BASE_CX: 3194 + case QLC_83XX_MODULE_TP_1000BASE_T: 3195 + ahw->port_type = QLCNIC_GBE; 3196 + break; 3197 + default: 3198 + ahw->port_type = QLCNIC_XGBE; 3199 + } 3200 + } 3201 + out: 3202 + qlcnic_free_mbx_args(&cmd); 3203 + } 3204 + 3205 int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter) 3206 { 3207 u8 pci_func;
+1
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
··· 637 int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *, 638 struct ethtool_pauseparam *); 639 int qlcnic_83xx_test_link(struct qlcnic_adapter *); 640 int qlcnic_83xx_reg_test(struct qlcnic_adapter *); 641 int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *); 642 int qlcnic_83xx_get_registers(struct qlcnic_adapter *, u32 *);
··· 637 int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *, 638 struct ethtool_pauseparam *); 639 int qlcnic_83xx_test_link(struct qlcnic_adapter *); 640 + void qlcnic_83xx_get_port_type(struct qlcnic_adapter *adapter); 641 int qlcnic_83xx_reg_test(struct qlcnic_adapter *); 642 int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *); 643 int qlcnic_83xx_get_registers(struct qlcnic_adapter *, u32 *);
+3
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
··· 486 u32 ret = 0; 487 struct qlcnic_adapter *adapter = netdev_priv(dev); 488 489 if (adapter->ahw->port_type != QLCNIC_GBE) 490 return -EOPNOTSUPP; 491
··· 486 u32 ret = 0; 487 struct qlcnic_adapter *adapter = netdev_priv(dev); 488 489 + if (qlcnic_83xx_check(adapter)) 490 + qlcnic_83xx_get_port_type(adapter); 491 + 492 if (adapter->ahw->port_type != QLCNIC_GBE) 493 return -EOPNOTSUPP; 494
+6 -4
drivers/net/ethernet/qualcomm/qca_spi.c
··· 296 297 /* Allocate rx SKB if we don't have one available. */ 298 if (!qca->rx_skb) { 299 - qca->rx_skb = netdev_alloc_skb(net_dev, 300 - net_dev->mtu + VLAN_ETH_HLEN); 301 if (!qca->rx_skb) { 302 netdev_dbg(net_dev, "out of RX resources\n"); 303 qca->stats.out_of_mem++; ··· 378 qca->rx_skb, qca->rx_skb->dev); 379 qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY; 380 netif_rx_ni(qca->rx_skb); 381 - qca->rx_skb = netdev_alloc_skb(net_dev, 382 net_dev->mtu + VLAN_ETH_HLEN); 383 if (!qca->rx_skb) { 384 netdev_dbg(net_dev, "out of RX resources\n"); ··· 760 if (!qca->rx_buffer) 761 return -ENOBUFS; 762 763 - qca->rx_skb = netdev_alloc_skb(dev, qca->net_dev->mtu + VLAN_ETH_HLEN); 764 if (!qca->rx_skb) { 765 kfree(qca->rx_buffer); 766 netdev_info(qca->net_dev, "Failed to allocate RX sk_buff.\n");
··· 296 297 /* Allocate rx SKB if we don't have one available. */ 298 if (!qca->rx_skb) { 299 + qca->rx_skb = netdev_alloc_skb_ip_align(net_dev, 300 + net_dev->mtu + 301 + VLAN_ETH_HLEN); 302 if (!qca->rx_skb) { 303 netdev_dbg(net_dev, "out of RX resources\n"); 304 qca->stats.out_of_mem++; ··· 377 qca->rx_skb, qca->rx_skb->dev); 378 qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY; 379 netif_rx_ni(qca->rx_skb); 380 + qca->rx_skb = netdev_alloc_skb_ip_align(net_dev, 381 net_dev->mtu + VLAN_ETH_HLEN); 382 if (!qca->rx_skb) { 383 netdev_dbg(net_dev, "out of RX resources\n"); ··· 759 if (!qca->rx_buffer) 760 return -ENOBUFS; 761 762 + qca->rx_skb = netdev_alloc_skb_ip_align(dev, qca->net_dev->mtu + 763 + VLAN_ETH_HLEN); 764 if (!qca->rx_skb) { 765 kfree(qca->rx_buffer); 766 netdev_info(qca->net_dev, "Failed to allocate RX sk_buff.\n");
+2 -1
drivers/net/ethernet/renesas/sh_eth.c
··· 3220 /* MDIO bus init */ 3221 ret = sh_mdio_init(mdp, pd); 3222 if (ret) { 3223 - dev_err(&ndev->dev, "failed to initialise MDIO\n"); 3224 goto out_release; 3225 } 3226
··· 3220 /* MDIO bus init */ 3221 ret = sh_mdio_init(mdp, pd); 3222 if (ret) { 3223 + if (ret != -EPROBE_DEFER) 3224 + dev_err(&pdev->dev, "MDIO init failed: %d\n", ret); 3225 goto out_release; 3226 } 3227
+6 -2
drivers/net/ethernet/sfc/nic.h
··· 18 #include "mcdi.h" 19 20 enum { 21 - EFX_REV_SIENA_A0 = 0, 22 - EFX_REV_HUNT_A0 = 1, 23 }; 24 25 static inline int efx_nic_rev(struct efx_nic *efx)
··· 18 #include "mcdi.h" 19 20 enum { 21 + /* Revisions 0-2 were Falcon A0, A1 and B0 respectively. 22 + * They are not supported by this driver but these revision numbers 23 + * form part of the ethtool API for register dumping. 24 + */ 25 + EFX_REV_SIENA_A0 = 3, 26 + EFX_REV_HUNT_A0 = 4, 27 }; 28 29 static inline int efx_nic_rev(struct efx_nic *efx)
+1 -1
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 3725 ep++; 3726 } else { 3727 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", 3728 - i, (unsigned int)virt_to_phys(ep), 3729 le32_to_cpu(p->des0), le32_to_cpu(p->des1), 3730 le32_to_cpu(p->des2), le32_to_cpu(p->des3)); 3731 p++;
··· 3725 ep++; 3726 } else { 3727 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", 3728 + i, (unsigned int)virt_to_phys(p), 3729 le32_to_cpu(p->des0), le32_to_cpu(p->des1), 3730 le32_to_cpu(p->des2), le32_to_cpu(p->des3)); 3731 p++;
+2 -2
drivers/net/ethernet/sun/ldmvsw.c
··· 411 412 if (port) { 413 del_timer_sync(&port->vio.timer); 414 415 napi_disable(&port->napi); 416 417 list_del_rcu(&port->list); 418 419 synchronize_rcu(); 420 - del_timer_sync(&port->clean_timer); 421 spin_lock_irqsave(&port->vp->lock, flags); 422 sunvnet_port_rm_txq_common(port); 423 spin_unlock_irqrestore(&port->vp->lock, flags); ··· 428 429 dev_set_drvdata(&vdev->dev, NULL); 430 431 - unregister_netdev(port->dev); 432 free_netdev(port->dev); 433 } 434
··· 411 412 if (port) { 413 del_timer_sync(&port->vio.timer); 414 + del_timer_sync(&port->clean_timer); 415 416 napi_disable(&port->napi); 417 + unregister_netdev(port->dev); 418 419 list_del_rcu(&port->list); 420 421 synchronize_rcu(); 422 spin_lock_irqsave(&port->vp->lock, flags); 423 sunvnet_port_rm_txq_common(port); 424 spin_unlock_irqrestore(&port->vp->lock, flags); ··· 427 428 dev_set_drvdata(&vdev->dev, NULL); 429 430 free_netdev(port->dev); 431 } 432
+4 -2
drivers/net/ethernet/ti/netcp_core.c
··· 1353 1354 tx_pipe->dma_channel = knav_dma_open_channel(dev, 1355 tx_pipe->dma_chan_name, &config); 1356 - if (IS_ERR_OR_NULL(tx_pipe->dma_channel)) { 1357 dev_err(dev, "failed opening tx chan(%s)\n", 1358 tx_pipe->dma_chan_name); 1359 goto err; 1360 } 1361 ··· 1674 1675 netcp->rx_channel = knav_dma_open_channel(netcp->netcp_device->device, 1676 netcp->dma_chan_name, &config); 1677 - if (IS_ERR_OR_NULL(netcp->rx_channel)) { 1678 dev_err(netcp->ndev_dev, "failed opening rx chan(%s\n", 1679 netcp->dma_chan_name); 1680 goto fail; 1681 } 1682
··· 1353 1354 tx_pipe->dma_channel = knav_dma_open_channel(dev, 1355 tx_pipe->dma_chan_name, &config); 1356 + if (IS_ERR(tx_pipe->dma_channel)) { 1357 dev_err(dev, "failed opening tx chan(%s)\n", 1358 tx_pipe->dma_chan_name); 1359 + ret = PTR_ERR(tx_pipe->dma_channel); 1360 goto err; 1361 } 1362 ··· 1673 1674 netcp->rx_channel = knav_dma_open_channel(netcp->netcp_device->device, 1675 netcp->dma_chan_name, &config); 1676 + if (IS_ERR(netcp->rx_channel)) { 1677 dev_err(netcp->ndev_dev, "failed opening rx chan(%s\n", 1678 netcp->dma_chan_name); 1679 + ret = PTR_ERR(netcp->rx_channel); 1680 goto fail; 1681 } 1682
-1
drivers/net/ethernet/ti/netcp_ethss.c
··· 2651 case HWTSTAMP_FILTER_NONE: 2652 cpts_rx_enable(cpts, 0); 2653 break; 2654 - case HWTSTAMP_FILTER_ALL: 2655 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 2656 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 2657 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
··· 2651 case HWTSTAMP_FILTER_NONE: 2652 cpts_rx_enable(cpts, 0); 2653 break; 2654 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 2655 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 2656 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+1 -1
drivers/net/irda/irda-usb.c
··· 1077 * are "42101001.sb" or "42101002.sb" 1078 */ 1079 sprintf(stir421x_fw_name, "4210%4X.sb", 1080 - self->usbdev->descriptor.bcdDevice); 1081 ret = request_firmware(&fw, stir421x_fw_name, &self->usbdev->dev); 1082 if (ret < 0) 1083 return ret;
··· 1077 * are "42101001.sb" or "42101002.sb" 1078 */ 1079 sprintf(stir421x_fw_name, "4210%4X.sb", 1080 + le16_to_cpu(self->usbdev->descriptor.bcdDevice)); 1081 ret = request_firmware(&fw, stir421x_fw_name, &self->usbdev->dev); 1082 if (ret < 0) 1083 return ret;
+5 -2
drivers/net/macvlan.c
··· 789 */ 790 static struct lock_class_key macvlan_netdev_addr_lock_key; 791 792 - #define ALWAYS_ON_FEATURES \ 793 - (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE | NETIF_F_LLTX | \ 794 NETIF_F_GSO_ROBUST) 795 796 #define MACVLAN_FEATURES \ 797 (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ ··· 829 dev->features |= ALWAYS_ON_FEATURES; 830 dev->hw_features |= NETIF_F_LRO; 831 dev->vlan_features = lowerdev->vlan_features & MACVLAN_FEATURES; 832 dev->gso_max_size = lowerdev->gso_max_size; 833 dev->gso_max_segs = lowerdev->gso_max_segs; 834 dev->hard_header_len = lowerdev->hard_header_len;
··· 789 */ 790 static struct lock_class_key macvlan_netdev_addr_lock_key; 791 792 + #define ALWAYS_ON_OFFLOADS \ 793 + (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE | \ 794 NETIF_F_GSO_ROBUST) 795 + 796 + #define ALWAYS_ON_FEATURES (ALWAYS_ON_OFFLOADS | NETIF_F_LLTX) 797 798 #define MACVLAN_FEATURES \ 799 (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ ··· 827 dev->features |= ALWAYS_ON_FEATURES; 828 dev->hw_features |= NETIF_F_LRO; 829 dev->vlan_features = lowerdev->vlan_features & MACVLAN_FEATURES; 830 + dev->vlan_features |= ALWAYS_ON_OFFLOADS; 831 dev->gso_max_size = lowerdev->gso_max_size; 832 dev->gso_max_segs = lowerdev->gso_max_segs; 833 dev->hard_header_len = lowerdev->hard_header_len;
+6 -5
drivers/net/phy/mdio-mux.c
··· 122 pb = devm_kzalloc(dev, sizeof(*pb), GFP_KERNEL); 123 if (pb == NULL) { 124 ret_val = -ENOMEM; 125 - goto err_parent_bus; 126 } 127 - 128 129 pb->switch_data = data; 130 pb->switch_fn = switch_fn; ··· 153 cb->mii_bus = mdiobus_alloc(); 154 if (!cb->mii_bus) { 155 ret_val = -ENOMEM; 156 of_node_put(child_bus_node); 157 break; 158 } ··· 170 mdiobus_free(cb->mii_bus); 171 devm_kfree(dev, cb); 172 } else { 173 - of_node_get(child_bus_node); 174 cb->next = pb->children; 175 pb->children = cb; 176 } ··· 180 return 0; 181 } 182 183 /* balance the reference of_mdio_find_bus() took */ 184 - put_device(&pb->mii_bus->dev); 185 - 186 err_parent_bus: 187 of_node_put(parent_bus_node); 188 return ret_val;
··· 122 pb = devm_kzalloc(dev, sizeof(*pb), GFP_KERNEL); 123 if (pb == NULL) { 124 ret_val = -ENOMEM; 125 + goto err_pb_kz; 126 } 127 128 pb->switch_data = data; 129 pb->switch_fn = switch_fn; ··· 154 cb->mii_bus = mdiobus_alloc(); 155 if (!cb->mii_bus) { 156 ret_val = -ENOMEM; 157 + devm_kfree(dev, cb); 158 of_node_put(child_bus_node); 159 break; 160 } ··· 170 mdiobus_free(cb->mii_bus); 171 devm_kfree(dev, cb); 172 } else { 173 cb->next = pb->children; 174 pb->children = cb; 175 } ··· 181 return 0; 182 } 183 184 + devm_kfree(dev, pb); 185 + err_pb_kz: 186 /* balance the reference of_mdio_find_bus() took */ 187 + if (!mux_bus) 188 + put_device(&parent_bus->dev); 189 err_parent_bus: 190 of_node_put(parent_bus_node); 191 return ret_val;
+3 -3
drivers/net/phy/mdio_bus.c
··· 364 365 mutex_init(&bus->mdio_lock); 366 367 - if (bus->reset) 368 - bus->reset(bus); 369 - 370 /* de-assert bus level PHY GPIO resets */ 371 if (bus->num_reset_gpios > 0) { 372 bus->reset_gpiod = devm_kcalloc(&bus->dev, ··· 392 gpiod_set_value_cansleep(gpiod, 0); 393 } 394 } 395 396 for (i = 0; i < PHY_MAX_ADDR; i++) { 397 if ((bus->phy_mask & (1 << i)) == 0) {
··· 364 365 mutex_init(&bus->mdio_lock); 366 367 /* de-assert bus level PHY GPIO resets */ 368 if (bus->num_reset_gpios > 0) { 369 bus->reset_gpiod = devm_kcalloc(&bus->dev, ··· 395 gpiod_set_value_cansleep(gpiod, 0); 396 } 397 } 398 + 399 + if (bus->reset) 400 + bus->reset(bus); 401 402 for (i = 0; i < PHY_MAX_ADDR; i++) { 403 if ((bus->phy_mask & (1 << i)) == 0) {
+2 -2
drivers/net/usb/ch9200.c
··· 310 int rd_mac_len = 0; 311 312 netdev_dbg(dev->net, "get_mac_address:\n\tusbnet VID:%0x PID:%0x\n", 313 - dev->udev->descriptor.idVendor, 314 - dev->udev->descriptor.idProduct); 315 316 memset(mac_addr, 0, sizeof(mac_addr)); 317 rd_mac_len = control_read(dev, REQUEST_READ, 0,
··· 310 int rd_mac_len = 0; 311 312 netdev_dbg(dev->net, "get_mac_address:\n\tusbnet VID:%0x PID:%0x\n", 313 + le16_to_cpu(dev->udev->descriptor.idVendor), 314 + le16_to_cpu(dev->udev->descriptor.idProduct)); 315 316 memset(mac_addr, 0, sizeof(mac_addr)); 317 rd_mac_len = control_read(dev, REQUEST_READ, 0,
+2
drivers/net/usb/qmi_wwan.c
··· 1196 {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */ 1197 {QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */ 1198 {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */ 1199 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ 1200 {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ 1201 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
··· 1196 {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */ 1197 {QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */ 1198 {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */ 1199 + {QMI_FIXED_INTF(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */ 1200 + {QMI_FIXED_INTF(0x1199, 0x907b, 10)}, /* Sierra Wireless EM74xx */ 1201 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ 1202 {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ 1203 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
+5
drivers/net/vmxnet3/vmxnet3_drv.c
··· 2962 /* we need to enable NAPI, otherwise dev_close will deadlock */ 2963 for (i = 0; i < adapter->num_rx_queues; i++) 2964 napi_enable(&adapter->rx_queue[i].napi); 2965 dev_close(adapter->netdev); 2966 } 2967
··· 2962 /* we need to enable NAPI, otherwise dev_close will deadlock */ 2963 for (i = 0; i < adapter->num_rx_queues; i++) 2964 napi_enable(&adapter->rx_queue[i].napi); 2965 + /* 2966 + * Need to clear the quiesce bit to ensure that vmxnet3_close 2967 + * can quiesce the device properly 2968 + */ 2969 + clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); 2970 dev_close(adapter->netdev); 2971 } 2972
+2 -1
drivers/net/vrf.c
··· 989 990 static int vrf_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) 991 { 992 return 0; 993 } 994 ··· 999 { 1000 struct net *net = dev_net(dev); 1001 1002 - if (NF_HOOK(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) < 0) 1003 skb = NULL; /* kfree_skb(skb) handled by nf code */ 1004 1005 return skb;
··· 989 990 static int vrf_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) 991 { 992 + kfree_skb(skb); 993 return 0; 994 } 995 ··· 998 { 999 struct net *net = dev_net(dev); 1000 1001 + if (nf_hook(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) != 1) 1002 skb = NULL; /* kfree_skb(skb) handled by nf code */ 1003 1004 return skb;
+1 -2
drivers/net/xen-netfront.c
··· 1934 xennet_disconnect_backend(info); 1935 xennet_destroy_queues(info); 1936 out: 1937 - unregister_netdev(info->netdev); 1938 - xennet_free_netdev(info->netdev); 1939 return err; 1940 } 1941
··· 1934 xennet_disconnect_backend(info); 1935 xennet_destroy_queues(info); 1936 out: 1937 + device_unregister(&dev->dev); 1938 return err; 1939 } 1940
+10
drivers/nvme/host/fc.c
··· 1754 dev_info(ctrl->ctrl.device, 1755 "NVME-FC{%d}: resetting controller\n", ctrl->cnum); 1756 1757 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) { 1758 dev_err(ctrl->ctrl.device, 1759 "NVME-FC{%d}: error_recovery: Couldn't change state " ··· 2723 struct nvme_fc_ctrl *ctrl; 2724 unsigned long flags; 2725 int ret, idx; 2726 2727 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); 2728 if (!ctrl) {
··· 1754 dev_info(ctrl->ctrl.device, 1755 "NVME-FC{%d}: resetting controller\n", ctrl->cnum); 1756 1757 + /* stop the queues on error, cleanup is in reset thread */ 1758 + if (ctrl->queue_count > 1) 1759 + nvme_stop_queues(&ctrl->ctrl); 1760 + 1761 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) { 1762 dev_err(ctrl->ctrl.device, 1763 "NVME-FC{%d}: error_recovery: Couldn't change state " ··· 2719 struct nvme_fc_ctrl *ctrl; 2720 unsigned long flags; 2721 int ret, idx; 2722 + 2723 + if (!(rport->remoteport.port_role & 2724 + (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) { 2725 + ret = -EBADR; 2726 + goto out_fail; 2727 + } 2728 2729 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); 2730 if (!ctrl) {
+6 -1
drivers/nvme/host/pci.c
··· 1506 if (dev->cmb) { 1507 iounmap(dev->cmb); 1508 dev->cmb = NULL; 1509 } 1510 } 1511 ··· 1784 { 1785 struct pci_dev *pdev = to_pci_dev(dev->dev); 1786 1787 pci_free_irq_vectors(pdev); 1788 1789 if (pci_is_enabled(pdev)) { ··· 2190 nvme_dev_disable(dev, true); 2191 nvme_dev_remove_admin(dev); 2192 nvme_free_queues(dev, 0); 2193 - nvme_release_cmb(dev); 2194 nvme_release_prp_pools(dev); 2195 nvme_dev_unmap(dev); 2196 nvme_put_ctrl(&dev->ctrl);
··· 1506 if (dev->cmb) { 1507 iounmap(dev->cmb); 1508 dev->cmb = NULL; 1509 + if (dev->cmbsz) { 1510 + sysfs_remove_file_from_group(&dev->ctrl.device->kobj, 1511 + &dev_attr_cmb.attr, NULL); 1512 + dev->cmbsz = 0; 1513 + } 1514 } 1515 } 1516 ··· 1779 { 1780 struct pci_dev *pdev = to_pci_dev(dev->dev); 1781 1782 + nvme_release_cmb(dev); 1783 pci_free_irq_vectors(pdev); 1784 1785 if (pci_is_enabled(pdev)) { ··· 2184 nvme_dev_disable(dev, true); 2185 nvme_dev_remove_admin(dev); 2186 nvme_free_queues(dev, 0); 2187 nvme_release_prp_pools(dev); 2188 nvme_dev_unmap(dev); 2189 nvme_put_ctrl(&dev->ctrl);
+6
drivers/nvme/target/core.c
··· 529 } 530 EXPORT_SYMBOL_GPL(nvmet_req_init); 531 532 static inline bool nvmet_cc_en(u32 cc) 533 { 534 return cc & 0x1;
··· 529 } 530 EXPORT_SYMBOL_GPL(nvmet_req_init); 531 532 + void nvmet_req_uninit(struct nvmet_req *req) 533 + { 534 + percpu_ref_put(&req->sq->ref); 535 + } 536 + EXPORT_SYMBOL_GPL(nvmet_req_uninit); 537 + 538 static inline bool nvmet_cc_en(u32 cc) 539 { 540 return cc & 0x1;
+1 -3
drivers/nvme/target/fc.c
··· 517 { 518 int cpu, idx, cnt; 519 520 - if (!(tgtport->ops->target_features & 521 - NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED) || 522 - tgtport->ops->max_hw_queues == 1) 523 return WORK_CPU_UNBOUND; 524 525 /* Simple cpu selection based on qid modulo active cpu count */
··· 517 { 518 int cpu, idx, cnt; 519 520 + if (tgtport->ops->max_hw_queues == 1) 521 return WORK_CPU_UNBOUND; 522 523 /* Simple cpu selection based on qid modulo active cpu count */
-1
drivers/nvme/target/fcloop.c
··· 698 .dma_boundary = FCLOOP_DMABOUND_4G, 699 /* optional features */ 700 .target_features = NVMET_FCTGTFEAT_CMD_IN_ISR | 701 - NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED | 702 NVMET_FCTGTFEAT_OPDONE_IN_ISR, 703 /* sizes of additional private data for data structures */ 704 .target_priv_sz = sizeof(struct fcloop_tport),
··· 698 .dma_boundary = FCLOOP_DMABOUND_4G, 699 /* optional features */ 700 .target_features = NVMET_FCTGTFEAT_CMD_IN_ISR | 701 NVMET_FCTGTFEAT_OPDONE_IN_ISR, 702 /* sizes of additional private data for data structures */ 703 .target_priv_sz = sizeof(struct fcloop_tport),
+1
drivers/nvme/target/nvmet.h
··· 261 262 bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, 263 struct nvmet_sq *sq, struct nvmet_fabrics_ops *ops); 264 void nvmet_req_complete(struct nvmet_req *req, u16 status); 265 266 void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
··· 261 262 bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, 263 struct nvmet_sq *sq, struct nvmet_fabrics_ops *ops); 264 + void nvmet_req_uninit(struct nvmet_req *req); 265 void nvmet_req_complete(struct nvmet_req *req, u16 status); 266 267 void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
+1
drivers/nvme/target/rdma.c
··· 567 rsp->n_rdma = 0; 568 569 if (unlikely(wc->status != IB_WC_SUCCESS)) { 570 nvmet_rdma_release_rsp(rsp); 571 if (wc->status != IB_WC_WR_FLUSH_ERR) { 572 pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n",
··· 567 rsp->n_rdma = 0; 568 569 if (unlikely(wc->status != IB_WC_SUCCESS)) { 570 + nvmet_req_uninit(&rsp->req); 571 nvmet_rdma_release_rsp(rsp); 572 if (wc->status != IB_WC_WR_FLUSH_ERR) { 573 pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n",
+3
drivers/of/fdt.c
··· 507 508 /* Allocate memory for the expanded device tree */ 509 mem = dt_alloc(size + 4, __alignof__(struct device_node)); 510 memset(mem, 0, size); 511 512 *(__be32 *)(mem + size) = cpu_to_be32(0xdeadbeef);
··· 507 508 /* Allocate memory for the expanded device tree */ 509 mem = dt_alloc(size + 4, __alignof__(struct device_node)); 510 + if (!mem) 511 + return NULL; 512 + 513 memset(mem, 0, size); 514 515 *(__be32 *)(mem + size) = cpu_to_be32(0xdeadbeef);
+1 -1
drivers/of/of_reserved_mem.c
··· 197 const struct of_device_id *i; 198 199 for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) { 200 - int const (*initfn)(struct reserved_mem *rmem) = i->data; 201 const char *compat = i->compatible; 202 203 if (!of_flat_dt_is_compatible(rmem->fdt_node, compat))
··· 197 const struct of_device_id *i; 198 199 for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) { 200 + reservedmem_of_init_fn initfn = i->data; 201 const char *compat = i->compatible; 202 203 if (!of_flat_dt_is_compatible(rmem->fdt_node, compat))
+2 -2
drivers/s390/cio/ccwgroup.c
··· 35 static void __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev) 36 { 37 int i; 38 - char str[8]; 39 40 for (i = 0; i < gdev->count; i++) { 41 sprintf(str, "cdev%d", i); ··· 238 239 static int __ccwgroup_create_symlinks(struct ccwgroup_device *gdev) 240 { 241 - char str[8]; 242 int i, rc; 243 244 for (i = 0; i < gdev->count; i++) {
··· 35 static void __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev) 36 { 37 int i; 38 + char str[16]; 39 40 for (i = 0; i < gdev->count; i++) { 41 sprintf(str, "cdev%d", i); ··· 238 239 static int __ccwgroup_create_symlinks(struct ccwgroup_device *gdev) 240 { 241 + char str[16]; 242 int i, rc; 243 244 for (i = 0; i < gdev->count; i++) {
+1 -1
drivers/s390/cio/qdio_debug.h
··· 11 #include "qdio.h" 12 13 /* that gives us 15 characters in the text event views */ 14 - #define QDIO_DBF_LEN 16 15 16 extern debug_info_t *qdio_dbf_setup; 17 extern debug_info_t *qdio_dbf_error;
··· 11 #include "qdio.h" 12 13 /* that gives us 15 characters in the text event views */ 14 + #define QDIO_DBF_LEN 32 15 16 extern debug_info_t *qdio_dbf_setup; 17 extern debug_info_t *qdio_dbf_error;
+4
drivers/s390/net/qeth_core.h
··· 701 }; 702 703 struct qeth_discipline { 704 void (*start_poll)(struct ccw_device *, int, unsigned long); 705 qdio_handler_t *input_handler; 706 qdio_handler_t *output_handler; ··· 876 extern struct qeth_discipline qeth_l3_discipline; 877 extern const struct attribute_group *qeth_generic_attr_groups[]; 878 extern const struct attribute_group *qeth_osn_attr_groups[]; 879 extern struct workqueue_struct *qeth_wq; 880 881 int qeth_card_hw_is_reachable(struct qeth_card *);
··· 701 }; 702 703 struct qeth_discipline { 704 + const struct device_type *devtype; 705 void (*start_poll)(struct ccw_device *, int, unsigned long); 706 qdio_handler_t *input_handler; 707 qdio_handler_t *output_handler; ··· 875 extern struct qeth_discipline qeth_l3_discipline; 876 extern const struct attribute_group *qeth_generic_attr_groups[]; 877 extern const struct attribute_group *qeth_osn_attr_groups[]; 878 + extern const struct attribute_group qeth_device_attr_group; 879 + extern const struct attribute_group qeth_device_blkt_group; 880 + extern const struct device_type qeth_generic_devtype; 881 extern struct workqueue_struct *qeth_wq; 882 883 int qeth_card_hw_is_reachable(struct qeth_card *);
+12 -9
drivers/s390/net/qeth_core_main.c
··· 5530 card->discipline = NULL; 5531 } 5532 5533 - static const struct device_type qeth_generic_devtype = { 5534 .name = "qeth_generic", 5535 .groups = qeth_generic_attr_groups, 5536 }; 5537 static const struct device_type qeth_osn_devtype = { 5538 .name = "qeth_osn", 5539 .groups = qeth_osn_attr_groups, ··· 5661 goto err_card; 5662 } 5663 5664 - if (card->info.type == QETH_CARD_TYPE_OSN) 5665 - gdev->dev.type = &qeth_osn_devtype; 5666 - else 5667 - gdev->dev.type = &qeth_generic_devtype; 5668 - 5669 switch (card->info.type) { 5670 case QETH_CARD_TYPE_OSN: 5671 case QETH_CARD_TYPE_OSM: 5672 rc = qeth_core_load_discipline(card, QETH_DISCIPLINE_LAYER2); 5673 if (rc) 5674 goto err_card; 5675 rc = card->discipline->setup(card->gdev); 5676 if (rc) 5677 goto err_disc; 5678 - case QETH_CARD_TYPE_OSD: 5679 - case QETH_CARD_TYPE_OSX: 5680 default: 5681 break; 5682 } 5683 ··· 5732 if (rc) 5733 goto err; 5734 rc = card->discipline->setup(card->gdev); 5735 - if (rc) 5736 goto err; 5737 } 5738 rc = card->discipline->set_online(gdev); 5739 err:
··· 5530 card->discipline = NULL; 5531 } 5532 5533 + const struct device_type qeth_generic_devtype = { 5534 .name = "qeth_generic", 5535 .groups = qeth_generic_attr_groups, 5536 }; 5537 + EXPORT_SYMBOL_GPL(qeth_generic_devtype); 5538 + 5539 static const struct device_type qeth_osn_devtype = { 5540 .name = "qeth_osn", 5541 .groups = qeth_osn_attr_groups, ··· 5659 goto err_card; 5660 } 5661 5662 switch (card->info.type) { 5663 case QETH_CARD_TYPE_OSN: 5664 case QETH_CARD_TYPE_OSM: 5665 rc = qeth_core_load_discipline(card, QETH_DISCIPLINE_LAYER2); 5666 if (rc) 5667 goto err_card; 5668 + 5669 + gdev->dev.type = (card->info.type != QETH_CARD_TYPE_OSN) 5670 + ? card->discipline->devtype 5671 + : &qeth_osn_devtype; 5672 rc = card->discipline->setup(card->gdev); 5673 if (rc) 5674 goto err_disc; 5675 + break; 5676 default: 5677 + gdev->dev.type = &qeth_generic_devtype; 5678 break; 5679 } 5680 ··· 5731 if (rc) 5732 goto err; 5733 rc = card->discipline->setup(card->gdev); 5734 + if (rc) { 5735 + qeth_core_free_discipline(card); 5736 goto err; 5737 + } 5738 } 5739 rc = card->discipline->set_online(gdev); 5740 err:
+16 -8
drivers/s390/net/qeth_core_sys.c
··· 413 414 if (card->options.layer2 == newdis) 415 goto out; 416 - else { 417 - card->info.mac_bits = 0; 418 - if (card->discipline) { 419 - card->discipline->remove(card->gdev); 420 - qeth_core_free_discipline(card); 421 - } 422 } 423 424 rc = qeth_core_load_discipline(card, newdis); ··· 430 goto out; 431 432 rc = card->discipline->setup(card->gdev); 433 out: 434 mutex_unlock(&card->discipline_mutex); 435 return rc ? rc : count; ··· 709 &dev_attr_inter_jumbo.attr, 710 NULL, 711 }; 712 - static struct attribute_group qeth_device_blkt_group = { 713 .name = "blkt", 714 .attrs = qeth_blkt_device_attrs, 715 }; 716 717 static struct attribute *qeth_device_attrs[] = { 718 &dev_attr_state.attr, ··· 733 &dev_attr_switch_attrs.attr, 734 NULL, 735 }; 736 - static struct attribute_group qeth_device_attr_group = { 737 .attrs = qeth_device_attrs, 738 }; 739 740 const struct attribute_group *qeth_generic_attr_groups[] = { 741 &qeth_device_attr_group,
··· 413 414 if (card->options.layer2 == newdis) 415 goto out; 416 + if (card->info.type == QETH_CARD_TYPE_OSM) { 417 + /* fixed layer, can't switch */ 418 + rc = -EOPNOTSUPP; 419 + goto out; 420 + } 421 + 422 + card->info.mac_bits = 0; 423 + if (card->discipline) { 424 + card->discipline->remove(card->gdev); 425 + qeth_core_free_discipline(card); 426 } 427 428 rc = qeth_core_load_discipline(card, newdis); ··· 426 goto out; 427 428 rc = card->discipline->setup(card->gdev); 429 + if (rc) 430 + qeth_core_free_discipline(card); 431 out: 432 mutex_unlock(&card->discipline_mutex); 433 return rc ? rc : count; ··· 703 &dev_attr_inter_jumbo.attr, 704 NULL, 705 }; 706 + const struct attribute_group qeth_device_blkt_group = { 707 .name = "blkt", 708 .attrs = qeth_blkt_device_attrs, 709 }; 710 + EXPORT_SYMBOL_GPL(qeth_device_blkt_group); 711 712 static struct attribute *qeth_device_attrs[] = { 713 &dev_attr_state.attr, ··· 726 &dev_attr_switch_attrs.attr, 727 NULL, 728 }; 729 + const struct attribute_group qeth_device_attr_group = { 730 .attrs = qeth_device_attrs, 731 }; 732 + EXPORT_SYMBOL_GPL(qeth_device_attr_group); 733 734 const struct attribute_group *qeth_generic_attr_groups[] = { 735 &qeth_device_attr_group,
+2
drivers/s390/net/qeth_l2.h
··· 8 9 #include "qeth_core.h" 10 11 int qeth_l2_create_device_attributes(struct device *); 12 void qeth_l2_remove_device_attributes(struct device *); 13 void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card);
··· 8 9 #include "qeth_core.h" 10 11 + extern const struct attribute_group *qeth_l2_attr_groups[]; 12 + 13 int qeth_l2_create_device_attributes(struct device *); 14 void qeth_l2_remove_device_attributes(struct device *); 15 void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card);
+20 -6
drivers/s390/net/qeth_l2_main.c
··· 880 return 0; 881 } 882 883 static int qeth_l2_probe_device(struct ccwgroup_device *gdev) 884 { 885 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 886 887 - qeth_l2_create_device_attributes(&gdev->dev); 888 INIT_LIST_HEAD(&card->vid_list); 889 hash_init(card->mac_htable); 890 card->options.layer2 = 1; ··· 906 { 907 struct qeth_card *card = dev_get_drvdata(&cgdev->dev); 908 909 - qeth_l2_remove_device_attributes(&cgdev->dev); 910 qeth_set_allowed_threads(card, 0, 1); 911 wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); 912 ··· 965 case QETH_CARD_TYPE_OSN: 966 card->dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN, 967 ether_setup); 968 - card->dev->flags |= IFF_NOARP; 969 break; 970 default: 971 card->dev = alloc_etherdev(0); ··· 979 card->dev->min_mtu = 64; 980 card->dev->max_mtu = ETH_MAX_MTU; 981 card->dev->netdev_ops = &qeth_l2_netdev_ops; 982 - card->dev->ethtool_ops = 983 - (card->info.type != QETH_CARD_TYPE_OSN) ? 984 - &qeth_l2_ethtool_ops : &qeth_l2_osn_ops; 985 card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 986 if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) { 987 card->dev->hw_features = NETIF_F_SG; ··· 1282 } 1283 1284 struct qeth_discipline qeth_l2_discipline = { 1285 .start_poll = qeth_qdio_start_poll, 1286 .input_handler = (qdio_handler_t *) qeth_qdio_input_handler, 1287 .output_handler = (qdio_handler_t *) qeth_qdio_output_handler,
··· 880 return 0; 881 } 882 883 + static const struct device_type qeth_l2_devtype = { 884 + .name = "qeth_layer2", 885 + .groups = qeth_l2_attr_groups, 886 + }; 887 + 888 static int qeth_l2_probe_device(struct ccwgroup_device *gdev) 889 { 890 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 891 + int rc; 892 893 + if (gdev->dev.type == &qeth_generic_devtype) { 894 + rc = qeth_l2_create_device_attributes(&gdev->dev); 895 + if (rc) 896 + return rc; 897 + } 898 INIT_LIST_HEAD(&card->vid_list); 899 hash_init(card->mac_htable); 900 card->options.layer2 = 1; ··· 896 { 897 struct qeth_card *card = dev_get_drvdata(&cgdev->dev); 898 899 + if (cgdev->dev.type == &qeth_generic_devtype) 900 + qeth_l2_remove_device_attributes(&cgdev->dev); 901 qeth_set_allowed_threads(card, 0, 1); 902 wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); 903 ··· 954 case QETH_CARD_TYPE_OSN: 955 card->dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN, 956 ether_setup); 957 break; 958 default: 959 card->dev = alloc_etherdev(0); ··· 969 card->dev->min_mtu = 64; 970 card->dev->max_mtu = ETH_MAX_MTU; 971 card->dev->netdev_ops = &qeth_l2_netdev_ops; 972 + if (card->info.type == QETH_CARD_TYPE_OSN) { 973 + card->dev->ethtool_ops = &qeth_l2_osn_ops; 974 + card->dev->flags |= IFF_NOARP; 975 + } else { 976 + card->dev->ethtool_ops = &qeth_l2_ethtool_ops; 977 + } 978 card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 979 if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) { 980 card->dev->hw_features = NETIF_F_SG; ··· 1269 } 1270 1271 struct qeth_discipline qeth_l2_discipline = { 1272 + .devtype = &qeth_l2_devtype, 1273 .start_poll = qeth_qdio_start_poll, 1274 .input_handler = (qdio_handler_t *) qeth_qdio_input_handler, 1275 .output_handler = (qdio_handler_t *) qeth_qdio_output_handler,
+8
drivers/s390/net/qeth_l2_sys.c
··· 269 } else 270 qeth_bridgeport_an_set(card, 0); 271 }
··· 269 } else 270 qeth_bridgeport_an_set(card, 0); 271 } 272 + 273 + const struct attribute_group *qeth_l2_attr_groups[] = { 274 + &qeth_device_attr_group, 275 + &qeth_device_blkt_group, 276 + /* l2 specific, see l2_{create,remove}_device_attributes(): */ 277 + &qeth_l2_bridgeport_attr_group, 278 + NULL, 279 + };
+7 -1
drivers/s390/net/qeth_l3_main.c
··· 3039 static int qeth_l3_probe_device(struct ccwgroup_device *gdev) 3040 { 3041 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 3042 3043 - qeth_l3_create_device_attributes(&gdev->dev); 3044 card->options.layer2 = 0; 3045 card->info.hwtrap = 0; 3046 return 0; ··· 3311 } 3312 3313 struct qeth_discipline qeth_l3_discipline = { 3314 .start_poll = qeth_qdio_start_poll, 3315 .input_handler = (qdio_handler_t *) qeth_qdio_input_handler, 3316 .output_handler = (qdio_handler_t *) qeth_qdio_output_handler,
··· 3039 static int qeth_l3_probe_device(struct ccwgroup_device *gdev) 3040 { 3041 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 3042 + int rc; 3043 3044 + rc = qeth_l3_create_device_attributes(&gdev->dev); 3045 + if (rc) 3046 + return rc; 3047 + hash_init(card->ip_htable); 3048 + hash_init(card->ip_mc_htable); 3049 card->options.layer2 = 0; 3050 card->info.hwtrap = 0; 3051 return 0; ··· 3306 } 3307 3308 struct qeth_discipline qeth_l3_discipline = { 3309 + .devtype = &qeth_generic_devtype, 3310 .start_poll = qeth_qdio_start_poll, 3311 .input_handler = (qdio_handler_t *) qeth_qdio_input_handler, 3312 .output_handler = (qdio_handler_t *) qeth_qdio_output_handler,
+1 -1
drivers/s390/virtio/virtio_ccw.c
··· 87 } __packed; 88 89 struct virtio_feature_desc { 90 - __u32 features; 91 __u8 index; 92 } __packed; 93
··· 87 } __packed; 88 89 struct virtio_feature_desc { 90 + __le32 features; 91 __u8 index; 92 } __packed; 93
+1
drivers/scsi/cxlflash/Kconfig
··· 5 config CXLFLASH 6 tristate "Support for IBM CAPI Flash" 7 depends on PCI && SCSI && CXL && EEH 8 default m 9 help 10 Allows CAPI Accelerated IO to Flash
··· 5 config CXLFLASH 6 tristate "Support for IBM CAPI Flash" 7 depends on PCI && SCSI && CXL && EEH 8 + select IRQ_POLL 9 default m 10 help 11 Allows CAPI Accelerated IO to Flash
+9 -6
drivers/scsi/libfc/fc_fcp.c
··· 407 * can_queue. Eventually we will hit the point where we run 408 * on all reserved structs. 409 */ 410 - static void fc_fcp_can_queue_ramp_down(struct fc_lport *lport) 411 { 412 struct fc_fcp_internal *si = fc_get_scsi_internal(lport); 413 unsigned long flags; 414 int can_queue; 415 416 spin_lock_irqsave(lport->host->host_lock, flags); 417 ··· 428 if (!can_queue) 429 can_queue = 1; 430 lport->host->can_queue = can_queue; 431 432 unlock: 433 spin_unlock_irqrestore(lport->host->host_lock, flags); 434 } 435 436 /* ··· 1899 1900 if (!fc_fcp_lport_queue_ready(lport)) { 1901 if (lport->qfull) { 1902 - fc_fcp_can_queue_ramp_down(lport); 1903 - shost_printk(KERN_ERR, lport->host, 1904 - "libfc: queue full, " 1905 - "reducing can_queue to %d.\n", 1906 - lport->host->can_queue); 1907 } 1908 rc = SCSI_MLQUEUE_HOST_BUSY; 1909 goto out;
··· 407 * can_queue. Eventually we will hit the point where we run 408 * on all reserved structs. 409 */ 410 + static bool fc_fcp_can_queue_ramp_down(struct fc_lport *lport) 411 { 412 struct fc_fcp_internal *si = fc_get_scsi_internal(lport); 413 unsigned long flags; 414 int can_queue; 415 + bool changed = false; 416 417 spin_lock_irqsave(lport->host->host_lock, flags); 418 ··· 427 if (!can_queue) 428 can_queue = 1; 429 lport->host->can_queue = can_queue; 430 + changed = true; 431 432 unlock: 433 spin_unlock_irqrestore(lport->host->host_lock, flags); 434 + return changed; 435 } 436 437 /* ··· 1896 1897 if (!fc_fcp_lport_queue_ready(lport)) { 1898 if (lport->qfull) { 1899 + if (fc_fcp_can_queue_ramp_down(lport)) 1900 + shost_printk(KERN_ERR, lport->host, 1901 + "libfc: queue full, " 1902 + "reducing can_queue to %d.\n", 1903 + lport->host->can_queue); 1904 } 1905 rc = SCSI_MLQUEUE_HOST_BUSY; 1906 goto out;
+1
drivers/scsi/lpfc/lpfc_crtn.h
··· 294 void lpfc_reset_barrier(struct lpfc_hba *); 295 int lpfc_sli_brdready(struct lpfc_hba *, uint32_t); 296 int lpfc_sli_brdkill(struct lpfc_hba *); 297 int lpfc_sli_brdreset(struct lpfc_hba *); 298 int lpfc_sli_brdrestart(struct lpfc_hba *); 299 int lpfc_sli_hba_setup(struct lpfc_hba *);
··· 294 void lpfc_reset_barrier(struct lpfc_hba *); 295 int lpfc_sli_brdready(struct lpfc_hba *, uint32_t); 296 int lpfc_sli_brdkill(struct lpfc_hba *); 297 + int lpfc_sli_chipset_init(struct lpfc_hba *phba); 298 int lpfc_sli_brdreset(struct lpfc_hba *); 299 int lpfc_sli_brdrestart(struct lpfc_hba *); 300 int lpfc_sli_hba_setup(struct lpfc_hba *);
+1 -1
drivers/scsi/lpfc/lpfc_ct.c
··· 630 NLP_EVT_DEVICE_RECOVERY); 631 spin_lock_irq(shost->host_lock); 632 ndlp->nlp_flag &= ~NLP_NVMET_RECOV; 633 - spin_lock_irq(shost->host_lock); 634 } 635 } 636
··· 630 NLP_EVT_DEVICE_RECOVERY); 631 spin_lock_irq(shost->host_lock); 632 ndlp->nlp_flag &= ~NLP_NVMET_RECOV; 633 + spin_unlock_irq(shost->host_lock); 634 } 635 } 636
+8 -1
drivers/scsi/lpfc/lpfc_init.c
··· 3602 LPFC_MBOXQ_t *mboxq; 3603 MAILBOX_t *mb; 3604 3605 3606 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 3607 GFP_KERNEL); ··· 8854 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq); 8855 8856 /* Unset ELS work queue */ 8857 - if (phba->sli4_hba.els_cq) 8858 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 8859 8860 /* Unset unsolicited receive queue */
··· 3602 LPFC_MBOXQ_t *mboxq; 3603 MAILBOX_t *mb; 3604 3605 + if (phba->sli_rev < LPFC_SLI_REV4) { 3606 + /* Reset the port first */ 3607 + lpfc_sli_brdrestart(phba); 3608 + rc = lpfc_sli_chipset_init(phba); 3609 + if (rc) 3610 + return (uint64_t)-1; 3611 + } 3612 3613 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 3614 GFP_KERNEL); ··· 8847 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq); 8848 8849 /* Unset ELS work queue */ 8850 + if (phba->sli4_hba.els_wq) 8851 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 8852 8853 /* Unset unsolicited receive queue */
-1
drivers/scsi/lpfc/lpfc_nvmet.c
··· 764 lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1; 765 lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel; 766 lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP | 767 - NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED | 768 NVMET_FCTGTFEAT_CMD_IN_ISR | 769 NVMET_FCTGTFEAT_OPDONE_IN_ISR; 770
··· 764 lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1; 765 lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel; 766 lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP | 767 NVMET_FCTGTFEAT_CMD_IN_ISR | 768 NVMET_FCTGTFEAT_OPDONE_IN_ISR; 769
+12 -7
drivers/scsi/lpfc/lpfc_sli.c
··· 4204 /* Reset HBA */ 4205 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4206 "0325 Reset HBA Data: x%x x%x\n", 4207 - phba->pport->port_state, psli->sli_flag); 4208 4209 /* perform board reset */ 4210 phba->fc_eventTag = 0; 4211 phba->link_events = 0; 4212 - phba->pport->fc_myDID = 0; 4213 - phba->pport->fc_prevDID = 0; 4214 4215 /* Turn off parity checking and serr during the physical reset */ 4216 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); ··· 4339 /* Restart HBA */ 4340 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4341 "0337 Restart HBA Data: x%x x%x\n", 4342 - phba->pport->port_state, psli->sli_flag); 4343 4344 word0 = 0; 4345 mb = (MAILBOX_t *) &word0; ··· 4354 readl(to_slim); /* flush */ 4355 4356 /* Only skip post after fc_ffinit is completed */ 4357 - if (phba->pport->port_state) 4358 word0 = 1; /* This is really setting up word1 */ 4359 else 4360 word0 = 0; /* This is really setting up word1 */ ··· 4363 readl(to_slim); /* flush */ 4364 4365 lpfc_sli_brdreset(phba); 4366 - phba->pport->stopped = 0; 4367 phba->link_state = LPFC_INIT_START; 4368 phba->hba_flag = 0; 4369 spin_unlock_irq(&phba->hbalock); ··· 4451 * iteration, the function will restart the HBA again. The function returns 4452 * zero if HBA successfully restarted else returns negative error code. 4453 **/ 4454 - static int 4455 lpfc_sli_chipset_init(struct lpfc_hba *phba) 4456 { 4457 uint32_t status, i = 0;
··· 4204 /* Reset HBA */ 4205 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4206 "0325 Reset HBA Data: x%x x%x\n", 4207 + (phba->pport) ? phba->pport->port_state : 0, 4208 + psli->sli_flag); 4209 4210 /* perform board reset */ 4211 phba->fc_eventTag = 0; 4212 phba->link_events = 0; 4213 + if (phba->pport) { 4214 + phba->pport->fc_myDID = 0; 4215 + phba->pport->fc_prevDID = 0; 4216 + } 4217 4218 /* Turn off parity checking and serr during the physical reset */ 4219 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); ··· 4336 /* Restart HBA */ 4337 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4338 "0337 Restart HBA Data: x%x x%x\n", 4339 + (phba->pport) ? phba->pport->port_state : 0, 4340 + psli->sli_flag); 4341 4342 word0 = 0; 4343 mb = (MAILBOX_t *) &word0; ··· 4350 readl(to_slim); /* flush */ 4351 4352 /* Only skip post after fc_ffinit is completed */ 4353 + if (phba->pport && phba->pport->port_state) 4354 word0 = 1; /* This is really setting up word1 */ 4355 else 4356 word0 = 0; /* This is really setting up word1 */ ··· 4359 readl(to_slim); /* flush */ 4360 4361 lpfc_sli_brdreset(phba); 4362 + if (phba->pport) 4363 + phba->pport->stopped = 0; 4364 phba->link_state = LPFC_INIT_START; 4365 phba->hba_flag = 0; 4366 spin_unlock_irq(&phba->hbalock); ··· 4446 * iteration, the function will restart the HBA again. The function returns 4447 * zero if HBA successfully restarted else returns negative error code. 4448 **/ 4449 + int 4450 lpfc_sli_chipset_init(struct lpfc_hba *phba) 4451 { 4452 uint32_t status, i = 0;
-3
drivers/scsi/pmcraid.c
··· 3770 pmcraid_err("couldn't build passthrough ioadls\n"); 3771 goto out_free_cmd; 3772 } 3773 - } else if (request_size < 0) { 3774 - rc = -EINVAL; 3775 - goto out_free_cmd; 3776 } 3777 3778 /* If data is being written into the device, copy the data from user
··· 3770 pmcraid_err("couldn't build passthrough ioadls\n"); 3771 goto out_free_cmd; 3772 } 3773 } 3774 3775 /* If data is being written into the device, copy the data from user
+1 -1
drivers/scsi/qedf/qedf.h
··· 259 uint16_t task_id; 260 uint32_t port_id; /* Remote port fabric ID */ 261 int lun; 262 - char op; /* SCSI CDB */ 263 uint8_t lba[4]; 264 unsigned int bufflen; /* SCSI buffer length */ 265 unsigned int sg_count; /* Number of SG elements */
··· 259 uint16_t task_id; 260 uint32_t port_id; /* Remote port fabric ID */ 261 int lun; 262 + unsigned char op; /* SCSI CDB */ 263 uint8_t lba[4]; 264 unsigned int bufflen; /* SCSI buffer length */ 265 unsigned int sg_count; /* Number of SG elements */
+1 -1
drivers/scsi/qedf/qedf_els.c
··· 109 did = fcport->rdata->ids.port_id; 110 sid = fcport->sid; 111 112 - __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, sid, did, 113 FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | 114 FC_FC_SEQ_INIT, 0); 115
··· 109 did = fcport->rdata->ids.port_id; 110 sid = fcport->sid; 111 112 + __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid, 113 FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | 114 FC_FC_SEQ_INIT, 0); 115
+1 -1
drivers/scsi/qedf/qedf_main.c
··· 2895 slowpath_params.drv_minor = QEDF_DRIVER_MINOR_VER; 2896 slowpath_params.drv_rev = QEDF_DRIVER_REV_VER; 2897 slowpath_params.drv_eng = QEDF_DRIVER_ENG_VER; 2898 - memcpy(slowpath_params.name, "qedf", QED_DRV_VER_STR_SIZE); 2899 rc = qed_ops->common->slowpath_start(qedf->cdev, &slowpath_params); 2900 if (rc) { 2901 QEDF_ERR(&(qedf->dbg_ctx), "Cannot start slowpath.\n");
··· 2895 slowpath_params.drv_minor = QEDF_DRIVER_MINOR_VER; 2896 slowpath_params.drv_rev = QEDF_DRIVER_REV_VER; 2897 slowpath_params.drv_eng = QEDF_DRIVER_ENG_VER; 2898 + strncpy(slowpath_params.name, "qedf", QED_DRV_VER_STR_SIZE); 2899 rc = qed_ops->common->slowpath_start(qedf->cdev, &slowpath_params); 2900 if (rc) { 2901 QEDF_ERR(&(qedf->dbg_ctx), "Cannot start slowpath.\n");
+2
drivers/scsi/scsi.c
··· 763 struct scsi_device *sdev; 764 765 list_for_each_entry(sdev, &shost->__devices, siblings) { 766 if (sdev->channel == channel && sdev->id == id && 767 sdev->lun ==lun) 768 return sdev;
··· 763 struct scsi_device *sdev; 764 765 list_for_each_entry(sdev, &shost->__devices, siblings) { 766 + if (sdev->sdev_state == SDEV_DEL) 767 + continue; 768 if (sdev->channel == channel && sdev->id == id && 769 sdev->lun ==lun) 770 return sdev;
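The scsi.c hunk makes the lookup skip any device already in the SDEV_DEL state, so an entry that is being torn down is never handed back to the caller. A stand-alone sketch of the same skip-while-iterating idea (the data model is made up for illustration):

#include <stdio.h>

enum state { SDEV_RUNNING, SDEV_DEL };

struct sdev {
	int channel, id, lun;
	enum state state;
};

/* Return the first live device matching (channel, id, lun); ignore
 * entries flagged as deleted even if they match.
 */
static struct sdev *lookup(struct sdev *devs, int n, int c, int id, int lun)
{
	for (int i = 0; i < n; i++) {
		if (devs[i].state == SDEV_DEL)
			continue;	/* being torn down: skip it */
		if (devs[i].channel == c && devs[i].id == id &&
		    devs[i].lun == lun)
			return &devs[i];
	}
	return NULL;
}

int main(void)
{
	struct sdev devs[] = {
		{ 0, 1, 0, SDEV_DEL },		/* stale duplicate */
		{ 0, 1, 0, SDEV_RUNNING },
	};
	struct sdev *s = lookup(devs, 2, 0, 1, 0);

	printf("found %s entry\n",
	       s && s->state == SDEV_RUNNING ? "the live" : "no live");
	return 0;
}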
+1
drivers/scsi/scsi_lib.c
··· 30 #include <scsi/scsi_driver.h> 31 #include <scsi/scsi_eh.h> 32 #include <scsi/scsi_host.h> 33 #include <scsi/scsi_dh.h> 34 35 #include <trace/events/scsi.h>
··· 30 #include <scsi/scsi_driver.h> 31 #include <scsi/scsi_eh.h> 32 #include <scsi/scsi_host.h> 33 + #include <scsi/scsi_transport.h> /* __scsi_init_queue() */ 34 #include <scsi/scsi_dh.h> 35 36 #include <trace/events/scsi.h>
+1 -1
drivers/soc/ti/knav_dma.c
··· 413 * @name: slave channel name 414 * @config: dma configuration parameters 415 * 416 - * Returns pointer to appropriate DMA channel on success or NULL. 417 */ 418 void *knav_dma_open_channel(struct device *dev, const char *name, 419 struct knav_dma_cfg *config)
··· 413 * @name: slave channel name 414 * @config: dma configuration parameters 415 * 416 + * Returns pointer to appropriate DMA channel on success or error. 417 */ 418 void *knav_dma_open_channel(struct device *dev, const char *name, 419 struct knav_dma_cfg *config)
-51
drivers/staging/android/ion/devicetree.txt
··· 1 - Ion Memory Manager 2 - 3 - Ion is a memory manager that allows for sharing of buffers via dma-buf. 4 - Ion allows for different types of allocation via an abstraction called 5 - a 'heap'. A heap represents a specific type of memory. Each heap has 6 - a different type. There can be multiple instances of the same heap 7 - type. 8 - 9 - Specific heap instances are tied to heap IDs. Heap IDs are not to be specified 10 - in the devicetree. 11 - 12 - Required properties for Ion 13 - 14 - - compatible: "linux,ion" PLUS a compatible property for the device 15 - 16 - All child nodes of a linux,ion node are interpreted as heaps 17 - 18 - required properties for heaps 19 - 20 - - compatible: compatible string for a heap type PLUS a compatible property 21 - for the specific instance of the heap. Current heap types 22 - -- linux,ion-heap-system 23 - -- linux,ion-heap-system-contig 24 - -- linux,ion-heap-carveout 25 - -- linux,ion-heap-chunk 26 - -- linux,ion-heap-dma 27 - -- linux,ion-heap-custom 28 - 29 - Optional properties 30 - - memory-region: A phandle to a memory region. Required for DMA heap type 31 - (see reserved-memory.txt for details on the reservation) 32 - 33 - Example: 34 - 35 - ion { 36 - compatbile = "hisilicon,ion", "linux,ion"; 37 - 38 - ion-system-heap { 39 - compatbile = "hisilicon,system-heap", "linux,ion-heap-system" 40 - }; 41 - 42 - ion-camera-region { 43 - compatible = "hisilicon,camera-heap", "linux,ion-heap-dma" 44 - memory-region = <&camera_region>; 45 - }; 46 - 47 - ion-fb-region { 48 - compatbile = "hisilicon,fb-heap", "linux,ion-heap-dma" 49 - memory-region = <&fb_region>; 50 - }; 51 - }
···
-1
drivers/staging/ccree/ssi_request_mgr.c
··· 376 rc = ssi_power_mgr_runtime_get(&drvdata->plat_dev->dev); 377 if (rc != 0) { 378 SSI_LOG_ERR("ssi_power_mgr_runtime_get returned %x\n",rc); 379 - spin_unlock_bh(&req_mgr_h->hw_lock); 380 return rc; 381 } 382 #endif
··· 376 rc = ssi_power_mgr_runtime_get(&drvdata->plat_dev->dev); 377 if (rc != 0) { 378 SSI_LOG_ERR("ssi_power_mgr_runtime_get returned %x\n",rc); 379 return rc; 380 } 381 #endif
+1
drivers/staging/fsl-dpaa2/Kconfig
··· 12 config FSL_DPAA2_ETH 13 tristate "Freescale DPAA2 Ethernet" 14 depends on FSL_DPAA2 && FSL_MC_DPIO 15 ---help--- 16 Ethernet driver for Freescale DPAA2 SoCs, using the 17 Freescale MC bus driver
··· 12 config FSL_DPAA2_ETH 13 tristate "Freescale DPAA2 Ethernet" 14 depends on FSL_DPAA2 && FSL_MC_DPIO 15 + depends on NETDEVICES && ETHERNET 16 ---help--- 17 Ethernet driver for Freescale DPAA2 SoCs, using the 18 Freescale MC bus driver
+15 -9
drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
··· 97 98 switch (variable) { 99 case HW_VAR_BSSID: 100 - rtl92e_writel(dev, BSSIDR, ((u32 *)(val))[0]); 101 - rtl92e_writew(dev, BSSIDR+2, ((u16 *)(val+2))[0]); 102 break; 103 104 case HW_VAR_MEDIA_STATUS: ··· 625 struct r8192_priv *priv = rtllib_priv(dev); 626 627 RT_TRACE(COMP_INIT, "===========>%s()\n", __func__); 628 - curCR = rtl92e_readl(dev, EPROM_CMD); 629 RT_TRACE(COMP_INIT, "read from Reg Cmd9346CR(%x):%x\n", EPROM_CMD, 630 curCR); 631 priv->epromtype = (curCR & EPROM_CMD_9356SEL) ? EEPROM_93C56 : ··· 962 rtl92e_config_rate(dev, &rate_config); 963 priv->dot11CurrentPreambleMode = PREAMBLE_AUTO; 964 priv->basic_rate = rate_config &= 0x15f; 965 - rtl92e_writel(dev, BSSIDR, ((u32 *)net->bssid)[0]); 966 - rtl92e_writew(dev, BSSIDR+4, ((u16 *)net->bssid)[2]); 967 968 if (priv->rtllib->iw_mode == IW_MODE_ADHOC) { 969 rtl92e_writew(dev, ATIMWND, 2); ··· 1183 struct cb_desc *cb_desc, struct sk_buff *skb) 1184 { 1185 struct r8192_priv *priv = rtllib_priv(dev); 1186 - dma_addr_t mapping = pci_map_single(priv->pdev, skb->data, skb->len, 1187 - PCI_DMA_TODEVICE); 1188 struct tx_fwinfo_8190pci *pTxFwInfo; 1189 1190 pTxFwInfo = (struct tx_fwinfo_8190pci *)skb->data; ··· 1194 pTxFwInfo->Short = _rtl92e_query_is_short(pTxFwInfo->TxHT, 1195 pTxFwInfo->TxRate, cb_desc); 1196 1197 - if (pci_dma_mapping_error(priv->pdev, mapping)) 1198 - netdev_err(dev, "%s(): DMA Mapping error\n", __func__); 1199 if (cb_desc->bAMPDUEnable) { 1200 pTxFwInfo->AllowAggregation = 1; 1201 pTxFwInfo->RxMF = cb_desc->ampdu_factor; ··· 1228 } 1229 1230 memset((u8 *)pdesc, 0, 12); 1231 pdesc->LINIP = 0; 1232 pdesc->CmdInit = 1; 1233 pdesc->Offset = sizeof(struct tx_fwinfo_8190pci) + 8;
··· 97 98 switch (variable) { 99 case HW_VAR_BSSID: 100 + /* BSSIDR 2 byte alignment */ 101 + rtl92e_writew(dev, BSSIDR, *(u16 *)val); 102 + rtl92e_writel(dev, BSSIDR + 2, *(u32 *)(val + 2)); 103 break; 104 105 case HW_VAR_MEDIA_STATUS: ··· 624 struct r8192_priv *priv = rtllib_priv(dev); 625 626 RT_TRACE(COMP_INIT, "===========>%s()\n", __func__); 627 + curCR = rtl92e_readw(dev, EPROM_CMD); 628 RT_TRACE(COMP_INIT, "read from Reg Cmd9346CR(%x):%x\n", EPROM_CMD, 629 curCR); 630 priv->epromtype = (curCR & EPROM_CMD_9356SEL) ? EEPROM_93C56 : ··· 961 rtl92e_config_rate(dev, &rate_config); 962 priv->dot11CurrentPreambleMode = PREAMBLE_AUTO; 963 priv->basic_rate = rate_config &= 0x15f; 964 + rtl92e_writew(dev, BSSIDR, *(u16 *)net->bssid); 965 + rtl92e_writel(dev, BSSIDR + 2, *(u32 *)(net->bssid + 2)); 966 967 if (priv->rtllib->iw_mode == IW_MODE_ADHOC) { 968 rtl92e_writew(dev, ATIMWND, 2); ··· 1182 struct cb_desc *cb_desc, struct sk_buff *skb) 1183 { 1184 struct r8192_priv *priv = rtllib_priv(dev); 1185 + dma_addr_t mapping; 1186 struct tx_fwinfo_8190pci *pTxFwInfo; 1187 1188 pTxFwInfo = (struct tx_fwinfo_8190pci *)skb->data; ··· 1194 pTxFwInfo->Short = _rtl92e_query_is_short(pTxFwInfo->TxHT, 1195 pTxFwInfo->TxRate, cb_desc); 1196 1197 if (cb_desc->bAMPDUEnable) { 1198 pTxFwInfo->AllowAggregation = 1; 1199 pTxFwInfo->RxMF = cb_desc->ampdu_factor; ··· 1230 } 1231 1232 memset((u8 *)pdesc, 0, 12); 1233 + 1234 + mapping = pci_map_single(priv->pdev, skb->data, skb->len, 1235 + PCI_DMA_TODEVICE); 1236 + if (pci_dma_mapping_error(priv->pdev, mapping)) { 1237 + netdev_err(dev, "%s(): DMA Mapping error\n", __func__); 1238 + return; 1239 + } 1240 + 1241 pdesc->LINIP = 0; 1242 pdesc->CmdInit = 1; 1243 pdesc->Offset = sizeof(struct tx_fwinfo_8190pci) + 8;
+4 -11
drivers/staging/rtl8192e/rtl819x_TSProc.c
··· 306 pTsCommonInfo->TClasNum = TCLAS_Num; 307 } 308 309 - static bool IsACValid(unsigned int tid) 310 - { 311 - return tid < 7; 312 - } 313 - 314 bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS, 315 u8 *Addr, u8 TID, enum tr_select TxRxSelect, bool bAddNewTs) 316 { ··· 323 if (ieee->current_network.qos_data.supported == 0) { 324 UP = 0; 325 } else { 326 - if (!IsACValid(TID)) { 327 - netdev_warn(ieee->dev, "%s(): TID(%d) is not valid\n", 328 - __func__, TID); 329 - return false; 330 - } 331 - 332 switch (TID) { 333 case 0: 334 case 3: ··· 340 case 7: 341 UP = 7; 342 break; 343 } 344 } 345
··· 306 pTsCommonInfo->TClasNum = TCLAS_Num; 307 } 308 309 bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS, 310 u8 *Addr, u8 TID, enum tr_select TxRxSelect, bool bAddNewTs) 311 { ··· 328 if (ieee->current_network.qos_data.supported == 0) { 329 UP = 0; 330 } else { 331 switch (TID) { 332 case 0: 333 case 3: ··· 351 case 7: 352 UP = 7; 353 break; 354 + default: 355 + netdev_warn(ieee->dev, "%s(): TID(%d) is not valid\n", 356 + __func__, TID); 357 + return false; 358 } 359 } 360
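The rtl819x_TSProc.c change drops the separate IsACValid() helper, whose "tid < 7" test rejected TID 7 even though the switch maps 7 to a priority, and instead rejects out-of-range TIDs in the switch default. A compact illustration of validating through the default case (the TID-to-priority mapping below is illustrative, not copied from the driver):

#include <stdio.h>

/* Map a TID to a user priority; anything outside 0-7 is rejected in
 * the default branch rather than by a separate range-check helper.
 */
static int tid_to_up(unsigned int tid, unsigned int *up)
{
	switch (tid) {
	case 0: case 3:
		*up = 0;
		break;
	case 1: case 2:
		*up = 2;
		break;
	case 4: case 5:
		*up = 5;
		break;
	case 6: case 7:
		*up = 7;
		break;
	default:
		fprintf(stderr, "TID %u is not valid\n", tid);
		return -1;
	}
	return 0;
}

int main(void)
{
	unsigned int up;

	for (unsigned int tid = 0; tid < 10; tid++)
		if (!tid_to_up(tid, &up))
			printf("TID %u -> UP %u\n", tid, up);
	return 0;
}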
-1
drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
··· 3531 pwdev_priv->power_mgmt = true; 3532 else 3533 pwdev_priv->power_mgmt = false; 3534 - kfree((u8 *)wdev); 3535 3536 return ret; 3537
··· 3531 pwdev_priv->power_mgmt = true; 3532 else 3533 pwdev_priv->power_mgmt = false; 3534 3535 return ret; 3536
+47 -43
drivers/staging/typec/fusb302/fusb302.c
··· 264 265 #define FUSB302_RESUME_RETRY 10 266 #define FUSB302_RESUME_RETRY_SLEEP 50 267 static int fusb302_i2c_write(struct fusb302_chip *chip, 268 u8 address, u8 data) 269 { 270 - int retry_cnt; 271 int ret = 0; 272 273 atomic_set(&chip->i2c_busy, 1); 274 - for (retry_cnt = 0; retry_cnt < FUSB302_RESUME_RETRY; retry_cnt++) { 275 - if (atomic_read(&chip->pm_suspend)) { 276 - pr_err("fusb302_i2c: pm suspend, retry %d/%d\n", 277 - retry_cnt + 1, FUSB302_RESUME_RETRY); 278 - msleep(FUSB302_RESUME_RETRY_SLEEP); 279 - } else { 280 - break; 281 - } 282 } 283 ret = i2c_smbus_write_byte_data(chip->i2c_client, address, data); 284 if (ret < 0) 285 fusb302_log(chip, "cannot write 0x%02x to 0x%02x, ret=%d", ··· 306 static int fusb302_i2c_block_write(struct fusb302_chip *chip, u8 address, 307 u8 length, const u8 *data) 308 { 309 - int retry_cnt; 310 int ret = 0; 311 312 if (length <= 0) 313 return ret; 314 atomic_set(&chip->i2c_busy, 1); 315 - for (retry_cnt = 0; retry_cnt < FUSB302_RESUME_RETRY; retry_cnt++) { 316 - if (atomic_read(&chip->pm_suspend)) { 317 - pr_err("fusb302_i2c: pm suspend, retry %d/%d\n", 318 - retry_cnt + 1, FUSB302_RESUME_RETRY); 319 - msleep(FUSB302_RESUME_RETRY_SLEEP); 320 - } else { 321 - break; 322 - } 323 } 324 ret = i2c_smbus_write_i2c_block_data(chip->i2c_client, address, 325 length, data); 326 if (ret < 0) ··· 330 static int fusb302_i2c_read(struct fusb302_chip *chip, 331 u8 address, u8 *data) 332 { 333 - int retry_cnt; 334 int ret = 0; 335 336 atomic_set(&chip->i2c_busy, 1); 337 - for (retry_cnt = 0; retry_cnt < FUSB302_RESUME_RETRY; retry_cnt++) { 338 - if (atomic_read(&chip->pm_suspend)) { 339 - pr_err("fusb302_i2c: pm suspend, retry %d/%d\n", 340 - retry_cnt + 1, FUSB302_RESUME_RETRY); 341 - msleep(FUSB302_RESUME_RETRY_SLEEP); 342 - } else { 343 - break; 344 - } 345 } 346 ret = i2c_smbus_read_byte_data(chip->i2c_client, address); 347 *data = (u8)ret; 348 if (ret < 0) ··· 351 static int fusb302_i2c_block_read(struct fusb302_chip *chip, u8 address, 352 u8 length, u8 *data) 353 { 354 - int retry_cnt; 355 int ret = 0; 356 357 if (length <= 0) 358 return ret; 359 atomic_set(&chip->i2c_busy, 1); 360 - for (retry_cnt = 0; retry_cnt < FUSB302_RESUME_RETRY; retry_cnt++) { 361 - if (atomic_read(&chip->pm_suspend)) { 362 - pr_err("fusb302_i2c: pm suspend, retry %d/%d\n", 363 - retry_cnt + 1, FUSB302_RESUME_RETRY); 364 - msleep(FUSB302_RESUME_RETRY_SLEEP); 365 - } else { 366 - break; 367 - } 368 } 369 ret = i2c_smbus_read_i2c_block_data(chip->i2c_client, address, 370 length, data); 371 if (ret < 0) { 372 fusb302_log(chip, "cannot block read 0x%02x, len=%d, ret=%d", 373 address, length, ret); 374 - return ret; 375 } 376 if (ret != length) { 377 fusb302_log(chip, "only read %d/%d bytes from 0x%02x", 378 ret, length, address); 379 - return -EIO; 380 } 381 atomic_set(&chip->i2c_busy, 0); 382 383 return ret; ··· 493 ret = fusb302_i2c_read(chip, FUSB_REG_STATUS0, &data); 494 if (ret < 0) 495 return ret; 496 - chip->vbus_present = !!(FUSB_REG_STATUS0 & FUSB_REG_STATUS0_VBUSOK); 497 ret = fusb302_i2c_read(chip, FUSB_REG_DEVICE_ID, &data); 498 if (ret < 0) 499 return ret; ··· 1029 buf[pos++] = FUSB302_TKN_SYNC1; 1030 buf[pos++] = FUSB302_TKN_SYNC2; 1031 1032 - len = pd_header_cnt(msg->header) * 4; 1033 /* plug 2 for header */ 1034 len += 2; 1035 if (len > 0x1F) { ··· 1485 (u8 *)&msg->header); 1486 if (ret < 0) 1487 return ret; 1488 - len = pd_header_cnt(msg->header) * 4; 1489 /* add 4 to length to include the CRC */ 1490 if (len > PD_MAX_PAYLOAD * 4) { 1491 fusb302_log(chip, "PD message 
too long %d", len); ··· 1667 if (ret < 0) { 1668 fusb302_log(chip, 1669 "cannot set GPIO Int_N to input, ret=%d", ret); 1670 - gpio_free(chip->gpio_int_n); 1671 return ret; 1672 } 1673 ret = gpio_to_irq(chip->gpio_int_n); 1674 if (ret < 0) { 1675 fusb302_log(chip, 1676 "cannot request IRQ for GPIO Int_N, ret=%d", ret); 1677 - gpio_free(chip->gpio_int_n); 1678 return ret; 1679 } 1680 chip->gpio_int_n_irq = ret; ··· 1789 {.compatible = "fcs,fusb302"}, 1790 {}, 1791 }; 1792 1793 static const struct i2c_device_id fusb302_i2c_device_id[] = { 1794 {"typec_fusb302", 0}, 1795 {}, 1796 }; 1797 1798 static const struct dev_pm_ops fusb302_pm_ops = { 1799 .suspend = fusb302_pm_suspend,
··· 264 265 #define FUSB302_RESUME_RETRY 10 266 #define FUSB302_RESUME_RETRY_SLEEP 50 267 + 268 + static bool fusb302_is_suspended(struct fusb302_chip *chip) 269 + { 270 + int retry_cnt; 271 + 272 + for (retry_cnt = 0; retry_cnt < FUSB302_RESUME_RETRY; retry_cnt++) { 273 + if (atomic_read(&chip->pm_suspend)) { 274 + dev_err(chip->dev, "i2c: pm suspend, retry %d/%d\n", 275 + retry_cnt + 1, FUSB302_RESUME_RETRY); 276 + msleep(FUSB302_RESUME_RETRY_SLEEP); 277 + } else { 278 + return false; 279 + } 280 + } 281 + 282 + return true; 283 + } 284 + 285 static int fusb302_i2c_write(struct fusb302_chip *chip, 286 u8 address, u8 data) 287 { 288 int ret = 0; 289 290 atomic_set(&chip->i2c_busy, 1); 291 + 292 + if (fusb302_is_suspended(chip)) { 293 + atomic_set(&chip->i2c_busy, 0); 294 + return -ETIMEDOUT; 295 } 296 + 297 ret = i2c_smbus_write_byte_data(chip->i2c_client, address, data); 298 if (ret < 0) 299 fusb302_log(chip, "cannot write 0x%02x to 0x%02x, ret=%d", ··· 292 static int fusb302_i2c_block_write(struct fusb302_chip *chip, u8 address, 293 u8 length, const u8 *data) 294 { 295 int ret = 0; 296 297 if (length <= 0) 298 return ret; 299 atomic_set(&chip->i2c_busy, 1); 300 + 301 + if (fusb302_is_suspended(chip)) { 302 + atomic_set(&chip->i2c_busy, 0); 303 + return -ETIMEDOUT; 304 } 305 + 306 ret = i2c_smbus_write_i2c_block_data(chip->i2c_client, address, 307 length, data); 308 if (ret < 0) ··· 320 static int fusb302_i2c_read(struct fusb302_chip *chip, 321 u8 address, u8 *data) 322 { 323 int ret = 0; 324 325 atomic_set(&chip->i2c_busy, 1); 326 + 327 + if (fusb302_is_suspended(chip)) { 328 + atomic_set(&chip->i2c_busy, 0); 329 + return -ETIMEDOUT; 330 } 331 + 332 ret = i2c_smbus_read_byte_data(chip->i2c_client, address); 333 *data = (u8)ret; 334 if (ret < 0) ··· 345 static int fusb302_i2c_block_read(struct fusb302_chip *chip, u8 address, 346 u8 length, u8 *data) 347 { 348 int ret = 0; 349 350 if (length <= 0) 351 return ret; 352 atomic_set(&chip->i2c_busy, 1); 353 + 354 + if (fusb302_is_suspended(chip)) { 355 + atomic_set(&chip->i2c_busy, 0); 356 + return -ETIMEDOUT; 357 } 358 + 359 ret = i2c_smbus_read_i2c_block_data(chip->i2c_client, address, 360 length, data); 361 if (ret < 0) { 362 fusb302_log(chip, "cannot block read 0x%02x, len=%d, ret=%d", 363 address, length, ret); 364 + goto done; 365 } 366 if (ret != length) { 367 fusb302_log(chip, "only read %d/%d bytes from 0x%02x", 368 ret, length, address); 369 + ret = -EIO; 370 } 371 + 372 + done: 373 atomic_set(&chip->i2c_busy, 0); 374 375 return ret; ··· 489 ret = fusb302_i2c_read(chip, FUSB_REG_STATUS0, &data); 490 if (ret < 0) 491 return ret; 492 + chip->vbus_present = !!(data & FUSB_REG_STATUS0_VBUSOK); 493 ret = fusb302_i2c_read(chip, FUSB_REG_DEVICE_ID, &data); 494 if (ret < 0) 495 return ret; ··· 1025 buf[pos++] = FUSB302_TKN_SYNC1; 1026 buf[pos++] = FUSB302_TKN_SYNC2; 1027 1028 + len = pd_header_cnt_le(msg->header) * 4; 1029 /* plug 2 for header */ 1030 len += 2; 1031 if (len > 0x1F) { ··· 1481 (u8 *)&msg->header); 1482 if (ret < 0) 1483 return ret; 1484 + len = pd_header_cnt_le(msg->header) * 4; 1485 /* add 4 to length to include the CRC */ 1486 if (len > PD_MAX_PAYLOAD * 4) { 1487 fusb302_log(chip, "PD message too long %d", len); ··· 1663 if (ret < 0) { 1664 fusb302_log(chip, 1665 "cannot set GPIO Int_N to input, ret=%d", ret); 1666 return ret; 1667 } 1668 ret = gpio_to_irq(chip->gpio_int_n); 1669 if (ret < 0) { 1670 fusb302_log(chip, 1671 "cannot request IRQ for GPIO Int_N, ret=%d", ret); 1672 return ret; 1673 } 1674 chip->gpio_int_n_irq = 
ret; ··· 1787 {.compatible = "fcs,fusb302"}, 1788 {}, 1789 }; 1790 + MODULE_DEVICE_TABLE(of, fusb302_dt_match); 1791 1792 static const struct i2c_device_id fusb302_i2c_device_id[] = { 1793 {"typec_fusb302", 0}, 1794 {}, 1795 }; 1796 + MODULE_DEVICE_TABLE(i2c, fusb302_i2c_device_id); 1797 1798 static const struct dev_pm_ops fusb302_pm_ops = { 1799 .suspend = fusb302_pm_suspend,
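The fusb302 rework pulls four copies of the resume-retry loop into a single helper and, once the retries are exhausted, has each I2C wrapper clear i2c_busy and return -ETIMEDOUT instead of attempting the transfer anyway. A user-space sketch of that shape, with a plain flag and usleep() standing in for the PM state and msleep() (names and values are illustrative):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define RESUME_RETRY		10
#define RESUME_RETRY_SLEEP_MS	50

static volatile int pm_suspend;		/* stand-in for chip->pm_suspend */

/* Poll the suspend flag a bounded number of times; true means give up. */
static bool bus_is_suspended(void)
{
	for (int retry = 0; retry < RESUME_RETRY; retry++) {
		if (!pm_suspend)
			return false;
		fprintf(stderr, "pm suspend, retry %d/%d\n",
			retry + 1, RESUME_RETRY);
		usleep(RESUME_RETRY_SLEEP_MS * 1000);
	}
	return true;
}

static int bus_write(unsigned char addr, unsigned char data)
{
	if (bus_is_suspended())
		return -ETIMEDOUT;	/* callers now see a real error */
	printf("write 0x%02x to 0x%02x\n", data, addr);
	return 0;
}

int main(void)
{
	return bus_write(0x0c, 0x40) ? 1 : 0;
}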
+10
drivers/staging/typec/pd.h
··· 92 return pd_header_type(le16_to_cpu(header)); 93 } 94 95 #define PD_MAX_PAYLOAD 7 96 97 struct pd_message {
··· 92 return pd_header_type(le16_to_cpu(header)); 93 } 94 95 + static inline unsigned int pd_header_msgid(u16 header) 96 + { 97 + return (header >> PD_HEADER_ID_SHIFT) & PD_HEADER_ID_MASK; 98 + } 99 + 100 + static inline unsigned int pd_header_msgid_le(__le16 header) 101 + { 102 + return pd_header_msgid(le16_to_cpu(header)); 103 + } 104 + 105 #define PD_MAX_PAYLOAD 7 106 107 struct pd_message {
+3 -1
drivers/staging/typec/pd_vdo.h
··· 22 * VDM object is minimum of VDM header + 6 additional data objects. 23 */ 24 25 /* 26 * VDM header 27 * ---------- ··· 37 * <5> :: reserved (SVDM), command type (UVDM) 38 * <4:0> :: command 39 */ 40 - #define VDO_MAX_SIZE 7 41 #define VDO(vid, type, custom) \ 42 (((vid) << 16) | \ 43 ((type) << 15) | \
··· 22 * VDM object is minimum of VDM header + 6 additional data objects. 23 */ 24 25 + #define VDO_MAX_OBJECTS 6 26 + #define VDO_MAX_SIZE (VDO_MAX_OBJECTS + 1) 27 + 28 /* 29 * VDM header 30 * ---------- ··· 34 * <5> :: reserved (SVDM), command type (UVDM) 35 * <4:0> :: command 36 */ 37 #define VDO(vid, type, custom) \ 38 (((vid) << 16) | \ 39 ((type) << 15) | \
+1 -1
drivers/staging/typec/tcpci.c
··· 425 .max_register = 0x7F, /* 0x80 .. 0xFF are vendor defined */ 426 }; 427 428 - const struct tcpc_config tcpci_tcpc_config = { 429 .type = TYPEC_PORT_DFP, 430 .default_role = TYPEC_SINK, 431 };
··· 425 .max_register = 0x7F, /* 0x80 .. 0xFF are vendor defined */ 426 }; 427 428 + static const struct tcpc_config tcpci_tcpc_config = { 429 .type = TYPEC_PORT_DFP, 430 .default_role = TYPEC_SINK, 431 };
+73 -4
drivers/staging/typec/tcpm.c
··· 238 unsigned int hard_reset_count; 239 bool pd_capable; 240 bool explicit_contract; 241 242 /* Partner capabilities/requests */ 243 u32 sink_request; ··· 252 unsigned int nr_src_pdo; 253 u32 snk_pdo[PDO_MAX_OBJECTS]; 254 unsigned int nr_snk_pdo; 255 256 unsigned int max_snk_mv; 257 unsigned int max_snk_ma; ··· 1000 struct pd_mode_data *modep; 1001 int rlen = 0; 1002 u16 svid; 1003 1004 tcpm_log(port, "Rx VDM cmd 0x%x type %d cmd %d len %d", 1005 p0, cmd_type, cmd, cnt); ··· 1011 case CMDT_INIT: 1012 switch (cmd) { 1013 case CMD_DISCOVER_IDENT: 1014 break; 1015 case CMD_DISCOVER_SVID: 1016 break; ··· 1427 break; 1428 case SOFT_RESET_SEND: 1429 port->message_id = 0; 1430 if (port->pwr_role == TYPEC_SOURCE) 1431 next_state = SRC_SEND_CAPABILITIES; 1432 else ··· 1516 port->attached); 1517 1518 if (port->attached) { 1519 /* 1520 * If both ends believe to be DFP/host, we have a data role 1521 * mismatch. ··· 1549 } 1550 } 1551 1552 mutex_unlock(&port->lock); 1553 kfree(event); 1554 } ··· 1749 } 1750 ma = min(ma, port->max_snk_ma); 1751 1752 - /* XXX: Any other flags need to be set? */ 1753 - flags = 0; 1754 1755 /* Set mismatch bit if offered power is less than operating power */ 1756 mw = ma * mv / 1000; ··· 1986 port->attached = false; 1987 port->pd_capable = false; 1988 1989 port->tcpc->set_pd_rx(port->tcpc, false); 1990 tcpm_init_vbus(port); /* also disables charging */ 1991 tcpm_init_vconn(port); ··· 2205 port->pwr_opmode = TYPEC_PWR_MODE_USB; 2206 port->caps_count = 0; 2207 port->message_id = 0; 2208 port->explicit_contract = false; 2209 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0); 2210 break; ··· 2365 typec_set_pwr_opmode(port->typec_port, TYPEC_PWR_MODE_USB); 2366 port->pwr_opmode = TYPEC_PWR_MODE_USB; 2367 port->message_id = 0; 2368 port->explicit_contract = false; 2369 tcpm_set_state(port, SNK_DISCOVERY, 0); 2370 break; ··· 2533 /* Soft_Reset states */ 2534 case SOFT_RESET: 2535 port->message_id = 0; 2536 tcpm_pd_send_control(port, PD_CTRL_ACCEPT); 2537 if (port->pwr_role == TYPEC_SOURCE) 2538 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0); ··· 2542 break; 2543 case SOFT_RESET_SEND: 2544 port->message_id = 0; 2545 if (tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET)) 2546 tcpm_set_state_cond(port, hard_reset_state(port), 0); 2547 else ··· 2607 break; 2608 case PR_SWAP_SRC_SNK_SOURCE_OFF: 2609 tcpm_set_cc(port, TYPEC_CC_RD); 2610 if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY)) { 2611 tcpm_set_state(port, ERROR_RECOVERY, 0); 2612 break; ··· 2622 tcpm_set_state_cond(port, SNK_UNATTACHED, PD_T_PS_SOURCE_ON); 2623 break; 2624 case PR_SWAP_SRC_SNK_SINK_ON: 2625 - tcpm_set_pwr_role(port, TYPEC_SINK); 2626 tcpm_swap_complete(port, 0); 2627 tcpm_set_state(port, SNK_STARTUP, 0); 2628 break; ··· 2633 case PR_SWAP_SNK_SRC_SOURCE_ON: 2634 tcpm_set_cc(port, tcpm_rp_cc(port)); 2635 tcpm_set_vbus(port, true); 2636 - tcpm_pd_send_control(port, PD_CTRL_PS_RDY); 2637 tcpm_set_pwr_role(port, TYPEC_SOURCE); 2638 tcpm_swap_complete(port, 0); 2639 tcpm_set_state(port, SRC_STARTUP, 0); 2640 break; ··· 3345 return nr_pdo; 3346 } 3347 3348 void tcpm_update_source_capabilities(struct tcpm_port *port, const u32 *pdo, 3349 unsigned int nr_pdo) 3350 { ··· 3449 tcpc->config->nr_src_pdo); 3450 port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, tcpc->config->snk_pdo, 3451 tcpc->config->nr_snk_pdo); 3452 3453 port->max_snk_mv = tcpc->config->max_snk_mv; 3454 port->max_snk_ma = tcpc->config->max_snk_ma;
··· 238 unsigned int hard_reset_count; 239 bool pd_capable; 240 bool explicit_contract; 241 + unsigned int rx_msgid; 242 243 /* Partner capabilities/requests */ 244 u32 sink_request; ··· 251 unsigned int nr_src_pdo; 252 u32 snk_pdo[PDO_MAX_OBJECTS]; 253 unsigned int nr_snk_pdo; 254 + u32 snk_vdo[VDO_MAX_OBJECTS]; 255 + unsigned int nr_snk_vdo; 256 257 unsigned int max_snk_mv; 258 unsigned int max_snk_ma; ··· 997 struct pd_mode_data *modep; 998 int rlen = 0; 999 u16 svid; 1000 + int i; 1001 1002 tcpm_log(port, "Rx VDM cmd 0x%x type %d cmd %d len %d", 1003 p0, cmd_type, cmd, cnt); ··· 1007 case CMDT_INIT: 1008 switch (cmd) { 1009 case CMD_DISCOVER_IDENT: 1010 + /* 6.4.4.3.1: Only respond as UFP (device) */ 1011 + if (port->data_role == TYPEC_DEVICE && 1012 + port->nr_snk_vdo) { 1013 + for (i = 0; i < port->nr_snk_vdo; i++) 1014 + response[i + 1] 1015 + = cpu_to_le32(port->snk_vdo[i]); 1016 + rlen = port->nr_snk_vdo + 1; 1017 + } 1018 break; 1019 case CMD_DISCOVER_SVID: 1020 break; ··· 1415 break; 1416 case SOFT_RESET_SEND: 1417 port->message_id = 0; 1418 + port->rx_msgid = -1; 1419 if (port->pwr_role == TYPEC_SOURCE) 1420 next_state = SRC_SEND_CAPABILITIES; 1421 else ··· 1503 port->attached); 1504 1505 if (port->attached) { 1506 + enum pd_ctrl_msg_type type = pd_header_type_le(msg->header); 1507 + unsigned int msgid = pd_header_msgid_le(msg->header); 1508 + 1509 + /* 1510 + * USB PD standard, 6.6.1.2: 1511 + * "... if MessageID value in a received Message is the 1512 + * same as the stored value, the receiver shall return a 1513 + * GoodCRC Message with that MessageID value and drop 1514 + * the Message (this is a retry of an already received 1515 + * Message). Note: this shall not apply to the Soft_Reset 1516 + * Message which always has a MessageID value of zero." 1517 + */ 1518 + if (msgid == port->rx_msgid && type != PD_CTRL_SOFT_RESET) 1519 + goto done; 1520 + port->rx_msgid = msgid; 1521 + 1522 /* 1523 * If both ends believe to be DFP/host, we have a data role 1524 * mismatch. ··· 1520 } 1521 } 1522 1523 + done: 1524 mutex_unlock(&port->lock); 1525 kfree(event); 1526 } ··· 1719 } 1720 ma = min(ma, port->max_snk_ma); 1721 1722 + flags = RDO_USB_COMM | RDO_NO_SUSPEND; 1723 1724 /* Set mismatch bit if offered power is less than operating power */ 1725 mw = ma * mv / 1000; ··· 1957 port->attached = false; 1958 port->pd_capable = false; 1959 1960 + /* 1961 + * First Rx ID should be 0; set this to a sentinel of -1 so that 1962 + * we can check tcpm_pd_rx_handler() if we had seen it before. 
1963 + */ 1964 + port->rx_msgid = -1; 1965 + 1966 port->tcpc->set_pd_rx(port->tcpc, false); 1967 tcpm_init_vbus(port); /* also disables charging */ 1968 tcpm_init_vconn(port); ··· 2170 port->pwr_opmode = TYPEC_PWR_MODE_USB; 2171 port->caps_count = 0; 2172 port->message_id = 0; 2173 + port->rx_msgid = -1; 2174 port->explicit_contract = false; 2175 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0); 2176 break; ··· 2329 typec_set_pwr_opmode(port->typec_port, TYPEC_PWR_MODE_USB); 2330 port->pwr_opmode = TYPEC_PWR_MODE_USB; 2331 port->message_id = 0; 2332 + port->rx_msgid = -1; 2333 port->explicit_contract = false; 2334 tcpm_set_state(port, SNK_DISCOVERY, 0); 2335 break; ··· 2496 /* Soft_Reset states */ 2497 case SOFT_RESET: 2498 port->message_id = 0; 2499 + port->rx_msgid = -1; 2500 tcpm_pd_send_control(port, PD_CTRL_ACCEPT); 2501 if (port->pwr_role == TYPEC_SOURCE) 2502 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0); ··· 2504 break; 2505 case SOFT_RESET_SEND: 2506 port->message_id = 0; 2507 + port->rx_msgid = -1; 2508 if (tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET)) 2509 tcpm_set_state_cond(port, hard_reset_state(port), 0); 2510 else ··· 2568 break; 2569 case PR_SWAP_SRC_SNK_SOURCE_OFF: 2570 tcpm_set_cc(port, TYPEC_CC_RD); 2571 + /* 2572 + * USB-PD standard, 6.2.1.4, Port Power Role: 2573 + * "During the Power Role Swap Sequence, for the initial Source 2574 + * Port, the Port Power Role field shall be set to Sink in the 2575 + * PS_RDY Message indicating that the initial Source’s power 2576 + * supply is turned off" 2577 + */ 2578 + tcpm_set_pwr_role(port, TYPEC_SINK); 2579 if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY)) { 2580 tcpm_set_state(port, ERROR_RECOVERY, 0); 2581 break; ··· 2575 tcpm_set_state_cond(port, SNK_UNATTACHED, PD_T_PS_SOURCE_ON); 2576 break; 2577 case PR_SWAP_SRC_SNK_SINK_ON: 2578 tcpm_swap_complete(port, 0); 2579 tcpm_set_state(port, SNK_STARTUP, 0); 2580 break; ··· 2587 case PR_SWAP_SNK_SRC_SOURCE_ON: 2588 tcpm_set_cc(port, tcpm_rp_cc(port)); 2589 tcpm_set_vbus(port, true); 2590 + /* 2591 + * USB PD standard, 6.2.1.4: 2592 + * "Subsequent Messages initiated by the Policy Engine, 2593 + * such as the PS_RDY Message sent to indicate that Vbus 2594 + * is ready, will have the Port Power Role field set to 2595 + * Source." 2596 + */ 2597 tcpm_set_pwr_role(port, TYPEC_SOURCE); 2598 + tcpm_pd_send_control(port, PD_CTRL_PS_RDY); 2599 tcpm_swap_complete(port, 0); 2600 tcpm_set_state(port, SRC_STARTUP, 0); 2601 break; ··· 3292 return nr_pdo; 3293 } 3294 3295 + static int tcpm_copy_vdos(u32 *dest_vdo, const u32 *src_vdo, 3296 + unsigned int nr_vdo) 3297 + { 3298 + unsigned int i; 3299 + 3300 + if (nr_vdo > VDO_MAX_OBJECTS) 3301 + nr_vdo = VDO_MAX_OBJECTS; 3302 + 3303 + for (i = 0; i < nr_vdo; i++) 3304 + dest_vdo[i] = src_vdo[i]; 3305 + 3306 + return nr_vdo; 3307 + } 3308 + 3309 void tcpm_update_source_capabilities(struct tcpm_port *port, const u32 *pdo, 3310 unsigned int nr_pdo) 3311 { ··· 3382 tcpc->config->nr_src_pdo); 3383 port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, tcpc->config->snk_pdo, 3384 tcpc->config->nr_snk_pdo); 3385 + port->nr_snk_vdo = tcpm_copy_vdos(port->snk_vdo, tcpc->config->snk_vdo, 3386 + tcpc->config->nr_snk_vdo); 3387 3388 port->max_snk_mv = tcpc->config->max_snk_mv; 3389 port->max_snk_ma = tcpc->config->max_snk_ma;
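Among the tcpm.c additions, the receive path now caches the last MessageID and drops any message that repeats it, except Soft_Reset, which always carries MessageID 0; rx_msgid is reset to a -1 sentinel wherever message_id is reset. A stand-alone sketch of that rule; the header field positions and the Soft_Reset type value below are assumptions, not copied from the driver headers:

#include <stdint.h>
#include <stdio.h>

#define HDR_ID_SHIFT	9	/* assumed MessageID position */
#define HDR_ID_MASK	0x7
#define HDR_TYPE_MASK	0x1f
#define CTRL_SOFT_RESET	0x0d	/* assumed Soft_Reset type value */

struct port {
	int rx_msgid;		/* -1 until the first message is seen */
};

static unsigned int hdr_msgid(uint16_t h) { return (h >> HDR_ID_SHIFT) & HDR_ID_MASK; }
static unsigned int hdr_type(uint16_t h) { return h & HDR_TYPE_MASK; }

static void rx(struct port *p, uint16_t header)
{
	unsigned int id = hdr_msgid(header);
	unsigned int type = hdr_type(header);

	/* A repeated MessageID is a retry of something already handled;
	 * acknowledge-and-drop, unless it is a Soft_Reset.
	 */
	if ((int)id == p->rx_msgid && type != CTRL_SOFT_RESET) {
		printf("msgid %u: retry, dropped\n", id);
		return;
	}
	p->rx_msgid = id;
	printf("msgid %u: processed (type %#x)\n", id, type);
}

int main(void)
{
	struct port p = { .rx_msgid = -1 };

	rx(&p, 0x0201);		/* id 1: processed */
	rx(&p, 0x0201);		/* same id: dropped as a retry */
	rx(&p, 0x0401);		/* id 2: processed */
	return 0;
}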
+3
drivers/staging/typec/tcpm.h
··· 60 const u32 *snk_pdo; 61 unsigned int nr_snk_pdo; 62 63 unsigned int max_snk_mv; 64 unsigned int max_snk_ma; 65 unsigned int max_snk_mw;
··· 60 const u32 *snk_pdo; 61 unsigned int nr_snk_pdo; 62 63 + const u32 *snk_vdo; 64 + unsigned int nr_snk_vdo; 65 + 66 unsigned int max_snk_mv; 67 unsigned int max_snk_ma; 68 unsigned int max_snk_mw;
+19 -12
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
··· 502 */ 503 sg_init_table(scatterlist, num_pages); 504 /* Now set the pages for each scatterlist */ 505 - for (i = 0; i < num_pages; i++) 506 - sg_set_page(scatterlist + i, pages[i], PAGE_SIZE, 0); 507 508 dma_buffers = dma_map_sg(g_dev, 509 scatterlist, ··· 531 u32 addr = sg_dma_address(sg); 532 533 /* Note: addrs is the address + page_count - 1 534 - * The firmware expects the block to be page 535 * aligned and a multiple of the page size 536 */ 537 WARN_ON(len == 0); 538 - WARN_ON(len & ~PAGE_MASK); 539 - WARN_ON(addr & ~PAGE_MASK); 540 if (k > 0 && 541 - ((addrs[k - 1] & PAGE_MASK) | 542 - ((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT) 543 - == addr) { 544 - addrs[k - 1] += (len >> PAGE_SHIFT); 545 - } else { 546 - addrs[k++] = addr | ((len >> PAGE_SHIFT) - 1); 547 - } 548 } 549 550 /* Partial cache lines (fragments) require special measures */
··· 502 */ 503 sg_init_table(scatterlist, num_pages); 504 /* Now set the pages for each scatterlist */ 505 + for (i = 0; i < num_pages; i++) { 506 + unsigned int len = PAGE_SIZE - offset; 507 + 508 + if (len > count) 509 + len = count; 510 + sg_set_page(scatterlist + i, pages[i], len, offset); 511 + offset = 0; 512 + count -= len; 513 + } 514 515 dma_buffers = dma_map_sg(g_dev, 516 scatterlist, ··· 524 u32 addr = sg_dma_address(sg); 525 526 /* Note: addrs is the address + page_count - 1 527 + * The firmware expects blocks after the first to be page- 528 * aligned and a multiple of the page size 529 */ 530 WARN_ON(len == 0); 531 + WARN_ON(i && (i != (dma_buffers - 1)) && (len & ~PAGE_MASK)); 532 + WARN_ON(i && (addr & ~PAGE_MASK)); 533 if (k > 0 && 534 + ((addrs[k - 1] & PAGE_MASK) + 535 + (((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT)) 536 + == (addr & PAGE_MASK)) 537 + addrs[k - 1] += ((len + PAGE_SIZE - 1) >> PAGE_SHIFT); 538 + else 539 + addrs[k++] = (addr & PAGE_MASK) | 540 + (((len + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1); 541 } 542 543 /* Partial cache lines (fragments) require special measures */
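The vchiq_2835_arm.c hunk stops hard-coding PAGE_SIZE for every scatterlist entry: the first entry starts at the user buffer's page offset and the last may be short, so each entry gets min(PAGE_SIZE - offset, remaining) bytes. The arithmetic in isolation (PAGE_SIZE fixed at 4096 just for the example):

#include <stdio.h>

#define PAGE_SIZE 4096u

/* Print the (offset, length) pair each page-sized entry would get for
 * a buffer that starts 'offset' bytes into its first page and is
 * 'count' bytes long.
 */
static void fill_lengths(unsigned int offset, unsigned int count)
{
	for (unsigned int i = 0; count; i++) {
		unsigned int len = PAGE_SIZE - offset;

		if (len > count)
			len = count;
		printf("entry %u: offset %u, len %u\n", i, offset, len);
		offset = 0;	/* only the first entry is offset */
		count -= len;
	}
}

int main(void)
{
	fill_lengths(100, 9000);	/* spans three pages */
	return 0;
}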
+4 -4
drivers/uio/uio.c
··· 279 map = kzalloc(sizeof(*map), GFP_KERNEL); 280 if (!map) { 281 ret = -ENOMEM; 282 - goto err_map_kobj; 283 } 284 kobject_init(&map->kobj, &map_attr_type); 285 map->mem = mem; ··· 289 goto err_map_kobj; 290 ret = kobject_uevent(&map->kobj, KOBJ_ADD); 291 if (ret) 292 - goto err_map; 293 } 294 295 for (pi = 0; pi < MAX_UIO_PORT_REGIONS; pi++) { ··· 308 portio = kzalloc(sizeof(*portio), GFP_KERNEL); 309 if (!portio) { 310 ret = -ENOMEM; 311 - goto err_portio_kobj; 312 } 313 kobject_init(&portio->kobj, &portio_attr_type); 314 portio->port = port; ··· 319 goto err_portio_kobj; 320 ret = kobject_uevent(&portio->kobj, KOBJ_ADD); 321 if (ret) 322 - goto err_portio; 323 } 324 325 return 0;
··· 279 map = kzalloc(sizeof(*map), GFP_KERNEL); 280 if (!map) { 281 ret = -ENOMEM; 282 + goto err_map; 283 } 284 kobject_init(&map->kobj, &map_attr_type); 285 map->mem = mem; ··· 289 goto err_map_kobj; 290 ret = kobject_uevent(&map->kobj, KOBJ_ADD); 291 if (ret) 292 + goto err_map_kobj; 293 } 294 295 for (pi = 0; pi < MAX_UIO_PORT_REGIONS; pi++) { ··· 308 portio = kzalloc(sizeof(*portio), GFP_KERNEL); 309 if (!portio) { 310 ret = -ENOMEM; 311 + goto err_portio; 312 } 313 kobject_init(&portio->kobj, &portio_attr_type); 314 portio->port = port; ··· 319 goto err_portio_kobj; 320 ret = kobject_uevent(&portio->kobj, KOBJ_ADD); 321 if (ret) 322 + goto err_portio_kobj; 323 } 324 325 return 0;
+7 -7
drivers/usb/core/devio.c
··· 475 476 if (userurb) { /* Async */ 477 if (when == SUBMIT) 478 - dev_info(&udev->dev, "userurb %p, ep%d %s-%s, " 479 "length %u\n", 480 userurb, ep, t, d, length); 481 else 482 - dev_info(&udev->dev, "userurb %p, ep%d %s-%s, " 483 "actual_length %u status %d\n", 484 userurb, ep, t, d, length, 485 timeout_or_status); ··· 1895 if (as) { 1896 int retval; 1897 1898 - snoop(&ps->dev->dev, "reap %p\n", as->userurb); 1899 retval = processcompl(as, (void __user * __user *)arg); 1900 free_async(as); 1901 return retval; ··· 1912 1913 as = async_getcompleted(ps); 1914 if (as) { 1915 - snoop(&ps->dev->dev, "reap %p\n", as->userurb); 1916 retval = processcompl(as, (void __user * __user *)arg); 1917 free_async(as); 1918 } else { ··· 2043 if (as) { 2044 int retval; 2045 2046 - snoop(&ps->dev->dev, "reap %p\n", as->userurb); 2047 retval = processcompl_compat(as, (void __user * __user *)arg); 2048 free_async(as); 2049 return retval; ··· 2060 2061 as = async_getcompleted(ps); 2062 if (as) { 2063 - snoop(&ps->dev->dev, "reap %p\n", as->userurb); 2064 retval = processcompl_compat(as, (void __user * __user *)arg); 2065 free_async(as); 2066 } else { ··· 2489 #endif 2490 2491 case USBDEVFS_DISCARDURB: 2492 - snoop(&dev->dev, "%s: DISCARDURB %p\n", __func__, p); 2493 ret = proc_unlinkurb(ps, p); 2494 break; 2495
··· 475 476 if (userurb) { /* Async */ 477 if (when == SUBMIT) 478 + dev_info(&udev->dev, "userurb %pK, ep%d %s-%s, " 479 "length %u\n", 480 userurb, ep, t, d, length); 481 else 482 + dev_info(&udev->dev, "userurb %pK, ep%d %s-%s, " 483 "actual_length %u status %d\n", 484 userurb, ep, t, d, length, 485 timeout_or_status); ··· 1895 if (as) { 1896 int retval; 1897 1898 + snoop(&ps->dev->dev, "reap %pK\n", as->userurb); 1899 retval = processcompl(as, (void __user * __user *)arg); 1900 free_async(as); 1901 return retval; ··· 1912 1913 as = async_getcompleted(ps); 1914 if (as) { 1915 + snoop(&ps->dev->dev, "reap %pK\n", as->userurb); 1916 retval = processcompl(as, (void __user * __user *)arg); 1917 free_async(as); 1918 } else { ··· 2043 if (as) { 2044 int retval; 2045 2046 + snoop(&ps->dev->dev, "reap %pK\n", as->userurb); 2047 retval = processcompl_compat(as, (void __user * __user *)arg); 2048 free_async(as); 2049 return retval; ··· 2060 2061 as = async_getcompleted(ps); 2062 if (as) { 2063 + snoop(&ps->dev->dev, "reap %pK\n", as->userurb); 2064 retval = processcompl_compat(as, (void __user * __user *)arg); 2065 free_async(as); 2066 } else { ··· 2489 #endif 2490 2491 case USBDEVFS_DISCARDURB: 2492 + snoop(&dev->dev, "%s: DISCARDURB %pK\n", __func__, p); 2493 ret = proc_unlinkurb(ps, p); 2494 break; 2495
+3 -2
drivers/usb/core/hcd.c
··· 1723 if (retval == 0) 1724 retval = -EINPROGRESS; 1725 else if (retval != -EIDRM && retval != -EBUSY) 1726 - dev_dbg(&udev->dev, "hcd_unlink_urb %p fail %d\n", 1727 urb, retval); 1728 usb_put_dev(udev); 1729 } ··· 1890 /* kick hcd */ 1891 unlink1(hcd, urb, -ESHUTDOWN); 1892 dev_dbg (hcd->self.controller, 1893 - "shutdown urb %p ep%d%s%s\n", 1894 urb, usb_endpoint_num(&ep->desc), 1895 is_in ? "in" : "out", 1896 ({ char *s; ··· 2520 hcd->bandwidth_mutex = kmalloc(sizeof(*hcd->bandwidth_mutex), 2521 GFP_KERNEL); 2522 if (!hcd->bandwidth_mutex) { 2523 kfree(hcd); 2524 dev_dbg(dev, "hcd bandwidth mutex alloc failed\n"); 2525 return NULL;
··· 1723 if (retval == 0) 1724 retval = -EINPROGRESS; 1725 else if (retval != -EIDRM && retval != -EBUSY) 1726 + dev_dbg(&udev->dev, "hcd_unlink_urb %pK fail %d\n", 1727 urb, retval); 1728 usb_put_dev(udev); 1729 } ··· 1890 /* kick hcd */ 1891 unlink1(hcd, urb, -ESHUTDOWN); 1892 dev_dbg (hcd->self.controller, 1893 + "shutdown urb %pK ep%d%s%s\n", 1894 urb, usb_endpoint_num(&ep->desc), 1895 is_in ? "in" : "out", 1896 ({ char *s; ··· 2520 hcd->bandwidth_mutex = kmalloc(sizeof(*hcd->bandwidth_mutex), 2521 GFP_KERNEL); 2522 if (!hcd->bandwidth_mutex) { 2523 + kfree(hcd->address0_mutex); 2524 kfree(hcd); 2525 dev_dbg(dev, "hcd bandwidth mutex alloc failed\n"); 2526 return NULL;
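Besides the %pK conversions, the hcd.c hunk frees the already-allocated address0_mutex before freeing the hcd itself when the bandwidth_mutex allocation fails, closing a leak on that error path. A minimal sketch of releasing members before the containing object (structure and helper names are invented for illustration):

#include <stdio.h>
#include <stdlib.h>

struct hcd {
	int *address0_mutex;
	int *bandwidth_mutex;
};

static struct hcd *create_hcd(int fail_second)
{
	struct hcd *hcd = calloc(1, sizeof(*hcd));

	if (!hcd)
		return NULL;

	hcd->address0_mutex = malloc(sizeof(*hcd->address0_mutex));
	if (!hcd->address0_mutex) {
		free(hcd);
		return NULL;
	}

	/* fail_second forces the failure path for the demo */
	hcd->bandwidth_mutex = fail_second ? NULL :
			       malloc(sizeof(*hcd->bandwidth_mutex));
	if (!hcd->bandwidth_mutex) {
		free(hcd->address0_mutex);	/* the fix: don't leak this */
		free(hcd);
		return NULL;
	}
	return hcd;
}

int main(void)
{
	printf("forced failure -> %p\n", (void *)create_hcd(1));
	return 0;
}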
+21 -6
drivers/usb/core/hub.c
··· 362 } 363 364 /* USB 2.0 spec Section 11.24.4.5 */ 365 - static int get_hub_descriptor(struct usb_device *hdev, void *data) 366 { 367 int i, ret, size; 368 unsigned dtype; ··· 379 for (i = 0; i < 3; i++) { 380 ret = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0), 381 USB_REQ_GET_DESCRIPTOR, USB_DIR_IN | USB_RT_HUB, 382 - dtype << 8, 0, data, size, 383 USB_CTRL_GET_TIMEOUT); 384 - if (ret >= (USB_DT_HUB_NONVAR_SIZE + 2)) 385 return ret; 386 } 387 return -EINVAL; 388 } ··· 1322 } 1323 mutex_init(&hub->status_mutex); 1324 1325 - hub->descriptor = kmalloc(sizeof(*hub->descriptor), GFP_KERNEL); 1326 if (!hub->descriptor) { 1327 ret = -ENOMEM; 1328 goto fail; ··· 1330 1331 /* Request the entire hub descriptor. 1332 * hub->descriptor can handle USB_MAXCHILDREN ports, 1333 - * but the hub can/will return fewer bytes here. 1334 */ 1335 ret = get_hub_descriptor(hdev, hub->descriptor); 1336 if (ret < 0) { 1337 message = "can't read hub descriptor"; 1338 goto fail; 1339 - } else if (hub->descriptor->bNbrPorts > USB_MAXCHILDREN) { 1340 message = "hub has too many ports!"; 1341 ret = -ENODEV; 1342 goto fail;
··· 362 } 363 364 /* USB 2.0 spec Section 11.24.4.5 */ 365 + static int get_hub_descriptor(struct usb_device *hdev, 366 + struct usb_hub_descriptor *desc) 367 { 368 int i, ret, size; 369 unsigned dtype; ··· 378 for (i = 0; i < 3; i++) { 379 ret = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0), 380 USB_REQ_GET_DESCRIPTOR, USB_DIR_IN | USB_RT_HUB, 381 + dtype << 8, 0, desc, size, 382 USB_CTRL_GET_TIMEOUT); 383 + if (hub_is_superspeed(hdev)) { 384 + if (ret == size) 385 + return ret; 386 + } else if (ret >= USB_DT_HUB_NONVAR_SIZE + 2) { 387 + /* Make sure we have the DeviceRemovable field. */ 388 + size = USB_DT_HUB_NONVAR_SIZE + desc->bNbrPorts / 8 + 1; 389 + if (ret < size) 390 + return -EMSGSIZE; 391 return ret; 392 + } 393 } 394 return -EINVAL; 395 } ··· 1313 } 1314 mutex_init(&hub->status_mutex); 1315 1316 + hub->descriptor = kzalloc(sizeof(*hub->descriptor), GFP_KERNEL); 1317 if (!hub->descriptor) { 1318 ret = -ENOMEM; 1319 goto fail; ··· 1321 1322 /* Request the entire hub descriptor. 1323 * hub->descriptor can handle USB_MAXCHILDREN ports, 1324 + * but a (non-SS) hub can/will return fewer bytes here. 1325 */ 1326 ret = get_hub_descriptor(hdev, hub->descriptor); 1327 if (ret < 0) { 1328 message = "can't read hub descriptor"; 1329 goto fail; 1330 + } 1331 + 1332 + maxchild = USB_MAXCHILDREN; 1333 + if (hub_is_superspeed(hdev)) 1334 + maxchild = min_t(unsigned, maxchild, USB_SS_MAXPORTS); 1335 + 1336 + if (hub->descriptor->bNbrPorts > maxchild) { 1337 message = "hub has too many ports!"; 1338 ret = -ENODEV; 1339 goto fail;
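The hub.c change validates the descriptor length per hub speed: a SuperSpeed hub must return the full fixed-size descriptor, while a USB 2.0 hub must at least cover the variable-length DeviceRemovable bitmap implied by bNbrPorts; it also zeroes the allocation and clamps the reported port count. The bitmap-length check on its own (the 7-byte fixed part mirrors USB_DT_HUB_NONVAR_SIZE; treat the exact constants as assumptions):

#include <stdio.h>

#define HUB_NONVAR_SIZE 7	/* fixed part of the hub descriptor */

/* Return 0 if 'ret' received bytes cover the DeviceRemovable bitmap
 * for 'nbr_ports' ports (one bit per port plus reserved bit 0).
 */
static int check_hub_desc_len(unsigned int nbr_ports, int ret)
{
	int need = HUB_NONVAR_SIZE + nbr_ports / 8 + 1;

	return (ret >= need) ? 0 : -1;
}

int main(void)
{
	printf("4 ports,  9 bytes: %d\n", check_hub_desc_len(4, 9));
	printf("4 ports,  7 bytes: %d\n", check_hub_desc_len(4, 7));
	printf("31 ports, 9 bytes: %d\n", check_hub_desc_len(31, 9));
	return 0;
}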
+3
drivers/usb/core/of.c
··· 53 * 54 * Find the companion device from platform bus. 55 * 56 * Return: On success, a pointer to the companion device, %NULL on failure. 57 */ 58 struct device *usb_of_get_companion_dev(struct device *dev)
··· 53 * 54 * Find the companion device from platform bus. 55 * 56 + * Takes a reference to the returned struct device which needs to be dropped 57 + * after use. 58 + * 59 * Return: On success, a pointer to the companion device, %NULL on failure. 60 */ 61 struct device *usb_of_get_companion_dev(struct device *dev)
+1 -1
drivers/usb/core/urb.c
··· 338 if (!urb || !urb->complete) 339 return -EINVAL; 340 if (urb->hcpriv) { 341 - WARN_ONCE(1, "URB %p submitted while active\n", urb); 342 return -EBUSY; 343 } 344
··· 338 if (!urb || !urb->complete) 339 return -EINVAL; 340 if (urb->hcpriv) { 341 + WARN_ONCE(1, "URB %pK submitted while active\n", urb); 342 return -EBUSY; 343 } 344
+4
drivers/usb/dwc3/dwc3-keystone.c
··· 107 return PTR_ERR(kdwc->usbss); 108 109 kdwc->clk = devm_clk_get(kdwc->dev, "usb"); 110 111 error = clk_prepare_enable(kdwc->clk); 112 if (error < 0) {
··· 107 return PTR_ERR(kdwc->usbss); 108 109 kdwc->clk = devm_clk_get(kdwc->dev, "usb"); 110 + if (IS_ERR(kdwc->clk)) { 111 + dev_err(kdwc->dev, "unable to get usb clock\n"); 112 + return PTR_ERR(kdwc->clk); 113 + } 114 115 error = clk_prepare_enable(kdwc->clk); 116 if (error < 0) {
+4
drivers/usb/dwc3/dwc3-pci.c
··· 39 #define PCI_DEVICE_ID_INTEL_APL 0x5aaa 40 #define PCI_DEVICE_ID_INTEL_KBP 0xa2b0 41 #define PCI_DEVICE_ID_INTEL_GLK 0x31aa 42 43 #define PCI_INTEL_BXT_DSM_UUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511" 44 #define PCI_INTEL_BXT_FUNC_PMU_PWR 4 ··· 272 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), }, 273 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBP), }, 274 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_GLK), }, 275 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), }, 276 { } /* Terminating Entry */ 277 };
··· 39 #define PCI_DEVICE_ID_INTEL_APL 0x5aaa 40 #define PCI_DEVICE_ID_INTEL_KBP 0xa2b0 41 #define PCI_DEVICE_ID_INTEL_GLK 0x31aa 42 + #define PCI_DEVICE_ID_INTEL_CNPLP 0x9dee 43 + #define PCI_DEVICE_ID_INTEL_CNPH 0xa36e 44 45 #define PCI_INTEL_BXT_DSM_UUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511" 46 #define PCI_INTEL_BXT_FUNC_PMU_PWR 4 ··· 270 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), }, 271 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBP), }, 272 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_GLK), }, 273 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CNPLP), }, 274 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CNPH), }, 275 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), }, 276 { } /* Terminating Entry */ 277 };
+20 -1
drivers/usb/dwc3/gadget.c
··· 1261 __dwc3_gadget_start_isoc(dwc, dep, cur_uf); 1262 dep->flags &= ~DWC3_EP_PENDING_REQUEST; 1263 } 1264 } 1265 - return 0; 1266 } 1267 1268 if (!dwc3_calc_trbs_left(dep)) 1269 return 0; 1270 1271 ret = __dwc3_gadget_kick_transfer(dep, 0); 1272 if (ret == -EBUSY) 1273 ret = 0; 1274 ··· 3035 dwc->pending_events = true; 3036 return IRQ_HANDLED; 3037 } 3038 3039 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0)); 3040 count &= DWC3_GEVNTCOUNT_MASK;
··· 1261 __dwc3_gadget_start_isoc(dwc, dep, cur_uf); 1262 dep->flags &= ~DWC3_EP_PENDING_REQUEST; 1263 } 1264 + return 0; 1265 } 1266 + 1267 + if ((dep->flags & DWC3_EP_BUSY) && 1268 + !(dep->flags & DWC3_EP_MISSED_ISOC)) { 1269 + WARN_ON_ONCE(!dep->resource_index); 1270 + ret = __dwc3_gadget_kick_transfer(dep, 1271 + dep->resource_index); 1272 + } 1273 + 1274 + goto out; 1275 } 1276 1277 if (!dwc3_calc_trbs_left(dep)) 1278 return 0; 1279 1280 ret = __dwc3_gadget_kick_transfer(dep, 0); 1281 + out: 1282 if (ret == -EBUSY) 1283 ret = 0; 1284 ··· 3025 dwc->pending_events = true; 3026 return IRQ_HANDLED; 3027 } 3028 + 3029 + /* 3030 + * With PCIe legacy interrupt, test shows that top-half irq handler can 3031 + * be called again after HW interrupt deassertion. Check if bottom-half 3032 + * irq event handler completes before caching new event to prevent 3033 + * losing events. 3034 + */ 3035 + if (evt->flags & DWC3_EVENT_PENDING) 3036 + return IRQ_HANDLED; 3037 3038 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0)); 3039 count &= DWC3_GEVNTCOUNT_MASK;
+5 -5
drivers/usb/gadget/function/f_fs.c
··· 1858 ep->ep->driver_data = ep; 1859 ep->ep->desc = ds; 1860 1861 - comp_desc = (struct usb_ss_ep_comp_descriptor *)(ds + 1862 - USB_DT_ENDPOINT_SIZE); 1863 - ep->ep->maxburst = comp_desc->bMaxBurst + 1; 1864 - 1865 - if (needs_comp_desc) 1866 ep->ep->comp_desc = comp_desc; 1867 1868 ret = usb_ep_enable(ep->ep); 1869 if (likely(!ret)) {
··· 1858 ep->ep->driver_data = ep; 1859 ep->ep->desc = ds; 1860 1861 + if (needs_comp_desc) { 1862 + comp_desc = (struct usb_ss_ep_comp_descriptor *)(ds + 1863 + USB_DT_ENDPOINT_SIZE); 1864 + ep->ep->maxburst = comp_desc->bMaxBurst + 1; 1865 ep->ep->comp_desc = comp_desc; 1866 + } 1867 1868 ret = usb_ep_enable(ep->ep); 1869 if (likely(!ret)) {
+1 -1
drivers/usb/gadget/function/u_serial.c
··· 1256 struct gscons_info *info = &gscons_info; 1257 1258 unregister_console(&gserial_cons); 1259 - if (info->console_thread != NULL) 1260 kthread_stop(info->console_thread); 1261 gs_buf_free(&info->con_buf); 1262 }
··· 1256 struct gscons_info *info = &gscons_info; 1257 1258 unregister_console(&gserial_cons); 1259 + if (!IS_ERR_OR_NULL(info->console_thread)) 1260 kthread_stop(info->console_thread); 1261 gs_buf_free(&info->con_buf); 1262 }
+3 -3
drivers/usb/gadget/udc/dummy_hcd.c
··· 2008 HUB_CHAR_COMMON_OCPM); 2009 desc->bNbrPorts = 1; 2010 desc->u.ss.bHubHdrDecLat = 0x04; /* Worst case: 0.4 micro sec*/ 2011 - desc->u.ss.DeviceRemovable = 0xffff; 2012 } 2013 2014 static inline void hub_descriptor(struct usb_hub_descriptor *desc) ··· 2020 HUB_CHAR_INDV_PORT_LPSM | 2021 HUB_CHAR_COMMON_OCPM); 2022 desc->bNbrPorts = 1; 2023 - desc->u.hs.DeviceRemovable[0] = 0xff; 2024 - desc->u.hs.DeviceRemovable[1] = 0xff; 2025 } 2026 2027 static int dummy_hub_control(
··· 2008 HUB_CHAR_COMMON_OCPM); 2009 desc->bNbrPorts = 1; 2010 desc->u.ss.bHubHdrDecLat = 0x04; /* Worst case: 0.4 micro sec*/ 2011 + desc->u.ss.DeviceRemovable = 0; 2012 } 2013 2014 static inline void hub_descriptor(struct usb_hub_descriptor *desc) ··· 2020 HUB_CHAR_INDV_PORT_LPSM | 2021 HUB_CHAR_COMMON_OCPM); 2022 desc->bNbrPorts = 1; 2023 + desc->u.hs.DeviceRemovable[0] = 0; 2024 + desc->u.hs.DeviceRemovable[1] = 0xff; /* PortPwrCtrlMask */ 2025 } 2026 2027 static int dummy_hub_control(
+3 -1
drivers/usb/host/ehci-platform.c
··· 384 } 385 386 companion_dev = usb_of_get_companion_dev(hcd->self.controller); 387 - if (companion_dev) 388 device_pm_wait_for_dev(hcd->self.controller, companion_dev); 389 390 ehci_resume(hcd, priv->reset_on_resume); 391 return 0;
··· 384 } 385 386 companion_dev = usb_of_get_companion_dev(hcd->self.controller); 387 + if (companion_dev) { 388 device_pm_wait_for_dev(hcd->self.controller, companion_dev); 389 + put_device(companion_dev); 390 + } 391 392 ehci_resume(hcd, priv->reset_on_resume); 393 return 0;
+4 -2
drivers/usb/host/r8a66597-hcd.c
··· 1269 time = 30; 1270 break; 1271 default: 1272 - time = 300; 1273 break; 1274 } 1275 ··· 1785 pipe = td->pipe; 1786 pipe_stop(r8a66597, pipe); 1787 1788 new_td = td; 1789 do { 1790 list_move_tail(&new_td->queue, ··· 1795 new_td = td; 1796 break; 1797 } 1798 - } while (td != new_td && td->address == new_td->address); 1799 1800 start_transfer(r8a66597, new_td); 1801
··· 1269 time = 30; 1270 break; 1271 default: 1272 + time = 50; 1273 break; 1274 } 1275 ··· 1785 pipe = td->pipe; 1786 pipe_stop(r8a66597, pipe); 1787 1788 + /* Select a different address or endpoint */ 1789 new_td = td; 1790 do { 1791 list_move_tail(&new_td->queue, ··· 1794 new_td = td; 1795 break; 1796 } 1797 + } while (td != new_td && td->address == new_td->address && 1798 + td->pipe->info.epnum == new_td->pipe->info.epnum); 1799 1800 start_transfer(r8a66597, new_td); 1801
+1 -1
drivers/usb/host/xhci-hub.c
··· 419 wait_for_completion(cmd->completion); 420 421 if (cmd->status == COMP_COMMAND_ABORTED || 422 - cmd->status == COMP_STOPPED) { 423 xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n"); 424 ret = -ETIME; 425 }
··· 419 wait_for_completion(cmd->completion); 420 421 if (cmd->status == COMP_COMMAND_ABORTED || 422 + cmd->status == COMP_COMMAND_RING_STOPPED) { 423 xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n"); 424 ret = -ETIME; 425 }
+6 -5
drivers/usb/host/xhci-mem.c
··· 56 } 57 58 if (max_packet) { 59 - seg->bounce_buf = kzalloc(max_packet, flags | GFP_DMA); 60 if (!seg->bounce_buf) { 61 dma_pool_free(xhci->segment_pool, seg->trbs, dma); 62 kfree(seg); ··· 1724 xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma); 1725 for (i = 0; i < num_sp; i++) { 1726 dma_addr_t dma; 1727 - void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma, 1728 flags); 1729 if (!buf) 1730 goto fail_sp4; ··· 2307 /* Place limits on the number of roothub ports so that the hub 2308 * descriptors aren't longer than the USB core will allocate. 2309 */ 2310 - if (xhci->num_usb3_ports > 15) { 2311 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2312 - "Limiting USB 3.0 roothub ports to 15."); 2313 - xhci->num_usb3_ports = 15; 2314 } 2315 if (xhci->num_usb2_ports > USB_MAXCHILDREN) { 2316 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
··· 56 } 57 58 if (max_packet) { 59 + seg->bounce_buf = kzalloc(max_packet, flags); 60 if (!seg->bounce_buf) { 61 dma_pool_free(xhci->segment_pool, seg->trbs, dma); 62 kfree(seg); ··· 1724 xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma); 1725 for (i = 0; i < num_sp; i++) { 1726 dma_addr_t dma; 1727 + void *buf = dma_zalloc_coherent(dev, xhci->page_size, &dma, 1728 flags); 1729 if (!buf) 1730 goto fail_sp4; ··· 2307 /* Place limits on the number of roothub ports so that the hub 2308 * descriptors aren't longer than the USB core will allocate. 2309 */ 2310 + if (xhci->num_usb3_ports > USB_SS_MAXPORTS) { 2311 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2312 + "Limiting USB 3.0 roothub ports to %u.", 2313 + USB_SS_MAXPORTS); 2314 + xhci->num_usb3_ports = USB_SS_MAXPORTS; 2315 } 2316 if (xhci->num_usb2_ports > USB_MAXCHILDREN) { 2317 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+5 -2
drivers/usb/host/xhci-pci.c
··· 52 #define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8 53 #define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8 54 #define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8 55 56 static const char hcd_name[] = "xhci_hcd"; 57 ··· 167 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI || 168 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI || 169 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI || 170 - pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI)) { 171 xhci->quirks |= XHCI_PME_STUCK_QUIRK; 172 } 173 if (pdev->vendor == PCI_VENDOR_ID_INTEL && ··· 177 } 178 if (pdev->vendor == PCI_VENDOR_ID_INTEL && 179 (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI || 180 - pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI)) 181 xhci->quirks |= XHCI_MISSING_CAS; 182 183 if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
··· 52 #define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8 53 #define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8 54 #define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8 55 + #define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0 56 57 static const char hcd_name[] = "xhci_hcd"; 58 ··· 166 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI || 167 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI || 168 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI || 169 + pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI || 170 + pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI)) { 171 xhci->quirks |= XHCI_PME_STUCK_QUIRK; 172 } 173 if (pdev->vendor == PCI_VENDOR_ID_INTEL && ··· 175 } 176 if (pdev->vendor == PCI_VENDOR_ID_INTEL && 177 (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI || 178 + pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI || 179 + pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI)) 180 xhci->quirks |= XHCI_MISSING_CAS; 181 182 if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+1 -1
drivers/usb/host/xhci-plat.c
··· 177 178 irq = platform_get_irq(pdev, 0); 179 if (irq < 0) 180 - return -ENODEV; 181 182 /* 183 * sysdev must point to a device that is known to the system firmware
··· 177 178 irq = platform_get_irq(pdev, 0); 179 if (irq < 0) 180 + return irq; 181 182 /* 183 * sysdev must point to a device that is known to the system firmware
+9 -11
drivers/usb/host/xhci-ring.c
··· 323 if (i_cmd->status != COMP_COMMAND_ABORTED) 324 continue; 325 326 - i_cmd->status = COMP_STOPPED; 327 328 xhci_dbg(xhci, "Turn aborted command %p to no-op\n", 329 i_cmd->command_trb); ··· 641 xhci_urb_free_priv(urb_priv); 642 usb_hcd_unlink_urb_from_ep(hcd, urb); 643 spin_unlock(&xhci->lock); 644 - usb_hcd_giveback_urb(hcd, urb, status); 645 trace_xhci_urb_giveback(urb); 646 spin_lock(&xhci->lock); 647 } 648 ··· 1380 cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status)); 1381 1382 /* If CMD ring stopped we own the trbs between enqueue and dequeue */ 1383 - if (cmd_comp_code == COMP_STOPPED) { 1384 complete_all(&xhci->cmd_ring_stop_completion); 1385 return; 1386 } ··· 1436 break; 1437 case TRB_CMD_NOOP: 1438 /* Is this an aborted command turned to NO-OP? */ 1439 - if (cmd->status == COMP_STOPPED) 1440 - cmd_comp_code = COMP_STOPPED; 1441 break; 1442 case TRB_RESET_EP: 1443 WARN_ON(slot_id != TRB_TO_SLOT_ID( ··· 2677 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 2678 union xhci_trb *event_ring_deq; 2679 irqreturn_t ret = IRQ_NONE; 2680 dma_addr_t deq; 2681 u64 temp_64; 2682 u32 status; 2683 2684 - spin_lock(&xhci->lock); 2685 /* Check if the xHC generated the interrupt, or the irq is shared */ 2686 status = readl(&xhci->op_regs->status); 2687 if (status == ~(u32)0) { ··· 2708 */ 2709 status |= STS_EINT; 2710 writel(status, &xhci->op_regs->status); 2711 - /* FIXME when MSI-X is supported and there are multiple vectors */ 2712 - /* Clear the MSI-X event interrupt status */ 2713 2714 - if (hcd->irq) { 2715 u32 irq_pending; 2716 - /* Acknowledge the PCI interrupt */ 2717 irq_pending = readl(&xhci->ir_set->irq_pending); 2718 irq_pending |= IMAN_IP; 2719 writel(irq_pending, &xhci->ir_set->irq_pending); ··· 2755 ret = IRQ_HANDLED; 2756 2757 out: 2758 - spin_unlock(&xhci->lock); 2759 2760 return ret; 2761 }
··· 323 if (i_cmd->status != COMP_COMMAND_ABORTED) 324 continue; 325 326 + i_cmd->status = COMP_COMMAND_RING_STOPPED; 327 328 xhci_dbg(xhci, "Turn aborted command %p to no-op\n", 329 i_cmd->command_trb); ··· 641 xhci_urb_free_priv(urb_priv); 642 usb_hcd_unlink_urb_from_ep(hcd, urb); 643 spin_unlock(&xhci->lock); 644 trace_xhci_urb_giveback(urb); 645 + usb_hcd_giveback_urb(hcd, urb, status); 646 spin_lock(&xhci->lock); 647 } 648 ··· 1380 cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status)); 1381 1382 /* If CMD ring stopped we own the trbs between enqueue and dequeue */ 1383 + if (cmd_comp_code == COMP_COMMAND_RING_STOPPED) { 1384 complete_all(&xhci->cmd_ring_stop_completion); 1385 return; 1386 } ··· 1436 break; 1437 case TRB_CMD_NOOP: 1438 /* Is this an aborted command turned to NO-OP? */ 1439 + if (cmd->status == COMP_COMMAND_RING_STOPPED) 1440 + cmd_comp_code = COMP_COMMAND_RING_STOPPED; 1441 break; 1442 case TRB_RESET_EP: 1443 WARN_ON(slot_id != TRB_TO_SLOT_ID( ··· 2677 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 2678 union xhci_trb *event_ring_deq; 2679 irqreturn_t ret = IRQ_NONE; 2680 + unsigned long flags; 2681 dma_addr_t deq; 2682 u64 temp_64; 2683 u32 status; 2684 2685 + spin_lock_irqsave(&xhci->lock, flags); 2686 /* Check if the xHC generated the interrupt, or the irq is shared */ 2687 status = readl(&xhci->op_regs->status); 2688 if (status == ~(u32)0) { ··· 2707 */ 2708 status |= STS_EINT; 2709 writel(status, &xhci->op_regs->status); 2710 2711 + if (!hcd->msi_enabled) { 2712 u32 irq_pending; 2713 irq_pending = readl(&xhci->ir_set->irq_pending); 2714 irq_pending |= IMAN_IP; 2715 writel(irq_pending, &xhci->ir_set->irq_pending); ··· 2757 ret = IRQ_HANDLED; 2758 2759 out: 2760 + spin_unlock_irqrestore(&xhci->lock, flags); 2761 2762 return ret; 2763 }
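Among the xhci-ring changes, the interrupt handler now takes the driver lock with spin_lock_irqsave()/spin_unlock_irqrestore() and keeps the flags locally, the safe form when the handler may also be entered with interrupts enabled. A minimal sketch of that locking shape, assuming a hypothetical foo_dev structure that owns the lock:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct foo_dev {
        spinlock_t lock;        /* spin_lock_init() it during probe */
};

static irqreturn_t foo_irq(int irq, void *data)
{
        struct foo_dev *foo = data;
        unsigned long flags;
        irqreturn_t ret = IRQ_NONE;

        spin_lock_irqsave(&foo->lock, flags);
        /* read and acknowledge hardware status here, set ret = IRQ_HANDLED */
        spin_unlock_irqrestore(&foo->lock, flags);

        return ret;
}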
+7 -6
drivers/usb/host/xhci.c
··· 359 /* fall back to msi*/ 360 ret = xhci_setup_msi(xhci); 361 362 - if (!ret) 363 - /* hcd->irq is 0, we have MSI */ 364 return 0; 365 366 if (!pdev->irq) { 367 xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n"); ··· 1764 1765 switch (*cmd_status) { 1766 case COMP_COMMAND_ABORTED: 1767 - case COMP_STOPPED: 1768 xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n"); 1769 ret = -ETIME; 1770 break; ··· 1814 1815 switch (*cmd_status) { 1816 case COMP_COMMAND_ABORTED: 1817 - case COMP_STOPPED: 1818 xhci_warn(xhci, "Timeout while waiting for evaluate context command\n"); 1819 ret = -ETIME; 1820 break; ··· 3433 ret = reset_device_cmd->status; 3434 switch (ret) { 3435 case COMP_COMMAND_ABORTED: 3436 - case COMP_STOPPED: 3437 xhci_warn(xhci, "Timeout waiting for reset device command\n"); 3438 ret = -ETIME; 3439 goto command_cleanup; ··· 3818 */ 3819 switch (command->status) { 3820 case COMP_COMMAND_ABORTED: 3821 - case COMP_STOPPED: 3822 xhci_warn(xhci, "Timeout while waiting for setup device command\n"); 3823 ret = -ETIME; 3824 break;
··· 359 /* fall back to msi*/ 360 ret = xhci_setup_msi(xhci); 361 362 + if (!ret) { 363 + hcd->msi_enabled = 1; 364 return 0; 365 + } 366 367 if (!pdev->irq) { 368 xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n"); ··· 1763 1764 switch (*cmd_status) { 1765 case COMP_COMMAND_ABORTED: 1766 + case COMP_COMMAND_RING_STOPPED: 1767 xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n"); 1768 ret = -ETIME; 1769 break; ··· 1813 1814 switch (*cmd_status) { 1815 case COMP_COMMAND_ABORTED: 1816 + case COMP_COMMAND_RING_STOPPED: 1817 xhci_warn(xhci, "Timeout while waiting for evaluate context command\n"); 1818 ret = -ETIME; 1819 break; ··· 3432 ret = reset_device_cmd->status; 3433 switch (ret) { 3434 case COMP_COMMAND_ABORTED: 3435 + case COMP_COMMAND_RING_STOPPED: 3436 xhci_warn(xhci, "Timeout waiting for reset device command\n"); 3437 ret = -ETIME; 3438 goto command_cleanup; ··· 3817 */ 3818 switch (command->status) { 3819 case COMP_COMMAND_ABORTED: 3820 + case COMP_COMMAND_RING_STOPPED: 3821 xhci_warn(xhci, "Timeout while waiting for setup device command\n"); 3822 ret = -ETIME; 3823 break;
+1 -1
drivers/usb/misc/chaoskey.c
··· 192 193 dev->in_ep = in_ep; 194 195 - if (udev->descriptor.idVendor != ALEA_VENDOR_ID) 196 dev->reads_started = 1; 197 198 dev->size = size;
··· 192 193 dev->in_ep = in_ep; 194 195 + if (le16_to_cpu(udev->descriptor.idVendor) != ALEA_VENDOR_ID) 196 dev->reads_started = 1; 197 198 dev->size = size;
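The chaoskey fix byte-swaps the descriptor field before comparing it: idVendor is stored little-endian on the wire, so a raw comparison only happens to work on little-endian hosts. A minimal sketch of the pattern; the vendor constant is illustrative:

#include <linux/usb.h>

#define EXAMPLE_VENDOR_ID       0x1234  /* illustrative value */

static bool is_example_vendor(struct usb_device *udev)
{
        /* descriptor fields are __le16; convert before comparing */
        return le16_to_cpu(udev->descriptor.idVendor) == EXAMPLE_VENDOR_ID;
}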
+1 -1
drivers/usb/misc/iowarrior.c
··· 554 info.revision = le16_to_cpu(dev->udev->descriptor.bcdDevice); 555 556 /* 0==UNKNOWN, 1==LOW(usb1.1) ,2=FULL(usb1.1), 3=HIGH(usb2.0) */ 557 - info.speed = le16_to_cpu(dev->udev->speed); 558 info.if_num = dev->interface->cur_altsetting->desc.bInterfaceNumber; 559 info.report_size = dev->report_size; 560
··· 554 info.revision = le16_to_cpu(dev->udev->descriptor.bcdDevice); 555 556 /* 0==UNKNOWN, 1==LOW(usb1.1) ,2=FULL(usb1.1), 3=HIGH(usb2.0) */ 557 + info.speed = dev->udev->speed; 558 info.if_num = dev->interface->cur_altsetting->desc.bInterfaceNumber; 559 info.report_size = dev->report_size; 560
+1
drivers/usb/misc/legousbtower.c
··· 926 USB_MAJOR, dev->minor); 927 928 exit: 929 return retval; 930 931 error:
··· 926 USB_MAJOR, dev->minor); 927 928 exit: 929 + kfree(get_version_reply); 930 return retval; 931 932 error:
+1 -1
drivers/usb/misc/sisusbvga/sisusb_con.c
··· 973 974 mutex_unlock(&sisusb->lock); 975 976 - return 1; 977 } 978 979 /* Interface routine */
··· 973 974 mutex_unlock(&sisusb->lock); 975 976 + return true; 977 } 978 979 /* Interface routine */
+5 -4
drivers/usb/musb/musb_host.c
··· 2780 int ret; 2781 struct usb_hcd *hcd = musb->hcd; 2782 2783 - MUSB_HST_MODE(musb); 2784 - musb->xceiv->otg->default_a = 1; 2785 - musb->xceiv->otg->state = OTG_STATE_A_IDLE; 2786 - 2787 otg_set_host(musb->xceiv->otg, &hcd->self); 2788 hcd->self.otg_port = 1; 2789 musb->xceiv->otg->host = &hcd->self;
··· 2780 int ret; 2781 struct usb_hcd *hcd = musb->hcd; 2782 2783 + if (musb->port_mode == MUSB_PORT_MODE_HOST) { 2784 + MUSB_HST_MODE(musb); 2785 + musb->xceiv->otg->default_a = 1; 2786 + musb->xceiv->otg->state = OTG_STATE_A_IDLE; 2787 + } 2788 otg_set_host(musb->xceiv->otg, &hcd->self); 2789 hcd->self.otg_port = 1; 2790 musb->xceiv->otg->host = &hcd->self;
+9 -4
drivers/usb/musb/tusb6010_omap.c
··· 219 u32 dma_remaining; 220 int src_burst, dst_burst; 221 u16 csr; 222 int ch; 223 s8 dmareq; 224 s8 sync_dev; ··· 391 392 if (chdat->tx) { 393 /* Send transfer_packet_sz packets at a time */ 394 - musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, 395 - chdat->transfer_packet_sz); 396 397 musb_writel(ep_conf, TUSB_EP_TX_OFFSET, 398 TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len)); 399 } else { 400 /* Receive transfer_packet_sz packets at a time */ 401 - musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, 402 - chdat->transfer_packet_sz << 16); 403 404 musb_writel(ep_conf, TUSB_EP_RX_OFFSET, 405 TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
··· 219 u32 dma_remaining; 220 int src_burst, dst_burst; 221 u16 csr; 222 + u32 psize; 223 int ch; 224 s8 dmareq; 225 s8 sync_dev; ··· 390 391 if (chdat->tx) { 392 /* Send transfer_packet_sz packets at a time */ 393 + psize = musb_readl(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET); 394 + psize &= ~0x7ff; 395 + psize |= chdat->transfer_packet_sz; 396 + musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, psize); 397 398 musb_writel(ep_conf, TUSB_EP_TX_OFFSET, 399 TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len)); 400 } else { 401 /* Receive transfer_packet_sz packets at a time */ 402 + psize = musb_readl(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET); 403 + psize &= ~(0x7ff << 16); 404 + psize |= (chdat->transfer_packet_sz << 16); 405 + musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, psize); 406 407 musb_writel(ep_conf, TUSB_EP_RX_OFFSET, 408 TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
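The tusb6010_omap hunk converts a blind write of TUSB_EP_MAX_PACKET_SIZE_OFFSET into a read-modify-write, so only the packet-size field changes and the rest of the register is preserved. A generic sketch of the read-modify-write idiom over an MMIO register, assuming an 11-bit field at bit 0 as in the TX case; the helper itself is illustrative:

#include <linux/io.h>

static void update_psize_field(void __iomem *reg, u32 packet_sz)
{
        u32 val;

        val = readl(reg);               /* current register contents */
        val &= ~0x7ff;                  /* clear only the target field */
        val |= packet_sz & 0x7ff;       /* insert the new value */
        writel(val, reg);               /* neighbouring fields unchanged */
}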
+5 -5
drivers/usb/serial/ftdi_sio.c
··· 809 { USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) }, 810 { USB_DEVICE(FTDI_VID, CYBER_CORTEX_AV_PID), 811 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 812 - { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID), 813 - .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 814 - { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID), 815 - .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 816 { USB_DEVICE(FIC_VID, FIC_NEO1973_DEBUG_PID), 817 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 818 { USB_DEVICE(FTDI_VID, FTDI_OOCDLINK_PID), ··· 1527 (new_serial.flags & ASYNC_FLAGS)); 1528 priv->custom_divisor = new_serial.custom_divisor; 1529 1530 write_latency_timer(port); 1531 1532 - check_and_exit: 1533 if ((old_priv.flags & ASYNC_SPD_MASK) != 1534 (priv->flags & ASYNC_SPD_MASK)) { 1535 if ((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
··· 809 { USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) }, 810 { USB_DEVICE(FTDI_VID, CYBER_CORTEX_AV_PID), 811 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 812 + { USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID, 1) }, 813 + { USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID, 1) }, 814 + { USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_TINY_PID, 1) }, 815 + { USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_TINY_H_PID, 1) }, 816 { USB_DEVICE(FIC_VID, FIC_NEO1973_DEBUG_PID), 817 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 818 { USB_DEVICE(FTDI_VID, FTDI_OOCDLINK_PID), ··· 1527 (new_serial.flags & ASYNC_FLAGS)); 1528 priv->custom_divisor = new_serial.custom_divisor; 1529 1530 + check_and_exit: 1531 write_latency_timer(port); 1532 1533 if ((old_priv.flags & ASYNC_SPD_MASK) != 1534 (priv->flags & ASYNC_SPD_MASK)) { 1535 if ((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
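The ftdi_sio table switches the Olimex ARM-USB-OCD(-H) entries to USB_DEVICE_INTERFACE_NUMBER(..., 1) and adds the ARM-USB-TINY(-H) IDs the same way, so the serial driver binds only to the UART interface and the JTAG interface stays available to userspace tools. A minimal sketch of an interface-number match entry; the IDs are illustrative, not real assignments:

#include <linux/module.h>
#include <linux/usb.h>

#define EXAMPLE_VID     0x1234          /* illustrative IDs */
#define EXAMPLE_PID     0x5678

static const struct usb_device_id example_ids[] = {
        /* match interface 1 only; interface 0 is left to other drivers */
        { USB_DEVICE_INTERFACE_NUMBER(EXAMPLE_VID, EXAMPLE_PID, 1) },
        { }                             /* terminating entry */
};
MODULE_DEVICE_TABLE(usb, example_ids);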
+2
drivers/usb/serial/ftdi_sio_ids.h
··· 882 /* Olimex */ 883 #define OLIMEX_VID 0x15BA 884 #define OLIMEX_ARM_USB_OCD_PID 0x0003 885 #define OLIMEX_ARM_USB_OCD_H_PID 0x002b 886 887 /*
··· 882 /* Olimex */ 883 #define OLIMEX_VID 0x15BA 884 #define OLIMEX_ARM_USB_OCD_PID 0x0003 885 + #define OLIMEX_ARM_USB_TINY_PID 0x0004 886 + #define OLIMEX_ARM_USB_TINY_H_PID 0x002a 887 #define OLIMEX_ARM_USB_OCD_H_PID 0x002b 888 889 /*
+4 -1
drivers/usb/serial/io_ti.c
··· 2336 if (!baud) { 2337 /* pick a default, any default... */ 2338 baud = 9600; 2339 - } else 2340 tty_encode_baud_rate(tty, baud, baud); 2341 2342 edge_port->baud_rate = baud; 2343 config->wBaudRate = (__u16)((461550L + baud/2) / baud);
··· 2336 if (!baud) { 2337 /* pick a default, any default... */ 2338 baud = 9600; 2339 + } else { 2340 + /* Avoid a zero divisor. */ 2341 + baud = min(baud, 461550); 2342 tty_encode_baud_rate(tty, baud, baud); 2343 + } 2344 2345 edge_port->baud_rate = baud; 2346 config->wBaudRate = (__u16)((461550L + baud/2) / baud);
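The io_ti change clamps the requested rate before computing wBaudRate = (461550 + baud/2) / baud: with integer division the divisor drops to zero once baud exceeds 923100, and clamping to the 461550 base keeps it at least 1. A standalone C check of the arithmetic, with illustrative rates:

#include <stdio.h>

int main(void)
{
        long rates[] = { 9600, 115200, 461550, 921600, 1000000 };

        for (int i = 0; i < 5; i++) {
                long baud = rates[i];
                long divisor = (461550L + baud / 2) / baud;

                /* without the clamp, 1000000 baud yields divisor 0 */
                printf("baud %ld -> divisor %ld\n", baud, divisor);
        }
        return 0;
}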
+12 -9
drivers/usb/serial/ir-usb.c
··· 197 static int ir_startup(struct usb_serial *serial) 198 { 199 struct usb_irda_cs_descriptor *irda_desc; 200 201 irda_desc = irda_usb_find_class_desc(serial, 0); 202 if (!irda_desc) { ··· 206 return -ENODEV; 207 } 208 209 dev_dbg(&serial->dev->dev, 210 "%s - Baud rates supported:%s%s%s%s%s%s%s%s%s\n", 211 __func__, 212 - (irda_desc->wBaudRate & USB_IRDA_BR_2400) ? " 2400" : "", 213 - (irda_desc->wBaudRate & USB_IRDA_BR_9600) ? " 9600" : "", 214 - (irda_desc->wBaudRate & USB_IRDA_BR_19200) ? " 19200" : "", 215 - (irda_desc->wBaudRate & USB_IRDA_BR_38400) ? " 38400" : "", 216 - (irda_desc->wBaudRate & USB_IRDA_BR_57600) ? " 57600" : "", 217 - (irda_desc->wBaudRate & USB_IRDA_BR_115200) ? " 115200" : "", 218 - (irda_desc->wBaudRate & USB_IRDA_BR_576000) ? " 576000" : "", 219 - (irda_desc->wBaudRate & USB_IRDA_BR_1152000) ? " 1152000" : "", 220 - (irda_desc->wBaudRate & USB_IRDA_BR_4000000) ? " 4000000" : ""); 221 222 switch (irda_desc->bmAdditionalBOFs) { 223 case USB_IRDA_AB_48:
··· 197 static int ir_startup(struct usb_serial *serial) 198 { 199 struct usb_irda_cs_descriptor *irda_desc; 200 + int rates; 201 202 irda_desc = irda_usb_find_class_desc(serial, 0); 203 if (!irda_desc) { ··· 205 return -ENODEV; 206 } 207 208 + rates = le16_to_cpu(irda_desc->wBaudRate); 209 + 210 dev_dbg(&serial->dev->dev, 211 "%s - Baud rates supported:%s%s%s%s%s%s%s%s%s\n", 212 __func__, 213 + (rates & USB_IRDA_BR_2400) ? " 2400" : "", 214 + (rates & USB_IRDA_BR_9600) ? " 9600" : "", 215 + (rates & USB_IRDA_BR_19200) ? " 19200" : "", 216 + (rates & USB_IRDA_BR_38400) ? " 38400" : "", 217 + (rates & USB_IRDA_BR_57600) ? " 57600" : "", 218 + (rates & USB_IRDA_BR_115200) ? " 115200" : "", 219 + (rates & USB_IRDA_BR_576000) ? " 576000" : "", 220 + (rates & USB_IRDA_BR_1152000) ? " 1152000" : "", 221 + (rates & USB_IRDA_BR_4000000) ? " 4000000" : ""); 222 223 switch (irda_desc->bmAdditionalBOFs) { 224 case USB_IRDA_AB_48:
+1 -1
drivers/usb/serial/mct_u232.c
··· 189 return -ENOMEM; 190 191 divisor = mct_u232_calculate_baud_rate(serial, value, &speed); 192 - put_unaligned_le32(cpu_to_le32(divisor), buf); 193 rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), 194 MCT_U232_SET_BAUD_RATE_REQUEST, 195 MCT_U232_SET_REQUEST_TYPE,
··· 189 return -ENOMEM; 190 191 divisor = mct_u232_calculate_baud_rate(serial, value, &speed); 192 + put_unaligned_le32(divisor, buf); 193 rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), 194 MCT_U232_SET_BAUD_RATE_REQUEST, 195 MCT_U232_SET_REQUEST_TYPE,
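The mct_u232 fix drops the redundant cpu_to_le32(): put_unaligned_le32() already stores its argument in little-endian byte order, so converting first would swap the bytes twice on big-endian hosts. A minimal sketch; the helper name is illustrative:

#include <linux/types.h>
#include <asm/unaligned.h>

static void store_divisor_le(u8 *buf, u32 divisor)
{
        /* put_unaligned_le32() performs the CPU-to-LE conversion itself */
        put_unaligned_le32(divisor, buf);
}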
+8
drivers/usb/serial/option.c
··· 281 #define TELIT_PRODUCT_LE922_USBCFG0 0x1042 282 #define TELIT_PRODUCT_LE922_USBCFG3 0x1043 283 #define TELIT_PRODUCT_LE922_USBCFG5 0x1045 284 #define TELIT_PRODUCT_LE920 0x1200 285 #define TELIT_PRODUCT_LE910 0x1201 286 #define TELIT_PRODUCT_LE910_USBCFG4 0x1206 ··· 639 640 static const struct option_blacklist_info simcom_sim7100e_blacklist = { 641 .reserved = BIT(5) | BIT(6), 642 }; 643 644 static const struct option_blacklist_info telit_le910_blacklist = { ··· 1241 .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, 1242 { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff), 1243 .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 }, 1244 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910), 1245 .driver_info = (kernel_ulong_t)&telit_le910_blacklist }, 1246 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
··· 281 #define TELIT_PRODUCT_LE922_USBCFG0 0x1042 282 #define TELIT_PRODUCT_LE922_USBCFG3 0x1043 283 #define TELIT_PRODUCT_LE922_USBCFG5 0x1045 284 + #define TELIT_PRODUCT_ME910 0x1100 285 #define TELIT_PRODUCT_LE920 0x1200 286 #define TELIT_PRODUCT_LE910 0x1201 287 #define TELIT_PRODUCT_LE910_USBCFG4 0x1206 ··· 638 639 static const struct option_blacklist_info simcom_sim7100e_blacklist = { 640 .reserved = BIT(5) | BIT(6), 641 + }; 642 + 643 + static const struct option_blacklist_info telit_me910_blacklist = { 644 + .sendsetup = BIT(0), 645 + .reserved = BIT(1) | BIT(3), 646 }; 647 648 static const struct option_blacklist_info telit_le910_blacklist = { ··· 1235 .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, 1236 { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff), 1237 .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 }, 1238 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910), 1239 + .driver_info = (kernel_ulong_t)&telit_me910_blacklist }, 1240 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910), 1241 .driver_info = (kernel_ulong_t)&telit_le910_blacklist }, 1242 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
+2
drivers/usb/serial/qcserial.c
··· 162 {DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx */ 163 {DEVICE_SWI(0x1199, 0x9078)}, /* Sierra Wireless EM74xx */ 164 {DEVICE_SWI(0x1199, 0x9079)}, /* Sierra Wireless EM74xx */ 165 {DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */ 166 {DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */ 167 {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
··· 162 {DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx */ 163 {DEVICE_SWI(0x1199, 0x9078)}, /* Sierra Wireless EM74xx */ 164 {DEVICE_SWI(0x1199, 0x9079)}, /* Sierra Wireless EM74xx */ 165 + {DEVICE_SWI(0x1199, 0x907a)}, /* Sierra Wireless EM74xx QDL */ 166 + {DEVICE_SWI(0x1199, 0x907b)}, /* Sierra Wireless EM74xx */ 167 {DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */ 168 {DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */ 169 {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
+55 -35
drivers/usb/storage/ene_ub6250.c
··· 446 #define SD_BLOCK_LEN 9 447 448 struct ene_ub6250_info { 449 /* for 6250 code */ 450 struct SD_STATUS SD_Status; 451 struct MS_STATUS MS_Status; ··· 497 498 static void ene_ub6250_info_destructor(void *extra) 499 { 500 if (!extra) 501 return; 502 } 503 504 static int ene_send_scsi_cmd(struct us_data *us, u8 fDir, void *buf, int use_sg) ··· 867 u8 PageNum, u32 *PageBuf, struct ms_lib_type_extdat *ExtraDat) 868 { 869 struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; 870 int result; 871 - u8 ExtBuf[4]; 872 u32 bn = PhyBlockAddr * 0x20 + PageNum; 873 874 result = ene_load_bincode(us, MS_RW_PATTERN); ··· 909 bcb->CDB[2] = (unsigned char)(PhyBlockAddr>>16); 910 bcb->CDB[6] = 0x01; 911 912 - result = ene_send_scsi_cmd(us, FDIR_READ, &ExtBuf, 0); 913 if (result != USB_STOR_XFER_GOOD) 914 return USB_STOR_TRANSPORT_ERROR; 915 ··· 918 ExtraDat->status0 = 0x10; /* Not yet,fireware support */ 919 920 ExtraDat->status1 = 0x00; /* Not yet,fireware support */ 921 - ExtraDat->ovrflg = ExtBuf[0]; 922 - ExtraDat->mngflg = ExtBuf[1]; 923 - ExtraDat->logadr = memstick_logaddr(ExtBuf[2], ExtBuf[3]); 924 925 return USB_STOR_TRANSPORT_GOOD; 926 } ··· 1340 u8 PageNum, struct ms_lib_type_extdat *ExtraDat) 1341 { 1342 struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; 1343 int result; 1344 - u8 ExtBuf[4]; 1345 1346 memset(bcb, 0, sizeof(struct bulk_cb_wrap)); 1347 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); ··· 1356 bcb->CDB[2] = (unsigned char)(PhyBlock>>16); 1357 bcb->CDB[6] = 0x01; 1358 1359 - result = ene_send_scsi_cmd(us, FDIR_READ, &ExtBuf, 0); 1360 if (result != USB_STOR_XFER_GOOD) 1361 return USB_STOR_TRANSPORT_ERROR; 1362 ··· 1364 ExtraDat->intr = 0x80; /* Not yet, waiting for fireware support */ 1365 ExtraDat->status0 = 0x10; /* Not yet, waiting for fireware support */ 1366 ExtraDat->status1 = 0x00; /* Not yet, waiting for fireware support */ 1367 - ExtraDat->ovrflg = ExtBuf[0]; 1368 - ExtraDat->mngflg = ExtBuf[1]; 1369 - ExtraDat->logadr = memstick_logaddr(ExtBuf[2], ExtBuf[3]); 1370 1371 return USB_STOR_TRANSPORT_GOOD; 1372 } ··· 1565 u16 PhyBlock, newblk, i; 1566 u16 LogStart, LogEnde; 1567 struct ms_lib_type_extdat extdat; 1568 - u8 buf[0x200]; 1569 u32 count = 0, index = 0; 1570 struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; 1571 1572 for (PhyBlock = 0; PhyBlock < info->MS_Lib.NumberOfPhyBlock;) { 1573 ms_lib_phy_to_log_range(PhyBlock, &LogStart, &LogEnde); ··· 1581 } 1582 1583 if (count == PhyBlock) { 1584 - ms_lib_read_extrablock(us, PhyBlock, 0, 0x80, &buf); 1585 count += 0x80; 1586 } 1587 index = (PhyBlock % 0x80) * 4; 1588 1589 - extdat.ovrflg = buf[index]; 1590 - extdat.mngflg = buf[index+1]; 1591 - extdat.logadr = memstick_logaddr(buf[index+2], buf[index+3]); 1592 1593 if ((extdat.ovrflg & MS_REG_OVR_BKST) != MS_REG_OVR_BKST_OK) { 1594 ms_lib_setacquired_errorblock(us, PhyBlock); ··· 2073 { 2074 struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; 2075 int result; 2076 - u8 buf[0x200]; 2077 u16 MSP_BlockSize, MSP_UserAreaBlocks; 2078 struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; 2079 2080 printk(KERN_INFO "transport --- ENE_MSInit\n"); 2081 ··· 2094 bcb->CDB[0] = 0xF1; 2095 bcb->CDB[1] = 0x01; 2096 2097 - result = ene_send_scsi_cmd(us, FDIR_READ, &buf, 0); 2098 if (result != USB_STOR_XFER_GOOD) { 2099 printk(KERN_ERR "Execution MS Init Code Fail !!\n"); 2100 return USB_STOR_TRANSPORT_ERROR; 2101 } 2102 /* the same part to test ENE */ 2103 - info->MS_Status = *(struct MS_STATUS *)&buf[0]; 2104 2105 if 
(info->MS_Status.Insert && info->MS_Status.Ready) { 2106 printk(KERN_INFO "Insert = %x\n", info->MS_Status.Insert); ··· 2109 printk(KERN_INFO "IsMSPHG = %x\n", info->MS_Status.IsMSPHG); 2110 printk(KERN_INFO "WtP= %x\n", info->MS_Status.WtP); 2111 if (info->MS_Status.IsMSPro) { 2112 - MSP_BlockSize = (buf[6] << 8) | buf[7]; 2113 - MSP_UserAreaBlocks = (buf[10] << 8) | buf[11]; 2114 info->MSP_TotalBlock = MSP_BlockSize * MSP_UserAreaBlocks; 2115 } else { 2116 ms_card_init(us); /* Card is MS (to ms.c)*/ 2117 } 2118 usb_stor_dbg(us, "MS Init Code OK !!\n"); 2119 } else { 2120 - usb_stor_dbg(us, "MS Card Not Ready --- %x\n", buf[0]); 2121 return USB_STOR_TRANSPORT_ERROR; 2122 } 2123 ··· 2127 static int ene_sd_init(struct us_data *us) 2128 { 2129 int result; 2130 - u8 buf[0x200]; 2131 struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; 2132 struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; 2133 2134 usb_stor_dbg(us, "transport --- ENE_SDInit\n"); 2135 /* SD Init Part-1 */ ··· 2163 bcb->Flags = US_BULK_FLAG_IN; 2164 bcb->CDB[0] = 0xF1; 2165 2166 - result = ene_send_scsi_cmd(us, FDIR_READ, &buf, 0); 2167 if (result != USB_STOR_XFER_GOOD) { 2168 usb_stor_dbg(us, "Execution SD Init Code Fail !!\n"); 2169 return USB_STOR_TRANSPORT_ERROR; 2170 } 2171 2172 - info->SD_Status = *(struct SD_STATUS *)&buf[0]; 2173 if (info->SD_Status.Insert && info->SD_Status.Ready) { 2174 struct SD_STATUS *s = &info->SD_Status; 2175 2176 - ene_get_card_status(us, (unsigned char *)&buf); 2177 usb_stor_dbg(us, "Insert = %x\n", s->Insert); 2178 usb_stor_dbg(us, "Ready = %x\n", s->Ready); 2179 usb_stor_dbg(us, "IsMMC = %x\n", s->IsMMC); ··· 2181 usb_stor_dbg(us, "HiSpeed = %x\n", s->HiSpeed); 2182 usb_stor_dbg(us, "WtP = %x\n", s->WtP); 2183 } else { 2184 - usb_stor_dbg(us, "SD Card Not Ready --- %x\n", buf[0]); 2185 return USB_STOR_TRANSPORT_ERROR; 2186 } 2187 return USB_STOR_TRANSPORT_GOOD; ··· 2191 static int ene_init(struct us_data *us) 2192 { 2193 int result; 2194 - u8 misc_reg03 = 0; 2195 struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra); 2196 2197 - result = ene_get_card_type(us, REG_CARD_STATUS, &misc_reg03); 2198 if (result != USB_STOR_XFER_GOOD) 2199 return USB_STOR_TRANSPORT_ERROR; 2200 2201 if (misc_reg03 & 0x01) { 2202 if (!info->SD_Status.Ready) { 2203 result = ene_sd_init(us); ··· 2316 const struct usb_device_id *id) 2317 { 2318 int result; 2319 - u8 misc_reg03 = 0; 2320 struct us_data *us; 2321 2322 result = usb_stor_probe1(&us, intf, id, 2323 (id - ene_ub6250_usb_ids) + ene_ub6250_unusual_dev_list, ··· 2327 return result; 2328 2329 /* FIXME: where should the code alloc extra buf ? */ 2330 - if (!us->extra) { 2331 - us->extra = kzalloc(sizeof(struct ene_ub6250_info), GFP_KERNEL); 2332 - if (!us->extra) 2333 - return -ENOMEM; 2334 - us->extra_destructor = ene_ub6250_info_destructor; 2335 } 2336 2337 us->transport_name = "ene_ub6250"; ··· 2348 return result; 2349 2350 /* probe card type */ 2351 - result = ene_get_card_type(us, REG_CARD_STATUS, &misc_reg03); 2352 if (result != USB_STOR_XFER_GOOD) { 2353 usb_stor_disconnect(intf); 2354 return USB_STOR_TRANSPORT_ERROR; 2355 } 2356 2357 if (!(misc_reg03 & 0x01)) { 2358 pr_info("ums_eneub6250: This driver only supports SD/MS cards. " 2359 "It does not support SM cards.\n");
··· 446 #define SD_BLOCK_LEN 9 447 448 struct ene_ub6250_info { 449 + 450 + /* I/O bounce buffer */ 451 + u8 *bbuf; 452 + 453 /* for 6250 code */ 454 struct SD_STATUS SD_Status; 455 struct MS_STATUS MS_Status; ··· 493 494 static void ene_ub6250_info_destructor(void *extra) 495 { 496 + struct ene_ub6250_info *info = (struct ene_ub6250_info *) extra; 497 + 498 if (!extra) 499 return; 500 + kfree(info->bbuf); 501 } 502 503 static int ene_send_scsi_cmd(struct us_data *us, u8 fDir, void *buf, int use_sg) ··· 860 u8 PageNum, u32 *PageBuf, struct ms_lib_type_extdat *ExtraDat) 861 { 862 struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; 863 + struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; 864 + u8 *bbuf = info->bbuf; 865 int result; 866 u32 bn = PhyBlockAddr * 0x20 + PageNum; 867 868 result = ene_load_bincode(us, MS_RW_PATTERN); ··· 901 bcb->CDB[2] = (unsigned char)(PhyBlockAddr>>16); 902 bcb->CDB[6] = 0x01; 903 904 + result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0); 905 if (result != USB_STOR_XFER_GOOD) 906 return USB_STOR_TRANSPORT_ERROR; 907 ··· 910 ExtraDat->status0 = 0x10; /* Not yet,fireware support */ 911 912 ExtraDat->status1 = 0x00; /* Not yet,fireware support */ 913 + ExtraDat->ovrflg = bbuf[0]; 914 + ExtraDat->mngflg = bbuf[1]; 915 + ExtraDat->logadr = memstick_logaddr(bbuf[2], bbuf[3]); 916 917 return USB_STOR_TRANSPORT_GOOD; 918 } ··· 1332 u8 PageNum, struct ms_lib_type_extdat *ExtraDat) 1333 { 1334 struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; 1335 + struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; 1336 + u8 *bbuf = info->bbuf; 1337 int result; 1338 1339 memset(bcb, 0, sizeof(struct bulk_cb_wrap)); 1340 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); ··· 1347 bcb->CDB[2] = (unsigned char)(PhyBlock>>16); 1348 bcb->CDB[6] = 0x01; 1349 1350 + result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0); 1351 if (result != USB_STOR_XFER_GOOD) 1352 return USB_STOR_TRANSPORT_ERROR; 1353 ··· 1355 ExtraDat->intr = 0x80; /* Not yet, waiting for fireware support */ 1356 ExtraDat->status0 = 0x10; /* Not yet, waiting for fireware support */ 1357 ExtraDat->status1 = 0x00; /* Not yet, waiting for fireware support */ 1358 + ExtraDat->ovrflg = bbuf[0]; 1359 + ExtraDat->mngflg = bbuf[1]; 1360 + ExtraDat->logadr = memstick_logaddr(bbuf[2], bbuf[3]); 1361 1362 return USB_STOR_TRANSPORT_GOOD; 1363 } ··· 1556 u16 PhyBlock, newblk, i; 1557 u16 LogStart, LogEnde; 1558 struct ms_lib_type_extdat extdat; 1559 u32 count = 0, index = 0; 1560 struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; 1561 + u8 *bbuf = info->bbuf; 1562 1563 for (PhyBlock = 0; PhyBlock < info->MS_Lib.NumberOfPhyBlock;) { 1564 ms_lib_phy_to_log_range(PhyBlock, &LogStart, &LogEnde); ··· 1572 } 1573 1574 if (count == PhyBlock) { 1575 + ms_lib_read_extrablock(us, PhyBlock, 0, 0x80, 1576 + bbuf); 1577 count += 0x80; 1578 } 1579 index = (PhyBlock % 0x80) * 4; 1580 1581 + extdat.ovrflg = bbuf[index]; 1582 + extdat.mngflg = bbuf[index+1]; 1583 + extdat.logadr = memstick_logaddr(bbuf[index+2], 1584 + bbuf[index+3]); 1585 1586 if ((extdat.ovrflg & MS_REG_OVR_BKST) != MS_REG_OVR_BKST_OK) { 1587 ms_lib_setacquired_errorblock(us, PhyBlock); ··· 2062 { 2063 struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; 2064 int result; 2065 u16 MSP_BlockSize, MSP_UserAreaBlocks; 2066 struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; 2067 + u8 *bbuf = info->bbuf; 2068 2069 printk(KERN_INFO "transport --- ENE_MSInit\n"); 2070 ··· 2083 bcb->CDB[0] = 0xF1; 2084 
bcb->CDB[1] = 0x01; 2085 2086 + result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0); 2087 if (result != USB_STOR_XFER_GOOD) { 2088 printk(KERN_ERR "Execution MS Init Code Fail !!\n"); 2089 return USB_STOR_TRANSPORT_ERROR; 2090 } 2091 /* the same part to test ENE */ 2092 + info->MS_Status = *(struct MS_STATUS *) bbuf; 2093 2094 if (info->MS_Status.Insert && info->MS_Status.Ready) { 2095 printk(KERN_INFO "Insert = %x\n", info->MS_Status.Insert); ··· 2098 printk(KERN_INFO "IsMSPHG = %x\n", info->MS_Status.IsMSPHG); 2099 printk(KERN_INFO "WtP= %x\n", info->MS_Status.WtP); 2100 if (info->MS_Status.IsMSPro) { 2101 + MSP_BlockSize = (bbuf[6] << 8) | bbuf[7]; 2102 + MSP_UserAreaBlocks = (bbuf[10] << 8) | bbuf[11]; 2103 info->MSP_TotalBlock = MSP_BlockSize * MSP_UserAreaBlocks; 2104 } else { 2105 ms_card_init(us); /* Card is MS (to ms.c)*/ 2106 } 2107 usb_stor_dbg(us, "MS Init Code OK !!\n"); 2108 } else { 2109 + usb_stor_dbg(us, "MS Card Not Ready --- %x\n", bbuf[0]); 2110 return USB_STOR_TRANSPORT_ERROR; 2111 } 2112 ··· 2116 static int ene_sd_init(struct us_data *us) 2117 { 2118 int result; 2119 struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; 2120 struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; 2121 + u8 *bbuf = info->bbuf; 2122 2123 usb_stor_dbg(us, "transport --- ENE_SDInit\n"); 2124 /* SD Init Part-1 */ ··· 2152 bcb->Flags = US_BULK_FLAG_IN; 2153 bcb->CDB[0] = 0xF1; 2154 2155 + result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0); 2156 if (result != USB_STOR_XFER_GOOD) { 2157 usb_stor_dbg(us, "Execution SD Init Code Fail !!\n"); 2158 return USB_STOR_TRANSPORT_ERROR; 2159 } 2160 2161 + info->SD_Status = *(struct SD_STATUS *) bbuf; 2162 if (info->SD_Status.Insert && info->SD_Status.Ready) { 2163 struct SD_STATUS *s = &info->SD_Status; 2164 2165 + ene_get_card_status(us, bbuf); 2166 usb_stor_dbg(us, "Insert = %x\n", s->Insert); 2167 usb_stor_dbg(us, "Ready = %x\n", s->Ready); 2168 usb_stor_dbg(us, "IsMMC = %x\n", s->IsMMC); ··· 2170 usb_stor_dbg(us, "HiSpeed = %x\n", s->HiSpeed); 2171 usb_stor_dbg(us, "WtP = %x\n", s->WtP); 2172 } else { 2173 + usb_stor_dbg(us, "SD Card Not Ready --- %x\n", bbuf[0]); 2174 return USB_STOR_TRANSPORT_ERROR; 2175 } 2176 return USB_STOR_TRANSPORT_GOOD; ··· 2180 static int ene_init(struct us_data *us) 2181 { 2182 int result; 2183 + u8 misc_reg03; 2184 struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra); 2185 + u8 *bbuf = info->bbuf; 2186 2187 + result = ene_get_card_type(us, REG_CARD_STATUS, bbuf); 2188 if (result != USB_STOR_XFER_GOOD) 2189 return USB_STOR_TRANSPORT_ERROR; 2190 2191 + misc_reg03 = bbuf[0]; 2192 if (misc_reg03 & 0x01) { 2193 if (!info->SD_Status.Ready) { 2194 result = ene_sd_init(us); ··· 2303 const struct usb_device_id *id) 2304 { 2305 int result; 2306 + u8 misc_reg03; 2307 struct us_data *us; 2308 + struct ene_ub6250_info *info; 2309 2310 result = usb_stor_probe1(&us, intf, id, 2311 (id - ene_ub6250_usb_ids) + ene_ub6250_unusual_dev_list, ··· 2313 return result; 2314 2315 /* FIXME: where should the code alloc extra buf ? 
*/ 2316 + us->extra = kzalloc(sizeof(struct ene_ub6250_info), GFP_KERNEL); 2317 + if (!us->extra) 2318 + return -ENOMEM; 2319 + us->extra_destructor = ene_ub6250_info_destructor; 2320 + 2321 + info = (struct ene_ub6250_info *)(us->extra); 2322 + info->bbuf = kmalloc(512, GFP_KERNEL); 2323 + if (!info->bbuf) { 2324 + kfree(us->extra); 2325 + return -ENOMEM; 2326 } 2327 2328 us->transport_name = "ene_ub6250"; ··· 2329 return result; 2330 2331 /* probe card type */ 2332 + result = ene_get_card_type(us, REG_CARD_STATUS, info->bbuf); 2333 if (result != USB_STOR_XFER_GOOD) { 2334 usb_stor_disconnect(intf); 2335 return USB_STOR_TRANSPORT_ERROR; 2336 } 2337 2338 + misc_reg03 = info->bbuf[0]; 2339 if (!(misc_reg03 & 0x01)) { 2340 pr_info("ums_eneub6250: This driver only supports SD/MS cards. " 2341 "It does not support SM cards.\n");
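The ene_ub6250 rework moves the 0x200-byte on-stack buffers into a kmalloc()'d bounce buffer hung off the driver's extra data, since memory handed to USB transfers must be suitable for DMA and the stack is not. A minimal sketch of the allocate/free pairing; the example_info structure stands in for the driver's private data:

#include <linux/types.h>
#include <linux/slab.h>

struct example_info {
        u8 *bbuf;                       /* 512-byte DMA-able bounce buffer */
};

static struct example_info *example_alloc(void)
{
        struct example_info *info = kzalloc(sizeof(*info), GFP_KERNEL);

        if (!info)
                return NULL;

        info->bbuf = kmalloc(512, GFP_KERNEL);  /* kmalloc memory can be DMA-mapped */
        if (!info->bbuf) {
                kfree(info);
                return NULL;
        }
        return info;
}

static void example_free(struct example_info *info)
{
        if (!info)
                return;
        kfree(info->bbuf);
        kfree(info);
}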
+8 -3
drivers/usb/usbip/vhci_hcd.c
··· 235 236 static inline void hub_descriptor(struct usb_hub_descriptor *desc) 237 { 238 memset(desc, 0, sizeof(*desc)); 239 desc->bDescriptorType = USB_DT_HUB; 240 - desc->bDescLength = 9; 241 desc->wHubCharacteristics = cpu_to_le16( 242 HUB_CHAR_INDV_PORT_LPSM | HUB_CHAR_COMMON_OCPM); 243 desc->bNbrPorts = VHCI_HC_PORTS; 244 - desc->u.hs.DeviceRemovable[0] = 0xff; 245 - desc->u.hs.DeviceRemovable[1] = 0xff; 246 } 247 248 static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
··· 235 236 static inline void hub_descriptor(struct usb_hub_descriptor *desc) 237 { 238 + int width; 239 + 240 memset(desc, 0, sizeof(*desc)); 241 desc->bDescriptorType = USB_DT_HUB; 242 desc->wHubCharacteristics = cpu_to_le16( 243 HUB_CHAR_INDV_PORT_LPSM | HUB_CHAR_COMMON_OCPM); 244 + 245 desc->bNbrPorts = VHCI_HC_PORTS; 246 + BUILD_BUG_ON(VHCI_HC_PORTS > USB_MAXCHILDREN); 247 + width = desc->bNbrPorts / 8 + 1; 248 + desc->bDescLength = USB_DT_HUB_NONVAR_SIZE + 2 * width; 249 + memset(&desc->u.hs.DeviceRemovable[0], 0, width); 250 + memset(&desc->u.hs.DeviceRemovable[width], 0xff, width); 251 } 252 253 static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+3 -2
drivers/uwb/i1480/dfu/usb.c
··· 341 static 342 int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id) 343 { 344 struct i1480_usb *i1480_usb; 345 struct i1480 *i1480; 346 struct device *dev = &iface->dev; ··· 353 iface->cur_altsetting->desc.bInterfaceNumber); 354 goto error; 355 } 356 - if (iface->num_altsetting > 1 357 - && interface_to_usbdev(iface)->descriptor.idProduct == 0xbabe) { 358 /* Need altsetting #1 [HW QUIRK] or EP1 won't work */ 359 result = usb_set_interface(interface_to_usbdev(iface), 0, 1); 360 if (result < 0)
··· 341 static 342 int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id) 343 { 344 + struct usb_device *udev = interface_to_usbdev(iface); 345 struct i1480_usb *i1480_usb; 346 struct i1480 *i1480; 347 struct device *dev = &iface->dev; ··· 352 iface->cur_altsetting->desc.bInterfaceNumber); 353 goto error; 354 } 355 + if (iface->num_altsetting > 1 && 356 + le16_to_cpu(udev->descriptor.idProduct) == 0xbabe) { 357 /* Need altsetting #1 [HW QUIRK] or EP1 won't work */ 358 result = usb_set_interface(interface_to_usbdev(iface), 0, 1); 359 if (result < 0)
+1 -1
drivers/watchdog/Kconfig
··· 452 453 config ORION_WATCHDOG 454 tristate "Orion watchdog" 455 - depends on ARCH_ORION5X || ARCH_DOVE || MACH_DOVE || ARCH_MVEBU || COMPILE_TEST 456 depends on ARM 457 select WATCHDOG_CORE 458 help
··· 452 453 config ORION_WATCHDOG 454 tristate "Orion watchdog" 455 + depends on ARCH_ORION5X || ARCH_DOVE || MACH_DOVE || ARCH_MVEBU || (COMPILE_TEST && !ARCH_EBSA110) 456 depends on ARM 457 select WATCHDOG_CORE 458 help
+2 -1
drivers/watchdog/bcm_kona_wdt.c
··· 304 if (!wdt) 305 return -ENOMEM; 306 307 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 308 wdt->base = devm_ioremap_resource(dev, res); 309 if (IS_ERR(wdt->base)) ··· 318 return ret; 319 } 320 321 - spin_lock_init(&wdt->lock); 322 platform_set_drvdata(pdev, wdt); 323 watchdog_set_drvdata(&bcm_kona_wdt_wdd, wdt); 324 bcm_kona_wdt_wdd.parent = &pdev->dev;
··· 304 if (!wdt) 305 return -ENOMEM; 306 307 + spin_lock_init(&wdt->lock); 308 + 309 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 310 wdt->base = devm_ioremap_resource(dev, res); 311 if (IS_ERR(wdt->base)) ··· 316 return ret; 317 } 318 319 platform_set_drvdata(pdev, wdt); 320 watchdog_set_drvdata(&bcm_kona_wdt_wdd, wdt); 321 bcm_kona_wdt_wdd.parent = &pdev->dev;
+1 -1
drivers/watchdog/cadence_wdt.c
··· 49 /* Counter maximum value */ 50 #define CDNS_WDT_COUNTER_MAX 0xFFF 51 52 - static int wdt_timeout = CDNS_WDT_DEFAULT_TIMEOUT; 53 static int nowayout = WATCHDOG_NOWAYOUT; 54 55 module_param(wdt_timeout, int, 0);
··· 49 /* Counter maximum value */ 50 #define CDNS_WDT_COUNTER_MAX 0xFFF 51 52 + static int wdt_timeout; 53 static int nowayout = WATCHDOG_NOWAYOUT; 54 55 module_param(wdt_timeout, int, 0);
+11 -13
drivers/watchdog/iTCO_wdt.c
··· 306 307 iTCO_vendor_pre_keepalive(p->smi_res, wd_dev->timeout); 308 309 - /* Reload the timer by writing to the TCO Timer Counter register */ 310 - if (p->iTCO_version >= 2) { 311 - outw(0x01, TCO_RLD(p)); 312 - } else if (p->iTCO_version == 1) { 313 - /* Reset the timeout status bit so that the timer 314 - * needs to count down twice again before rebooting */ 315 - outw(0x0008, TCO1_STS(p)); /* write 1 to clear bit */ 316 317 outb(0x01, TCO_RLD(p)); 318 - } 319 320 spin_unlock(&p->io_lock); 321 return 0; ··· 327 unsigned char val8; 328 unsigned int tmrval; 329 330 - tmrval = seconds_to_ticks(p, t); 331 - 332 - /* For TCO v1 the timer counts down twice before rebooting */ 333 - if (p->iTCO_version == 1) 334 - tmrval /= 2; 335 336 /* from the specs: */ 337 /* "Values of 0h-3h are ignored and should not be attempted" */ ··· 381 spin_lock(&p->io_lock); 382 val16 = inw(TCO_RLD(p)); 383 val16 &= 0x3ff; 384 spin_unlock(&p->io_lock); 385 386 time_left = ticks_to_seconds(p, val16);
··· 306 307 iTCO_vendor_pre_keepalive(p->smi_res, wd_dev->timeout); 308 309 + /* Reset the timeout status bit so that the timer 310 + * needs to count down twice again before rebooting */ 311 + outw(0x0008, TCO1_STS(p)); /* write 1 to clear bit */ 312 313 + /* Reload the timer by writing to the TCO Timer Counter register */ 314 + if (p->iTCO_version >= 2) 315 + outw(0x01, TCO_RLD(p)); 316 + else if (p->iTCO_version == 1) 317 outb(0x01, TCO_RLD(p)); 318 319 spin_unlock(&p->io_lock); 320 return 0; ··· 328 unsigned char val8; 329 unsigned int tmrval; 330 331 + /* The timer counts down twice before rebooting */ 332 + tmrval = seconds_to_ticks(p, t) / 2; 333 334 /* from the specs: */ 335 /* "Values of 0h-3h are ignored and should not be attempted" */ ··· 385 spin_lock(&p->io_lock); 386 val16 = inw(TCO_RLD(p)); 387 val16 &= 0x3ff; 388 + if (!(inw(TCO1_STS(p)) & 0x0008)) 389 + val16 += (inw(TCOv2_TMR(p)) & 0x3ff); 390 spin_unlock(&p->io_lock); 391 392 time_left = ticks_to_seconds(p, val16);
+3
drivers/watchdog/pcwd_usb.c
··· 630 return -ENODEV; 631 } 632 633 /* check out the endpoint: it has to be Interrupt & IN */ 634 endpoint = &iface_desc->endpoint[0].desc; 635
··· 630 return -ENODEV; 631 } 632 633 + if (iface_desc->desc.bNumEndpoints < 1) 634 + return -ENODEV; 635 + 636 /* check out the endpoint: it has to be Interrupt & IN */ 637 endpoint = &iface_desc->endpoint[0].desc; 638
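The pcwd_usb fix checks bNumEndpoints before touching endpoint[0], so a device that exposes an interface without endpoints can no longer cause an out-of-bounds access during probe. A minimal sketch of that sanity check; the function name is illustrative:

#include <linux/usb.h>

static int example_check_int_in_ep(struct usb_interface *intf)
{
        struct usb_host_interface *alt = intf->cur_altsetting;
        const struct usb_endpoint_descriptor *ep;

        if (alt->desc.bNumEndpoints < 1)        /* do not trust the device */
                return -ENODEV;

        ep = &alt->endpoint[0].desc;
        if (!usb_endpoint_is_int_in(ep))        /* must be Interrupt IN */
                return -ENODEV;

        return 0;
}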
+57 -20
drivers/watchdog/sama5d4_wdt.c
··· 6 * Licensed under GPLv2. 7 */ 8 9 #include <linux/interrupt.h> 10 #include <linux/io.h> 11 #include <linux/kernel.h> ··· 30 struct watchdog_device wdd; 31 void __iomem *reg_base; 32 u32 mr; 33 }; 34 35 static int wdt_timeout = WDT_DEFAULT_TIMEOUT; ··· 46 "Watchdog cannot be stopped once started (default=" 47 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 48 49 #define wdt_read(wdt, field) \ 50 readl_relaxed((wdt)->reg_base + (field)) 51 52 - #define wdt_write(wtd, field, val) \ 53 - writel_relaxed((val), (wdt)->reg_base + (field)) 54 55 static int sama5d4_wdt_start(struct watchdog_device *wdd) 56 { ··· 114 wdt->mr &= ~AT91_WDT_WDD; 115 wdt->mr |= AT91_WDT_SET_WDV(value); 116 wdt->mr |= AT91_WDT_SET_WDD(value); 117 - wdt_write(wdt, AT91_WDT_MR, wdt->mr); 118 119 wdd->timeout = timeout; 120 ··· 179 180 static int sama5d4_wdt_init(struct sama5d4_wdt *wdt) 181 { 182 - struct watchdog_device *wdd = &wdt->wdd; 183 - u32 value = WDT_SEC2TICKS(wdd->timeout); 184 u32 reg; 185 - 186 /* 187 - * Because the fields WDV and WDD must not be modified when the WDDIS 188 - * bit is set, so clear the WDDIS bit before writing the WDT_MR. 189 */ 190 - reg = wdt_read(wdt, AT91_WDT_MR); 191 - reg &= ~AT91_WDT_WDDIS; 192 - wdt_write(wdt, AT91_WDT_MR, reg); 193 - 194 - wdt->mr |= AT91_WDT_SET_WDD(value); 195 - wdt->mr |= AT91_WDT_SET_WDV(value); 196 - 197 - wdt_write(wdt, AT91_WDT_MR, wdt->mr); 198 - 199 return 0; 200 } 201 ··· 204 struct resource *res; 205 void __iomem *regs; 206 u32 irq = 0; 207 int ret; 208 209 wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL); ··· 217 wdd->ops = &sama5d4_wdt_ops; 218 wdd->min_timeout = MIN_WDT_TIMEOUT; 219 wdd->max_timeout = MAX_WDT_TIMEOUT; 220 221 watchdog_set_drvdata(wdd, wdt); 222 ··· 254 dev_err(&pdev->dev, "unable to set timeout value\n"); 255 return ret; 256 } 257 258 ret = sama5d4_wdt_init(wdt); 259 if (ret) ··· 302 { 303 struct sama5d4_wdt *wdt = dev_get_drvdata(dev); 304 305 - wdt_write(wdt, AT91_WDT_MR, wdt->mr & ~AT91_WDT_WDDIS); 306 - if (wdt->mr & AT91_WDT_WDDIS) 307 - wdt_write(wdt, AT91_WDT_MR, wdt->mr); 308 309 return 0; 310 }
··· 6 * Licensed under GPLv2. 7 */ 8 9 + #include <linux/delay.h> 10 #include <linux/interrupt.h> 11 #include <linux/io.h> 12 #include <linux/kernel.h> ··· 29 struct watchdog_device wdd; 30 void __iomem *reg_base; 31 u32 mr; 32 + unsigned long last_ping; 33 }; 34 35 static int wdt_timeout = WDT_DEFAULT_TIMEOUT; ··· 44 "Watchdog cannot be stopped once started (default=" 45 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 46 47 + #define wdt_enabled (!(wdt->mr & AT91_WDT_WDDIS)) 48 + 49 #define wdt_read(wdt, field) \ 50 readl_relaxed((wdt)->reg_base + (field)) 51 52 + /* 4 slow clock periods is 4/32768 = 122.07µs*/ 53 + #define WDT_DELAY usecs_to_jiffies(123) 54 + 55 + static void wdt_write(struct sama5d4_wdt *wdt, u32 field, u32 val) 56 + { 57 + /* 58 + * WDT_CR and WDT_MR must not be modified within three slow clock 59 + * periods following a restart of the watchdog performed by a write 60 + * access in WDT_CR. 61 + */ 62 + while (time_before(jiffies, wdt->last_ping + WDT_DELAY)) 63 + usleep_range(30, 125); 64 + writel_relaxed(val, wdt->reg_base + field); 65 + wdt->last_ping = jiffies; 66 + } 67 + 68 + static void wdt_write_nosleep(struct sama5d4_wdt *wdt, u32 field, u32 val) 69 + { 70 + if (time_before(jiffies, wdt->last_ping + WDT_DELAY)) 71 + udelay(123); 72 + writel_relaxed(val, wdt->reg_base + field); 73 + wdt->last_ping = jiffies; 74 + } 75 76 static int sama5d4_wdt_start(struct watchdog_device *wdd) 77 { ··· 89 wdt->mr &= ~AT91_WDT_WDD; 90 wdt->mr |= AT91_WDT_SET_WDV(value); 91 wdt->mr |= AT91_WDT_SET_WDD(value); 92 + 93 + /* 94 + * WDDIS has to be 0 when updating WDD/WDV. The datasheet states: When 95 + * setting the WDDIS bit, and while it is set, the fields WDV and WDD 96 + * must not be modified. 97 + * If the watchdog is enabled, then the timeout can be updated. Else, 98 + * wait that the user enables it. 99 + */ 100 + if (wdt_enabled) 101 + wdt_write(wdt, AT91_WDT_MR, wdt->mr & ~AT91_WDT_WDDIS); 102 103 wdd->timeout = timeout; 104 ··· 145 146 static int sama5d4_wdt_init(struct sama5d4_wdt *wdt) 147 { 148 u32 reg; 149 /* 150 + * When booting and resuming, the bootloader may have changed the 151 + * watchdog configuration. 152 + * If the watchdog is already running, we can safely update it. 153 + * Else, we have to disable it properly. 154 */ 155 + if (wdt_enabled) { 156 + wdt_write_nosleep(wdt, AT91_WDT_MR, wdt->mr); 157 + } else { 158 + reg = wdt_read(wdt, AT91_WDT_MR); 159 + if (!(reg & AT91_WDT_WDDIS)) 160 + wdt_write_nosleep(wdt, AT91_WDT_MR, 161 + reg | AT91_WDT_WDDIS); 162 + } 163 return 0; 164 } 165 ··· 172 struct resource *res; 173 void __iomem *regs; 174 u32 irq = 0; 175 + u32 timeout; 176 int ret; 177 178 wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL); ··· 184 wdd->ops = &sama5d4_wdt_ops; 185 wdd->min_timeout = MIN_WDT_TIMEOUT; 186 wdd->max_timeout = MAX_WDT_TIMEOUT; 187 + wdt->last_ping = jiffies; 188 189 watchdog_set_drvdata(wdd, wdt); 190 ··· 220 dev_err(&pdev->dev, "unable to set timeout value\n"); 221 return ret; 222 } 223 + 224 + timeout = WDT_SEC2TICKS(wdd->timeout); 225 + 226 + wdt->mr |= AT91_WDT_SET_WDD(timeout); 227 + wdt->mr |= AT91_WDT_SET_WDV(timeout); 228 229 ret = sama5d4_wdt_init(wdt); 230 if (ret) ··· 263 { 264 struct sama5d4_wdt *wdt = dev_get_drvdata(dev); 265 266 + sama5d4_wdt_init(wdt); 267 268 return 0; 269 }
+1 -1
drivers/watchdog/wdt_pci.c
··· 332 pr_crit("Would Reboot\n"); 333 #else 334 pr_crit("Initiating system reboot\n"); 335 - emergency_restart(NULL); 336 #endif 337 #else 338 pr_crit("Reset in 5ms\n");
··· 332 pr_crit("Would Reboot\n"); 333 #else 334 pr_crit("Initiating system reboot\n"); 335 + emergency_restart(); 336 #endif 337 #else 338 pr_crit("Reset in 5ms\n");
+1 -3
drivers/watchdog/zx2967_wdt.c
··· 211 212 base = platform_get_resource(pdev, IORESOURCE_MEM, 0); 213 wdt->reg_base = devm_ioremap_resource(dev, base); 214 - if (IS_ERR(wdt->reg_base)) { 215 - dev_err(dev, "ioremap failed\n"); 216 return PTR_ERR(wdt->reg_base); 217 - } 218 219 zx2967_wdt_reset_sysctrl(dev); 220
··· 211 212 base = platform_get_resource(pdev, IORESOURCE_MEM, 0); 213 wdt->reg_base = devm_ioremap_resource(dev, base); 214 + if (IS_ERR(wdt->reg_base)) 215 return PTR_ERR(wdt->reg_base); 216 217 zx2967_wdt_reset_sysctrl(dev); 218
+14 -16
fs/cifs/cifsacl.c
··· 1135 u32 acllen = 0; 1136 int rc = 0; 1137 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb); 1138 - struct cifs_tcon *tcon; 1139 1140 cifs_dbg(NOISY, "converting ACL to mode for %s\n", path); 1141 1142 if (IS_ERR(tlink)) 1143 return PTR_ERR(tlink); 1144 - tcon = tlink_tcon(tlink); 1145 1146 - if (pfid && (tcon->ses->server->ops->get_acl_by_fid)) 1147 - pntsd = tcon->ses->server->ops->get_acl_by_fid(cifs_sb, pfid, 1148 - &acllen); 1149 - else if (tcon->ses->server->ops->get_acl) 1150 - pntsd = tcon->ses->server->ops->get_acl(cifs_sb, inode, path, 1151 - &acllen); 1152 else { 1153 cifs_put_tlink(tlink); 1154 return -EOPNOTSUPP; ··· 1180 struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */ 1181 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 1182 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb); 1183 - struct cifs_tcon *tcon; 1184 1185 if (IS_ERR(tlink)) 1186 return PTR_ERR(tlink); 1187 - tcon = tlink_tcon(tlink); 1188 1189 cifs_dbg(NOISY, "set ACL from mode for %s\n", path); 1190 1191 /* Get the security descriptor */ 1192 1193 - if (tcon->ses->server->ops->get_acl == NULL) { 1194 cifs_put_tlink(tlink); 1195 return -EOPNOTSUPP; 1196 } 1197 1198 - pntsd = tcon->ses->server->ops->get_acl(cifs_sb, inode, path, 1199 - &secdesclen); 1200 if (IS_ERR(pntsd)) { 1201 rc = PTR_ERR(pntsd); 1202 cifs_dbg(VFS, "%s: error %d getting sec desc\n", __func__, rc); ··· 1223 1224 cifs_dbg(NOISY, "build_sec_desc rc: %d\n", rc); 1225 1226 - if (tcon->ses->server->ops->set_acl == NULL) 1227 rc = -EOPNOTSUPP; 1228 1229 if (!rc) { 1230 /* Set the security descriptor */ 1231 - rc = tcon->ses->server->ops->set_acl(pnntsd, secdesclen, inode, 1232 - path, aclflag); 1233 cifs_dbg(NOISY, "set_cifs_acl rc: %d\n", rc); 1234 } 1235 cifs_put_tlink(tlink);
··· 1135 u32 acllen = 0; 1136 int rc = 0; 1137 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb); 1138 + struct smb_version_operations *ops; 1139 1140 cifs_dbg(NOISY, "converting ACL to mode for %s\n", path); 1141 1142 if (IS_ERR(tlink)) 1143 return PTR_ERR(tlink); 1144 1145 + ops = tlink_tcon(tlink)->ses->server->ops; 1146 + 1147 + if (pfid && (ops->get_acl_by_fid)) 1148 + pntsd = ops->get_acl_by_fid(cifs_sb, pfid, &acllen); 1149 + else if (ops->get_acl) 1150 + pntsd = ops->get_acl(cifs_sb, inode, path, &acllen); 1151 else { 1152 cifs_put_tlink(tlink); 1153 return -EOPNOTSUPP; ··· 1181 struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */ 1182 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 1183 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb); 1184 + struct smb_version_operations *ops; 1185 1186 if (IS_ERR(tlink)) 1187 return PTR_ERR(tlink); 1188 + 1189 + ops = tlink_tcon(tlink)->ses->server->ops; 1190 1191 cifs_dbg(NOISY, "set ACL from mode for %s\n", path); 1192 1193 /* Get the security descriptor */ 1194 1195 + if (ops->get_acl == NULL) { 1196 cifs_put_tlink(tlink); 1197 return -EOPNOTSUPP; 1198 } 1199 1200 + pntsd = ops->get_acl(cifs_sb, inode, path, &secdesclen); 1201 if (IS_ERR(pntsd)) { 1202 rc = PTR_ERR(pntsd); 1203 cifs_dbg(VFS, "%s: error %d getting sec desc\n", __func__, rc); ··· 1224 1225 cifs_dbg(NOISY, "build_sec_desc rc: %d\n", rc); 1226 1227 + if (ops->set_acl == NULL) 1228 rc = -EOPNOTSUPP; 1229 1230 if (!rc) { 1231 /* Set the security descriptor */ 1232 + rc = ops->set_acl(pnntsd, secdesclen, inode, path, aclflag); 1233 cifs_dbg(NOISY, "set_cifs_acl rc: %d\n", rc); 1234 } 1235 cifs_put_tlink(tlink);
+1 -1
fs/cifs/cifsglob.h
··· 418 int (*validate_negotiate)(const unsigned int, struct cifs_tcon *); 419 ssize_t (*query_all_EAs)(const unsigned int, struct cifs_tcon *, 420 const unsigned char *, const unsigned char *, char *, 421 - size_t, const struct nls_table *, int); 422 int (*set_EA)(const unsigned int, struct cifs_tcon *, const char *, 423 const char *, const void *, const __u16, 424 const struct nls_table *, int);
··· 418 int (*validate_negotiate)(const unsigned int, struct cifs_tcon *); 419 ssize_t (*query_all_EAs)(const unsigned int, struct cifs_tcon *, 420 const unsigned char *, const unsigned char *, char *, 421 + size_t, struct cifs_sb_info *); 422 int (*set_EA)(const unsigned int, struct cifs_tcon *, const char *, 423 const char *, const void *, const __u16, 424 const struct nls_table *, int);
+1 -2
fs/cifs/cifsproto.h
··· 480 extern ssize_t CIFSSMBQAllEAs(const unsigned int xid, struct cifs_tcon *tcon, 481 const unsigned char *searchName, 482 const unsigned char *ea_name, char *EAData, 483 - size_t bufsize, const struct nls_table *nls_codepage, 484 - int remap_special_chars); 485 extern int CIFSSMBSetEA(const unsigned int xid, struct cifs_tcon *tcon, 486 const char *fileName, const char *ea_name, 487 const void *ea_value, const __u16 ea_value_len,
··· 480 extern ssize_t CIFSSMBQAllEAs(const unsigned int xid, struct cifs_tcon *tcon, 481 const unsigned char *searchName, 482 const unsigned char *ea_name, char *EAData, 483 + size_t bufsize, struct cifs_sb_info *cifs_sb); 484 extern int CIFSSMBSetEA(const unsigned int xid, struct cifs_tcon *tcon, 485 const char *fileName, const char *ea_name, 486 const void *ea_value, const __u16 ea_value_len,
+3 -8
fs/cifs/cifssmb.c
··· 697 { 698 struct TCP_Server_Info *server = mid->callback_data; 699 700 - mutex_lock(&server->srv_mutex); 701 DeleteMidQEntry(mid); 702 - mutex_unlock(&server->srv_mutex); 703 add_credits(server, 1, CIFS_ECHO_OP); 704 } 705 ··· 1597 } 1598 1599 queue_work(cifsiod_wq, &rdata->work); 1600 - mutex_lock(&server->srv_mutex); 1601 DeleteMidQEntry(mid); 1602 - mutex_unlock(&server->srv_mutex); 1603 add_credits(server, 1, 0); 1604 } 1605 ··· 2054 { 2055 struct cifs_writedata *wdata = mid->callback_data; 2056 struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink); 2057 - struct TCP_Server_Info *server = tcon->ses->server; 2058 unsigned int written; 2059 WRITE_RSP *smb = (WRITE_RSP *)mid->resp_buf; 2060 ··· 2090 } 2091 2092 queue_work(cifsiod_wq, &wdata->work); 2093 - mutex_lock(&server->srv_mutex); 2094 DeleteMidQEntry(mid); 2095 - mutex_unlock(&server->srv_mutex); 2096 add_credits(tcon->ses->server, 1, 0); 2097 } 2098 ··· 6069 CIFSSMBQAllEAs(const unsigned int xid, struct cifs_tcon *tcon, 6070 const unsigned char *searchName, const unsigned char *ea_name, 6071 char *EAData, size_t buf_size, 6072 - const struct nls_table *nls_codepage, int remap) 6073 { 6074 /* BB assumes one setup word */ 6075 TRANSACTION2_QPI_REQ *pSMB = NULL; 6076 TRANSACTION2_QPI_RSP *pSMBr = NULL; 6077 int rc = 0; 6078 int bytes_returned; 6079 int list_len;
··· 697 { 698 struct TCP_Server_Info *server = mid->callback_data; 699 700 DeleteMidQEntry(mid); 701 add_credits(server, 1, CIFS_ECHO_OP); 702 } 703 ··· 1599 } 1600 1601 queue_work(cifsiod_wq, &rdata->work); 1602 DeleteMidQEntry(mid); 1603 add_credits(server, 1, 0); 1604 } 1605 ··· 2058 { 2059 struct cifs_writedata *wdata = mid->callback_data; 2060 struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink); 2061 unsigned int written; 2062 WRITE_RSP *smb = (WRITE_RSP *)mid->resp_buf; 2063 ··· 2095 } 2096 2097 queue_work(cifsiod_wq, &wdata->work); 2098 DeleteMidQEntry(mid); 2099 add_credits(tcon->ses->server, 1, 0); 2100 } 2101 ··· 6076 CIFSSMBQAllEAs(const unsigned int xid, struct cifs_tcon *tcon, 6077 const unsigned char *searchName, const unsigned char *ea_name, 6078 char *EAData, size_t buf_size, 6079 + struct cifs_sb_info *cifs_sb) 6080 { 6081 /* BB assumes one setup word */ 6082 TRANSACTION2_QPI_REQ *pSMB = NULL; 6083 TRANSACTION2_QPI_RSP *pSMBr = NULL; 6084 + int remap = cifs_remap(cifs_sb); 6085 + struct nls_table *nls_codepage = cifs_sb->local_nls; 6086 int rc = 0; 6087 int bytes_returned; 6088 int list_len;
+1 -1
fs/cifs/file.c
··· 582 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); 583 int rc = 0; 584 585 - down_read(&cinode->lock_sem); 586 if (cinode->can_cache_brlcks) { 587 /* can cache locks - no need to relock */ 588 up_read(&cinode->lock_sem);
··· 582 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); 583 int rc = 0; 584 585 + down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING); 586 if (cinode->can_cache_brlcks) { 587 /* can cache locks - no need to relock */ 588 up_read(&cinode->lock_sem);
+1 -2
fs/cifs/inode.c
··· 563 564 rc = tcon->ses->server->ops->query_all_EAs(xid, tcon, path, 565 "SETFILEBITS", ea_value, 4 /* size of buf */, 566 - cifs_sb->local_nls, 567 - cifs_remap(cifs_sb)); 568 cifs_put_tlink(tlink); 569 if (rc < 0) 570 return (int)rc;
··· 563 564 rc = tcon->ses->server->ops->query_all_EAs(xid, tcon, path, 565 "SETFILEBITS", ea_value, 4 /* size of buf */, 566 + cifs_sb); 567 cifs_put_tlink(tlink); 568 if (rc < 0) 569 return (int)rc;
+9 -12
fs/cifs/smb2pdu.c
··· 1240 goto tcon_exit; 1241 } 1242 1243 - if (rsp->ShareType & SMB2_SHARE_TYPE_DISK) 1244 cifs_dbg(FYI, "connection to disk share\n"); 1245 - else if (rsp->ShareType & SMB2_SHARE_TYPE_PIPE) { 1246 tcon->ipc = true; 1247 cifs_dbg(FYI, "connection to pipe share\n"); 1248 - } else if (rsp->ShareType & SMB2_SHARE_TYPE_PRINT) { 1249 - tcon->print = true; 1250 cifs_dbg(FYI, "connection to printer\n"); 1251 - } else { 1252 cifs_dbg(VFS, "unknown share type %d\n", rsp->ShareType); 1253 rc = -EOPNOTSUPP; 1254 goto tcon_error_exit; ··· 2177 if (mid->mid_state == MID_RESPONSE_RECEIVED) 2178 credits_received = le16_to_cpu(rsp->hdr.sync_hdr.CreditRequest); 2179 2180 - mutex_lock(&server->srv_mutex); 2181 DeleteMidQEntry(mid); 2182 - mutex_unlock(&server->srv_mutex); 2183 add_credits(server, credits_received, CIFS_ECHO_OP); 2184 } 2185 ··· 2435 cifs_stats_fail_inc(tcon, SMB2_READ_HE); 2436 2437 queue_work(cifsiod_wq, &rdata->work); 2438 - mutex_lock(&server->srv_mutex); 2439 DeleteMidQEntry(mid); 2440 - mutex_unlock(&server->srv_mutex); 2441 add_credits(server, credits_received, 0); 2442 } 2443 ··· 2594 { 2595 struct cifs_writedata *wdata = mid->callback_data; 2596 struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink); 2597 - struct TCP_Server_Info *server = tcon->ses->server; 2598 unsigned int written; 2599 struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf; 2600 unsigned int credits_received = 1; ··· 2633 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE); 2634 2635 queue_work(cifsiod_wq, &wdata->work); 2636 - mutex_lock(&server->srv_mutex); 2637 DeleteMidQEntry(mid); 2638 - mutex_unlock(&server->srv_mutex); 2639 add_credits(tcon->ses->server, credits_received, 0); 2640 } 2641
··· 1240 goto tcon_exit; 1241 } 1242 1243 + switch (rsp->ShareType) { 1244 + case SMB2_SHARE_TYPE_DISK: 1245 cifs_dbg(FYI, "connection to disk share\n"); 1246 + break; 1247 + case SMB2_SHARE_TYPE_PIPE: 1248 tcon->ipc = true; 1249 cifs_dbg(FYI, "connection to pipe share\n"); 1250 + break; 1251 + case SMB2_SHARE_TYPE_PRINT: 1252 + tcon->ipc = true; 1253 cifs_dbg(FYI, "connection to printer\n"); 1254 + break; 1255 + default: 1256 cifs_dbg(VFS, "unknown share type %d\n", rsp->ShareType); 1257 rc = -EOPNOTSUPP; 1258 goto tcon_error_exit; ··· 2173 if (mid->mid_state == MID_RESPONSE_RECEIVED) 2174 credits_received = le16_to_cpu(rsp->hdr.sync_hdr.CreditRequest); 2175 2176 DeleteMidQEntry(mid); 2177 add_credits(server, credits_received, CIFS_ECHO_OP); 2178 } 2179 ··· 2433 cifs_stats_fail_inc(tcon, SMB2_READ_HE); 2434 2435 queue_work(cifsiod_wq, &rdata->work); 2436 DeleteMidQEntry(mid); 2437 add_credits(server, credits_received, 0); 2438 } 2439 ··· 2594 { 2595 struct cifs_writedata *wdata = mid->callback_data; 2596 struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink); 2597 unsigned int written; 2598 struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf; 2599 unsigned int credits_received = 1; ··· 2634 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE); 2635 2636 queue_work(cifsiod_wq, &wdata->work); 2637 DeleteMidQEntry(mid); 2638 add_credits(tcon->ses->server, credits_received, 0); 2639 } 2640
+1 -3
fs/cifs/transport.c
··· 94 now = jiffies; 95 /* commands taking longer than one second are indications that 96 something is wrong, unless it is quite a slow link or server */ 97 - if ((now - midEntry->when_alloc) > HZ) { 98 if ((cifsFYI & CIFS_TIMER) && (midEntry->command != command)) { 99 pr_debug(" CIFS slow rsp: cmd %d mid %llu", 100 midEntry->command, midEntry->mid); ··· 613 } 614 spin_unlock(&GlobalMid_Lock); 615 616 - mutex_lock(&server->srv_mutex); 617 DeleteMidQEntry(mid); 618 - mutex_unlock(&server->srv_mutex); 619 return rc; 620 } 621
··· 94 now = jiffies; 95 /* commands taking longer than one second are indications that 96 something is wrong, unless it is quite a slow link or server */ 97 + if (time_after(now, midEntry->when_alloc + HZ)) { 98 if ((cifsFYI & CIFS_TIMER) && (midEntry->command != command)) { 99 pr_debug(" CIFS slow rsp: cmd %d mid %llu", 100 midEntry->command, midEntry->mid); ··· 613 } 614 spin_unlock(&GlobalMid_Lock); 615 616 DeleteMidQEntry(mid); 617 return rc; 618 } 619
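The transport.c hunk replaces the open-coded elapsed-time test with time_after(), the standard jiffies comparison helper, which type-checks its arguments and stays correct across jiffies wrap-around. A minimal sketch of a one-second threshold; the function name is illustrative:

#include <linux/jiffies.h>
#include <linux/types.h>

static bool took_longer_than_a_second(unsigned long start)
{
        /* time_after(a, b) is true when a is later than b, wrap-safe */
        return time_after(jiffies, start + HZ);
}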
+2 -4
fs/cifs/xattr.c
··· 235 236 if (pTcon->ses->server->ops->query_all_EAs) 237 rc = pTcon->ses->server->ops->query_all_EAs(xid, pTcon, 238 - full_path, name, value, size, 239 - cifs_sb->local_nls, cifs_remap(cifs_sb)); 240 break; 241 242 case XATTR_CIFS_ACL: { ··· 335 336 if (pTcon->ses->server->ops->query_all_EAs) 337 rc = pTcon->ses->server->ops->query_all_EAs(xid, pTcon, 338 - full_path, NULL, data, buf_size, 339 - cifs_sb->local_nls, cifs_remap(cifs_sb)); 340 list_ea_exit: 341 kfree(full_path); 342 free_xid(xid);
··· 235 236 if (pTcon->ses->server->ops->query_all_EAs) 237 rc = pTcon->ses->server->ops->query_all_EAs(xid, pTcon, 238 + full_path, name, value, size, cifs_sb); 239 break; 240 241 case XATTR_CIFS_ACL: { ··· 336 337 if (pTcon->ses->server->ops->query_all_EAs) 338 rc = pTcon->ses->server->ops->query_all_EAs(xid, pTcon, 339 + full_path, NULL, data, buf_size, cifs_sb); 340 list_ea_exit: 341 kfree(full_path); 342 free_xid(xid);
+2 -2
fs/ext2/inode.c
··· 817 iomap->bdev = bdev; 818 iomap->offset = (u64)first_block << blkbits; 819 if (blk_queue_dax(bdev->bd_queue)) 820 - iomap->dax_dev = dax_get_by_host(bdev->bd_disk->disk_name); 821 else 822 iomap->dax_dev = NULL; 823 ··· 841 ext2_iomap_end(struct inode *inode, loff_t offset, loff_t length, 842 ssize_t written, unsigned flags, struct iomap *iomap) 843 { 844 - put_dax(iomap->dax_dev); 845 if (iomap->type == IOMAP_MAPPED && 846 written < length && 847 (flags & IOMAP_WRITE))
··· 817 iomap->bdev = bdev; 818 iomap->offset = (u64)first_block << blkbits; 819 if (blk_queue_dax(bdev->bd_queue)) 820 + iomap->dax_dev = fs_dax_get_by_host(bdev->bd_disk->disk_name); 821 else 822 iomap->dax_dev = NULL; 823 ··· 841 ext2_iomap_end(struct inode *inode, loff_t offset, loff_t length, 842 ssize_t written, unsigned flags, struct iomap *iomap) 843 { 844 + fs_put_dax(iomap->dax_dev); 845 if (iomap->type == IOMAP_MAPPED && 846 written < length && 847 (flags & IOMAP_WRITE))
+2 -2
fs/ext4/inode.c
··· 3412 bdev = inode->i_sb->s_bdev; 3413 iomap->bdev = bdev; 3414 if (blk_queue_dax(bdev->bd_queue)) 3415 - iomap->dax_dev = dax_get_by_host(bdev->bd_disk->disk_name); 3416 else 3417 iomap->dax_dev = NULL; 3418 iomap->offset = first_block << blkbits; ··· 3447 int blkbits = inode->i_blkbits; 3448 bool truncate = false; 3449 3450 - put_dax(iomap->dax_dev); 3451 if (!(flags & IOMAP_WRITE) || (flags & IOMAP_FAULT)) 3452 return 0; 3453
··· 3412 bdev = inode->i_sb->s_bdev; 3413 iomap->bdev = bdev; 3414 if (blk_queue_dax(bdev->bd_queue)) 3415 + iomap->dax_dev = fs_dax_get_by_host(bdev->bd_disk->disk_name); 3416 else 3417 iomap->dax_dev = NULL; 3418 iomap->offset = first_block << blkbits; ··· 3447 int blkbits = inode->i_blkbits; 3448 bool truncate = false; 3449 3450 + fs_put_dax(iomap->dax_dev); 3451 if (!(flags & IOMAP_WRITE) || (flags & IOMAP_FAULT)) 3452 return 0; 3453
+8 -1
fs/fuse/inode.c
··· 975 int err; 976 char *suffix = ""; 977 978 - if (sb->s_bdev) 979 suffix = "-fuseblk"; 980 err = super_setup_bdi_name(sb, "%u:%u%s", MAJOR(fc->dev), 981 MINOR(fc->dev), suffix); 982 if (err)
··· 975 int err; 976 char *suffix = ""; 977 978 + if (sb->s_bdev) { 979 suffix = "-fuseblk"; 980 + /* 981 + * sb->s_bdi points to blkdev's bdi however we want to redirect 982 + * it to our private bdi... 983 + */ 984 + bdi_put(sb->s_bdi); 985 + sb->s_bdi = &noop_backing_dev_info; 986 + } 987 err = super_setup_bdi_name(sb, "%u:%u%s", MAJOR(fc->dev), 988 MINOR(fc->dev), suffix); 989 if (err)
+2 -2
fs/xfs/xfs_iomap.c
··· 1068 /* optionally associate a dax device with the iomap bdev */ 1069 bdev = iomap->bdev; 1070 if (blk_queue_dax(bdev->bd_queue)) 1071 - iomap->dax_dev = dax_get_by_host(bdev->bd_disk->disk_name); 1072 else 1073 iomap->dax_dev = NULL; 1074 ··· 1149 unsigned flags, 1150 struct iomap *iomap) 1151 { 1152 - put_dax(iomap->dax_dev); 1153 if ((flags & IOMAP_WRITE) && iomap->type == IOMAP_DELALLOC) 1154 return xfs_file_iomap_end_delalloc(XFS_I(inode), offset, 1155 length, written, iomap);
··· 1068 /* optionally associate a dax device with the iomap bdev */ 1069 bdev = iomap->bdev; 1070 if (blk_queue_dax(bdev->bd_queue)) 1071 + iomap->dax_dev = fs_dax_get_by_host(bdev->bd_disk->disk_name); 1072 else 1073 iomap->dax_dev = NULL; 1074 ··· 1149 unsigned flags, 1150 struct iomap *iomap) 1151 { 1152 + fs_put_dax(iomap->dax_dev); 1153 if ((flags & IOMAP_WRITE) && iomap->type == IOMAP_DELALLOC) 1154 return xfs_file_iomap_end_delalloc(XFS_I(inode), offset, 1155 length, written, iomap);
+4 -1
include/kvm/arm_vgic.h
··· 195 /* either a GICv2 CPU interface */ 196 gpa_t vgic_cpu_base; 197 /* or a number of GICv3 redistributor regions */ 198 - gpa_t vgic_redist_base; 199 }; 200 201 /* distributor enabled */
··· 195 /* either a GICv2 CPU interface */ 196 gpa_t vgic_cpu_base; 197 /* or a number of GICv3 redistributor regions */ 198 + struct { 199 + gpa_t vgic_redist_base; 200 + gpa_t vgic_redist_free_offset; 201 + }; 202 }; 203 204 /* distributor enabled */
+4
include/linux/bpf_verifier.h
··· 40 */ 41 s64 min_value; 42 u64 max_value; 43 }; 44 45 enum bpf_stack_slot_type { ··· 90 struct bpf_prog *prog; /* eBPF program being verified */ 91 struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */ 92 int stack_size; /* number of states to be processed */ 93 struct bpf_verifier_state cur_state; /* current verifier state */ 94 struct bpf_verifier_state_list **explored_states; /* search pruning optimization */ 95 const struct bpf_ext_analyzer_ops *analyzer_ops; /* external analyzer ops */
··· 40 */ 41 s64 min_value; 42 u64 max_value; 43 + u32 min_align; 44 + u32 aux_off; 45 + u32 aux_off_align; 46 }; 47 48 enum bpf_stack_slot_type { ··· 87 struct bpf_prog *prog; /* eBPF program being verified */ 88 struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */ 89 int stack_size; /* number of states to be processed */ 90 + bool strict_alignment; /* perform strict pointer alignment checks */ 91 struct bpf_verifier_state cur_state; /* current verifier state */ 92 struct bpf_verifier_state_list **explored_states; /* search pruning optimization */ 93 const struct bpf_ext_analyzer_ops *analyzer_ops; /* external analyzer ops */
+34 -14
include/linux/dax.h
··· 18 void **, pfn_t *); 19 }; 20 21 - int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff); 22 - #if IS_ENABLED(CONFIG_FS_DAX) 23 - int __bdev_dax_supported(struct super_block *sb, int blocksize); 24 - static inline int bdev_dax_supported(struct super_block *sb, int blocksize) 25 - { 26 - return __bdev_dax_supported(sb, blocksize); 27 - } 28 - #else 29 - static inline int bdev_dax_supported(struct super_block *sb, int blocksize) 30 - { 31 - return -EOPNOTSUPP; 32 - } 33 - #endif 34 - 35 #if IS_ENABLED(CONFIG_DAX) 36 struct dax_device *dax_get_by_host(const char *host); 37 void put_dax(struct dax_device *dax_dev); ··· 28 } 29 30 static inline void put_dax(struct dax_device *dax_dev) 31 { 32 } 33 #endif
··· 18 void **, pfn_t *); 19 }; 20 21 #if IS_ENABLED(CONFIG_DAX) 22 struct dax_device *dax_get_by_host(const char *host); 23 void put_dax(struct dax_device *dax_dev); ··· 42 } 43 44 static inline void put_dax(struct dax_device *dax_dev) 45 + { 46 + } 47 + #endif 48 + 49 + int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff); 50 + #if IS_ENABLED(CONFIG_FS_DAX) 51 + int __bdev_dax_supported(struct super_block *sb, int blocksize); 52 + static inline int bdev_dax_supported(struct super_block *sb, int blocksize) 53 + { 54 + return __bdev_dax_supported(sb, blocksize); 55 + } 56 + 57 + static inline struct dax_device *fs_dax_get_by_host(const char *host) 58 + { 59 + return dax_get_by_host(host); 60 + } 61 + 62 + static inline void fs_put_dax(struct dax_device *dax_dev) 63 + { 64 + put_dax(dax_dev); 65 + } 66 + 67 + #else 68 + static inline int bdev_dax_supported(struct super_block *sb, int blocksize) 69 + { 70 + return -EOPNOTSUPP; 71 + } 72 + 73 + static inline struct dax_device *fs_dax_get_by_host(const char *host) 74 + { 75 + return NULL; 76 + } 77 + 78 + static inline void fs_put_dax(struct dax_device *dax_dev) 79 { 80 } 81 #endif
+3
include/linux/kprobes.h
··· 349 int write, void __user *buffer, 350 size_t *length, loff_t *ppos); 351 #endif 352 #endif /* CONFIG_OPTPROBES */ 353 #ifdef CONFIG_KPROBES_ON_FTRACE 354 extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
··· 349 int write, void __user *buffer, 350 size_t *length, loff_t *ppos); 351 #endif 352 + extern void wait_for_kprobe_optimizer(void); 353 + #else 354 + static inline void wait_for_kprobe_optimizer(void) { } 355 #endif /* CONFIG_OPTPROBES */ 356 #ifdef CONFIG_KPROBES_ON_FTRACE 357 extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+3 -1
include/linux/mlx5/fs.h
··· 109 int max_fte; 110 u32 level; 111 u32 flags; 112 - u32 underlay_qpn; 113 }; 114 115 struct mlx5_flow_table * ··· 166 void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter); 167 void mlx5_fc_query_cached(struct mlx5_fc *counter, 168 u64 *bytes, u64 *packets, u64 *lastuse); 169 #endif
··· 109 int max_fte; 110 u32 level; 111 u32 flags; 112 }; 113 114 struct mlx5_flow_table * ··· 167 void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter); 168 void mlx5_fc_query_cached(struct mlx5_fc *counter, 169 u64 *bytes, u64 *packets, u64 *lastuse); 170 + int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn); 171 + int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn); 172 + 173 #endif
+6 -2
include/linux/netdevice.h
··· 3296 int dev_get_phys_port_name(struct net_device *dev, 3297 char *name, size_t len); 3298 int dev_change_proto_down(struct net_device *dev, bool proto_down); 3299 - int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, 3300 - int fd, u32 flags); 3301 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev); 3302 struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, 3303 struct netdev_queue *txq, int *ret); 3304 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); 3305 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); 3306 bool is_skb_forwardable(const struct net_device *dev,
··· 3296 int dev_get_phys_port_name(struct net_device *dev, 3297 char *name, size_t len); 3298 int dev_change_proto_down(struct net_device *dev, bool proto_down); 3299 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev); 3300 struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, 3301 struct netdev_queue *txq, int *ret); 3302 + 3303 + typedef int (*xdp_op_t)(struct net_device *dev, struct netdev_xdp *xdp); 3304 + int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, 3305 + int fd, u32 flags); 3306 + bool __dev_xdp_attached(struct net_device *dev, xdp_op_t xdp_op); 3307 + 3308 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); 3309 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); 3310 bool is_skb_forwardable(const struct net_device *dev,
+4 -12
include/linux/nvme-fc-driver.h
··· 27 28 /* FC Port role bitmask - can merge with FC Port Roles in fc transport */ 29 #define FC_PORT_ROLE_NVME_INITIATOR 0x10 30 - #define FC_PORT_ROLE_NVME_TARGET 0x11 31 - #define FC_PORT_ROLE_NVME_DISCOVERY 0x12 32 33 34 /** ··· 642 * sequence in one LLDD operation. Errors during Data 643 * sequence transmit must not allow RSP sequence to be sent. 644 */ 645 - NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED = (1 << 1), 646 - /* Bit 1: When 0, the LLDD will deliver FCP CMD 647 - * on the CPU it should be affinitized to. Thus work will 648 - * be scheduled on the cpu received on. When 1, the LLDD 649 - * may not deliver the CMD on the CPU it should be worked 650 - * on. The transport should pick a cpu to schedule the work 651 - * on. 652 - */ 653 - NVMET_FCTGTFEAT_CMD_IN_ISR = (1 << 2), 654 /* Bit 2: When 0, the LLDD is calling the cmd rcv handler 655 * in a non-isr context, allowing the transport to finish 656 * op completion in the calling context. When 1, the LLDD ··· 650 * requiring the transport to transition to a workqueue 651 * for op completion. 652 */ 653 - NVMET_FCTGTFEAT_OPDONE_IN_ISR = (1 << 3), 654 /* Bit 3: When 0, the LLDD is calling the op done handler 655 * in a non-isr context, allowing the transport to finish 656 * op completion in the calling context. When 1, the LLDD
··· 27 28 /* FC Port role bitmask - can merge with FC Port Roles in fc transport */ 29 #define FC_PORT_ROLE_NVME_INITIATOR 0x10 30 + #define FC_PORT_ROLE_NVME_TARGET 0x20 31 + #define FC_PORT_ROLE_NVME_DISCOVERY 0x40 32 33 34 /** ··· 642 * sequence in one LLDD operation. Errors during Data 643 * sequence transmit must not allow RSP sequence to be sent. 644 */ 645 + NVMET_FCTGTFEAT_CMD_IN_ISR = (1 << 1), 646 /* Bit 2: When 0, the LLDD is calling the cmd rcv handler 647 * in a non-isr context, allowing the transport to finish 648 * op completion in the calling context. When 1, the LLDD ··· 658 * requiring the transport to transition to a workqueue 659 * for op completion. 660 */ 661 + NVMET_FCTGTFEAT_OPDONE_IN_ISR = (1 << 2), 662 /* Bit 3: When 0, the LLDD is calling the op done handler 663 * in a non-isr context, allowing the transport to finish 664 * op completion in the calling context. When 1, the LLDD
+1 -1
include/linux/of_irq.h
··· 8 #include <linux/ioport.h> 9 #include <linux/of.h> 10 11 - typedef int const (*of_irq_init_cb_t)(struct device_node *, struct device_node *); 12 13 /* 14 * Workarounds only applied to 32bit powermac machines
··· 8 #include <linux/ioport.h> 9 #include <linux/of.h> 10 11 + typedef int (*of_irq_init_cb_t)(struct device_node *, struct device_node *); 12 13 /* 14 * Workarounds only applied to 32bit powermac machines
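The dropped const on the return type was meaningless (compilers warn that qualifiers on a function return type are ignored) and kept real irqchip init functions from matching the typedef cleanly. A hedged sketch of a callback with the corrected signature (driver name and compatible string are made up):

    #include <linux/init.h>
    #include <linux/irqchip.h>
    #include <linux/of.h>

    static int __init example_irqchip_of_init(struct device_node *node,
                                              struct device_node *parent)
    {
            /* map registers, create the irq domain, etc. */
            return 0;
    }
    IRQCHIP_DECLARE(example_irqchip, "vendor,example-irqchip", example_irqchip_of_init);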
+1
include/linux/usb/hcd.h
··· 148 unsigned rh_registered:1;/* is root hub registered? */ 149 unsigned rh_pollable:1; /* may we poll the root hub? */ 150 unsigned msix_enabled:1; /* driver has MSI-X enabled? */ 151 unsigned remove_phy:1; /* auto-remove USB phy */ 152 153 /* The next flag is a stopgap, to be removed when all the HCDs
··· 148 unsigned rh_registered:1;/* is root hub registered? */ 149 unsigned rh_pollable:1; /* may we poll the root hub? */ 150 unsigned msix_enabled:1; /* driver has MSI-X enabled? */ 151 + unsigned msi_enabled:1; /* driver has MSI enabled? */ 152 unsigned remove_phy:1; /* auto-remove USB phy */ 153 154 /* The next flag is a stopgap, to be removed when all the HCDs
+2 -2
include/net/x25.h
··· 298 299 /* sysctl_net_x25.c */ 300 #ifdef CONFIG_SYSCTL 301 - void x25_register_sysctl(void); 302 void x25_unregister_sysctl(void); 303 #else 304 - static inline void x25_register_sysctl(void) {}; 305 static inline void x25_unregister_sysctl(void) {}; 306 #endif /* CONFIG_SYSCTL */ 307
··· 298 299 /* sysctl_net_x25.c */ 300 #ifdef CONFIG_SYSCTL 301 + int x25_register_sysctl(void); 302 void x25_unregister_sysctl(void); 303 #else 304 + static inline int x25_register_sysctl(void) { return 0; }; 305 static inline void x25_unregister_sysctl(void) {}; 306 #endif /* CONFIG_SYSCTL */ 307
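With x25_register_sysctl() now returning int, callers can unwind on registration failure instead of silently continuing, and the CONFIG_SYSCTL=n stub keeps returning 0 so no caller needs an #ifdef. A minimal sketch of the intended error handling (the init function shown is illustrative):

    #include <linux/init.h>
    #include <net/x25.h>

    static int __init example_x25_init(void)
    {
            int rc = x25_register_sysctl();

            if (rc)
                    return rc;      /* propagate the error to the module loader */

            /* remaining registrations would follow here, unwinding with
             * x25_unregister_sysctl() if a later step fails */
            return 0;
    }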
+8
include/uapi/linux/bpf.h
··· 132 */ 133 #define BPF_F_ALLOW_OVERRIDE (1U << 0) 134 135 #define BPF_PSEUDO_MAP_FD 1 136 137 /* flags for BPF_MAP_UPDATE_ELEM command */ ··· 184 __u32 log_size; /* size of user buffer */ 185 __aligned_u64 log_buf; /* user supplied buffer */ 186 __u32 kern_version; /* checked when prog_type=kprobe */ 187 }; 188 189 struct { /* anonymous struct used by BPF_OBJ_* commands */
··· 132 */ 133 #define BPF_F_ALLOW_OVERRIDE (1U << 0) 134 135 + /* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the 136 + * verifier will perform strict alignment checking as if the kernel 137 + * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set, 138 + * and NET_IP_ALIGN defined to 2. 139 + */ 140 + #define BPF_F_STRICT_ALIGNMENT (1U << 0) 141 + 142 #define BPF_PSEUDO_MAP_FD 1 143 144 /* flags for BPF_MAP_UPDATE_ELEM command */ ··· 177 __u32 log_size; /* size of user buffer */ 178 __aligned_u64 log_buf; /* user supplied buffer */ 179 __u32 kern_version; /* checked when prog_type=kprobe */ 180 + __u32 prog_flags; 181 }; 182 183 struct { /* anonymous struct used by BPF_OBJ_* commands */
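A hedged userspace sketch of how a loader would request the new strict-alignment mode through prog_flags; the instruction buffer, log handling and error checking are elided, and the wrapper name is illustrative:

    #include <linux/bpf.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int bpf_load_strict(const struct bpf_insn *insns, __u32 insn_cnt)
    {
            union bpf_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.prog_type  = BPF_PROG_TYPE_XDP;
            attr.insn_cnt   = insn_cnt;
            attr.insns      = (__u64)(unsigned long)insns;
            attr.license    = (__u64)(unsigned long)"GPL";
            /* verify as if the target had no efficient unaligned access,
             * regardless of the architecture the loader runs on */
            attr.prog_flags = BPF_F_STRICT_ALIGNMENT;

            return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
    }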
+11 -2
include/uapi/linux/if_link.h
··· 888 /* XDP section */ 889 890 #define XDP_FLAGS_UPDATE_IF_NOEXIST (1U << 0) 891 - #define XDP_FLAGS_SKB_MODE (2U << 0) 892 #define XDP_FLAGS_MASK (XDP_FLAGS_UPDATE_IF_NOEXIST | \ 893 - XDP_FLAGS_SKB_MODE) 894 895 enum { 896 IFLA_XDP_UNSPEC,
··· 888 /* XDP section */ 889 890 #define XDP_FLAGS_UPDATE_IF_NOEXIST (1U << 0) 891 + #define XDP_FLAGS_SKB_MODE (1U << 1) 892 + #define XDP_FLAGS_DRV_MODE (1U << 2) 893 #define XDP_FLAGS_MASK (XDP_FLAGS_UPDATE_IF_NOEXIST | \ 894 + XDP_FLAGS_SKB_MODE | \ 895 + XDP_FLAGS_DRV_MODE) 896 + 897 + /* These are stored into IFLA_XDP_ATTACHED on dump. */ 898 + enum { 899 + XDP_ATTACHED_NONE = 0, 900 + XDP_ATTACHED_DRV, 901 + XDP_ATTACHED_SKB, 902 + }; 903 904 enum { 905 IFLA_XDP_UNSPEC,
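The XDP_ATTACHED_* values let a link dump report which mode a program runs in rather than a bare yes/no. A hedged userspace sketch of consuming the IFLA_XDP_ATTACHED byte (netlink parsing elided, function name illustrative):

    #include <linux/if_link.h>
    #include <stdio.h>

    static void print_xdp_mode(__u8 attached)
    {
            switch (attached) {
            case XDP_ATTACHED_NONE:
                    printf("no XDP program attached\n");
                    break;
            case XDP_ATTACHED_DRV:
                    printf("XDP program in native (driver) mode\n");
                    break;
            case XDP_ATTACHED_SKB:
                    printf("XDP program in generic (skb) mode\n");
                    break;
            default:
                    printf("unknown XDP attach mode %u\n", attached);
            }
    }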
+3
include/uapi/linux/usb/ch11.h
··· 22 */ 23 #define USB_MAXCHILDREN 31 24 25 /* 26 * Hub request types 27 */
··· 22 */ 23 #define USB_MAXCHILDREN 31 24 25 + /* See USB 3.1 spec Table 10-5 */ 26 + #define USB_SS_MAXPORTS 15 27 + 28 /* 29 * Hub request types 30 */
+4 -1
kernel/bpf/syscall.c
··· 783 EXPORT_SYMBOL_GPL(bpf_prog_get_type); 784 785 /* last field in 'union bpf_attr' used by this command */ 786 - #define BPF_PROG_LOAD_LAST_FIELD kern_version 787 788 static int bpf_prog_load(union bpf_attr *attr) 789 { ··· 794 bool is_gpl; 795 796 if (CHECK_ATTR(BPF_PROG_LOAD)) 797 return -EINVAL; 798 799 /* copy eBPF program license from user space */
··· 783 EXPORT_SYMBOL_GPL(bpf_prog_get_type); 784 785 /* last field in 'union bpf_attr' used by this command */ 786 + #define BPF_PROG_LOAD_LAST_FIELD prog_flags 787 788 static int bpf_prog_load(union bpf_attr *attr) 789 { ··· 794 bool is_gpl; 795 796 if (CHECK_ATTR(BPF_PROG_LOAD)) 797 + return -EINVAL; 798 + 799 + if (attr->prog_flags & ~BPF_F_STRICT_ALIGNMENT) 800 return -EINVAL; 801 802 /* copy eBPF program license from user space */
+120 -25
kernel/bpf/verifier.c
··· 140 struct bpf_verifier_stack_elem *next; 141 }; 142 143 - #define BPF_COMPLEXITY_LIMIT_INSNS 65536 144 #define BPF_COMPLEXITY_LIMIT_STACK 1024 145 146 #define BPF_MAP_PTR_POISON ((void *)0xeB9F + POISON_POINTER_DELTA) ··· 241 if (reg->max_value != BPF_REGISTER_MAX_RANGE) 242 verbose(",max_value=%llu", 243 (unsigned long long)reg->max_value); 244 } 245 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { 246 if (state->stack_slot_type[i] == STACK_SPILL) ··· 472 regs[i].imm = 0; 473 regs[i].min_value = BPF_REGISTER_MIN_RANGE; 474 regs[i].max_value = BPF_REGISTER_MAX_RANGE; 475 } 476 477 /* frame pointer */ ··· 501 { 502 regs[regno].min_value = BPF_REGISTER_MIN_RANGE; 503 regs[regno].max_value = BPF_REGISTER_MAX_RANGE; 504 } 505 506 static void mark_reg_unknown_value_and_range(struct bpf_reg_state *regs, ··· 789 } 790 791 static int check_pkt_ptr_alignment(const struct bpf_reg_state *reg, 792 - int off, int size) 793 { 794 - if (reg->id && size != 1) { 795 - verbose("Unknown alignment. Only byte-sized access allowed in packet access.\n"); 796 - return -EACCES; 797 } 798 799 - /* skb->data is NET_IP_ALIGN-ed */ 800 - if ((NET_IP_ALIGN + reg->off + off) % size != 0) { 801 verbose("misaligned packet access off %d+%d+%d size %d\n", 802 - NET_IP_ALIGN, reg->off, off, size); 803 return -EACCES; 804 } 805 ··· 823 } 824 825 static int check_val_ptr_alignment(const struct bpf_reg_state *reg, 826 - int size) 827 { 828 - if (size != 1) { 829 verbose("Unknown alignment. Only byte-sized access allowed in value access.\n"); 830 return -EACCES; 831 } ··· 833 return 0; 834 } 835 836 - static int check_ptr_alignment(const struct bpf_reg_state *reg, 837 int off, int size) 838 { 839 switch (reg->type) { 840 case PTR_TO_PACKET: 841 - return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ? 0 : 842 - check_pkt_ptr_alignment(reg, off, size); 843 case PTR_TO_MAP_VALUE_ADJ: 844 - return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ? 
0 : 845 - check_val_ptr_alignment(reg, size); 846 default: 847 if (off % size != 0) { 848 verbose("misaligned access off %d size %d\n", ··· 879 if (size < 0) 880 return size; 881 882 - err = check_ptr_alignment(reg, off, size); 883 if (err) 884 return err; 885 ··· 913 value_regno); 914 /* note that reg.[id|off|range] == 0 */ 915 state->regs[value_regno].type = reg_type; 916 } 917 918 } else if (reg->type == FRAME_PTR || reg->type == PTR_TO_STACK) { ··· 1487 */ 1488 dst_reg->off += imm; 1489 } else { 1490 if (src_reg->type == PTR_TO_PACKET) { 1491 /* R6=pkt(id=0,off=0,r=62) R7=imm22; r7 += r6 */ 1492 tmp_reg = *dst_reg; /* save r7 state */ ··· 1522 src_reg->imm); 1523 return -EACCES; 1524 } 1525 /* dst_reg stays as pkt_ptr type and since some positive 1526 * integer value was added to the pointer, increment its 'id' 1527 */ 1528 dst_reg->id = ++env->id_gen; 1529 1530 - /* something was added to pkt_ptr, set range and off to zero */ 1531 dst_reg->off = 0; 1532 dst_reg->range = 0; 1533 } 1534 return 0; 1535 } ··· 1712 reg->min_value = BPF_REGISTER_MIN_RANGE; 1713 } 1714 1715 static void adjust_reg_min_max_vals(struct bpf_verifier_env *env, 1716 struct bpf_insn *insn) 1717 { ··· 1726 s64 min_val = BPF_REGISTER_MIN_RANGE; 1727 u64 max_val = BPF_REGISTER_MAX_RANGE; 1728 u8 opcode = BPF_OP(insn->code); 1729 1730 dst_reg = &regs[insn->dst_reg]; 1731 if (BPF_SRC(insn->code) == BPF_X) { 1732 check_reg_overflow(&regs[insn->src_reg]); 1733 min_val = regs[insn->src_reg].min_value; ··· 1745 regs[insn->src_reg].type != UNKNOWN_VALUE) { 1746 min_val = BPF_REGISTER_MIN_RANGE; 1747 max_val = BPF_REGISTER_MAX_RANGE; 1748 } 1749 } else if (insn->imm < BPF_REGISTER_MAX_RANGE && 1750 (s64)insn->imm > BPF_REGISTER_MIN_RANGE) { 1751 min_val = max_val = insn->imm; 1752 } 1753 1754 /* We don't know anything about what was done to this register, mark it 1755 * as unknown. ··· 1781 dst_reg->min_value += min_val; 1782 if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) 1783 dst_reg->max_value += max_val; 1784 break; 1785 case BPF_SUB: 1786 if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) 1787 dst_reg->min_value -= min_val; 1788 if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) 1789 dst_reg->max_value -= max_val; 1790 break; 1791 case BPF_MUL: 1792 if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) 1793 dst_reg->min_value *= min_val; 1794 if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) 1795 dst_reg->max_value *= max_val; 1796 break; 1797 case BPF_AND: 1798 /* Disallow AND'ing of negative numbers, ain't nobody got time ··· 1807 else 1808 dst_reg->min_value = 0; 1809 dst_reg->max_value = max_val; 1810 break; 1811 case BPF_LSH: 1812 /* Gotta have special overflow logic here, if we're shifting 1813 * more than MAX_RANGE then just assume we have an invalid 1814 * range. 1815 */ 1816 - if (min_val > ilog2(BPF_REGISTER_MAX_RANGE)) 1817 dst_reg->min_value = BPF_REGISTER_MIN_RANGE; 1818 - else if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) 1819 - dst_reg->min_value <<= min_val; 1820 - 1821 if (max_val > ilog2(BPF_REGISTER_MAX_RANGE)) 1822 dst_reg->max_value = BPF_REGISTER_MAX_RANGE; 1823 else if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) ··· 1833 /* RSH by a negative number is undefined, and the BPF_RSH is an 1834 * unsigned shift, so make the appropriate casts. 
1835 */ 1836 - if (min_val < 0 || dst_reg->min_value < 0) 1837 dst_reg->min_value = BPF_REGISTER_MIN_RANGE; 1838 - else 1839 dst_reg->min_value = 1840 (u64)(dst_reg->min_value) >> min_val; 1841 if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) 1842 dst_reg->max_value >>= max_val; 1843 break; ··· 1947 regs[insn->dst_reg].imm = insn->imm; 1948 regs[insn->dst_reg].max_value = insn->imm; 1949 regs[insn->dst_reg].min_value = insn->imm; 1950 } 1951 1952 } else if (opcode > BPF_END) { ··· 2640 env->explored_states[t + 1] = STATE_LIST_MARK; 2641 } else { 2642 /* conditional jump with two edges */ 2643 ret = push_insn(t, t + 1, FALLTHROUGH, env); 2644 if (ret == 1) 2645 goto peek_stack; ··· 2799 rcur->type != NOT_INIT)) 2800 continue; 2801 2802 if (rold->type == PTR_TO_PACKET && rcur->type == PTR_TO_PACKET && 2803 compare_ptrs_to_packet(rold, rcur)) 2804 continue; ··· 2939 goto process_bpf_exit; 2940 } 2941 2942 - if (log_level && do_print_state) { 2943 - verbose("\nfrom %d to %d:", prev_insn_idx, insn_idx); 2944 print_verifier_state(&env->cur_state); 2945 do_print_state = false; 2946 } ··· 3584 } else { 3585 log_level = 0; 3586 } 3587 3588 ret = replace_map_fd_with_map_ptr(env); 3589 if (ret < 0) ··· 3693 mutex_lock(&bpf_verifier_lock); 3694 3695 log_level = 0; 3696 3697 env->explored_states = kcalloc(env->prog->len, 3698 sizeof(struct bpf_verifier_state_list *),
··· 140 struct bpf_verifier_stack_elem *next; 141 }; 142 143 + #define BPF_COMPLEXITY_LIMIT_INSNS 98304 144 #define BPF_COMPLEXITY_LIMIT_STACK 1024 145 146 #define BPF_MAP_PTR_POISON ((void *)0xeB9F + POISON_POINTER_DELTA) ··· 241 if (reg->max_value != BPF_REGISTER_MAX_RANGE) 242 verbose(",max_value=%llu", 243 (unsigned long long)reg->max_value); 244 + if (reg->min_align) 245 + verbose(",min_align=%u", reg->min_align); 246 + if (reg->aux_off) 247 + verbose(",aux_off=%u", reg->aux_off); 248 + if (reg->aux_off_align) 249 + verbose(",aux_off_align=%u", reg->aux_off_align); 250 } 251 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { 252 if (state->stack_slot_type[i] == STACK_SPILL) ··· 466 regs[i].imm = 0; 467 regs[i].min_value = BPF_REGISTER_MIN_RANGE; 468 regs[i].max_value = BPF_REGISTER_MAX_RANGE; 469 + regs[i].min_align = 0; 470 + regs[i].aux_off = 0; 471 + regs[i].aux_off_align = 0; 472 } 473 474 /* frame pointer */ ··· 492 { 493 regs[regno].min_value = BPF_REGISTER_MIN_RANGE; 494 regs[regno].max_value = BPF_REGISTER_MAX_RANGE; 495 + regs[regno].min_align = 0; 496 } 497 498 static void mark_reg_unknown_value_and_range(struct bpf_reg_state *regs, ··· 779 } 780 781 static int check_pkt_ptr_alignment(const struct bpf_reg_state *reg, 782 + int off, int size, bool strict) 783 { 784 + int ip_align; 785 + int reg_off; 786 + 787 + /* Byte size accesses are always allowed. */ 788 + if (!strict || size == 1) 789 + return 0; 790 + 791 + reg_off = reg->off; 792 + if (reg->id) { 793 + if (reg->aux_off_align % size) { 794 + verbose("Packet access is only %u byte aligned, %d byte access not allowed\n", 795 + reg->aux_off_align, size); 796 + return -EACCES; 797 + } 798 + reg_off += reg->aux_off; 799 } 800 801 + /* skb->data is NET_IP_ALIGN-ed, but for strict alignment checking 802 + * we force this to 2 which is universally what architectures use 803 + * when they don't set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS. 804 + */ 805 + ip_align = strict ? 2 : NET_IP_ALIGN; 806 + if ((ip_align + reg_off + off) % size != 0) { 807 verbose("misaligned packet access off %d+%d+%d size %d\n", 808 + ip_align, reg_off, off, size); 809 return -EACCES; 810 } 811 ··· 797 } 798 799 static int check_val_ptr_alignment(const struct bpf_reg_state *reg, 800 + int size, bool strict) 801 { 802 + if (strict && size != 1) { 803 verbose("Unknown alignment. 
Only byte-sized access allowed in value access.\n"); 804 return -EACCES; 805 } ··· 807 return 0; 808 } 809 810 + static int check_ptr_alignment(struct bpf_verifier_env *env, 811 + const struct bpf_reg_state *reg, 812 int off, int size) 813 { 814 + bool strict = env->strict_alignment; 815 + 816 + if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) 817 + strict = true; 818 + 819 switch (reg->type) { 820 case PTR_TO_PACKET: 821 + return check_pkt_ptr_alignment(reg, off, size, strict); 822 case PTR_TO_MAP_VALUE_ADJ: 823 + return check_val_ptr_alignment(reg, size, strict); 824 default: 825 if (off % size != 0) { 826 verbose("misaligned access off %d size %d\n", ··· 849 if (size < 0) 850 return size; 851 852 + err = check_ptr_alignment(env, reg, off, size); 853 if (err) 854 return err; 855 ··· 883 value_regno); 884 /* note that reg.[id|off|range] == 0 */ 885 state->regs[value_regno].type = reg_type; 886 + state->regs[value_regno].aux_off = 0; 887 + state->regs[value_regno].aux_off_align = 0; 888 } 889 890 } else if (reg->type == FRAME_PTR || reg->type == PTR_TO_STACK) { ··· 1455 */ 1456 dst_reg->off += imm; 1457 } else { 1458 + bool had_id; 1459 + 1460 if (src_reg->type == PTR_TO_PACKET) { 1461 /* R6=pkt(id=0,off=0,r=62) R7=imm22; r7 += r6 */ 1462 tmp_reg = *dst_reg; /* save r7 state */ ··· 1488 src_reg->imm); 1489 return -EACCES; 1490 } 1491 + 1492 + had_id = (dst_reg->id != 0); 1493 + 1494 /* dst_reg stays as pkt_ptr type and since some positive 1495 * integer value was added to the pointer, increment its 'id' 1496 */ 1497 dst_reg->id = ++env->id_gen; 1498 1499 + /* something was added to pkt_ptr, set range to zero */ 1500 + dst_reg->aux_off += dst_reg->off; 1501 dst_reg->off = 0; 1502 dst_reg->range = 0; 1503 + if (had_id) 1504 + dst_reg->aux_off_align = min(dst_reg->aux_off_align, 1505 + src_reg->min_align); 1506 + else 1507 + dst_reg->aux_off_align = src_reg->min_align; 1508 } 1509 return 0; 1510 } ··· 1669 reg->min_value = BPF_REGISTER_MIN_RANGE; 1670 } 1671 1672 + static u32 calc_align(u32 imm) 1673 + { 1674 + if (!imm) 1675 + return 1U << 31; 1676 + return imm - ((imm - 1) & imm); 1677 + } 1678 + 1679 static void adjust_reg_min_max_vals(struct bpf_verifier_env *env, 1680 struct bpf_insn *insn) 1681 { ··· 1676 s64 min_val = BPF_REGISTER_MIN_RANGE; 1677 u64 max_val = BPF_REGISTER_MAX_RANGE; 1678 u8 opcode = BPF_OP(insn->code); 1679 + u32 dst_align, src_align; 1680 1681 dst_reg = &regs[insn->dst_reg]; 1682 + src_align = 0; 1683 if (BPF_SRC(insn->code) == BPF_X) { 1684 check_reg_overflow(&regs[insn->src_reg]); 1685 min_val = regs[insn->src_reg].min_value; ··· 1693 regs[insn->src_reg].type != UNKNOWN_VALUE) { 1694 min_val = BPF_REGISTER_MIN_RANGE; 1695 max_val = BPF_REGISTER_MAX_RANGE; 1696 + src_align = 0; 1697 + } else { 1698 + src_align = regs[insn->src_reg].min_align; 1699 } 1700 } else if (insn->imm < BPF_REGISTER_MAX_RANGE && 1701 (s64)insn->imm > BPF_REGISTER_MIN_RANGE) { 1702 min_val = max_val = insn->imm; 1703 + src_align = calc_align(insn->imm); 1704 } 1705 + 1706 + dst_align = dst_reg->min_align; 1707 1708 /* We don't know anything about what was done to this register, mark it 1709 * as unknown. 
··· 1723 dst_reg->min_value += min_val; 1724 if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) 1725 dst_reg->max_value += max_val; 1726 + dst_reg->min_align = min(src_align, dst_align); 1727 break; 1728 case BPF_SUB: 1729 if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) 1730 dst_reg->min_value -= min_val; 1731 if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) 1732 dst_reg->max_value -= max_val; 1733 + dst_reg->min_align = min(src_align, dst_align); 1734 break; 1735 case BPF_MUL: 1736 if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) 1737 dst_reg->min_value *= min_val; 1738 if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) 1739 dst_reg->max_value *= max_val; 1740 + dst_reg->min_align = max(src_align, dst_align); 1741 break; 1742 case BPF_AND: 1743 /* Disallow AND'ing of negative numbers, ain't nobody got time ··· 1746 else 1747 dst_reg->min_value = 0; 1748 dst_reg->max_value = max_val; 1749 + dst_reg->min_align = max(src_align, dst_align); 1750 break; 1751 case BPF_LSH: 1752 /* Gotta have special overflow logic here, if we're shifting 1753 * more than MAX_RANGE then just assume we have an invalid 1754 * range. 1755 */ 1756 + if (min_val > ilog2(BPF_REGISTER_MAX_RANGE)) { 1757 dst_reg->min_value = BPF_REGISTER_MIN_RANGE; 1758 + dst_reg->min_align = 1; 1759 + } else { 1760 + if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) 1761 + dst_reg->min_value <<= min_val; 1762 + if (!dst_reg->min_align) 1763 + dst_reg->min_align = 1; 1764 + dst_reg->min_align <<= min_val; 1765 + } 1766 if (max_val > ilog2(BPF_REGISTER_MAX_RANGE)) 1767 dst_reg->max_value = BPF_REGISTER_MAX_RANGE; 1768 else if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) ··· 1766 /* RSH by a negative number is undefined, and the BPF_RSH is an 1767 * unsigned shift, so make the appropriate casts. 1768 */ 1769 + if (min_val < 0 || dst_reg->min_value < 0) { 1770 dst_reg->min_value = BPF_REGISTER_MIN_RANGE; 1771 + } else { 1772 dst_reg->min_value = 1773 (u64)(dst_reg->min_value) >> min_val; 1774 + } 1775 + if (min_val < 0) { 1776 + dst_reg->min_align = 1; 1777 + } else { 1778 + dst_reg->min_align >>= (u64) min_val; 1779 + if (!dst_reg->min_align) 1780 + dst_reg->min_align = 1; 1781 + } 1782 if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) 1783 dst_reg->max_value >>= max_val; 1784 break; ··· 1872 regs[insn->dst_reg].imm = insn->imm; 1873 regs[insn->dst_reg].max_value = insn->imm; 1874 regs[insn->dst_reg].min_value = insn->imm; 1875 + regs[insn->dst_reg].min_align = calc_align(insn->imm); 1876 } 1877 1878 } else if (opcode > BPF_END) { ··· 2564 env->explored_states[t + 1] = STATE_LIST_MARK; 2565 } else { 2566 /* conditional jump with two edges */ 2567 + env->explored_states[t] = STATE_LIST_MARK; 2568 ret = push_insn(t, t + 1, FALLTHROUGH, env); 2569 if (ret == 1) 2570 goto peek_stack; ··· 2722 rcur->type != NOT_INIT)) 2723 continue; 2724 2725 + /* Don't care about the reg->id in this case. 
*/ 2726 + if (rold->type == PTR_TO_MAP_VALUE_OR_NULL && 2727 + rcur->type == PTR_TO_MAP_VALUE_OR_NULL && 2728 + rold->map_ptr == rcur->map_ptr) 2729 + continue; 2730 + 2731 if (rold->type == PTR_TO_PACKET && rcur->type == PTR_TO_PACKET && 2732 compare_ptrs_to_packet(rold, rcur)) 2733 continue; ··· 2856 goto process_bpf_exit; 2857 } 2858 2859 + if (need_resched()) 2860 + cond_resched(); 2861 + 2862 + if (log_level > 1 || (log_level && do_print_state)) { 2863 + if (log_level > 1) 2864 + verbose("%d:", insn_idx); 2865 + else 2866 + verbose("\nfrom %d to %d:", 2867 + prev_insn_idx, insn_idx); 2868 print_verifier_state(&env->cur_state); 2869 do_print_state = false; 2870 } ··· 3494 } else { 3495 log_level = 0; 3496 } 3497 + if (attr->prog_flags & BPF_F_STRICT_ALIGNMENT) 3498 + env->strict_alignment = true; 3499 + else 3500 + env->strict_alignment = false; 3501 3502 ret = replace_map_fd_with_map_ptr(env); 3503 if (ret < 0) ··· 3599 mutex_lock(&bpf_verifier_lock); 3600 3601 log_level = 0; 3602 + env->strict_alignment = false; 3603 3604 env->explored_states = kcalloc(env->prog->len, 3605 sizeof(struct bpf_verifier_state_list *),
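calc_align() in the hunk above isolates the lowest set bit of a constant, i.e. the largest power of two the value is guaranteed to be a multiple of, which is what the verifier then tracks as min_align. A small stand-alone check of that behaviour (a userspace restatement of the helper, not kernel code):

    #include <assert.h>

    static unsigned int calc_align(unsigned int imm)
    {
            if (!imm)
                    return 1U << 31;
            return imm - ((imm - 1) & imm);
    }

    int main(void)
    {
            assert(calc_align(12) == 4);         /* 12 = 0b1100: known 4-byte aligned */
            assert(calc_align(7) == 1);          /* odd constants carry no alignment */
            assert(calc_align(4096) == 4096);    /* powers of two are their own alignment */
            assert(calc_align(0) == (1U << 31)); /* adding zero preserves any alignment */
            return 0;
    }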
+6 -2
kernel/fork.c
··· 1845 */ 1846 recalc_sigpending(); 1847 if (signal_pending(current)) { 1848 - spin_unlock(&current->sighand->siglock); 1849 - write_unlock_irq(&tasklist_lock); 1850 retval = -ERESTARTNOINTR; 1851 goto bad_fork_cancel_cgroup; 1852 } 1853 ··· 1909 return p; 1910 1911 bad_fork_cancel_cgroup: 1912 cgroup_cancel_fork(p); 1913 bad_fork_free_pid: 1914 cgroup_threadgroup_change_end(current);
··· 1845 */ 1846 recalc_sigpending(); 1847 if (signal_pending(current)) { 1848 retval = -ERESTARTNOINTR; 1849 + goto bad_fork_cancel_cgroup; 1850 + } 1851 + if (unlikely(!(ns_of_pid(pid)->nr_hashed & PIDNS_HASH_ADDING))) { 1852 + retval = -ENOMEM; 1853 goto bad_fork_cancel_cgroup; 1854 } 1855 ··· 1907 return p; 1908 1909 bad_fork_cancel_cgroup: 1910 + spin_unlock(&current->sighand->siglock); 1911 + write_unlock_irq(&tasklist_lock); 1912 cgroup_cancel_fork(p); 1913 bad_fork_free_pid: 1914 cgroup_threadgroup_change_end(current);
+1 -1
kernel/irq/chip.c
··· 880 if (!desc) 881 return; 882 883 - __irq_do_set_handler(desc, handle, 1, NULL); 884 desc->irq_common_data.handler_data = data; 885 886 irq_put_desc_busunlock(desc, flags); 887 }
··· 880 if (!desc) 881 return; 882 883 desc->irq_common_data.handler_data = data; 884 + __irq_do_set_handler(desc, handle, 1, NULL); 885 886 irq_put_desc_busunlock(desc, flags); 887 }
+7 -1
kernel/kprobes.c
··· 595 } 596 597 /* Wait for completing optimization and unoptimization */ 598 - static void wait_for_kprobe_optimizer(void) 599 { 600 mutex_lock(&kprobe_mutex); 601 ··· 2183 * The vaddr this probe is installed will soon 2184 * be vfreed buy not synced to disk. Hence, 2185 * disarming the breakpoint isn't needed. 2186 */ 2187 kill_kprobe(p); 2188 }
··· 595 } 596 597 /* Wait for completing optimization and unoptimization */ 598 + void wait_for_kprobe_optimizer(void) 599 { 600 mutex_lock(&kprobe_mutex); 601 ··· 2183 * The vaddr this probe is installed will soon 2184 * be vfreed buy not synced to disk. Hence, 2185 * disarming the breakpoint isn't needed. 2186 + * 2187 + * Note, this will also move any optimized probes 2188 + * that are pending to be removed from their 2189 + * corresponding lists to the freeing_list and 2190 + * will not be touched by the delayed 2191 + * kprobe_optimizer work handler. 2192 */ 2193 kill_kprobe(p); 2194 }
+1 -1
kernel/pid_namespace.c
··· 277 * if reparented. 278 */ 279 for (;;) { 280 - set_current_state(TASK_UNINTERRUPTIBLE); 281 if (pid_ns->nr_hashed == init_pids) 282 break; 283 schedule();
··· 277 * if reparented. 278 */ 279 for (;;) { 280 + set_current_state(TASK_INTERRUPTIBLE); 281 if (pid_ns->nr_hashed == init_pids) 282 break; 283 schedule();
+25
kernel/sched/core.c
··· 3502 } 3503 EXPORT_SYMBOL(schedule); 3504 3505 #ifdef CONFIG_CONTEXT_TRACKING 3506 asmlinkage __visible void __sched schedule_user(void) 3507 {
··· 3502 } 3503 EXPORT_SYMBOL(schedule); 3504 3505 + /* 3506 + * synchronize_rcu_tasks() makes sure that no task is stuck in preempted 3507 + * state (have scheduled out non-voluntarily) by making sure that all 3508 + * tasks have either left the run queue or have gone into user space. 3509 + * As idle tasks do not do either, they must not ever be preempted 3510 + * (schedule out non-voluntarily). 3511 + * 3512 + * schedule_idle() is similar to schedule_preempt_disable() except that it 3513 + * never enables preemption because it does not call sched_submit_work(). 3514 + */ 3515 + void __sched schedule_idle(void) 3516 + { 3517 + /* 3518 + * As this skips calling sched_submit_work(), which the idle task does 3519 + * regardless because that function is a nop when the task is in a 3520 + * TASK_RUNNING state, make sure this isn't used someplace that the 3521 + * current task can be in any other state. Note, idle is always in the 3522 + * TASK_RUNNING state. 3523 + */ 3524 + WARN_ON_ONCE(current->state); 3525 + do { 3526 + __schedule(false); 3527 + } while (need_resched()); 3528 + } 3529 + 3530 #ifdef CONFIG_CONTEXT_TRACKING 3531 asmlinkage __visible void __sched schedule_user(void) 3532 {
+1 -1
kernel/sched/idle.c
··· 265 smp_mb__after_atomic(); 266 267 sched_ttwu_pending(); 268 - schedule_preempt_disabled(); 269 270 if (unlikely(klp_patch_pending(current))) 271 klp_update_patch_state(current);
··· 265 smp_mb__after_atomic(); 266 267 sched_ttwu_pending(); 268 + schedule_idle(); 269 270 if (unlikely(klp_patch_pending(current))) 271 klp_update_patch_state(current);
+2
kernel/sched/sched.h
··· 1467 } 1468 #endif 1469 1470 extern void sysrq_sched_debug_show(void); 1471 extern void sched_init_granularity(void); 1472 extern void update_max_interval(void);
··· 1467 } 1468 #endif 1469 1470 + extern void schedule_idle(void); 1471 + 1472 extern void sysrq_sched_debug_show(void); 1473 extern void sched_init_granularity(void); 1474 extern void update_max_interval(void);
+2 -2
kernel/trace/blktrace.c
··· 1662 goto out; 1663 1664 if (attr == &dev_attr_act_mask) { 1665 - if (sscanf(buf, "%llx", &value) != 1) { 1666 /* Assume it is a list of trace category names */ 1667 ret = blk_trace_str2mask(buf); 1668 if (ret < 0) 1669 goto out; 1670 value = ret; 1671 } 1672 - } else if (sscanf(buf, "%llu", &value) != 1) 1673 goto out; 1674 1675 ret = -ENXIO;
··· 1662 goto out; 1663 1664 if (attr == &dev_attr_act_mask) { 1665 + if (kstrtoull(buf, 0, &value)) { 1666 /* Assume it is a list of trace category names */ 1667 ret = blk_trace_str2mask(buf); 1668 if (ret < 0) 1669 goto out; 1670 value = ret; 1671 } 1672 + } else if (kstrtoull(buf, 0, &value)) 1673 goto out; 1674 1675 ret = -ENXIO;
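The kstrtoull() conversion matters for inputs like "discard": a scanf-style "%llx" half-parses the leading 'd' as a hex digit instead of failing, so the fall-through to blk_trace_str2mask() never ran, whereas kstrtoull() rejects the string outright. A small userspace demonstration of that pitfall:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned long long v = 0;

            /* "%llx" consumes the leading 'd' as a hex digit and reports success */
            assert(sscanf("discard", "%llx", &v) == 1 && v == 0xd);
            return 0;
    }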
+10 -2
kernel/trace/ftrace.c
··· 4144 int i, ret = -ENODEV; 4145 int size; 4146 4147 - if (glob && (strcmp(glob, "*") == 0 || !strlen(glob))) 4148 func_g.search = NULL; 4149 - else if (glob) { 4150 int not; 4151 4152 func_g.type = filter_parse_regex(glob, strlen(glob), ··· 4254 err_unlock_ftrace: 4255 mutex_unlock(&ftrace_lock); 4256 return ret; 4257 } 4258 4259 static LIST_HEAD(ftrace_commands);
··· 4144 int i, ret = -ENODEV; 4145 int size; 4146 4147 + if (!glob || !strlen(glob) || !strcmp(glob, "*")) 4148 func_g.search = NULL; 4149 + else { 4150 int not; 4151 4152 func_g.type = filter_parse_regex(glob, strlen(glob), ··· 4254 err_unlock_ftrace: 4255 mutex_unlock(&ftrace_lock); 4256 return ret; 4257 + } 4258 + 4259 + void clear_ftrace_function_probes(struct trace_array *tr) 4260 + { 4261 + struct ftrace_func_probe *probe, *n; 4262 + 4263 + list_for_each_entry_safe(probe, n, &tr->func_probes, list) 4264 + unregister_ftrace_function_probe_func(NULL, tr, probe->probe_ops); 4265 } 4266 4267 static LIST_HEAD(ftrace_commands);
+32 -2
kernel/trace/trace.c
··· 1558 1559 return 0; 1560 } 1561 - early_initcall(init_trace_selftests); 1562 #else 1563 static inline int run_tracer_selftest(struct tracer *type) 1564 { ··· 2568 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, 2569 int pc) 2570 { 2571 - __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL); 2572 } 2573 2574 /** ··· 7579 } 7580 7581 tracing_set_nop(tr); 7582 event_trace_del_tracer(tr); 7583 ftrace_clear_pids(tr); 7584 ftrace_destroy_function_files(tr);
··· 1558 1559 return 0; 1560 } 1561 + core_initcall(init_trace_selftests); 1562 #else 1563 static inline int run_tracer_selftest(struct tracer *type) 1564 { ··· 2568 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, 2569 int pc) 2570 { 2571 + struct ring_buffer *buffer = tr->trace_buffer.buffer; 2572 + 2573 + if (rcu_is_watching()) { 2574 + __ftrace_trace_stack(buffer, flags, skip, pc, NULL); 2575 + return; 2576 + } 2577 + 2578 + /* 2579 + * When an NMI triggers, RCU is enabled via rcu_nmi_enter(), 2580 + * but if the above rcu_is_watching() failed, then the NMI 2581 + * triggered someplace critical, and rcu_irq_enter() should 2582 + * not be called from NMI. 2583 + */ 2584 + if (unlikely(in_nmi())) 2585 + return; 2586 + 2587 + /* 2588 + * It is possible that a function is being traced in a 2589 + * location that RCU is not watching. A call to 2590 + * rcu_irq_enter() will make sure that it is, but there's 2591 + * a few internal rcu functions that could be traced 2592 + * where that wont work either. In those cases, we just 2593 + * do nothing. 2594 + */ 2595 + if (unlikely(rcu_irq_enter_disabled())) 2596 + return; 2597 + 2598 + rcu_irq_enter_irqson(); 2599 + __ftrace_trace_stack(buffer, flags, skip, pc, NULL); 2600 + rcu_irq_exit_irqson(); 2601 } 2602 2603 /** ··· 7550 } 7551 7552 tracing_set_nop(tr); 7553 + clear_ftrace_function_probes(tr); 7554 event_trace_del_tracer(tr); 7555 ftrace_clear_pids(tr); 7556 ftrace_destroy_function_files(tr);
+5
kernel/trace/trace.h
··· 980 extern int 981 unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr, 982 struct ftrace_probe_ops *ops); 983 984 int register_ftrace_command(struct ftrace_func_command *cmd); 985 int unregister_ftrace_command(struct ftrace_func_command *cmd); ··· 999 { 1000 return -EINVAL; 1001 } 1002 /* 1003 * The ops parameter passed in is usually undefined. 1004 * This must be a macro.
··· 980 extern int 981 unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr, 982 struct ftrace_probe_ops *ops); 983 + extern void clear_ftrace_function_probes(struct trace_array *tr); 984 985 int register_ftrace_command(struct ftrace_func_command *cmd); 986 int unregister_ftrace_command(struct ftrace_func_command *cmd); ··· 998 { 999 return -EINVAL; 1000 } 1001 + static inline void clear_ftrace_function_probes(struct trace_array *tr) 1002 + { 1003 + } 1004 + 1005 /* 1006 * The ops parameter passed in is usually undefined. 1007 * This must be a macro.
+5
kernel/trace/trace_kprobe.c
··· 1535 1536 end: 1537 release_all_trace_kprobes(); 1538 if (warn) 1539 pr_cont("NG: Some tests are failed. Please check them.\n"); 1540 else
··· 1535 1536 end: 1537 release_all_trace_kprobes(); 1538 + /* 1539 + * Wait for the optimizer work to finish. Otherwise it might fiddle 1540 + * with probes in already freed __init text. 1541 + */ 1542 + wait_for_kprobe_optimizer(); 1543 if (warn) 1544 pr_cont("NG: Some tests are failed. Please check them.\n"); 1545 else
+4 -4
net/9p/trans_xen.c
··· 454 goto error_xenbus; 455 } 456 priv->tag = xenbus_read(xbt, dev->nodename, "tag", NULL); 457 - if (!priv->tag) { 458 - ret = -EINVAL; 459 goto error_xenbus; 460 } 461 ret = xenbus_transaction_end(xbt, 0); ··· 525 .otherend_changed = xen_9pfs_front_changed, 526 }; 527 528 - int p9_trans_xen_init(void) 529 { 530 if (!xen_domain()) 531 return -ENODEV; ··· 537 } 538 module_init(p9_trans_xen_init); 539 540 - void p9_trans_xen_exit(void) 541 { 542 v9fs_unregister_trans(&p9_xen_trans); 543 return xenbus_unregister_driver(&xen_9pfs_front_driver);
··· 454 goto error_xenbus; 455 } 456 priv->tag = xenbus_read(xbt, dev->nodename, "tag", NULL); 457 + if (IS_ERR(priv->tag)) { 458 + ret = PTR_ERR(priv->tag); 459 goto error_xenbus; 460 } 461 ret = xenbus_transaction_end(xbt, 0); ··· 525 .otherend_changed = xen_9pfs_front_changed, 526 }; 527 528 + static int p9_trans_xen_init(void) 529 { 530 if (!xen_domain()) 531 return -ENODEV; ··· 537 } 538 module_init(p9_trans_xen_init); 539 540 + static void p9_trans_xen_exit(void) 541 { 542 v9fs_unregister_trans(&p9_xen_trans); 543 return xenbus_unregister_driver(&xen_9pfs_front_driver);
+7
net/bridge/br_netlink.c
··· 835 return -EPROTONOSUPPORT; 836 } 837 } 838 #endif 839 840 return 0;
··· 835 return -EPROTONOSUPPORT; 836 } 837 } 838 + 839 + if (data[IFLA_BR_VLAN_DEFAULT_PVID]) { 840 + __u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]); 841 + 842 + if (defpvid >= VLAN_VID_MASK) 843 + return -EINVAL; 844 + } 845 #endif 846 847 return 0;
+38 -19
net/core/dev.c
··· 6852 } 6853 EXPORT_SYMBOL(dev_change_proto_down); 6854 6855 /** 6856 * dev_change_xdp_fd - set or clear a bpf program for a device rx path 6857 * @dev: device ··· 6890 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, 6891 int fd, u32 flags) 6892 { 6893 - int (*xdp_op)(struct net_device *dev, struct netdev_xdp *xdp); 6894 const struct net_device_ops *ops = dev->netdev_ops; 6895 struct bpf_prog *prog = NULL; 6896 - struct netdev_xdp xdp; 6897 int err; 6898 6899 ASSERT_RTNL(); 6900 6901 - xdp_op = ops->ndo_xdp; 6902 if (!xdp_op || (flags & XDP_FLAGS_SKB_MODE)) 6903 xdp_op = generic_xdp_install; 6904 6905 if (fd >= 0) { 6906 - if (flags & XDP_FLAGS_UPDATE_IF_NOEXIST) { 6907 - memset(&xdp, 0, sizeof(xdp)); 6908 - xdp.command = XDP_QUERY_PROG; 6909 - 6910 - err = xdp_op(dev, &xdp); 6911 - if (err < 0) 6912 - return err; 6913 - if (xdp.prog_attached) 6914 - return -EBUSY; 6915 - } 6916 6917 prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP); 6918 if (IS_ERR(prog)) 6919 return PTR_ERR(prog); 6920 } 6921 6922 - memset(&xdp, 0, sizeof(xdp)); 6923 - xdp.command = XDP_SETUP_PROG; 6924 - xdp.extack = extack; 6925 - xdp.prog = prog; 6926 - 6927 - err = xdp_op(dev, &xdp); 6928 if (err < 0 && prog) 6929 bpf_prog_put(prog); 6930
··· 6852 } 6853 EXPORT_SYMBOL(dev_change_proto_down); 6854 6855 + bool __dev_xdp_attached(struct net_device *dev, xdp_op_t xdp_op) 6856 + { 6857 + struct netdev_xdp xdp; 6858 + 6859 + memset(&xdp, 0, sizeof(xdp)); 6860 + xdp.command = XDP_QUERY_PROG; 6861 + 6862 + /* Query must always succeed. */ 6863 + WARN_ON(xdp_op(dev, &xdp) < 0); 6864 + return xdp.prog_attached; 6865 + } 6866 + 6867 + static int dev_xdp_install(struct net_device *dev, xdp_op_t xdp_op, 6868 + struct netlink_ext_ack *extack, 6869 + struct bpf_prog *prog) 6870 + { 6871 + struct netdev_xdp xdp; 6872 + 6873 + memset(&xdp, 0, sizeof(xdp)); 6874 + xdp.command = XDP_SETUP_PROG; 6875 + xdp.extack = extack; 6876 + xdp.prog = prog; 6877 + 6878 + return xdp_op(dev, &xdp); 6879 + } 6880 + 6881 /** 6882 * dev_change_xdp_fd - set or clear a bpf program for a device rx path 6883 * @dev: device ··· 6864 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, 6865 int fd, u32 flags) 6866 { 6867 const struct net_device_ops *ops = dev->netdev_ops; 6868 struct bpf_prog *prog = NULL; 6869 + xdp_op_t xdp_op, xdp_chk; 6870 int err; 6871 6872 ASSERT_RTNL(); 6873 6874 + xdp_op = xdp_chk = ops->ndo_xdp; 6875 + if (!xdp_op && (flags & XDP_FLAGS_DRV_MODE)) 6876 + return -EOPNOTSUPP; 6877 if (!xdp_op || (flags & XDP_FLAGS_SKB_MODE)) 6878 xdp_op = generic_xdp_install; 6879 + if (xdp_op == xdp_chk) 6880 + xdp_chk = generic_xdp_install; 6881 6882 if (fd >= 0) { 6883 + if (xdp_chk && __dev_xdp_attached(dev, xdp_chk)) 6884 + return -EEXIST; 6885 + if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && 6886 + __dev_xdp_attached(dev, xdp_op)) 6887 + return -EBUSY; 6888 6889 prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP); 6890 if (IS_ERR(prog)) 6891 return PTR_ERR(prog); 6892 } 6893 6894 + err = dev_xdp_install(dev, xdp_op, extack, prog); 6895 if (err < 0 && prog) 6896 bpf_prog_put(prog); 6897
+10 -4
net/core/neighbour.c
··· 1132 lladdr = neigh->ha; 1133 } 1134 1135 - if (new & NUD_CONNECTED) 1136 - neigh->confirmed = jiffies; 1137 - neigh->updated = jiffies; 1138 - 1139 /* If entry was valid and address is not changed, 1140 do not change entry state, if new one is STALE. 1141 */ ··· 1151 !(flags & NEIGH_UPDATE_F_ADMIN)) 1152 new = old; 1153 } 1154 } 1155 1156 if (new != old) {
··· 1132 lladdr = neigh->ha; 1133 } 1134 1135 /* If entry was valid and address is not changed, 1136 do not change entry state, if new one is STALE. 1137 */ ··· 1155 !(flags & NEIGH_UPDATE_F_ADMIN)) 1156 new = old; 1157 } 1158 + } 1159 + 1160 + /* Update timestamps only once we know we will make a change to the 1161 + * neighbour entry. Otherwise we risk to move the locktime window with 1162 + * noop updates and ignore relevant ARP updates. 1163 + */ 1164 + if (new != old || lladdr != neigh->ha) { 1165 + if (new & NUD_CONNECTED) 1166 + neigh->confirmed = jiffies; 1167 + neigh->updated = jiffies; 1168 } 1169 1170 if (new != old) {
+46 -33
net/core/rtnetlink.c
··· 899 static size_t rtnl_xdp_size(void) 900 { 901 size_t xdp_size = nla_total_size(0) + /* nest IFLA_XDP */ 902 - nla_total_size(1) + /* XDP_ATTACHED */ 903 - nla_total_size(4); /* XDP_FLAGS */ 904 905 return xdp_size; 906 } ··· 1246 return 0; 1247 } 1248 1249 static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev) 1250 { 1251 struct nlattr *xdp; 1252 - u32 xdp_flags = 0; 1253 - u8 val = 0; 1254 int err; 1255 1256 xdp = nla_nest_start(skb, IFLA_XDP); 1257 if (!xdp) 1258 return -EMSGSIZE; 1259 - if (rcu_access_pointer(dev->xdp_prog)) { 1260 - xdp_flags = XDP_FLAGS_SKB_MODE; 1261 - val = 1; 1262 - } else if (dev->netdev_ops->ndo_xdp) { 1263 - struct netdev_xdp xdp_op = {}; 1264 1265 - xdp_op.command = XDP_QUERY_PROG; 1266 - err = dev->netdev_ops->ndo_xdp(dev, &xdp_op); 1267 - if (err) 1268 - goto err_cancel; 1269 - val = xdp_op.prog_attached; 1270 - } 1271 - err = nla_put_u8(skb, IFLA_XDP_ATTACHED, val); 1272 if (err) 1273 goto err_cancel; 1274 1275 - if (xdp_flags) { 1276 - err = nla_put_u32(skb, IFLA_XDP_FLAGS, xdp_flags); 1277 - if (err) 1278 - goto err_cancel; 1279 - } 1280 nla_nest_end(skb, xdp); 1281 return 0; 1282 ··· 1627 cb->nlh->nlmsg_seq, 0, 1628 flags, 1629 ext_filter_mask); 1630 - /* If we ran out of room on the first message, 1631 - * we're in trouble 1632 - */ 1633 - WARN_ON((err == -EMSGSIZE) && (skb->len == 0)); 1634 1635 - if (err < 0) 1636 - goto out; 1637 1638 nl_dump_check_consistent(cb, nlmsg_hdr(skb)); 1639 cont: ··· 1641 } 1642 } 1643 out: 1644 cb->args[1] = idx; 1645 cb->args[0] = h; 1646 1647 - return skb->len; 1648 } 1649 1650 int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len, ··· 2194 if (xdp[IFLA_XDP_FLAGS]) { 2195 xdp_flags = nla_get_u32(xdp[IFLA_XDP_FLAGS]); 2196 if (xdp_flags & ~XDP_FLAGS_MASK) { 2197 err = -EINVAL; 2198 goto errout; 2199 } ··· 3455 err = br_dev->netdev_ops->ndo_bridge_getlink( 3456 skb, portid, seq, dev, 3457 filter_mask, NLM_F_MULTI); 3458 - if (err < 0 && err != -EOPNOTSUPP) 3459 - break; 3460 } 3461 idx++; 3462 } ··· 3471 seq, dev, 3472 filter_mask, 3473 NLM_F_MULTI); 3474 - if (err < 0 && err != -EOPNOTSUPP) 3475 - break; 3476 } 3477 idx++; 3478 } 3479 } 3480 rcu_read_unlock(); 3481 cb->args[0] = idx; 3482 3483 - return skb->len; 3484 } 3485 3486 static inline size_t bridge_nlmsg_size(void)
··· 899 static size_t rtnl_xdp_size(void) 900 { 901 size_t xdp_size = nla_total_size(0) + /* nest IFLA_XDP */ 902 + nla_total_size(1); /* XDP_ATTACHED */ 903 904 return xdp_size; 905 } ··· 1247 return 0; 1248 } 1249 1250 + static u8 rtnl_xdp_attached_mode(struct net_device *dev) 1251 + { 1252 + const struct net_device_ops *ops = dev->netdev_ops; 1253 + 1254 + ASSERT_RTNL(); 1255 + 1256 + if (rcu_access_pointer(dev->xdp_prog)) 1257 + return XDP_ATTACHED_SKB; 1258 + if (ops->ndo_xdp && __dev_xdp_attached(dev, ops->ndo_xdp)) 1259 + return XDP_ATTACHED_DRV; 1260 + 1261 + return XDP_ATTACHED_NONE; 1262 + } 1263 + 1264 static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev) 1265 { 1266 struct nlattr *xdp; 1267 int err; 1268 1269 xdp = nla_nest_start(skb, IFLA_XDP); 1270 if (!xdp) 1271 return -EMSGSIZE; 1272 1273 + err = nla_put_u8(skb, IFLA_XDP_ATTACHED, 1274 + rtnl_xdp_attached_mode(dev)); 1275 if (err) 1276 goto err_cancel; 1277 1278 nla_nest_end(skb, xdp); 1279 return 0; 1280 ··· 1631 cb->nlh->nlmsg_seq, 0, 1632 flags, 1633 ext_filter_mask); 1634 1635 + if (err < 0) { 1636 + if (likely(skb->len)) 1637 + goto out; 1638 + 1639 + goto out_err; 1640 + } 1641 1642 nl_dump_check_consistent(cb, nlmsg_hdr(skb)); 1643 cont: ··· 1645 } 1646 } 1647 out: 1648 + err = skb->len; 1649 + out_err: 1650 cb->args[1] = idx; 1651 cb->args[0] = h; 1652 1653 + return err; 1654 } 1655 1656 int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len, ··· 2196 if (xdp[IFLA_XDP_FLAGS]) { 2197 xdp_flags = nla_get_u32(xdp[IFLA_XDP_FLAGS]); 2198 if (xdp_flags & ~XDP_FLAGS_MASK) { 2199 + err = -EINVAL; 2200 + goto errout; 2201 + } 2202 + if ((xdp_flags & XDP_FLAGS_SKB_MODE) && 2203 + (xdp_flags & XDP_FLAGS_DRV_MODE)) { 2204 err = -EINVAL; 2205 goto errout; 2206 } ··· 3452 err = br_dev->netdev_ops->ndo_bridge_getlink( 3453 skb, portid, seq, dev, 3454 filter_mask, NLM_F_MULTI); 3455 + if (err < 0 && err != -EOPNOTSUPP) { 3456 + if (likely(skb->len)) 3457 + break; 3458 + 3459 + goto out_err; 3460 + } 3461 } 3462 idx++; 3463 } ··· 3464 seq, dev, 3465 filter_mask, 3466 NLM_F_MULTI); 3467 + if (err < 0 && err != -EOPNOTSUPP) { 3468 + if (likely(skb->len)) 3469 + break; 3470 + 3471 + goto out_err; 3472 + } 3473 } 3474 idx++; 3475 } 3476 } 3477 + err = skb->len; 3478 + out_err: 3479 rcu_read_unlock(); 3480 cb->args[0] = idx; 3481 3482 + return err; 3483 } 3484 3485 static inline size_t bridge_nlmsg_size(void)
+8 -15
net/core/sock.c
··· 139 140 #include <trace/events/sock.h> 141 142 - #ifdef CONFIG_INET 143 #include <net/tcp.h> 144 - #endif 145 - 146 #include <net/busy_poll.h> 147 148 static DEFINE_MUTEX(proto_list_mutex); ··· 1800 * delay queue. We want to allow the owner socket to send more 1801 * packets, as if they were already TX completed by a typical driver. 1802 * But we also want to keep skb->sk set because some packet schedulers 1803 - * rely on it (sch_fq for example). So we set skb->truesize to a small 1804 - * amount (1) and decrease sk_wmem_alloc accordingly. 1805 */ 1806 void skb_orphan_partial(struct sk_buff *skb) 1807 { 1808 - /* If this skb is a TCP pure ACK or already went here, 1809 - * we have nothing to do. 2 is already a very small truesize. 1810 - */ 1811 - if (skb->truesize <= 2) 1812 return; 1813 1814 - /* TCP stack sets skb->ooo_okay based on sk_wmem_alloc, 1815 - * so we do not completely orphan skb, but transfert all 1816 - * accounted bytes but one, to avoid unexpected reorders. 1817 - */ 1818 if (skb->destructor == sock_wfree 1819 #ifdef CONFIG_INET 1820 || skb->destructor == tcp_wfree 1821 #endif 1822 ) { 1823 - atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc); 1824 - skb->truesize = 1; 1825 } else { 1826 skb_orphan(skb); 1827 }
··· 139 140 #include <trace/events/sock.h> 141 142 #include <net/tcp.h> 143 #include <net/busy_poll.h> 144 145 static DEFINE_MUTEX(proto_list_mutex); ··· 1803 * delay queue. We want to allow the owner socket to send more 1804 * packets, as if they were already TX completed by a typical driver. 1805 * But we also want to keep skb->sk set because some packet schedulers 1806 + * rely on it (sch_fq for example). 1807 */ 1808 void skb_orphan_partial(struct sk_buff *skb) 1809 { 1810 + if (skb_is_tcp_pure_ack(skb)) 1811 return; 1812 1813 if (skb->destructor == sock_wfree 1814 #ifdef CONFIG_INET 1815 || skb->destructor == tcp_wfree 1816 #endif 1817 ) { 1818 + struct sock *sk = skb->sk; 1819 + 1820 + if (atomic_inc_not_zero(&sk->sk_refcnt)) { 1821 + atomic_sub(skb->truesize, &sk->sk_wmem_alloc); 1822 + skb->destructor = sock_efree; 1823 + } 1824 } else { 1825 skb_orphan(skb); 1826 }
+6
net/dccp/ipv6.c
··· 426 newsk->sk_backlog_rcv = dccp_v4_do_rcv; 427 newnp->pktoptions = NULL; 428 newnp->opt = NULL; 429 newnp->mcast_oif = inet6_iif(skb); 430 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; 431 ··· 493 /* Clone RX bits */ 494 newnp->rxopt.all = np->rxopt.all; 495 496 newnp->pktoptions = NULL; 497 newnp->opt = NULL; 498 newnp->mcast_oif = inet6_iif(skb);
··· 426 newsk->sk_backlog_rcv = dccp_v4_do_rcv; 427 newnp->pktoptions = NULL; 428 newnp->opt = NULL; 429 + newnp->ipv6_mc_list = NULL; 430 + newnp->ipv6_ac_list = NULL; 431 + newnp->ipv6_fl_list = NULL; 432 newnp->mcast_oif = inet6_iif(skb); 433 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; 434 ··· 490 /* Clone RX bits */ 491 newnp->rxopt.all = np->rxopt.all; 492 493 + newnp->ipv6_mc_list = NULL; 494 + newnp->ipv6_ac_list = NULL; 495 + newnp->ipv6_fl_list = NULL; 496 newnp->pktoptions = NULL; 497 newnp->opt = NULL; 498 newnp->mcast_oif = inet6_iif(skb);
+14 -2
net/ipv4/arp.c
··· 653 unsigned char *arp_ptr; 654 struct rtable *rt; 655 unsigned char *sha; 656 __be32 sip, tip; 657 u16 dev_type = dev->type; 658 int addr_type; ··· 725 break; 726 #endif 727 default: 728 arp_ptr += dev->addr_len; 729 } 730 memcpy(&tip, arp_ptr, 4); ··· 844 It is possible, that this option should be enabled for some 845 devices (strip is candidate) 846 */ 847 - is_garp = arp->ar_op == htons(ARPOP_REQUEST) && tip == sip && 848 - addr_type == RTN_UNICAST; 849 850 if (!n && 851 ((arp->ar_op == htons(ARPOP_REPLY) &&
··· 653 unsigned char *arp_ptr; 654 struct rtable *rt; 655 unsigned char *sha; 656 + unsigned char *tha = NULL; 657 __be32 sip, tip; 658 u16 dev_type = dev->type; 659 int addr_type; ··· 724 break; 725 #endif 726 default: 727 + tha = arp_ptr; 728 arp_ptr += dev->addr_len; 729 } 730 memcpy(&tip, arp_ptr, 4); ··· 842 It is possible, that this option should be enabled for some 843 devices (strip is candidate) 844 */ 845 + is_garp = tip == sip && addr_type == RTN_UNICAST; 846 + 847 + /* Unsolicited ARP _replies_ also require target hwaddr to be 848 + * the same as source. 849 + */ 850 + if (is_garp && arp->ar_op == htons(ARPOP_REPLY)) 851 + is_garp = 852 + /* IPv4 over IEEE 1394 doesn't provide target 853 + * hardware address field in its ARP payload. 854 + */ 855 + tha && 856 + !memcmp(tha, sha, dev->addr_len); 857 858 if (!n && 859 ((arp->ar_op == htons(ARPOP_REPLY) &&
+11 -4
net/ipv4/fib_frontend.c
··· 763 unsigned int e = 0, s_e; 764 struct fib_table *tb; 765 struct hlist_head *head; 766 - int dumped = 0; 767 768 if (nlmsg_len(cb->nlh) >= sizeof(struct rtmsg) && 769 ((struct rtmsg *) nlmsg_data(cb->nlh))->rtm_flags & RTM_F_CLONED) ··· 783 if (dumped) 784 memset(&cb->args[2], 0, sizeof(cb->args) - 785 2 * sizeof(cb->args[0])); 786 - if (fib_table_dump(tb, skb, cb) < 0) 787 - goto out; 788 dumped = 1; 789 next: 790 e++; 791 } 792 } 793 out: 794 rcu_read_unlock(); 795 796 cb->args[1] = e; 797 cb->args[0] = h; 798 799 - return skb->len; 800 } 801 802 /* Prepare and feed intra-kernel routing request.
··· 763 unsigned int e = 0, s_e; 764 struct fib_table *tb; 765 struct hlist_head *head; 766 + int dumped = 0, err; 767 768 if (nlmsg_len(cb->nlh) >= sizeof(struct rtmsg) && 769 ((struct rtmsg *) nlmsg_data(cb->nlh))->rtm_flags & RTM_F_CLONED) ··· 783 if (dumped) 784 memset(&cb->args[2], 0, sizeof(cb->args) - 785 2 * sizeof(cb->args[0])); 786 + err = fib_table_dump(tb, skb, cb); 787 + if (err < 0) { 788 + if (likely(skb->len)) 789 + goto out; 790 + 791 + goto out_err; 792 + } 793 dumped = 1; 794 next: 795 e++; 796 } 797 } 798 out: 799 + err = skb->len; 800 + out_err: 801 rcu_read_unlock(); 802 803 cb->args[1] = e; 804 cb->args[0] = h; 805 806 + return err; 807 } 808 809 /* Prepare and feed intra-kernel routing request.
+14 -12
net/ipv4/fib_trie.c
··· 1983 1984 /* rcu_read_lock is hold by caller */ 1985 hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) { 1986 if (i < s_i) { 1987 i++; 1988 continue; ··· 1995 continue; 1996 } 1997 1998 - if (fib_dump_info(skb, NETLINK_CB(cb->skb).portid, 1999 - cb->nlh->nlmsg_seq, 2000 - RTM_NEWROUTE, 2001 - tb->tb_id, 2002 - fa->fa_type, 2003 - xkey, 2004 - KEYLENGTH - fa->fa_slen, 2005 - fa->fa_tos, 2006 - fa->fa_info, NLM_F_MULTI) < 0) { 2007 cb->args[4] = i; 2008 - return -1; 2009 } 2010 i++; 2011 } ··· 2024 t_key key = cb->args[3]; 2025 2026 while ((l = leaf_walk_rcu(&tp, key)) != NULL) { 2027 - if (fn_trie_dump_leaf(l, tb, skb, cb) < 0) { 2028 cb->args[3] = key; 2029 cb->args[2] = count; 2030 - return -1; 2031 } 2032 2033 ++count;
··· 1983 1984 /* rcu_read_lock is hold by caller */ 1985 hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) { 1986 + int err; 1987 + 1988 if (i < s_i) { 1989 i++; 1990 continue; ··· 1993 continue; 1994 } 1995 1996 + err = fib_dump_info(skb, NETLINK_CB(cb->skb).portid, 1997 + cb->nlh->nlmsg_seq, RTM_NEWROUTE, 1998 + tb->tb_id, fa->fa_type, 1999 + xkey, KEYLENGTH - fa->fa_slen, 2000 + fa->fa_tos, fa->fa_info, NLM_F_MULTI); 2001 + if (err < 0) { 2002 cb->args[4] = i; 2003 + return err; 2004 } 2005 i++; 2006 } ··· 2025 t_key key = cb->args[3]; 2026 2027 while ((l = leaf_walk_rcu(&tp, key)) != NULL) { 2028 + int err; 2029 + 2030 + err = fn_trie_dump_leaf(l, tb, skb, cb); 2031 + if (err < 0) { 2032 cb->args[3] = key; 2033 cb->args[2] = count; 2034 + return err; 2035 } 2036 2037 ++count;
+16 -2
net/ipv4/ipmr.c
··· 1980 struct net *net = dev_net(skb->dev); 1981 int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL; 1982 struct mr_table *mrt; 1983 1984 /* Packet is looped back after forward, it should not be 1985 * forwarded second time, but still can be delivered locally. ··· 2031 /* already under rcu_read_lock() */ 2032 cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr); 2033 if (!cache) { 2034 - int vif = ipmr_find_vif(mrt, skb->dev); 2035 2036 if (vif >= 0) 2037 cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr, ··· 2051 } 2052 2053 read_lock(&mrt_lock); 2054 - vif = ipmr_find_vif(mrt, skb->dev); 2055 if (vif >= 0) { 2056 int err2 = ipmr_cache_unresolved(mrt, vif, skb); 2057 read_unlock(&mrt_lock);
··· 1980 struct net *net = dev_net(skb->dev); 1981 int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL; 1982 struct mr_table *mrt; 1983 + struct net_device *dev; 1984 + 1985 + /* skb->dev passed in is the loX master dev for vrfs. 1986 + * As there are no vifs associated with loopback devices, 1987 + * get the proper interface that does have a vif associated with it. 1988 + */ 1989 + dev = skb->dev; 1990 + if (netif_is_l3_master(skb->dev)) { 1991 + dev = dev_get_by_index_rcu(net, IPCB(skb)->iif); 1992 + if (!dev) { 1993 + kfree_skb(skb); 1994 + return -ENODEV; 1995 + } 1996 + } 1997 1998 /* Packet is looped back after forward, it should not be 1999 * forwarded second time, but still can be delivered locally. ··· 2017 /* already under rcu_read_lock() */ 2018 cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr); 2019 if (!cache) { 2020 + int vif = ipmr_find_vif(mrt, dev); 2021 2022 if (vif >= 0) 2023 cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr, ··· 2037 } 2038 2039 read_lock(&mrt_lock); 2040 + vif = ipmr_find_vif(mrt, dev); 2041 if (vif >= 0) { 2042 int err2 = ipmr_cache_unresolved(mrt, vif, skb); 2043 read_unlock(&mrt_lock);
+6 -5
net/ipv4/tcp_input.c
··· 1179 */ 1180 if (pkt_len > mss) { 1181 unsigned int new_len = (pkt_len / mss) * mss; 1182 - if (!in_sack && new_len < pkt_len) { 1183 new_len += mss; 1184 - if (new_len >= skb->len) 1185 - return 0; 1186 - } 1187 pkt_len = new_len; 1188 } 1189 err = tcp_fragment(sk, skb, pkt_len, mss, GFP_ATOMIC); 1190 if (err < 0) 1191 return err; ··· 3190 int delta; 3191 3192 /* Non-retransmitted hole got filled? That's reordering */ 3193 - if (reord < prior_fackets) 3194 tcp_update_reordering(sk, tp->fackets_out - reord, 0); 3195 3196 delta = tcp_is_fack(tp) ? pkts_acked :
··· 1179 */ 1180 if (pkt_len > mss) { 1181 unsigned int new_len = (pkt_len / mss) * mss; 1182 + if (!in_sack && new_len < pkt_len) 1183 new_len += mss; 1184 pkt_len = new_len; 1185 } 1186 + 1187 + if (pkt_len >= skb->len && !in_sack) 1188 + return 0; 1189 + 1190 err = tcp_fragment(sk, skb, pkt_len, mss, GFP_ATOMIC); 1191 if (err < 0) 1192 return err; ··· 3189 int delta; 3190 3191 /* Non-retransmitted hole got filled? That's reordering */ 3192 + if (reord < prior_fackets && reord <= tp->fackets_out) 3193 tcp_update_reordering(sk, tp->fackets_out - reord, 0); 3194 3195 delta = tcp_is_fack(tp) ? pkts_acked :
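The reordering in the tcp_input.c hunk is easiest to see with concrete numbers; the figures below are only an illustration traced through the code shown here, not taken from the patch:

    mss = 2000, skb->len = 1800, pkt_len = 1800, in_sack = false

    before: pkt_len is not > mss, so the rounding block (and the early
            return it contained) is skipped entirely, and
            tcp_fragment(sk, skb, 1800, ...) is asked to split off a
            zero-byte tail from the skb.
    after:  the "pkt_len >= skb->len && !in_sack" test now runs whether
            or not the rounding block was taken, so the function returns
            0 and tcp_fragment() is never called with pkt_len equal to
            skb->len.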
+2 -2
net/ipv4/udp.c
··· 1612 udp_lib_rehash(sk, new_hash); 1613 } 1614 1615 - int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 1616 { 1617 int rc; 1618 ··· 1657 * Note that in the success and error cases, the skb is assumed to 1658 * have either been requeued or freed. 1659 */ 1660 - int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 1661 { 1662 struct udp_sock *up = udp_sk(sk); 1663 int is_udplite = IS_UDPLITE(sk);
··· 1612 udp_lib_rehash(sk, new_hash); 1613 } 1614 1615 + static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 1616 { 1617 int rc; 1618 ··· 1657 * Note that in the success and error cases, the skb is assumed to 1658 * have either been requeued or freed. 1659 */ 1660 + static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 1661 { 1662 struct udp_sock *up = udp_sk(sk); 1663 int is_udplite = IS_UDPLITE(sk);
-1
net/ipv4/udp_impl.h
··· 25 int flags, int *addr_len); 26 int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, 27 int flags); 28 - int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); 29 void udp_destroy_sock(struct sock *sk); 30 31 #ifdef CONFIG_PROC_FS
··· 25 int flags, int *addr_len); 26 int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, 27 int flags); 28 void udp_destroy_sock(struct sock *sk); 29 30 #ifdef CONFIG_PROC_FS
+4 -1
net/ipv6/addrconf.c
··· 1022 INIT_HLIST_NODE(&ifa->addr_lst); 1023 ifa->scope = scope; 1024 ifa->prefix_len = pfxlen; 1025 - ifa->flags = flags | IFA_F_TENTATIVE; 1026 ifa->valid_lft = valid_lft; 1027 ifa->prefered_lft = prefered_lft; 1028 ifa->cstamp = ifa->tstamp = jiffies;
··· 1022 INIT_HLIST_NODE(&ifa->addr_lst); 1023 ifa->scope = scope; 1024 ifa->prefix_len = pfxlen; 1025 + ifa->flags = flags; 1026 + /* No need to add the TENTATIVE flag for addresses with NODAD */ 1027 + if (!(flags & IFA_F_NODAD)) 1028 + ifa->flags |= IFA_F_TENTATIVE; 1029 ifa->valid_lft = valid_lft; 1030 ifa->prefered_lft = prefered_lft; 1031 ifa->cstamp = ifa->tstamp = jiffies;
+4 -3
net/ipv6/ip6_offload.c
··· 63 const struct net_offload *ops; 64 int proto; 65 struct frag_hdr *fptr; 66 - unsigned int unfrag_ip6hlen; 67 unsigned int payload_len; 68 u8 *prevhdr; 69 int offset = 0; ··· 115 skb->network_header = (u8 *)ipv6h - skb->head; 116 117 if (udpfrag) { 118 - unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr); 119 - fptr = (struct frag_hdr *)((u8 *)ipv6h + unfrag_ip6hlen); 120 fptr->frag_off = htons(offset); 121 if (skb->next) 122 fptr->frag_off |= htons(IP6_MF);
··· 63 const struct net_offload *ops; 64 int proto; 65 struct frag_hdr *fptr; 66 unsigned int payload_len; 67 u8 *prevhdr; 68 int offset = 0; ··· 116 skb->network_header = (u8 *)ipv6h - skb->head; 117 118 if (udpfrag) { 119 + int err = ip6_find_1stfragopt(skb, &prevhdr); 120 + if (err < 0) 121 + return ERR_PTR(err); 122 + fptr = (struct frag_hdr *)((u8 *)ipv6h + err); 123 fptr->frag_off = htons(offset); 124 if (skb->next) 125 fptr->frag_off |= htons(IP6_MF);
+4 -1
net/ipv6/ip6_output.c
··· 597 int ptr, offset = 0, err = 0; 598 u8 *prevhdr, nexthdr = 0; 599 600 - hlen = ip6_find_1stfragopt(skb, &prevhdr); 601 nexthdr = *prevhdr; 602 603 mtu = ip6_skb_dst_mtu(skb);
··· 597 int ptr, offset = 0, err = 0; 598 u8 *prevhdr, nexthdr = 0; 599 600 + err = ip6_find_1stfragopt(skb, &prevhdr); 601 + if (err < 0) 602 + goto fail; 603 + hlen = err; 604 nexthdr = *prevhdr; 605 606 mtu = ip6_skb_dst_mtu(skb);
+8 -6
net/ipv6/output_core.c
··· 79 int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) 80 { 81 u16 offset = sizeof(struct ipv6hdr); 82 - struct ipv6_opt_hdr *exthdr = 83 - (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1); 84 unsigned int packet_len = skb_tail_pointer(skb) - 85 skb_network_header(skb); 86 int found_rhdr = 0; 87 *nexthdr = &ipv6_hdr(skb)->nexthdr; 88 89 - while (offset + 1 <= packet_len) { 90 91 switch (**nexthdr) { 92 ··· 106 return offset; 107 } 108 109 - offset += ipv6_optlen(exthdr); 110 - *nexthdr = &exthdr->nexthdr; 111 exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) + 112 offset); 113 } 114 115 - return offset; 116 } 117 EXPORT_SYMBOL(ip6_find_1stfragopt); 118
··· 79 int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) 80 { 81 u16 offset = sizeof(struct ipv6hdr); 82 unsigned int packet_len = skb_tail_pointer(skb) - 83 skb_network_header(skb); 84 int found_rhdr = 0; 85 *nexthdr = &ipv6_hdr(skb)->nexthdr; 86 87 + while (offset <= packet_len) { 88 + struct ipv6_opt_hdr *exthdr; 89 90 switch (**nexthdr) { 91 ··· 107 return offset; 108 } 109 110 + if (offset + sizeof(struct ipv6_opt_hdr) > packet_len) 111 + return -EINVAL; 112 + 113 exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) + 114 offset); 115 + offset += ipv6_optlen(exthdr); 116 + *nexthdr = &exthdr->nexthdr; 117 } 118 119 + return -EINVAL; 120 } 121 EXPORT_SYMBOL(ip6_find_1stfragopt); 122
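With this change ip6_find_1stfragopt() returns a negative errno instead of silently walking past the end of a malformed extension-header chain, so every caller must check the sign of the result before using it as an offset, exactly as the ip6_output.c, ip6_offload.c and udp_offload.c hunks do. A short sketch of the new calling convention:

            u8 *prevhdr;
            int hlen;

            hlen = ip6_find_1stfragopt(skb, &prevhdr);
            if (hlen < 0)
                    return hlen;    /* -EINVAL: malformed extension headers */
            /* hlen is, as before, the length of the unfragmentable part */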
+2
net/ipv6/tcp_ipv6.c
··· 1062 newtp->af_specific = &tcp_sock_ipv6_mapped_specific; 1063 #endif 1064 1065 newnp->ipv6_ac_list = NULL; 1066 newnp->ipv6_fl_list = NULL; 1067 newnp->pktoptions = NULL; ··· 1132 First: no IPv4 options. 1133 */ 1134 newinet->inet_opt = NULL; 1135 newnp->ipv6_ac_list = NULL; 1136 newnp->ipv6_fl_list = NULL; 1137
··· 1062 newtp->af_specific = &tcp_sock_ipv6_mapped_specific; 1063 #endif 1064 1065 + newnp->ipv6_mc_list = NULL; 1066 newnp->ipv6_ac_list = NULL; 1067 newnp->ipv6_fl_list = NULL; 1068 newnp->pktoptions = NULL; ··· 1131 First: no IPv4 options. 1132 */ 1133 newinet->inet_opt = NULL; 1134 + newnp->ipv6_mc_list = NULL; 1135 newnp->ipv6_ac_list = NULL; 1136 newnp->ipv6_fl_list = NULL; 1137
+2 -2
net/ipv6/udp.c
··· 526 return; 527 } 528 529 - int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 530 { 531 int rc; 532 ··· 569 } 570 EXPORT_SYMBOL(udpv6_encap_enable); 571 572 - int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 573 { 574 struct udp_sock *up = udp_sk(sk); 575 int is_udplite = IS_UDPLITE(sk);
··· 526 return; 527 } 528 529 + static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 530 { 531 int rc; 532 ··· 569 } 570 EXPORT_SYMBOL(udpv6_encap_enable); 571 572 + static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 573 { 574 struct udp_sock *up = udp_sk(sk); 575 int is_udplite = IS_UDPLITE(sk);
-1
net/ipv6/udp_impl.h
··· 26 int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len); 27 int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, 28 int flags, int *addr_len); 29 - int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); 30 void udpv6_destroy_sock(struct sock *sk); 31 32 #ifdef CONFIG_PROC_FS
··· 26 int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len); 27 int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, 28 int flags, int *addr_len); 29 void udpv6_destroy_sock(struct sock *sk); 30 31 #ifdef CONFIG_PROC_FS
+5 -1
net/ipv6/udp_offload.c
··· 29 u8 frag_hdr_sz = sizeof(struct frag_hdr); 30 __wsum csum; 31 int tnl_hlen; 32 33 mss = skb_shinfo(skb)->gso_size; 34 if (unlikely(skb->len <= mss)) ··· 91 /* Find the unfragmentable header and shift it left by frag_hdr_sz 92 * bytes to insert fragment header. 93 */ 94 - unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr); 95 nexthdr = *prevhdr; 96 *prevhdr = NEXTHDR_FRAGMENT; 97 unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) +
··· 29 u8 frag_hdr_sz = sizeof(struct frag_hdr); 30 __wsum csum; 31 int tnl_hlen; 32 + int err; 33 34 mss = skb_shinfo(skb)->gso_size; 35 if (unlikely(skb->len <= mss)) ··· 90 /* Find the unfragmentable header and shift it left by frag_hdr_sz 91 * bytes to insert fragment header. 92 */ 93 + err = ip6_find_1stfragopt(skb, &prevhdr); 94 + if (err < 0) 95 + return ERR_PTR(err); 96 + unfrag_ip6hlen = err; 97 nexthdr = *prevhdr; 98 *prevhdr = NEXTHDR_FRAGMENT; 99 unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) +
+7 -7
net/packet/af_packet.c
··· 2658 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); 2659 } 2660 2661 - sockc.tsflags = po->sk.sk_tsflags; 2662 - if (msg->msg_controllen) { 2663 - err = sock_cmsg_send(&po->sk, msg, &sockc); 2664 - if (unlikely(err)) 2665 - goto out; 2666 - } 2667 - 2668 err = -ENXIO; 2669 if (unlikely(dev == NULL)) 2670 goto out; 2671 err = -ENETDOWN; 2672 if (unlikely(!(dev->flags & IFF_UP))) 2673 goto out_put; 2674 2675 if (po->sk.sk_socket->type == SOCK_RAW) 2676 reserve = dev->hard_header_len;
··· 2658 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); 2659 } 2660 2661 err = -ENXIO; 2662 if (unlikely(dev == NULL)) 2663 goto out; 2664 err = -ENETDOWN; 2665 if (unlikely(!(dev->flags & IFF_UP))) 2666 goto out_put; 2667 + 2668 + sockc.tsflags = po->sk.sk_tsflags; 2669 + if (msg->msg_controllen) { 2670 + err = sock_cmsg_send(&po->sk, msg, &sockc); 2671 + if (unlikely(err)) 2672 + goto out_put; 2673 + } 2674 2675 if (po->sk.sk_socket->type == SOCK_RAW) 2676 reserve = dev->hard_header_len;
+6
net/sched/sch_api.c
··· 1831 if (!qdisc_dev(root)) 1832 return 0; 1833 1834 hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) { 1835 if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0) 1836 return -1;
··· 1831 if (!qdisc_dev(root)) 1832 return 0; 1833 1834 + if (tcm->tcm_parent) { 1835 + q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent)); 1836 + if (q && tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0) 1837 + return -1; 1838 + return 0; 1839 + } 1840 hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) { 1841 if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0) 1842 return -1;
+32 -17
net/sctp/ipv6.c
··· 240 struct sctp_bind_addr *bp; 241 struct ipv6_pinfo *np = inet6_sk(sk); 242 struct sctp_sockaddr_entry *laddr; 243 - union sctp_addr *baddr = NULL; 244 union sctp_addr *daddr = &t->ipaddr; 245 union sctp_addr dst_saddr; 246 struct in6_addr *final_p, final; 247 __u8 matchlen = 0; 248 - __u8 bmatchlen; 249 sctp_scope_t scope; 250 251 memset(fl6, 0, sizeof(struct flowi6)); ··· 310 */ 311 rcu_read_lock(); 312 list_for_each_entry_rcu(laddr, &bp->address_list, list) { 313 - if (!laddr->valid) 314 continue; 315 - if ((laddr->state == SCTP_ADDR_SRC) && 316 - (laddr->a.sa.sa_family == AF_INET6) && 317 - (scope <= sctp_scope(&laddr->a))) { 318 - bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a); 319 - if (!baddr || (matchlen < bmatchlen)) { 320 - baddr = &laddr->a; 321 - matchlen = bmatchlen; 322 - } 323 - } 324 - } 325 - if (baddr) { 326 - fl6->saddr = baddr->v6.sin6_addr; 327 - fl6->fl6_sport = baddr->v6.sin6_port; 328 final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final); 329 - dst = ip6_dst_lookup_flow(sk, fl6, final_p); 330 } 331 rcu_read_unlock(); 332 ··· 677 newnp = inet6_sk(newsk); 678 679 memcpy(newnp, np, sizeof(struct ipv6_pinfo)); 680 681 rcu_read_lock(); 682 opt = rcu_dereference(np->opt);
··· 240 struct sctp_bind_addr *bp; 241 struct ipv6_pinfo *np = inet6_sk(sk); 242 struct sctp_sockaddr_entry *laddr; 243 union sctp_addr *daddr = &t->ipaddr; 244 union sctp_addr dst_saddr; 245 struct in6_addr *final_p, final; 246 __u8 matchlen = 0; 247 sctp_scope_t scope; 248 249 memset(fl6, 0, sizeof(struct flowi6)); ··· 312 */ 313 rcu_read_lock(); 314 list_for_each_entry_rcu(laddr, &bp->address_list, list) { 315 + struct dst_entry *bdst; 316 + __u8 bmatchlen; 317 + 318 + if (!laddr->valid || 319 + laddr->state != SCTP_ADDR_SRC || 320 + laddr->a.sa.sa_family != AF_INET6 || 321 + scope > sctp_scope(&laddr->a)) 322 continue; 323 + 324 + fl6->saddr = laddr->a.v6.sin6_addr; 325 + fl6->fl6_sport = laddr->a.v6.sin6_port; 326 final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final); 327 + bdst = ip6_dst_lookup_flow(sk, fl6, final_p); 328 + 329 + if (!IS_ERR(bdst) && 330 + ipv6_chk_addr(dev_net(bdst->dev), 331 + &laddr->a.v6.sin6_addr, bdst->dev, 1)) { 332 + if (!IS_ERR_OR_NULL(dst)) 333 + dst_release(dst); 334 + dst = bdst; 335 + break; 336 + } 337 + 338 + bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a); 339 + if (matchlen > bmatchlen) 340 + continue; 341 + 342 + if (!IS_ERR_OR_NULL(dst)) 343 + dst_release(dst); 344 + dst = bdst; 345 + matchlen = bmatchlen; 346 } 347 rcu_read_unlock(); 348 ··· 665 newnp = inet6_sk(newsk); 666 667 memcpy(newnp, np, sizeof(struct ipv6_pinfo)); 668 + newnp->ipv6_mc_list = NULL; 669 + newnp->ipv6_ac_list = NULL; 670 + newnp->ipv6_fl_list = NULL; 671 672 rcu_read_lock(); 673 opt = rcu_dereference(np->opt);
+4
net/smc/Kconfig
··· 8 The Linux implementation of the SMC-R solution is designed as 9 a separate socket family SMC. 10 11 Select this option if you want to run SMC socket applications 12 13 config SMC_DIAG
··· 8 The Linux implementation of the SMC-R solution is designed as 9 a separate socket family SMC. 10 11 + Warning: SMC will expose all memory for remote reads and writes 12 + once a connection is established. Don't enable this option except 13 + for tightly controlled lab environment. 14 + 15 Select this option if you want to run SMC socket applications 16 17 config SMC_DIAG
+2 -2
net/smc/smc_clc.c
··· 204 memcpy(&cclc.lcl.mac, &link->smcibdev->mac[link->ibport - 1], ETH_ALEN); 205 hton24(cclc.qpn, link->roce_qp->qp_num); 206 cclc.rmb_rkey = 207 - htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey); 208 cclc.conn_idx = 1; /* for now: 1 RMB = 1 RMBE */ 209 cclc.rmbe_alert_token = htonl(conn->alert_token_local); 210 cclc.qp_mtu = min(link->path_mtu, link->peer_mtu); ··· 256 memcpy(&aclc.lcl.mac, link->smcibdev->mac[link->ibport - 1], ETH_ALEN); 257 hton24(aclc.qpn, link->roce_qp->qp_num); 258 aclc.rmb_rkey = 259 - htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey); 260 aclc.conn_idx = 1; /* as long as 1 RMB = 1 RMBE */ 261 aclc.rmbe_alert_token = htonl(conn->alert_token_local); 262 aclc.qp_mtu = link->path_mtu;
··· 204 memcpy(&cclc.lcl.mac, &link->smcibdev->mac[link->ibport - 1], ETH_ALEN); 205 hton24(cclc.qpn, link->roce_qp->qp_num); 206 cclc.rmb_rkey = 207 + htonl(conn->rmb_desc->rkey[SMC_SINGLE_LINK]); 208 cclc.conn_idx = 1; /* for now: 1 RMB = 1 RMBE */ 209 cclc.rmbe_alert_token = htonl(conn->alert_token_local); 210 cclc.qp_mtu = min(link->path_mtu, link->peer_mtu); ··· 256 memcpy(&aclc.lcl.mac, link->smcibdev->mac[link->ibport - 1], ETH_ALEN); 257 hton24(aclc.qpn, link->roce_qp->qp_num); 258 aclc.rmb_rkey = 259 + htonl(conn->rmb_desc->rkey[SMC_SINGLE_LINK]); 260 aclc.conn_idx = 1; /* as long as 1 RMB = 1 RMBE */ 261 aclc.rmbe_alert_token = htonl(conn->alert_token_local); 262 aclc.qp_mtu = link->path_mtu;
+3 -13
net/smc/smc_core.c
··· 613 rmb_desc = NULL; 614 continue; /* if mapping failed, try smaller one */ 615 } 616 - rc = smc_ib_get_memory_region(lgr->lnk[SMC_SINGLE_LINK].roce_pd, 617 - IB_ACCESS_REMOTE_WRITE | 618 - IB_ACCESS_LOCAL_WRITE, 619 - &rmb_desc->mr_rx[SMC_SINGLE_LINK]); 620 - if (rc) { 621 - smc_ib_buf_unmap(lgr->lnk[SMC_SINGLE_LINK].smcibdev, 622 - tmp_bufsize, rmb_desc, 623 - DMA_FROM_DEVICE); 624 - kfree(rmb_desc->cpu_addr); 625 - kfree(rmb_desc); 626 - rmb_desc = NULL; 627 - continue; 628 - } 629 rmb_desc->used = 1; 630 write_lock_bh(&lgr->rmbs_lock); 631 list_add(&rmb_desc->list, ··· 657 658 for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) { 659 if ((lgr->rtokens[i][SMC_SINGLE_LINK].rkey == rkey) && 660 test_bit(i, lgr->rtokens_used_mask)) { 661 conn->rtoken_idx = i; 662 return 0;
··· 613 rmb_desc = NULL; 614 continue; /* if mapping failed, try smaller one */ 615 } 616 + rmb_desc->rkey[SMC_SINGLE_LINK] = 617 + lgr->lnk[SMC_SINGLE_LINK].roce_pd->unsafe_global_rkey; 618 rmb_desc->used = 1; 619 write_lock_bh(&lgr->rmbs_lock); 620 list_add(&rmb_desc->list, ··· 668 669 for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) { 670 if ((lgr->rtokens[i][SMC_SINGLE_LINK].rkey == rkey) && 671 + (lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr == dma_addr) && 672 test_bit(i, lgr->rtokens_used_mask)) { 673 conn->rtoken_idx = i; 674 return 0;
+1 -1
net/smc/smc_core.h
··· 93 u64 dma_addr[SMC_LINKS_PER_LGR_MAX]; 94 /* mapped address of buffer */ 95 void *cpu_addr; /* virtual address of buffer */ 96 - struct ib_mr *mr_rx[SMC_LINKS_PER_LGR_MAX]; 97 /* for rmb only: 98 * rkey provided to peer 99 */
··· 93 u64 dma_addr[SMC_LINKS_PER_LGR_MAX]; 94 /* mapped address of buffer */ 95 void *cpu_addr; /* virtual address of buffer */ 96 + u32 rkey[SMC_LINKS_PER_LGR_MAX]; 97 /* for rmb only: 98 * rkey provided to peer 99 */
+2 -19
net/smc/smc_ib.c
··· 37 * identifier 38 */ 39 40 - int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags, 41 - struct ib_mr **mr) 42 - { 43 - int rc; 44 - 45 - if (*mr) 46 - return 0; /* already done */ 47 - 48 - /* obtain unique key - 49 - * next invocation of get_dma_mr returns a different key! 50 - */ 51 - *mr = pd->device->get_dma_mr(pd, access_flags); 52 - rc = PTR_ERR_OR_ZERO(*mr); 53 - if (IS_ERR(*mr)) 54 - *mr = NULL; 55 - return rc; 56 - } 57 - 58 static int smc_ib_modify_qp_init(struct smc_link *lnk) 59 { 60 struct ib_qp_attr qp_attr; ··· 192 { 193 int rc; 194 195 - lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, 0); 196 rc = PTR_ERR_OR_ZERO(lnk->roce_pd); 197 if (IS_ERR(lnk->roce_pd)) 198 lnk->roce_pd = NULL;
··· 37 * identifier 38 */ 39 40 static int smc_ib_modify_qp_init(struct smc_link *lnk) 41 { 42 struct ib_qp_attr qp_attr; ··· 210 { 211 int rc; 212 213 + lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, 214 + IB_PD_UNSAFE_GLOBAL_RKEY); 215 rc = PTR_ERR_OR_ZERO(lnk->roce_pd); 216 if (IS_ERR(lnk->roce_pd)) 217 lnk->roce_pd = NULL;
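The removal of smc_ib_get_memory_region() and the switch to ib_alloc_pd(..., IB_PD_UNSAFE_GLOBAL_RKEY) are two sides of the same change: instead of registering a DMA MR per link, the protection domain now carries a single device-wide rkey that the CLC code above hands to the peer. Roughly, with ibdev and rkey standing in for the surrounding variables:

            struct ib_pd *pd;
            u32 rkey;

            pd = ib_alloc_pd(ibdev, IB_PD_UNSAFE_GLOBAL_RKEY);
            if (IS_ERR(pd))
                    return PTR_ERR(pd);
            /* one rkey granting the remote side read/write access to any
             * memory the device can DMA to - the reason for the new
             * Kconfig warning above
             */
            rkey = pd->unsafe_global_rkey;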
-2
net/smc/smc_ib.h
··· 61 int smc_ib_create_protection_domain(struct smc_link *lnk); 62 void smc_ib_destroy_queue_pair(struct smc_link *lnk); 63 int smc_ib_create_queue_pair(struct smc_link *lnk); 64 - int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags, 65 - struct ib_mr **mr); 66 int smc_ib_ready_link(struct smc_link *lnk); 67 int smc_ib_modify_qp_rts(struct smc_link *lnk); 68 int smc_ib_modify_qp_reset(struct smc_link *lnk);
··· 61 int smc_ib_create_protection_domain(struct smc_link *lnk); 62 void smc_ib_destroy_queue_pair(struct smc_link *lnk); 63 int smc_ib_create_queue_pair(struct smc_link *lnk); 64 int smc_ib_ready_link(struct smc_link *lnk); 65 int smc_ib_modify_qp_rts(struct smc_link *lnk); 66 int smc_ib_modify_qp_reset(struct smc_link *lnk);
+19 -19
net/tipc/socket.c
··· 362 return 0; 363 } 364 365 - #define tipc_wait_for_cond(sock_, timeout_, condition_) \ 366 - ({ \ 367 - int rc_ = 0; \ 368 - int done_ = 0; \ 369 - \ 370 - while (!(condition_) && !done_) { \ 371 - struct sock *sk_ = sock->sk; \ 372 - DEFINE_WAIT_FUNC(wait_, woken_wake_function); \ 373 - \ 374 - rc_ = tipc_sk_sock_err(sock_, timeout_); \ 375 - if (rc_) \ 376 - break; \ 377 - prepare_to_wait(sk_sleep(sk_), &wait_, \ 378 - TASK_INTERRUPTIBLE); \ 379 - done_ = sk_wait_event(sk_, timeout_, \ 380 - (condition_), &wait_); \ 381 - remove_wait_queue(sk_sleep(sk_), &wait_); \ 382 - } \ 383 - rc_; \ 384 }) 385 386 /**
··· 362 return 0; 363 } 364 365 + #define tipc_wait_for_cond(sock_, timeo_, condition_) \ 366 + ({ \ 367 + struct sock *sk_; \ 368 + int rc_; \ 369 + \ 370 + while ((rc_ = !(condition_))) { \ 371 + DEFINE_WAIT_FUNC(wait_, woken_wake_function); \ 372 + sk_ = (sock_)->sk; \ 373 + rc_ = tipc_sk_sock_err((sock_), timeo_); \ 374 + if (rc_) \ 375 + break; \ 376 + prepare_to_wait(sk_sleep(sk_), &wait_, TASK_INTERRUPTIBLE); \ 377 + release_sock(sk_); \ 378 + *(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \ 379 + sched_annotate_sleep(); \ 380 + lock_sock(sk_); \ 381 + remove_wait_queue(sk_sleep(sk_), &wait_); \ 382 + } \ 383 + rc_; \ 384 }) 385 386 /**
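The rewritten macro now sleeps via wait_woken() with the socket lock dropped (release_sock()/lock_sock() bracket the wait), re-tests the condition with the lock held on every wakeup, and updates the timeout through the pointer the caller passes in. A hypothetical caller would therefore look roughly like this, assuming the usual struct socket *sock and struct msghdr *msg parameters; tx_ready() is only an illustrative condition:

            struct sock *sk = sock->sk;
            long timeout = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
            int rc;

            lock_sock(sk);
            rc = tipc_wait_for_cond(sock, &timeout, tx_ready(sk));
            if (rc)
                    goto exit;      /* socket error or expired timeout */
            /* condition holds and the socket lock is held again here */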
+16 -8
net/x25/af_x25.c
··· 1791 1792 static int __init x25_init(void) 1793 { 1794 - int rc = proto_register(&x25_proto, 0); 1795 1796 - if (rc != 0) 1797 goto out; 1798 1799 rc = sock_register(&x25_family_ops); 1800 - if (rc != 0) 1801 goto out_proto; 1802 1803 dev_add_pack(&x25_packet_type); 1804 1805 rc = register_netdevice_notifier(&x25_dev_notifier); 1806 - if (rc != 0) 1807 goto out_sock; 1808 1809 pr_info("Linux Version 0.2\n"); 1810 1811 - x25_register_sysctl(); 1812 - rc = x25_proc_init(); 1813 - if (rc != 0) 1814 - goto out_dev; 1815 out: 1816 return rc; 1817 out_dev: 1818 unregister_netdevice_notifier(&x25_dev_notifier); 1819 out_sock: 1820 sock_unregister(AF_X25); 1821 out_proto: 1822 proto_unregister(&x25_proto);
··· 1791 1792 static int __init x25_init(void) 1793 { 1794 + int rc; 1795 1796 + rc = proto_register(&x25_proto, 0); 1797 + if (rc) 1798 goto out; 1799 1800 rc = sock_register(&x25_family_ops); 1801 + if (rc) 1802 goto out_proto; 1803 1804 dev_add_pack(&x25_packet_type); 1805 1806 rc = register_netdevice_notifier(&x25_dev_notifier); 1807 + if (rc) 1808 goto out_sock; 1809 + 1810 + rc = x25_register_sysctl(); 1811 + if (rc) 1812 + goto out_dev; 1813 + 1814 + rc = x25_proc_init(); 1815 + if (rc) 1816 + goto out_sysctl; 1817 1818 pr_info("Linux Version 0.2\n"); 1819 1820 out: 1821 return rc; 1822 + out_sysctl: 1823 + x25_unregister_sysctl(); 1824 out_dev: 1825 unregister_netdevice_notifier(&x25_dev_notifier); 1826 out_sock: 1827 + dev_remove_pack(&x25_packet_type); 1828 sock_unregister(AF_X25); 1829 out_proto: 1830 proto_unregister(&x25_proto);
+4 -1
net/x25/sysctl_net_x25.c
··· 73 { }, 74 }; 75 76 - void __init x25_register_sysctl(void) 77 { 78 x25_table_header = register_net_sysctl(&init_net, "net/x25", x25_table); 79 } 80 81 void x25_unregister_sysctl(void)
··· 73 { }, 74 }; 75 76 + int __init x25_register_sysctl(void) 77 { 78 x25_table_header = register_net_sysctl(&init_net, "net/x25", x25_table); 79 + if (!x25_table_header) 80 + return -ENOMEM; 81 + return 0; 82 } 83 84 void x25_unregister_sysctl(void)
+3 -1
samples/bpf/cookie_uid_helper_example.c
··· 306 prog_attach_iptables(argv[2]); 307 if (cfg_test_traffic) { 308 if (signal(SIGINT, finish) == SIG_ERR) 309 - error(1, errno, "register handler failed"); 310 while (!test_finish) { 311 print_table(); 312 printf("\n");
··· 306 prog_attach_iptables(argv[2]); 307 if (cfg_test_traffic) { 308 if (signal(SIGINT, finish) == SIG_ERR) 309 + error(1, errno, "register SIGINT handler failed"); 310 + if (signal(SIGTERM, finish) == SIG_ERR) 311 + error(1, errno, "register SIGTERM handler failed"); 312 while (!test_finish) { 313 print_table(); 314 printf("\n");
+1
samples/bpf/offwaketime_user.c
··· 100 setrlimit(RLIMIT_MEMLOCK, &r); 101 102 signal(SIGINT, int_exit); 103 104 if (load_kallsyms()) { 105 printf("failed to process /proc/kallsyms\n");
··· 100 setrlimit(RLIMIT_MEMLOCK, &r); 101 102 signal(SIGINT, int_exit); 103 + signal(SIGTERM, int_exit); 104 105 if (load_kallsyms()) { 106 printf("failed to process /proc/kallsyms\n");
+1
samples/bpf/sampleip_user.c
··· 180 return 1; 181 } 182 signal(SIGINT, int_exit); 183 184 /* do sampling */ 185 printf("Sampling at %d Hertz for %d seconds. Ctrl-C also ends.\n",
··· 180 return 1; 181 } 182 signal(SIGINT, int_exit); 183 + signal(SIGTERM, int_exit); 184 185 /* do sampling */ 186 printf("Sampling at %d Hertz for %d seconds. Ctrl-C also ends.\n",
+1
samples/bpf/trace_event_user.c
··· 192 setrlimit(RLIMIT_MEMLOCK, &r); 193 194 signal(SIGINT, int_exit); 195 196 if (load_kallsyms()) { 197 printf("failed to process /proc/kallsyms\n");
··· 192 setrlimit(RLIMIT_MEMLOCK, &r); 193 194 signal(SIGINT, int_exit); 195 + signal(SIGTERM, int_exit); 196 197 if (load_kallsyms()) { 198 printf("failed to process /proc/kallsyms\n");
+1
samples/bpf/tracex2_user.c
··· 127 } 128 129 signal(SIGINT, int_exit); 130 131 /* start 'ping' in the background to have some kfree_skb events */ 132 f = popen("ping -c5 localhost", "r");
··· 127 } 128 129 signal(SIGINT, int_exit); 130 + signal(SIGTERM, int_exit); 131 132 /* start 'ping' in the background to have some kfree_skb events */ 133 f = popen("ping -c5 localhost", "r");
+7 -2
samples/bpf/xdp1_user.c
··· 62 fprintf(stderr, 63 "usage: %s [OPTS] IFINDEX\n\n" 64 "OPTS:\n" 65 - " -S use skb-mode\n", 66 prog); 67 } 68 69 int main(int argc, char **argv) 70 { 71 - const char *optstr = "S"; 72 char filename[256]; 73 int opt; 74 ··· 77 switch (opt) { 78 case 'S': 79 xdp_flags |= XDP_FLAGS_SKB_MODE; 80 break; 81 default: 82 usage(basename(argv[0])); ··· 106 } 107 108 signal(SIGINT, int_exit); 109 110 if (set_link_xdp_fd(ifindex, prog_fd[0], xdp_flags) < 0) { 111 printf("link set xdp fd failed\n");
··· 62 fprintf(stderr, 63 "usage: %s [OPTS] IFINDEX\n\n" 64 "OPTS:\n" 65 + " -S use skb-mode\n" 66 + " -N enforce native mode\n", 67 prog); 68 } 69 70 int main(int argc, char **argv) 71 { 72 + const char *optstr = "SN"; 73 char filename[256]; 74 int opt; 75 ··· 76 switch (opt) { 77 case 'S': 78 xdp_flags |= XDP_FLAGS_SKB_MODE; 79 + break; 80 + case 'N': 81 + xdp_flags |= XDP_FLAGS_DRV_MODE; 82 break; 83 default: 84 usage(basename(argv[0])); ··· 102 } 103 104 signal(SIGINT, int_exit); 105 + signal(SIGTERM, int_exit); 106 107 if (set_link_xdp_fd(ifindex, prog_fd[0], xdp_flags) < 0) { 108 printf("link set xdp fd failed\n");
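The new -N switch maps to XDP_FLAGS_DRV_MODE, the counterpart of -S/XDP_FLAGS_SKB_MODE; as the rtnetlink change earlier in this series shows, the kernel now rejects a request that sets both. A rough sketch of how a loader is expected to pick one mode (skb_mode and native_mode stand in for whatever option parsing the tool uses):

            __u32 xdp_flags = 0;

            if (skb_mode)                   /* generic XDP: works on any driver */
                    xdp_flags |= XDP_FLAGS_SKB_MODE;
            else if (native_mode)           /* fail rather than fall back to generic */
                    xdp_flags |= XDP_FLAGS_DRV_MODE;

            /* setting both flags would now get -EINVAL from the kernel */
            if (set_link_xdp_fd(ifindex, prog_fd[0], xdp_flags) < 0)
                    printf("link set xdp fd failed\n");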
+7 -1
samples/bpf/xdp_tx_iptunnel_user.c
··· 79 printf(" -m <dest-MAC> Used in sending the IP Tunneled pkt\n"); 80 printf(" -T <stop-after-X-seconds> Default: 0 (forever)\n"); 81 printf(" -P <IP-Protocol> Default is TCP\n"); 82 printf(" -h Display this help\n"); 83 } 84 ··· 140 { 141 unsigned char opt_flags[256] = {}; 142 unsigned int kill_after_s = 0; 143 - const char *optstr = "i:a:p:s:d:m:T:P:Sh"; 144 int min_port = 0, max_port = 0; 145 struct iptnl_info tnl = {}; 146 struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY}; ··· 208 case 'S': 209 xdp_flags |= XDP_FLAGS_SKB_MODE; 210 break; 211 default: 212 usage(argv[0]); 213 return 1; ··· 244 } 245 246 signal(SIGINT, int_exit); 247 248 while (min_port <= max_port) { 249 vip.dport = htons(min_port++);
··· 79 printf(" -m <dest-MAC> Used in sending the IP Tunneled pkt\n"); 80 printf(" -T <stop-after-X-seconds> Default: 0 (forever)\n"); 81 printf(" -P <IP-Protocol> Default is TCP\n"); 82 + printf(" -S use skb-mode\n"); 83 + printf(" -N enforce native mode\n"); 84 printf(" -h Display this help\n"); 85 } 86 ··· 138 { 139 unsigned char opt_flags[256] = {}; 140 unsigned int kill_after_s = 0; 141 + const char *optstr = "i:a:p:s:d:m:T:P:SNh"; 142 int min_port = 0, max_port = 0; 143 struct iptnl_info tnl = {}; 144 struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY}; ··· 206 case 'S': 207 xdp_flags |= XDP_FLAGS_SKB_MODE; 208 break; 209 + case 'N': 210 + xdp_flags |= XDP_FLAGS_DRV_MODE; 211 + break; 212 default: 213 usage(argv[0]); 214 return 1; ··· 239 } 240 241 signal(SIGINT, int_exit); 242 + signal(SIGTERM, int_exit); 243 244 while (min_port <= max_port) { 245 vip.dport = htons(min_port++);
+27 -16
scripts/Makefile.headersinst
··· 8 # 9 # ========================================================================== 10 11 # generated header directory 12 gen := $(if $(gen),$(gen),$(subst include/,include/generated/,$(obj))) 13 ··· 38 kbuild-file := $(srctree)/$(obj)/Kbuild 39 -include $(kbuild-file) 40 41 - # called may set destination dir (when installing to asm/) 42 - _dst := $(if $(dst),$(dst),$(obj)) 43 - 44 old-kbuild-file := $(srctree)/$(subst uapi/,,$(obj))/Kbuild 45 ifneq ($(wildcard $(old-kbuild-file)),) 46 include $(old-kbuild-file) 47 endif 48 49 - include scripts/Kbuild.include 50 - 51 installdir := $(INSTALL_HDR_PATH)/$(subst uapi/,,$(_dst)) 52 53 - srcdir := $(srctree)/$(obj) 54 gendir := $(objtree)/$(gen) 55 - subdirs := $(patsubst $(srcdir)/%/.,%,$(wildcard $(srcdir)/*/.)) 56 header-files := $(notdir $(wildcard $(srcdir)/*.h)) 57 header-files += $(notdir $(wildcard $(srcdir)/*.agh)) 58 header-files := $(filter-out $(no-export-headers), $(header-files)) ··· 104 $(PERL) $< $(INSTALL_HDR_PATH)/include $(SRCARCH); \ 105 touch $@ 106 107 - PHONY += __headersinst __headerscheck 108 - 109 ifndef HDRCHECK 110 # Rules for installing headers 111 - __headersinst: $(subdirs) $(install-file) 112 @: 113 114 targets += $(install-file) ··· 118 $(call if_changed,install) 119 120 else 121 - __headerscheck: $(subdirs) $(check-file) 122 @: 123 124 targets += $(check-file) ··· 127 128 endif 129 130 - # Recursion 131 - .PHONY: $(subdirs) 132 - $(subdirs): 133 - $(Q)$(MAKE) $(hdr-inst)=$(obj)/$@ dst=$(_dst)/$@ 134 - 135 targets := $(wildcard $(sort $(targets))) 136 cmd_files := $(wildcard \ 137 $(foreach f,$(targets),$(dir $(f)).$(notdir $(f)).cmd)) ··· 134 ifneq ($(cmd_files),) 135 include $(cmd_files) 136 endif 137 138 .PHONY: $(PHONY) 139 PHONY += FORCE
··· 8 # 9 # ========================================================================== 10 11 + PHONY := __headers 12 + __headers: 13 + 14 + include scripts/Kbuild.include 15 + 16 + srcdir := $(srctree)/$(obj) 17 + subdirs := $(patsubst $(srcdir)/%/.,%,$(wildcard $(srcdir)/*/.)) 18 + # caller may set destination dir (when installing to asm/) 19 + _dst := $(if $(dst),$(dst),$(obj)) 20 + 21 + # Recursion 22 + __headers: $(subdirs) 23 + 24 + .PHONY: $(subdirs) 25 + $(subdirs): 26 + $(Q)$(MAKE) $(hdr-inst)=$(obj)/$@ dst=$(_dst)/$@ 27 + 28 + # Skip header install/check for include/uapi and arch/$(hdr-arch)/include/uapi. 29 + # We have only sub-directories there. 30 + skip-inst := $(if $(filter %/uapi,$(obj)),1) 31 + 32 + ifeq ($(skip-inst),) 33 + 34 # generated header directory 35 gen := $(if $(gen),$(gen),$(subst include/,include/generated/,$(obj))) 36 ··· 15 kbuild-file := $(srctree)/$(obj)/Kbuild 16 -include $(kbuild-file) 17 18 old-kbuild-file := $(srctree)/$(subst uapi/,,$(obj))/Kbuild 19 ifneq ($(wildcard $(old-kbuild-file)),) 20 include $(old-kbuild-file) 21 endif 22 23 installdir := $(INSTALL_HDR_PATH)/$(subst uapi/,,$(_dst)) 24 25 gendir := $(objtree)/$(gen) 26 header-files := $(notdir $(wildcard $(srcdir)/*.h)) 27 header-files += $(notdir $(wildcard $(srcdir)/*.agh)) 28 header-files := $(filter-out $(no-export-headers), $(header-files)) ··· 88 $(PERL) $< $(INSTALL_HDR_PATH)/include $(SRCARCH); \ 89 touch $@ 90 91 ifndef HDRCHECK 92 # Rules for installing headers 93 + __headers: $(install-file) 94 @: 95 96 targets += $(install-file) ··· 104 $(call if_changed,install) 105 106 else 107 + __headers: $(check-file) 108 @: 109 110 targets += $(check-file) ··· 113 114 endif 115 116 targets := $(wildcard $(sort $(targets))) 117 cmd_files := $(wildcard \ 118 $(foreach f,$(targets),$(dir $(f)).$(notdir $(f)).cmd)) ··· 125 ifneq ($(cmd_files),) 126 include $(cmd_files) 127 endif 128 + 129 + endif # skip-inst 130 131 .PHONY: $(PHONY) 132 PHONY += FORCE
+1 -1
scripts/dtc/checks.c
··· 873 while (size--) 874 reg = (reg << 32) | fdt32_to_cpu(*(cells++)); 875 876 - snprintf(unit_addr, sizeof(unit_addr), "%lx", reg); 877 if (!streq(unitname, unit_addr)) 878 FAIL(c, dti, "Node %s simple-bus unit address format error, expected \"%s\"", 879 node->fullpath, unit_addr);
··· 873 while (size--) 874 reg = (reg << 32) | fdt32_to_cpu(*(cells++)); 875 876 + snprintf(unit_addr, sizeof(unit_addr), "%zx", reg); 877 if (!streq(unitname, unit_addr)) 878 FAIL(c, dti, "Node %s simple-bus unit address format error, expected \"%s\"", 879 node->fullpath, unit_addr);
-4
sound/x86/intel_hdmi_audio.c
··· 1809 pdata->notify_pending = false; 1810 spin_unlock_irq(&pdata->lpe_audio_slock); 1811 1812 - /* runtime PM isn't enabled as default, since it won't save much on 1813 - * BYT/CHT devices; user who want the runtime PM should adjust the 1814 - * power/ontrol and power/autosuspend_delay_ms sysfs entries instead 1815 - */ 1816 pm_runtime_use_autosuspend(&pdev->dev); 1817 pm_runtime_mark_last_busy(&pdev->dev); 1818 pm_runtime_set_active(&pdev->dev);
··· 1809 pdata->notify_pending = false; 1810 spin_unlock_irq(&pdata->lpe_audio_slock); 1811 1812 pm_runtime_use_autosuspend(&pdev->dev); 1813 pm_runtime_mark_last_busy(&pdev->dev); 1814 pm_runtime_set_active(&pdev->dev);
+1
tools/build/feature/test-bpf.c
··· 29 attr.log_size = 0; 30 attr.log_level = 0; 31 attr.kern_version = 0; 32 33 /* 34 * Test existence of __NR_bpf and BPF_PROG_LOAD.
··· 29 attr.log_size = 0; 30 attr.log_level = 0; 31 attr.kern_version = 0; 32 + attr.prog_flags = 0; 33 34 /* 35 * Test existence of __NR_bpf and BPF_PROG_LOAD.
+9 -2
tools/include/uapi/linux/bpf.h
··· 132 */ 133 #define BPF_F_ALLOW_OVERRIDE (1U << 0) 134 135 #define BPF_PSEUDO_MAP_FD 1 136 137 /* flags for BPF_MAP_UPDATE_ELEM command */ ··· 184 __u32 log_size; /* size of user buffer */ 185 __aligned_u64 log_buf; /* user supplied buffer */ 186 __u32 kern_version; /* checked when prog_type=kprobe */ 187 }; 188 189 struct { /* anonymous struct used by BPF_OBJ_* commands */ ··· 489 * u32 bpf_get_socket_uid(skb) 490 * Get the owner uid of the socket stored inside sk_buff. 491 * @skb: pointer to skb 492 - * Return: uid of the socket owner on success or 0 if the socket pointer 493 - * inside sk_buff is NULL 494 */ 495 #define __BPF_FUNC_MAPPER(FN) \ 496 FN(unspec), \
··· 132 */ 133 #define BPF_F_ALLOW_OVERRIDE (1U << 0) 134 135 + /* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the 136 + * verifier will perform strict alignment checking as if the kernel 137 + * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set, 138 + * and NET_IP_ALIGN defined to 2. 139 + */ 140 + #define BPF_F_STRICT_ALIGNMENT (1U << 0) 141 + 142 #define BPF_PSEUDO_MAP_FD 1 143 144 /* flags for BPF_MAP_UPDATE_ELEM command */ ··· 177 __u32 log_size; /* size of user buffer */ 178 __aligned_u64 log_buf; /* user supplied buffer */ 179 __u32 kern_version; /* checked when prog_type=kprobe */ 180 + __u32 prog_flags; 181 }; 182 183 struct { /* anonymous struct used by BPF_OBJ_* commands */ ··· 481 * u32 bpf_get_socket_uid(skb) 482 * Get the owner uid of the socket stored inside sk_buff. 483 * @skb: pointer to skb 484 + * Return: uid of the socket owner on success or overflowuid if failed. 485 */ 486 #define __BPF_FUNC_MAPPER(FN) \ 487 FN(unspec), \
+22
tools/lib/bpf/bpf.c
··· 117 return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); 118 } 119 120 int bpf_map_update_elem(int fd, const void *key, const void *value, 121 __u64 flags) 122 {
··· 117 return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); 118 } 119 120 + int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns, 121 + size_t insns_cnt, int strict_alignment, 122 + const char *license, __u32 kern_version, 123 + char *log_buf, size_t log_buf_sz) 124 + { 125 + union bpf_attr attr; 126 + 127 + bzero(&attr, sizeof(attr)); 128 + attr.prog_type = type; 129 + attr.insn_cnt = (__u32)insns_cnt; 130 + attr.insns = ptr_to_u64(insns); 131 + attr.license = ptr_to_u64(license); 132 + attr.log_buf = ptr_to_u64(log_buf); 133 + attr.log_size = log_buf_sz; 134 + attr.log_level = 2; 135 + log_buf[0] = 0; 136 + attr.kern_version = kern_version; 137 + attr.prog_flags = strict_alignment ? BPF_F_STRICT_ALIGNMENT : 0; 138 + 139 + return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); 140 + } 141 + 142 int bpf_map_update_elem(int fd, const void *key, const void *value, 143 __u64 flags) 144 {
+4
tools/lib/bpf/bpf.h
··· 35 size_t insns_cnt, const char *license, 36 __u32 kern_version, char *log_buf, 37 size_t log_buf_sz); 38 39 int bpf_map_update_elem(int fd, const void *key, const void *value, 40 __u64 flags);
··· 35 size_t insns_cnt, const char *license, 36 __u32 kern_version, char *log_buf, 37 size_t log_buf_sz); 38 + int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns, 39 + size_t insns_cnt, int strict_alignment, 40 + const char *license, __u32 kern_version, 41 + char *log_buf, size_t log_buf_sz); 42 43 int bpf_map_update_elem(int fd, const void *key, const void *value, 44 __u64 flags);
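bpf_verify_program() is a thin wrapper around BPF_PROG_LOAD that always requests a verbose verifier log (log_level = 2) and lets the caller flip BPF_F_STRICT_ALIGNMENT; test_align.c below is its first user. A minimal example of loading a trivial program with strict alignment checking enabled, assuming the BPF_MOV64_IMM()/BPF_EXIT_INSN() macros are pulled in from the kernel's include/linux/filter.h the same way the selftest does:

            static char vlog[16384];
            struct bpf_insn prog[] = {
                    BPF_MOV64_IMM(BPF_REG_0, 0),    /* r0 = 0 */
                    BPF_EXIT_INSN(),                /* return r0 */
            };
            int fd;

            fd = bpf_verify_program(BPF_PROG_TYPE_SOCKET_FILTER, prog,
                                    sizeof(prog) / sizeof(prog[0]),
                                    1 /* strict alignment */, "GPL", 0,
                                    vlog, sizeof(vlog));
            if (fd < 0)
                    printf("verifier said:\n%s\n", vlog);
            else
                    close(fd);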
+4 -2
tools/testing/selftests/bpf/Makefile
··· 11 CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include 12 LDLIBS += -lcap -lelf 13 14 - TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs 15 16 TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o 17 ··· 35 CLANG ?= clang 36 37 %.o: %.c 38 - $(CLANG) -I. -I../../../include/uapi -I../../../../samples/bpf/ \ 39 -Wno-compare-distinct-pointer-types \ 40 -O2 -target bpf -c $< -o $@
··· 11 CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include 12 LDLIBS += -lcap -lelf 13 14 + TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \ 15 + test_align 16 17 TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o 18 ··· 34 CLANG ?= clang 35 36 %.o: %.c 37 + $(CLANG) -I. -I./include/uapi -I../../../include/uapi \ 38 + -I../../../../samples/bpf/ \ 39 -Wno-compare-distinct-pointer-types \ 40 -O2 -target bpf -c $< -o $@
+22
tools/testing/selftests/bpf/include/uapi/linux/types.h
···
··· 1 + #ifndef _UAPI_LINUX_TYPES_H 2 + #define _UAPI_LINUX_TYPES_H 3 + 4 + #include <asm-generic/int-ll64.h> 5 + 6 + /* copied from linux:include/uapi/linux/types.h */ 7 + #define __bitwise 8 + typedef __u16 __bitwise __le16; 9 + typedef __u16 __bitwise __be16; 10 + typedef __u32 __bitwise __le32; 11 + typedef __u32 __bitwise __be32; 12 + typedef __u64 __bitwise __le64; 13 + typedef __u64 __bitwise __be64; 14 + 15 + typedef __u16 __bitwise __sum16; 16 + typedef __u32 __bitwise __wsum; 17 + 18 + #define __aligned_u64 __u64 __attribute__((aligned(8))) 19 + #define __aligned_be64 __be64 __attribute__((aligned(8))) 20 + #define __aligned_le64 __le64 __attribute__((aligned(8))) 21 + 22 + #endif /* _UAPI_LINUX_TYPES_H */
+453
tools/testing/selftests/bpf/test_align.c
···
··· 1 + #include <asm/types.h> 2 + #include <linux/types.h> 3 + #include <stdint.h> 4 + #include <stdio.h> 5 + #include <stdlib.h> 6 + #include <unistd.h> 7 + #include <errno.h> 8 + #include <string.h> 9 + #include <stddef.h> 10 + #include <stdbool.h> 11 + 12 + #include <linux/unistd.h> 13 + #include <linux/filter.h> 14 + #include <linux/bpf_perf_event.h> 15 + #include <linux/bpf.h> 16 + 17 + #include <bpf/bpf.h> 18 + 19 + #include "../../../include/linux/filter.h" 20 + 21 + #ifndef ARRAY_SIZE 22 + # define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) 23 + #endif 24 + 25 + #define MAX_INSNS 512 26 + #define MAX_MATCHES 16 27 + 28 + struct bpf_align_test { 29 + const char *descr; 30 + struct bpf_insn insns[MAX_INSNS]; 31 + enum { 32 + UNDEF, 33 + ACCEPT, 34 + REJECT 35 + } result; 36 + enum bpf_prog_type prog_type; 37 + const char *matches[MAX_MATCHES]; 38 + }; 39 + 40 + static struct bpf_align_test tests[] = { 41 + { 42 + .descr = "mov", 43 + .insns = { 44 + BPF_MOV64_IMM(BPF_REG_3, 2), 45 + BPF_MOV64_IMM(BPF_REG_3, 4), 46 + BPF_MOV64_IMM(BPF_REG_3, 8), 47 + BPF_MOV64_IMM(BPF_REG_3, 16), 48 + BPF_MOV64_IMM(BPF_REG_3, 32), 49 + BPF_MOV64_IMM(BPF_REG_0, 0), 50 + BPF_EXIT_INSN(), 51 + }, 52 + .prog_type = BPF_PROG_TYPE_SCHED_CLS, 53 + .matches = { 54 + "1: R1=ctx R3=imm2,min_value=2,max_value=2,min_align=2 R10=fp", 55 + "2: R1=ctx R3=imm4,min_value=4,max_value=4,min_align=4 R10=fp", 56 + "3: R1=ctx R3=imm8,min_value=8,max_value=8,min_align=8 R10=fp", 57 + "4: R1=ctx R3=imm16,min_value=16,max_value=16,min_align=16 R10=fp", 58 + "5: R1=ctx R3=imm32,min_value=32,max_value=32,min_align=32 R10=fp", 59 + }, 60 + }, 61 + { 62 + .descr = "shift", 63 + .insns = { 64 + BPF_MOV64_IMM(BPF_REG_3, 1), 65 + BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1), 66 + BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1), 67 + BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1), 68 + BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1), 69 + BPF_ALU64_IMM(BPF_RSH, BPF_REG_3, 4), 70 + BPF_MOV64_IMM(BPF_REG_4, 32), 71 + BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1), 72 + BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1), 73 + BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1), 74 + BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1), 75 + BPF_MOV64_IMM(BPF_REG_0, 0), 76 + BPF_EXIT_INSN(), 77 + }, 78 + .prog_type = BPF_PROG_TYPE_SCHED_CLS, 79 + .matches = { 80 + "1: R1=ctx R3=imm1,min_value=1,max_value=1,min_align=1 R10=fp", 81 + "2: R1=ctx R3=imm2,min_value=2,max_value=2,min_align=2 R10=fp", 82 + "3: R1=ctx R3=imm4,min_value=4,max_value=4,min_align=4 R10=fp", 83 + "4: R1=ctx R3=imm8,min_value=8,max_value=8,min_align=8 R10=fp", 84 + "5: R1=ctx R3=imm16,min_value=16,max_value=16,min_align=16 R10=fp", 85 + "6: R1=ctx R3=imm1,min_value=1,max_value=1,min_align=1 R10=fp", 86 + "7: R1=ctx R3=imm1,min_value=1,max_value=1,min_align=1 R4=imm32,min_value=32,max_value=32,min_align=32 R10=fp", 87 + "8: R1=ctx R3=imm1,min_value=1,max_value=1,min_align=1 R4=imm16,min_value=16,max_value=16,min_align=16 R10=fp", 88 + "9: R1=ctx R3=imm1,min_value=1,max_value=1,min_align=1 R4=imm8,min_value=8,max_value=8,min_align=8 R10=fp", 89 + "10: R1=ctx R3=imm1,min_value=1,max_value=1,min_align=1 R4=imm4,min_value=4,max_value=4,min_align=4 R10=fp", 90 + "11: R1=ctx R3=imm1,min_value=1,max_value=1,min_align=1 R4=imm2,min_value=2,max_value=2,min_align=2 R10=fp", 91 + }, 92 + }, 93 + { 94 + .descr = "addsub", 95 + .insns = { 96 + BPF_MOV64_IMM(BPF_REG_3, 4), 97 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 4), 98 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 2), 99 + BPF_MOV64_IMM(BPF_REG_4, 8), 100 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), 101 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 
2), 102 + BPF_MOV64_IMM(BPF_REG_0, 0), 103 + BPF_EXIT_INSN(), 104 + }, 105 + .prog_type = BPF_PROG_TYPE_SCHED_CLS, 106 + .matches = { 107 + "1: R1=ctx R3=imm4,min_value=4,max_value=4,min_align=4 R10=fp", 108 + "2: R1=ctx R3=imm8,min_value=8,max_value=8,min_align=4 R10=fp", 109 + "3: R1=ctx R3=imm10,min_value=10,max_value=10,min_align=2 R10=fp", 110 + "4: R1=ctx R3=imm10,min_value=10,max_value=10,min_align=2 R4=imm8,min_value=8,max_value=8,min_align=8 R10=fp", 111 + "5: R1=ctx R3=imm10,min_value=10,max_value=10,min_align=2 R4=imm12,min_value=12,max_value=12,min_align=4 R10=fp", 112 + "6: R1=ctx R3=imm10,min_value=10,max_value=10,min_align=2 R4=imm14,min_value=14,max_value=14,min_align=2 R10=fp", 113 + }, 114 + }, 115 + { 116 + .descr = "mul", 117 + .insns = { 118 + BPF_MOV64_IMM(BPF_REG_3, 7), 119 + BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 1), 120 + BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 2), 121 + BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 4), 122 + BPF_MOV64_IMM(BPF_REG_0, 0), 123 + BPF_EXIT_INSN(), 124 + }, 125 + .prog_type = BPF_PROG_TYPE_SCHED_CLS, 126 + .matches = { 127 + "1: R1=ctx R3=imm7,min_value=7,max_value=7,min_align=1 R10=fp", 128 + "2: R1=ctx R3=imm7,min_value=7,max_value=7,min_align=1 R10=fp", 129 + "3: R1=ctx R3=imm14,min_value=14,max_value=14,min_align=2 R10=fp", 130 + "4: R1=ctx R3=imm56,min_value=56,max_value=56,min_align=4 R10=fp", 131 + }, 132 + }, 133 + 134 + #define PREP_PKT_POINTERS \ 135 + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, \ 136 + offsetof(struct __sk_buff, data)), \ 137 + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, \ 138 + offsetof(struct __sk_buff, data_end)) 139 + 140 + #define LOAD_UNKNOWN(DST_REG) \ 141 + PREP_PKT_POINTERS, \ 142 + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), \ 143 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), \ 144 + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 1), \ 145 + BPF_EXIT_INSN(), \ 146 + BPF_LDX_MEM(BPF_B, DST_REG, BPF_REG_2, 0) 147 + 148 + { 149 + .descr = "unknown shift", 150 + .insns = { 151 + LOAD_UNKNOWN(BPF_REG_3), 152 + BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1), 153 + BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1), 154 + BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1), 155 + BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1), 156 + LOAD_UNKNOWN(BPF_REG_4), 157 + BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 5), 158 + BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1), 159 + BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1), 160 + BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1), 161 + BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1), 162 + BPF_MOV64_IMM(BPF_REG_0, 0), 163 + BPF_EXIT_INSN(), 164 + }, 165 + .prog_type = BPF_PROG_TYPE_SCHED_CLS, 166 + .matches = { 167 + "7: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R10=fp", 168 + "8: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv55,min_align=2 R10=fp", 169 + "9: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv54,min_align=4 R10=fp", 170 + "10: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv53,min_align=8 R10=fp", 171 + "11: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv52,min_align=16 R10=fp", 172 + "18: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv56 R10=fp", 173 + "19: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv51,min_align=32 R10=fp", 174 + "20: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv52,min_align=16 R10=fp", 175 + "21: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv53,min_align=8 R10=fp", 176 + "22: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv54,min_align=4 R10=fp", 177 + "23: R0=pkt(id=0,off=8,r=8) R1=ctx 
R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv55,min_align=2 R10=fp", 178 + }, 179 + }, 180 + { 181 + .descr = "unknown mul", 182 + .insns = { 183 + LOAD_UNKNOWN(BPF_REG_3), 184 + BPF_MOV64_REG(BPF_REG_4, BPF_REG_3), 185 + BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 1), 186 + BPF_MOV64_REG(BPF_REG_4, BPF_REG_3), 187 + BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 2), 188 + BPF_MOV64_REG(BPF_REG_4, BPF_REG_3), 189 + BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 4), 190 + BPF_MOV64_REG(BPF_REG_4, BPF_REG_3), 191 + BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 8), 192 + BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 2), 193 + BPF_MOV64_IMM(BPF_REG_0, 0), 194 + BPF_EXIT_INSN(), 195 + }, 196 + .prog_type = BPF_PROG_TYPE_SCHED_CLS, 197 + .matches = { 198 + "7: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R10=fp", 199 + "8: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv56 R10=fp", 200 + "9: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv55,min_align=1 R10=fp", 201 + "10: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv56 R10=fp", 202 + "11: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv54,min_align=2 R10=fp", 203 + "12: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv56 R10=fp", 204 + "13: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv53,min_align=4 R10=fp", 205 + "14: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv56 R10=fp", 206 + "15: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv52,min_align=8 R10=fp", 207 + "16: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv50,min_align=8 R10=fp" 208 + }, 209 + }, 210 + { 211 + .descr = "packet const offset", 212 + .insns = { 213 + PREP_PKT_POINTERS, 214 + BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), 215 + 216 + BPF_MOV64_IMM(BPF_REG_0, 0), 217 + 218 + /* Skip over ethernet header. 
*/ 219 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14), 220 + BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), 221 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), 222 + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1), 223 + BPF_EXIT_INSN(), 224 + 225 + BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 0), 226 + BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 1), 227 + BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 2), 228 + BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 3), 229 + BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_5, 0), 230 + BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_5, 2), 231 + BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0), 232 + 233 + BPF_MOV64_IMM(BPF_REG_0, 0), 234 + BPF_EXIT_INSN(), 235 + }, 236 + .prog_type = BPF_PROG_TYPE_SCHED_CLS, 237 + .matches = { 238 + "4: R0=imm0,min_value=0,max_value=0,min_align=2147483648 R1=ctx R2=pkt(id=0,off=0,r=0) R3=pkt_end R5=pkt(id=0,off=0,r=0) R10=fp", 239 + "5: R0=imm0,min_value=0,max_value=0,min_align=2147483648 R1=ctx R2=pkt(id=0,off=0,r=0) R3=pkt_end R5=pkt(id=0,off=14,r=0) R10=fp", 240 + "6: R0=imm0,min_value=0,max_value=0,min_align=2147483648 R1=ctx R2=pkt(id=0,off=0,r=0) R3=pkt_end R4=pkt(id=0,off=14,r=0) R5=pkt(id=0,off=14,r=0) R10=fp", 241 + "10: R0=imm0,min_value=0,max_value=0,min_align=2147483648 R1=ctx R2=pkt(id=0,off=0,r=18) R3=pkt_end R4=inv56 R5=pkt(id=0,off=14,r=18) R10=fp", 242 + "14: R0=imm0,min_value=0,max_value=0,min_align=2147483648 R1=ctx R2=pkt(id=0,off=0,r=18) R3=pkt_end R4=inv48 R5=pkt(id=0,off=14,r=18) R10=fp", 243 + "15: R0=imm0,min_value=0,max_value=0,min_align=2147483648 R1=ctx R2=pkt(id=0,off=0,r=18) R3=pkt_end R4=inv48 R5=pkt(id=0,off=14,r=18) R10=fp", 244 + }, 245 + }, 246 + { 247 + .descr = "packet variable offset", 248 + .insns = { 249 + LOAD_UNKNOWN(BPF_REG_6), 250 + BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2), 251 + 252 + /* First, add a constant to the R5 packet pointer, 253 + * then a variable with a known alignment. 254 + */ 255 + BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), 256 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14), 257 + BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6), 258 + BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), 259 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), 260 + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1), 261 + BPF_EXIT_INSN(), 262 + BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0), 263 + 264 + /* Now, test in the other direction. Adding first 265 + * the variable offset to R5, then the constant. 266 + */ 267 + BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), 268 + BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6), 269 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14), 270 + BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), 271 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), 272 + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1), 273 + BPF_EXIT_INSN(), 274 + BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0), 275 + 276 + /* Test multiple accumulations of unknown values 277 + * into a packet pointer. 278 + */ 279 + BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), 280 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14), 281 + BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6), 282 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 4), 283 + BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6), 284 + BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), 285 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), 286 + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1), 287 + BPF_EXIT_INSN(), 288 + BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0), 289 + 290 + BPF_MOV64_IMM(BPF_REG_0, 0), 291 + BPF_EXIT_INSN(), 292 + }, 293 + .prog_type = BPF_PROG_TYPE_SCHED_CLS, 294 + .matches = { 295 + /* Calculated offset in R6 has unknown value, but known 296 + * alignment of 4. 
297 + */ 298 + "8: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R6=inv54,min_align=4 R10=fp", 299 + 300 + /* Offset is added to packet pointer R5, resulting in known 301 + * auxiliary alignment and offset. 302 + */ 303 + "11: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R5=pkt(id=1,off=0,r=0),aux_off=14,aux_off_align=4 R6=inv54,min_align=4 R10=fp", 304 + 305 + /* At the time the word size load is performed from R5, 306 + * it's total offset is NET_IP_ALIGN + reg->off (0) + 307 + * reg->aux_off (14) which is 16. Then the variable 308 + * offset is considered using reg->aux_off_align which 309 + * is 4 and meets the load's requirements. 310 + */ 311 + "15: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=pkt(id=1,off=4,r=4),aux_off=14,aux_off_align=4 R5=pkt(id=1,off=0,r=4),aux_off=14,aux_off_align=4 R6=inv54,min_align=4 R10=fp", 312 + 313 + 314 + /* Variable offset is added to R5 packet pointer, 315 + * resulting in auxiliary alignment of 4. 316 + */ 317 + "18: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv,aux_off=14,aux_off_align=4 R5=pkt(id=2,off=0,r=0),aux_off_align=4 R6=inv54,min_align=4 R10=fp", 318 + 319 + /* Constant offset is added to R5, resulting in 320 + * reg->off of 14. 321 + */ 322 + "19: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv,aux_off=14,aux_off_align=4 R5=pkt(id=2,off=14,r=0),aux_off_align=4 R6=inv54,min_align=4 R10=fp", 323 + 324 + /* At the time the word size load is performed from R5, 325 + * it's total offset is NET_IP_ALIGN + reg->off (14) which 326 + * is 16. Then the variable offset is considered using 327 + * reg->aux_off_align which is 4 and meets the load's 328 + * requirements. 329 + */ 330 + "23: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=pkt(id=2,off=18,r=18),aux_off_align=4 R5=pkt(id=2,off=14,r=18),aux_off_align=4 R6=inv54,min_align=4 R10=fp", 331 + 332 + /* Constant offset is added to R5 packet pointer, 333 + * resulting in reg->off value of 14. 334 + */ 335 + "26: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv,aux_off_align=4 R5=pkt(id=0,off=14,r=8) R6=inv54,min_align=4 R10=fp", 336 + /* Variable offset is added to R5, resulting in an 337 + * auxiliary offset of 14, and an auxiliary alignment of 4. 338 + */ 339 + "27: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv,aux_off_align=4 R5=pkt(id=3,off=0,r=0),aux_off=14,aux_off_align=4 R6=inv54,min_align=4 R10=fp", 340 + /* Constant is added to R5 again, setting reg->off to 4. */ 341 + "28: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv,aux_off_align=4 R5=pkt(id=3,off=4,r=0),aux_off=14,aux_off_align=4 R6=inv54,min_align=4 R10=fp", 342 + /* And once more we add a variable, which causes an accumulation 343 + * of reg->off into reg->aux_off_align, with resulting value of 344 + * 18. The auxiliary alignment stays at 4. 345 + */ 346 + "29: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv,aux_off_align=4 R5=pkt(id=4,off=0,r=0),aux_off=18,aux_off_align=4 R6=inv54,min_align=4 R10=fp", 347 + /* At the time the word size load is performed from R5, 348 + * it's total offset is NET_IP_ALIGN + reg->off (0) + 349 + * reg->aux_off (18) which is 20. Then the variable offset 350 + * is considered using reg->aux_off_align which is 4 and meets 351 + * the load's requirements. 
352 + */ 353 + "33: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=pkt(id=4,off=4,r=4),aux_off=18,aux_off_align=4 R5=pkt(id=4,off=0,r=4),aux_off=18,aux_off_align=4 R6=inv54,min_align=4 R10=fp", 354 + }, 355 + }, 356 + }; 357 + 358 + static int probe_filter_length(const struct bpf_insn *fp) 359 + { 360 + int len; 361 + 362 + for (len = MAX_INSNS - 1; len > 0; --len) 363 + if (fp[len].code != 0 || fp[len].imm != 0) 364 + break; 365 + return len + 1; 366 + } 367 + 368 + static char bpf_vlog[32768]; 369 + 370 + static int do_test_single(struct bpf_align_test *test) 371 + { 372 + struct bpf_insn *prog = test->insns; 373 + int prog_type = test->prog_type; 374 + int prog_len, i; 375 + int fd_prog; 376 + int ret; 377 + 378 + prog_len = probe_filter_length(prog); 379 + fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER, 380 + prog, prog_len, 1, "GPL", 0, 381 + bpf_vlog, sizeof(bpf_vlog)); 382 + if (fd_prog < 0) { 383 + printf("Failed to load program.\n"); 384 + printf("%s", bpf_vlog); 385 + ret = 1; 386 + } else { 387 + ret = 0; 388 + for (i = 0; i < MAX_MATCHES; i++) { 389 + const char *t, *m = test->matches[i]; 390 + 391 + if (!m) 392 + break; 393 + t = strstr(bpf_vlog, m); 394 + if (!t) { 395 + printf("Failed to find match: %s\n", m); 396 + ret = 1; 397 + printf("%s", bpf_vlog); 398 + break; 399 + } 400 + } 401 + close(fd_prog); 402 + } 403 + return ret; 404 + } 405 + 406 + static int do_test(unsigned int from, unsigned int to) 407 + { 408 + int all_pass = 0; 409 + int all_fail = 0; 410 + unsigned int i; 411 + 412 + for (i = from; i < to; i++) { 413 + struct bpf_align_test *test = &tests[i]; 414 + int fail; 415 + 416 + printf("Test %3d: %s ... ", 417 + i, test->descr); 418 + fail = do_test_single(test); 419 + if (fail) { 420 + all_fail++; 421 + printf("FAIL\n"); 422 + } else { 423 + all_pass++; 424 + printf("PASS\n"); 425 + } 426 + } 427 + printf("Results: %d pass %d fail\n", 428 + all_pass, all_fail); 429 + return 0; 430 + } 431 + 432 + int main(int argc, char **argv) 433 + { 434 + unsigned int from = 0, to = ARRAY_SIZE(tests); 435 + 436 + if (argc == 3) { 437 + unsigned int l = atoi(argv[argc - 2]); 438 + unsigned int u = atoi(argv[argc - 1]); 439 + 440 + if (l < to && u < to) { 441 + from = l; 442 + to = u + 1; 443 + } 444 + } else if (argc == 2) { 445 + unsigned int t = atoi(argv[argc - 1]); 446 + 447 + if (t < to) { 448 + from = t; 449 + to = t + 1; 450 + } 451 + } 452 + return do_test(from, to); 453 + }
+1
tools/testing/selftests/bpf/test_pkt_access.c
··· 5 * License as published by the Free Software Foundation. 6 */ 7 #include <stddef.h> 8 #include <linux/bpf.h> 9 #include <linux/if_ether.h> 10 #include <linux/if_packet.h>
··· 5 * License as published by the Free Software Foundation. 6 */ 7 #include <stddef.h> 8 + #include <string.h> 9 #include <linux/bpf.h> 10 #include <linux/if_ether.h> 11 #include <linux/if_packet.h>
+1 -1
tools/testing/selftests/ftrace/ftracetest
··· 58 ;; 59 --verbose|-v|-vv) 60 VERBOSE=$((VERBOSE + 1)) 61 - [ $1 == '-vv' ] && VERBOSE=$((VERBOSE + 1)) 62 shift 1 63 ;; 64 --debug|-d)
··· 58 ;; 59 --verbose|-v|-vv) 60 VERBOSE=$((VERBOSE + 1)) 61 + [ $1 = '-vv' ] && VERBOSE=$((VERBOSE + 1)) 62 shift 1 63 ;; 64 --debug|-d)
+1 -1
tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc
··· 48 e=`cat $EVENT_ENABLE` 49 if [ "$e" != $val ]; then 50 echo "Expected $val but found $e" 51 - exit -1 52 fi 53 } 54
··· 48 e=`cat $EVENT_ENABLE` 49 if [ "$e" != $val ]; then 50 echo "Expected $val but found $e" 51 + exit 1 52 fi 53 } 54
+2 -2
tools/testing/selftests/ftrace/test.d/functions
··· 34 echo > set_ftrace_filter 35 grep -v '^#' set_ftrace_filter | while read t; do 36 tr=`echo $t | cut -d: -f2` 37 - if [ "$tr" == "" ]; then 38 continue 39 fi 40 - if [ $tr == "enable_event" -o $tr == "disable_event" ]; then 41 tr=`echo $t | cut -d: -f1-4` 42 limit=`echo $t | cut -d: -f5` 43 else
··· 34 echo > set_ftrace_filter 35 grep -v '^#' set_ftrace_filter | while read t; do 36 tr=`echo $t | cut -d: -f2` 37 + if [ "$tr" = "" ]; then 38 continue 39 fi 40 + if [ $tr = "enable_event" -o $tr = "disable_event" ]; then 41 tr=`echo $t | cut -d: -f1-4` 42 limit=`echo $t | cut -d: -f5` 43 else
+6 -2
tools/testing/selftests/ftrace/test.d/instances/instance-event.tc
··· 75 if [ -d foo ]; then 76 fail "foo still exists" 77 fi 78 - exit 0 79 80 - 81 82 83 instance_slam() {
··· 75 if [ -d foo ]; then 76 fail "foo still exists" 77 fi 78 79 + mkdir foo 80 + echo "schedule:enable_event:sched:sched_switch" > foo/set_ftrace_filter 81 + rmdir foo 82 + if [ -d foo ]; then 83 + fail "foo still exists" 84 + fi 85 86 87 instance_slam() {
+1
tools/testing/selftests/powerpc/tm/.gitignore
··· 11 tm-signal-context-chk-gpr 12 tm-signal-context-chk-vmx 13 tm-signal-context-chk-vsx
··· 11 tm-signal-context-chk-gpr 12 tm-signal-context-chk-vmx 13 tm-signal-context-chk-vsx 14 + tm-vmx-unavail
+3 -1
tools/testing/selftests/powerpc/tm/Makefile
··· 2 tm-signal-context-chk-vmx tm-signal-context-chk-vsx 3 4 TEST_GEN_PROGS := tm-resched-dscr tm-syscall tm-signal-msr-resv tm-signal-stack \ 5 - tm-vmxcopy tm-fork tm-tar tm-tmspr $(SIGNAL_CONTEXT_CHK_TESTS) 6 7 include ../../lib.mk 8 ··· 14 $(OUTPUT)/tm-syscall: tm-syscall-asm.S 15 $(OUTPUT)/tm-syscall: CFLAGS += -I../../../../../usr/include 16 $(OUTPUT)/tm-tmspr: CFLAGS += -pthread 17 18 SIGNAL_CONTEXT_CHK_TESTS := $(patsubst %,$(OUTPUT)/%,$(SIGNAL_CONTEXT_CHK_TESTS)) 19 $(SIGNAL_CONTEXT_CHK_TESTS): tm-signal.S
··· 2 tm-signal-context-chk-vmx tm-signal-context-chk-vsx 3 4 TEST_GEN_PROGS := tm-resched-dscr tm-syscall tm-signal-msr-resv tm-signal-stack \ 5 + tm-vmxcopy tm-fork tm-tar tm-tmspr tm-vmx-unavail \ 6 + $(SIGNAL_CONTEXT_CHK_TESTS) 7 8 include ../../lib.mk 9 ··· 13 $(OUTPUT)/tm-syscall: tm-syscall-asm.S 14 $(OUTPUT)/tm-syscall: CFLAGS += -I../../../../../usr/include 15 $(OUTPUT)/tm-tmspr: CFLAGS += -pthread 16 + $(OUTPUT)/tm-vmx-unavail: CFLAGS += -pthread -m64 17 18 SIGNAL_CONTEXT_CHK_TESTS := $(patsubst %,$(OUTPUT)/%,$(SIGNAL_CONTEXT_CHK_TESTS)) 19 $(SIGNAL_CONTEXT_CHK_TESTS): tm-signal.S
+118
tools/testing/selftests/powerpc/tm/tm-vmx-unavail.c
···
··· 1 + /* 2 + * Copyright 2017, Michael Neuling, IBM Corp. 3 + * Licensed under GPLv2. 4 + * Original: Breno Leitao <brenohl@br.ibm.com> & 5 + * Gustavo Bueno Romero <gromero@br.ibm.com> 6 + * Edited: Michael Neuling 7 + * 8 + * Force VMX unavailable during a transaction and see if it corrupts 9 + * the checkpointed VMX register state after the abort. 10 + */ 11 + 12 + #include <inttypes.h> 13 + #include <htmintrin.h> 14 + #include <string.h> 15 + #include <stdlib.h> 16 + #include <stdio.h> 17 + #include <pthread.h> 18 + #include <sys/mman.h> 19 + #include <unistd.h> 20 + #include <pthread.h> 21 + 22 + #include "tm.h" 23 + #include "utils.h" 24 + 25 + int passed; 26 + 27 + void *worker(void *unused) 28 + { 29 + __int128 vmx0; 30 + uint64_t texasr; 31 + 32 + asm goto ( 33 + "li 3, 1;" /* Stick non-zero value in VMX0 */ 34 + "std 3, 0(%[vmx0_ptr]);" 35 + "lvx 0, 0, %[vmx0_ptr];" 36 + 37 + /* Wait here a bit so we get scheduled out 255 times */ 38 + "lis 3, 0x3fff;" 39 + "1: ;" 40 + "addi 3, 3, -1;" 41 + "cmpdi 3, 0;" 42 + "bne 1b;" 43 + 44 + /* Kernel will hopefully turn VMX off now */ 45 + 46 + "tbegin. ;" 47 + "beq failure;" 48 + 49 + /* Cause VMX unavail. Any VMX instruction */ 50 + "vaddcuw 0,0,0;" 51 + 52 + "tend. ;" 53 + "b %l[success];" 54 + 55 + /* Check VMX0 sanity after abort */ 56 + "failure: ;" 57 + "lvx 1, 0, %[vmx0_ptr];" 58 + "vcmpequb. 2, 0, 1;" 59 + "bc 4, 24, %l[value_mismatch];" 60 + "b %l[value_match];" 61 + : 62 + : [vmx0_ptr] "r"(&vmx0) 63 + : "r3" 64 + : success, value_match, value_mismatch 65 + ); 66 + 67 + /* HTM aborted and VMX0 is corrupted */ 68 + value_mismatch: 69 + texasr = __builtin_get_texasr(); 70 + 71 + printf("\n\n==============\n\n"); 72 + printf("Failure with error: %lx\n", _TEXASR_FAILURE_CODE(texasr)); 73 + printf("Summary error : %lx\n", _TEXASR_FAILURE_SUMMARY(texasr)); 74 + printf("TFIAR exact : %lx\n\n", _TEXASR_TFIAR_EXACT(texasr)); 75 + 76 + passed = 0; 77 + return NULL; 78 + 79 + /* HTM aborted but VMX0 is correct */ 80 + value_match: 81 + // printf("!"); 82 + return NULL; 83 + 84 + success: 85 + // printf("."); 86 + return NULL; 87 + } 88 + 89 + int tm_vmx_unavail_test() 90 + { 91 + int threads; 92 + pthread_t *thread; 93 + 94 + SKIP_IF(!have_htm()); 95 + 96 + passed = 1; 97 + 98 + threads = sysconf(_SC_NPROCESSORS_ONLN) * 4; 99 + thread = malloc(sizeof(pthread_t)*threads); 100 + if (!thread) 101 + return EXIT_FAILURE; 102 + 103 + for (uint64_t i = 0; i < threads; i++) 104 + pthread_create(&thread[i], NULL, &worker, NULL); 105 + 106 + for (uint64_t i = 0; i < threads; i++) 107 + pthread_join(thread[i], NULL); 108 + 109 + free(thread); 110 + 111 + return passed ? EXIT_SUCCESS : EXIT_FAILURE; 112 + } 113 + 114 + 115 + int main(int argc, char **argv) 116 + { 117 + return test_harness(tm_vmx_unavail_test, "tm_vmx_unavail_test"); 118 + }
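
The worker above relies on the compiler's asm goto extension so that the
transactional block can branch directly to C labels (success,
value_match, value_mismatch) on its different exit paths. A minimal,
self-contained illustration of that construct on powerpc, unrelated to
the TM logic itself, might look like this (the function and label names
are made up for the example):

    #include <stdio.h>

    /* asm goto: the label list follows the clobber list and %l[name]
     * names a C label as a branch target. Such blocks cannot have
     * output operands on older compilers, which is why the test above
     * reports results through memory and labels instead.
     */
    static int is_zero(long x)
    {
        asm goto("cmpdi %0, 0;"
                 "beq %l[zero];"
                 : /* no outputs */
                 : "r"(x)
                 : "cr0"
                 : zero);
        return 0;
    zero:
        return 1;
    }

    int main(void)
    {
        printf("%d %d\n", is_zero(0), is_zero(42));   /* prints "1 0" */
        return 0;
    }
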
+9 -9
virt/kvm/arm/hyp/vgic-v3-sr.c
··· 22 #include <asm/kvm_hyp.h> 23 24 #define vtr_to_max_lr_idx(v) ((v) & 0xf) 25 - #define vtr_to_nr_pri_bits(v) (((u32)(v) >> 29) + 1) 26 27 static u64 __hyp_text __gic_v3_get_lr(unsigned int lr) 28 { ··· 135 136 if (used_lrs) { 137 int i; 138 - u32 nr_pri_bits; 139 140 cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2); 141 142 write_gicreg(0, ICH_HCR_EL2); 143 val = read_gicreg(ICH_VTR_EL2); 144 - nr_pri_bits = vtr_to_nr_pri_bits(val); 145 146 for (i = 0; i < used_lrs; i++) { 147 if (cpu_if->vgic_elrsr & (1 << i)) ··· 152 __gic_v3_set_lr(0, i); 153 } 154 155 - switch (nr_pri_bits) { 156 case 7: 157 cpu_if->vgic_ap0r[3] = read_gicreg(ICH_AP0R3_EL2); 158 cpu_if->vgic_ap0r[2] = read_gicreg(ICH_AP0R2_EL2); ··· 162 cpu_if->vgic_ap0r[0] = read_gicreg(ICH_AP0R0_EL2); 163 } 164 165 - switch (nr_pri_bits) { 166 case 7: 167 cpu_if->vgic_ap1r[3] = read_gicreg(ICH_AP1R3_EL2); 168 cpu_if->vgic_ap1r[2] = read_gicreg(ICH_AP1R2_EL2); ··· 198 struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; 199 u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs; 200 u64 val; 201 - u32 nr_pri_bits; 202 int i; 203 204 /* ··· 217 } 218 219 val = read_gicreg(ICH_VTR_EL2); 220 - nr_pri_bits = vtr_to_nr_pri_bits(val); 221 222 if (used_lrs) { 223 write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2); 224 225 - switch (nr_pri_bits) { 226 case 7: 227 write_gicreg(cpu_if->vgic_ap0r[3], ICH_AP0R3_EL2); 228 write_gicreg(cpu_if->vgic_ap0r[2], ICH_AP0R2_EL2); ··· 232 write_gicreg(cpu_if->vgic_ap0r[0], ICH_AP0R0_EL2); 233 } 234 235 - switch (nr_pri_bits) { 236 case 7: 237 write_gicreg(cpu_if->vgic_ap1r[3], ICH_AP1R3_EL2); 238 write_gicreg(cpu_if->vgic_ap1r[2], ICH_AP1R2_EL2);
··· 22 #include <asm/kvm_hyp.h> 23 24 #define vtr_to_max_lr_idx(v) ((v) & 0xf) 25 + #define vtr_to_nr_pre_bits(v) (((u32)(v) >> 26) + 1) 26 27 static u64 __hyp_text __gic_v3_get_lr(unsigned int lr) 28 { ··· 135 136 if (used_lrs) { 137 int i; 138 + u32 nr_pre_bits; 139 140 cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2); 141 142 write_gicreg(0, ICH_HCR_EL2); 143 val = read_gicreg(ICH_VTR_EL2); 144 + nr_pre_bits = vtr_to_nr_pre_bits(val); 145 146 for (i = 0; i < used_lrs; i++) { 147 if (cpu_if->vgic_elrsr & (1 << i)) ··· 152 __gic_v3_set_lr(0, i); 153 } 154 155 + switch (nr_pre_bits) { 156 case 7: 157 cpu_if->vgic_ap0r[3] = read_gicreg(ICH_AP0R3_EL2); 158 cpu_if->vgic_ap0r[2] = read_gicreg(ICH_AP0R2_EL2); ··· 162 cpu_if->vgic_ap0r[0] = read_gicreg(ICH_AP0R0_EL2); 163 } 164 165 + switch (nr_pre_bits) { 166 case 7: 167 cpu_if->vgic_ap1r[3] = read_gicreg(ICH_AP1R3_EL2); 168 cpu_if->vgic_ap1r[2] = read_gicreg(ICH_AP1R2_EL2); ··· 198 struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; 199 u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs; 200 u64 val; 201 + u32 nr_pre_bits; 202 int i; 203 204 /* ··· 217 } 218 219 val = read_gicreg(ICH_VTR_EL2); 220 + nr_pre_bits = vtr_to_nr_pre_bits(val); 221 222 if (used_lrs) { 223 write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2); 224 225 + switch (nr_pre_bits) { 226 case 7: 227 write_gicreg(cpu_if->vgic_ap0r[3], ICH_AP0R3_EL2); 228 write_gicreg(cpu_if->vgic_ap0r[2], ICH_AP0R2_EL2); ··· 232 write_gicreg(cpu_if->vgic_ap0r[0], ICH_AP0R0_EL2); 233 } 234 235 + switch (nr_pre_bits) { 236 case 7: 237 write_gicreg(cpu_if->vgic_ap1r[3], ICH_AP1R3_EL2); 238 write_gicreg(cpu_if->vgic_ap1r[2], ICH_AP1R2_EL2);
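
The rename matters because the number of ICH_AP0Rn_EL2/ICH_AP1Rn_EL2
registers that exist is governed by the preemption width, not the
priority width: in ICH_VTR_EL2, PRIbits occupies bits [31:29] and
PREbits bits [28:26], both encoded as "value minus one". A standalone
sketch of decoding the two fields, masking each one explicitly:

    #include <stdint.h>
    #include <stdio.h>

    /* ICH_VTR_EL2 fields relevant here (GICv3 architecture):
     *   PRIbits [31:29]  priority bits implemented, minus one
     *   PREbits [28:26]  preemption bits implemented, minus one
     * The switch statements above key off the preemption width, since
     * that is what sizes the active-priority register file.
     */
    static unsigned int vtr_pri_bits(uint32_t vtr)
    {
        return ((vtr >> 29) & 0x7) + 1;
    }

    static unsigned int vtr_pre_bits(uint32_t vtr)
    {
        return ((vtr >> 26) & 0x7) + 1;
    }

    int main(void)
    {
        uint32_t vtr = 0x90000000;  /* example: PRIbits=0b100, PREbits=0b100 */

        printf("pri=%u pre=%u\n", vtr_pri_bits(vtr), vtr_pre_bits(vtr));
        return 0;
    }
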
+21 -12
virt/kvm/arm/mmu.c
··· 295 assert_spin_locked(&kvm->mmu_lock); 296 pgd = kvm->arch.pgd + stage2_pgd_index(addr); 297 do { 298 next = stage2_pgd_addr_end(addr, end); 299 if (!stage2_pgd_none(*pgd)) 300 unmap_stage2_puds(kvm, pgd, addr, next); ··· 836 * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all 837 * underlying level-2 and level-3 tables before freeing the actual level-1 table 838 * and setting the struct pointer to NULL. 839 - * 840 - * Note we don't need locking here as this is only called when the VM is 841 - * destroyed, which can only be done once. 842 */ 843 void kvm_free_stage2_pgd(struct kvm *kvm) 844 { 845 - if (kvm->arch.pgd == NULL) 846 - return; 847 848 spin_lock(&kvm->mmu_lock); 849 - unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE); 850 spin_unlock(&kvm->mmu_lock); 851 852 /* Free the HW pgd, one page at a time */ 853 - free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE); 854 - kvm->arch.pgd = NULL; 855 } 856 857 static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, ··· 1177 * large. Otherwise, we may see kernel panics with 1178 * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR, 1179 * CONFIG_LOCKDEP. Additionally, holding the lock too long 1180 - * will also starve other vCPUs. 1181 */ 1182 - if (need_resched() || spin_needbreak(&kvm->mmu_lock)) 1183 - cond_resched_lock(&kvm->mmu_lock); 1184 - 1185 next = stage2_pgd_addr_end(addr, end); 1186 if (stage2_pgd_present(*pgd)) 1187 stage2_wp_puds(pgd, addr, next);
··· 295 assert_spin_locked(&kvm->mmu_lock); 296 pgd = kvm->arch.pgd + stage2_pgd_index(addr); 297 do { 298 + /* 299 + * Make sure the page table is still active, as another thread 300 + * could have possibly freed the page table, while we released 301 + * the lock. 302 + */ 303 + if (!READ_ONCE(kvm->arch.pgd)) 304 + break; 305 next = stage2_pgd_addr_end(addr, end); 306 if (!stage2_pgd_none(*pgd)) 307 unmap_stage2_puds(kvm, pgd, addr, next); ··· 829 * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all 830 * underlying level-2 and level-3 tables before freeing the actual level-1 table 831 * and setting the struct pointer to NULL. 832 */ 833 void kvm_free_stage2_pgd(struct kvm *kvm) 834 { 835 + void *pgd = NULL; 836 837 spin_lock(&kvm->mmu_lock); 838 + if (kvm->arch.pgd) { 839 + unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE); 840 + pgd = READ_ONCE(kvm->arch.pgd); 841 + kvm->arch.pgd = NULL; 842 + } 843 spin_unlock(&kvm->mmu_lock); 844 845 /* Free the HW pgd, one page at a time */ 846 + if (pgd) 847 + free_pages_exact(pgd, S2_PGD_SIZE); 848 } 849 850 static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, ··· 1170 * large. Otherwise, we may see kernel panics with 1171 * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR, 1172 * CONFIG_LOCKDEP. Additionally, holding the lock too long 1173 + * will also starve other vCPUs. We have to also make sure 1174 + * that the page tables are not freed while we released 1175 + * the lock. 1176 */ 1177 + cond_resched_lock(&kvm->mmu_lock); 1178 + if (!READ_ONCE(kvm->arch.pgd)) 1179 + break; 1180 next = stage2_pgd_addr_end(addr, end); 1181 if (stage2_pgd_present(*pgd)) 1182 stage2_wp_puds(pgd, addr, next);
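
Both hunks above follow one discipline: the teardown path clears
kvm->arch.pgd under the mmu_lock and frees the pages only after dropping
it, while long-running walkers recheck the pointer whenever they may
have let the lock go (cond_resched_lock() can drop and re-take it). A
userspace analogue of that pattern, with purely illustrative names and
built with "cc -pthread", could be sketched as:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static long *table;                 /* shared state, guarded by lock */

    static void *walker(void *unused)
    {
        (void)unused;
        pthread_mutex_lock(&lock);
        for (int i = 0; i < 1024; i++) {
            /* Yield point: drop and re-take the lock, like
             * cond_resched_lock() in the kernel code above.
             */
            pthread_mutex_unlock(&lock);
            pthread_mutex_lock(&lock);

            /* The table may have been torn down while unlocked. */
            if (!table)
                break;
            table[i % 16]++;
        }
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    static void teardown(void)
    {
        long *p = NULL;

        pthread_mutex_lock(&lock);
        if (table) {
            p = table;
            table = NULL;       /* walkers now see the table as gone */
        }
        pthread_mutex_unlock(&lock);

        free(p);                /* free outside the lock */
    }

    int main(void)
    {
        pthread_t t;

        table = calloc(16, sizeof(*table));
        pthread_create(&t, NULL, walker, NULL);
        teardown();
        pthread_join(t, NULL);
        puts("done");
        return 0;
    }
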
+4 -1
virt/kvm/arm/vgic/vgic-init.c
··· 242 * If we are creating a VCPU with a GICv3 we must also register the 243 * KVM io device for the redistributor that belongs to this VCPU. 244 */ 245 - if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) 246 ret = vgic_register_redist_iodev(vcpu); 247 return ret; 248 } 249
··· 242 * If we are creating a VCPU with a GICv3 we must also register the 243 * KVM io device for the redistributor that belongs to this VCPU. 244 */ 245 + if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) { 246 + mutex_lock(&vcpu->kvm->lock); 247 ret = vgic_register_redist_iodev(vcpu); 248 + mutex_unlock(&vcpu->kvm->lock); 249 + } 250 return ret; 251 } 252
+9 -3
virt/kvm/arm/vgic/vgic-mmio-v3.c
··· 586 if (!vgic_v3_check_base(kvm)) 587 return -EINVAL; 588 589 - rd_base = vgic->vgic_redist_base + kvm_vcpu_get_idx(vcpu) * SZ_64K * 2; 590 sgi_base = rd_base + SZ_64K; 591 592 kvm_iodevice_init(&rd_dev->dev, &kvm_io_gic_ops); ··· 614 mutex_lock(&kvm->slots_lock); 615 ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, sgi_base, 616 SZ_64K, &sgi_dev->dev); 617 - mutex_unlock(&kvm->slots_lock); 618 - if (ret) 619 kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, 620 &rd_dev->dev); 621 622 return ret; 623 } 624 ··· 648 649 if (ret) { 650 /* The current c failed, so we start with the previous one. */ 651 for (c--; c >= 0; c--) { 652 vcpu = kvm_get_vcpu(kvm, c); 653 vgic_unregister_redist_iodev(vcpu); 654 } 655 } 656 657 return ret;
··· 586 if (!vgic_v3_check_base(kvm)) 587 return -EINVAL; 588 589 + rd_base = vgic->vgic_redist_base + vgic->vgic_redist_free_offset; 590 sgi_base = rd_base + SZ_64K; 591 592 kvm_iodevice_init(&rd_dev->dev, &kvm_io_gic_ops); ··· 614 mutex_lock(&kvm->slots_lock); 615 ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, sgi_base, 616 SZ_64K, &sgi_dev->dev); 617 + if (ret) { 618 kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, 619 &rd_dev->dev); 620 + goto out; 621 + } 622 623 + vgic->vgic_redist_free_offset += 2 * SZ_64K; 624 + out: 625 + mutex_unlock(&kvm->slots_lock); 626 return ret; 627 } 628 ··· 644 645 if (ret) { 646 /* The current c failed, so we start with the previous one. */ 647 + mutex_lock(&kvm->slots_lock); 648 for (c--; c >= 0; c--) { 649 vcpu = kvm_get_vcpu(kvm, c); 650 vgic_unregister_redist_iodev(vcpu); 651 } 652 + mutex_unlock(&kvm->slots_lock); 653 } 654 655 return ret;
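
The registration above assumes the GICv3 redistributor layout in which
each vCPU owns two consecutive 64K MMIO frames, the RD frame followed by
the SGI frame; vgic_redist_free_offset hands those slots out in
registration order and advances by 2 * 64K only once both frames have
been registered (under slots_lock). A small sketch of that address
arithmetic, with an illustrative region base:

    #include <stdint.h>
    #include <stdio.h>

    #define SZ_64K 0x10000ULL

    /* Each vCPU's redistributor: RD frame, then SGI frame, 64K apiece,
     * carved out of the region by a running free offset.  The base
     * address below is just an example value.
     */
    int main(void)
    {
        uint64_t redist_base = 0x080a0000ULL;
        uint64_t free_offset = 0;

        for (int vcpu = 0; vcpu < 4; vcpu++) {
            uint64_t rd_base  = redist_base + free_offset;
            uint64_t sgi_base = rd_base + SZ_64K;

            printf("vcpu%d: RD @ %#llx, SGI @ %#llx\n", vcpu,
                   (unsigned long long)rd_base,
                   (unsigned long long)sgi_base);

            /* Advance only after both frames registered successfully. */
            free_offset += 2 * SZ_64K;
        }
        return 0;
    }
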
+7
virt/kvm/arm/vgic/vgic-v2.c
··· 149 if (irq->hw) { 150 val |= GICH_LR_HW; 151 val |= irq->hwintid << GICH_LR_PHYSID_CPUID_SHIFT; 152 } else { 153 if (irq->config == VGIC_CONFIG_LEVEL) 154 val |= GICH_LR_EOI;
··· 149 if (irq->hw) { 150 val |= GICH_LR_HW; 151 val |= irq->hwintid << GICH_LR_PHYSID_CPUID_SHIFT; 152 + /* 153 + * Never set pending+active on a HW interrupt, as the 154 + * pending state is kept at the physical distributor 155 + * level. 156 + */ 157 + if (irq->active && irq_is_pending(irq)) 158 + val &= ~GICH_LR_PENDING_BIT; 159 } else { 160 if (irq->config == VGIC_CONFIG_LEVEL) 161 val |= GICH_LR_EOI;
+7
virt/kvm/arm/vgic/vgic-v3.c
··· 127 if (irq->hw) { 128 val |= ICH_LR_HW; 129 val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT; 130 } else { 131 if (irq->config == VGIC_CONFIG_LEVEL) 132 val |= ICH_LR_EOI;
··· 127 if (irq->hw) { 128 val |= ICH_LR_HW; 129 val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT; 130 + /* 131 + * Never set pending+active on a HW interrupt, as the 132 + * pending state is kept at the physical distributor 133 + * level. 134 + */ 135 + if (irq->active && irq_is_pending(irq)) 136 + val &= ~ICH_LR_PENDING_BIT; 137 } else { 138 if (irq->config == VGIC_CONFIG_LEVEL) 139 val |= ICH_LR_EOI;
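
The GICv2 and GICv3 hunks apply the same rule: a hardware-mapped
interrupt must never be presented as pending and active at the same time
in a list register, because its pending state is tracked by the physical
distributor. A compact sketch of that list-register construction, with
bit positions loosely modelled on GICH_LR and intended as an
illustration rather than the exact encodings:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define LR_PENDING  (1u << 28)
    #define LR_ACTIVE   (1u << 29)
    #define LR_HW       (1u << 31)

    static uint32_t make_lr(bool hw, bool pending, bool active)
    {
        uint32_t lr = 0;

        if (pending)
            lr |= LR_PENDING;
        if (active)
            lr |= LR_ACTIVE;
        if (hw) {
            lr |= LR_HW;
            /* The pending state of a HW interrupt lives in the
             * physical distributor, so never advertise pending
             * together with active in the list register.
             */
            if ((lr & LR_ACTIVE) && (lr & LR_PENDING))
                lr &= ~LR_PENDING;
        }
        return lr;
    }

    int main(void)
    {
        printf("hw pending+active -> %#x\n",
               (unsigned int)make_lr(true, true, true));
        printf("sw pending+active -> %#x\n",
               (unsigned int)make_lr(false, true, true));
        return 0;
    }
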