Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
drivers/net/vxlan.c
drivers/vhost/net.c
include/linux/if_vlan.h
net/core/dev.c

The net/core/dev.c conflict was the overlap of one commit marking an
existing function static whilst another was adding a new function.

In the include/linux/if_vlan.h case, the type used for a local
variable was changed in 'net', whereas the function got rewritten
to fix a stacked vlan bug in 'net-next'.

In drivers/vhost/net.c, Al Viro's iov_iter conversions in 'net-next'
overlapped with an endianness fix for VHOST 1.0 in 'net'.

In drivers/net/vxlan.c, vxlan_find_vni() added a 'flags' parameter
in 'net-next' whereas in 'net' there was a bug fix to pass in the
correct network namespace pointer in calls to this function.

Signed-off-by: David S. Miller <davem@davemloft.net>

+2583 -1662
+1 -1
Documentation/devicetree/bindings/i2c/i2c-st.txt
··· 31 31 compatible = "st,comms-ssc4-i2c"; 32 32 reg = <0xfed40000 0x110>; 33 33 interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>; 34 - clocks = <&CLK_S_ICN_REG_0>; 34 + clocks = <&clk_s_a0_ls CLK_ICN_REG>; 35 35 clock-names = "ssc"; 36 36 clock-frequency = <400000>; 37 37 pinctrl-names = "default";
+1
Documentation/devicetree/bindings/i2c/trivial-devices.txt
··· 47 47 dallas,ds4510 CPU Supervisor with Nonvolatile Memory and Programmable I/O 48 48 dallas,ds75 Digital Thermometer and Thermostat 49 49 dlg,da9053 DA9053: flexible system level PMIC with multicore support 50 + dlg,da9063 DA9063: system PMIC for quad-core application processors 50 51 epson,rx8025 High-Stability. I2C-Bus INTERFACE REAL TIME CLOCK MODULE 51 52 epson,rx8581 I2C-BUS INTERFACE REAL TIME CLOCK MODULE 52 53 fsl,mag3110 MAG3110: Xtrinsic High Accuracy, 3D Magnetometer
+11
MAINTAINERS
··· 708 708 F: drivers/staging/iio/*/ad* 709 709 F: staging/iio/trigger/iio-trig-bfin-timer.c 710 710 711 + ANDROID DRIVERS 712 + M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 713 + M: Arve Hjønnevåg <arve@android.com> 714 + M: Riley Andrews <riandrews@android.com> 715 + T: git git://git.kernel.org/pub/scm/linux/kernel/gregkh/staging.git 716 + L: devel@driverdev.osuosl.org 717 + S: Supported 718 + F: drivers/android/ 719 + F: drivers/staging/android/ 720 + 711 721 AOA (Apple Onboard Audio) ALSA DRIVER 712 722 M: Johannes Berg <johannes@sipsolutions.net> 713 723 L: linuxppc-dev@lists.ozlabs.org ··· 10191 10181 M: "Hans J. Koch" <hjk@hansjkoch.de> 10192 10182 M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 10193 10183 S: Maintained 10184 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git 10194 10185 F: Documentation/DocBook/uio-howto.tmpl 10195 10186 F: drivers/uio/ 10196 10187 F: include/linux/uio*.h
+1 -1
Makefile
··· 1 1 VERSION = 3 2 2 PATCHLEVEL = 19 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc6 4 + EXTRAVERSION = -rc7 5 5 NAME = Diseased Newt 6 6 7 7 # *DOCUMENTATION*
+2
arch/alpha/mm/fault.c
··· 156 156 if (unlikely(fault & VM_FAULT_ERROR)) { 157 157 if (fault & VM_FAULT_OOM) 158 158 goto out_of_memory; 159 + else if (fault & VM_FAULT_SIGSEGV) 160 + goto bad_area; 159 161 else if (fault & VM_FAULT_SIGBUS) 160 162 goto do_sigbus; 161 163 BUG();
+2
arch/arc/mm/fault.c
··· 161 161 162 162 if (fault & VM_FAULT_OOM) 163 163 goto out_of_memory; 164 + else if (fault & VM_FAULT_SIGSEGV) 165 + goto bad_area; 164 166 else if (fault & VM_FAULT_SIGBUS) 165 167 goto do_sigbus; 166 168
+30 -9
arch/arm/boot/compressed/head.S
··· 263 263 * OK... Let's do some funky business here. 264 264 * If we do have a DTB appended to zImage, and we do have 265 265 * an ATAG list around, we want the later to be translated 266 - * and folded into the former here. To be on the safe side, 267 - * let's temporarily move the stack away into the malloc 268 - * area. No GOT fixup has occurred yet, but none of the 269 - * code we're about to call uses any global variable. 266 + * and folded into the former here. No GOT fixup has occurred 267 + * yet, but none of the code we're about to call uses any 268 + * global variable. 270 269 */ 271 - add sp, sp, #0x10000 270 + 271 + /* Get the initial DTB size */ 272 + ldr r5, [r6, #4] 273 + #ifndef __ARMEB__ 274 + /* convert to little endian */ 275 + eor r1, r5, r5, ror #16 276 + bic r1, r1, #0x00ff0000 277 + mov r5, r5, ror #8 278 + eor r5, r5, r1, lsr #8 279 + #endif 280 + /* 50% DTB growth should be good enough */ 281 + add r5, r5, r5, lsr #1 282 + /* preserve 64-bit alignment */ 283 + add r5, r5, #7 284 + bic r5, r5, #7 285 + /* clamp to 32KB min and 1MB max */ 286 + cmp r5, #(1 << 15) 287 + movlo r5, #(1 << 15) 288 + cmp r5, #(1 << 20) 289 + movhi r5, #(1 << 20) 290 + /* temporarily relocate the stack past the DTB work space */ 291 + add sp, sp, r5 292 + 272 293 stmfd sp!, {r0-r3, ip, lr} 273 294 mov r0, r8 274 295 mov r1, r6 275 - sub r2, sp, r6 296 + mov r2, r5 276 297 bl atags_to_fdt 277 298 278 299 /* ··· 306 285 bic r0, r0, #1 307 286 add r0, r0, #0x100 308 287 mov r1, r6 309 - sub r2, sp, r6 288 + mov r2, r5 310 289 bleq atags_to_fdt 311 290 312 291 ldmfd sp!, {r0-r3, ip, lr} 313 - sub sp, sp, #0x10000 292 + sub sp, sp, r5 314 293 #endif 315 294 316 295 mov r8, r6 @ use the appended device tree ··· 327 306 subs r1, r5, r1 328 307 addhi r9, r9, r1 329 308 330 - /* Get the dtb's size */ 309 + /* Get the current DTB size */ 331 310 ldr r5, [r6, #4] 332 311 #ifndef __ARMEB__ 333 312 /* convert r5 (dtb size) to little endian */
+10 -10
arch/arm/boot/dts/sun4i-a10.dtsi
··· 17 17 18 18 aliases { 19 19 ethernet0 = &emac; 20 - serial0 = &uart0; 21 - serial1 = &uart1; 22 - serial2 = &uart2; 23 - serial3 = &uart3; 24 - serial4 = &uart4; 25 - serial5 = &uart5; 26 - serial6 = &uart6; 27 - serial7 = &uart7; 28 20 }; 29 21 30 22 chosen { ··· 29 37 allwinner,pipeline = "de_be0-lcd0-hdmi"; 30 38 clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 43>, 31 39 <&ahb_gates 44>; 40 + status = "disabled"; 41 + }; 42 + 43 + framebuffer@1 { 44 + compatible = "allwinner,simple-framebuffer", "simple-framebuffer"; 45 + allwinner,pipeline = "de_fe0-de_be0-lcd0-hdmi"; 46 + clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 43>, 47 + <&ahb_gates 44>, <&ahb_gates 46>; 32 48 status = "disabled"; 33 49 }; 34 50 }; ··· 438 438 reg-names = "phy_ctrl", "pmu1", "pmu2"; 439 439 clocks = <&usb_clk 8>; 440 440 clock-names = "usb_phy"; 441 - resets = <&usb_clk 1>, <&usb_clk 2>; 442 - reset-names = "usb1_reset", "usb2_reset"; 441 + resets = <&usb_clk 0>, <&usb_clk 1>, <&usb_clk 2>; 442 + reset-names = "usb0_reset", "usb1_reset", "usb2_reset"; 443 443 status = "disabled"; 444 444 }; 445 445
+6
arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts
··· 55 55 model = "Olimex A10s-Olinuxino Micro"; 56 56 compatible = "olimex,a10s-olinuxino-micro", "allwinner,sun5i-a10s"; 57 57 58 + aliases { 59 + serial0 = &uart0; 60 + serial1 = &uart2; 61 + serial2 = &uart3; 62 + }; 63 + 58 64 soc@01c00000 { 59 65 emac: ethernet@01c0b000 { 60 66 pinctrl-names = "default";
+2 -6
arch/arm/boot/dts/sun5i-a10s.dtsi
··· 18 18 19 19 aliases { 20 20 ethernet0 = &emac; 21 - serial0 = &uart0; 22 - serial1 = &uart1; 23 - serial2 = &uart2; 24 - serial3 = &uart3; 25 21 }; 26 22 27 23 chosen { ··· 386 390 reg-names = "phy_ctrl", "pmu1"; 387 391 clocks = <&usb_clk 8>; 388 392 clock-names = "usb_phy"; 389 - resets = <&usb_clk 1>; 390 - reset-names = "usb1_reset"; 393 + resets = <&usb_clk 0>, <&usb_clk 1>; 394 + reset-names = "usb0_reset", "usb1_reset"; 391 395 status = "disabled"; 392 396 }; 393 397
+4
arch/arm/boot/dts/sun5i-a13-hsg-h702.dts
··· 53 53 model = "HSG H702"; 54 54 compatible = "hsg,h702", "allwinner,sun5i-a13"; 55 55 56 + aliases { 57 + serial0 = &uart1; 58 + }; 59 + 56 60 soc@01c00000 { 57 61 mmc0: mmc@01c0f000 { 58 62 pinctrl-names = "default";
+4
arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts
··· 54 54 model = "Olimex A13-Olinuxino Micro"; 55 55 compatible = "olimex,a13-olinuxino-micro", "allwinner,sun5i-a13"; 56 56 57 + aliases { 58 + serial0 = &uart1; 59 + }; 60 + 57 61 soc@01c00000 { 58 62 mmc0: mmc@01c0f000 { 59 63 pinctrl-names = "default";
+4
arch/arm/boot/dts/sun5i-a13-olinuxino.dts
··· 55 55 model = "Olimex A13-Olinuxino"; 56 56 compatible = "olimex,a13-olinuxino", "allwinner,sun5i-a13"; 57 57 58 + aliases { 59 + serial0 = &uart1; 60 + }; 61 + 58 62 soc@01c00000 { 59 63 mmc0: mmc@01c0f000 { 60 64 pinctrl-names = "default";
+2 -7
arch/arm/boot/dts/sun5i-a13.dtsi
··· 16 16 / { 17 17 interrupt-parent = <&intc>; 18 18 19 - aliases { 20 - serial0 = &uart1; 21 - serial1 = &uart3; 22 - }; 23 - 24 19 cpus { 25 20 #address-cells = <1>; 26 21 #size-cells = <0>; ··· 344 349 reg-names = "phy_ctrl", "pmu1"; 345 350 clocks = <&usb_clk 8>; 346 351 clock-names = "usb_phy"; 347 - resets = <&usb_clk 1>; 348 - reset-names = "usb1_reset"; 352 + resets = <&usb_clk 0>, <&usb_clk 1>; 353 + reset-names = "usb0_reset", "usb1_reset"; 349 354 status = "disabled"; 350 355 }; 351 356
-6
arch/arm/boot/dts/sun6i-a31.dtsi
··· 53 53 interrupt-parent = <&gic>; 54 54 55 55 aliases { 56 - serial0 = &uart0; 57 - serial1 = &uart1; 58 - serial2 = &uart2; 59 - serial3 = &uart3; 60 - serial4 = &uart4; 61 - serial5 = &uart5; 62 56 ethernet0 = &gmac; 63 57 }; 64 58
+6
arch/arm/boot/dts/sun7i-a20-bananapi.dts
··· 55 55 model = "LeMaker Banana Pi"; 56 56 compatible = "lemaker,bananapi", "allwinner,sun7i-a20"; 57 57 58 + aliases { 59 + serial0 = &uart0; 60 + serial1 = &uart3; 61 + serial2 = &uart7; 62 + }; 63 + 58 64 soc@01c00000 { 59 65 spi0: spi@01c05000 { 60 66 pinctrl-names = "default";
+8
arch/arm/boot/dts/sun7i-a20-hummingbird.dts
··· 19 19 model = "Merrii A20 Hummingbird"; 20 20 compatible = "merrii,a20-hummingbird", "allwinner,sun7i-a20"; 21 21 22 + aliases { 23 + serial0 = &uart0; 24 + serial1 = &uart2; 25 + serial2 = &uart3; 26 + serial3 = &uart4; 27 + serial4 = &uart5; 28 + }; 29 + 22 30 soc@01c00000 { 23 31 mmc0: mmc@01c0f000 { 24 32 pinctrl-names = "default";
+3
arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts
··· 20 20 compatible = "olimex,a20-olinuxino-micro", "allwinner,sun7i-a20"; 21 21 22 22 aliases { 23 + serial0 = &uart0; 24 + serial1 = &uart6; 25 + serial2 = &uart7; 23 26 spi0 = &spi1; 24 27 spi1 = &spi2; 25 28 };
-8
arch/arm/boot/dts/sun7i-a20.dtsi
··· 54 54 55 55 aliases { 56 56 ethernet0 = &gmac; 57 - serial0 = &uart0; 58 - serial1 = &uart1; 59 - serial2 = &uart2; 60 - serial3 = &uart3; 61 - serial4 = &uart4; 62 - serial5 = &uart5; 63 - serial6 = &uart6; 64 - serial7 = &uart7; 65 57 }; 66 58 67 59 chosen {
+4
arch/arm/boot/dts/sun8i-a23-ippo-q8h-v5.dts
··· 55 55 model = "Ippo Q8H Dual Core Tablet (v5)"; 56 56 compatible = "ippo,q8h-v5", "allwinner,sun8i-a23"; 57 57 58 + aliases { 59 + serial0 = &r_uart; 60 + }; 61 + 58 62 chosen { 59 63 bootargs = "earlyprintk console=ttyS0,115200"; 60 64 };
-9
arch/arm/boot/dts/sun8i-a23.dtsi
··· 52 52 / { 53 53 interrupt-parent = <&gic>; 54 54 55 - aliases { 56 - serial0 = &uart0; 57 - serial1 = &uart1; 58 - serial2 = &uart2; 59 - serial3 = &uart3; 60 - serial4 = &uart4; 61 - serial5 = &r_uart; 62 - }; 63 - 64 55 cpus { 65 56 #address-cells = <1>; 66 57 #size-cells = <0>;
+5
arch/arm/boot/dts/sun9i-a80-optimus.dts
··· 54 54 model = "Merrii A80 Optimus Board"; 55 55 compatible = "merrii,a80-optimus", "allwinner,sun9i-a80"; 56 56 57 + aliases { 58 + serial0 = &uart0; 59 + serial1 = &uart4; 60 + }; 61 + 57 62 chosen { 58 63 bootargs = "earlyprintk console=ttyS0,115200"; 59 64 };
-10
arch/arm/boot/dts/sun9i-a80.dtsi
··· 52 52 / { 53 53 interrupt-parent = <&gic>; 54 54 55 - aliases { 56 - serial0 = &uart0; 57 - serial1 = &uart1; 58 - serial2 = &uart2; 59 - serial3 = &uart3; 60 - serial4 = &uart4; 61 - serial5 = &uart5; 62 - serial6 = &r_uart; 63 - }; 64 - 65 55 cpus { 66 56 #address-cells = <1>; 67 57 #size-cells = <0>;
+10
arch/arm/include/asm/kvm_emulate.h
··· 38 38 vcpu->arch.hcr = HCR_GUEST_MASK; 39 39 } 40 40 41 + static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu) 42 + { 43 + return vcpu->arch.hcr; 44 + } 45 + 46 + static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr) 47 + { 48 + vcpu->arch.hcr = hcr; 49 + } 50 + 41 51 static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu) 42 52 { 43 53 return 1;
-3
arch/arm/include/asm/kvm_host.h
··· 125 125 * Anything that is not used directly from assembly code goes 126 126 * here. 127 127 */ 128 - /* dcache set/way operation pending */ 129 - int last_pcpu; 130 - cpumask_t require_dcache_flush; 131 128 132 129 /* Don't run the guest on this vcpu */ 133 130 bool pause;
+67 -10
arch/arm/include/asm/kvm_mmu.h
··· 44 44 45 45 #ifndef __ASSEMBLY__ 46 46 47 + #include <linux/highmem.h> 47 48 #include <asm/cacheflush.h> 48 49 #include <asm/pgalloc.h> 49 50 ··· 162 161 return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101; 163 162 } 164 163 165 - static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva, 166 - unsigned long size, 167 - bool ipa_uncached) 164 + static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn, 165 + unsigned long size, 166 + bool ipa_uncached) 168 167 { 169 - if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached) 170 - kvm_flush_dcache_to_poc((void *)hva, size); 171 - 172 168 /* 173 169 * If we are going to insert an instruction page and the icache is 174 170 * either VIPT or PIPT, there is a potential problem where the host ··· 177 179 * 178 180 * VIVT caches are tagged using both the ASID and the VMID and doesn't 179 181 * need any kind of flushing (DDI 0406C.b - Page B3-1392). 182 + * 183 + * We need to do this through a kernel mapping (using the 184 + * user-space mapping has proved to be the wrong 185 + * solution). For that, we need to kmap one page at a time, 186 + * and iterate over the range. 
180 187 */ 181 - if (icache_is_pipt()) { 182 - __cpuc_coherent_user_range(hva, hva + size); 183 - } else if (!icache_is_vivt_asid_tagged()) { 188 + 189 + bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached; 190 + 191 + VM_BUG_ON(size & PAGE_MASK); 192 + 193 + if (!need_flush && !icache_is_pipt()) 194 + goto vipt_cache; 195 + 196 + while (size) { 197 + void *va = kmap_atomic_pfn(pfn); 198 + 199 + if (need_flush) 200 + kvm_flush_dcache_to_poc(va, PAGE_SIZE); 201 + 202 + if (icache_is_pipt()) 203 + __cpuc_coherent_user_range((unsigned long)va, 204 + (unsigned long)va + PAGE_SIZE); 205 + 206 + size -= PAGE_SIZE; 207 + pfn++; 208 + 209 + kunmap_atomic(va); 210 + } 211 + 212 + vipt_cache: 213 + if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) { 184 214 /* any kind of VIPT cache */ 185 215 __flush_icache_all(); 186 216 } 187 217 } 188 218 219 + static inline void __kvm_flush_dcache_pte(pte_t pte) 220 + { 221 + void *va = kmap_atomic(pte_page(pte)); 222 + 223 + kvm_flush_dcache_to_poc(va, PAGE_SIZE); 224 + 225 + kunmap_atomic(va); 226 + } 227 + 228 + static inline void __kvm_flush_dcache_pmd(pmd_t pmd) 229 + { 230 + unsigned long size = PMD_SIZE; 231 + pfn_t pfn = pmd_pfn(pmd); 232 + 233 + while (size) { 234 + void *va = kmap_atomic_pfn(pfn); 235 + 236 + kvm_flush_dcache_to_poc(va, PAGE_SIZE); 237 + 238 + pfn++; 239 + size -= PAGE_SIZE; 240 + 241 + kunmap_atomic(va); 242 + } 243 + } 244 + 245 + static inline void __kvm_flush_dcache_pud(pud_t pud) 246 + { 247 + } 248 + 189 249 #define kvm_virt_to_phys(x) virt_to_idmap((unsigned long)(x)) 190 250 191 - void stage2_flush_vm(struct kvm *kvm); 251 + void kvm_set_way_flush(struct kvm_vcpu *vcpu); 252 + void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled); 192 253 193 254 #endif /* !__ASSEMBLY__ */ 194 255
+2
arch/arm/kernel/entry-v7m.S
··· 22 22 23 23 __invalid_entry: 24 24 v7m_exception_entry 25 + #ifdef CONFIG_PRINTK 25 26 adr r0, strerr 26 27 mrs r1, ipsr 27 28 mov r2, lr 28 29 bl printk 30 + #endif 29 31 mov r0, sp 30 32 bl show_regs 31 33 1: b 1b
-10
arch/arm/kvm/arm.c
··· 281 281 vcpu->cpu = cpu; 282 282 vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state); 283 283 284 - /* 285 - * Check whether this vcpu requires the cache to be flushed on 286 - * this physical CPU. This is a consequence of doing dcache 287 - * operations by set/way on this vcpu. We do it here to be in 288 - * a non-preemptible section. 289 - */ 290 - if (cpumask_test_and_clear_cpu(cpu, &vcpu->arch.require_dcache_flush)) 291 - flush_cache_all(); /* We'd really want v7_flush_dcache_all() */ 292 - 293 284 kvm_arm_set_running_vcpu(vcpu); 294 285 } 295 286 ··· 532 541 ret = kvm_call_hyp(__kvm_vcpu_run, vcpu); 533 542 534 543 vcpu->mode = OUTSIDE_GUEST_MODE; 535 - vcpu->arch.last_pcpu = smp_processor_id(); 536 544 kvm_guest_exit(); 537 545 trace_kvm_exit(*vcpu_pc(vcpu)); 538 546 /*
+14 -56
arch/arm/kvm/coproc.c
··· 189 189 return true; 190 190 } 191 191 192 - /* See note at ARM ARM B1.14.4 */ 192 + /* 193 + * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized). 194 + */ 193 195 static bool access_dcsw(struct kvm_vcpu *vcpu, 194 196 const struct coproc_params *p, 195 197 const struct coproc_reg *r) 196 198 { 197 - unsigned long val; 198 - int cpu; 199 - 200 199 if (!p->is_write) 201 200 return read_from_write_only(vcpu, p); 202 201 203 - cpu = get_cpu(); 204 - 205 - cpumask_setall(&vcpu->arch.require_dcache_flush); 206 - cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush); 207 - 208 - /* If we were already preempted, take the long way around */ 209 - if (cpu != vcpu->arch.last_pcpu) { 210 - flush_cache_all(); 211 - goto done; 212 - } 213 - 214 - val = *vcpu_reg(vcpu, p->Rt1); 215 - 216 - switch (p->CRm) { 217 - case 6: /* Upgrade DCISW to DCCISW, as per HCR.SWIO */ 218 - case 14: /* DCCISW */ 219 - asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val)); 220 - break; 221 - 222 - case 10: /* DCCSW */ 223 - asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (val)); 224 - break; 225 - } 226 - 227 - done: 228 - put_cpu(); 229 - 202 + kvm_set_way_flush(vcpu); 230 203 return true; 231 204 } 232 205 233 206 /* 234 207 * Generic accessor for VM registers. Only called as long as HCR_TVM 235 - * is set. 208 + * is set. If the guest enables the MMU, we stop trapping the VM 209 + * sys_regs and leave it in complete control of the caches. 210 + * 211 + * Used by the cpu-specific code. 
236 212 */ 237 - static bool access_vm_reg(struct kvm_vcpu *vcpu, 238 - const struct coproc_params *p, 239 - const struct coproc_reg *r) 213 + bool access_vm_reg(struct kvm_vcpu *vcpu, 214 + const struct coproc_params *p, 215 + const struct coproc_reg *r) 240 216 { 217 + bool was_enabled = vcpu_has_cache_enabled(vcpu); 218 + 241 219 BUG_ON(!p->is_write); 242 220 243 221 vcpu->arch.cp15[r->reg] = *vcpu_reg(vcpu, p->Rt1); 244 222 if (p->is_64bit) 245 223 vcpu->arch.cp15[r->reg + 1] = *vcpu_reg(vcpu, p->Rt2); 246 224 247 - return true; 248 - } 249 - 250 - /* 251 - * SCTLR accessor. Only called as long as HCR_TVM is set. If the 252 - * guest enables the MMU, we stop trapping the VM sys_regs and leave 253 - * it in complete control of the caches. 254 - * 255 - * Used by the cpu-specific code. 256 - */ 257 - bool access_sctlr(struct kvm_vcpu *vcpu, 258 - const struct coproc_params *p, 259 - const struct coproc_reg *r) 260 - { 261 - access_vm_reg(vcpu, p, r); 262 - 263 - if (vcpu_has_cache_enabled(vcpu)) { /* MMU+Caches enabled? */ 264 - vcpu->arch.hcr &= ~HCR_TVM; 265 - stage2_flush_vm(vcpu->kvm); 266 - } 267 - 225 + kvm_toggle_cache(vcpu, was_enabled); 268 226 return true; 269 227 } 270 228
+3 -3
arch/arm/kvm/coproc.h
··· 153 153 #define is64 .is_64 = true 154 154 #define is32 .is_64 = false 155 155 156 - bool access_sctlr(struct kvm_vcpu *vcpu, 157 - const struct coproc_params *p, 158 - const struct coproc_reg *r); 156 + bool access_vm_reg(struct kvm_vcpu *vcpu, 157 + const struct coproc_params *p, 158 + const struct coproc_reg *r); 159 159 160 160 #endif /* __ARM_KVM_COPROC_LOCAL_H__ */
+1 -1
arch/arm/kvm/coproc_a15.c
··· 34 34 static const struct coproc_reg a15_regs[] = { 35 35 /* SCTLR: swapped by interrupt.S. */ 36 36 { CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32, 37 - access_sctlr, reset_val, c1_SCTLR, 0x00C50078 }, 37 + access_vm_reg, reset_val, c1_SCTLR, 0x00C50078 }, 38 38 }; 39 39 40 40 static struct kvm_coproc_target_table a15_target_table = {
+1 -1
arch/arm/kvm/coproc_a7.c
··· 37 37 static const struct coproc_reg a7_regs[] = { 38 38 /* SCTLR: swapped by interrupt.S. */ 39 39 { CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32, 40 - access_sctlr, reset_val, c1_SCTLR, 0x00C50878 }, 40 + access_vm_reg, reset_val, c1_SCTLR, 0x00C50878 }, 41 41 }; 42 42 43 43 static struct kvm_coproc_target_table a7_target_table = {
+144 -20
arch/arm/kvm/mmu.c
··· 58 58 kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa); 59 59 } 60 60 61 + /* 62 + * D-Cache management functions. They take the page table entries by 63 + * value, as they are flushing the cache using the kernel mapping (or 64 + * kmap on 32bit). 65 + */ 66 + static void kvm_flush_dcache_pte(pte_t pte) 67 + { 68 + __kvm_flush_dcache_pte(pte); 69 + } 70 + 71 + static void kvm_flush_dcache_pmd(pmd_t pmd) 72 + { 73 + __kvm_flush_dcache_pmd(pmd); 74 + } 75 + 76 + static void kvm_flush_dcache_pud(pud_t pud) 77 + { 78 + __kvm_flush_dcache_pud(pud); 79 + } 80 + 61 81 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, 62 82 int min, int max) 63 83 { ··· 139 119 put_page(virt_to_page(pmd)); 140 120 } 141 121 122 + /* 123 + * Unmapping vs dcache management: 124 + * 125 + * If a guest maps certain memory pages as uncached, all writes will 126 + * bypass the data cache and go directly to RAM. However, the CPUs 127 + * can still speculate reads (not writes) and fill cache lines with 128 + * data. 129 + * 130 + * Those cache lines will be *clean* cache lines though, so a 131 + * clean+invalidate operation is equivalent to an invalidate 132 + * operation, because no cache lines are marked dirty. 133 + * 134 + * Those clean cache lines could be filled prior to an uncached write 135 + * by the guest, and the cache coherent IO subsystem would therefore 136 + * end up writing old data to disk. 137 + * 138 + * This is why right after unmapping a page/section and invalidating 139 + * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure 140 + * the IO subsystem will never hit in the cache. 
141 + */ 142 142 static void unmap_ptes(struct kvm *kvm, pmd_t *pmd, 143 143 phys_addr_t addr, phys_addr_t end) 144 144 { ··· 168 128 start_pte = pte = pte_offset_kernel(pmd, addr); 169 129 do { 170 130 if (!pte_none(*pte)) { 131 + pte_t old_pte = *pte; 132 + 171 133 kvm_set_pte(pte, __pte(0)); 172 - put_page(virt_to_page(pte)); 173 134 kvm_tlb_flush_vmid_ipa(kvm, addr); 135 + 136 + /* No need to invalidate the cache for device mappings */ 137 + if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE) 138 + kvm_flush_dcache_pte(old_pte); 139 + 140 + put_page(virt_to_page(pte)); 174 141 } 175 142 } while (pte++, addr += PAGE_SIZE, addr != end); 176 143 ··· 196 149 next = kvm_pmd_addr_end(addr, end); 197 150 if (!pmd_none(*pmd)) { 198 151 if (kvm_pmd_huge(*pmd)) { 152 + pmd_t old_pmd = *pmd; 153 + 199 154 pmd_clear(pmd); 200 155 kvm_tlb_flush_vmid_ipa(kvm, addr); 156 + 157 + kvm_flush_dcache_pmd(old_pmd); 158 + 201 159 put_page(virt_to_page(pmd)); 202 160 } else { 203 161 unmap_ptes(kvm, pmd, addr, next); ··· 225 173 next = kvm_pud_addr_end(addr, end); 226 174 if (!pud_none(*pud)) { 227 175 if (pud_huge(*pud)) { 176 + pud_t old_pud = *pud; 177 + 228 178 pud_clear(pud); 229 179 kvm_tlb_flush_vmid_ipa(kvm, addr); 180 + 181 + kvm_flush_dcache_pud(old_pud); 182 + 230 183 put_page(virt_to_page(pud)); 231 184 } else { 232 185 unmap_pmds(kvm, pud, addr, next); ··· 266 209 267 210 pte = pte_offset_kernel(pmd, addr); 268 211 do { 269 - if (!pte_none(*pte)) { 270 - hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT); 271 - kvm_flush_dcache_to_poc((void*)hva, PAGE_SIZE); 272 - } 212 + if (!pte_none(*pte) && 213 + (pte_val(*pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE) 214 + kvm_flush_dcache_pte(*pte); 273 215 } while (pte++, addr += PAGE_SIZE, addr != end); 274 216 } 275 217 ··· 282 226 do { 283 227 next = kvm_pmd_addr_end(addr, end); 284 228 if (!pmd_none(*pmd)) { 285 - if (kvm_pmd_huge(*pmd)) { 286 - hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT); 287 - 
kvm_flush_dcache_to_poc((void*)hva, PMD_SIZE); 288 - } else { 229 + if (kvm_pmd_huge(*pmd)) 230 + kvm_flush_dcache_pmd(*pmd); 231 + else 289 232 stage2_flush_ptes(kvm, pmd, addr, next); 290 - } 291 233 } 292 234 } while (pmd++, addr = next, addr != end); 293 235 } ··· 300 246 do { 301 247 next = kvm_pud_addr_end(addr, end); 302 248 if (!pud_none(*pud)) { 303 - if (pud_huge(*pud)) { 304 - hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT); 305 - kvm_flush_dcache_to_poc((void*)hva, PUD_SIZE); 306 - } else { 249 + if (pud_huge(*pud)) 250 + kvm_flush_dcache_pud(*pud); 251 + else 307 252 stage2_flush_pmds(kvm, pud, addr, next); 308 - } 309 253 } 310 254 } while (pud++, addr = next, addr != end); 311 255 } ··· 330 278 * Go through the stage 2 page tables and invalidate any cache lines 331 279 * backing memory already mapped to the VM. 332 280 */ 333 - void stage2_flush_vm(struct kvm *kvm) 281 + static void stage2_flush_vm(struct kvm *kvm) 334 282 { 335 283 struct kvm_memslots *slots; 336 284 struct kvm_memory_slot *memslot; ··· 957 905 return !pfn_valid(pfn); 958 906 } 959 907 908 + static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn, 909 + unsigned long size, bool uncached) 910 + { 911 + __coherent_cache_guest_page(vcpu, pfn, size, uncached); 912 + } 913 + 960 914 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, 961 915 struct kvm_memory_slot *memslot, unsigned long hva, 962 916 unsigned long fault_status) ··· 1052 994 kvm_set_s2pmd_writable(&new_pmd); 1053 995 kvm_set_pfn_dirty(pfn); 1054 996 } 1055 - coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE, 1056 - fault_ipa_uncached); 997 + coherent_cache_guest_page(vcpu, pfn, PMD_SIZE, fault_ipa_uncached); 1057 998 ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd); 1058 999 } else { 1059 1000 pte_t new_pte = pfn_pte(pfn, mem_type); ··· 1060 1003 kvm_set_s2pte_writable(&new_pte); 1061 1004 kvm_set_pfn_dirty(pfn); 1062 1005 } 1063 - coherent_cache_guest_page(vcpu, hva, 
PAGE_SIZE, 1064 - fault_ipa_uncached); 1006 + coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE, fault_ipa_uncached); 1065 1007 ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, 1066 1008 pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE)); 1067 1009 } ··· 1466 1410 spin_lock(&kvm->mmu_lock); 1467 1411 unmap_stage2_range(kvm, gpa, size); 1468 1412 spin_unlock(&kvm->mmu_lock); 1413 + } 1414 + 1415 + /* 1416 + * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized). 1417 + * 1418 + * Main problems: 1419 + * - S/W ops are local to a CPU (not broadcast) 1420 + * - We have line migration behind our back (speculation) 1421 + * - System caches don't support S/W at all (damn!) 1422 + * 1423 + * In the face of the above, the best we can do is to try and convert 1424 + * S/W ops to VA ops. Because the guest is not allowed to infer the 1425 + * S/W to PA mapping, it can only use S/W to nuke the whole cache, 1426 + * which is a rather good thing for us. 1427 + * 1428 + * Also, it is only used when turning caches on/off ("The expected 1429 + * usage of the cache maintenance instructions that operate by set/way 1430 + * is associated with the cache maintenance instructions associated 1431 + * with the powerdown and powerup of caches, if this is required by 1432 + * the implementation."). 1433 + * 1434 + * We use the following policy: 1435 + * 1436 + * - If we trap a S/W operation, we enable VM trapping to detect 1437 + * caches being turned on/off, and do a full clean. 1438 + * 1439 + * - We flush the caches on both caches being turned on and off. 1440 + * 1441 + * - Once the caches are enabled, we stop trapping VM ops. 1442 + */ 1443 + void kvm_set_way_flush(struct kvm_vcpu *vcpu) 1444 + { 1445 + unsigned long hcr = vcpu_get_hcr(vcpu); 1446 + 1447 + /* 1448 + * If this is the first time we do a S/W operation 1449 + * (i.e. HCR_TVM not set) flush the whole memory, and set the 1450 + * VM trapping. 
1451 + * 1452 + * Otherwise, rely on the VM trapping to wait for the MMU + 1453 + * Caches to be turned off. At that point, we'll be able to 1454 + * clean the caches again. 1455 + */ 1456 + if (!(hcr & HCR_TVM)) { 1457 + trace_kvm_set_way_flush(*vcpu_pc(vcpu), 1458 + vcpu_has_cache_enabled(vcpu)); 1459 + stage2_flush_vm(vcpu->kvm); 1460 + vcpu_set_hcr(vcpu, hcr | HCR_TVM); 1461 + } 1462 + } 1463 + 1464 + void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled) 1465 + { 1466 + bool now_enabled = vcpu_has_cache_enabled(vcpu); 1467 + 1468 + /* 1469 + * If switching the MMU+caches on, need to invalidate the caches. 1470 + * If switching it off, need to clean the caches. 1471 + * Clean + invalidate does the trick always. 1472 + */ 1473 + if (now_enabled != was_enabled) 1474 + stage2_flush_vm(vcpu->kvm); 1475 + 1476 + /* Caches are now on, stop trapping VM ops (until a S/W op) */ 1477 + if (now_enabled) 1478 + vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) & ~HCR_TVM); 1479 + 1480 + trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled); 1469 1481 }
+39
arch/arm/kvm/trace.h
··· 223 223 __entry->vcpu_pc, __entry->r0, __entry->imm) 224 224 ); 225 225 226 + TRACE_EVENT(kvm_set_way_flush, 227 + TP_PROTO(unsigned long vcpu_pc, bool cache), 228 + TP_ARGS(vcpu_pc, cache), 229 + 230 + TP_STRUCT__entry( 231 + __field( unsigned long, vcpu_pc ) 232 + __field( bool, cache ) 233 + ), 234 + 235 + TP_fast_assign( 236 + __entry->vcpu_pc = vcpu_pc; 237 + __entry->cache = cache; 238 + ), 239 + 240 + TP_printk("S/W flush at 0x%016lx (cache %s)", 241 + __entry->vcpu_pc, __entry->cache ? "on" : "off") 242 + ); 243 + 244 + TRACE_EVENT(kvm_toggle_cache, 245 + TP_PROTO(unsigned long vcpu_pc, bool was, bool now), 246 + TP_ARGS(vcpu_pc, was, now), 247 + 248 + TP_STRUCT__entry( 249 + __field( unsigned long, vcpu_pc ) 250 + __field( bool, was ) 251 + __field( bool, now ) 252 + ), 253 + 254 + TP_fast_assign( 255 + __entry->vcpu_pc = vcpu_pc; 256 + __entry->was = was; 257 + __entry->now = now; 258 + ), 259 + 260 + TP_printk("VM op at 0x%016lx (cache was %s, now %s)", 261 + __entry->vcpu_pc, __entry->was ? "on" : "off", 262 + __entry->now ? "on" : "off") 263 + ); 264 + 226 265 #endif /* _TRACE_KVM_H */ 227 266 228 267 #undef TRACE_INCLUDE_PATH
+7
arch/arm/mach-mvebu/coherency.c
··· 190 190 arch_ioremap_caller = armada_pcie_wa_ioremap_caller; 191 191 192 192 /* 193 + * We should switch the PL310 to I/O coherency mode only if 194 + * I/O coherency is actually enabled. 195 + */ 196 + if (!coherency_available()) 197 + return; 198 + 199 + /* 193 200 * Add the PL310 property "arm,io-coherent". This makes sure the 194 201 * outer sync operation is not used, which allows to 195 202 * workaround the system erratum that causes deadlocks when
+20
arch/arm/mach-shmobile/board-ape6evm.c
··· 18 18 #include <linux/gpio_keys.h> 19 19 #include <linux/input.h> 20 20 #include <linux/interrupt.h> 21 + #include <linux/irqchip.h> 22 + #include <linux/irqchip/arm-gic.h> 21 23 #include <linux/kernel.h> 22 24 #include <linux/mfd/tmio.h> 23 25 #include <linux/mmc/host.h> ··· 275 273 sizeof(ape6evm_leds_pdata)); 276 274 } 277 275 276 + static void __init ape6evm_legacy_init_time(void) 277 + { 278 + /* Do not invoke DT-based timers via clocksource_of_init() */ 279 + } 280 + 281 + static void __init ape6evm_legacy_init_irq(void) 282 + { 283 + void __iomem *gic_dist_base = ioremap_nocache(0xf1001000, 0x1000); 284 + void __iomem *gic_cpu_base = ioremap_nocache(0xf1002000, 0x1000); 285 + 286 + gic_init(0, 29, gic_dist_base, gic_cpu_base); 287 + 288 + /* Do not invoke DT-based interrupt code via irqchip_init() */ 289 + } 290 + 291 + 278 292 static const char *ape6evm_boards_compat_dt[] __initdata = { 279 293 "renesas,ape6evm", 280 294 NULL, ··· 298 280 299 281 DT_MACHINE_START(APE6EVM_DT, "ape6evm") 300 282 .init_early = shmobile_init_delay, 283 + .init_irq = ape6evm_legacy_init_irq, 301 284 .init_machine = ape6evm_add_standard_devices, 302 285 .init_late = shmobile_init_late, 303 286 .dt_compat = ape6evm_boards_compat_dt, 287 + .init_time = ape6evm_legacy_init_time, 304 288 MACHINE_END
+13
arch/arm/mach-shmobile/board-lager.c
··· 21 21 #include <linux/input.h> 22 22 #include <linux/interrupt.h> 23 23 #include <linux/irq.h> 24 + #include <linux/irqchip.h> 25 + #include <linux/irqchip/arm-gic.h> 24 26 #include <linux/kernel.h> 25 27 #include <linux/leds.h> 26 28 #include <linux/mfd/tmio.h> ··· 813 811 lager_ksz8041_fixup); 814 812 } 815 813 814 + static void __init lager_legacy_init_irq(void) 815 + { 816 + void __iomem *gic_dist_base = ioremap_nocache(0xf1001000, 0x1000); 817 + void __iomem *gic_cpu_base = ioremap_nocache(0xf1002000, 0x1000); 818 + 819 + gic_init(0, 29, gic_dist_base, gic_cpu_base); 820 + 821 + /* Do not invoke DT-based interrupt code via irqchip_init() */ 822 + } 823 + 816 824 static const char * const lager_boards_compat_dt[] __initconst = { 817 825 "renesas,lager", 818 826 NULL, ··· 831 819 DT_MACHINE_START(LAGER_DT, "lager") 832 820 .smp = smp_ops(r8a7790_smp_ops), 833 821 .init_early = shmobile_init_delay, 822 + .init_irq = lager_legacy_init_irq, 834 823 .init_time = rcar_gen2_timer_init, 835 824 .init_machine = lager_init, 836 825 .init_late = shmobile_init_late,
+2
arch/arm/mach-shmobile/setup-rcar-gen2.c
··· 133 133 #ifdef CONFIG_COMMON_CLK 134 134 rcar_gen2_clocks_init(mode); 135 135 #endif 136 + #ifdef CONFIG_ARCH_SHMOBILE_MULTI 136 137 clocksource_of_init(); 138 + #endif 137 139 } 138 140 139 141 struct memory_reserve_config {
+12
arch/arm/mach-shmobile/timer.c
··· 70 70 if (!max_freq) 71 71 return; 72 72 73 + #ifdef CONFIG_ARCH_SHMOBILE_LEGACY 74 + /* Non-multiplatform r8a73a4 SoC cannot use arch timer due 75 + * to GIC being initialized from C and arch timer via DT */ 76 + if (of_machine_is_compatible("renesas,r8a73a4")) 77 + has_arch_timer = false; 78 + 79 + /* Non-multiplatform r8a7790 SoC cannot use arch timer due 80 + * to GIC being initialized from C and arch timer via DT */ 81 + if (of_machine_is_compatible("renesas,r8a7790")) 82 + has_arch_timer = false; 83 + #endif 84 + 73 85 if (!has_arch_timer || !IS_ENABLED(CONFIG_ARM_ARCH_TIMER)) { 74 86 if (is_a7_a8_a9) 75 87 shmobile_setup_delay_hz(max_freq, 1, 3);
+1
arch/arm/mm/Kconfig
··· 1012 1012 1013 1013 config ARM_KERNMEM_PERMS 1014 1014 bool "Restrict kernel memory permissions" 1015 + depends on MMU 1015 1016 help 1016 1017 If this is set, kernel memory other than kernel text (and rodata) 1017 1018 will be made non-executable. The tradeoff is that each region is
+11 -15
arch/arm/mm/context.c
··· 144 144 /* Update the list of reserved ASIDs and the ASID bitmap. */ 145 145 bitmap_clear(asid_map, 0, NUM_USER_ASIDS); 146 146 for_each_possible_cpu(i) { 147 - if (i == cpu) { 148 - asid = 0; 149 - } else { 150 - asid = atomic64_xchg(&per_cpu(active_asids, i), 0); 151 - /* 152 - * If this CPU has already been through a 153 - * rollover, but hasn't run another task in 154 - * the meantime, we must preserve its reserved 155 - * ASID, as this is the only trace we have of 156 - * the process it is still running. 157 - */ 158 - if (asid == 0) 159 - asid = per_cpu(reserved_asids, i); 160 - __set_bit(asid & ~ASID_MASK, asid_map); 161 - } 147 + asid = atomic64_xchg(&per_cpu(active_asids, i), 0); 148 + /* 149 + * If this CPU has already been through a 150 + * rollover, but hasn't run another task in 151 + * the meantime, we must preserve its reserved 152 + * ASID, as this is the only trace we have of 153 + * the process it is still running. 154 + */ 155 + if (asid == 0) 156 + asid = per_cpu(reserved_asids, i); 157 + __set_bit(asid & ~ASID_MASK, asid_map); 162 158 per_cpu(reserved_asids, i) = asid; 163 159 } 164 160
+44 -18
arch/arm/mm/dma-mapping.c
··· 1940 1940 } 1941 1941 EXPORT_SYMBOL_GPL(arm_iommu_release_mapping); 1942 1942 1943 - /** 1944 - * arm_iommu_attach_device 1945 - * @dev: valid struct device pointer 1946 - * @mapping: io address space mapping structure (returned from 1947 - * arm_iommu_create_mapping) 1948 - * 1949 - * Attaches specified io address space mapping to the provided device, 1950 - * More than one client might be attached to the same io address space 1951 - * mapping. 1952 - */ 1953 - int arm_iommu_attach_device(struct device *dev, 1954 - struct dma_iommu_mapping *mapping) 1943 + static int __arm_iommu_attach_device(struct device *dev, 1944 + struct dma_iommu_mapping *mapping) 1955 1945 { 1956 1946 int err; 1957 1947 ··· 1955 1965 pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev)); 1956 1966 return 0; 1957 1967 } 1958 - EXPORT_SYMBOL_GPL(arm_iommu_attach_device); 1959 1968 1960 1969 /** 1961 - * arm_iommu_detach_device 1970 + * arm_iommu_attach_device 1962 1971 * @dev: valid struct device pointer 1972 + * @mapping: io address space mapping structure (returned from 1973 + * arm_iommu_create_mapping) 1963 1974 * 1964 - * Detaches the provided device from a previously attached map. 1975 + * Attaches specified io address space mapping to the provided device. 1976 + * This replaces the dma operations (dma_map_ops pointer) with the 1977 + * IOMMU aware version. 1978 + * 1979 + * More than one client might be attached to the same io address space 1980 + * mapping. 
1965 1981 */ 1966 - void arm_iommu_detach_device(struct device *dev) 1982 + int arm_iommu_attach_device(struct device *dev, 1983 + struct dma_iommu_mapping *mapping) 1984 + { 1985 + int err; 1986 + 1987 + err = __arm_iommu_attach_device(dev, mapping); 1988 + if (err) 1989 + return err; 1990 + 1991 + set_dma_ops(dev, &iommu_ops); 1992 + return 0; 1993 + } 1994 + EXPORT_SYMBOL_GPL(arm_iommu_attach_device); 1995 + 1996 + static void __arm_iommu_detach_device(struct device *dev) 1967 1997 { 1968 1998 struct dma_iommu_mapping *mapping; 1969 1999 ··· 1998 1988 dev->archdata.mapping = NULL; 1999 1989 2000 1990 pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev)); 1991 + } 1992 + 1993 + /** 1994 + * arm_iommu_detach_device 1995 + * @dev: valid struct device pointer 1996 + * 1997 + * Detaches the provided device from a previously attached map. 1998 + * This voids the dma operations (dma_map_ops pointer) 1999 + */ 2000 + void arm_iommu_detach_device(struct device *dev) 2001 + { 2002 + __arm_iommu_detach_device(dev); 2003 + set_dma_ops(dev, NULL); 2001 2004 } 2002 2005 EXPORT_SYMBOL_GPL(arm_iommu_detach_device); 2003 2006 ··· 2034 2011 return false; 2035 2012 } 2036 2013 2037 - if (arm_iommu_attach_device(dev, mapping)) { 2014 + if (__arm_iommu_attach_device(dev, mapping)) { 2038 2015 pr_warn("Failed to attached device %s to IOMMU_mapping\n", 2039 2016 dev_name(dev)); 2040 2017 arm_iommu_release_mapping(mapping); ··· 2048 2025 { 2049 2026 struct dma_iommu_mapping *mapping = dev->archdata.mapping; 2050 2027 2051 - arm_iommu_detach_device(dev); 2028 + if (!mapping) 2029 + return; 2030 + 2031 + __arm_iommu_detach_device(dev); 2052 2032 arm_iommu_release_mapping(mapping); 2053 2033 } 2054 2034
+10
arch/arm64/include/asm/kvm_emulate.h
··· 45 45 vcpu->arch.hcr_el2 &= ~HCR_RW; 46 46 } 47 47 48 + static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu) 49 + { 50 + return vcpu->arch.hcr_el2; 51 + } 52 + 53 + static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr) 54 + { 55 + vcpu->arch.hcr_el2 = hcr; 56 + } 57 + 48 58 static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu) 49 59 { 50 60 return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
-3
arch/arm64/include/asm/kvm_host.h
··· 116 116 * Anything that is not used directly from assembly code goes 117 117 * here. 118 118 */ 119 - /* dcache set/way operation pending */ 120 - int last_pcpu; 121 - cpumask_t require_dcache_flush; 122 119 123 120 /* Don't run the guest */ 124 121 bool pause;
+28 -6
arch/arm64/include/asm/kvm_mmu.h
··· 243 243 return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101; 244 244 } 245 245 246 - static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva, 247 - unsigned long size, 248 - bool ipa_uncached) 246 + static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn, 247 + unsigned long size, 248 + bool ipa_uncached) 249 249 { 250 + void *va = page_address(pfn_to_page(pfn)); 251 + 250 252 if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached) 251 - kvm_flush_dcache_to_poc((void *)hva, size); 253 + kvm_flush_dcache_to_poc(va, size); 252 254 253 255 if (!icache_is_aliasing()) { /* PIPT */ 254 - flush_icache_range(hva, hva + size); 256 + flush_icache_range((unsigned long)va, 257 + (unsigned long)va + size); 255 258 } else if (!icache_is_aivivt()) { /* non ASID-tagged VIVT */ 256 259 /* any kind of VIPT cache */ 257 260 __flush_icache_all(); 258 261 } 259 262 } 260 263 264 + static inline void __kvm_flush_dcache_pte(pte_t pte) 265 + { 266 + struct page *page = pte_page(pte); 267 + kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE); 268 + } 269 + 270 + static inline void __kvm_flush_dcache_pmd(pmd_t pmd) 271 + { 272 + struct page *page = pmd_page(pmd); 273 + kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE); 274 + } 275 + 276 + static inline void __kvm_flush_dcache_pud(pud_t pud) 277 + { 278 + struct page *page = pud_page(pud); 279 + kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE); 280 + } 281 + 261 282 #define kvm_virt_to_phys(x) __virt_to_phys((unsigned long)(x)) 262 283 263 - void stage2_flush_vm(struct kvm *kvm); 284 + void kvm_set_way_flush(struct kvm_vcpu *vcpu); 285 + void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled); 264 286 265 287 #endif /* __ASSEMBLY__ */ 266 288 #endif /* __ARM64_KVM_MMU_H__ */
+10 -65
arch/arm64/kvm/sys_regs.c
··· 69 69 return ccsidr; 70 70 } 71 71 72 - static void do_dc_cisw(u32 val) 73 - { 74 - asm volatile("dc cisw, %x0" : : "r" (val)); 75 - dsb(ish); 76 - } 77 - 78 - static void do_dc_csw(u32 val) 79 - { 80 - asm volatile("dc csw, %x0" : : "r" (val)); 81 - dsb(ish); 82 - } 83 - 84 - /* See note at ARM ARM B1.14.4 */ 72 + /* 73 + * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized). 74 + */ 85 75 static bool access_dcsw(struct kvm_vcpu *vcpu, 86 76 const struct sys_reg_params *p, 87 77 const struct sys_reg_desc *r) 88 78 { 89 - unsigned long val; 90 - int cpu; 91 - 92 79 if (!p->is_write) 93 80 return read_from_write_only(vcpu, p); 94 81 95 - cpu = get_cpu(); 96 - 97 - cpumask_setall(&vcpu->arch.require_dcache_flush); 98 - cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush); 99 - 100 - /* If we were already preempted, take the long way around */ 101 - if (cpu != vcpu->arch.last_pcpu) { 102 - flush_cache_all(); 103 - goto done; 104 - } 105 - 106 - val = *vcpu_reg(vcpu, p->Rt); 107 - 108 - switch (p->CRm) { 109 - case 6: /* Upgrade DCISW to DCCISW, as per HCR.SWIO */ 110 - case 14: /* DCCISW */ 111 - do_dc_cisw(val); 112 - break; 113 - 114 - case 10: /* DCCSW */ 115 - do_dc_csw(val); 116 - break; 117 - } 118 - 119 - done: 120 - put_cpu(); 121 - 82 + kvm_set_way_flush(vcpu); 122 83 return true; 123 84 } 124 85 125 86 /* 126 87 * Generic accessor for VM registers. Only called as long as HCR_TVM 127 - * is set. 88 + * is set. If the guest enables the MMU, we stop trapping the VM 89 + * sys_regs and leave it in complete control of the caches. 
128 90 */ 129 91 static bool access_vm_reg(struct kvm_vcpu *vcpu, 130 92 const struct sys_reg_params *p, 131 93 const struct sys_reg_desc *r) 132 94 { 133 95 unsigned long val; 96 + bool was_enabled = vcpu_has_cache_enabled(vcpu); 134 97 135 98 BUG_ON(!p->is_write); 136 99 ··· 106 143 vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL; 107 144 } 108 145 109 - return true; 110 - } 111 - 112 - /* 113 - * SCTLR_EL1 accessor. Only called as long as HCR_TVM is set. If the 114 - * guest enables the MMU, we stop trapping the VM sys_regs and leave 115 - * it in complete control of the caches. 116 - */ 117 - static bool access_sctlr(struct kvm_vcpu *vcpu, 118 - const struct sys_reg_params *p, 119 - const struct sys_reg_desc *r) 120 - { 121 - access_vm_reg(vcpu, p, r); 122 - 123 - if (vcpu_has_cache_enabled(vcpu)) { /* MMU+Caches enabled? */ 124 - vcpu->arch.hcr_el2 &= ~HCR_TVM; 125 - stage2_flush_vm(vcpu->kvm); 126 - } 127 - 146 + kvm_toggle_cache(vcpu, was_enabled); 128 147 return true; 129 148 } 130 149 ··· 322 377 NULL, reset_mpidr, MPIDR_EL1 }, 323 378 /* SCTLR_EL1 */ 324 379 { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000), 325 - access_sctlr, reset_val, SCTLR_EL1, 0x00C50078 }, 380 + access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 }, 326 381 /* CPACR_EL1 */ 327 382 { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010), 328 383 NULL, reset_val, CPACR_EL1, 0 }, ··· 602 657 * register). 603 658 */ 604 659 static const struct sys_reg_desc cp15_regs[] = { 605 - { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_sctlr, NULL, c1_SCTLR }, 660 + { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR }, 606 661 { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 }, 607 662 { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 }, 608 663 { Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
+2
arch/avr32/mm/fault.c
··· 142 142 if (unlikely(fault & VM_FAULT_ERROR)) { 143 143 if (fault & VM_FAULT_OOM) 144 144 goto out_of_memory; 145 + else if (fault & VM_FAULT_SIGSEGV) 146 + goto bad_area; 145 147 else if (fault & VM_FAULT_SIGBUS) 146 148 goto do_sigbus; 147 149 BUG();
+2
arch/cris/mm/fault.c
··· 176 176 if (unlikely(fault & VM_FAULT_ERROR)) { 177 177 if (fault & VM_FAULT_OOM) 178 178 goto out_of_memory; 179 + else if (fault & VM_FAULT_SIGSEGV) 180 + goto bad_area; 179 181 else if (fault & VM_FAULT_SIGBUS) 180 182 goto do_sigbus; 181 183 BUG();
+2
arch/frv/mm/fault.c
··· 168 168 if (unlikely(fault & VM_FAULT_ERROR)) { 169 169 if (fault & VM_FAULT_OOM) 170 170 goto out_of_memory; 171 + else if (fault & VM_FAULT_SIGSEGV) 172 + goto bad_area; 171 173 else if (fault & VM_FAULT_SIGBUS) 172 174 goto do_sigbus; 173 175 BUG();
+2
arch/ia64/mm/fault.c
··· 172 172 */ 173 173 if (fault & VM_FAULT_OOM) { 174 174 goto out_of_memory; 175 + } else if (fault & VM_FAULT_SIGSEGV) { 176 + goto bad_area; 175 177 } else if (fault & VM_FAULT_SIGBUS) { 176 178 signal = SIGBUS; 177 179 goto bad_area;
+2
arch/m32r/mm/fault.c
··· 200 200 if (unlikely(fault & VM_FAULT_ERROR)) { 201 201 if (fault & VM_FAULT_OOM) 202 202 goto out_of_memory; 203 + else if (fault & VM_FAULT_SIGSEGV) 204 + goto bad_area; 203 205 else if (fault & VM_FAULT_SIGBUS) 204 206 goto do_sigbus; 205 207 BUG();
+2
arch/m68k/mm/fault.c
··· 145 145 if (unlikely(fault & VM_FAULT_ERROR)) { 146 146 if (fault & VM_FAULT_OOM) 147 147 goto out_of_memory; 148 + else if (fault & VM_FAULT_SIGSEGV) 149 + goto map_err; 148 150 else if (fault & VM_FAULT_SIGBUS) 149 151 goto bus_err; 150 152 BUG();
+2
arch/metag/mm/fault.c
··· 141 141 if (unlikely(fault & VM_FAULT_ERROR)) { 142 142 if (fault & VM_FAULT_OOM) 143 143 goto out_of_memory; 144 + else if (fault & VM_FAULT_SIGSEGV) 145 + goto bad_area; 144 146 else if (fault & VM_FAULT_SIGBUS) 145 147 goto do_sigbus; 146 148 BUG();
+2
arch/microblaze/mm/fault.c
··· 224 224 if (unlikely(fault & VM_FAULT_ERROR)) { 225 225 if (fault & VM_FAULT_OOM) 226 226 goto out_of_memory; 227 + else if (fault & VM_FAULT_SIGSEGV) 228 + goto bad_area; 227 229 else if (fault & VM_FAULT_SIGBUS) 228 230 goto do_sigbus; 229 231 BUG();
+2
arch/mips/mm/fault.c
··· 158 158 if (unlikely(fault & VM_FAULT_ERROR)) { 159 159 if (fault & VM_FAULT_OOM) 160 160 goto out_of_memory; 161 + else if (fault & VM_FAULT_SIGSEGV) 162 + goto bad_area; 161 163 else if (fault & VM_FAULT_SIGBUS) 162 164 goto do_sigbus; 163 165 BUG();
+2
arch/mn10300/mm/fault.c
··· 262 262 if (unlikely(fault & VM_FAULT_ERROR)) { 263 263 if (fault & VM_FAULT_OOM) 264 264 goto out_of_memory; 265 + else if (fault & VM_FAULT_SIGSEGV) 266 + goto bad_area; 265 267 else if (fault & VM_FAULT_SIGBUS) 266 268 goto do_sigbus; 267 269 BUG();
+2
arch/nios2/mm/fault.c
··· 135 135 if (unlikely(fault & VM_FAULT_ERROR)) { 136 136 if (fault & VM_FAULT_OOM) 137 137 goto out_of_memory; 138 + else if (fault & VM_FAULT_SIGSEGV) 139 + goto bad_area; 138 140 else if (fault & VM_FAULT_SIGBUS) 139 141 goto do_sigbus; 140 142 BUG();
+2
arch/openrisc/mm/fault.c
··· 171 171 if (unlikely(fault & VM_FAULT_ERROR)) { 172 172 if (fault & VM_FAULT_OOM) 173 173 goto out_of_memory; 174 + else if (fault & VM_FAULT_SIGSEGV) 175 + goto bad_area; 174 176 else if (fault & VM_FAULT_SIGBUS) 175 177 goto do_sigbus; 176 178 BUG();
+2
arch/parisc/mm/fault.c
··· 256 256 */ 257 257 if (fault & VM_FAULT_OOM) 258 258 goto out_of_memory; 259 + else if (fault & VM_FAULT_SIGSEGV) 260 + goto bad_area; 259 261 else if (fault & VM_FAULT_SIGBUS) 260 262 goto bad_area; 261 263 BUG();
+1 -1
arch/powerpc/mm/copro_fault.c
··· 76 76 if (*flt & VM_FAULT_OOM) { 77 77 ret = -ENOMEM; 78 78 goto out_unlock; 79 - } else if (*flt & VM_FAULT_SIGBUS) { 79 + } else if (*flt & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) { 80 80 ret = -EFAULT; 81 81 goto out_unlock; 82 82 }
+2
arch/powerpc/mm/fault.c
··· 437 437 */ 438 438 fault = handle_mm_fault(mm, vma, address, flags); 439 439 if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) { 440 + if (fault & VM_FAULT_SIGSEGV) 441 + goto bad_area; 440 442 rc = mm_fault_error(regs, address, fault); 441 443 if (rc >= MM_FAULT_RETURN) 442 444 goto bail;
+6
arch/s390/mm/fault.c
··· 374 374 do_no_context(regs); 375 375 else 376 376 pagefault_out_of_memory(); 377 + } else if (fault & VM_FAULT_SIGSEGV) { 378 + /* Kernel mode? Handle exceptions or die */ 379 + if (!user_mode(regs)) 380 + do_no_context(regs); 381 + else 382 + do_sigsegv(regs, SEGV_MAPERR); 377 383 } else if (fault & VM_FAULT_SIGBUS) { 378 384 /* Kernel mode? Handle exceptions or die */ 379 385 if (!user_mode(regs))
+2
arch/score/mm/fault.c
··· 114 114 if (unlikely(fault & VM_FAULT_ERROR)) { 115 115 if (fault & VM_FAULT_OOM) 116 116 goto out_of_memory; 117 + else if (fault & VM_FAULT_SIGSEGV) 118 + goto bad_area; 117 119 else if (fault & VM_FAULT_SIGBUS) 118 120 goto do_sigbus; 119 121 BUG();
+2
arch/sh/mm/fault.c
··· 353 353 } else { 354 354 if (fault & VM_FAULT_SIGBUS) 355 355 do_sigbus(regs, error_code, address); 356 + else if (fault & VM_FAULT_SIGSEGV) 357 + bad_area(regs, error_code, address); 356 358 else 357 359 BUG(); 358 360 }
+2
arch/sparc/mm/fault_32.c
··· 249 249 if (unlikely(fault & VM_FAULT_ERROR)) { 250 250 if (fault & VM_FAULT_OOM) 251 251 goto out_of_memory; 252 + else if (fault & VM_FAULT_SIGSEGV) 253 + goto bad_area; 252 254 else if (fault & VM_FAULT_SIGBUS) 253 255 goto do_sigbus; 254 256 BUG();
+2
arch/sparc/mm/fault_64.c
··· 446 446 if (unlikely(fault & VM_FAULT_ERROR)) { 447 447 if (fault & VM_FAULT_OOM) 448 448 goto out_of_memory; 449 + else if (fault & VM_FAULT_SIGSEGV) 450 + goto bad_area; 449 451 else if (fault & VM_FAULT_SIGBUS) 450 452 goto do_sigbus; 451 453 BUG();
+2
arch/tile/mm/fault.c
··· 442 442 if (unlikely(fault & VM_FAULT_ERROR)) { 443 443 if (fault & VM_FAULT_OOM) 444 444 goto out_of_memory; 445 + else if (fault & VM_FAULT_SIGSEGV) 446 + goto bad_area; 445 447 else if (fault & VM_FAULT_SIGBUS) 446 448 goto do_sigbus; 447 449 BUG();
+2
arch/um/kernel/trap.c
··· 80 80 if (unlikely(fault & VM_FAULT_ERROR)) { 81 81 if (fault & VM_FAULT_OOM) { 82 82 goto out_of_memory; 83 + } else if (fault & VM_FAULT_SIGSEGV) { 84 + goto out; 83 85 } else if (fault & VM_FAULT_SIGBUS) { 84 86 err = -EACCES; 85 87 goto out;
+1
arch/x86/kernel/cpu/perf_event_intel.c
··· 2431 2431 break; 2432 2432 2433 2433 case 55: /* 22nm Atom "Silvermont" */ 2434 + case 76: /* 14nm Atom "Airmont" */ 2434 2435 case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */ 2435 2436 memcpy(hw_cache_event_ids, slm_hw_cache_event_ids, 2436 2437 sizeof(hw_cache_event_ids));
+1 -1
arch/x86/kernel/cpu/perf_event_intel_rapl.c
··· 142 142 * or use ldexp(count, -32). 143 143 * Watts = Joules/Time delta 144 144 */ 145 - return v << (32 - __this_cpu_read(rapl_pmu->hw_unit)); 145 + return v << (32 - __this_cpu_read(rapl_pmu)->hw_unit); 146 146 } 147 147 148 148 static u64 rapl_event_update(struct perf_event *event)
+2 -7
arch/x86/kernel/cpu/perf_event_intel_uncore.c
··· 840 840 box->phys_id = phys_id; 841 841 box->pci_dev = pdev; 842 842 box->pmu = pmu; 843 - uncore_box_init(box); 844 843 pci_set_drvdata(pdev, box); 845 844 846 845 raw_spin_lock(&uncore_box_lock); ··· 1003 1004 pmu = &type->pmus[j]; 1004 1005 box = *per_cpu_ptr(pmu->box, cpu); 1005 1006 /* called by uncore_cpu_init? */ 1006 - if (box && box->phys_id >= 0) { 1007 - uncore_box_init(box); 1007 + if (box && box->phys_id >= 0) 1008 1008 continue; 1009 - } 1010 1009 1011 1010 for_each_online_cpu(k) { 1012 1011 exist = *per_cpu_ptr(pmu->box, k); ··· 1020 1023 } 1021 1024 } 1022 1025 1023 - if (box) { 1026 + if (box) 1024 1027 box->phys_id = phys_id; 1025 - uncore_box_init(box); 1026 - } 1027 1028 } 1028 1029 } 1029 1030 return 0;
+10 -8
arch/x86/kernel/cpu/perf_event_intel_uncore.h
··· 257 257 return box->pmu->type->num_counters; 258 258 } 259 259 260 + static inline void uncore_box_init(struct intel_uncore_box *box) 261 + { 262 + if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) { 263 + if (box->pmu->type->ops->init_box) 264 + box->pmu->type->ops->init_box(box); 265 + } 266 + } 267 + 260 268 static inline void uncore_disable_box(struct intel_uncore_box *box) 261 269 { 262 270 if (box->pmu->type->ops->disable_box) ··· 273 265 274 266 static inline void uncore_enable_box(struct intel_uncore_box *box) 275 267 { 268 + uncore_box_init(box); 269 + 276 270 if (box->pmu->type->ops->enable_box) 277 271 box->pmu->type->ops->enable_box(box); 278 272 } ··· 295 285 struct perf_event *event) 296 286 { 297 287 return box->pmu->type->ops->read_counter(box, event); 298 - } 299 - 300 - static inline void uncore_box_init(struct intel_uncore_box *box) 301 - { 302 - if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) { 303 - if (box->pmu->type->ops->init_box) 304 - box->pmu->type->ops->init_box(box); 305 - } 306 288 } 307 289 308 290 static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
+3
arch/x86/kvm/lapic.c
··· 192 192 u16 cid, lid; 193 193 u32 ldr, aid; 194 194 195 + if (!kvm_apic_present(vcpu)) 196 + continue; 197 + 195 198 aid = kvm_apic_id(apic); 196 199 ldr = kvm_apic_get_reg(apic, APIC_LDR); 197 200 cid = apic_cluster_id(new, ldr);
+2
arch/x86/mm/fault.c
··· 898 898 if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| 899 899 VM_FAULT_HWPOISON_LARGE)) 900 900 do_sigbus(regs, error_code, address, fault); 901 + else if (fault & VM_FAULT_SIGSEGV) 902 + bad_area_nosemaphore(regs, error_code, address); 901 903 else 902 904 BUG(); 903 905 }
+16
arch/x86/pci/common.c
··· 448 448 DMI_MATCH(DMI_PRODUCT_NAME, "ftServer"), 449 449 }, 450 450 }, 451 + { 452 + .callback = set_scan_all, 453 + .ident = "Stratus/NEC ftServer", 454 + .matches = { 455 + DMI_MATCH(DMI_SYS_VENDOR, "NEC"), 456 + DMI_MATCH(DMI_PRODUCT_NAME, "Express5800/R32"), 457 + }, 458 + }, 459 + { 460 + .callback = set_scan_all, 461 + .ident = "Stratus/NEC ftServer", 462 + .matches = { 463 + DMI_MATCH(DMI_SYS_VENDOR, "NEC"), 464 + DMI_MATCH(DMI_PRODUCT_NAME, "Express5800/R31"), 465 + }, 466 + }, 451 467 {} 452 468 }; 453 469
+2
arch/xtensa/mm/fault.c
··· 117 117 if (unlikely(fault & VM_FAULT_ERROR)) { 118 118 if (fault & VM_FAULT_OOM) 119 119 goto out_of_memory; 120 + else if (fault & VM_FAULT_SIGSEGV) 121 + goto bad_area; 120 122 else if (fault & VM_FAULT_SIGBUS) 121 123 goto do_sigbus; 122 124 BUG();
+2 -23
block/blk-mq-sysfs.c
··· 15 15 16 16 static void blk_mq_sysfs_release(struct kobject *kobj) 17 17 { 18 - struct request_queue *q; 19 - 20 - q = container_of(kobj, struct request_queue, mq_kobj); 21 - free_percpu(q->queue_ctx); 22 - } 23 - 24 - static void blk_mq_ctx_release(struct kobject *kobj) 25 - { 26 - struct blk_mq_ctx *ctx; 27 - 28 - ctx = container_of(kobj, struct blk_mq_ctx, kobj); 29 - kobject_put(&ctx->queue->mq_kobj); 30 - } 31 - 32 - static void blk_mq_hctx_release(struct kobject *kobj) 33 - { 34 - struct blk_mq_hw_ctx *hctx; 35 - 36 - hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj); 37 - kfree(hctx); 38 18 } 39 19 40 20 struct blk_mq_ctx_sysfs_entry { ··· 318 338 static struct kobj_type blk_mq_ctx_ktype = { 319 339 .sysfs_ops = &blk_mq_sysfs_ops, 320 340 .default_attrs = default_ctx_attrs, 321 - .release = blk_mq_ctx_release, 341 + .release = blk_mq_sysfs_release, 322 342 }; 323 343 324 344 static struct kobj_type blk_mq_hw_ktype = { 325 345 .sysfs_ops = &blk_mq_hw_sysfs_ops, 326 346 .default_attrs = default_hw_ctx_attrs, 327 - .release = blk_mq_hctx_release, 347 + .release = blk_mq_sysfs_release, 328 348 }; 329 349 330 350 static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx) ··· 355 375 return ret; 356 376 357 377 hctx_for_each_ctx(hctx, ctx, i) { 358 - kobject_get(&q->mq_kobj); 359 378 ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu); 360 379 if (ret) 361 380 break;
+21 -2
block/blk-mq.c
··· 1867 1867 mutex_unlock(&set->tag_list_lock); 1868 1868 } 1869 1869 1870 + /* 1871 + * It is the actual release handler for mq, but we do it from 1872 + * request queue's release handler for avoiding use-after-free 1873 + * and headache because q->mq_kobj shouldn't have been introduced, 1874 + * but we can't group ctx/kctx kobj without it. 1875 + */ 1876 + void blk_mq_release(struct request_queue *q) 1877 + { 1878 + struct blk_mq_hw_ctx *hctx; 1879 + unsigned int i; 1880 + 1881 + /* hctx kobj stays in hctx */ 1882 + queue_for_each_hw_ctx(q, hctx, i) 1883 + kfree(hctx); 1884 + 1885 + kfree(q->queue_hw_ctx); 1886 + 1887 + /* ctx kobj stays in queue_ctx */ 1888 + free_percpu(q->queue_ctx); 1889 + } 1890 + 1870 1891 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) 1871 1892 { 1872 1893 struct blk_mq_hw_ctx **hctxs; ··· 2021 2000 2022 2001 percpu_ref_exit(&q->mq_usage_counter); 2023 2002 2024 - kfree(q->queue_hw_ctx); 2025 2003 kfree(q->mq_map); 2026 2004 2027 - q->queue_hw_ctx = NULL; 2028 2005 q->mq_map = NULL; 2029 2006 2030 2007 mutex_lock(&all_q_mutex);
+2
block/blk-mq.h
··· 62 62 63 63 extern void blk_mq_rq_timed_out(struct request *req, bool reserved); 64 64 65 + void blk_mq_release(struct request_queue *q); 66 + 65 67 /* 66 68 * Basic implementation of sparser bitmap, allowing the user to spread 67 69 * the bits over more cachelines.
+2
block/blk-sysfs.c
··· 517 517 518 518 if (!q->mq_ops) 519 519 blk_free_flush_queue(q->fq); 520 + else 521 + blk_mq_release(q); 520 522 521 523 blk_trace_shutdown(q); 522 524
-2
drivers/Kconfig
··· 134 134 135 135 source "drivers/platform/Kconfig" 136 136 137 - source "drivers/soc/Kconfig" 138 - 139 137 source "drivers/clk/Kconfig" 140 138 141 139 source "drivers/hwspinlock/Kconfig"
+7 -28
drivers/acpi/acpi_lpss.c
··· 1 1 /* 2 2 * ACPI support for Intel Lynxpoint LPSS. 3 3 * 4 - * Copyright (C) 2013, 2014, Intel Corporation 4 + * Copyright (C) 2013, Intel Corporation 5 5 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com> 6 6 * Rafael J. Wysocki <rafael.j.wysocki@intel.com> 7 7 * ··· 60 60 #define LPSS_CLK_DIVIDER BIT(2) 61 61 #define LPSS_LTR BIT(3) 62 62 #define LPSS_SAVE_CTX BIT(4) 63 - #define LPSS_DEV_PROXY BIT(5) 64 - #define LPSS_PROXY_REQ BIT(6) 65 63 66 64 struct lpss_private_data; 67 65 ··· 70 72 void (*setup)(struct lpss_private_data *pdata); 71 73 }; 72 74 73 - static struct device *proxy_device; 74 - 75 75 static struct lpss_device_desc lpss_dma_desc = { 76 - .flags = LPSS_CLK | LPSS_PROXY_REQ, 76 + .flags = LPSS_CLK, 77 77 }; 78 78 79 79 struct lpss_private_data { ··· 146 150 }; 147 151 148 152 static struct lpss_device_desc byt_uart_dev_desc = { 149 - .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX | 150 - LPSS_DEV_PROXY, 153 + .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX, 151 154 .prv_offset = 0x800, 152 155 .setup = lpss_uart_setup, 153 156 }; 154 157 155 158 static struct lpss_device_desc byt_spi_dev_desc = { 156 - .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX | 157 - LPSS_DEV_PROXY, 159 + .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX, 158 160 .prv_offset = 0x400, 159 161 }; 160 162 161 163 static struct lpss_device_desc byt_sdio_dev_desc = { 162 - .flags = LPSS_CLK | LPSS_DEV_PROXY, 164 + .flags = LPSS_CLK, 163 165 }; 164 166 165 167 static struct lpss_device_desc byt_i2c_dev_desc = { 166 - .flags = LPSS_CLK | LPSS_SAVE_CTX | LPSS_DEV_PROXY, 168 + .flags = LPSS_CLK | LPSS_SAVE_CTX, 167 169 .prv_offset = 0x800, 168 170 .setup = byt_i2c_setup, 169 171 }; ··· 368 374 adev->driver_data = pdata; 369 375 pdev = acpi_create_platform_device(adev); 370 376 if (!IS_ERR_OR_NULL(pdev)) { 371 - if (!proxy_device && dev_desc->flags & LPSS_DEV_PROXY) 372 - proxy_device = 
&pdev->dev; 373 377 return 1; 374 378 } 375 379 ··· 592 600 if (pdata->dev_desc->flags & LPSS_SAVE_CTX) 593 601 acpi_lpss_save_ctx(dev, pdata); 594 602 595 - ret = acpi_dev_runtime_suspend(dev); 596 - if (ret) 597 - return ret; 598 - 599 - if (pdata->dev_desc->flags & LPSS_PROXY_REQ && proxy_device) 600 - return pm_runtime_put_sync_suspend(proxy_device); 601 - 602 - return 0; 603 + return acpi_dev_runtime_suspend(dev); 603 604 } 604 605 605 606 static int acpi_lpss_runtime_resume(struct device *dev) 606 607 { 607 608 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); 608 609 int ret; 609 - 610 - if (pdata->dev_desc->flags & LPSS_PROXY_REQ && proxy_device) { 611 - ret = pm_runtime_get_sync(proxy_device); 612 - if (ret) 613 - return ret; 614 - } 615 610 616 611 ret = acpi_dev_runtime_resume(dev); 617 612 if (ret)
+7 -18
drivers/block/rbd.c
··· 2098 2098 * If an image has a non-zero parent overlap, get a reference to its 2099 2099 * parent. 2100 2100 * 2101 - * We must get the reference before checking for the overlap to 2102 - * coordinate properly with zeroing the parent overlap in 2103 - * rbd_dev_v2_parent_info() when an image gets flattened. We 2104 - * drop it again if there is no overlap. 2105 - * 2106 2101 * Returns true if the rbd device has a parent with a non-zero 2107 2102 * overlap and a reference for it was successfully taken, or 2108 2103 * false otherwise. 2109 2104 */ 2110 2105 static bool rbd_dev_parent_get(struct rbd_device *rbd_dev) 2111 2106 { 2112 - int counter; 2107 + int counter = 0; 2113 2108 2114 2109 if (!rbd_dev->parent_spec) 2115 2110 return false; 2116 2111 2117 - counter = atomic_inc_return_safe(&rbd_dev->parent_ref); 2118 - if (counter > 0 && rbd_dev->parent_overlap) 2119 - return true; 2120 - 2121 - /* Image was flattened, but parent is not yet torn down */ 2112 + down_read(&rbd_dev->header_rwsem); 2113 + if (rbd_dev->parent_overlap) 2114 + counter = atomic_inc_return_safe(&rbd_dev->parent_ref); 2115 + up_read(&rbd_dev->header_rwsem); 2122 2116 2123 2117 if (counter < 0) 2124 2118 rbd_warn(rbd_dev, "parent reference overflow"); 2125 2119 2126 - return false; 2120 + return counter > 0; 2127 2121 } 2128 2122 2129 2123 /* ··· 4233 4239 */ 4234 4240 if (rbd_dev->parent_overlap) { 4235 4241 rbd_dev->parent_overlap = 0; 4236 - smp_mb(); 4237 4242 rbd_dev_parent_put(rbd_dev); 4238 4243 pr_info("%s: clone image has been flattened\n", 4239 4244 rbd_dev->disk->disk_name); ··· 4278 4285 * treat it specially. 
4279 4286 */ 4280 4287 rbd_dev->parent_overlap = overlap; 4281 - smp_mb(); 4282 4288 if (!overlap) { 4283 4289 4284 4290 /* A null parent_spec indicates it's the initial probe */ ··· 5106 5114 { 5107 5115 struct rbd_image_header *header; 5108 5116 5109 - /* Drop parent reference unless it's already been done (or none) */ 5110 - 5111 - if (rbd_dev->parent_overlap) 5112 - rbd_dev_parent_put(rbd_dev); 5117 + rbd_dev_parent_put(rbd_dev); 5113 5118 5114 5119 /* Free dynamic fields from the header, then zero it out */ 5115 5120
+10 -7
drivers/gpio/gpio-mcp23s08.c
··· 801 801 client->irq = irq_of_parse_and_map(client->dev.of_node, 0); 802 802 } else { 803 803 pdata = dev_get_platdata(&client->dev); 804 - if (!pdata || !gpio_is_valid(pdata->base)) { 805 - dev_dbg(&client->dev, "invalid platform data\n"); 806 - return -EINVAL; 804 + if (!pdata) { 805 + pdata = devm_kzalloc(&client->dev, 806 + sizeof(struct mcp23s08_platform_data), 807 + GFP_KERNEL); 808 + pdata->base = -1; 807 809 } 808 810 } 809 811 ··· 926 924 } else { 927 925 type = spi_get_device_id(spi)->driver_data; 928 926 pdata = dev_get_platdata(&spi->dev); 929 - if (!pdata || !gpio_is_valid(pdata->base)) { 930 - dev_dbg(&spi->dev, 931 - "invalid or missing platform data\n"); 932 - return -EINVAL; 927 + if (!pdata) { 928 + pdata = devm_kzalloc(&spi->dev, 929 + sizeof(struct mcp23s08_platform_data), 930 + GFP_KERNEL); 931 + pdata->base = -1; 933 932 } 934 933 935 934 for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) {
+33 -6
drivers/gpio/gpio-omap.c
··· 88 88 #define BANK_USED(bank) (bank->mod_usage || bank->irq_usage) 89 89 #define LINE_USED(line, offset) (line & (BIT(offset))) 90 90 91 + static void omap_gpio_unmask_irq(struct irq_data *d); 92 + 91 93 static int omap_irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq) 92 94 { 93 95 return bank->chip.base + gpio_irq; ··· 479 477 return readl_relaxed(reg) & mask; 480 478 } 481 479 480 + static void omap_gpio_init_irq(struct gpio_bank *bank, unsigned gpio, 481 + unsigned offset) 482 + { 483 + if (!LINE_USED(bank->mod_usage, offset)) { 484 + omap_enable_gpio_module(bank, offset); 485 + omap_set_gpio_direction(bank, offset, 1); 486 + } 487 + bank->irq_usage |= BIT(GPIO_INDEX(bank, gpio)); 488 + } 489 + 482 490 static int omap_gpio_irq_type(struct irq_data *d, unsigned type) 483 491 { 484 492 struct gpio_bank *bank = omap_irq_data_get_bank(d); ··· 518 506 spin_lock_irqsave(&bank->lock, flags); 519 507 offset = GPIO_INDEX(bank, gpio); 520 508 retval = omap_set_gpio_triggering(bank, offset, type); 521 - if (!LINE_USED(bank->mod_usage, offset)) { 522 - omap_enable_gpio_module(bank, offset); 523 - omap_set_gpio_direction(bank, offset, 1); 524 - } else if (!omap_gpio_is_input(bank, BIT(offset))) { 509 + omap_gpio_init_irq(bank, gpio, offset); 510 + if (!omap_gpio_is_input(bank, BIT(offset))) { 525 511 spin_unlock_irqrestore(&bank->lock, flags); 526 512 return -EINVAL; 527 513 } 528 - 529 - bank->irq_usage |= BIT(GPIO_INDEX(bank, gpio)); 530 514 spin_unlock_irqrestore(&bank->lock, flags); 531 515 532 516 if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) ··· 798 790 if (!unmasked) 799 791 chained_irq_exit(irqchip, desc); 800 792 pm_runtime_put(bank->dev); 793 + } 794 + 795 + static unsigned int omap_gpio_irq_startup(struct irq_data *d) 796 + { 797 + struct gpio_bank *bank = omap_irq_data_get_bank(d); 798 + unsigned int gpio = omap_irq_to_gpio(bank, d->hwirq); 799 + unsigned long flags; 800 + unsigned offset = GPIO_INDEX(bank, gpio); 801 + 802 + if 
(!BANK_USED(bank)) 803 + pm_runtime_get_sync(bank->dev); 804 + 805 + spin_lock_irqsave(&bank->lock, flags); 806 + omap_gpio_init_irq(bank, gpio, offset); 807 + spin_unlock_irqrestore(&bank->lock, flags); 808 + omap_gpio_unmask_irq(d); 809 + 810 + return 0; 801 811 } 802 812 803 813 static void omap_gpio_irq_shutdown(struct irq_data *d) ··· 1207 1181 if (!irqc) 1208 1182 return -ENOMEM; 1209 1183 1184 + irqc->irq_startup = omap_gpio_irq_startup, 1210 1185 irqc->irq_shutdown = omap_gpio_irq_shutdown, 1211 1186 irqc->irq_ack = omap_gpio_ack_irq, 1212 1187 irqc->irq_mask = omap_gpio_mask_irq,
+2 -1
drivers/gpio/gpiolib-sysfs.c
··· 648 648 if (tdev != NULL) { 649 649 status = sysfs_create_link(&dev->kobj, &tdev->kobj, 650 650 name); 651 + put_device(tdev); 651 652 } else { 652 653 status = -ENODEV; 653 654 } ··· 696 695 } 697 696 698 697 status = sysfs_set_active_low(desc, dev, value); 699 - 698 + put_device(dev); 700 699 unlock: 701 700 mutex_unlock(&sysfs_lock); 702 701
+3 -3
drivers/gpu/drm/amd/amdkfd/kfd_device.c
··· 26 26 #include <linux/slab.h> 27 27 #include "kfd_priv.h" 28 28 #include "kfd_device_queue_manager.h" 29 + #include "kfd_pm4_headers.h" 29 30 30 31 #define MQD_SIZE_ALIGNED 768 31 32 ··· 170 169 kfd->shared_resources = *gpu_resources; 171 170 172 171 /* calculate max size of mqds needed for queues */ 173 - size = max_num_of_processes * 174 - max_num_of_queues_per_process * 175 - kfd->device_info->mqd_size_aligned; 172 + size = max_num_of_queues_per_device * 173 + kfd->device_info->mqd_size_aligned; 176 174 177 175 /* add another 512KB for all other allocations on gart */ 178 176 size += 512 * 1024;
+76 -2
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
··· 183 183 184 184 mutex_lock(&dqm->lock); 185 185 186 + if (dqm->total_queue_count >= max_num_of_queues_per_device) { 187 + pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n", 188 + dqm->total_queue_count); 189 + mutex_unlock(&dqm->lock); 190 + return -EPERM; 191 + } 192 + 186 193 if (list_empty(&qpd->queues_list)) { 187 194 retval = allocate_vmid(dqm, qpd, q); 188 195 if (retval != 0) { ··· 213 206 214 207 list_add(&q->list, &qpd->queues_list); 215 208 dqm->queue_count++; 209 + 210 + /* 211 + * Unconditionally increment this counter, regardless of the queue's 212 + * type or whether the queue is active. 213 + */ 214 + dqm->total_queue_count++; 215 + pr_debug("Total of %d queues are accountable so far\n", 216 + dqm->total_queue_count); 216 217 217 218 mutex_unlock(&dqm->lock); 218 219 return 0; ··· 341 326 if (list_empty(&qpd->queues_list)) 342 327 deallocate_vmid(dqm, qpd, q); 343 328 dqm->queue_count--; 329 + 330 + /* 331 + * Unconditionally decrement this counter, regardless of the queue's 332 + * type 333 + */ 334 + dqm->total_queue_count--; 335 + pr_debug("Total of %d queues are accountable so far\n", 336 + dqm->total_queue_count); 337 + 344 338 out: 345 339 mutex_unlock(&dqm->lock); 346 340 return retval; ··· 565 541 566 542 for (i = 0; i < pipes_num; i++) { 567 543 inx = i + first_pipe; 544 + /* 545 + * HPD buffer on GTT is allocated by amdkfd, no need to waste 546 + * space in GTT for pipelines we don't initialize 547 + */ 568 548 pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES; 569 549 pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr); 570 550 /* = log2(bytes/4)-1 */ 571 - kfd2kgd->init_pipeline(dqm->dev->kgd, i, 551 + kfd2kgd->init_pipeline(dqm->dev->kgd, inx, 572 552 CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr); 573 553 } 574 554 ··· 588 560 589 561 pr_debug("kfd: In %s\n", __func__); 590 562 591 - retval = init_pipelines(dqm, get_pipes_num(dqm), KFD_DQM_FIRST_PIPE); 563 + retval = 
init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm)); 592 564 if (retval != 0) 593 565 return retval; 594 566 ··· 780 752 pr_debug("kfd: In func %s\n", __func__); 781 753 782 754 mutex_lock(&dqm->lock); 755 + if (dqm->total_queue_count >= max_num_of_queues_per_device) { 756 + pr_warn("amdkfd: Can't create new kernel queue because %d queues were already created\n", 757 + dqm->total_queue_count); 758 + mutex_unlock(&dqm->lock); 759 + return -EPERM; 760 + } 761 + 762 + /* 763 + * Unconditionally increment this counter, regardless of the queue's 764 + * type or whether the queue is active. 765 + */ 766 + dqm->total_queue_count++; 767 + pr_debug("Total of %d queues are accountable so far\n", 768 + dqm->total_queue_count); 769 + 783 770 list_add(&kq->list, &qpd->priv_queue_list); 784 771 dqm->queue_count++; 785 772 qpd->is_debug = true; ··· 818 775 dqm->queue_count--; 819 776 qpd->is_debug = false; 820 777 execute_queues_cpsch(dqm, false); 778 + /* 779 + * Unconditionally decrement this counter, regardless of the queue's 780 + * type. 781 + */ 782 + dqm->total_queue_count--; 783 + pr_debug("Total of %d queues are accountable so far\n", 784 + dqm->total_queue_count); 821 785 mutex_unlock(&dqm->lock); 822 786 } 823 787 ··· 843 793 844 794 mutex_lock(&dqm->lock); 845 795 796 + if (dqm->total_queue_count >= max_num_of_queues_per_device) { 797 + pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n", 798 + dqm->total_queue_count); 799 + retval = -EPERM; 800 + goto out; 801 + } 802 + 846 803 mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP); 847 804 if (mqd == NULL) { 848 805 mutex_unlock(&dqm->lock); ··· 866 809 dqm->queue_count++; 867 810 retval = execute_queues_cpsch(dqm, false); 868 811 } 812 + 813 + /* 814 + * Unconditionally increment this counter, regardless of the queue's 815 + * type or whether the queue is active. 
816 + */ 817 + dqm->total_queue_count++; 818 + 819 + pr_debug("Total of %d queues are accountable so far\n", 820 + dqm->total_queue_count); 869 821 870 822 out: 871 823 mutex_unlock(&dqm->lock); ··· 995 929 execute_queues_cpsch(dqm, false); 996 930 997 931 mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj); 932 + 933 + /* 934 + * Unconditionally decrement this counter, regardless of the queue's 935 + * type 936 + */ 937 + dqm->total_queue_count--; 938 + pr_debug("Total of %d queues are accountable so far\n", 939 + dqm->total_queue_count); 998 940 999 941 mutex_unlock(&dqm->lock); 1000 942
+1
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
··· 130 130 struct list_head queues; 131 131 unsigned int processes_count; 132 132 unsigned int queue_count; 133 + unsigned int total_queue_count; 133 134 unsigned int next_pipe_to_allocate; 134 135 unsigned int *allocated_queues; 135 136 unsigned int vmid_bitmap;
+8 -19
drivers/gpu/drm/amd/amdkfd/kfd_module.c
··· 50 50 MODULE_PARM_DESC(sched_policy, 51 51 "Kernel cmdline parameter that defines the amdkfd scheduling policy"); 52 52 53 - int max_num_of_processes = KFD_MAX_NUM_OF_PROCESSES_DEFAULT; 54 - module_param(max_num_of_processes, int, 0444); 55 - MODULE_PARM_DESC(max_num_of_processes, 56 - "Kernel cmdline parameter that defines the amdkfd maximum number of supported processes"); 57 - 58 - int max_num_of_queues_per_process = KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT; 59 - module_param(max_num_of_queues_per_process, int, 0444); 60 - MODULE_PARM_DESC(max_num_of_queues_per_process, 61 - "Kernel cmdline parameter that defines the amdkfd maximum number of supported queues per process"); 53 + int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT; 54 + module_param(max_num_of_queues_per_device, int, 0444); 55 + MODULE_PARM_DESC(max_num_of_queues_per_device, 56 + "Maximum number of supported queues per device (1 = Minimum, 4096 = default)"); 62 57 63 58 bool kgd2kfd_init(unsigned interface_version, 64 59 const struct kfd2kgd_calls *f2g, ··· 95 100 } 96 101 97 102 /* Verify module parameters */ 98 - if ((max_num_of_processes < 0) || 99 - (max_num_of_processes > KFD_MAX_NUM_OF_PROCESSES)) { 100 - pr_err("kfd: max_num_of_processes must be between 0 to KFD_MAX_NUM_OF_PROCESSES\n"); 101 - return -1; 102 - } 103 - 104 - if ((max_num_of_queues_per_process < 0) || 105 - (max_num_of_queues_per_process > 106 - KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)) { 107 - pr_err("kfd: max_num_of_queues_per_process must be between 0 to KFD_MAX_NUM_OF_QUEUES_PER_PROCESS\n"); 103 + if ((max_num_of_queues_per_device < 1) || 104 + (max_num_of_queues_per_device > 105 + KFD_MAX_NUM_OF_QUEUES_PER_DEVICE)) { 106 + pr_err("kfd: max_num_of_queues_per_device must be between 1 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n"); 108 107 return -1; 109 108 } 110 109
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
··· 30 30 31 31 int kfd_pasid_init(void) 32 32 { 33 - pasid_limit = max_num_of_processes; 33 + pasid_limit = KFD_MAX_NUM_OF_PROCESSES; 34 34 35 35 pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL); 36 36 if (!pasid_bitmap)
+8 -9
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
··· 52 52 #define kfd_alloc_struct(ptr_to_struct) \ 53 53 ((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL)) 54 54 55 - /* Kernel module parameter to specify maximum number of supported processes */ 56 - extern int max_num_of_processes; 57 - 58 - #define KFD_MAX_NUM_OF_PROCESSES_DEFAULT 32 59 55 #define KFD_MAX_NUM_OF_PROCESSES 512 56 + #define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024 60 57 61 58 /* 62 - * Kernel module parameter to specify maximum number of supported queues 63 - * per process 59 + * Kernel module parameter to specify maximum number of supported queues per 60 + * device 64 61 */ 65 - extern int max_num_of_queues_per_process; 62 + extern int max_num_of_queues_per_device; 66 63 67 - #define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT 128 68 - #define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024 64 + #define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096 65 + #define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE \ 66 + (KFD_MAX_NUM_OF_PROCESSES * \ 67 + KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) 69 68 70 69 #define KFD_KERNEL_QUEUE_SIZE 2048 71 70
+13 -5
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
··· 54 54 pr_debug("kfd: in %s\n", __func__); 55 55 56 56 found = find_first_zero_bit(pqm->queue_slot_bitmap, 57 - max_num_of_queues_per_process); 57 + KFD_MAX_NUM_OF_QUEUES_PER_PROCESS); 58 58 59 59 pr_debug("kfd: the new slot id %lu\n", found); 60 60 61 - if (found >= max_num_of_queues_per_process) { 61 + if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) { 62 62 pr_info("amdkfd: Can not open more queues for process with pasid %d\n", 63 63 pqm->process->pasid); 64 64 return -ENOMEM; ··· 76 76 77 77 INIT_LIST_HEAD(&pqm->queues); 78 78 pqm->queue_slot_bitmap = 79 - kzalloc(DIV_ROUND_UP(max_num_of_queues_per_process, 79 + kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, 80 80 BITS_PER_BYTE), GFP_KERNEL); 81 81 if (pqm->queue_slot_bitmap == NULL) 82 82 return -ENOMEM; ··· 203 203 pqn->kq = NULL; 204 204 retval = dev->dqm->create_queue(dev->dqm, q, &pdd->qpd, 205 205 &q->properties.vmid); 206 + pr_debug("DQM returned %d for create_queue\n", retval); 206 207 print_queue(q); 207 208 break; 208 209 case KFD_QUEUE_TYPE_DIQ: ··· 223 222 } 224 223 225 224 if (retval != 0) { 226 - pr_err("kfd: error dqm create queue\n"); 225 + pr_debug("Error dqm create queue\n"); 227 226 goto err_create_queue; 228 227 } 229 228 ··· 242 241 err_create_queue: 243 242 kfree(pqn); 244 243 err_allocate_pqn: 244 + /* check if queues list is empty unregister process from device */ 245 245 clear_bit(*qid, pqm->queue_slot_bitmap); 246 + if (list_empty(&pqm->queues)) 247 + dev->dqm->unregister_process(dev->dqm, &pdd->qpd); 246 248 return retval; 247 249 } 248 250 ··· 315 311 BUG_ON(!pqm); 316 312 317 313 pqn = get_queue_by_qid(pqm, qid); 318 - BUG_ON(!pqn); 314 + if (!pqn) { 315 + pr_debug("amdkfd: No queue %d exists for update operation\n", 316 + qid); 317 + return -EFAULT; 318 + } 319 319 320 320 pqn->q->properties.queue_address = p->queue_address; 321 321 pqn->q->properties.queue_size = p->queue_size;
+3
drivers/gpu/drm/cirrus/cirrus_drv.c
··· 16 16 #include "cirrus_drv.h" 17 17 18 18 int cirrus_modeset = -1; 19 + int cirrus_bpp = 24; 19 20 20 21 MODULE_PARM_DESC(modeset, "Disable/Enable modesetting"); 21 22 module_param_named(modeset, cirrus_modeset, int, 0400); 23 + MODULE_PARM_DESC(bpp, "Max bits-per-pixel (default:24)"); 24 + module_param_named(bpp, cirrus_bpp, int, 0400); 22 25 23 26 /* 24 27 * This is the generic driver code. This binds the driver to the drm core,
+3
drivers/gpu/drm/cirrus/cirrus_drv.h
··· 262 262 263 263 int cirrus_bo_push_sysram(struct cirrus_bo *bo); 264 264 int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr); 265 + 266 + extern int cirrus_bpp; 267 + 265 268 #endif /* __CIRRUS_DRV_H__ */
+2
drivers/gpu/drm/cirrus/cirrus_main.c
··· 320 320 const int max_pitch = 0x1FF << 3; /* (4096 - 1) & ~111b bytes */ 321 321 const int max_size = cdev->mc.vram_size; 322 322 323 + if (bpp > cirrus_bpp) 324 + return false; 323 325 if (bpp > 32) 324 326 return false; 325 327
+7 -2
drivers/gpu/drm/cirrus/cirrus_mode.c
··· 501 501 int count; 502 502 503 503 /* Just add a static list of modes */ 504 - count = drm_add_modes_noedid(connector, 1280, 1024); 505 - drm_set_preferred_mode(connector, 1024, 768); 504 + if (cirrus_bpp <= 24) { 505 + count = drm_add_modes_noedid(connector, 1280, 1024); 506 + drm_set_preferred_mode(connector, 1024, 768); 507 + } else { 508 + count = drm_add_modes_noedid(connector, 800, 600); 509 + drm_set_preferred_mode(connector, 800, 600); 510 + } 506 511 return count; 507 512 } 508 513
+30
drivers/gpu/drm/drm_fb_helper.c
··· 145 145 } 146 146 EXPORT_SYMBOL(drm_fb_helper_add_one_connector); 147 147 148 + static void remove_from_modeset(struct drm_mode_set *set, 149 + struct drm_connector *connector) 150 + { 151 + int i, j; 152 + 153 + for (i = 0; i < set->num_connectors; i++) { 154 + if (set->connectors[i] == connector) 155 + break; 156 + } 157 + 158 + if (i == set->num_connectors) 159 + return; 160 + 161 + for (j = i + 1; j < set->num_connectors; j++) { 162 + set->connectors[j - 1] = set->connectors[j]; 163 + } 164 + set->num_connectors--; 165 + 166 + /* because i915 is pissy about this.. 167 + * TODO maybe need to makes sure we set it back to !=NULL somewhere? 168 + */ 169 + if (set->num_connectors == 0) 170 + set->fb = NULL; 171 + } 172 + 148 173 int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, 149 174 struct drm_connector *connector) 150 175 { ··· 192 167 } 193 168 fb_helper->connector_count--; 194 169 kfree(fb_helper_connector); 170 + 171 + /* also cleanup dangling references to the connector: */ 172 + for (i = 0; i < fb_helper->crtc_count; i++) 173 + remove_from_modeset(&fb_helper->crtc_info[i].mode_set, connector); 174 + 195 175 return 0; 196 176 } 197 177 EXPORT_SYMBOL(drm_fb_helper_remove_one_connector);
+42 -10
drivers/gpu/drm/i2c/tda998x_drv.c
··· 32 32 struct tda998x_priv { 33 33 struct i2c_client *cec; 34 34 struct i2c_client *hdmi; 35 + struct mutex mutex; 36 + struct delayed_work dwork; 35 37 uint16_t rev; 36 38 uint8_t current_page; 37 39 int dpms; ··· 404 402 uint8_t addr = REG2ADDR(reg); 405 403 int ret; 406 404 405 + mutex_lock(&priv->mutex); 407 406 ret = set_page(priv, reg); 408 407 if (ret < 0) 409 - return ret; 408 + goto out; 410 409 411 410 ret = i2c_master_send(client, &addr, sizeof(addr)); 412 411 if (ret < 0) ··· 417 414 if (ret < 0) 418 415 goto fail; 419 416 420 - return ret; 417 + goto out; 421 418 422 419 fail: 423 420 dev_err(&client->dev, "Error %d reading from 0x%x\n", ret, reg); 421 + out: 422 + mutex_unlock(&priv->mutex); 424 423 return ret; 425 424 } 426 425 ··· 436 431 buf[0] = REG2ADDR(reg); 437 432 memcpy(&buf[1], p, cnt); 438 433 434 + mutex_lock(&priv->mutex); 439 435 ret = set_page(priv, reg); 440 436 if (ret < 0) 441 - return; 437 + goto out; 442 438 443 439 ret = i2c_master_send(client, buf, cnt + 1); 444 440 if (ret < 0) 445 441 dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg); 442 + out: 443 + mutex_unlock(&priv->mutex); 446 444 } 447 445 448 446 static int ··· 467 459 uint8_t buf[] = {REG2ADDR(reg), val}; 468 460 int ret; 469 461 462 + mutex_lock(&priv->mutex); 470 463 ret = set_page(priv, reg); 471 464 if (ret < 0) 472 - return; 465 + goto out; 473 466 474 467 ret = i2c_master_send(client, buf, sizeof(buf)); 475 468 if (ret < 0) 476 469 dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg); 470 + out: 471 + mutex_unlock(&priv->mutex); 477 472 } 478 473 479 474 static void ··· 486 475 uint8_t buf[] = {REG2ADDR(reg), val >> 8, val}; 487 476 int ret; 488 477 478 + mutex_lock(&priv->mutex); 489 479 ret = set_page(priv, reg); 490 480 if (ret < 0) 491 - return; 481 + goto out; 492 482 493 483 ret = i2c_master_send(client, buf, sizeof(buf)); 494 484 if (ret < 0) 495 485 dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg); 486 + out: 487 + 
mutex_unlock(&priv->mutex); 496 488 } 497 489 498 490 static void ··· 550 536 reg_write(priv, REG_MUX_VP_VIP_OUT, 0x24); 551 537 } 552 538 539 + /* handle HDMI connect/disconnect */ 540 + static void tda998x_hpd(struct work_struct *work) 541 + { 542 + struct delayed_work *dwork = to_delayed_work(work); 543 + struct tda998x_priv *priv = 544 + container_of(dwork, struct tda998x_priv, dwork); 545 + 546 + if (priv->encoder && priv->encoder->dev) 547 + drm_kms_helper_hotplug_event(priv->encoder->dev); 548 + } 549 + 553 550 /* 554 551 * only 2 interrupts may occur: screen plug/unplug and EDID read 555 552 */ ··· 584 559 priv->wq_edid_wait = 0; 585 560 wake_up(&priv->wq_edid); 586 561 } else if (cec != 0) { /* HPD change */ 587 - if (priv->encoder && priv->encoder->dev) 588 - drm_helper_hpd_irq_event(priv->encoder->dev); 562 + schedule_delayed_work(&priv->dwork, HZ/10); 589 563 } 590 564 return IRQ_HANDLED; 591 565 } ··· 1194 1170 /* disable all IRQs and free the IRQ handler */ 1195 1171 cec_write(priv, REG_CEC_RXSHPDINTENA, 0); 1196 1172 reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD); 1197 - if (priv->hdmi->irq) 1173 + if (priv->hdmi->irq) { 1198 1174 free_irq(priv->hdmi->irq, priv); 1175 + cancel_delayed_work_sync(&priv->dwork); 1176 + } 1199 1177 1200 1178 i2c_unregister_device(priv->cec); 1201 1179 } ··· 1281 1255 struct device_node *np = client->dev.of_node; 1282 1256 u32 video; 1283 1257 int rev_lo, rev_hi, ret; 1258 + unsigned short cec_addr; 1284 1259 1285 1260 priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3); 1286 1261 priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1); ··· 1289 1262 1290 1263 priv->current_page = 0xff; 1291 1264 priv->hdmi = client; 1292 - priv->cec = i2c_new_dummy(client->adapter, 0x34); 1265 + /* CEC I2C address bound to TDA998x I2C addr by configuration pins */ 1266 + cec_addr = 0x34 + (client->addr & 0x03); 1267 + priv->cec = i2c_new_dummy(client->adapter, cec_addr); 1293 1268 if (!priv->cec) 1294 
1269 return -ENODEV; 1295 1270 1296 1271 priv->dpms = DRM_MODE_DPMS_OFF; 1272 + 1273 + mutex_init(&priv->mutex); /* protect the page access */ 1297 1274 1298 1275 /* wake up the device: */ 1299 1276 cec_write(priv, REG_CEC_ENAMODS, ··· 1354 1323 if (client->irq) { 1355 1324 int irqf_trigger; 1356 1325 1357 - /* init read EDID waitqueue */ 1326 + /* init read EDID waitqueue and HDP work */ 1358 1327 init_waitqueue_head(&priv->wq_edid); 1328 + INIT_DELAYED_WORK(&priv->dwork, tda998x_hpd); 1359 1329 1360 1330 /* clear pending interrupts */ 1361 1331 reg_read(priv, REG_INT_FLAGS_0);
+4 -10
drivers/gpu/drm/i915/i915_drv.c
··· 462 462 } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { 463 463 dev_priv->pch_type = PCH_LPT; 464 464 DRM_DEBUG_KMS("Found LynxPoint PCH\n"); 465 - WARN_ON(!IS_HASWELL(dev)); 466 - WARN_ON(IS_HSW_ULT(dev)); 467 - } else if (IS_BROADWELL(dev)) { 468 - dev_priv->pch_type = PCH_LPT; 469 - dev_priv->pch_id = 470 - INTEL_PCH_LPT_LP_DEVICE_ID_TYPE; 471 - DRM_DEBUG_KMS("This is Broadwell, assuming " 472 - "LynxPoint LP PCH\n"); 465 + WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev)); 466 + WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev)); 473 467 } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { 474 468 dev_priv->pch_type = PCH_LPT; 475 469 DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); 476 - WARN_ON(!IS_HASWELL(dev)); 477 - WARN_ON(!IS_HSW_ULT(dev)); 470 + WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev)); 471 + WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev)); 478 472 } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) { 479 473 dev_priv->pch_type = PCH_SPT; 480 474 DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
+1 -2
drivers/gpu/drm/i915/i915_drv.h
··· 2159 2159 #define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ 2160 2160 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00) 2161 2161 #define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \ 2162 - ((INTEL_DEVID(dev) & 0xf) == 0x2 || \ 2163 - (INTEL_DEVID(dev) & 0xf) == 0x6 || \ 2162 + ((INTEL_DEVID(dev) & 0xf) == 0x6 || \ 2164 2163 (INTEL_DEVID(dev) & 0xf) == 0xe)) 2165 2164 #define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \ 2166 2165 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
+13 -13
drivers/gpu/drm/i915/i915_gem.c
··· 3148 3148 u32 size = i915_gem_obj_ggtt_size(obj); 3149 3149 uint64_t val; 3150 3150 3151 + /* Adjust fence size to match tiled area */ 3152 + if (obj->tiling_mode != I915_TILING_NONE) { 3153 + uint32_t row_size = obj->stride * 3154 + (obj->tiling_mode == I915_TILING_Y ? 32 : 8); 3155 + size = (size / row_size) * row_size; 3156 + } 3157 + 3151 3158 val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) & 3152 3159 0xfffff000) << 32; 3153 3160 val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000; ··· 4891 4884 for (i = 0; i < NUM_L3_SLICES(dev); i++) 4892 4885 i915_gem_l3_remap(&dev_priv->ring[RCS], i); 4893 4886 4894 - /* 4895 - * XXX: Contexts should only be initialized once. Doing a switch to the 4896 - * default context switch however is something we'd like to do after 4897 - * reset or thaw (the latter may not actually be necessary for HW, but 4898 - * goes with our code better). Context switching requires rings (for 4899 - * the do_switch), but before enabling PPGTT. So don't move this. 4900 - */ 4887 + ret = i915_ppgtt_init_hw(dev); 4888 + if (ret && ret != -EIO) { 4889 + DRM_ERROR("PPGTT enable failed %d\n", ret); 4890 + i915_gem_cleanup_ringbuffer(dev); 4891 + } 4892 + 4901 4893 ret = i915_gem_context_enable(dev_priv); 4902 4894 if (ret && ret != -EIO) { 4903 4895 DRM_ERROR("Context enable failed %d\n", ret); 4904 4896 i915_gem_cleanup_ringbuffer(dev); 4905 4897 4906 4898 return ret; 4907 - } 4908 - 4909 - ret = i915_ppgtt_init_hw(dev); 4910 - if (ret && ret != -EIO) { 4911 - DRM_ERROR("PPGTT enable failed %d\n", ret); 4912 - i915_gem_cleanup_ringbuffer(dev); 4913 4899 } 4914 4900 4915 4901 return ret;
+1 -1
drivers/gpu/drm/i915/intel_panel.c
··· 962 962 963 963 WARN_ON(panel->backlight.max == 0); 964 964 965 - if (panel->backlight.level == 0) { 965 + if (panel->backlight.level <= panel->backlight.min) { 966 966 panel->backlight.level = panel->backlight.max; 967 967 if (panel->backlight.device) 968 968 panel->backlight.device->props.brightness =
-1
drivers/gpu/drm/radeon/cik_sdma.c
··· 816 816 for (; ndw > 0; ndw -= 2, --count, pe += 8) { 817 817 if (flags & R600_PTE_SYSTEM) { 818 818 value = radeon_vm_map_gart(rdev, addr); 819 - value &= 0xFFFFFFFFFFFFF000ULL; 820 819 } else if (flags & R600_PTE_VALID) { 821 820 value = addr; 822 821 } else {
-1
drivers/gpu/drm/radeon/ni_dma.c
··· 372 372 for (; ndw > 0; ndw -= 2, --count, pe += 8) { 373 373 if (flags & R600_PTE_SYSTEM) { 374 374 value = radeon_vm_map_gart(rdev, addr); 375 - value &= 0xFFFFFFFFFFFFF000ULL; 376 375 } else if (flags & R600_PTE_VALID) { 377 376 value = addr; 378 377 } else {
+8 -2
drivers/gpu/drm/radeon/r100.c
··· 644 644 return r; 645 645 rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; 646 646 rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush; 647 + rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry; 647 648 rdev->asic->gart.set_page = &r100_pci_gart_set_page; 648 649 return radeon_gart_table_ram_alloc(rdev); 649 650 } ··· 682 681 WREG32(RADEON_AIC_HI_ADDR, 0); 683 682 } 684 683 684 + uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags) 685 + { 686 + return addr; 687 + } 688 + 685 689 void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i, 686 - uint64_t addr, uint32_t flags) 690 + uint64_t entry) 687 691 { 688 692 u32 *gtt = rdev->gart.ptr; 689 - gtt[i] = cpu_to_le32(lower_32_bits(addr)); 693 + gtt[i] = cpu_to_le32(lower_32_bits(entry)); 690 694 } 691 695 692 696 void r100_pci_gart_fini(struct radeon_device *rdev)
+11 -5
drivers/gpu/drm/radeon/r300.c
··· 73 73 #define R300_PTE_WRITEABLE (1 << 2) 74 74 #define R300_PTE_READABLE (1 << 3) 75 75 76 - void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, 77 - uint64_t addr, uint32_t flags) 76 + uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags) 78 77 { 79 - void __iomem *ptr = rdev->gart.ptr; 80 - 81 78 addr = (lower_32_bits(addr) >> 8) | 82 79 ((upper_32_bits(addr) & 0xff) << 24); 83 80 if (flags & RADEON_GART_PAGE_READ) ··· 83 86 addr |= R300_PTE_WRITEABLE; 84 87 if (!(flags & RADEON_GART_PAGE_SNOOP)) 85 88 addr |= R300_PTE_UNSNOOPED; 89 + return addr; 90 + } 91 + 92 + void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, 93 + uint64_t entry) 94 + { 95 + void __iomem *ptr = rdev->gart.ptr; 96 + 86 97 /* on x86 we want this to be CPU endian, on powerpc 87 98 * on powerpc without HW swappers, it'll get swapped on way 88 99 * into VRAM - so no need for cpu_to_le32 on VRAM tables */ 89 - writel(addr, ((void __iomem *)ptr) + (i * 4)); 100 + writel(entry, ((void __iomem *)ptr) + (i * 4)); 90 101 } 91 102 92 103 int rv370_pcie_gart_init(struct radeon_device *rdev) ··· 114 109 DRM_ERROR("Failed to register debugfs file for PCIE gart !\n"); 115 110 rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; 116 111 rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush; 112 + rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry; 117 113 rdev->asic->gart.set_page = &rv370_pcie_gart_set_page; 118 114 return radeon_gart_table_vram_alloc(rdev); 119 115 }
+6 -3
drivers/gpu/drm/radeon/radeon.h
··· 242 242 * Dummy page 243 243 */ 244 244 struct radeon_dummy_page { 245 + uint64_t entry; 245 246 struct page *page; 246 247 dma_addr_t addr; 247 248 }; ··· 646 645 unsigned num_cpu_pages; 647 646 unsigned table_size; 648 647 struct page **pages; 649 - dma_addr_t *pages_addr; 648 + uint64_t *pages_entry; 650 649 bool ready; 651 650 }; 652 651 ··· 1848 1847 /* gart */ 1849 1848 struct { 1850 1849 void (*tlb_flush)(struct radeon_device *rdev); 1850 + uint64_t (*get_page_entry)(uint64_t addr, uint32_t flags); 1851 1851 void (*set_page)(struct radeon_device *rdev, unsigned i, 1852 - uint64_t addr, uint32_t flags); 1852 + uint64_t entry); 1853 1853 } gart; 1854 1854 struct { 1855 1855 int (*init)(struct radeon_device *rdev); ··· 2854 2852 #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) 2855 2853 #define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev)) 2856 2854 #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev)) 2857 - #define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f)) 2855 + #define radeon_gart_get_page_entry(a, f) (rdev)->asic->gart.get_page_entry((a), (f)) 2856 + #define radeon_gart_set_page(rdev, i, e) (rdev)->asic->gart.set_page((rdev), (i), (e)) 2858 2857 #define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev)) 2859 2858 #define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev)) 2860 2859 #define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count)))
+24
drivers/gpu/drm/radeon/radeon_asic.c
··· 159 159 DRM_INFO("Forcing AGP to PCIE mode\n"); 160 160 rdev->flags |= RADEON_IS_PCIE; 161 161 rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush; 162 + rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry; 162 163 rdev->asic->gart.set_page = &rv370_pcie_gart_set_page; 163 164 } else { 164 165 DRM_INFO("Forcing AGP to PCI mode\n"); 165 166 rdev->flags |= RADEON_IS_PCI; 166 167 rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush; 168 + rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry; 167 169 rdev->asic->gart.set_page = &r100_pci_gart_set_page; 168 170 } 169 171 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; ··· 201 199 .mc_wait_for_idle = &r100_mc_wait_for_idle, 202 200 .gart = { 203 201 .tlb_flush = &r100_pci_gart_tlb_flush, 202 + .get_page_entry = &r100_pci_gart_get_page_entry, 204 203 .set_page = &r100_pci_gart_set_page, 205 204 }, 206 205 .ring = { ··· 268 265 .mc_wait_for_idle = &r100_mc_wait_for_idle, 269 266 .gart = { 270 267 .tlb_flush = &r100_pci_gart_tlb_flush, 268 + .get_page_entry = &r100_pci_gart_get_page_entry, 271 269 .set_page = &r100_pci_gart_set_page, 272 270 }, 273 271 .ring = { ··· 363 359 .mc_wait_for_idle = &r300_mc_wait_for_idle, 364 360 .gart = { 365 361 .tlb_flush = &r100_pci_gart_tlb_flush, 362 + .get_page_entry = &r100_pci_gart_get_page_entry, 366 363 .set_page = &r100_pci_gart_set_page, 367 364 }, 368 365 .ring = { ··· 430 425 .mc_wait_for_idle = &r300_mc_wait_for_idle, 431 426 .gart = { 432 427 .tlb_flush = &rv370_pcie_gart_tlb_flush, 428 + .get_page_entry = &rv370_pcie_gart_get_page_entry, 433 429 .set_page = &rv370_pcie_gart_set_page, 434 430 }, 435 431 .ring = { ··· 497 491 .mc_wait_for_idle = &r300_mc_wait_for_idle, 498 492 .gart = { 499 493 .tlb_flush = &rv370_pcie_gart_tlb_flush, 494 + .get_page_entry = &rv370_pcie_gart_get_page_entry, 500 495 .set_page = &rv370_pcie_gart_set_page, 501 496 }, 502 497 .ring = { ··· 564 557 .mc_wait_for_idle = &rs400_mc_wait_for_idle, 565 558 .gart = { 566 
559 .tlb_flush = &rs400_gart_tlb_flush, 560 + .get_page_entry = &rs400_gart_get_page_entry, 567 561 .set_page = &rs400_gart_set_page, 568 562 }, 569 563 .ring = { ··· 631 623 .mc_wait_for_idle = &rs600_mc_wait_for_idle, 632 624 .gart = { 633 625 .tlb_flush = &rs600_gart_tlb_flush, 626 + .get_page_entry = &rs600_gart_get_page_entry, 634 627 .set_page = &rs600_gart_set_page, 635 628 }, 636 629 .ring = { ··· 700 691 .mc_wait_for_idle = &rs690_mc_wait_for_idle, 701 692 .gart = { 702 693 .tlb_flush = &rs400_gart_tlb_flush, 694 + .get_page_entry = &rs400_gart_get_page_entry, 703 695 .set_page = &rs400_gart_set_page, 704 696 }, 705 697 .ring = { ··· 769 759 .mc_wait_for_idle = &rv515_mc_wait_for_idle, 770 760 .gart = { 771 761 .tlb_flush = &rv370_pcie_gart_tlb_flush, 762 + .get_page_entry = &rv370_pcie_gart_get_page_entry, 772 763 .set_page = &rv370_pcie_gart_set_page, 773 764 }, 774 765 .ring = { ··· 836 825 .mc_wait_for_idle = &r520_mc_wait_for_idle, 837 826 .gart = { 838 827 .tlb_flush = &rv370_pcie_gart_tlb_flush, 828 + .get_page_entry = &rv370_pcie_gart_get_page_entry, 839 829 .set_page = &rv370_pcie_gart_set_page, 840 830 }, 841 831 .ring = { ··· 931 919 .get_gpu_clock_counter = &r600_get_gpu_clock_counter, 932 920 .gart = { 933 921 .tlb_flush = &r600_pcie_gart_tlb_flush, 922 + .get_page_entry = &rs600_gart_get_page_entry, 934 923 .set_page = &rs600_gart_set_page, 935 924 }, 936 925 .ring = { ··· 1017 1004 .get_gpu_clock_counter = &r600_get_gpu_clock_counter, 1018 1005 .gart = { 1019 1006 .tlb_flush = &r600_pcie_gart_tlb_flush, 1007 + .get_page_entry = &rs600_gart_get_page_entry, 1020 1008 .set_page = &rs600_gart_set_page, 1021 1009 }, 1022 1010 .ring = { ··· 1109 1095 .get_gpu_clock_counter = &r600_get_gpu_clock_counter, 1110 1096 .gart = { 1111 1097 .tlb_flush = &r600_pcie_gart_tlb_flush, 1098 + .get_page_entry = &rs600_gart_get_page_entry, 1112 1099 .set_page = &rs600_gart_set_page, 1113 1100 }, 1114 1101 .ring = { ··· 1214 1199 .get_gpu_clock_counter = 
&r600_get_gpu_clock_counter, 1215 1200 .gart = { 1216 1201 .tlb_flush = &r600_pcie_gart_tlb_flush, 1202 + .get_page_entry = &rs600_gart_get_page_entry, 1217 1203 .set_page = &rs600_gart_set_page, 1218 1204 }, 1219 1205 .ring = { ··· 1333 1317 .get_gpu_clock_counter = &r600_get_gpu_clock_counter, 1334 1318 .gart = { 1335 1319 .tlb_flush = &evergreen_pcie_gart_tlb_flush, 1320 + .get_page_entry = &rs600_gart_get_page_entry, 1336 1321 .set_page = &rs600_gart_set_page, 1337 1322 }, 1338 1323 .ring = { ··· 1426 1409 .get_gpu_clock_counter = &r600_get_gpu_clock_counter, 1427 1410 .gart = { 1428 1411 .tlb_flush = &evergreen_pcie_gart_tlb_flush, 1412 + .get_page_entry = &rs600_gart_get_page_entry, 1429 1413 .set_page = &rs600_gart_set_page, 1430 1414 }, 1431 1415 .ring = { ··· 1518 1500 .get_gpu_clock_counter = &r600_get_gpu_clock_counter, 1519 1501 .gart = { 1520 1502 .tlb_flush = &evergreen_pcie_gart_tlb_flush, 1503 + .get_page_entry = &rs600_gart_get_page_entry, 1521 1504 .set_page = &rs600_gart_set_page, 1522 1505 }, 1523 1506 .ring = { ··· 1654 1635 .get_gpu_clock_counter = &r600_get_gpu_clock_counter, 1655 1636 .gart = { 1656 1637 .tlb_flush = &cayman_pcie_gart_tlb_flush, 1638 + .get_page_entry = &rs600_gart_get_page_entry, 1657 1639 .set_page = &rs600_gart_set_page, 1658 1640 }, 1659 1641 .vm = { ··· 1758 1738 .get_gpu_clock_counter = &r600_get_gpu_clock_counter, 1759 1739 .gart = { 1760 1740 .tlb_flush = &cayman_pcie_gart_tlb_flush, 1741 + .get_page_entry = &rs600_gart_get_page_entry, 1761 1742 .set_page = &rs600_gart_set_page, 1762 1743 }, 1763 1744 .vm = { ··· 1892 1871 .get_gpu_clock_counter = &si_get_gpu_clock_counter, 1893 1872 .gart = { 1894 1873 .tlb_flush = &si_pcie_gart_tlb_flush, 1874 + .get_page_entry = &rs600_gart_get_page_entry, 1895 1875 .set_page = &rs600_gart_set_page, 1896 1876 }, 1897 1877 .vm = { ··· 2054 2032 .get_gpu_clock_counter = &cik_get_gpu_clock_counter, 2055 2033 .gart = { 2056 2034 .tlb_flush = &cik_pcie_gart_tlb_flush, 2035 + 
.get_page_entry = &rs600_gart_get_page_entry, 2057 2036 .set_page = &rs600_gart_set_page, 2058 2037 }, 2059 2038 .vm = { ··· 2162 2139 .get_gpu_clock_counter = &cik_get_gpu_clock_counter, 2163 2140 .gart = { 2164 2141 .tlb_flush = &cik_pcie_gart_tlb_flush, 2142 + .get_page_entry = &rs600_gart_get_page_entry, 2165 2143 .set_page = &rs600_gart_set_page, 2166 2144 }, 2167 2145 .vm = {
+8 -4
drivers/gpu/drm/radeon/radeon_asic.h
··· 67 67 int r100_asic_reset(struct radeon_device *rdev); 68 68 u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc); 69 69 void r100_pci_gart_tlb_flush(struct radeon_device *rdev); 70 + uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags); 70 71 void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i, 71 - uint64_t addr, uint32_t flags); 72 + uint64_t entry); 72 73 void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring); 73 74 int r100_irq_set(struct radeon_device *rdev); 74 75 int r100_irq_process(struct radeon_device *rdev); ··· 173 172 struct radeon_fence *fence); 174 173 extern int r300_cs_parse(struct radeon_cs_parser *p); 175 174 extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev); 175 + extern uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags); 176 176 extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, 177 - uint64_t addr, uint32_t flags); 177 + uint64_t entry); 178 178 extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); 179 179 extern int rv370_get_pcie_lanes(struct radeon_device *rdev); 180 180 extern void r300_set_reg_safe(struct radeon_device *rdev); ··· 210 208 extern int rs400_suspend(struct radeon_device *rdev); 211 209 extern int rs400_resume(struct radeon_device *rdev); 212 210 void rs400_gart_tlb_flush(struct radeon_device *rdev); 211 + uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags); 213 212 void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, 214 - uint64_t addr, uint32_t flags); 213 + uint64_t entry); 215 214 uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); 216 215 void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 217 216 int rs400_gart_init(struct radeon_device *rdev); ··· 235 232 void rs600_irq_disable(struct radeon_device *rdev); 236 233 u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc); 237 234 void 
rs600_gart_tlb_flush(struct radeon_device *rdev); 235 + uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags); 238 236 void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, 239 - uint64_t addr, uint32_t flags); 237 + uint64_t entry); 240 238 uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); 241 239 void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 242 240 void rs600_bandwidth_update(struct radeon_device *rdev);
+8 -5
drivers/gpu/drm/radeon/radeon_benchmark.c
··· 34 34 35 35 static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size, 36 36 uint64_t saddr, uint64_t daddr, 37 - int flag, int n) 37 + int flag, int n, 38 + struct reservation_object *resv) 38 39 { 39 40 unsigned long start_jiffies; 40 41 unsigned long end_jiffies; ··· 48 47 case RADEON_BENCHMARK_COPY_DMA: 49 48 fence = radeon_copy_dma(rdev, saddr, daddr, 50 49 size / RADEON_GPU_PAGE_SIZE, 51 - NULL); 50 + resv); 52 51 break; 53 52 case RADEON_BENCHMARK_COPY_BLIT: 54 53 fence = radeon_copy_blit(rdev, saddr, daddr, 55 54 size / RADEON_GPU_PAGE_SIZE, 56 - NULL); 55 + resv); 57 56 break; 58 57 default: 59 58 DRM_ERROR("Unknown copy method\n"); ··· 121 120 122 121 if (rdev->asic->copy.dma) { 123 122 time = radeon_benchmark_do_move(rdev, size, saddr, daddr, 124 - RADEON_BENCHMARK_COPY_DMA, n); 123 + RADEON_BENCHMARK_COPY_DMA, n, 124 + dobj->tbo.resv); 125 125 if (time < 0) 126 126 goto out_cleanup; 127 127 if (time > 0) ··· 132 130 133 131 if (rdev->asic->copy.blit) { 134 132 time = radeon_benchmark_do_move(rdev, size, saddr, daddr, 135 - RADEON_BENCHMARK_COPY_BLIT, n); 133 + RADEON_BENCHMARK_COPY_BLIT, n, 134 + dobj->tbo.resv); 136 135 if (time < 0) 137 136 goto out_cleanup; 138 137 if (time > 0)
+2
drivers/gpu/drm/radeon/radeon_device.c
··· 774 774 rdev->dummy_page.page = NULL; 775 775 return -ENOMEM; 776 776 } 777 + rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr, 778 + RADEON_GART_PAGE_DUMMY); 777 779 return 0; 778 780 } 779 781
+3
drivers/gpu/drm/radeon/radeon_display.c
··· 960 960 if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && 961 961 pll->flags & RADEON_PLL_USE_REF_DIV) 962 962 ref_div_max = pll->reference_div; 963 + else if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) 964 + /* fix for problems on RS880 */ 965 + ref_div_max = min(pll->max_ref_div, 7u); 963 966 else 964 967 ref_div_max = pll->max_ref_div; 965 968
+32 -22
drivers/gpu/drm/radeon/radeon_gart.c
··· 165 165 radeon_bo_unpin(rdev->gart.robj); 166 166 radeon_bo_unreserve(rdev->gart.robj); 167 167 rdev->gart.table_addr = gpu_addr; 168 + 169 + if (!r) { 170 + int i; 171 + 172 + /* We might have dropped some GART table updates while it wasn't 173 + * mapped, restore all entries 174 + */ 175 + for (i = 0; i < rdev->gart.num_gpu_pages; i++) 176 + radeon_gart_set_page(rdev, i, rdev->gart.pages_entry[i]); 177 + mb(); 178 + radeon_gart_tlb_flush(rdev); 179 + } 180 + 168 181 return r; 169 182 } 170 183 ··· 241 228 unsigned t; 242 229 unsigned p; 243 230 int i, j; 244 - u64 page_base; 245 231 246 232 if (!rdev->gart.ready) { 247 233 WARN(1, "trying to unbind memory from uninitialized GART !\n"); ··· 251 239 for (i = 0; i < pages; i++, p++) { 252 240 if (rdev->gart.pages[p]) { 253 241 rdev->gart.pages[p] = NULL; 254 - rdev->gart.pages_addr[p] = rdev->dummy_page.addr; 255 - page_base = rdev->gart.pages_addr[p]; 256 242 for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { 243 + rdev->gart.pages_entry[t] = rdev->dummy_page.entry; 257 244 if (rdev->gart.ptr) { 258 - radeon_gart_set_page(rdev, t, page_base, 259 - RADEON_GART_PAGE_DUMMY); 245 + radeon_gart_set_page(rdev, t, 246 + rdev->dummy_page.entry); 260 247 } 261 - page_base += RADEON_GPU_PAGE_SIZE; 262 248 } 263 249 } 264 250 } ··· 284 274 { 285 275 unsigned t; 286 276 unsigned p; 287 - uint64_t page_base; 277 + uint64_t page_base, page_entry; 288 278 int i, j; 289 279 290 280 if (!rdev->gart.ready) { ··· 295 285 p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); 296 286 297 287 for (i = 0; i < pages; i++, p++) { 298 - rdev->gart.pages_addr[p] = dma_addr[i]; 299 288 rdev->gart.pages[p] = pagelist[i]; 300 - if (rdev->gart.ptr) { 301 - page_base = rdev->gart.pages_addr[p]; 302 - for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { 303 - radeon_gart_set_page(rdev, t, page_base, flags); 304 - page_base += RADEON_GPU_PAGE_SIZE; 289 + page_base = dma_addr[i]; 290 + for (j = 0; j < (PAGE_SIZE / 
RADEON_GPU_PAGE_SIZE); j++, t++) { 291 + page_entry = radeon_gart_get_page_entry(page_base, flags); 292 + rdev->gart.pages_entry[t] = page_entry; 293 + if (rdev->gart.ptr) { 294 + radeon_gart_set_page(rdev, t, page_entry); 305 295 } 296 + page_base += RADEON_GPU_PAGE_SIZE; 306 297 } 307 298 } 308 299 mb(); ··· 345 334 radeon_gart_fini(rdev); 346 335 return -ENOMEM; 347 336 } 348 - rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) * 349 - rdev->gart.num_cpu_pages); 350 - if (rdev->gart.pages_addr == NULL) { 337 + rdev->gart.pages_entry = vmalloc(sizeof(uint64_t) * 338 + rdev->gart.num_gpu_pages); 339 + if (rdev->gart.pages_entry == NULL) { 351 340 radeon_gart_fini(rdev); 352 341 return -ENOMEM; 353 342 } 354 343 /* set GART entry to point to the dummy page by default */ 355 - for (i = 0; i < rdev->gart.num_cpu_pages; i++) { 356 - rdev->gart.pages_addr[i] = rdev->dummy_page.addr; 357 - } 344 + for (i = 0; i < rdev->gart.num_gpu_pages; i++) 345 + rdev->gart.pages_entry[i] = rdev->dummy_page.entry; 358 346 return 0; 359 347 } 360 348 ··· 366 356 */ 367 357 void radeon_gart_fini(struct radeon_device *rdev) 368 358 { 369 - if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) { 359 + if (rdev->gart.ready) { 370 360 /* unbind pages */ 371 361 radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages); 372 362 } 373 363 rdev->gart.ready = false; 374 364 vfree(rdev->gart.pages); 375 - vfree(rdev->gart.pages_addr); 365 + vfree(rdev->gart.pages_entry); 376 366 rdev->gart.pages = NULL; 377 - rdev->gart.pages_addr = NULL; 367 + rdev->gart.pages_entry = NULL; 378 368 379 369 radeon_dummy_page_fini(rdev); 380 370 }
+4 -2
drivers/gpu/drm/radeon/radeon_gem.c
··· 146 146 struct radeon_bo_va *bo_va; 147 147 int r; 148 148 149 - if (rdev->family < CHIP_CAYMAN) { 149 + if ((rdev->family < CHIP_CAYMAN) || 150 + (!rdev->accel_working)) { 150 151 return 0; 151 152 } 152 153 ··· 177 176 struct radeon_bo_va *bo_va; 178 177 int r; 179 178 180 - if (rdev->family < CHIP_CAYMAN) { 179 + if ((rdev->family < CHIP_CAYMAN) || 180 + (!rdev->accel_working)) { 181 181 return; 182 182 } 183 183
+1 -1
drivers/gpu/drm/radeon/radeon_kfd.c
··· 436 436 static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id, 437 437 uint32_t hpd_size, uint64_t hpd_gpu_addr) 438 438 { 439 - uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1; 439 + uint32_t mec = (pipe_id / CIK_PIPE_PER_MEC) + 1; 440 440 uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC); 441 441 442 442 lock_srbm(kgd, mec, pipe, 0, 0);
+8 -8
drivers/gpu/drm/radeon/radeon_kms.c
··· 605 605 return -ENOMEM; 606 606 } 607 607 608 - vm = &fpriv->vm; 609 - r = radeon_vm_init(rdev, vm); 610 - if (r) { 611 - kfree(fpriv); 612 - return r; 613 - } 614 - 615 608 if (rdev->accel_working) { 609 + vm = &fpriv->vm; 610 + r = radeon_vm_init(rdev, vm); 611 + if (r) { 612 + kfree(fpriv); 613 + return r; 614 + } 615 + 616 616 r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); 617 617 if (r) { 618 618 radeon_vm_fini(rdev, vm); ··· 668 668 radeon_vm_bo_rmv(rdev, vm->ib_bo_va); 669 669 radeon_bo_unreserve(rdev->ring_tmp_bo.bo); 670 670 } 671 + radeon_vm_fini(rdev, vm); 671 672 } 672 673 673 - radeon_vm_fini(rdev, vm); 674 674 kfree(fpriv); 675 675 file_priv->driver_priv = NULL; 676 676 }
+4 -4
drivers/gpu/drm/radeon/radeon_test.c
··· 119 119 if (ring == R600_RING_TYPE_DMA_INDEX) 120 120 fence = radeon_copy_dma(rdev, gtt_addr, vram_addr, 121 121 size / RADEON_GPU_PAGE_SIZE, 122 - NULL); 122 + vram_obj->tbo.resv); 123 123 else 124 124 fence = radeon_copy_blit(rdev, gtt_addr, vram_addr, 125 125 size / RADEON_GPU_PAGE_SIZE, 126 - NULL); 126 + vram_obj->tbo.resv); 127 127 if (IS_ERR(fence)) { 128 128 DRM_ERROR("Failed GTT->VRAM copy %d\n", i); 129 129 r = PTR_ERR(fence); ··· 170 170 if (ring == R600_RING_TYPE_DMA_INDEX) 171 171 fence = radeon_copy_dma(rdev, vram_addr, gtt_addr, 172 172 size / RADEON_GPU_PAGE_SIZE, 173 - NULL); 173 + vram_obj->tbo.resv); 174 174 else 175 175 fence = radeon_copy_blit(rdev, vram_addr, gtt_addr, 176 176 size / RADEON_GPU_PAGE_SIZE, 177 - NULL); 177 + vram_obj->tbo.resv); 178 178 if (IS_ERR(fence)) { 179 179 DRM_ERROR("Failed VRAM->GTT copy %d\n", i); 180 180 r = PTR_ERR(fence);
+6 -6
drivers/gpu/drm/radeon/radeon_vm.c
··· 587 587 uint64_t result; 588 588 589 589 /* page table offset */ 590 - result = rdev->gart.pages_addr[addr >> PAGE_SHIFT]; 591 - 592 - /* in case cpu page size != gpu page size*/ 593 - result |= addr & (~PAGE_MASK); 590 + result = rdev->gart.pages_entry[addr >> RADEON_GPU_PAGE_SHIFT]; 591 + result &= ~RADEON_GPU_PAGE_MASK; 594 592 595 593 return result; 596 594 } ··· 743 745 */ 744 746 745 747 /* NI is optimized for 256KB fragments, SI and newer for 64KB */ 746 - uint64_t frag_flags = rdev->family == CHIP_CAYMAN ? 748 + uint64_t frag_flags = ((rdev->family == CHIP_CAYMAN) || 749 + (rdev->family == CHIP_ARUBA)) ? 747 750 R600_PTE_FRAG_256KB : R600_PTE_FRAG_64KB; 748 - uint64_t frag_align = rdev->family == CHIP_CAYMAN ? 0x200 : 0x80; 751 + uint64_t frag_align = ((rdev->family == CHIP_CAYMAN) || 752 + (rdev->family == CHIP_ARUBA)) ? 0x200 : 0x80; 749 753 750 754 uint64_t frag_start = ALIGN(pe_start, frag_align); 751 755 uint64_t frag_end = pe_end & ~(frag_align - 1);
+9 -5
drivers/gpu/drm/radeon/rs400.c
··· 212 212 #define RS400_PTE_WRITEABLE (1 << 2) 213 213 #define RS400_PTE_READABLE (1 << 3) 214 214 215 - void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, 216 - uint64_t addr, uint32_t flags) 215 + uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags) 217 216 { 218 217 uint32_t entry; 219 - u32 *gtt = rdev->gart.ptr; 220 218 221 219 entry = (lower_32_bits(addr) & PAGE_MASK) | 222 220 ((upper_32_bits(addr) & 0xff) << 4); ··· 224 226 entry |= RS400_PTE_WRITEABLE; 225 227 if (!(flags & RADEON_GART_PAGE_SNOOP)) 226 228 entry |= RS400_PTE_UNSNOOPED; 227 - entry = cpu_to_le32(entry); 228 - gtt[i] = entry; 229 + return entry; 230 + } 231 + 232 + void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, 233 + uint64_t entry) 234 + { 235 + u32 *gtt = rdev->gart.ptr; 236 + gtt[i] = cpu_to_le32(lower_32_bits(entry)); 229 237 } 230 238 231 239 int rs400_mc_wait_for_idle(struct radeon_device *rdev)
+9 -5
drivers/gpu/drm/radeon/rs600.c
··· 625 625 radeon_gart_table_vram_free(rdev); 626 626 } 627 627 628 - void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, 629 - uint64_t addr, uint32_t flags) 628 + uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags) 630 629 { 631 - void __iomem *ptr = (void *)rdev->gart.ptr; 632 - 633 630 addr = addr & 0xFFFFFFFFFFFFF000ULL; 634 631 addr |= R600_PTE_SYSTEM; 635 632 if (flags & RADEON_GART_PAGE_VALID) ··· 637 640 addr |= R600_PTE_WRITEABLE; 638 641 if (flags & RADEON_GART_PAGE_SNOOP) 639 642 addr |= R600_PTE_SNOOPED; 640 - writeq(addr, ptr + (i * 8)); 643 + return addr; 644 + } 645 + 646 + void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, 647 + uint64_t entry) 648 + { 649 + void __iomem *ptr = (void *)rdev->gart.ptr; 650 + writeq(entry, ptr + (i * 8)); 641 651 } 642 652 643 653 int rs600_irq_set(struct radeon_device *rdev)
-1
drivers/gpu/drm/radeon/si_dma.c
··· 123 123 for (; ndw > 0; ndw -= 2, --count, pe += 8) { 124 124 if (flags & R600_PTE_SYSTEM) { 125 125 value = radeon_vm_map_gart(rdev, addr); 126 - value &= 0xFFFFFFFFFFFFF000ULL; 127 126 } else if (flags & R600_PTE_VALID) { 128 127 value = addr; 129 128 } else {
+5 -23
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
··· 406 406 if (unlikely(ret != 0)) 407 407 --dev_priv->num_3d_resources; 408 408 } else if (unhide_svga) { 409 - mutex_lock(&dev_priv->hw_mutex); 410 409 vmw_write(dev_priv, SVGA_REG_ENABLE, 411 410 vmw_read(dev_priv, SVGA_REG_ENABLE) & 412 411 ~SVGA_REG_ENABLE_HIDE); 413 - mutex_unlock(&dev_priv->hw_mutex); 414 412 } 415 413 416 414 mutex_unlock(&dev_priv->release_mutex); ··· 431 433 mutex_lock(&dev_priv->release_mutex); 432 434 if (unlikely(--dev_priv->num_3d_resources == 0)) 433 435 vmw_release_device(dev_priv); 434 - else if (hide_svga) { 435 - mutex_lock(&dev_priv->hw_mutex); 436 + else if (hide_svga) 436 437 vmw_write(dev_priv, SVGA_REG_ENABLE, 437 438 vmw_read(dev_priv, SVGA_REG_ENABLE) | 438 439 SVGA_REG_ENABLE_HIDE); 439 - mutex_unlock(&dev_priv->hw_mutex); 440 - } 441 440 442 441 n3d = (int32_t) dev_priv->num_3d_resources; 443 442 mutex_unlock(&dev_priv->release_mutex); ··· 595 600 dev_priv->dev = dev; 596 601 dev_priv->vmw_chipset = chipset; 597 602 dev_priv->last_read_seqno = (uint32_t) -100; 598 - mutex_init(&dev_priv->hw_mutex); 599 603 mutex_init(&dev_priv->cmdbuf_mutex); 600 604 mutex_init(&dev_priv->release_mutex); 601 605 mutex_init(&dev_priv->binding_mutex); 602 606 rwlock_init(&dev_priv->resource_lock); 603 607 ttm_lock_init(&dev_priv->reservation_sem); 608 + spin_lock_init(&dev_priv->hw_lock); 609 + spin_lock_init(&dev_priv->waiter_lock); 610 + spin_lock_init(&dev_priv->cap_lock); 604 611 605 612 for (i = vmw_res_context; i < vmw_res_max; ++i) { 606 613 idr_init(&dev_priv->res_idr[i]); ··· 623 626 624 627 dev_priv->enable_fb = enable_fbdev; 625 628 626 - mutex_lock(&dev_priv->hw_mutex); 627 - 628 629 vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); 629 630 svga_id = vmw_read(dev_priv, SVGA_REG_ID); 630 631 if (svga_id != SVGA_ID_2) { 631 632 ret = -ENOSYS; 632 633 DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id); 633 - mutex_unlock(&dev_priv->hw_mutex); 634 634 goto out_err0; 635 635 } 636 636 ··· 677 683 dev_priv->prim_bb_mem = 
dev_priv->vram_size; 678 684 679 685 ret = vmw_dma_masks(dev_priv); 680 - if (unlikely(ret != 0)) { 681 - mutex_unlock(&dev_priv->hw_mutex); 686 + if (unlikely(ret != 0)) 682 687 goto out_err0; 683 - } 684 688 685 689 /* 686 690 * Limit back buffer size to VRAM size. Remove this once ··· 686 694 */ 687 695 if (dev_priv->prim_bb_mem > dev_priv->vram_size) 688 696 dev_priv->prim_bb_mem = dev_priv->vram_size; 689 - 690 - mutex_unlock(&dev_priv->hw_mutex); 691 697 692 698 vmw_print_capabilities(dev_priv->capabilities); 693 699 ··· 1150 1160 if (unlikely(ret != 0)) 1151 1161 return ret; 1152 1162 vmw_kms_save_vga(dev_priv); 1153 - mutex_lock(&dev_priv->hw_mutex); 1154 1163 vmw_write(dev_priv, SVGA_REG_TRACES, 0); 1155 - mutex_unlock(&dev_priv->hw_mutex); 1156 1164 } 1157 1165 1158 1166 if (active) { ··· 1184 1196 if (!dev_priv->enable_fb) { 1185 1197 vmw_kms_restore_vga(dev_priv); 1186 1198 vmw_3d_resource_dec(dev_priv, true); 1187 - mutex_lock(&dev_priv->hw_mutex); 1188 1199 vmw_write(dev_priv, SVGA_REG_TRACES, 1); 1189 - mutex_unlock(&dev_priv->hw_mutex); 1190 1200 } 1191 1201 return ret; 1192 1202 } ··· 1219 1233 DRM_ERROR("Unable to clean VRAM on master drop.\n"); 1220 1234 vmw_kms_restore_vga(dev_priv); 1221 1235 vmw_3d_resource_dec(dev_priv, true); 1222 - mutex_lock(&dev_priv->hw_mutex); 1223 1236 vmw_write(dev_priv, SVGA_REG_TRACES, 1); 1224 - mutex_unlock(&dev_priv->hw_mutex); 1225 1237 } 1226 1238 1227 1239 dev_priv->active_master = &dev_priv->fbdev_master; ··· 1351 1367 struct drm_device *dev = pci_get_drvdata(pdev); 1352 1368 struct vmw_private *dev_priv = vmw_priv(dev); 1353 1369 1354 - mutex_lock(&dev_priv->hw_mutex); 1355 1370 vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); 1356 1371 (void) vmw_read(dev_priv, SVGA_REG_ID); 1357 - mutex_unlock(&dev_priv->hw_mutex); 1358 1372 1359 1373 /** 1360 1374 * Reclaim 3d reference held by fbdev and potentially
+21 -4
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
··· 399 399 uint32_t memory_size; 400 400 bool has_gmr; 401 401 bool has_mob; 402 - struct mutex hw_mutex; 402 + spinlock_t hw_lock; 403 + spinlock_t cap_lock; 403 404 404 405 /* 405 406 * VGA registers. ··· 450 449 atomic_t marker_seq; 451 450 wait_queue_head_t fence_queue; 452 451 wait_queue_head_t fifo_queue; 453 - int fence_queue_waiters; /* Protected by hw_mutex */ 454 - int goal_queue_waiters; /* Protected by hw_mutex */ 452 + spinlock_t waiter_lock; 453 + int fence_queue_waiters; /* Protected by waiter_lock */ 454 + int goal_queue_waiters; /* Protected by waiter_lock */ 455 455 atomic_t fifo_queue_waiters; 456 456 uint32_t last_read_seqno; 457 457 spinlock_t irq_lock; ··· 555 553 return (struct vmw_master *) master->driver_priv; 556 554 } 557 555 556 + /* 557 + * The locking here is fine-grained, so that it is performed once 558 + * for every read- and write operation. This is of course costly, but we 559 + * don't perform much register access in the timing critical paths anyway. 560 + * Instead we have the extra benefit of being sure that we don't forget 561 + * the hw lock around register accesses. 
562 + */ 558 563 static inline void vmw_write(struct vmw_private *dev_priv, 559 564 unsigned int offset, uint32_t value) 560 565 { 566 + unsigned long irq_flags; 567 + 568 + spin_lock_irqsave(&dev_priv->hw_lock, irq_flags); 561 569 outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT); 562 570 outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT); 571 + spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags); 563 572 } 564 573 565 574 static inline uint32_t vmw_read(struct vmw_private *dev_priv, 566 575 unsigned int offset) 567 576 { 568 - uint32_t val; 577 + unsigned long irq_flags; 578 + u32 val; 569 579 580 + spin_lock_irqsave(&dev_priv->hw_lock, irq_flags); 570 581 outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT); 571 582 val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT); 583 + spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags); 584 + 572 585 return val; 573 586 } 574 587
+2 -16
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
··· 35 35 struct vmw_private *dev_priv; 36 36 spinlock_t lock; 37 37 struct list_head fence_list; 38 - struct work_struct work, ping_work; 38 + struct work_struct work; 39 39 u32 user_fence_size; 40 40 u32 fence_size; 41 41 u32 event_fence_action_size; ··· 134 134 return "svga"; 135 135 } 136 136 137 - static void vmw_fence_ping_func(struct work_struct *work) 138 - { 139 - struct vmw_fence_manager *fman = 140 - container_of(work, struct vmw_fence_manager, ping_work); 141 - 142 - vmw_fifo_ping_host(fman->dev_priv, SVGA_SYNC_GENERIC); 143 - } 144 - 145 137 static bool vmw_fence_enable_signaling(struct fence *f) 146 138 { 147 139 struct vmw_fence_obj *fence = ··· 147 155 if (seqno - fence->base.seqno < VMW_FENCE_WRAP) 148 156 return false; 149 157 150 - if (mutex_trylock(&dev_priv->hw_mutex)) { 151 - vmw_fifo_ping_host_locked(dev_priv, SVGA_SYNC_GENERIC); 152 - mutex_unlock(&dev_priv->hw_mutex); 153 - } else 154 - schedule_work(&fman->ping_work); 158 + vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); 155 159 156 160 return true; 157 161 } ··· 293 305 INIT_LIST_HEAD(&fman->fence_list); 294 306 INIT_LIST_HEAD(&fman->cleanup_list); 295 307 INIT_WORK(&fman->work, &vmw_fence_work_func); 296 - INIT_WORK(&fman->ping_work, &vmw_fence_ping_func); 297 308 fman->fifo_down = true; 298 309 fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)); 299 310 fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj)); ··· 310 323 bool lists_empty; 311 324 312 325 (void) cancel_work_sync(&fman->work); 313 - (void) cancel_work_sync(&fman->ping_work); 314 326 315 327 spin_lock_irqsave(&fman->lock, irq_flags); 316 328 lists_empty = list_empty(&fman->fence_list) &&
+15 -21
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
··· 44 44 if (!dev_priv->has_mob) 45 45 return false; 46 46 47 - mutex_lock(&dev_priv->hw_mutex); 47 + spin_lock(&dev_priv->cap_lock); 48 48 vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D); 49 49 result = vmw_read(dev_priv, SVGA_REG_DEV_CAP); 50 - mutex_unlock(&dev_priv->hw_mutex); 50 + spin_unlock(&dev_priv->cap_lock); 51 51 52 52 return (result != 0); 53 53 } ··· 120 120 DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT)); 121 121 DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL)); 122 122 123 - mutex_lock(&dev_priv->hw_mutex); 124 123 dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE); 125 124 dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); 126 125 dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES); ··· 142 143 mb(); 143 144 144 145 vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1); 145 - mutex_unlock(&dev_priv->hw_mutex); 146 146 147 147 max = ioread32(fifo_mem + SVGA_FIFO_MAX); 148 148 min = ioread32(fifo_mem + SVGA_FIFO_MIN); ··· 158 160 return vmw_fifo_send_fence(dev_priv, &dummy); 159 161 } 160 162 161 - void vmw_fifo_ping_host_locked(struct vmw_private *dev_priv, uint32_t reason) 163 + void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason) 162 164 { 163 165 __le32 __iomem *fifo_mem = dev_priv->mmio_virt; 166 + static DEFINE_SPINLOCK(ping_lock); 167 + unsigned long irq_flags; 164 168 169 + /* 170 + * The ping_lock is needed because we don't have an atomic 171 + * test-and-set of the SVGA_FIFO_BUSY register. 
172 + */ 173 + spin_lock_irqsave(&ping_lock, irq_flags); 165 174 if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) { 166 175 iowrite32(1, fifo_mem + SVGA_FIFO_BUSY); 167 176 vmw_write(dev_priv, SVGA_REG_SYNC, reason); 168 177 } 169 - } 170 - 171 - void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason) 172 - { 173 - mutex_lock(&dev_priv->hw_mutex); 174 - 175 - vmw_fifo_ping_host_locked(dev_priv, reason); 176 - 177 - mutex_unlock(&dev_priv->hw_mutex); 178 + spin_unlock_irqrestore(&ping_lock, irq_flags); 178 179 } 179 180 180 181 void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) 181 182 { 182 183 __le32 __iomem *fifo_mem = dev_priv->mmio_virt; 183 - 184 - mutex_lock(&dev_priv->hw_mutex); 185 184 186 185 vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC); 187 186 while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0) ··· 193 198 vmw_write(dev_priv, SVGA_REG_TRACES, 194 199 dev_priv->traces_state); 195 200 196 - mutex_unlock(&dev_priv->hw_mutex); 197 201 vmw_marker_queue_takedown(&fifo->marker_queue); 198 202 199 203 if (likely(fifo->static_buffer != NULL)) { ··· 265 271 return vmw_fifo_wait_noirq(dev_priv, bytes, 266 272 interruptible, timeout); 267 273 268 - mutex_lock(&dev_priv->hw_mutex); 274 + spin_lock(&dev_priv->waiter_lock); 269 275 if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) { 270 276 spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); 271 277 outl(SVGA_IRQFLAG_FIFO_PROGRESS, ··· 274 280 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); 275 281 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); 276 282 } 277 - mutex_unlock(&dev_priv->hw_mutex); 283 + spin_unlock(&dev_priv->waiter_lock); 278 284 279 285 if (interruptible) 280 286 ret = wait_event_interruptible_timeout ··· 290 296 else if (likely(ret > 0)) 291 297 ret = 0; 292 298 293 - mutex_lock(&dev_priv->hw_mutex); 299 + spin_lock(&dev_priv->waiter_lock); 294 300 if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) { 295 301 
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); 296 302 dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS; 297 303 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); 298 304 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); 299 305 } 300 - mutex_unlock(&dev_priv->hw_mutex); 306 + spin_unlock(&dev_priv->waiter_lock); 301 307 302 308 return ret; 303 309 }
+4 -4
drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
··· 135 135 (pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32); 136 136 compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS; 137 137 138 - mutex_lock(&dev_priv->hw_mutex); 138 + spin_lock(&dev_priv->cap_lock); 139 139 for (i = 0; i < max_size; ++i) { 140 140 vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); 141 141 compat_cap->pairs[i][0] = i; 142 142 compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP); 143 143 } 144 - mutex_unlock(&dev_priv->hw_mutex); 144 + spin_unlock(&dev_priv->cap_lock); 145 145 146 146 return 0; 147 147 } ··· 191 191 if (num > SVGA3D_DEVCAP_MAX) 192 192 num = SVGA3D_DEVCAP_MAX; 193 193 194 - mutex_lock(&dev_priv->hw_mutex); 194 + spin_lock(&dev_priv->cap_lock); 195 195 for (i = 0; i < num; ++i) { 196 196 vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); 197 197 *bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP); 198 198 } 199 - mutex_unlock(&dev_priv->hw_mutex); 199 + spin_unlock(&dev_priv->cap_lock); 200 200 } else if (gb_objects) { 201 201 ret = vmw_fill_compat_cap(dev_priv, bounce, size); 202 202 if (unlikely(ret != 0))
+9 -16
drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
··· 62 62 63 63 static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno) 64 64 { 65 - uint32_t busy; 66 65 67 - mutex_lock(&dev_priv->hw_mutex); 68 - busy = vmw_read(dev_priv, SVGA_REG_BUSY); 69 - mutex_unlock(&dev_priv->hw_mutex); 70 - 71 - return (busy == 0); 66 + return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0); 72 67 } 73 68 74 69 void vmw_update_seqno(struct vmw_private *dev_priv, ··· 179 184 180 185 void vmw_seqno_waiter_add(struct vmw_private *dev_priv) 181 186 { 182 - mutex_lock(&dev_priv->hw_mutex); 187 + spin_lock(&dev_priv->waiter_lock); 183 188 if (dev_priv->fence_queue_waiters++ == 0) { 184 189 unsigned long irq_flags; 185 190 ··· 190 195 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); 191 196 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); 192 197 } 193 - mutex_unlock(&dev_priv->hw_mutex); 198 + spin_unlock(&dev_priv->waiter_lock); 194 199 } 195 200 196 201 void vmw_seqno_waiter_remove(struct vmw_private *dev_priv) 197 202 { 198 - mutex_lock(&dev_priv->hw_mutex); 203 + spin_lock(&dev_priv->waiter_lock); 199 204 if (--dev_priv->fence_queue_waiters == 0) { 200 205 unsigned long irq_flags; 201 206 ··· 204 209 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); 205 210 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); 206 211 } 207 - mutex_unlock(&dev_priv->hw_mutex); 212 + spin_unlock(&dev_priv->waiter_lock); 208 213 } 209 214 210 215 211 216 void vmw_goal_waiter_add(struct vmw_private *dev_priv) 212 217 { 213 - mutex_lock(&dev_priv->hw_mutex); 218 + spin_lock(&dev_priv->waiter_lock); 214 219 if (dev_priv->goal_queue_waiters++ == 0) { 215 220 unsigned long irq_flags; 216 221 ··· 221 226 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); 222 227 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); 223 228 } 224 - mutex_unlock(&dev_priv->hw_mutex); 229 + spin_unlock(&dev_priv->waiter_lock); 225 230 } 226 231 227 232 void vmw_goal_waiter_remove(struct vmw_private *dev_priv) 228 233 { 229 - 
mutex_lock(&dev_priv->hw_mutex); 234 + spin_lock(&dev_priv->waiter_lock); 230 235 if (--dev_priv->goal_queue_waiters == 0) { 231 236 unsigned long irq_flags; 232 237 ··· 235 240 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); 236 241 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); 237 242 } 238 - mutex_unlock(&dev_priv->hw_mutex); 243 + spin_unlock(&dev_priv->waiter_lock); 239 244 } 240 245 241 246 int vmw_wait_seqno(struct vmw_private *dev_priv, ··· 310 315 if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK)) 311 316 return; 312 317 313 - mutex_lock(&dev_priv->hw_mutex); 314 318 vmw_write(dev_priv, SVGA_REG_IRQMASK, 0); 315 - mutex_unlock(&dev_priv->hw_mutex); 316 319 317 320 status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); 318 321 outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
-2
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
··· 1828 1828 struct vmw_private *dev_priv = vmw_priv(dev); 1829 1829 struct vmw_display_unit *du = vmw_connector_to_du(connector); 1830 1830 1831 - mutex_lock(&dev_priv->hw_mutex); 1832 1831 num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS); 1833 - mutex_unlock(&dev_priv->hw_mutex); 1834 1832 1835 1833 return ((vmw_connector_to_du(connector)->unit < num_displays && 1836 1834 du->pref_active) ?
+1
drivers/i2c/busses/Kconfig
··· 881 881 config I2C_RCAR 882 882 tristate "Renesas R-Car I2C Controller" 883 883 depends on ARCH_SHMOBILE || COMPILE_TEST 884 + select I2C_SLAVE 884 885 help 885 886 If you say yes to this option, support will be included for the 886 887 R-Car I2C controller.
+17 -6
drivers/i2c/busses/i2c-s3c2410.c
··· 785 785 int ret; 786 786 787 787 pm_runtime_get_sync(&adap->dev); 788 - clk_prepare_enable(i2c->clk); 788 + ret = clk_enable(i2c->clk); 789 + if (ret) 790 + return ret; 789 791 790 792 for (retry = 0; retry < adap->retries; retry++) { 791 793 792 794 ret = s3c24xx_i2c_doxfer(i2c, msgs, num); 793 795 794 796 if (ret != -EAGAIN) { 795 - clk_disable_unprepare(i2c->clk); 797 + clk_disable(i2c->clk); 796 798 pm_runtime_put(&adap->dev); 797 799 return ret; 798 800 } ··· 804 802 udelay(100); 805 803 } 806 804 807 - clk_disable_unprepare(i2c->clk); 805 + clk_disable(i2c->clk); 808 806 pm_runtime_put(&adap->dev); 809 807 return -EREMOTEIO; 810 808 } ··· 1199 1197 1200 1198 clk_prepare_enable(i2c->clk); 1201 1199 ret = s3c24xx_i2c_init(i2c); 1202 - clk_disable_unprepare(i2c->clk); 1200 + clk_disable(i2c->clk); 1203 1201 if (ret != 0) { 1204 1202 dev_err(&pdev->dev, "I2C controller init failed\n"); 1205 1203 return ret; ··· 1212 1210 i2c->irq = ret = platform_get_irq(pdev, 0); 1213 1211 if (ret <= 0) { 1214 1212 dev_err(&pdev->dev, "cannot find IRQ\n"); 1213 + clk_unprepare(i2c->clk); 1215 1214 return ret; 1216 1215 } 1217 1216 ··· 1221 1218 1222 1219 if (ret != 0) { 1223 1220 dev_err(&pdev->dev, "cannot claim IRQ %d\n", i2c->irq); 1221 + clk_unprepare(i2c->clk); 1224 1222 return ret; 1225 1223 } 1226 1224 } ··· 1229 1225 ret = s3c24xx_i2c_register_cpufreq(i2c); 1230 1226 if (ret < 0) { 1231 1227 dev_err(&pdev->dev, "failed to register cpufreq notifier\n"); 1228 + clk_unprepare(i2c->clk); 1232 1229 return ret; 1233 1230 } 1234 1231 ··· 1246 1241 if (ret < 0) { 1247 1242 dev_err(&pdev->dev, "failed to add bus to i2c core\n"); 1248 1243 s3c24xx_i2c_deregister_cpufreq(i2c); 1244 + clk_unprepare(i2c->clk); 1249 1245 return ret; 1250 1246 } 1251 1247 ··· 1267 1261 static int s3c24xx_i2c_remove(struct platform_device *pdev) 1268 1262 { 1269 1263 struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev); 1264 + 1265 + clk_unprepare(i2c->clk); 1270 1266 1271 1267 
pm_runtime_disable(&i2c->adap.dev); 1272 1268 pm_runtime_disable(&pdev->dev); ··· 1301 1293 { 1302 1294 struct platform_device *pdev = to_platform_device(dev); 1303 1295 struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev); 1296 + int ret; 1304 1297 1305 1298 if (!IS_ERR(i2c->sysreg)) 1306 1299 regmap_write(i2c->sysreg, EXYNOS5_SYS_I2C_CFG, i2c->sys_i2c_cfg); 1307 1300 1308 - clk_prepare_enable(i2c->clk); 1301 + ret = clk_enable(i2c->clk); 1302 + if (ret) 1303 + return ret; 1309 1304 s3c24xx_i2c_init(i2c); 1310 - clk_disable_unprepare(i2c->clk); 1305 + clk_disable(i2c->clk); 1311 1306 i2c->suspended = 0; 1312 1307 1313 1308 return 0;
+11 -1
drivers/i2c/busses/i2c-sh_mobile.c
··· 139 139 int pos; 140 140 int sr; 141 141 bool send_stop; 142 + bool stop_after_dma; 142 143 143 144 struct resource *res; 144 145 struct dma_chan *dma_tx; ··· 408 407 409 408 if (pd->pos == pd->msg->len) { 410 409 /* Send stop if we haven't yet (DMA case) */ 411 - if (pd->send_stop && (iic_rd(pd, ICCR) & ICCR_BBSY)) 410 + if (pd->send_stop && pd->stop_after_dma) 412 411 i2c_op(pd, OP_TX_STOP, 0); 413 412 return 1; 414 413 } ··· 450 449 real_pos = pd->pos - 2; 451 450 452 451 if (pd->pos == pd->msg->len) { 452 + if (pd->stop_after_dma) { 453 + /* Simulate PIO end condition after DMA transfer */ 454 + i2c_op(pd, OP_RX_STOP, 0); 455 + pd->pos++; 456 + break; 457 + } 458 + 453 459 if (real_pos < 0) { 454 460 i2c_op(pd, OP_RX_STOP, 0); 455 461 break; ··· 544 536 545 537 sh_mobile_i2c_dma_unmap(pd); 546 538 pd->pos = pd->msg->len; 539 + pd->stop_after_dma = true; 547 540 548 541 iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE); 549 542 } ··· 735 726 bool do_start = pd->send_stop || !i; 736 727 msg = &msgs[i]; 737 728 pd->send_stop = i == num - 1 || msg->flags & I2C_M_STOP; 729 + pd->stop_after_dma = false; 738 730 739 731 err = start_ch(pd, msg, do_start); 740 732 if (err)
+2
drivers/i2c/i2c-core.c
··· 2972 2972 } 2973 2973 EXPORT_SYMBOL(i2c_smbus_xfer); 2974 2974 2975 + #if IS_ENABLED(CONFIG_I2C_SLAVE) 2975 2976 int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb) 2976 2977 { 2977 2978 int ret; ··· 3020 3019 return ret; 3021 3020 } 3022 3021 EXPORT_SYMBOL_GPL(i2c_slave_unregister); 3022 + #endif 3023 3023 3024 3024 MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>"); 3025 3025 MODULE_DESCRIPTION("I2C-Bus main module");
+2 -2
drivers/i2c/i2c-slave-eeprom.c
··· 74 74 struct eeprom_data *eeprom; 75 75 unsigned long flags; 76 76 77 - if (off + count >= attr->size) 77 + if (off + count > attr->size) 78 78 return -EFBIG; 79 79 80 80 eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj)); ··· 92 92 struct eeprom_data *eeprom; 93 93 unsigned long flags; 94 94 95 - if (off + count >= attr->size) 95 + if (off + count > attr->size) 96 96 return -EFBIG; 97 97 98 98 eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj));
-1
drivers/infiniband/core/uverbs_main.c
··· 123 123 struct ib_udata *uhw) = { 124 124 [IB_USER_VERBS_EX_CMD_CREATE_FLOW] = ib_uverbs_ex_create_flow, 125 125 [IB_USER_VERBS_EX_CMD_DESTROY_FLOW] = ib_uverbs_ex_destroy_flow, 126 - [IB_USER_VERBS_EX_CMD_QUERY_DEVICE] = ib_uverbs_ex_query_device 127 126 }; 128 127 129 128 static void ib_uverbs_add_one(struct ib_device *device);
+6 -13
drivers/infiniband/ulp/ipoib/ipoib.h
··· 98 98 99 99 IPOIB_MCAST_FLAG_FOUND = 0, /* used in set_multicast_list */ 100 100 IPOIB_MCAST_FLAG_SENDONLY = 1, 101 - /* 102 - * For IPOIB_MCAST_FLAG_BUSY 103 - * When set, in flight join and mcast->mc is unreliable 104 - * When clear and mcast->mc IS_ERR_OR_NULL, need to restart or 105 - * haven't started yet 106 - * When clear and mcast->mc is valid pointer, join was successful 107 - */ 108 - IPOIB_MCAST_FLAG_BUSY = 2, 101 + IPOIB_MCAST_FLAG_BUSY = 2, /* joining or already joined */ 109 102 IPOIB_MCAST_FLAG_ATTACHED = 3, 103 + IPOIB_MCAST_JOIN_STARTED = 4, 110 104 111 105 MAX_SEND_CQE = 16, 112 106 IPOIB_CM_COPYBREAK = 256, ··· 317 323 struct list_head multicast_list; 318 324 struct rb_root multicast_tree; 319 325 320 - struct workqueue_struct *wq; 321 326 struct delayed_work mcast_task; 322 327 struct work_struct carrier_on_task; 323 328 struct work_struct flush_light; ··· 477 484 void ipoib_pkey_event(struct work_struct *work); 478 485 void ipoib_ib_dev_cleanup(struct net_device *dev); 479 486 480 - int ipoib_ib_dev_open(struct net_device *dev); 487 + int ipoib_ib_dev_open(struct net_device *dev, int flush); 481 488 int ipoib_ib_dev_up(struct net_device *dev); 482 - int ipoib_ib_dev_down(struct net_device *dev); 483 - int ipoib_ib_dev_stop(struct net_device *dev); 489 + int ipoib_ib_dev_down(struct net_device *dev, int flush); 490 + int ipoib_ib_dev_stop(struct net_device *dev, int flush); 484 491 void ipoib_pkey_dev_check_presence(struct net_device *dev); 485 492 486 493 int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port); ··· 492 499 493 500 void ipoib_mcast_restart_task(struct work_struct *work); 494 501 int ipoib_mcast_start_thread(struct net_device *dev); 495 - int ipoib_mcast_stop_thread(struct net_device *dev); 502 + int ipoib_mcast_stop_thread(struct net_device *dev, int flush); 496 503 497 504 void ipoib_mcast_dev_down(struct net_device *dev); 498 505 void ipoib_mcast_dev_flush(struct net_device *dev);
+9 -9
drivers/infiniband/ulp/ipoib/ipoib_cm.c
··· 474 474 } 475 475 476 476 spin_lock_irq(&priv->lock); 477 - queue_delayed_work(priv->wq, 477 + queue_delayed_work(ipoib_workqueue, 478 478 &priv->cm.stale_task, IPOIB_CM_RX_DELAY); 479 479 /* Add this entry to passive ids list head, but do not re-add it 480 480 * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */ ··· 576 576 spin_lock_irqsave(&priv->lock, flags); 577 577 list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list); 578 578 ipoib_cm_start_rx_drain(priv); 579 - queue_work(priv->wq, &priv->cm.rx_reap_task); 579 + queue_work(ipoib_workqueue, &priv->cm.rx_reap_task); 580 580 spin_unlock_irqrestore(&priv->lock, flags); 581 581 } else 582 582 ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n", ··· 603 603 spin_lock_irqsave(&priv->lock, flags); 604 604 list_move(&p->list, &priv->cm.rx_reap_list); 605 605 spin_unlock_irqrestore(&priv->lock, flags); 606 - queue_work(priv->wq, &priv->cm.rx_reap_task); 606 + queue_work(ipoib_workqueue, &priv->cm.rx_reap_task); 607 607 } 608 608 return; 609 609 } ··· 827 827 828 828 if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) { 829 829 list_move(&tx->list, &priv->cm.reap_list); 830 - queue_work(priv->wq, &priv->cm.reap_task); 830 + queue_work(ipoib_workqueue, &priv->cm.reap_task); 831 831 } 832 832 833 833 clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags); ··· 1255 1255 1256 1256 if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) { 1257 1257 list_move(&tx->list, &priv->cm.reap_list); 1258 - queue_work(priv->wq, &priv->cm.reap_task); 1258 + queue_work(ipoib_workqueue, &priv->cm.reap_task); 1259 1259 } 1260 1260 1261 1261 spin_unlock_irqrestore(&priv->lock, flags); ··· 1284 1284 tx->dev = dev; 1285 1285 list_add(&tx->list, &priv->cm.start_list); 1286 1286 set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags); 1287 - queue_work(priv->wq, &priv->cm.start_task); 1287 + queue_work(ipoib_workqueue, &priv->cm.start_task); 1288 1288 return tx; 1289 1289 } 1290 1290 ··· 1295 1295 if 
(test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) { 1296 1296 spin_lock_irqsave(&priv->lock, flags); 1297 1297 list_move(&tx->list, &priv->cm.reap_list); 1298 - queue_work(priv->wq, &priv->cm.reap_task); 1298 + queue_work(ipoib_workqueue, &priv->cm.reap_task); 1299 1299 ipoib_dbg(priv, "Reap connection for gid %pI6\n", 1300 1300 tx->neigh->daddr + 4); 1301 1301 tx->neigh = NULL; ··· 1417 1417 1418 1418 skb_queue_tail(&priv->cm.skb_queue, skb); 1419 1419 if (e) 1420 - queue_work(priv->wq, &priv->cm.skb_task); 1420 + queue_work(ipoib_workqueue, &priv->cm.skb_task); 1421 1421 } 1422 1422 1423 1423 static void ipoib_cm_rx_reap(struct work_struct *work) ··· 1450 1450 } 1451 1451 1452 1452 if (!list_empty(&priv->cm.passive_ids)) 1453 - queue_delayed_work(priv->wq, 1453 + queue_delayed_work(ipoib_workqueue, 1454 1454 &priv->cm.stale_task, IPOIB_CM_RX_DELAY); 1455 1455 spin_unlock_irq(&priv->lock); 1456 1456 }
+14 -13
drivers/infiniband/ulp/ipoib/ipoib_ib.c
··· 655 655 __ipoib_reap_ah(dev); 656 656 657 657 if (!test_bit(IPOIB_STOP_REAPER, &priv->flags)) 658 - queue_delayed_work(priv->wq, &priv->ah_reap_task, 658 + queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task, 659 659 round_jiffies_relative(HZ)); 660 660 } 661 661 ··· 664 664 drain_tx_cq((struct net_device *)ctx); 665 665 } 666 666 667 - int ipoib_ib_dev_open(struct net_device *dev) 667 + int ipoib_ib_dev_open(struct net_device *dev, int flush) 668 668 { 669 669 struct ipoib_dev_priv *priv = netdev_priv(dev); 670 670 int ret; ··· 696 696 } 697 697 698 698 clear_bit(IPOIB_STOP_REAPER, &priv->flags); 699 - queue_delayed_work(priv->wq, &priv->ah_reap_task, 699 + queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task, 700 700 round_jiffies_relative(HZ)); 701 701 702 702 if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) ··· 706 706 dev_stop: 707 707 if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) 708 708 napi_enable(&priv->napi); 709 - ipoib_ib_dev_stop(dev); 709 + ipoib_ib_dev_stop(dev, flush); 710 710 return -1; 711 711 } 712 712 ··· 738 738 return ipoib_mcast_start_thread(dev); 739 739 } 740 740 741 - int ipoib_ib_dev_down(struct net_device *dev) 741 + int ipoib_ib_dev_down(struct net_device *dev, int flush) 742 742 { 743 743 struct ipoib_dev_priv *priv = netdev_priv(dev); 744 744 ··· 747 747 clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags); 748 748 netif_carrier_off(dev); 749 749 750 - ipoib_mcast_stop_thread(dev); 750 + ipoib_mcast_stop_thread(dev, flush); 751 751 ipoib_mcast_dev_flush(dev); 752 752 753 753 ipoib_flush_paths(dev); ··· 807 807 local_bh_enable(); 808 808 } 809 809 810 - int ipoib_ib_dev_stop(struct net_device *dev) 810 + int ipoib_ib_dev_stop(struct net_device *dev, int flush) 811 811 { 812 812 struct ipoib_dev_priv *priv = netdev_priv(dev); 813 813 struct ib_qp_attr qp_attr; ··· 880 880 /* Wait for all AHs to be reaped */ 881 881 set_bit(IPOIB_STOP_REAPER, &priv->flags); 882 882 cancel_delayed_work(&priv->ah_reap_task); 
883 - flush_workqueue(priv->wq); 883 + if (flush) 884 + flush_workqueue(ipoib_workqueue); 884 885 885 886 begin = jiffies; 886 887 ··· 918 917 (unsigned long) dev); 919 918 920 919 if (dev->flags & IFF_UP) { 921 - if (ipoib_ib_dev_open(dev)) { 920 + if (ipoib_ib_dev_open(dev, 1)) { 922 921 ipoib_transport_dev_cleanup(dev); 923 922 return -ENODEV; 924 923 } ··· 1040 1039 } 1041 1040 1042 1041 if (level >= IPOIB_FLUSH_NORMAL) 1043 - ipoib_ib_dev_down(dev); 1042 + ipoib_ib_dev_down(dev, 0); 1044 1043 1045 1044 if (level == IPOIB_FLUSH_HEAVY) { 1046 1045 if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) 1047 - ipoib_ib_dev_stop(dev); 1048 - if (ipoib_ib_dev_open(dev) != 0) 1046 + ipoib_ib_dev_stop(dev, 0); 1047 + if (ipoib_ib_dev_open(dev, 0) != 0) 1049 1048 return; 1050 1049 if (netif_queue_stopped(dev)) 1051 1050 netif_start_queue(dev); ··· 1097 1096 */ 1098 1097 ipoib_flush_paths(dev); 1099 1098 1100 - ipoib_mcast_stop_thread(dev); 1099 + ipoib_mcast_stop_thread(dev, 1); 1101 1100 ipoib_mcast_dev_flush(dev); 1102 1101 1103 1102 ipoib_transport_dev_cleanup(dev);
+17 -32
drivers/infiniband/ulp/ipoib/ipoib_main.c
··· 108 108 109 109 set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); 110 110 111 - if (ipoib_ib_dev_open(dev)) { 111 + if (ipoib_ib_dev_open(dev, 1)) { 112 112 if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) 113 113 return 0; 114 114 goto err_disable; ··· 139 139 return 0; 140 140 141 141 err_stop: 142 - ipoib_ib_dev_stop(dev); 142 + ipoib_ib_dev_stop(dev, 1); 143 143 144 144 err_disable: 145 145 clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); ··· 157 157 158 158 netif_stop_queue(dev); 159 159 160 - ipoib_ib_dev_down(dev); 161 - ipoib_ib_dev_stop(dev); 160 + ipoib_ib_dev_down(dev, 1); 161 + ipoib_ib_dev_stop(dev, 0); 162 162 163 163 if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { 164 164 struct ipoib_dev_priv *cpriv; ··· 839 839 return; 840 840 } 841 841 842 - queue_work(priv->wq, &priv->restart_task); 842 + queue_work(ipoib_workqueue, &priv->restart_task); 843 843 } 844 844 845 845 static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr) ··· 954 954 __ipoib_reap_neigh(priv); 955 955 956 956 if (!test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags)) 957 - queue_delayed_work(priv->wq, &priv->neigh_reap_task, 957 + queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task, 958 958 arp_tbl.gc_interval); 959 959 } 960 960 ··· 1133 1133 1134 1134 /* start garbage collection */ 1135 1135 clear_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); 1136 - queue_delayed_work(priv->wq, &priv->neigh_reap_task, 1136 + queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task, 1137 1137 arp_tbl.gc_interval); 1138 1138 1139 1139 return 0; ··· 1262 1262 { 1263 1263 struct ipoib_dev_priv *priv = netdev_priv(dev); 1264 1264 1265 + if (ipoib_neigh_hash_init(priv) < 0) 1266 + goto out; 1265 1267 /* Allocate RX/TX "rings" to hold queued skbs */ 1266 1268 priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring, 1267 1269 GFP_KERNEL); 1268 1270 if (!priv->rx_ring) { 1269 1271 printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n", 1270 1272 ca->name, ipoib_recvq_size); 
1271 - goto out; 1273 + goto out_neigh_hash_cleanup; 1272 1274 } 1273 1275 1274 1276 priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring); ··· 1285 1283 if (ipoib_ib_dev_init(dev, ca, port)) 1286 1284 goto out_tx_ring_cleanup; 1287 1285 1288 - /* 1289 - * Must be after ipoib_ib_dev_init so we can allocate a per 1290 - * device wq there and use it here 1291 - */ 1292 - if (ipoib_neigh_hash_init(priv) < 0) 1293 - goto out_dev_uninit; 1294 - 1295 1286 return 0; 1296 - 1297 - out_dev_uninit: 1298 - ipoib_ib_dev_cleanup(dev); 1299 1287 1300 1288 out_tx_ring_cleanup: 1301 1289 vfree(priv->tx_ring); ··· 1293 1301 out_rx_ring_cleanup: 1294 1302 kfree(priv->rx_ring); 1295 1303 1304 + out_neigh_hash_cleanup: 1305 + ipoib_neigh_hash_uninit(dev); 1296 1306 out: 1297 1307 return -ENOMEM; 1298 1308 } ··· 1317 1323 } 1318 1324 unregister_netdevice_many(&head); 1319 1325 1320 - /* 1321 - * Must be before ipoib_ib_dev_cleanup or we delete an in use 1322 - * work queue 1323 - */ 1324 - ipoib_neigh_hash_uninit(dev); 1325 - 1326 1326 ipoib_ib_dev_cleanup(dev); 1327 1327 1328 1328 kfree(priv->rx_ring); ··· 1324 1336 1325 1337 priv->rx_ring = NULL; 1326 1338 priv->tx_ring = NULL; 1339 + 1340 + ipoib_neigh_hash_uninit(dev); 1327 1341 } 1328 1342 1329 1343 static const struct header_ops ipoib_header_ops = { ··· 1636 1646 /* Stop GC if started before flush */ 1637 1647 set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); 1638 1648 cancel_delayed_work(&priv->neigh_reap_task); 1639 - flush_workqueue(priv->wq); 1649 + flush_workqueue(ipoib_workqueue); 1640 1650 1641 1651 event_failed: 1642 1652 ipoib_dev_cleanup(priv->dev); ··· 1707 1717 /* Stop GC */ 1708 1718 set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); 1709 1719 cancel_delayed_work(&priv->neigh_reap_task); 1710 - flush_workqueue(priv->wq); 1720 + flush_workqueue(ipoib_workqueue); 1711 1721 1712 1722 unregister_netdev(priv->dev); 1713 1723 free_netdev(priv->dev); ··· 1748 1758 * unregister_netdev() and linkwatch_event take the rtnl lock, 
1749 1759 * so flush_scheduled_work() can deadlock during device 1750 1760 * removal. 1751 - * 1752 - * In addition, bringing one device up and another down at the 1753 - * same time can deadlock a single workqueue, so we have this 1754 - * global fallback workqueue, but we also attempt to open a 1755 - * per device workqueue each time we bring an interface up 1756 1761 */ 1757 - ipoib_workqueue = create_singlethread_workqueue("ipoib_flush"); 1762 + ipoib_workqueue = create_singlethread_workqueue("ipoib"); 1758 1763 if (!ipoib_workqueue) { 1759 1764 ret = -ENOMEM; 1760 1765 goto err_fs;
+88 -153
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
··· 190 190 spin_unlock_irq(&priv->lock); 191 191 priv->tx_wr.wr.ud.remote_qkey = priv->qkey; 192 192 set_qkey = 1; 193 + 194 + if (!ipoib_cm_admin_enabled(dev)) { 195 + rtnl_lock(); 196 + dev_set_mtu(dev, min(priv->mcast_mtu, priv->admin_mtu)); 197 + rtnl_unlock(); 198 + } 193 199 } 194 200 195 201 if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) { ··· 277 271 struct ipoib_mcast *mcast = multicast->context; 278 272 struct net_device *dev = mcast->dev; 279 273 280 - /* 281 - * We have to take the mutex to force mcast_sendonly_join to 282 - * return from ib_sa_multicast_join and set mcast->mc to a 283 - * valid value. Otherwise we were racing with ourselves in 284 - * that we might fail here, but get a valid return from 285 - * ib_sa_multicast_join after we had cleared mcast->mc here, 286 - * resulting in mis-matched joins and leaves and a deadlock 287 - */ 288 - mutex_lock(&mcast_mutex); 289 - 290 274 /* We trap for port events ourselves. */ 291 275 if (status == -ENETRESET) 292 - goto out; 276 + return 0; 293 277 294 278 if (!status) 295 279 status = ipoib_mcast_join_finish(mcast, &multicast->rec); 296 280 297 281 if (status) { 298 282 if (mcast->logcount++ < 20) 299 - ipoib_dbg_mcast(netdev_priv(dev), "sendonly multicast " 300 - "join failed for %pI6, status %d\n", 283 + ipoib_dbg_mcast(netdev_priv(dev), "multicast join failed for %pI6, status %d\n", 301 284 mcast->mcmember.mgid.raw, status); 302 285 303 286 /* Flush out any queued packets */ ··· 296 301 dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue)); 297 302 } 298 303 netif_tx_unlock_bh(dev); 304 + 305 + /* Clear the busy flag so we try again */ 306 + status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, 307 + &mcast->flags); 299 308 } 300 - out: 301 - clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); 302 - if (status) 303 - mcast->mc = NULL; 304 - complete(&mcast->done); 305 - if (status == -ENETRESET) 306 - status = 0; 307 - mutex_unlock(&mcast_mutex); 308 309 return status; 309 310 } 310 311 ··· 318 
327 int ret = 0; 319 328 320 329 if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) { 321 - ipoib_dbg_mcast(priv, "device shutting down, no sendonly " 322 - "multicast joins\n"); 330 + ipoib_dbg_mcast(priv, "device shutting down, no multicast joins\n"); 323 331 return -ENODEV; 324 332 } 325 333 326 - if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) { 327 - ipoib_dbg_mcast(priv, "multicast entry busy, skipping " 328 - "sendonly join\n"); 334 + if (test_and_set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) { 335 + ipoib_dbg_mcast(priv, "multicast entry busy, skipping\n"); 329 336 return -EBUSY; 330 337 } 331 338 ··· 331 342 rec.port_gid = priv->local_gid; 332 343 rec.pkey = cpu_to_be16(priv->pkey); 333 344 334 - mutex_lock(&mcast_mutex); 335 - init_completion(&mcast->done); 336 - set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); 337 345 mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, 338 346 priv->port, &rec, 339 347 IB_SA_MCMEMBER_REC_MGID | ··· 343 357 if (IS_ERR(mcast->mc)) { 344 358 ret = PTR_ERR(mcast->mc); 345 359 clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); 346 - complete(&mcast->done); 347 - ipoib_warn(priv, "ib_sa_join_multicast for sendonly join " 348 - "failed (ret = %d)\n", ret); 360 + ipoib_warn(priv, "ib_sa_join_multicast failed (ret = %d)\n", 361 + ret); 349 362 } else { 350 - ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting " 351 - "sendonly join\n", mcast->mcmember.mgid.raw); 363 + ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting join\n", 364 + mcast->mcmember.mgid.raw); 352 365 } 353 - mutex_unlock(&mcast_mutex); 354 366 355 367 return ret; 356 368 } ··· 359 375 carrier_on_task); 360 376 struct ib_port_attr attr; 361 377 378 + /* 379 + * Take rtnl_lock to avoid racing with ipoib_stop() and 380 + * turning the carrier back on while a device is being 381 + * removed. 
382 + */ 362 383 if (ib_query_port(priv->ca, priv->port, &attr) || 363 384 attr.state != IB_PORT_ACTIVE) { 364 385 ipoib_dbg(priv, "Keeping carrier off until IB port is active\n"); 365 386 return; 366 387 } 367 388 368 - /* 369 - * Take rtnl_lock to avoid racing with ipoib_stop() and 370 - * turning the carrier back on while a device is being 371 - * removed. However, ipoib_stop() will attempt to flush 372 - * the workqueue while holding the rtnl lock, so loop 373 - * on trylock until either we get the lock or we see 374 - * FLAG_ADMIN_UP go away as that signals that we are bailing 375 - * and can safely ignore the carrier on work. 376 - */ 377 - while (!rtnl_trylock()) { 378 - if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) 379 - return; 380 - else 381 - msleep(20); 382 - } 383 - if (!ipoib_cm_admin_enabled(priv->dev)) 384 - dev_set_mtu(priv->dev, min(priv->mcast_mtu, priv->admin_mtu)); 389 + rtnl_lock(); 385 390 netif_carrier_on(priv->dev); 386 391 rtnl_unlock(); 387 392 } ··· 385 412 ipoib_dbg_mcast(priv, "join completion for %pI6 (status %d)\n", 386 413 mcast->mcmember.mgid.raw, status); 387 414 388 - /* 389 - * We have to take the mutex to force mcast_join to 390 - * return from ib_sa_multicast_join and set mcast->mc to a 391 - * valid value. Otherwise we were racing with ourselves in 392 - * that we might fail here, but get a valid return from 393 - * ib_sa_multicast_join after we had cleared mcast->mc here, 394 - * resulting in mis-matched joins and leaves and a deadlock 395 - */ 396 - mutex_lock(&mcast_mutex); 397 - 398 415 /* We trap for port events ourselves. 
*/ 399 - if (status == -ENETRESET) 416 + if (status == -ENETRESET) { 417 + status = 0; 400 418 goto out; 419 + } 401 420 402 421 if (!status) 403 422 status = ipoib_mcast_join_finish(mcast, &multicast->rec); 404 423 405 424 if (!status) { 406 425 mcast->backoff = 1; 426 + mutex_lock(&mcast_mutex); 407 427 if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) 408 - queue_delayed_work(priv->wq, &priv->mcast_task, 0); 428 + queue_delayed_work(ipoib_workqueue, 429 + &priv->mcast_task, 0); 430 + mutex_unlock(&mcast_mutex); 409 431 410 432 /* 411 - * Defer carrier on work to priv->wq to avoid a 433 + * Defer carrier on work to ipoib_workqueue to avoid a 412 434 * deadlock on rtnl_lock here. 413 435 */ 414 436 if (mcast == priv->broadcast) 415 - queue_work(priv->wq, &priv->carrier_on_task); 416 - } else { 417 - if (mcast->logcount++ < 20) { 418 - if (status == -ETIMEDOUT || status == -EAGAIN) { 419 - ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n", 420 - mcast->mcmember.mgid.raw, status); 421 - } else { 422 - ipoib_warn(priv, "multicast join failed for %pI6, status %d\n", 423 - mcast->mcmember.mgid.raw, status); 424 - } 425 - } 437 + queue_work(ipoib_workqueue, &priv->carrier_on_task); 426 438 427 - mcast->backoff *= 2; 428 - if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS) 429 - mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS; 430 - } 431 - out: 432 - spin_lock_irq(&priv->lock); 433 - clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); 434 - if (status) 435 - mcast->mc = NULL; 436 - complete(&mcast->done); 437 - if (status == -ENETRESET) 438 439 status = 0; 439 - if (status && test_bit(IPOIB_MCAST_RUN, &priv->flags)) 440 - queue_delayed_work(priv->wq, &priv->mcast_task, 440 + goto out; 441 + } 442 + 443 + if (mcast->logcount++ < 20) { 444 + if (status == -ETIMEDOUT || status == -EAGAIN) { 445 + ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n", 446 + mcast->mcmember.mgid.raw, status); 447 + } else { 448 + ipoib_warn(priv, "multicast join failed for 
%pI6, status %d\n", 449 + mcast->mcmember.mgid.raw, status); 450 + } 451 + } 452 + 453 + mcast->backoff *= 2; 454 + if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS) 455 + mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS; 456 + 457 + /* Clear the busy flag so we try again */ 458 + status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); 459 + 460 + mutex_lock(&mcast_mutex); 461 + spin_lock_irq(&priv->lock); 462 + if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) 463 + queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 441 464 mcast->backoff * HZ); 442 465 spin_unlock_irq(&priv->lock); 443 466 mutex_unlock(&mcast_mutex); 444 - 467 + out: 468 + complete(&mcast->done); 445 469 return status; 446 470 } 447 471 ··· 487 517 rec.hop_limit = priv->broadcast->mcmember.hop_limit; 488 518 } 489 519 490 - mutex_lock(&mcast_mutex); 491 - init_completion(&mcast->done); 492 520 set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); 521 + init_completion(&mcast->done); 522 + set_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags); 523 + 493 524 mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port, 494 525 &rec, comp_mask, GFP_KERNEL, 495 526 ipoib_mcast_join_complete, mcast); ··· 504 533 if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS) 505 534 mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS; 506 535 536 + mutex_lock(&mcast_mutex); 507 537 if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) 508 - queue_delayed_work(priv->wq, &priv->mcast_task, 538 + queue_delayed_work(ipoib_workqueue, 539 + &priv->mcast_task, 509 540 mcast->backoff * HZ); 541 + mutex_unlock(&mcast_mutex); 510 542 } 511 - mutex_unlock(&mcast_mutex); 512 543 } 513 544 514 545 void ipoib_mcast_join_task(struct work_struct *work) ··· 547 574 ipoib_warn(priv, "failed to allocate broadcast group\n"); 548 575 mutex_lock(&mcast_mutex); 549 576 if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) 550 - queue_delayed_work(priv->wq, &priv->mcast_task, 551 - HZ); 577 + queue_delayed_work(ipoib_workqueue, 578 + &priv->mcast_task, HZ); 552 579 
mutex_unlock(&mcast_mutex); 553 580 return; 554 581 } ··· 563 590 } 564 591 565 592 if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) { 566 - if (IS_ERR_OR_NULL(priv->broadcast->mc) && 567 - !test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags)) 593 + if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags)) 568 594 ipoib_mcast_join(dev, priv->broadcast, 0); 569 595 return; 570 596 } ··· 571 599 while (1) { 572 600 struct ipoib_mcast *mcast = NULL; 573 601 574 - /* 575 - * Need the mutex so our flags are consistent, need the 576 - * priv->lock so we don't race with list removals in either 577 - * mcast_dev_flush or mcast_restart_task 578 - */ 579 - mutex_lock(&mcast_mutex); 580 602 spin_lock_irq(&priv->lock); 581 603 list_for_each_entry(mcast, &priv->multicast_list, list) { 582 - if (IS_ERR_OR_NULL(mcast->mc) && 583 - !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags) && 584 - !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) { 604 + if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) 605 + && !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags) 606 + && !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) { 585 607 /* Found the next unjoined group */ 586 608 break; 587 609 } 588 610 } 589 611 spin_unlock_irq(&priv->lock); 590 - mutex_unlock(&mcast_mutex); 591 612 592 613 if (&mcast->list == &priv->multicast_list) { 593 614 /* All done */ 594 615 break; 595 616 } 596 617 597 - if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) 598 - ipoib_mcast_sendonly_join(mcast); 599 - else 600 - ipoib_mcast_join(dev, mcast, 1); 618 + ipoib_mcast_join(dev, mcast, 1); 601 619 return; 602 620 } 603 621 ··· 604 642 605 643 mutex_lock(&mcast_mutex); 606 644 if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags)) 607 - queue_delayed_work(priv->wq, &priv->mcast_task, 0); 645 + queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0); 608 646 mutex_unlock(&mcast_mutex); 609 647 610 648 return 0; 611 649 } 612 650 613 - int ipoib_mcast_stop_thread(struct 
net_device *dev) 651 + int ipoib_mcast_stop_thread(struct net_device *dev, int flush) 614 652 { 615 653 struct ipoib_dev_priv *priv = netdev_priv(dev); 616 654 ··· 621 659 cancel_delayed_work(&priv->mcast_task); 622 660 mutex_unlock(&mcast_mutex); 623 661 624 - flush_workqueue(priv->wq); 662 + if (flush) 663 + flush_workqueue(ipoib_workqueue); 625 664 626 665 return 0; 627 666 } ··· 633 670 int ret = 0; 634 671 635 672 if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) 636 - ipoib_warn(priv, "ipoib_mcast_leave on an in-flight join\n"); 637 - 638 - if (!IS_ERR_OR_NULL(mcast->mc)) 639 673 ib_sa_free_multicast(mcast->mc); 640 674 641 675 if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) { ··· 685 725 memcpy(mcast->mcmember.mgid.raw, mgid, sizeof (union ib_gid)); 686 726 __ipoib_mcast_add(dev, mcast); 687 727 list_add_tail(&mcast->list, &priv->multicast_list); 688 - if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags)) 689 - queue_delayed_work(priv->wq, &priv->mcast_task, 0); 690 728 } 691 729 692 730 if (!mcast->ah) { ··· 698 740 if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) 699 741 ipoib_dbg_mcast(priv, "no address vector, " 700 742 "but multicast join already started\n"); 743 + else if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) 744 + ipoib_mcast_sendonly_join(mcast); 701 745 702 746 /* 703 747 * If lookup completes between here and out:, don't ··· 759 799 760 800 spin_unlock_irqrestore(&priv->lock, flags); 761 801 762 - /* 763 - * make sure the in-flight joins have finished before we attempt 764 - * to leave 765 - */ 802 + /* seperate between the wait to the leave*/ 766 803 list_for_each_entry_safe(mcast, tmcast, &remove_list, list) 767 - if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) 804 + if (test_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags)) 768 805 wait_for_completion(&mcast->done); 769 806 770 807 list_for_each_entry_safe(mcast, tmcast, &remove_list, list) { ··· 793 836 struct ib_sa_mcmember_rec rec; 794 837 795 
838 ipoib_dbg_mcast(priv, "restarting multicast task\n"); 839 + 840 + ipoib_mcast_stop_thread(dev, 0); 796 841 797 842 local_irq_save(flags); 798 843 netif_addr_lock(dev); ··· 880 921 netif_addr_unlock(dev); 881 922 local_irq_restore(flags); 882 923 883 - /* 884 - * make sure the in-flight joins have finished before we attempt 885 - * to leave 886 - */ 887 - list_for_each_entry_safe(mcast, tmcast, &remove_list, list) 888 - if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) 889 - wait_for_completion(&mcast->done); 890 - 891 - /* 892 - * We have to cancel outside of the spinlock, but we have to 893 - * take the rtnl lock or else we race with the removal of 894 - * entries from the remove list in mcast_dev_flush as part 895 - * of ipoib_stop(). We detect the drop of the ADMIN_UP flag 896 - * to signal that we have hit this particular race, and we 897 - * return since we know we don't need to do anything else 898 - * anyway. 899 - */ 900 - while (!rtnl_trylock()) { 901 - if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) 902 - return; 903 - else 904 - msleep(20); 905 - } 924 + /* We have to cancel outside of the spinlock */ 906 925 list_for_each_entry_safe(mcast, tmcast, &remove_list, list) { 907 926 ipoib_mcast_leave(mcast->dev, mcast); 908 927 ipoib_mcast_free(mcast); 909 928 } 910 - /* 911 - * Restart our join task if needed 912 - */ 913 - ipoib_mcast_start_thread(dev); 914 - rtnl_unlock(); 929 + 930 + if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) 931 + ipoib_mcast_start_thread(dev); 915 932 } 916 933 917 934 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+1 -21
drivers/infiniband/ulp/ipoib/ipoib_verbs.c
··· 145 145 int ret, size; 146 146 int i; 147 147 148 - /* 149 - * the various IPoIB tasks assume they will never race against 150 - * themselves, so always use a single thread workqueue 151 - */ 152 - priv->wq = create_singlethread_workqueue("ipoib_wq"); 153 - if (!priv->wq) { 154 - printk(KERN_WARNING "ipoib: failed to allocate device WQ\n"); 155 - return -ENODEV; 156 - } 157 - 158 148 priv->pd = ib_alloc_pd(priv->ca); 159 149 if (IS_ERR(priv->pd)) { 160 150 printk(KERN_WARNING "%s: failed to allocate PD\n", ca->name); 161 - goto out_free_wq; 151 + return -ENODEV; 162 152 } 163 153 164 154 priv->mr = ib_get_dma_mr(priv->pd, IB_ACCESS_LOCAL_WRITE); ··· 242 252 243 253 out_free_pd: 244 254 ib_dealloc_pd(priv->pd); 245 - 246 - out_free_wq: 247 - destroy_workqueue(priv->wq); 248 - priv->wq = NULL; 249 255 return -ENODEV; 250 256 } 251 257 ··· 270 284 271 285 if (ib_dealloc_pd(priv->pd)) 272 286 ipoib_warn(priv, "ib_dealloc_pd failed\n"); 273 - 274 - if (priv->wq) { 275 - flush_workqueue(priv->wq); 276 - destroy_workqueue(priv->wq); 277 - priv->wq = NULL; 278 - } 279 287 } 280 288 281 289 void ipoib_event(struct ib_event_handler *handler,
+16
drivers/input/mouse/elantech.c
··· 1097 1097 * Asus UX31 0x361f00 20, 15, 0e clickpad 1098 1098 * Asus UX32VD 0x361f02 00, 15, 0e clickpad 1099 1099 * Avatar AVIU-145A2 0x361f00 ? clickpad 1100 + * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons 1101 + * Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons 1100 1102 * Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**) 1101 1103 * Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons 1102 1104 * Lenovo L430 0x350f02 b9, 15, 0c 2 hw buttons (*) ··· 1475 1473 .matches = { 1476 1474 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), 1477 1475 DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H730"), 1476 + }, 1477 + }, 1478 + { 1479 + /* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */ 1480 + .matches = { 1481 + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), 1482 + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E554"), 1483 + }, 1484 + }, 1485 + { 1486 + /* Fujitsu LIFEBOOK E544 does not work with crc_enabled == 0 */ 1487 + .matches = { 1488 + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), 1489 + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E544"), 1478 1490 }, 1479 1491 }, 1480 1492 #endif
+4 -3
drivers/input/mouse/synaptics.c
··· 135 135 1232, 5710, 1156, 4696 136 136 }, 137 137 { 138 - (const char * const []){"LEN0034", "LEN0036", "LEN0039", 139 - "LEN2002", "LEN2004", NULL}, 138 + (const char * const []){"LEN0034", "LEN0036", "LEN0037", 139 + "LEN0039", "LEN2002", "LEN2004", 140 + NULL}, 140 141 1024, 5112, 2024, 4832 141 142 }, 142 143 { ··· 166 165 "LEN0034", /* T431s, L440, L540, T540, W540, X1 Carbon 2nd */ 167 166 "LEN0035", /* X240 */ 168 167 "LEN0036", /* T440 */ 169 - "LEN0037", 168 + "LEN0037", /* X1 Carbon 2nd */ 170 169 "LEN0038", 171 170 "LEN0039", /* T440s */ 172 171 "LEN0041",
+8
drivers/input/serio/i8042-x86ia64io.h
··· 152 152 }, 153 153 }, 154 154 { 155 + /* Medion Akoya E7225 */ 156 + .matches = { 157 + DMI_MATCH(DMI_SYS_VENDOR, "Medion"), 158 + DMI_MATCH(DMI_PRODUCT_NAME, "Akoya E7225"), 159 + DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"), 160 + }, 161 + }, 162 + { 155 163 /* Blue FB5601 */ 156 164 .matches = { 157 165 DMI_MATCH(DMI_SYS_VENDOR, "blue"),
+2 -1
drivers/iommu/tegra-gart.c
··· 315 315 .attach_dev = gart_iommu_attach_dev, 316 316 .detach_dev = gart_iommu_detach_dev, 317 317 .map = gart_iommu_map, 318 + .map_sg = default_iommu_map_sg, 318 319 .unmap = gart_iommu_unmap, 319 320 .iova_to_phys = gart_iommu_iova_to_phys, 320 321 .pgsize_bitmap = GART_IOMMU_PGSIZES, ··· 396 395 do_gart_setup(gart, NULL); 397 396 398 397 gart_handle = gart; 399 - bus_set_iommu(&platform_bus_type, &gart_iommu_ops); 398 + 400 399 return 0; 401 400 } 402 401
+1 -1
drivers/isdn/hardware/eicon/message.c
··· 1474 1474 add_ai(plci, &parms[5]); 1475 1475 sig_req(plci, REJECT, 0); 1476 1476 } 1477 - else if (Reject == 1 || Reject > 9) 1477 + else if (Reject == 1 || Reject >= 9) 1478 1478 { 1479 1479 add_ai(plci, &parms[5]); 1480 1480 sig_req(plci, HANGUP, 0);
+13
drivers/md/bitmap.c
··· 72 72 /* this page has not been allocated yet */ 73 73 74 74 spin_unlock_irq(&bitmap->lock); 75 + /* It is possible that this is being called inside a 76 + * prepare_to_wait/finish_wait loop from raid5c:make_request(). 77 + * In general it is not permitted to sleep in that context as it 78 + * can cause the loop to spin freely. 79 + * That doesn't apply here as we can only reach this point 80 + * once with any loop. 81 + * When this function completes, either bp[page].map or 82 + * bp[page].hijacked. In either case, this function will 83 + * abort before getting to this point again. So there is 84 + * no risk of a free-spin, and so it is safe to assert 85 + * that sleeping here is allowed. 86 + */ 87 + sched_annotate_sleep(); 75 88 mappage = kzalloc(PAGE_SIZE, GFP_NOIO); 76 89 spin_lock_irq(&bitmap->lock); 77 90
+5 -4
drivers/md/dm-cache-metadata.c
··· 683 683 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 684 684 if (!cmd) { 685 685 DMERR("could not allocate metadata struct"); 686 - return NULL; 686 + return ERR_PTR(-ENOMEM); 687 687 } 688 688 689 689 atomic_set(&cmd->ref_count, 1); ··· 745 745 return cmd; 746 746 747 747 cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size); 748 - if (cmd) { 748 + if (!IS_ERR(cmd)) { 749 749 mutex_lock(&table_lock); 750 750 cmd2 = lookup(bdev); 751 751 if (cmd2) { ··· 780 780 { 781 781 struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size, 782 782 may_format_device, policy_hint_size); 783 - if (cmd && !same_params(cmd, data_block_size)) { 783 + 784 + if (!IS_ERR(cmd) && !same_params(cmd, data_block_size)) { 784 785 dm_cache_metadata_close(cmd); 785 - return NULL; 786 + return ERR_PTR(-EINVAL); 786 787 } 787 788 788 789 return cmd;
+6
drivers/md/dm-thin.c
··· 3385 3385 struct pool_c *pt = ti->private; 3386 3386 struct pool *pool = pt->pool; 3387 3387 3388 + if (get_pool_mode(pool) >= PM_READ_ONLY) { 3389 + DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode", 3390 + dm_device_name(pool->pool_md)); 3391 + return -EINVAL; 3392 + } 3393 + 3388 3394 if (!strcasecmp(argv[0], "create_thin")) 3389 3395 r = process_create_thin_mesg(argc, argv, pool); 3390 3396
+5
drivers/md/raid5.c
··· 3195 3195 (unsigned long long)sh->sector, 3196 3196 rcw, qread, test_bit(STRIPE_DELAYED, &sh->state)); 3197 3197 } 3198 + 3199 + if (rcw > disks && rmw > disks && 3200 + !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 3201 + set_bit(STRIPE_DELAYED, &sh->state); 3202 + 3198 3203 /* now if nothing is locked, and if we have enough data, 3199 3204 * we can start a write request 3200 3205 */
-1
drivers/net/caif/caif_hsi.c
··· 1415 1415 1416 1416 cfhsi = netdev_priv(dev); 1417 1417 cfhsi_netlink_parms(data, cfhsi); 1418 - dev_net_set(cfhsi->ndev, src_net); 1419 1418 1420 1419 get_ops = symbol_get(cfhsi_get_ops); 1421 1420 if (!get_ops) {
+2 -2
drivers/net/ethernet/amd/Kconfig
··· 45 45 46 46 config LANCE 47 47 tristate "AMD LANCE and PCnet (AT1500 and NE2100) support" 48 - depends on ISA && ISA_DMA_API 48 + depends on ISA && ISA_DMA_API && !ARM 49 49 ---help--- 50 50 If you have a network (Ethernet) card of this type, say Y and read 51 51 the Ethernet-HOWTO, available from ··· 142 142 143 143 config NI65 144 144 tristate "NI6510 support" 145 - depends on ISA && ISA_DMA_API 145 + depends on ISA && ISA_DMA_API && !ARM 146 146 ---help--- 147 147 If you have a network (Ethernet) card of this type, say Y and read 148 148 the Ethernet-HOWTO, available from
+2
drivers/net/ethernet/amd/nmclan_cs.c
··· 952 952 do { 953 953 /* WARNING: MACE_IR is a READ/CLEAR port! */ 954 954 status = inb(ioaddr + AM2150_MACE_BASE + MACE_IR); 955 + if (!(status & ~MACE_IMR_DEFAULT) && IntrCnt == MACE_MAX_IR_ITERATIONS) 956 + return IRQ_NONE; 955 957 956 958 pr_debug("mace_interrupt: irq 0x%X status 0x%X.\n", irq, status); 957 959
+3 -1
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
··· 522 522 hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN); 523 523 hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN); 524 524 hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA); 525 + hw_feat->rss = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN); 525 526 hw_feat->tc_cnt = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC); 526 527 hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, 527 528 HASHTBLSZ); ··· 552 551 break; 553 552 } 554 553 555 - /* The Queue and Channel counts are zero based so increment them 554 + /* The Queue, Channel and TC counts are zero based so increment them 556 555 * to get the actual number 557 556 */ 558 557 hw_feat->rx_q_cnt++; 559 558 hw_feat->tx_q_cnt++; 560 559 hw_feat->rx_ch_cnt++; 561 560 hw_feat->tx_ch_cnt++; 561 + hw_feat->tc_cnt++; 562 562 563 563 DBGPR("<--xgbe_get_all_hw_features\n"); 564 564 }
+2
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
··· 373 373 if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc))) 374 374 break; 375 375 376 + /* read fpqnum field after dataaddr field */ 377 + dma_rmb(); 376 378 if (is_rx_desc(raw_desc)) 377 379 ret = xgene_enet_rx_frame(ring, raw_desc); 378 380 else
+2 -1
drivers/net/ethernet/cirrus/Kconfig
··· 32 32 will be called cs89x0. 33 33 34 34 config CS89x0_PLATFORM 35 - bool "CS89x0 platform driver support" 35 + bool "CS89x0 platform driver support" if HAS_IOPORT_MAP 36 + default !HAS_IOPORT_MAP 36 37 depends on CS89x0 37 38 help 38 39 Say Y to compile the cs89x0 driver as a platform driver. This
+1 -1
drivers/net/ethernet/freescale/gianfar_ethtool.c
··· 1586 1586 return -EBUSY; 1587 1587 1588 1588 /* Fill regular entries */ 1589 - for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].ctrl); 1589 + for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop); 1590 1590 i++) 1591 1591 gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop); 1592 1592 /* Fill the rest with fall-troughs */
+11 -8
drivers/net/ethernet/intel/igbvf/netdev.c
··· 1907 1907 1908 1908 static int igbvf_tso(struct igbvf_adapter *adapter, 1909 1909 struct igbvf_ring *tx_ring, 1910 - struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) 1910 + struct sk_buff *skb, u32 tx_flags, u8 *hdr_len, 1911 + __be16 protocol) 1911 1912 { 1912 1913 struct e1000_adv_tx_context_desc *context_desc; 1913 1914 struct igbvf_buffer *buffer_info; ··· 1928 1927 l4len = tcp_hdrlen(skb); 1929 1928 *hdr_len += l4len; 1930 1929 1931 - if (skb->protocol == htons(ETH_P_IP)) { 1930 + if (protocol == htons(ETH_P_IP)) { 1932 1931 struct iphdr *iph = ip_hdr(skb); 1933 1932 iph->tot_len = 0; 1934 1933 iph->check = 0; ··· 1959 1958 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 1960 1959 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT); 1961 1960 1962 - if (skb->protocol == htons(ETH_P_IP)) 1961 + if (protocol == htons(ETH_P_IP)) 1963 1962 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; 1964 1963 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; 1965 1964 ··· 1985 1984 1986 1985 static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter, 1987 1986 struct igbvf_ring *tx_ring, 1988 - struct sk_buff *skb, u32 tx_flags) 1987 + struct sk_buff *skb, u32 tx_flags, 1988 + __be16 protocol) 1989 1989 { 1990 1990 struct e1000_adv_tx_context_desc *context_desc; 1991 1991 unsigned int i; ··· 2013 2011 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT); 2014 2012 2015 2013 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2016 - switch (skb->protocol) { 2014 + switch (protocol) { 2017 2015 case htons(ETH_P_IP): 2018 2016 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; 2019 2017 if (ip_hdr(skb)->protocol == IPPROTO_TCP) ··· 2213 2211 u8 hdr_len = 0; 2214 2212 int count = 0; 2215 2213 int tso = 0; 2214 + __be16 protocol = vlan_get_protocol(skb); 2216 2215 2217 2216 if (test_bit(__IGBVF_DOWN, &adapter->state)) { 2218 2217 dev_kfree_skb_any(skb); ··· 2243 2240 IGBVF_TX_FLAGS_VLAN_SHIFT); 2244 2241 } 2245 2242 2246 - if (skb->protocol == htons(ETH_P_IP)) 2243 + if (protocol == htons(ETH_P_IP)) 2247 2244 tx_flags |= 
IGBVF_TX_FLAGS_IPV4; 2248 2245 2249 2246 first = tx_ring->next_to_use; 2250 2247 2251 2248 tso = skb_is_gso(skb) ? 2252 - igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len) : 0; 2249 + igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len, protocol) : 0; 2253 2250 if (unlikely(tso < 0)) { 2254 2251 dev_kfree_skb_any(skb); 2255 2252 return NETDEV_TX_OK; ··· 2257 2254 2258 2255 if (tso) 2259 2256 tx_flags |= IGBVF_TX_FLAGS_TSO; 2260 - else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags) && 2257 + else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags, protocol) && 2261 2258 (skb->ip_summed == CHECKSUM_PARTIAL)) 2262 2259 tx_flags |= IGBVF_TX_FLAGS_CSUM; 2263 2260
+1 -1
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
··· 7227 7227 if (!vhdr) 7228 7228 goto out_drop; 7229 7229 7230 - protocol = vhdr->h_vlan_encapsulated_proto; 7231 7230 tx_flags |= ntohs(vhdr->h_vlan_TCI) << 7232 7231 IXGBE_TX_FLAGS_VLAN_SHIFT; 7233 7232 tx_flags |= IXGBE_TX_FLAGS_SW_VLAN; 7234 7233 } 7234 + protocol = vlan_get_protocol(skb); 7235 7235 7236 7236 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 7237 7237 adapter->ptp_clock &&
+2 -2
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
··· 3099 3099 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 3100 3100 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; 3101 3101 3102 - if (skb->protocol == htons(ETH_P_IP)) { 3102 + if (first->protocol == htons(ETH_P_IP)) { 3103 3103 struct iphdr *iph = ip_hdr(skb); 3104 3104 iph->tot_len = 0; 3105 3105 iph->check = 0; ··· 3156 3156 3157 3157 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3158 3158 u8 l4_hdr = 0; 3159 - switch (skb->protocol) { 3159 + switch (first->protocol) { 3160 3160 case htons(ETH_P_IP): 3161 3161 vlan_macip_lens |= skb_network_header_len(skb); 3162 3162 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+2 -1
drivers/net/ethernet/mellanox/mlx4/mlx4.h
··· 244 244 extern int log_mtts_per_seg; 245 245 extern int mlx4_internal_err_reset; 246 246 247 - #define MLX4_MAX_NUM_SLAVES (MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF) 247 + #define MLX4_MAX_NUM_SLAVES (min(MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF, \ 248 + MLX4_MFUNC_MAX)) 248 249 #define ALL_SLAVES 0xff 249 250 250 251 struct mlx4_bitmap {
+24 -3
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
··· 968 968 tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, 969 969 budget); 970 970 work_done = qlcnic_process_rcv_ring(sds_ring, budget); 971 - if ((work_done < budget) && tx_complete) { 971 + 972 + /* Check if we need a repoll */ 973 + if (!tx_complete) 974 + work_done = budget; 975 + 976 + if (work_done < budget) { 972 977 napi_complete(&sds_ring->napi); 973 978 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { 974 979 qlcnic_enable_sds_intr(adapter, sds_ring); ··· 998 993 napi_complete(&tx_ring->napi); 999 994 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) 1000 995 qlcnic_enable_tx_intr(adapter, tx_ring); 996 + } else { 997 + /* As qlcnic_process_cmd_ring() returned 0, we need a repoll*/ 998 + work_done = budget; 1001 999 } 1002 1000 1003 1001 return work_done; ··· 1959 1951 1960 1952 tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget); 1961 1953 work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget); 1962 - if ((work_done < budget) && tx_complete) { 1954 + 1955 + /* Check if we need a repoll */ 1956 + if (!tx_complete) 1957 + work_done = budget; 1958 + 1959 + if (work_done < budget) { 1963 1960 napi_complete(&sds_ring->napi); 1964 1961 qlcnic_enable_sds_intr(adapter, sds_ring); 1965 1962 } ··· 1987 1974 1988 1975 tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget); 1989 1976 work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget); 1990 - if ((work_done < budget) && tx_complete) { 1977 + 1978 + /* Check if we need a repoll */ 1979 + if (!tx_complete) 1980 + work_done = budget; 1981 + 1982 + if (work_done < budget) { 1991 1983 napi_complete(&sds_ring->napi); 1992 1984 qlcnic_enable_sds_intr(adapter, sds_ring); 1993 1985 } ··· 2014 1996 napi_complete(&tx_ring->napi); 2015 1997 if (test_bit(__QLCNIC_DEV_UP , &adapter->state)) 2016 1998 qlcnic_enable_tx_intr(adapter, tx_ring); 1999 + } else { 2000 + /* need a repoll */ 2001 + work_done = budget; 2017 2002 } 2018 2003 2019 2004 return work_done;
+16 -10
drivers/net/ethernet/qlogic/qlge/qlge_main.c
··· 2351 2351 { 2352 2352 struct ql_adapter *qdev = netdev_priv(ndev); 2353 2353 int status = 0; 2354 + bool need_restart = netif_running(ndev); 2354 2355 2355 - status = ql_adapter_down(qdev); 2356 - if (status) { 2357 - netif_err(qdev, link, qdev->ndev, 2358 - "Failed to bring down the adapter\n"); 2359 - return status; 2356 + if (need_restart) { 2357 + status = ql_adapter_down(qdev); 2358 + if (status) { 2359 + netif_err(qdev, link, qdev->ndev, 2360 + "Failed to bring down the adapter\n"); 2361 + return status; 2362 + } 2360 2363 } 2361 2364 2362 2365 /* update the features with resent change */ 2363 2366 ndev->features = features; 2364 2367 2365 - status = ql_adapter_up(qdev); 2366 - if (status) { 2367 - netif_err(qdev, link, qdev->ndev, 2368 - "Failed to bring up the adapter\n"); 2369 - return status; 2368 + if (need_restart) { 2369 + status = ql_adapter_up(qdev); 2370 + if (status) { 2371 + netif_err(qdev, link, qdev->ndev, 2372 + "Failed to bring up the adapter\n"); 2373 + return status; 2374 + } 2370 2375 } 2376 + 2371 2377 return status; 2372 2378 } 2373 2379
+1
drivers/net/ethernet/sun/sunvnet.c
··· 1123 1123 skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size; 1124 1124 skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type; 1125 1125 } 1126 + nskb->queue_mapping = skb->queue_mapping; 1126 1127 dev_kfree_skb(skb); 1127 1128 skb = nskb; 1128 1129 }
+8 -3
drivers/net/hyperv/netvsc.c
··· 717 717 u64 req_id; 718 718 unsigned int section_index = NETVSC_INVALID_INDEX; 719 719 u32 msg_size = 0; 720 - struct sk_buff *skb; 720 + struct sk_buff *skb = NULL; 721 721 u16 q_idx = packet->q_idx; 722 722 723 723 ··· 744 744 packet); 745 745 skb = (struct sk_buff *) 746 746 (unsigned long)packet->send_completion_tid; 747 - if (skb) 748 - dev_kfree_skb_any(skb); 749 747 packet->page_buf_cnt = 0; 750 748 } 751 749 } ··· 807 809 } else { 808 810 netdev_err(ndev, "Unable to send packet %p ret %d\n", 809 811 packet, ret); 812 + } 813 + 814 + if (ret != 0) { 815 + if (section_index != NETVSC_INVALID_INDEX) 816 + netvsc_free_send_slot(net_device, section_index); 817 + } else if (skb) { 818 + dev_kfree_skb_any(skb); 810 819 } 811 820 812 821 return ret;
+8 -8
drivers/net/macvtap.c
··· 17 17 #include <linux/fs.h> 18 18 #include <linux/uio.h> 19 19 20 - #include <net/ipv6.h> 21 20 #include <net/net_namespace.h> 22 21 #include <net/rtnetlink.h> 23 22 #include <net/sock.h> ··· 80 81 static const struct proto_ops macvtap_socket_ops; 81 82 82 83 #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \ 83 - NETIF_F_TSO6) 84 + NETIF_F_TSO6 | NETIF_F_UFO) 84 85 #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO) 85 86 #define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG) 86 87 ··· 585 586 gso_type = SKB_GSO_TCPV6; 586 587 break; 587 588 case VIRTIO_NET_HDR_GSO_UDP: 588 - pr_warn_once("macvtap: %s: using disabled UFO feature; please fix this program\n", 589 - current->comm); 590 589 gso_type = SKB_GSO_UDP; 591 - if (skb->protocol == htons(ETH_P_IPV6)) 592 - ipv6_proxy_select_ident(skb); 593 590 break; 594 591 default: 595 592 return -EINVAL; ··· 631 636 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4; 632 637 else if (sinfo->gso_type & SKB_GSO_TCPV6) 633 638 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6; 639 + else if (sinfo->gso_type & SKB_GSO_UDP) 640 + vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP; 634 641 else 635 642 BUG(); 636 643 if (sinfo->gso_type & SKB_GSO_TCP_ECN) ··· 962 965 if (arg & TUN_F_TSO6) 963 966 feature_mask |= NETIF_F_TSO6; 964 967 } 968 + 969 + if (arg & TUN_F_UFO) 970 + feature_mask |= NETIF_F_UFO; 965 971 } 966 972 967 973 /* tun/tap driver inverts the usage for TSO offloads, where ··· 975 975 * When user space turns off TSO, we turn off GSO/LRO so that 976 976 * user-space will not receive TSO frames. 
977 977 */ 978 - if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6)) 978 + if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO)) 979 979 features |= RX_OFFLOADS; 980 980 else 981 981 features &= ~RX_OFFLOADS; ··· 1090 1090 case TUNSETOFFLOAD: 1091 1091 /* let the user check for future flags */ 1092 1092 if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | 1093 - TUN_F_TSO_ECN)) 1093 + TUN_F_TSO_ECN | TUN_F_UFO)) 1094 1094 return -EINVAL; 1095 1095 1096 1096 rtnl_lock();
+1 -1
drivers/net/ppp/ppp_deflate.c
··· 246 246 /* 247 247 * See if we managed to reduce the size of the packet. 248 248 */ 249 - if (olen < isize) { 249 + if (olen < isize && olen <= osize) { 250 250 state->stats.comp_bytes += olen; 251 251 state->stats.comp_packets++; 252 252 } else {
+9 -16
drivers/net/tun.c
··· 65 65 #include <linux/nsproxy.h> 66 66 #include <linux/virtio_net.h> 67 67 #include <linux/rcupdate.h> 68 - #include <net/ipv6.h> 69 68 #include <net/net_namespace.h> 70 69 #include <net/netns/generic.h> 71 70 #include <net/rtnetlink.h> ··· 185 186 struct net_device *dev; 186 187 netdev_features_t set_features; 187 188 #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \ 188 - NETIF_F_TSO6) 189 + NETIF_F_TSO6|NETIF_F_UFO) 189 190 190 191 int vnet_hdr_sz; 191 192 int sndbuf; ··· 1165 1166 break; 1166 1167 } 1167 1168 1168 - skb_reset_network_header(skb); 1169 - 1170 1169 if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) { 1171 1170 pr_debug("GSO!\n"); 1172 1171 switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { ··· 1175 1178 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; 1176 1179 break; 1177 1180 case VIRTIO_NET_HDR_GSO_UDP: 1178 - { 1179 - static bool warned; 1180 - 1181 - if (!warned) { 1182 - warned = true; 1183 - netdev_warn(tun->dev, 1184 - "%s: using disabled UFO feature; please fix this program\n", 1185 - current->comm); 1186 - } 1187 1181 skb_shinfo(skb)->gso_type = SKB_GSO_UDP; 1188 - if (skb->protocol == htons(ETH_P_IPV6)) 1189 - ipv6_proxy_select_ident(skb); 1190 1182 break; 1191 - } 1192 1183 default: 1193 1184 tun->dev->stats.rx_frame_errors++; 1194 1185 kfree_skb(skb); ··· 1205 1220 skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; 1206 1221 } 1207 1222 1223 + skb_reset_network_header(skb); 1208 1224 skb_probe_transport_header(skb, 0); 1209 1225 1210 1226 rxhash = skb_get_hash(skb); ··· 1283 1297 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; 1284 1298 else if (sinfo->gso_type & SKB_GSO_TCPV6) 1285 1299 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; 1300 + else if (sinfo->gso_type & SKB_GSO_UDP) 1301 + gso.gso_type = VIRTIO_NET_HDR_GSO_UDP; 1286 1302 else { 1287 1303 pr_err("unexpected GSO type: " 1288 1304 "0x%x, gso_size %d, hdr_len %d\n", ··· 1739 1751 if (arg & TUN_F_TSO6) 1740 1752 features |= NETIF_F_TSO6; 1741 1753 arg &= 
~(TUN_F_TSO4|TUN_F_TSO6); 1754 + } 1755 + 1756 + if (arg & TUN_F_UFO) { 1757 + features |= NETIF_F_UFO; 1758 + arg &= ~TUN_F_UFO; 1742 1759 } 1743 1760 } 1744 1761
+18 -18
drivers/net/usb/sr9700.c
··· 77 77 int ret; 78 78 79 79 udelay(1); 80 - ret = sr_read_reg(dev, EPCR, &tmp); 80 + ret = sr_read_reg(dev, SR_EPCR, &tmp); 81 81 if (ret < 0) 82 82 return ret; 83 83 ··· 98 98 99 99 mutex_lock(&dev->phy_mutex); 100 100 101 - sr_write_reg(dev, EPAR, phy ? (reg | EPAR_PHY_ADR) : reg); 102 - sr_write_reg(dev, EPCR, phy ? (EPCR_EPOS | EPCR_ERPRR) : EPCR_ERPRR); 101 + sr_write_reg(dev, SR_EPAR, phy ? (reg | EPAR_PHY_ADR) : reg); 102 + sr_write_reg(dev, SR_EPCR, phy ? (EPCR_EPOS | EPCR_ERPRR) : EPCR_ERPRR); 103 103 104 104 ret = wait_phy_eeprom_ready(dev, phy); 105 105 if (ret < 0) 106 106 goto out_unlock; 107 107 108 - sr_write_reg(dev, EPCR, 0x0); 109 - ret = sr_read(dev, EPDR, 2, value); 108 + sr_write_reg(dev, SR_EPCR, 0x0); 109 + ret = sr_read(dev, SR_EPDR, 2, value); 110 110 111 111 netdev_dbg(dev->net, "read shared %d 0x%02x returned 0x%04x, %d\n", 112 112 phy, reg, *value, ret); ··· 123 123 124 124 mutex_lock(&dev->phy_mutex); 125 125 126 - ret = sr_write(dev, EPDR, 2, &value); 126 + ret = sr_write(dev, SR_EPDR, 2, &value); 127 127 if (ret < 0) 128 128 goto out_unlock; 129 129 130 - sr_write_reg(dev, EPAR, phy ? (reg | EPAR_PHY_ADR) : reg); 131 - sr_write_reg(dev, EPCR, phy ? (EPCR_WEP | EPCR_EPOS | EPCR_ERPRW) : 130 + sr_write_reg(dev, SR_EPAR, phy ? (reg | EPAR_PHY_ADR) : reg); 131 + sr_write_reg(dev, SR_EPCR, phy ? 
(EPCR_WEP | EPCR_EPOS | EPCR_ERPRW) : 132 132 (EPCR_WEP | EPCR_ERPRW)); 133 133 134 134 ret = wait_phy_eeprom_ready(dev, phy); 135 135 if (ret < 0) 136 136 goto out_unlock; 137 137 138 - sr_write_reg(dev, EPCR, 0x0); 138 + sr_write_reg(dev, SR_EPCR, 0x0); 139 139 140 140 out_unlock: 141 141 mutex_unlock(&dev->phy_mutex); ··· 188 188 if (loc == MII_BMSR) { 189 189 u8 value; 190 190 191 - sr_read_reg(dev, NSR, &value); 191 + sr_read_reg(dev, SR_NSR, &value); 192 192 if (value & NSR_LINKST) 193 193 rc = 1; 194 194 } ··· 228 228 int rc = 0; 229 229 230 230 /* Get the Link Status directly */ 231 - sr_read_reg(dev, NSR, &value); 231 + sr_read_reg(dev, SR_NSR, &value); 232 232 if (value & NSR_LINKST) 233 233 rc = 1; 234 234 ··· 281 281 } 282 282 } 283 283 284 - sr_write_async(dev, MAR, SR_MCAST_SIZE, hashes); 285 - sr_write_reg_async(dev, RCR, rx_ctl); 284 + sr_write_async(dev, SR_MAR, SR_MCAST_SIZE, hashes); 285 + sr_write_reg_async(dev, SR_RCR, rx_ctl); 286 286 } 287 287 288 288 static int sr9700_set_mac_address(struct net_device *netdev, void *p) ··· 297 297 } 298 298 299 299 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 300 - sr_write_async(dev, PAR, 6, netdev->dev_addr); 300 + sr_write_async(dev, SR_PAR, 6, netdev->dev_addr); 301 301 302 302 return 0; 303 303 } ··· 340 340 mii->phy_id_mask = 0x1f; 341 341 mii->reg_num_mask = 0x1f; 342 342 343 - sr_write_reg(dev, NCR, NCR_RST); 343 + sr_write_reg(dev, SR_NCR, NCR_RST); 344 344 udelay(20); 345 345 346 346 /* read MAC ··· 348 348 * EEPROM automatically to PAR. In case there is no EEPROM externally, 349 349 * a default MAC address is stored in PAR for making chip work properly. 
350 350 */ 351 - if (sr_read(dev, PAR, ETH_ALEN, netdev->dev_addr) < 0) { 351 + if (sr_read(dev, SR_PAR, ETH_ALEN, netdev->dev_addr) < 0) { 352 352 netdev_err(netdev, "Error reading MAC address\n"); 353 353 ret = -ENODEV; 354 354 goto out; 355 355 } 356 356 357 357 /* power up and reset phy */ 358 - sr_write_reg(dev, PRR, PRR_PHY_RST); 358 + sr_write_reg(dev, SR_PRR, PRR_PHY_RST); 359 359 /* at least 10ms, here 20ms for safe */ 360 360 mdelay(20); 361 - sr_write_reg(dev, PRR, 0); 361 + sr_write_reg(dev, SR_PRR, 0); 362 362 /* at least 1ms, here 2ms for reading right register */ 363 363 udelay(2 * 1000); 364 364
+33 -33
drivers/net/usb/sr9700.h
··· 14 14 /* sr9700 spec. register table on Linux platform */ 15 15 16 16 /* Network Control Reg */ 17 - #define NCR 0x00 17 + #define SR_NCR 0x00 18 18 #define NCR_RST (1 << 0) 19 19 #define NCR_LBK (3 << 1) 20 20 #define NCR_FDX (1 << 3) 21 21 #define NCR_WAKEEN (1 << 6) 22 22 /* Network Status Reg */ 23 - #define NSR 0x01 23 + #define SR_NSR 0x01 24 24 #define NSR_RXRDY (1 << 0) 25 25 #define NSR_RXOV (1 << 1) 26 26 #define NSR_TX1END (1 << 2) ··· 30 30 #define NSR_LINKST (1 << 6) 31 31 #define NSR_SPEED (1 << 7) 32 32 /* Tx Control Reg */ 33 - #define TCR 0x02 33 + #define SR_TCR 0x02 34 34 #define TCR_CRC_DIS (1 << 1) 35 35 #define TCR_PAD_DIS (1 << 2) 36 36 #define TCR_LC_CARE (1 << 3) ··· 38 38 #define TCR_EXCECM (1 << 5) 39 39 #define TCR_LF_EN (1 << 6) 40 40 /* Tx Status Reg for Packet Index 1 */ 41 - #define TSR1 0x03 41 + #define SR_TSR1 0x03 42 42 #define TSR1_EC (1 << 2) 43 43 #define TSR1_COL (1 << 3) 44 44 #define TSR1_LC (1 << 4) ··· 46 46 #define TSR1_LOC (1 << 6) 47 47 #define TSR1_TLF (1 << 7) 48 48 /* Tx Status Reg for Packet Index 2 */ 49 - #define TSR2 0x04 49 + #define SR_TSR2 0x04 50 50 #define TSR2_EC (1 << 2) 51 51 #define TSR2_COL (1 << 3) 52 52 #define TSR2_LC (1 << 4) ··· 54 54 #define TSR2_LOC (1 << 6) 55 55 #define TSR2_TLF (1 << 7) 56 56 /* Rx Control Reg*/ 57 - #define RCR 0x05 57 + #define SR_RCR 0x05 58 58 #define RCR_RXEN (1 << 0) 59 59 #define RCR_PRMSC (1 << 1) 60 60 #define RCR_RUNT (1 << 2) ··· 62 62 #define RCR_DIS_CRC (1 << 4) 63 63 #define RCR_DIS_LONG (1 << 5) 64 64 /* Rx Status Reg */ 65 - #define RSR 0x06 65 + #define SR_RSR 0x06 66 66 #define RSR_AE (1 << 2) 67 67 #define RSR_MF (1 << 6) 68 68 #define RSR_RF (1 << 7) 69 69 /* Rx Overflow Counter Reg */ 70 - #define ROCR 0x07 70 + #define SR_ROCR 0x07 71 71 #define ROCR_ROC (0x7F << 0) 72 72 #define ROCR_RXFU (1 << 7) 73 73 /* Back Pressure Threshold Reg */ 74 - #define BPTR 0x08 74 + #define SR_BPTR 0x08 75 75 #define BPTR_JPT (0x0F << 0) 76 76 #define BPTR_BPHW (0x0F 
<< 4) 77 77 /* Flow Control Threshold Reg */ 78 - #define FCTR 0x09 78 + #define SR_FCTR 0x09 79 79 #define FCTR_LWOT (0x0F << 0) 80 80 #define FCTR_HWOT (0x0F << 4) 81 81 /* rx/tx Flow Control Reg */ 82 - #define FCR 0x0A 82 + #define SR_FCR 0x0A 83 83 #define FCR_FLCE (1 << 0) 84 84 #define FCR_BKPA (1 << 4) 85 85 #define FCR_TXPEN (1 << 5) 86 86 #define FCR_TXPF (1 << 6) 87 87 #define FCR_TXP0 (1 << 7) 88 88 /* Eeprom & Phy Control Reg */ 89 - #define EPCR 0x0B 89 + #define SR_EPCR 0x0B 90 90 #define EPCR_ERRE (1 << 0) 91 91 #define EPCR_ERPRW (1 << 1) 92 92 #define EPCR_ERPRR (1 << 2) 93 93 #define EPCR_EPOS (1 << 3) 94 94 #define EPCR_WEP (1 << 4) 95 95 /* Eeprom & Phy Address Reg */ 96 - #define EPAR 0x0C 96 + #define SR_EPAR 0x0C 97 97 #define EPAR_EROA (0x3F << 0) 98 98 #define EPAR_PHY_ADR_MASK (0x03 << 6) 99 99 #define EPAR_PHY_ADR (0x01 << 6) 100 100 /* Eeprom & Phy Data Reg */ 101 - #define EPDR 0x0D /* 0x0D ~ 0x0E for Data Reg Low & High */ 101 + #define SR_EPDR 0x0D /* 0x0D ~ 0x0E for Data Reg Low & High */ 102 102 /* Wakeup Control Reg */ 103 - #define WCR 0x0F 103 + #define SR_WCR 0x0F 104 104 #define WCR_MAGICST (1 << 0) 105 105 #define WCR_LINKST (1 << 2) 106 106 #define WCR_MAGICEN (1 << 3) 107 107 #define WCR_LINKEN (1 << 5) 108 108 /* Physical Address Reg */ 109 - #define PAR 0x10 /* 0x10 ~ 0x15 6 bytes for PAR */ 109 + #define SR_PAR 0x10 /* 0x10 ~ 0x15 6 bytes for PAR */ 110 110 /* Multicast Address Reg */ 111 - #define MAR 0x16 /* 0x16 ~ 0x1D 8 bytes for MAR */ 111 + #define SR_MAR 0x16 /* 0x16 ~ 0x1D 8 bytes for MAR */ 112 112 /* 0x1e unused */ 113 113 /* Phy Reset Reg */ 114 - #define PRR 0x1F 114 + #define SR_PRR 0x1F 115 115 #define PRR_PHY_RST (1 << 0) 116 116 /* Tx sdram Write Pointer Address Low */ 117 - #define TWPAL 0x20 117 + #define SR_TWPAL 0x20 118 118 /* Tx sdram Write Pointer Address High */ 119 - #define TWPAH 0x21 119 + #define SR_TWPAH 0x21 120 120 /* Tx sdram Read Pointer Address Low */ 121 - #define TRPAL 0x22 121 + 
#define SR_TRPAL 0x22 122 122 /* Tx sdram Read Pointer Address High */ 123 - #define TRPAH 0x23 123 + #define SR_TRPAH 0x23 124 124 /* Rx sdram Write Pointer Address Low */ 125 - #define RWPAL 0x24 125 + #define SR_RWPAL 0x24 126 126 /* Rx sdram Write Pointer Address High */ 127 - #define RWPAH 0x25 127 + #define SR_RWPAH 0x25 128 128 /* Rx sdram Read Pointer Address Low */ 129 - #define RRPAL 0x26 129 + #define SR_RRPAL 0x26 130 130 /* Rx sdram Read Pointer Address High */ 131 - #define RRPAH 0x27 131 + #define SR_RRPAH 0x27 132 132 /* Vendor ID register */ 133 - #define VID 0x28 /* 0x28 ~ 0x29 2 bytes for VID */ 133 + #define SR_VID 0x28 /* 0x28 ~ 0x29 2 bytes for VID */ 134 134 /* Product ID register */ 135 - #define PID 0x2A /* 0x2A ~ 0x2B 2 bytes for PID */ 135 + #define SR_PID 0x2A /* 0x2A ~ 0x2B 2 bytes for PID */ 136 136 /* CHIP Revision register */ 137 - #define CHIPR 0x2C 137 + #define SR_CHIPR 0x2C 138 138 /* 0x2D --> 0xEF unused */ 139 139 /* USB Device Address */ 140 - #define USBDA 0xF0 140 + #define SR_USBDA 0xF0 141 141 #define USBDA_USBFA (0x7F << 0) 142 142 /* RX packet Counter Reg */ 143 - #define RXC 0xF1 143 + #define SR_RXC 0xF1 144 144 /* Tx packet Counter & USB Status Reg */ 145 - #define TXC_USBS 0xF2 145 + #define SR_TXC_USBS 0xF2 146 146 #define TXC_USBS_TXC0 (1 << 0) 147 147 #define TXC_USBS_TXC1 (1 << 1) 148 148 #define TXC_USBS_TXC2 (1 << 2) ··· 150 150 #define TXC_USBS_SUSFLAG (1 << 6) 151 151 #define TXC_USBS_RXFAULT (1 << 7) 152 152 /* USB Control register */ 153 - #define USBC 0xF4 153 + #define SR_USBC 0xF4 154 154 #define USBC_EP3NAK (1 << 4) 155 155 #define USBC_EP3ACK (1 << 5) 156 156
+10 -14
drivers/net/virtio_net.c
··· 490 490 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; 491 491 break; 492 492 case VIRTIO_NET_HDR_GSO_UDP: 493 - { 494 - static bool warned; 495 - 496 - if (!warned) { 497 - warned = true; 498 - netdev_warn(dev, 499 - "host using disabled UFO feature; please fix it\n"); 500 - } 501 493 skb_shinfo(skb)->gso_type = SKB_GSO_UDP; 502 494 break; 503 - } 504 495 case VIRTIO_NET_HDR_GSO_TCPV6: 505 496 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; 506 497 break; ··· 879 888 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; 880 889 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) 881 890 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; 891 + else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) 892 + hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP; 882 893 else 883 894 BUG(); 884 895 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN) ··· 1745 1752 dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; 1746 1753 1747 1754 if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { 1748 - dev->hw_features |= NETIF_F_TSO 1755 + dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO 1749 1756 | NETIF_F_TSO_ECN | NETIF_F_TSO6; 1750 1757 } 1751 1758 /* Individual feature bits: what can host handle? */ ··· 1755 1762 dev->hw_features |= NETIF_F_TSO6; 1756 1763 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) 1757 1764 dev->hw_features |= NETIF_F_TSO_ECN; 1765 + if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO)) 1766 + dev->hw_features |= NETIF_F_UFO; 1758 1767 1759 1768 dev->features |= NETIF_F_GSO_ROBUST; 1760 1769 1761 1770 if (gso) 1762 - dev->features |= dev->hw_features & NETIF_F_ALL_TSO; 1771 + dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO); 1763 1772 /* (!csum && gso) case will be fixed by register_netdev() */ 1764 1773 } 1765 1774 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM)) ··· 1799 1804 /* If we can receive ANY GSO packets, we must allocate large ones. 
*/ 1800 1805 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || 1801 1806 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || 1802 - virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN)) 1807 + virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) || 1808 + virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO)) 1803 1809 vi->big_packets = true; 1804 1810 1805 1811 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) ··· 1996 2000 static unsigned int features[] = { 1997 2001 VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, 1998 2002 VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, 1999 - VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_TSO6, 2003 + VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, 2000 2004 VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, 2001 - VIRTIO_NET_F_GUEST_ECN, 2005 + VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, 2002 2006 VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, 2003 2007 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, 2004 2008 VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
+5 -5
drivers/net/vxlan.c
··· 2625 2625 dev_put(vxlan->dev); 2626 2626 } 2627 2627 2628 - static int vxlan_newlink(struct net *net, struct net_device *dev, 2628 + static int vxlan_newlink(struct net *src_net, struct net_device *dev, 2629 2629 struct nlattr *tb[], struct nlattr *data[]) 2630 2630 { 2631 - struct vxlan_net *vn = net_generic(net, vxlan_net_id); 2631 + struct vxlan_net *vn = net_generic(src_net, vxlan_net_id); 2632 2632 struct vxlan_dev *vxlan = netdev_priv(dev); 2633 2633 struct vxlan_rdst *dst = &vxlan->default_dst; 2634 2634 __u32 vni; ··· 2638 2638 if (!data[IFLA_VXLAN_ID]) 2639 2639 return -EINVAL; 2640 2640 2641 - vxlan->net = dev_net(dev); 2641 + vxlan->net = src_net; 2642 2642 2643 2643 vni = nla_get_u32(data[IFLA_VXLAN_ID]); 2644 2644 dst->remote_vni = vni; ··· 2674 2674 if (data[IFLA_VXLAN_LINK] && 2675 2675 (dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) { 2676 2676 struct net_device *lowerdev 2677 - = __dev_get_by_index(net, dst->remote_ifindex); 2677 + = __dev_get_by_index(src_net, dst->remote_ifindex); 2678 2678 2679 2679 if (!lowerdev) { 2680 2680 pr_info("ifindex %d does not exist\n", dst->remote_ifindex); ··· 2761 2761 if (data[IFLA_VXLAN_GBP]) 2762 2762 vxlan->flags |= VXLAN_F_GBP; 2763 2763 2764 - if (vxlan_find_vni(net, vni, use_ipv6 ? AF_INET6 : AF_INET, 2764 + if (vxlan_find_vni(src_net, vni, use_ipv6 ? AF_INET6 : AF_INET, 2765 2765 vxlan->dst_port, vxlan->flags)) { 2766 2766 pr_info("duplicate VNI %u\n", vni); 2767 2767 return -EEXIST;
+3 -3
drivers/net/wan/Kconfig
··· 25 25 # There is no way to detect a comtrol sv11 - force it modular for now. 26 26 config HOSTESS_SV11 27 27 tristate "Comtrol Hostess SV-11 support" 28 - depends on ISA && m && ISA_DMA_API && INET && HDLC 28 + depends on ISA && m && ISA_DMA_API && INET && HDLC && VIRT_TO_BUS 29 29 help 30 30 Driver for Comtrol Hostess SV-11 network card which 31 31 operates on low speed synchronous serial links at up to ··· 37 37 # The COSA/SRP driver has not been tested as non-modular yet. 38 38 config COSA 39 39 tristate "COSA/SRP sync serial boards support" 40 - depends on ISA && m && ISA_DMA_API && HDLC 40 + depends on ISA && m && ISA_DMA_API && HDLC && VIRT_TO_BUS 41 41 ---help--- 42 42 Driver for COSA and SRP synchronous serial boards. 43 43 ··· 87 87 # There is no way to detect a Sealevel board. Force it modular 88 88 config SEALEVEL_4021 89 89 tristate "Sealevel Systems 4021 support" 90 - depends on ISA && m && ISA_DMA_API && INET && HDLC 90 + depends on ISA && m && ISA_DMA_API && INET && HDLC && VIRT_TO_BUS 91 91 help 92 92 This is a driver for the Sealevel Systems ACB 56 serial I/O adapter. 93 93
+2
drivers/net/xen-netback/interface.c
··· 578 578 goto err_rx_unbind; 579 579 } 580 580 queue->task = task; 581 + get_task_struct(task); 581 582 582 583 task = kthread_create(xenvif_dealloc_kthread, 583 584 (void *)queue, "%s-dealloc", queue->name); ··· 635 634 636 635 if (queue->task) { 637 636 kthread_stop(queue->task); 637 + put_task_struct(queue->task); 638 638 queue->task = NULL; 639 639 } 640 640
+1 -2
drivers/net/xen-netback/netback.c
··· 2008 2008 */ 2009 2009 if (unlikely(vif->disabled && queue->id == 0)) { 2010 2010 xenvif_carrier_off(vif); 2011 - xenvif_rx_queue_purge(queue); 2012 - continue; 2011 + break; 2013 2012 } 2014 2013 2015 2014 if (!skb_queue_empty(&queue->rx_queue))
+3
drivers/pci/host/pcie-designware.c
··· 283 283 struct msi_msg msg; 284 284 struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata); 285 285 286 + if (desc->msi_attrib.is_msix) 287 + return -EINVAL; 288 + 286 289 irq = assign_irq(1, desc, &pos); 287 290 if (irq < 0) 288 291 return irq;
+37 -3
drivers/pci/quirks.c
··· 324 324 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M); 325 325 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M); 326 326 327 + static void quirk_io(struct pci_dev *dev, int pos, unsigned size, 328 + const char *name) 329 + { 330 + u32 region; 331 + struct pci_bus_region bus_region; 332 + struct resource *res = dev->resource + pos; 333 + 334 + pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + (pos << 2), &region); 335 + 336 + if (!region) 337 + return; 338 + 339 + res->name = pci_name(dev); 340 + res->flags = region & ~PCI_BASE_ADDRESS_IO_MASK; 341 + res->flags |= 342 + (IORESOURCE_IO | IORESOURCE_PCI_FIXED | IORESOURCE_SIZEALIGN); 343 + region &= ~(size - 1); 344 + 345 + /* Convert from PCI bus to resource space */ 346 + bus_region.start = region; 347 + bus_region.end = region + size - 1; 348 + pcibios_bus_to_resource(dev->bus, res, &bus_region); 349 + 350 + dev_info(&dev->dev, FW_BUG "%s quirk: reg 0x%x: %pR\n", 351 + name, PCI_BASE_ADDRESS_0 + (pos << 2), res); 352 + } 353 + 327 354 /* 328 355 * Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS 329 356 * ver. 1.33 20070103) don't set the correct ISA PCI region header info. 330 357 * BAR0 should be 8 bytes; instead, it may be set to something like 8k 331 358 * (which conflicts w/ BAR1's memory range). 359 + * 360 + * CS553x's ISA PCI BARs may also be read-only (ref: 361 + * https://bugzilla.kernel.org/show_bug.cgi?id=85991 - Comment #4 forward). 
332 362 */ 333 363 static void quirk_cs5536_vsa(struct pci_dev *dev) 334 364 { 365 + static char *name = "CS5536 ISA bridge"; 366 + 335 367 if (pci_resource_len(dev, 0) != 8) { 336 - struct resource *res = &dev->resource[0]; 337 - res->end = res->start + 8 - 1; 338 - dev_info(&dev->dev, "CS5536 ISA bridge bug detected (incorrect header); workaround applied\n"); 368 + quirk_io(dev, 0, 8, name); /* SMB */ 369 + quirk_io(dev, 1, 256, name); /* GPIO */ 370 + quirk_io(dev, 2, 64, name); /* MFGPT */ 371 + dev_info(&dev->dev, "%s bug detected (incorrect header); workaround applied\n", 372 + name); 339 373 } 340 374 } 341 375 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa);
+55 -53
drivers/pinctrl/pinctrl-at91.c
··· 177 177 struct device *dev; 178 178 struct pinctrl_dev *pctl; 179 179 180 - int nbanks; 180 + int nactive_banks; 181 181 182 182 uint32_t *mux_mask; 183 183 int nmux; ··· 653 653 int mux; 654 654 655 655 /* check if it's a valid config */ 656 - if (pin->bank >= info->nbanks) { 656 + if (pin->bank >= gpio_banks) { 657 657 dev_err(info->dev, "%s: pin conf %d bank_id %d >= nbanks %d\n", 658 - name, index, pin->bank, info->nbanks); 658 + name, index, pin->bank, gpio_banks); 659 659 return -EINVAL; 660 + } 661 + 662 + if (!gpio_chips[pin->bank]) { 663 + dev_err(info->dev, "%s: pin conf %d bank_id %d not enabled\n", 664 + name, index, pin->bank); 665 + return -ENXIO; 660 666 } 661 667 662 668 if (pin->pin >= MAX_NB_GPIO_PER_BANK) { ··· 987 981 988 982 for_each_child_of_node(np, child) { 989 983 if (of_device_is_compatible(child, gpio_compat)) { 990 - info->nbanks++; 984 + if (of_device_is_available(child)) 985 + info->nactive_banks++; 991 986 } else { 992 987 info->nfunctions++; 993 988 info->ngroups += of_get_child_count(child); ··· 1010 1003 } 1011 1004 1012 1005 size /= sizeof(*list); 1013 - if (!size || size % info->nbanks) { 1014 - dev_err(info->dev, "wrong mux mask array should be by %d\n", info->nbanks); 1006 + if (!size || size % gpio_banks) { 1007 + dev_err(info->dev, "wrong mux mask array should be by %d\n", gpio_banks); 1015 1008 return -EINVAL; 1016 1009 } 1017 - info->nmux = size / info->nbanks; 1010 + info->nmux = size / gpio_banks; 1018 1011 1019 1012 info->mux_mask = devm_kzalloc(info->dev, sizeof(u32) * size, GFP_KERNEL); 1020 1013 if (!info->mux_mask) { ··· 1138 1131 of_match_device(at91_pinctrl_of_match, &pdev->dev)->data; 1139 1132 at91_pinctrl_child_count(info, np); 1140 1133 1141 - if (info->nbanks < 1) { 1134 + if (gpio_banks < 1) { 1142 1135 dev_err(&pdev->dev, "you need to specify at least one gpio-controller\n"); 1143 1136 return -EINVAL; 1144 1137 } ··· 1151 1144 1152 1145 dev_dbg(&pdev->dev, "mux-mask\n"); 1153 1146 tmp = info->mux_mask; 
1154 - for (i = 0; i < info->nbanks; i++) { 1147 + for (i = 0; i < gpio_banks; i++) { 1155 1148 for (j = 0; j < info->nmux; j++, tmp++) { 1156 1149 dev_dbg(&pdev->dev, "%d:%d\t0x%x\n", i, j, tmp[0]); 1157 1150 } ··· 1169 1162 if (!info->groups) 1170 1163 return -ENOMEM; 1171 1164 1172 - dev_dbg(&pdev->dev, "nbanks = %d\n", info->nbanks); 1165 + dev_dbg(&pdev->dev, "nbanks = %d\n", gpio_banks); 1173 1166 dev_dbg(&pdev->dev, "nfunctions = %d\n", info->nfunctions); 1174 1167 dev_dbg(&pdev->dev, "ngroups = %d\n", info->ngroups); 1175 1168 ··· 1192 1185 { 1193 1186 struct at91_pinctrl *info; 1194 1187 struct pinctrl_pin_desc *pdesc; 1195 - int ret, i, j, k; 1188 + int ret, i, j, k, ngpio_chips_enabled = 0; 1196 1189 1197 1190 info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); 1198 1191 if (!info) ··· 1207 1200 * to obtain references to the struct gpio_chip * for them, and we 1208 1201 * need this to proceed. 1209 1202 */ 1210 - for (i = 0; i < info->nbanks; i++) { 1211 - if (!gpio_chips[i]) { 1212 - dev_warn(&pdev->dev, "GPIO chip %d not registered yet\n", i); 1213 - devm_kfree(&pdev->dev, info); 1214 - return -EPROBE_DEFER; 1215 - } 1203 + for (i = 0; i < gpio_banks; i++) 1204 + if (gpio_chips[i]) 1205 + ngpio_chips_enabled++; 1206 + 1207 + if (ngpio_chips_enabled < info->nactive_banks) { 1208 + dev_warn(&pdev->dev, 1209 + "All GPIO chips are not registered yet (%d/%d)\n", 1210 + ngpio_chips_enabled, info->nactive_banks); 1211 + devm_kfree(&pdev->dev, info); 1212 + return -EPROBE_DEFER; 1216 1213 } 1217 1214 1218 1215 at91_pinctrl_desc.name = dev_name(&pdev->dev); 1219 - at91_pinctrl_desc.npins = info->nbanks * MAX_NB_GPIO_PER_BANK; 1216 + at91_pinctrl_desc.npins = gpio_banks * MAX_NB_GPIO_PER_BANK; 1220 1217 at91_pinctrl_desc.pins = pdesc = 1221 1218 devm_kzalloc(&pdev->dev, sizeof(*pdesc) * at91_pinctrl_desc.npins, GFP_KERNEL); 1222 1219 1223 1220 if (!at91_pinctrl_desc.pins) 1224 1221 return -ENOMEM; 1225 1222 1226 - for (i = 0 , k = 0; i < info->nbanks; 
i++) { 1223 + for (i = 0, k = 0; i < gpio_banks; i++) { 1227 1224 for (j = 0; j < MAX_NB_GPIO_PER_BANK; j++, k++) { 1228 1225 pdesc->number = k; 1229 1226 pdesc->name = kasprintf(GFP_KERNEL, "pio%c%d", i + 'A', j); ··· 1245 1234 } 1246 1235 1247 1236 /* We will handle a range of GPIO pins */ 1248 - for (i = 0; i < info->nbanks; i++) 1249 - pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range); 1237 + for (i = 0; i < gpio_banks; i++) 1238 + if (gpio_chips[i]) 1239 + pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range); 1250 1240 1251 1241 dev_info(&pdev->dev, "initialized AT91 pinctrl driver\n"); 1252 1242 ··· 1625 1613 static int at91_gpio_of_irq_setup(struct platform_device *pdev, 1626 1614 struct at91_gpio_chip *at91_gpio) 1627 1615 { 1616 + struct gpio_chip *gpiochip_prev = NULL; 1628 1617 struct at91_gpio_chip *prev = NULL; 1629 1618 struct irq_data *d = irq_get_irq_data(at91_gpio->pioc_virq); 1630 - int ret; 1619 + int ret, i; 1631 1620 1632 1621 at91_gpio->pioc_hwirq = irqd_to_hwirq(d); 1633 1622 ··· 1654 1641 return ret; 1655 1642 } 1656 1643 1657 - /* Setup chained handler */ 1658 - if (at91_gpio->pioc_idx) 1659 - prev = gpio_chips[at91_gpio->pioc_idx - 1]; 1660 - 1661 1644 /* The top level handler handles one bank of GPIOs, except 1662 1645 * on some SoC it can handle up to three... 1663 1646 * We only set up the handler for the first of the list. 
1664 1647 */ 1665 - if (prev && prev->next == at91_gpio) 1648 + gpiochip_prev = irq_get_handler_data(at91_gpio->pioc_virq); 1649 + if (!gpiochip_prev) { 1650 + /* Then register the chain on the parent IRQ */ 1651 + gpiochip_set_chained_irqchip(&at91_gpio->chip, 1652 + &gpio_irqchip, 1653 + at91_gpio->pioc_virq, 1654 + gpio_irq_handler); 1666 1655 return 0; 1656 + } 1667 1657 1668 - /* Then register the chain on the parent IRQ */ 1669 - gpiochip_set_chained_irqchip(&at91_gpio->chip, 1670 - &gpio_irqchip, 1671 - at91_gpio->pioc_virq, 1672 - gpio_irq_handler); 1658 + prev = container_of(gpiochip_prev, struct at91_gpio_chip, chip); 1673 1659 1674 - return 0; 1660 + /* we can only have 2 banks before */ 1661 + for (i = 0; i < 2; i++) { 1662 + if (prev->next) { 1663 + prev = prev->next; 1664 + } else { 1665 + prev->next = at91_gpio; 1666 + return 0; 1667 + } 1668 + } 1669 + 1670 + return -EINVAL; 1675 1671 } 1676 1672 1677 1673 /* This structure is replicated for each GPIO block allocated at probe time */ ··· 1696 1674 .can_sleep = false, 1697 1675 .ngpio = MAX_NB_GPIO_PER_BANK, 1698 1676 }; 1699 - 1700 - static void at91_gpio_probe_fixup(void) 1701 - { 1702 - unsigned i; 1703 - struct at91_gpio_chip *at91_gpio, *last = NULL; 1704 - 1705 - for (i = 0; i < gpio_banks; i++) { 1706 - at91_gpio = gpio_chips[i]; 1707 - 1708 - /* 1709 - * GPIO controller are grouped on some SoC: 1710 - * PIOC, PIOD and PIOE can share the same IRQ line 1711 - */ 1712 - if (last && last->pioc_virq == at91_gpio->pioc_virq) 1713 - last->next = at91_gpio; 1714 - last = at91_gpio; 1715 - } 1716 - } 1717 1677 1718 1678 static struct of_device_id at91_gpio_of_match[] = { 1719 1679 { .compatible = "atmel,at91sam9x5-gpio", .data = &at91sam9x5_ops, }, ··· 1808 1804 1809 1805 gpio_chips[alias_idx] = at91_chip; 1810 1806 gpio_banks = max(gpio_banks, alias_idx + 1); 1811 - 1812 - at91_gpio_probe_fixup(); 1813 1807 1814 1808 ret = at91_gpio_of_irq_setup(pdev, at91_chip); 1815 1809 if (ret)
+2 -1
drivers/scsi/device_handler/scsi_dh.c
··· 136 136 struct scsi_device_handler *scsi_dh = scsi_dh_data->scsi_dh; 137 137 struct scsi_device *sdev = scsi_dh_data->sdev; 138 138 139 + scsi_dh->detach(sdev); 140 + 139 141 spin_lock_irq(sdev->request_queue->queue_lock); 140 142 sdev->scsi_dh_data = NULL; 141 143 spin_unlock_irq(sdev->request_queue->queue_lock); 142 144 143 - scsi_dh->detach(sdev); 144 145 sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", scsi_dh->name); 145 146 module_put(scsi_dh->module); 146 147 }
+4 -2
drivers/scsi/sd.c
··· 2800 2800 */ 2801 2801 sd_set_flush_flag(sdkp); 2802 2802 2803 - max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), 2804 - sdkp->max_xfer_blocks); 2803 + max_xfer = sdkp->max_xfer_blocks; 2805 2804 max_xfer <<= ilog2(sdp->sector_size) - 9; 2805 + 2806 + max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), 2807 + max_xfer); 2806 2808 blk_queue_max_hw_sectors(sdkp->disk->queue, max_xfer); 2807 2809 set_capacity(disk, sdkp->capacity); 2808 2810 sd_config_write_same(sdkp);
+12 -2
drivers/spi/spi-fsl-dspi.c
··· 342 342 /* Only alloc on first setup */ 343 343 chip = spi_get_ctldata(spi); 344 344 if (chip == NULL) { 345 - chip = devm_kzalloc(&spi->dev, sizeof(struct chip_data), 346 - GFP_KERNEL); 345 + chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); 347 346 if (!chip) 348 347 return -ENOMEM; 349 348 } ··· 379 380 return -EINVAL; 380 381 381 382 return dspi_setup_transfer(spi, NULL); 383 + } 384 + 385 + static void dspi_cleanup(struct spi_device *spi) 386 + { 387 + struct chip_data *chip = spi_get_ctldata((struct spi_device *)spi); 388 + 389 + dev_dbg(&spi->dev, "spi_device %u.%u cleanup\n", 390 + spi->master->bus_num, spi->chip_select); 391 + 392 + kfree(chip); 382 393 } 383 394 384 395 static irqreturn_t dspi_interrupt(int irq, void *dev_id) ··· 476 467 dspi->bitbang.master->setup = dspi_setup; 477 468 dspi->bitbang.master->dev.of_node = pdev->dev.of_node; 478 469 470 + master->cleanup = dspi_cleanup; 479 471 master->mode_bits = SPI_CPOL | SPI_CPHA; 480 472 master->bits_per_word_mask = SPI_BPW_MASK(4) | SPI_BPW_MASK(8) | 481 473 SPI_BPW_MASK(16);
+4
drivers/spi/spi-imx.c
··· 823 823 struct dma_slave_config slave_config = {}; 824 824 int ret; 825 825 826 + /* use pio mode for i.mx6dl chip TKT238285 */ 827 + if (of_machine_is_compatible("fsl,imx6dl")) 828 + return 0; 829 + 826 830 /* Prepare for TX DMA: */ 827 831 master->dma_tx = dma_request_slave_channel(dev, "tx"); 828 832 if (!master->dma_tx) {
+1 -1
drivers/staging/lustre/lustre/llite/vvp_io.c
··· 632 632 return 0; 633 633 } 634 634 635 - if (cfio->fault.ft_flags & VM_FAULT_SIGBUS) { 635 + if (cfio->fault.ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) { 636 636 CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address); 637 637 return -EFAULT; 638 638 }
+5 -8
drivers/staging/nvec/nvec.c
··· 85 85 static const struct mfd_cell nvec_devices[] = { 86 86 { 87 87 .name = "nvec-kbd", 88 - .id = 1, 89 88 }, 90 89 { 91 90 .name = "nvec-mouse", 92 - .id = 1, 91 + }, 92 + { 93 + .name = "nvec-power", 94 + .id = 0, 93 95 }, 94 96 { 95 97 .name = "nvec-power", 96 98 .id = 1, 97 - }, 98 - { 99 - .name = "nvec-power", 100 - .id = 2, 101 99 }, 102 100 { 103 101 .name = "nvec-paz00", 104 - .id = 1, 105 102 }, 106 103 }; 107 104 ··· 888 891 nvec_msg_free(nvec, msg); 889 892 } 890 893 891 - ret = mfd_add_devices(nvec->dev, -1, nvec_devices, 894 + ret = mfd_add_devices(nvec->dev, 0, nvec_devices, 892 895 ARRAY_SIZE(nvec_devices), NULL, 0, NULL); 893 896 if (ret) 894 897 dev_err(nvec->dev, "error adding subdevices\n");
+5
drivers/usb/core/otg_whitelist.h
··· 55 55 le16_to_cpu(dev->descriptor.idProduct) == 0xbadd)) 56 56 return 0; 57 57 58 + /* OTG PET device is always targeted (see OTG 2.0 ECN 6.4.2) */ 59 + if ((le16_to_cpu(dev->descriptor.idVendor) == 0x1a0a && 60 + le16_to_cpu(dev->descriptor.idProduct) == 0x0200)) 61 + return 1; 62 + 58 63 /* NOTE: can't use usb_match_id() since interface caches 59 64 * aren't set up yet. this is cut/paste from that code. 60 65 */
+4
drivers/usb/core/quirks.c
··· 179 179 { USB_DEVICE(0x0b05, 0x17e0), .driver_info = 180 180 USB_QUIRK_IGNORE_REMOTE_WAKEUP }, 181 181 182 + /* Protocol and OTG Electrical Test Device */ 183 + { USB_DEVICE(0x1a0a, 0x0200), .driver_info = 184 + USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL }, 185 + 182 186 { } /* terminating entry must be last */ 183 187 }; 184 188
+3 -3
drivers/usb/dwc2/core_intr.c
··· 476 476 u32 gintsts; 477 477 irqreturn_t retval = IRQ_NONE; 478 478 479 + spin_lock(&hsotg->lock); 480 + 479 481 if (!dwc2_is_controller_alive(hsotg)) { 480 482 dev_warn(hsotg->dev, "Controller is dead\n"); 481 483 goto out; 482 484 } 483 - 484 - spin_lock(&hsotg->lock); 485 485 486 486 gintsts = dwc2_read_common_intr(hsotg); 487 487 if (gintsts & ~GINTSTS_PRTINT) ··· 515 515 } 516 516 } 517 517 518 - spin_unlock(&hsotg->lock); 519 518 out: 519 + spin_unlock(&hsotg->lock); 520 520 return retval; 521 521 } 522 522 EXPORT_SYMBOL_GPL(dwc2_handle_common_intr);
+1 -1
drivers/usb/phy/phy.c
··· 34 34 return phy; 35 35 } 36 36 37 - return ERR_PTR(-EPROBE_DEFER); 37 + return ERR_PTR(-ENODEV); 38 38 } 39 39 40 40 static struct usb_phy *__usb_find_phy_dev(struct device *dev,
+8 -1
drivers/usb/storage/unusual_devs.h
··· 507 507 UNUSUAL_DEV( 0x04e6, 0x000f, 0x0000, 0x9999, 508 508 "SCM Microsystems", 509 509 "eUSB SCSI Adapter (Bus Powered)", 510 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init, 510 + USB_SC_SCSI, USB_PR_BULK, usb_stor_euscsi_init, 511 511 US_FL_SCM_MULT_TARG ), 512 512 513 513 UNUSUAL_DEV( 0x04e6, 0x0101, 0x0200, 0x0200, ··· 1994 1994 "USB to ATA/ATAPI Bridge", 1995 1995 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 1996 1996 US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ), 1997 + 1998 + /* Reported by Dmitry Nezhevenko <dion@dion.org.ua> */ 1999 + UNUSUAL_DEV( 0x152d, 0x2566, 0x0114, 0x0114, 2000 + "JMicron", 2001 + "USB to ATA/ATAPI Bridge", 2002 + USB_SC_DEVICE, USB_PR_DEVICE, NULL, 2003 + US_FL_BROKEN_FUA ), 1997 2004 1998 2005 /* Entrega Technologies U1-SC25 (later Xircom PortGear PGSCSI) 1999 2006 * and Mac USB Dock USB-SCSI */
+7
drivers/usb/storage/unusual_uas.h
··· 140 140 "External HDD", 141 141 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 142 142 US_FL_IGNORE_UAS), 143 + 144 + /* Reported-by: Richard Henderson <rth@redhat.com> */ 145 + UNUSUAL_DEV(0x4971, 0x8017, 0x0000, 0x9999, 146 + "SimpleTech", 147 + "External HDD", 148 + USB_SC_DEVICE, USB_PR_DEVICE, NULL, 149 + US_FL_NO_REPORT_OPCODES),
+7 -7
drivers/vhost/net.c
··· 528 528 .msg_controllen = 0, 529 529 .msg_flags = MSG_DONTWAIT, 530 530 }; 531 - struct virtio_net_hdr hdr = { 532 - .flags = 0, 533 - .gso_type = VIRTIO_NET_HDR_GSO_NONE 531 + struct virtio_net_hdr_mrg_rxbuf hdr = { 532 + .hdr.flags = 0, 533 + .hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE 534 534 }; 535 535 size_t total_len = 0; 536 536 int err, mergeable; ··· 614 614 vq->iov->iov_base); 615 615 break; 616 616 } 617 - /* Supply (or replace) ->num_buffers if VIRTIO_NET_F_MRG_RXBUF 618 - * TODO: Should check and handle checksum. 619 - */ 617 + /* TODO: Should check and handle checksum. */ 618 + 619 + hdr.num_buffers = cpu_to_vhost16(vq, headcount); 620 620 if (likely(mergeable) && 621 - copy_to_iter(&headcount, 2, &fixup) != 2) { 621 + copy_to_iter(&hdr.num_buffers, 2, &fixup) != 2) { 622 622 vq_err(vq, "Failed num_buffers write"); 623 623 vhost_discard_vq_desc(vq, headcount); 624 624 break;
+2
fs/btrfs/scrub.c
··· 3065 3065 path->search_commit_root = 1; 3066 3066 path->skip_locking = 1; 3067 3067 3068 + ppath->search_commit_root = 1; 3069 + ppath->skip_locking = 1; 3068 3070 /* 3069 3071 * trigger the readahead for extent tree csum tree and wait for 3070 3072 * completion. During readahead, the scrub is officially paused
+4 -2
fs/cifs/cifs_debug.c
··· 606 606 *flags = CIFSSEC_MUST_NTLMV2; 607 607 else if ((*flags & CIFSSEC_MUST_NTLM) == CIFSSEC_MUST_NTLM) 608 608 *flags = CIFSSEC_MUST_NTLM; 609 - else if ((*flags & CIFSSEC_MUST_LANMAN) == CIFSSEC_MUST_LANMAN) 609 + else if (CIFSSEC_MUST_LANMAN && 610 + (*flags & CIFSSEC_MUST_LANMAN) == CIFSSEC_MUST_LANMAN) 610 611 *flags = CIFSSEC_MUST_LANMAN; 611 - else if ((*flags & CIFSSEC_MUST_PLNTXT) == CIFSSEC_MUST_PLNTXT) 612 + else if (CIFSSEC_MUST_PLNTXT && 613 + (*flags & CIFSSEC_MUST_PLNTXT) == CIFSSEC_MUST_PLNTXT) 612 614 *flags = CIFSSEC_MUST_PLNTXT; 613 615 614 616 *flags |= signflags;
+5 -1
fs/cifs/file.c
··· 366 366 struct cifsLockInfo *li, *tmp; 367 367 struct cifs_fid fid; 368 368 struct cifs_pending_open open; 369 + bool oplock_break_cancelled; 369 370 370 371 spin_lock(&cifs_file_list_lock); 371 372 if (--cifs_file->count > 0) { ··· 398 397 } 399 398 spin_unlock(&cifs_file_list_lock); 400 399 401 - cancel_work_sync(&cifs_file->oplock_break); 400 + oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break); 402 401 403 402 if (!tcon->need_reconnect && !cifs_file->invalidHandle) { 404 403 struct TCP_Server_Info *server = tcon->ses->server; ··· 409 408 server->ops->close(xid, tcon, &cifs_file->fid); 410 409 _free_xid(xid); 411 410 } 411 + 412 + if (oplock_break_cancelled) 413 + cifs_done_oplock_break(cifsi); 412 414 413 415 cifs_del_pending_open(&open); 414 416
+1 -1
fs/cifs/smbencrypt.c
··· 221 221 } 222 222 223 223 rc = mdfour(p16, (unsigned char *) wpwd, len * sizeof(__le16)); 224 - memset(wpwd, 0, 129 * sizeof(__le16)); 224 + memzero_explicit(wpwd, sizeof(wpwd)); 225 225 226 226 return rc; 227 227 }
+23 -26
fs/gfs2/quota.c
··· 667 667 668 668 static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc, 669 669 s64 change, struct gfs2_quota_data *qd, 670 - struct fs_disk_quota *fdq) 670 + struct qc_dqblk *fdq) 671 671 { 672 672 struct inode *inode = &ip->i_inode; 673 673 struct gfs2_sbd *sdp = GFS2_SB(inode); ··· 697 697 be64_add_cpu(&q.qu_value, change); 698 698 qd->qd_qb.qb_value = q.qu_value; 699 699 if (fdq) { 700 - if (fdq->d_fieldmask & FS_DQ_BSOFT) { 701 - q.qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift); 700 + if (fdq->d_fieldmask & QC_SPC_SOFT) { 701 + q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift); 702 702 qd->qd_qb.qb_warn = q.qu_warn; 703 703 } 704 - if (fdq->d_fieldmask & FS_DQ_BHARD) { 705 - q.qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift); 704 + if (fdq->d_fieldmask & QC_SPC_HARD) { 705 + q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift); 706 706 qd->qd_qb.qb_limit = q.qu_limit; 707 707 } 708 - if (fdq->d_fieldmask & FS_DQ_BCOUNT) { 709 - q.qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift); 708 + if (fdq->d_fieldmask & QC_SPACE) { 709 + q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift); 710 710 qd->qd_qb.qb_value = q.qu_value; 711 711 } 712 712 } ··· 1497 1497 } 1498 1498 1499 1499 static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid, 1500 - struct fs_disk_quota *fdq) 1500 + struct qc_dqblk *fdq) 1501 1501 { 1502 1502 struct gfs2_sbd *sdp = sb->s_fs_info; 1503 1503 struct gfs2_quota_lvb *qlvb; ··· 1505 1505 struct gfs2_holder q_gh; 1506 1506 int error; 1507 1507 1508 - memset(fdq, 0, sizeof(struct fs_disk_quota)); 1508 + memset(fdq, 0, sizeof(*fdq)); 1509 1509 1510 1510 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) 1511 1511 return -ESRCH; /* Crazy XFS error code */ ··· 1522 1522 goto out; 1523 1523 1524 1524 qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr; 1525 - fdq->d_version = FS_DQUOT_VERSION; 1526 - fdq->d_flags 
= (qid.type == USRQUOTA) ? FS_USER_QUOTA : FS_GROUP_QUOTA; 1527 - fdq->d_id = from_kqid_munged(current_user_ns(), qid); 1528 - fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift; 1529 - fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift; 1530 - fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift; 1525 + fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift; 1526 + fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift; 1527 + fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift; 1531 1528 1532 1529 gfs2_glock_dq_uninit(&q_gh); 1533 1530 out: ··· 1533 1536 } 1534 1537 1535 1538 /* GFS2 only supports a subset of the XFS fields */ 1536 - #define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD|FS_DQ_BCOUNT) 1539 + #define GFS2_FIELDMASK (QC_SPC_SOFT|QC_SPC_HARD|QC_SPACE) 1537 1540 1538 1541 static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid, 1539 - struct fs_disk_quota *fdq) 1542 + struct qc_dqblk *fdq) 1540 1543 { 1541 1544 struct gfs2_sbd *sdp = sb->s_fs_info; 1542 1545 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); ··· 1580 1583 goto out_i; 1581 1584 1582 1585 /* If nothing has changed, this is a no-op */ 1583 - if ((fdq->d_fieldmask & FS_DQ_BSOFT) && 1584 - ((fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_warn))) 1585 - fdq->d_fieldmask ^= FS_DQ_BSOFT; 1586 + if ((fdq->d_fieldmask & QC_SPC_SOFT) && 1587 + ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn))) 1588 + fdq->d_fieldmask ^= QC_SPC_SOFT; 1586 1589 1587 - if ((fdq->d_fieldmask & FS_DQ_BHARD) && 1588 - ((fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_limit))) 1589 - fdq->d_fieldmask ^= FS_DQ_BHARD; 1590 + if ((fdq->d_fieldmask & QC_SPC_HARD) && 1591 + ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit))) 1592 + fdq->d_fieldmask ^= 
QC_SPC_HARD; 1590 1593 1591 - if ((fdq->d_fieldmask & FS_DQ_BCOUNT) && 1592 - ((fdq->d_bcount >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_value))) 1593 - fdq->d_fieldmask ^= FS_DQ_BCOUNT; 1594 + if ((fdq->d_fieldmask & QC_SPACE) && 1595 + ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value))) 1596 + fdq->d_fieldmask ^= QC_SPACE; 1594 1597 1595 1598 if (fdq->d_fieldmask == 0) 1596 1599 goto out_i;
+6
fs/nfs/direct.c
··· 212 212 */ 213 213 ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t pos) 214 214 { 215 + struct inode *inode = iocb->ki_filp->f_mapping->host; 216 + 217 + /* we only support swap file calling nfs_direct_IO */ 218 + if (!IS_SWAPFILE(inode)) 219 + return 0; 220 + 215 221 #ifndef CONFIG_NFS_SWAP 216 222 dprintk("NFS: nfs_direct_IO (%pD) off/no(%Ld/%lu) EINVAL\n", 217 223 iocb->ki_filp, (long long) pos, iter->nr_segs);
+3 -2
fs/nfs/inode.c
··· 352 352 353 353 nfs_attr_check_mountpoint(sb, fattr); 354 354 355 - if (((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0) && 356 - !nfs_attr_use_mounted_on_fileid(fattr)) 355 + if (nfs_attr_use_mounted_on_fileid(fattr)) 356 + fattr->fileid = fattr->mounted_on_fileid; 357 + else if ((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0) 357 358 goto out_no_inode; 358 359 if ((fattr->valid & NFS_ATTR_FATTR_TYPE) == 0) 359 360 goto out_no_inode;
-2
fs/nfs/internal.h
··· 31 31 (((fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT) == 0) && 32 32 ((fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) == 0))) 33 33 return 0; 34 - 35 - fattr->fileid = fattr->mounted_on_fileid; 36 34 return 1; 37 35 } 38 36
+1 -1
fs/nfs/nfs4client.c
··· 639 639 prev = pos; 640 640 641 641 status = nfs_wait_client_init_complete(pos); 642 - if (status == 0) { 642 + if (pos->cl_cons_state == NFS_CS_SESSION_INITING) { 643 643 nfs4_schedule_lease_recovery(pos); 644 644 status = nfs4_wait_clnt_recover(pos); 645 645 }
+39 -44
fs/quota/dquot.c
··· 2396 2396 } 2397 2397 2398 2398 /* Generic routine for getting common part of quota structure */ 2399 - static void do_get_dqblk(struct dquot *dquot, struct fs_disk_quota *di) 2399 + static void do_get_dqblk(struct dquot *dquot, struct qc_dqblk *di) 2400 2400 { 2401 2401 struct mem_dqblk *dm = &dquot->dq_dqb; 2402 2402 2403 2403 memset(di, 0, sizeof(*di)); 2404 - di->d_version = FS_DQUOT_VERSION; 2405 - di->d_flags = dquot->dq_id.type == USRQUOTA ? 2406 - FS_USER_QUOTA : FS_GROUP_QUOTA; 2407 - di->d_id = from_kqid_munged(current_user_ns(), dquot->dq_id); 2408 - 2409 2404 spin_lock(&dq_data_lock); 2410 - di->d_blk_hardlimit = stoqb(dm->dqb_bhardlimit); 2411 - di->d_blk_softlimit = stoqb(dm->dqb_bsoftlimit); 2405 + di->d_spc_hardlimit = dm->dqb_bhardlimit; 2406 + di->d_spc_softlimit = dm->dqb_bsoftlimit; 2412 2407 di->d_ino_hardlimit = dm->dqb_ihardlimit; 2413 2408 di->d_ino_softlimit = dm->dqb_isoftlimit; 2414 - di->d_bcount = dm->dqb_curspace + dm->dqb_rsvspace; 2415 - di->d_icount = dm->dqb_curinodes; 2416 - di->d_btimer = dm->dqb_btime; 2417 - di->d_itimer = dm->dqb_itime; 2409 + di->d_space = dm->dqb_curspace + dm->dqb_rsvspace; 2410 + di->d_ino_count = dm->dqb_curinodes; 2411 + di->d_spc_timer = dm->dqb_btime; 2412 + di->d_ino_timer = dm->dqb_itime; 2418 2413 spin_unlock(&dq_data_lock); 2419 2414 } 2420 2415 2421 2416 int dquot_get_dqblk(struct super_block *sb, struct kqid qid, 2422 - struct fs_disk_quota *di) 2417 + struct qc_dqblk *di) 2423 2418 { 2424 2419 struct dquot *dquot; 2425 2420 ··· 2428 2433 } 2429 2434 EXPORT_SYMBOL(dquot_get_dqblk); 2430 2435 2431 - #define VFS_FS_DQ_MASK \ 2432 - (FS_DQ_BCOUNT | FS_DQ_BSOFT | FS_DQ_BHARD | \ 2433 - FS_DQ_ICOUNT | FS_DQ_ISOFT | FS_DQ_IHARD | \ 2434 - FS_DQ_BTIMER | FS_DQ_ITIMER) 2436 + #define VFS_QC_MASK \ 2437 + (QC_SPACE | QC_SPC_SOFT | QC_SPC_HARD | \ 2438 + QC_INO_COUNT | QC_INO_SOFT | QC_INO_HARD | \ 2439 + QC_SPC_TIMER | QC_INO_TIMER) 2435 2440 2436 2441 /* Generic routine for setting common part of 
quota structure */ 2437 - static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di) 2442 + static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di) 2438 2443 { 2439 2444 struct mem_dqblk *dm = &dquot->dq_dqb; 2440 2445 int check_blim = 0, check_ilim = 0; 2441 2446 struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type]; 2442 2447 2443 - if (di->d_fieldmask & ~VFS_FS_DQ_MASK) 2448 + if (di->d_fieldmask & ~VFS_QC_MASK) 2444 2449 return -EINVAL; 2445 2450 2446 - if (((di->d_fieldmask & FS_DQ_BSOFT) && 2447 - (di->d_blk_softlimit > dqi->dqi_maxblimit)) || 2448 - ((di->d_fieldmask & FS_DQ_BHARD) && 2449 - (di->d_blk_hardlimit > dqi->dqi_maxblimit)) || 2450 - ((di->d_fieldmask & FS_DQ_ISOFT) && 2451 + if (((di->d_fieldmask & QC_SPC_SOFT) && 2452 + stoqb(di->d_spc_softlimit) > dqi->dqi_maxblimit) || 2453 + ((di->d_fieldmask & QC_SPC_HARD) && 2454 + stoqb(di->d_spc_hardlimit) > dqi->dqi_maxblimit) || 2455 + ((di->d_fieldmask & QC_INO_SOFT) && 2451 2456 (di->d_ino_softlimit > dqi->dqi_maxilimit)) || 2452 - ((di->d_fieldmask & FS_DQ_IHARD) && 2457 + ((di->d_fieldmask & QC_INO_HARD) && 2453 2458 (di->d_ino_hardlimit > dqi->dqi_maxilimit))) 2454 2459 return -ERANGE; 2455 2460 2456 2461 spin_lock(&dq_data_lock); 2457 - if (di->d_fieldmask & FS_DQ_BCOUNT) { 2458 - dm->dqb_curspace = di->d_bcount - dm->dqb_rsvspace; 2462 + if (di->d_fieldmask & QC_SPACE) { 2463 + dm->dqb_curspace = di->d_space - dm->dqb_rsvspace; 2459 2464 check_blim = 1; 2460 2465 set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags); 2461 2466 } 2462 2467 2463 - if (di->d_fieldmask & FS_DQ_BSOFT) 2464 - dm->dqb_bsoftlimit = qbtos(di->d_blk_softlimit); 2465 - if (di->d_fieldmask & FS_DQ_BHARD) 2466 - dm->dqb_bhardlimit = qbtos(di->d_blk_hardlimit); 2467 - if (di->d_fieldmask & (FS_DQ_BSOFT | FS_DQ_BHARD)) { 2468 + if (di->d_fieldmask & QC_SPC_SOFT) 2469 + dm->dqb_bsoftlimit = di->d_spc_softlimit; 2470 + if (di->d_fieldmask & QC_SPC_HARD) 2471 + dm->dqb_bhardlimit = 
di->d_spc_hardlimit; 2472 + if (di->d_fieldmask & (QC_SPC_SOFT | QC_SPC_HARD)) { 2468 2473 check_blim = 1; 2469 2474 set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags); 2470 2475 } 2471 2476 2472 - if (di->d_fieldmask & FS_DQ_ICOUNT) { 2473 - dm->dqb_curinodes = di->d_icount; 2477 + if (di->d_fieldmask & QC_INO_COUNT) { 2478 + dm->dqb_curinodes = di->d_ino_count; 2474 2479 check_ilim = 1; 2475 2480 set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags); 2476 2481 } 2477 2482 2478 - if (di->d_fieldmask & FS_DQ_ISOFT) 2483 + if (di->d_fieldmask & QC_INO_SOFT) 2479 2484 dm->dqb_isoftlimit = di->d_ino_softlimit; 2480 - if (di->d_fieldmask & FS_DQ_IHARD) 2485 + if (di->d_fieldmask & QC_INO_HARD) 2481 2486 dm->dqb_ihardlimit = di->d_ino_hardlimit; 2482 - if (di->d_fieldmask & (FS_DQ_ISOFT | FS_DQ_IHARD)) { 2487 + if (di->d_fieldmask & (QC_INO_SOFT | QC_INO_HARD)) { 2483 2488 check_ilim = 1; 2484 2489 set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags); 2485 2490 } 2486 2491 2487 - if (di->d_fieldmask & FS_DQ_BTIMER) { 2488 - dm->dqb_btime = di->d_btimer; 2492 + if (di->d_fieldmask & QC_SPC_TIMER) { 2493 + dm->dqb_btime = di->d_spc_timer; 2489 2494 check_blim = 1; 2490 2495 set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags); 2491 2496 } 2492 2497 2493 - if (di->d_fieldmask & FS_DQ_ITIMER) { 2494 - dm->dqb_itime = di->d_itimer; 2498 + if (di->d_fieldmask & QC_INO_TIMER) { 2499 + dm->dqb_itime = di->d_ino_timer; 2495 2500 check_ilim = 1; 2496 2501 set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags); 2497 2502 } ··· 2501 2506 dm->dqb_curspace < dm->dqb_bsoftlimit) { 2502 2507 dm->dqb_btime = 0; 2503 2508 clear_bit(DQ_BLKS_B, &dquot->dq_flags); 2504 - } else if (!(di->d_fieldmask & FS_DQ_BTIMER)) 2509 + } else if (!(di->d_fieldmask & QC_SPC_TIMER)) 2505 2510 /* Set grace only if user hasn't provided his own... 
*/ 2506 2511 dm->dqb_btime = get_seconds() + dqi->dqi_bgrace; 2507 2512 } ··· 2510 2515 dm->dqb_curinodes < dm->dqb_isoftlimit) { 2511 2516 dm->dqb_itime = 0; 2512 2517 clear_bit(DQ_INODES_B, &dquot->dq_flags); 2513 - } else if (!(di->d_fieldmask & FS_DQ_ITIMER)) 2518 + } else if (!(di->d_fieldmask & QC_INO_TIMER)) 2514 2519 /* Set grace only if user hasn't provided his own... */ 2515 2520 dm->dqb_itime = get_seconds() + dqi->dqi_igrace; 2516 2521 } ··· 2526 2531 } 2527 2532 2528 2533 int dquot_set_dqblk(struct super_block *sb, struct kqid qid, 2529 - struct fs_disk_quota *di) 2534 + struct qc_dqblk *di) 2530 2535 { 2531 2536 struct dquot *dquot; 2532 2537 int rc;
+137 -25
fs/quota/quota.c
··· 118 118 return sb->s_qcop->set_info(sb, type, &info); 119 119 } 120 120 121 - static void copy_to_if_dqblk(struct if_dqblk *dst, struct fs_disk_quota *src) 121 + static inline qsize_t qbtos(qsize_t blocks) 122 + { 123 + return blocks << QIF_DQBLKSIZE_BITS; 124 + } 125 + 126 + static inline qsize_t stoqb(qsize_t space) 127 + { 128 + return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS; 129 + } 130 + 131 + static void copy_to_if_dqblk(struct if_dqblk *dst, struct qc_dqblk *src) 122 132 { 123 133 memset(dst, 0, sizeof(*dst)); 124 - dst->dqb_bhardlimit = src->d_blk_hardlimit; 125 - dst->dqb_bsoftlimit = src->d_blk_softlimit; 126 - dst->dqb_curspace = src->d_bcount; 134 + dst->dqb_bhardlimit = stoqb(src->d_spc_hardlimit); 135 + dst->dqb_bsoftlimit = stoqb(src->d_spc_softlimit); 136 + dst->dqb_curspace = src->d_space; 127 137 dst->dqb_ihardlimit = src->d_ino_hardlimit; 128 138 dst->dqb_isoftlimit = src->d_ino_softlimit; 129 - dst->dqb_curinodes = src->d_icount; 130 - dst->dqb_btime = src->d_btimer; 131 - dst->dqb_itime = src->d_itimer; 139 + dst->dqb_curinodes = src->d_ino_count; 140 + dst->dqb_btime = src->d_spc_timer; 141 + dst->dqb_itime = src->d_ino_timer; 132 142 dst->dqb_valid = QIF_ALL; 133 143 } 134 144 ··· 146 136 void __user *addr) 147 137 { 148 138 struct kqid qid; 149 - struct fs_disk_quota fdq; 139 + struct qc_dqblk fdq; 150 140 struct if_dqblk idq; 151 141 int ret; 152 142 ··· 164 154 return 0; 165 155 } 166 156 167 - static void copy_from_if_dqblk(struct fs_disk_quota *dst, struct if_dqblk *src) 157 + static void copy_from_if_dqblk(struct qc_dqblk *dst, struct if_dqblk *src) 168 158 { 169 - dst->d_blk_hardlimit = src->dqb_bhardlimit; 170 - dst->d_blk_softlimit = src->dqb_bsoftlimit; 171 - dst->d_bcount = src->dqb_curspace; 159 + dst->d_spc_hardlimit = qbtos(src->dqb_bhardlimit); 160 + dst->d_spc_softlimit = qbtos(src->dqb_bsoftlimit); 161 + dst->d_space = src->dqb_curspace; 172 162 dst->d_ino_hardlimit = src->dqb_ihardlimit; 173 163 
dst->d_ino_softlimit = src->dqb_isoftlimit; 174 - dst->d_icount = src->dqb_curinodes; 175 - dst->d_btimer = src->dqb_btime; 176 - dst->d_itimer = src->dqb_itime; 164 + dst->d_ino_count = src->dqb_curinodes; 165 + dst->d_spc_timer = src->dqb_btime; 166 + dst->d_ino_timer = src->dqb_itime; 177 167 178 168 dst->d_fieldmask = 0; 179 169 if (src->dqb_valid & QIF_BLIMITS) 180 - dst->d_fieldmask |= FS_DQ_BSOFT | FS_DQ_BHARD; 170 + dst->d_fieldmask |= QC_SPC_SOFT | QC_SPC_HARD; 181 171 if (src->dqb_valid & QIF_SPACE) 182 - dst->d_fieldmask |= FS_DQ_BCOUNT; 172 + dst->d_fieldmask |= QC_SPACE; 183 173 if (src->dqb_valid & QIF_ILIMITS) 184 - dst->d_fieldmask |= FS_DQ_ISOFT | FS_DQ_IHARD; 174 + dst->d_fieldmask |= QC_INO_SOFT | QC_INO_HARD; 185 175 if (src->dqb_valid & QIF_INODES) 186 - dst->d_fieldmask |= FS_DQ_ICOUNT; 176 + dst->d_fieldmask |= QC_INO_COUNT; 187 177 if (src->dqb_valid & QIF_BTIME) 188 - dst->d_fieldmask |= FS_DQ_BTIMER; 178 + dst->d_fieldmask |= QC_SPC_TIMER; 189 179 if (src->dqb_valid & QIF_ITIME) 190 - dst->d_fieldmask |= FS_DQ_ITIMER; 180 + dst->d_fieldmask |= QC_INO_TIMER; 191 181 } 192 182 193 183 static int quota_setquota(struct super_block *sb, int type, qid_t id, 194 184 void __user *addr) 195 185 { 196 - struct fs_disk_quota fdq; 186 + struct qc_dqblk fdq; 197 187 struct if_dqblk idq; 198 188 struct kqid qid; 199 189 ··· 257 247 return ret; 258 248 } 259 249 250 + /* 251 + * XFS defines BBTOB and BTOBB macros inside fs/xfs/ and we cannot move them 252 + * out of there as xfsprogs rely on definitions being in that header file. So 253 + * just define same functions here for quota purposes. 
254 + */ 255 + #define XFS_BB_SHIFT 9 256 + 257 + static inline u64 quota_bbtob(u64 blocks) 258 + { 259 + return blocks << XFS_BB_SHIFT; 260 + } 261 + 262 + static inline u64 quota_btobb(u64 bytes) 263 + { 264 + return (bytes + (1 << XFS_BB_SHIFT) - 1) >> XFS_BB_SHIFT; 265 + } 266 + 267 + static void copy_from_xfs_dqblk(struct qc_dqblk *dst, struct fs_disk_quota *src) 268 + { 269 + dst->d_spc_hardlimit = quota_bbtob(src->d_blk_hardlimit); 270 + dst->d_spc_softlimit = quota_bbtob(src->d_blk_softlimit); 271 + dst->d_ino_hardlimit = src->d_ino_hardlimit; 272 + dst->d_ino_softlimit = src->d_ino_softlimit; 273 + dst->d_space = quota_bbtob(src->d_bcount); 274 + dst->d_ino_count = src->d_icount; 275 + dst->d_ino_timer = src->d_itimer; 276 + dst->d_spc_timer = src->d_btimer; 277 + dst->d_ino_warns = src->d_iwarns; 278 + dst->d_spc_warns = src->d_bwarns; 279 + dst->d_rt_spc_hardlimit = quota_bbtob(src->d_rtb_hardlimit); 280 + dst->d_rt_spc_softlimit = quota_bbtob(src->d_rtb_softlimit); 281 + dst->d_rt_space = quota_bbtob(src->d_rtbcount); 282 + dst->d_rt_spc_timer = src->d_rtbtimer; 283 + dst->d_rt_spc_warns = src->d_rtbwarns; 284 + dst->d_fieldmask = 0; 285 + if (src->d_fieldmask & FS_DQ_ISOFT) 286 + dst->d_fieldmask |= QC_INO_SOFT; 287 + if (src->d_fieldmask & FS_DQ_IHARD) 288 + dst->d_fieldmask |= QC_INO_HARD; 289 + if (src->d_fieldmask & FS_DQ_BSOFT) 290 + dst->d_fieldmask |= QC_SPC_SOFT; 291 + if (src->d_fieldmask & FS_DQ_BHARD) 292 + dst->d_fieldmask |= QC_SPC_HARD; 293 + if (src->d_fieldmask & FS_DQ_RTBSOFT) 294 + dst->d_fieldmask |= QC_RT_SPC_SOFT; 295 + if (src->d_fieldmask & FS_DQ_RTBHARD) 296 + dst->d_fieldmask |= QC_RT_SPC_HARD; 297 + if (src->d_fieldmask & FS_DQ_BTIMER) 298 + dst->d_fieldmask |= QC_SPC_TIMER; 299 + if (src->d_fieldmask & FS_DQ_ITIMER) 300 + dst->d_fieldmask |= QC_INO_TIMER; 301 + if (src->d_fieldmask & FS_DQ_RTBTIMER) 302 + dst->d_fieldmask |= QC_RT_SPC_TIMER; 303 + if (src->d_fieldmask & FS_DQ_BWARNS) 304 + dst->d_fieldmask |= QC_SPC_WARNS; 
305 + if (src->d_fieldmask & FS_DQ_IWARNS) 306 + dst->d_fieldmask |= QC_INO_WARNS; 307 + if (src->d_fieldmask & FS_DQ_RTBWARNS) 308 + dst->d_fieldmask |= QC_RT_SPC_WARNS; 309 + if (src->d_fieldmask & FS_DQ_BCOUNT) 310 + dst->d_fieldmask |= QC_SPACE; 311 + if (src->d_fieldmask & FS_DQ_ICOUNT) 312 + dst->d_fieldmask |= QC_INO_COUNT; 313 + if (src->d_fieldmask & FS_DQ_RTBCOUNT) 314 + dst->d_fieldmask |= QC_RT_SPACE; 315 + } 316 + 260 317 static int quota_setxquota(struct super_block *sb, int type, qid_t id, 261 318 void __user *addr) 262 319 { 263 320 struct fs_disk_quota fdq; 321 + struct qc_dqblk qdq; 264 322 struct kqid qid; 265 323 266 324 if (copy_from_user(&fdq, addr, sizeof(fdq))) ··· 338 260 qid = make_kqid(current_user_ns(), type, id); 339 261 if (!qid_valid(qid)) 340 262 return -EINVAL; 341 - return sb->s_qcop->set_dqblk(sb, qid, &fdq); 263 + copy_from_xfs_dqblk(&qdq, &fdq); 264 + return sb->s_qcop->set_dqblk(sb, qid, &qdq); 265 + } 266 + 267 + static void copy_to_xfs_dqblk(struct fs_disk_quota *dst, struct qc_dqblk *src, 268 + int type, qid_t id) 269 + { 270 + memset(dst, 0, sizeof(*dst)); 271 + dst->d_version = FS_DQUOT_VERSION; 272 + dst->d_id = id; 273 + if (type == USRQUOTA) 274 + dst->d_flags = FS_USER_QUOTA; 275 + else if (type == PRJQUOTA) 276 + dst->d_flags = FS_PROJ_QUOTA; 277 + else 278 + dst->d_flags = FS_GROUP_QUOTA; 279 + dst->d_blk_hardlimit = quota_btobb(src->d_spc_hardlimit); 280 + dst->d_blk_softlimit = quota_btobb(src->d_spc_softlimit); 281 + dst->d_ino_hardlimit = src->d_ino_hardlimit; 282 + dst->d_ino_softlimit = src->d_ino_softlimit; 283 + dst->d_bcount = quota_btobb(src->d_space); 284 + dst->d_icount = src->d_ino_count; 285 + dst->d_itimer = src->d_ino_timer; 286 + dst->d_btimer = src->d_spc_timer; 287 + dst->d_iwarns = src->d_ino_warns; 288 + dst->d_bwarns = src->d_spc_warns; 289 + dst->d_rtb_hardlimit = quota_btobb(src->d_rt_spc_hardlimit); 290 + dst->d_rtb_softlimit = quota_btobb(src->d_rt_spc_softlimit); 291 + dst->d_rtbcount = 
quota_btobb(src->d_rt_space); 292 + dst->d_rtbtimer = src->d_rt_spc_timer; 293 + dst->d_rtbwarns = src->d_rt_spc_warns; 342 294 } 343 295 344 296 static int quota_getxquota(struct super_block *sb, int type, qid_t id, 345 297 void __user *addr) 346 298 { 347 299 struct fs_disk_quota fdq; 300 + struct qc_dqblk qdq; 348 301 struct kqid qid; 349 302 int ret; 350 303 ··· 384 275 qid = make_kqid(current_user_ns(), type, id); 385 276 if (!qid_valid(qid)) 386 277 return -EINVAL; 387 - ret = sb->s_qcop->get_dqblk(sb, qid, &fdq); 388 - if (!ret && copy_to_user(addr, &fdq, sizeof(fdq))) 278 + ret = sb->s_qcop->get_dqblk(sb, qid, &qdq); 279 + if (ret) 280 + return ret; 281 + copy_to_xfs_dqblk(&fdq, &qdq, type, id); 282 + if (copy_to_user(addr, &fdq, sizeof(fdq))) 389 283 return -EFAULT; 390 284 return ret; 391 285 }
+1 -1
fs/udf/file.c
··· 224 224 static int udf_release_file(struct inode *inode, struct file *filp) 225 225 { 226 226 if (filp->f_mode & FMODE_WRITE && 227 - atomic_read(&inode->i_writecount) > 1) { 227 + atomic_read(&inode->i_writecount) == 1) { 228 228 /* 229 229 * Grab i_mutex to avoid races with writes changing i_size 230 230 * while we are running.
+2 -2
fs/xfs/xfs_qm.h
··· 166 166 /* quota ops */ 167 167 extern int xfs_qm_scall_trunc_qfiles(struct xfs_mount *, uint); 168 168 extern int xfs_qm_scall_getquota(struct xfs_mount *, xfs_dqid_t, 169 - uint, struct fs_disk_quota *); 169 + uint, struct qc_dqblk *); 170 170 extern int xfs_qm_scall_setqlim(struct xfs_mount *, xfs_dqid_t, uint, 171 - struct fs_disk_quota *); 171 + struct qc_dqblk *); 172 172 extern int xfs_qm_scall_getqstat(struct xfs_mount *, 173 173 struct fs_quota_stat *); 174 174 extern int xfs_qm_scall_getqstatv(struct xfs_mount *,
+66 -90
fs/xfs/xfs_qm_syscalls.c
··· 39 39 STATIC int xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *, 40 40 uint); 41 41 STATIC uint xfs_qm_export_flags(uint); 42 - STATIC uint xfs_qm_export_qtype_flags(uint); 43 42 44 43 /* 45 44 * Turn off quota accounting and/or enforcement for all udquots and/or ··· 572 573 return 0; 573 574 } 574 575 575 - #define XFS_DQ_MASK \ 576 - (FS_DQ_LIMIT_MASK | FS_DQ_TIMER_MASK | FS_DQ_WARNS_MASK) 576 + #define XFS_QC_MASK \ 577 + (QC_LIMIT_MASK | QC_TIMER_MASK | QC_WARNS_MASK) 577 578 578 579 /* 579 580 * Adjust quota limits, and start/stop timers accordingly. ··· 583 584 struct xfs_mount *mp, 584 585 xfs_dqid_t id, 585 586 uint type, 586 - fs_disk_quota_t *newlim) 587 + struct qc_dqblk *newlim) 587 588 { 588 589 struct xfs_quotainfo *q = mp->m_quotainfo; 589 590 struct xfs_disk_dquot *ddq; ··· 592 593 int error; 593 594 xfs_qcnt_t hard, soft; 594 595 595 - if (newlim->d_fieldmask & ~XFS_DQ_MASK) 596 + if (newlim->d_fieldmask & ~XFS_QC_MASK) 596 597 return -EINVAL; 597 - if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0) 598 + if ((newlim->d_fieldmask & XFS_QC_MASK) == 0) 598 599 return 0; 599 600 600 601 /* ··· 632 633 /* 633 634 * Make sure that hardlimits are >= soft limits before changing. 634 635 */ 635 - hard = (newlim->d_fieldmask & FS_DQ_BHARD) ? 636 - (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) : 636 + hard = (newlim->d_fieldmask & QC_SPC_HARD) ? 637 + (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_hardlimit) : 637 638 be64_to_cpu(ddq->d_blk_hardlimit); 638 - soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ? 639 - (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) : 639 + soft = (newlim->d_fieldmask & QC_SPC_SOFT) ? 
640 + (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_softlimit) : 640 641 be64_to_cpu(ddq->d_blk_softlimit); 641 642 if (hard == 0 || hard >= soft) { 642 643 ddq->d_blk_hardlimit = cpu_to_be64(hard); ··· 649 650 } else { 650 651 xfs_debug(mp, "blkhard %Ld < blksoft %Ld", hard, soft); 651 652 } 652 - hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ? 653 - (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) : 653 + hard = (newlim->d_fieldmask & QC_RT_SPC_HARD) ? 654 + (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_hardlimit) : 654 655 be64_to_cpu(ddq->d_rtb_hardlimit); 655 - soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ? 656 - (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) : 656 + soft = (newlim->d_fieldmask & QC_RT_SPC_SOFT) ? 657 + (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_softlimit) : 657 658 be64_to_cpu(ddq->d_rtb_softlimit); 658 659 if (hard == 0 || hard >= soft) { 659 660 ddq->d_rtb_hardlimit = cpu_to_be64(hard); ··· 666 667 xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld", hard, soft); 667 668 } 668 669 669 - hard = (newlim->d_fieldmask & FS_DQ_IHARD) ? 670 + hard = (newlim->d_fieldmask & QC_INO_HARD) ? 670 671 (xfs_qcnt_t) newlim->d_ino_hardlimit : 671 672 be64_to_cpu(ddq->d_ino_hardlimit); 672 - soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ? 673 + soft = (newlim->d_fieldmask & QC_INO_SOFT) ? 
673 674 (xfs_qcnt_t) newlim->d_ino_softlimit : 674 675 be64_to_cpu(ddq->d_ino_softlimit); 675 676 if (hard == 0 || hard >= soft) { ··· 686 687 /* 687 688 * Update warnings counter(s) if requested 688 689 */ 689 - if (newlim->d_fieldmask & FS_DQ_BWARNS) 690 - ddq->d_bwarns = cpu_to_be16(newlim->d_bwarns); 691 - if (newlim->d_fieldmask & FS_DQ_IWARNS) 692 - ddq->d_iwarns = cpu_to_be16(newlim->d_iwarns); 693 - if (newlim->d_fieldmask & FS_DQ_RTBWARNS) 694 - ddq->d_rtbwarns = cpu_to_be16(newlim->d_rtbwarns); 690 + if (newlim->d_fieldmask & QC_SPC_WARNS) 691 + ddq->d_bwarns = cpu_to_be16(newlim->d_spc_warns); 692 + if (newlim->d_fieldmask & QC_INO_WARNS) 693 + ddq->d_iwarns = cpu_to_be16(newlim->d_ino_warns); 694 + if (newlim->d_fieldmask & QC_RT_SPC_WARNS) 695 + ddq->d_rtbwarns = cpu_to_be16(newlim->d_rt_spc_warns); 695 696 696 697 if (id == 0) { 697 698 /* ··· 701 702 * soft and hard limit values (already done, above), and 702 703 * for warnings. 703 704 */ 704 - if (newlim->d_fieldmask & FS_DQ_BTIMER) { 705 - q->qi_btimelimit = newlim->d_btimer; 706 - ddq->d_btimer = cpu_to_be32(newlim->d_btimer); 705 + if (newlim->d_fieldmask & QC_SPC_TIMER) { 706 + q->qi_btimelimit = newlim->d_spc_timer; 707 + ddq->d_btimer = cpu_to_be32(newlim->d_spc_timer); 707 708 } 708 - if (newlim->d_fieldmask & FS_DQ_ITIMER) { 709 - q->qi_itimelimit = newlim->d_itimer; 710 - ddq->d_itimer = cpu_to_be32(newlim->d_itimer); 709 + if (newlim->d_fieldmask & QC_INO_TIMER) { 710 + q->qi_itimelimit = newlim->d_ino_timer; 711 + ddq->d_itimer = cpu_to_be32(newlim->d_ino_timer); 711 712 } 712 - if (newlim->d_fieldmask & FS_DQ_RTBTIMER) { 713 - q->qi_rtbtimelimit = newlim->d_rtbtimer; 714 - ddq->d_rtbtimer = cpu_to_be32(newlim->d_rtbtimer); 713 + if (newlim->d_fieldmask & QC_RT_SPC_TIMER) { 714 + q->qi_rtbtimelimit = newlim->d_rt_spc_timer; 715 + ddq->d_rtbtimer = cpu_to_be32(newlim->d_rt_spc_timer); 715 716 } 716 - if (newlim->d_fieldmask & FS_DQ_BWARNS) 717 - q->qi_bwarnlimit = newlim->d_bwarns; 718 - 
if (newlim->d_fieldmask & FS_DQ_IWARNS) 719 - q->qi_iwarnlimit = newlim->d_iwarns; 720 - if (newlim->d_fieldmask & FS_DQ_RTBWARNS) 721 - q->qi_rtbwarnlimit = newlim->d_rtbwarns; 717 + if (newlim->d_fieldmask & QC_SPC_WARNS) 718 + q->qi_bwarnlimit = newlim->d_spc_warns; 719 + if (newlim->d_fieldmask & QC_INO_WARNS) 720 + q->qi_iwarnlimit = newlim->d_ino_warns; 721 + if (newlim->d_fieldmask & QC_RT_SPC_WARNS) 722 + q->qi_rtbwarnlimit = newlim->d_rt_spc_warns; 722 723 } else { 723 724 /* 724 725 * If the user is now over quota, start the timelimit. ··· 823 824 struct xfs_mount *mp, 824 825 xfs_dqid_t id, 825 826 uint type, 826 - struct fs_disk_quota *dst) 827 + struct qc_dqblk *dst) 827 828 { 828 829 struct xfs_dquot *dqp; 829 830 int error; ··· 847 848 } 848 849 849 850 memset(dst, 0, sizeof(*dst)); 850 - dst->d_version = FS_DQUOT_VERSION; 851 - dst->d_flags = xfs_qm_export_qtype_flags(dqp->q_core.d_flags); 852 - dst->d_id = be32_to_cpu(dqp->q_core.d_id); 853 - dst->d_blk_hardlimit = 854 - XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit)); 855 - dst->d_blk_softlimit = 856 - XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit)); 851 + dst->d_spc_hardlimit = 852 + XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit)); 853 + dst->d_spc_softlimit = 854 + XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit)); 857 855 dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit); 858 856 dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit); 859 - dst->d_bcount = XFS_FSB_TO_BB(mp, dqp->q_res_bcount); 860 - dst->d_icount = dqp->q_res_icount; 861 - dst->d_btimer = be32_to_cpu(dqp->q_core.d_btimer); 862 - dst->d_itimer = be32_to_cpu(dqp->q_core.d_itimer); 863 - dst->d_iwarns = be16_to_cpu(dqp->q_core.d_iwarns); 864 - dst->d_bwarns = be16_to_cpu(dqp->q_core.d_bwarns); 865 - dst->d_rtb_hardlimit = 866 - XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit)); 867 - dst->d_rtb_softlimit = 868 - XFS_FSB_TO_BB(mp, 
be64_to_cpu(dqp->q_core.d_rtb_softlimit)); 869 - dst->d_rtbcount = XFS_FSB_TO_BB(mp, dqp->q_res_rtbcount); 870 - dst->d_rtbtimer = be32_to_cpu(dqp->q_core.d_rtbtimer); 871 - dst->d_rtbwarns = be16_to_cpu(dqp->q_core.d_rtbwarns); 857 + dst->d_space = XFS_FSB_TO_B(mp, dqp->q_res_bcount); 858 + dst->d_ino_count = dqp->q_res_icount; 859 + dst->d_spc_timer = be32_to_cpu(dqp->q_core.d_btimer); 860 + dst->d_ino_timer = be32_to_cpu(dqp->q_core.d_itimer); 861 + dst->d_ino_warns = be16_to_cpu(dqp->q_core.d_iwarns); 862 + dst->d_spc_warns = be16_to_cpu(dqp->q_core.d_bwarns); 863 + dst->d_rt_spc_hardlimit = 864 + XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit)); 865 + dst->d_rt_spc_softlimit = 866 + XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit)); 867 + dst->d_rt_space = XFS_FSB_TO_B(mp, dqp->q_res_rtbcount); 868 + dst->d_rt_spc_timer = be32_to_cpu(dqp->q_core.d_rtbtimer); 869 + dst->d_rt_spc_warns = be16_to_cpu(dqp->q_core.d_rtbwarns); 872 870 873 871 /* 874 872 * Internally, we don't reset all the timers when quota enforcement ··· 878 882 dqp->q_core.d_flags == XFS_DQ_GROUP) || 879 883 (!XFS_IS_PQUOTA_ENFORCED(mp) && 880 884 dqp->q_core.d_flags == XFS_DQ_PROJ)) { 881 - dst->d_btimer = 0; 882 - dst->d_itimer = 0; 883 - dst->d_rtbtimer = 0; 885 + dst->d_spc_timer = 0; 886 + dst->d_ino_timer = 0; 887 + dst->d_rt_spc_timer = 0; 884 888 } 885 889 886 890 #ifdef DEBUG 887 - if (((XFS_IS_UQUOTA_ENFORCED(mp) && dst->d_flags == FS_USER_QUOTA) || 888 - (XFS_IS_GQUOTA_ENFORCED(mp) && dst->d_flags == FS_GROUP_QUOTA) || 889 - (XFS_IS_PQUOTA_ENFORCED(mp) && dst->d_flags == FS_PROJ_QUOTA)) && 890 - dst->d_id != 0) { 891 - if ((dst->d_bcount > dst->d_blk_softlimit) && 892 - (dst->d_blk_softlimit > 0)) { 893 - ASSERT(dst->d_btimer != 0); 891 + if (((XFS_IS_UQUOTA_ENFORCED(mp) && type == XFS_DQ_USER) || 892 + (XFS_IS_GQUOTA_ENFORCED(mp) && type == XFS_DQ_GROUP) || 893 + (XFS_IS_PQUOTA_ENFORCED(mp) && type == XFS_DQ_PROJ)) && 894 + id != 0) { 895 + if ((dst->d_space > 
dst->d_spc_softlimit) && 896 + (dst->d_spc_softlimit > 0)) { 897 + ASSERT(dst->d_spc_timer != 0); 894 898 } 895 - if ((dst->d_icount > dst->d_ino_softlimit) && 899 + if ((dst->d_ino_count > dst->d_ino_softlimit) && 896 900 (dst->d_ino_softlimit > 0)) { 897 - ASSERT(dst->d_itimer != 0); 901 + ASSERT(dst->d_ino_timer != 0); 898 902 } 899 903 } 900 904 #endif 901 905 out_put: 902 906 xfs_qm_dqput(dqp); 903 907 return error; 904 - } 905 - 906 - STATIC uint 907 - xfs_qm_export_qtype_flags( 908 - uint flags) 909 - { 910 - /* 911 - * Can't be more than one, or none. 912 - */ 913 - ASSERT((flags & (FS_PROJ_QUOTA | FS_USER_QUOTA)) != 914 - (FS_PROJ_QUOTA | FS_USER_QUOTA)); 915 - ASSERT((flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)) != 916 - (FS_PROJ_QUOTA | FS_GROUP_QUOTA)); 917 - ASSERT((flags & (FS_USER_QUOTA | FS_GROUP_QUOTA)) != 918 - (FS_USER_QUOTA | FS_GROUP_QUOTA)); 919 - ASSERT((flags & (FS_PROJ_QUOTA|FS_USER_QUOTA|FS_GROUP_QUOTA)) != 0); 920 - 921 - return (flags & XFS_DQ_USER) ? 922 - FS_USER_QUOTA : (flags & XFS_DQ_PROJ) ? 923 - FS_PROJ_QUOTA : FS_GROUP_QUOTA; 924 908 } 925 909 926 910 STATIC uint
+4 -4
fs/xfs/xfs_quotaops.c
··· 131 131 xfs_fs_get_dqblk( 132 132 struct super_block *sb, 133 133 struct kqid qid, 134 - struct fs_disk_quota *fdq) 134 + struct qc_dqblk *qdq) 135 135 { 136 136 struct xfs_mount *mp = XFS_M(sb); 137 137 ··· 141 141 return -ESRCH; 142 142 143 143 return xfs_qm_scall_getquota(mp, from_kqid(&init_user_ns, qid), 144 - xfs_quota_type(qid.type), fdq); 144 + xfs_quota_type(qid.type), qdq); 145 145 } 146 146 147 147 STATIC int 148 148 xfs_fs_set_dqblk( 149 149 struct super_block *sb, 150 150 struct kqid qid, 151 - struct fs_disk_quota *fdq) 151 + struct qc_dqblk *qdq) 152 152 { 153 153 struct xfs_mount *mp = XFS_M(sb); 154 154 ··· 160 160 return -ESRCH; 161 161 162 162 return xfs_qm_scall_setqlim(mp, from_kqid(&init_user_ns, qid), 163 - xfs_quota_type(qid.type), fdq); 163 + xfs_quota_type(qid.type), qdq); 164 164 } 165 165 166 166 const struct quotactl_ops xfs_quotactl_operations = {
+6
include/linux/i2c.h
··· 228 228 struct device dev; /* the device structure */ 229 229 int irq; /* irq issued by device */ 230 230 struct list_head detected; 231 + #if IS_ENABLED(CONFIG_I2C_SLAVE) 231 232 i2c_slave_cb_t slave_cb; /* callback for slave mode */ 233 + #endif 232 234 }; 233 235 #define to_i2c_client(d) container_of(d, struct i2c_client, dev) 234 236 ··· 255 253 256 254 /* I2C slave support */ 257 255 256 + #if IS_ENABLED(CONFIG_I2C_SLAVE) 258 257 enum i2c_slave_event { 259 258 I2C_SLAVE_REQ_READ_START, 260 259 I2C_SLAVE_REQ_READ_END, ··· 272 269 { 273 270 return client->slave_cb(client, event, val); 274 271 } 272 + #endif 275 273 276 274 /** 277 275 * struct i2c_board_info - template for device creation ··· 408 404 /* To determine what the adapter supports */ 409 405 u32 (*functionality) (struct i2c_adapter *); 410 406 407 + #if IS_ENABLED(CONFIG_I2C_SLAVE) 411 408 int (*reg_slave)(struct i2c_client *client); 412 409 int (*unreg_slave)(struct i2c_client *client); 410 + #endif 413 411 }; 414 412 415 413 /**
+45 -13
include/linux/if_vlan.h
··· 472 472 /** 473 473 * vlan_get_protocol - get protocol EtherType. 474 474 * @skb: skbuff to query 475 + * @type: first vlan protocol 476 + * @depth: buffer to store length of eth and vlan tags in bytes 475 477 * 476 478 * Returns the EtherType of the packet, regardless of whether it is 477 479 * vlan encapsulated (normal or hardware accelerated) or not. 478 480 */ 479 - static inline __be16 vlan_get_protocol(const struct sk_buff *skb) 481 + static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type, 482 + int *depth) 480 483 { 481 - __be16 protocol = 0; 484 + unsigned int vlan_depth = skb->mac_len; 482 485 483 - if (skb_vlan_tag_present(skb) || 484 - skb->protocol != cpu_to_be16(ETH_P_8021Q)) 485 - protocol = skb->protocol; 486 - else { 487 - __be16 proto, *protop; 488 - protop = skb_header_pointer(skb, offsetof(struct vlan_ethhdr, 489 - h_vlan_encapsulated_proto), 490 - sizeof(proto), &proto); 491 - if (likely(protop)) 492 - protocol = *protop; 486 + /* if type is 802.1Q/AD then the header should already be 487 + * present at mac_len - VLAN_HLEN (if mac_len > 0), or at 488 + * ETH_HLEN otherwise 489 + */ 490 + if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) { 491 + if (vlan_depth) { 492 + if (WARN_ON(vlan_depth < VLAN_HLEN)) 493 + return 0; 494 + vlan_depth -= VLAN_HLEN; 495 + } else { 496 + vlan_depth = ETH_HLEN; 497 + } 498 + do { 499 + struct vlan_hdr *vh; 500 + 501 + if (unlikely(!pskb_may_pull(skb, 502 + vlan_depth + VLAN_HLEN))) 503 + return 0; 504 + 505 + vh = (struct vlan_hdr *)(skb->data + vlan_depth); 506 + type = vh->h_vlan_encapsulated_proto; 507 + vlan_depth += VLAN_HLEN; 508 + } while (type == htons(ETH_P_8021Q) || 509 + type == htons(ETH_P_8021AD)); 493 510 } 494 511 495 - return protocol; 512 + if (depth) 513 + *depth = vlan_depth; 514 + 515 + return type; 516 + } 517 + 518 + /** 519 + * vlan_get_protocol - get protocol EtherType. 
520 + * @skb: skbuff to query 521 + * 522 + * Returns the EtherType of the packet, regardless of whether it is 523 + * vlan encapsulated (normal or hardware accelerated) or not. 524 + */ 525 + static inline __be16 vlan_get_protocol(struct sk_buff *skb) 526 + { 527 + return __vlan_get_protocol(skb, skb->protocol, NULL); 496 528 } 497 529 498 530 static inline void vlan_set_encap_proto(struct sk_buff *skb,
+1 -1
include/linux/kernel.h
··· 176 176 */ 177 177 # define might_sleep() \ 178 178 do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0) 179 - # define sched_annotate_sleep() __set_current_state(TASK_RUNNING) 179 + # define sched_annotate_sleep() (current->task_state_change = 0) 180 180 #else 181 181 static inline void ___might_sleep(const char *file, int line, 182 182 int preempt_offset) { }
+1 -1
include/linux/mlx4/device.h
··· 98 98 MLX4_MAX_NUM_PF = 16, 99 99 MLX4_MAX_NUM_VF = 126, 100 100 MLX4_MAX_NUM_VF_P_PORT = 64, 101 - MLX4_MFUNC_MAX = 80, 101 + MLX4_MFUNC_MAX = 128, 102 102 MLX4_MAX_EQ_NUM = 1024, 103 103 MLX4_MFUNC_EQ_NUM = 4, 104 104 MLX4_MFUNC_MAX_EQES = 8,
+4 -2
include/linux/mm.h
··· 1070 1070 #define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */ 1071 1071 #define VM_FAULT_HWPOISON 0x0010 /* Hit poisoned small page */ 1072 1072 #define VM_FAULT_HWPOISON_LARGE 0x0020 /* Hit poisoned large page. Index encoded in upper bits */ 1073 + #define VM_FAULT_SIGSEGV 0x0040 1073 1074 1074 1075 #define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */ 1075 1076 #define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */ ··· 1079 1078 1080 1079 #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */ 1081 1080 1082 - #define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \ 1083 - VM_FAULT_FALLBACK | VM_FAULT_HWPOISON_LARGE) 1081 + #define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \ 1082 + VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \ 1083 + VM_FAULT_FALLBACK) 1084 1084 1085 1085 /* Encode hstate index for a hwpoisoned large page */ 1086 1086 #define VM_FAULT_SET_HINDEX(x) ((x) << 12)
-6
include/linux/perf_event.h
··· 450 450 #endif /* CONFIG_PERF_EVENTS */ 451 451 }; 452 452 453 - enum perf_event_context_type { 454 - task_context, 455 - cpu_context, 456 - }; 457 - 458 453 /** 459 454 * struct perf_event_context - event context structure 460 455 * ··· 457 462 */ 458 463 struct perf_event_context { 459 464 struct pmu *pmu; 460 - enum perf_event_context_type type; 461 465 /* 462 466 * Protect the states of the events in the list, 463 467 * nr_active, and the list:
+45 -2
include/linux/quota.h
··· 321 321 322 322 struct path; 323 323 324 + /* Structure for communicating via ->get_dqblk() & ->set_dqblk() */ 325 + struct qc_dqblk { 326 + int d_fieldmask; /* mask of fields to change in ->set_dqblk() */ 327 + u64 d_spc_hardlimit; /* absolute limit on used space */ 328 + u64 d_spc_softlimit; /* preferred limit on used space */ 329 + u64 d_ino_hardlimit; /* maximum # allocated inodes */ 330 + u64 d_ino_softlimit; /* preferred inode limit */ 331 + u64 d_space; /* Space owned by the user */ 332 + u64 d_ino_count; /* # inodes owned by the user */ 333 + s64 d_ino_timer; /* zero if within inode limits */ 334 + /* if not, we refuse service */ 335 + s64 d_spc_timer; /* similar to above; for space */ 336 + int d_ino_warns; /* # warnings issued wrt num inodes */ 337 + int d_spc_warns; /* # warnings issued wrt used space */ 338 + u64 d_rt_spc_hardlimit; /* absolute limit on realtime space */ 339 + u64 d_rt_spc_softlimit; /* preferred limit on RT space */ 340 + u64 d_rt_space; /* realtime space owned */ 341 + s64 d_rt_spc_timer; /* similar to above; for RT space */ 342 + int d_rt_spc_warns; /* # warnings issued wrt RT space */ 343 + }; 344 + 345 + /* Field specifiers for ->set_dqblk() in struct qc_dqblk */ 346 + #define QC_INO_SOFT (1<<0) 347 + #define QC_INO_HARD (1<<1) 348 + #define QC_SPC_SOFT (1<<2) 349 + #define QC_SPC_HARD (1<<3) 350 + #define QC_RT_SPC_SOFT (1<<4) 351 + #define QC_RT_SPC_HARD (1<<5) 352 + #define QC_LIMIT_MASK (QC_INO_SOFT | QC_INO_HARD | QC_SPC_SOFT | QC_SPC_HARD | \ 353 + QC_RT_SPC_SOFT | QC_RT_SPC_HARD) 354 + #define QC_SPC_TIMER (1<<6) 355 + #define QC_INO_TIMER (1<<7) 356 + #define QC_RT_SPC_TIMER (1<<8) 357 + #define QC_TIMER_MASK (QC_SPC_TIMER | QC_INO_TIMER | QC_RT_SPC_TIMER) 358 + #define QC_SPC_WARNS (1<<9) 359 + #define QC_INO_WARNS (1<<10) 360 + #define QC_RT_SPC_WARNS (1<<11) 361 + #define QC_WARNS_MASK (QC_SPC_WARNS | QC_INO_WARNS | QC_RT_SPC_WARNS) 362 + #define QC_SPACE (1<<12) 363 + #define QC_INO_COUNT (1<<13) 364 + #define 
QC_RT_SPACE (1<<14) 365 + #define QC_ACCT_MASK (QC_SPACE | QC_INO_COUNT | QC_RT_SPACE) 366 + 324 367 /* Operations handling requests from userspace */ 325 368 struct quotactl_ops { 326 369 int (*quota_on)(struct super_block *, int, int, struct path *); ··· 372 329 int (*quota_sync)(struct super_block *, int); 373 330 int (*get_info)(struct super_block *, int, struct if_dqinfo *); 374 331 int (*set_info)(struct super_block *, int, struct if_dqinfo *); 375 - int (*get_dqblk)(struct super_block *, struct kqid, struct fs_disk_quota *); 376 - int (*set_dqblk)(struct super_block *, struct kqid, struct fs_disk_quota *); 332 + int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); 333 + int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); 377 334 int (*get_xstate)(struct super_block *, struct fs_quota_stat *); 378 335 int (*set_xstate)(struct super_block *, unsigned int, int); 379 336 int (*get_xstatev)(struct super_block *, struct fs_quota_statv *);
+2 -2
include/linux/quotaops.h
··· 98 98 int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); 99 99 int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); 100 100 int dquot_get_dqblk(struct super_block *sb, struct kqid id, 101 - struct fs_disk_quota *di); 101 + struct qc_dqblk *di); 102 102 int dquot_set_dqblk(struct super_block *sb, struct kqid id, 103 - struct fs_disk_quota *di); 103 + struct qc_dqblk *di); 104 104 105 105 int __dquot_transfer(struct inode *inode, struct dquot **transfer_to); 106 106 int dquot_transfer(struct inode *inode, struct iattr *iattr);
+3 -3
include/net/flow_keys.h
··· 22 22 __be32 ports; 23 23 __be16 port16[2]; 24 24 }; 25 - u16 thoff; 26 - u16 n_proto; 27 - u8 ip_proto; 25 + u16 thoff; 26 + __be16 n_proto; 27 + u8 ip_proto; 28 28 }; 29 29 30 30 bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow,
+1 -1
include/net/ip.h
··· 181 181 return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0; 182 182 } 183 183 184 - void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, 184 + void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, 185 185 const struct ip_options *sopt, 186 186 __be32 daddr, __be32 saddr, 187 187 const struct ip_reply_arg *arg,
+5 -2
include/net/ipv6.h
··· 671 671 return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr)); 672 672 } 673 673 674 + u32 __ipv6_select_ident(u32 hashrnd, struct in6_addr *dst, 675 + struct in6_addr *src); 676 + void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt); 674 677 void ipv6_proxy_select_ident(struct sk_buff *skb); 675 678 676 679 int ip6_dst_hoplimit(struct dst_entry *dst); ··· 711 708 __be32 flowlabel, bool autolabel) 712 709 { 713 710 if (!flowlabel && (autolabel || net->ipv6.sysctl.auto_flowlabels)) { 714 - __be32 hash; 711 + u32 hash; 715 712 716 713 hash = skb_get_hash(skb); 717 714 ··· 721 718 */ 722 719 hash ^= hash >> 12; 723 720 724 - flowlabel = hash & IPV6_FLOWLABEL_MASK; 721 + flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK; 725 722 } 726 723 727 724 return flowlabel;
+2
include/net/netfilter/nf_tables.h
··· 530 530 531 531 int nft_chain_validate_dependency(const struct nft_chain *chain, 532 532 enum nft_chain_type type); 533 + int nft_chain_validate_hooks(const struct nft_chain *chain, 534 + unsigned int hook_flags); 533 535 534 536 struct nft_stats { 535 537 u64 bytes;
+1
include/net/netns/ipv4.h
··· 53 53 struct inet_peer_base *peers; 54 54 struct tcpm_hash_bucket *tcp_metrics_hash; 55 55 unsigned int tcp_metrics_hash_log; 56 + struct sock * __percpu *tcp_sk; 56 57 struct netns_frags frags; 57 58 #ifdef CONFIG_NETFILTER 58 59 struct xt_table *iptable_filter;
+5 -8
include/net/sch_generic.h
··· 79 79 struct netdev_queue *dev_queue; 80 80 81 81 struct gnet_stats_rate_est64 rate_est; 82 + struct gnet_stats_basic_cpu __percpu *cpu_bstats; 83 + struct gnet_stats_queue __percpu *cpu_qstats; 84 + 82 85 struct Qdisc *next_sched; 83 86 struct sk_buff *gso_skb; 84 87 /* ··· 89 86 */ 90 87 unsigned long state; 91 88 struct sk_buff_head q; 92 - union { 93 - struct gnet_stats_basic_packed bstats; 94 - struct gnet_stats_basic_cpu __percpu *cpu_bstats; 95 - } __packed; 89 + struct gnet_stats_basic_packed bstats; 96 90 unsigned int __state; 97 - union { 98 - struct gnet_stats_queue qstats; 99 - struct gnet_stats_queue __percpu *cpu_qstats; 100 - } __packed; 91 + struct gnet_stats_queue qstats; 101 92 struct rcu_head rcu_head; 102 93 int padded; 103 94 atomic_t refcnt;
+2 -2
include/net/tcp.h
··· 843 843 void tcp_get_allowed_congestion_control(char *buf, size_t len); 844 844 int tcp_set_allowed_congestion_control(char *allowed); 845 845 int tcp_set_congestion_control(struct sock *sk, const char *name); 846 - void tcp_slow_start(struct tcp_sock *tp, u32 acked); 847 - void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w); 846 + u32 tcp_slow_start(struct tcp_sock *tp, u32 acked); 847 + void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked); 848 848 849 849 u32 tcp_reno_ssthresh(struct sock *sk); 850 850 void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
+13 -2
kernel/events/core.c
··· 6776 6776 __perf_event_init_context(&cpuctx->ctx); 6777 6777 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex); 6778 6778 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock); 6779 - cpuctx->ctx.type = cpu_context; 6780 6779 cpuctx->ctx.pmu = pmu; 6781 6780 6782 6781 __perf_cpu_hrtimer_init(cpuctx, cpu); ··· 7419 7420 * task or CPU context: 7420 7421 */ 7421 7422 if (move_group) { 7422 - if (group_leader->ctx->type != ctx->type) 7423 + /* 7424 + * Make sure we're both on the same task, or both 7425 + * per-cpu events. 7426 + */ 7427 + if (group_leader->ctx->task != ctx->task) 7428 + goto err_context; 7429 + 7430 + /* 7431 + * Make sure we're both events for the same CPU; 7432 + * grouping events for different CPUs is broken; since 7433 + * you can never concurrently schedule them anyhow. 7434 + */ 7435 + if (group_leader->cpu != event->cpu) 7423 7436 goto err_context; 7424 7437 } else { 7425 7438 if (group_leader->ctx != ctx)
+2 -3
kernel/sched/core.c
··· 7292 7292 * since we will exit with TASK_RUNNING make sure we enter with it, 7293 7293 * otherwise we will destroy state. 7294 7294 */ 7295 - if (WARN_ONCE(current->state != TASK_RUNNING, 7295 + WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change, 7296 7296 "do not call blocking ops when !TASK_RUNNING; " 7297 7297 "state=%lx set at [<%p>] %pS\n", 7298 7298 current->state, 7299 7299 (void *)current->task_state_change, 7300 - (void *)current->task_state_change)) 7301 - __set_current_state(TASK_RUNNING); 7300 + (void *)current->task_state_change); 7302 7301 7303 7302 ___might_sleep(file, line, preempt_offset); 7304 7303 }
+10 -2
lib/checksum.c
··· 181 181 EXPORT_SYMBOL(csum_partial_copy); 182 182 183 183 #ifndef csum_tcpudp_nofold 184 + static inline u32 from64to32(u64 x) 185 + { 186 + /* add up 32-bit and 32-bit for 32+c bit */ 187 + x = (x & 0xffffffff) + (x >> 32); 188 + /* add up carry.. */ 189 + x = (x & 0xffffffff) + (x >> 32); 190 + return (u32)x; 191 + } 192 + 184 193 __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, 185 194 unsigned short len, 186 195 unsigned short proto, ··· 204 195 #else 205 196 s += (proto + len) << 8; 206 197 #endif 207 - s += (s >> 32); 208 - return (__force __wsum)s; 198 + return (__force __wsum)from64to32(s); 209 199 } 210 200 EXPORT_SYMBOL(csum_tcpudp_nofold); 211 201 #endif
+2 -2
mm/gup.c
··· 296 296 return -ENOMEM; 297 297 if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) 298 298 return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT; 299 - if (ret & VM_FAULT_SIGBUS) 299 + if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) 300 300 return -EFAULT; 301 301 BUG(); 302 302 } ··· 571 571 return -ENOMEM; 572 572 if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) 573 573 return -EHWPOISON; 574 - if (ret & VM_FAULT_SIGBUS) 574 + if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) 575 575 return -EFAULT; 576 576 BUG(); 577 577 }
+1 -1
mm/ksm.c
··· 376 376 else 377 377 ret = VM_FAULT_WRITE; 378 378 put_page(page); 379 - } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM))); 379 + } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM))); 380 380 /* 381 381 * We must loop because handle_mm_fault() may back out if there's 382 382 * any difficulty e.g. if pte accessed bit gets updated concurrently.
+1 -1
mm/memory.c
··· 2632 2632 2633 2633 /* Check if we need to add a guard page to the stack */ 2634 2634 if (check_stack_guard_page(vma, address) < 0) 2635 - return VM_FAULT_SIGBUS; 2635 + return VM_FAULT_SIGSEGV; 2636 2636 2637 2637 /* Use the zero-page for reads */ 2638 2638 if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) {
+6 -23
net/bridge/netfilter/nft_reject_bridge.c
··· 265 265 data[NFT_REG_VERDICT].verdict = NF_DROP; 266 266 } 267 267 268 - static int nft_reject_bridge_validate_hooks(const struct nft_chain *chain) 268 + static int nft_reject_bridge_validate(const struct nft_ctx *ctx, 269 + const struct nft_expr *expr, 270 + const struct nft_data **data) 269 271 { 270 - struct nft_base_chain *basechain; 271 - 272 - if (chain->flags & NFT_BASE_CHAIN) { 273 - basechain = nft_base_chain(chain); 274 - 275 - switch (basechain->ops[0].hooknum) { 276 - case NF_BR_PRE_ROUTING: 277 - case NF_BR_LOCAL_IN: 278 - break; 279 - default: 280 - return -EOPNOTSUPP; 281 - } 282 - } 283 - return 0; 272 + return nft_chain_validate_hooks(ctx->chain, (1 << NF_BR_PRE_ROUTING) | 273 + (1 << NF_BR_LOCAL_IN)); 284 274 } 285 275 286 276 static int nft_reject_bridge_init(const struct nft_ctx *ctx, ··· 280 290 struct nft_reject *priv = nft_expr_priv(expr); 281 291 int icmp_code, err; 282 292 283 - err = nft_reject_bridge_validate_hooks(ctx->chain); 293 + err = nft_reject_bridge_validate(ctx, expr, NULL); 284 294 if (err < 0) 285 295 return err; 286 296 ··· 329 339 330 340 nla_put_failure: 331 341 return -1; 332 - } 333 - 334 - static int nft_reject_bridge_validate(const struct nft_ctx *ctx, 335 - const struct nft_expr *expr, 336 - const struct nft_data **data) 337 - { 338 - return nft_reject_bridge_validate_hooks(ctx->chain); 339 342 } 340 343 341 344 static struct nft_expr_type nft_reject_bridge_type;
-1
net/caif/chnl_net.c
··· 470 470 ASSERT_RTNL(); 471 471 caifdev = netdev_priv(dev); 472 472 caif_netlink_parms(data, &caifdev->conn_req); 473 - dev_net_set(caifdev->netdev, src_net); 474 473 475 474 ret = register_netdevice(dev); 476 475 if (ret)
+4 -33
net/core/dev.c
··· 2379 2379 2380 2380 __be16 skb_network_protocol(struct sk_buff *skb, int *depth) 2381 2381 { 2382 - unsigned int vlan_depth = skb->mac_len; 2383 2382 __be16 type = skb->protocol; 2384 2383 2385 2384 /* Tunnel gso handlers can set protocol to ethernet. */ ··· 2392 2393 type = eth->h_proto; 2393 2394 } 2394 2395 2395 - /* if skb->protocol is 802.1Q/AD then the header should already be 2396 - * present at mac_len - VLAN_HLEN (if mac_len > 0), or at 2397 - * ETH_HLEN otherwise 2398 - */ 2399 - if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) { 2400 - if (vlan_depth) { 2401 - if (WARN_ON(vlan_depth < VLAN_HLEN)) 2402 - return 0; 2403 - vlan_depth -= VLAN_HLEN; 2404 - } else { 2405 - vlan_depth = ETH_HLEN; 2406 - } 2407 - do { 2408 - struct vlan_hdr *vh; 2409 - 2410 - if (unlikely(!pskb_may_pull(skb, 2411 - vlan_depth + VLAN_HLEN))) 2412 - return 0; 2413 - 2414 - vh = (struct vlan_hdr *)(skb->data + vlan_depth); 2415 - type = vh->h_vlan_encapsulated_proto; 2416 - vlan_depth += VLAN_HLEN; 2417 - } while (type == htons(ETH_P_8021Q) || 2418 - type == htons(ETH_P_8021AD)); 2419 - } 2420 - 2421 - *depth = vlan_depth; 2422 - 2423 - return type; 2396 + return __vlan_get_protocol(skb, type, depth); 2424 2397 } 2425 2398 2426 2399 /** ··· 5346 5375 } 5347 5376 EXPORT_SYMBOL(netdev_bonding_info_change); 5348 5377 5349 - void netdev_adjacent_add_links(struct net_device *dev) 5378 + static void netdev_adjacent_add_links(struct net_device *dev) 5350 5379 { 5351 5380 struct netdev_adjacent *iter; 5352 5381 ··· 5371 5400 } 5372 5401 } 5373 5402 5374 - void netdev_adjacent_del_links(struct net_device *dev) 5403 + static void netdev_adjacent_del_links(struct net_device *dev) 5375 5404 { 5376 5405 struct netdev_adjacent *iter; 5377 5406 ··· 6684 6713 if (!queue) 6685 6714 return NULL; 6686 6715 netdev_init_one_queue(dev, queue, NULL); 6687 - queue->qdisc = &noop_qdisc; 6716 + RCU_INIT_POINTER(queue->qdisc, &noop_qdisc); 6688 6717 queue->qdisc_sleeping = &noop_qdisc; 6689 
6718 rcu_assign_pointer(dev->ingress_queue, queue); 6690 6719 #endif
+5 -1
net/core/rtnetlink.c
··· 2937 2937 if (err < 0) 2938 2938 goto errout; 2939 2939 2940 + if (!skb->len) 2941 + goto errout; 2942 + 2940 2943 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC); 2941 2944 return 0; 2942 2945 errout: 2943 2946 WARN_ON(err == -EMSGSIZE); 2944 2947 kfree_skb(skb); 2945 - rtnl_set_sk_err(net, RTNLGRP_LINK, err); 2948 + if (err) 2949 + rtnl_set_sk_err(net, RTNLGRP_LINK, err); 2946 2950 return err; 2947 2951 } 2948 2952
+3 -26
net/ipv4/ip_output.c
··· 1504 1504 /* 1505 1505 * Generic function to send a packet as reply to another packet. 1506 1506 * Used to send some TCP resets/acks so far. 1507 - * 1508 - * Use a fake percpu inet socket to avoid false sharing and contention. 1509 1507 */ 1510 - static DEFINE_PER_CPU(struct inet_sock, unicast_sock) = { 1511 - .sk = { 1512 - .__sk_common = { 1513 - .skc_refcnt = ATOMIC_INIT(1), 1514 - }, 1515 - .sk_wmem_alloc = ATOMIC_INIT(1), 1516 - .sk_allocation = GFP_ATOMIC, 1517 - .sk_flags = (1UL << SOCK_USE_WRITE_QUEUE), 1518 - }, 1519 - .pmtudisc = IP_PMTUDISC_WANT, 1520 - .uc_ttl = -1, 1521 - }; 1522 - 1523 - void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, 1508 + void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, 1524 1509 const struct ip_options *sopt, 1525 1510 __be32 daddr, __be32 saddr, 1526 1511 const struct ip_reply_arg *arg, ··· 1515 1530 struct ipcm_cookie ipc; 1516 1531 struct flowi4 fl4; 1517 1532 struct rtable *rt = skb_rtable(skb); 1533 + struct net *net = sock_net(sk); 1518 1534 struct sk_buff *nskb; 1519 - struct sock *sk; 1520 - struct inet_sock *inet; 1521 1535 int err; 1522 1536 1523 1537 if (__ip_options_echo(&replyopts.opt.opt, skb, sopt)) ··· 1547 1563 if (IS_ERR(rt)) 1548 1564 return; 1549 1565 1550 - inet = &get_cpu_var(unicast_sock); 1566 + inet_sk(sk)->tos = arg->tos; 1551 1567 1552 - inet->tos = arg->tos; 1553 - sk = &inet->sk; 1554 1568 sk->sk_priority = skb->priority; 1555 1569 sk->sk_protocol = ip_hdr(skb)->protocol; 1556 1570 sk->sk_bound_dev_if = arg->bound_dev_if; 1557 - sock_net_set(sk, net); 1558 - __skb_queue_head_init(&sk->sk_write_queue); 1559 1571 sk->sk_sndbuf = sysctl_wmem_default; 1560 1572 err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, 1561 1573 len, 0, &ipc, &rt, MSG_DONTWAIT); ··· 1567 1587 arg->csumoffset) = csum_fold(csum_add(nskb->csum, 1568 1588 arg->csum)); 1569 1589 nskb->ip_summed = CHECKSUM_NONE; 1570 - skb_orphan(nskb); 1571 1590 skb_set_queue_mapping(nskb, 
skb_get_queue_mapping(skb)); 1572 1591 ip_push_pending_frames(sk, &fl4); 1573 1592 } 1574 1593 out: 1575 - put_cpu_var(unicast_sock); 1576 - 1577 1594 ip_rt_put(rt); 1578 1595 } 1579 1596
+3
net/ipv4/route.c
··· 966 966 if (dst->dev->mtu < mtu) 967 967 return; 968 968 969 + if (rt->rt_pmtu && rt->rt_pmtu < mtu) 970 + return; 971 + 969 972 if (mtu < ip_rt_min_pmtu) 970 973 mtu = ip_rt_min_pmtu; 971 974
+1 -1
net/ipv4/tcp_bic.c
··· 150 150 tcp_slow_start(tp, acked); 151 151 else { 152 152 bictcp_update(ca, tp->snd_cwnd); 153 - tcp_cong_avoid_ai(tp, ca->cnt); 153 + tcp_cong_avoid_ai(tp, ca->cnt, 1); 154 154 } 155 155 } 156 156
+20 -12
net/ipv4/tcp_cong.c
··· 360 360 * ABC caps N to 2. Slow start exits when cwnd grows over ssthresh and 361 361 * returns the leftover acks to adjust cwnd in congestion avoidance mode. 362 362 */ 363 - void tcp_slow_start(struct tcp_sock *tp, u32 acked) 363 + u32 tcp_slow_start(struct tcp_sock *tp, u32 acked) 364 364 { 365 365 u32 cwnd = tp->snd_cwnd + acked; 366 366 367 367 if (cwnd > tp->snd_ssthresh) 368 368 cwnd = tp->snd_ssthresh + 1; 369 + acked -= cwnd - tp->snd_cwnd; 369 370 tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp); 371 + 372 + return acked; 370 373 } 371 374 EXPORT_SYMBOL_GPL(tcp_slow_start); 372 375 373 - /* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w) */ 374 - void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w) 376 + /* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w), 377 + * for every packet that was ACKed. 378 + */ 379 + void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked) 375 380 { 381 + tp->snd_cwnd_cnt += acked; 376 382 if (tp->snd_cwnd_cnt >= w) { 377 - if (tp->snd_cwnd < tp->snd_cwnd_clamp) 378 - tp->snd_cwnd++; 379 - tp->snd_cwnd_cnt = 0; 380 - } else { 381 - tp->snd_cwnd_cnt++; 383 + u32 delta = tp->snd_cwnd_cnt / w; 384 + 385 + tp->snd_cwnd_cnt -= delta * w; 386 + tp->snd_cwnd += delta; 382 387 } 388 + tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_cwnd_clamp); 383 389 } 384 390 EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai); 385 391 ··· 404 398 return; 405 399 406 400 /* In "safe" area, increase. */ 407 - if (tp->snd_cwnd <= tp->snd_ssthresh) 408 - tcp_slow_start(tp, acked); 401 + if (tp->snd_cwnd <= tp->snd_ssthresh) { 402 + acked = tcp_slow_start(tp, acked); 403 + if (!acked) 404 + return; 405 + } 409 406 /* In dangerous area, increase slowly. */ 410 - else 411 - tcp_cong_avoid_ai(tp, tp->snd_cwnd); 407 + tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked); 412 408 } 413 409 EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid); 414 410
+17 -22
net/ipv4/tcp_cubic.c
··· 93 93 u32 epoch_start; /* beginning of an epoch */ 94 94 u32 ack_cnt; /* number of acks */ 95 95 u32 tcp_cwnd; /* estimated tcp cwnd */ 96 - #define ACK_RATIO_SHIFT 4 97 - #define ACK_RATIO_LIMIT (32u << ACK_RATIO_SHIFT) 98 - u16 delayed_ack; /* estimate the ratio of Packets/ACKs << 4 */ 96 + u16 unused; 99 97 u8 sample_cnt; /* number of samples to decide curr_rtt */ 100 98 u8 found; /* the exit point is found? */ 101 99 u32 round_start; /* beginning of each round */ ··· 112 114 ca->bic_K = 0; 113 115 ca->delay_min = 0; 114 116 ca->epoch_start = 0; 115 - ca->delayed_ack = 2 << ACK_RATIO_SHIFT; 116 117 ca->ack_cnt = 0; 117 118 ca->tcp_cwnd = 0; 118 119 ca->found = 0; ··· 202 205 /* 203 206 * Compute congestion window to use. 204 207 */ 205 - static inline void bictcp_update(struct bictcp *ca, u32 cwnd) 208 + static inline void bictcp_update(struct bictcp *ca, u32 cwnd, u32 acked) 206 209 { 207 210 u32 delta, bic_target, max_cnt; 208 211 u64 offs, t; 209 212 210 - ca->ack_cnt++; /* count the number of ACKs */ 213 + ca->ack_cnt += acked; /* count the number of ACKed packets */ 211 214 212 215 if (ca->last_cwnd == cwnd && 213 216 (s32)(tcp_time_stamp - ca->last_time) <= HZ / 32) 214 217 return; 218 + 219 + /* The CUBIC function can update ca->cnt at most once per jiffy. 220 + * On all cwnd reduction events, ca->epoch_start is set to 0, 221 + * which will force a recalculation of ca->cnt. 
222 + */ 223 + if (ca->epoch_start && tcp_time_stamp == ca->last_time) 224 + goto tcp_friendliness; 215 225 216 226 ca->last_cwnd = cwnd; 217 227 ca->last_time = tcp_time_stamp; 218 228 219 229 if (ca->epoch_start == 0) { 220 230 ca->epoch_start = tcp_time_stamp; /* record beginning */ 221 - ca->ack_cnt = 1; /* start counting */ 231 + ca->ack_cnt = acked; /* start counting */ 222 232 ca->tcp_cwnd = cwnd; /* syn with cubic */ 223 233 224 234 if (ca->last_max_cwnd <= cwnd) { ··· 287 283 if (ca->last_max_cwnd == 0 && ca->cnt > 20) 288 284 ca->cnt = 20; /* increase cwnd 5% per RTT */ 289 285 286 + tcp_friendliness: 290 287 /* TCP Friendly */ 291 288 if (tcp_friendliness) { 292 289 u32 scale = beta_scale; ··· 306 301 } 307 302 } 308 303 309 - ca->cnt = (ca->cnt << ACK_RATIO_SHIFT) / ca->delayed_ack; 310 304 if (ca->cnt == 0) /* cannot be zero */ 311 305 ca->cnt = 1; 312 306 } ··· 321 317 if (tp->snd_cwnd <= tp->snd_ssthresh) { 322 318 if (hystart && after(ack, ca->end_seq)) 323 319 bictcp_hystart_reset(sk); 324 - tcp_slow_start(tp, acked); 325 - } else { 326 - bictcp_update(ca, tp->snd_cwnd); 327 - tcp_cong_avoid_ai(tp, ca->cnt); 320 + acked = tcp_slow_start(tp, acked); 321 + if (!acked) 322 + return; 328 323 } 324 + bictcp_update(ca, tp->snd_cwnd, acked); 325 + tcp_cong_avoid_ai(tp, ca->cnt, acked); 329 326 } 330 327 331 328 static u32 bictcp_recalc_ssthresh(struct sock *sk) ··· 416 411 */ 417 412 static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us) 418 413 { 419 - const struct inet_connection_sock *icsk = inet_csk(sk); 420 414 const struct tcp_sock *tp = tcp_sk(sk); 421 415 struct bictcp *ca = inet_csk_ca(sk); 422 416 u32 delay; 423 - 424 - if (icsk->icsk_ca_state == TCP_CA_Open) { 425 - u32 ratio = ca->delayed_ack; 426 - 427 - ratio -= ca->delayed_ack >> ACK_RATIO_SHIFT; 428 - ratio += cnt; 429 - 430 - ca->delayed_ack = clamp(ratio, 1U, ACK_RATIO_LIMIT); 431 - } 432 417 433 418 /* Some calls are for duplicates without timetamps */ 434 419 if (rtt_us < 0)
+35 -8
net/ipv4/tcp_ipv4.c
··· 683 683 arg.bound_dev_if = sk->sk_bound_dev_if; 684 684 685 685 arg.tos = ip_hdr(skb)->tos; 686 - ip_send_unicast_reply(net, skb, &TCP_SKB_CB(skb)->header.h4.opt, 686 + ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk), 687 + skb, &TCP_SKB_CB(skb)->header.h4.opt, 687 688 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, 688 689 &arg, arg.iov[0].iov_len); 689 690 ··· 768 767 if (oif) 769 768 arg.bound_dev_if = oif; 770 769 arg.tos = tos; 771 - ip_send_unicast_reply(net, skb, &TCP_SKB_CB(skb)->header.h4.opt, 770 + ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk), 771 + skb, &TCP_SKB_CB(skb)->header.h4.opt, 772 772 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, 773 773 &arg, arg.iov[0].iov_len); 774 774 ··· 2432 2430 }; 2433 2431 EXPORT_SYMBOL(tcp_prot); 2434 2432 2435 - static int __net_init tcp_sk_init(struct net *net) 2436 - { 2437 - net->ipv4.sysctl_tcp_ecn = 2; 2438 - return 0; 2439 - } 2440 - 2441 2433 static void __net_exit tcp_sk_exit(struct net *net) 2442 2434 { 2435 + int cpu; 2436 + 2437 + for_each_possible_cpu(cpu) 2438 + inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu)); 2439 + free_percpu(net->ipv4.tcp_sk); 2440 + } 2441 + 2442 + static int __net_init tcp_sk_init(struct net *net) 2443 + { 2444 + int res, cpu; 2445 + 2446 + net->ipv4.tcp_sk = alloc_percpu(struct sock *); 2447 + if (!net->ipv4.tcp_sk) 2448 + return -ENOMEM; 2449 + 2450 + for_each_possible_cpu(cpu) { 2451 + struct sock *sk; 2452 + 2453 + res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW, 2454 + IPPROTO_TCP, net); 2455 + if (res) 2456 + goto fail; 2457 + *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk; 2458 + } 2459 + net->ipv4.sysctl_tcp_ecn = 2; 2460 + return 0; 2461 + 2462 + fail: 2463 + tcp_sk_exit(net); 2464 + 2465 + return res; 2443 2466 } 2444 2467 2445 2468 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
+2 -1
net/ipv4/tcp_scalable.c
··· 25 25 if (tp->snd_cwnd <= tp->snd_ssthresh) 26 26 tcp_slow_start(tp, acked); 27 27 else 28 - tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT)); 28 + tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT), 29 + 1); 29 30 } 30 31 31 32 static u32 tcp_scalable_ssthresh(struct sock *sk)
+1 -1
net/ipv4/tcp_veno.c
··· 159 159 /* In the "non-congestive state", increase cwnd 160 160 * every rtt. 161 161 */ 162 - tcp_cong_avoid_ai(tp, tp->snd_cwnd); 162 + tcp_cong_avoid_ai(tp, tp->snd_cwnd, 1); 163 163 } else { 164 164 /* In the "congestive state", increase cwnd 165 165 * every other rtt.
+1 -1
net/ipv4/tcp_yeah.c
··· 92 92 93 93 } else { 94 94 /* Reno */ 95 - tcp_cong_avoid_ai(tp, tp->snd_cwnd); 95 + tcp_cong_avoid_ai(tp, tp->snd_cwnd, 1); 96 96 } 97 97 98 98 /* The key players are v_vegas.beg_snd_una and v_beg_snd_nxt.
+2 -2
net/ipv6/ip6_gre.c
··· 417 417 if (code == ICMPV6_HDR_FIELD) 418 418 teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data); 419 419 420 - if (teli && teli == info - 2) { 420 + if (teli && teli == be32_to_cpu(info) - 2) { 421 421 tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli]; 422 422 if (tel->encap_limit == 0) { 423 423 net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n", ··· 429 429 } 430 430 break; 431 431 case ICMPV6_PKT_TOOBIG: 432 - mtu = info - offset; 432 + mtu = be32_to_cpu(info) - offset; 433 433 if (mtu < IPV6_MIN_MTU) 434 434 mtu = IPV6_MIN_MTU; 435 435 t->dev->mtu = mtu;
-14
net/ipv6/ip6_output.c
··· 537 537 skb_copy_secmark(to, from); 538 538 } 539 539 540 - static void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt) 541 - { 542 - static u32 ip6_idents_hashrnd __read_mostly; 543 - u32 hash, id; 544 - 545 - net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd)); 546 - 547 - hash = __ipv6_addr_jhash(&rt->rt6i_dst.addr, ip6_idents_hashrnd); 548 - hash = __ipv6_addr_jhash(&rt->rt6i_src.addr, hash); 549 - 550 - id = ip_idents_reserve(hash, 1); 551 - fhdr->identification = htonl(id); 552 - } 553 - 554 540 int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) 555 541 { 556 542 struct sk_buff *frag;
+35 -6
net/ipv6/output_core.c
··· 9 9 #include <net/addrconf.h> 10 10 #include <net/secure_seq.h> 11 11 12 + u32 __ipv6_select_ident(u32 hashrnd, struct in6_addr *dst, struct in6_addr *src) 13 + { 14 + u32 hash, id; 15 + 16 + hash = __ipv6_addr_jhash(dst, hashrnd); 17 + hash = __ipv6_addr_jhash(src, hash); 18 + 19 + /* Treat id of 0 as unset and if we get 0 back from ip_idents_reserve, 20 + * set the hight order instead thus minimizing possible future 21 + * collisions. 22 + */ 23 + id = ip_idents_reserve(hash, 1); 24 + if (unlikely(!id)) 25 + id = 1 << 31; 26 + 27 + return id; 28 + } 29 + 12 30 /* This function exists only for tap drivers that must support broken 13 31 * clients requesting UFO without specifying an IPv6 fragment ID. 14 32 * ··· 40 22 static u32 ip6_proxy_idents_hashrnd __read_mostly; 41 23 struct in6_addr buf[2]; 42 24 struct in6_addr *addrs; 43 - u32 hash, id; 25 + u32 id; 44 26 45 27 addrs = skb_header_pointer(skb, 46 28 skb_network_offset(skb) + ··· 52 34 net_get_random_once(&ip6_proxy_idents_hashrnd, 53 35 sizeof(ip6_proxy_idents_hashrnd)); 54 36 55 - hash = __ipv6_addr_jhash(&addrs[1], ip6_proxy_idents_hashrnd); 56 - hash = __ipv6_addr_jhash(&addrs[0], hash); 57 - 58 - id = ip_idents_reserve(hash, 1); 59 - skb_shinfo(skb)->ip6_frag_id = htonl(id); 37 + id = __ipv6_select_ident(ip6_proxy_idents_hashrnd, 38 + &addrs[1], &addrs[0]); 39 + skb_shinfo(skb)->ip6_frag_id = id; 60 40 } 61 41 EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident); 42 + 43 + void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt) 44 + { 45 + static u32 ip6_idents_hashrnd __read_mostly; 46 + u32 id; 47 + 48 + net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd)); 49 + 50 + id = __ipv6_select_ident(ip6_idents_hashrnd, &rt->rt6i_dst.addr, 51 + &rt->rt6i_src.addr); 52 + fhdr->identification = htonl(id); 53 + } 54 + EXPORT_SYMBOL(ipv6_select_ident); 62 55 63 56 int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) 64 57 {
+4 -4
net/ipv6/sit.c
··· 1506 1506 1507 1507 if (data[IFLA_IPTUN_ENCAP_SPORT]) { 1508 1508 ret = true; 1509 - ipencap->sport = nla_get_u16(data[IFLA_IPTUN_ENCAP_SPORT]); 1509 + ipencap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]); 1510 1510 } 1511 1511 1512 1512 if (data[IFLA_IPTUN_ENCAP_DPORT]) { 1513 1513 ret = true; 1514 - ipencap->dport = nla_get_u16(data[IFLA_IPTUN_ENCAP_DPORT]); 1514 + ipencap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]); 1515 1515 } 1516 1516 1517 1517 return ret; ··· 1707 1707 1708 1708 if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, 1709 1709 tunnel->encap.type) || 1710 - nla_put_u16(skb, IFLA_IPTUN_ENCAP_SPORT, 1710 + nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT, 1711 1711 tunnel->encap.sport) || 1712 - nla_put_u16(skb, IFLA_IPTUN_ENCAP_DPORT, 1712 + nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT, 1713 1713 tunnel->encap.dport) || 1714 1714 nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS, 1715 1715 tunnel->encap.flags))
+9 -1
net/ipv6/udp_offload.c
··· 52 52 53 53 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss); 54 54 55 + /* Set the IPv6 fragment id if not set yet */ 56 + if (!skb_shinfo(skb)->ip6_frag_id) 57 + ipv6_proxy_select_ident(skb); 58 + 55 59 segs = NULL; 56 60 goto out; 57 61 } ··· 112 108 fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen); 113 109 fptr->nexthdr = nexthdr; 114 110 fptr->reserved = 0; 115 - fptr->identification = skb_shinfo(skb)->ip6_frag_id; 111 + if (skb_shinfo(skb)->ip6_frag_id) 112 + fptr->identification = skb_shinfo(skb)->ip6_frag_id; 113 + else 114 + ipv6_select_ident(fptr, 115 + (struct rt6_info *)skb_dst(skb)); 116 116 117 117 /* Fragment the skb. ipv6 header and the remaining fields of the 118 118 * fragment header are updated in ipv6_gso_segment()
+22 -11
net/netfilter/ipvs/ip_vs_core.c
··· 659 659 return err; 660 660 } 661 661 662 - static int ip_vs_route_me_harder(int af, struct sk_buff *skb) 662 + static int ip_vs_route_me_harder(int af, struct sk_buff *skb, 663 + unsigned int hooknum) 663 664 { 665 + if (!sysctl_snat_reroute(skb)) 666 + return 0; 667 + /* Reroute replies only to remote clients (FORWARD and LOCAL_OUT) */ 668 + if (NF_INET_LOCAL_IN == hooknum) 669 + return 0; 664 670 #ifdef CONFIG_IP_VS_IPV6 665 671 if (af == AF_INET6) { 666 - if (sysctl_snat_reroute(skb) && ip6_route_me_harder(skb) != 0) 672 + struct dst_entry *dst = skb_dst(skb); 673 + 674 + if (dst->dev && !(dst->dev->flags & IFF_LOOPBACK) && 675 + ip6_route_me_harder(skb) != 0) 667 676 return 1; 668 677 } else 669 678 #endif 670 - if ((sysctl_snat_reroute(skb) || 671 - skb_rtable(skb)->rt_flags & RTCF_LOCAL) && 679 + if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL) && 672 680 ip_route_me_harder(skb, RTN_LOCAL) != 0) 673 681 return 1; 674 682 ··· 799 791 union nf_inet_addr *snet, 800 792 __u8 protocol, struct ip_vs_conn *cp, 801 793 struct ip_vs_protocol *pp, 802 - unsigned int offset, unsigned int ihl) 794 + unsigned int offset, unsigned int ihl, 795 + unsigned int hooknum) 803 796 { 804 797 unsigned int verdict = NF_DROP; 805 798 ··· 830 821 #endif 831 822 ip_vs_nat_icmp(skb, pp, cp, 1); 832 823 833 - if (ip_vs_route_me_harder(af, skb)) 824 + if (ip_vs_route_me_harder(af, skb, hooknum)) 834 825 goto out; 835 826 836 827 /* do the statistics and put it back */ ··· 925 916 926 917 snet.ip = iph->saddr; 927 918 return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp, 928 - pp, ciph.len, ihl); 919 + pp, ciph.len, ihl, hooknum); 929 920 } 930 921 931 922 #ifdef CONFIG_IP_VS_IPV6 ··· 990 981 snet.in6 = ciph.saddr.in6; 991 982 writable = ciph.len; 992 983 return handle_response_icmp(AF_INET6, skb, &snet, ciph.protocol, cp, 993 - pp, writable, sizeof(struct ipv6hdr)); 984 + pp, writable, sizeof(struct ipv6hdr), 985 + hooknum); 994 986 } 995 987 #endif 996 988 ··· 1050 1040 
*/ 1051 1041 static unsigned int 1052 1042 handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, 1053 - struct ip_vs_conn *cp, struct ip_vs_iphdr *iph) 1043 + struct ip_vs_conn *cp, struct ip_vs_iphdr *iph, 1044 + unsigned int hooknum) 1054 1045 { 1055 1046 struct ip_vs_protocol *pp = pd->pp; 1056 1047 ··· 1089 1078 * if it came from this machine itself. So re-compute 1090 1079 * the routing information. 1091 1080 */ 1092 - if (ip_vs_route_me_harder(af, skb)) 1081 + if (ip_vs_route_me_harder(af, skb, hooknum)) 1093 1082 goto drop; 1094 1083 1095 1084 IP_VS_DBG_PKT(10, af, pp, skb, 0, "After SNAT"); ··· 1192 1181 cp = pp->conn_out_get(af, skb, &iph, 0); 1193 1182 1194 1183 if (likely(cp)) 1195 - return handle_response(af, skb, pd, cp, &iph); 1184 + return handle_response(af, skb, pd, cp, &iph, hooknum); 1196 1185 if (sysctl_nat_icmp_send(net) && 1197 1186 (pp->protocol == IPPROTO_TCP || 1198 1187 pp->protocol == IPPROTO_UDP ||
+26 -2
net/netfilter/nf_tables_api.c
··· 1136 1136 /* Restore old counters on this cpu, no problem. Per-cpu statistics 1137 1137 * are not exposed to userspace. 1138 1138 */ 1139 + preempt_disable(); 1139 1140 stats = this_cpu_ptr(newstats); 1140 1141 stats->bytes = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES])); 1141 1142 stats->pkts = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS])); 1143 + preempt_enable(); 1142 1144 1143 1145 return newstats; 1144 1146 } ··· 1266 1264 nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla); 1267 1265 trans = nft_trans_alloc(&ctx, NFT_MSG_NEWCHAIN, 1268 1266 sizeof(struct nft_trans_chain)); 1269 - if (trans == NULL) 1267 + if (trans == NULL) { 1268 + free_percpu(stats); 1270 1269 return -ENOMEM; 1270 + } 1271 1271 1272 1272 nft_trans_chain_stats(trans) = stats; 1273 1273 nft_trans_chain_update(trans) = true; ··· 1325 1321 hookfn = type->hooks[hooknum]; 1326 1322 1327 1323 basechain = kzalloc(sizeof(*basechain), GFP_KERNEL); 1328 - if (basechain == NULL) 1324 + if (basechain == NULL) { 1325 + module_put(type->owner); 1329 1326 return -ENOMEM; 1327 + } 1330 1328 1331 1329 if (nla[NFTA_CHAIN_COUNTERS]) { 1332 1330 stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]); ··· 3764 3758 return 0; 3765 3759 } 3766 3760 EXPORT_SYMBOL_GPL(nft_chain_validate_dependency); 3761 + 3762 + int nft_chain_validate_hooks(const struct nft_chain *chain, 3763 + unsigned int hook_flags) 3764 + { 3765 + struct nft_base_chain *basechain; 3766 + 3767 + if (chain->flags & NFT_BASE_CHAIN) { 3768 + basechain = nft_base_chain(chain); 3769 + 3770 + if ((1 << basechain->ops[0].hooknum) & hook_flags) 3771 + return 0; 3772 + 3773 + return -EOPNOTSUPP; 3774 + } 3775 + 3776 + return 0; 3777 + } 3778 + EXPORT_SYMBOL_GPL(nft_chain_validate_hooks); 3767 3779 3768 3780 /* 3769 3781 * Loop detection - walk through the ruleset beginning at the destination chain
+17 -9
net/netfilter/nft_masq.c
··· 21 21 }; 22 22 EXPORT_SYMBOL_GPL(nft_masq_policy); 23 23 24 + int nft_masq_validate(const struct nft_ctx *ctx, 25 + const struct nft_expr *expr, 26 + const struct nft_data **data) 27 + { 28 + int err; 29 + 30 + err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); 31 + if (err < 0) 32 + return err; 33 + 34 + return nft_chain_validate_hooks(ctx->chain, 35 + (1 << NF_INET_POST_ROUTING)); 36 + } 37 + EXPORT_SYMBOL_GPL(nft_masq_validate); 38 + 24 39 int nft_masq_init(const struct nft_ctx *ctx, 25 40 const struct nft_expr *expr, 26 41 const struct nlattr * const tb[]) ··· 43 28 struct nft_masq *priv = nft_expr_priv(expr); 44 29 int err; 45 30 46 - err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); 47 - if (err < 0) 31 + err = nft_masq_validate(ctx, expr, NULL); 32 + if (err) 48 33 return err; 49 34 50 35 if (tb[NFTA_MASQ_FLAGS] == NULL) ··· 74 59 return -1; 75 60 } 76 61 EXPORT_SYMBOL_GPL(nft_masq_dump); 77 - 78 - int nft_masq_validate(const struct nft_ctx *ctx, const struct nft_expr *expr, 79 - const struct nft_data **data) 80 - { 81 - return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); 82 - } 83 - EXPORT_SYMBOL_GPL(nft_masq_validate); 84 62 85 63 MODULE_LICENSE("GPL"); 86 64 MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>");
+31 -11
net/netfilter/nft_nat.c
··· 88 88 [NFTA_NAT_FLAGS] = { .type = NLA_U32 }, 89 89 }; 90 90 91 + static int nft_nat_validate(const struct nft_ctx *ctx, 92 + const struct nft_expr *expr, 93 + const struct nft_data **data) 94 + { 95 + struct nft_nat *priv = nft_expr_priv(expr); 96 + int err; 97 + 98 + err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); 99 + if (err < 0) 100 + return err; 101 + 102 + switch (priv->type) { 103 + case NFT_NAT_SNAT: 104 + err = nft_chain_validate_hooks(ctx->chain, 105 + (1 << NF_INET_POST_ROUTING) | 106 + (1 << NF_INET_LOCAL_IN)); 107 + break; 108 + case NFT_NAT_DNAT: 109 + err = nft_chain_validate_hooks(ctx->chain, 110 + (1 << NF_INET_PRE_ROUTING) | 111 + (1 << NF_INET_LOCAL_OUT)); 112 + break; 113 + } 114 + 115 + return err; 116 + } 117 + 91 118 static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, 92 119 const struct nlattr * const tb[]) 93 120 { 94 121 struct nft_nat *priv = nft_expr_priv(expr); 95 122 u32 family; 96 123 int err; 97 - 98 - err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); 99 - if (err < 0) 100 - return err; 101 124 102 125 if (tb[NFTA_NAT_TYPE] == NULL || 103 126 (tb[NFTA_NAT_REG_ADDR_MIN] == NULL && ··· 137 114 default: 138 115 return -EINVAL; 139 116 } 117 + 118 + err = nft_nat_validate(ctx, expr, NULL); 119 + if (err < 0) 120 + return err; 140 121 141 122 if (tb[NFTA_NAT_FAMILY] == NULL) 142 123 return -EINVAL; ··· 244 217 245 218 nla_put_failure: 246 219 return -1; 247 - } 248 - 249 - static int nft_nat_validate(const struct nft_ctx *ctx, 250 - const struct nft_expr *expr, 251 - const struct nft_data **data) 252 - { 253 - return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); 254 220 } 255 221 256 222 static struct nft_expr_type nft_nat_type;
+17 -8
net/netfilter/nft_redir.c
··· 23 23 }; 24 24 EXPORT_SYMBOL_GPL(nft_redir_policy); 25 25 26 + int nft_redir_validate(const struct nft_ctx *ctx, 27 + const struct nft_expr *expr, 28 + const struct nft_data **data) 29 + { 30 + int err; 31 + 32 + err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); 33 + if (err < 0) 34 + return err; 35 + 36 + return nft_chain_validate_hooks(ctx->chain, 37 + (1 << NF_INET_PRE_ROUTING) | 38 + (1 << NF_INET_LOCAL_OUT)); 39 + } 40 + EXPORT_SYMBOL_GPL(nft_redir_validate); 41 + 26 42 int nft_redir_init(const struct nft_ctx *ctx, 27 43 const struct nft_expr *expr, 28 44 const struct nlattr * const tb[]) ··· 46 30 struct nft_redir *priv = nft_expr_priv(expr); 47 31 int err; 48 32 49 - err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); 33 + err = nft_redir_validate(ctx, expr, NULL); 50 34 if (err < 0) 51 35 return err; 52 36 ··· 103 87 return -1; 104 88 } 105 89 EXPORT_SYMBOL_GPL(nft_redir_dump); 106 - 107 - int nft_redir_validate(const struct nft_ctx *ctx, const struct nft_expr *expr, 108 - const struct nft_data **data) 109 - { 110 - return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); 111 - } 112 - EXPORT_SYMBOL_GPL(nft_redir_validate); 113 90 114 91 MODULE_LICENSE("GPL"); 115 92 MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>");
+2 -2
net/netlink/af_netlink.c
··· 1438 1438 1439 1439 for (undo = 0; undo < group; undo++) 1440 1440 if (test_bit(undo, &groups)) 1441 - nlk->netlink_unbind(sock_net(sk), undo); 1441 + nlk->netlink_unbind(sock_net(sk), undo + 1); 1442 1442 } 1443 1443 1444 1444 static int netlink_bind(struct socket *sock, struct sockaddr *addr, ··· 1476 1476 for (group = 0; group < nlk->ngroups; group++) { 1477 1477 if (!test_bit(group, &groups)) 1478 1478 continue; 1479 - err = nlk->netlink_bind(net, group); 1479 + err = nlk->netlink_bind(net, group + 1); 1480 1480 if (!err) 1481 1481 continue; 1482 1482 netlink_undo_bind(group, groups, sk);
+2 -2
net/rds/sysctl.c
··· 71 71 { 72 72 .procname = "max_unacked_packets", 73 73 .data = &rds_sysctl_max_unacked_packets, 74 - .maxlen = sizeof(unsigned long), 74 + .maxlen = sizeof(int), 75 75 .mode = 0644, 76 76 .proc_handler = proc_dointvec, 77 77 }, 78 78 { 79 79 .procname = "max_unacked_bytes", 80 80 .data = &rds_sysctl_max_unacked_bytes, 81 - .maxlen = sizeof(unsigned long), 81 + .maxlen = sizeof(int), 82 82 .mode = 0644, 83 83 .proc_handler = proc_dointvec, 84 84 },
+4 -3
net/sched/cls_api.c
··· 556 556 } 557 557 EXPORT_SYMBOL(tcf_exts_change); 558 558 559 - #define tcf_exts_first_act(ext) \ 560 - list_first_entry(&(exts)->actions, struct tc_action, list) 559 + #define tcf_exts_first_act(ext) \ 560 + list_first_entry_or_null(&(exts)->actions, \ 561 + struct tc_action, list) 561 562 562 563 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts) 563 564 { ··· 604 603 { 605 604 #ifdef CONFIG_NET_CLS_ACT 606 605 struct tc_action *a = tcf_exts_first_act(exts); 607 - if (tcf_action_copy_stats(skb, a, 1) < 0) 606 + if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0) 608 607 return -1; 609 608 #endif 610 609 return 0;
+8 -2
net/sched/sch_fq.c
··· 686 686 if (tb[TCA_FQ_FLOW_PLIMIT]) 687 687 q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]); 688 688 689 - if (tb[TCA_FQ_QUANTUM]) 690 - q->quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]); 689 + if (tb[TCA_FQ_QUANTUM]) { 690 + u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]); 691 + 692 + if (quantum > 0) 693 + q->quantum = quantum; 694 + else 695 + err = -EINVAL; 696 + } 691 697 692 698 if (tb[TCA_FQ_INITIAL_QUANTUM]) 693 699 q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);
+1 -1
net/sctp/sm_make_chunk.c
··· 2608 2608 2609 2609 addr_param = param.v + sizeof(sctp_addip_param_t); 2610 2610 2611 - af = sctp_get_af_specific(param_type2af(param.p->type)); 2611 + af = sctp_get_af_specific(param_type2af(addr_param->p.type)); 2612 2612 if (af == NULL) 2613 2613 break; 2614 2614
-31
sound/core/seq/seq_dummy.c
··· 82 82 static int my_client = -1; 83 83 84 84 /* 85 - * unuse callback - send ALL_SOUNDS_OFF and RESET_CONTROLLERS events 86 - * to subscribers. 87 - * Note: this callback is called only after all subscribers are removed. 88 - */ 89 - static int 90 - dummy_unuse(void *private_data, struct snd_seq_port_subscribe *info) 91 - { 92 - struct snd_seq_dummy_port *p; 93 - int i; 94 - struct snd_seq_event ev; 95 - 96 - p = private_data; 97 - memset(&ev, 0, sizeof(ev)); 98 - if (p->duplex) 99 - ev.source.port = p->connect; 100 - else 101 - ev.source.port = p->port; 102 - ev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS; 103 - ev.type = SNDRV_SEQ_EVENT_CONTROLLER; 104 - for (i = 0; i < 16; i++) { 105 - ev.data.control.channel = i; 106 - ev.data.control.param = MIDI_CTL_ALL_SOUNDS_OFF; 107 - snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0); 108 - ev.data.control.param = MIDI_CTL_RESET_CONTROLLERS; 109 - snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0); 110 - } 111 - return 0; 112 - } 113 - 114 - /* 115 85 * event input callback - just redirect events to subscribers 116 86 */ 117 87 static int ··· 145 175 | SNDRV_SEQ_PORT_TYPE_PORT; 146 176 memset(&pcb, 0, sizeof(pcb)); 147 177 pcb.owner = THIS_MODULE; 148 - pcb.unuse = dummy_unuse; 149 178 pcb.event_input = dummy_input; 150 179 pcb.private_free = dummy_free; 151 180 pcb.private_data = rec;
+2
sound/soc/adi/axi-i2s.c
··· 240 240 if (ret) 241 241 goto err_clk_disable; 242 242 243 + return 0; 244 + 243 245 err_clk_disable: 244 246 clk_disable_unprepare(i2s->clk); 245 247 return ret;
+1 -1
sound/soc/codecs/pcm512x.c
··· 188 188 static const char * const pcm512x_dsp_program_texts[] = { 189 189 "FIR interpolation with de-emphasis", 190 190 "Low latency IIR with de-emphasis", 191 - "Fixed process flow", 192 191 "High attenuation with de-emphasis", 192 + "Fixed process flow", 193 193 "Ringing-less low latency FIR", 194 194 }; 195 195
+2 -4
sound/soc/codecs/rt286.c
··· 861 861 RT286_I2S_CTRL1, 0x0018, d_len_code << 3); 862 862 dev_dbg(codec->dev, "format val = 0x%x\n", val); 863 863 864 - if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) 865 - snd_soc_update_bits(codec, RT286_DAC_FORMAT, 0x407f, val); 866 - else 867 - snd_soc_update_bits(codec, RT286_ADC_FORMAT, 0x407f, val); 864 + snd_soc_update_bits(codec, RT286_DAC_FORMAT, 0x407f, val); 865 + snd_soc_update_bits(codec, RT286_ADC_FORMAT, 0x407f, val); 868 866 869 867 return 0; 870 868 }
+14 -4
sound/soc/codecs/rt5677.c
··· 2083 2083 struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec); 2084 2084 2085 2085 switch (event) { 2086 - case SND_SOC_DAPM_POST_PMU: 2086 + case SND_SOC_DAPM_PRE_PMU: 2087 2087 regmap_update_bits(rt5677->regmap, RT5677_PLL1_CTRL2, 0x2, 0x2); 2088 + break; 2089 + 2090 + case SND_SOC_DAPM_POST_PMU: 2088 2091 regmap_update_bits(rt5677->regmap, RT5677_PLL1_CTRL2, 0x2, 0x0); 2089 2092 break; 2093 + 2090 2094 default: 2091 2095 return 0; 2092 2096 } ··· 2105 2101 struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec); 2106 2102 2107 2103 switch (event) { 2108 - case SND_SOC_DAPM_POST_PMU: 2104 + case SND_SOC_DAPM_PRE_PMU: 2109 2105 regmap_update_bits(rt5677->regmap, RT5677_PLL2_CTRL2, 0x2, 0x2); 2106 + break; 2107 + 2108 + case SND_SOC_DAPM_POST_PMU: 2110 2109 regmap_update_bits(rt5677->regmap, RT5677_PLL2_CTRL2, 0x2, 0x0); 2111 2110 break; 2111 + 2112 2112 default: 2113 2113 return 0; 2114 2114 } ··· 2220 2212 2221 2213 static const struct snd_soc_dapm_widget rt5677_dapm_widgets[] = { 2222 2214 SND_SOC_DAPM_SUPPLY("PLL1", RT5677_PWR_ANLG2, RT5677_PWR_PLL1_BIT, 2223 - 0, rt5677_set_pll1_event, SND_SOC_DAPM_POST_PMU), 2215 + 0, rt5677_set_pll1_event, SND_SOC_DAPM_PRE_PMU | 2216 + SND_SOC_DAPM_POST_PMU), 2224 2217 SND_SOC_DAPM_SUPPLY("PLL2", RT5677_PWR_ANLG2, RT5677_PWR_PLL2_BIT, 2225 - 0, rt5677_set_pll2_event, SND_SOC_DAPM_POST_PMU), 2218 + 0, rt5677_set_pll2_event, SND_SOC_DAPM_PRE_PMU | 2219 + SND_SOC_DAPM_POST_PMU), 2226 2220 2227 2221 /* Input Side */ 2228 2222 /* micbias */
+6
sound/soc/codecs/ts3a227e.c
··· 254 254 struct ts3a227e *ts3a227e; 255 255 struct device *dev = &i2c->dev; 256 256 int ret; 257 + unsigned int acc_reg; 257 258 258 259 ts3a227e = devm_kzalloc(&i2c->dev, sizeof(*ts3a227e), GFP_KERNEL); 259 260 if (ts3a227e == NULL) ··· 283 282 regmap_update_bits(ts3a227e->regmap, TS3A227E_REG_INTERRUPT_DISABLE, 284 283 INTB_DISABLE | ADC_COMPLETE_INT_DISABLE, 285 284 ADC_COMPLETE_INT_DISABLE); 285 + 286 + /* Read jack status because chip might not trigger interrupt at boot. */ 287 + regmap_read(ts3a227e->regmap, TS3A227E_REG_ACCESSORY_STATUS, &acc_reg); 288 + ts3a227e_new_jack_state(ts3a227e, acc_reg); 289 + ts3a227e_jack_report(ts3a227e); 286 290 287 291 return 0; 288 292 }
+15 -8
sound/soc/codecs/wm8904.c
··· 1076 1076 { "Right Capture PGA", NULL, "Right Capture Mux" }, 1077 1077 { "Right Capture PGA", NULL, "Right Capture Inverting Mux" }, 1078 1078 1079 - { "AIFOUTL", "Left", "ADCL" }, 1080 - { "AIFOUTL", "Right", "ADCR" }, 1081 - { "AIFOUTR", "Left", "ADCL" }, 1082 - { "AIFOUTR", "Right", "ADCR" }, 1079 + { "AIFOUTL Mux", "Left", "ADCL" }, 1080 + { "AIFOUTL Mux", "Right", "ADCR" }, 1081 + { "AIFOUTR Mux", "Left", "ADCL" }, 1082 + { "AIFOUTR Mux", "Right", "ADCR" }, 1083 + 1084 + { "AIFOUTL", NULL, "AIFOUTL Mux" }, 1085 + { "AIFOUTR", NULL, "AIFOUTR Mux" }, 1083 1086 1084 1087 { "ADCL", NULL, "CLK_DSP" }, 1085 1088 { "ADCL", NULL, "Left Capture PGA" }, ··· 1092 1089 }; 1093 1090 1094 1091 static const struct snd_soc_dapm_route dac_intercon[] = { 1095 - { "DACL", "Right", "AIFINR" }, 1096 - { "DACL", "Left", "AIFINL" }, 1092 + { "DACL Mux", "Left", "AIFINL" }, 1093 + { "DACL Mux", "Right", "AIFINR" }, 1094 + 1095 + { "DACR Mux", "Left", "AIFINL" }, 1096 + { "DACR Mux", "Right", "AIFINR" }, 1097 + 1098 + { "DACL", NULL, "DACL Mux" }, 1097 1099 { "DACL", NULL, "CLK_DSP" }, 1098 1100 1099 - { "DACR", "Right", "AIFINR" }, 1100 - { "DACR", "Left", "AIFINL" }, 1101 + { "DACR", NULL, "DACR Mux" }, 1101 1102 { "DACR", NULL, "CLK_DSP" }, 1102 1103 1103 1104 { "Charge pump", NULL, "SYSCLK" },
+1 -1
sound/soc/codecs/wm8960.c
··· 556 556 { 22050, 2 }, 557 557 { 24000, 2 }, 558 558 { 16000, 3 }, 559 - { 11250, 4 }, 559 + { 11025, 4 }, 560 560 { 12000, 4 }, 561 561 { 8000, 5 }, 562 562 };
+1 -1
sound/soc/fsl/fsl_esai.h
··· 302 302 #define ESAI_xCCR_xFP_MASK (((1 << ESAI_xCCR_xFP_WIDTH) - 1) << ESAI_xCCR_xFP_SHIFT) 303 303 #define ESAI_xCCR_xFP(v) ((((v) - 1) << ESAI_xCCR_xFP_SHIFT) & ESAI_xCCR_xFP_MASK) 304 304 #define ESAI_xCCR_xDC_SHIFT 9 305 - #define ESAI_xCCR_xDC_WIDTH 4 305 + #define ESAI_xCCR_xDC_WIDTH 5 306 306 #define ESAI_xCCR_xDC_MASK (((1 << ESAI_xCCR_xDC_WIDTH) - 1) << ESAI_xCCR_xDC_SHIFT) 307 307 #define ESAI_xCCR_xDC(v) ((((v) - 1) << ESAI_xCCR_xDC_SHIFT) & ESAI_xCCR_xDC_MASK) 308 308 #define ESAI_xCCR_xPSR_SHIFT 8
+2 -2
sound/soc/fsl/fsl_ssi.c
··· 1362 1362 } 1363 1363 1364 1364 ssi_private->irq = platform_get_irq(pdev, 0); 1365 - if (!ssi_private->irq) { 1365 + if (ssi_private->irq < 0) { 1366 1366 dev_err(&pdev->dev, "no irq for node %s\n", np->full_name); 1367 - return -ENXIO; 1367 + return ssi_private->irq; 1368 1368 } 1369 1369 1370 1370 /* Are the RX and the TX clocks locked? */
+1
sound/soc/fsl/imx-wm8962.c
··· 257 257 if (ret) 258 258 goto clk_fail; 259 259 data->card.num_links = 1; 260 + data->card.owner = THIS_MODULE; 260 261 data->card.dai_link = &data->dai; 261 262 data->card.dapm_widgets = imx_wm8962_dapm_widgets; 262 263 data->card.num_dapm_widgets = ARRAY_SIZE(imx_wm8962_dapm_widgets);
+3 -4
sound/soc/generic/simple-card.c
··· 452 452 } 453 453 454 454 /* Decrease the reference count of the device nodes */ 455 - static int asoc_simple_card_unref(struct platform_device *pdev) 455 + static int asoc_simple_card_unref(struct snd_soc_card *card) 456 456 { 457 - struct snd_soc_card *card = platform_get_drvdata(pdev); 458 457 struct snd_soc_dai_link *dai_link; 459 458 int num_links; 460 459 ··· 555 556 return ret; 556 557 557 558 err: 558 - asoc_simple_card_unref(pdev); 559 + asoc_simple_card_unref(&priv->snd_card); 559 560 return ret; 560 561 } 561 562 ··· 571 572 snd_soc_jack_free_gpios(&simple_card_mic_jack, 1, 572 573 &simple_card_mic_jack_gpio); 573 574 574 - return asoc_simple_card_unref(pdev); 575 + return asoc_simple_card_unref(card); 575 576 } 576 577 577 578 static const struct of_device_id asoc_simple_of_match[] = {
+7 -6
sound/soc/intel/sst-firmware.c
··· 706 706 struct list_head *block_list) 707 707 { 708 708 struct sst_mem_block *block, *tmp; 709 + struct sst_block_allocator ba_tmp = *ba; 709 710 u32 end = ba->offset + ba->size, block_end; 710 711 int err; 711 712 ··· 731 730 if (ba->offset >= block->offset && ba->offset < block_end) { 732 731 733 732 /* align ba to block boundary */ 734 - ba->size -= block_end - ba->offset; 735 - ba->offset = block_end; 736 - err = block_alloc_contiguous(dsp, ba, block_list); 733 + ba_tmp.size -= block_end - ba->offset; 734 + ba_tmp.offset = block_end; 735 + err = block_alloc_contiguous(dsp, &ba_tmp, block_list); 737 736 if (err < 0) 738 737 return -ENOMEM; 739 738 ··· 768 767 list_move(&block->list, &dsp->used_block_list); 769 768 list_add(&block->module_list, block_list); 770 769 /* align ba to block boundary */ 771 - ba->size -= block_end - ba->offset; 772 - ba->offset = block_end; 770 + ba_tmp.size -= block_end - ba->offset; 771 + ba_tmp.offset = block_end; 773 772 774 - err = block_alloc_contiguous(dsp, ba, block_list); 773 + err = block_alloc_contiguous(dsp, &ba_tmp, block_list); 775 774 if (err < 0) 776 775 return -ENOMEM; 777 776
+30
sound/soc/intel/sst-haswell-ipc.c
··· 1228 1228 struct sst_dsp *sst = hsw->dsp; 1229 1229 unsigned long flags; 1230 1230 1231 + if (!stream) { 1232 + dev_warn(hsw->dev, "warning: stream is NULL, no stream to free, ignore it.\n"); 1233 + return 0; 1234 + } 1235 + 1231 1236 /* dont free DSP streams that are not commited */ 1232 1237 if (!stream->commited) 1233 1238 goto out; ··· 1420 1415 u32 header; 1421 1416 int ret; 1422 1417 1418 + if (!stream) { 1419 + dev_warn(hsw->dev, "warning: stream is NULL, no stream to commit, ignore it.\n"); 1420 + return 0; 1421 + } 1422 + 1423 + if (stream->commited) { 1424 + dev_warn(hsw->dev, "warning: stream is already committed, ignore it.\n"); 1425 + return 0; 1426 + } 1427 + 1423 1428 trace_ipc_request("stream alloc", stream->host_id); 1424 1429 1425 1430 header = IPC_GLB_TYPE(IPC_GLB_ALLOCATE_STREAM); ··· 1534 1519 { 1535 1520 int ret; 1536 1521 1522 + if (!stream) { 1523 + dev_warn(hsw->dev, "warning: stream is NULL, no stream to pause, ignore it.\n"); 1524 + return 0; 1525 + } 1526 + 1537 1527 trace_ipc_request("stream pause", stream->reply.stream_hw_id); 1538 1528 1539 1529 ret = sst_hsw_stream_operations(hsw, IPC_STR_PAUSE, ··· 1555 1535 { 1556 1536 int ret; 1557 1537 1538 + if (!stream) { 1539 + dev_warn(hsw->dev, "warning: stream is NULL, no stream to resume, ignore it.\n"); 1540 + return 0; 1541 + } 1542 + 1558 1543 trace_ipc_request("stream resume", stream->reply.stream_hw_id); 1559 1544 1560 1545 ret = sst_hsw_stream_operations(hsw, IPC_STR_RESUME, ··· 1574 1549 int sst_hsw_stream_reset(struct sst_hsw *hsw, struct sst_hsw_stream *stream) 1575 1550 { 1576 1551 int ret, tries = 10; 1552 + 1553 + if (!stream) { 1554 + dev_warn(hsw->dev, "warning: stream is NULL, no stream to reset, ignore it.\n"); 1555 + return 0; 1556 + } 1577 1557 1578 1558 /* dont reset streams that are not commited */ 1579 1559 if (!stream->commited)
+1 -1
sound/soc/omap/omap-mcbsp.c
··· 434 434 case SND_SOC_DAIFMT_CBM_CFS: 435 435 /* McBSP slave. FS clock as output */ 436 436 regs->srgr2 |= FSGM; 437 - regs->pcr0 |= FSXM; 437 + regs->pcr0 |= FSXM | FSRM; 438 438 break; 439 439 case SND_SOC_DAIFMT_CBM_CFM: 440 440 /* McBSP slave */
+1
sound/soc/rockchip/rockchip_i2s.c
··· 335 335 SNDRV_PCM_FMTBIT_S24_LE), 336 336 }, 337 337 .ops = &rockchip_i2s_dai_ops, 338 + .symmetric_rates = 1, 338 339 }; 339 340 340 341 static const struct snd_soc_component_driver rockchip_i2s_component = {
+6 -3
sound/soc/soc-compress.c
··· 659 659 rtd->dai_link->stream_name); 660 660 661 661 ret = snd_pcm_new_internal(rtd->card->snd_card, new_name, num, 662 - 1, 0, &be_pcm); 662 + rtd->dai_link->dpcm_playback, 663 + rtd->dai_link->dpcm_capture, &be_pcm); 663 664 if (ret < 0) { 664 665 dev_err(rtd->card->dev, "ASoC: can't create compressed for %s\n", 665 666 rtd->dai_link->name); ··· 669 668 670 669 rtd->pcm = be_pcm; 671 670 rtd->fe_compr = 1; 672 - be_pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->private_data = rtd; 673 - be_pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->private_data = rtd; 671 + if (rtd->dai_link->dpcm_playback) 672 + be_pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->private_data = rtd; 673 + else if (rtd->dai_link->dpcm_capture) 674 + be_pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->private_data = rtd; 674 675 memcpy(compr->ops, &soc_compr_dyn_ops, sizeof(soc_compr_dyn_ops)); 675 676 } else 676 677 memcpy(compr->ops, &soc_compr_ops, sizeof(soc_compr_ops));
+4 -1
tools/perf/scripts/perl/Perf-Trace-Util/Context.c
··· 5 5 * ANY CHANGES MADE HERE WILL BE LOST! 6 6 * 7 7 */ 8 - 8 + #include <stdbool.h> 9 + #ifndef HAS_BOOL 10 + # define HAS_BOOL 1 11 + #endif 9 12 #line 1 "Context.xs" 10 13 /* 11 14 * Context.xs. XS interfaces for perf script.
+14 -4
tools/perf/util/annotate.c
··· 177 177 goto out_free_ops; 178 178 179 179 ops->locked.ins = ins__find(name); 180 + free(name); 181 + 180 182 if (ops->locked.ins == NULL) 181 183 goto out_free_ops; 182 184 183 185 if (!ops->locked.ins->ops) 184 186 return 0; 185 187 186 - if (ops->locked.ins->ops->parse) 187 - ops->locked.ins->ops->parse(ops->locked.ops); 188 + if (ops->locked.ins->ops->parse && 189 + ops->locked.ins->ops->parse(ops->locked.ops) < 0) 190 + goto out_free_ops; 188 191 189 192 return 0; 190 193 ··· 211 208 212 209 static void lock__delete(struct ins_operands *ops) 213 210 { 211 + struct ins *ins = ops->locked.ins; 212 + 213 + if (ins && ins->ops->free) 214 + ins->ops->free(ops->locked.ops); 215 + else 216 + ins__delete(ops->locked.ops); 217 + 214 218 zfree(&ops->locked.ops); 215 219 zfree(&ops->target.raw); 216 220 zfree(&ops->target.name); ··· 541 531 if (!dl->ins->ops) 542 532 return; 543 533 544 - if (dl->ins->ops->parse) 545 - dl->ins->ops->parse(&dl->ops); 534 + if (dl->ins->ops->parse && dl->ins->ops->parse(&dl->ops) < 0) 535 + dl->ins = NULL; 546 536 } 547 537 548 538 static int disasm_line__parse(char *line, char **namep, char **rawp)
+1 -1
tools/perf/util/evlist.c
··· 1445 1445 case ENOENT: 1446 1446 scnprintf(buf, size, "%s", 1447 1447 "Error:\tUnable to find debugfs\n" 1448 - "Hint:\tWas your kernel was compiled with debugfs support?\n" 1448 + "Hint:\tWas your kernel compiled with debugfs support?\n" 1449 1449 "Hint:\tIs the debugfs filesystem mounted?\n" 1450 1450 "Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'"); 1451 1451 break;
+16
tools/perf/util/map.h
··· 116 116 #define map__for_each_symbol(map, pos, n) \ 117 117 dso__for_each_symbol(map->dso, pos, n, map->type) 118 118 119 + /* map__for_each_symbol_with_name - iterate over the symbols in the given map 120 + * that have the given name 121 + * 122 + * @map: the 'struct map *' in which symbols itereated 123 + * @sym_name: the symbol name 124 + * @pos: the 'struct symbol *' to use as a loop cursor 125 + * @filter: to use when loading the DSO 126 + */ 127 + #define __map__for_each_symbol_by_name(map, sym_name, pos, filter) \ 128 + for (pos = map__find_symbol_by_name(map, sym_name, filter); \ 129 + pos && strcmp(pos->name, sym_name) == 0; \ 130 + pos = symbol__next_by_name(pos)) 131 + 132 + #define map__for_each_symbol_by_name(map, sym_name, pos) \ 133 + __map__for_each_symbol_by_name(map, sym_name, (pos), NULL) 134 + 119 135 typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym); 120 136 121 137 void map__init(struct map *map, enum map_type type,
+16 -18
tools/perf/util/probe-event.c
··· 446 446 } 447 447 448 448 for (i = 0; i < ntevs; i++) { 449 - if (tevs[i].point.address) { 449 + if (tevs[i].point.address && !tevs[i].point.retprobe) { 450 450 tmp = strdup(reloc_sym->name); 451 451 if (!tmp) 452 452 return -ENOMEM; ··· 2193 2193 return ret; 2194 2194 } 2195 2195 2196 - static char *looking_function_name; 2197 - static int num_matched_functions; 2198 - 2199 - static int probe_function_filter(struct map *map __maybe_unused, 2200 - struct symbol *sym) 2196 + static int find_probe_functions(struct map *map, char *name) 2201 2197 { 2202 - if ((sym->binding == STB_GLOBAL || sym->binding == STB_LOCAL) && 2203 - strcmp(looking_function_name, sym->name) == 0) { 2204 - num_matched_functions++; 2205 - return 0; 2198 + int found = 0; 2199 + struct symbol *sym; 2200 + 2201 + map__for_each_symbol_by_name(map, name, sym) { 2202 + if (sym->binding == STB_GLOBAL || sym->binding == STB_LOCAL) 2203 + found++; 2206 2204 } 2207 - return 1; 2205 + 2206 + return found; 2208 2207 } 2209 2208 2210 2209 #define strdup_or_goto(str, label) \ ··· 2221 2222 struct kmap *kmap = NULL; 2222 2223 struct ref_reloc_sym *reloc_sym = NULL; 2223 2224 struct symbol *sym; 2224 - struct rb_node *nd; 2225 2225 struct probe_trace_event *tev; 2226 2226 struct perf_probe_point *pp = &pev->point; 2227 2227 struct probe_trace_point *tp; 2228 + int num_matched_functions; 2228 2229 int ret, i; 2229 2230 2230 2231 /* Init maps of given executable or kernel */ ··· 2241 2242 * Load matched symbols: Since the different local symbols may have 2242 2243 * same name but different addresses, this lists all the symbols. 
2243 2244 */ 2244 - num_matched_functions = 0; 2245 - looking_function_name = pp->function; 2246 - ret = map__load(map, probe_function_filter); 2247 - if (ret || num_matched_functions == 0) { 2245 + num_matched_functions = find_probe_functions(map, pp->function); 2246 + if (num_matched_functions == 0) { 2248 2247 pr_err("Failed to find symbol %s in %s\n", pp->function, 2249 2248 target ? : "kernel"); 2250 2249 ret = -ENOENT; ··· 2254 2257 goto out; 2255 2258 } 2256 2259 2257 - if (!pev->uprobes) { 2260 + if (!pev->uprobes && !pp->retprobe) { 2258 2261 kmap = map__kmap(map); 2259 2262 reloc_sym = kmap->ref_reloc_sym; 2260 2263 if (!reloc_sym) { ··· 2272 2275 } 2273 2276 2274 2277 ret = 0; 2275 - map__for_each_symbol(map, sym, nd) { 2278 + 2279 + map__for_each_symbol_by_name(map, pp->function, sym) { 2276 2280 tev = (*tevs) + ret; 2277 2281 tp = &tev->point; 2278 2282 if (ret == num_matched_functions) {
+28 -3
tools/perf/util/symbol.c
··· 396 396 const char *name) 397 397 { 398 398 struct rb_node *n; 399 + struct symbol_name_rb_node *s; 399 400 400 401 if (symbols == NULL) 401 402 return NULL; ··· 404 403 n = symbols->rb_node; 405 404 406 405 while (n) { 407 - struct symbol_name_rb_node *s; 408 406 int cmp; 409 407 410 408 s = rb_entry(n, struct symbol_name_rb_node, rb_node); ··· 414 414 else if (cmp > 0) 415 415 n = n->rb_right; 416 416 else 417 - return &s->sym; 417 + break; 418 418 } 419 419 420 - return NULL; 420 + if (n == NULL) 421 + return NULL; 422 + 423 + /* return first symbol that has same name (if any) */ 424 + for (n = rb_prev(n); n; n = rb_prev(n)) { 425 + struct symbol_name_rb_node *tmp; 426 + 427 + tmp = rb_entry(n, struct symbol_name_rb_node, rb_node); 428 + if (strcmp(tmp->sym.name, s->sym.name)) 429 + break; 430 + 431 + s = tmp; 432 + } 433 + 434 + return &s->sym; 421 435 } 422 436 423 437 struct symbol *dso__find_symbol(struct dso *dso, ··· 450 436 return symbols__next(sym); 451 437 } 452 438 439 + struct symbol *symbol__next_by_name(struct symbol *sym) 440 + { 441 + struct symbol_name_rb_node *s = container_of(sym, struct symbol_name_rb_node, sym); 442 + struct rb_node *n = rb_next(&s->rb_node); 443 + 444 + return n ? &rb_entry(n, struct symbol_name_rb_node, rb_node)->sym : NULL; 445 + } 446 + 447 + /* 448 + * Teturns first symbol that matched with @name. 449 + */ 453 450 struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type, 454 451 const char *name) 455 452 {
+1
tools/perf/util/symbol.h
··· 231 231 u64 addr); 232 232 struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type, 233 233 const char *name); 234 + struct symbol *symbol__next_by_name(struct symbol *sym); 234 235 235 236 struct symbol *dso__first_symbol(struct dso *dso, enum map_type type); 236 237 struct symbol *dso__next_symbol(struct symbol *sym);