Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Couple conflicts resolved here:

1) In the MACB driver, a bug fix to properly initialize the
RX tail pointer overlapped with some changes
to support variable sized rings.

2) In XGBE we had a "CONFIG_PM" --> "CONFIG_PM_SLEEP" fix
overlapping with a reorganization of the driver to support
ACPI, OF, as well as PCI variants of the chip.

3) In 'net' we had several probe error path bug fixes to the
stmmac driver, meanwhile a lot of this code was cleaned up
and reorganized in 'net-next'.

4) The cls_flower classifier obtained a helper function in
'net-next' called __fl_delete() and this overlapped with
Daniel Borkmann's bug fix to use RCU for object destruction
in 'net'. It also overlapped with Jiri's change to guard
the rhashtable_remove_fast() call with a check against
tc_skip_sw().

5) In mlx4, a revert bug fix in 'net' overlapped with some
unrelated changes in 'net-next'.

6) In geneve, a stale header pointer after pskb_expand_head()
bug fix in 'net' overlapped with a large reorganization of
the same code in 'net-next'. Since the 'net-next' code no
longer had the bug in question, there was nothing to do
other than to simply take the 'net-next' hunks.

Signed-off-by: David S. Miller <davem@davemloft.net>

+1414 -612
+4 -4
CREDITS
··· 9 9 Linus 10 10 ---------- 11 11 12 - M: Matt Mackal 12 + N: Matt Mackal 13 13 E: mpm@selenic.com 14 14 D: SLOB slab allocator 15 15 ··· 1910 1910 1911 1911 N: Andi Kleen 1912 1912 E: andi@firstfloor.org 1913 - U: http://www.halobates.de 1913 + W: http://www.halobates.de 1914 1914 D: network, x86, NUMA, various hacks 1915 1915 S: Schwalbenstr. 96 1916 1916 S: 85551 Ottobrunn ··· 2089 2089 D: Synopsys Designware PCI host bridge driver 2090 2090 2091 2091 N: Gabor Kuti 2092 - M: seasons@falcon.sch.bme.hu 2093 - M: seasons@makosteszta.sote.hu 2092 + E: seasons@falcon.sch.bme.hu 2093 + E: seasons@makosteszta.sote.hu 2094 2094 D: Original author of software suspend 2095 2095 2096 2096 N: Jaroslav Kysela
+20 -4
Documentation/devicetree/bindings/net/ethernet.txt
··· 9 9 - max-speed: number, specifies maximum speed in Mbit/s supported by the device; 10 10 - max-frame-size: number, maximum transfer unit (IEEE defined MTU), rather than 11 11 the maximum frame size (there's contradiction in ePAPR). 12 - - phy-mode: string, operation mode of the PHY interface; supported values are 13 - "mii", "gmii", "sgmii", "qsgmii", "tbi", "rev-mii", "rmii", "rgmii", "rgmii-id", 14 - "rgmii-rxid", "rgmii-txid", "rtbi", "smii", "xgmii", "trgmii"; this is now a 15 - de-facto standard property; 12 + - phy-mode: string, operation mode of the PHY interface. This is now a de-facto 13 + standard property; supported values are: 14 + * "mii" 15 + * "gmii" 16 + * "sgmii" 17 + * "qsgmii" 18 + * "tbi" 19 + * "rev-mii" 20 + * "rmii" 21 + * "rgmii" (RX and TX delays are added by the MAC when required) 22 + * "rgmii-id" (RGMII with internal RX and TX delays provided by the PHY, the 23 + MAC should not add the RX or TX delays in this case) 24 + * "rgmii-rxid" (RGMII with internal RX delay provided by the PHY, the MAC 25 + should not add an RX delay in this case) 26 + * "rgmii-txid" (RGMII with internal TX delay provided by the PHY, the MAC 27 + should not add an TX delay in this case) 28 + * "rtbi" 29 + * "smii" 30 + * "xgmii" 31 + * "trgmii" 16 32 - phy-connection-type: the same as "phy-mode" property but described in ePAPR; 17 33 - phy-handle: phandle, specifies a reference to a node representing a PHY 18 34 device; this property is described in ePAPR and so preferred;
+5 -2
Documentation/networking/nf_conntrack-sysctl.txt
··· 62 62 protocols. 63 63 64 64 nf_conntrack_helper - BOOLEAN 65 - 0 - disabled 66 - not 0 - enabled (default) 65 + 0 - disabled (default) 66 + not 0 - enabled 67 67 68 68 Enable automatic conntrack helper assignment. 69 + If disabled it is required to set up iptables rules to assign 70 + helpers to connections. See the CT target description in the 71 + iptables-extensions(8) man page for further information. 69 72 70 73 nf_conntrack_icmp_timeout - INTEGER (seconds) 71 74 default 30
+4 -9
MAINTAINERS
··· 9260 9260 F: drivers/pci/host/*layerscape* 9261 9261 9262 9262 PCI DRIVER FOR IMX6 9263 - M: Richard Zhu <Richard.Zhu@freescale.com> 9263 + M: Richard Zhu <hongxing.zhu@nxp.com> 9264 9264 M: Lucas Stach <l.stach@pengutronix.de> 9265 9265 L: linux-pci@vger.kernel.org 9266 9266 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 9267 9267 S: Maintained 9268 + F: Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt 9268 9269 F: drivers/pci/host/*imx6* 9269 9270 9270 9271 PCI DRIVER FOR TI KEYSTONE ··· 9324 9323 9325 9324 PCI DRIVER FOR SYNOPSIS DESIGNWARE 9326 9325 M: Jingoo Han <jingoohan1@gmail.com> 9327 - M: Pratyush Anand <pratyush.anand@gmail.com> 9328 - L: linux-pci@vger.kernel.org 9329 - S: Maintained 9330 - F: drivers/pci/host/*designware* 9331 - 9332 - PCI DRIVER FOR SYNOPSYS PROTOTYPING DEVICE 9333 - M: Jose Abreu <Jose.Abreu@synopsys.com> 9326 + M: Joao Pinto <Joao.Pinto@synopsys.com> 9334 9327 L: linux-pci@vger.kernel.org 9335 9328 S: Maintained 9336 9329 F: Documentation/devicetree/bindings/pci/designware-pcie.txt 9337 - F: drivers/pci/host/pcie-designware-plat.c 9330 + F: drivers/pci/host/*designware* 9338 9331 9339 9332 PCI DRIVER FOR GENERIC OF HOSTS 9340 9333 M: Will Deacon <will.deacon@arm.com>
+1 -3
Makefile
··· 1 1 VERSION = 4 2 2 PATCHLEVEL = 9 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc6 4 + EXTRAVERSION = -rc7 5 5 NAME = Psychotic Stoned Sheep 6 6 7 7 # *DOCUMENTATION* ··· 1019 1019 prepare1: prepare2 $(version_h) include/generated/utsrelease.h \ 1020 1020 include/config/auto.conf 1021 1021 $(cmd_crmodverdir) 1022 - $(Q)test -e include/generated/autoksyms.h || \ 1023 - touch include/generated/autoksyms.h 1024 1022 1025 1023 archprepare: archheaders archscripts prepare1 scripts_basic 1026 1024
+5 -4
arch/arc/include/asm/delay.h
··· 22 22 static inline void __delay(unsigned long loops) 23 23 { 24 24 __asm__ __volatile__( 25 - " lp 1f \n" 26 - " nop \n" 27 - "1: \n" 28 - : "+l"(loops)); 25 + " mov lp_count, %0 \n" 26 + " lp 1f \n" 27 + " nop \n" 28 + "1: \n" 29 + : : "r"(loops)); 29 30 } 30 31 31 32 extern void __bad_udelay(void);
+1 -1
arch/arc/include/asm/pgtable.h
··· 280 280 281 281 #define pte_page(pte) pfn_to_page(pte_pfn(pte)) 282 282 #define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot) 283 - #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) 283 + #define pfn_pte(pfn, prot) __pte(__pfn_to_phys(pfn) | pgprot_val(prot)) 284 284 285 285 /* Don't use virt_to_pfn for macros below: could cause truncations for PAE40*/ 286 286 #define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
+1 -1
arch/arc/mm/cache.c
··· 23 23 24 24 static int l2_line_sz; 25 25 static int ioc_exists; 26 - int slc_enable = 1, ioc_enable = 1; 26 + int slc_enable = 1, ioc_enable = 0; 27 27 unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */ 28 28 unsigned long perip_end = 0xFFFFFFFF; /* legacy value */ 29 29
+1 -1
arch/arm/boot/dts/Makefile
··· 745 745 sun4i-a10-pcduino2.dtb \ 746 746 sun4i-a10-pov-protab2-ips9.dtb 747 747 dtb-$(CONFIG_MACH_SUN5I) += \ 748 - ntc-gr8-evb.dtb \ 749 748 sun5i-a10s-auxtek-t003.dtb \ 750 749 sun5i-a10s-auxtek-t004.dtb \ 751 750 sun5i-a10s-mk802.dtb \ ··· 760 761 sun5i-a13-olinuxino-micro.dtb \ 761 762 sun5i-a13-q8-tablet.dtb \ 762 763 sun5i-a13-utoo-p66.dtb \ 764 + sun5i-gr8-evb.dtb \ 763 765 sun5i-r8-chip.dtb 764 766 dtb-$(CONFIG_MACH_SUN6I) += \ 765 767 sun6i-a31-app4-evb1.dtb \
+1 -1
arch/arm/boot/dts/ntc-gr8-evb.dts arch/arm/boot/dts/sun5i-gr8-evb.dts
··· 44 44 */ 45 45 46 46 /dts-v1/; 47 - #include "ntc-gr8.dtsi" 47 + #include "sun5i-gr8.dtsi" 48 48 #include "sunxi-common-regulators.dtsi" 49 49 50 50 #include <dt-bindings/gpio/gpio.h>
arch/arm/boot/dts/ntc-gr8.dtsi arch/arm/boot/dts/sun5i-gr8.dtsi
+16
arch/arm/boot/dts/stih407-family.dtsi
··· 283 283 clock-frequency = <400000>; 284 284 pinctrl-names = "default"; 285 285 pinctrl-0 = <&pinctrl_i2c0_default>; 286 + #address-cells = <1>; 287 + #size-cells = <0>; 286 288 287 289 status = "disabled"; 288 290 }; ··· 298 296 clock-frequency = <400000>; 299 297 pinctrl-names = "default"; 300 298 pinctrl-0 = <&pinctrl_i2c1_default>; 299 + #address-cells = <1>; 300 + #size-cells = <0>; 301 301 302 302 status = "disabled"; 303 303 }; ··· 313 309 clock-frequency = <400000>; 314 310 pinctrl-names = "default"; 315 311 pinctrl-0 = <&pinctrl_i2c2_default>; 312 + #address-cells = <1>; 313 + #size-cells = <0>; 316 314 317 315 status = "disabled"; 318 316 }; ··· 328 322 clock-frequency = <400000>; 329 323 pinctrl-names = "default"; 330 324 pinctrl-0 = <&pinctrl_i2c3_default>; 325 + #address-cells = <1>; 326 + #size-cells = <0>; 331 327 332 328 status = "disabled"; 333 329 }; ··· 343 335 clock-frequency = <400000>; 344 336 pinctrl-names = "default"; 345 337 pinctrl-0 = <&pinctrl_i2c4_default>; 338 + #address-cells = <1>; 339 + #size-cells = <0>; 346 340 347 341 status = "disabled"; 348 342 }; ··· 358 348 clock-frequency = <400000>; 359 349 pinctrl-names = "default"; 360 350 pinctrl-0 = <&pinctrl_i2c5_default>; 351 + #address-cells = <1>; 352 + #size-cells = <0>; 361 353 362 354 status = "disabled"; 363 355 }; ··· 375 363 clock-frequency = <400000>; 376 364 pinctrl-names = "default"; 377 365 pinctrl-0 = <&pinctrl_i2c10_default>; 366 + #address-cells = <1>; 367 + #size-cells = <0>; 378 368 379 369 status = "disabled"; 380 370 }; ··· 390 376 clock-frequency = <400000>; 391 377 pinctrl-names = "default"; 392 378 pinctrl-0 = <&pinctrl_i2c11_default>; 379 + #address-cells = <1>; 380 + #size-cells = <0>; 393 381 394 382 status = "disabled"; 395 383 };
+1 -1
arch/arm64/boot/dts/arm/juno-base.dtsi
··· 393 393 #address-cells = <3>; 394 394 #size-cells = <2>; 395 395 dma-coherent; 396 - ranges = <0x01000000 0x00 0x5f800000 0x00 0x5f800000 0x0 0x00800000>, 396 + ranges = <0x01000000 0x00 0x00000000 0x00 0x5f800000 0x0 0x00800000>, 397 397 <0x02000000 0x00 0x50000000 0x00 0x50000000 0x0 0x08000000>, 398 398 <0x42000000 0x40 0x00000000 0x40 0x00000000 0x1 0x00000000>; 399 399 #interrupt-cells = <1>;
+1 -1
arch/arm64/boot/dts/arm/juno-r1.dts
··· 76 76 compatible = "arm,idle-state"; 77 77 arm,psci-suspend-param = <0x1010000>; 78 78 local-timer-stop; 79 - entry-latency-us = <300>; 79 + entry-latency-us = <400>; 80 80 exit-latency-us = <1200>; 81 81 min-residency-us = <2500>; 82 82 };
+1 -1
arch/arm64/boot/dts/arm/juno-r2.dts
··· 76 76 compatible = "arm,idle-state"; 77 77 arm,psci-suspend-param = <0x1010000>; 78 78 local-timer-stop; 79 - entry-latency-us = <300>; 79 + entry-latency-us = <400>; 80 80 exit-latency-us = <1200>; 81 81 min-residency-us = <2500>; 82 82 };
+1 -1
arch/arm64/boot/dts/arm/juno.dts
··· 76 76 compatible = "arm,idle-state"; 77 77 arm,psci-suspend-param = <0x1010000>; 78 78 local-timer-stop; 79 - entry-latency-us = <300>; 79 + entry-latency-us = <400>; 80 80 exit-latency-us = <1200>; 81 81 min-residency-us = <2500>; 82 82 };
+6
arch/mips/include/asm/mipsregs.h
··· 215 215 #endif 216 216 217 217 /* 218 + * Wired register bits 219 + */ 220 + #define MIPSR6_WIRED_LIMIT (_ULCAST_(0xffff) << 16) 221 + #define MIPSR6_WIRED_WIRED (_ULCAST_(0xffff) << 0) 222 + 223 + /* 218 224 * Values used for computation of new tlb entries 219 225 */ 220 226 #define PL_4K 12
+13
arch/mips/include/asm/tlb.h
··· 1 1 #ifndef __ASM_TLB_H 2 2 #define __ASM_TLB_H 3 3 4 + #include <asm/cpu-features.h> 5 + #include <asm/mipsregs.h> 6 + 4 7 /* 5 8 * MIPS doesn't need any special per-pte or per-vma handling, except 6 9 * we need to flush cache for area to be unmapped. ··· 24 21 #define UNIQUE_ENTRYHI(idx) \ 25 22 ((CKSEG0 + ((idx) << (PAGE_SHIFT + 1))) | \ 26 23 (cpu_has_tlbinv ? MIPS_ENTRYHI_EHINV : 0)) 24 + 25 + static inline unsigned int num_wired_entries(void) 26 + { 27 + unsigned int wired = read_c0_wired(); 28 + 29 + if (cpu_has_mips_r6) 30 + wired &= MIPSR6_WIRED_WIRED; 31 + 32 + return wired; 33 + } 27 34 28 35 #include <asm-generic/tlb.h> 29 36
+5 -4
arch/mips/mm/fault.c
··· 209 209 if (show_unhandled_signals && 210 210 unhandled_signal(tsk, SIGSEGV) && 211 211 __ratelimit(&ratelimit_state)) { 212 - pr_info("\ndo_page_fault(): sending SIGSEGV to %s for invalid %s %0*lx", 212 + pr_info("do_page_fault(): sending SIGSEGV to %s for invalid %s %0*lx\n", 213 213 tsk->comm, 214 214 write ? "write access to" : "read access from", 215 215 field, address); 216 216 pr_info("epc = %0*lx in", field, 217 217 (unsigned long) regs->cp0_epc); 218 - print_vma_addr(" ", regs->cp0_epc); 218 + print_vma_addr(KERN_CONT " ", regs->cp0_epc); 219 + pr_cont("\n"); 219 220 pr_info("ra = %0*lx in", field, 220 221 (unsigned long) regs->regs[31]); 221 - print_vma_addr(" ", regs->regs[31]); 222 - pr_info("\n"); 222 + print_vma_addr(KERN_CONT " ", regs->regs[31]); 223 + pr_cont("\n"); 223 224 } 224 225 current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f; 225 226 info.si_signo = SIGSEGV;
+2 -2
arch/mips/mm/init.c
··· 118 118 writex_c0_entrylo1(entrylo); 119 119 } 120 120 #endif 121 - tlbidx = read_c0_wired(); 121 + tlbidx = num_wired_entries(); 122 122 write_c0_wired(tlbidx + 1); 123 123 write_c0_index(tlbidx); 124 124 mtc0_tlbw_hazard(); ··· 147 147 148 148 local_irq_save(flags); 149 149 old_ctx = read_c0_entryhi(); 150 - wired = read_c0_wired() - 1; 150 + wired = num_wired_entries() - 1; 151 151 write_c0_wired(wired); 152 152 write_c0_index(wired); 153 153 write_c0_entryhi(UNIQUE_ENTRYHI(wired));
+3 -3
arch/mips/mm/tlb-r4k.c
··· 65 65 write_c0_entrylo0(0); 66 66 write_c0_entrylo1(0); 67 67 68 - entry = read_c0_wired(); 68 + entry = num_wired_entries(); 69 69 70 70 /* 71 71 * Blast 'em all away. ··· 385 385 old_ctx = read_c0_entryhi(); 386 386 htw_stop(); 387 387 old_pagemask = read_c0_pagemask(); 388 - wired = read_c0_wired(); 388 + wired = num_wired_entries(); 389 389 write_c0_wired(wired + 1); 390 390 write_c0_index(wired); 391 391 tlbw_use_hazard(); /* What is the hazard here? */ ··· 449 449 htw_stop(); 450 450 old_ctx = read_c0_entryhi(); 451 451 old_pagemask = read_c0_pagemask(); 452 - wired = read_c0_wired(); 452 + wired = num_wired_entries(); 453 453 if (--temp_tlb_entry < wired) { 454 454 printk(KERN_WARNING 455 455 "No TLB space left for add_temporary_entry\n");
-7
drivers/ata/ahci.c
··· 1436 1436 "ahci: MRSM is on, fallback to single MSI\n"); 1437 1437 pci_free_irq_vectors(pdev); 1438 1438 } 1439 - 1440 - /* 1441 - * -ENOSPC indicated we don't have enough vectors. Don't bother 1442 - * trying a single vectors for any other error: 1443 - */ 1444 - if (nvec < 0 && nvec != -ENOSPC) 1445 - return nvec; 1446 1439 } 1447 1440 1448 1441 /*
+1 -1
drivers/ata/libata-scsi.c
··· 1088 1088 desc[1] = tf->command; /* status */ 1089 1089 desc[2] = tf->device; 1090 1090 desc[3] = tf->nsect; 1091 - desc[0] = 0; 1091 + desc[7] = 0; 1092 1092 if (tf->flags & ATA_TFLAG_LBA48) { 1093 1093 desc[8] |= 0x80; 1094 1094 if (tf->hob_nsect)
+2 -1
drivers/block/zram/zram_drv.c
··· 1403 1403 zram = idr_find(&zram_index_idr, dev_id); 1404 1404 if (zram) { 1405 1405 ret = zram_remove(zram); 1406 - idr_remove(&zram_index_idr, dev_id); 1406 + if (!ret) 1407 + idr_remove(&zram_index_idr, dev_id); 1407 1408 } else { 1408 1409 ret = -ENODEV; 1409 1410 }
+1 -1
drivers/clk/bcm/Kconfig
··· 20 20 21 21 config COMMON_CLK_IPROC 22 22 bool "Broadcom iProc clock support" 23 - depends on ARCH_BCM_IPROC || COMPILE_TEST 23 + depends on ARCH_BCM_IPROC || ARCH_BCM_63XX || COMPILE_TEST 24 24 depends on COMMON_CLK 25 25 default ARCH_BCM_IPROC 26 26 help
+1 -1
drivers/clk/sunxi-ng/ccu-sun6i-a31.c
··· 143 143 4, 2, /* K */ 144 144 0, 4, /* M */ 145 145 21, 0, /* mux */ 146 - BIT(31), /* gate */ 146 + BIT(31) | BIT(23) | BIT(22), /* gate */ 147 147 BIT(28), /* lock */ 148 148 CLK_SET_RATE_UNGATE); 149 149
+1 -1
drivers/clk/sunxi-ng/ccu-sun8i-a33.c
··· 131 131 8, 4, /* N */ 132 132 4, 2, /* K */ 133 133 0, 4, /* M */ 134 - BIT(31), /* gate */ 134 + BIT(31) | BIT(23) | BIT(22), /* gate */ 135 135 BIT(28), /* lock */ 136 136 CLK_SET_RATE_UNGATE); 137 137
+1 -3
drivers/i2c/busses/i2c-octeon-core.c
··· 381 381 if (result) 382 382 return result; 383 383 384 - data[i] = octeon_i2c_data_read(i2c, &result); 385 - if (result) 386 - return result; 384 + data[i] = octeon_i2c_data_read(i2c); 387 385 if (recv_len && i == 0) { 388 386 if (data[i] > I2C_SMBUS_BLOCK_MAX + 1) 389 387 return -EPROTO;
+11 -16
drivers/i2c/busses/i2c-octeon-core.h
··· 5 5 #include <linux/i2c.h> 6 6 #include <linux/i2c-smbus.h> 7 7 #include <linux/io.h> 8 - #include <linux/iopoll.h> 9 8 #include <linux/kernel.h> 10 9 #include <linux/pci.h> 11 10 ··· 144 145 u64 tmp; 145 146 146 147 __raw_writeq(SW_TWSI_V | eop_reg | data, i2c->twsi_base + SW_TWSI(i2c)); 147 - 148 - readq_poll_timeout(i2c->twsi_base + SW_TWSI(i2c), tmp, tmp & SW_TWSI_V, 149 - I2C_OCTEON_EVENT_WAIT, i2c->adap.timeout); 148 + do { 149 + tmp = __raw_readq(i2c->twsi_base + SW_TWSI(i2c)); 150 + } while ((tmp & SW_TWSI_V) != 0); 150 151 } 151 152 152 153 #define octeon_i2c_ctl_write(i2c, val) \ ··· 163 164 * 164 165 * The I2C core registers are accessed indirectly via the SW_TWSI CSR. 165 166 */ 166 - static inline int octeon_i2c_reg_read(struct octeon_i2c *i2c, u64 eop_reg, 167 - int *error) 167 + static inline u8 octeon_i2c_reg_read(struct octeon_i2c *i2c, u64 eop_reg) 168 168 { 169 169 u64 tmp; 170 - int ret; 171 170 172 171 __raw_writeq(SW_TWSI_V | eop_reg | SW_TWSI_R, i2c->twsi_base + SW_TWSI(i2c)); 172 + do { 173 + tmp = __raw_readq(i2c->twsi_base + SW_TWSI(i2c)); 174 + } while ((tmp & SW_TWSI_V) != 0); 173 175 174 - ret = readq_poll_timeout(i2c->twsi_base + SW_TWSI(i2c), tmp, 175 - tmp & SW_TWSI_V, I2C_OCTEON_EVENT_WAIT, 176 - i2c->adap.timeout); 177 - if (error) 178 - *error = ret; 179 176 return tmp & 0xFF; 180 177 } 181 178 182 179 #define octeon_i2c_ctl_read(i2c) \ 183 - octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_CTL, NULL) 184 - #define octeon_i2c_data_read(i2c, error) \ 185 - octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_DATA, error) 180 + octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_CTL) 181 + #define octeon_i2c_data_read(i2c) \ 182 + octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_DATA) 186 183 #define octeon_i2c_stat_read(i2c) \ 187 - octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_STAT, NULL) 184 + octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_STAT) 188 185 189 186 /** 190 187 * octeon_i2c_read_int - read the TWSI_INT register
-4
drivers/input/mouse/psmouse-base.c
··· 1115 1115 if (psmouse_try_protocol(psmouse, PSMOUSE_TOUCHKIT_PS2, 1116 1116 &max_proto, set_properties, true)) 1117 1117 return PSMOUSE_TOUCHKIT_PS2; 1118 - 1119 - if (psmouse_try_protocol(psmouse, PSMOUSE_BYD, 1120 - &max_proto, set_properties, true)) 1121 - return PSMOUSE_BYD; 1122 1118 } 1123 1119 1124 1120 /*
+3 -1
drivers/iommu/dmar.c
··· 338 338 struct pci_dev *pdev = to_pci_dev(data); 339 339 struct dmar_pci_notify_info *info; 340 340 341 - /* Only care about add/remove events for physical functions */ 341 + /* Only care about add/remove events for physical functions. 342 + * For VFs we actually do the lookup based on the corresponding 343 + * PF in device_to_iommu() anyway. */ 342 344 if (pdev->is_virtfn) 343 345 return NOTIFY_DONE; 344 346 if (action != BUS_NOTIFY_ADD_DEVICE &&
+13
drivers/iommu/intel-iommu.c
··· 892 892 return NULL; 893 893 894 894 if (dev_is_pci(dev)) { 895 + struct pci_dev *pf_pdev; 896 + 895 897 pdev = to_pci_dev(dev); 898 + /* VFs aren't listed in scope tables; we need to look up 899 + * the PF instead to find the IOMMU. */ 900 + pf_pdev = pci_physfn(pdev); 901 + dev = &pf_pdev->dev; 896 902 segment = pci_domain_nr(pdev->bus); 897 903 } else if (has_acpi_companion(dev)) 898 904 dev = &ACPI_COMPANION(dev)->dev; ··· 911 905 for_each_active_dev_scope(drhd->devices, 912 906 drhd->devices_cnt, i, tmp) { 913 907 if (tmp == dev) { 908 + /* For a VF use its original BDF# not that of the PF 909 + * which we used for the IOMMU lookup. Strictly speaking 910 + * we could do this for all PCI devices; we only need to 911 + * get the BDF# from the scope table for ACPI matches. */ 912 + if (pdev->is_virtfn) 913 + goto got_pdev; 914 + 914 915 *bus = drhd->devices[i].bus; 915 916 *devfn = drhd->devices[i].devfn; 916 917 goto out;
+16 -10
drivers/iommu/intel-svm.c
··· 39 39 struct page *pages; 40 40 int order; 41 41 42 - order = ecap_pss(iommu->ecap) + 7 - PAGE_SHIFT; 43 - if (order < 0) 44 - order = 0; 42 + /* Start at 2 because it's defined as 2^(1+PSS) */ 43 + iommu->pasid_max = 2 << ecap_pss(iommu->ecap); 45 44 45 + /* Eventually I'm promised we will get a multi-level PASID table 46 + * and it won't have to be physically contiguous. Until then, 47 + * limit the size because 8MiB contiguous allocations can be hard 48 + * to come by. The limit of 0x20000, which is 1MiB for each of 49 + * the PASID and PASID-state tables, is somewhat arbitrary. */ 50 + if (iommu->pasid_max > 0x20000) 51 + iommu->pasid_max = 0x20000; 52 + 53 + order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max); 46 54 pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order); 47 55 if (!pages) { 48 56 pr_warn("IOMMU: %s: Failed to allocate PASID table\n", ··· 61 53 pr_info("%s: Allocated order %d PASID table.\n", iommu->name, order); 62 54 63 55 if (ecap_dis(iommu->ecap)) { 56 + /* Just making it explicit... */ 57 + BUILD_BUG_ON(sizeof(struct pasid_entry) != sizeof(struct pasid_state_entry)); 64 58 pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order); 65 59 if (pages) 66 60 iommu->pasid_state_table = page_address(pages); ··· 78 68 79 69 int intel_svm_free_pasid_tables(struct intel_iommu *iommu) 80 70 { 81 - int order; 82 - 83 - order = ecap_pss(iommu->ecap) + 7 - PAGE_SHIFT; 84 - if (order < 0) 85 - order = 0; 71 + int order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max); 86 72 87 73 if (iommu->pasid_table) { 88 74 free_pages((unsigned long)iommu->pasid_table, order); ··· 377 371 } 378 372 svm->iommu = iommu; 379 373 380 - if (pasid_max > 2 << ecap_pss(iommu->ecap)) 381 - pasid_max = 2 << ecap_pss(iommu->ecap); 374 + if (pasid_max > iommu->pasid_max) 375 + pasid_max = iommu->pasid_max; 382 376 383 377 /* Do not use PASID 0 in caching mode (virtualised IOMMU) */ 384 378 ret = idr_alloc(&iommu->pasid_idr, svm,
+29 -8
drivers/net/can/usb/peak_usb/pcan_ucan.h
··· 43 43 u16 args[3]; 44 44 }; 45 45 46 + #define PUCAN_TSLOW_BRP_BITS 10 47 + #define PUCAN_TSLOW_TSGEG1_BITS 8 48 + #define PUCAN_TSLOW_TSGEG2_BITS 7 49 + #define PUCAN_TSLOW_SJW_BITS 7 50 + 51 + #define PUCAN_TSLOW_BRP_MASK ((1 << PUCAN_TSLOW_BRP_BITS) - 1) 52 + #define PUCAN_TSLOW_TSEG1_MASK ((1 << PUCAN_TSLOW_TSGEG1_BITS) - 1) 53 + #define PUCAN_TSLOW_TSEG2_MASK ((1 << PUCAN_TSLOW_TSGEG2_BITS) - 1) 54 + #define PUCAN_TSLOW_SJW_MASK ((1 << PUCAN_TSLOW_SJW_BITS) - 1) 55 + 46 56 /* uCAN TIMING_SLOW command fields */ 47 - #define PUCAN_TSLOW_SJW_T(s, t) (((s) & 0xf) | ((!!(t)) << 7)) 48 - #define PUCAN_TSLOW_TSEG2(t) ((t) & 0xf) 49 - #define PUCAN_TSLOW_TSEG1(t) ((t) & 0x3f) 50 - #define PUCAN_TSLOW_BRP(b) ((b) & 0x3ff) 57 + #define PUCAN_TSLOW_SJW_T(s, t) (((s) & PUCAN_TSLOW_SJW_MASK) | \ 58 + ((!!(t)) << 7)) 59 + #define PUCAN_TSLOW_TSEG2(t) ((t) & PUCAN_TSLOW_TSEG2_MASK) 60 + #define PUCAN_TSLOW_TSEG1(t) ((t) & PUCAN_TSLOW_TSEG1_MASK) 61 + #define PUCAN_TSLOW_BRP(b) ((b) & PUCAN_TSLOW_BRP_MASK) 51 62 52 63 struct __packed pucan_timing_slow { 53 64 __le16 opcode_channel; ··· 71 60 __le16 brp; /* BaudRate Prescaler */ 72 61 }; 73 62 63 + #define PUCAN_TFAST_BRP_BITS 10 64 + #define PUCAN_TFAST_TSGEG1_BITS 5 65 + #define PUCAN_TFAST_TSGEG2_BITS 4 66 + #define PUCAN_TFAST_SJW_BITS 4 67 + 68 + #define PUCAN_TFAST_BRP_MASK ((1 << PUCAN_TFAST_BRP_BITS) - 1) 69 + #define PUCAN_TFAST_TSEG1_MASK ((1 << PUCAN_TFAST_TSGEG1_BITS) - 1) 70 + #define PUCAN_TFAST_TSEG2_MASK ((1 << PUCAN_TFAST_TSGEG2_BITS) - 1) 71 + #define PUCAN_TFAST_SJW_MASK ((1 << PUCAN_TFAST_SJW_BITS) - 1) 72 + 74 73 /* uCAN TIMING_FAST command fields */ 75 - #define PUCAN_TFAST_SJW(s) ((s) & 0x3) 76 - #define PUCAN_TFAST_TSEG2(t) ((t) & 0x7) 77 - #define PUCAN_TFAST_TSEG1(t) ((t) & 0xf) 78 - #define PUCAN_TFAST_BRP(b) ((b) & 0x3ff) 74 + #define PUCAN_TFAST_SJW(s) ((s) & PUCAN_TFAST_SJW_MASK) 75 + #define PUCAN_TFAST_TSEG2(t) ((t) & PUCAN_TFAST_TSEG2_MASK) 76 + #define PUCAN_TFAST_TSEG1(t) ((t) & 
PUCAN_TFAST_TSEG1_MASK) 77 + #define PUCAN_TFAST_BRP(b) ((b) & PUCAN_TFAST_BRP_MASK) 79 78 80 79 struct __packed pucan_timing_fast { 81 80 __le16 opcode_channel;
+2
drivers/net/can/usb/peak_usb/pcan_usb_core.c
··· 39 39 {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPRO_PRODUCT_ID)}, 40 40 {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBFD_PRODUCT_ID)}, 41 41 {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPROFD_PRODUCT_ID)}, 42 + {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBX6_PRODUCT_ID)}, 42 43 {} /* Terminating entry */ 43 44 }; 44 45 ··· 51 50 &pcan_usb_pro, 52 51 &pcan_usb_fd, 53 52 &pcan_usb_pro_fd, 53 + &pcan_usb_x6, 54 54 }; 55 55 56 56 /*
+2
drivers/net/can/usb/peak_usb/pcan_usb_core.h
··· 27 27 #define PCAN_USBPRO_PRODUCT_ID 0x000d 28 28 #define PCAN_USBPROFD_PRODUCT_ID 0x0011 29 29 #define PCAN_USBFD_PRODUCT_ID 0x0012 30 + #define PCAN_USBX6_PRODUCT_ID 0x0014 30 31 31 32 #define PCAN_USB_DRIVER_NAME "peak_usb" 32 33 ··· 91 90 extern const struct peak_usb_adapter pcan_usb_pro; 92 91 extern const struct peak_usb_adapter pcan_usb_fd; 93 92 extern const struct peak_usb_adapter pcan_usb_pro_fd; 93 + extern const struct peak_usb_adapter pcan_usb_x6; 94 94 95 95 struct peak_time_ref { 96 96 struct timeval tv_host_0, tv_host;
+88 -16
drivers/net/can/usb/peak_usb/pcan_usb_fd.c
··· 993 993 static const struct can_bittiming_const pcan_usb_fd_const = { 994 994 .name = "pcan_usb_fd", 995 995 .tseg1_min = 1, 996 - .tseg1_max = 64, 996 + .tseg1_max = (1 << PUCAN_TSLOW_TSGEG1_BITS), 997 997 .tseg2_min = 1, 998 - .tseg2_max = 16, 999 - .sjw_max = 16, 998 + .tseg2_max = (1 << PUCAN_TSLOW_TSGEG2_BITS), 999 + .sjw_max = (1 << PUCAN_TSLOW_SJW_BITS), 1000 1000 .brp_min = 1, 1001 - .brp_max = 1024, 1001 + .brp_max = (1 << PUCAN_TSLOW_BRP_BITS), 1002 1002 .brp_inc = 1, 1003 1003 }; 1004 1004 1005 1005 static const struct can_bittiming_const pcan_usb_fd_data_const = { 1006 1006 .name = "pcan_usb_fd", 1007 1007 .tseg1_min = 1, 1008 - .tseg1_max = 16, 1008 + .tseg1_max = (1 << PUCAN_TFAST_TSGEG1_BITS), 1009 1009 .tseg2_min = 1, 1010 - .tseg2_max = 8, 1011 - .sjw_max = 4, 1010 + .tseg2_max = (1 << PUCAN_TFAST_TSGEG2_BITS), 1011 + .sjw_max = (1 << PUCAN_TFAST_SJW_BITS), 1012 1012 .brp_min = 1, 1013 - .brp_max = 1024, 1013 + .brp_max = (1 << PUCAN_TFAST_BRP_BITS), 1014 1014 .brp_inc = 1, 1015 1015 }; 1016 1016 ··· 1065 1065 static const struct can_bittiming_const pcan_usb_pro_fd_const = { 1066 1066 .name = "pcan_usb_pro_fd", 1067 1067 .tseg1_min = 1, 1068 - .tseg1_max = 64, 1068 + .tseg1_max = (1 << PUCAN_TSLOW_TSGEG1_BITS), 1069 1069 .tseg2_min = 1, 1070 - .tseg2_max = 16, 1071 - .sjw_max = 16, 1070 + .tseg2_max = (1 << PUCAN_TSLOW_TSGEG2_BITS), 1071 + .sjw_max = (1 << PUCAN_TSLOW_SJW_BITS), 1072 1072 .brp_min = 1, 1073 - .brp_max = 1024, 1073 + .brp_max = (1 << PUCAN_TSLOW_BRP_BITS), 1074 1074 .brp_inc = 1, 1075 1075 }; 1076 1076 1077 1077 static const struct can_bittiming_const pcan_usb_pro_fd_data_const = { 1078 1078 .name = "pcan_usb_pro_fd", 1079 1079 .tseg1_min = 1, 1080 - .tseg1_max = 16, 1080 + .tseg1_max = (1 << PUCAN_TFAST_TSGEG1_BITS), 1081 1081 .tseg2_min = 1, 1082 - .tseg2_max = 8, 1083 - .sjw_max = 4, 1082 + .tseg2_max = (1 << PUCAN_TFAST_TSGEG2_BITS), 1083 + .sjw_max = (1 << PUCAN_TFAST_SJW_BITS), 1084 1084 .brp_min = 1, 1085 - .brp_max = 
1024, 1085 + .brp_max = (1 << PUCAN_TFAST_BRP_BITS), 1086 1086 .brp_inc = 1, 1087 1087 }; 1088 1088 ··· 1097 1097 }, 1098 1098 .bittiming_const = &pcan_usb_pro_fd_const, 1099 1099 .data_bittiming_const = &pcan_usb_pro_fd_data_const, 1100 + 1101 + /* size of device private data */ 1102 + .sizeof_dev_private = sizeof(struct pcan_usb_fd_device), 1103 + 1104 + /* timestamps usage */ 1105 + .ts_used_bits = 32, 1106 + .ts_period = 1000000, /* calibration period in ts. */ 1107 + .us_per_ts_scale = 1, /* us = (ts * scale) >> shift */ 1108 + .us_per_ts_shift = 0, 1109 + 1110 + /* give here messages in/out endpoints */ 1111 + .ep_msg_in = PCAN_USBPRO_EP_MSGIN, 1112 + .ep_msg_out = {PCAN_USBPRO_EP_MSGOUT_0, PCAN_USBPRO_EP_MSGOUT_1}, 1113 + 1114 + /* size of rx/tx usb buffers */ 1115 + .rx_buffer_size = PCAN_UFD_RX_BUFFER_SIZE, 1116 + .tx_buffer_size = PCAN_UFD_TX_BUFFER_SIZE, 1117 + 1118 + /* device callbacks */ 1119 + .intf_probe = pcan_usb_pro_probe, /* same as PCAN-USB Pro */ 1120 + .dev_init = pcan_usb_fd_init, 1121 + 1122 + .dev_exit = pcan_usb_fd_exit, 1123 + .dev_free = pcan_usb_fd_free, 1124 + .dev_set_bus = pcan_usb_fd_set_bus, 1125 + .dev_set_bittiming = pcan_usb_fd_set_bittiming_slow, 1126 + .dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast, 1127 + .dev_decode_buf = pcan_usb_fd_decode_buf, 1128 + .dev_start = pcan_usb_fd_start, 1129 + .dev_stop = pcan_usb_fd_stop, 1130 + .dev_restart_async = pcan_usb_fd_restart_async, 1131 + .dev_encode_msg = pcan_usb_fd_encode_msg, 1132 + 1133 + .do_get_berr_counter = pcan_usb_fd_get_berr_counter, 1134 + }; 1135 + 1136 + /* describes the PCAN-USB X6 adapter */ 1137 + static const struct can_bittiming_const pcan_usb_x6_const = { 1138 + .name = "pcan_usb_x6", 1139 + .tseg1_min = 1, 1140 + .tseg1_max = (1 << PUCAN_TSLOW_TSGEG1_BITS), 1141 + .tseg2_min = 1, 1142 + .tseg2_max = (1 << PUCAN_TSLOW_TSGEG2_BITS), 1143 + .sjw_max = (1 << PUCAN_TSLOW_SJW_BITS), 1144 + .brp_min = 1, 1145 + .brp_max = (1 << PUCAN_TSLOW_BRP_BITS), 1146 + 
.brp_inc = 1, 1147 + }; 1148 + 1149 + static const struct can_bittiming_const pcan_usb_x6_data_const = { 1150 + .name = "pcan_usb_x6", 1151 + .tseg1_min = 1, 1152 + .tseg1_max = (1 << PUCAN_TFAST_TSGEG1_BITS), 1153 + .tseg2_min = 1, 1154 + .tseg2_max = (1 << PUCAN_TFAST_TSGEG2_BITS), 1155 + .sjw_max = (1 << PUCAN_TFAST_SJW_BITS), 1156 + .brp_min = 1, 1157 + .brp_max = (1 << PUCAN_TFAST_BRP_BITS), 1158 + .brp_inc = 1, 1159 + }; 1160 + 1161 + const struct peak_usb_adapter pcan_usb_x6 = { 1162 + .name = "PCAN-USB X6", 1163 + .device_id = PCAN_USBX6_PRODUCT_ID, 1164 + .ctrl_count = PCAN_USBPROFD_CHANNEL_COUNT, 1165 + .ctrlmode_supported = CAN_CTRLMODE_FD | 1166 + CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY, 1167 + .clock = { 1168 + .freq = PCAN_UFD_CRYSTAL_HZ, 1169 + }, 1170 + .bittiming_const = &pcan_usb_x6_const, 1171 + .data_bittiming_const = &pcan_usb_x6_data_const, 1100 1172 1101 1173 /* size of device private data */ 1102 1174 .sizeof_dev_private = sizeof(struct pcan_usb_fd_device),
+8 -13
drivers/net/ethernet/altera/altera_tse_main.c
··· 422 422 423 423 skb_put(skb, pktlength); 424 424 425 - /* make cache consistent with receive packet buffer */ 426 - dma_sync_single_for_cpu(priv->device, 427 - priv->rx_ring[entry].dma_addr, 428 - priv->rx_ring[entry].len, 429 - DMA_FROM_DEVICE); 430 - 431 425 dma_unmap_single(priv->device, priv->rx_ring[entry].dma_addr, 432 426 priv->rx_ring[entry].len, DMA_FROM_DEVICE); 433 427 ··· 485 491 486 492 if (unlikely(netif_queue_stopped(priv->dev) && 487 493 tse_tx_avail(priv) > TSE_TX_THRESH(priv))) { 488 - netif_tx_lock(priv->dev); 489 494 if (netif_queue_stopped(priv->dev) && 490 495 tse_tx_avail(priv) > TSE_TX_THRESH(priv)) { 491 496 if (netif_msg_tx_done(priv)) ··· 492 499 __func__); 493 500 netif_wake_queue(priv->dev); 494 501 } 495 - netif_tx_unlock(priv->dev); 496 502 } 497 503 498 504 spin_unlock(&priv->tx_lock); ··· 605 613 buffer->skb = skb; 606 614 buffer->dma_addr = dma_addr; 607 615 buffer->len = nopaged_len; 608 - 609 - /* Push data out of the cache hierarchy into main memory */ 610 - dma_sync_single_for_device(priv->device, buffer->dma_addr, 611 - buffer->len, DMA_TO_DEVICE); 612 616 613 617 priv->dmaops->tx_buffer(priv, buffer); 614 618 ··· 829 841 830 842 if (!phydev) { 831 843 netdev_err(dev, "Could not find the PHY\n"); 844 + if (fixed_link) 845 + of_phy_deregister_fixed_link(priv->device->of_node); 832 846 return -ENODEV; 833 847 } 834 848 ··· 1620 1630 static int altera_tse_remove(struct platform_device *pdev) 1621 1631 { 1622 1632 struct net_device *ndev = platform_get_drvdata(pdev); 1633 + struct altera_tse_private *priv = netdev_priv(ndev); 1623 1634 1624 - if (ndev->phydev) 1635 + if (ndev->phydev) { 1625 1636 phy_disconnect(ndev->phydev); 1637 + 1638 + if (of_phy_is_fixed_link(priv->device->of_node)) 1639 + of_phy_deregister_fixed_link(priv->device->of_node); 1640 + } 1626 1641 1627 1642 platform_set_drvdata(pdev, NULL); 1628 1643 altera_tse_mdio_destroy(ndev);
+2 -2
drivers/net/ethernet/amd/xgbe/xgbe-platform.c
··· 538 538 return 0; 539 539 } 540 540 541 - #ifdef CONFIG_PM 541 + #ifdef CONFIG_PM_SLEEP 542 542 static int xgbe_platform_suspend(struct device *dev) 543 543 { 544 544 struct xgbe_prv_data *pdata = dev_get_drvdata(dev); ··· 583 583 584 584 return ret; 585 585 } 586 - #endif /* CONFIG_PM */ 586 + #endif /* CONFIG_PM_SLEEP */ 587 587 588 588 static const struct xgbe_version_data xgbe_v1 = { 589 589 .init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v1,
+7 -2
drivers/net/ethernet/aurora/nb8800.c
··· 1457 1457 1458 1458 ret = nb8800_hw_init(dev); 1459 1459 if (ret) 1460 - goto err_free_bus; 1460 + goto err_deregister_fixed_link; 1461 1461 1462 1462 if (ops && ops->init) { 1463 1463 ret = ops->init(dev); 1464 1464 if (ret) 1465 - goto err_free_bus; 1465 + goto err_deregister_fixed_link; 1466 1466 } 1467 1467 1468 1468 dev->netdev_ops = &nb8800_netdev_ops; ··· 1495 1495 1496 1496 err_free_dma: 1497 1497 nb8800_dma_free(dev); 1498 + err_deregister_fixed_link: 1499 + if (of_phy_is_fixed_link(pdev->dev.of_node)) 1500 + of_phy_deregister_fixed_link(pdev->dev.of_node); 1498 1501 err_free_bus: 1499 1502 of_node_put(priv->phy_node); 1500 1503 mdiobus_unregister(bus); ··· 1515 1512 struct nb8800_priv *priv = netdev_priv(ndev); 1516 1513 1517 1514 unregister_netdev(ndev); 1515 + if (of_phy_is_fixed_link(pdev->dev.of_node)) 1516 + of_phy_deregister_fixed_link(pdev->dev.of_node); 1518 1517 of_node_put(priv->phy_node); 1519 1518 1520 1519 mdiobus_unregister(priv->mii_bus);
+12 -5
drivers/net/ethernet/broadcom/bcmsysport.c
··· 1755 1755 if (priv->irq0 <= 0 || priv->irq1 <= 0) { 1756 1756 dev_err(&pdev->dev, "invalid interrupts\n"); 1757 1757 ret = -EINVAL; 1758 - goto err; 1758 + goto err_free_netdev; 1759 1759 } 1760 1760 1761 1761 priv->base = devm_ioremap_resource(&pdev->dev, r); 1762 1762 if (IS_ERR(priv->base)) { 1763 1763 ret = PTR_ERR(priv->base); 1764 - goto err; 1764 + goto err_free_netdev; 1765 1765 } 1766 1766 1767 1767 priv->netdev = dev; ··· 1779 1779 ret = of_phy_register_fixed_link(dn); 1780 1780 if (ret) { 1781 1781 dev_err(&pdev->dev, "failed to register fixed PHY\n"); 1782 - goto err; 1782 + goto err_free_netdev; 1783 1783 } 1784 1784 1785 1785 priv->phy_dn = dn; ··· 1821 1821 ret = register_netdev(dev); 1822 1822 if (ret) { 1823 1823 dev_err(&pdev->dev, "failed to register net_device\n"); 1824 - goto err; 1824 + goto err_deregister_fixed_link; 1825 1825 } 1826 1826 1827 1827 priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK; ··· 1832 1832 priv->base, priv->irq0, priv->irq1, txq, rxq); 1833 1833 1834 1834 return 0; 1835 - err: 1835 + 1836 + err_deregister_fixed_link: 1837 + if (of_phy_is_fixed_link(dn)) 1838 + of_phy_deregister_fixed_link(dn); 1839 + err_free_netdev: 1836 1840 free_netdev(dev); 1837 1841 return ret; 1838 1842 } ··· 1844 1840 static int bcm_sysport_remove(struct platform_device *pdev) 1845 1841 { 1846 1842 struct net_device *dev = dev_get_drvdata(&pdev->dev); 1843 + struct device_node *dn = pdev->dev.of_node; 1847 1844 1848 1845 /* Not much to do, ndo_close has been called 1849 1846 * and we use managed allocations 1850 1847 */ 1851 1848 unregister_netdev(dev); 1849 + if (of_phy_is_fixed_link(dn)) 1850 + of_phy_deregister_fixed_link(dn); 1852 1851 free_netdev(dev); 1853 1852 dev_set_drvdata(&pdev->dev, NULL); 1854 1853
+5 -3
drivers/net/ethernet/broadcom/genet/bcmgenet.c
··· 1165 1165 struct bcmgenet_tx_ring *ring) 1166 1166 { 1167 1167 struct bcmgenet_priv *priv = netdev_priv(dev); 1168 + struct device *kdev = &priv->pdev->dev; 1168 1169 struct enet_cb *tx_cb_ptr; 1169 1170 struct netdev_queue *txq; 1170 1171 unsigned int pkts_compl = 0; ··· 1193 1192 if (tx_cb_ptr->skb) { 1194 1193 pkts_compl++; 1195 1194 bytes_compl += GENET_CB(tx_cb_ptr->skb)->bytes_sent; 1196 - dma_unmap_single(&dev->dev, 1195 + dma_unmap_single(kdev, 1197 1196 dma_unmap_addr(tx_cb_ptr, dma_addr), 1198 1197 dma_unmap_len(tx_cb_ptr, dma_len), 1199 1198 DMA_TO_DEVICE); 1200 1199 bcmgenet_free_cb(tx_cb_ptr); 1201 1200 } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) { 1202 - dma_unmap_page(&dev->dev, 1201 + dma_unmap_page(kdev, 1203 1202 dma_unmap_addr(tx_cb_ptr, dma_addr), 1204 1203 dma_unmap_len(tx_cb_ptr, dma_len), 1205 1204 DMA_TO_DEVICE); ··· 1769 1768 1770 1769 static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv) 1771 1770 { 1771 + struct device *kdev = &priv->pdev->dev; 1772 1772 struct enet_cb *cb; 1773 1773 int i; 1774 1774 ··· 1777 1775 cb = &priv->rx_cbs[i]; 1778 1776 1779 1777 if (dma_unmap_addr(cb, dma_addr)) { 1780 - dma_unmap_single(&priv->dev->dev, 1778 + dma_unmap_single(kdev, 1781 1779 dma_unmap_addr(cb, dma_addr), 1782 1780 priv->rx_buf_len, DMA_FROM_DEVICE); 1783 1781 dma_unmap_addr_set(cb, dma_addr, 0);
+9 -1
drivers/net/ethernet/broadcom/genet/bcmmii.c
··· 542 542 /* Make sure we initialize MoCA PHYs with a link down */ 543 543 if (phy_mode == PHY_INTERFACE_MODE_MOCA) { 544 544 phydev = of_phy_find_device(dn); 545 - if (phydev) 545 + if (phydev) { 546 546 phydev->link = 0; 547 + put_device(&phydev->mdio.dev); 548 + } 547 549 } 548 550 549 551 return 0; ··· 627 625 int bcmgenet_mii_init(struct net_device *dev) 628 626 { 629 627 struct bcmgenet_priv *priv = netdev_priv(dev); 628 + struct device_node *dn = priv->pdev->dev.of_node; 630 629 int ret; 631 630 632 631 ret = bcmgenet_mii_alloc(priv); ··· 641 638 return 0; 642 639 643 640 out: 641 + if (of_phy_is_fixed_link(dn)) 642 + of_phy_deregister_fixed_link(dn); 644 643 of_node_put(priv->phy_dn); 645 644 mdiobus_unregister(priv->mii_bus); 646 645 mdiobus_free(priv->mii_bus); ··· 652 647 void bcmgenet_mii_exit(struct net_device *dev) 653 648 { 654 649 struct bcmgenet_priv *priv = netdev_priv(dev); 650 + struct device_node *dn = priv->pdev->dev.of_node; 655 651 652 + if (of_phy_is_fixed_link(dn)) 653 + of_phy_deregister_fixed_link(dn); 656 654 of_node_put(priv->phy_dn); 657 655 mdiobus_unregister(priv->mii_bus); 658 656 mdiobus_free(priv->mii_bus);
+3 -2
drivers/net/ethernet/cadence/macb.c
··· 991 991 addr += bp->rx_buffer_size; 992 992 } 993 993 bp->rx_ring[bp->rx_ring_size - 1].addr |= MACB_BIT(RX_WRAP); 994 + bp->rx_tail = 0; 994 995 } 995 996 996 997 static int macb_rx(struct macb *bp, int budget) ··· 1173 1172 if (status & MACB_BIT(RXUBR)) { 1174 1173 ctrl = macb_readl(bp, NCR); 1175 1174 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE)); 1175 + wmb(); 1176 1176 macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); 1177 1177 1178 1178 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) ··· 1738 1736 bp->queues[0].tx_head = 0; 1739 1737 bp->queues[0].tx_tail = 0; 1740 1738 bp->queues[0].tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP); 1741 - 1742 - bp->rx_tail = 0; 1743 1739 } 1744 1740 1745 1741 static void macb_reset_hw(struct macb *bp) ··· 2943 2943 if (intstatus & MACB_BIT(RXUBR)) { 2944 2944 ctl = macb_readl(lp, NCR); 2945 2945 macb_writel(lp, NCR, ctl & ~MACB_BIT(RE)); 2946 + wmb(); 2946 2947 macb_writel(lp, NCR, ctl | MACB_BIT(RE)); 2947 2948 } 2948 2949
+1
drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
··· 168 168 CH_PCI_ID_TABLE_FENTRY(0x509a), /* Custom T520-CR */ 169 169 CH_PCI_ID_TABLE_FENTRY(0x509b), /* Custom T540-CR LOM */ 170 170 CH_PCI_ID_TABLE_FENTRY(0x509c), /* Custom T520-CR*/ 171 + CH_PCI_ID_TABLE_FENTRY(0x509d), /* Custom T540-CR*/ 171 172 172 173 /* T6 adapters: 173 174 */
+2
drivers/net/ethernet/freescale/fec.h
··· 574 574 unsigned int reload_period; 575 575 int pps_enable; 576 576 unsigned int next_counter; 577 + 578 + u64 ethtool_stats[0]; 577 579 }; 578 580 579 581 void fec_ptp_init(struct platform_device *pdev);
+24 -4
drivers/net/ethernet/freescale/fec_main.c
··· 2310 2310 { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK }, 2311 2311 }; 2312 2312 2313 - static void fec_enet_get_ethtool_stats(struct net_device *dev, 2314 - struct ethtool_stats *stats, u64 *data) 2313 + static void fec_enet_update_ethtool_stats(struct net_device *dev) 2315 2314 { 2316 2315 struct fec_enet_private *fep = netdev_priv(dev); 2317 2316 int i; 2318 2317 2319 2318 for (i = 0; i < ARRAY_SIZE(fec_stats); i++) 2320 - data[i] = readl(fep->hwp + fec_stats[i].offset); 2319 + fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset); 2320 + } 2321 + 2322 + static void fec_enet_get_ethtool_stats(struct net_device *dev, 2323 + struct ethtool_stats *stats, u64 *data) 2324 + { 2325 + struct fec_enet_private *fep = netdev_priv(dev); 2326 + 2327 + if (netif_running(dev)) 2328 + fec_enet_update_ethtool_stats(dev); 2329 + 2330 + memcpy(data, fep->ethtool_stats, ARRAY_SIZE(fec_stats) * sizeof(u64)); 2321 2331 } 2322 2332 2323 2333 static void fec_enet_get_strings(struct net_device *netdev, ··· 2871 2861 if (fep->quirks & FEC_QUIRK_ERR006687) 2872 2862 imx6q_cpuidle_fec_irqs_unused(); 2873 2863 2864 + fec_enet_update_ethtool_stats(ndev); 2865 + 2874 2866 fec_enet_clk_enable(ndev, false); 2875 2867 pinctrl_pm_select_sleep_state(&fep->pdev->dev); 2876 2868 pm_runtime_mark_last_busy(&fep->pdev->dev); ··· 3178 3166 3179 3167 fec_restart(ndev); 3180 3168 3169 + fec_enet_update_ethtool_stats(ndev); 3170 + 3181 3171 return 0; 3182 3172 } 3183 3173 ··· 3278 3264 fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs); 3279 3265 3280 3266 /* Init network device */ 3281 - ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private), 3267 + ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private) + 3268 + ARRAY_SIZE(fec_stats) * sizeof(u64), 3282 3269 num_tx_qs, num_rx_qs); 3283 3270 if (!ndev) 3284 3271 return -ENOMEM; ··· 3476 3461 failed_clk_ipg: 3477 3462 fec_enet_clk_enable(ndev, false); 3478 3463 failed_clk: 3464 + if (of_phy_is_fixed_link(np)) 3465 + 
of_phy_deregister_fixed_link(np); 3479 3466 failed_phy: 3480 3467 of_node_put(phy_node); 3481 3468 failed_ioremap: ··· 3491 3474 { 3492 3475 struct net_device *ndev = platform_get_drvdata(pdev); 3493 3476 struct fec_enet_private *fep = netdev_priv(ndev); 3477 + struct device_node *np = pdev->dev.of_node; 3494 3478 3495 3479 cancel_work_sync(&fep->tx_timeout_work); 3496 3480 fec_ptp_stop(pdev); ··· 3499 3481 fec_enet_mii_remove(fep); 3500 3482 if (fep->reg_phy) 3501 3483 regulator_disable(fep->reg_phy); 3484 + if (of_phy_is_fixed_link(np)) 3485 + of_phy_deregister_fixed_link(np); 3502 3486 of_node_put(fep->phy_node); 3503 3487 free_netdev(ndev); 3504 3488
+3
drivers/net/ethernet/freescale/fman/fman_memac.c
··· 1107 1107 { 1108 1108 free_init_resources(memac); 1109 1109 1110 + if (memac->pcsphy) 1111 + put_device(&memac->pcsphy->mdio.dev); 1112 + 1110 1113 kfree(memac->memac_drv_param); 1111 1114 kfree(memac); 1112 1115
+2
drivers/net/ethernet/freescale/fman/mac.c
··· 896 896 priv->fixed_link->duplex = phy->duplex; 897 897 priv->fixed_link->pause = phy->pause; 898 898 priv->fixed_link->asym_pause = phy->asym_pause; 899 + 900 + put_device(&phy->mdio.dev); 899 901 } 900 902 901 903 err = mac_dev->init(mac_dev);
+6 -1
drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
··· 967 967 err = clk_prepare_enable(clk); 968 968 if (err) { 969 969 ret = err; 970 - goto out_free_fpi; 970 + goto out_deregister_fixed_link; 971 971 } 972 972 fpi->clk_per = clk; 973 973 } ··· 1048 1048 of_node_put(fpi->phy_node); 1049 1049 if (fpi->clk_per) 1050 1050 clk_disable_unprepare(fpi->clk_per); 1051 + out_deregister_fixed_link: 1052 + if (of_phy_is_fixed_link(ofdev->dev.of_node)) 1053 + of_phy_deregister_fixed_link(ofdev->dev.of_node); 1051 1054 out_free_fpi: 1052 1055 kfree(fpi); 1053 1056 return ret; ··· 1069 1066 of_node_put(fep->fpi->phy_node); 1070 1067 if (fep->fpi->clk_per) 1071 1068 clk_disable_unprepare(fep->fpi->clk_per); 1069 + if (of_phy_is_fixed_link(ofdev->dev.of_node)) 1070 + of_phy_deregister_fixed_link(ofdev->dev.of_node); 1072 1071 free_netdev(ndev); 1073 1072 return 0; 1074 1073 }
+8
drivers/net/ethernet/freescale/gianfar.c
··· 1312 1312 */ 1313 1313 static int gfar_probe(struct platform_device *ofdev) 1314 1314 { 1315 + struct device_node *np = ofdev->dev.of_node; 1315 1316 struct net_device *dev = NULL; 1316 1317 struct gfar_private *priv = NULL; 1317 1318 int err = 0, i; ··· 1466 1465 return 0; 1467 1466 1468 1467 register_fail: 1468 + if (of_phy_is_fixed_link(np)) 1469 + of_phy_deregister_fixed_link(np); 1469 1470 unmap_group_regs(priv); 1470 1471 gfar_free_rx_queues(priv); 1471 1472 gfar_free_tx_queues(priv); ··· 1480 1477 static int gfar_remove(struct platform_device *ofdev) 1481 1478 { 1482 1479 struct gfar_private *priv = platform_get_drvdata(ofdev); 1480 + struct device_node *np = ofdev->dev.of_node; 1483 1481 1484 1482 of_node_put(priv->phy_node); 1485 1483 of_node_put(priv->tbi_node); 1486 1484 1487 1485 unregister_netdev(priv->ndev); 1486 + 1487 + if (of_phy_is_fixed_link(np)) 1488 + of_phy_deregister_fixed_link(np); 1489 + 1488 1490 unmap_group_regs(priv); 1489 1491 gfar_free_rx_queues(priv); 1490 1492 gfar_free_tx_queues(priv);
+16 -7
drivers/net/ethernet/freescale/ucc_geth.c
··· 3867 3867 dev = alloc_etherdev(sizeof(*ugeth)); 3868 3868 3869 3869 if (dev == NULL) { 3870 - of_node_put(ug_info->tbi_node); 3871 - of_node_put(ug_info->phy_node); 3872 - return -ENOMEM; 3870 + err = -ENOMEM; 3871 + goto err_deregister_fixed_link; 3873 3872 } 3874 3873 3875 3874 ugeth = netdev_priv(dev); ··· 3905 3906 if (netif_msg_probe(ugeth)) 3906 3907 pr_err("%s: Cannot register net device, aborting\n", 3907 3908 dev->name); 3908 - free_netdev(dev); 3909 - of_node_put(ug_info->tbi_node); 3910 - of_node_put(ug_info->phy_node); 3911 - return err; 3909 + goto err_free_netdev; 3912 3910 } 3913 3911 3914 3912 mac_addr = of_get_mac_address(np); ··· 3918 3922 ugeth->node = np; 3919 3923 3920 3924 return 0; 3925 + 3926 + err_free_netdev: 3927 + free_netdev(dev); 3928 + err_deregister_fixed_link: 3929 + if (of_phy_is_fixed_link(np)) 3930 + of_phy_deregister_fixed_link(np); 3931 + of_node_put(ug_info->tbi_node); 3932 + of_node_put(ug_info->phy_node); 3933 + 3934 + return err; 3921 3935 } 3922 3936 3923 3937 static int ucc_geth_remove(struct platform_device* ofdev) 3924 3938 { 3925 3939 struct net_device *dev = platform_get_drvdata(ofdev); 3926 3940 struct ucc_geth_private *ugeth = netdev_priv(dev); 3941 + struct device_node *np = ofdev->dev.of_node; 3927 3942 3928 3943 unregister_netdev(dev); 3929 3944 free_netdev(dev); 3930 3945 ucc_geth_memclean(ugeth); 3946 + if (of_phy_is_fixed_link(np)) 3947 + of_phy_deregister_fixed_link(np); 3931 3948 of_node_put(ugeth->ug_info->tbi_node); 3932 3949 of_node_put(ugeth->ug_info->phy_node); 3933 3950
+6 -2
drivers/net/ethernet/intel/igb/igb_main.c
··· 4935 4935 4936 4936 /* initialize outer IP header fields */ 4937 4937 if (ip.v4->version == 4) { 4938 + unsigned char *csum_start = skb_checksum_start(skb); 4939 + unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); 4940 + 4938 4941 /* IP header will have to cancel out any data that 4939 4942 * is not a part of the outer IP header 4940 4943 */ 4941 - ip.v4->check = csum_fold(csum_add(lco_csum(skb), 4942 - csum_unfold(l4.tcp->check))); 4944 + ip.v4->check = csum_fold(csum_partial(trans_start, 4945 + csum_start - trans_start, 4946 + 0)); 4943 4947 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4; 4944 4948 4945 4949 ip.v4->tot_len = 0;
+6 -2
drivers/net/ethernet/intel/igbvf/netdev.c
··· 1965 1965 1966 1966 /* initialize outer IP header fields */ 1967 1967 if (ip.v4->version == 4) { 1968 + unsigned char *csum_start = skb_checksum_start(skb); 1969 + unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); 1970 + 1968 1971 /* IP header will have to cancel out any data that 1969 1972 * is not a part of the outer IP header 1970 1973 */ 1971 - ip.v4->check = csum_fold(csum_add(lco_csum(skb), 1972 - csum_unfold(l4.tcp->check))); 1974 + ip.v4->check = csum_fold(csum_partial(trans_start, 1975 + csum_start - trans_start, 1976 + 0)); 1973 1977 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4; 1974 1978 1975 1979 ip.v4->tot_len = 0;
+6 -2
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
··· 7287 7287 7288 7288 /* initialize outer IP header fields */ 7289 7289 if (ip.v4->version == 4) { 7290 + unsigned char *csum_start = skb_checksum_start(skb); 7291 + unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); 7292 + 7290 7293 /* IP header will have to cancel out any data that 7291 7294 * is not a part of the outer IP header 7292 7295 */ 7293 - ip.v4->check = csum_fold(csum_add(lco_csum(skb), 7294 - csum_unfold(l4.tcp->check))); 7296 + ip.v4->check = csum_fold(csum_partial(trans_start, 7297 + csum_start - trans_start, 7298 + 0)); 7295 7299 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; 7296 7300 7297 7301 ip.v4->tot_len = 0;
+6 -2
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
··· 3335 3335 3336 3336 /* initialize outer IP header fields */ 3337 3337 if (ip.v4->version == 4) { 3338 + unsigned char *csum_start = skb_checksum_start(skb); 3339 + unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); 3340 + 3338 3341 /* IP header will have to cancel out any data that 3339 3342 * is not a part of the outer IP header 3340 3343 */ 3341 - ip.v4->check = csum_fold(csum_add(lco_csum(skb), 3342 - csum_unfold(l4.tcp->check))); 3344 + ip.v4->check = csum_fold(csum_partial(trans_start, 3345 + csum_start - trans_start, 3346 + 0)); 3343 3347 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; 3344 3348 3345 3349 ip.v4->tot_len = 0;
+5
drivers/net/ethernet/marvell/mvneta.c
··· 4327 4327 clk_disable_unprepare(pp->clk); 4328 4328 err_put_phy_node: 4329 4329 of_node_put(phy_node); 4330 + if (of_phy_is_fixed_link(dn)) 4331 + of_phy_deregister_fixed_link(dn); 4330 4332 err_free_irq: 4331 4333 irq_dispose_mapping(dev->irq); 4332 4334 err_free_netdev: ··· 4340 4338 static int mvneta_remove(struct platform_device *pdev) 4341 4339 { 4342 4340 struct net_device *dev = platform_get_drvdata(pdev); 4341 + struct device_node *dn = pdev->dev.of_node; 4343 4342 struct mvneta_port *pp = netdev_priv(dev); 4344 4343 4345 4344 unregister_netdev(dev); ··· 4348 4345 clk_disable_unprepare(pp->clk); 4349 4346 free_percpu(pp->ports); 4350 4347 free_percpu(pp->stats); 4348 + if (of_phy_is_fixed_link(dn)) 4349 + of_phy_deregister_fixed_link(dn); 4351 4350 irq_dispose_mapping(dev->irq); 4352 4351 of_node_put(pp->phy_node); 4353 4352 free_netdev(dev);
+4
drivers/net/ethernet/mediatek/mtk_eth_soc.c
··· 318 318 return 0; 319 319 320 320 err_phy: 321 + if (of_phy_is_fixed_link(mac->of_node)) 322 + of_phy_deregister_fixed_link(mac->of_node); 321 323 of_node_put(np); 322 324 dev_err(eth->dev, "%s: invalid phy\n", __func__); 323 325 return -EINVAL; ··· 1925 1923 struct mtk_eth *eth = mac->hw; 1926 1924 1927 1925 phy_disconnect(dev->phydev); 1926 + if (of_phy_is_fixed_link(mac->of_node)) 1927 + of_phy_deregister_fixed_link(mac->of_node); 1928 1928 mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0); 1929 1929 mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0); 1930 1930 }
+2 -15
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
··· 2109 2109 return -ENOMEM; 2110 2110 } 2111 2111 2112 - static void mlx4_en_shutdown(struct net_device *dev) 2113 - { 2114 - rtnl_lock(); 2115 - netif_device_detach(dev); 2116 - mlx4_en_close(dev); 2117 - rtnl_unlock(); 2118 - } 2119 2112 2120 2113 static int mlx4_en_copy_priv(struct mlx4_en_priv *dst, 2121 2114 struct mlx4_en_priv *src, ··· 2207 2214 { 2208 2215 struct mlx4_en_priv *priv = netdev_priv(dev); 2209 2216 struct mlx4_en_dev *mdev = priv->mdev; 2210 - bool shutdown = mdev->dev->persist->interface_state & 2211 - MLX4_INTERFACE_STATE_SHUTDOWN; 2212 2217 int t; 2213 2218 2214 2219 en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port); ··· 2215 2224 if (priv->registered) { 2216 2225 devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev, 2217 2226 priv->port)); 2218 - if (shutdown) 2219 - mlx4_en_shutdown(dev); 2220 - else 2221 - unregister_netdev(dev); 2227 + unregister_netdev(dev); 2222 2228 } 2223 2229 2224 2230 if (priv->allocated) ··· 2246 2258 kfree(priv->tx_cq[t]); 2247 2259 } 2248 2260 2249 - if (!shutdown) 2250 - free_netdev(dev); 2261 + free_netdev(dev); 2251 2262 } 2252 2263 2253 2264 static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
+1 -4
drivers/net/ethernet/mellanox/mlx4/main.c
··· 4147 4147 4148 4148 mlx4_info(persist->dev, "mlx4_shutdown was called\n"); 4149 4149 mutex_lock(&persist->interface_state_mutex); 4150 - if (persist->interface_state & MLX4_INTERFACE_STATE_UP) { 4151 - /* Notify mlx4 clients that the kernel is being shut down */ 4152 - persist->interface_state |= MLX4_INTERFACE_STATE_SHUTDOWN; 4150 + if (persist->interface_state & MLX4_INTERFACE_STATE_UP) 4153 4151 mlx4_unload_one(pdev); 4154 - } 4155 4152 mutex_unlock(&persist->interface_state_mutex); 4156 4153 } 4157 4154
+6 -1
drivers/net/ethernet/mellanox/mlx4/mcg.c
··· 1457 1457 int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, 1458 1458 u32 qpn, enum mlx4_net_trans_promisc_mode mode) 1459 1459 { 1460 - struct mlx4_net_trans_rule rule; 1460 + struct mlx4_net_trans_rule rule = { 1461 + .queue_mode = MLX4_NET_TRANS_Q_FIFO, 1462 + .exclusive = 0, 1463 + .allow_loopback = 1, 1464 + }; 1465 + 1461 1466 u64 *regid_p; 1462 1467 1463 1468 switch (mode) {
+1
drivers/net/ethernet/qualcomm/emac/emac-phy.c
··· 212 212 213 213 phy_np = of_parse_phandle(np, "phy-handle", 0); 214 214 adpt->phydev = of_phy_find_device(phy_np); 215 + of_node_put(phy_np); 215 216 } 216 217 217 218 if (!adpt->phydev) {
+4
drivers/net/ethernet/qualcomm/emac/emac.c
··· 710 710 err_undo_napi: 711 711 netif_napi_del(&adpt->rx_q.napi); 712 712 err_undo_mdiobus: 713 + if (!has_acpi_companion(&pdev->dev)) 714 + put_device(&adpt->phydev->mdio.dev); 713 715 mdiobus_unregister(adpt->mii_bus); 714 716 err_undo_clocks: 715 717 emac_clks_teardown(adpt); ··· 731 729 732 730 emac_clks_teardown(adpt); 733 731 732 + if (!has_acpi_companion(&pdev->dev)) 733 + put_device(&adpt->phydev->mdio.dev); 734 734 mdiobus_unregister(adpt->mii_bus); 735 735 free_netdev(netdev); 736 736
+14 -5
drivers/net/ethernet/renesas/ravb_main.c
··· 1008 1008 of_node_put(pn); 1009 1009 if (!phydev) { 1010 1010 netdev_err(ndev, "failed to connect PHY\n"); 1011 - return -ENOENT; 1011 + err = -ENOENT; 1012 + goto err_deregister_fixed_link; 1012 1013 } 1013 1014 1014 1015 /* This driver only support 10/100Mbit speeds on Gen3 1015 1016 * at this time. 1016 1017 */ 1017 1018 if (priv->chip_id == RCAR_GEN3) { 1018 - int err; 1019 - 1020 1019 err = phy_set_max_speed(phydev, SPEED_100); 1021 1020 if (err) { 1022 1021 netdev_err(ndev, "failed to limit PHY to 100Mbit/s\n"); 1023 - phy_disconnect(phydev); 1024 - return err; 1022 + goto err_phy_disconnect; 1025 1023 } 1026 1024 1027 1025 netdev_info(ndev, "limited PHY to 100Mbit/s\n"); ··· 1031 1033 phy_attached_info(phydev); 1032 1034 1033 1035 return 0; 1036 + 1037 + err_phy_disconnect: 1038 + phy_disconnect(phydev); 1039 + err_deregister_fixed_link: 1040 + if (of_phy_is_fixed_link(np)) 1041 + of_phy_deregister_fixed_link(np); 1042 + 1043 + return err; 1034 1044 } 1035 1045 1036 1046 /* PHY control start function */ ··· 1640 1634 /* Device close function for Ethernet AVB */ 1641 1635 static int ravb_close(struct net_device *ndev) 1642 1636 { 1637 + struct device_node *np = ndev->dev.parent->of_node; 1643 1638 struct ravb_private *priv = netdev_priv(ndev); 1644 1639 struct ravb_tstamp_skb *ts_skb, *ts_skb2; 1645 1640 ··· 1670 1663 if (ndev->phydev) { 1671 1664 phy_stop(ndev->phydev); 1672 1665 phy_disconnect(ndev->phydev); 1666 + if (of_phy_is_fixed_link(np)) 1667 + of_phy_deregister_fixed_link(np); 1673 1668 } 1674 1669 1675 1670 if (priv->chip_id != RCAR_GEN2) {
+1 -1
drivers/net/ethernet/renesas/sh_eth.c
··· 518 518 519 519 .ecsr_value = ECSR_ICD, 520 520 .ecsipr_value = ECSIPR_ICDIP, 521 - .eesipr_value = 0xff7f009f, 521 + .eesipr_value = 0xe77f009f, 522 522 523 523 .tx_check = EESR_TC1 | EESR_FTC, 524 524 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
+15 -2
drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
··· 50 50 if (plat_dat->init) { 51 51 ret = plat_dat->init(pdev, plat_dat->bsp_priv); 52 52 if (ret) 53 - return ret; 53 + goto err_remove_config_dt; 54 54 } 55 55 56 - return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); 56 + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); 57 + if (ret) 58 + goto err_exit; 59 + 60 + return 0; 61 + 62 + err_exit: 63 + if (plat_dat->exit) 64 + plat_dat->exit(pdev, plat_dat->bsp_priv); 65 + err_remove_config_dt: 66 + if (pdev->dev.of_node) 67 + stmmac_remove_config_dt(pdev, plat_dat); 68 + 69 + return ret; 57 70 } 58 71 59 72 static const struct of_device_id dwmac_generic_match[] = {
+19 -6
drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
··· 271 271 return PTR_ERR(plat_dat); 272 272 273 273 gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL); 274 - if (!gmac) 275 - return -ENOMEM; 274 + if (!gmac) { 275 + err = -ENOMEM; 276 + goto err_remove_config_dt; 277 + } 276 278 277 279 gmac->pdev = pdev; 278 280 279 281 err = ipq806x_gmac_of_parse(gmac); 280 282 if (err) { 281 283 dev_err(dev, "device tree parsing error\n"); 282 - return err; 284 + goto err_remove_config_dt; 283 285 } 284 286 285 287 regmap_write(gmac->qsgmii_csr, QSGMII_PCS_CAL_LCKDT_CTL, ··· 302 300 default: 303 301 dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n", 304 302 phy_modes(gmac->phy_mode)); 305 - return -EINVAL; 303 + err = -EINVAL; 304 + goto err_remove_config_dt; 306 305 } 307 306 regmap_write(gmac->nss_common, NSS_COMMON_GMAC_CTL(gmac->id), val); 308 307 ··· 322 319 default: 323 320 dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n", 324 321 phy_modes(gmac->phy_mode)); 325 - return -EINVAL; 322 + err = -EINVAL; 323 + goto err_remove_config_dt; 326 324 } 327 325 regmap_write(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, val); 328 326 ··· 350 346 plat_dat->bsp_priv = gmac; 351 347 plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed; 352 348 353 - return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); 349 + err = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); 350 + if (err) 351 + goto err_remove_config_dt; 352 + 353 + return 0; 354 + 355 + err_remove_config_dt: 356 + stmmac_remove_config_dt(pdev, plat_dat); 357 + 358 + return err; 354 359 } 355 360 356 361 static const struct of_device_id ipq806x_gmac_dwmac_match[] = {
+14 -3
drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
··· 46 46 reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg"); 47 47 if (IS_ERR(reg)) { 48 48 dev_err(&pdev->dev, "syscon lookup failed\n"); 49 - return PTR_ERR(reg); 49 + ret = PTR_ERR(reg); 50 + goto err_remove_config_dt; 50 51 } 51 52 52 53 if (plat_dat->interface == PHY_INTERFACE_MODE_MII) { ··· 56 55 ethmode = LPC18XX_CREG_CREG6_ETHMODE_RMII; 57 56 } else { 58 57 dev_err(&pdev->dev, "Only MII and RMII mode supported\n"); 59 - return -EINVAL; 58 + ret = -EINVAL; 59 + goto err_remove_config_dt; 60 60 } 61 61 62 62 regmap_update_bits(reg, LPC18XX_CREG_CREG6, 63 63 LPC18XX_CREG_CREG6_ETHMODE_MASK, ethmode); 64 64 65 - return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); 65 + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); 66 + if (ret) 67 + goto err_remove_config_dt; 68 + 69 + return 0; 70 + 71 + err_remove_config_dt: 72 + stmmac_remove_config_dt(pdev, plat_dat); 73 + 74 + return ret; 66 75 } 67 76 68 77 static const struct of_device_id lpc18xx_dwmac_match[] = {
+18 -5
drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
··· 64 64 return PTR_ERR(plat_dat); 65 65 66 66 dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); 67 - if (!dwmac) 68 - return -ENOMEM; 67 + if (!dwmac) { 68 + ret = -ENOMEM; 69 + goto err_remove_config_dt; 70 + } 69 71 70 72 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 71 73 dwmac->reg = devm_ioremap_resource(&pdev->dev, res); 72 - if (IS_ERR(dwmac->reg)) 73 - return PTR_ERR(dwmac->reg); 74 + if (IS_ERR(dwmac->reg)) { 75 + ret = PTR_ERR(dwmac->reg); 76 + goto err_remove_config_dt; 77 + } 74 78 75 79 plat_dat->bsp_priv = dwmac; 76 80 plat_dat->fix_mac_speed = meson6_dwmac_fix_mac_speed; 77 81 78 - return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); 82 + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); 83 + if (ret) 84 + goto err_remove_config_dt; 85 + 86 + return 0; 87 + 88 + err_remove_config_dt: 89 + stmmac_remove_config_dt(pdev, plat_dat); 90 + 91 + return ret; 79 92 } 80 93 81 94 static const struct of_device_id meson6_dwmac_match[] = {
+24 -8
drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
··· 264 264 return PTR_ERR(plat_dat); 265 265 266 266 dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); 267 - if (!dwmac) 268 - return -ENOMEM; 267 + if (!dwmac) { 268 + ret = -ENOMEM; 269 + goto err_remove_config_dt; 270 + } 269 271 270 272 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 271 273 dwmac->regs = devm_ioremap_resource(&pdev->dev, res); 272 - if (IS_ERR(dwmac->regs)) 273 - return PTR_ERR(dwmac->regs); 274 + if (IS_ERR(dwmac->regs)) { 275 + ret = PTR_ERR(dwmac->regs); 276 + goto err_remove_config_dt; 277 + } 274 278 275 279 dwmac->pdev = pdev; 276 280 dwmac->phy_mode = of_get_phy_mode(pdev->dev.of_node); 277 281 if (dwmac->phy_mode < 0) { 278 282 dev_err(&pdev->dev, "missing phy-mode property\n"); 279 - return -EINVAL; 283 + ret = -EINVAL; 284 + goto err_remove_config_dt; 280 285 } 281 286 282 287 ret = meson8b_init_clk(dwmac); 283 288 if (ret) 284 - return ret; 289 + goto err_remove_config_dt; 285 290 286 291 ret = meson8b_init_prg_eth(dwmac); 287 292 if (ret) 288 - return ret; 293 + goto err_remove_config_dt; 289 294 290 295 plat_dat->bsp_priv = dwmac; 291 296 292 - return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); 297 + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); 298 + if (ret) 299 + goto err_clk_disable; 300 + 301 + return 0; 302 + 303 + err_clk_disable: 304 + clk_disable_unprepare(dwmac->m25_div_clk); 305 + err_remove_config_dt: 306 + stmmac_remove_config_dt(pdev, plat_dat); 307 + 308 + return ret; 293 309 } 294 310 295 311 static int meson8b_dwmac_remove(struct platform_device *pdev)
+17 -4
drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
··· 939 939 plat_dat->fix_mac_speed = rk_fix_speed; 940 940 941 941 plat_dat->bsp_priv = rk_gmac_setup(pdev, data); 942 - if (IS_ERR(plat_dat->bsp_priv)) 943 - return PTR_ERR(plat_dat->bsp_priv); 942 + if (IS_ERR(plat_dat->bsp_priv)) { 943 + ret = PTR_ERR(plat_dat->bsp_priv); 944 + goto err_remove_config_dt; 945 + } 944 946 945 947 ret = rk_gmac_powerup(plat_dat->bsp_priv); 946 948 if (ret) 947 - return ret; 949 + goto err_remove_config_dt; 948 950 949 - return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); 951 + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); 952 + if (ret) 953 + goto err_gmac_powerdown; 954 + 955 + return 0; 956 + 957 + err_gmac_powerdown: 958 + rk_gmac_powerdown(plat_dat->bsp_priv); 959 + err_remove_config_dt: 960 + stmmac_remove_config_dt(pdev, plat_dat); 961 + 962 + return ret; 950 963 } 951 964 952 965 static int rk_gmac_remove(struct platform_device *pdev)
+26 -13
drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
··· 304 304 struct device *dev = &pdev->dev; 305 305 int ret; 306 306 struct socfpga_dwmac *dwmac; 307 + struct net_device *ndev; 308 + struct stmmac_priv *stpriv; 307 309 308 310 ret = stmmac_get_platform_resources(pdev, &stmmac_res); 309 311 if (ret) ··· 316 314 return PTR_ERR(plat_dat); 317 315 318 316 dwmac = devm_kzalloc(dev, sizeof(*dwmac), GFP_KERNEL); 319 - if (!dwmac) 320 - return -ENOMEM; 317 + if (!dwmac) { 318 + ret = -ENOMEM; 319 + goto err_remove_config_dt; 320 + } 321 321 322 322 ret = socfpga_dwmac_parse_data(dwmac, dev); 323 323 if (ret) { 324 324 dev_err(dev, "Unable to parse OF data\n"); 325 - return ret; 325 + goto err_remove_config_dt; 326 326 } 327 327 328 328 plat_dat->bsp_priv = dwmac; 329 329 plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed; 330 330 331 331 ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); 332 + if (ret) 333 + goto err_remove_config_dt; 332 334 333 - if (!ret) { 334 - struct net_device *ndev = platform_get_drvdata(pdev); 335 - struct stmmac_priv *stpriv = netdev_priv(ndev); 335 + ndev = platform_get_drvdata(pdev); 336 + stpriv = netdev_priv(ndev); 336 337 337 - /* The socfpga driver needs to control the stmmac reset to 338 - * set the phy mode. Create a copy of the core reset handel 339 - * so it can be used by the driver later. 340 - */ 341 - dwmac->stmmac_rst = stpriv->stmmac_rst; 338 + /* The socfpga driver needs to control the stmmac reset to set the phy 339 + * mode. Create a copy of the core reset handle so it can be used by 340 + * the driver later. 341 + */ 342 + dwmac->stmmac_rst = stpriv->stmmac_rst; 342 343 343 - ret = socfpga_dwmac_set_phy_mode(dwmac); 344 - } 344 + ret = socfpga_dwmac_set_phy_mode(dwmac); 345 + if (ret) 346 + goto err_dvr_remove; 347 + 348 + return 0; 349 + 350 + err_dvr_remove: 351 + stmmac_dvr_remove(&pdev->dev); 352 + err_remove_config_dt: 353 + stmmac_remove_config_dt(pdev, plat_dat); 345 354 346 355 return ret; 347 356 }
+9 -4
drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
··· 329 329 return PTR_ERR(plat_dat); 330 330 331 331 dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); 332 - if (!dwmac) 333 - return -ENOMEM; 332 + if (!dwmac) { 333 + ret = -ENOMEM; 334 + goto err_remove_config_dt; 335 + } 334 336 335 337 ret = sti_dwmac_parse_data(dwmac, pdev); 336 338 if (ret) { 337 339 dev_err(&pdev->dev, "Unable to parse OF data\n"); 338 - return ret; 340 + goto err_remove_config_dt; 339 341 } 340 342 341 343 dwmac->fix_retime_src = data->fix_retime_src; ··· 347 345 348 346 ret = clk_prepare_enable(dwmac->clk); 349 347 if (ret) 350 - return ret; 348 + goto err_remove_config_dt; 351 349 352 350 ret = sti_dwmac_set_mode(dwmac); 353 351 if (ret) ··· 361 359 362 360 disable_clk: 363 361 clk_disable_unprepare(dwmac->clk); 362 + err_remove_config_dt: 363 + stmmac_remove_config_dt(pdev, plat_dat); 364 + 364 365 return ret; 365 366 } 366 367
+14 -5
drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
··· 107 107 return PTR_ERR(plat_dat); 108 108 109 109 dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); 110 - if (!dwmac) 111 - return -ENOMEM; 110 + if (!dwmac) { 111 + ret = -ENOMEM; 112 + goto err_remove_config_dt; 113 + } 112 114 113 115 ret = stm32_dwmac_parse_data(dwmac, &pdev->dev); 114 116 if (ret) { 115 117 dev_err(&pdev->dev, "Unable to parse OF data\n"); 116 - return ret; 118 + goto err_remove_config_dt; 117 119 } 118 120 119 121 plat_dat->bsp_priv = dwmac; 120 122 121 123 ret = stm32_dwmac_init(plat_dat); 122 124 if (ret) 123 - return ret; 125 + goto err_remove_config_dt; 124 126 125 127 ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); 126 128 if (ret) 127 - stm32_dwmac_clk_disable(dwmac); 129 + goto err_clk_disable; 130 + 131 + return 0; 132 + 133 + err_clk_disable: 134 + stm32_dwmac_clk_disable(dwmac); 135 + err_remove_config_dt: 136 + stmmac_remove_config_dt(pdev, plat_dat); 128 137 129 138 return ret; 130 139 }
+19 -7
drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
··· 120 120 return PTR_ERR(plat_dat); 121 121 122 122 gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL); 123 - if (!gmac) 124 - return -ENOMEM; 123 + if (!gmac) { 124 + ret = -ENOMEM; 125 + goto err_remove_config_dt; 126 + } 125 127 126 128 gmac->interface = of_get_phy_mode(dev->of_node); 127 129 128 130 gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx"); 129 131 if (IS_ERR(gmac->tx_clk)) { 130 132 dev_err(dev, "could not get tx clock\n"); 131 - return PTR_ERR(gmac->tx_clk); 133 + ret = PTR_ERR(gmac->tx_clk); 134 + goto err_remove_config_dt; 132 135 } 133 136 134 137 /* Optional regulator for PHY */ 135 138 gmac->regulator = devm_regulator_get_optional(dev, "phy"); 136 139 if (IS_ERR(gmac->regulator)) { 137 - if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER) 138 - return -EPROBE_DEFER; 140 + if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER) { 141 + ret = -EPROBE_DEFER; 142 + goto err_remove_config_dt; 143 + } 139 144 dev_info(dev, "no regulator found\n"); 140 145 gmac->regulator = NULL; 141 146 } ··· 156 151 157 152 ret = sun7i_gmac_init(pdev, plat_dat->bsp_priv); 158 153 if (ret) 159 - return ret; 154 + goto err_remove_config_dt; 160 155 161 156 ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); 162 157 if (ret) 163 - sun7i_gmac_exit(pdev, plat_dat->bsp_priv); 158 + goto err_gmac_exit; 159 + 160 + return 0; 161 + 162 + err_gmac_exit: 163 + sun7i_gmac_exit(pdev, plat_dat->bsp_priv); 164 + err_remove_config_dt: 165 + stmmac_remove_config_dt(pdev, plat_dat); 164 166 165 167 return ret; 166 168 }
-1
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 3428 3428 stmmac_set_mac(priv->ioaddr, false); 3429 3429 netif_carrier_off(ndev); 3430 3430 unregister_netdev(ndev); 3431 - of_node_put(priv->plat->phy_node); 3432 3431 if (priv->stmmac_rst) 3433 3432 reset_control_assert(priv->stmmac_rst); 3434 3433 clk_disable_unprepare(priv->pclk);
+29 -4
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
··· 200 200 /**
201 201 * stmmac_probe_config_dt - parse device-tree driver parameters
202 202 * @pdev: platform_device structure
203 - * @plat: driver data platform structure
204 203 * @mac: MAC address to use
205 204 * Description:
206 205 * this function is to read the driver parameters from device-tree and
··· 305 306 dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
306 307 GFP_KERNEL);
307 308 if (!dma_cfg) {
308 - of_node_put(plat->phy_node);
309 + stmmac_remove_config_dt(pdev, plat);
309 310 return ERR_PTR(-ENOMEM);
310 311 }
311 312 plat->dma_cfg = dma_cfg;
··· 328 329
329 330 return plat;
330 331 }
332 +
333 + /**
334 + * stmmac_remove_config_dt - undo the effects of stmmac_probe_config_dt()
335 + * @pdev: platform_device structure
336 + * @plat: driver data platform structure
337 + *
338 + * Release resources claimed by stmmac_probe_config_dt().
339 + */
340 + void stmmac_remove_config_dt(struct platform_device *pdev,
341 + struct plat_stmmacenet_data *plat)
342 + {
343 + struct device_node *np = pdev->dev.of_node;
344 +
345 + if (of_phy_is_fixed_link(np))
346 + of_phy_deregister_fixed_link(np);
347 + of_node_put(plat->phy_node);
348 + }
331 349 #else
332 350 struct plat_stmmacenet_data *
333 351 stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
334 352 {
335 353 return ERR_PTR(-ENOSYS);
336 354 }
355 +
356 + void stmmac_remove_config_dt(struct platform_device *pdev,
357 + struct plat_stmmacenet_data *plat)
358 + {
359 + }
337 360 #endif /* CONFIG_OF */
338 361 EXPORT_SYMBOL_GPL(stmmac_probe_config_dt);
362 + EXPORT_SYMBOL_GPL(stmmac_remove_config_dt);
339 363
340 364 int stmmac_get_platform_resources(struct platform_device *pdev,
341 365 struct stmmac_resources *stmmac_res)
··· 414 392 {
415 393 struct net_device *ndev = platform_get_drvdata(pdev);
416 394 struct stmmac_priv *priv = netdev_priv(ndev);
395 + struct plat_stmmacenet_data *plat = priv->plat;
417 396 int ret = stmmac_dvr_remove(&pdev->dev);
418 397
419 - if (priv->plat->exit)
420 - priv->plat->exit(pdev, priv->plat->bsp_priv);
398 + if (plat->exit)
399 + plat->exit(pdev, plat->bsp_priv);
400 +
401 + stmmac_remove_config_dt(pdev, plat);
421 402
422 403 return ret;
423 404 }
+2
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
··· 23 23 24 24 struct plat_stmmacenet_data * 25 25 stmmac_probe_config_dt(struct platform_device *pdev, const char **mac); 26 + void stmmac_remove_config_dt(struct platform_device *pdev, 27 + struct plat_stmmacenet_data *plat); 26 28 27 29 int stmmac_get_platform_resources(struct platform_device *pdev, 28 30 struct stmmac_resources *stmmac_res);
+13 -7
drivers/net/ethernet/synopsys/dwc_eth_qos.c
··· 2881 2881 ret = of_get_phy_mode(lp->pdev->dev.of_node);
2882 2882 if (ret < 0) {
2883 2883 dev_err(&lp->pdev->dev, "error in getting phy i/f\n");
2884 - goto err_out_clk_dis_phy;
2884 + goto err_out_deregister_fixed_link;
2885 2885 }
2886 2886
2887 2887 lp->phy_interface = ret;
··· 2889 2889 ret = dwceqos_mii_init(lp);
2890 2890 if (ret) {
2891 2891 dev_err(&lp->pdev->dev, "error in dwceqos_mii_init\n");
2892 - goto err_out_clk_dis_phy;
2892 + goto err_out_deregister_fixed_link;
2893 2893 }
2894 2894
2895 2895 ret = dwceqos_mii_probe(ndev);
2896 2896 if (ret != 0) {
2897 2897 netdev_err(ndev, "mii_probe fail.\n");
2898 2898 ret = -ENXIO;
2899 - goto err_out_clk_dis_phy;
2899 + goto err_out_deregister_fixed_link;
2900 2900 }
2901 2901
2902 2902 dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
··· 2914 2914 if (ret) {
2915 2915 dev_err(&lp->pdev->dev, "Unable to retrieve DT, error %d\n",
2916 2916 ret);
2917 - goto err_out_clk_dis_phy;
2917 + goto err_out_deregister_fixed_link;
2918 2918 }
2919 2919 dev_info(&lp->pdev->dev, "pdev->id %d, baseaddr 0x%08lx, irq %d\n",
2920 2920 pdev->id, ndev->base_addr, ndev->irq);
··· 2924 2924 if (ret) {
2925 2925 dev_err(&lp->pdev->dev, "Unable to request IRQ %d, error %d\n",
2926 2926 ndev->irq, ret);
2927 - goto err_out_clk_dis_phy;
2927 + goto err_out_deregister_fixed_link;
2928 2928 }
2929 2929
2930 2930 if (netif_msg_probe(lp))
··· 2935 2935 ret = register_netdev(ndev);
2936 2936 if (ret) {
2937 2937 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
2938 - goto err_out_clk_dis_phy;
2938 + goto err_out_deregister_fixed_link;
2939 2939 }
2940 2940
2941 2941 return 0;
2942 2942
2943 + err_out_deregister_fixed_link:
2944 + if (of_phy_is_fixed_link(pdev->dev.of_node))
2945 + of_phy_deregister_fixed_link(pdev->dev.of_node);
2943 2946 err_out_clk_dis_phy:
2944 2947 clk_disable_unprepare(lp->phy_ref_clk);
2945 2948 err_out_clk_dis_aper:
··· 2962 2959 if (ndev) {
2963 2960 lp = netdev_priv(ndev);
2964 2961
2965 - if (ndev->phydev)
2962 + if (ndev->phydev) {
2966 2963 phy_disconnect(ndev->phydev);
2964 + if (of_phy_is_fixed_link(pdev->dev.of_node))
2965 + of_phy_deregister_fixed_link(pdev->dev.of_node);
2966 + }
2967 2967 mdiobus_unregister(lp->mii_bus);
2968 2968 mdiobus_free(lp->mii_bus);
2969 2969
+6 -14
drivers/net/ethernet/ti/cpsw.c
··· 2671 2671 if (strcmp(slave_node->name, "slave")) 2672 2672 continue; 2673 2673 2674 - if (of_phy_is_fixed_link(slave_node)) { 2675 - struct phy_device *phydev; 2676 - 2677 - phydev = of_phy_find_device(slave_node); 2678 - if (phydev) { 2679 - fixed_phy_unregister(phydev); 2680 - /* Put references taken by 2681 - * of_phy_find_device() and 2682 - * of_phy_register_fixed_link(). 2683 - */ 2684 - phy_device_free(phydev); 2685 - phy_device_free(phydev); 2686 - } 2687 - } 2674 + if (of_phy_is_fixed_link(slave_node)) 2675 + of_phy_deregister_fixed_link(slave_node); 2688 2676 2689 2677 of_node_put(slave_data->phy_node); 2690 2678 ··· 3143 3155 /* Select default pin state */ 3144 3156 pinctrl_pm_select_default_state(dev); 3145 3157 3158 + /* shut up ASSERT_RTNL() warning in netif_set_real_num_tx/rx_queues */ 3159 + rtnl_lock(); 3146 3160 if (cpsw->data.dual_emac) { 3147 3161 int i; 3148 3162 ··· 3156 3166 if (netif_running(ndev)) 3157 3167 cpsw_ndo_open(ndev); 3158 3168 } 3169 + rtnl_unlock(); 3170 + 3159 3171 return 0; 3160 3172 } 3161 3173 #endif
+9 -1
drivers/net/ethernet/ti/davinci_emac.c
··· 1767 1767 */ 1768 1768 static int davinci_emac_probe(struct platform_device *pdev) 1769 1769 { 1770 + struct device_node *np = pdev->dev.of_node; 1770 1771 int rc = 0; 1771 1772 struct resource *res, *res_ctrl; 1772 1773 struct net_device *ndev; ··· 1806 1805 if (!pdata) { 1807 1806 dev_err(&pdev->dev, "no platform data\n"); 1808 1807 rc = -ENODEV; 1809 - goto no_pdata; 1808 + goto err_free_netdev; 1810 1809 } 1811 1810 1812 1811 /* MAC addr and PHY mask , RMII enable info from platform_data */ ··· 1942 1941 cpdma_chan_destroy(priv->rxchan); 1943 1942 cpdma_ctlr_destroy(priv->dma); 1944 1943 no_pdata: 1944 + if (of_phy_is_fixed_link(np)) 1945 + of_phy_deregister_fixed_link(np); 1946 + of_node_put(priv->phy_node); 1947 + err_free_netdev: 1945 1948 free_netdev(ndev); 1946 1949 return rc; 1947 1950 } ··· 1961 1956 { 1962 1957 struct net_device *ndev = platform_get_drvdata(pdev); 1963 1958 struct emac_priv *priv = netdev_priv(ndev); 1959 + struct device_node *np = pdev->dev.of_node; 1964 1960 1965 1961 dev_notice(&ndev->dev, "DaVinci EMAC: davinci_emac_remove()\n"); 1966 1962 ··· 1974 1968 unregister_netdev(ndev); 1975 1969 of_node_put(priv->phy_node); 1976 1970 pm_runtime_disable(&pdev->dev); 1971 + if (of_phy_is_fixed_link(np)) 1972 + of_phy_deregister_fixed_link(np); 1977 1973 free_netdev(ndev); 1978 1974 1979 1975 return 0;
+12 -5
drivers/net/ipvlan/ipvlan_main.c
··· 497 497 struct net_device *phy_dev; 498 498 int err; 499 499 u16 mode = IPVLAN_MODE_L3; 500 + bool create = false; 500 501 501 502 if (!tb[IFLA_LINK]) 502 503 return -EINVAL; ··· 514 513 err = ipvlan_port_create(phy_dev); 515 514 if (err < 0) 516 515 return err; 516 + create = true; 517 517 } 518 518 519 519 if (data && data[IFLA_IPVLAN_MODE]) ··· 538 536 539 537 err = register_netdevice(dev); 540 538 if (err < 0) 541 - return err; 539 + goto destroy_ipvlan_port; 542 540 543 541 err = netdev_upper_dev_link(phy_dev, dev); 544 542 if (err) { 545 - unregister_netdevice(dev); 546 - return err; 543 + goto unregister_netdev; 547 544 } 548 545 err = ipvlan_set_port_mode(port, mode); 549 546 if (err) { 550 - unregister_netdevice(dev); 551 - return err; 547 + goto unregister_netdev; 552 548 } 553 549 554 550 list_add_tail_rcu(&ipvlan->pnode, &port->ipvlans); 555 551 netif_stacked_transfer_operstate(phy_dev, dev); 556 552 return 0; 553 + 554 + unregister_netdev: 555 + unregister_netdevice(dev); 556 + destroy_ipvlan_port: 557 + if (create) 558 + ipvlan_port_destroy(phy_dev); 559 + return err; 557 560 } 558 561 559 562 static void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
+3 -1
drivers/net/irda/w83977af_ir.c
··· 518 518 519 519 mtt = irda_get_mtt(skb); 520 520 pr_debug("%s(%ld), mtt=%d\n", __func__ , jiffies, mtt); 521 - if (mtt) 521 + if (mtt > 1000) 522 + mdelay(mtt/1000); 523 + else if (mtt) 522 524 udelay(mtt); 523 525 524 526 /* Enable DMA interrupt */
+12 -7
drivers/net/macvtap.c
··· 491 491 /* Don't put anything that may fail after macvlan_common_newlink 492 492 * because we can't undo what it does. 493 493 */ 494 - return macvlan_common_newlink(src_net, dev, tb, data); 494 + err = macvlan_common_newlink(src_net, dev, tb, data); 495 + if (err) { 496 + netdev_rx_handler_unregister(dev); 497 + return err; 498 + } 499 + 500 + return 0; 495 501 } 496 502 497 503 static void macvtap_dellink(struct net_device *dev, ··· 742 736 743 737 if (zerocopy) 744 738 err = zerocopy_sg_from_iter(skb, from); 745 - else { 739 + else 746 740 err = skb_copy_datagram_from_iter(skb, 0, from, len); 747 - if (!err && m && m->msg_control) { 748 - struct ubuf_info *uarg = m->msg_control; 749 - uarg->callback(uarg, false); 750 - } 751 - } 752 741 753 742 if (err) 754 743 goto err_kfree; ··· 774 773 skb_shinfo(skb)->destructor_arg = m->msg_control; 775 774 skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; 776 775 skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; 776 + } else if (m && m->msg_control) { 777 + struct ubuf_info *uarg = m->msg_control; 778 + uarg->callback(uarg, false); 777 779 } 780 + 778 781 if (vlan) { 779 782 skb->dev = vlan->dev; 780 783 dev_queue_xmit(skb);
+12 -8
drivers/net/phy/realtek.c
··· 102 102 if (ret < 0) 103 103 return ret; 104 104 105 - if (phydev->interface == PHY_INTERFACE_MODE_RGMII) { 106 - /* enable TXDLY */ 107 - phy_write(phydev, RTL8211F_PAGE_SELECT, 0xd08); 108 - reg = phy_read(phydev, 0x11); 105 + phy_write(phydev, RTL8211F_PAGE_SELECT, 0xd08); 106 + reg = phy_read(phydev, 0x11); 107 + 108 + /* enable TX-delay for rgmii-id and rgmii-txid, otherwise disable it */ 109 + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || 110 + phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) 109 111 reg |= RTL8211F_TX_DELAY; 110 - phy_write(phydev, 0x11, reg); 111 - /* restore to default page 0 */ 112 - phy_write(phydev, RTL8211F_PAGE_SELECT, 0x0); 113 - } 112 + else 113 + reg &= ~RTL8211F_TX_DELAY; 114 + 115 + phy_write(phydev, 0x11, reg); 116 + /* restore to default page 0 */ 117 + phy_write(phydev, RTL8211F_PAGE_SELECT, 0x0); 114 118 115 119 return 0; 116 120 }
+4 -6
drivers/net/tun.c
··· 1231 1231 1232 1232 if (zerocopy) 1233 1233 err = zerocopy_sg_from_iter(skb, from); 1234 - else { 1234 + else 1235 1235 err = skb_copy_datagram_from_iter(skb, 0, from, len); 1236 - if (!err && msg_control) { 1237 - struct ubuf_info *uarg = msg_control; 1238 - uarg->callback(uarg, false); 1239 - } 1240 - } 1241 1236 1242 1237 if (err) { 1243 1238 this_cpu_inc(tun->pcpu_stats->rx_dropped); ··· 1277 1282 skb_shinfo(skb)->destructor_arg = msg_control; 1278 1283 skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; 1279 1284 skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; 1285 + } else if (msg_control) { 1286 + struct ubuf_info *uarg = msg_control; 1287 + uarg->callback(uarg, false); 1280 1288 } 1281 1289 1282 1290 skb_reset_network_header(skb);
+3 -3
drivers/net/usb/asix_devices.c
··· 603 603 u16 medium; 604 604 605 605 /* Stop MAC operation */ 606 - medium = asix_read_medium_status(dev, 0); 606 + medium = asix_read_medium_status(dev, 1); 607 607 medium &= ~AX_MEDIUM_RE; 608 - asix_write_medium_mode(dev, medium, 0); 608 + asix_write_medium_mode(dev, medium, 1); 609 609 610 610 netdev_dbg(dev->net, "ax88772_suspend: medium=0x%04x\n", 611 - asix_read_medium_status(dev, 0)); 611 + asix_read_medium_status(dev, 1)); 612 612 613 613 /* Preserve BMCR for restoring */ 614 614 priv->presvd_phy_bmcr =
+31 -7
drivers/net/usb/cdc_ether.c
··· 388 388 case USB_CDC_NOTIFY_NETWORK_CONNECTION: 389 389 netif_dbg(dev, timer, dev->net, "CDC: carrier %s\n", 390 390 event->wValue ? "on" : "off"); 391 - 392 - /* Work-around for devices with broken off-notifications */ 393 - if (event->wValue && 394 - !test_bit(__LINK_STATE_NOCARRIER, &dev->net->state)) 395 - usbnet_link_change(dev, 0, 0); 396 - 397 391 usbnet_link_change(dev, !!event->wValue, 0); 398 392 break; 399 393 case USB_CDC_NOTIFY_SPEED_CHANGE: /* tx/rx rates */ ··· 460 466 return 1; 461 467 } 462 468 469 + /* Ensure correct link state 470 + * 471 + * Some devices (ZTE MF823/831/910) export two carrier on notifications when 472 + * connected. This causes the link state to be incorrect. Work around this by 473 + * always setting the state to off, then on. 474 + */ 475 + void usbnet_cdc_zte_status(struct usbnet *dev, struct urb *urb) 476 + { 477 + struct usb_cdc_notification *event; 478 + 479 + if (urb->actual_length < sizeof(*event)) 480 + return; 481 + 482 + event = urb->transfer_buffer; 483 + 484 + if (event->bNotificationType != USB_CDC_NOTIFY_NETWORK_CONNECTION) { 485 + usbnet_cdc_status(dev, urb); 486 + return; 487 + } 488 + 489 + netif_dbg(dev, timer, dev->net, "CDC: carrier %s\n", 490 + event->wValue ? "on" : "off"); 491 + 492 + if (event->wValue && 493 + netif_carrier_ok(dev->net)) 494 + netif_carrier_off(dev->net); 495 + 496 + usbnet_link_change(dev, !!event->wValue, 0); 497 + } 498 + 463 499 static const struct driver_info cdc_info = { 464 500 .description = "CDC Ethernet Device", 465 501 .flags = FLAG_ETHER | FLAG_POINTTOPOINT, ··· 505 481 .flags = FLAG_ETHER | FLAG_POINTTOPOINT, 506 482 .bind = usbnet_cdc_zte_bind, 507 483 .unbind = usbnet_cdc_unbind, 508 - .status = usbnet_cdc_status, 484 + .status = usbnet_cdc_zte_status, 509 485 .set_rx_mode = usbnet_cdc_update_filter, 510 486 .manage_power = usbnet_manage_power, 511 487 .rx_fixup = usbnet_cdc_zte_rx_fixup,
+1
drivers/net/usb/qmi_wwan.c
··· 894 894 {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ 895 895 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ 896 896 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ 897 + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ 897 898 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ 898 899 {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */ 899 900 {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */
+7 -3
drivers/net/vxlan.c
··· 611 611 struct vxlan_rdst *rd = NULL; 612 612 struct vxlan_fdb *f; 613 613 int notify = 0; 614 + int rc; 614 615 615 616 f = __vxlan_find_mac(vxlan, mac); 616 617 if (f) { ··· 642 641 if ((flags & NLM_F_APPEND) && 643 642 (is_multicast_ether_addr(f->eth_addr) || 644 643 is_zero_ether_addr(f->eth_addr))) { 645 - int rc = vxlan_fdb_append(f, ip, port, vni, ifindex, 646 - &rd); 644 + rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd); 647 645 648 646 if (rc < 0) 649 647 return rc; ··· 673 673 INIT_LIST_HEAD(&f->remotes); 674 674 memcpy(f->eth_addr, mac, ETH_ALEN); 675 675 676 - vxlan_fdb_append(f, ip, port, vni, ifindex, &rd); 676 + rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd); 677 + if (rc < 0) { 678 + kfree(f); 679 + return rc; 680 + } 677 681 678 682 ++vxlan->addrcnt; 679 683 hlist_add_head_rcu(&f->hlist,
+7 -6
drivers/net/wireless/marvell/mwifiex/cfg80211.c
··· 2238 2238 is_scanning_required = 1; 2239 2239 } else { 2240 2240 mwifiex_dbg(priv->adapter, MSG, 2241 - "info: trying to associate to '%s' bssid %pM\n", 2242 - (char *)req_ssid.ssid, bss->bssid); 2241 + "info: trying to associate to '%.*s' bssid %pM\n", 2242 + req_ssid.ssid_len, (char *)req_ssid.ssid, 2243 + bss->bssid); 2243 2244 memcpy(&priv->cfg_bssid, bss->bssid, ETH_ALEN); 2244 2245 break; 2245 2246 } ··· 2300 2299 } 2301 2300 2302 2301 mwifiex_dbg(adapter, INFO, 2303 - "info: Trying to associate to %s and bssid %pM\n", 2304 - (char *)sme->ssid, sme->bssid); 2302 + "info: Trying to associate to %.*s and bssid %pM\n", 2303 + (int)sme->ssid_len, (char *)sme->ssid, sme->bssid); 2305 2304 2306 2305 if (!mwifiex_stop_bg_scan(priv)) 2307 2306 cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy); ··· 2434 2433 } 2435 2434 2436 2435 mwifiex_dbg(priv->adapter, MSG, 2437 - "info: trying to join to %s and bssid %pM\n", 2438 - (char *)params->ssid, params->bssid); 2436 + "info: trying to join to %.*s and bssid %pM\n", 2437 + params->ssid_len, (char *)params->ssid, params->bssid); 2439 2438 2440 2439 mwifiex_set_ibss_params(priv, params); 2441 2440
+15
drivers/of/of_mdio.c
··· 490 490 return -ENODEV; 491 491 } 492 492 EXPORT_SYMBOL(of_phy_register_fixed_link); 493 + 494 + void of_phy_deregister_fixed_link(struct device_node *np) 495 + { 496 + struct phy_device *phydev; 497 + 498 + phydev = of_phy_find_device(np); 499 + if (!phydev) 500 + return; 501 + 502 + fixed_phy_unregister(phydev); 503 + 504 + put_device(&phydev->mdio.dev); /* of_phy_find_device() */ 505 + phy_device_free(phydev); /* fixed_phy_register() */ 506 + } 507 + EXPORT_SYMBOL(of_phy_deregister_fixed_link);
+1 -1
drivers/pci/host/pcie-designware-plat.c
··· 3 3 * 4 4 * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com) 5 5 * 6 - * Authors: Joao Pinto <jpmpinto@gmail.com> 6 + * Authors: Joao Pinto <Joao.Pinto@synopsys.com> 7 7 * 8 8 * This program is free software; you can redistribute it and/or modify 9 9 * it under the terms of the GNU General Public License version 2 as
-14
drivers/pci/pcie/aer/aer_inject.c
··· 307 307 return 0; 308 308 } 309 309 310 - static struct pci_dev *pcie_find_root_port(struct pci_dev *dev) 311 - { 312 - while (1) { 313 - if (!pci_is_pcie(dev)) 314 - break; 315 - if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) 316 - return dev; 317 - if (!dev->bus->self) 318 - break; 319 - dev = dev->bus->self; 320 - } 321 - return NULL; 322 - } 323 - 324 310 static int find_aer_device_iter(struct device *device, void *data) 325 311 { 326 312 struct pcie_device **result = data;
+27 -1
drivers/pci/probe.c
··· 1439 1439 dev_warn(&dev->dev, "PCI-X settings not supported\n"); 1440 1440 } 1441 1441 1442 + static bool pcie_root_rcb_set(struct pci_dev *dev) 1443 + { 1444 + struct pci_dev *rp = pcie_find_root_port(dev); 1445 + u16 lnkctl; 1446 + 1447 + if (!rp) 1448 + return false; 1449 + 1450 + pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl); 1451 + if (lnkctl & PCI_EXP_LNKCTL_RCB) 1452 + return true; 1453 + 1454 + return false; 1455 + } 1456 + 1442 1457 static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp) 1443 1458 { 1444 1459 int pos; ··· 1483 1468 ~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or); 1484 1469 1485 1470 /* Initialize Link Control Register */ 1486 - if (pcie_cap_has_lnkctl(dev)) 1471 + if (pcie_cap_has_lnkctl(dev)) { 1472 + 1473 + /* 1474 + * If the Root Port supports Read Completion Boundary of 1475 + * 128, set RCB to 128. Otherwise, clear it. 1476 + */ 1477 + hpp->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB; 1478 + hpp->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB; 1479 + if (pcie_root_rcb_set(dev)) 1480 + hpp->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB; 1481 + 1487 1482 pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL, 1488 1483 ~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or); 1484 + } 1489 1485 1490 1486 /* Find Advanced Error Reporting Enhanced Capability */ 1491 1487 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+1
drivers/pwm/pwm-meson.c
··· 474 474 if (IS_ERR(meson->base)) 475 475 return PTR_ERR(meson->base); 476 476 477 + spin_lock_init(&meson->lock); 477 478 meson->chip.dev = &pdev->dev; 478 479 meson->chip.ops = &meson_pwm_ops; 479 480 meson->chip.base = -1;
+2
drivers/pwm/sysfs.c
··· 425 425 if (test_bit(PWMF_EXPORTED, &pwm->flags)) 426 426 pwm_unexport_child(parent, pwm); 427 427 } 428 + 429 + put_device(parent); 428 430 } 429 431 430 432 static int __init pwm_sysfs_init(void)
+1 -1
drivers/scsi/be2iscsi/be_mgmt.c
··· 1083 1083 nonemb_cmd = &phba->boot_struct.nonemb_cmd; 1084 1084 nonemb_cmd->size = sizeof(*resp); 1085 1085 nonemb_cmd->va = pci_alloc_consistent(phba->ctrl.pdev, 1086 - sizeof(nonemb_cmd->size), 1086 + nonemb_cmd->size, 1087 1087 &nonemb_cmd->dma); 1088 1088 if (!nonemb_cmd->va) { 1089 1089 mutex_unlock(&ctrl->mbox_lock);
+11 -5
drivers/scsi/hpsa.c
··· 2009 2009 2010 2010 static int hpsa_slave_alloc(struct scsi_device *sdev) 2011 2011 { 2012 - struct hpsa_scsi_dev_t *sd; 2012 + struct hpsa_scsi_dev_t *sd = NULL; 2013 2013 unsigned long flags; 2014 2014 struct ctlr_info *h; 2015 2015 ··· 2026 2026 sd->target = sdev_id(sdev); 2027 2027 sd->lun = sdev->lun; 2028 2028 } 2029 - } else 2029 + } 2030 + if (!sd) 2030 2031 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev), 2031 2032 sdev_id(sdev), sdev->lun); 2032 2033 ··· 3841 3840 sizeof(this_device->vendor)); 3842 3841 memcpy(this_device->model, &inq_buff[16], 3843 3842 sizeof(this_device->model)); 3843 + this_device->rev = inq_buff[2]; 3844 3844 memset(this_device->device_id, 0, 3845 3845 sizeof(this_device->device_id)); 3846 3846 if (hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8, ··· 3931 3929 3932 3930 if (!is_logical_dev_addr_mode(lunaddrbytes)) { 3933 3931 /* physical device, target and lun filled in later */ 3934 - if (is_hba_lunid(lunaddrbytes)) 3932 + if (is_hba_lunid(lunaddrbytes)) { 3933 + int bus = HPSA_HBA_BUS; 3934 + 3935 + if (!device->rev) 3936 + bus = HPSA_LEGACY_HBA_BUS; 3935 3937 hpsa_set_bus_target_lun(device, 3936 - HPSA_HBA_BUS, 0, lunid & 0x3fff); 3937 - else 3938 + bus, 0, lunid & 0x3fff); 3939 + } else 3938 3940 /* defer target, lun assignment for physical devices */ 3939 3941 hpsa_set_bus_target_lun(device, 3940 3942 HPSA_PHYSICAL_DEVICE_BUS, -1, -1);
+2
drivers/scsi/hpsa.h
··· 69 69 u64 sas_address; 70 70 unsigned char vendor[8]; /* bytes 8-15 of inquiry data */ 71 71 unsigned char model[16]; /* bytes 16-31 of inquiry data */ 72 + unsigned char rev; /* byte 2 of inquiry data */ 72 73 unsigned char raid_level; /* from inquiry page 0xC1 */ 73 74 unsigned char volume_offline; /* discovered via TUR or VPD */ 74 75 u16 queue_depth; /* max queue_depth for this device */ ··· 403 402 #define HPSA_RAID_VOLUME_BUS 1 404 403 #define HPSA_EXTERNAL_RAID_VOLUME_BUS 2 405 404 #define HPSA_HBA_BUS 0 405 + #define HPSA_LEGACY_HBA_BUS 3 406 406 407 407 /* 408 408 Send the command to the hardware
+1 -1
drivers/scsi/libfc/fc_lport.c
··· 308 308 fc_stats = &lport->host_stats; 309 309 memset(fc_stats, 0, sizeof(struct fc_host_statistics)); 310 310 311 - fc_stats->seconds_since_last_reset = (lport->boot_time - jiffies) / HZ; 311 + fc_stats->seconds_since_last_reset = (jiffies - lport->boot_time) / HZ; 312 312 313 313 for_each_possible_cpu(cpu) { 314 314 struct fc_stats *stats;
+8 -5
drivers/scsi/mpt3sas/mpt3sas_scsih.c
··· 3885 3885 } 3886 3886 } 3887 3887 3888 + static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd) 3889 + { 3890 + return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16); 3891 + } 3892 + 3888 3893 /** 3889 3894 * _scsih_flush_running_cmds - completing outstanding commands. 3890 3895 * @ioc: per adapter object ··· 3911 3906 if (!scmd) 3912 3907 continue; 3913 3908 count++; 3909 + if (ata_12_16_cmd(scmd)) 3910 + scsi_internal_device_unblock(scmd->device, 3911 + SDEV_RUNNING); 3914 3912 mpt3sas_base_free_smid(ioc, smid); 3915 3913 scsi_dma_unmap(scmd); 3916 3914 if (ioc->pci_error_recovery) ··· 4016 4008 ascq); 4017 4009 scmd->result = DRIVER_SENSE << 24 | (DID_ABORT << 16) | 4018 4010 SAM_STAT_CHECK_CONDITION; 4019 - } 4020 - 4021 - static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd) 4022 - { 4023 - return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16); 4024 4011 } 4025 4012 4026 4013 /**
+3 -1
drivers/scsi/mvsas/mv_sas.c
··· 791 791 slot->slot_tag = tag; 792 792 793 793 slot->buf = pci_pool_alloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma); 794 - if (!slot->buf) 794 + if (!slot->buf) { 795 + rc = -ENOMEM; 795 796 goto err_out_tag; 797 + } 796 798 memset(slot->buf, 0, MVS_SLOT_BUF_SZ); 797 799 798 800 tei.task = task;
+2 -2
drivers/scsi/qlogicpti.h
··· 356 356 357 357 /* The rest of the elements are unimportant for performance. */ 358 358 struct qlogicpti *next; 359 - __u32 res_dvma; /* Ptr to RESPONSE bufs (DVMA)*/ 360 - __u32 req_dvma; /* Ptr to REQUEST bufs (DVMA) */ 359 + dma_addr_t res_dvma; /* Ptr to RESPONSE bufs (DVMA)*/ 360 + dma_addr_t req_dvma; /* Ptr to REQUEST bufs (DVMA) */ 361 361 u_char fware_majrev, fware_minrev, fware_micrev; 362 362 struct Scsi_Host *qhost; 363 363 int qpti_id;
+8 -3
fs/cifs/cifsencrypt.c
··· 808 808 struct crypto_skcipher *tfm_arc4; 809 809 struct scatterlist sgin, sgout; 810 810 struct skcipher_request *req; 811 - unsigned char sec_key[CIFS_SESS_KEY_SIZE]; /* a nonce */ 811 + unsigned char *sec_key; 812 + 813 + sec_key = kmalloc(CIFS_SESS_KEY_SIZE, GFP_KERNEL); 814 + if (sec_key == NULL) 815 + return -ENOMEM; 812 816 813 817 get_random_bytes(sec_key, CIFS_SESS_KEY_SIZE); 814 818 ··· 820 816 if (IS_ERR(tfm_arc4)) { 821 817 rc = PTR_ERR(tfm_arc4); 822 818 cifs_dbg(VFS, "could not allocate crypto API arc4\n"); 823 - return rc; 819 + goto out; 824 820 } 825 821 826 822 rc = crypto_skcipher_setkey(tfm_arc4, ses->auth_key.response, ··· 858 854 859 855 out_free_cipher: 860 856 crypto_free_skcipher(tfm_arc4); 861 - 857 + out: 858 + kfree(sec_key); 862 859 return rc; 863 860 } 864 861
+2 -2
fs/cifs/cifssmb.c
··· 3427 3427 __u16 rc = 0; 3428 3428 struct cifs_posix_acl *cifs_acl = (struct cifs_posix_acl *)parm_data; 3429 3429 struct posix_acl_xattr_header *local_acl = (void *)pACL; 3430 + struct posix_acl_xattr_entry *ace = (void *)(local_acl + 1); 3430 3431 int count; 3431 3432 int i; 3432 3433 ··· 3454 3453 return 0; 3455 3454 } 3456 3455 for (i = 0; i < count; i++) { 3457 - rc = convert_ace_to_cifs_ace(&cifs_acl->ace_array[i], 3458 - (struct posix_acl_xattr_entry *)(local_acl + 1)); 3456 + rc = convert_ace_to_cifs_ace(&cifs_acl->ace_array[i], &ace[i]); 3459 3457 if (rc != 0) { 3460 3458 /* ACE not converted */ 3461 3459 break;
+18 -7
fs/cifs/connect.c
··· 412 412 } 413 413 } while (server->tcpStatus == CifsNeedReconnect); 414 414 415 + if (server->tcpStatus == CifsNeedNegotiate) 416 + mod_delayed_work(cifsiod_wq, &server->echo, 0); 417 + 415 418 return rc; 416 419 } 417 420 ··· 424 421 int rc; 425 422 struct TCP_Server_Info *server = container_of(work, 426 423 struct TCP_Server_Info, echo.work); 427 - unsigned long echo_interval = server->echo_interval; 424 + unsigned long echo_interval; 428 425 429 426 /* 430 - * We cannot send an echo if it is disabled or until the 431 - * NEGOTIATE_PROTOCOL request is done, which is indicated by 432 - * server->ops->need_neg() == true. Also, no need to ping if 433 - * we got a response recently. 427 + * If we need to renegotiate, set echo interval to zero to 428 + * immediately call echo service where we can renegotiate. 429 + */ 430 + if (server->tcpStatus == CifsNeedNegotiate) 431 + echo_interval = 0; 432 + else 433 + echo_interval = server->echo_interval; 434 + 435 + /* 436 + * We cannot send an echo if it is disabled. 437 + * Also, no need to ping if we got a response recently. 434 438 */ 435 439 436 440 if (server->tcpStatus == CifsNeedReconnect || 437 - server->tcpStatus == CifsExiting || server->tcpStatus == CifsNew || 441 + server->tcpStatus == CifsExiting || 442 + server->tcpStatus == CifsNew || 438 443 (server->ops->can_echo && !server->ops->can_echo(server)) || 439 444 time_before(jiffies, server->lstrp + echo_interval - HZ)) 440 445 goto requeue_echo; ··· 453 442 server->hostname); 454 443 455 444 requeue_echo: 456 - queue_delayed_work(cifsiod_wq, &server->echo, echo_interval); 445 + queue_delayed_work(cifsiod_wq, &server->echo, server->echo_interval); 457 446 } 458 447 459 448 static bool
+2 -2
fs/isofs/rock.c
··· 377 377 { 378 378 int p; 379 379 for (p = 0; p < rr->u.ER.len_id; p++) 380 - printk("%c", rr->u.ER.data[p]); 380 + printk(KERN_CONT "%c", rr->u.ER.data[p]); 381 381 } 382 - printk("\n"); 382 + printk(KERN_CONT "\n"); 383 383 break; 384 384 case SIG('P', 'X'): 385 385 inode->i_mode = isonum_733(rr->u.PX.mode);
+3 -3
fs/overlayfs/super.c
··· 328 328 if (!real) 329 329 goto bug; 330 330 331 + /* Handle recursion */ 332 + real = d_real(real, inode, open_flags); 333 + 331 334 if (!inode || inode == d_inode(real)) 332 335 return real; 333 - 334 - /* Handle recursion */ 335 - return d_real(real, inode, open_flags); 336 336 bug: 337 337 WARN(1, "ovl_d_real(%pd4, %s:%lu): real dentry not found\n", dentry, 338 338 inode ? inode->i_sb->s_id : "NULL", inode ? inode->i_ino : 0);
+1
include/asm-generic/export.h
··· 54 54 KSYM(__kcrctab_\name): 55 55 __put KSYM(__crc_\name) 56 56 .weak KSYM(__crc_\name) 57 + .set KSYM(__crc_\name), 0 57 58 .previous 58 59 #endif 59 60 #endif
+3 -1
include/linux/compiler-gcc.h
··· 263 263 #endif 264 264 #endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP && !__CHECKER__ */ 265 265 266 - #if GCC_VERSION >= 50000 266 + #if GCC_VERSION >= 70000 267 + #define KASAN_ABI_VERSION 5 268 + #elif GCC_VERSION >= 50000 267 269 #define KASAN_ABI_VERSION 4 268 270 #elif GCC_VERSION >= 40902 269 271 #define KASAN_ABI_VERSION 3
+1
include/linux/intel-iommu.h
··· 429 429 struct page_req_dsc *prq; 430 430 unsigned char prq_name[16]; /* Name for PRQ interrupt */ 431 431 struct idr pasid_idr; 432 + u32 pasid_max; 432 433 #endif 433 434 struct q_inval *qi; /* Queued invalidation info */ 434 435 u32 *iommu_state; /* Store iommu states between suspend and resume.*/
-1
include/linux/mlx4/device.h
··· 476 476 enum { 477 477 MLX4_INTERFACE_STATE_UP = 1 << 0, 478 478 MLX4_INTERFACE_STATE_DELETION = 1 << 1, 479 - MLX4_INTERFACE_STATE_SHUTDOWN = 1 << 2, 480 479 }; 481 480 482 481 #define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
+4
include/linux/of_mdio.h
··· 29 29 extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np); 30 30 extern int of_mdio_parse_addr(struct device *dev, const struct device_node *np); 31 31 extern int of_phy_register_fixed_link(struct device_node *np); 32 + extern void of_phy_deregister_fixed_link(struct device_node *np); 32 33 extern bool of_phy_is_fixed_link(struct device_node *np); 33 34 34 35 #else /* CONFIG_OF */ ··· 83 82 static inline int of_phy_register_fixed_link(struct device_node *np) 84 83 { 85 84 return -ENOSYS; 85 + } 86 + static inline void of_phy_deregister_fixed_link(struct device_node *np) 87 + { 86 88 } 87 89 static inline bool of_phy_is_fixed_link(struct device_node *np) 88 90 {
+15 -6
include/linux/pagemap.h
··· 374 374 } 375 375 376 376 /* 377 - * Get the offset in PAGE_SIZE. 378 - * (TODO: hugepage should have ->index in PAGE_SIZE) 377 + * Get index of the page with in radix-tree 378 + * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE) 379 379 */ 380 - static inline pgoff_t page_to_pgoff(struct page *page) 380 + static inline pgoff_t page_to_index(struct page *page) 381 381 { 382 382 pgoff_t pgoff; 383 - 384 - if (unlikely(PageHeadHuge(page))) 385 - return page->index << compound_order(page); 386 383 387 384 if (likely(!PageTransTail(page))) 388 385 return page->index; ··· 391 394 pgoff = compound_head(page)->index; 392 395 pgoff += page - compound_head(page); 393 396 return pgoff; 397 + } 398 + 399 + /* 400 + * Get the offset in PAGE_SIZE. 401 + * (TODO: hugepage should have ->index in PAGE_SIZE) 402 + */ 403 + static inline pgoff_t page_to_pgoff(struct page *page) 404 + { 405 + if (unlikely(PageHeadHuge(page))) 406 + return page->index << compound_order(page); 407 + 408 + return page_to_index(page); 394 409 } 395 410 396 411 /*
+14
include/linux/pci.h
··· 1928 1928 return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4; 1929 1929 } 1930 1930 1931 + static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev) 1932 + { 1933 + while (1) { 1934 + if (!pci_is_pcie(dev)) 1935 + break; 1936 + if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) 1937 + return dev; 1938 + if (!dev->bus->self) 1939 + break; 1940 + dev = dev->bus->self; 1941 + } 1942 + return NULL; 1943 + } 1944 + 1931 1945 void pci_request_acs(void); 1932 1946 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags); 1933 1947 bool pci_acs_path_enabled(struct pci_dev *start,
+2
include/net/ipv6.h
··· 971 971 int compat_ipv6_getsockopt(struct sock *sk, int level, int optname, 972 972 char __user *optval, int __user *optlen); 973 973 974 + int __ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, 975 + int addr_len); 974 976 int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len); 975 977 int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *addr, 976 978 int addr_len);
+3 -3
include/net/netfilter/nf_conntrack.h
··· 100 100 101 101 possible_net_t ct_net; 102 102 103 + #if IS_ENABLED(CONFIG_NF_NAT) 104 + struct rhlist_head nat_bysource; 105 + #endif 103 106 /* all members below initialized via memset */ 104 107 u8 __nfct_init_offset[0]; 105 108 ··· 120 117 /* Extensions */ 121 118 struct nf_ct_ext *ext; 122 119 123 - #if IS_ENABLED(CONFIG_NF_NAT) 124 - struct rhash_head nat_bysource; 125 - #endif 126 120 /* Storage reserved for other modules, must be the last member */ 127 121 union nf_conntrack_proto proto; 128 122 };
+1 -1
include/net/netfilter/nf_tables.h
··· 329 329 * @size: maximum set size 330 330 * @nelems: number of elements 331 331 * @ndeact: number of deactivated elements queued for removal 332 - * @timeout: default timeout value in msecs 332 + * @timeout: default timeout value in jiffies 333 333 * @gc_int: garbage collection interval in msecs 334 334 * @policy: set parameterization (see enum nft_set_policies) 335 335 * @udlen: user data length
+1 -1
include/uapi/linux/input-event-codes.h
··· 640 640 * Control a data application associated with the currently viewed channel, 641 641 * e.g. teletext or data broadcast application (MHEG, MHP, HbbTV, etc.) 642 642 */ 643 - #define KEY_DATA 0x275 643 + #define KEY_DATA 0x277 644 644 645 645 #define BTN_TRIGGER_HAPPY 0x2c0 646 646 #define BTN_TRIGGER_HAPPY1 0x2c0
+1
include/uapi/linux/tc_act/Kbuild
··· 11 11 header-y += tc_bpf.h 12 12 header-y += tc_connmark.h 13 13 header-y += tc_ife.h 14 + header-y += tc_tunnel_key.h
-1
init/Kconfig
··· 1957 1957 1958 1958 config MODVERSIONS 1959 1959 bool "Module versioning support" 1960 - depends on BROKEN 1961 1960 help 1962 1961 Usually, you have to use modules compiled with your kernel. 1963 1962 Saying Y here makes it sometimes possible to use modules
+8 -2
kernel/bpf/verifier.c
··· 2508 2508 struct bpf_verifier_state *old, 2509 2509 struct bpf_verifier_state *cur) 2510 2510 { 2511 + bool varlen_map_access = env->varlen_map_value_access; 2511 2512 struct bpf_reg_state *rold, *rcur; 2512 2513 int i; 2513 2514 ··· 2522 2521 /* If the ranges were not the same, but everything else was and 2523 2522 * we didn't do a variable access into a map then we are a-ok. 2524 2523 */ 2525 - if (!env->varlen_map_value_access && 2524 + if (!varlen_map_access && 2526 2525 rold->type == rcur->type && rold->imm == rcur->imm) 2527 2526 continue; 2528 2527 2528 + /* If we didn't map access then again we don't care about the 2529 + * mismatched range values and it's ok if our old type was 2530 + * UNKNOWN and we didn't go to a NOT_INIT'ed reg. 2531 + */ 2529 2532 if (rold->type == NOT_INIT || 2530 - (rold->type == UNKNOWN_VALUE && rcur->type != NOT_INIT)) 2533 + (!varlen_map_access && rold->type == UNKNOWN_VALUE && 2534 + rcur->type != NOT_INIT)) 2531 2535 continue; 2532 2536 2533 2537 if (rold->type == PTR_TO_PACKET && rcur->type == PTR_TO_PACKET &&
+3 -2
kernel/module.c
··· 1301 1301 goto bad_version; 1302 1302 } 1303 1303 1304 - pr_warn("%s: no symbol version for %s\n", mod->name, symname); 1305 - return 0; 1304 + /* Broken toolchain. Warn once, then let it go.. */ 1305 + pr_warn_once("%s: no symbol version for %s\n", mod->name, symname); 1306 + return 1; 1306 1307 1307 1308 bad_version: 1308 1309 pr_warn("%s: disagrees about version of symbol %s\n",
+8
lib/debugobjects.c
··· 362 362 363 363 __debug_object_init(addr, descr, 0); 364 364 } 365 + EXPORT_SYMBOL_GPL(debug_object_init); 365 366 366 367 /** 367 368 * debug_object_init_on_stack - debug checks when an object on stack is ··· 377 376 378 377 __debug_object_init(addr, descr, 1); 379 378 } 379 + EXPORT_SYMBOL_GPL(debug_object_init_on_stack); 380 380 381 381 /** 382 382 * debug_object_activate - debug checks when an object is activated ··· 451 449 } 452 450 return 0; 453 451 } 452 + EXPORT_SYMBOL_GPL(debug_object_activate); 454 453 455 454 /** 456 455 * debug_object_deactivate - debug checks when an object is deactivated ··· 499 496 500 497 raw_spin_unlock_irqrestore(&db->lock, flags); 501 498 } 499 + EXPORT_SYMBOL_GPL(debug_object_deactivate); 502 500 503 501 /** 504 502 * debug_object_destroy - debug checks when an object is destroyed ··· 546 542 out_unlock: 547 543 raw_spin_unlock_irqrestore(&db->lock, flags); 548 544 } 545 + EXPORT_SYMBOL_GPL(debug_object_destroy); 549 546 550 547 /** 551 548 * debug_object_free - debug checks when an object is freed ··· 587 582 out_unlock: 588 583 raw_spin_unlock_irqrestore(&db->lock, flags); 589 584 } 585 + EXPORT_SYMBOL_GPL(debug_object_free); 590 586 591 587 /** 592 588 * debug_object_assert_init - debug checks when object should be init-ed ··· 632 626 633 627 raw_spin_unlock_irqrestore(&db->lock, flags); 634 628 } 629 + EXPORT_SYMBOL_GPL(debug_object_assert_init); 635 630 636 631 /** 637 632 * debug_object_active_state - debug checks object usage state machine ··· 680 673 681 674 raw_spin_unlock_irqrestore(&db->lock, flags); 682 675 } 676 + EXPORT_SYMBOL_GPL(debug_object_active_state); 683 677 684 678 #ifdef CONFIG_DEBUG_OBJECTS_FREE 685 679 static void __debug_check_no_obj_freed(const void *address, unsigned long size)
+29
lib/test_kasan.c
··· 20 20 #include <linux/uaccess.h> 21 21 #include <linux/module.h> 22 22 23 + /* 24 + * Note: test functions are marked noinline so that their names appear in 25 + * reports. 26 + */ 27 + 23 28 static noinline void __init kmalloc_oob_right(void) 24 29 { 25 30 char *ptr; ··· 416 411 kfree(kmem); 417 412 } 418 413 414 + static noinline void __init use_after_scope_test(void) 415 + { 416 + volatile char *volatile p; 417 + 418 + pr_info("use-after-scope on int\n"); 419 + { 420 + int local = 0; 421 + 422 + p = (char *)&local; 423 + } 424 + p[0] = 1; 425 + p[3] = 1; 426 + 427 + pr_info("use-after-scope on array\n"); 428 + { 429 + char local[1024] = {0}; 430 + 431 + p = local; 432 + } 433 + p[0] = 1; 434 + p[1023] = 1; 435 + } 436 + 419 437 static int __init kmalloc_tests_init(void) 420 438 { 421 439 kmalloc_oob_right(); ··· 464 436 kasan_global_oob(); 465 437 ksize_unpoisons_memory(); 466 438 copy_user_test(); 439 + use_after_scope_test(); 467 440 return -EAGAIN; 468 441 } 469 442
+2 -2
mm/huge_memory.c
··· 1456 1456 new_ptl = pmd_lockptr(mm, new_pmd); 1457 1457 if (new_ptl != old_ptl) 1458 1458 spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); 1459 - if (pmd_present(*old_pmd) && pmd_dirty(*old_pmd)) 1460 - force_flush = true; 1461 1459 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd); 1460 + if (pmd_present(pmd) && pmd_dirty(pmd)) 1461 + force_flush = true; 1462 1462 VM_BUG_ON(!pmd_none(*new_pmd)); 1463 1463 1464 1464 if (pmd_move_must_withdraw(new_ptl, old_ptl) &&
+19
mm/kasan/kasan.c
··· 764 764 void __asan_handle_no_return(void) {} 765 765 EXPORT_SYMBOL(__asan_handle_no_return); 766 766 767 + /* Emitted by compiler to poison large objects when they go out of scope. */ 768 + void __asan_poison_stack_memory(const void *addr, size_t size) 769 + { 770 + /* 771 + * Addr is KASAN_SHADOW_SCALE_SIZE-aligned and the object is surrounded 772 + * by redzones, so we simply round up size to simplify logic. 773 + */ 774 + kasan_poison_shadow(addr, round_up(size, KASAN_SHADOW_SCALE_SIZE), 775 + KASAN_USE_AFTER_SCOPE); 776 + } 777 + EXPORT_SYMBOL(__asan_poison_stack_memory); 778 + 779 + /* Emitted by compiler to unpoison large objects when they go into scope. */ 780 + void __asan_unpoison_stack_memory(const void *addr, size_t size) 781 + { 782 + kasan_unpoison_shadow(addr, size); 783 + } 784 + EXPORT_SYMBOL(__asan_unpoison_stack_memory); 785 + 767 786 #ifdef CONFIG_MEMORY_HOTPLUG 768 787 static int kasan_mem_notifier(struct notifier_block *nb, 769 788 unsigned long action, void *data)
+4
mm/kasan/kasan.h
··· 21 21 #define KASAN_STACK_MID 0xF2 22 22 #define KASAN_STACK_RIGHT 0xF3 23 23 #define KASAN_STACK_PARTIAL 0xF4 24 + #define KASAN_USE_AFTER_SCOPE 0xF8 24 25 25 26 /* Don't break randconfig/all*config builds */ 26 27 #ifndef KASAN_ABI_VERSION ··· 53 52 unsigned long has_dynamic_init; /* This needed for C++ */ 54 53 #if KASAN_ABI_VERSION >= 4 55 54 struct kasan_source_location *location; 55 + #endif 56 + #if KASAN_ABI_VERSION >= 5 57 + char *odr_indicator; 56 58 #endif 57 59 }; 58 60
+3
mm/kasan/report.c
··· 90 90 case KASAN_KMALLOC_FREE: 91 91 bug_type = "use-after-free"; 92 92 break; 93 + case KASAN_USE_AFTER_SCOPE: 94 + bug_type = "use-after-scope"; 95 + break; 93 96 } 94 97 95 98 pr_err("BUG: KASAN: %s in %pS at addr %p\n",
+2
mm/khugepaged.c
··· 103 103 .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head), 104 104 }; 105 105 106 + #ifdef CONFIG_SYSFS 106 107 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj, 107 108 struct kobj_attribute *attr, 108 109 char *buf) ··· 296 295 .attrs = khugepaged_attr, 297 296 .name = "khugepaged", 298 297 }; 298 + #endif /* CONFIG_SYSFS */ 299 299 300 300 #define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB) 301 301
+5 -2
mm/mlock.c
··· 190 190 */ 191 191 spin_lock_irq(zone_lru_lock(zone)); 192 192 193 - nr_pages = hpage_nr_pages(page); 194 - if (!TestClearPageMlocked(page)) 193 + if (!TestClearPageMlocked(page)) { 194 + /* Potentially, PTE-mapped THP: do not skip the rest PTEs */ 195 + nr_pages = 1; 195 196 goto unlock_out; 197 + } 196 198 199 + nr_pages = hpage_nr_pages(page); 197 200 __mod_zone_page_state(zone, NR_MLOCK, -nr_pages); 198 201 199 202 if (__munlock_isolate_lru_page(page, true)) {
+11 -7
mm/mremap.c
··· 149 149 if (pte_none(*old_pte)) 150 150 continue; 151 151 152 - /* 153 - * We are remapping a dirty PTE, make sure to 154 - * flush TLB before we drop the PTL for the 155 - * old PTE or we may race with page_mkclean(). 156 - */ 157 - if (pte_present(*old_pte) && pte_dirty(*old_pte)) 158 - force_flush = true; 159 152 pte = ptep_get_and_clear(mm, old_addr, old_pte); 153 + /* 154 + * If we are remapping a dirty PTE, make sure 155 + * to flush TLB before we drop the PTL for the 156 + * old PTE or we may race with page_mkclean(). 157 + * 158 + * This check has to be done after we removed the 159 + * old PTE from page tables or another thread may 160 + * dirty it after the check and before the removal. 161 + */ 162 + if (pte_present(pte) && pte_dirty(pte)) 163 + force_flush = true; 160 164 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr); 161 165 pte = move_soft_dirty_pte(pte); 162 166 set_pte_at(mm, new_addr, new_pte, pte);
+4 -4
mm/truncate.c
··· 283 283 284 284 if (!trylock_page(page)) 285 285 continue; 286 - WARN_ON(page_to_pgoff(page) != index); 286 + WARN_ON(page_to_index(page) != index); 287 287 if (PageWriteback(page)) { 288 288 unlock_page(page); 289 289 continue; ··· 371 371 } 372 372 373 373 lock_page(page); 374 - WARN_ON(page_to_pgoff(page) != index); 374 + WARN_ON(page_to_index(page) != index); 375 375 wait_on_page_writeback(page); 376 376 truncate_inode_page(mapping, page); 377 377 unlock_page(page); ··· 492 492 if (!trylock_page(page)) 493 493 continue; 494 494 495 - WARN_ON(page_to_pgoff(page) != index); 495 + WARN_ON(page_to_index(page) != index); 496 496 497 497 /* Middle of THP: skip */ 498 498 if (PageTransTail(page)) { ··· 612 612 } 613 613 614 614 lock_page(page); 615 - WARN_ON(page_to_pgoff(page) != index); 615 + WARN_ON(page_to_index(page) != index); 616 616 if (page->mapping != mapping) { 617 617 unlock_page(page); 618 618 continue;
+2 -4
net/core/flow.c
··· 95 95 list_for_each_entry_safe(fce, n, &gc_list, u.gc_list) { 96 96 flow_entry_kill(fce, xfrm); 97 97 atomic_dec(&xfrm->flow_cache_gc_count); 98 - WARN_ON(atomic_read(&xfrm->flow_cache_gc_count) < 0); 99 98 } 100 99 } 101 100 ··· 235 236 if (fcp->hash_count > fc->high_watermark) 236 237 flow_cache_shrink(fc, fcp); 237 238 238 - if (fcp->hash_count > 2 * fc->high_watermark || 239 - atomic_read(&net->xfrm.flow_cache_gc_count) > fc->high_watermark) { 240 - atomic_inc(&net->xfrm.flow_cache_genid); 239 + if (atomic_read(&net->xfrm.flow_cache_gc_count) > 240 + 2 * num_online_cpus() * fc->high_watermark) { 241 241 flo = ERR_PTR(-ENOBUFS); 242 242 goto ret_object; 243 243 }
+2 -2
net/core/rtnetlink.c
··· 931 931 + nla_total_size(4) /* IFLA_PROMISCUITY */ 932 932 + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */ 933 933 + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */ 934 - + nla_total_size(4) /* IFLA_MAX_GSO_SEGS */ 935 - + nla_total_size(4) /* IFLA_MAX_GSO_SIZE */ 934 + + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */ 935 + + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */ 936 936 + nla_total_size(1) /* IFLA_OPERSTATE */ 937 937 + nla_total_size(1) /* IFLA_LINKMODE */ 938 938 + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
+2 -2
net/core/sock.c
··· 715 715 val = min_t(u32, val, sysctl_wmem_max); 716 716 set_sndbuf: 717 717 sk->sk_userlocks |= SOCK_SNDBUF_LOCK; 718 - sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF); 718 + sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF); 719 719 /* Wake up sending tasks if we upped the value. */ 720 720 sk->sk_write_space(sk); 721 721 break; ··· 751 751 * returning the value we actually used in getsockopt 752 752 * is the most desirable behavior. 753 753 */ 754 - sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF); 754 + sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF); 755 755 break; 756 756 757 757 case SO_RCVBUFFORCE:
+7 -5
net/dccp/ipv4.c
··· 694 694 { 695 695 const struct dccp_hdr *dh; 696 696 unsigned int cscov; 697 + u8 dccph_doff; 697 698 698 699 if (skb->pkt_type != PACKET_HOST) 699 700 return 1; ··· 716 715 /* 717 716 * If P.Data Offset is too small for packet type, drop packet and return 718 717 */ 719 - if (dh->dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) { 720 - DCCP_WARN("P.Data Offset(%u) too small\n", dh->dccph_doff); 718 + dccph_doff = dh->dccph_doff; 719 + if (dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) { 720 + DCCP_WARN("P.Data Offset(%u) too small\n", dccph_doff); 721 721 return 1; 722 722 } 723 723 /* 724 724 * If P.Data Offset is too too large for packet, drop packet and return 725 725 */ 726 - if (!pskb_may_pull(skb, dh->dccph_doff * sizeof(u32))) { 727 - DCCP_WARN("P.Data Offset(%u) too large\n", dh->dccph_doff); 726 + if (!pskb_may_pull(skb, dccph_doff * sizeof(u32))) { 727 + DCCP_WARN("P.Data Offset(%u) too large\n", dccph_doff); 728 728 return 1; 729 729 } 730 - 730 + dh = dccp_hdr(skb); 731 731 /* 732 732 * If P.type is not Data, Ack, or DataAck and P.X == 0 (the packet 733 733 * has short sequence numbers), drop packet and return
+4 -9
net/dsa/dsa.c
··· 233 233 genphy_read_status(phydev); 234 234 if (ds->ops->adjust_link) 235 235 ds->ops->adjust_link(ds, port, phydev); 236 + 237 + put_device(&phydev->mdio.dev); 236 238 } 237 239 238 240 return 0; ··· 506 504 507 505 void dsa_cpu_dsa_destroy(struct device_node *port_dn) 508 506 { 509 - struct phy_device *phydev; 510 - 511 - if (of_phy_is_fixed_link(port_dn)) { 512 - phydev = of_phy_find_device(port_dn); 513 - if (phydev) { 514 - phy_device_free(phydev); 515 - fixed_phy_unregister(phydev); 516 - } 517 - } 507 + if (of_phy_is_fixed_link(port_dn)) 508 + of_phy_deregister_fixed_link(port_dn); 518 509 } 519 510 520 511 static void dsa_switch_destroy(struct dsa_switch *ds)
+3 -1
net/dsa/dsa2.c
··· 28 28 struct dsa_switch_tree *dst; 29 29 30 30 list_for_each_entry(dst, &dsa_switch_trees, list) 31 - if (dst->tree == tree) 31 + if (dst->tree == tree) { 32 + kref_get(&dst->refcount); 32 33 return dst; 34 + } 33 35 return NULL; 34 36 } 35 37
+16 -3
net/dsa/slave.c
··· 1127 1127 p->phy_interface = mode; 1128 1128 1129 1129 phy_dn = of_parse_phandle(port_dn, "phy-handle", 0); 1130 - if (of_phy_is_fixed_link(port_dn)) { 1130 + if (!phy_dn && of_phy_is_fixed_link(port_dn)) { 1131 1131 /* In the case of a fixed PHY, the DT node associated 1132 1132 * to the fixed PHY is the Port DT node 1133 1133 */ ··· 1137 1137 return ret; 1138 1138 } 1139 1139 phy_is_fixed = true; 1140 - phy_dn = port_dn; 1140 + phy_dn = of_node_get(port_dn); 1141 1141 } 1142 1142 1143 1143 if (ds->ops->get_phy_flags) ··· 1156 1156 ret = dsa_slave_phy_connect(p, slave_dev, phy_id); 1157 1157 if (ret) { 1158 1158 netdev_err(slave_dev, "failed to connect to phy%d: %d\n", phy_id, ret); 1159 + of_node_put(phy_dn); 1159 1160 return ret; 1160 1161 } 1161 1162 } else { ··· 1165 1164 phy_flags, 1166 1165 p->phy_interface); 1167 1166 } 1167 + 1168 + of_node_put(phy_dn); 1168 1169 } 1169 1170 1170 1171 if (p->phy && phy_is_fixed) ··· 1179 1176 ret = dsa_slave_phy_connect(p, slave_dev, p->port); 1180 1177 if (ret) { 1181 1178 netdev_err(slave_dev, "failed to connect to port %d: %d\n", p->port, ret); 1179 + if (phy_is_fixed) 1180 + of_phy_deregister_fixed_link(port_dn); 1182 1181 return ret; 1183 1182 } 1184 1183 } ··· 1298 1293 void dsa_slave_destroy(struct net_device *slave_dev) 1299 1294 { 1300 1295 struct dsa_slave_priv *p = netdev_priv(slave_dev); 1296 + struct dsa_switch *ds = p->parent; 1297 + struct device_node *port_dn; 1298 + 1299 + port_dn = ds->ports[p->port].dn; 1301 1300 1302 1301 netif_carrier_off(slave_dev); 1303 - if (p->phy) 1302 + if (p->phy) { 1304 1303 phy_disconnect(p->phy); 1304 + 1305 + if (of_phy_is_fixed_link(port_dn)) 1306 + of_phy_deregister_fixed_link(port_dn); 1307 + } 1305 1308 unregister_netdev(slave_dev); 1306 1309 free_netdev(slave_dev); 1307 1310 }
+1
net/ipv4/Kconfig
··· 723 723 default "reno" if DEFAULT_RENO 724 724 default "dctcp" if DEFAULT_DCTCP 725 725 default "cdg" if DEFAULT_CDG 726 + default "bbr" if DEFAULT_BBR 726 727 default "cubic" 727 728 728 729 config TCP_MD5SIG
+1 -1
net/ipv4/af_inet.c
··· 1243 1243 fixedid = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID); 1244 1244 1245 1245 /* fixed ID is invalid if DF bit is not set */ 1246 - if (fixedid && !(iph->frag_off & htons(IP_DF))) 1246 + if (fixedid && !(ip_hdr(skb)->frag_off & htons(IP_DF))) 1247 1247 goto out; 1248 1248 } 1249 1249
+1 -1
net/ipv4/esp4.c
··· 476 476 esph = (void *)skb_push(skb, 4); 477 477 *seqhi = esph->spi; 478 478 esph->spi = esph->seq_no; 479 - esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.input.hi); 479 + esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi; 480 480 aead_request_set_callback(req, 0, esp_input_done_esn, skb); 481 481 } 482 482
+2
net/ipv4/ip_output.c
··· 108 108 if (unlikely(!skb)) 109 109 return 0; 110 110 111 + skb->protocol = htons(ETH_P_IP); 112 + 111 113 return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, 112 114 net, sk, skb, NULL, skb_dst(skb)->dev, 113 115 dst_output);
+4 -1
net/ipv4/netfilter.c
··· 24 24 struct flowi4 fl4 = {}; 25 25 __be32 saddr = iph->saddr; 26 26 __u8 flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0; 27 + struct net_device *dev = skb_dst(skb)->dev; 27 28 unsigned int hh_len; 28 29 29 30 if (addr_type == RTN_UNSPEC) 30 - addr_type = inet_addr_type(net, saddr); 31 + addr_type = inet_addr_type_dev_table(net, dev, saddr); 31 32 if (addr_type == RTN_LOCAL || addr_type == RTN_UNICAST) 32 33 flags |= FLOWI_FLAG_ANYSRC; 33 34 else ··· 41 40 fl4.saddr = saddr; 42 41 fl4.flowi4_tos = RT_TOS(iph->tos); 43 42 fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0; 43 + if (!fl4.flowi4_oif) 44 + fl4.flowi4_oif = l3mdev_master_ifindex(dev); 44 45 fl4.flowi4_mark = skb->mark; 45 46 fl4.flowi4_flags = flags; 46 47 rt = ip_route_output_key(net, &fl4);
+2 -2
net/ipv4/netfilter/arp_tables.c
··· 1197 1197 1198 1198 newinfo->number = compatr->num_entries; 1199 1199 for (i = 0; i < NF_ARP_NUMHOOKS; i++) { 1200 - newinfo->hook_entry[i] = info->hook_entry[i]; 1201 - newinfo->underflow[i] = info->underflow[i]; 1200 + newinfo->hook_entry[i] = compatr->hook_entry[i]; 1201 + newinfo->underflow[i] = compatr->underflow[i]; 1202 1202 } 1203 1203 entry1 = newinfo->entries; 1204 1204 pos = entry1;
+3 -1
net/ipv6/datagram.c
··· 140 140 } 141 141 EXPORT_SYMBOL_GPL(ip6_datagram_release_cb); 142 142 143 - static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 143 + int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, 144 + int addr_len) 144 145 { 145 146 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; 146 147 struct inet_sock *inet = inet_sk(sk); ··· 254 253 out: 255 254 return err; 256 255 } 256 + EXPORT_SYMBOL_GPL(__ip6_datagram_connect); 257 257 258 258 int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 259 259 {
+1 -1
net/ipv6/esp6.c
··· 418 418 esph = (void *)skb_push(skb, 4); 419 419 *seqhi = esph->spi; 420 420 esph->spi = esph->seq_no; 421 - esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.input.hi); 421 + esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi; 422 422 aead_request_set_callback(req, 0, esp_input_done_esn, skb); 423 423 } 424 424
+4 -2
net/ipv6/icmp.c
··· 448 448 449 449 if (__ipv6_addr_needs_scope_id(addr_type)) 450 450 iif = skb->dev->ifindex; 451 - else 452 - iif = l3mdev_master_ifindex(skb_dst(skb)->dev); 451 + else { 452 + dst = skb_dst(skb); 453 + iif = l3mdev_master_ifindex(dst ? dst->dev : skb->dev); 454 + } 453 455 454 456 /* 455 457 * Must not send error if the source does not uniquely
+1 -1
net/ipv6/ip6_offload.c
··· 99 99 segs = ops->callbacks.gso_segment(skb, features); 100 100 } 101 101 102 - if (IS_ERR(segs)) 102 + if (IS_ERR_OR_NULL(segs)) 103 103 goto out; 104 104 105 105 gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);
-1
net/ipv6/ip6_tunnel.c
··· 1181 1181 if (err) 1182 1182 return err; 1183 1183 1184 - skb->protocol = htons(ETH_P_IPV6); 1185 1184 skb_push(skb, sizeof(struct ipv6hdr)); 1186 1185 skb_reset_network_header(skb); 1187 1186 ipv6h = ipv6_hdr(skb);
+31
net/ipv6/ip6_vti.c
··· 1122 1122 .priority = 100, 1123 1123 }; 1124 1124 1125 + static bool is_vti6_tunnel(const struct net_device *dev) 1126 + { 1127 + return dev->netdev_ops == &vti6_netdev_ops; 1128 + } 1129 + 1130 + static int vti6_device_event(struct notifier_block *unused, 1131 + unsigned long event, void *ptr) 1132 + { 1133 + struct net_device *dev = netdev_notifier_info_to_dev(ptr); 1134 + struct ip6_tnl *t = netdev_priv(dev); 1135 + 1136 + if (!is_vti6_tunnel(dev)) 1137 + return NOTIFY_DONE; 1138 + 1139 + switch (event) { 1140 + case NETDEV_DOWN: 1141 + if (!net_eq(t->net, dev_net(dev))) 1142 + xfrm_garbage_collect(t->net); 1143 + break; 1144 + } 1145 + return NOTIFY_DONE; 1146 + } 1147 + 1148 + static struct notifier_block vti6_notifier_block __read_mostly = { 1149 + .notifier_call = vti6_device_event, 1150 + }; 1151 + 1125 1152 /** 1126 1153 * vti6_tunnel_init - register protocol and reserve needed resources 1127 1154 * ··· 1158 1131 { 1159 1132 const char *msg; 1160 1133 int err; 1134 + 1135 + register_netdevice_notifier(&vti6_notifier_block); 1161 1136 1162 1137 msg = "tunnel device"; 1163 1138 err = register_pernet_device(&vti6_net_ops); ··· 1193 1164 xfrm_proto_esp_failed: 1194 1165 unregister_pernet_device(&vti6_net_ops); 1195 1166 pernet_dev_failed: 1167 + unregister_netdevice_notifier(&vti6_notifier_block); 1196 1168 pr_err("vti6 init: failed to register %s\n", msg); 1197 1169 return err; 1198 1170 } ··· 1208 1178 xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH); 1209 1179 xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP); 1210 1180 unregister_pernet_device(&vti6_net_ops); 1181 + unregister_netdevice_notifier(&vti6_notifier_block); 1211 1182 } 1212 1183 1213 1184 module_init(vti6_tunnel_init);
+2 -2
net/ipv6/netfilter/nf_conntrack_reasm.c
··· 576 576 /* Jumbo payload inhibits frag. header */ 577 577 if (ipv6_hdr(skb)->payload_len == 0) { 578 578 pr_debug("payload len = 0\n"); 579 - return -EINVAL; 579 + return 0; 580 580 } 581 581 582 582 if (find_prev_fhdr(skb, &prevhdr, &nhoff, &fhoff) < 0) 583 - return -EINVAL; 583 + return 0; 584 584 585 585 if (!pskb_may_pull(skb, fhoff + sizeof(*fhdr))) 586 586 return -ENOMEM;
+1 -1
net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
··· 69 69 if (err == -EINPROGRESS) 70 70 return NF_STOLEN; 71 71 72 - return NF_ACCEPT; 72 + return err == 0 ? NF_ACCEPT : NF_DROP; 73 73 } 74 74 75 75 static struct nf_hook_ops ipv6_defrag_ops[] = {
+1
net/ipv6/netfilter/nf_reject_ipv6.c
··· 156 156 fl6.daddr = oip6h->saddr; 157 157 fl6.fl6_sport = otcph->dest; 158 158 fl6.fl6_dport = otcph->source; 159 + fl6.flowi6_oif = l3mdev_master_ifindex(skb_dst(oldskb)->dev); 159 160 security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6)); 160 161 dst = ip6_route_output(net, NULL, &fl6); 161 162 if (dst->error) {
+2
net/ipv6/output_core.c
··· 155 155 if (unlikely(!skb)) 156 156 return 0; 157 157 158 + skb->protocol = htons(ETH_P_IPV6); 159 + 158 160 return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, 159 161 net, sk, skb, NULL, skb_dst(skb)->dev, 160 162 dst_output);
+35 -30
net/l2tp/l2tp_ip.c
··· 61 61 if ((l2tp->conn_id == tunnel_id) && 62 62 net_eq(sock_net(sk), net) && 63 63 !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) && 64 - !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)) 64 + (!sk->sk_bound_dev_if || !dif || 65 + sk->sk_bound_dev_if == dif)) 65 66 goto found; 66 67 } 67 68 ··· 183 182 struct iphdr *iph = (struct iphdr *) skb_network_header(skb); 184 183 185 184 read_lock_bh(&l2tp_ip_lock); 186 - sk = __l2tp_ip_bind_lookup(net, iph->daddr, 0, tunnel_id); 185 + sk = __l2tp_ip_bind_lookup(net, iph->daddr, inet_iif(skb), 186 + tunnel_id); 187 + if (!sk) { 188 + read_unlock_bh(&l2tp_ip_lock); 189 + goto discard; 190 + } 191 + 192 + sock_hold(sk); 187 193 read_unlock_bh(&l2tp_ip_lock); 188 194 } 189 - 190 - if (sk == NULL) 191 - goto discard; 192 - 193 - sock_hold(sk); 194 195 195 196 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) 196 197 goto discard_put; ··· 259 256 if (addr->l2tp_family != AF_INET) 260 257 return -EINVAL; 261 258 262 - ret = -EADDRINUSE; 263 - read_lock_bh(&l2tp_ip_lock); 264 - if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr, 265 - sk->sk_bound_dev_if, addr->l2tp_conn_id)) 266 - goto out_in_use; 267 - 268 - read_unlock_bh(&l2tp_ip_lock); 269 - 270 259 lock_sock(sk); 260 + 261 + ret = -EINVAL; 271 262 if (!sock_flag(sk, SOCK_ZAPPED)) 272 263 goto out; 273 264 ··· 278 281 inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr; 279 282 if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST) 280 283 inet->inet_saddr = 0; /* Use device */ 281 - sk_dst_reset(sk); 282 - 283 - l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id; 284 284 285 285 write_lock_bh(&l2tp_ip_lock); 286 + if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr, 287 + sk->sk_bound_dev_if, addr->l2tp_conn_id)) { 288 + write_unlock_bh(&l2tp_ip_lock); 289 + ret = -EADDRINUSE; 290 + goto out; 291 + } 292 + 293 + sk_dst_reset(sk); 294 + l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id; 295 + 286 296 sk_add_bind_node(sk, 
&l2tp_ip_bind_table); 287 297 sk_del_node_init(sk); 288 298 write_unlock_bh(&l2tp_ip_lock); 299 + 289 300 ret = 0; 290 301 sock_reset_flag(sk, SOCK_ZAPPED); 291 302 292 303 out: 293 304 release_sock(sk); 294 - 295 - return ret; 296 - 297 - out_in_use: 298 - read_unlock_bh(&l2tp_ip_lock); 299 305 300 306 return ret; 301 307 } ··· 308 308 struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr; 309 309 int rc; 310 310 311 - if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */ 312 - return -EINVAL; 313 - 314 311 if (addr_len < sizeof(*lsa)) 315 312 return -EINVAL; 316 313 317 314 if (ipv4_is_multicast(lsa->l2tp_addr.s_addr)) 318 315 return -EINVAL; 319 316 320 - rc = ip4_datagram_connect(sk, uaddr, addr_len); 321 - if (rc < 0) 322 - return rc; 323 - 324 317 lock_sock(sk); 318 + 319 + /* Must bind first - autobinding does not work */ 320 + if (sock_flag(sk, SOCK_ZAPPED)) { 321 + rc = -EINVAL; 322 + goto out_sk; 323 + } 324 + 325 + rc = __ip4_datagram_connect(sk, uaddr, addr_len); 326 + if (rc < 0) 327 + goto out_sk; 325 328 326 329 l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id; 327 330 ··· 333 330 sk_add_bind_node(sk, &l2tp_ip_bind_table); 334 331 write_unlock_bh(&l2tp_ip_lock); 335 332 333 + out_sk: 336 334 release_sock(sk); 335 + 337 336 return rc; 338 337 } 339 338
+42 -37
net/l2tp/l2tp_ip6.c
··· 72 72 73 73 if ((l2tp->conn_id == tunnel_id) && 74 74 net_eq(sock_net(sk), net) && 75 - !(addr && ipv6_addr_equal(addr, laddr)) && 76 - !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)) 75 + (!addr || ipv6_addr_equal(addr, laddr)) && 76 + (!sk->sk_bound_dev_if || !dif || 77 + sk->sk_bound_dev_if == dif)) 77 78 goto found; 78 79 } 79 80 ··· 197 196 struct ipv6hdr *iph = ipv6_hdr(skb); 198 197 199 198 read_lock_bh(&l2tp_ip6_lock); 200 - sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, 201 - 0, tunnel_id); 199 + sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, inet6_iif(skb), 200 + tunnel_id); 201 + if (!sk) { 202 + read_unlock_bh(&l2tp_ip6_lock); 203 + goto discard; 204 + } 205 + 206 + sock_hold(sk); 202 207 read_unlock_bh(&l2tp_ip6_lock); 203 208 } 204 - 205 - if (sk == NULL) 206 - goto discard; 207 - 208 - sock_hold(sk); 209 209 210 210 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) 211 211 goto discard_put; ··· 268 266 struct sockaddr_l2tpip6 *addr = (struct sockaddr_l2tpip6 *) uaddr; 269 267 struct net *net = sock_net(sk); 270 268 __be32 v4addr = 0; 269 + int bound_dev_if; 271 270 int addr_type; 272 271 int err; 273 272 ··· 287 284 if (addr_type & IPV6_ADDR_MULTICAST) 288 285 return -EADDRNOTAVAIL; 289 286 290 - err = -EADDRINUSE; 291 - read_lock_bh(&l2tp_ip6_lock); 292 - if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, 293 - sk->sk_bound_dev_if, addr->l2tp_conn_id)) 294 - goto out_in_use; 295 - read_unlock_bh(&l2tp_ip6_lock); 296 - 297 287 lock_sock(sk); 298 288 299 289 err = -EINVAL; ··· 296 300 if (sk->sk_state != TCP_CLOSE) 297 301 goto out_unlock; 298 302 303 + bound_dev_if = sk->sk_bound_dev_if; 304 + 299 305 /* Check if the address belongs to the host. 
*/ 300 306 rcu_read_lock(); 301 307 if (addr_type != IPV6_ADDR_ANY) { 302 308 struct net_device *dev = NULL; 303 309 304 310 if (addr_type & IPV6_ADDR_LINKLOCAL) { 305 - if (addr_len >= sizeof(struct sockaddr_in6) && 306 - addr->l2tp_scope_id) { 307 - /* Override any existing binding, if another 308 - * one is supplied by user. 309 - */ 310 - sk->sk_bound_dev_if = addr->l2tp_scope_id; 311 - } 311 + if (addr->l2tp_scope_id) 312 + bound_dev_if = addr->l2tp_scope_id; 312 313 313 314 /* Binding to link-local address requires an 314 - interface */ 315 - if (!sk->sk_bound_dev_if) 315 + * interface. 316 + */ 317 + if (!bound_dev_if) 316 318 goto out_unlock_rcu; 317 319 318 320 err = -ENODEV; 319 - dev = dev_get_by_index_rcu(sock_net(sk), 320 - sk->sk_bound_dev_if); 321 + dev = dev_get_by_index_rcu(sock_net(sk), bound_dev_if); 321 322 if (!dev) 322 323 goto out_unlock_rcu; 323 324 } ··· 329 336 } 330 337 rcu_read_unlock(); 331 338 332 - inet->inet_rcv_saddr = inet->inet_saddr = v4addr; 339 + write_lock_bh(&l2tp_ip6_lock); 340 + if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, bound_dev_if, 341 + addr->l2tp_conn_id)) { 342 + write_unlock_bh(&l2tp_ip6_lock); 343 + err = -EADDRINUSE; 344 + goto out_unlock; 345 + } 346 + 347 + inet->inet_saddr = v4addr; 348 + inet->inet_rcv_saddr = v4addr; 349 + sk->sk_bound_dev_if = bound_dev_if; 333 350 sk->sk_v6_rcv_saddr = addr->l2tp_addr; 334 351 np->saddr = addr->l2tp_addr; 335 352 336 353 l2tp_ip6_sk(sk)->conn_id = addr->l2tp_conn_id; 337 354 338 - write_lock_bh(&l2tp_ip6_lock); 339 355 sk_add_bind_node(sk, &l2tp_ip6_bind_table); 340 356 sk_del_node_init(sk); 341 357 write_unlock_bh(&l2tp_ip6_lock); ··· 357 355 rcu_read_unlock(); 358 356 out_unlock: 359 357 release_sock(sk); 360 - return err; 361 358 362 - out_in_use: 363 - read_unlock_bh(&l2tp_ip6_lock); 364 359 return err; 365 360 } 366 361 ··· 369 370 struct in6_addr *daddr; 370 371 int addr_type; 371 372 int rc; 372 - 373 - if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - 
autobinding does not work */ 374 - return -EINVAL; 375 373 376 374 if (addr_len < sizeof(*lsa)) 377 375 return -EINVAL; ··· 386 390 return -EINVAL; 387 391 } 388 392 389 - rc = ip6_datagram_connect(sk, uaddr, addr_len); 390 - 391 393 lock_sock(sk); 394 + 395 + /* Must bind first - autobinding does not work */ 396 + if (sock_flag(sk, SOCK_ZAPPED)) { 397 + rc = -EINVAL; 398 + goto out_sk; 399 + } 400 + 401 + rc = __ip6_datagram_connect(sk, uaddr, addr_len); 402 + if (rc < 0) 403 + goto out_sk; 392 404 393 405 l2tp_ip6_sk(sk)->peer_conn_id = lsa->l2tp_conn_id; 394 406 ··· 405 401 sk_add_bind_node(sk, &l2tp_ip6_bind_table); 406 402 write_unlock_bh(&l2tp_ip6_lock); 407 403 404 + out_sk: 408 405 release_sock(sk); 409 406 410 407 return rc;
+30 -19
net/netfilter/nf_nat_core.c
··· 42 42 const struct nf_conntrack_zone *zone; 43 43 }; 44 44 45 - static struct rhashtable nf_nat_bysource_table; 45 + static struct rhltable nf_nat_bysource_table; 46 46 47 47 inline const struct nf_nat_l3proto * 48 48 __nf_nat_l3proto_find(u8 family) ··· 193 193 const struct nf_nat_conn_key *key = arg->key; 194 194 const struct nf_conn *ct = obj; 195 195 196 - return same_src(ct, key->tuple) && 197 - net_eq(nf_ct_net(ct), key->net) && 198 - nf_ct_zone_equal(ct, key->zone, IP_CT_DIR_ORIGINAL); 196 + if (!same_src(ct, key->tuple) || 197 + !net_eq(nf_ct_net(ct), key->net) || 198 + !nf_ct_zone_equal(ct, key->zone, IP_CT_DIR_ORIGINAL)) 199 + return 1; 200 + 201 + return 0; 199 202 } 200 203 201 204 static struct rhashtable_params nf_nat_bysource_params = { ··· 207 204 .obj_cmpfn = nf_nat_bysource_cmp, 208 205 .nelem_hint = 256, 209 206 .min_size = 1024, 210 - .nulls_base = (1U << RHT_BASE_SHIFT), 211 207 }; 212 208 213 209 /* Only called for SRC manip */ ··· 225 223 .tuple = tuple, 226 224 .zone = zone 227 225 }; 226 + struct rhlist_head *hl; 228 227 229 - ct = rhashtable_lookup_fast(&nf_nat_bysource_table, &key, 230 - nf_nat_bysource_params); 231 - if (!ct) 228 + hl = rhltable_lookup(&nf_nat_bysource_table, &key, 229 + nf_nat_bysource_params); 230 + if (!hl) 232 231 return 0; 232 + 233 + ct = container_of(hl, typeof(*ct), nat_bysource); 233 234 234 235 nf_ct_invert_tuplepr(result, 235 236 &ct->tuplehash[IP_CT_DIR_REPLY].tuple); ··· 451 446 } 452 447 453 448 if (maniptype == NF_NAT_MANIP_SRC) { 449 + struct nf_nat_conn_key key = { 450 + .net = nf_ct_net(ct), 451 + .tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, 452 + .zone = nf_ct_zone(ct), 453 + }; 454 454 int err; 455 455 456 - err = rhashtable_insert_fast(&nf_nat_bysource_table, 457 - &ct->nat_bysource, 458 - nf_nat_bysource_params); 456 + err = rhltable_insert_key(&nf_nat_bysource_table, 457 + &key, 458 + &ct->nat_bysource, 459 + nf_nat_bysource_params); 459 460 if (err) 460 461 return NF_DROP; 461 462 } ··· 
578 567 * will delete entry from already-freed table. 579 568 */ 580 569 ct->status &= ~IPS_NAT_DONE_MASK; 581 - rhashtable_remove_fast(&nf_nat_bysource_table, &ct->nat_bysource, 582 - nf_nat_bysource_params); 570 + rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource, 571 + nf_nat_bysource_params); 583 572 584 573 /* don't delete conntrack. Although that would make things a lot 585 574 * simpler, we'd end up flushing all conntracks on nat rmmod. ··· 709 698 if (!nat) 710 699 return; 711 700 712 - rhashtable_remove_fast(&nf_nat_bysource_table, &ct->nat_bysource, 713 - nf_nat_bysource_params); 701 + rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource, 702 + nf_nat_bysource_params); 714 703 } 715 704 716 705 static struct nf_ct_ext_type nat_extend __read_mostly = { ··· 845 834 { 846 835 int ret; 847 836 848 - ret = rhashtable_init(&nf_nat_bysource_table, &nf_nat_bysource_params); 837 + ret = rhltable_init(&nf_nat_bysource_table, &nf_nat_bysource_params); 849 838 if (ret) 850 839 return ret; 851 840 852 841 ret = nf_ct_extend_register(&nat_extend); 853 842 if (ret < 0) { 854 - rhashtable_destroy(&nf_nat_bysource_table); 843 + rhltable_destroy(&nf_nat_bysource_table); 855 844 printk(KERN_ERR "nf_nat_core: Unable to register extension\n"); 856 845 return ret; 857 846 } ··· 875 864 return 0; 876 865 877 866 cleanup_extend: 878 - rhashtable_destroy(&nf_nat_bysource_table); 867 + rhltable_destroy(&nf_nat_bysource_table); 879 868 nf_ct_extend_unregister(&nat_extend); 880 869 return ret; 881 870 } ··· 894 883 for (i = 0; i < NFPROTO_NUMPROTO; i++) 895 884 kfree(nf_nat_l4protos[i]); 896 885 897 - rhashtable_destroy(&nf_nat_bysource_table); 886 + rhltable_destroy(&nf_nat_bysource_table); 898 887 } 899 888 900 889 MODULE_LICENSE("GPL");
+9 -5
net/netfilter/nf_tables_api.c
··· 2570 2570 } 2571 2571 2572 2572 if (set->timeout && 2573 - nla_put_be64(skb, NFTA_SET_TIMEOUT, cpu_to_be64(set->timeout), 2573 + nla_put_be64(skb, NFTA_SET_TIMEOUT, 2574 + cpu_to_be64(jiffies_to_msecs(set->timeout)), 2574 2575 NFTA_SET_PAD)) 2575 2576 goto nla_put_failure; 2576 2577 if (set->gc_int && ··· 2860 2859 if (nla[NFTA_SET_TIMEOUT] != NULL) { 2861 2860 if (!(flags & NFT_SET_TIMEOUT)) 2862 2861 return -EINVAL; 2863 - timeout = be64_to_cpu(nla_get_be64(nla[NFTA_SET_TIMEOUT])); 2862 + timeout = msecs_to_jiffies(be64_to_cpu(nla_get_be64( 2863 + nla[NFTA_SET_TIMEOUT]))); 2864 2864 } 2865 2865 gc_int = 0; 2866 2866 if (nla[NFTA_SET_GC_INTERVAL] != NULL) { ··· 3180 3178 3181 3179 if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT) && 3182 3180 nla_put_be64(skb, NFTA_SET_ELEM_TIMEOUT, 3183 - cpu_to_be64(*nft_set_ext_timeout(ext)), 3181 + cpu_to_be64(jiffies_to_msecs( 3182 + *nft_set_ext_timeout(ext))), 3184 3183 NFTA_SET_ELEM_PAD)) 3185 3184 goto nla_put_failure; 3186 3185 ··· 3450 3447 memcpy(nft_set_ext_data(ext), data, set->dlen); 3451 3448 if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION)) 3452 3449 *nft_set_ext_expiration(ext) = 3453 - jiffies + msecs_to_jiffies(timeout); 3450 + jiffies + timeout; 3454 3451 if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT)) 3455 3452 *nft_set_ext_timeout(ext) = timeout; 3456 3453 ··· 3538 3535 if (nla[NFTA_SET_ELEM_TIMEOUT] != NULL) { 3539 3536 if (!(set->flags & NFT_SET_TIMEOUT)) 3540 3537 return -EINVAL; 3541 - timeout = be64_to_cpu(nla_get_be64(nla[NFTA_SET_ELEM_TIMEOUT])); 3538 + timeout = msecs_to_jiffies(be64_to_cpu(nla_get_be64( 3539 + nla[NFTA_SET_ELEM_TIMEOUT]))); 3542 3540 } else if (set->flags & NFT_SET_TIMEOUT) { 3543 3541 timeout = set->timeout; 3544 3542 }
+5 -2
net/netfilter/nft_hash.c
··· 53 53 { 54 54 struct nft_hash *priv = nft_expr_priv(expr); 55 55 u32 len; 56 + int err; 56 57 57 58 if (!tb[NFTA_HASH_SREG] || 58 59 !tb[NFTA_HASH_DREG] || ··· 67 66 priv->sreg = nft_parse_register(tb[NFTA_HASH_SREG]); 68 67 priv->dreg = nft_parse_register(tb[NFTA_HASH_DREG]); 69 68 70 - len = ntohl(nla_get_be32(tb[NFTA_HASH_LEN])); 71 - if (len == 0 || len > U8_MAX) 69 + err = nft_parse_u32_check(tb[NFTA_HASH_LEN], U8_MAX, &len); 70 + if (err < 0) 71 + return err; 72 + if (len == 0) 72 73 return -ERANGE; 73 74 74 75 priv->len = len;
+6
net/netfilter/nft_range.c
··· 59 59 int err; 60 60 u32 op; 61 61 62 + if (!tb[NFTA_RANGE_SREG] || 63 + !tb[NFTA_RANGE_OP] || 64 + !tb[NFTA_RANGE_FROM_DATA] || 65 + !tb[NFTA_RANGE_TO_DATA]) 66 + return -EINVAL; 67 + 62 68 err = nft_data_init(NULL, &priv->data_from, sizeof(priv->data_from), 63 69 &desc_from, tb[NFTA_RANGE_FROM_DATA]); 64 70 if (err < 0)
+23 -4
net/netlink/af_netlink.c
··· 322 322 sk_mem_charge(sk, skb->truesize); 323 323 } 324 324 325 - static void netlink_sock_destruct(struct sock *sk) 325 + static void __netlink_sock_destruct(struct sock *sk) 326 326 { 327 327 struct netlink_sock *nlk = nlk_sk(sk); 328 328 329 329 if (nlk->cb_running) { 330 - if (nlk->cb.done) 331 - nlk->cb.done(&nlk->cb); 332 - 333 330 module_put(nlk->cb.module); 334 331 kfree_skb(nlk->cb.skb); 335 332 } ··· 341 344 WARN_ON(atomic_read(&sk->sk_rmem_alloc)); 342 345 WARN_ON(atomic_read(&sk->sk_wmem_alloc)); 343 346 WARN_ON(nlk_sk(sk)->groups); 347 + } 348 + 349 + static void netlink_sock_destruct_work(struct work_struct *work) 350 + { 351 + struct netlink_sock *nlk = container_of(work, struct netlink_sock, 352 + work); 353 + 354 + nlk->cb.done(&nlk->cb); 355 + __netlink_sock_destruct(&nlk->sk); 356 + } 357 + 358 + static void netlink_sock_destruct(struct sock *sk) 359 + { 360 + struct netlink_sock *nlk = nlk_sk(sk); 361 + 362 + if (nlk->cb_running && nlk->cb.done) { 363 + INIT_WORK(&nlk->work, netlink_sock_destruct_work); 364 + schedule_work(&nlk->work); 365 + return; 366 + } 367 + 368 + __netlink_sock_destruct(sk); 344 369 } 345 370 346 371 /* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
+2
net/netlink/af_netlink.h
··· 3 3 4 4 #include <linux/rhashtable.h> 5 5 #include <linux/atomic.h> 6 + #include <linux/workqueue.h> 6 7 #include <net/sock.h> 7 8 8 9 #define NLGRPSZ(x) (ALIGN(x, sizeof(unsigned long) * 8) / 8) ··· 34 33 35 34 struct rhash_head node; 36 35 struct rcu_head rcu; 36 + struct work_struct work; 37 37 }; 38 38 39 39 static inline struct netlink_sock *nlk_sk(struct sock *sk)
+4 -1
net/openvswitch/conntrack.c
··· 370 370 skb_orphan(skb); 371 371 memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm)); 372 372 err = nf_ct_frag6_gather(net, skb, user); 373 - if (err) 373 + if (err) { 374 + if (err != -EINPROGRESS) 375 + kfree_skb(skb); 374 376 return err; 377 + } 375 378 376 379 key->ip.proto = ipv6_hdr(skb)->nexthdr; 377 380 ovs_cb.mru = IP6CB(skb)->frag_max_size;
+12 -6
net/packet/af_packet.c
··· 3593 3593 3594 3594 if (optlen != sizeof(val)) 3595 3595 return -EINVAL; 3596 - if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) 3597 - return -EBUSY; 3598 3596 if (copy_from_user(&val, optval, sizeof(val))) 3599 3597 return -EFAULT; 3600 3598 switch (val) { 3601 3599 case TPACKET_V1: 3602 3600 case TPACKET_V2: 3603 3601 case TPACKET_V3: 3604 - po->tp_version = val; 3605 - return 0; 3602 + break; 3606 3603 default: 3607 3604 return -EINVAL; 3608 3605 } 3606 + lock_sock(sk); 3607 + if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { 3608 + ret = -EBUSY; 3609 + } else { 3610 + po->tp_version = val; 3611 + ret = 0; 3612 + } 3613 + release_sock(sk); 3614 + return ret; 3609 3615 } 3610 3616 case PACKET_RESERVE: 3611 3617 { ··· 4115 4109 /* Added to avoid minimal code churn */ 4116 4110 struct tpacket_req *req = &req_u->req; 4117 4111 4112 + lock_sock(sk); 4118 4113 /* Opening a Tx-ring is NOT supported in TPACKET_V3 */ 4119 4114 if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) { 4120 4115 net_warn_ratelimited("Tx-ring is not supported.\n"); ··· 4197 4190 goto out; 4198 4191 } 4199 4192 4200 - lock_sock(sk); 4201 4193 4202 4194 /* Detach socket from network */ 4203 4195 spin_lock(&po->bind_lock); ··· 4245 4239 if (!tx_ring) 4246 4240 prb_shutdown_retire_blk_timer(po, rb_queue); 4247 4241 } 4248 - release_sock(sk); 4249 4242 4250 4243 if (pg_vec) 4251 4244 free_pg_vec(pg_vec, order, req->tp_block_nr); 4252 4245 out: 4246 + release_sock(sk); 4253 4247 return err; 4254 4248 } 4255 4249
+2
net/rds/tcp.c
··· 665 665 out_pernet: 666 666 unregister_pernet_subsys(&rds_tcp_net_ops); 667 667 out_slab: 668 + if (unregister_netdevice_notifier(&rds_tcp_dev_notifier)) 669 + pr_warn("could not unregister rds_tcp_dev_notifier\n"); 668 670 kmem_cache_destroy(rds_tcp_conn_slab); 669 671 out: 670 672 return ret;
+20 -4
net/sched/act_pedit.c
··· 108 108 kfree(keys); 109 109 } 110 110 111 + static bool offset_valid(struct sk_buff *skb, int offset) 112 + { 113 + if (offset > 0 && offset > skb->len) 114 + return false; 115 + 116 + if (offset < 0 && -offset > skb_headroom(skb)) 117 + return false; 118 + 119 + return true; 120 + } 121 + 111 122 static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a, 112 123 struct tcf_result *res) 113 124 { ··· 145 134 if (tkey->offmask) { 146 135 char *d, _d; 147 136 137 + if (!offset_valid(skb, off + tkey->at)) { 138 + pr_info("tc filter pedit 'at' offset %d out of bounds\n", 139 + off + tkey->at); 140 + goto bad; 141 + } 148 142 d = skb_header_pointer(skb, off + tkey->at, 1, 149 143 &_d); 150 144 if (!d) ··· 162 146 " offset must be on 32 bit boundaries\n"); 163 147 goto bad; 164 148 } 165 - if (offset > 0 && offset > skb->len) { 166 - pr_info("tc filter pedit" 167 - " offset %d can't exceed pkt length %d\n", 168 - offset, skb->len); 149 + 150 + if (!offset_valid(skb, off + offset)) { 151 + pr_info("tc filter pedit offset %d out of bounds\n", 152 + offset); 169 153 goto bad; 170 154 } 171 155
-4
net/sched/cls_basic.c
··· 62 62 struct basic_head *head = rtnl_dereference(tp->root); 63 63 struct basic_filter *f; 64 64 65 - if (head == NULL) 66 - return 0UL; 67 - 68 65 list_for_each_entry(f, &head->flist, link) { 69 66 if (f->handle == handle) { 70 67 l = (unsigned long) f; ··· 106 109 tcf_unbind_filter(tp, &f->res); 107 110 call_rcu(&f->rcu, basic_delete_filter); 108 111 } 109 - RCU_INIT_POINTER(tp->root, NULL); 110 112 kfree_rcu(head, rcu); 111 113 return true; 112 114 }
-4
net/sched/cls_bpf.c
··· 289 289 call_rcu(&prog->rcu, __cls_bpf_delete_prog); 290 290 } 291 291 292 - RCU_INIT_POINTER(tp->root, NULL); 293 292 kfree_rcu(head, rcu); 294 293 return true; 295 294 } ··· 298 299 struct cls_bpf_head *head = rtnl_dereference(tp->root); 299 300 struct cls_bpf_prog *prog; 300 301 unsigned long ret = 0UL; 301 - 302 - if (head == NULL) 303 - return 0UL; 304 302 305 303 list_for_each_entry(prog, &head->plist, link) { 306 304 if (prog->handle == handle) {
+3 -4
net/sched/cls_cgroup.c
··· 137 137 138 138 if (!force) 139 139 return false; 140 - 141 - if (head) { 142 - RCU_INIT_POINTER(tp->root, NULL); 140 + /* Head can still be NULL due to cls_cgroup_init(). */ 141 + if (head) 143 142 call_rcu(&head->rcu, cls_cgroup_destroy_rcu); 144 - } 143 + 145 144 return true; 146 145 } 147 146
-1
net/sched/cls_flow.c
··· 596 596 list_del_rcu(&f->list); 597 597 call_rcu(&f->rcu, flow_destroy_filter); 598 598 } 599 - RCU_INIT_POINTER(tp->root, NULL); 600 599 kfree_rcu(head, rcu); 601 600 return true; 602 601 }
+33 -9
net/sched/cls_flower.c
··· 13 13 #include <linux/init.h> 14 14 #include <linux/module.h> 15 15 #include <linux/rhashtable.h> 16 + #include <linux/workqueue.h> 16 17 17 18 #include <linux/if_ether.h> 18 19 #include <linux/in6.h> ··· 66 65 bool mask_assigned; 67 66 struct list_head filters; 68 67 struct rhashtable_params ht_params; 69 - struct rcu_head rcu; 68 + union { 69 + struct work_struct work; 70 + struct rcu_head rcu; 71 + }; 70 72 }; 71 73 72 74 struct cls_fl_filter { ··· 290 286 call_rcu(&f->rcu, fl_destroy_filter); 291 287 } 292 288 289 + static void fl_destroy_sleepable(struct work_struct *work) 290 + { 291 + struct cls_fl_head *head = container_of(work, struct cls_fl_head, 292 + work); 293 + if (head->mask_assigned) 294 + rhashtable_destroy(&head->ht); 295 + kfree(head); 296 + module_put(THIS_MODULE); 297 + } 298 + 299 + static void fl_destroy_rcu(struct rcu_head *rcu) 300 + { 301 + struct cls_fl_head *head = container_of(rcu, struct cls_fl_head, rcu); 302 + 303 + INIT_WORK(&head->work, fl_destroy_sleepable); 304 + schedule_work(&head->work); 305 + } 306 + 293 307 static bool fl_destroy(struct tcf_proto *tp, bool force) 294 308 { 295 309 struct cls_fl_head *head = rtnl_dereference(tp->root); ··· 318 296 319 297 list_for_each_entry_safe(f, next, &head->filters, list) 320 298 __fl_delete(tp, f); 321 - RCU_INIT_POINTER(tp->root, NULL); 322 - if (head->mask_assigned) 323 - rhashtable_destroy(&head->ht); 324 - kfree_rcu(head, rcu); 299 + 300 + __module_get(THIS_MODULE); 301 + call_rcu(&head->rcu, fl_destroy_rcu); 302 + 325 303 return true; 326 304 } 327 305 ··· 781 759 } 782 760 783 761 if (fold) { 784 - rhashtable_remove_fast(&head->ht, &fold->ht_node, 785 - head->ht_params); 762 + if (!tc_skip_sw(fold->flags)) 763 + rhashtable_remove_fast(&head->ht, &fold->ht_node, 764 + head->ht_params); 786 765 if (!tc_skip_hw(fold->flags)) 787 766 fl_hw_destroy_filter(tp, fold); 788 767 } ··· 811 788 struct cls_fl_head *head = rtnl_dereference(tp->root); 812 789 struct cls_fl_filter *f = 
(struct cls_fl_filter *) arg; 813 790 814 - rhashtable_remove_fast(&head->ht, &f->ht_node, 815 - head->ht_params); 791 + if (!tc_skip_sw(f->flags)) 792 + rhashtable_remove_fast(&head->ht, &f->ht_node, 793 + head->ht_params); 816 794 __fl_delete(tp, f); 817 795 return 0; 818 796 }
-1
net/sched/cls_matchall.c
··· 114 114 115 115 call_rcu(&f->rcu, mall_destroy_filter); 116 116 } 117 - RCU_INIT_POINTER(tp->root, NULL); 118 117 kfree_rcu(head, rcu); 119 118 return true; 120 119 }
+2 -1
net/sched/cls_rsvp.h
··· 152 152 return -1; 153 153 nhptr = ip_hdr(skb); 154 154 #endif 155 - 155 + if (unlikely(!head)) 156 + return -1; 156 157 restart: 157 158 158 159 #if RSVP_DST_LEN == 4
-1
net/sched/cls_tcindex.c
··· 543 543 walker.fn = tcindex_destroy_element; 544 544 tcindex_walk(tp, &walker); 545 545 546 - RCU_INIT_POINTER(tp->root, NULL); 547 546 call_rcu(&p->rcu, __tcindex_destroy); 548 547 return true; 549 548 }
+9 -2
net/tipc/bearer.c
··· 421 421 dev = dev_get_by_name(net, driver_name); 422 422 if (!dev) 423 423 return -ENODEV; 424 + if (tipc_mtu_bad(dev, 0)) { 425 + dev_put(dev); 426 + return -EINVAL; 427 + } 424 428 425 429 /* Associate TIPC bearer with L2 bearer */ 426 430 rcu_assign_pointer(b->media_ptr, dev); ··· 614 610 if (!b) 615 611 return NOTIFY_DONE; 616 612 617 - b->mtu = dev->mtu; 618 - 619 613 switch (evt) { 620 614 case NETDEV_CHANGE: 621 615 if (netif_carrier_ok(dev)) ··· 626 624 tipc_reset_bearer(net, b); 627 625 break; 628 626 case NETDEV_CHANGEMTU: 627 + if (tipc_mtu_bad(dev, 0)) { 628 + bearer_disable(net, b); 629 + break; 630 + } 631 + b->mtu = dev->mtu; 629 632 tipc_reset_bearer(net, b); 630 633 break; 631 634 case NETDEV_CHANGEADDR:
+13
net/tipc/bearer.h
··· 39 39 40 40 #include "netlink.h" 41 41 #include "core.h" 42 + #include "msg.h" 42 43 #include <net/genetlink.h> 43 44 44 45 #define MAX_MEDIA 3 ··· 59 58 #define TIPC_MEDIA_TYPE_ETH 1 60 59 #define TIPC_MEDIA_TYPE_IB 2 61 60 #define TIPC_MEDIA_TYPE_UDP 3 61 + 62 + /* minimum bearer MTU */ 63 + #define TIPC_MIN_BEARER_MTU (MAX_H_SIZE + INT_H_SIZE) 62 64 63 65 /** 64 66 * struct tipc_media_addr - destination address used by TIPC bearers ··· 218 214 struct tipc_media_addr *dst); 219 215 void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id, 220 216 struct sk_buff_head *xmitq); 217 + 218 + /* check if device MTU is too low for tipc headers */ 219 + static inline bool tipc_mtu_bad(struct net_device *dev, unsigned int reserve) 220 + { 221 + if (dev->mtu >= TIPC_MIN_BEARER_MTU + reserve) 222 + return false; 223 + netdev_warn(dev, "MTU too low for tipc bearer\n"); 224 + return true; 225 + } 221 226 222 227 #endif /* _TIPC_BEARER_H */
+19 -16
net/tipc/link.c
··· 47 47 #include <linux/pkt_sched.h> 48 48 49 49 struct tipc_stats { 50 - u32 sent_info; /* used in counting # sent packets */ 51 - u32 recv_info; /* used in counting # recv'd packets */ 50 + u32 sent_pkts; 51 + u32 recv_pkts; 52 52 u32 sent_states; 53 53 u32 recv_states; 54 54 u32 sent_probes; ··· 857 857 l->acked = 0; 858 858 l->silent_intv_cnt = 0; 859 859 l->rst_cnt = 0; 860 - l->stats.recv_info = 0; 861 860 l->stale_count = 0; 862 861 l->bc_peer_is_up = false; 863 862 memset(&l->mon_state, 0, sizeof(l->mon_state)); ··· 887 888 struct sk_buff_head *transmq = &l->transmq; 888 889 struct sk_buff_head *backlogq = &l->backlogq; 889 890 struct sk_buff *skb, *_skb, *bskb; 891 + int pkt_cnt = skb_queue_len(list); 890 892 891 893 /* Match msg importance against this and all higher backlog limits: */ 892 894 if (!skb_queue_empty(backlogq)) { ··· 899 899 if (unlikely(msg_size(hdr) > mtu)) { 900 900 skb_queue_purge(list); 901 901 return -EMSGSIZE; 902 + } 903 + 904 + if (pkt_cnt > 1) { 905 + l->stats.sent_fragmented++; 906 + l->stats.sent_fragments += pkt_cnt; 902 907 } 903 908 904 909 /* Prepare each packet for sending, and add to relevant queue: */ ··· 925 920 __skb_queue_tail(xmitq, _skb); 926 921 TIPC_SKB_CB(skb)->ackers = l->ackers; 927 922 l->rcv_unacked = 0; 923 + l->stats.sent_pkts++; 928 924 seqno++; 929 925 continue; 930 926 } ··· 974 968 msg_set_ack(hdr, ack); 975 969 msg_set_bcast_ack(hdr, bc_ack); 976 970 l->rcv_unacked = 0; 971 + l->stats.sent_pkts++; 977 972 seqno++; 978 973 } 979 974 l->snd_nxt = seqno; ··· 1267 1260 1268 1261 /* Deliver packet */ 1269 1262 l->rcv_nxt++; 1270 - l->stats.recv_info++; 1263 + l->stats.recv_pkts++; 1271 1264 if (!tipc_data_input(l, skb, l->inputq)) 1272 1265 rc |= tipc_link_input(l, skb, l->inputq); 1273 1266 if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN)) ··· 1807 1800 void tipc_link_reset_stats(struct tipc_link *l) 1808 1801 { 1809 1802 memset(&l->stats, 0, sizeof(l->stats)); 1810 - if (!link_is_bc_sndlink(l)) { 1811 
- l->stats.sent_info = l->snd_nxt; 1812 - l->stats.recv_info = l->rcv_nxt; 1813 - } 1814 1803 } 1815 1804 1816 1805 static void link_print(struct tipc_link *l, const char *str) ··· 1870 1867 }; 1871 1868 1872 1869 struct nla_map map[] = { 1873 - {TIPC_NLA_STATS_RX_INFO, s->recv_info}, 1870 + {TIPC_NLA_STATS_RX_INFO, 0}, 1874 1871 {TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments}, 1875 1872 {TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented}, 1876 1873 {TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles}, 1877 1874 {TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled}, 1878 - {TIPC_NLA_STATS_TX_INFO, s->sent_info}, 1875 + {TIPC_NLA_STATS_TX_INFO, 0}, 1879 1876 {TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments}, 1880 1877 {TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented}, 1881 1878 {TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles}, ··· 1950 1947 goto attr_msg_full; 1951 1948 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu)) 1952 1949 goto attr_msg_full; 1953 - if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->rcv_nxt)) 1950 + if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->stats.recv_pkts)) 1954 1951 goto attr_msg_full; 1955 - if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->snd_nxt)) 1952 + if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->stats.sent_pkts)) 1956 1953 goto attr_msg_full; 1957 1954 1958 1955 if (tipc_link_is_up(link)) ··· 2007 2004 }; 2008 2005 2009 2006 struct nla_map map[] = { 2010 - {TIPC_NLA_STATS_RX_INFO, stats->recv_info}, 2007 + {TIPC_NLA_STATS_RX_INFO, stats->recv_pkts}, 2011 2008 {TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments}, 2012 2009 {TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented}, 2013 2010 {TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles}, 2014 2011 {TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled}, 2015 - {TIPC_NLA_STATS_TX_INFO, stats->sent_info}, 2012 + {TIPC_NLA_STATS_TX_INFO, stats->sent_pkts}, 2016 2013 {TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments}, 2017 2014 {TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented}, 2018 2015 
{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles}, ··· 2079 2076 goto attr_msg_full; 2080 2077 if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name)) 2081 2078 goto attr_msg_full; 2082 - if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->rcv_nxt)) 2079 + if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, 0)) 2083 2080 goto attr_msg_full; 2084 - if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->snd_nxt)) 2081 + if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0)) 2085 2082 goto attr_msg_full; 2086 2083 2087 2084 prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
+5
net/tipc/udp_media.c
··· 697 697 udp_conf.local_ip.s_addr = htonl(INADDR_ANY); 698 698 udp_conf.use_udp_checksums = false; 699 699 ub->ifindex = dev->ifindex; 700 + if (tipc_mtu_bad(dev, sizeof(struct iphdr) + 701 + sizeof(struct udphdr))) { 702 + err = -EINVAL; 703 + goto err; 704 + } 700 705 b->mtu = dev->mtu - sizeof(struct iphdr) 701 706 - sizeof(struct udphdr); 702 707 #if IS_ENABLED(CONFIG_IPV6)
+6 -4
net/xfrm/xfrm_policy.c
··· 1268 1268 err = security_xfrm_policy_lookup(pol->security, 1269 1269 fl->flowi_secid, 1270 1270 policy_to_flow_dir(dir)); 1271 - if (!err && !xfrm_pol_hold_rcu(pol)) 1272 - goto again; 1273 - else if (err == -ESRCH) 1271 + if (!err) { 1272 + if (!xfrm_pol_hold_rcu(pol)) 1273 + goto again; 1274 + } else if (err == -ESRCH) { 1274 1275 pol = NULL; 1275 - else 1276 + } else { 1276 1277 pol = ERR_PTR(err); 1278 + } 1277 1279 } else 1278 1280 pol = NULL; 1279 1281 }
+1 -1
net/xfrm/xfrm_user.c
··· 2450 2450 2451 2451 #ifdef CONFIG_COMPAT 2452 2452 if (in_compat_syscall()) 2453 - return -ENOTSUPP; 2453 + return -EOPNOTSUPP; 2454 2454 #endif 2455 2455 2456 2456 type = nlh->nlmsg_type;
+1 -1
samples/bpf/bpf_helpers.h
··· 117 117 #define PT_REGS_FP(x) ((x)->gprs[11]) /* Works only with CONFIG_FRAME_POINTER */ 118 118 #define PT_REGS_RC(x) ((x)->gprs[2]) 119 119 #define PT_REGS_SP(x) ((x)->gprs[15]) 120 - #define PT_REGS_IP(x) ((x)->ip) 120 + #define PT_REGS_IP(x) ((x)->psw.addr) 121 121 122 122 #elif defined(__aarch64__) 123 123
+1 -1
samples/bpf/sampleip_kern.c
··· 25 25 u64 ip; 26 26 u32 *value, init_val = 1; 27 27 28 - ip = ctx->regs.ip; 28 + ip = PT_REGS_IP(&ctx->regs); 29 29 value = bpf_map_lookup_elem(&ip_map, &ip); 30 30 if (value) 31 31 *value += 1;
+1 -1
samples/bpf/trace_event_kern.c
··· 50 50 key.userstack = bpf_get_stackid(ctx, &stackmap, USER_STACKID_FLAGS); 51 51 if ((int)key.kernstack < 0 && (int)key.userstack < 0) { 52 52 bpf_trace_printk(fmt, sizeof(fmt), cpu, ctx->sample_period, 53 - ctx->regs.ip); 53 + PT_REGS_IP(&ctx->regs)); 54 54 return 0; 55 55 } 56 56
+2
scripts/kconfig/Makefile
··· 35 35 36 36 silentoldconfig: $(obj)/conf 37 37 $(Q)mkdir -p include/config include/generated 38 + $(Q)test -e include/generated/autoksyms.h || \ 39 + touch include/generated/autoksyms.h 38 40 $< $(silent) --$@ $(Kconfig) 39 41 40 42 localyesconfig localmodconfig: $(obj)/streamline_config.pl $(obj)/conf
+16 -10
sound/sparc/dbri.c
··· 304 304 spinlock_t lock; 305 305 306 306 struct dbri_dma *dma; /* Pointer to our DMA block */ 307 - u32 dma_dvma; /* DBRI visible DMA address */ 307 + dma_addr_t dma_dvma; /* DBRI visible DMA address */ 308 308 309 309 void __iomem *regs; /* dbri HW regs */ 310 310 int dbri_irqp; /* intr queue pointer */ ··· 657 657 */ 658 658 static s32 *dbri_cmdlock(struct snd_dbri *dbri, int len) 659 659 { 660 + u32 dvma_addr = (u32)dbri->dma_dvma; 661 + 660 662 /* Space for 2 WAIT cmds (replaced later by 1 JUMP cmd) */ 661 663 len += 2; 662 664 spin_lock(&dbri->cmdlock); 663 665 if (dbri->cmdptr - dbri->dma->cmd + len < DBRI_NO_CMDS - 2) 664 666 return dbri->cmdptr + 2; 665 - else if (len < sbus_readl(dbri->regs + REG8) - dbri->dma_dvma) 667 + else if (len < sbus_readl(dbri->regs + REG8) - dvma_addr) 666 668 return dbri->dma->cmd; 667 669 else 668 670 printk(KERN_ERR "DBRI: no space for commands."); ··· 682 680 */ 683 681 static void dbri_cmdsend(struct snd_dbri *dbri, s32 *cmd, int len) 684 682 { 683 + u32 dvma_addr = (u32)dbri->dma_dvma; 685 684 s32 tmp, addr; 686 685 static int wait_id = 0; 687 686 ··· 692 689 *(cmd+1) = DBRI_CMD(D_WAIT, 1, wait_id); 693 690 694 691 /* Replace the last command with JUMP */ 695 - addr = dbri->dma_dvma + (cmd - len - dbri->dma->cmd) * sizeof(s32); 692 + addr = dvma_addr + (cmd - len - dbri->dma->cmd) * sizeof(s32); 696 693 *(dbri->cmdptr+1) = addr; 697 694 *(dbri->cmdptr) = DBRI_CMD(D_JUMP, 0, 0); 698 695 ··· 750 747 /* Lock must not be held before calling this */ 751 748 static void dbri_initialize(struct snd_dbri *dbri) 752 749 { 750 + u32 dvma_addr = (u32)dbri->dma_dvma; 753 751 s32 *cmd; 754 752 u32 dma_addr; 755 753 unsigned long flags; ··· 768 764 /* 769 765 * Initialize the interrupt ring buffer. 
770 766 */ 771 - dma_addr = dbri->dma_dvma + dbri_dma_off(intr, 0); 767 + dma_addr = dvma_addr + dbri_dma_off(intr, 0); 772 768 dbri->dma->intr[0] = dma_addr; 773 769 dbri->dbri_irqp = 1; 774 770 /* ··· 782 778 dbri->cmdptr = cmd; 783 779 *(cmd++) = DBRI_CMD(D_WAIT, 1, 0); 784 780 *(cmd++) = DBRI_CMD(D_WAIT, 1, 0); 785 - dma_addr = dbri->dma_dvma + dbri_dma_off(cmd, 0); 781 + dma_addr = dvma_addr + dbri_dma_off(cmd, 0); 786 782 sbus_writel(dma_addr, dbri->regs + REG8); 787 783 spin_unlock(&dbri->cmdlock); 788 784 ··· 1081 1077 static int setup_descs(struct snd_dbri *dbri, int streamno, unsigned int period) 1082 1078 { 1083 1079 struct dbri_streaminfo *info = &dbri->stream_info[streamno]; 1080 + u32 dvma_addr = (u32)dbri->dma_dvma; 1084 1081 __u32 dvma_buffer; 1085 1082 int desc; 1086 1083 int len; ··· 1182 1177 else { 1183 1178 dbri->next_desc[last_desc] = desc; 1184 1179 dbri->dma->desc[last_desc].nda = 1185 - dbri->dma_dvma + dbri_dma_off(desc, desc); 1180 + dvma_addr + dbri_dma_off(desc, desc); 1186 1181 } 1187 1182 1188 1183 last_desc = desc; ··· 1197 1192 } 1198 1193 1199 1194 dbri->dma->desc[last_desc].nda = 1200 - dbri->dma_dvma + dbri_dma_off(desc, first_desc); 1195 + dvma_addr + dbri_dma_off(desc, first_desc); 1201 1196 dbri->next_desc[last_desc] = first_desc; 1202 1197 dbri->pipes[info->pipe].first_desc = first_desc; 1203 1198 dbri->pipes[info->pipe].desc = first_desc; ··· 1702 1697 static void xmit_descs(struct snd_dbri *dbri) 1703 1698 { 1704 1699 struct dbri_streaminfo *info; 1700 + u32 dvma_addr = (u32)dbri->dma_dvma; 1705 1701 s32 *cmd; 1706 1702 unsigned long flags; 1707 1703 int first_td; ··· 1724 1718 *(cmd++) = DBRI_CMD(D_SDP, 0, 1725 1719 dbri->pipes[info->pipe].sdp 1726 1720 | D_SDP_P | D_SDP_EVERY | D_SDP_C); 1727 - *(cmd++) = dbri->dma_dvma + 1721 + *(cmd++) = dvma_addr + 1728 1722 dbri_dma_off(desc, first_td); 1729 1723 dbri_cmdsend(dbri, cmd, 2); 1730 1724 ··· 1746 1740 *(cmd++) = DBRI_CMD(D_SDP, 0, 1747 1741 dbri->pipes[info->pipe].sdp 
1748 1742 | D_SDP_P | D_SDP_EVERY | D_SDP_C); 1749 - *(cmd++) = dbri->dma_dvma + 1743 + *(cmd++) = dvma_addr + 1750 1744 dbri_dma_off(desc, first_td); 1751 1745 dbri_cmdsend(dbri, cmd, 2); 1752 1746 ··· 2545 2539 if (!dbri->dma) 2546 2540 return -ENOMEM; 2547 2541 2548 - dprintk(D_GEN, "DMA Cmd Block 0x%p (0x%08x)\n", 2542 + dprintk(D_GEN, "DMA Cmd Block 0x%p (%pad)\n", 2549 2543 dbri->dma, dbri->dma_dvma); 2550 2544 2551 2545 /* Map the registers into memory. */
+4 -2
virt/kvm/arm/vgic/vgic-v2.c
··· 50 50 51 51 WARN_ON(cpuif->vgic_lr[lr] & GICH_LR_STATE); 52 52 53 - kvm_notify_acked_irq(vcpu->kvm, 0, 54 - intid - VGIC_NR_PRIVATE_IRQS); 53 + /* Only SPIs require notification */ 54 + if (vgic_valid_spi(vcpu->kvm, intid)) 55 + kvm_notify_acked_irq(vcpu->kvm, 0, 56 + intid - VGIC_NR_PRIVATE_IRQS); 55 57 } 56 58 } 57 59
+4 -2
virt/kvm/arm/vgic/vgic-v3.c
··· 41 41 42 42 WARN_ON(cpuif->vgic_lr[lr] & ICH_LR_STATE); 43 43 44 - kvm_notify_acked_irq(vcpu->kvm, 0, 45 - intid - VGIC_NR_PRIVATE_IRQS); 44 + /* Only SPIs require notification */ 45 + if (vgic_valid_spi(vcpu->kvm, intid)) 46 + kvm_notify_acked_irq(vcpu->kvm, 0, 47 + intid - VGIC_NR_PRIVATE_IRQS); 46 48 } 47 49 48 50 /*
+1 -1
virt/kvm/kvm_main.c
··· 2889 2889 2890 2890 ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC); 2891 2891 if (ret < 0) { 2892 - ops->destroy(dev); 2893 2892 mutex_lock(&kvm->lock); 2894 2893 list_del(&dev->vm_node); 2895 2894 mutex_unlock(&kvm->lock); 2895 + ops->destroy(dev); 2896 2896 return ret; 2897 2897 } 2898 2898