Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

A function in kernel/bpf/syscall.c which got a bug fix in 'net'
was moved to kernel/bpf/verifier.c in 'net-next'.

Signed-off-by: David S. Miller <davem@davemloft.net>

+874 -376
+1 -1
Makefile
··· 1 1 VERSION = 4 2 2 PATCHLEVEL = 11 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc6 4 + EXTRAVERSION = -rc7 5 5 NAME = Fearless Coyote 6 6 7 7 # *DOCUMENTATION*
+2
arch/arm/boot/dts/am335x-baltos.dtsi
··· 371 371 372 372 phy1: ethernet-phy@1 { 373 373 reg = <7>; 374 + eee-broken-100tx; 375 + eee-broken-1000t; 374 376 }; 375 377 }; 376 378
+1
arch/arm/boot/dts/am335x-evmsk.dts
··· 672 672 ti,non-removable; 673 673 bus-width = <4>; 674 674 cap-power-off-card; 675 + keep-power-in-suspend; 675 676 pinctrl-names = "default"; 676 677 pinctrl-0 = <&mmc2_pins>; 677 678
+2
arch/arm/boot/dts/dra7.dtsi
··· 283 283 device_type = "pci"; 284 284 ranges = <0x81000000 0 0 0x03000 0 0x00010000 285 285 0x82000000 0 0x20013000 0x13000 0 0xffed000>; 286 + bus-range = <0x00 0xff>; 286 287 #interrupt-cells = <1>; 287 288 num-lanes = <1>; 288 289 linux,pci-domain = <0>; ··· 320 319 device_type = "pci"; 321 320 ranges = <0x81000000 0 0 0x03000 0 0x00010000 322 321 0x82000000 0 0x30013000 0x13000 0 0xffed000>; 322 + bus-range = <0x00 0xff>; 323 323 #interrupt-cells = <1>; 324 324 num-lanes = <1>; 325 325 linux,pci-domain = <1>;
+1 -1
arch/arm/boot/dts/logicpd-torpedo-som.dtsi
··· 121 121 &i2c3 { 122 122 clock-frequency = <400000>; 123 123 at24@50 { 124 - compatible = "at24,24c02"; 124 + compatible = "atmel,24c64"; 125 125 readonly; 126 126 reg = <0x50>; 127 127 };
+6 -6
arch/arm/boot/dts/sun8i-a33.dtsi
··· 66 66 opp-microvolt = <1200000>; 67 67 clock-latency-ns = <244144>; /* 8 32k periods */ 68 68 }; 69 - 70 - opp@1200000000 { 71 - opp-hz = /bits/ 64 <1200000000>; 72 - opp-microvolt = <1320000>; 73 - clock-latency-ns = <244144>; /* 8 32k periods */ 74 - }; 75 69 }; 76 70 77 71 cpus { ··· 75 81 operating-points-v2 = <&cpu0_opp_table>; 76 82 }; 77 83 84 + cpu@1 { 85 + operating-points-v2 = <&cpu0_opp_table>; 86 + }; 87 + 78 88 cpu@2 { 79 89 compatible = "arm,cortex-a7"; 80 90 device_type = "cpu"; 81 91 reg = <2>; 92 + operating-points-v2 = <&cpu0_opp_table>; 82 93 }; 83 94 84 95 cpu@3 { 85 96 compatible = "arm,cortex-a7"; 86 97 device_type = "cpu"; 87 98 reg = <3>; 99 + operating-points-v2 = <&cpu0_opp_table>; 88 100 }; 89 101 }; 90 102
+1
arch/arm/mach-omap2/common.h
··· 270 270 extern int omap4_mpuss_init(void); 271 271 extern int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state); 272 272 extern int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state); 273 + extern u32 omap4_get_cpu1_ns_pa_addr(void); 273 274 #else 274 275 static inline int omap4_enter_lowpower(unsigned int cpu, 275 276 unsigned int power_state)
+1 -1
arch/arm/mach-omap2/omap-hotplug.c
··· 50 50 omap4_hotplug_cpu(cpu, PWRDM_POWER_OFF); 51 51 52 52 if (omap_secure_apis_support()) 53 - boot_cpu = omap_read_auxcoreboot0(); 53 + boot_cpu = omap_read_auxcoreboot0() >> 9; 54 54 else 55 55 boot_cpu = 56 56 readl_relaxed(base + OMAP_AUX_CORE_BOOT_0) >> 5;
+18 -4
arch/arm/mach-omap2/omap-mpuss-lowpower.c
··· 64 64 #include "prm-regbits-44xx.h" 65 65 66 66 static void __iomem *sar_base; 67 + static u32 old_cpu1_ns_pa_addr; 67 68 68 69 #if defined(CONFIG_PM) && defined(CONFIG_SMP) 69 70 ··· 212 211 static void __init save_l2x0_context(void) 213 212 {} 214 213 #endif 214 + 215 + u32 omap4_get_cpu1_ns_pa_addr(void) 216 + { 217 + return old_cpu1_ns_pa_addr; 218 + } 215 219 216 220 /** 217 221 * omap4_enter_lowpower: OMAP4 MPUSS Low Power Entry Function ··· 466 460 void __init omap4_mpuss_early_init(void) 467 461 { 468 462 unsigned long startup_pa; 463 + void __iomem *ns_pa_addr; 469 464 470 - if (!(cpu_is_omap44xx() || soc_is_omap54xx())) 465 + if (!(soc_is_omap44xx() || soc_is_omap54xx())) 471 466 return; 472 467 473 468 sar_base = omap4_get_sar_ram_base(); 474 469 475 - if (cpu_is_omap443x()) 470 + /* Save old NS_PA_ADDR for validity checks later on */ 471 + if (soc_is_omap44xx()) 472 + ns_pa_addr = sar_base + CPU1_WAKEUP_NS_PA_ADDR_OFFSET; 473 + else 474 + ns_pa_addr = sar_base + OMAP5_CPU1_WAKEUP_NS_PA_ADDR_OFFSET; 475 + old_cpu1_ns_pa_addr = readl_relaxed(ns_pa_addr); 476 + 477 + if (soc_is_omap443x()) 476 478 startup_pa = __pa_symbol(omap4_secondary_startup); 477 - else if (cpu_is_omap446x()) 479 + else if (soc_is_omap446x()) 478 480 startup_pa = __pa_symbol(omap4460_secondary_startup); 479 481 else if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE) 480 482 startup_pa = __pa_symbol(omap5_secondary_hyp_startup); 481 483 else 482 484 startup_pa = __pa_symbol(omap5_secondary_startup); 483 485 484 - if (cpu_is_omap44xx()) 486 + if (soc_is_omap44xx()) 485 487 writel_relaxed(startup_pa, sar_base + 486 488 CPU1_WAKEUP_NS_PA_ADDR_OFFSET); 487 489 else
-1
arch/arm/mach-omap2/omap-smc.S
··· 94 94 ldr r12, =0x103 95 95 dsb 96 96 smc #0 97 - mov r0, r0, lsr #9 98 97 ldmfd sp!, {r2-r12, pc} 99 98 ENDPROC(omap_read_auxcoreboot0)
+76 -14
arch/arm/mach-omap2/omap-smp.c
··· 21 21 #include <linux/io.h> 22 22 #include <linux/irqchip/arm-gic.h> 23 23 24 + #include <asm/sections.h> 24 25 #include <asm/smp_scu.h> 25 26 #include <asm/virt.h> 26 27 ··· 41 40 42 41 #define OMAP5_CORE_COUNT 0x2 43 42 43 + #define AUX_CORE_BOOT0_GP_RELEASE 0x020 44 + #define AUX_CORE_BOOT0_HS_RELEASE 0x200 45 + 44 46 struct omap_smp_config { 45 47 unsigned long cpu1_rstctrl_pa; 46 48 void __iomem *cpu1_rstctrl_va; 47 49 void __iomem *scu_base; 50 + void __iomem *wakeupgen_base; 48 51 void *startup_addr; 49 52 }; 50 53 ··· 145 140 static struct clockdomain *cpu1_clkdm; 146 141 static bool booted; 147 142 static struct powerdomain *cpu1_pwrdm; 148 - void __iomem *base = omap_get_wakeupgen_base(); 149 143 150 144 /* 151 145 * Set synchronisation state between this boot processor ··· 159 155 * A barrier is added to ensure that write buffer is drained 160 156 */ 161 157 if (omap_secure_apis_support()) 162 - omap_modify_auxcoreboot0(0x200, 0xfffffdff); 158 + omap_modify_auxcoreboot0(AUX_CORE_BOOT0_HS_RELEASE, 159 + 0xfffffdff); 163 160 else 164 - writel_relaxed(0x20, base + OMAP_AUX_CORE_BOOT_0); 161 + writel_relaxed(AUX_CORE_BOOT0_GP_RELEASE, 162 + cfg.wakeupgen_base + OMAP_AUX_CORE_BOOT_0); 165 163 166 164 if (!cpu1_clkdm && !cpu1_pwrdm) { 167 165 cpu1_clkdm = clkdm_lookup("mpu1_clkdm"); ··· 267 261 set_cpu_possible(i, true); 268 262 } 269 263 264 + /* 265 + * For now, just make sure the start-up address is not within the booting 266 + * kernel space as that means we just overwrote whatever secondary_startup() 267 + * code there was. 
268 + */ 269 + static bool __init omap4_smp_cpu1_startup_valid(unsigned long addr) 270 + { 271 + if ((addr >= __pa(PAGE_OFFSET)) && (addr <= __pa(__bss_start))) 272 + return false; 273 + 274 + return true; 275 + } 276 + 277 + /* 278 + * We may need to reset CPU1 before configuring, otherwise kexec boot can end 279 + * up trying to use old kernel startup address or suspend-resume will 280 + * occasionally fail to bring up CPU1 on 4430 if CPU1 fails to enter deeper 281 + * idle states. 282 + */ 283 + static void __init omap4_smp_maybe_reset_cpu1(struct omap_smp_config *c) 284 + { 285 + unsigned long cpu1_startup_pa, cpu1_ns_pa_addr; 286 + bool needs_reset = false; 287 + u32 released; 288 + 289 + if (omap_secure_apis_support()) 290 + released = omap_read_auxcoreboot0() & AUX_CORE_BOOT0_HS_RELEASE; 291 + else 292 + released = readl_relaxed(cfg.wakeupgen_base + 293 + OMAP_AUX_CORE_BOOT_0) & 294 + AUX_CORE_BOOT0_GP_RELEASE; 295 + if (released) { 296 + pr_warn("smp: CPU1 not parked?\n"); 297 + 298 + return; 299 + } 300 + 301 + cpu1_startup_pa = readl_relaxed(cfg.wakeupgen_base + 302 + OMAP_AUX_CORE_BOOT_1); 303 + cpu1_ns_pa_addr = omap4_get_cpu1_ns_pa_addr(); 304 + 305 + /* Did the configured secondary_startup() get overwritten? */ 306 + if (!omap4_smp_cpu1_startup_valid(cpu1_startup_pa)) 307 + needs_reset = true; 308 + 309 + /* 310 + * If omap4 or 5 has NS_PA_ADDR configured, CPU1 may be in a 311 + * deeper idle state in WFI and will wake to an invalid address. 
312 + */ 313 + if ((soc_is_omap44xx() || soc_is_omap54xx()) && 314 + !omap4_smp_cpu1_startup_valid(cpu1_ns_pa_addr)) 315 + needs_reset = true; 316 + 317 + if (!needs_reset || !c->cpu1_rstctrl_va) 318 + return; 319 + 320 + pr_info("smp: CPU1 parked within kernel, needs reset (0x%lx 0x%lx)\n", 321 + cpu1_startup_pa, cpu1_ns_pa_addr); 322 + 323 + writel_relaxed(1, c->cpu1_rstctrl_va); 324 + readl_relaxed(c->cpu1_rstctrl_va); 325 + writel_relaxed(0, c->cpu1_rstctrl_va); 326 + } 327 + 270 328 static void __init omap4_smp_prepare_cpus(unsigned int max_cpus) 271 329 { 272 - void __iomem *base = omap_get_wakeupgen_base(); 273 330 const struct omap_smp_config *c = NULL; 274 331 275 332 if (soc_is_omap443x()) ··· 350 281 /* Must preserve cfg.scu_base set earlier */ 351 282 cfg.cpu1_rstctrl_pa = c->cpu1_rstctrl_pa; 352 283 cfg.startup_addr = c->startup_addr; 284 + cfg.wakeupgen_base = omap_get_wakeupgen_base(); 353 285 354 286 if (soc_is_dra74x() || soc_is_omap54xx()) { 355 287 if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE) ··· 369 299 if (cfg.scu_base) 370 300 scu_enable(cfg.scu_base); 371 301 372 - /* 373 - * Reset CPU1 before configuring, otherwise kexec will 374 - * end up trying to use old kernel startup address. 375 - */ 376 - if (cfg.cpu1_rstctrl_va) { 377 - writel_relaxed(1, cfg.cpu1_rstctrl_va); 378 - readl_relaxed(cfg.cpu1_rstctrl_va); 379 - writel_relaxed(0, cfg.cpu1_rstctrl_va); 380 - } 302 + omap4_smp_maybe_reset_cpu1(&cfg); 381 303 382 304 /* 383 305 * Write the address of secondary startup routine into the ··· 381 319 omap_auxcoreboot_addr(__pa_symbol(cfg.startup_addr)); 382 320 else 383 321 writel_relaxed(__pa_symbol(cfg.startup_addr), 384 - base + OMAP_AUX_CORE_BOOT_1); 322 + cfg.wakeupgen_base + OMAP_AUX_CORE_BOOT_1); 385 323 } 386 324 387 325 const struct smp_operations omap4_smp_ops __initconst = {
+8
arch/arm/mach-omap2/omap_device.c
··· 222 222 dev_err(dev, "failed to idle\n"); 223 223 } 224 224 break; 225 + case BUS_NOTIFY_BIND_DRIVER: 226 + od = to_omap_device(pdev); 227 + if (od && (od->_state == OMAP_DEVICE_STATE_ENABLED) && 228 + pm_runtime_status_suspended(dev)) { 229 + od->_driver_status = BUS_NOTIFY_BIND_DRIVER; 230 + pm_runtime_set_active(dev); 231 + } 232 + break; 225 233 case BUS_NOTIFY_ADD_DEVICE: 226 234 if (pdev->dev.of_node) 227 235 omap_device_build_from_dt(pdev);
+1
arch/arm/mach-orion5x/Kconfig
··· 6 6 select GPIOLIB 7 7 select MVEBU_MBUS 8 8 select PCI 9 + select PHYLIB if NETDEVICES 9 10 select PLAT_ORION_LEGACY 10 11 help 11 12 Support for the following Marvell Orion 5x series SoCs:
+5
arch/arm/plat-orion/common.c
··· 468 468 eth_data, &orion_ge11); 469 469 } 470 470 471 + #ifdef CONFIG_ARCH_ORION5X 471 472 /***************************************************************************** 472 473 * Ethernet switch 473 474 ****************************************************************************/ ··· 480 479 { 481 480 struct mdio_board_info *bd; 482 481 unsigned int i; 482 + 483 + if (!IS_BUILTIN(CONFIG_PHYLIB)) 484 + return; 483 485 484 486 for (i = 0; i < ARRAY_SIZE(d->port_names); i++) 485 487 if (!strcmp(d->port_names[i], "cpu")) ··· 497 493 498 494 mdiobus_register_board_info(&orion_ge00_switch_board_info, 1); 499 495 } 496 + #endif 500 497 501 498 /***************************************************************************** 502 499 * I2C
+2
arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
··· 179 179 usbphy: phy@01c19400 { 180 180 compatible = "allwinner,sun50i-a64-usb-phy"; 181 181 reg = <0x01c19400 0x14>, 182 + <0x01c1a800 0x4>, 182 183 <0x01c1b800 0x4>; 183 184 reg-names = "phy_ctrl", 185 + "pmu0", 184 186 "pmu1"; 185 187 clocks = <&ccu CLK_USB_PHY0>, 186 188 <&ccu CLK_USB_PHY1>;
+55 -31
arch/parisc/include/asm/uaccess.h
··· 39 39 #define get_user __get_user 40 40 41 41 #if !defined(CONFIG_64BIT) 42 - #define LDD_USER(ptr) __get_user_asm64(ptr) 42 + #define LDD_USER(val, ptr) __get_user_asm64(val, ptr) 43 43 #define STD_USER(x, ptr) __put_user_asm64(x, ptr) 44 44 #else 45 - #define LDD_USER(ptr) __get_user_asm("ldd", ptr) 45 + #define LDD_USER(val, ptr) __get_user_asm(val, "ldd", ptr) 46 46 #define STD_USER(x, ptr) __put_user_asm("std", x, ptr) 47 47 #endif 48 48 ··· 97 97 " mtsp %0,%%sr2\n\t" \ 98 98 : : "r"(get_fs()) : ) 99 99 100 - #define __get_user(x, ptr) \ 101 - ({ \ 102 - register long __gu_err __asm__ ("r8") = 0; \ 103 - register long __gu_val; \ 104 - \ 105 - load_sr2(); \ 106 - switch (sizeof(*(ptr))) { \ 107 - case 1: __get_user_asm("ldb", ptr); break; \ 108 - case 2: __get_user_asm("ldh", ptr); break; \ 109 - case 4: __get_user_asm("ldw", ptr); break; \ 110 - case 8: LDD_USER(ptr); break; \ 111 - default: BUILD_BUG(); break; \ 112 - } \ 113 - \ 114 - (x) = (__force __typeof__(*(ptr))) __gu_val; \ 115 - __gu_err; \ 100 + #define __get_user_internal(val, ptr) \ 101 + ({ \ 102 + register long __gu_err __asm__ ("r8") = 0; \ 103 + \ 104 + switch (sizeof(*(ptr))) { \ 105 + case 1: __get_user_asm(val, "ldb", ptr); break; \ 106 + case 2: __get_user_asm(val, "ldh", ptr); break; \ 107 + case 4: __get_user_asm(val, "ldw", ptr); break; \ 108 + case 8: LDD_USER(val, ptr); break; \ 109 + default: BUILD_BUG(); \ 110 + } \ 111 + \ 112 + __gu_err; \ 116 113 }) 117 114 118 - #define __get_user_asm(ldx, ptr) \ 115 + #define __get_user(val, ptr) \ 116 + ({ \ 117 + load_sr2(); \ 118 + __get_user_internal(val, ptr); \ 119 + }) 120 + 121 + #define __get_user_asm(val, ldx, ptr) \ 122 + { \ 123 + register long __gu_val; \ 124 + \ 119 125 __asm__("1: " ldx " 0(%%sr2,%2),%0\n" \ 120 126 "9:\n" \ 121 127 ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \ 122 128 : "=r"(__gu_val), "=r"(__gu_err) \ 123 - : "r"(ptr), "1"(__gu_err)); 129 + : "r"(ptr), "1"(__gu_err)); \ 130 + \ 131 + (val) = (__force 
__typeof__(*(ptr))) __gu_val; \ 132 + } 124 133 125 134 #if !defined(CONFIG_64BIT) 126 135 127 - #define __get_user_asm64(ptr) \ 136 + #define __get_user_asm64(val, ptr) \ 137 + { \ 138 + union { \ 139 + unsigned long long l; \ 140 + __typeof__(*(ptr)) t; \ 141 + } __gu_tmp; \ 142 + \ 128 143 __asm__(" copy %%r0,%R0\n" \ 129 144 "1: ldw 0(%%sr2,%2),%0\n" \ 130 145 "2: ldw 4(%%sr2,%2),%R0\n" \ 131 146 "9:\n" \ 132 147 ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \ 133 148 ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \ 134 - : "=r"(__gu_val), "=r"(__gu_err) \ 135 - : "r"(ptr), "1"(__gu_err)); 149 + : "=&r"(__gu_tmp.l), "=r"(__gu_err) \ 150 + : "r"(ptr), "1"(__gu_err)); \ 151 + \ 152 + (val) = __gu_tmp.t; \ 153 + } 136 154 137 155 #endif /* !defined(CONFIG_64BIT) */ 138 156 139 157 140 - #define __put_user(x, ptr) \ 158 + #define __put_user_internal(x, ptr) \ 141 159 ({ \ 142 160 register long __pu_err __asm__ ("r8") = 0; \ 143 161 __typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x); \ 144 162 \ 145 - load_sr2(); \ 146 163 switch (sizeof(*(ptr))) { \ 147 - case 1: __put_user_asm("stb", __x, ptr); break; \ 148 - case 2: __put_user_asm("sth", __x, ptr); break; \ 149 - case 4: __put_user_asm("stw", __x, ptr); break; \ 150 - case 8: STD_USER(__x, ptr); break; \ 151 - default: BUILD_BUG(); break; \ 152 - } \ 164 + case 1: __put_user_asm("stb", __x, ptr); break; \ 165 + case 2: __put_user_asm("sth", __x, ptr); break; \ 166 + case 4: __put_user_asm("stw", __x, ptr); break; \ 167 + case 8: STD_USER(__x, ptr); break; \ 168 + default: BUILD_BUG(); \ 169 + } \ 153 170 \ 154 171 __pu_err; \ 155 172 }) 173 + 174 + #define __put_user(x, ptr) \ 175 + ({ \ 176 + load_sr2(); \ 177 + __put_user_internal(x, ptr); \ 178 + }) 179 + 156 180 157 181 /* 158 182 * The "__put_user/kernel_asm()" macros tell gcc they read from memory
+14 -13
arch/parisc/lib/lusercopy.S
··· 201 201 add dst,len,end 202 202 203 203 /* short copy with less than 16 bytes? */ 204 - cmpib,>>=,n 15,len,.Lbyte_loop 204 + cmpib,COND(>>=),n 15,len,.Lbyte_loop 205 205 206 206 /* same alignment? */ 207 207 xor src,dst,t0 ··· 216 216 /* loop until we are 64-bit aligned */ 217 217 .Lalign_loop64: 218 218 extru dst,31,3,t1 219 - cmpib,=,n 0,t1,.Lcopy_loop_16 219 + cmpib,=,n 0,t1,.Lcopy_loop_16_start 220 220 20: ldb,ma 1(srcspc,src),t1 221 221 21: stb,ma t1,1(dstspc,dst) 222 222 b .Lalign_loop64 ··· 225 225 ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done) 226 226 ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done) 227 227 228 + .Lcopy_loop_16_start: 228 229 ldi 31,t0 229 230 .Lcopy_loop_16: 230 231 cmpb,COND(>>=),n t0,len,.Lword_loop ··· 268 267 /* loop until we are 32-bit aligned */ 269 268 .Lalign_loop32: 270 269 extru dst,31,2,t1 271 - cmpib,=,n 0,t1,.Lcopy_loop_4 270 + cmpib,=,n 0,t1,.Lcopy_loop_8 272 271 20: ldb,ma 1(srcspc,src),t1 273 272 21: stb,ma t1,1(dstspc,dst) 274 273 b .Lalign_loop32 ··· 278 277 ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done) 279 278 280 279 281 - .Lcopy_loop_4: 280 + .Lcopy_loop_8: 282 281 cmpib,COND(>>=),n 15,len,.Lbyte_loop 283 282 284 283 10: ldw 0(srcspc,src),t1 ··· 300 299 ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done) 301 300 ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done) 302 301 303 - b .Lcopy_loop_4 302 + b .Lcopy_loop_8 304 303 ldo -16(len),len 305 304 306 305 .Lbyte_loop: ··· 325 324 .Lunaligned_copy: 326 325 /* align until dst is 32bit-word-aligned */ 327 326 extru dst,31,2,t1 328 - cmpib,COND(=),n 0,t1,.Lcopy_dstaligned 327 + cmpib,=,n 0,t1,.Lcopy_dstaligned 329 328 20: ldb 0(srcspc,src),t1 330 329 ldo 1(src),src 331 330 21: stb,ma t1,1(dstspc,dst) ··· 363 362 cmpiclr,<> 1,t0,%r0 364 363 b,n .Lcase1 365 364 .Lcase0: 366 - cmpb,= %r0,len,.Lcda_finish 365 + cmpb,COND(=) %r0,len,.Lcda_finish 367 366 nop 368 367 369 368 1: ldw,ma 4(srcspc,src), a3 ··· 377 376 1: ldw,ma 4(srcspc,src), a3 378 377 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault) 379 378 ldo 
-1(len),len 380 - cmpb,=,n %r0,len,.Ldo0 379 + cmpb,COND(=),n %r0,len,.Ldo0 381 380 .Ldo4: 382 381 1: ldw,ma 4(srcspc,src), a0 383 382 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault) ··· 403 402 1: stw,ma t0, 4(dstspc,dst) 404 403 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done) 405 404 ldo -4(len),len 406 - cmpb,<> %r0,len,.Ldo4 405 + cmpb,COND(<>) %r0,len,.Ldo4 407 406 nop 408 407 .Ldo0: 409 408 shrpw a2, a3, %sar, t0 ··· 437 436 /* fault exception fixup handlers: */ 438 437 #ifdef CONFIG_64BIT 439 438 .Lcopy16_fault: 440 - 10: b .Lcopy_done 441 - std,ma t1,8(dstspc,dst) 439 + b .Lcopy_done 440 + 10: std,ma t1,8(dstspc,dst) 442 441 ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done) 443 442 #endif 444 443 445 444 .Lcopy8_fault: 446 - 10: b .Lcopy_done 447 - stw,ma t1,4(dstspc,dst) 445 + b .Lcopy_done 446 + 10: stw,ma t1,4(dstspc,dst) 448 447 ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done) 449 448 450 449 .exit
+1 -1
arch/sparc/Kconfig
··· 43 43 select ARCH_HAS_SG_CHAIN 44 44 select CPU_NO_EFFICIENT_FFS 45 45 select HAVE_ARCH_HARDENED_USERCOPY 46 - select PROVE_LOCKING_SMALL if PROVE_LOCKING 46 + select LOCKDEP_SMALL if LOCKDEP 47 47 select ARCH_WANT_RELAX_ORDER 48 48 49 49 config SPARC32
+16
arch/sparc/mm/hugetlbpage.c
··· 461 461 pgd_t *pgd; 462 462 unsigned long next; 463 463 464 + addr &= PMD_MASK; 465 + if (addr < floor) { 466 + addr += PMD_SIZE; 467 + if (!addr) 468 + return; 469 + } 470 + if (ceiling) { 471 + ceiling &= PMD_MASK; 472 + if (!ceiling) 473 + return; 474 + } 475 + if (end - 1 > ceiling - 1) 476 + end -= PMD_SIZE; 477 + if (addr > end - 1) 478 + return; 479 + 464 480 pgd = pgd_offset(tlb->mm, addr); 465 481 do { 466 482 next = pgd_addr_end(addr, end);
+31 -11
arch/x86/include/asm/pmem.h
··· 55 55 * @size: number of bytes to write back 56 56 * 57 57 * Write back a cache range using the CLWB (cache line write back) 58 - * instruction. 58 + * instruction. Note that @size is internally rounded up to be cache 59 + * line size aligned. 59 60 */ 60 61 static inline void arch_wb_cache_pmem(void *addr, size_t size) 61 62 { ··· 68 67 for (p = (void *)((unsigned long)addr & ~clflush_mask); 69 68 p < vend; p += x86_clflush_size) 70 69 clwb(p); 71 - } 72 - 73 - /* 74 - * copy_from_iter_nocache() on x86 only uses non-temporal stores for iovec 75 - * iterators, so for other types (bvec & kvec) we must do a cache write-back. 76 - */ 77 - static inline bool __iter_needs_pmem_wb(struct iov_iter *i) 78 - { 79 - return iter_is_iovec(i) == false; 80 70 } 81 71 82 72 /** ··· 86 94 /* TODO: skip the write-back by always using non-temporal stores */ 87 95 len = copy_from_iter_nocache(addr, bytes, i); 88 96 89 - if (__iter_needs_pmem_wb(i)) 97 + /* 98 + * In the iovec case on x86_64 copy_from_iter_nocache() uses 99 + * non-temporal stores for the bulk of the transfer, but we need 100 + * to manually flush if the transfer is unaligned. A cached 101 + * memory copy is used when destination or size is not naturally 102 + * aligned. That is: 103 + * - Require 8-byte alignment when size is 8 bytes or larger. 104 + * - Require 4-byte alignment when size is 4 bytes. 105 + * 106 + * In the non-iovec case the entire destination needs to be 107 + * flushed. 
108 + */ 109 + if (iter_is_iovec(i)) { 110 + unsigned long flushed, dest = (unsigned long) addr; 111 + 112 + if (bytes < 8) { 113 + if (!IS_ALIGNED(dest, 4) || (bytes != 4)) 114 + arch_wb_cache_pmem(addr, 1); 115 + } else { 116 + if (!IS_ALIGNED(dest, 8)) { 117 + dest = ALIGN(dest, boot_cpu_data.x86_clflush_size); 118 + arch_wb_cache_pmem(addr, 1); 119 + } 120 + 121 + flushed = dest - (unsigned long) addr; 122 + if (bytes > flushed && !IS_ALIGNED(bytes - flushed, 8)) 123 + arch_wb_cache_pmem(addr + bytes - 1, 1); 124 + } 125 + } else 90 126 arch_wb_cache_pmem(addr, bytes); 91 127 92 128 return len;
+51 -30
crypto/ahash.c
··· 32 32 crypto_completion_t complete; 33 33 void *data; 34 34 u8 *result; 35 + u32 flags; 35 36 void *ubuf[] CRYPTO_MINALIGN_ATTR; 36 37 }; 37 38 ··· 254 253 priv->result = req->result; 255 254 priv->complete = req->base.complete; 256 255 priv->data = req->base.data; 256 + priv->flags = req->base.flags; 257 + 257 258 /* 258 259 * WARNING: We do not backup req->priv here! The req->priv 259 260 * is for internal use of the Crypto API and the ··· 270 267 return 0; 271 268 } 272 269 273 - static void ahash_restore_req(struct ahash_request *req) 270 + static void ahash_restore_req(struct ahash_request *req, int err) 274 271 { 275 272 struct ahash_request_priv *priv = req->priv; 276 273 274 + if (!err) 275 + memcpy(priv->result, req->result, 276 + crypto_ahash_digestsize(crypto_ahash_reqtfm(req))); 277 + 277 278 /* Restore the original crypto request. */ 278 279 req->result = priv->result; 279 - req->base.complete = priv->complete; 280 - req->base.data = priv->data; 280 + 281 + ahash_request_set_callback(req, priv->flags, 282 + priv->complete, priv->data); 281 283 req->priv = NULL; 282 284 283 285 /* Free the req->priv.priv from the ADJUSTED request. 
*/ 284 286 kzfree(priv); 285 287 } 286 288 287 - static void ahash_op_unaligned_finish(struct ahash_request *req, int err) 289 + static void ahash_notify_einprogress(struct ahash_request *req) 288 290 { 289 291 struct ahash_request_priv *priv = req->priv; 292 + struct crypto_async_request oreq; 290 293 291 - if (err == -EINPROGRESS) 292 - return; 294 + oreq.data = priv->data; 293 295 294 - if (!err) 295 - memcpy(priv->result, req->result, 296 - crypto_ahash_digestsize(crypto_ahash_reqtfm(req))); 297 - 298 - ahash_restore_req(req); 296 + priv->complete(&oreq, -EINPROGRESS); 299 297 } 300 298 301 299 static void ahash_op_unaligned_done(struct crypto_async_request *req, int err) 302 300 { 303 301 struct ahash_request *areq = req->data; 302 + 303 + if (err == -EINPROGRESS) { 304 + ahash_notify_einprogress(areq); 305 + return; 306 + } 304 307 305 308 /* 306 309 * Restore the original request, see ahash_op_unaligned() for what ··· 318 309 */ 319 310 320 311 /* First copy req->result into req->priv.result */ 321 - ahash_op_unaligned_finish(areq, err); 312 + ahash_restore_req(areq, err); 322 313 323 314 /* Complete the ORIGINAL request. 
*/ 324 315 areq->base.complete(&areq->base, err); ··· 334 325 return err; 335 326 336 327 err = op(req); 337 - ahash_op_unaligned_finish(req, err); 328 + if (err == -EINPROGRESS || 329 + (err == -EBUSY && (ahash_request_flags(req) & 330 + CRYPTO_TFM_REQ_MAY_BACKLOG))) 331 + return err; 332 + 333 + ahash_restore_req(req, err); 338 334 339 335 return err; 340 336 } ··· 374 360 } 375 361 EXPORT_SYMBOL_GPL(crypto_ahash_digest); 376 362 377 - static void ahash_def_finup_finish2(struct ahash_request *req, int err) 378 - { 379 - struct ahash_request_priv *priv = req->priv; 380 - 381 - if (err == -EINPROGRESS) 382 - return; 383 - 384 - if (!err) 385 - memcpy(priv->result, req->result, 386 - crypto_ahash_digestsize(crypto_ahash_reqtfm(req))); 387 - 388 - ahash_restore_req(req); 389 - } 390 - 391 363 static void ahash_def_finup_done2(struct crypto_async_request *req, int err) 392 364 { 393 365 struct ahash_request *areq = req->data; 394 366 395 - ahash_def_finup_finish2(areq, err); 367 + if (err == -EINPROGRESS) 368 + return; 369 + 370 + ahash_restore_req(areq, err); 396 371 397 372 areq->base.complete(&areq->base, err); 398 373 } ··· 392 389 goto out; 393 390 394 391 req->base.complete = ahash_def_finup_done2; 395 - req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; 392 + 396 393 err = crypto_ahash_reqtfm(req)->final(req); 394 + if (err == -EINPROGRESS || 395 + (err == -EBUSY && (ahash_request_flags(req) & 396 + CRYPTO_TFM_REQ_MAY_BACKLOG))) 397 + return err; 397 398 398 399 out: 399 - ahash_def_finup_finish2(req, err); 400 + ahash_restore_req(req, err); 400 401 return err; 401 402 } 402 403 ··· 408 401 { 409 402 struct ahash_request *areq = req->data; 410 403 404 + if (err == -EINPROGRESS) { 405 + ahash_notify_einprogress(areq); 406 + return; 407 + } 408 + 409 + areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; 410 + 411 411 err = ahash_def_finup_finish1(areq, err); 412 + if (areq->priv) 413 + return; 412 414 413 415 areq->base.complete(&areq->base, err); 414 416 } ··· 432 416 
return err; 433 417 434 418 err = tfm->update(req); 419 + if (err == -EINPROGRESS || 420 + (err == -EBUSY && (ahash_request_flags(req) & 421 + CRYPTO_TFM_REQ_MAY_BACKLOG))) 422 + return err; 423 + 435 424 return ahash_def_finup_finish1(req, err); 436 425 } 437 426
+6 -6
crypto/algif_aead.c
··· 40 40 struct aead_async_rsgl first_rsgl; 41 41 struct list_head list; 42 42 struct kiocb *iocb; 43 + struct sock *sk; 43 44 unsigned int tsgls; 44 45 char iv[]; 45 46 }; ··· 380 379 381 380 static void aead_async_cb(struct crypto_async_request *_req, int err) 382 381 { 383 - struct sock *sk = _req->data; 384 - struct alg_sock *ask = alg_sk(sk); 385 - struct aead_ctx *ctx = ask->private; 386 - struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req); 387 - struct aead_request *req = aead_request_cast(_req); 382 + struct aead_request *req = _req->data; 383 + struct crypto_aead *tfm = crypto_aead_reqtfm(req); 388 384 struct aead_async_req *areq = GET_ASYM_REQ(req, tfm); 385 + struct sock *sk = areq->sk; 389 386 struct scatterlist *sg = areq->tsgl; 390 387 struct aead_async_rsgl *rsgl; 391 388 struct kiocb *iocb = areq->iocb; ··· 446 447 memset(&areq->first_rsgl, '\0', sizeof(areq->first_rsgl)); 447 448 INIT_LIST_HEAD(&areq->list); 448 449 areq->iocb = msg->msg_iocb; 450 + areq->sk = sk; 449 451 memcpy(areq->iv, ctx->iv, crypto_aead_ivsize(tfm)); 450 452 aead_request_set_tfm(req, tfm); 451 453 aead_request_set_ad(req, ctx->aead_assoclen); 452 454 aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, 453 - aead_async_cb, sk); 455 + aead_async_cb, req); 454 456 used -= ctx->aead_assoclen; 455 457 456 458 /* take over all tx sgls from ctx */
+16
crypto/lrw.c
··· 345 345 struct rctx *rctx; 346 346 347 347 rctx = skcipher_request_ctx(req); 348 + 349 + if (err == -EINPROGRESS) { 350 + if (rctx->left != req->cryptlen) 351 + return; 352 + goto out; 353 + } 354 + 348 355 subreq = &rctx->subreq; 349 356 subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG; 350 357 ··· 359 352 if (rctx->left) 360 353 return; 361 354 355 + out: 362 356 skcipher_request_complete(req, err); 363 357 } 364 358 ··· 397 389 struct rctx *rctx; 398 390 399 391 rctx = skcipher_request_ctx(req); 392 + 393 + if (err == -EINPROGRESS) { 394 + if (rctx->left != req->cryptlen) 395 + return; 396 + goto out; 397 + } 398 + 400 399 subreq = &rctx->subreq; 401 400 subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG; 402 401 ··· 411 396 if (rctx->left) 412 397 return; 413 398 399 + out: 414 400 skcipher_request_complete(req, err); 415 401 } 416 402
+16
crypto/xts.c
··· 286 286 struct rctx *rctx; 287 287 288 288 rctx = skcipher_request_ctx(req); 289 + 290 + if (err == -EINPROGRESS) { 291 + if (rctx->left != req->cryptlen) 292 + return; 293 + goto out; 294 + } 295 + 289 296 subreq = &rctx->subreq; 290 297 subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG; 291 298 ··· 300 293 if (rctx->left) 301 294 return; 302 295 296 + out: 303 297 skcipher_request_complete(req, err); 304 298 } 305 299 ··· 338 330 struct rctx *rctx; 339 331 340 332 rctx = skcipher_request_ctx(req); 333 + 334 + if (err == -EINPROGRESS) { 335 + if (rctx->left != req->cryptlen) 336 + return; 337 + goto out; 338 + } 339 + 341 340 subreq = &rctx->subreq; 342 341 subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG; 343 342 ··· 352 337 if (rctx->left) 353 338 return; 354 339 340 + out: 355 341 skcipher_request_complete(req, err); 356 342 } 357 343
+5 -1
drivers/acpi/nfit/core.c
··· 1617 1617 const struct nfit_set_info_map *map0 = m0; 1618 1618 const struct nfit_set_info_map *map1 = m1; 1619 1619 1620 - return map0->region_offset - map1->region_offset; 1620 + if (map0->region_offset < map1->region_offset) 1621 + return -1; 1622 + else if (map0->region_offset > map1->region_offset) 1623 + return 1; 1624 + return 0; 1621 1625 } 1622 1626 1623 1627 /* Retrieve the nth entry referencing this spa */
+1
drivers/dax/Kconfig
··· 2 2 tristate "DAX: direct access to differentiated memory" 3 3 default m if NVDIMM_DAX 4 4 depends on TRANSPARENT_HUGEPAGE 5 + select SRCU 5 6 help 6 7 Support raw access to differentiated (persistence, bandwidth, 7 8 latency...) memory via an mmap(2) capable character
+7 -6
drivers/dax/dax.c
··· 25 25 #include "dax.h" 26 26 27 27 static dev_t dax_devt; 28 + DEFINE_STATIC_SRCU(dax_srcu); 28 29 static struct class *dax_class; 29 30 static DEFINE_IDA(dax_minor_ida); 30 31 static int nr_dax = CONFIG_NR_DEV_DAX; ··· 61 60 * @region - parent region 62 61 * @dev - device backing the character device 63 62 * @cdev - core chardev data 64 - * @alive - !alive + rcu grace period == no new mappings can be established 63 + * @alive - !alive + srcu grace period == no new mappings can be established 65 64 * @id - child id in the region 66 65 * @num_resources - number of physical address extents in this device 67 66 * @res - array of physical address ranges ··· 570 569 static int dax_dev_huge_fault(struct vm_fault *vmf, 571 570 enum page_entry_size pe_size) 572 571 { 573 - int rc; 572 + int rc, id; 574 573 struct file *filp = vmf->vma->vm_file; 575 574 struct dax_dev *dax_dev = filp->private_data; 576 575 ··· 579 578 ? "write" : "read", 580 579 vmf->vma->vm_start, vmf->vma->vm_end); 581 580 582 - rcu_read_lock(); 581 + id = srcu_read_lock(&dax_srcu); 583 582 switch (pe_size) { 584 583 case PE_SIZE_PTE: 585 584 rc = __dax_dev_pte_fault(dax_dev, vmf); ··· 593 592 default: 594 593 return VM_FAULT_FALLBACK; 595 594 } 596 - rcu_read_unlock(); 595 + srcu_read_unlock(&dax_srcu, id); 597 596 598 597 return rc; 599 598 } ··· 714 713 * Note, rcu is not protecting the liveness of dax_dev, rcu is 715 714 * ensuring that any fault handlers that might have seen 716 715 * dax_dev->alive == true, have completed. Any fault handlers 717 - * that start after synchronize_rcu() has started will abort 716 + * that start after synchronize_srcu() has started will abort 718 717 * upon seeing dax_dev->alive == false. 719 718 */ 720 719 dax_dev->alive = false; 721 - synchronize_rcu(); 720 + synchronize_srcu(&dax_srcu); 722 721 unmap_mapping_range(dax_dev->inode->i_mapping, 0, 0, 1); 723 722 cdev_del(cdev); 724 723 device_unregister(dev);
+1
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
··· 134 134 pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid); 135 135 switch (sdevid) { 136 136 case PCI_SUBSYS_DEVID_81XX_BGX: 137 + case PCI_SUBSYS_DEVID_81XX_RGX: 137 138 max_bgx_per_node = MAX_BGX_PER_CN81XX; 138 139 break; 139 140 case PCI_SUBSYS_DEVID_83XX_BGX:
+1
drivers/net/ethernet/cavium/thunder/thunder_bgx.h
··· 16 16 /* Subsystem device IDs */ 17 17 #define PCI_SUBSYS_DEVID_88XX_BGX 0xA126 18 18 #define PCI_SUBSYS_DEVID_81XX_BGX 0xA226 19 + #define PCI_SUBSYS_DEVID_81XX_RGX 0xA254 19 20 #define PCI_SUBSYS_DEVID_83XX_BGX 0xA326 20 21 21 22 #define MAX_BGX_THUNDER 8 /* Max 2 nodes, 4 per node */
+17 -14
drivers/net/ethernet/mediatek/mtk_eth_soc.c
··· 613 613 struct mtk_mac *mac = netdev_priv(dev); 614 614 struct mtk_eth *eth = mac->hw; 615 615 struct mtk_tx_dma *itxd, *txd; 616 - struct mtk_tx_buf *tx_buf; 616 + struct mtk_tx_buf *itx_buf, *tx_buf; 617 617 dma_addr_t mapped_addr; 618 618 unsigned int nr_frags; 619 619 int i, n_desc = 1; ··· 627 627 fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT; 628 628 txd4 |= fport; 629 629 630 - tx_buf = mtk_desc_to_tx_buf(ring, itxd); 631 - memset(tx_buf, 0, sizeof(*tx_buf)); 630 + itx_buf = mtk_desc_to_tx_buf(ring, itxd); 631 + memset(itx_buf, 0, sizeof(*itx_buf)); 632 632 633 633 if (gso) 634 634 txd4 |= TX_DMA_TSO; ··· 647 647 return -ENOMEM; 648 648 649 649 WRITE_ONCE(itxd->txd1, mapped_addr); 650 - tx_buf->flags |= MTK_TX_FLAGS_SINGLE0; 651 - dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr); 652 - dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb)); 650 + itx_buf->flags |= MTK_TX_FLAGS_SINGLE0; 651 + itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 : 652 + MTK_TX_FLAGS_FPORT1; 653 + dma_unmap_addr_set(itx_buf, dma_addr0, mapped_addr); 654 + dma_unmap_len_set(itx_buf, dma_len0, skb_headlen(skb)); 653 655 654 656 /* TX SG offload */ 655 657 txd = itxd; ··· 687 685 last_frag * TX_DMA_LS0)); 688 686 WRITE_ONCE(txd->txd4, fport); 689 687 690 - tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC; 691 688 tx_buf = mtk_desc_to_tx_buf(ring, txd); 692 689 memset(tx_buf, 0, sizeof(*tx_buf)); 693 - 690 + tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC; 694 691 tx_buf->flags |= MTK_TX_FLAGS_PAGE0; 692 + tx_buf->flags |= (!mac->id) ? 
MTK_TX_FLAGS_FPORT0 : 693 + MTK_TX_FLAGS_FPORT1; 694 + 695 695 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr); 696 696 dma_unmap_len_set(tx_buf, dma_len0, frag_map_size); 697 697 frag_size -= frag_map_size; ··· 702 698 } 703 699 704 700 /* store skb to cleanup */ 705 - tx_buf->skb = skb; 701 + itx_buf->skb = skb; 706 702 707 703 WRITE_ONCE(itxd->txd4, txd4); 708 704 WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) | ··· 1016 1012 1017 1013 while ((cpu != dma) && budget) { 1018 1014 u32 next_cpu = desc->txd2; 1019 - int mac; 1015 + int mac = 0; 1020 1016 1021 1017 desc = mtk_qdma_phys_to_virt(ring, desc->txd2); 1022 1018 if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0) 1023 1019 break; 1024 1020 1025 - mac = (desc->txd4 >> TX_DMA_FPORT_SHIFT) & 1026 - TX_DMA_FPORT_MASK; 1027 - mac--; 1028 - 1029 1021 tx_buf = mtk_desc_to_tx_buf(ring, desc); 1022 + if (tx_buf->flags & MTK_TX_FLAGS_FPORT1) 1023 + mac = 1; 1024 + 1030 1025 skb = tx_buf->skb; 1031 1026 if (!skb) { 1032 1027 condition = 1;
+9 -3
drivers/net/ethernet/mediatek/mtk_eth_soc.h
··· 410 410 struct u64_stats_sync syncp; 411 411 }; 412 412 413 - /* PDMA descriptor can point at 1-2 segments. This enum allows us to track how 414 - * memory was allocated so that it can be freed properly 415 - */ 416 413 enum mtk_tx_flags { 414 + /* PDMA descriptor can point at 1-2 segments. This enum allows us to 415 + * track how memory was allocated so that it can be freed properly. 416 + */ 417 417 MTK_TX_FLAGS_SINGLE0 = 0x01, 418 418 MTK_TX_FLAGS_PAGE0 = 0x02, 419 + 420 + /* MTK_TX_FLAGS_FPORTx allows tracking which port the transmitted 421 + * SKB out instead of looking up through hardware TX descriptor. 422 + */ 423 + MTK_TX_FLAGS_FPORT0 = 0x04, 424 + MTK_TX_FLAGS_FPORT1 = 0x08, 419 425 }; 420 426 421 427 /* This enum allows us to identify how the clock is defined on the array of the
+67 -55
drivers/net/ethernet/renesas/sh_eth.c
··· 1127 1127 .get_mdio_data = sh_get_mdio, 1128 1128 }; 1129 1129 1130 + /* free Tx skb function */ 1131 + static int sh_eth_tx_free(struct net_device *ndev, bool sent_only) 1132 + { 1133 + struct sh_eth_private *mdp = netdev_priv(ndev); 1134 + struct sh_eth_txdesc *txdesc; 1135 + int free_num = 0; 1136 + int entry; 1137 + bool sent; 1138 + 1139 + for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) { 1140 + entry = mdp->dirty_tx % mdp->num_tx_ring; 1141 + txdesc = &mdp->tx_ring[entry]; 1142 + sent = !(txdesc->status & cpu_to_le32(TD_TACT)); 1143 + if (sent_only && !sent) 1144 + break; 1145 + /* TACT bit must be checked before all the following reads */ 1146 + dma_rmb(); 1147 + netif_info(mdp, tx_done, ndev, 1148 + "tx entry %d status 0x%08x\n", 1149 + entry, le32_to_cpu(txdesc->status)); 1150 + /* Free the original skb. */ 1151 + if (mdp->tx_skbuff[entry]) { 1152 + dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr), 1153 + le32_to_cpu(txdesc->len) >> 16, 1154 + DMA_TO_DEVICE); 1155 + dev_kfree_skb_irq(mdp->tx_skbuff[entry]); 1156 + mdp->tx_skbuff[entry] = NULL; 1157 + free_num++; 1158 + } 1159 + txdesc->status = cpu_to_le32(TD_TFP); 1160 + if (entry >= mdp->num_tx_ring - 1) 1161 + txdesc->status |= cpu_to_le32(TD_TDLE); 1162 + 1163 + if (sent) { 1164 + ndev->stats.tx_packets++; 1165 + ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16; 1166 + } 1167 + } 1168 + return free_num; 1169 + } 1170 + 1130 1171 /* free skb and descriptor buffer */ 1131 1172 static void sh_eth_ring_free(struct net_device *ndev) 1132 1173 { 1133 1174 struct sh_eth_private *mdp = netdev_priv(ndev); 1134 1175 int ringsize, i; 1176 + 1177 + if (mdp->rx_ring) { 1178 + for (i = 0; i < mdp->num_rx_ring; i++) { 1179 + if (mdp->rx_skbuff[i]) { 1180 + struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i]; 1181 + 1182 + dma_unmap_single(&ndev->dev, 1183 + le32_to_cpu(rxdesc->addr), 1184 + ALIGN(mdp->rx_buf_sz, 32), 1185 + DMA_FROM_DEVICE); 1186 + } 1187 + } 1188 + ringsize = sizeof(struct 
sh_eth_rxdesc) * mdp->num_rx_ring; 1189 + dma_free_coherent(NULL, ringsize, mdp->rx_ring, 1190 + mdp->rx_desc_dma); 1191 + mdp->rx_ring = NULL; 1192 + } 1135 1193 1136 1194 /* Free Rx skb ringbuffer */ 1137 1195 if (mdp->rx_skbuff) { ··· 1199 1141 kfree(mdp->rx_skbuff); 1200 1142 mdp->rx_skbuff = NULL; 1201 1143 1202 - /* Free Tx skb ringbuffer */ 1203 - if (mdp->tx_skbuff) { 1204 - for (i = 0; i < mdp->num_tx_ring; i++) 1205 - dev_kfree_skb(mdp->tx_skbuff[i]); 1206 - } 1207 - kfree(mdp->tx_skbuff); 1208 - mdp->tx_skbuff = NULL; 1209 - 1210 - if (mdp->rx_ring) { 1211 - ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring; 1212 - dma_free_coherent(NULL, ringsize, mdp->rx_ring, 1213 - mdp->rx_desc_dma); 1214 - mdp->rx_ring = NULL; 1215 - } 1216 - 1217 1144 if (mdp->tx_ring) { 1145 + sh_eth_tx_free(ndev, false); 1146 + 1218 1147 ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring; 1219 1148 dma_free_coherent(NULL, ringsize, mdp->tx_ring, 1220 1149 mdp->tx_desc_dma); 1221 1150 mdp->tx_ring = NULL; 1222 1151 } 1152 + 1153 + /* Free Tx skb ringbuffer */ 1154 + kfree(mdp->tx_skbuff); 1155 + mdp->tx_skbuff = NULL; 1223 1156 } 1224 1157 1225 1158 /* format skb and descriptor buffer */ ··· 1458 1409 update_mac_address(ndev); 1459 1410 } 1460 1411 1461 - /* free Tx skb function */ 1462 - static int sh_eth_txfree(struct net_device *ndev) 1463 - { 1464 - struct sh_eth_private *mdp = netdev_priv(ndev); 1465 - struct sh_eth_txdesc *txdesc; 1466 - int free_num = 0; 1467 - int entry; 1468 - 1469 - for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) { 1470 - entry = mdp->dirty_tx % mdp->num_tx_ring; 1471 - txdesc = &mdp->tx_ring[entry]; 1472 - if (txdesc->status & cpu_to_le32(TD_TACT)) 1473 - break; 1474 - /* TACT bit must be checked before all the following reads */ 1475 - dma_rmb(); 1476 - netif_info(mdp, tx_done, ndev, 1477 - "tx entry %d status 0x%08x\n", 1478 - entry, le32_to_cpu(txdesc->status)); 1479 - /* Free the original skb. 
*/ 1480 - if (mdp->tx_skbuff[entry]) { 1481 - dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr), 1482 - le32_to_cpu(txdesc->len) >> 16, 1483 - DMA_TO_DEVICE); 1484 - dev_kfree_skb_irq(mdp->tx_skbuff[entry]); 1485 - mdp->tx_skbuff[entry] = NULL; 1486 - free_num++; 1487 - } 1488 - txdesc->status = cpu_to_le32(TD_TFP); 1489 - if (entry >= mdp->num_tx_ring - 1) 1490 - txdesc->status |= cpu_to_le32(TD_TDLE); 1491 - 1492 - ndev->stats.tx_packets++; 1493 - ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16; 1494 - } 1495 - return free_num; 1496 - } 1497 - 1498 1412 /* Packet receive function */ 1499 1413 static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) 1500 1414 { ··· 1702 1690 intr_status, mdp->cur_tx, mdp->dirty_tx, 1703 1691 (u32)ndev->state, edtrr); 1704 1692 /* dirty buffer free */ 1705 - sh_eth_txfree(ndev); 1693 + sh_eth_tx_free(ndev, true); 1706 1694 1707 1695 /* SH7712 BUG */ 1708 1696 if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) { ··· 1763 1751 /* Clear Tx interrupts */ 1764 1752 sh_eth_write(ndev, intr_status & cd->tx_check, EESR); 1765 1753 1766 - sh_eth_txfree(ndev); 1754 + sh_eth_tx_free(ndev, true); 1767 1755 netif_wake_queue(ndev); 1768 1756 } 1769 1757 ··· 2424 2412 2425 2413 spin_lock_irqsave(&mdp->lock, flags); 2426 2414 if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) { 2427 - if (!sh_eth_txfree(ndev)) { 2415 + if (!sh_eth_tx_free(ndev, true)) { 2428 2416 netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n"); 2429 2417 netif_stop_queue(ndev); 2430 2418 spin_unlock_irqrestore(&mdp->lock, flags);
+7
drivers/net/ethernet/sfc/efx.c
··· 1371 1371 free_cpumask_var(thread_mask); 1372 1372 } 1373 1373 1374 + if (count > EFX_MAX_RX_QUEUES) { 1375 + netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn, 1376 + "Reducing number of rx queues from %u to %u.\n", 1377 + count, EFX_MAX_RX_QUEUES); 1378 + count = EFX_MAX_RX_QUEUES; 1379 + } 1380 + 1374 1381 /* If RSS is requested for the PF *and* VFs then we can't write RSS 1375 1382 * table entries that are inaccessible to VFs 1376 1383 */
+7
drivers/net/ethernet/sfc/falcon/efx.c
··· 1354 1354 free_cpumask_var(thread_mask); 1355 1355 } 1356 1356 1357 + if (count > EF4_MAX_RX_QUEUES) { 1358 + netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn, 1359 + "Reducing number of rx queues from %u to %u.\n", 1360 + count, EF4_MAX_RX_QUEUES); 1361 + count = EF4_MAX_RX_QUEUES; 1362 + } 1363 + 1357 1364 return count; 1358 1365 } 1359 1366
+2 -15
drivers/net/phy/micrel.c
··· 797 797 .read_status = genphy_read_status, 798 798 .ack_interrupt = kszphy_ack_interrupt, 799 799 .config_intr = kszphy_config_intr, 800 - .get_sset_count = kszphy_get_sset_count, 801 - .get_strings = kszphy_get_strings, 802 - .get_stats = kszphy_get_stats, 803 800 .suspend = genphy_suspend, 804 801 .resume = genphy_resume, 805 802 }, { ··· 936 939 .read_status = genphy_read_status, 937 940 .ack_interrupt = kszphy_ack_interrupt, 938 941 .config_intr = kszphy_config_intr, 939 - .get_sset_count = kszphy_get_sset_count, 940 - .get_strings = kszphy_get_strings, 941 - .get_stats = kszphy_get_stats, 942 942 .suspend = genphy_suspend, 943 943 .resume = genphy_resume, 944 944 }, { ··· 945 951 .features = PHY_GBIT_FEATURES, 946 952 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, 947 953 .driver_data = &ksz9021_type, 954 + .probe = kszphy_probe, 948 955 .config_init = ksz9021_config_init, 949 956 .config_aneg = genphy_config_aneg, 950 957 .read_status = genphy_read_status, ··· 965 970 .features = PHY_GBIT_FEATURES, 966 971 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, 967 972 .driver_data = &ksz9021_type, 973 + .probe = kszphy_probe, 968 974 .config_init = ksz9031_config_init, 969 975 .config_aneg = genphy_config_aneg, 970 976 .read_status = ksz9031_read_status, ··· 984 988 .config_init = kszphy_config_init, 985 989 .config_aneg = ksz8873mll_config_aneg, 986 990 .read_status = ksz8873mll_read_status, 987 - .get_sset_count = kszphy_get_sset_count, 988 - .get_strings = kszphy_get_strings, 989 - .get_stats = kszphy_get_stats, 990 991 .suspend = genphy_suspend, 991 992 .resume = genphy_resume, 992 993 }, { ··· 995 1002 .config_init = kszphy_config_init, 996 1003 .config_aneg = genphy_config_aneg, 997 1004 .read_status = genphy_read_status, 998 - .get_sset_count = kszphy_get_sset_count, 999 - .get_strings = kszphy_get_strings, 1000 - .get_stats = kszphy_get_stats, 1001 1005 .suspend = genphy_suspend, 1002 1006 .resume = genphy_resume, 1003 1007 }, { ··· 1006 1016 
.config_init = kszphy_config_init, 1007 1017 .config_aneg = ksz8873mll_config_aneg, 1008 1018 .read_status = ksz8873mll_read_status, 1009 - .get_sset_count = kszphy_get_sset_count, 1010 - .get_strings = kszphy_get_strings, 1011 - .get_stats = kszphy_get_stats, 1012 1019 .suspend = genphy_suspend, 1013 1020 .resume = genphy_resume, 1014 1021 } };
+1 -1
drivers/net/vrf.c
··· 1264 1264 goto nla_put_failure; 1265 1265 1266 1266 /* rule only needs to appear once */ 1267 - nlh->nlmsg_flags &= NLM_F_EXCL; 1267 + nlh->nlmsg_flags |= NLM_F_EXCL; 1268 1268 1269 1269 frh = nlmsg_data(nlh); 1270 1270 memset(frh, 0, sizeof(*frh));
+6
drivers/nvdimm/bus.c
··· 934 934 rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, NULL); 935 935 if (rc < 0) 936 936 goto out_unlock; 937 + nvdimm_bus_unlock(&nvdimm_bus->dev); 938 + 937 939 if (copy_to_user(p, buf, buf_len)) 938 940 rc = -EFAULT; 941 + 942 + vfree(buf); 943 + return rc; 944 + 939 945 out_unlock: 940 946 nvdimm_bus_unlock(&nvdimm_bus->dev); 941 947 out:
+9 -1
drivers/nvdimm/claim.c
··· 243 243 } 244 244 245 245 if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) { 246 - if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)) { 246 + /* 247 + * FIXME: nsio_rw_bytes() may be called from atomic 248 + * context in the btt case and nvdimm_clear_poison() 249 + * takes a sleeping lock. Until the locking can be 250 + * reworked this capability requires that the namespace 251 + * is not claimed by btt. 252 + */ 253 + if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512) 254 + && (!ndns->claim || !is_nd_btt(ndns->claim))) { 247 255 long cleared; 248 256 249 257 cleared = nvdimm_clear_poison(&ndns->dev, offset, size);
+11 -66
drivers/nvdimm/dimm_devs.c
··· 395 395 396 396 int alias_dpa_busy(struct device *dev, void *data) 397 397 { 398 - resource_size_t map_end, blk_start, new, busy; 398 + resource_size_t map_end, blk_start, new; 399 399 struct blk_alloc_info *info = data; 400 400 struct nd_mapping *nd_mapping; 401 401 struct nd_region *nd_region; ··· 436 436 retry: 437 437 /* 438 438 * Find the free dpa from the end of the last pmem allocation to 439 - * the end of the interleave-set mapping that is not already 440 - * covered by a blk allocation. 439 + * the end of the interleave-set mapping. 441 440 */ 442 - busy = 0; 443 441 for_each_dpa_resource(ndd, res) { 442 + if (strncmp(res->name, "pmem", 4) != 0) 443 + continue; 444 444 if ((res->start >= blk_start && res->start < map_end) 445 445 || (res->end >= blk_start 446 446 && res->end <= map_end)) { 447 - if (strncmp(res->name, "pmem", 4) == 0) { 448 - new = max(blk_start, min(map_end + 1, 449 - res->end + 1)); 450 - if (new != blk_start) { 451 - blk_start = new; 452 - goto retry; 453 - } 454 - } else 455 - busy += min(map_end, res->end) 456 - - max(nd_mapping->start, res->start) + 1; 457 - } else if (nd_mapping->start > res->start 458 - && map_end < res->end) { 459 - /* total eclipse of the PMEM region mapping */ 460 - busy += nd_mapping->size; 461 - break; 447 + new = max(blk_start, min(map_end + 1, res->end + 1)); 448 + if (new != blk_start) { 449 + blk_start = new; 450 + goto retry; 451 + } 462 452 } 463 453 } 464 454 ··· 460 470 return 1; 461 471 } 462 472 463 - info->available -= blk_start - nd_mapping->start + busy; 473 + info->available -= blk_start - nd_mapping->start; 464 474 465 475 return 0; 466 - } 467 - 468 - static int blk_dpa_busy(struct device *dev, void *data) 469 - { 470 - struct blk_alloc_info *info = data; 471 - struct nd_mapping *nd_mapping; 472 - struct nd_region *nd_region; 473 - resource_size_t map_end; 474 - int i; 475 - 476 - if (!is_nd_pmem(dev)) 477 - return 0; 478 - 479 - nd_region = to_nd_region(dev); 480 - for (i = 0; i < 
nd_region->ndr_mappings; i++) { 481 - nd_mapping = &nd_region->mapping[i]; 482 - if (nd_mapping->nvdimm == info->nd_mapping->nvdimm) 483 - break; 484 - } 485 - 486 - if (i >= nd_region->ndr_mappings) 487 - return 0; 488 - 489 - map_end = nd_mapping->start + nd_mapping->size - 1; 490 - if (info->res->start >= nd_mapping->start 491 - && info->res->start < map_end) { 492 - if (info->res->end <= map_end) { 493 - info->busy = 0; 494 - return 1; 495 - } else { 496 - info->busy -= info->res->end - map_end; 497 - return 0; 498 - } 499 - } else if (info->res->end >= nd_mapping->start 500 - && info->res->end <= map_end) { 501 - info->busy -= nd_mapping->start - info->res->start; 502 - return 0; 503 - } else { 504 - info->busy -= nd_mapping->size; 505 - return 0; 506 - } 507 476 } 508 477 509 478 /** ··· 494 545 for_each_dpa_resource(ndd, res) { 495 546 if (strncmp(res->name, "blk", 3) != 0) 496 547 continue; 497 - 498 - info.res = res; 499 - info.busy = resource_size(res); 500 - device_for_each_child(&nvdimm_bus->dev, &info, blk_dpa_busy); 501 - info.available -= info.busy; 548 + info.available -= resource_size(res); 502 549 } 503 550 504 551 return info.available;
+1 -1
drivers/nvme/host/fc.c
··· 2023 2023 } 2024 2024 2025 2025 ctrl->ctrl.sqsize = 2026 - min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize); 2026 + min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize); 2027 2027 2028 2028 error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap); 2029 2029 if (error)
+1 -1
drivers/nvme/host/rdma.c
··· 1606 1606 } 1607 1607 1608 1608 ctrl->ctrl.sqsize = 1609 - min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize); 1609 + min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize); 1610 1610 1611 1611 error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap); 1612 1612 if (error)
+1 -1
drivers/nvme/target/loop.c
··· 392 392 } 393 393 394 394 ctrl->ctrl.sqsize = 395 - min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize); 395 + min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize); 396 396 397 397 error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap); 398 398 if (error)
+16 -6
drivers/reset/core.c
··· 275 275 } 276 276 EXPORT_SYMBOL_GPL(reset_control_status); 277 277 278 - static struct reset_control *__reset_control_get( 278 + static struct reset_control *__reset_control_get_internal( 279 279 struct reset_controller_dev *rcdev, 280 280 unsigned int index, bool shared) 281 281 { ··· 308 308 return rstc; 309 309 } 310 310 311 - static void __reset_control_put(struct reset_control *rstc) 311 + static void __reset_control_put_internal(struct reset_control *rstc) 312 312 { 313 313 lockdep_assert_held(&reset_list_mutex); 314 314 ··· 377 377 } 378 378 379 379 /* reset_list_mutex also protects the rcdev's reset_control list */ 380 - rstc = __reset_control_get(rcdev, rstc_id, shared); 380 + rstc = __reset_control_get_internal(rcdev, rstc_id, shared); 381 381 382 382 mutex_unlock(&reset_list_mutex); 383 383 384 384 return rstc; 385 385 } 386 386 EXPORT_SYMBOL_GPL(__of_reset_control_get); 387 + 388 + struct reset_control *__reset_control_get(struct device *dev, const char *id, 389 + int index, bool shared, bool optional) 390 + { 391 + if (dev->of_node) 392 + return __of_reset_control_get(dev->of_node, id, index, shared, 393 + optional); 394 + 395 + return optional ? NULL : ERR_PTR(-EINVAL); 396 + } 397 + EXPORT_SYMBOL_GPL(__reset_control_get); 387 398 388 399 /** 389 400 * reset_control_put - free the reset controller ··· 407 396 return; 408 397 409 398 mutex_lock(&reset_list_mutex); 410 - __reset_control_put(rstc); 399 + __reset_control_put_internal(rstc); 411 400 mutex_unlock(&reset_list_mutex); 412 401 } 413 402 EXPORT_SYMBOL_GPL(reset_control_put); ··· 428 417 if (!ptr) 429 418 return ERR_PTR(-ENOMEM); 430 419 431 - rstc = __of_reset_control_get(dev ? dev->of_node : NULL, 432 - id, index, shared, optional); 420 + rstc = __reset_control_get(dev, id, index, shared, optional); 433 421 if (!IS_ERR(rstc)) { 434 422 *ptr = rstc; 435 423 devres_add(dev, ptr);
+8 -3
drivers/scsi/aacraid/aacraid.h
··· 1690 1690 #define aac_adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) \ 1691 1691 (dev)->a_ops.adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) 1692 1692 1693 - #define aac_adapter_check_health(dev) \ 1694 - (dev)->a_ops.adapter_check_health(dev) 1695 - 1696 1693 #define aac_adapter_restart(dev, bled, reset_type) \ 1697 1694 ((dev)->a_ops.adapter_restart(dev, bled, reset_type)) 1698 1695 ··· 2610 2613 { 2611 2614 sector_div(capacity, divisor); 2612 2615 return capacity; 2616 + } 2617 + 2618 + static inline int aac_adapter_check_health(struct aac_dev *dev) 2619 + { 2620 + if (unlikely(pci_channel_offline(dev->pdev))) 2621 + return -1; 2622 + 2623 + return (dev)->a_ops.adapter_check_health(dev); 2613 2624 } 2614 2625 2615 2626 /* SCp.phase values */
+2 -1
drivers/scsi/aacraid/commsup.c
··· 1873 1873 spin_unlock_irqrestore(&aac->fib_lock, flagv); 1874 1874 1875 1875 if (BlinkLED < 0) { 1876 - printk(KERN_ERR "%s: Host adapter dead %d\n", aac->name, BlinkLED); 1876 + printk(KERN_ERR "%s: Host adapter is dead (or got a PCI error) %d\n", 1877 + aac->name, BlinkLED); 1877 1878 goto out; 1878 1879 } 1879 1880
+6 -1
drivers/scsi/ipr.c
··· 6293 6293 break; 6294 6294 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */ 6295 6295 case IPR_IOASA_IR_DUAL_IOA_DISABLED: 6296 - scsi_cmd->result |= (DID_PASSTHROUGH << 16); 6296 + /* 6297 + * exception: do not set DID_PASSTHROUGH on CHECK CONDITION 6298 + * so SCSI mid-layer and upper layers handle it accordingly. 6299 + */ 6300 + if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION) 6301 + scsi_cmd->result |= (DID_PASSTHROUGH << 16); 6297 6302 break; 6298 6303 case IPR_IOASC_BUS_WAS_RESET: 6299 6304 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
+2 -1
drivers/scsi/qedf/qedf_fip.c
··· 99 99 qedf_set_vlan_id(qedf, vid); 100 100 101 101 /* Inform waiter that it's ok to call fcoe_ctlr_link up() */ 102 - complete(&qedf->fipvlan_compl); 102 + if (!completion_done(&qedf->fipvlan_compl)) 103 + complete(&qedf->fipvlan_compl); 103 104 } 104 105 } 105 106
+1
drivers/scsi/qedf/qedf_main.c
··· 2803 2803 atomic_set(&qedf->num_offloads, 0); 2804 2804 qedf->stop_io_on_error = false; 2805 2805 pci_set_drvdata(pdev, qedf); 2806 + init_completion(&qedf->fipvlan_compl); 2806 2807 2807 2808 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, 2808 2809 "QLogic FastLinQ FCoE Module qedf %s, "
+6 -1
drivers/scsi/qla2xxx/qla_os.c
··· 1160 1160 uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha) 1161 1161 { 1162 1162 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1163 + struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; 1163 1164 1164 - return ((RD_REG_DWORD(&reg->host_status)) == ISP_REG_DISCONNECT); 1165 + if (IS_P3P_TYPE(ha)) 1166 + return ((RD_REG_DWORD(&reg82->host_int)) == ISP_REG_DISCONNECT); 1167 + else 1168 + return ((RD_REG_DWORD(&reg->host_status)) == 1169 + ISP_REG_DISCONNECT); 1165 1170 } 1166 1171 1167 1172 /**************************************************************************
+20 -3
drivers/scsi/sd.c
··· 2102 2102 2103 2103 #define READ_CAPACITY_RETRIES_ON_RESET 10 2104 2104 2105 + /* 2106 + * Ensure that we don't overflow sector_t when CONFIG_LBDAF is not set 2107 + * and the reported logical block size is bigger than 512 bytes. Note 2108 + * that last_sector is a u64 and therefore logical_to_sectors() is not 2109 + * applicable. 2110 + */ 2111 + static bool sd_addressable_capacity(u64 lba, unsigned int sector_size) 2112 + { 2113 + u64 last_sector = (lba + 1ULL) << (ilog2(sector_size) - 9); 2114 + 2115 + if (sizeof(sector_t) == 4 && last_sector > U32_MAX) 2116 + return false; 2117 + 2118 + return true; 2119 + } 2120 + 2105 2121 static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp, 2106 2122 unsigned char *buffer) 2107 2123 { ··· 2183 2167 return -ENODEV; 2184 2168 } 2185 2169 2186 - if ((sizeof(sdkp->capacity) == 4) && (lba >= 0xffffffffULL)) { 2170 + if (!sd_addressable_capacity(lba, sector_size)) { 2187 2171 sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a " 2188 2172 "kernel compiled with support for large block " 2189 2173 "devices.\n"); ··· 2272 2256 return sector_size; 2273 2257 } 2274 2258 2275 - if ((sizeof(sdkp->capacity) == 4) && (lba == 0xffffffff)) { 2259 + if (!sd_addressable_capacity(lba, sector_size)) { 2276 2260 sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a " 2277 2261 "kernel compiled with support for large block " 2278 2262 "devices.\n"); ··· 2972 2956 q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks); 2973 2957 rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks); 2974 2958 } else 2975 - rw_max = BLK_DEF_MAX_SECTORS; 2959 + rw_max = min_not_zero(logical_to_sectors(sdp, dev_max), 2960 + (sector_t)BLK_DEF_MAX_SECTORS); 2976 2961 2977 2962 /* Combine with controller limits */ 2978 2963 q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
+4 -2
drivers/scsi/sr.c
··· 836 836 unsigned char *buffer; 837 837 struct scsi_mode_data data; 838 838 struct scsi_sense_hdr sshdr; 839 + unsigned int ms_len = 128; 839 840 int rc, n; 840 841 841 842 static const char *loadmech[] = ··· 863 862 scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr); 864 863 865 864 /* ask for mode page 0x2a */ 866 - rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, 128, 865 + rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, ms_len, 867 866 SR_TIMEOUT, 3, &data, NULL); 868 867 869 - if (!scsi_status_is_good(rc)) { 868 + if (!scsi_status_is_good(rc) || data.length > ms_len || 869 + data.header_length + data.block_descriptor_length > data.length) { 870 870 /* failed, drive doesn't have capabilities mode page */ 871 871 cd->cdi.speed = 1; 872 872 cd->cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R |
+69 -16
drivers/tty/tty_ldisc.c
··· 492 492 } 493 493 494 494 /** 495 + * tty_ldisc_restore - helper for tty ldisc change 496 + * @tty: tty to recover 497 + * @old: previous ldisc 498 + * 499 + * Restore the previous line discipline or N_TTY when a line discipline 500 + * change fails due to an open error 501 + */ 502 + 503 + static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old) 504 + { 505 + struct tty_ldisc *new_ldisc; 506 + int r; 507 + 508 + /* There is an outstanding reference here so this is safe */ 509 + old = tty_ldisc_get(tty, old->ops->num); 510 + WARN_ON(IS_ERR(old)); 511 + tty->ldisc = old; 512 + tty_set_termios_ldisc(tty, old->ops->num); 513 + if (tty_ldisc_open(tty, old) < 0) { 514 + tty_ldisc_put(old); 515 + /* This driver is always present */ 516 + new_ldisc = tty_ldisc_get(tty, N_TTY); 517 + if (IS_ERR(new_ldisc)) 518 + panic("n_tty: get"); 519 + tty->ldisc = new_ldisc; 520 + tty_set_termios_ldisc(tty, N_TTY); 521 + r = tty_ldisc_open(tty, new_ldisc); 522 + if (r < 0) 523 + panic("Couldn't open N_TTY ldisc for " 524 + "%s --- error %d.", 525 + tty_name(tty), r); 526 + } 527 + } 528 + 529 + /** 495 530 * tty_set_ldisc - set line discipline 496 531 * @tty: the terminal to set 497 532 * @ldisc: the line discipline ··· 539 504 540 505 int tty_set_ldisc(struct tty_struct *tty, int disc) 541 506 { 542 - int retval, old_disc; 507 + int retval; 508 + struct tty_ldisc *old_ldisc, *new_ldisc; 509 + 510 + new_ldisc = tty_ldisc_get(tty, disc); 511 + if (IS_ERR(new_ldisc)) 512 + return PTR_ERR(new_ldisc); 543 513 544 514 tty_lock(tty); 545 515 retval = tty_ldisc_lock(tty, 5 * HZ); ··· 557 517 } 558 518 559 519 /* Check the no-op case */ 560 - old_disc = tty->ldisc->ops->num; 561 - if (old_disc == disc) 520 + if (tty->ldisc->ops->num == disc) 562 521 goto out; 563 522 564 523 if (test_bit(TTY_HUPPED, &tty->flags)) { ··· 566 527 goto out; 567 528 } 568 529 569 - retval = tty_ldisc_reinit(tty, disc); 530 + old_ldisc = tty->ldisc; 531 + 532 + /* Shutdown the old discipline. 
*/ 533 + tty_ldisc_close(tty, old_ldisc); 534 + 535 + /* Now set up the new line discipline. */ 536 + tty->ldisc = new_ldisc; 537 + tty_set_termios_ldisc(tty, disc); 538 + 539 + retval = tty_ldisc_open(tty, new_ldisc); 570 540 if (retval < 0) { 571 541 /* Back to the old one or N_TTY if we can't */ 572 - if (tty_ldisc_reinit(tty, old_disc) < 0) { 573 - pr_err("tty: TIOCSETD failed, reinitializing N_TTY\n"); 574 - if (tty_ldisc_reinit(tty, N_TTY) < 0) { 575 - /* At this point we have tty->ldisc == NULL. */ 576 - pr_err("tty: reinitializing N_TTY failed\n"); 577 - } 578 - } 542 + tty_ldisc_put(new_ldisc); 543 + tty_ldisc_restore(tty, old_ldisc); 579 544 } 580 545 581 - if (tty->ldisc && tty->ldisc->ops->num != old_disc && 582 - tty->ops->set_ldisc) { 546 + if (tty->ldisc->ops->num != old_ldisc->ops->num && tty->ops->set_ldisc) { 583 547 down_read(&tty->termios_rwsem); 584 548 tty->ops->set_ldisc(tty); 585 549 up_read(&tty->termios_rwsem); 586 550 } 587 551 552 + /* At this point we hold a reference to the new ldisc and a 553 + reference to the old ldisc, or we hold two references to 554 + the old ldisc (if it was restored as part of error cleanup 555 + above). In either case, releasing a single reference from 556 + the old ldisc is correct. 
*/ 557 + new_ldisc = old_ldisc; 588 558 out: 589 559 tty_ldisc_unlock(tty); 590 560 ··· 601 553 already running */ 602 554 tty_buffer_restart_work(tty->port); 603 555 err: 556 + tty_ldisc_put(new_ldisc); /* drop the extra reference */ 604 557 tty_unlock(tty); 605 558 return retval; 606 559 } ··· 662 613 int retval; 663 614 664 615 ld = tty_ldisc_get(tty, disc); 665 - if (IS_ERR(ld)) 616 + if (IS_ERR(ld)) { 617 + BUG_ON(disc == N_TTY); 666 618 return PTR_ERR(ld); 619 + } 667 620 668 621 if (tty->ldisc) { 669 622 tty_ldisc_close(tty, tty->ldisc); ··· 677 626 tty_set_termios_ldisc(tty, disc); 678 627 retval = tty_ldisc_open(tty, tty->ldisc); 679 628 if (retval) { 680 - tty_ldisc_put(tty->ldisc); 681 - tty->ldisc = NULL; 629 + if (!WARN_ON(disc == N_TTY)) { 630 + tty_ldisc_put(tty->ldisc); 631 + tty->ldisc = NULL; 632 + } 682 633 } 683 634 return retval; 684 635 }
+3
fs/namei.c
··· 2145 2145 int retval = 0; 2146 2146 const char *s = nd->name->name; 2147 2147 2148 + if (!*s) 2149 + flags &= ~LOOKUP_RCU; 2150 + 2148 2151 nd->last_type = LAST_ROOT; /* if there are only slashes... */ 2149 2152 nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT; 2150 2153 nd->depth = 0;
+7 -2
fs/orangefs/devorangefs-req.c
··· 208 208 continue; 209 209 /* 210 210 * Skip ops whose filesystem we don't know about unless 211 - * it is being mounted. 211 + * it is being mounted or unmounted. It is possible for 212 + * a filesystem we don't know about to be unmounted if 213 + * it fails to mount in the kernel after userspace has 214 + * been sent the mount request. 212 215 */ 213 216 /* XXX: is there a better way to detect this? */ 214 217 } else if (ret == -1 && 215 218 !(op->upcall.type == 216 219 ORANGEFS_VFS_OP_FS_MOUNT || 217 220 op->upcall.type == 218 - ORANGEFS_VFS_OP_GETATTR)) { 221 + ORANGEFS_VFS_OP_GETATTR || 222 + op->upcall.type == 223 + ORANGEFS_VFS_OP_FS_UMOUNT)) { 219 224 gossip_debug(GOSSIP_DEV_DEBUG, 220 225 "orangefs: skipping op tag %llu %s\n", 221 226 llu(op->tag), get_opname_string(op));
+1
fs/orangefs/orangefs-kernel.h
··· 249 249 char devname[ORANGEFS_MAX_SERVER_ADDR_LEN]; 250 250 struct super_block *sb; 251 251 int mount_pending; 252 + int no_list; 252 253 struct list_head list; 253 254 }; 254 255
+16 -7
fs/orangefs/super.c
··· 493 493 494 494 if (ret) { 495 495 d = ERR_PTR(ret); 496 - goto free_op; 496 + goto free_sb_and_op; 497 497 } 498 498 499 499 /* ··· 519 519 spin_unlock(&orangefs_superblocks_lock); 520 520 op_release(new_op); 521 521 522 + /* Must be removed from the list now. */ 523 + ORANGEFS_SB(sb)->no_list = 0; 524 + 522 525 if (orangefs_userspace_version >= 20906) { 523 526 new_op = op_alloc(ORANGEFS_VFS_OP_FEATURES); 524 527 if (!new_op) ··· 536 533 537 534 return dget(sb->s_root); 538 535 536 + free_sb_and_op: 537 + /* Will call orangefs_kill_sb with sb not in list. */ 538 + ORANGEFS_SB(sb)->no_list = 1; 539 + deactivate_locked_super(sb); 539 540 free_op: 540 541 gossip_err("orangefs_mount: mount request failed with %d\n", ret); 541 542 if (ret == -EINVAL) { ··· 565 558 */ 566 559 orangefs_unmount_sb(sb); 567 560 568 - /* remove the sb from our list of orangefs specific sb's */ 569 - 570 - spin_lock(&orangefs_superblocks_lock); 571 - __list_del_entry(&ORANGEFS_SB(sb)->list); /* not list_del_init */ 572 - ORANGEFS_SB(sb)->list.prev = NULL; 573 - spin_unlock(&orangefs_superblocks_lock); 561 + if (!ORANGEFS_SB(sb)->no_list) { 562 + /* remove the sb from our list of orangefs specific sb's */ 563 + spin_lock(&orangefs_superblocks_lock); 564 + /* not list_del_init */ 565 + __list_del_entry(&ORANGEFS_SB(sb)->list); 566 + ORANGEFS_SB(sb)->list.prev = NULL; 567 + spin_unlock(&orangefs_superblocks_lock); 568 + } 574 569 575 570 /* 576 571 * make sure that ORANGEFS_DEV_REMOUNT_ALL loop that might've seen us
+10
include/crypto/internal/hash.h
··· 166 166 return crypto_alloc_instance2(name, alg, ahash_instance_headroom()); 167 167 } 168 168 169 + static inline void ahash_request_complete(struct ahash_request *req, int err) 170 + { 171 + req->base.complete(&req->base, err); 172 + } 173 + 174 + static inline u32 ahash_request_flags(struct ahash_request *req) 175 + { 176 + return req->base.flags; 177 + } 178 + 169 179 static inline struct crypto_ahash *crypto_spawn_ahash( 170 180 struct crypto_ahash_spawn *spawn) 171 181 {
+28 -4
include/linux/blkdev.h
··· 1672 1672 return true; 1673 1673 } 1674 1674 1675 - static inline bool bio_will_gap(struct request_queue *q, struct bio *prev, 1676 - struct bio *next) 1675 + static inline bool bio_will_gap(struct request_queue *q, 1676 + struct request *prev_rq, 1677 + struct bio *prev, 1678 + struct bio *next) 1677 1679 { 1678 1680 if (bio_has_data(prev) && queue_virt_boundary(q)) { 1679 1681 struct bio_vec pb, nb; 1680 1682 1683 + /* 1684 + * don't merge if the 1st bio starts with non-zero 1685 + * offset, otherwise it is quite difficult to respect 1686 + * sg gap limit. We work hard to merge a huge number of small 1687 + * single bios in case of mkfs. 1688 + */ 1689 + if (prev_rq) 1690 + bio_get_first_bvec(prev_rq->bio, &pb); 1691 + else 1692 + bio_get_first_bvec(prev, &pb); 1693 + if (pb.bv_offset) 1694 + return true; 1695 + 1696 + /* 1697 + * We don't need to worry about the situation that the 1698 + * merged segment ends in unaligned virt boundary: 1699 + * 1700 + * - if 'pb' ends aligned, the merged segment ends aligned 1701 + * - if 'pb' ends unaligned, the next bio must include 1702 + * one single bvec of 'nb', otherwise the 'nb' can't 1703 + * merge with 'pb' 1704 + */ 1681 1705 bio_get_last_bvec(prev, &pb); 1682 1706 bio_get_first_bvec(next, &nb); 1683 1707 ··· 1714 1690 1715 1691 static inline bool req_gap_back_merge(struct request *req, struct bio *bio) 1716 1692 { 1717 - return bio_will_gap(req->q, req->biotail, bio); 1693 + return bio_will_gap(req->q, req, req->biotail, bio); 1718 1694 } 1719 1695 1720 1696 static inline bool req_gap_front_merge(struct request *req, struct bio *bio) 1721 1697 { 1722 - return bio_will_gap(req->q, bio, req->bio); 1698 + return bio_will_gap(req->q, NULL, bio, req->bio); 1723 1699 } 1724 1700 1725 1701 int kblockd_schedule_work(struct work_struct *work);
+14 -8
include/linux/reset.h
··· 15 15 struct reset_control *__of_reset_control_get(struct device_node *node, 16 16 const char *id, int index, bool shared, 17 17 bool optional); 18 + struct reset_control *__reset_control_get(struct device *dev, const char *id, 19 + int index, bool shared, 20 + bool optional); 18 21 void reset_control_put(struct reset_control *rstc); 19 22 struct reset_control *__devm_reset_control_get(struct device *dev, 20 23 const char *id, int index, bool shared, ··· 75 72 return optional ? NULL : ERR_PTR(-ENOTSUPP); 76 73 } 77 74 75 + static inline struct reset_control *__reset_control_get( 76 + struct device *dev, const char *id, 77 + int index, bool shared, bool optional) 78 + { 79 + return optional ? NULL : ERR_PTR(-ENOTSUPP); 80 + } 81 + 78 82 static inline struct reset_control *__devm_reset_control_get( 79 83 struct device *dev, const char *id, 80 84 int index, bool shared, bool optional) ··· 112 102 #ifndef CONFIG_RESET_CONTROLLER 113 103 WARN_ON(1); 114 104 #endif 115 - return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, false, 116 - false); 105 + return __reset_control_get(dev, id, 0, false, false); 117 106 } 118 107 119 108 /** ··· 140 131 static inline struct reset_control *reset_control_get_shared( 141 132 struct device *dev, const char *id) 142 133 { 143 - return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, true, 144 - false); 134 + return __reset_control_get(dev, id, 0, true, false); 145 135 } 146 136 147 137 static inline struct reset_control *reset_control_get_optional_exclusive( 148 138 struct device *dev, const char *id) 149 139 { 150 - return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, false, 151 - true); 140 + return __reset_control_get(dev, id, 0, false, true); 152 141 } 153 142 154 143 static inline struct reset_control *reset_control_get_optional_shared( 155 144 struct device *dev, const char *id) 156 145 { 157 - return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, true, 158 - true); 146 + return __reset_control_get(dev, id, 0, true, true); 159 147 } 160 148 161 149 /**
+8
kernel/bpf/verifier.c
··· 3349 3349 if (insn->imm == BPF_FUNC_xdp_adjust_head) 3350 3350 prog->xdp_adjust_head = 1; 3351 3351 if (insn->imm == BPF_FUNC_tail_call) { 3352 + /* If we tail call into other programs, we 3353 + * cannot make any assumptions since they can 3354 + * be replaced dynamically during runtime in 3355 + * the program array. 3356 + */ 3357 + prog->cb_access = 1; 3358 + prog->xdp_adjust_head = 1; 3359 + 3352 3360 /* mark bpf_tail_call as different opcode to avoid 3353 3361 * conditional branch in the interpeter for every normal 3354 3362 * call and to prevent accidental JITing by JIT compiler
+1 -1
kernel/cgroup/cgroup-v1.c
··· 1146 1146 * path is super cold. Let's just sleep a bit and retry. 1147 1147 */ 1148 1148 pinned_sb = kernfs_pin_sb(root->kf_root, NULL); 1149 - if (IS_ERR_OR_NULL(pinned_sb) || 1149 + if (IS_ERR(pinned_sb) || 1150 1150 !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) { 1151 1151 mutex_unlock(&cgroup_mutex); 1152 1152 if (!IS_ERR_OR_NULL(pinned_sb))
+3 -3
kernel/locking/lockdep_internals.h
··· 46 46 (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ) 47 47 48 48 /* 49 - * CONFIG_PROVE_LOCKING_SMALL is defined for sparc. Sparc requires .text, 49 + * CONFIG_LOCKDEP_SMALL is defined for sparc. Sparc requires .text, 50 50 * .data and .bss to fit in required 32MB limit for the kernel. With 51 - * PROVE_LOCKING we could go over this limit and cause system boot-up problems. 51 + * CONFIG_LOCKDEP we could go over this limit and cause system boot-up problems. 52 52 * So, reduce the static allocations for lockdeps related structures so that 53 53 * everything fits in current required size limit. 54 54 */ 55 - #ifdef CONFIG_PROVE_LOCKING_SMALL 55 + #ifdef CONFIG_LOCKDEP_SMALL 56 56 /* 57 57 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies 58 58 * we track.
+25 -4
kernel/trace/ftrace.c
··· 3755 3755 ftrace_probe_registered = 1; 3756 3756 } 3757 3757 3758 - static void __disable_ftrace_function_probe(void) 3758 + static bool __disable_ftrace_function_probe(void) 3759 3759 { 3760 3760 int i; 3761 3761 3762 3762 if (!ftrace_probe_registered) 3763 - return; 3763 + return false; 3764 3764 3765 3765 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) { 3766 3766 struct hlist_head *hhd = &ftrace_func_hash[i]; 3767 3767 if (hhd->first) 3768 - return; 3768 + return false; 3769 3769 } 3770 3770 3771 3771 /* no more funcs left */ 3772 3772 ftrace_shutdown(&trace_probe_ops, 0); 3773 3773 3774 3774 ftrace_probe_registered = 0; 3775 + return true; 3775 3776 } 3776 3777 3777 3778 ··· 3902 3901 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, 3903 3902 void *data, int flags) 3904 3903 { 3904 + struct ftrace_ops_hash old_hash_ops; 3905 3905 struct ftrace_func_entry *rec_entry; 3906 3906 struct ftrace_func_probe *entry; 3907 3907 struct ftrace_func_probe *p; ··· 3914 3912 struct hlist_node *tmp; 3915 3913 char str[KSYM_SYMBOL_LEN]; 3916 3914 int i, ret; 3915 + bool disabled; 3917 3916 3918 3917 if (glob && (strcmp(glob, "*") == 0 || !strlen(glob))) 3919 3918 func_g.search = NULL; ··· 3932 3929 } 3933 3930 3934 3931 mutex_lock(&trace_probe_ops.func_hash->regex_lock); 3932 + 3933 + old_hash_ops.filter_hash = old_hash; 3934 + /* Probes only have filters */ 3935 + old_hash_ops.notrace_hash = NULL; 3935 3936 3936 3937 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); 3937 3938 if (!hash) ··· 3974 3967 } 3975 3968 } 3976 3969 mutex_lock(&ftrace_lock); 3977 - __disable_ftrace_function_probe(); 3970 + disabled = __disable_ftrace_function_probe(); 3978 3971 /* 3979 3972 * Remove after the disable is called. Otherwise, if the last 3980 3973 * probe is removed, a null hash means *all enabled*. 
3981 3974 */ 3982 3975 ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash); 3976 + 3977 + /* still need to update the function call sites */ 3978 + if (ftrace_enabled && !disabled) 3979 + ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS, 3980 + &old_hash_ops); 3983 3981 synchronize_sched(); 3984 3982 if (!ret) 3985 3983 free_ftrace_hash_rcu(old_hash); ··· 5564 5552 synchronize_sched(); 5565 5553 5566 5554 trace_free_pid_list(pid_list); 5555 + } 5556 + 5557 + void ftrace_clear_pids(struct trace_array *tr) 5558 + { 5559 + mutex_lock(&ftrace_lock); 5560 + 5561 + clear_ftrace_pids(tr); 5562 + 5563 + mutex_unlock(&ftrace_lock); 5567 5564 } 5568 5565 5569 5566 static void ftrace_pid_reset(struct trace_array *tr)
+1
kernel/trace/trace.c
··· 7402 7402 7403 7403 tracing_set_nop(tr); 7404 7404 event_trace_del_tracer(tr); 7405 + ftrace_clear_pids(tr); 7405 7406 ftrace_destroy_function_files(tr); 7406 7407 tracefs_remove_recursive(tr->dir); 7407 7408 free_trace_buffers(tr);
+2
kernel/trace/trace.h
··· 896 896 void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer); 897 897 void ftrace_init_tracefs_toplevel(struct trace_array *tr, 898 898 struct dentry *d_tracer); 899 + void ftrace_clear_pids(struct trace_array *tr); 899 900 #else 900 901 static inline int ftrace_trace_task(struct trace_array *tr) 901 902 { ··· 915 914 static inline void ftrace_reset_array_ops(struct trace_array *tr) { } 916 915 static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { } 917 916 static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { } 917 + static inline void ftrace_clear_pids(struct trace_array *tr) { } 918 918 /* ftace_func_t type is not defined, use macro instead of static inline */ 919 919 #define ftrace_init_array_ops(tr, func) do { } while (0) 920 920 #endif /* CONFIG_FUNCTION_TRACER */
+3 -3
lib/Kconfig.debug
··· 1103 1103 1104 1104 For more details, see Documentation/locking/lockdep-design.txt. 1105 1105 1106 - config PROVE_LOCKING_SMALL 1107 - bool 1108 - 1109 1106 config LOCKDEP 1110 1107 bool 1111 1108 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT ··· 1110 1113 select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC && !SCORE 1111 1114 select KALLSYMS 1112 1115 select KALLSYMS_ALL 1116 + 1117 + config LOCKDEP_SMALL 1118 + bool 1113 1119 1114 1120 config LOCK_STAT 1115 1121 bool "Lock usage statistics"
+1
net/core/skbuff.c
··· 3817 3817 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; 3818 3818 serr->ee.ee_info = tstype; 3819 3819 serr->opt_stats = opt_stats; 3820 + serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0; 3820 3821 if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) { 3821 3822 serr->ee.ee_data = skb_shinfo(skb)->tskey; 3822 3823 if (sk->sk_protocol == IPPROTO_TCP &&
+5 -5
net/ipv4/ip_sockglue.c
··· 488 488 return false; 489 489 490 490 /* Support IP_PKTINFO on tstamp packets if requested, to correlate 491 - * timestamp with egress dev. Not possible for packets without dev 491 + * timestamp with egress dev. Not possible for packets without iif 492 492 * or without payload (SOF_TIMESTAMPING_OPT_TSONLY). 493 493 */ 494 - if ((!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) || 495 - (!skb->dev)) 494 + info = PKTINFO_SKB_CB(skb); 495 + if (!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG) || 496 + !info->ipi_ifindex) 496 497 return false; 497 498 498 - info = PKTINFO_SKB_CB(skb); 499 499 info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr; 500 - info->ipi_ifindex = skb->dev->ifindex; 501 500 return true; 502 501 } 503 502 ··· 590 591 case MCAST_LEAVE_GROUP: 591 592 case MCAST_LEAVE_SOURCE_GROUP: 592 593 case MCAST_UNBLOCK_SOURCE: 594 + case IP_ROUTER_ALERT: 593 595 return true; 594 596 } 595 597 return false;
+2 -9
net/ipv4/ipmr.c
··· 1278 1278 struct net *net = sock_net(sk); 1279 1279 struct mr_table *mrt; 1280 1280 1281 - rtnl_lock(); 1281 + ASSERT_RTNL(); 1282 1282 ipmr_for_each_table(mrt, net) { 1283 1283 if (sk == rtnl_dereference(mrt->mroute_sk)) { 1284 1284 IPV4_DEVCONF_ALL(net, MC_FORWARDING)--; ··· 1290 1290 mroute_clean_tables(mrt, false); 1291 1291 } 1292 1292 } 1293 - rtnl_unlock(); 1294 1293 } 1295 1294 1296 1295 /* Socket options and virtual interface manipulation. The whole ··· 1354 1355 if (sk != rcu_access_pointer(mrt->mroute_sk)) { 1355 1356 ret = -EACCES; 1356 1357 } else { 1357 - /* We need to unlock here because mrtsock_destruct takes 1358 - * care of rtnl itself and we can't change that due to 1359 - * the IP_ROUTER_ALERT setsockopt which runs without it. 1360 - */ 1361 - rtnl_unlock(); 1362 1358 ret = ip_ra_control(sk, 0, NULL); 1363 - goto out; 1359 + goto out_unlock; 1364 1360 } 1365 1361 break; 1366 1362 case MRT_ADD_VIF: ··· 1466 1472 } 1467 1473 out_unlock: 1468 1474 rtnl_unlock(); 1469 - out: 1470 1475 return ret; 1471 1476 } 1472 1477
+2
net/ipv4/raw.c
··· 682 682 /* 683 683 * Raw sockets may have direct kernel references. Kill them. 684 684 */ 685 + rtnl_lock(); 685 686 ip_ra_control(sk, 0, NULL); 687 + rtnl_unlock(); 686 688 687 689 sk_common_release(sk); 688 690 }
+1 -9
net/ipv6/datagram.c
··· 405 405 * At one point, excluding local errors was a quick test to identify icmp/icmp6 406 406 * errors. This is no longer true, but the test remained, so the v6 stack, 407 407 * unlike v4, also honors cmsg requests on all wifi and timestamp errors. 408 - * 409 - * Timestamp code paths do not initialize the fields expected by cmsg: 410 - * the PKTINFO fields in skb->cb[]. Fill those in here. 411 408 */ 412 409 static bool ip6_datagram_support_cmsg(struct sk_buff *skb, 413 410 struct sock_exterr_skb *serr) ··· 416 419 if (serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL) 417 420 return false; 418 421 419 - if (!skb->dev) 422 + if (!IP6CB(skb)->iif) 420 423 return false; 421 - 422 - if (skb->protocol == htons(ETH_P_IPV6)) 423 - IP6CB(skb)->iif = skb->dev->ifindex; 424 - else 425 - PKTINFO_SKB_CB(skb)->ipi_ifindex = skb->dev->ifindex; 426 424 427 425 return true; 428 426 }
+5 -2
net/ipv6/ip6_input.c
··· 124 124 max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs)); 125 125 /* 126 126 * RFC4291 2.5.3 127 + * The loopback address must not be used as the source address in IPv6 128 + * packets that are sent outside of a single node. [..] 127 129 * A packet received on an interface with a destination address 128 130 * of loopback must be dropped. 129 131 */ 130 - if (!(dev->flags & IFF_LOOPBACK) && 131 - ipv6_addr_loopback(&hdr->daddr)) 132 + if ((ipv6_addr_loopback(&hdr->saddr) || 133 + ipv6_addr_loopback(&hdr->daddr)) && 134 + !(dev->flags & IFF_LOOPBACK)) 132 135 goto err; 133 136 134 137 /* RFC4291 Errata ID: 3480
+117
tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc
··· 1 + #!/bin/sh 2 + # description: ftrace - function pid filters 3 + 4 + # Make sure that function pid matching filter works. 5 + # Also test it on an instance directory 6 + 7 + if ! grep -q function available_tracers; then 8 + echo "no function tracer configured" 9 + exit_unsupported 10 + fi 11 + 12 + if [ ! -f set_ftrace_pid ]; then 13 + echo "set_ftrace_pid not found? Is function tracer not set?" 14 + exit_unsupported 15 + fi 16 + 17 + if [ ! -f set_ftrace_filter ]; then 18 + echo "set_ftrace_filter not found? Is function tracer not set?" 19 + exit_unsupported 20 + fi 21 + 22 + do_function_fork=1 23 + 24 + if [ ! -f options/function-fork ]; then 25 + do_function_fork=0 26 + echo "no option for function-fork found. Option will not be tested." 27 + fi 28 + 29 + read PID _ < /proc/self/stat 30 + 31 + if [ $do_function_fork -eq 1 ]; then 32 + # default value of function-fork option 33 + orig_value=`grep function-fork trace_options` 34 + fi 35 + 36 + do_reset() { 37 + reset_tracer 38 + clear_trace 39 + enable_tracing 40 + echo > set_ftrace_filter 41 + echo > set_ftrace_pid 42 + 43 + if [ $do_function_fork -eq 0 ]; then 44 + return 45 + fi 46 + 47 + echo $orig_value > trace_options 48 + } 49 + 50 + fail() { # msg 51 + do_reset 52 + echo $1 53 + exit $FAIL 54 + } 55 + 56 + yield() { 57 + ping localhost -c 1 || sleep .001 || usleep 1 || sleep 1 58 + } 59 + 60 + do_test() { 61 + disable_tracing 62 + 63 + echo do_execve* > set_ftrace_filter 64 + echo *do_fork >> set_ftrace_filter 65 + 66 + echo $PID > set_ftrace_pid 67 + echo function > current_tracer 68 + 69 + if [ $do_function_fork -eq 1 ]; then 70 + # don't allow children to be traced 71 + echo nofunction-fork > trace_options 72 + fi 73 + 74 + enable_tracing 75 + yield 76 + 77 + count_pid=`cat trace | grep -v ^# | grep $PID | wc -l` 78 + count_other=`cat trace | grep -v ^# | grep -v $PID | wc -l` 79 + 80 + # count_other should be 0 81 + if [ $count_pid -eq 0 -o $count_other -ne 0 ]; then 82 + fail "PID filtering not working?" 83 + fi 84 + 85 + disable_tracing 86 + clear_trace 87 + 88 + if [ $do_function_fork -eq 0 ]; then 89 + return 90 + fi 91 + 92 + # allow children to be traced 93 + echo function-fork > trace_options 94 + 95 + enable_tracing 96 + yield 97 + 98 + count_pid=`cat trace | grep -v ^# | grep $PID | wc -l` 99 + count_other=`cat trace | grep -v ^# | grep -v $PID | wc -l` 100 + 101 + # count_other should NOT be 0 102 + if [ $count_pid -eq 0 -o $count_other -eq 0 ]; then 103 + fail "PID filtering not following fork?" 104 + fi 105 + } 106 + 107 + do_test 108 + 109 + mkdir instances/foo 110 + cd instances/foo 111 + do_test 112 + cd ../../ 113 + rmdir instances/foo 114 + 115 + do_reset 116 + 117 + exit 0