Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

All conflicts were simple overlapping changes except perhaps
for the Thunder driver.

That driver has a change_mtu method explicitly for sending
a message to the hardware. If that fails it returns an
error.

Normally a driver doesn't need an ndo_change_mtu method because those
are usually just range changes, which are now handled generically.
But since this extra operation is needed in the Thunder driver, it has
to stay.

However, if the message send fails we have to restore the original
MTU before the change because the entire call chain expects that if
an error is thrown by ndo_change_mtu then the MTU did not change.
Therefore code is added to nicvf_change_mtu to remember the original
MTU, and to restore it upon nicvf_update_hw_max_frs() failure.

Signed-off-by: David S. Miller <davem@davemloft.net>

+3388 -1793
+2 -2
Documentation/devicetree/bindings/ipmi/aspeed,ast2400-bt-bmc.txt Documentation/devicetree/bindings/ipmi/aspeed,ast2400-ibt-bmc.txt
··· 6 6 7 7 Required properties: 8 8 9 - - compatible : should be "aspeed,ast2400-bt-bmc" 9 + - compatible : should be "aspeed,ast2400-ibt-bmc" 10 10 - reg: physical address and size of the registers 11 11 12 12 Optional properties: ··· 17 17 Example: 18 18 19 19 ibt@1e789140 { 20 - compatible = "aspeed,ast2400-bt-bmc"; 20 + compatible = "aspeed,ast2400-ibt-bmc"; 21 21 reg = <0x1e789140 0x18>; 22 22 interrupts = <8>; 23 23 };
+1 -1
Documentation/devicetree/bindings/sound/omap-abe-twl6040.txt
··· 12 12 13 13 Optional properties: 14 14 - ti,dmic: phandle for the OMAP dmic node if the machine have it connected 15 - - ti,jack_detection: Need to be present if the board capable to detect jack 15 + - ti,jack-detection: Need to be present if the board capable to detect jack 16 16 insertion, removal. 17 17 18 18 Available audio endpoints for the audio-routing table:
+2 -2
Documentation/i2c/i2c-topology
··· 326 326 327 327 This is a good topology. 328 328 329 - .--------. 329 + .--------. 330 330 .----------. .--| dev D1 | 331 331 | parent- |--' '--------' 332 332 .--| locked | .--------. ··· 350 350 351 351 This is a good topology. 352 352 353 - .--------. 353 + .--------. 354 354 .----------. .--| dev D1 | 355 355 | mux- |--' '--------' 356 356 .--| locked | .--------.
+11
Documentation/virtual/kvm/api.txt
··· 777 777 conjunction with KVM_SET_CLOCK, it is used to ensure monotonicity on scenarios 778 778 such as migration. 779 779 780 + When KVM_CAP_ADJUST_CLOCK is passed to KVM_CHECK_EXTENSION, it returns the 781 + set of bits that KVM can return in struct kvm_clock_data's flag member. 782 + 783 + The only flag defined now is KVM_CLOCK_TSC_STABLE. If set, the returned 784 + value is the exact kvmclock value seen by all VCPUs at the instant 785 + when KVM_GET_CLOCK was called. If clear, the returned value is simply 786 + CLOCK_MONOTONIC plus a constant offset; the offset can be modified 787 + with KVM_SET_CLOCK. KVM will try to make all VCPUs follow this clock, 788 + but the exact value read by each VCPU could differ, because the host 789 + TSC is not stable. 790 + 780 791 struct kvm_clock_data { 781 792 __u64 clock; /* kvmclock current value */ 782 793 __u32 flags;
+1
MAINTAINERS
··· 7086 7086 LED SUBSYSTEM 7087 7087 M: Richard Purdie <rpurdie@rpsys.net> 7088 7088 M: Jacek Anaszewski <j.anaszewski@samsung.com> 7089 + M: Pavel Machek <pavel@ucw.cz> 7089 7090 L: linux-leds@vger.kernel.org 7090 7091 T: git git://git.kernel.org/pub/scm/linux/kernel/git/j.anaszewski/linux-leds.git 7091 7092 S: Maintained
+4 -3
Makefile
··· 1 1 VERSION = 4 2 2 PATCHLEVEL = 9 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc5 4 + EXTRAVERSION = -rc6 5 5 NAME = Psychotic Stoned Sheep 6 6 7 7 # *DOCUMENTATION* ··· 399 399 -fno-strict-aliasing -fno-common \ 400 400 -Werror-implicit-function-declaration \ 401 401 -Wno-format-security \ 402 - -std=gnu89 402 + -std=gnu89 $(call cc-option,-fno-PIE) 403 + 403 404 404 405 KBUILD_AFLAGS_KERNEL := 405 406 KBUILD_CFLAGS_KERNEL := 406 - KBUILD_AFLAGS := -D__ASSEMBLY__ 407 + KBUILD_AFLAGS := -D__ASSEMBLY__ $(call cc-option,-fno-PIE) 407 408 KBUILD_AFLAGS_MODULE := -DMODULE 408 409 KBUILD_CFLAGS_MODULE := -DMODULE 409 410 KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
+7 -7
arch/arm/boot/dts/imx53-qsb.dts
··· 64 64 }; 65 65 66 66 ldo3_reg: ldo3 { 67 - regulator-min-microvolt = <600000>; 68 - regulator-max-microvolt = <1800000>; 67 + regulator-min-microvolt = <1725000>; 68 + regulator-max-microvolt = <3300000>; 69 69 regulator-always-on; 70 70 }; 71 71 ··· 76 76 }; 77 77 78 78 ldo5_reg: ldo5 { 79 - regulator-min-microvolt = <1725000>; 80 - regulator-max-microvolt = <3300000>; 79 + regulator-min-microvolt = <1200000>; 80 + regulator-max-microvolt = <3600000>; 81 81 regulator-always-on; 82 82 }; 83 83 ··· 100 100 }; 101 101 102 102 ldo9_reg: ldo9 { 103 - regulator-min-microvolt = <1200000>; 103 + regulator-min-microvolt = <1250000>; 104 104 regulator-max-microvolt = <3600000>; 105 105 regulator-always-on; 106 106 }; 107 107 108 108 ldo10_reg: ldo10 { 109 - regulator-min-microvolt = <1250000>; 110 - regulator-max-microvolt = <3650000>; 109 + regulator-min-microvolt = <1200000>; 110 + regulator-max-microvolt = <3600000>; 111 111 regulator-always-on; 112 112 }; 113 113 };
+5
arch/arm/boot/dts/logicpd-som-lv.dtsi
··· 13 13 }; 14 14 }; 15 15 16 + memory@80000000 { 17 + device_type = "memory"; 18 + reg = <0x80000000 0>; 19 + }; 20 + 16 21 wl12xx_vmmc: wl12xx_vmmc { 17 22 compatible = "regulator-fixed"; 18 23 regulator-name = "vwl1271";
+2 -2
arch/arm/boot/dts/logicpd-torpedo-som.dtsi
··· 13 13 }; 14 14 }; 15 15 16 - memory@0 { 16 + memory@80000000 { 17 17 device_type = "memory"; 18 - reg = <0 0>; 18 + reg = <0x80000000 0>; 19 19 }; 20 20 21 21 leds {
+4 -3
arch/arm/boot/dts/omap5-board-common.dtsi
··· 124 124 compatible = "ti,abe-twl6040"; 125 125 ti,model = "omap5-uevm"; 126 126 127 + ti,jack-detection; 127 128 ti,mclk-freq = <19200000>; 128 129 129 130 ti,mcpdm = <&mcpdm>; ··· 416 415 ti,backup-battery-charge-high-current; 417 416 }; 418 417 419 - gpadc { 418 + gpadc: gpadc { 420 419 compatible = "ti,palmas-gpadc"; 421 420 interrupts = <18 0 422 421 16 0 ··· 476 475 smps6_reg: smps6 { 477 476 /* VDD_DDR3 - over VDD_SMPS6 */ 478 477 regulator-name = "smps6"; 479 - regulator-min-microvolt = <1200000>; 480 - regulator-max-microvolt = <1200000>; 478 + regulator-min-microvolt = <1350000>; 479 + regulator-max-microvolt = <1350000>; 481 480 regulator-always-on; 482 481 regulator-boot-on; 483 482 };
+1 -1
arch/arm/boot/dts/stih410-b2260.dts
··· 74 74 /* Low speed expansion connector */ 75 75 spi0: spi@9844000 { 76 76 label = "LS-SPI0"; 77 - cs-gpio = <&pio30 3 0>; 77 + cs-gpios = <&pio30 3 0>; 78 78 status = "okay"; 79 79 }; 80 80
+4
arch/arm/boot/dts/sun8i-a23-a33.dtsi
··· 282 282 uart1_pins_a: uart1@0 { 283 283 allwinner,pins = "PG6", "PG7"; 284 284 allwinner,function = "uart1"; 285 + allwinner,drive = <SUN4I_PINCTRL_10_MA>; 286 + allwinner,pull = <SUN4I_PINCTRL_NO_PULL>; 285 287 }; 286 288 287 289 uart1_pins_cts_rts_a: uart1-cts-rts@0 { 288 290 allwinner,pins = "PG8", "PG9"; 289 291 allwinner,function = "uart1"; 292 + allwinner,drive = <SUN4I_PINCTRL_10_MA>; 293 + allwinner,pull = <SUN4I_PINCTRL_NO_PULL>; 290 294 }; 291 295 292 296 mmc0_pins_a: mmc0@0 {
+20
arch/arm/kernel/traps.c
··· 74 74 dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs)); 75 75 } 76 76 77 + void dump_backtrace_stm(u32 *stack, u32 instruction) 78 + { 79 + char str[80], *p; 80 + unsigned int x; 81 + int reg; 82 + 83 + for (reg = 10, x = 0, p = str; reg >= 0; reg--) { 84 + if (instruction & BIT(reg)) { 85 + p += sprintf(p, " r%d:%08x", reg, *stack--); 86 + if (++x == 6) { 87 + x = 0; 88 + p = str; 89 + printk("%s\n", str); 90 + } 91 + } 92 + } 93 + if (p != str) 94 + printk("%s\n", str); 95 + } 96 + 77 97 #ifndef CONFIG_ARM_UNWIND 78 98 /* 79 99 * Stack pointers should always be within the kernels view of
+5
arch/arm/kernel/vmlinux-xip.lds.S
··· 3 3 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz> 4 4 */ 5 5 6 + /* No __ro_after_init data in the .rodata section - which will always be ro */ 7 + #define RO_AFTER_INIT_DATA 8 + 6 9 #include <asm-generic/vmlinux.lds.h> 7 10 #include <asm/cache.h> 8 11 #include <asm/thread_info.h> ··· 225 222 ARM_EXIT_KEEP(EXIT_DATA) 226 223 . = ALIGN(PAGE_SIZE); 227 224 __init_end = .; 225 + 226 + *(.data..ro_after_init) 228 227 229 228 NOSAVE_DATA 230 229 CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
+3 -34
arch/arm/lib/backtrace.S
··· 10 10 * 27/03/03 Ian Molton Clean up CONFIG_CPU 11 11 * 12 12 */ 13 + #include <linux/kern_levels.h> 13 14 #include <linux/linkage.h> 14 15 #include <asm/assembler.h> 15 16 .text ··· 84 83 teq r3, r1, lsr #11 85 84 ldreq r0, [frame, #-8] @ get sp 86 85 subeq r0, r0, #4 @ point at the last arg 87 - bleq .Ldumpstm @ dump saved registers 86 + bleq dump_backtrace_stm @ dump saved registers 88 87 89 88 1004: ldr r1, [sv_pc, #0] @ if stmfd sp!, {..., fp, ip, lr, pc} 90 89 ldr r3, .Ldsi @ instruction exists, 91 90 teq r3, r1, lsr #11 92 91 subeq r0, frame, #16 93 - bleq .Ldumpstm @ dump saved registers 92 + bleq dump_backtrace_stm @ dump saved registers 94 93 95 94 teq sv_fp, #0 @ zero saved fp means 96 95 beq no_frame @ no further frames ··· 113 112 .long 1004b, 1006b 114 113 .popsection 115 114 116 - #define instr r4 117 - #define reg r5 118 - #define stack r6 119 - 120 - .Ldumpstm: stmfd sp!, {instr, reg, stack, r7, lr} 121 - mov stack, r0 122 - mov instr, r1 123 - mov reg, #10 124 - mov r7, #0 125 - 1: mov r3, #1 126 - ARM( tst instr, r3, lsl reg ) 127 - THUMB( lsl r3, reg ) 128 - THUMB( tst instr, r3 ) 129 - beq 2f 130 - add r7, r7, #1 131 - teq r7, #6 132 - moveq r7, #0 133 - adr r3, .Lcr 134 - addne r3, r3, #1 @ skip newline 135 - ldr r2, [stack], #-4 136 - mov r1, reg 137 - adr r0, .Lfp 138 - bl printk 139 - 2: subs reg, reg, #1 140 - bpl 1b 141 - teq r7, #0 142 - adrne r0, .Lcr 143 - blne printk 144 - ldmfd sp!, {instr, reg, stack, r7, pc} 145 - 146 - .Lfp: .asciz " r%d:%08x%s" 147 - .Lcr: .asciz "\n" 148 115 .Lbad: .asciz "Backtrace aborted due to bad frame pointer <%p>\n" 149 116 .align 150 117 .Ldsi: .word 0xe92dd800 >> 11 @ stmfd sp!, {... fp, ip, lr, pc}
+1
arch/arm/mach-omap2/Kconfig
··· 71 71 select HAVE_ARM_TWD 72 72 select ARM_ERRATA_754322 73 73 select ARM_ERRATA_775420 74 + select OMAP_INTERCONNECT 74 75 75 76 config SOC_DRA7XX 76 77 bool "TI DRA7XX"
+11 -5
arch/arm/mach-omap2/id.c
··· 205 205 206 206 #define OMAP3_SHOW_FEATURE(feat) \ 207 207 if (omap3_has_ ##feat()) \ 208 - printk(#feat" "); 208 + n += scnprintf(buf + n, sizeof(buf) - n, #feat " "); 209 209 210 210 static void __init omap3_cpuinfo(void) 211 211 { 212 212 const char *cpu_name; 213 + char buf[64]; 214 + int n = 0; 215 + 216 + memset(buf, 0, sizeof(buf)); 213 217 214 218 /* 215 219 * OMAP3430 and OMAP3530 are assumed to be same. ··· 245 241 cpu_name = "OMAP3503"; 246 242 } 247 243 248 - sprintf(soc_name, "%s", cpu_name); 244 + scnprintf(soc_name, sizeof(soc_name), "%s", cpu_name); 249 245 250 246 /* Print verbose information */ 251 - pr_info("%s %s (", soc_name, soc_rev); 247 + n += scnprintf(buf, sizeof(buf) - n, "%s %s (", soc_name, soc_rev); 252 248 253 249 OMAP3_SHOW_FEATURE(l2cache); 254 250 OMAP3_SHOW_FEATURE(iva); ··· 256 252 OMAP3_SHOW_FEATURE(neon); 257 253 OMAP3_SHOW_FEATURE(isp); 258 254 OMAP3_SHOW_FEATURE(192mhz_clk); 259 - 260 - printk(")\n"); 255 + if (*(buf + n - 1) == ' ') 256 + n--; 257 + n += scnprintf(buf + n, sizeof(buf) - n, ")\n"); 258 + pr_info("%s", buf); 261 259 } 262 260 263 261 #define OMAP3_CHECK_FEATURE(status,feat) \
+3
arch/arm/mach-omap2/prm3xxx.c
··· 319 319 if (has_uart4) { 320 320 en_uart4_mask = OMAP3630_EN_UART4_MASK; 321 321 grpsel_uart4_mask = OMAP3630_GRPSEL_UART4_MASK; 322 + } else { 323 + en_uart4_mask = 0; 324 + grpsel_uart4_mask = 0; 322 325 } 323 326 324 327 /* Enable wakeups in PER */
+6
arch/arm/mach-omap2/voltage.c
··· 87 87 return -ENODATA; 88 88 } 89 89 90 + if (!voltdm->volt_data) { 91 + pr_err("%s: No voltage data defined for vdd_%s\n", 92 + __func__, voltdm->name); 93 + return -ENODATA; 94 + } 95 + 90 96 /* Adjust voltage to the exact voltage from the OPP table */ 91 97 for (i = 0; voltdm->volt_data[i].volt_nominal != 0; i++) { 92 98 if (voltdm->volt_data[i].volt_nominal >= target_volt) {
+1 -1
arch/arm/mm/dma-mapping.c
··· 1167 1167 dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); 1168 1168 return 0; 1169 1169 } 1170 - fs_initcall(dma_debug_do_init); 1170 + core_initcall(dma_debug_do_init); 1171 1171 1172 1172 #ifdef CONFIG_ARM_DMA_USE_IOMMU 1173 1173
+1 -1
arch/arm/mm/proc-v7m.S
··· 96 96 ret lr 97 97 ENDPROC(cpu_cm7_proc_fin) 98 98 99 - .section ".text.init", #alloc, #execinstr 99 + .section ".init.text", #alloc, #execinstr 100 100 101 101 __v7m_cm7_setup: 102 102 mov r8, #(V7M_SCB_CCR_DC | V7M_SCB_CCR_IC| V7M_SCB_CCR_BP)
+2 -2
arch/arm64/boot/dts/marvell/armada-37xx.dtsi
··· 105 105 status = "disabled"; 106 106 }; 107 107 108 - nb_perih_clk: nb-periph-clk@13000{ 108 + nb_periph_clk: nb-periph-clk@13000 { 109 109 compatible = "marvell,armada-3700-periph-clock-nb"; 110 110 reg = <0x13000 0x100>; 111 111 clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>, ··· 113 113 #clock-cells = <1>; 114 114 }; 115 115 116 - sb_perih_clk: sb-periph-clk@18000{ 116 + sb_periph_clk: sb-periph-clk@18000 { 117 117 compatible = "marvell,armada-3700-periph-clock-sb"; 118 118 reg = <0x18000 0x100>; 119 119 clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>,
+3 -3
arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
··· 130 130 reg = <0x700600 0x50>; 131 131 #address-cells = <0x1>; 132 132 #size-cells = <0x0>; 133 - cell-index = <1>; 134 - clocks = <&cps_syscon0 0 3>; 133 + cell-index = <3>; 134 + clocks = <&cps_syscon0 1 21>; 135 135 status = "disabled"; 136 136 }; 137 137 ··· 140 140 reg = <0x700680 0x50>; 141 141 #address-cells = <1>; 142 142 #size-cells = <0>; 143 - cell-index = <2>; 143 + cell-index = <4>; 144 144 clocks = <&cps_syscon0 1 21>; 145 145 status = "disabled"; 146 146 };
+9 -1
arch/arm64/include/asm/perf_event.h
··· 46 46 #define ARMV8_PMU_EVTYPE_MASK 0xc800ffff /* Mask for writable bits */ 47 47 #define ARMV8_PMU_EVTYPE_EVENT 0xffff /* Mask for EVENT bits */ 48 48 49 - #define ARMV8_PMU_EVTYPE_EVENT_SW_INCR 0 /* Software increment event */ 49 + /* 50 + * PMUv3 event types: required events 51 + */ 52 + #define ARMV8_PMUV3_PERFCTR_SW_INCR 0x00 53 + #define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL 0x03 54 + #define ARMV8_PMUV3_PERFCTR_L1D_CACHE 0x04 55 + #define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED 0x10 56 + #define ARMV8_PMUV3_PERFCTR_CPU_CYCLES 0x11 57 + #define ARMV8_PMUV3_PERFCTR_BR_PRED 0x12 50 58 51 59 /* 52 60 * Event filters for PMUv3
+1 -9
arch/arm64/kernel/perf_event.c
··· 31 31 32 32 /* 33 33 * ARMv8 PMUv3 Performance Events handling code. 34 - * Common event types. 34 + * Common event types (some are defined in asm/perf_event.h). 35 35 */ 36 - 37 - /* Required events. */ 38 - #define ARMV8_PMUV3_PERFCTR_SW_INCR 0x00 39 - #define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL 0x03 40 - #define ARMV8_PMUV3_PERFCTR_L1D_CACHE 0x04 41 - #define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED 0x10 42 - #define ARMV8_PMUV3_PERFCTR_CPU_CYCLES 0x11 43 - #define ARMV8_PMUV3_PERFCTR_BR_PRED 0x12 44 36 45 37 /* At least one of the following is required. */ 46 38 #define ARMV8_PMUV3_PERFCTR_INST_RETIRED 0x08
+8 -2
arch/arm64/kvm/sys_regs.c
··· 597 597 598 598 idx = ARMV8_PMU_CYCLE_IDX; 599 599 } else { 600 - BUG(); 600 + return false; 601 601 } 602 + } else if (r->CRn == 0 && r->CRm == 9) { 603 + /* PMCCNTR */ 604 + if (pmu_access_event_counter_el0_disabled(vcpu)) 605 + return false; 606 + 607 + idx = ARMV8_PMU_CYCLE_IDX; 602 608 } else if (r->CRn == 14 && (r->CRm & 12) == 8) { 603 609 /* PMEVCNTRn_EL0 */ 604 610 if (pmu_access_event_counter_el0_disabled(vcpu)) ··· 612 606 613 607 idx = ((r->CRm & 3) << 3) | (r->Op2 & 7); 614 608 } else { 615 - BUG(); 609 + return false; 616 610 } 617 611 618 612 if (!pmu_counter_idx_valid(vcpu, idx))
+12 -3
arch/powerpc/include/asm/exception-64s.h
··· 91 91 */ 92 92 #define LOAD_HANDLER(reg, label) \ 93 93 ld reg,PACAKBASE(r13); /* get high part of &label */ \ 94 - ori reg,reg,(FIXED_SYMBOL_ABS_ADDR(label))@l; 94 + ori reg,reg,FIXED_SYMBOL_ABS_ADDR(label); 95 95 96 96 #define __LOAD_HANDLER(reg, label) \ 97 97 ld reg,PACAKBASE(r13); \ ··· 158 158 std ra,offset(r13); \ 159 159 END_FTR_SECTION_NESTED(ftr,ftr,943) 160 160 161 - #define EXCEPTION_PROLOG_0(area) \ 162 - GET_PACA(r13); \ 161 + #define EXCEPTION_PROLOG_0_PACA(area) \ 163 162 std r9,area+EX_R9(r13); /* save r9 */ \ 164 163 OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR); \ 165 164 HMT_MEDIUM; \ 166 165 std r10,area+EX_R10(r13); /* save r10 - r12 */ \ 167 166 OPT_GET_SPR(r10, SPRN_CFAR, CPU_FTR_CFAR) 167 + 168 + #define EXCEPTION_PROLOG_0(area) \ 169 + GET_PACA(r13); \ 170 + EXCEPTION_PROLOG_0_PACA(area) 168 171 169 172 #define __EXCEPTION_PROLOG_1(area, extra, vec) \ 170 173 OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR); \ ··· 196 193 197 194 #define EXCEPTION_PROLOG_PSERIES(area, label, h, extra, vec) \ 198 195 EXCEPTION_PROLOG_0(area); \ 196 + EXCEPTION_PROLOG_1(area, extra, vec); \ 197 + EXCEPTION_PROLOG_PSERIES_1(label, h); 198 + 199 + /* Have the PACA in r13 already */ 200 + #define EXCEPTION_PROLOG_PSERIES_PACA(area, label, h, extra, vec) \ 201 + EXCEPTION_PROLOG_0_PACA(area); \ 199 202 EXCEPTION_PROLOG_1(area, extra, vec); \ 200 203 EXCEPTION_PROLOG_PSERIES_1(label, h); 201 204
+1
arch/powerpc/include/asm/ppc-opcode.h
··· 460 460 461 461 #define PPC_SLBIA(IH) stringify_in_c(.long PPC_INST_SLBIA | \ 462 462 ((IH & 0x7) << 21)) 463 + #define PPC_INVALIDATE_ERAT PPC_SLBIA(7) 463 464 464 465 #endif /* _ASM_POWERPC_PPC_OPCODE_H */
+8 -3
arch/powerpc/kernel/exceptions-64s.S
··· 116 116 117 117 EXC_REAL_BEGIN(system_reset, 0x100, 0x200) 118 118 SET_SCRATCH0(r13) 119 - EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD, 119 + GET_PACA(r13) 120 + clrrdi r13,r13,1 /* Last bit of HSPRG0 is set if waking from winkle */ 121 + EXCEPTION_PROLOG_PSERIES_PACA(PACA_EXGEN, system_reset_common, EXC_STD, 120 122 IDLETEST, 0x100) 121 123 122 124 EXC_REAL_END(system_reset, 0x100, 0x200) ··· 126 124 127 125 #ifdef CONFIG_PPC_P7_NAP 128 126 EXC_COMMON_BEGIN(system_reset_idle_common) 127 + BEGIN_FTR_SECTION 128 + GET_PACA(r13) /* Restore HSPRG0 to get the winkle bit in r13 */ 129 + END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) 129 130 bl pnv_restore_hyp_resource 130 131 131 132 li r0,PNV_THREAD_RUNNING ··· 174 169 SET_SCRATCH0(r13) /* save r13 */ 175 170 /* 176 171 * Running native on arch 2.06 or later, we may wakeup from winkle 177 - * inside machine check. If yes, then last bit of HSPGR0 would be set 172 + * inside machine check. If yes, then last bit of HSPRG0 would be set 178 173 * to 1. Hence clear it unconditionally. 179 174 */ 180 175 GET_PACA(r13) ··· 393 388 /* 394 389 * Go back to winkle. Please note that this thread was woken up in 395 390 * machine check from winkle and have not restored the per-subcore 396 - * state. Hence before going back to winkle, set last bit of HSPGR0 391 + * state. Hence before going back to winkle, set last bit of HSPRG0 397 392 * to 1. This will make sure that if this thread gets woken up 398 393 * again at reset vector 0x100 then it will get chance to restore 399 394 * the subcore state.
+21 -21
arch/powerpc/kernel/process.c
··· 1215 1215 int instr; 1216 1216 1217 1217 if (!(i % 8)) 1218 - printk("\n"); 1218 + pr_cont("\n"); 1219 1219 1220 1220 #if !defined(CONFIG_BOOKE) 1221 1221 /* If executing with the IMMU off, adjust pc rather ··· 1227 1227 1228 1228 if (!__kernel_text_address(pc) || 1229 1229 probe_kernel_address((unsigned int __user *)pc, instr)) { 1230 - printk(KERN_CONT "XXXXXXXX "); 1230 + pr_cont("XXXXXXXX "); 1231 1231 } else { 1232 1232 if (regs->nip == pc) 1233 - printk(KERN_CONT "<%08x> ", instr); 1233 + pr_cont("<%08x> ", instr); 1234 1234 else 1235 - printk(KERN_CONT "%08x ", instr); 1235 + pr_cont("%08x ", instr); 1236 1236 } 1237 1237 1238 1238 pc += sizeof(int); 1239 1239 } 1240 1240 1241 - printk("\n"); 1241 + pr_cont("\n"); 1242 1242 } 1243 1243 1244 1244 struct regbit { ··· 1282 1282 1283 1283 for (; bits->bit; ++bits) 1284 1284 if (val & bits->bit) { 1285 - printk("%s%s", s, bits->name); 1285 + pr_cont("%s%s", s, bits->name); 1286 1286 s = sep; 1287 1287 } 1288 1288 } ··· 1305 1305 * T: Transactional (bit 34) 1306 1306 */ 1307 1307 if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) { 1308 - printk(",TM["); 1308 + pr_cont(",TM["); 1309 1309 print_bits(val, msr_tm_bits, ""); 1310 - printk("]"); 1310 + pr_cont("]"); 1311 1311 } 1312 1312 } 1313 1313 #else ··· 1316 1316 1317 1317 static void print_msr_bits(unsigned long val) 1318 1318 { 1319 - printk("<"); 1319 + pr_cont("<"); 1320 1320 print_bits(val, msr_bits, ","); 1321 1321 print_tm_bits(val); 1322 - printk(">"); 1322 + pr_cont(">"); 1323 1323 } 1324 1324 1325 1325 #ifdef CONFIG_PPC64 ··· 1347 1347 printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer); 1348 1348 trap = TRAP(regs); 1349 1349 if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR)) 1350 - printk("CFAR: "REG" ", regs->orig_gpr3); 1350 + pr_cont("CFAR: "REG" ", regs->orig_gpr3); 1351 1351 if (trap == 0x200 || trap == 0x300 || trap == 0x600) 1352 1352 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) 1353 - printk("DEAR: "REG" ESR: "REG" ", regs->dar, 
regs->dsisr); 1353 + pr_cont("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr); 1354 1354 #else 1355 - printk("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr); 1355 + pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr); 1356 1356 #endif 1357 1357 #ifdef CONFIG_PPC64 1358 - printk("SOFTE: %ld ", regs->softe); 1358 + pr_cont("SOFTE: %ld ", regs->softe); 1359 1359 #endif 1360 1360 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1361 1361 if (MSR_TM_ACTIVE(regs->msr)) 1362 - printk("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch); 1362 + pr_cont("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch); 1363 1363 #endif 1364 1364 1365 1365 for (i = 0; i < 32; i++) { 1366 1366 if ((i % REGS_PER_LINE) == 0) 1367 - printk("\nGPR%02d: ", i); 1368 - printk(REG " ", regs->gpr[i]); 1367 + pr_cont("\nGPR%02d: ", i); 1368 + pr_cont(REG " ", regs->gpr[i]); 1369 1369 if (i == LAST_VOLATILE && !FULL_REGS(regs)) 1370 1370 break; 1371 1371 } 1372 - printk("\n"); 1372 + pr_cont("\n"); 1373 1373 #ifdef CONFIG_KALLSYMS 1374 1374 /* 1375 1375 * Lookup NIP late so we have the best change of getting the ··· 1900 1900 printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip); 1901 1901 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 1902 1902 if ((ip == rth) && curr_frame >= 0) { 1903 - printk(" (%pS)", 1903 + pr_cont(" (%pS)", 1904 1904 (void *)current->ret_stack[curr_frame].ret); 1905 1905 curr_frame--; 1906 1906 } 1907 1907 #endif 1908 1908 if (firstframe) 1909 - printk(" (unreliable)"); 1910 - printk("\n"); 1909 + pr_cont(" (unreliable)"); 1910 + pr_cont("\n"); 1911 1911 } 1912 1912 firstframe = 0; 1913 1913
+14 -6
arch/powerpc/kernel/setup_64.c
··· 226 226 if (firmware_has_feature(FW_FEATURE_OPAL)) 227 227 opal_configure_cores(); 228 228 229 - /* Enable AIL if supported, and we are in hypervisor mode */ 230 - if (early_cpu_has_feature(CPU_FTR_HVMODE) && 231 - early_cpu_has_feature(CPU_FTR_ARCH_207S)) { 232 - unsigned long lpcr = mfspr(SPRN_LPCR); 233 - mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3); 234 - } 229 + /* AIL on native is done in cpu_ready_for_interrupts() */ 235 230 } 236 231 } 237 232 238 233 static void cpu_ready_for_interrupts(void) 239 234 { 235 + /* 236 + * Enable AIL if supported, and we are in hypervisor mode. This 237 + * is called once for every processor. 238 + * 239 + * If we are not in hypervisor mode the job is done once for 240 + * the whole partition in configure_exceptions(). 241 + */ 242 + if (early_cpu_has_feature(CPU_FTR_HVMODE) && 243 + early_cpu_has_feature(CPU_FTR_ARCH_207S)) { 244 + unsigned long lpcr = mfspr(SPRN_LPCR); 245 + mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3); 246 + } 247 + 240 248 /* Set IR and DR in PACA MSR */ 241 249 get_paca()->kernel_msr = MSR_KERNEL; 242 250 }
+4
arch/powerpc/mm/hash_utils_64.c
··· 1029 1029 { 1030 1030 /* Initialize hash table for that CPU */ 1031 1031 if (!firmware_has_feature(FW_FEATURE_LPAR)) { 1032 + 1033 + if (cpu_has_feature(CPU_FTR_POWER9_DD1)) 1034 + update_hid_for_hash(); 1035 + 1032 1036 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 1033 1037 mtspr(SPRN_SDR1, _SDR1); 1034 1038 else
+4
arch/powerpc/mm/pgtable-radix.c
··· 388 388 * update partition table control register and UPRT 389 389 */ 390 390 if (!firmware_has_feature(FW_FEATURE_LPAR)) { 391 + 392 + if (cpu_has_feature(CPU_FTR_POWER9_DD1)) 393 + update_hid_for_radix(); 394 + 391 395 lpcr = mfspr(SPRN_LPCR); 392 396 mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR); 393 397
+4
arch/powerpc/mm/tlb-radix.c
··· 50 50 for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) { 51 51 __tlbiel_pid(pid, set, ric); 52 52 } 53 + if (cpu_has_feature(CPU_FTR_POWER9_DD1)) 54 + asm volatile(PPC_INVALIDATE_ERAT : : :"memory"); 53 55 return; 54 56 } 55 57 ··· 85 83 asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1) 86 84 : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); 87 85 asm volatile("ptesync": : :"memory"); 86 + if (cpu_has_feature(CPU_FTR_POWER9_DD1)) 87 + asm volatile(PPC_INVALIDATE_ERAT : : :"memory"); 88 88 } 89 89 90 90 static inline void _tlbie_va(unsigned long va, unsigned long pid,
+23
arch/sparc/Kconfig
··· 43 43 select ARCH_HAS_SG_CHAIN 44 44 select CPU_NO_EFFICIENT_FFS 45 45 select HAVE_ARCH_HARDENED_USERCOPY 46 + select PROVE_LOCKING_SMALL if PROVE_LOCKING 46 47 47 48 config SPARC32 48 49 def_bool !64BIT ··· 89 88 90 89 config ARCH_PROC_KCORE_TEXT 91 90 def_bool y 91 + 92 + config ARCH_ATU 93 + bool 94 + default y if SPARC64 95 + 96 + config ARCH_DMA_ADDR_T_64BIT 97 + bool 98 + default y if ARCH_ATU 92 99 93 100 config IOMMU_HELPER 94 101 bool ··· 312 303 313 304 config ARCH_SPARSEMEM_DEFAULT 314 305 def_bool y if SPARC64 306 + 307 + config FORCE_MAX_ZONEORDER 308 + int "Maximum zone order" 309 + default "13" 310 + help 311 + The kernel memory allocator divides physically contiguous memory 312 + blocks into "zones", where each zone is a power of two number of 313 + pages. This option selects the largest power of two that the kernel 314 + keeps in the memory allocator. If you need to allocate very large 315 + blocks of physically contiguous memory, then you may need to 316 + increase this value. 317 + 318 + This config option is actually maximum order plus one. For example, 319 + a value of 13 means that the largest free memory block is 2^12 pages. 315 320 316 321 source "mm/Kconfig" 317 322
+343
arch/sparc/include/asm/hypervisor.h
··· 2335 2335 */ 2336 2336 #define HV_FAST_PCI_MSG_SETVALID 0xd3 2337 2337 2338 + /* PCI IOMMU v2 definitions and services 2339 + * 2340 + * While the PCI IO definitions above is valid IOMMU v2 adds new PCI IO 2341 + * definitions and services. 2342 + * 2343 + * CTE Clump Table Entry. First level table entry in the ATU. 2344 + * 2345 + * pci_device_list 2346 + * A 32-bit aligned list of pci_devices. 2347 + * 2348 + * pci_device_listp 2349 + * real address of a pci_device_list. 32-bit aligned. 2350 + * 2351 + * iotte IOMMU translation table entry. 2352 + * 2353 + * iotte_attributes 2354 + * IO Attributes for IOMMU v2 mappings. In addition to 2355 + * read, write IOMMU v2 supports relax ordering 2356 + * 2357 + * io_page_list A 64-bit aligned list of real addresses. Each real 2358 + * address in an io_page_list must be properly aligned 2359 + * to the pagesize of the given IOTSB. 2360 + * 2361 + * io_page_list_p Real address of an io_page_list, 64-bit aligned. 2362 + * 2363 + * IOTSB IO Translation Storage Buffer. An aligned table of 2364 + * IOTTEs. Each IOTSB has a pagesize, table size, and 2365 + * virtual address associated with it that must match 2366 + * a pagesize and table size supported by the un-derlying 2367 + * hardware implementation. The alignment requirements 2368 + * for an IOTSB depend on the pagesize used for that IOTSB. 2369 + * Each IOTTE in an IOTSB maps one pagesize-sized page. 2370 + * The size of the IOTSB dictates how large of a virtual 2371 + * address space the IOTSB is capable of mapping. 2372 + * 2373 + * iotsb_handle An opaque identifier for an IOTSB. A devhandle plus 2374 + * iotsb_handle represents a binding of an IOTSB to a 2375 + * PCI root complex. 2376 + * 2377 + * iotsb_index Zero-based IOTTE number within an IOTSB. 
2378 + */ 2379 + 2380 + /* The index_count argument consists of two fields: 2381 + * bits 63:48 #iottes and bits 47:0 iotsb_index 2382 + */ 2383 + #define HV_PCI_IOTSB_INDEX_COUNT(__iottes, __iotsb_index) \ 2384 + (((u64)(__iottes) << 48UL) | ((u64)(__iotsb_index))) 2385 + 2386 + /* pci_iotsb_conf() 2387 + * TRAP: HV_FAST_TRAP 2388 + * FUNCTION: HV_FAST_PCI_IOTSB_CONF 2389 + * ARG0: devhandle 2390 + * ARG1: r_addr 2391 + * ARG2: size 2392 + * ARG3: pagesize 2393 + * ARG4: iova 2394 + * RET0: status 2395 + * RET1: iotsb_handle 2396 + * ERRORS: EINVAL Invalid devhandle, size, iova, or pagesize 2397 + * EBADALIGN r_addr is not properly aligned 2398 + * ENORADDR r_addr is not a valid real address 2399 + * ETOOMANY No further IOTSBs may be configured 2400 + * EBUSY Duplicate devhandle, raddir, iova combination 2401 + * 2402 + * Create an IOTSB suitable for the PCI root complex identified by devhandle, 2403 + * for the DMA virtual address defined by the argument iova. 2404 + * 2405 + * r_addr is the properly aligned base address of the IOTSB and size is the 2406 + * IOTSB (table) size in bytes.The IOTSB is required to be zeroed prior to 2407 + * being configured. If it contains any values other than zeros then the 2408 + * behavior is undefined. 2409 + * 2410 + * pagesize is the size of each page in the IOTSB. Note that the combination of 2411 + * size (table size) and pagesize must be valid. 2412 + * 2413 + * virt is the DMA virtual address this IOTSB will map. 2414 + * 2415 + * If successful, the opaque 64-bit handle iotsb_handle is returned in ret1. 2416 + * Once configured, privileged access to the IOTSB memory is prohibited and 2417 + * creates undefined behavior. The only permitted access is indirect via these 2418 + * services. 
2419 + */ 2420 + #define HV_FAST_PCI_IOTSB_CONF 0x190 2421 + 2422 + /* pci_iotsb_info() 2423 + * TRAP: HV_FAST_TRAP 2424 + * FUNCTION: HV_FAST_PCI_IOTSB_INFO 2425 + * ARG0: devhandle 2426 + * ARG1: iotsb_handle 2427 + * RET0: status 2428 + * RET1: r_addr 2429 + * RET2: size 2430 + * RET3: pagesize 2431 + * RET4: iova 2432 + * RET5: #bound 2433 + * ERRORS: EINVAL Invalid devhandle or iotsb_handle 2434 + * 2435 + * This service returns configuration information about an IOTSB previously 2436 + * created with pci_iotsb_conf. 2437 + * 2438 + * iotsb_handle value 0 may be used with this service to inquire about the 2439 + * legacy IOTSB that may or may not exist. If the service succeeds, the return 2440 + * values describe the legacy IOTSB and I/O virtual addresses mapped by that 2441 + * table. However, the table base address r_addr may contain the value -1 which 2442 + * indicates a memory range that cannot be accessed or be reclaimed. 2443 + * 2444 + * The return value #bound contains the number of PCI devices that iotsb_handle 2445 + * is currently bound to. 2446 + */ 2447 + #define HV_FAST_PCI_IOTSB_INFO 0x191 2448 + 2449 + /* pci_iotsb_unconf() 2450 + * TRAP: HV_FAST_TRAP 2451 + * FUNCTION: HV_FAST_PCI_IOTSB_UNCONF 2452 + * ARG0: devhandle 2453 + * ARG1: iotsb_handle 2454 + * RET0: status 2455 + * ERRORS: EINVAL Invalid devhandle or iotsb_handle 2456 + * EBUSY The IOTSB is bound and may not be unconfigured 2457 + * 2458 + * This service unconfigures the IOTSB identified by the devhandle and 2459 + * iotsb_handle arguments, previously created with pci_iotsb_conf. 2460 + * The IOTSB must not be currently bound to any device or the service will fail 2461 + * 2462 + * If the call succeeds, iotsb_handle is no longer valid. 
2463 + */ 2464 + #define HV_FAST_PCI_IOTSB_UNCONF 0x192 2465 + 2466 + /* pci_iotsb_bind() 2467 + * TRAP: HV_FAST_TRAP 2468 + * FUNCTION: HV_FAST_PCI_IOTSB_BIND 2469 + * ARG0: devhandle 2470 + * ARG1: iotsb_handle 2471 + * ARG2: pci_device 2472 + * RET0: status 2473 + * ERRORS: EINVAL Invalid devhandle, iotsb_handle, or pci_device 2474 + * EBUSY A PCI function is already bound to an IOTSB at the same 2475 + * address range as specified by devhandle, iotsb_handle. 2476 + * 2477 + * This service binds the PCI function specified by the argument pci_device to 2478 + * the IOTSB specified by the arguments devhandle and iotsb_handle. 2479 + * 2480 + * The PCI device function is bound to the specified IOTSB with the IOVA range 2481 + * specified when the IOTSB was configured via pci_iotsb_conf. If the function 2482 + * is already bound then it is unbound first. 2483 + */ 2484 + #define HV_FAST_PCI_IOTSB_BIND 0x193 2485 + 2486 + /* pci_iotsb_unbind() 2487 + * TRAP: HV_FAST_TRAP 2488 + * FUNCTION: HV_FAST_PCI_IOTSB_UNBIND 2489 + * ARG0: devhandle 2490 + * ARG1: iotsb_handle 2491 + * ARG2: pci_device 2492 + * RET0: status 2493 + * ERRORS: EINVAL Invalid devhandle, iotsb_handle, or pci_device 2494 + * ENOMAP The PCI function was not bound to the specified IOTSB 2495 + * 2496 + * This service unbinds the PCI device specified by the argument pci_device 2497 + * from the IOTSB identified * by the arguments devhandle and iotsb_handle. 
2498 + * 2499 + * If the PCI device is not bound to the specified IOTSB then this service will 2500 + * fail with status ENOMAP 2501 + */ 2502 + #define HV_FAST_PCI_IOTSB_UNBIND 0x194 2503 + 2504 + /* pci_iotsb_get_binding() 2505 + * TRAP: HV_FAST_TRAP 2506 + * FUNCTION: HV_FAST_PCI_IOTSB_GET_BINDING 2507 + * ARG0: devhandle 2508 + * ARG1: iotsb_handle 2509 + * ARG2: iova 2510 + * RET0: status 2511 + * RET1: iotsb_handle 2512 + * ERRORS: EINVAL Invalid devhandle, pci_device, or iova 2513 + * ENOMAP The PCI function is not bound to an IOTSB at iova 2514 + * 2515 + * This service returns the IOTSB binding, iotsb_handle, for a given pci_device 2516 + * and DMA virtual address, iova. 2517 + * 2518 + * iova must be the base address of a DMA virtual address range as defined by 2519 + * the iommu-address-ranges property in the root complex device node defined 2520 + * by the argument devhandle. 2521 + */ 2522 + #define HV_FAST_PCI_IOTSB_GET_BINDING 0x195 2523 + 2524 + /* pci_iotsb_map() 2525 + * TRAP: HV_FAST_TRAP 2526 + * FUNCTION: HV_FAST_PCI_IOTSB_MAP 2527 + * ARG0: devhandle 2528 + * ARG1: iotsb_handle 2529 + * ARG2: index_count 2530 + * ARG3: iotte_attributes 2531 + * ARG4: io_page_list_p 2532 + * RET0: status 2533 + * RET1: #mapped 2534 + * ERRORS: EINVAL Invalid devhandle, iotsb_handle, #iottes, 2535 + * iotsb_index or iotte_attributes 2536 + * EBADALIGN Improperly aligned io_page_list_p or I/O page 2537 + * address in the I/O page list. 2538 + * ENORADDR Invalid io_page_list_p or I/O page address in 2539 + * the I/O page list. 2540 + * 2541 + * This service creates and flushes mappings in the IOTSB defined by the 2542 + * arguments devhandle, iotsb. 2543 + * 2544 + * The index_count argument consists of two fields. Bits 63:48 contain #iotte 2545 + * and bits 47:0 contain iotsb_index 2546 + * 2547 + * The first mapping is created in the IOTSB index specified by iotsb_index. 2548 + * Subsequent mappings are created at iotsb_index+1 and so on. 
2549 + * 2550 + * The attributes of each mapping are defined by the argument iotte_attributes. 2551 + * 2552 + * The io_page_list_p specifies the real address of the 64-bit-aligned list of 2553 + * #iottes I/O page addresses. Each page address must be a properly aligned 2554 + * real address of a page to be mapped in the IOTSB. The first entry in the I/O 2555 + * page list contains the real address of the first page, the 2nd entry for the 2556 + * 2nd page, and so on. 2557 + * 2558 + * #iottes must be greater than zero. 2559 + * 2560 + * The return value #mapped is the actual number of mappings created, which may 2561 + * be less than or equal to the argument #iottes. If the function returns 2562 + * successfully with a #mapped value less than the requested #iottes then the 2563 + * caller should continue to invoke the service with updated iotsb_index, 2564 + * #iottes, and io_page_list_p arguments until all pages are mapped. 2565 + * 2566 + * This service must not be used to demap a mapping. In other words, all 2567 + * mappings must be valid and have one or both of the RW attribute bits set. 2568 + * 2569 + * Note: 2570 + * It is implementation-defined whether I/O page real address validity checking 2571 + * is done at time mappings are established or deferred until they are 2572 + * accessed. 2573 + */ 2574 + #define HV_FAST_PCI_IOTSB_MAP 0x196 2575 + 2576 + /* pci_iotsb_map_one() 2577 + * TRAP: HV_FAST_TRAP 2578 + * FUNCTION: HV_FAST_PCI_IOTSB_MAP_ONE 2579 + * ARG0: devhandle 2580 + * ARG1: iotsb_handle 2581 + * ARG2: iotsb_index 2582 + * ARG3: iotte_attributes 2583 + * ARG4: r_addr 2584 + * RET0: status 2585 + * ERRORS: EINVAL Invalid devhandle,iotsb_handle, iotsb_index 2586 + * or iotte_attributes 2587 + * EBADALIGN Improperly aligned r_addr 2588 + * ENORADDR Invalid r_addr 2589 + * 2590 + * This service creates and flushes a single mapping in the IOTSB defined by the 2591 + * arguments devhandle, iotsb. 
2592 + * 2593 + * The mapping for the page at r_addr is created at the IOTSB index specified by 2594 + * iotsb_index with the attributes iotte_attributes. 2595 + * 2596 + * This service must not be used to demap a mapping. In other words, the mapping 2597 + * must be valid and have one or both of the RW attribute bits set. 2598 + * 2599 + * Note: 2600 + * It is implementation-defined whether I/O page real address validity checking 2601 + * is done at time mappings are established or deferred until they are 2602 + * accessed. 2603 + */ 2604 + #define HV_FAST_PCI_IOTSB_MAP_ONE 0x197 2605 + 2606 + /* pci_iotsb_demap() 2607 + * TRAP: HV_FAST_TRAP 2608 + * FUNCTION: HV_FAST_PCI_IOTSB_DEMAP 2609 + * ARG0: devhandle 2610 + * ARG1: iotsb_handle 2611 + * ARG2: iotsb_index 2612 + * ARG3: #iottes 2613 + * RET0: status 2614 + * RET1: #unmapped 2615 + * ERRORS: EINVAL Invalid devhandle, iotsb_handle, iotsb_index or #iottes 2616 + * 2617 + * This service unmaps and flushes up to #iottes mappings starting at index 2618 + * iotsb_index from the IOTSB defined by the arguments devhandle, iotsb. 2619 + * 2620 + * #iottes must be greater than zero. 2621 + * 2622 + * The actual number of IOTTEs unmapped is returned in #unmapped and may be less 2623 + * than or equal to the requested number of IOTTEs, #iottes. 2624 + * 2625 + * If #unmapped is less than #iottes, the caller should continue to invoke this 2626 + * service with updated iotsb_index and #iottes arguments until all pages are 2627 + * demapped. 
2628 + */ 2629 + #define HV_FAST_PCI_IOTSB_DEMAP 0x198 2630 + 2631 + /* pci_iotsb_getmap() 2632 + * TRAP: HV_FAST_TRAP 2633 + * FUNCTION: HV_FAST_PCI_IOTSB_GETMAP 2634 + * ARG0: devhandle 2635 + * ARG1: iotsb_handle 2636 + * ARG2: iotsb_index 2637 + * RET0: status 2638 + * RET1: r_addr 2639 + * RET2: iotte_attributes 2640 + * ERRORS: EINVAL Invalid devhandle, iotsb_handle, or iotsb_index 2641 + * ENOMAP No mapping was found 2642 + * 2643 + * This service returns the mapping specified by index iotsb_index from the 2644 + * IOTSB defined by the arguments devhandle, iotsb. 2645 + * 2646 + * Upon success, the real address of the mapping shall be returned in 2647 + * r_addr and thethe IOTTE mapping attributes shall be returned in 2648 + * iotte_attributes. 2649 + * 2650 + * The return value iotte_attributes may not include optional features used in 2651 + * the call to create the mapping. 2652 + */ 2653 + #define HV_FAST_PCI_IOTSB_GETMAP 0x199 2654 + 2655 + /* pci_iotsb_sync_mappings() 2656 + * TRAP: HV_FAST_TRAP 2657 + * FUNCTION: HV_FAST_PCI_IOTSB_SYNC_MAPPINGS 2658 + * ARG0: devhandle 2659 + * ARG1: iotsb_handle 2660 + * ARG2: iotsb_index 2661 + * ARG3: #iottes 2662 + * RET0: status 2663 + * RET1: #synced 2664 + * ERROS: EINVAL Invalid devhandle, iotsb_handle, iotsb_index, or #iottes 2665 + * 2666 + * This service synchronizes #iottes mappings starting at index iotsb_index in 2667 + * the IOTSB defined by the arguments devhandle, iotsb. 2668 + * 2669 + * #iottes must be greater than zero. 2670 + * 2671 + * The actual number of IOTTEs synchronized is returned in #synced, which may 2672 + * be less than or equal to the requested number, #iottes. 2673 + * 2674 + * Upon a successful return, #synced is less than #iottes, the caller should 2675 + * continue to invoke this service with updated iotsb_index and #iottes 2676 + * arguments until all pages are synchronized. 
2677 + */ 2678 + #define HV_FAST_PCI_IOTSB_SYNC_MAPPINGS 0x19a 2679 + 2338 2680 /* Logical Domain Channel services. */ 2339 2681 2340 2682 #define LDC_CHANNEL_DOWN 0 ··· 3335 2993 #define HV_GRP_SDIO 0x0108 3336 2994 #define HV_GRP_SDIO_ERR 0x0109 3337 2995 #define HV_GRP_REBOOT_DATA 0x0110 2996 + #define HV_GRP_ATU 0x0111 3338 2997 #define HV_GRP_M7_PERF 0x0114 3339 2998 #define HV_GRP_NIAG_PERF 0x0200 3340 2999 #define HV_GRP_FIRE_PERF 0x0201
+28
arch/sparc/include/asm/iommu_64.h
··· 24 24 unsigned int limit; 25 25 }; 26 26 27 + #define ATU_64_SPACE_SIZE 0x800000000 /* 32G */ 28 + 29 + /* Data structures for SPARC ATU architecture */ 30 + struct atu_iotsb { 31 + void *table; /* IOTSB table base virtual addr*/ 32 + u64 ra; /* IOTSB table real addr */ 33 + u64 dvma_size; /* ranges[3].size or OS slected 32G size */ 34 + u64 dvma_base; /* ranges[3].base */ 35 + u64 table_size; /* IOTSB table size */ 36 + u64 page_size; /* IO PAGE size for IOTSB */ 37 + u32 iotsb_num; /* tsbnum is same as iotsb_handle */ 38 + }; 39 + 40 + struct atu_ranges { 41 + u64 base; 42 + u64 size; 43 + }; 44 + 45 + struct atu { 46 + struct atu_ranges *ranges; 47 + struct atu_iotsb *iotsb; 48 + struct iommu_map_table tbl; 49 + u64 base; 50 + u64 size; 51 + u64 dma_addr_mask; 52 + }; 53 + 27 54 struct iommu { 28 55 struct iommu_map_table tbl; 56 + struct atu *atu; 29 57 spinlock_t lock; 30 58 u32 dma_addr_mask; 31 59 iopte_t *page_table;
+1
arch/sparc/kernel/hvapi.c
··· 39 39 { .group = HV_GRP_SDIO, }, 40 40 { .group = HV_GRP_SDIO_ERR, }, 41 41 { .group = HV_GRP_REBOOT_DATA, }, 42 + { .group = HV_GRP_ATU, .flags = FLAG_PRE_API }, 42 43 { .group = HV_GRP_NIAG_PERF, .flags = FLAG_PRE_API }, 43 44 { .group = HV_GRP_FIRE_PERF, }, 44 45 { .group = HV_GRP_N2_CPU, },
+6 -2
arch/sparc/kernel/iommu.c
··· 760 760 struct iommu *iommu = dev->archdata.iommu; 761 761 u64 dma_addr_mask = iommu->dma_addr_mask; 762 762 763 - if (device_mask >= (1UL << 32UL)) 764 - return 0; 763 + if (device_mask > DMA_BIT_MASK(32)) { 764 + if (iommu->atu) 765 + dma_addr_mask = iommu->atu->dma_addr_mask; 766 + else 767 + return 0; 768 + } 765 769 766 770 if ((device_mask & dma_addr_mask) == dma_addr_mask) 767 771 return 1;
-1
arch/sparc/kernel/iommu_common.h
··· 13 13 #include <linux/scatterlist.h> 14 14 #include <linux/device.h> 15 15 #include <linux/iommu-helper.h> 16 - #include <linux/scatterlist.h> 17 16 18 17 #include <asm/iommu.h> 19 18
+360 -58
arch/sparc/kernel/pci_sun4v.c
··· 44 44 { .major = 1, .minor = 1 }, 45 45 }; 46 46 47 + static unsigned long vatu_major = 1; 48 + static unsigned long vatu_minor = 1; 49 + 47 50 #define PGLIST_NENTS (PAGE_SIZE / sizeof(u64)) 48 51 49 52 struct iommu_batch { ··· 72 69 } 73 70 74 71 /* Interrupts must be disabled. */ 75 - static long iommu_batch_flush(struct iommu_batch *p) 72 + static long iommu_batch_flush(struct iommu_batch *p, u64 mask) 76 73 { 77 74 struct pci_pbm_info *pbm = p->dev->archdata.host_controller; 75 + u64 *pglist = p->pglist; 76 + u64 index_count; 78 77 unsigned long devhandle = pbm->devhandle; 79 78 unsigned long prot = p->prot; 80 79 unsigned long entry = p->entry; 81 - u64 *pglist = p->pglist; 82 80 unsigned long npages = p->npages; 81 + unsigned long iotsb_num; 82 + unsigned long ret; 83 + long num; 83 84 84 85 /* VPCI maj=1, min=[0,1] only supports read and write */ 85 86 if (vpci_major < 2) 86 87 prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE); 87 88 88 89 while (npages != 0) { 89 - long num; 90 - 91 - num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry), 92 - npages, prot, __pa(pglist)); 93 - if (unlikely(num < 0)) { 94 - if (printk_ratelimit()) 95 - printk("iommu_batch_flush: IOMMU map of " 96 - "[%08lx:%08llx:%lx:%lx:%lx] failed with " 97 - "status %ld\n", 98 - devhandle, HV_PCI_TSBID(0, entry), 99 - npages, prot, __pa(pglist), num); 100 - return -1; 90 + if (mask <= DMA_BIT_MASK(32)) { 91 + num = pci_sun4v_iommu_map(devhandle, 92 + HV_PCI_TSBID(0, entry), 93 + npages, 94 + prot, 95 + __pa(pglist)); 96 + if (unlikely(num < 0)) { 97 + pr_err_ratelimited("%s: IOMMU map of [%08lx:%08llx:%lx:%lx:%lx] failed with status %ld\n", 98 + __func__, 99 + devhandle, 100 + HV_PCI_TSBID(0, entry), 101 + npages, prot, __pa(pglist), 102 + num); 103 + return -1; 104 + } 105 + } else { 106 + index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry), 107 + iotsb_num = pbm->iommu->atu->iotsb->iotsb_num; 108 + ret = pci_sun4v_iotsb_map(devhandle, 109 + iotsb_num, 110 + index_count, 
111 + prot, 112 + __pa(pglist), 113 + &num); 114 + if (unlikely(ret != HV_EOK)) { 115 + pr_err_ratelimited("%s: ATU map of [%08lx:%lx:%llx:%lx:%lx] failed with status %ld\n", 116 + __func__, 117 + devhandle, iotsb_num, 118 + index_count, prot, 119 + __pa(pglist), ret); 120 + return -1; 121 + } 101 122 } 102 - 103 123 entry += num; 104 124 npages -= num; 105 125 pglist += num; ··· 134 108 return 0; 135 109 } 136 110 137 - static inline void iommu_batch_new_entry(unsigned long entry) 111 + static inline void iommu_batch_new_entry(unsigned long entry, u64 mask) 138 112 { 139 113 struct iommu_batch *p = this_cpu_ptr(&iommu_batch); 140 114 141 115 if (p->entry + p->npages == entry) 142 116 return; 143 117 if (p->entry != ~0UL) 144 - iommu_batch_flush(p); 118 + iommu_batch_flush(p, mask); 145 119 p->entry = entry; 146 120 } 147 121 148 122 /* Interrupts must be disabled. */ 149 - static inline long iommu_batch_add(u64 phys_page) 123 + static inline long iommu_batch_add(u64 phys_page, u64 mask) 150 124 { 151 125 struct iommu_batch *p = this_cpu_ptr(&iommu_batch); 152 126 ··· 154 128 155 129 p->pglist[p->npages++] = phys_page; 156 130 if (p->npages == PGLIST_NENTS) 157 - return iommu_batch_flush(p); 131 + return iommu_batch_flush(p, mask); 158 132 159 133 return 0; 160 134 } 161 135 162 136 /* Interrupts must be disabled. 
*/ 163 - static inline long iommu_batch_end(void) 137 + static inline long iommu_batch_end(u64 mask) 164 138 { 165 139 struct iommu_batch *p = this_cpu_ptr(&iommu_batch); 166 140 167 141 BUG_ON(p->npages >= PGLIST_NENTS); 168 142 169 - return iommu_batch_flush(p); 143 + return iommu_batch_flush(p, mask); 170 144 } 171 145 172 146 static void *dma_4v_alloc_coherent(struct device *dev, size_t size, 173 147 dma_addr_t *dma_addrp, gfp_t gfp, 174 148 unsigned long attrs) 175 149 { 150 + u64 mask; 176 151 unsigned long flags, order, first_page, npages, n; 177 152 unsigned long prot = 0; 178 153 struct iommu *iommu; 154 + struct atu *atu; 155 + struct iommu_map_table *tbl; 179 156 struct page *page; 180 157 void *ret; 181 158 long entry; ··· 203 174 memset((char *)first_page, 0, PAGE_SIZE << order); 204 175 205 176 iommu = dev->archdata.iommu; 177 + atu = iommu->atu; 206 178 207 - entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, 179 + mask = dev->coherent_dma_mask; 180 + if (mask <= DMA_BIT_MASK(32)) 181 + tbl = &iommu->tbl; 182 + else 183 + tbl = &atu->tbl; 184 + 185 + entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL, 208 186 (unsigned long)(-1), 0); 209 187 210 188 if (unlikely(entry == IOMMU_ERROR_CODE)) 211 189 goto range_alloc_fail; 212 190 213 - *dma_addrp = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT)); 191 + *dma_addrp = (tbl->table_map_base + (entry << IO_PAGE_SHIFT)); 214 192 ret = (void *) first_page; 215 193 first_page = __pa(first_page); 216 194 ··· 229 193 entry); 230 194 231 195 for (n = 0; n < npages; n++) { 232 - long err = iommu_batch_add(first_page + (n * PAGE_SIZE)); 196 + long err = iommu_batch_add(first_page + (n * PAGE_SIZE), mask); 233 197 if (unlikely(err < 0L)) 234 198 goto iommu_map_fail; 235 199 } 236 200 237 - if (unlikely(iommu_batch_end() < 0L)) 201 + if (unlikely(iommu_batch_end(mask) < 0L)) 238 202 goto iommu_map_fail; 239 203 240 204 local_irq_restore(flags); ··· 242 206 return ret; 243 207 244 208 
iommu_map_fail: 245 - iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, IOMMU_ERROR_CODE); 209 + iommu_tbl_range_free(tbl, *dma_addrp, npages, IOMMU_ERROR_CODE); 246 210 247 211 range_alloc_fail: 248 212 free_pages(first_page, order); 249 213 return NULL; 250 214 } 251 215 252 - static void dma_4v_iommu_demap(void *demap_arg, unsigned long entry, 253 - unsigned long npages) 216 + unsigned long dma_4v_iotsb_bind(unsigned long devhandle, 217 + unsigned long iotsb_num, 218 + struct pci_bus *bus_dev) 254 219 { 255 - u32 devhandle = *(u32 *)demap_arg; 220 + struct pci_dev *pdev; 221 + unsigned long err; 222 + unsigned int bus; 223 + unsigned int device; 224 + unsigned int fun; 225 + 226 + list_for_each_entry(pdev, &bus_dev->devices, bus_list) { 227 + if (pdev->subordinate) { 228 + /* No need to bind pci bridge */ 229 + dma_4v_iotsb_bind(devhandle, iotsb_num, 230 + pdev->subordinate); 231 + } else { 232 + bus = bus_dev->number; 233 + device = PCI_SLOT(pdev->devfn); 234 + fun = PCI_FUNC(pdev->devfn); 235 + err = pci_sun4v_iotsb_bind(devhandle, iotsb_num, 236 + HV_PCI_DEVICE_BUILD(bus, 237 + device, 238 + fun)); 239 + 240 + /* If bind fails for one device it is going to fail 241 + * for rest of the devices because we are sharing 242 + * IOTSB. So in case of failure simply return with 243 + * error. 
244 + */ 245 + if (err) 246 + return err; 247 + } 248 + } 249 + 250 + return 0; 251 + } 252 + 253 + static void dma_4v_iommu_demap(struct device *dev, unsigned long devhandle, 254 + dma_addr_t dvma, unsigned long iotsb_num, 255 + unsigned long entry, unsigned long npages) 256 + { 256 257 unsigned long num, flags; 258 + unsigned long ret; 257 259 258 260 local_irq_save(flags); 259 261 do { 260 - num = pci_sun4v_iommu_demap(devhandle, 261 - HV_PCI_TSBID(0, entry), 262 - npages); 263 - 262 + if (dvma <= DMA_BIT_MASK(32)) { 263 + num = pci_sun4v_iommu_demap(devhandle, 264 + HV_PCI_TSBID(0, entry), 265 + npages); 266 + } else { 267 + ret = pci_sun4v_iotsb_demap(devhandle, iotsb_num, 268 + entry, npages, &num); 269 + if (unlikely(ret != HV_EOK)) { 270 + pr_err_ratelimited("pci_iotsb_demap() failed with error: %ld\n", 271 + ret); 272 + } 273 + } 264 274 entry += num; 265 275 npages -= num; 266 276 } while (npages != 0); ··· 318 236 { 319 237 struct pci_pbm_info *pbm; 320 238 struct iommu *iommu; 239 + struct atu *atu; 240 + struct iommu_map_table *tbl; 321 241 unsigned long order, npages, entry; 242 + unsigned long iotsb_num; 322 243 u32 devhandle; 323 244 324 245 npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; 325 246 iommu = dev->archdata.iommu; 326 247 pbm = dev->archdata.host_controller; 248 + atu = iommu->atu; 327 249 devhandle = pbm->devhandle; 328 - entry = ((dvma - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT); 329 - dma_4v_iommu_demap(&devhandle, entry, npages); 330 - iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE); 250 + 251 + if (dvma <= DMA_BIT_MASK(32)) { 252 + tbl = &iommu->tbl; 253 + iotsb_num = 0; /* we don't care for legacy iommu */ 254 + } else { 255 + tbl = &atu->tbl; 256 + iotsb_num = atu->iotsb->iotsb_num; 257 + } 258 + entry = ((dvma - tbl->table_map_base) >> IO_PAGE_SHIFT); 259 + dma_4v_iommu_demap(dev, devhandle, dvma, iotsb_num, entry, npages); 260 + iommu_tbl_range_free(tbl, dvma, npages, IOMMU_ERROR_CODE); 331 261 order = 
get_order(size); 332 262 if (order < 10) 333 263 free_pages((unsigned long)cpu, order); ··· 351 257 unsigned long attrs) 352 258 { 353 259 struct iommu *iommu; 260 + struct atu *atu; 261 + struct iommu_map_table *tbl; 262 + u64 mask; 354 263 unsigned long flags, npages, oaddr; 355 264 unsigned long i, base_paddr; 356 - u32 bus_addr, ret; 357 265 unsigned long prot; 266 + dma_addr_t bus_addr, ret; 358 267 long entry; 359 268 360 269 iommu = dev->archdata.iommu; 270 + atu = iommu->atu; 361 271 362 272 if (unlikely(direction == DMA_NONE)) 363 273 goto bad; ··· 370 272 npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK); 371 273 npages >>= IO_PAGE_SHIFT; 372 274 373 - entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, 275 + mask = *dev->dma_mask; 276 + if (mask <= DMA_BIT_MASK(32)) 277 + tbl = &iommu->tbl; 278 + else 279 + tbl = &atu->tbl; 280 + 281 + entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL, 374 282 (unsigned long)(-1), 0); 375 283 376 284 if (unlikely(entry == IOMMU_ERROR_CODE)) 377 285 goto bad; 378 286 379 - bus_addr = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT)); 287 + bus_addr = (tbl->table_map_base + (entry << IO_PAGE_SHIFT)); 380 288 ret = bus_addr | (oaddr & ~IO_PAGE_MASK); 381 289 base_paddr = __pa(oaddr & IO_PAGE_MASK); 382 290 prot = HV_PCI_MAP_ATTR_READ; ··· 397 293 iommu_batch_start(dev, prot, entry); 398 294 399 295 for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) { 400 - long err = iommu_batch_add(base_paddr); 296 + long err = iommu_batch_add(base_paddr, mask); 401 297 if (unlikely(err < 0L)) 402 298 goto iommu_map_fail; 403 299 } 404 - if (unlikely(iommu_batch_end() < 0L)) 300 + if (unlikely(iommu_batch_end(mask) < 0L)) 405 301 goto iommu_map_fail; 406 302 407 303 local_irq_restore(flags); ··· 414 310 return DMA_ERROR_CODE; 415 311 416 312 iommu_map_fail: 417 - iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE); 313 + iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE); 418 
314 return DMA_ERROR_CODE; 419 315 } 420 316 ··· 424 320 { 425 321 struct pci_pbm_info *pbm; 426 322 struct iommu *iommu; 323 + struct atu *atu; 324 + struct iommu_map_table *tbl; 427 325 unsigned long npages; 326 + unsigned long iotsb_num; 428 327 long entry; 429 328 u32 devhandle; 430 329 ··· 439 332 440 333 iommu = dev->archdata.iommu; 441 334 pbm = dev->archdata.host_controller; 335 + atu = iommu->atu; 442 336 devhandle = pbm->devhandle; 443 337 444 338 npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK); 445 339 npages >>= IO_PAGE_SHIFT; 446 340 bus_addr &= IO_PAGE_MASK; 447 - entry = (bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT; 448 - dma_4v_iommu_demap(&devhandle, entry, npages); 449 - iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE); 341 + 342 + if (bus_addr <= DMA_BIT_MASK(32)) { 343 + iotsb_num = 0; /* we don't care for legacy iommu */ 344 + tbl = &iommu->tbl; 345 + } else { 346 + iotsb_num = atu->iotsb->iotsb_num; 347 + tbl = &atu->tbl; 348 + } 349 + entry = (bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT; 350 + dma_4v_iommu_demap(dev, devhandle, bus_addr, iotsb_num, entry, npages); 351 + iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE); 450 352 } 451 353 452 354 static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, ··· 469 353 unsigned long seg_boundary_size; 470 354 int outcount, incount, i; 471 355 struct iommu *iommu; 356 + struct atu *atu; 357 + struct iommu_map_table *tbl; 358 + u64 mask; 472 359 unsigned long base_shift; 473 360 long err; 474 361 475 362 BUG_ON(direction == DMA_NONE); 476 363 477 364 iommu = dev->archdata.iommu; 365 + atu = iommu->atu; 366 + 478 367 if (nelems == 0 || !iommu) 479 368 return 0; 480 369 ··· 505 384 max_seg_size = dma_get_max_seg_size(dev); 506 385 seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1, 507 386 IO_PAGE_SIZE) >> IO_PAGE_SHIFT; 508 - base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT; 387 + 388 + mask = 
*dev->dma_mask; 389 + if (mask <= DMA_BIT_MASK(32)) 390 + tbl = &iommu->tbl; 391 + else 392 + tbl = &atu->tbl; 393 + 394 + base_shift = tbl->table_map_base >> IO_PAGE_SHIFT; 395 + 509 396 for_each_sg(sglist, s, nelems, i) { 510 397 unsigned long paddr, npages, entry, out_entry = 0, slen; 511 398 ··· 526 397 /* Allocate iommu entries for that segment */ 527 398 paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s); 528 399 npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE); 529 - entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, 400 + entry = iommu_tbl_range_alloc(dev, tbl, npages, 530 401 &handle, (unsigned long)(-1), 0); 531 402 532 403 /* Handle failure */ 533 404 if (unlikely(entry == IOMMU_ERROR_CODE)) { 534 - if (printk_ratelimit()) 535 - printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx" 536 - " npages %lx\n", iommu, paddr, npages); 405 + pr_err_ratelimited("iommu_alloc failed, iommu %p paddr %lx npages %lx\n", 406 + tbl, paddr, npages); 537 407 goto iommu_map_failed; 538 408 } 539 409 540 - iommu_batch_new_entry(entry); 410 + iommu_batch_new_entry(entry, mask); 541 411 542 412 /* Convert entry to a dma_addr_t */ 543 - dma_addr = iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT); 413 + dma_addr = tbl->table_map_base + (entry << IO_PAGE_SHIFT); 544 414 dma_addr |= (s->offset & ~IO_PAGE_MASK); 545 415 546 416 /* Insert into HW table */ 547 417 paddr &= IO_PAGE_MASK; 548 418 while (npages--) { 549 - err = iommu_batch_add(paddr); 419 + err = iommu_batch_add(paddr, mask); 550 420 if (unlikely(err < 0L)) 551 421 goto iommu_map_failed; 552 422 paddr += IO_PAGE_SIZE; ··· 580 452 dma_next = dma_addr + slen; 581 453 } 582 454 583 - err = iommu_batch_end(); 455 + err = iommu_batch_end(mask); 584 456 585 457 if (unlikely(err < 0L)) 586 458 goto iommu_map_failed; ··· 603 475 vaddr = s->dma_address & IO_PAGE_MASK; 604 476 npages = iommu_num_pages(s->dma_address, s->dma_length, 605 477 IO_PAGE_SIZE); 606 - iommu_tbl_range_free(&iommu->tbl, vaddr, npages, 478 + 
iommu_tbl_range_free(tbl, vaddr, npages, 607 479 IOMMU_ERROR_CODE); 608 480 /* XXX demap? XXX */ 609 481 s->dma_address = DMA_ERROR_CODE; ··· 624 496 struct pci_pbm_info *pbm; 625 497 struct scatterlist *sg; 626 498 struct iommu *iommu; 499 + struct atu *atu; 627 500 unsigned long flags, entry; 501 + unsigned long iotsb_num; 628 502 u32 devhandle; 629 503 630 504 BUG_ON(direction == DMA_NONE); 631 505 632 506 iommu = dev->archdata.iommu; 633 507 pbm = dev->archdata.host_controller; 508 + atu = iommu->atu; 634 509 devhandle = pbm->devhandle; 635 510 636 511 local_irq_save(flags); ··· 643 512 dma_addr_t dma_handle = sg->dma_address; 644 513 unsigned int len = sg->dma_length; 645 514 unsigned long npages; 646 - struct iommu_map_table *tbl = &iommu->tbl; 515 + struct iommu_map_table *tbl; 647 516 unsigned long shift = IO_PAGE_SHIFT; 648 517 649 518 if (!len) 650 519 break; 651 520 npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE); 521 + 522 + if (dma_handle <= DMA_BIT_MASK(32)) { 523 + iotsb_num = 0; /* we don't care for legacy iommu */ 524 + tbl = &iommu->tbl; 525 + } else { 526 + iotsb_num = atu->iotsb->iotsb_num; 527 + tbl = &atu->tbl; 528 + } 652 529 entry = ((dma_handle - tbl->table_map_base) >> shift); 653 - dma_4v_iommu_demap(&devhandle, entry, npages); 654 - iommu_tbl_range_free(&iommu->tbl, dma_handle, npages, 530 + dma_4v_iommu_demap(dev, devhandle, dma_handle, iotsb_num, 531 + entry, npages); 532 + iommu_tbl_range_free(tbl, dma_handle, npages, 655 533 IOMMU_ERROR_CODE); 656 534 sg = sg_next(sg); 657 535 } ··· 719 579 } 720 580 } 721 581 return cnt; 582 + } 583 + 584 + static int pci_sun4v_atu_alloc_iotsb(struct pci_pbm_info *pbm) 585 + { 586 + struct atu *atu = pbm->iommu->atu; 587 + struct atu_iotsb *iotsb; 588 + void *table; 589 + u64 table_size; 590 + u64 iotsb_num; 591 + unsigned long order; 592 + unsigned long err; 593 + 594 + iotsb = kzalloc(sizeof(*iotsb), GFP_KERNEL); 595 + if (!iotsb) { 596 + err = -ENOMEM; 597 + goto out_err; 598 + } 599 + 
atu->iotsb = iotsb; 600 + 601 + /* calculate size of IOTSB */ 602 + table_size = (atu->size / IO_PAGE_SIZE) * 8; 603 + order = get_order(table_size); 604 + table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); 605 + if (!table) { 606 + err = -ENOMEM; 607 + goto table_failed; 608 + } 609 + iotsb->table = table; 610 + iotsb->ra = __pa(table); 611 + iotsb->dvma_size = atu->size; 612 + iotsb->dvma_base = atu->base; 613 + iotsb->table_size = table_size; 614 + iotsb->page_size = IO_PAGE_SIZE; 615 + 616 + /* configure and register IOTSB with HV */ 617 + err = pci_sun4v_iotsb_conf(pbm->devhandle, 618 + iotsb->ra, 619 + iotsb->table_size, 620 + iotsb->page_size, 621 + iotsb->dvma_base, 622 + &iotsb_num); 623 + if (err) { 624 + pr_err(PFX "pci_iotsb_conf failed error: %ld\n", err); 625 + goto iotsb_conf_failed; 626 + } 627 + iotsb->iotsb_num = iotsb_num; 628 + 629 + err = dma_4v_iotsb_bind(pbm->devhandle, iotsb_num, pbm->pci_bus); 630 + if (err) { 631 + pr_err(PFX "pci_iotsb_bind failed error: %ld\n", err); 632 + goto iotsb_conf_failed; 633 + } 634 + 635 + return 0; 636 + 637 + iotsb_conf_failed: 638 + free_pages((unsigned long)table, order); 639 + table_failed: 640 + kfree(iotsb); 641 + out_err: 642 + return err; 643 + } 644 + 645 + static int pci_sun4v_atu_init(struct pci_pbm_info *pbm) 646 + { 647 + struct atu *atu = pbm->iommu->atu; 648 + unsigned long err; 649 + const u64 *ranges; 650 + u64 map_size, num_iotte; 651 + u64 dma_mask; 652 + const u32 *page_size; 653 + int len; 654 + 655 + ranges = of_get_property(pbm->op->dev.of_node, "iommu-address-ranges", 656 + &len); 657 + if (!ranges) { 658 + pr_err(PFX "No iommu-address-ranges\n"); 659 + return -EINVAL; 660 + } 661 + 662 + page_size = of_get_property(pbm->op->dev.of_node, "iommu-pagesizes", 663 + NULL); 664 + if (!page_size) { 665 + pr_err(PFX "No iommu-pagesizes\n"); 666 + return -EINVAL; 667 + } 668 + 669 + /* There are 4 iommu-address-ranges supported. Each range is pair of 670 + * {base, size}. 
The ranges[0] and ranges[1] are 32bit address space 671 + * while ranges[2] and ranges[3] are 64bit space. We want to use 64bit 672 + * address ranges to support 64bit addressing. Because 'size' for 673 + * address ranges[2] and ranges[3] are same we can select either of 674 + * ranges[2] or ranges[3] for mapping. However due to 'size' is too 675 + * large for OS to allocate IOTSB we are using fix size 32G 676 + * (ATU_64_SPACE_SIZE) which is more than enough for all PCIe devices 677 + * to share. 678 + */ 679 + atu->ranges = (struct atu_ranges *)ranges; 680 + atu->base = atu->ranges[3].base; 681 + atu->size = ATU_64_SPACE_SIZE; 682 + 683 + /* Create IOTSB */ 684 + err = pci_sun4v_atu_alloc_iotsb(pbm); 685 + if (err) { 686 + pr_err(PFX "Error creating ATU IOTSB\n"); 687 + return err; 688 + } 689 + 690 + /* Create ATU iommu map. 691 + * One bit represents one iotte in IOTSB table. 692 + */ 693 + dma_mask = (roundup_pow_of_two(atu->size) - 1UL); 694 + num_iotte = atu->size / IO_PAGE_SIZE; 695 + map_size = num_iotte / 8; 696 + atu->tbl.table_map_base = atu->base; 697 + atu->dma_addr_mask = dma_mask; 698 + atu->tbl.map = kzalloc(map_size, GFP_KERNEL); 699 + if (!atu->tbl.map) 700 + return -ENOMEM; 701 + 702 + iommu_tbl_pool_init(&atu->tbl, num_iotte, IO_PAGE_SHIFT, 703 + NULL, false /* no large_pool */, 704 + 0 /* default npools */, 705 + false /* want span boundary checking */); 706 + 707 + return 0; 722 708 } 723 709 724 710 static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm) ··· 1184 918 1185 919 pci_sun4v_scan_bus(pbm, &op->dev); 1186 920 921 + /* if atu_init fails its not complete failure. 922 + * we can still continue using legacy iommu. 
923 + */ 924 + if (pbm->iommu->atu) { 925 + err = pci_sun4v_atu_init(pbm); 926 + if (err) { 927 + kfree(pbm->iommu->atu); 928 + pbm->iommu->atu = NULL; 929 + pr_err(PFX "ATU init failed, err=%d\n", err); 930 + } 931 + } 932 + 1187 933 pbm->next = pci_pbm_root; 1188 934 pci_pbm_root = pbm; 1189 935 ··· 1209 931 struct pci_pbm_info *pbm; 1210 932 struct device_node *dp; 1211 933 struct iommu *iommu; 934 + struct atu *atu; 1212 935 u32 devhandle; 1213 936 int i, err = -ENODEV; 937 + static bool hv_atu = true; 1214 938 1215 939 dp = op->dev.of_node; 1216 940 ··· 1233 953 } 1234 954 pr_info(PFX "Registered hvapi major[%lu] minor[%lu]\n", 1235 955 vpci_major, vpci_minor); 956 + 957 + err = sun4v_hvapi_register(HV_GRP_ATU, vatu_major, &vatu_minor); 958 + if (err) { 959 + /* don't return an error if we fail to register the 960 + * ATU group, but ATU hcalls won't be available. 961 + */ 962 + hv_atu = false; 963 + pr_err(PFX "Could not register hvapi ATU err=%d\n", 964 + err); 965 + } else { 966 + pr_info(PFX "Registered hvapi ATU major[%lu] minor[%lu]\n", 967 + vatu_major, vatu_minor); 968 + } 1236 969 1237 970 dma_ops = &sun4v_dma_ops; 1238 971 } ··· 1284 991 } 1285 992 1286 993 pbm->iommu = iommu; 994 + iommu->atu = NULL; 995 + if (hv_atu) { 996 + atu = kzalloc(sizeof(*atu), GFP_KERNEL); 997 + if (!atu) 998 + pr_err(PFX "Could not allocate atu\n"); 999 + else 1000 + iommu->atu = atu; 1001 + } 1287 1002 1288 1003 err = pci_sun4v_pbm_init(pbm, op, devhandle); 1289 1004 if (err) ··· 1302 1001 return 0; 1303 1002 1304 1003 out_free_iommu: 1004 + kfree(iommu->atu); 1305 1005 kfree(pbm->iommu); 1306 1006 1307 1007 out_free_controller:
+21
arch/sparc/kernel/pci_sun4v.h
··· 89 89 unsigned long msinum, 90 90 unsigned long valid); 91 91 92 + /* Sun4v HV IOMMU v2 APIs */ 93 + unsigned long pci_sun4v_iotsb_conf(unsigned long devhandle, 94 + unsigned long ra, 95 + unsigned long table_size, 96 + unsigned long page_size, 97 + unsigned long dvma_base, 98 + u64 *iotsb_num); 99 + unsigned long pci_sun4v_iotsb_bind(unsigned long devhandle, 100 + unsigned long iotsb_num, 101 + unsigned int pci_device); 102 + unsigned long pci_sun4v_iotsb_map(unsigned long devhandle, 103 + unsigned long iotsb_num, 104 + unsigned long iotsb_index_iottes, 105 + unsigned long io_attributes, 106 + unsigned long io_page_list_pa, 107 + long *mapped); 108 + unsigned long pci_sun4v_iotsb_demap(unsigned long devhandle, 109 + unsigned long iotsb_num, 110 + unsigned long iotsb_index, 111 + unsigned long iottes, 112 + unsigned long *demapped); 92 113 #endif /* !(_PCI_SUN4V_H) */
+68
arch/sparc/kernel/pci_sun4v_asm.S
··· 360 360 mov %o0, %o0 361 361 ENDPROC(pci_sun4v_msg_setvalid) 362 362 363 + /* 364 + * %o0: devhandle 365 + * %o1: r_addr 366 + * %o2: size 367 + * %o3: pagesize 368 + * %o4: virt 369 + * %o5: &iotsb_num/&iotsb_handle 370 + * 371 + * returns %o0: status 372 + * %o1: iotsb_num/iotsb_handle 373 + */ 374 + ENTRY(pci_sun4v_iotsb_conf) 375 + mov %o5, %g1 376 + mov HV_FAST_PCI_IOTSB_CONF, %o5 377 + ta HV_FAST_TRAP 378 + retl 379 + stx %o1, [%g1] 380 + ENDPROC(pci_sun4v_iotsb_conf) 381 + 382 + /* 383 + * %o0: devhandle 384 + * %o1: iotsb_num/iotsb_handle 385 + * %o2: pci_device 386 + * 387 + * returns %o0: status 388 + */ 389 + ENTRY(pci_sun4v_iotsb_bind) 390 + mov HV_FAST_PCI_IOTSB_BIND, %o5 391 + ta HV_FAST_TRAP 392 + retl 393 + nop 394 + ENDPROC(pci_sun4v_iotsb_bind) 395 + 396 + /* 397 + * %o0: devhandle 398 + * %o1: iotsb_num/iotsb_handle 399 + * %o2: index_count 400 + * %o3: iotte_attributes 401 + * %o4: io_page_list_p 402 + * %o5: &mapped 403 + * 404 + * returns %o0: status 405 + * %o1: #mapped 406 + */ 407 + ENTRY(pci_sun4v_iotsb_map) 408 + mov %o5, %g1 409 + mov HV_FAST_PCI_IOTSB_MAP, %o5 410 + ta HV_FAST_TRAP 411 + retl 412 + stx %o1, [%g1] 413 + ENDPROC(pci_sun4v_iotsb_map) 414 + 415 + /* 416 + * %o0: devhandle 417 + * %o1: iotsb_num/iotsb_handle 418 + * %o2: iotsb_index 419 + * %o3: #iottes 420 + * %o4: &demapped 421 + * 422 + * returns %o0: status 423 + * %o1: #demapped 424 + */ 425 + ENTRY(pci_sun4v_iotsb_demap) 426 + mov HV_FAST_PCI_IOTSB_DEMAP, %o5 427 + ta HV_FAST_TRAP 428 + retl 429 + stx %o1, [%o4] 430 + ENDPROC(pci_sun4v_iotsb_demap)
+2 -2
arch/sparc/kernel/signal_32.c
··· 89 89 sf = (struct signal_frame __user *) regs->u_regs[UREG_FP]; 90 90 91 91 /* 1. Make sure we are not getting garbage from the user */ 92 - if (!invalid_frame_pointer(sf, sizeof(*sf))) 92 + if (invalid_frame_pointer(sf, sizeof(*sf))) 93 93 goto segv_and_exit; 94 94 95 95 if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP])) ··· 150 150 151 151 synchronize_user_stack(); 152 152 sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP]; 153 - if (!invalid_frame_pointer(sf, sizeof(*sf))) 153 + if (invalid_frame_pointer(sf, sizeof(*sf))) 154 154 goto segv; 155 155 156 156 if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
+64 -7
arch/sparc/mm/init_64.c
··· 802 802 }; 803 803 static struct mdesc_mblock *mblocks; 804 804 static int num_mblocks; 805 + static int find_numa_node_for_addr(unsigned long pa, 806 + struct node_mem_mask *pnode_mask); 805 807 806 - static unsigned long ra_to_pa(unsigned long addr) 808 + static unsigned long __init ra_to_pa(unsigned long addr) 807 809 { 808 810 int i; 809 811 ··· 821 819 return addr; 822 820 } 823 821 824 - static int find_node(unsigned long addr) 822 + static int __init find_node(unsigned long addr) 825 823 { 824 + static bool search_mdesc = true; 825 + static struct node_mem_mask last_mem_mask = { ~0UL, ~0UL }; 826 + static int last_index; 826 827 int i; 827 828 828 829 addr = ra_to_pa(addr); ··· 835 830 if ((addr & p->mask) == p->val) 836 831 return i; 837 832 } 838 - /* The following condition has been observed on LDOM guests.*/ 839 - WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node" 840 - " rule. Some physical memory will be owned by node 0."); 841 - return 0; 833 + /* The following condition has been observed on LDOM guests because 834 + * node_masks only contains the best latency mask and value. 835 + * LDOM guest's mdesc can contain a single latency group to 836 + * cover multiple address range. Print warning message only if the 837 + * address cannot be found in node_masks nor mdesc. 838 + */ 839 + if ((search_mdesc) && 840 + ((addr & last_mem_mask.mask) != last_mem_mask.val)) { 841 + /* find the available node in the mdesc */ 842 + last_index = find_numa_node_for_addr(addr, &last_mem_mask); 843 + numadbg("find_node: latency group for address 0x%lx is %d\n", 844 + addr, last_index); 845 + if ((last_index < 0) || (last_index >= num_node_masks)) { 846 + /* WARN_ONCE() and use default group 0 */ 847 + WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node rule. 
Some physical memory will be owned by node 0."); 848 + search_mdesc = false; 849 + last_index = 0; 850 + } 851 + } 852 + 853 + return last_index; 842 854 } 843 855 844 - static u64 memblock_nid_range(u64 start, u64 end, int *nid) 856 + static u64 __init memblock_nid_range(u64 start, u64 end, int *nid) 845 857 { 846 858 *nid = find_node(start); 847 859 start += PAGE_SIZE; ··· 1180 1158 return (from == to) ? LOCAL_DISTANCE : REMOTE_DISTANCE; 1181 1159 } 1182 1160 return numa_latency[from][to]; 1161 + } 1162 + 1163 + static int find_numa_node_for_addr(unsigned long pa, 1164 + struct node_mem_mask *pnode_mask) 1165 + { 1166 + struct mdesc_handle *md = mdesc_grab(); 1167 + u64 node, arc; 1168 + int i = 0; 1169 + 1170 + node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups"); 1171 + if (node == MDESC_NODE_NULL) 1172 + goto out; 1173 + 1174 + mdesc_for_each_node_by_name(md, node, "group") { 1175 + mdesc_for_each_arc(arc, md, node, MDESC_ARC_TYPE_FWD) { 1176 + u64 target = mdesc_arc_target(md, arc); 1177 + struct mdesc_mlgroup *m = find_mlgroup(target); 1178 + 1179 + if (!m) 1180 + continue; 1181 + if ((pa & m->mask) == m->match) { 1182 + if (pnode_mask) { 1183 + pnode_mask->mask = m->mask; 1184 + pnode_mask->val = m->match; 1185 + } 1186 + mdesc_release(md); 1187 + return i; 1188 + } 1189 + } 1190 + i++; 1191 + } 1192 + 1193 + out: 1194 + mdesc_release(md); 1195 + return -1; 1183 1196 } 1184 1197 1185 1198 static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
+27 -31
arch/x86/kvm/irq_comm.c
··· 156 156 } 157 157 158 158 159 + static int kvm_hv_set_sint(struct kvm_kernel_irq_routing_entry *e, 160 + struct kvm *kvm, int irq_source_id, int level, 161 + bool line_status) 162 + { 163 + if (!level) 164 + return -1; 165 + 166 + return kvm_hv_synic_set_irq(kvm, e->hv_sint.vcpu, e->hv_sint.sint); 167 + } 168 + 159 169 int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e, 160 170 struct kvm *kvm, int irq_source_id, int level, 161 171 bool line_status) ··· 173 163 struct kvm_lapic_irq irq; 174 164 int r; 175 165 176 - if (unlikely(e->type != KVM_IRQ_ROUTING_MSI)) 177 - return -EWOULDBLOCK; 166 + switch (e->type) { 167 + case KVM_IRQ_ROUTING_HV_SINT: 168 + return kvm_hv_set_sint(e, kvm, irq_source_id, level, 169 + line_status); 178 170 179 - if (kvm_msi_route_invalid(kvm, e)) 180 - return -EINVAL; 171 + case KVM_IRQ_ROUTING_MSI: 172 + if (kvm_msi_route_invalid(kvm, e)) 173 + return -EINVAL; 181 174 182 - kvm_set_msi_irq(kvm, e, &irq); 175 + kvm_set_msi_irq(kvm, e, &irq); 183 176 184 - if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL)) 185 - return r; 186 - else 187 - return -EWOULDBLOCK; 177 + if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL)) 178 + return r; 179 + break; 180 + 181 + default: 182 + break; 183 + } 184 + 185 + return -EWOULDBLOCK; 188 186 } 189 187 190 188 int kvm_request_irq_source_id(struct kvm *kvm) ··· 270 252 if (kimn->irq == gsi) 271 253 kimn->func(kimn, mask); 272 254 srcu_read_unlock(&kvm->irq_srcu, idx); 273 - } 274 - 275 - static int kvm_hv_set_sint(struct kvm_kernel_irq_routing_entry *e, 276 - struct kvm *kvm, int irq_source_id, int level, 277 - bool line_status) 278 - { 279 - if (!level) 280 - return -1; 281 - 282 - return kvm_hv_synic_set_irq(kvm, e->hv_sint.vcpu, e->hv_sint.sint); 283 255 } 284 256 285 257 int kvm_set_routing_entry(struct kvm *kvm, ··· 429 421 } 430 422 } 431 423 srcu_read_unlock(&kvm->irq_srcu, idx); 432 - } 433 - 434 - int kvm_arch_set_irq(struct kvm_kernel_irq_routing_entry 
*irq, struct kvm *kvm, 435 - int irq_source_id, int level, bool line_status) 436 - { 437 - switch (irq->type) { 438 - case KVM_IRQ_ROUTING_HV_SINT: 439 - return kvm_hv_set_sint(irq, kvm, irq_source_id, level, 440 - line_status); 441 - default: 442 - return -EWOULDBLOCK; 443 - } 444 424 } 445 425 446 426 void kvm_arch_irq_routing_update(struct kvm *kvm)
+34 -13
arch/x86/kvm/x86.c
··· 210 210 struct kvm_shared_msrs *locals 211 211 = container_of(urn, struct kvm_shared_msrs, urn); 212 212 struct kvm_shared_msr_values *values; 213 + unsigned long flags; 213 214 215 + /* 216 + * Disabling irqs at this point since the following code could be 217 + * interrupted and executed through kvm_arch_hardware_disable() 218 + */ 219 + local_irq_save(flags); 220 + if (locals->registered) { 221 + locals->registered = false; 222 + user_return_notifier_unregister(urn); 223 + } 224 + local_irq_restore(flags); 214 225 for (slot = 0; slot < shared_msrs_global.nr; ++slot) { 215 226 values = &locals->values[slot]; 216 227 if (values->host != values->curr) { ··· 229 218 values->curr = values->host; 230 219 } 231 220 } 232 - locals->registered = false; 233 - user_return_notifier_unregister(urn); 234 221 } 235 222 236 223 static void shared_msr_update(unsigned slot, u32 msr) ··· 1733 1724 1734 1725 static u64 __get_kvmclock_ns(struct kvm *kvm) 1735 1726 { 1736 - struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, 0); 1737 1727 struct kvm_arch *ka = &kvm->arch; 1738 - s64 ns; 1728 + struct pvclock_vcpu_time_info hv_clock; 1739 1729 1740 - if (vcpu->arch.hv_clock.flags & PVCLOCK_TSC_STABLE_BIT) { 1741 - u64 tsc = kvm_read_l1_tsc(vcpu, rdtsc()); 1742 - ns = __pvclock_read_cycles(&vcpu->arch.hv_clock, tsc); 1743 - } else { 1744 - ns = ktime_get_boot_ns() + ka->kvmclock_offset; 1730 + spin_lock(&ka->pvclock_gtod_sync_lock); 1731 + if (!ka->use_master_clock) { 1732 + spin_unlock(&ka->pvclock_gtod_sync_lock); 1733 + return ktime_get_boot_ns() + ka->kvmclock_offset; 1745 1734 } 1746 1735 1747 - return ns; 1736 + hv_clock.tsc_timestamp = ka->master_cycle_now; 1737 + hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset; 1738 + spin_unlock(&ka->pvclock_gtod_sync_lock); 1739 + 1740 + kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL, 1741 + &hv_clock.tsc_shift, 1742 + &hv_clock.tsc_to_system_mul); 1743 + return __pvclock_read_cycles(&hv_clock, rdtsc()); 
1748 1744 } 1749 1745 1750 1746 u64 get_kvmclock_ns(struct kvm *kvm) ··· 2610 2596 case KVM_CAP_PIT_STATE2: 2611 2597 case KVM_CAP_SET_IDENTITY_MAP_ADDR: 2612 2598 case KVM_CAP_XEN_HVM: 2613 - case KVM_CAP_ADJUST_CLOCK: 2614 2599 case KVM_CAP_VCPU_EVENTS: 2615 2600 case KVM_CAP_HYPERV: 2616 2601 case KVM_CAP_HYPERV_VAPIC: ··· 2635 2622 case KVM_CAP_PCI_2_3: 2636 2623 #endif 2637 2624 r = 1; 2625 + break; 2626 + case KVM_CAP_ADJUST_CLOCK: 2627 + r = KVM_CLOCK_TSC_STABLE; 2638 2628 break; 2639 2629 case KVM_CAP_X86_SMM: 2640 2630 /* SMBASE is usually relocated above 1M on modern chipsets, ··· 3431 3415 }; 3432 3416 case KVM_SET_VAPIC_ADDR: { 3433 3417 struct kvm_vapic_addr va; 3418 + int idx; 3434 3419 3435 3420 r = -EINVAL; 3436 3421 if (!lapic_in_kernel(vcpu)) ··· 3439 3422 r = -EFAULT; 3440 3423 if (copy_from_user(&va, argp, sizeof va)) 3441 3424 goto out; 3425 + idx = srcu_read_lock(&vcpu->kvm->srcu); 3442 3426 r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); 3427 + srcu_read_unlock(&vcpu->kvm->srcu, idx); 3443 3428 break; 3444 3429 } 3445 3430 case KVM_X86_SETUP_MCE: { ··· 4122 4103 struct kvm_clock_data user_ns; 4123 4104 u64 now_ns; 4124 4105 4125 - now_ns = get_kvmclock_ns(kvm); 4106 + local_irq_disable(); 4107 + now_ns = __get_kvmclock_ns(kvm); 4126 4108 user_ns.clock = now_ns; 4127 - user_ns.flags = 0; 4109 + user_ns.flags = kvm->arch.use_master_clock ? KVM_CLOCK_TSC_STABLE : 0; 4110 + local_irq_enable(); 4128 4111 memset(&user_ns.pad, 0, sizeof(user_ns.pad)); 4129 4112 4130 4113 r = -EFAULT;
+1
arch/x86/purgatory/Makefile
··· 16 16 17 17 KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes -fno-zero-initialized-in-bss -fno-builtin -ffreestanding -c -MD -Os -mcmodel=large 18 18 KBUILD_CFLAGS += -m$(BITS) 19 + KBUILD_CFLAGS += $(call cc-option,-fno-PIE) 19 20 20 21 $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE 21 22 $(call if_changed,ld)
+8 -1
arch/xtensa/include/uapi/asm/unistd.h
··· 767 767 #define __NR_pwritev2 347 768 768 __SYSCALL(347, sys_pwritev2, 6) 769 769 770 - #define __NR_syscall_count 348 770 + #define __NR_pkey_mprotect 348 771 + __SYSCALL(348, sys_pkey_mprotect, 4) 772 + #define __NR_pkey_alloc 349 773 + __SYSCALL(349, sys_pkey_alloc, 2) 774 + #define __NR_pkey_free 350 775 + __SYSCALL(350, sys_pkey_free, 1) 776 + 777 + #define __NR_syscall_count 351 771 778 772 779 /* 773 780 * sysxtensa syscall handler
+7 -7
arch/xtensa/kernel/time.c
··· 172 172 { 173 173 of_clk_init(NULL); 174 174 #ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT 175 - printk("Calibrating CPU frequency "); 175 + pr_info("Calibrating CPU frequency "); 176 176 calibrate_ccount(); 177 - printk("%d.%02d MHz\n", (int)ccount_freq/1000000, 178 - (int)(ccount_freq/10000)%100); 177 + pr_cont("%d.%02d MHz\n", 178 + (int)ccount_freq / 1000000, 179 + (int)(ccount_freq / 10000) % 100); 179 180 #else 180 181 ccount_freq = CONFIG_XTENSA_CPU_CLOCK*1000000UL; 181 182 #endif ··· 211 210 void calibrate_delay(void) 212 211 { 213 212 loops_per_jiffy = ccount_freq / HZ; 214 - printk("Calibrating delay loop (skipped)... " 215 - "%lu.%02lu BogoMIPS preset\n", 216 - loops_per_jiffy/(1000000/HZ), 217 - (loops_per_jiffy/(10000/HZ)) % 100); 213 + pr_info("Calibrating delay loop (skipped)... %lu.%02lu BogoMIPS preset\n", 214 + loops_per_jiffy / (1000000 / HZ), 215 + (loops_per_jiffy / (10000 / HZ)) % 100); 218 216 } 219 217 #endif
+22 -52
arch/xtensa/kernel/traps.c
··· 465 465 466 466 for (i = 0; i < 16; i++) { 467 467 if ((i % 8) == 0) 468 - printk(KERN_INFO "a%02d:", i); 469 - printk(KERN_CONT " %08lx", regs->areg[i]); 468 + pr_info("a%02d:", i); 469 + pr_cont(" %08lx", regs->areg[i]); 470 470 } 471 - printk(KERN_CONT "\n"); 472 - 473 - printk("pc: %08lx, ps: %08lx, depc: %08lx, excvaddr: %08lx\n", 474 - regs->pc, regs->ps, regs->depc, regs->excvaddr); 475 - printk("lbeg: %08lx, lend: %08lx lcount: %08lx, sar: %08lx\n", 476 - regs->lbeg, regs->lend, regs->lcount, regs->sar); 471 + pr_cont("\n"); 472 + pr_info("pc: %08lx, ps: %08lx, depc: %08lx, excvaddr: %08lx\n", 473 + regs->pc, regs->ps, regs->depc, regs->excvaddr); 474 + pr_info("lbeg: %08lx, lend: %08lx lcount: %08lx, sar: %08lx\n", 475 + regs->lbeg, regs->lend, regs->lcount, regs->sar); 477 476 if (user_mode(regs)) 478 - printk("wb: %08lx, ws: %08lx, wmask: %08lx, syscall: %ld\n", 479 - regs->windowbase, regs->windowstart, regs->wmask, 480 - regs->syscall); 477 + pr_cont("wb: %08lx, ws: %08lx, wmask: %08lx, syscall: %ld\n", 478 + regs->windowbase, regs->windowstart, regs->wmask, 479 + regs->syscall); 481 480 } 482 481 483 482 static int show_trace_cb(struct stackframe *frame, void *data) 484 483 { 485 484 if (kernel_text_address(frame->pc)) { 486 - printk(" [<%08lx>] ", frame->pc); 487 - print_symbol("%s\n", frame->pc); 485 + pr_cont(" [<%08lx>]", frame->pc); 486 + print_symbol(" %s\n", frame->pc); 488 487 } 489 488 return 0; 490 489 } ··· 493 494 if (!sp) 494 495 sp = stack_pointer(task); 495 496 496 - printk("Call Trace:"); 497 - #ifdef CONFIG_KALLSYMS 498 - printk("\n"); 499 - #endif 497 + pr_info("Call Trace:\n"); 500 498 walk_stackframe(sp, show_trace_cb, NULL); 501 - printk("\n"); 499 + #ifndef CONFIG_KALLSYMS 500 + pr_cont("\n"); 501 + #endif 502 502 } 503 - 504 - /* 505 - * This routine abuses get_user()/put_user() to reference pointers 506 - * with at least a bit of error checking ... 
507 - */ 508 503 509 504 static int kstack_depth_to_print = 24; 510 505 ··· 511 518 sp = stack_pointer(task); 512 519 stack = sp; 513 520 514 - printk("\nStack: "); 521 + pr_info("Stack:\n"); 515 522 516 523 for (i = 0; i < kstack_depth_to_print; i++) { 517 524 if (kstack_end(sp)) 518 525 break; 519 - if (i && ((i % 8) == 0)) 520 - printk("\n "); 521 - printk("%08lx ", *sp++); 526 + pr_cont(" %08lx", *sp++); 527 + if (i % 8 == 7) 528 + pr_cont("\n"); 522 529 } 523 - printk("\n"); 524 530 show_trace(task, stack); 525 - } 526 - 527 - void show_code(unsigned int *pc) 528 - { 529 - long i; 530 - 531 - printk("\nCode:"); 532 - 533 - for(i = -3 ; i < 6 ; i++) { 534 - unsigned long insn; 535 - if (__get_user(insn, pc + i)) { 536 - printk(" (Bad address in pc)\n"); 537 - break; 538 - } 539 - printk("%c%08lx%c",(i?' ':'<'),insn,(i?' ':'>')); 540 - } 541 531 } 542 532 543 533 DEFINE_SPINLOCK(die_lock); ··· 528 552 void die(const char * str, struct pt_regs * regs, long err) 529 553 { 530 554 static int die_counter; 531 - int nl = 0; 532 555 533 556 console_verbose(); 534 557 spin_lock_irq(&die_lock); 535 558 536 - printk("%s: sig: %ld [#%d]\n", str, err, ++die_counter); 537 - #ifdef CONFIG_PREEMPT 538 - printk("PREEMPT "); 539 - nl = 1; 540 - #endif 541 - if (nl) 542 - printk("\n"); 559 + pr_info("%s: sig: %ld [#%d]%s\n", str, err, ++die_counter, 560 + IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : ""); 543 561 show_regs(regs); 544 562 if (!user_mode(regs)) 545 563 show_stack(NULL, (unsigned long*)regs->areg[1]);
+10 -7
crypto/algif_hash.c
··· 214 214 215 215 ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0); 216 216 217 - if (ctx->more) { 217 + if (!result) { 218 + err = af_alg_wait_for_completion( 219 + crypto_ahash_init(&ctx->req), 220 + &ctx->completion); 221 + if (err) 222 + goto unlock; 223 + } 224 + 225 + if (!result || ctx->more) { 218 226 ctx->more = 0; 219 227 err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req), 220 228 &ctx->completion); 221 229 if (err) 222 230 goto unlock; 223 - } else if (!result) { 224 - err = af_alg_wait_for_completion( 225 - crypto_ahash_digest(&ctx->req), 226 - &ctx->completion); 227 231 } 228 232 229 233 err = memcpy_to_msg(msg, ctx->result, len); 230 234 231 - hash_free_result(sk, ctx); 232 - 233 235 unlock: 236 + hash_free_result(sk, ctx); 234 237 release_sock(sk); 235 238 236 239 return err ?: len;
+4 -6
drivers/acpi/acpica/tbfadt.c
··· 480 480 u32 i; 481 481 482 482 /* 483 - * For ACPI 1.0 FADTs (revision 1), ensure that reserved fields which 483 + * For ACPI 1.0 FADTs (revision 1 or 2), ensure that reserved fields which 484 484 * should be zero are indeed zero. This will workaround BIOSs that 485 485 * inadvertently place values in these fields. 486 486 * 487 487 * The ACPI 1.0 reserved fields that will be zeroed are the bytes located 488 488 * at offset 45, 55, 95, and the word located at offset 109, 110. 489 489 * 490 - * Note: The FADT revision value is unreliable because of BIOS errors. 491 - * The table length is instead used as the final word on the version. 492 - * 493 - * Note: FADT revision 3 is the ACPI 2.0 version of the FADT. 490 + * Note: The FADT revision value is unreliable. Only the length can be 491 + * trusted. 494 492 */ 495 - if (acpi_gbl_FADT.header.length <= ACPI_FADT_V3_SIZE) { 493 + if (acpi_gbl_FADT.header.length <= ACPI_FADT_V2_SIZE) { 496 494 acpi_gbl_FADT.preferred_profile = 0; 497 495 acpi_gbl_FADT.pstate_control = 0; 498 496 acpi_gbl_FADT.cst_control = 0;
+2 -2
drivers/char/ipmi/bt-bmc.c
··· 484 484 } 485 485 486 486 static const struct of_device_id bt_bmc_match[] = { 487 - { .compatible = "aspeed,ast2400-bt-bmc" }, 487 + { .compatible = "aspeed,ast2400-ibt-bmc" }, 488 488 { }, 489 489 }; 490 490 ··· 502 502 MODULE_DEVICE_TABLE(of, bt_bmc_match); 503 503 MODULE_LICENSE("GPL"); 504 504 MODULE_AUTHOR("Alistair Popple <alistair@popple.id.au>"); 505 - MODULE_DESCRIPTION("Linux device interface to the BT interface"); 505 + MODULE_DESCRIPTION("Linux device interface to the IPMI BT interface");
+10 -1
drivers/crypto/caam/caamalg.c
··· 137 137 } 138 138 139 139 buf = it_page + it->offset; 140 - len = min(tlen, it->length); 140 + len = min_t(size_t, tlen, it->length); 141 141 print_hex_dump(level, prefix_str, prefix_type, rowsize, 142 142 groupsize, buf, len, ascii); 143 143 tlen -= len; ··· 4581 4581 4582 4582 /* Skip AES algorithms if not supported by device */ 4583 4583 if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES)) 4584 + continue; 4585 + 4586 + /* 4587 + * Check support for AES modes not available 4588 + * on LP devices. 4589 + */ 4590 + if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) 4591 + if ((alg->class1_alg_type & OP_ALG_AAI_MASK) == 4592 + OP_ALG_AAI_XTS) 4584 4593 continue; 4585 4594 4586 4595 t_alg = caam_alg_alloc(alg);
+1
drivers/dma/Kconfig
··· 306 306 depends on ARCH_MMP || COMPILE_TEST 307 307 select DMA_ENGINE 308 308 select MMP_SRAM if ARCH_MMP 309 + select GENERIC_ALLOCATOR 309 310 help 310 311 Support the MMP Two-Channel DMA engine. 311 312 This engine used for MMP Audio DMA and pxa910 SQU.
+26 -5
drivers/dma/cppi41.c
··· 317 317 318 318 while (val) { 319 319 u32 desc, len; 320 + int error; 321 + 322 + error = pm_runtime_get(cdd->ddev.dev); 323 + if (error < 0) 324 + dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n", 325 + __func__, error); 320 326 321 327 q_num = __fls(val); 322 328 val &= ~(1 << q_num); ··· 344 338 dma_cookie_complete(&c->txd); 345 339 dmaengine_desc_get_callback_invoke(&c->txd, NULL); 346 340 347 - /* Paired with cppi41_dma_issue_pending */ 348 341 pm_runtime_mark_last_busy(cdd->ddev.dev); 349 342 pm_runtime_put_autosuspend(cdd->ddev.dev); 350 343 } ··· 367 362 int error; 368 363 369 364 error = pm_runtime_get_sync(cdd->ddev.dev); 370 - if (error < 0) 365 + if (error < 0) { 366 + dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n", 367 + __func__, error); 368 + pm_runtime_put_noidle(cdd->ddev.dev); 369 + 371 370 return error; 371 + } 372 372 373 373 dma_cookie_init(chan); 374 374 dma_async_tx_descriptor_init(&c->txd, chan); ··· 395 385 int error; 396 386 397 387 error = pm_runtime_get_sync(cdd->ddev.dev); 398 - if (error < 0) 388 + if (error < 0) { 389 + pm_runtime_put_noidle(cdd->ddev.dev); 390 + 399 391 return; 392 + } 400 393 401 394 WARN_ON(!list_empty(&cdd->pending)); 402 395 ··· 473 460 struct cppi41_dd *cdd = c->cdd; 474 461 int error; 475 462 476 - /* PM runtime paired with dmaengine_desc_get_callback_invoke */ 477 463 error = pm_runtime_get(cdd->ddev.dev); 478 464 if ((error != -EINPROGRESS) && error < 0) { 465 + pm_runtime_put_noidle(cdd->ddev.dev); 479 466 dev_err(cdd->ddev.dev, "Failed to pm_runtime_get: %i\n", 480 467 error); 481 468 ··· 486 473 push_desc_queue(c); 487 474 else 488 475 pending_desc(c); 476 + 477 + pm_runtime_mark_last_busy(cdd->ddev.dev); 478 + pm_runtime_put_autosuspend(cdd->ddev.dev); 489 479 } 490 480 491 481 static u32 get_host_pd0(u32 length) ··· 1075 1059 deinit_cppi41(dev, cdd); 1076 1060 err_init_cppi: 1077 1061 pm_runtime_dont_use_autosuspend(dev); 1078 - pm_runtime_put_sync(dev); 1079 1062 err_get_sync: 1063 + 
pm_runtime_put_sync(dev); 1080 1064 pm_runtime_disable(dev); 1081 1065 iounmap(cdd->usbss_mem); 1082 1066 iounmap(cdd->ctrl_mem); ··· 1088 1072 static int cppi41_dma_remove(struct platform_device *pdev) 1089 1073 { 1090 1074 struct cppi41_dd *cdd = platform_get_drvdata(pdev); 1075 + int error; 1091 1076 1077 + error = pm_runtime_get_sync(&pdev->dev); 1078 + if (error < 0) 1079 + dev_err(&pdev->dev, "%s could not pm_runtime_get: %i\n", 1080 + __func__, error); 1092 1081 of_dma_controller_free(pdev->dev.of_node); 1093 1082 dma_async_device_unregister(&cdd->ddev); 1094 1083
+1
drivers/dma/edma.c
··· 1628 1628 if (echan->slot[0] < 0) { 1629 1629 dev_err(dev, "Entry slot allocation failed for channel %u\n", 1630 1630 EDMA_CHAN_SLOT(echan->ch_num)); 1631 + ret = echan->slot[0]; 1631 1632 goto err_slot; 1632 1633 } 1633 1634
+1 -1
drivers/dma/sun6i-dma.c
··· 578 578 579 579 burst = convert_burst(8); 580 580 width = convert_buswidth(DMA_SLAVE_BUSWIDTH_4_BYTES); 581 - v_lli->cfg |= DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) | 581 + v_lli->cfg = DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) | 582 582 DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) | 583 583 DMA_CHAN_CFG_DST_LINEAR_MODE | 584 584 DMA_CHAN_CFG_SRC_LINEAR_MODE |
-4
drivers/gpio/Kconfig
··· 22 22 23 23 if GPIOLIB 24 24 25 - config GPIO_DEVRES 26 - def_bool y 27 - depends on HAS_IOMEM 28 - 29 25 config OF_GPIO 30 26 def_bool y 31 27 depends on OF
+1 -1
drivers/gpio/Makefile
··· 2 2 3 3 ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG 4 4 5 - obj-$(CONFIG_GPIO_DEVRES) += devres.o 5 + obj-$(CONFIG_GPIOLIB) += devres.o 6 6 obj-$(CONFIG_GPIOLIB) += gpiolib.o 7 7 obj-$(CONFIG_GPIOLIB) += gpiolib-legacy.o 8 8 obj-$(CONFIG_OF_GPIO) += gpiolib-of.o
+2 -2
drivers/gpio/gpio-pca953x.c
··· 372 372 373 373 bank_shift = fls((chip->gpio_chip.ngpio - 1) / BANK_SZ); 374 374 375 - memcpy(reg_val, chip->reg_output, NBANK(chip)); 376 375 mutex_lock(&chip->i2c_lock); 376 + memcpy(reg_val, chip->reg_output, NBANK(chip)); 377 377 for (bank = 0; bank < NBANK(chip); bank++) { 378 378 bank_mask = mask[bank / sizeof(*mask)] >> 379 379 ((bank % sizeof(*mask)) * 8); 380 380 if (bank_mask) { 381 381 bank_val = bits[bank / sizeof(*bits)] >> 382 382 ((bank % sizeof(*bits)) * 8); 383 + bank_val &= bank_mask; 383 384 reg_val[bank] = (reg_val[bank] & ~bank_mask) | bank_val; 384 385 } 385 386 } ··· 608 607 609 608 if (client->irq && irq_base != -1 610 609 && (chip->driver_data & PCA_INT)) { 611 - 612 610 ret = pca953x_read_regs(chip, 613 611 chip->regs->input, chip->irq_stat); 614 612 if (ret)
+1 -1
drivers/gpio/gpio-tc3589x.c
··· 97 97 if (ret < 0) 98 98 return ret; 99 99 100 - return !!(ret & BIT(pos)); 100 + return !(ret & BIT(pos)); 101 101 } 102 102 103 103 static int tc3589x_gpio_set_single_ended(struct gpio_chip *chip,
+5 -2
drivers/gpio/gpiolib.c
··· 2737 2737 if (IS_ERR(desc)) 2738 2738 return PTR_ERR(desc); 2739 2739 2740 - /* Flush direction if something changed behind our back */ 2741 - if (chip->get_direction) { 2740 + /* 2741 + * If it's fast: flush the direction setting if something changed 2742 + * behind our back 2743 + */ 2744 + if (!chip->can_sleep && chip->get_direction) { 2742 2745 int dir = chip->get_direction(chip, offset); 2743 2746 2744 2747 if (dir)
+1
drivers/gpu/drm/amd/amdgpu/amdgpu.h
··· 459 459 u64 metadata_flags; 460 460 void *metadata; 461 461 u32 metadata_size; 462 + unsigned prime_shared_count; 462 463 /* list of all virtual address to which this bo 463 464 * is associated to 464 465 */
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
··· 132 132 entry->priority = min(info[i].bo_priority, 133 133 AMDGPU_BO_LIST_MAX_PRIORITY); 134 134 entry->tv.bo = &entry->robj->tbo; 135 - entry->tv.shared = true; 135 + entry->tv.shared = !entry->robj->prime_shared_count; 136 136 137 137 if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GDS) 138 138 gds_obj = entry->robj;
+7 -20
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 658 658 return false; 659 659 660 660 if (amdgpu_passthrough(adev)) { 661 - /* for FIJI: In whole GPU pass-through virtualization case 662 - * old smc fw won't clear some registers (e.g. MEM_SIZE, BIOS_SCRATCH) 663 - * so amdgpu_card_posted return false and driver will incorrectly skip vPost. 664 - * but if we force vPost do in pass-through case, the driver reload will hang. 665 - * whether doing vPost depends on amdgpu_card_posted if smc version is above 666 - * 00160e00 for FIJI. 661 + /* for FIJI: In whole GPU pass-through virtualization case, after VM reboot 662 + * some old smc fw still need driver do vPost otherwise gpu hang, while 663 + * those smc fw version above 22.15 doesn't have this flaw, so we force 664 + * vpost executed for smc version below 22.15 667 665 */ 668 666 if (adev->asic_type == CHIP_FIJI) { 669 667 int err; ··· 672 674 return true; 673 675 674 676 fw_ver = *((uint32_t *)adev->pm.fw->data + 69); 675 - if (fw_ver >= 0x00160e00) 676 - return !amdgpu_card_posted(adev); 677 + if (fw_ver < 0x00160e00) 678 + return true; 677 679 } 678 - } else { 679 - /* in bare-metal case, amdgpu_card_posted return false 680 - * after system reboot/boot, and return true if driver 681 - * reloaded. 682 - * we shouldn't do vPost after driver reload otherwise GPU 683 - * could hang. 684 - */ 685 - if (amdgpu_card_posted(adev)) 686 - return false; 687 680 } 688 - 689 - /* we assume vPost is neede for all other cases */ 690 - return true; 681 + return !amdgpu_card_posted(adev); 691 682 } 692 683 693 684 /**
+19 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
··· 74 74 if (ret) 75 75 return ERR_PTR(ret); 76 76 77 + bo->prime_shared_count = 1; 77 78 return &bo->gem_base; 78 79 } 79 80 80 81 int amdgpu_gem_prime_pin(struct drm_gem_object *obj) 81 82 { 82 83 struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); 83 - int ret = 0; 84 + long ret = 0; 84 85 85 86 ret = amdgpu_bo_reserve(bo, false); 86 87 if (unlikely(ret != 0)) 87 88 return ret; 88 89 90 + /* 91 + * Wait for all shared fences to complete before we switch to future 92 + * use of exclusive fence on this prime shared bo. 93 + */ 94 + ret = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false, 95 + MAX_SCHEDULE_TIMEOUT); 96 + if (unlikely(ret < 0)) { 97 + DRM_DEBUG_PRIME("Fence wait failed: %li\n", ret); 98 + amdgpu_bo_unreserve(bo); 99 + return ret; 100 + } 101 + 89 102 /* pin buffer into GTT */ 90 103 ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL); 104 + if (likely(ret == 0)) 105 + bo->prime_shared_count++; 106 + 91 107 amdgpu_bo_unreserve(bo); 92 108 return ret; 93 109 } ··· 118 102 return; 119 103 120 104 amdgpu_bo_unpin(bo); 105 + if (bo->prime_shared_count) 106 + bo->prime_shared_count--; 121 107 amdgpu_bo_unreserve(bo); 122 108 } 123 109
-2
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
··· 1469 1469 table_info->vddgfx_lookup_table, vv_id, &sclk)) { 1470 1470 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 1471 1471 PHM_PlatformCaps_ClockStretcher)) { 1472 - if (table_info == NULL) 1473 - return -EINVAL; 1474 1472 sclk_table = table_info->vdd_dep_on_sclk; 1475 1473 1476 1474 for (j = 1; j < sclk_table->count; j++) {
+17 -142
drivers/gpu/drm/arc/arcpgu_hdmi.c
··· 14 14 * 15 15 */ 16 16 17 - #include <drm/drm_crtc_helper.h> 17 + #include <drm/drm_crtc.h> 18 18 #include <drm/drm_encoder_slave.h> 19 - #include <drm/drm_atomic_helper.h> 20 19 21 20 #include "arcpgu.h" 22 - 23 - struct arcpgu_drm_connector { 24 - struct drm_connector connector; 25 - struct drm_encoder_slave *encoder_slave; 26 - }; 27 - 28 - static int arcpgu_drm_connector_get_modes(struct drm_connector *connector) 29 - { 30 - const struct drm_encoder_slave_funcs *sfuncs; 31 - struct drm_encoder_slave *slave; 32 - struct arcpgu_drm_connector *con = 33 - container_of(connector, struct arcpgu_drm_connector, connector); 34 - 35 - slave = con->encoder_slave; 36 - if (slave == NULL) { 37 - dev_err(connector->dev->dev, 38 - "connector_get_modes: cannot find slave encoder for connector\n"); 39 - return 0; 40 - } 41 - 42 - sfuncs = slave->slave_funcs; 43 - if (sfuncs->get_modes == NULL) 44 - return 0; 45 - 46 - return sfuncs->get_modes(&slave->base, connector); 47 - } 48 - 49 - static enum drm_connector_status 50 - arcpgu_drm_connector_detect(struct drm_connector *connector, bool force) 51 - { 52 - enum drm_connector_status status = connector_status_unknown; 53 - const struct drm_encoder_slave_funcs *sfuncs; 54 - struct drm_encoder_slave *slave; 55 - 56 - struct arcpgu_drm_connector *con = 57 - container_of(connector, struct arcpgu_drm_connector, connector); 58 - 59 - slave = con->encoder_slave; 60 - if (slave == NULL) { 61 - dev_err(connector->dev->dev, 62 - "connector_detect: cannot find slave encoder for connector\n"); 63 - return status; 64 - } 65 - 66 - sfuncs = slave->slave_funcs; 67 - if (sfuncs && sfuncs->detect) 68 - return sfuncs->detect(&slave->base, connector); 69 - 70 - dev_err(connector->dev->dev, "connector_detect: could not detect slave funcs\n"); 71 - return status; 72 - } 73 - 74 - static void arcpgu_drm_connector_destroy(struct drm_connector *connector) 75 - { 76 - drm_connector_unregister(connector); 77 - drm_connector_cleanup(connector); 78 - } 
79 - 80 - static const struct drm_connector_helper_funcs 81 - arcpgu_drm_connector_helper_funcs = { 82 - .get_modes = arcpgu_drm_connector_get_modes, 83 - }; 84 - 85 - static const struct drm_connector_funcs arcpgu_drm_connector_funcs = { 86 - .dpms = drm_helper_connector_dpms, 87 - .reset = drm_atomic_helper_connector_reset, 88 - .detect = arcpgu_drm_connector_detect, 89 - .fill_modes = drm_helper_probe_single_connector_modes, 90 - .destroy = arcpgu_drm_connector_destroy, 91 - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, 92 - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 93 - }; 94 - 95 - static struct drm_encoder_helper_funcs arcpgu_drm_encoder_helper_funcs = { 96 - .dpms = drm_i2c_encoder_dpms, 97 - .mode_fixup = drm_i2c_encoder_mode_fixup, 98 - .mode_set = drm_i2c_encoder_mode_set, 99 - .prepare = drm_i2c_encoder_prepare, 100 - .commit = drm_i2c_encoder_commit, 101 - .detect = drm_i2c_encoder_detect, 102 - }; 103 21 104 22 static struct drm_encoder_funcs arcpgu_drm_encoder_funcs = { 105 23 .destroy = drm_encoder_cleanup, ··· 25 107 26 108 int arcpgu_drm_hdmi_init(struct drm_device *drm, struct device_node *np) 27 109 { 28 - struct arcpgu_drm_connector *arcpgu_connector; 29 - struct drm_i2c_encoder_driver *driver; 30 - struct drm_encoder_slave *encoder; 31 - struct drm_connector *connector; 32 - struct i2c_client *i2c_slave; 33 - int ret; 110 + struct drm_encoder *encoder; 111 + struct drm_bridge *bridge; 112 + 113 + int ret = 0; 34 114 35 115 encoder = devm_kzalloc(drm->dev, sizeof(*encoder), GFP_KERNEL); 36 116 if (encoder == NULL) 37 117 return -ENOMEM; 38 118 39 - i2c_slave = of_find_i2c_device_by_node(np); 40 - if (!i2c_slave || !i2c_get_clientdata(i2c_slave)) { 41 - dev_err(drm->dev, "failed to find i2c slave encoder\n"); 119 + /* Locate drm bridge from the hdmi encoder DT node */ 120 + bridge = of_drm_find_bridge(np); 121 + if (!bridge) 42 122 return -EPROBE_DEFER; 43 - } 44 123 45 - if (i2c_slave->dev.driver 
== NULL) { 46 - dev_err(drm->dev, "failed to find i2c slave driver\n"); 47 - return -EPROBE_DEFER; 48 - } 49 - 50 - driver = 51 - to_drm_i2c_encoder_driver(to_i2c_driver(i2c_slave->dev.driver)); 52 - ret = driver->encoder_init(i2c_slave, drm, encoder); 53 - if (ret) { 54 - dev_err(drm->dev, "failed to initialize i2c encoder slave\n"); 55 - return ret; 56 - } 57 - 58 - encoder->base.possible_crtcs = 1; 59 - encoder->base.possible_clones = 0; 60 - ret = drm_encoder_init(drm, &encoder->base, &arcpgu_drm_encoder_funcs, 124 + encoder->possible_crtcs = 1; 125 + encoder->possible_clones = 0; 126 + ret = drm_encoder_init(drm, encoder, &arcpgu_drm_encoder_funcs, 61 127 DRM_MODE_ENCODER_TMDS, NULL); 62 128 if (ret) 63 129 return ret; 64 130 65 - drm_encoder_helper_add(&encoder->base, 66 - &arcpgu_drm_encoder_helper_funcs); 131 + /* Link drm_bridge to encoder */ 132 + bridge->encoder = encoder; 133 + encoder->bridge = bridge; 67 134 68 - arcpgu_connector = devm_kzalloc(drm->dev, sizeof(*arcpgu_connector), 69 - GFP_KERNEL); 70 - if (!arcpgu_connector) { 71 - ret = -ENOMEM; 72 - goto error_encoder_cleanup; 73 - } 135 + ret = drm_bridge_attach(drm, bridge); 136 + if (ret) 137 + drm_encoder_cleanup(encoder); 74 138 75 - connector = &arcpgu_connector->connector; 76 - drm_connector_helper_add(connector, &arcpgu_drm_connector_helper_funcs); 77 - ret = drm_connector_init(drm, connector, &arcpgu_drm_connector_funcs, 78 - DRM_MODE_CONNECTOR_HDMIA); 79 - if (ret < 0) { 80 - dev_err(drm->dev, "failed to initialize drm connector\n"); 81 - goto error_encoder_cleanup; 82 - } 83 - 84 - ret = drm_mode_connector_attach_encoder(connector, &encoder->base); 85 - if (ret < 0) { 86 - dev_err(drm->dev, "could not attach connector to encoder\n"); 87 - drm_connector_unregister(connector); 88 - goto error_connector_cleanup; 89 - } 90 - 91 - arcpgu_connector->encoder_slave = encoder; 92 - 93 - return 0; 94 - 95 - error_connector_cleanup: 96 - drm_connector_cleanup(connector); 97 - 98 - 
error_encoder_cleanup: 99 - drm_encoder_cleanup(&encoder->base); 100 139 return ret; 101 140 }
+11 -2
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
··· 25 25 static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc, 26 26 struct drm_crtc_state *old_crtc_state) 27 27 { 28 + struct drm_device *dev = crtc->dev; 29 + struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; 28 30 struct drm_pending_vblank_event *event = crtc->state->event; 31 + 32 + regmap_write(fsl_dev->regmap, 33 + DCU_UPDATE_MODE, DCU_UPDATE_MODE_READREG); 29 34 30 35 if (event) { 31 36 crtc->state->event = NULL; ··· 44 39 } 45 40 } 46 41 47 - static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc) 42 + static void fsl_dcu_drm_crtc_atomic_disable(struct drm_crtc *crtc, 43 + struct drm_crtc_state *old_crtc_state) 48 44 { 49 45 struct drm_device *dev = crtc->dev; 50 46 struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; 47 + 48 + /* always disable planes on the CRTC */ 49 + drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, true); 51 50 52 51 drm_crtc_vblank_off(crtc); 53 52 ··· 131 122 } 132 123 133 124 static const struct drm_crtc_helper_funcs fsl_dcu_drm_crtc_helper_funcs = { 125 + .atomic_disable = fsl_dcu_drm_crtc_atomic_disable, 134 126 .atomic_flush = fsl_dcu_drm_crtc_atomic_flush, 135 - .disable = fsl_dcu_drm_disable_crtc, 136 127 .enable = fsl_dcu_drm_crtc_enable, 137 128 .mode_set_nofb = fsl_dcu_drm_crtc_mode_set_nofb, 138 129 };
-4
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
··· 59 59 60 60 regmap_write(fsl_dev->regmap, DCU_INT_STATUS, 0); 61 61 regmap_write(fsl_dev->regmap, DCU_INT_MASK, ~0); 62 - regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, 63 - DCU_UPDATE_MODE_READREG); 64 62 65 63 return ret; 66 64 } ··· 137 139 drm_handle_vblank(dev, 0); 138 140 139 141 regmap_write(fsl_dev->regmap, DCU_INT_STATUS, int_status); 140 - regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, 141 - DCU_UPDATE_MODE_READREG); 142 142 143 143 return IRQ_HANDLED; 144 144 }
-5
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
··· 160 160 DCU_LAYER_POST_SKIP(0) | 161 161 DCU_LAYER_PRE_SKIP(0)); 162 162 } 163 - regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE, 164 - DCU_MODE_DCU_MODE_MASK, 165 - DCU_MODE_DCU_MODE(DCU_MODE_NORMAL)); 166 - regmap_write(fsl_dev->regmap, 167 - DCU_UPDATE_MODE, DCU_UPDATE_MODE_READREG); 168 163 169 164 return; 170 165 }
+8
drivers/gpu/drm/i915/i915_gem_execbuffer.c
··· 1281 1281 return ctx; 1282 1282 } 1283 1283 1284 + static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj) 1285 + { 1286 + return !(obj->cache_level == I915_CACHE_NONE || 1287 + obj->cache_level == I915_CACHE_WT); 1288 + } 1289 + 1284 1290 void i915_vma_move_to_active(struct i915_vma *vma, 1285 1291 struct drm_i915_gem_request *req, 1286 1292 unsigned int flags) ··· 1317 1311 1318 1312 /* update for the implicit flush after a batch */ 1319 1313 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; 1314 + if (!obj->cache_dirty && gpu_write_needs_clflush(obj)) 1315 + obj->cache_dirty = true; 1320 1316 } 1321 1317 1322 1318 if (flags & EXEC_OBJECT_NEEDS_FENCE)
+22 -8
drivers/gpu/drm/i915/intel_bios.c
··· 1143 1143 if (!child) 1144 1144 return; 1145 1145 1146 - aux_channel = child->raw[25]; 1146 + aux_channel = child->common.aux_channel; 1147 1147 ddc_pin = child->common.ddc_pin; 1148 1148 1149 1149 is_dvi = child->common.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING; ··· 1673 1673 return false; 1674 1674 } 1675 1675 1676 - bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port) 1676 + static bool child_dev_is_dp_dual_mode(const union child_device_config *p_child, 1677 + enum port port) 1677 1678 { 1678 1679 static const struct { 1679 1680 u16 dp, hdmi; ··· 1688 1687 [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, }, 1689 1688 [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, }, 1690 1689 }; 1691 - int i; 1692 1690 1693 1691 if (port == PORT_A || port >= ARRAY_SIZE(port_mapping)) 1694 1692 return false; 1695 1693 1696 - if (!dev_priv->vbt.child_dev_num) 1694 + if ((p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) != 1695 + (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS)) 1697 1696 return false; 1697 + 1698 + if (p_child->common.dvo_port == port_mapping[port].dp) 1699 + return true; 1700 + 1701 + /* Only accept a HDMI dvo_port as DP++ if it has an AUX channel */ 1702 + if (p_child->common.dvo_port == port_mapping[port].hdmi && 1703 + p_child->common.aux_channel != 0) 1704 + return true; 1705 + 1706 + return false; 1707 + } 1708 + 1709 + bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, 1710 + enum port port) 1711 + { 1712 + int i; 1698 1713 1699 1714 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { 1700 1715 const union child_device_config *p_child = 1701 1716 &dev_priv->vbt.child_dev[i]; 1702 1717 1703 - if ((p_child->common.dvo_port == port_mapping[port].dp || 1704 - p_child->common.dvo_port == port_mapping[port].hdmi) && 1705 - (p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) == 1706 - (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS)) 1718 + if 
(child_dev_is_dp_dual_mode(p_child, port)) 1707 1719 return true; 1708 1720 } 1709 1721
-10
drivers/gpu/drm/i915/intel_dp.c
··· 4463 4463 intel_dp_detect(struct drm_connector *connector, bool force) 4464 4464 { 4465 4465 struct intel_dp *intel_dp = intel_attached_dp(connector); 4466 - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 4467 - struct intel_encoder *intel_encoder = &intel_dig_port->base; 4468 4466 enum drm_connector_status status = connector->status; 4469 4467 4470 4468 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 4471 4469 connector->base.id, connector->name); 4472 - 4473 - if (intel_dp->is_mst) { 4474 - /* MST devices are disconnected from a monitor POV */ 4475 - intel_dp_unset_edid(intel_dp); 4476 - if (intel_encoder->type != INTEL_OUTPUT_EDP) 4477 - intel_encoder->type = INTEL_OUTPUT_DP; 4478 - return connector_status_disconnected; 4479 - } 4480 4470 4481 4471 /* If full detect is not performed yet, do a full detect */ 4482 4472 if (!intel_dp->detect_done)
+1 -1
drivers/gpu/drm/i915/intel_sprite.c
··· 358 358 int plane = intel_plane->plane; 359 359 u32 sprctl; 360 360 u32 sprsurf_offset, linear_offset; 361 - unsigned int rotation = dplane->state->rotation; 361 + unsigned int rotation = plane_state->base.rotation; 362 362 const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; 363 363 int crtc_x = plane_state->base.dst.x1; 364 364 int crtc_y = plane_state->base.dst.y1;
+2 -1
drivers/gpu/drm/i915/intel_vbt_defs.h
··· 280 280 u8 dp_support:1; 281 281 u8 tmds_support:1; 282 282 u8 support_reserved:5; 283 - u8 not_common3[12]; 283 + u8 aux_channel; 284 + u8 not_common3[11]; 284 285 u8 iboost_level; 285 286 } __packed; 286 287
+1
drivers/gpu/drm/mediatek/mtk_disp_ovl.c
··· 80 80 ddp_comp); 81 81 82 82 priv->crtc = crtc; 83 + writel(0x0, comp->regs + DISP_REG_OVL_INTSTA); 83 84 writel_relaxed(OVL_FME_CPL_INT, comp->regs + DISP_REG_OVL_INTEN); 84 85 } 85 86
+7 -2
drivers/gpu/drm/mediatek/mtk_dpi.c
··· 432 432 unsigned long pll_rate; 433 433 unsigned int factor; 434 434 435 + /* let pll_rate can fix the valid range of tvdpll (1G~2GHz) */ 435 436 pix_rate = 1000UL * mode->clock; 436 - if (mode->clock <= 74000) 437 + if (mode->clock <= 27000) 438 + factor = 16 * 3; 439 + else if (mode->clock <= 84000) 437 440 factor = 8 * 3; 438 - else 441 + else if (mode->clock <= 167000) 439 442 factor = 4 * 3; 443 + else 444 + factor = 2 * 3; 440 445 pll_rate = pix_rate * factor; 441 446 442 447 dev_dbg(dpi->dev, "Want PLL %lu Hz, pixel clock %lu Hz\n",
+11 -6
drivers/gpu/drm/mediatek/mtk_hdmi.c
··· 1133 1133 phy_power_on(hdmi->phy); 1134 1134 mtk_hdmi_aud_output_config(hdmi, mode); 1135 1135 1136 - mtk_hdmi_setup_audio_infoframe(hdmi); 1137 - mtk_hdmi_setup_avi_infoframe(hdmi, mode); 1138 - mtk_hdmi_setup_spd_infoframe(hdmi, "mediatek", "On-chip HDMI"); 1139 - if (mode->flags & DRM_MODE_FLAG_3D_MASK) 1140 - mtk_hdmi_setup_vendor_specific_infoframe(hdmi, mode); 1141 - 1142 1136 mtk_hdmi_hw_vid_black(hdmi, false); 1143 1137 mtk_hdmi_hw_aud_unmute(hdmi); 1144 1138 mtk_hdmi_hw_send_av_unmute(hdmi); ··· 1395 1401 hdmi->powered = true; 1396 1402 } 1397 1403 1404 + static void mtk_hdmi_send_infoframe(struct mtk_hdmi *hdmi, 1405 + struct drm_display_mode *mode) 1406 + { 1407 + mtk_hdmi_setup_audio_infoframe(hdmi); 1408 + mtk_hdmi_setup_avi_infoframe(hdmi, mode); 1409 + mtk_hdmi_setup_spd_infoframe(hdmi, "mediatek", "On-chip HDMI"); 1410 + if (mode->flags & DRM_MODE_FLAG_3D_MASK) 1411 + mtk_hdmi_setup_vendor_specific_infoframe(hdmi, mode); 1412 + } 1413 + 1398 1414 static void mtk_hdmi_bridge_enable(struct drm_bridge *bridge) 1399 1415 { 1400 1416 struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); ··· 1413 1409 clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]); 1414 1410 clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PIXEL]); 1415 1411 phy_power_on(hdmi->phy); 1412 + mtk_hdmi_send_infoframe(hdmi, &hdmi->mode); 1416 1413 1417 1414 hdmi->enabled = true; 1418 1415 }
+30 -12
drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
··· 265 265 struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw); 266 266 unsigned int pre_div; 267 267 unsigned int div; 268 + unsigned int pre_ibias; 269 + unsigned int hdmi_ibias; 270 + unsigned int imp_en; 268 271 269 272 dev_dbg(hdmi_phy->dev, "%s: %lu Hz, parent: %lu Hz\n", __func__, 270 273 rate, parent_rate); ··· 301 298 (0x1 << PLL_BR_SHIFT), 302 299 RG_HDMITX_PLL_BP | RG_HDMITX_PLL_BC | 303 300 RG_HDMITX_PLL_BR); 304 - mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3, RG_HDMITX_PRD_IMP_EN); 301 + if (rate < 165000000) { 302 + mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3, 303 + RG_HDMITX_PRD_IMP_EN); 304 + pre_ibias = 0x3; 305 + imp_en = 0x0; 306 + hdmi_ibias = hdmi_phy->ibias; 307 + } else { 308 + mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON3, 309 + RG_HDMITX_PRD_IMP_EN); 310 + pre_ibias = 0x6; 311 + imp_en = 0xf; 312 + hdmi_ibias = hdmi_phy->ibias_up; 313 + } 305 314 mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON4, 306 - (0x3 << PRD_IBIAS_CLK_SHIFT) | 307 - (0x3 << PRD_IBIAS_D2_SHIFT) | 308 - (0x3 << PRD_IBIAS_D1_SHIFT) | 309 - (0x3 << PRD_IBIAS_D0_SHIFT), 315 + (pre_ibias << PRD_IBIAS_CLK_SHIFT) | 316 + (pre_ibias << PRD_IBIAS_D2_SHIFT) | 317 + (pre_ibias << PRD_IBIAS_D1_SHIFT) | 318 + (pre_ibias << PRD_IBIAS_D0_SHIFT), 310 319 RG_HDMITX_PRD_IBIAS_CLK | 311 320 RG_HDMITX_PRD_IBIAS_D2 | 312 321 RG_HDMITX_PRD_IBIAS_D1 | 313 322 RG_HDMITX_PRD_IBIAS_D0); 314 323 mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON3, 315 - (0x0 << DRV_IMP_EN_SHIFT), RG_HDMITX_DRV_IMP_EN); 324 + (imp_en << DRV_IMP_EN_SHIFT), 325 + RG_HDMITX_DRV_IMP_EN); 316 326 mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, 317 327 (hdmi_phy->drv_imp_clk << DRV_IMP_CLK_SHIFT) | 318 328 (hdmi_phy->drv_imp_d2 << DRV_IMP_D2_SHIFT) | ··· 334 318 RG_HDMITX_DRV_IMP_CLK | RG_HDMITX_DRV_IMP_D2 | 335 319 RG_HDMITX_DRV_IMP_D1 | RG_HDMITX_DRV_IMP_D0); 336 320 mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON5, 337 - (hdmi_phy->ibias << DRV_IBIAS_CLK_SHIFT) | 338 - (hdmi_phy->ibias << DRV_IBIAS_D2_SHIFT) | 339 - (hdmi_phy->ibias << DRV_IBIAS_D1_SHIFT) | 340 - 
(hdmi_phy->ibias << DRV_IBIAS_D0_SHIFT), 341 - RG_HDMITX_DRV_IBIAS_CLK | RG_HDMITX_DRV_IBIAS_D2 | 342 - RG_HDMITX_DRV_IBIAS_D1 | RG_HDMITX_DRV_IBIAS_D0); 321 + (hdmi_ibias << DRV_IBIAS_CLK_SHIFT) | 322 + (hdmi_ibias << DRV_IBIAS_D2_SHIFT) | 323 + (hdmi_ibias << DRV_IBIAS_D1_SHIFT) | 324 + (hdmi_ibias << DRV_IBIAS_D0_SHIFT), 325 + RG_HDMITX_DRV_IBIAS_CLK | 326 + RG_HDMITX_DRV_IBIAS_D2 | 327 + RG_HDMITX_DRV_IBIAS_D1 | 328 + RG_HDMITX_DRV_IBIAS_D0); 343 329 return 0; 344 330 } 345 331
+2 -2
drivers/gpu/drm/sun4i/sun4i_drv.c
··· 142 142 143 143 /* Create our layers */ 144 144 drv->layers = sun4i_layers_init(drm); 145 - if (!drv->layers) { 145 + if (IS_ERR(drv->layers)) { 146 146 dev_err(drm->dev, "Couldn't create the planes\n"); 147 - ret = -EINVAL; 147 + ret = PTR_ERR(drv->layers); 148 148 goto free_drm; 149 149 } 150 150
+8 -12
drivers/gpu/drm/sun4i/sun4i_rgb.c
··· 152 152 153 153 DRM_DEBUG_DRIVER("Enabling RGB output\n"); 154 154 155 - if (!IS_ERR(tcon->panel)) { 155 + if (!IS_ERR(tcon->panel)) 156 156 drm_panel_prepare(tcon->panel); 157 - drm_panel_enable(tcon->panel); 158 - } 159 - 160 - /* encoder->bridge can be NULL; drm_bridge_enable checks for it */ 161 - drm_bridge_enable(encoder->bridge); 162 157 163 158 sun4i_tcon_channel_enable(tcon, 0); 159 + 160 + if (!IS_ERR(tcon->panel)) 161 + drm_panel_enable(tcon->panel); 164 162 } 165 163 166 164 static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder) ··· 169 171 170 172 DRM_DEBUG_DRIVER("Disabling RGB output\n"); 171 173 174 + if (!IS_ERR(tcon->panel)) 175 + drm_panel_disable(tcon->panel); 176 + 172 177 sun4i_tcon_channel_disable(tcon, 0); 173 178 174 - /* encoder->bridge can be NULL; drm_bridge_disable checks for it */ 175 - drm_bridge_disable(encoder->bridge); 176 - 177 - if (!IS_ERR(tcon->panel)) { 178 - drm_panel_disable(tcon->panel); 179 + if (!IS_ERR(tcon->panel)) 179 180 drm_panel_unprepare(tcon->panel); 180 - } 181 181 } 182 182 183 183 static void sun4i_rgb_encoder_mode_set(struct drm_encoder *encoder,
-1
drivers/i2c/Kconfig
··· 59 59 60 60 config I2C_MUX 61 61 tristate "I2C bus multiplexing support" 62 - depends on HAS_IOMEM 63 62 help 64 63 Say Y here if you want the I2C core to support the ability to 65 64 handle multiplexed I2C bus topologies, by presenting each
+1 -1
drivers/i2c/busses/i2c-digicolor.c
··· 347 347 348 348 ret = i2c_add_adapter(&i2c->adap); 349 349 if (ret < 0) { 350 - clk_unprepare(i2c->clk); 350 + clk_disable_unprepare(i2c->clk); 351 351 return ret; 352 352 } 353 353
+1
drivers/i2c/muxes/Kconfig
··· 63 63 64 64 config I2C_MUX_REG 65 65 tristate "Register-based I2C multiplexer" 66 + depends on HAS_IOMEM 66 67 help 67 68 If you say yes to this option, support will be included for a 68 69 register based I2C multiplexer. This driver provides access to
+20 -2
drivers/i2c/muxes/i2c-demux-pinctrl.c
··· 69 69 goto err_with_revert; 70 70 } 71 71 72 - p = devm_pinctrl_get_select(adap->dev.parent, priv->bus_name); 72 + /* 73 + * Check if there are pinctrl states at all. Note: we cant' use 74 + * devm_pinctrl_get_select() because we need to distinguish between 75 + * the -ENODEV from devm_pinctrl_get() and pinctrl_lookup_state(). 76 + */ 77 + p = devm_pinctrl_get(adap->dev.parent); 73 78 if (IS_ERR(p)) { 74 79 ret = PTR_ERR(p); 75 - goto err_with_put; 80 + /* continue if just no pinctrl states (e.g. i2c-gpio), otherwise exit */ 81 + if (ret != -ENODEV) 82 + goto err_with_put; 83 + } else { 84 + /* there are states. check and use them */ 85 + struct pinctrl_state *s = pinctrl_lookup_state(p, priv->bus_name); 86 + 87 + if (IS_ERR(s)) { 88 + ret = PTR_ERR(s); 89 + goto err_with_put; 90 + } 91 + ret = pinctrl_select_state(p, s); 92 + if (ret < 0) 93 + goto err_with_put; 76 94 } 77 95 78 96 priv->chan[new_chan].parent_adap = adap;
+2 -2
drivers/i2c/muxes/i2c-mux-pca954x.c
··· 268 268 /* discard unconfigured channels */ 269 269 break; 270 270 idle_disconnect_pd = pdata->modes[num].deselect_on_exit; 271 - data->deselect |= (idle_disconnect_pd 272 - || idle_disconnect_dt) << num; 273 271 } 272 + data->deselect |= (idle_disconnect_pd || 273 + idle_disconnect_dt) << num; 274 274 275 275 ret = i2c_mux_add_adapter(muxc, force, num, class); 276 276
+9 -2
drivers/infiniband/core/addr.c
··· 699 699 struct resolve_cb_context { 700 700 struct rdma_dev_addr *addr; 701 701 struct completion comp; 702 + int status; 702 703 }; 703 704 704 705 static void resolve_cb(int status, struct sockaddr *src_addr, 705 706 struct rdma_dev_addr *addr, void *context) 706 707 { 707 - memcpy(((struct resolve_cb_context *)context)->addr, addr, sizeof(struct 708 - rdma_dev_addr)); 708 + if (!status) 709 + memcpy(((struct resolve_cb_context *)context)->addr, 710 + addr, sizeof(struct rdma_dev_addr)); 711 + ((struct resolve_cb_context *)context)->status = status; 709 712 complete(&((struct resolve_cb_context *)context)->comp); 710 713 } 711 714 ··· 745 742 return ret; 746 743 747 744 wait_for_completion(&ctx.comp); 745 + 746 + ret = ctx.status; 747 + if (ret) 748 + return ret; 748 749 749 750 memcpy(dmac, dev_addr.dst_dev_addr, ETH_ALEN); 750 751 dev = dev_get_by_index(&init_net, dev_addr.bound_dev_if);
+110 -16
drivers/infiniband/core/cm.c
··· 80 80 __be32 random_id_operand; 81 81 struct list_head timewait_list; 82 82 struct workqueue_struct *wq; 83 + /* Sync on cm change port state */ 84 + spinlock_t state_lock; 83 85 } cm; 84 86 85 87 /* Counter indexes ordered by attribute ID */ ··· 163 161 struct ib_mad_agent *mad_agent; 164 162 struct kobject port_obj; 165 163 u8 port_num; 164 + struct list_head cm_priv_prim_list; 165 + struct list_head cm_priv_altr_list; 166 166 struct cm_counter_group counter_group[CM_COUNTER_GROUPS]; 167 167 }; 168 168 ··· 245 241 u8 service_timeout; 246 242 u8 target_ack_delay; 247 243 244 + struct list_head prim_list; 245 + struct list_head altr_list; 246 + /* Indicates that the send port mad is registered and av is set */ 247 + int prim_send_port_not_ready; 248 + int altr_send_port_not_ready; 249 + 248 250 struct list_head work_list; 249 251 atomic_t work_count; 250 252 }; ··· 269 259 struct ib_mad_agent *mad_agent; 270 260 struct ib_mad_send_buf *m; 271 261 struct ib_ah *ah; 262 + struct cm_av *av; 263 + unsigned long flags, flags2; 264 + int ret = 0; 272 265 266 + /* don't let the port to be released till the agent is down */ 267 + spin_lock_irqsave(&cm.state_lock, flags2); 268 + spin_lock_irqsave(&cm.lock, flags); 269 + if (!cm_id_priv->prim_send_port_not_ready) 270 + av = &cm_id_priv->av; 271 + else if (!cm_id_priv->altr_send_port_not_ready && 272 + (cm_id_priv->alt_av.port)) 273 + av = &cm_id_priv->alt_av; 274 + else { 275 + pr_info("%s: not valid CM id\n", __func__); 276 + ret = -ENODEV; 277 + spin_unlock_irqrestore(&cm.lock, flags); 278 + goto out; 279 + } 280 + spin_unlock_irqrestore(&cm.lock, flags); 281 + /* Make sure the port haven't released the mad yet */ 273 282 mad_agent = cm_id_priv->av.port->mad_agent; 274 - ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr); 275 - if (IS_ERR(ah)) 276 - return PTR_ERR(ah); 283 + if (!mad_agent) { 284 + pr_info("%s: not a valid MAD agent\n", __func__); 285 + ret = -ENODEV; 286 + goto out; 287 + } 288 + ah = 
ib_create_ah(mad_agent->qp->pd, &av->ah_attr); 289 + if (IS_ERR(ah)) { 290 + ret = PTR_ERR(ah); 291 + goto out; 292 + } 277 293 278 294 m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn, 279 - cm_id_priv->av.pkey_index, 295 + av->pkey_index, 280 296 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, 281 297 GFP_ATOMIC, 282 298 IB_MGMT_BASE_VERSION); 283 299 if (IS_ERR(m)) { 284 300 ib_destroy_ah(ah); 285 - return PTR_ERR(m); 301 + ret = PTR_ERR(m); 302 + goto out; 286 303 } 287 304 288 305 /* Timeout set by caller if response is expected. */ ··· 319 282 atomic_inc(&cm_id_priv->refcount); 320 283 m->context[0] = cm_id_priv; 321 284 *msg = m; 322 - return 0; 285 + 286 + out: 287 + spin_unlock_irqrestore(&cm.state_lock, flags2); 288 + return ret; 323 289 } 324 290 325 291 static int cm_alloc_response_msg(struct cm_port *port, ··· 392 352 grh, &av->ah_attr); 393 353 } 394 354 395 - static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av) 355 + static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av, 356 + struct cm_id_private *cm_id_priv) 396 357 { 397 358 struct cm_device *cm_dev; 398 359 struct cm_port *port = NULL; ··· 428 387 &av->ah_attr); 429 388 av->timeout = path->packet_life_time + 1; 430 389 431 - return 0; 390 + spin_lock_irqsave(&cm.lock, flags); 391 + if (&cm_id_priv->av == av) 392 + list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list); 393 + else if (&cm_id_priv->alt_av == av) 394 + list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list); 395 + else 396 + ret = -EINVAL; 397 + 398 + spin_unlock_irqrestore(&cm.lock, flags); 399 + 400 + return ret; 432 401 } 433 402 434 403 static int cm_alloc_id(struct cm_id_private *cm_id_priv) ··· 728 677 spin_lock_init(&cm_id_priv->lock); 729 678 init_completion(&cm_id_priv->comp); 730 679 INIT_LIST_HEAD(&cm_id_priv->work_list); 680 + INIT_LIST_HEAD(&cm_id_priv->prim_list); 681 + INIT_LIST_HEAD(&cm_id_priv->altr_list); 731 682 atomic_set(&cm_id_priv->work_count, 
-1); 732 683 atomic_set(&cm_id_priv->refcount, 1); 733 684 return &cm_id_priv->id; ··· 944 891 spin_unlock_irq(&cm_id_priv->lock); 945 892 break; 946 893 } 894 + 895 + spin_lock_irq(&cm.lock); 896 + if (!list_empty(&cm_id_priv->altr_list) && 897 + (!cm_id_priv->altr_send_port_not_ready)) 898 + list_del(&cm_id_priv->altr_list); 899 + if (!list_empty(&cm_id_priv->prim_list) && 900 + (!cm_id_priv->prim_send_port_not_ready)) 901 + list_del(&cm_id_priv->prim_list); 902 + spin_unlock_irq(&cm.lock); 947 903 948 904 cm_free_id(cm_id->local_id); 949 905 cm_deref_id(cm_id_priv); ··· 1254 1192 goto out; 1255 1193 } 1256 1194 1257 - ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av); 1195 + ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av, 1196 + cm_id_priv); 1258 1197 if (ret) 1259 1198 goto error1; 1260 1199 if (param->alternate_path) { 1261 1200 ret = cm_init_av_by_path(param->alternate_path, 1262 - &cm_id_priv->alt_av); 1201 + &cm_id_priv->alt_av, cm_id_priv); 1263 1202 if (ret) 1264 1203 goto error1; 1265 1204 } ··· 1716 1653 dev_put(gid_attr.ndev); 1717 1654 } 1718 1655 work->path[0].gid_type = gid_attr.gid_type; 1719 - ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av); 1656 + ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av, 1657 + cm_id_priv); 1720 1658 } 1721 1659 if (ret) { 1722 1660 int err = ib_get_cached_gid(work->port->cm_dev->ib_device, ··· 1736 1672 goto rejected; 1737 1673 } 1738 1674 if (req_msg->alt_local_lid) { 1739 - ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av); 1675 + ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av, 1676 + cm_id_priv); 1740 1677 if (ret) { 1741 1678 ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID, 1742 1679 &work->path[0].sgid, ··· 2792 2727 goto out; 2793 2728 } 2794 2729 2795 - ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av); 2730 + ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av, 2731 + cm_id_priv); 2796 2732 if (ret) 2797 2733 goto out; 
2798 2734 cm_id_priv->alt_av.timeout = ··· 2905 2839 cm_init_av_for_response(work->port, work->mad_recv_wc->wc, 2906 2840 work->mad_recv_wc->recv_buf.grh, 2907 2841 &cm_id_priv->av); 2908 - cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av); 2842 + cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av, 2843 + cm_id_priv); 2909 2844 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2910 2845 if (!ret) 2911 2846 list_add_tail(&work->list, &cm_id_priv->work_list); ··· 3098 3031 return -EINVAL; 3099 3032 3100 3033 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 3101 - ret = cm_init_av_by_path(param->path, &cm_id_priv->av); 3034 + ret = cm_init_av_by_path(param->path, &cm_id_priv->av, cm_id_priv); 3102 3035 if (ret) 3103 3036 goto out; 3104 3037 ··· 3535 3468 static int cm_migrate(struct ib_cm_id *cm_id) 3536 3469 { 3537 3470 struct cm_id_private *cm_id_priv; 3471 + struct cm_av tmp_av; 3538 3472 unsigned long flags; 3473 + int tmp_send_port_not_ready; 3539 3474 int ret = 0; 3540 3475 3541 3476 cm_id_priv = container_of(cm_id, struct cm_id_private, id); ··· 3546 3477 (cm_id->lap_state == IB_CM_LAP_UNINIT || 3547 3478 cm_id->lap_state == IB_CM_LAP_IDLE)) { 3548 3479 cm_id->lap_state = IB_CM_LAP_IDLE; 3480 + /* Swap address vector */ 3481 + tmp_av = cm_id_priv->av; 3549 3482 cm_id_priv->av = cm_id_priv->alt_av; 3483 + cm_id_priv->alt_av = tmp_av; 3484 + /* Swap port send ready state */ 3485 + tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready; 3486 + cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready; 3487 + cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready; 3550 3488 } else 3551 3489 ret = -EINVAL; 3552 3490 spin_unlock_irqrestore(&cm_id_priv->lock, flags); ··· 3964 3888 port->cm_dev = cm_dev; 3965 3889 port->port_num = i; 3966 3890 3891 + INIT_LIST_HEAD(&port->cm_priv_prim_list); 3892 + INIT_LIST_HEAD(&port->cm_priv_altr_list); 3893 + 3967 3894 ret = cm_create_port_fs(port); 3968 3895 if 
(ret) 3969 3896 goto error1; ··· 4024 3945 { 4025 3946 struct cm_device *cm_dev = client_data; 4026 3947 struct cm_port *port; 3948 + struct cm_id_private *cm_id_priv; 3949 + struct ib_mad_agent *cur_mad_agent; 4027 3950 struct ib_port_modify port_modify = { 4028 3951 .clr_port_cap_mask = IB_PORT_CM_SUP 4029 3952 }; ··· 4049 3968 4050 3969 port = cm_dev->port[i-1]; 4051 3970 ib_modify_port(ib_device, port->port_num, 0, &port_modify); 3971 + /* Mark all the cm_id's as not valid */ 3972 + spin_lock_irq(&cm.lock); 3973 + list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list) 3974 + cm_id_priv->altr_send_port_not_ready = 1; 3975 + list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list) 3976 + cm_id_priv->prim_send_port_not_ready = 1; 3977 + spin_unlock_irq(&cm.lock); 4052 3978 /* 4053 3979 * We flush the queue here after the going_down set, this 4054 3980 * verify that no new works will be queued in the recv handler, 4055 3981 * after that we can call the unregister_mad_agent 4056 3982 */ 4057 3983 flush_workqueue(cm.wq); 4058 - ib_unregister_mad_agent(port->mad_agent); 3984 + spin_lock_irq(&cm.state_lock); 3985 + cur_mad_agent = port->mad_agent; 3986 + port->mad_agent = NULL; 3987 + spin_unlock_irq(&cm.state_lock); 3988 + ib_unregister_mad_agent(cur_mad_agent); 4059 3989 cm_remove_port_fs(port); 4060 3990 } 3991 + 4061 3992 device_unregister(cm_dev->device); 4062 3993 kfree(cm_dev); 4063 3994 } ··· 4082 3989 INIT_LIST_HEAD(&cm.device_list); 4083 3990 rwlock_init(&cm.device_lock); 4084 3991 spin_lock_init(&cm.lock); 3992 + spin_lock_init(&cm.state_lock); 4085 3993 cm.listen_service_table = RB_ROOT; 4086 3994 cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID); 4087 3995 cm.remote_id_table = RB_ROOT;
+20 -1
drivers/infiniband/core/cma.c
··· 2438 2438 return 0; 2439 2439 } 2440 2440 2441 + static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type, 2442 + unsigned long supported_gids, 2443 + enum ib_gid_type default_gid) 2444 + { 2445 + if ((network_type == RDMA_NETWORK_IPV4 || 2446 + network_type == RDMA_NETWORK_IPV6) && 2447 + test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids)) 2448 + return IB_GID_TYPE_ROCE_UDP_ENCAP; 2449 + 2450 + return default_gid; 2451 + } 2452 + 2441 2453 static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) 2442 2454 { 2443 2455 struct rdma_route *route = &id_priv->id.route; ··· 2475 2463 route->num_paths = 1; 2476 2464 2477 2465 if (addr->dev_addr.bound_dev_if) { 2466 + unsigned long supported_gids; 2467 + 2478 2468 ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if); 2479 2469 if (!ndev) { 2480 2470 ret = -ENODEV; ··· 2500 2486 2501 2487 route->path_rec->net = &init_net; 2502 2488 route->path_rec->ifindex = ndev->ifindex; 2503 - route->path_rec->gid_type = id_priv->gid_type; 2489 + supported_gids = roce_gid_type_mask_support(id_priv->id.device, 2490 + id_priv->id.port_num); 2491 + route->path_rec->gid_type = 2492 + cma_route_gid_type(addr->dev_addr.network, 2493 + supported_gids, 2494 + id_priv->gid_type); 2504 2495 } 2505 2496 if (!ndev) { 2506 2497 ret = -ENODEV;
+1 -1
drivers/infiniband/core/umem.c
··· 175 175 176 176 cur_base = addr & PAGE_MASK; 177 177 178 - if (npages == 0) { 178 + if (npages == 0 || npages > UINT_MAX) { 179 179 ret = -EINVAL; 180 180 goto out; 181 181 }
+2 -5
drivers/infiniband/core/uverbs_main.c
··· 262 262 container_of(uobj, struct ib_uqp_object, uevent.uobject); 263 263 264 264 idr_remove_uobj(&ib_uverbs_qp_idr, uobj); 265 - if (qp != qp->real_qp) { 266 - ib_close_qp(qp); 267 - } else { 265 + if (qp == qp->real_qp) 268 266 ib_uverbs_detach_umcast(qp, uqp); 269 - ib_destroy_qp(qp); 270 - } 267 + ib_destroy_qp(qp); 271 268 ib_uverbs_release_uevent(file, &uqp->uevent); 272 269 kfree(uqp); 273 270 }
+3 -14
drivers/infiniband/hw/cxgb4/cq.c
··· 666 666 return ret; 667 667 } 668 668 669 - static void invalidate_mr(struct c4iw_dev *rhp, u32 rkey) 670 - { 671 - struct c4iw_mr *mhp; 672 - unsigned long flags; 673 - 674 - spin_lock_irqsave(&rhp->lock, flags); 675 - mhp = get_mhp(rhp, rkey >> 8); 676 - if (mhp) 677 - mhp->attr.state = 0; 678 - spin_unlock_irqrestore(&rhp->lock, flags); 679 - } 680 - 681 669 /* 682 670 * Get one cq entry from c4iw and map it to openib. 683 671 * ··· 721 733 CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) { 722 734 wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe); 723 735 wc->wc_flags |= IB_WC_WITH_INVALIDATE; 724 - invalidate_mr(qhp->rhp, wc->ex.invalidate_rkey); 736 + c4iw_invalidate_mr(qhp->rhp, wc->ex.invalidate_rkey); 725 737 } 726 738 } else { 727 739 switch (CQE_OPCODE(&cqe)) { ··· 750 762 751 763 /* Invalidate the MR if the fastreg failed */ 752 764 if (CQE_STATUS(&cqe) != T4_ERR_SUCCESS) 753 - invalidate_mr(qhp->rhp, CQE_WRID_FR_STAG(&cqe)); 765 + c4iw_invalidate_mr(qhp->rhp, 766 + CQE_WRID_FR_STAG(&cqe)); 754 767 break; 755 768 default: 756 769 printk(KERN_ERR MOD "Unexpected opcode %d "
+1 -1
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
··· 999 999 extern int use_dsgl; 1000 1000 void c4iw_drain_rq(struct ib_qp *qp); 1001 1001 void c4iw_drain_sq(struct ib_qp *qp); 1002 - 1002 + void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey); 1003 1003 1004 1004 #endif
+12
drivers/infiniband/hw/cxgb4/mem.c
··· 770 770 kfree(mhp); 771 771 return 0; 772 772 } 773 + 774 + void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey) 775 + { 776 + struct c4iw_mr *mhp; 777 + unsigned long flags; 778 + 779 + spin_lock_irqsave(&rhp->lock, flags); 780 + mhp = get_mhp(rhp, rkey >> 8); 781 + if (mhp) 782 + mhp->attr.state = 0; 783 + spin_unlock_irqrestore(&rhp->lock, flags); 784 + }
+12 -8
drivers/infiniband/hw/cxgb4/qp.c
··· 706 706 return 0; 707 707 } 708 708 709 - static int build_inv_stag(struct c4iw_dev *dev, union t4_wr *wqe, 710 - struct ib_send_wr *wr, u8 *len16) 709 + static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) 711 710 { 712 - struct c4iw_mr *mhp = get_mhp(dev, wr->ex.invalidate_rkey >> 8); 713 - 714 - mhp->attr.state = 0; 715 711 wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey); 716 712 wqe->inv.r2 = 0; 717 713 *len16 = DIV_ROUND_UP(sizeof wqe->inv, 16); ··· 793 797 spin_lock_irqsave(&qhp->lock, flag); 794 798 if (t4_wq_in_error(&qhp->wq)) { 795 799 spin_unlock_irqrestore(&qhp->lock, flag); 800 + *bad_wr = wr; 796 801 return -EINVAL; 797 802 } 798 803 num_wrs = t4_sq_avail(&qhp->wq); 799 804 if (num_wrs == 0) { 800 805 spin_unlock_irqrestore(&qhp->lock, flag); 806 + *bad_wr = wr; 801 807 return -ENOMEM; 802 808 } 803 809 while (wr) { ··· 838 840 case IB_WR_RDMA_READ_WITH_INV: 839 841 fw_opcode = FW_RI_RDMA_READ_WR; 840 842 swsqe->opcode = FW_RI_READ_REQ; 841 - if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) 843 + if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) { 844 + c4iw_invalidate_mr(qhp->rhp, 845 + wr->sg_list[0].lkey); 842 846 fw_flags = FW_RI_RDMA_READ_INVALIDATE; 843 - else 847 + } else { 844 848 fw_flags = 0; 849 + } 845 850 err = build_rdma_read(wqe, wr, &len16); 846 851 if (err) 847 852 break; ··· 877 876 fw_flags |= FW_RI_LOCAL_FENCE_FLAG; 878 877 fw_opcode = FW_RI_INV_LSTAG_WR; 879 878 swsqe->opcode = FW_RI_LOCAL_INV; 880 - err = build_inv_stag(qhp->rhp, wqe, wr, &len16); 879 + err = build_inv_stag(wqe, wr, &len16); 880 + c4iw_invalidate_mr(qhp->rhp, wr->ex.invalidate_rkey); 881 881 break; 882 882 default: 883 883 PDBG("%s post of type=%d TBD!\n", __func__, ··· 936 934 spin_lock_irqsave(&qhp->lock, flag); 937 935 if (t4_wq_in_error(&qhp->wq)) { 938 936 spin_unlock_irqrestore(&qhp->lock, flag); 937 + *bad_wr = wr; 939 938 return -EINVAL; 940 939 } 941 940 num_wrs = t4_rq_avail(&qhp->wq); 942 941 if (num_wrs == 0) { 943 942 
spin_unlock_irqrestore(&qhp->lock, flag); 943 + *bad_wr = wr; 944 944 return -ENOMEM; 945 945 } 946 946 while (wr) {
-72
drivers/infiniband/hw/hfi1/affinity.c
··· 775 775 } 776 776 mutex_unlock(&affinity->lock); 777 777 } 778 - 779 - int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf, 780 - size_t count) 781 - { 782 - struct hfi1_affinity_node *entry; 783 - cpumask_var_t mask; 784 - int ret, i; 785 - 786 - mutex_lock(&node_affinity.lock); 787 - entry = node_affinity_lookup(dd->node); 788 - 789 - if (!entry) { 790 - ret = -EINVAL; 791 - goto unlock; 792 - } 793 - 794 - ret = zalloc_cpumask_var(&mask, GFP_KERNEL); 795 - if (!ret) { 796 - ret = -ENOMEM; 797 - goto unlock; 798 - } 799 - 800 - ret = cpulist_parse(buf, mask); 801 - if (ret) 802 - goto out; 803 - 804 - if (!cpumask_subset(mask, cpu_online_mask) || cpumask_empty(mask)) { 805 - dd_dev_warn(dd, "Invalid CPU mask\n"); 806 - ret = -EINVAL; 807 - goto out; 808 - } 809 - 810 - /* reset the SDMA interrupt affinity details */ 811 - init_cpu_mask_set(&entry->def_intr); 812 - cpumask_copy(&entry->def_intr.mask, mask); 813 - 814 - /* Reassign the affinity for each SDMA interrupt. */ 815 - for (i = 0; i < dd->num_msix_entries; i++) { 816 - struct hfi1_msix_entry *msix; 817 - 818 - msix = &dd->msix_entries[i]; 819 - if (msix->type != IRQ_SDMA) 820 - continue; 821 - 822 - ret = get_irq_affinity(dd, msix); 823 - 824 - if (ret) 825 - break; 826 - } 827 - out: 828 - free_cpumask_var(mask); 829 - unlock: 830 - mutex_unlock(&node_affinity.lock); 831 - return ret ? ret : strnlen(buf, PAGE_SIZE); 832 - } 833 - 834 - int hfi1_get_sdma_affinity(struct hfi1_devdata *dd, char *buf) 835 - { 836 - struct hfi1_affinity_node *entry; 837 - 838 - mutex_lock(&node_affinity.lock); 839 - entry = node_affinity_lookup(dd->node); 840 - 841 - if (!entry) { 842 - mutex_unlock(&node_affinity.lock); 843 - return -EINVAL; 844 - } 845 - 846 - cpumap_print_to_pagebuf(true, buf, &entry->def_intr.mask); 847 - mutex_unlock(&node_affinity.lock); 848 - return strnlen(buf, PAGE_SIZE); 849 - }
-4
drivers/infiniband/hw/hfi1/affinity.h
··· 102 102 /* Release a CPU used by a user process. */ 103 103 void hfi1_put_proc_affinity(int); 104 104 105 - int hfi1_get_sdma_affinity(struct hfi1_devdata *dd, char *buf); 106 - int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf, 107 - size_t count); 108 - 109 105 struct hfi1_affinity_node { 110 106 int node; 111 107 struct cpu_mask_set def_intr;
+9 -18
drivers/infiniband/hw/hfi1/chip.c
··· 6301 6301 /* leave shared count at zero for both global and VL15 */ 6302 6302 write_global_credit(dd, vau, vl15buf, 0); 6303 6303 6304 - /* We may need some credits for another VL when sending packets 6305 - * with the snoop interface. Dividing it down the middle for VL15 6306 - * and VL0 should suffice. 6307 - */ 6308 - if (unlikely(dd->hfi1_snoop.mode_flag == HFI1_PORT_SNOOP_MODE)) { 6309 - write_csr(dd, SEND_CM_CREDIT_VL15, (u64)(vl15buf >> 1) 6310 - << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT); 6311 - write_csr(dd, SEND_CM_CREDIT_VL, (u64)(vl15buf >> 1) 6312 - << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT); 6313 - } else { 6314 - write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf 6315 - << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT); 6316 - } 6304 + write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf 6305 + << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT); 6317 6306 } 6318 6307 6319 6308 /* ··· 9904 9915 u32 mask = ~((1U << ppd->lmc) - 1); 9905 9916 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1); 9906 9917 9907 - if (dd->hfi1_snoop.mode_flag) 9908 - dd_dev_info(dd, "Set lid/lmc while snooping"); 9909 - 9910 9918 c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK 9911 9919 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK); 9912 9920 c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK) ··· 12098 12112 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME); 12099 12113 } 12100 12114 12101 - #define C_MAX_NAME 13 /* 12 chars + one for /0 */ 12115 + #define C_MAX_NAME 16 /* 15 chars + one for /0 */ 12102 12116 static int init_cntrs(struct hfi1_devdata *dd) 12103 12117 { 12104 12118 int i, rcv_ctxts, j; ··· 14449 14463 * Any error printing is already done by the init code. 14450 14464 * On return, we have the chip mapped. 
14451 14465 */ 14452 - ret = hfi1_pcie_ddinit(dd, pdev, ent); 14466 + ret = hfi1_pcie_ddinit(dd, pdev); 14453 14467 if (ret < 0) 14454 14468 goto bail_free; 14455 14469 ··· 14676 14690 ret = init_rcverr(dd); 14677 14691 if (ret) 14678 14692 goto bail_free_cntrs; 14693 + 14694 + init_completion(&dd->user_comp); 14695 + 14696 + /* The user refcount starts with one to inidicate an active device */ 14697 + atomic_set(&dd->user_refcount, 1); 14679 14698 14680 14699 goto bail; 14681 14700
+3
drivers/infiniband/hw/hfi1/chip.h
··· 320 320 /* DC_DC8051_CFG_MODE.GENERAL bits */ 321 321 #define DISABLE_SELF_GUID_CHECK 0x2 322 322 323 + /* Bad L2 frame error code */ 324 + #define BAD_L2_ERR 0x6 325 + 323 326 /* 324 327 * Eager buffer minimum and maximum sizes supported by the hardware. 325 328 * All power-of-two sizes in between are supported as well.
+26 -11
drivers/infiniband/hw/hfi1/driver.c
··· 599 599 dd->rhf_offset; 600 600 struct rvt_qp *qp; 601 601 struct ib_header *hdr; 602 - struct ib_other_headers *ohdr; 603 602 struct rvt_dev_info *rdi = &dd->verbs_dev.rdi; 604 603 u64 rhf = rhf_to_cpu(rhf_addr); 605 604 u32 etype = rhf_rcv_type(rhf), qpn, bth1; ··· 614 615 if (etype != RHF_RCV_TYPE_IB) 615 616 goto next; 616 617 617 - hdr = hfi1_get_msgheader(dd, rhf_addr); 618 + packet->hdr = hfi1_get_msgheader(dd, rhf_addr); 619 + hdr = packet->hdr; 618 620 619 621 lnh = be16_to_cpu(hdr->lrh[0]) & 3; 620 622 621 - if (lnh == HFI1_LRH_BTH) 622 - ohdr = &hdr->u.oth; 623 - else if (lnh == HFI1_LRH_GRH) 624 - ohdr = &hdr->u.l.oth; 625 - else 623 + if (lnh == HFI1_LRH_BTH) { 624 + packet->ohdr = &hdr->u.oth; 625 + } else if (lnh == HFI1_LRH_GRH) { 626 + packet->ohdr = &hdr->u.l.oth; 627 + packet->rcv_flags |= HFI1_HAS_GRH; 628 + } else { 626 629 goto next; /* just in case */ 630 + } 627 631 628 - bth1 = be32_to_cpu(ohdr->bth[1]); 632 + bth1 = be32_to_cpu(packet->ohdr->bth[1]); 629 633 is_ecn = !!(bth1 & (HFI1_FECN_SMASK | HFI1_BECN_SMASK)); 630 634 631 635 if (!is_ecn) ··· 648 646 649 647 /* turn off BECN, FECN */ 650 648 bth1 &= ~(HFI1_FECN_SMASK | HFI1_BECN_SMASK); 651 - ohdr->bth[1] = cpu_to_be32(bth1); 649 + packet->ohdr->bth[1] = cpu_to_be32(bth1); 652 650 next: 653 651 update_ps_mdata(&mdata, rcd); 654 652 } ··· 1362 1360 1363 1361 int process_receive_bypass(struct hfi1_packet *packet) 1364 1362 { 1363 + struct hfi1_devdata *dd = packet->rcd->dd; 1364 + 1365 1365 if (unlikely(rhf_err_flags(packet->rhf))) 1366 1366 handle_eflags(packet); 1367 1367 1368 - dd_dev_err(packet->rcd->dd, 1368 + dd_dev_err(dd, 1369 1369 "Bypass packets are not supported in normal operation. 
Dropping\n"); 1370 - incr_cntr64(&packet->rcd->dd->sw_rcv_bypass_packet_errors); 1370 + incr_cntr64(&dd->sw_rcv_bypass_packet_errors); 1371 + if (!(dd->err_info_rcvport.status_and_code & OPA_EI_STATUS_SMASK)) { 1372 + u64 *flits = packet->ebuf; 1373 + 1374 + if (flits && !(packet->rhf & RHF_LEN_ERR)) { 1375 + dd->err_info_rcvport.packet_flit1 = flits[0]; 1376 + dd->err_info_rcvport.packet_flit2 = 1377 + packet->tlen > sizeof(flits[0]) ? flits[1] : 0; 1378 + } 1379 + dd->err_info_rcvport.status_and_code |= 1380 + (OPA_EI_STATUS_SMASK | BAD_L2_ERR); 1381 + } 1371 1382 return RHF_RCV_CONTINUE; 1372 1383 } 1373 1384
+16 -3
drivers/infiniband/hw/hfi1/file_ops.c
··· 172 172 struct hfi1_devdata, 173 173 user_cdev); 174 174 175 + if (!atomic_inc_not_zero(&dd->user_refcount)) 176 + return -ENXIO; 177 + 175 178 /* Just take a ref now. Not all opens result in a context assign */ 176 179 kobject_get(&dd->kobj); 177 180 ··· 186 183 fd->rec_cpu_num = -1; /* no cpu affinity by default */ 187 184 fd->mm = current->mm; 188 185 atomic_inc(&fd->mm->mm_count); 186 + fp->private_data = fd; 187 + } else { 188 + fp->private_data = NULL; 189 + 190 + if (atomic_dec_and_test(&dd->user_refcount)) 191 + complete(&dd->user_comp); 192 + 193 + return -ENOMEM; 189 194 } 190 195 191 - fp->private_data = fd; 192 - 193 - return fd ? 0 : -ENOMEM; 196 + return 0; 194 197 } 195 198 196 199 static long hfi1_file_ioctl(struct file *fp, unsigned int cmd, ··· 807 798 done: 808 799 mmdrop(fdata->mm); 809 800 kobject_put(&dd->kobj); 801 + 802 + if (atomic_dec_and_test(&dd->user_refcount)) 803 + complete(&dd->user_comp); 804 + 810 805 kfree(fdata); 811 806 return 0; 812 807 }
+34 -55
drivers/infiniband/hw/hfi1/hfi.h
··· 367 367 u8 etype; 368 368 }; 369 369 370 - /* 371 - * Private data for snoop/capture support. 372 - */ 373 - struct hfi1_snoop_data { 374 - int mode_flag; 375 - struct cdev cdev; 376 - struct device *class_dev; 377 - /* protect snoop data */ 378 - spinlock_t snoop_lock; 379 - struct list_head queue; 380 - wait_queue_head_t waitq; 381 - void *filter_value; 382 - int (*filter_callback)(void *hdr, void *data, void *value); 383 - u64 dcc_cfg; /* saved value of DCC Cfg register */ 384 - }; 385 - 386 - /* snoop mode_flag values */ 387 - #define HFI1_PORT_SNOOP_MODE 1U 388 - #define HFI1_PORT_CAPTURE_MODE 2U 389 - 390 370 struct rvt_sge_state; 391 371 392 372 /* ··· 592 612 /* host link state variables */ 593 613 struct mutex hls_lock; 594 614 u32 host_link_state; 595 - 596 - spinlock_t sdma_alllock ____cacheline_aligned_in_smp; 597 615 598 616 u32 lstate; /* logical link state */ 599 617 ··· 1082 1104 char *portcntrnames; 1083 1105 size_t portcntrnameslen; 1084 1106 1085 - struct hfi1_snoop_data hfi1_snoop; 1086 - 1087 1107 struct err_info_rcvport err_info_rcvport; 1088 1108 struct err_info_constraint err_info_rcv_constraint; 1089 1109 struct err_info_constraint err_info_xmit_constraint; ··· 1117 1141 rhf_rcv_function_ptr normal_rhf_rcv_functions[8]; 1118 1142 1119 1143 /* 1120 - * Handlers for outgoing data so that snoop/capture does not 1121 - * have to have its hooks in the send path 1144 + * Capability to have different send engines simply by changing a 1145 + * pointer value. 
1122 1146 */ 1123 1147 send_routine process_pio_send; 1124 1148 send_routine process_dma_send; ··· 1150 1174 spinlock_t aspm_lock; 1151 1175 /* Number of verbs contexts which have disabled ASPM */ 1152 1176 atomic_t aspm_disabled_cnt; 1177 + /* Keeps track of user space clients */ 1178 + atomic_t user_refcount; 1179 + /* Used to wait for outstanding user space clients before dev removal */ 1180 + struct completion user_comp; 1153 1181 1154 1182 struct hfi1_affinity *affinity; 1155 1183 struct rhashtable sdma_rht; ··· 1201 1221 extern u32 hfi1_cpulist_count; 1202 1222 extern unsigned long *hfi1_cpulist; 1203 1223 1204 - extern unsigned int snoop_drop_send; 1205 - extern unsigned int snoop_force_capture; 1206 1224 int hfi1_init(struct hfi1_devdata *, int); 1207 1225 int hfi1_count_units(int *npresentp, int *nupp); 1208 1226 int hfi1_count_active_units(void); ··· 1535 1557 void reset_link_credits(struct hfi1_devdata *dd); 1536 1558 void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu); 1537 1559 1538 - int snoop_recv_handler(struct hfi1_packet *packet); 1539 - int snoop_send_dma_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps, 1540 - u64 pbc); 1541 - int snoop_send_pio_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps, 1542 - u64 pbc); 1543 - void snoop_inline_pio_send(struct hfi1_devdata *dd, struct pio_buf *pbuf, 1544 - u64 pbc, const void *from, size_t count); 1545 1560 int set_buffer_control(struct hfi1_pportdata *ppd, struct buffer_control *bc); 1546 1561 1547 1562 static inline struct hfi1_devdata *dd_from_ppd(struct hfi1_pportdata *ppd) ··· 1734 1763 1735 1764 int hfi1_pcie_init(struct pci_dev *, const struct pci_device_id *); 1736 1765 void hfi1_pcie_cleanup(struct pci_dev *); 1737 - int hfi1_pcie_ddinit(struct hfi1_devdata *, struct pci_dev *, 1738 - const struct pci_device_id *); 1766 + int hfi1_pcie_ddinit(struct hfi1_devdata *, struct pci_dev *); 1739 1767 void hfi1_pcie_ddcleanup(struct hfi1_devdata *); 1740 1768 void 
hfi1_pcie_flr(struct hfi1_devdata *); 1741 1769 int pcie_speeds(struct hfi1_devdata *); ··· 1769 1799 int kdeth_process_eager(struct hfi1_packet *packet); 1770 1800 int process_receive_invalid(struct hfi1_packet *packet); 1771 1801 1772 - extern rhf_rcv_function_ptr snoop_rhf_rcv_functions[8]; 1773 - 1774 1802 void update_sge(struct rvt_sge_state *ss, u32 length); 1775 1803 1776 1804 /* global module parameter variables */ ··· 1795 1827 #define DRIVER_NAME "hfi1" 1796 1828 #define HFI1_USER_MINOR_BASE 0 1797 1829 #define HFI1_TRACE_MINOR 127 1798 - #define HFI1_DIAGPKT_MINOR 128 1799 - #define HFI1_DIAG_MINOR_BASE 129 1800 - #define HFI1_SNOOP_CAPTURE_BASE 200 1801 1830 #define HFI1_NMINORS 255 1802 1831 1803 1832 #define PCI_VENDOR_ID_INTEL 0x8086 ··· 1813 1848 static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd, 1814 1849 u16 ctxt_type) 1815 1850 { 1816 - u64 base_sc_integrity = 1851 + u64 base_sc_integrity; 1852 + 1853 + /* No integrity checks if HFI1_CAP_NO_INTEGRITY is set */ 1854 + if (HFI1_CAP_IS_KSET(NO_INTEGRITY)) 1855 + return 0; 1856 + 1857 + base_sc_integrity = 1817 1858 SEND_CTXT_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK 1818 1859 | SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK 1819 1860 | SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK ··· 1834 1863 | SEND_CTXT_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK 1835 1864 | SEND_CTXT_CHECK_ENABLE_CHECK_OPCODE_SMASK 1836 1865 | SEND_CTXT_CHECK_ENABLE_CHECK_SLID_SMASK 1837 - | SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK 1838 1866 | SEND_CTXT_CHECK_ENABLE_CHECK_VL_SMASK 1839 1867 | SEND_CTXT_CHECK_ENABLE_CHECK_ENABLE_SMASK; 1840 1868 ··· 1842 1872 else 1843 1873 base_sc_integrity |= HFI1_PKT_KERNEL_SC_INTEGRITY; 1844 1874 1845 - if (is_ax(dd)) 1846 - /* turn off send-side job key checks - A0 */ 1847 - return base_sc_integrity & 1848 - ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK; 1875 + /* turn on send-side job key checks if !A0 */ 1876 + if (!is_ax(dd)) 1877 + 
base_sc_integrity |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK; 1878 + 1849 1879 return base_sc_integrity; 1850 1880 } 1851 1881 1852 1882 static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd) 1853 1883 { 1854 - u64 base_sdma_integrity = 1884 + u64 base_sdma_integrity; 1885 + 1886 + /* No integrity checks if HFI1_CAP_NO_INTEGRITY is set */ 1887 + if (HFI1_CAP_IS_KSET(NO_INTEGRITY)) 1888 + return 0; 1889 + 1890 + base_sdma_integrity = 1855 1891 SEND_DMA_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK 1856 - | SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK 1857 1892 | SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK 1858 1893 | SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_IB_PACKETS_SMASK 1859 1894 | SEND_DMA_CHECK_ENABLE_DISALLOW_BAD_PKT_LEN_SMASK ··· 1870 1895 | SEND_DMA_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK 1871 1896 | SEND_DMA_CHECK_ENABLE_CHECK_OPCODE_SMASK 1872 1897 | SEND_DMA_CHECK_ENABLE_CHECK_SLID_SMASK 1873 - | SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK 1874 1898 | SEND_DMA_CHECK_ENABLE_CHECK_VL_SMASK 1875 1899 | SEND_DMA_CHECK_ENABLE_CHECK_ENABLE_SMASK; 1876 1900 1877 - if (is_ax(dd)) 1878 - /* turn off send-side job key checks - A0 */ 1879 - return base_sdma_integrity & 1880 - ~SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK; 1901 + if (!HFI1_CAP_IS_KSET(STATIC_RATE_CTRL)) 1902 + base_sdma_integrity |= 1903 + SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK; 1904 + 1905 + /* turn on send-side job key checks if !A0 */ 1906 + if (!is_ax(dd)) 1907 + base_sdma_integrity |= 1908 + SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK; 1909 + 1881 1910 return base_sdma_integrity; 1882 1911 } 1883 1912
+64 -40
drivers/infiniband/hw/hfi1/init.c
··· 144 144 struct hfi1_ctxtdata *rcd; 145 145 146 146 ppd = dd->pport + (i % dd->num_pports); 147 + 148 + /* dd->rcd[i] gets assigned inside the callee */ 147 149 rcd = hfi1_create_ctxtdata(ppd, i, dd->node); 148 150 if (!rcd) { 149 151 dd_dev_err(dd, ··· 171 169 if (!rcd->sc) { 172 170 dd_dev_err(dd, 173 171 "Unable to allocate kernel send context, failing\n"); 174 - dd->rcd[rcd->ctxt] = NULL; 175 - hfi1_free_ctxtdata(dd, rcd); 176 172 goto nomem; 177 173 } 178 174 ··· 178 178 if (ret < 0) { 179 179 dd_dev_err(dd, 180 180 "Failed to setup kernel receive context, failing\n"); 181 - sc_free(rcd->sc); 182 - dd->rcd[rcd->ctxt] = NULL; 183 - hfi1_free_ctxtdata(dd, rcd); 184 181 ret = -EFAULT; 185 182 goto bail; 186 183 } ··· 193 196 nomem: 194 197 ret = -ENOMEM; 195 198 bail: 199 + if (dd->rcd) { 200 + for (i = 0; i < dd->num_rcv_contexts; ++i) 201 + hfi1_free_ctxtdata(dd, dd->rcd[i]); 202 + } 196 203 kfree(dd->rcd); 197 204 dd->rcd = NULL; 198 205 return ret; ··· 217 216 dd->num_rcv_contexts - dd->first_user_ctxt) 218 217 kctxt_ngroups = (dd->rcv_entries.nctxt_extra - 219 218 (dd->num_rcv_contexts - dd->first_user_ctxt)); 220 - rcd = kzalloc(sizeof(*rcd), GFP_KERNEL); 219 + rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, numa); 221 220 if (rcd) { 222 221 u32 rcvtids, max_entries; 223 222 ··· 262 261 } 263 262 rcd->eager_base = base * dd->rcv_entries.group_size; 264 263 265 - /* Validate and initialize Rcv Hdr Q variables */ 266 - if (rcvhdrcnt % HDRQ_INCREMENT) { 267 - dd_dev_err(dd, 268 - "ctxt%u: header queue count %d must be divisible by %lu\n", 269 - rcd->ctxt, rcvhdrcnt, HDRQ_INCREMENT); 270 - goto bail; 271 - } 272 264 rcd->rcvhdrq_cnt = rcvhdrcnt; 273 265 rcd->rcvhdrqentsize = hfi1_hdrq_entsize; 274 266 /* ··· 500 506 INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event); 501 507 502 508 mutex_init(&ppd->hls_lock); 503 - spin_lock_init(&ppd->sdma_alllock); 504 509 spin_lock_init(&ppd->qsfp_info.qsfp_lock); 505 510 506 511 ppd->qsfp_info.ppd = ppd; ··· 1392 1399 
hfi1_free_devdata(dd); 1393 1400 } 1394 1401 1402 + static int init_validate_rcvhdrcnt(struct device *dev, uint thecnt) 1403 + { 1404 + if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) { 1405 + hfi1_early_err(dev, "Receive header queue count too small\n"); 1406 + return -EINVAL; 1407 + } 1408 + 1409 + if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) { 1410 + hfi1_early_err(dev, 1411 + "Receive header queue count cannot be greater than %u\n", 1412 + HFI1_MAX_HDRQ_EGRBUF_CNT); 1413 + return -EINVAL; 1414 + } 1415 + 1416 + if (thecnt % HDRQ_INCREMENT) { 1417 + hfi1_early_err(dev, "Receive header queue count %d must be divisible by %lu\n", 1418 + thecnt, HDRQ_INCREMENT); 1419 + return -EINVAL; 1420 + } 1421 + 1422 + return 0; 1423 + } 1424 + 1395 1425 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 1396 1426 { 1397 1427 int ret = 0, j, pidx, initfail; 1398 - struct hfi1_devdata *dd = ERR_PTR(-EINVAL); 1428 + struct hfi1_devdata *dd; 1399 1429 struct hfi1_pportdata *ppd; 1400 1430 1401 1431 /* First, lock the non-writable module parameters */ 1402 1432 HFI1_CAP_LOCK(); 1403 1433 1404 1434 /* Validate some global module parameters */ 1405 - if (rcvhdrcnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) { 1406 - hfi1_early_err(&pdev->dev, "Header queue count too small\n"); 1407 - ret = -EINVAL; 1435 + ret = init_validate_rcvhdrcnt(&pdev->dev, rcvhdrcnt); 1436 + if (ret) 1408 1437 goto bail; 1409 - } 1410 - if (rcvhdrcnt > HFI1_MAX_HDRQ_EGRBUF_CNT) { 1411 - hfi1_early_err(&pdev->dev, 1412 - "Receive header queue count cannot be greater than %u\n", 1413 - HFI1_MAX_HDRQ_EGRBUF_CNT); 1414 - ret = -EINVAL; 1415 - goto bail; 1416 - } 1438 + 1417 1439 /* use the encoding function as a sanitization check */ 1418 1440 if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) { 1419 1441 hfi1_early_err(&pdev->dev, "Invalid HdrQ Entry size %u\n", ··· 1469 1461 if (ret) 1470 1462 goto bail; 1471 1463 1472 - /* 1473 - * Do device-specific initialization, function table setup, dd 1474 - * allocation, 
etc. 1475 - */ 1476 - switch (ent->device) { 1477 - case PCI_DEVICE_ID_INTEL0: 1478 - case PCI_DEVICE_ID_INTEL1: 1479 - dd = hfi1_init_dd(pdev, ent); 1480 - break; 1481 - default: 1464 + if (!(ent->device == PCI_DEVICE_ID_INTEL0 || 1465 + ent->device == PCI_DEVICE_ID_INTEL1)) { 1482 1466 hfi1_early_err(&pdev->dev, 1483 1467 "Failing on unknown Intel deviceid 0x%x\n", 1484 1468 ent->device); 1485 1469 ret = -ENODEV; 1470 + goto clean_bail; 1486 1471 } 1487 1472 1488 - if (IS_ERR(dd)) 1473 + /* 1474 + * Do device-specific initialization, function table setup, dd 1475 + * allocation, etc. 1476 + */ 1477 + dd = hfi1_init_dd(pdev, ent); 1478 + 1479 + if (IS_ERR(dd)) { 1489 1480 ret = PTR_ERR(dd); 1490 - if (ret) 1491 1481 goto clean_bail; /* error already printed */ 1482 + } 1492 1483 1493 1484 ret = create_workqueues(dd); 1494 1485 if (ret) ··· 1545 1538 return ret; 1546 1539 } 1547 1540 1541 + static void wait_for_clients(struct hfi1_devdata *dd) 1542 + { 1543 + /* 1544 + * Remove the device init value and complete the device if there is 1545 + * no clients or wait for active clients to finish. 1546 + */ 1547 + if (atomic_dec_and_test(&dd->user_refcount)) 1548 + complete(&dd->user_comp); 1549 + 1550 + wait_for_completion(&dd->user_comp); 1551 + } 1552 + 1548 1553 static void remove_one(struct pci_dev *pdev) 1549 1554 { 1550 1555 struct hfi1_devdata *dd = pci_get_drvdata(pdev); 1551 1556 1552 1557 /* close debugfs files before ib unregister */ 1553 1558 hfi1_dbg_ibdev_exit(&dd->verbs_dev); 1559 + 1560 + /* remove the /dev hfi1 interface */ 1561 + hfi1_device_remove(dd); 1562 + 1563 + /* wait for existing user space clients to finish */ 1564 + wait_for_clients(dd); 1565 + 1554 1566 /* unregister from IB core */ 1555 1567 hfi1_unregister_ib_device(dd); 1556 1568 ··· 1583 1557 1584 1558 /* wait until all of our (qsfp) queue_work() calls complete */ 1585 1559 flush_workqueue(ib_wq); 1586 - 1587 - hfi1_device_remove(dd); 1588 1560 1589 1561 postinit_cleanup(dd); 1590 1562 }
+1 -2
drivers/infiniband/hw/hfi1/pcie.c
··· 157 157 * fields required to re-initialize after a chip reset, or for 158 158 * various other purposes 159 159 */ 160 - int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev, 161 - const struct pci_device_id *ent) 160 + int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev) 162 161 { 163 162 unsigned long len; 164 163 resource_size_t addr;
+3 -10
drivers/infiniband/hw/hfi1/pio.c
··· 668 668 void set_pio_integrity(struct send_context *sc) 669 669 { 670 670 struct hfi1_devdata *dd = sc->dd; 671 - u64 reg = 0; 672 671 u32 hw_context = sc->hw_context; 673 672 int type = sc->type; 674 673 675 - /* 676 - * No integrity checks if HFI1_CAP_NO_INTEGRITY is set, or if 677 - * we're snooping. 678 - */ 679 - if (likely(!HFI1_CAP_IS_KSET(NO_INTEGRITY)) && 680 - dd->hfi1_snoop.mode_flag != HFI1_PORT_SNOOP_MODE) 681 - reg = hfi1_pkt_default_send_ctxt_mask(dd, type); 682 - 683 - write_kctxt_csr(dd, hw_context, SC(CHECK_ENABLE), reg); 674 + write_kctxt_csr(dd, hw_context, 675 + SC(CHECK_ENABLE), 676 + hfi1_pkt_default_send_ctxt_mask(dd, type)); 684 677 } 685 678 686 679 static u32 get_buffers_allocated(struct send_context *sc)
+1 -1
drivers/infiniband/hw/hfi1/rc.c
··· 89 89 90 90 lockdep_assert_held(&qp->s_lock); 91 91 qp->s_flags |= RVT_S_WAIT_RNR; 92 - qp->s_timer.expires = jiffies + usecs_to_jiffies(to); 92 + priv->s_rnr_timer.expires = jiffies + usecs_to_jiffies(to); 93 93 add_timer(&priv->s_rnr_timer); 94 94 } 95 95
+2 -17
drivers/infiniband/hw/hfi1/sdma.c
··· 2009 2009 write_sde_csr(sde, SD(ENG_ERR_CLEAR), reg); 2010 2010 } 2011 2011 2012 - #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \ 2013 - (r &= ~SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK) 2014 - 2015 - #define SET_STATIC_RATE_CONTROL_SMASK(r) \ 2016 - (r |= SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK) 2017 2012 /* 2018 2013 * set_sdma_integrity 2019 2014 * ··· 2017 2022 static void set_sdma_integrity(struct sdma_engine *sde) 2018 2023 { 2019 2024 struct hfi1_devdata *dd = sde->dd; 2020 - u64 reg; 2021 2025 2022 - if (unlikely(HFI1_CAP_IS_KSET(NO_INTEGRITY))) 2023 - return; 2024 - 2025 - reg = hfi1_pkt_base_sdma_integrity(dd); 2026 - 2027 - if (HFI1_CAP_IS_KSET(STATIC_RATE_CTRL)) 2028 - CLEAR_STATIC_RATE_CONTROL_SMASK(reg); 2029 - else 2030 - SET_STATIC_RATE_CONTROL_SMASK(reg); 2031 - 2032 - write_sde_csr(sde, SD(CHECK_ENABLE), reg); 2026 + write_sde_csr(sde, SD(CHECK_ENABLE), 2027 + hfi1_pkt_base_sdma_integrity(dd)); 2033 2028 } 2034 2029 2035 2030 static void init_sdma_regs(
-25
drivers/infiniband/hw/hfi1/sysfs.c
··· 49 49 #include "hfi.h" 50 50 #include "mad.h" 51 51 #include "trace.h" 52 - #include "affinity.h" 53 52 54 53 /* 55 54 * Start of per-port congestion control structures and support code ··· 622 623 return ret; 623 624 } 624 625 625 - static ssize_t show_sdma_affinity(struct device *device, 626 - struct device_attribute *attr, char *buf) 627 - { 628 - struct hfi1_ibdev *dev = 629 - container_of(device, struct hfi1_ibdev, rdi.ibdev.dev); 630 - struct hfi1_devdata *dd = dd_from_dev(dev); 631 - 632 - return hfi1_get_sdma_affinity(dd, buf); 633 - } 634 - 635 - static ssize_t store_sdma_affinity(struct device *device, 636 - struct device_attribute *attr, 637 - const char *buf, size_t count) 638 - { 639 - struct hfi1_ibdev *dev = 640 - container_of(device, struct hfi1_ibdev, rdi.ibdev.dev); 641 - struct hfi1_devdata *dd = dd_from_dev(dev); 642 - 643 - return hfi1_set_sdma_affinity(dd, buf, count); 644 - } 645 - 646 626 /* 647 627 * end of per-unit (or driver, in some cases, but replicated 648 628 * per unit) functions ··· 636 658 static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL); 637 659 static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL); 638 660 static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset); 639 - static DEVICE_ATTR(sdma_affinity, S_IWUSR | S_IRUGO, show_sdma_affinity, 640 - store_sdma_affinity); 641 661 642 662 static struct device_attribute *hfi1_attributes[] = { 643 663 &dev_attr_hw_rev, ··· 646 670 &dev_attr_boardversion, 647 671 &dev_attr_tempsense, 648 672 &dev_attr_chip_reset, 649 - &dev_attr_sdma_affinity, 650 673 }; 651 674 652 675 int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
-60
drivers/infiniband/hw/hfi1/trace_rx.h
··· 253 253 ) 254 254 ); 255 255 256 - #define SNOOP_PRN \ 257 - "slid %.4x dlid %.4x qpn 0x%.6x opcode 0x%.2x,%s " \ 258 - "svc lvl %d pkey 0x%.4x [header = %d bytes] [data = %d bytes]" 259 - 260 - TRACE_EVENT(snoop_capture, 261 - TP_PROTO(struct hfi1_devdata *dd, 262 - int hdr_len, 263 - struct ib_header *hdr, 264 - int data_len, 265 - void *data), 266 - TP_ARGS(dd, hdr_len, hdr, data_len, data), 267 - TP_STRUCT__entry( 268 - DD_DEV_ENTRY(dd) 269 - __field(u16, slid) 270 - __field(u16, dlid) 271 - __field(u32, qpn) 272 - __field(u8, opcode) 273 - __field(u8, sl) 274 - __field(u16, pkey) 275 - __field(u32, hdr_len) 276 - __field(u32, data_len) 277 - __field(u8, lnh) 278 - __dynamic_array(u8, raw_hdr, hdr_len) 279 - __dynamic_array(u8, raw_pkt, data_len) 280 - ), 281 - TP_fast_assign( 282 - struct ib_other_headers *ohdr; 283 - 284 - __entry->lnh = (u8)(be16_to_cpu(hdr->lrh[0]) & 3); 285 - if (__entry->lnh == HFI1_LRH_BTH) 286 - ohdr = &hdr->u.oth; 287 - else 288 - ohdr = &hdr->u.l.oth; 289 - DD_DEV_ASSIGN(dd); 290 - __entry->slid = be16_to_cpu(hdr->lrh[3]); 291 - __entry->dlid = be16_to_cpu(hdr->lrh[1]); 292 - __entry->qpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK; 293 - __entry->opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff; 294 - __entry->sl = (u8)(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xf; 295 - __entry->pkey = be32_to_cpu(ohdr->bth[0]) & 0xffff; 296 - __entry->hdr_len = hdr_len; 297 - __entry->data_len = data_len; 298 - memcpy(__get_dynamic_array(raw_hdr), hdr, hdr_len); 299 - memcpy(__get_dynamic_array(raw_pkt), data, data_len); 300 - ), 301 - TP_printk( 302 - "[%s] " SNOOP_PRN, 303 - __get_str(dev), 304 - __entry->slid, 305 - __entry->dlid, 306 - __entry->qpn, 307 - __entry->opcode, 308 - show_ib_opcode(__entry->opcode), 309 - __entry->sl, 310 - __entry->pkey, 311 - __entry->hdr_len, 312 - __entry->data_len 313 - ) 314 - ); 315 - 316 256 #endif /* __HFI1_TRACE_RX_H */ 317 257 318 258 #undef TRACE_INCLUDE_PATH
+1 -1
drivers/infiniband/hw/hfi1/user_sdma.c
··· 1144 1144 rb_node = hfi1_mmu_rb_extract(pq->handler, 1145 1145 (unsigned long)iovec->iov.iov_base, 1146 1146 iovec->iov.iov_len); 1147 - if (rb_node && !IS_ERR(rb_node)) 1147 + if (rb_node) 1148 1148 node = container_of(rb_node, struct sdma_mmu_node, rb); 1149 1149 else 1150 1150 rb_node = NULL;
+4 -1
drivers/infiniband/hw/mlx4/ah.c
··· 102 102 if (vlan_tag < 0x1000) 103 103 vlan_tag |= (ah_attr->sl & 7) << 13; 104 104 ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24)); 105 - ah->av.eth.gid_index = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index); 105 + ret = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index); 106 + if (ret < 0) 107 + return ERR_PTR(ret); 108 + ah->av.eth.gid_index = ret; 106 109 ah->av.eth.vlan = cpu_to_be16(vlan_tag); 107 110 ah->av.eth.hop_limit = ah_attr->grh.hop_limit; 108 111 if (ah_attr->static_rate) {
+4 -1
drivers/infiniband/hw/mlx4/cq.c
··· 253 253 if (context) 254 254 if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) { 255 255 err = -EFAULT; 256 - goto err_dbmap; 256 + goto err_cq_free; 257 257 } 258 258 259 259 return &cq->ibcq; 260 + 261 + err_cq_free: 262 + mlx4_cq_free(dev->dev, &cq->mcq); 260 263 261 264 err_dbmap: 262 265 if (context)
+1 -2
drivers/infiniband/hw/mlx5/cq.c
··· 932 932 if (err) 933 933 goto err_create; 934 934 } else { 935 - /* for now choose 64 bytes till we have a proper interface */ 936 - cqe_size = 64; 935 + cqe_size = cache_line_size() == 128 ? 128 : 64; 937 936 err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb, 938 937 &index, &inlen); 939 938 if (err)
+7 -4
drivers/infiniband/hw/mlx5/main.c
··· 2311 2311 { 2312 2312 struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context; 2313 2313 struct ib_event ibev; 2314 - 2314 + bool fatal = false; 2315 2315 u8 port = 0; 2316 2316 2317 2317 switch (event) { 2318 2318 case MLX5_DEV_EVENT_SYS_ERROR: 2319 - ibdev->ib_active = false; 2320 2319 ibev.event = IB_EVENT_DEVICE_FATAL; 2321 2320 mlx5_ib_handle_internal_error(ibdev); 2321 + fatal = true; 2322 2322 break; 2323 2323 2324 2324 case MLX5_DEV_EVENT_PORT_UP: ··· 2372 2372 2373 2373 if (ibdev->ib_active) 2374 2374 ib_dispatch_event(&ibev); 2375 + 2376 + if (fatal) 2377 + ibdev->ib_active = false; 2375 2378 } 2376 2379 2377 2380 static void get_ext_port_caps(struct mlx5_ib_dev *dev) ··· 3120 3117 } 3121 3118 err = init_node_data(dev); 3122 3119 if (err) 3123 - goto err_dealloc; 3120 + goto err_free_port; 3124 3121 3125 3122 mutex_init(&dev->flow_db.lock); 3126 3123 mutex_init(&dev->cap_mask_mutex); ··· 3130 3127 if (ll == IB_LINK_LAYER_ETHERNET) { 3131 3128 err = mlx5_enable_roce(dev); 3132 3129 if (err) 3133 - goto err_dealloc; 3130 + goto err_free_port; 3134 3131 } 3135 3132 3136 3133 err = create_dev_resources(&dev->devr);
+2
drivers/infiniband/hw/mlx5/mlx5_ib.h
··· 626 626 struct mlx5_ib_resources devr; 627 627 struct mlx5_mr_cache cache; 628 628 struct timer_list delay_timer; 629 + /* Prevents soft lock on massive reg MRs */ 630 + struct mutex slow_path_mutex; 629 631 int fill_delay; 630 632 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 631 633 struct ib_odp_caps odp_caps;
+5 -1
drivers/infiniband/hw/mlx5/mr.c
··· 610 610 int err; 611 611 int i; 612 612 613 + mutex_init(&dev->slow_path_mutex); 613 614 cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM); 614 615 if (!cache->wq) { 615 616 mlx5_ib_warn(dev, "failed to create work queue\n"); ··· 1183 1182 goto error; 1184 1183 } 1185 1184 1186 - if (!mr) 1185 + if (!mr) { 1186 + mutex_lock(&dev->slow_path_mutex); 1187 1187 mr = reg_create(NULL, pd, virt_addr, length, umem, ncont, 1188 1188 page_shift, access_flags); 1189 + mutex_unlock(&dev->slow_path_mutex); 1190 + } 1189 1191 1190 1192 if (IS_ERR(mr)) { 1191 1193 err = PTR_ERR(mr);
+10 -2
drivers/infiniband/hw/mlx5/qp.c
··· 2051 2051 2052 2052 mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n", 2053 2053 qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn, 2054 - to_mcq(init_attr->recv_cq)->mcq.cqn, 2055 - to_mcq(init_attr->send_cq)->mcq.cqn); 2054 + init_attr->recv_cq ? to_mcq(init_attr->recv_cq)->mcq.cqn : -1, 2055 + init_attr->send_cq ? to_mcq(init_attr->send_cq)->mcq.cqn : -1); 2056 2056 2057 2057 qp->trans_qp.xrcdn = xrcdn; 2058 2058 ··· 4813 4813 !ib_is_udata_cleared(udata, 0, 4814 4814 udata->inlen)) 4815 4815 return ERR_PTR(-EOPNOTSUPP); 4816 + 4817 + if (init_attr->log_ind_tbl_size > 4818 + MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)) { 4819 + mlx5_ib_dbg(dev, "log_ind_tbl_size = %d is bigger than supported = %d\n", 4820 + init_attr->log_ind_tbl_size, 4821 + MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)); 4822 + return ERR_PTR(-EINVAL); 4823 + } 4816 4824 4817 4825 min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved); 4818 4826 if (udata->outlen && udata->outlen < min_resp_len)
-3
drivers/infiniband/sw/rdmavt/dma.c
··· 90 90 if (WARN_ON(!valid_dma_direction(direction))) 91 91 return BAD_DMA_ADDRESS; 92 92 93 - if (offset + size > PAGE_SIZE) 94 - return BAD_DMA_ADDRESS; 95 - 96 93 addr = (u64)page_address(page); 97 94 if (addr) 98 95 addr += offset;
+2 -6
drivers/infiniband/sw/rxe/rxe_net.c
··· 243 243 { 244 244 int err; 245 245 struct socket *sock; 246 - struct udp_port_cfg udp_cfg; 247 - struct udp_tunnel_sock_cfg tnl_cfg; 248 - 249 - memset(&udp_cfg, 0, sizeof(udp_cfg)); 246 + struct udp_port_cfg udp_cfg = {0}; 247 + struct udp_tunnel_sock_cfg tnl_cfg = {0}; 250 248 251 249 if (ipv6) { 252 250 udp_cfg.family = AF_INET6; ··· 262 264 return ERR_PTR(err); 263 265 } 264 266 265 - tnl_cfg.sk_user_data = NULL; 266 267 tnl_cfg.encap_type = 1; 267 268 tnl_cfg.encap_rcv = rxe_udp_encap_recv; 268 - tnl_cfg.encap_destroy = NULL; 269 269 270 270 /* Setup UDP tunnel */ 271 271 setup_udp_tunnel_sock(net, sock, &tnl_cfg);
+2
drivers/infiniband/sw/rxe/rxe_qp.c
··· 522 522 if (qp->sq.queue) { 523 523 __rxe_do_task(&qp->comp.task); 524 524 __rxe_do_task(&qp->req.task); 525 + rxe_queue_reset(qp->sq.queue); 525 526 } 526 527 527 528 /* cleanup attributes */ ··· 574 573 { 575 574 qp->req.state = QP_STATE_ERROR; 576 575 qp->resp.state = QP_STATE_ERROR; 576 + qp->attr.qp_state = IB_QPS_ERR; 577 577 578 578 /* drain work and packet queues */ 579 579 rxe_run_task(&qp->resp.task, 1);
+9
drivers/infiniband/sw/rxe/rxe_queue.c
··· 84 84 return -EINVAL; 85 85 } 86 86 87 + inline void rxe_queue_reset(struct rxe_queue *q) 88 + { 89 + /* queue is comprised from header and the memory 90 + * of the actual queue. See "struct rxe_queue_buf" in rxe_queue.h 91 + * reset only the queue itself and not the management header 92 + */ 93 + memset(q->buf->data, 0, q->buf_size - sizeof(struct rxe_queue_buf)); 94 + } 95 + 87 96 struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, 88 97 int *num_elem, 89 98 unsigned int elem_size)
+2
drivers/infiniband/sw/rxe/rxe_queue.h
··· 84 84 size_t buf_size, 85 85 struct rxe_mmap_info **ip_p); 86 86 87 + void rxe_queue_reset(struct rxe_queue *q); 88 + 87 89 struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, 88 90 int *num_elem, 89 91 unsigned int elem_size);
+13 -8
drivers/infiniband/sw/rxe/rxe_req.c
··· 696 696 qp->req.wqe_index); 697 697 wqe->state = wqe_state_done; 698 698 wqe->status = IB_WC_SUCCESS; 699 - goto complete; 699 + __rxe_do_task(&qp->comp.task); 700 + return 0; 700 701 } 701 702 payload = mtu; 702 703 } ··· 746 745 wqe->status = IB_WC_LOC_PROT_ERR; 747 746 wqe->state = wqe_state_error; 748 747 749 - complete: 750 - if (qp_type(qp) != IB_QPT_RC) { 751 - while (rxe_completer(qp) == 0) 752 - ; 753 - } 754 - 755 - return 0; 748 + /* 749 + * IBA Spec. Section 10.7.3.1 SIGNALED COMPLETIONS 750 + * ---------8<---------8<------------- 751 + * ...Note that if a completion error occurs, a Work Completion 752 + * will always be generated, even if the signaling 753 + * indicator requests an Unsignaled Completion. 754 + * ---------8<---------8<------------- 755 + */ 756 + wqe->wr.send_flags |= IB_SEND_SIGNALED; 757 + __rxe_do_task(&qp->comp.task); 758 + return -EAGAIN; 756 759 757 760 exit: 758 761 return -EAGAIN;
+7 -6
drivers/mailbox/pcc.c
··· 65 65 #include <linux/mailbox_controller.h> 66 66 #include <linux/mailbox_client.h> 67 67 #include <linux/io-64-nonatomic-lo-hi.h> 68 + #include <acpi/pcc.h> 68 69 69 70 #include "mailbox.h" 70 71 ··· 268 267 if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone) 269 268 chan->txdone_method |= TXDONE_BY_ACK; 270 269 270 + spin_unlock_irqrestore(&chan->lock, flags); 271 + 271 272 if (pcc_doorbell_irq[subspace_id] > 0) { 272 273 int rc; 273 274 ··· 278 275 if (unlikely(rc)) { 279 276 dev_err(dev, "failed to register PCC interrupt %d\n", 280 277 pcc_doorbell_irq[subspace_id]); 278 + pcc_mbox_free_channel(chan); 281 279 chan = ERR_PTR(rc); 282 280 } 283 281 } 284 - 285 - spin_unlock_irqrestore(&chan->lock, flags); 286 282 287 283 return chan; 288 284 } ··· 306 304 return; 307 305 } 308 306 307 + if (pcc_doorbell_irq[id] > 0) 308 + devm_free_irq(chan->mbox->dev, pcc_doorbell_irq[id], chan); 309 + 309 310 spin_lock_irqsave(&chan->lock, flags); 310 311 chan->cl = NULL; 311 312 chan->active_req = NULL; 312 313 if (chan->txdone_method == (TXDONE_BY_POLL | TXDONE_BY_ACK)) 313 314 chan->txdone_method = TXDONE_BY_POLL; 314 315 315 - if (pcc_doorbell_irq[id] > 0) 316 - devm_free_irq(chan->mbox->dev, pcc_doorbell_irq[id], chan); 317 - 318 316 spin_unlock_irqrestore(&chan->lock, flags); 319 317 } 320 318 EXPORT_SYMBOL_GPL(pcc_mbox_free_channel); 321 - 322 319 323 320 /** 324 321 * pcc_send_data - Called from Mailbox Controller code. Used
+9 -22
drivers/mfd/intel-lpss-pci.c
··· 123 123 .properties = apl_i2c_properties, 124 124 }; 125 125 126 - static const struct intel_lpss_platform_info kbl_info = { 127 - .clk_rate = 120000000, 128 - }; 129 - 130 - static const struct intel_lpss_platform_info kbl_uart_info = { 131 - .clk_rate = 120000000, 132 - .clk_con_id = "baudclk", 133 - }; 134 - 135 - static const struct intel_lpss_platform_info kbl_i2c_info = { 136 - .clk_rate = 133000000, 137 - }; 138 - 139 126 static const struct pci_device_id intel_lpss_pci_ids[] = { 140 127 /* BXT A-Step */ 141 128 { PCI_VDEVICE(INTEL, 0x0aac), (kernel_ulong_t)&bxt_i2c_info }, ··· 194 207 { PCI_VDEVICE(INTEL, 0xa161), (kernel_ulong_t)&spt_i2c_info }, 195 208 { PCI_VDEVICE(INTEL, 0xa166), (kernel_ulong_t)&spt_uart_info }, 196 209 /* KBL-H */ 197 - { PCI_VDEVICE(INTEL, 0xa2a7), (kernel_ulong_t)&kbl_uart_info }, 198 - { PCI_VDEVICE(INTEL, 0xa2a8), (kernel_ulong_t)&kbl_uart_info }, 199 - { PCI_VDEVICE(INTEL, 0xa2a9), (kernel_ulong_t)&kbl_info }, 200 - { PCI_VDEVICE(INTEL, 0xa2aa), (kernel_ulong_t)&kbl_info }, 201 - { PCI_VDEVICE(INTEL, 0xa2e0), (kernel_ulong_t)&kbl_i2c_info }, 202 - { PCI_VDEVICE(INTEL, 0xa2e1), (kernel_ulong_t)&kbl_i2c_info }, 203 - { PCI_VDEVICE(INTEL, 0xa2e2), (kernel_ulong_t)&kbl_i2c_info }, 204 - { PCI_VDEVICE(INTEL, 0xa2e3), (kernel_ulong_t)&kbl_i2c_info }, 205 - { PCI_VDEVICE(INTEL, 0xa2e6), (kernel_ulong_t)&kbl_uart_info }, 210 + { PCI_VDEVICE(INTEL, 0xa2a7), (kernel_ulong_t)&spt_uart_info }, 211 + { PCI_VDEVICE(INTEL, 0xa2a8), (kernel_ulong_t)&spt_uart_info }, 212 + { PCI_VDEVICE(INTEL, 0xa2a9), (kernel_ulong_t)&spt_info }, 213 + { PCI_VDEVICE(INTEL, 0xa2aa), (kernel_ulong_t)&spt_info }, 214 + { PCI_VDEVICE(INTEL, 0xa2e0), (kernel_ulong_t)&spt_i2c_info }, 215 + { PCI_VDEVICE(INTEL, 0xa2e1), (kernel_ulong_t)&spt_i2c_info }, 216 + { PCI_VDEVICE(INTEL, 0xa2e2), (kernel_ulong_t)&spt_i2c_info }, 217 + { PCI_VDEVICE(INTEL, 0xa2e3), (kernel_ulong_t)&spt_i2c_info }, 218 + { PCI_VDEVICE(INTEL, 0xa2e6), (kernel_ulong_t)&spt_uart_info }, 206 219 
{ } 207 220 }; 208 221 MODULE_DEVICE_TABLE(pci, intel_lpss_pci_ids);
-3
drivers/mfd/intel-lpss.c
··· 502 502 for (i = 0; i < LPSS_PRIV_REG_COUNT; i++) 503 503 lpss->priv_ctx[i] = readl(lpss->priv + i * 4); 504 504 505 - /* Put the device into reset state */ 506 - writel(0, lpss->priv + LPSS_PRIV_RESETS); 507 - 508 505 return 0; 509 506 } 510 507 EXPORT_SYMBOL_GPL(intel_lpss_suspend);
+4 -2
drivers/mfd/intel_soc_pmic_bxtwc.c
··· 86 86 BXTWC_THRM2_IRQ, 87 87 BXTWC_BCU_IRQ, 88 88 BXTWC_ADC_IRQ, 89 + BXTWC_USBC_IRQ, 89 90 BXTWC_CHGR0_IRQ, 90 91 BXTWC_CHGR1_IRQ, 91 92 BXTWC_GPIO0_IRQ, ··· 112 111 REGMAP_IRQ_REG(BXTWC_THRM2_IRQ, 2, 0xff), 113 112 REGMAP_IRQ_REG(BXTWC_BCU_IRQ, 3, 0x1f), 114 113 REGMAP_IRQ_REG(BXTWC_ADC_IRQ, 4, 0xff), 115 - REGMAP_IRQ_REG(BXTWC_CHGR0_IRQ, 5, 0x3f), 114 + REGMAP_IRQ_REG(BXTWC_USBC_IRQ, 5, BIT(5)), 115 + REGMAP_IRQ_REG(BXTWC_CHGR0_IRQ, 5, 0x1f), 116 116 REGMAP_IRQ_REG(BXTWC_CHGR1_IRQ, 6, 0x1f), 117 117 REGMAP_IRQ_REG(BXTWC_GPIO0_IRQ, 7, 0xff), 118 118 REGMAP_IRQ_REG(BXTWC_GPIO1_IRQ, 8, 0x3f), ··· 148 146 }; 149 147 150 148 static struct resource usbc_resources[] = { 151 - DEFINE_RES_IRQ_NAMED(BXTWC_CHGR0_IRQ, "USBC"), 149 + DEFINE_RES_IRQ(BXTWC_USBC_IRQ), 152 150 }; 153 151 154 152 static struct resource charger_resources[] = {
+2
drivers/mfd/mfd-core.c
··· 399 399 clones[i]); 400 400 } 401 401 402 + put_device(dev); 403 + 402 404 return 0; 403 405 } 404 406 EXPORT_SYMBOL(mfd_clone_cell);
+2
drivers/mfd/stmpe.c
··· 851 851 if (ret < 0) 852 852 return ret; 853 853 854 + msleep(10); 855 + 854 856 timeout = jiffies + msecs_to_jiffies(100); 855 857 while (time_before(jiffies, timeout)) { 856 858 ret = __stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_SYS_CTRL]);
+4 -12
drivers/net/dsa/b53/b53_common.c
··· 962 962 963 963 vl->members |= BIT(port) | BIT(cpu_port); 964 964 if (untagged) 965 - vl->untag |= BIT(port) | BIT(cpu_port); 965 + vl->untag |= BIT(port); 966 966 else 967 - vl->untag &= ~(BIT(port) | BIT(cpu_port)); 967 + vl->untag &= ~BIT(port); 968 + vl->untag &= ~BIT(cpu_port); 968 969 969 970 b53_set_vlan_entry(dev, vid, vl); 970 971 b53_fast_age_vlan(dev, vid); ··· 973 972 974 973 if (pvid) { 975 974 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), 976 - vlan->vid_end); 977 - b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(cpu_port), 978 975 vlan->vid_end); 979 976 b53_fast_age_vlan(dev, vid); 980 977 } ··· 983 984 { 984 985 struct b53_device *dev = ds->priv; 985 986 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; 986 - unsigned int cpu_port = dev->cpu_port; 987 987 struct b53_vlan *vl; 988 988 u16 vid; 989 989 u16 pvid; ··· 995 997 b53_get_vlan_entry(dev, vid, vl); 996 998 997 999 vl->members &= ~BIT(port); 998 - if ((vl->members & BIT(cpu_port)) == BIT(cpu_port)) 999 - vl->members = 0; 1000 1000 1001 1001 if (pvid == vid) { 1002 1002 if (is5325(dev) || is5365(dev)) ··· 1003 1007 pvid = 0; 1004 1008 } 1005 1009 1006 - if (untagged) { 1010 + if (untagged) 1007 1011 vl->untag &= ~(BIT(port)); 1008 - if ((vl->untag & BIT(cpu_port)) == BIT(cpu_port)) 1009 - vl->untag = 0; 1010 - } 1011 1012 1012 1013 b53_set_vlan_entry(dev, vid, vl); 1013 1014 b53_fast_age_vlan(dev, vid); 1014 1015 } 1015 1016 1016 1017 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid); 1017 - b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(cpu_port), pvid); 1018 1018 b53_fast_age_vlan(dev, pvid); 1019 1019 1020 1020 return 0;
+4 -3
drivers/net/ethernet/arc/emac_main.c
··· 460 460 if (ndev->flags & IFF_ALLMULTI) { 461 461 arc_reg_set(priv, R_LAFL, ~0); 462 462 arc_reg_set(priv, R_LAFH, ~0); 463 - } else { 463 + } else if (ndev->flags & IFF_MULTICAST) { 464 464 struct netdev_hw_addr *ha; 465 465 unsigned int filter[2] = { 0, 0 }; 466 466 int bit; ··· 472 472 473 473 arc_reg_set(priv, R_LAFL, filter[0]); 474 474 arc_reg_set(priv, R_LAFH, filter[1]); 475 + } else { 476 + arc_reg_set(priv, R_LAFL, 0); 477 + arc_reg_set(priv, R_LAFH, 0); 475 478 } 476 479 } 477 480 } ··· 767 764 ndev->netdev_ops = &arc_emac_netdev_ops; 768 765 ndev->ethtool_ops = &arc_emac_ethtool_ops; 769 766 ndev->watchdog_timeo = TX_TIMEOUT; 770 - /* FIXME :: no multicast support yet */ 771 - ndev->flags &= ~IFF_MULTICAST; 772 767 773 768 priv = netdev_priv(ndev); 774 769 priv->dev = dev;
+4
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 4929 4929 napi_hash_del(&bnapi->napi); 4930 4930 netif_napi_del(&bnapi->napi); 4931 4931 } 4932 + /* We called napi_hash_del() before netif_napi_del(), we need 4933 + * to respect an RCU grace period before freeing napi structures. 4934 + */ 4935 + synchronize_net(); 4932 4936 } 4933 4937 4934 4938 static void bnxt_init_napi(struct bnxt *bp)
+6
drivers/net/ethernet/cadence/macb.c
··· 2846 2846 lp->skb_length = skb->len; 2847 2847 lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len, 2848 2848 DMA_TO_DEVICE); 2849 + if (dma_mapping_error(NULL, lp->skb_physaddr)) { 2850 + dev_kfree_skb_any(skb); 2851 + dev->stats.tx_dropped++; 2852 + netdev_err(dev, "%s: DMA mapping error\n", __func__); 2853 + return NETDEV_TX_OK; 2854 + } 2849 2855 2850 2856 /* Set address of the data in the Transmit Address register */ 2851 2857 macb_writel(lp, TAR, lp->skb_physaddr);
+35 -27
drivers/net/ethernet/cavium/thunder/nic.h
··· 47 47 48 48 /* Min/Max packet size */ 49 49 #define NIC_HW_MIN_FRS 64 50 - #define NIC_HW_MAX_FRS 9200 /* 9216 max packet including FCS */ 50 + #define NIC_HW_MAX_FRS 9190 /* Excluding L2 header and FCS */ 51 51 52 52 /* Max pkinds */ 53 53 #define NIC_MAX_PKIND 16 ··· 178 178 179 179 struct nicvf_hw_stats { 180 180 u64 rx_bytes; 181 + u64 rx_frames; 181 182 u64 rx_ucast_frames; 182 183 u64 rx_bcast_frames; 183 184 u64 rx_mcast_frames; 184 - u64 rx_fcs_errors; 185 - u64 rx_l2_errors; 185 + u64 rx_drops; 186 186 u64 rx_drop_red; 187 187 u64 rx_drop_red_bytes; 188 188 u64 rx_drop_overrun; ··· 191 191 u64 rx_drop_mcast; 192 192 u64 rx_drop_l3_bcast; 193 193 u64 rx_drop_l3_mcast; 194 + u64 rx_fcs_errors; 195 + u64 rx_l2_errors; 196 + 197 + u64 tx_bytes; 198 + u64 tx_frames; 199 + u64 tx_ucast_frames; 200 + u64 tx_bcast_frames; 201 + u64 tx_mcast_frames; 202 + u64 tx_drops; 203 + }; 204 + 205 + struct nicvf_drv_stats { 206 + /* CQE Rx errs */ 194 207 u64 rx_bgx_truncated_pkts; 195 208 u64 rx_jabber_errs; 196 209 u64 rx_fcs_errs; ··· 229 216 u64 rx_l4_pclp; 230 217 u64 rx_truncated_pkts; 231 218 232 - u64 tx_bytes_ok; 233 - u64 tx_ucast_frames_ok; 234 - u64 tx_bcast_frames_ok; 235 - u64 tx_mcast_frames_ok; 236 - u64 tx_drops; 237 - }; 219 + /* CQE Tx errs */ 220 + u64 tx_desc_fault; 221 + u64 tx_hdr_cons_err; 222 + u64 tx_subdesc_err; 223 + u64 tx_max_size_exceeded; 224 + u64 tx_imm_size_oflow; 225 + u64 tx_data_seq_err; 226 + u64 tx_mem_seq_err; 227 + u64 tx_lock_viol; 228 + u64 tx_data_fault; 229 + u64 tx_tstmp_conflict; 230 + u64 tx_tstmp_timeout; 231 + u64 tx_mem_fault; 232 + u64 tx_csum_overlap; 233 + u64 tx_csum_overflow; 238 234 239 - struct nicvf_drv_stats { 240 - /* Rx */ 241 - u64 rx_frames_ok; 242 - u64 rx_frames_64; 243 - u64 rx_frames_127; 244 - u64 rx_frames_255; 245 - u64 rx_frames_511; 246 - u64 rx_frames_1023; 247 - u64 rx_frames_1518; 248 - u64 rx_frames_jumbo; 249 - u64 rx_drops; 250 - 235 + /* driver debug stats */ 251 236 u64 
rcv_buffer_alloc_failures; 252 - 253 - /* Tx */ 254 - u64 tx_frames_ok; 255 - u64 tx_drops; 256 237 u64 tx_tso; 257 238 u64 tx_timeout; 258 239 u64 txq_stop; 259 240 u64 txq_wake; 241 + 242 + struct u64_stats_sync syncp; 260 243 }; 261 244 262 245 struct nicvf { ··· 291 282 292 283 u8 node; 293 284 u8 cpi_alg; 294 - u16 mtu; 295 285 bool link_up; 296 286 u8 duplex; 297 287 u32 speed; ··· 306 298 307 299 /* Stats */ 308 300 struct nicvf_hw_stats hw_stats; 309 - struct nicvf_drv_stats drv_stats; 301 + struct nicvf_drv_stats __percpu *drv_stats; 310 302 struct bgx_stats bgx_stats; 311 303 312 304 /* MSI-X */
+27 -12
drivers/net/ethernet/cavium/thunder/nic_main.c
··· 11 11 #include <linux/pci.h> 12 12 #include <linux/etherdevice.h> 13 13 #include <linux/of.h> 14 + #include <linux/if_vlan.h> 14 15 15 16 #include "nic_reg.h" 16 17 #include "nic.h" ··· 261 260 /* Update hardware min/max frame size */ 262 261 static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf) 263 262 { 264 - if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS)) { 265 - dev_err(&nic->pdev->dev, 266 - "Invalid MTU setting from VF%d rejected, should be between %d and %d\n", 267 - vf, NIC_HW_MIN_FRS, NIC_HW_MAX_FRS); 268 - return 1; 269 - } 270 - new_frs += ETH_HLEN; 271 - if (new_frs <= nic->pkind.maxlen) 272 - return 0; 263 + int bgx, lmac, lmac_cnt; 264 + u64 lmac_credits; 273 265 274 - nic->pkind.maxlen = new_frs; 275 - nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG, *(u64 *)&nic->pkind); 266 + if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS)) 267 + return 1; 268 + 269 + bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); 270 + lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); 271 + lmac += bgx * MAX_LMAC_PER_BGX; 272 + 273 + new_frs += VLAN_ETH_HLEN + ETH_FCS_LEN + 4; 274 + 275 + /* Update corresponding LMAC credits */ 276 + lmac_cnt = bgx_get_lmac_count(nic->node, bgx); 277 + lmac_credits = nic_reg_read(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8)); 278 + lmac_credits &= ~(0xFFFFFULL << 12); 279 + lmac_credits |= (((((48 * 1024) / lmac_cnt) - new_frs) / 16) << 12); 280 + nic_reg_write(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8), lmac_credits); 281 + 282 + /* Enforce MTU in HW 283 + * This config is supported only from 88xx pass 2.0 onwards. 
284 + */ 285 + if (!pass1_silicon(nic->pdev)) 286 + nic_reg_write(nic, 287 + NIC_PF_LMAC_0_7_CFG2 + (lmac * 8), new_frs); 276 288 return 0; 277 289 } 278 290 ··· 478 464 479 465 /* PKIND configuration */ 480 466 nic->pkind.minlen = 0; 481 - nic->pkind.maxlen = NIC_HW_MAX_FRS + ETH_HLEN; 467 + nic->pkind.maxlen = NIC_HW_MAX_FRS + VLAN_ETH_HLEN + ETH_FCS_LEN + 4; 482 468 nic->pkind.lenerr_en = 1; 483 469 nic->pkind.rx_hdr = 0; 484 470 nic->pkind.hdr_sl = 0; ··· 851 837 nic_reg_write(nic, reg_addr, 0); 852 838 } 853 839 } 840 + 854 841 return 0; 855 842 } 856 843
+1
drivers/net/ethernet/cavium/thunder/nic_reg.h
··· 106 106 #define NIC_PF_MPI_0_2047_CFG (0x210000) 107 107 #define NIC_PF_RSSI_0_4097_RQ (0x220000) 108 108 #define NIC_PF_LMAC_0_7_CFG (0x240000) 109 + #define NIC_PF_LMAC_0_7_CFG2 (0x240100) 109 110 #define NIC_PF_LMAC_0_7_SW_XOFF (0x242000) 110 111 #define NIC_PF_LMAC_0_7_CREDIT (0x244000) 111 112 #define NIC_PF_CHAN_0_255_TX_CFG (0x400000)
+59 -46
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
··· 36 36 37 37 static const struct nicvf_stat nicvf_hw_stats[] = { 38 38 NICVF_HW_STAT(rx_bytes), 39 + NICVF_HW_STAT(rx_frames), 39 40 NICVF_HW_STAT(rx_ucast_frames), 40 41 NICVF_HW_STAT(rx_bcast_frames), 41 42 NICVF_HW_STAT(rx_mcast_frames), 42 - NICVF_HW_STAT(rx_fcs_errors), 43 - NICVF_HW_STAT(rx_l2_errors), 43 + NICVF_HW_STAT(rx_drops), 44 44 NICVF_HW_STAT(rx_drop_red), 45 45 NICVF_HW_STAT(rx_drop_red_bytes), 46 46 NICVF_HW_STAT(rx_drop_overrun), ··· 49 49 NICVF_HW_STAT(rx_drop_mcast), 50 50 NICVF_HW_STAT(rx_drop_l3_bcast), 51 51 NICVF_HW_STAT(rx_drop_l3_mcast), 52 - NICVF_HW_STAT(rx_bgx_truncated_pkts), 53 - NICVF_HW_STAT(rx_jabber_errs), 54 - NICVF_HW_STAT(rx_fcs_errs), 55 - NICVF_HW_STAT(rx_bgx_errs), 56 - NICVF_HW_STAT(rx_prel2_errs), 57 - NICVF_HW_STAT(rx_l2_hdr_malformed), 58 - NICVF_HW_STAT(rx_oversize), 59 - NICVF_HW_STAT(rx_undersize), 60 - NICVF_HW_STAT(rx_l2_len_mismatch), 61 - NICVF_HW_STAT(rx_l2_pclp), 62 - NICVF_HW_STAT(rx_ip_ver_errs), 63 - NICVF_HW_STAT(rx_ip_csum_errs), 64 - NICVF_HW_STAT(rx_ip_hdr_malformed), 65 - NICVF_HW_STAT(rx_ip_payload_malformed), 66 - NICVF_HW_STAT(rx_ip_ttl_errs), 67 - NICVF_HW_STAT(rx_l3_pclp), 68 - NICVF_HW_STAT(rx_l4_malformed), 69 - NICVF_HW_STAT(rx_l4_csum_errs), 70 - NICVF_HW_STAT(rx_udp_len_errs), 71 - NICVF_HW_STAT(rx_l4_port_errs), 72 - NICVF_HW_STAT(rx_tcp_flag_errs), 73 - NICVF_HW_STAT(rx_tcp_offset_errs), 74 - NICVF_HW_STAT(rx_l4_pclp), 75 - NICVF_HW_STAT(rx_truncated_pkts), 76 - NICVF_HW_STAT(tx_bytes_ok), 77 - NICVF_HW_STAT(tx_ucast_frames_ok), 78 - NICVF_HW_STAT(tx_bcast_frames_ok), 79 - NICVF_HW_STAT(tx_mcast_frames_ok), 52 + NICVF_HW_STAT(rx_fcs_errors), 53 + NICVF_HW_STAT(rx_l2_errors), 54 + NICVF_HW_STAT(tx_bytes), 55 + NICVF_HW_STAT(tx_frames), 56 + NICVF_HW_STAT(tx_ucast_frames), 57 + NICVF_HW_STAT(tx_bcast_frames), 58 + NICVF_HW_STAT(tx_mcast_frames), 59 + NICVF_HW_STAT(tx_drops), 80 60 }; 81 61 82 62 static const struct nicvf_stat nicvf_drv_stats[] = { 83 - NICVF_DRV_STAT(rx_frames_ok), 84 - 
NICVF_DRV_STAT(rx_frames_64), 85 - NICVF_DRV_STAT(rx_frames_127), 86 - NICVF_DRV_STAT(rx_frames_255), 87 - NICVF_DRV_STAT(rx_frames_511), 88 - NICVF_DRV_STAT(rx_frames_1023), 89 - NICVF_DRV_STAT(rx_frames_1518), 90 - NICVF_DRV_STAT(rx_frames_jumbo), 91 - NICVF_DRV_STAT(rx_drops), 63 + NICVF_DRV_STAT(rx_bgx_truncated_pkts), 64 + NICVF_DRV_STAT(rx_jabber_errs), 65 + NICVF_DRV_STAT(rx_fcs_errs), 66 + NICVF_DRV_STAT(rx_bgx_errs), 67 + NICVF_DRV_STAT(rx_prel2_errs), 68 + NICVF_DRV_STAT(rx_l2_hdr_malformed), 69 + NICVF_DRV_STAT(rx_oversize), 70 + NICVF_DRV_STAT(rx_undersize), 71 + NICVF_DRV_STAT(rx_l2_len_mismatch), 72 + NICVF_DRV_STAT(rx_l2_pclp), 73 + NICVF_DRV_STAT(rx_ip_ver_errs), 74 + NICVF_DRV_STAT(rx_ip_csum_errs), 75 + NICVF_DRV_STAT(rx_ip_hdr_malformed), 76 + NICVF_DRV_STAT(rx_ip_payload_malformed), 77 + NICVF_DRV_STAT(rx_ip_ttl_errs), 78 + NICVF_DRV_STAT(rx_l3_pclp), 79 + NICVF_DRV_STAT(rx_l4_malformed), 80 + NICVF_DRV_STAT(rx_l4_csum_errs), 81 + NICVF_DRV_STAT(rx_udp_len_errs), 82 + NICVF_DRV_STAT(rx_l4_port_errs), 83 + NICVF_DRV_STAT(rx_tcp_flag_errs), 84 + NICVF_DRV_STAT(rx_tcp_offset_errs), 85 + NICVF_DRV_STAT(rx_l4_pclp), 86 + NICVF_DRV_STAT(rx_truncated_pkts), 87 + 88 + NICVF_DRV_STAT(tx_desc_fault), 89 + NICVF_DRV_STAT(tx_hdr_cons_err), 90 + NICVF_DRV_STAT(tx_subdesc_err), 91 + NICVF_DRV_STAT(tx_max_size_exceeded), 92 + NICVF_DRV_STAT(tx_imm_size_oflow), 93 + NICVF_DRV_STAT(tx_data_seq_err), 94 + NICVF_DRV_STAT(tx_mem_seq_err), 95 + NICVF_DRV_STAT(tx_lock_viol), 96 + NICVF_DRV_STAT(tx_data_fault), 97 + NICVF_DRV_STAT(tx_tstmp_conflict), 98 + NICVF_DRV_STAT(tx_tstmp_timeout), 99 + NICVF_DRV_STAT(tx_mem_fault), 100 + NICVF_DRV_STAT(tx_csum_overlap), 101 + NICVF_DRV_STAT(tx_csum_overflow), 102 + 92 103 NICVF_DRV_STAT(rcv_buffer_alloc_failures), 93 - NICVF_DRV_STAT(tx_frames_ok), 94 104 NICVF_DRV_STAT(tx_tso), 95 - NICVF_DRV_STAT(tx_drops), 96 105 NICVF_DRV_STAT(tx_timeout), 97 106 NICVF_DRV_STAT(txq_stop), 98 107 NICVF_DRV_STAT(txq_wake), ··· 287 278 struct 
ethtool_stats *stats, u64 *data) 288 279 { 289 280 struct nicvf *nic = netdev_priv(netdev); 290 - int stat; 291 - int sqs; 281 + int stat, tmp_stats; 282 + int sqs, cpu; 292 283 293 284 nicvf_update_stats(nic); 294 285 ··· 298 289 for (stat = 0; stat < nicvf_n_hw_stats; stat++) 299 290 *(data++) = ((u64 *)&nic->hw_stats) 300 291 [nicvf_hw_stats[stat].index]; 301 - for (stat = 0; stat < nicvf_n_drv_stats; stat++) 302 - *(data++) = ((u64 *)&nic->drv_stats) 303 - [nicvf_drv_stats[stat].index]; 292 + for (stat = 0; stat < nicvf_n_drv_stats; stat++) { 293 + tmp_stats = 0; 294 + for_each_possible_cpu(cpu) 295 + tmp_stats += ((u64 *)per_cpu_ptr(nic->drv_stats, cpu)) 296 + [nicvf_drv_stats[stat].index]; 297 + *(data++) = tmp_stats; 298 + } 304 299 305 300 nicvf_get_qset_stats(nic, stats, &data); 306 301
+83 -75
drivers/net/ethernet/cavium/thunder/nicvf_main.c
··· 69 69 return qidx; 70 70 } 71 71 72 - static inline void nicvf_set_rx_frame_cnt(struct nicvf *nic, 73 - struct sk_buff *skb) 74 - { 75 - if (skb->len <= 64) 76 - nic->drv_stats.rx_frames_64++; 77 - else if (skb->len <= 127) 78 - nic->drv_stats.rx_frames_127++; 79 - else if (skb->len <= 255) 80 - nic->drv_stats.rx_frames_255++; 81 - else if (skb->len <= 511) 82 - nic->drv_stats.rx_frames_511++; 83 - else if (skb->len <= 1023) 84 - nic->drv_stats.rx_frames_1023++; 85 - else if (skb->len <= 1518) 86 - nic->drv_stats.rx_frames_1518++; 87 - else 88 - nic->drv_stats.rx_frames_jumbo++; 89 - } 90 - 91 72 /* The Cavium ThunderX network controller can *only* be found in SoCs 92 73 * containing the ThunderX ARM64 CPU implementation. All accesses to the device 93 74 * registers on this platform are implicitly strongly ordered with respect ··· 473 492 static int nicvf_init_resources(struct nicvf *nic) 474 493 { 475 494 int err; 476 - union nic_mbx mbx = {}; 477 - 478 - mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE; 479 495 480 496 /* Enable Qset */ 481 497 nicvf_qset_config(nic, true); ··· 485 507 return err; 486 508 } 487 509 488 - /* Send VF config done msg to PF */ 489 - nicvf_write_to_mbx(nic, &mbx); 490 - 491 510 return 0; 492 511 } 493 512 494 513 static void nicvf_snd_pkt_handler(struct net_device *netdev, 495 - struct cmp_queue *cq, 496 514 struct cqe_send_t *cqe_tx, 497 515 int cqe_type, int budget, 498 516 unsigned int *tx_pkts, unsigned int *tx_bytes) ··· 510 536 __func__, cqe_tx->sq_qs, cqe_tx->sq_idx, 511 537 cqe_tx->sqe_ptr, hdr->subdesc_cnt); 512 538 513 - nicvf_check_cqe_tx_errs(nic, cq, cqe_tx); 539 + nicvf_check_cqe_tx_errs(nic, cqe_tx); 514 540 skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr]; 515 541 if (skb) { 516 542 /* Check for dummy descriptor used for HW TSO offload on 88xx */ ··· 604 630 return; 605 631 } 606 632 607 - nicvf_set_rx_frame_cnt(nic, skb); 608 - 609 633 nicvf_set_rxhash(netdev, cqe_rx, skb); 610 634 611 635 skb_record_rx_queue(skb, rq_idx); 
··· 675 703 work_done++; 676 704 break; 677 705 case CQE_TYPE_SEND: 678 - nicvf_snd_pkt_handler(netdev, cq, 706 + nicvf_snd_pkt_handler(netdev, 679 707 (void *)cq_desc, CQE_TYPE_SEND, 680 708 budget, &tx_pkts, &tx_bytes); 681 709 tx_done++; ··· 712 740 nic = nic->pnicvf; 713 741 if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) { 714 742 netif_tx_start_queue(txq); 715 - nic->drv_stats.txq_wake++; 743 + this_cpu_inc(nic->drv_stats->txq_wake); 716 744 if (netif_msg_tx_err(nic)) 717 745 netdev_warn(netdev, 718 746 "%s: Transmit queue wakeup SQ%d\n", ··· 1056 1084 1057 1085 if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) { 1058 1086 netif_tx_stop_queue(txq); 1059 - nic->drv_stats.txq_stop++; 1087 + this_cpu_inc(nic->drv_stats->txq_stop); 1060 1088 if (netif_msg_tx_err(nic)) 1061 1089 netdev_warn(netdev, 1062 1090 "%s: Transmit ring full, stopping SQ%d\n", ··· 1161 1189 return 0; 1162 1190 } 1163 1191 1192 + static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu) 1193 + { 1194 + union nic_mbx mbx = {}; 1195 + 1196 + mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS; 1197 + mbx.frs.max_frs = mtu; 1198 + mbx.frs.vf_id = nic->vf_id; 1199 + 1200 + return nicvf_send_msg_to_pf(nic, &mbx); 1201 + } 1202 + 1164 1203 int nicvf_open(struct net_device *netdev) 1165 1204 { 1166 - int err, qidx; 1205 + int cpu, err, qidx; 1167 1206 struct nicvf *nic = netdev_priv(netdev); 1168 1207 struct queue_set *qs = nic->qs; 1169 1208 struct nicvf_cq_poll *cq_poll = NULL; 1170 - 1171 - nic->mtu = netdev->mtu; 1209 + union nic_mbx mbx = {}; 1172 1210 1173 1211 netif_carrier_off(netdev); 1174 1212 ··· 1230 1248 if (nic->sqs_mode) 1231 1249 nicvf_get_primary_vf_struct(nic); 1232 1250 1233 - /* Configure receive side scaling */ 1234 - if (!nic->sqs_mode) 1251 + /* Configure receive side scaling and MTU */ 1252 + if (!nic->sqs_mode) { 1235 1253 nicvf_rss_init(nic); 1254 + if (nicvf_update_hw_max_frs(nic, netdev->mtu)) 1255 + goto cleanup; 1256 + 1257 + /* Clear percpu 
stats */ 1258 + for_each_possible_cpu(cpu) 1259 + memset(per_cpu_ptr(nic->drv_stats, cpu), 0, 1260 + sizeof(struct nicvf_drv_stats)); 1261 + } 1236 1262 1237 1263 err = nicvf_register_interrupts(nic); 1238 1264 if (err) ··· 1266 1276 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) 1267 1277 nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx); 1268 1278 1269 - nic->drv_stats.txq_stop = 0; 1270 - nic->drv_stats.txq_wake = 0; 1279 + /* Send VF config done msg to PF */ 1280 + mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE; 1281 + nicvf_write_to_mbx(nic, &mbx); 1271 1282 1272 1283 return 0; 1273 1284 cleanup: ··· 1288 1297 return err; 1289 1298 } 1290 1299 1291 - static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu) 1292 - { 1293 - union nic_mbx mbx = {}; 1294 - 1295 - mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS; 1296 - mbx.frs.max_frs = mtu; 1297 - mbx.frs.vf_id = nic->vf_id; 1298 - 1299 - return nicvf_send_msg_to_pf(nic, &mbx); 1300 - } 1301 - 1302 1300 static int nicvf_change_mtu(struct net_device *netdev, int new_mtu) 1303 1301 { 1304 1302 struct nicvf *nic = netdev_priv(netdev); 1303 + int orig_mtu = netdev->mtu; 1305 1304 1306 - if (nicvf_update_hw_max_frs(nic, new_mtu)) 1307 - return -EINVAL; 1308 1305 netdev->mtu = new_mtu; 1309 - nic->mtu = new_mtu; 1306 + 1307 + if (!netif_running(netdev)) 1308 + return 0; 1309 + 1310 + if (nicvf_update_hw_max_frs(nic, new_mtu)) { 1311 + netdev->mtu = orig_mtu; 1312 + return -EINVAL; 1313 + } 1310 1314 1311 1315 return 0; 1312 1316 } ··· 1359 1373 1360 1374 void nicvf_update_stats(struct nicvf *nic) 1361 1375 { 1362 - int qidx; 1376 + int qidx, cpu; 1377 + u64 tmp_stats = 0; 1363 1378 struct nicvf_hw_stats *stats = &nic->hw_stats; 1364 - struct nicvf_drv_stats *drv_stats = &nic->drv_stats; 1379 + struct nicvf_drv_stats *drv_stats; 1365 1380 struct queue_set *qs = nic->qs; 1366 1381 1367 1382 #define GET_RX_STATS(reg) \ ··· 1385 1398 stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST); 1386 1399 stats->rx_drop_l3_mcast = 
GET_RX_STATS(RX_DRP_L3MCAST); 1387 1400 1388 - stats->tx_bytes_ok = GET_TX_STATS(TX_OCTS); 1389 - stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST); 1390 - stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST); 1391 - stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST); 1401 + stats->tx_bytes = GET_TX_STATS(TX_OCTS); 1402 + stats->tx_ucast_frames = GET_TX_STATS(TX_UCAST); 1403 + stats->tx_bcast_frames = GET_TX_STATS(TX_BCAST); 1404 + stats->tx_mcast_frames = GET_TX_STATS(TX_MCAST); 1392 1405 stats->tx_drops = GET_TX_STATS(TX_DROP); 1393 1406 1394 - drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok + 1395 - stats->tx_bcast_frames_ok + 1396 - stats->tx_mcast_frames_ok; 1397 - drv_stats->rx_frames_ok = stats->rx_ucast_frames + 1398 - stats->rx_bcast_frames + 1399 - stats->rx_mcast_frames; 1400 - drv_stats->rx_drops = stats->rx_drop_red + 1401 - stats->rx_drop_overrun; 1402 - drv_stats->tx_drops = stats->tx_drops; 1407 + /* On T88 pass 2.0, the dummy SQE added for TSO notification 1408 + * via CQE has 'dont_send' set. Hence HW drops the pkt pointed 1409 + * pointed by dummy SQE and results in tx_drops counter being 1410 + * incremented. Subtracting it from tx_tso counter will give 1411 + * exact tx_drops counter. 
1412 + */ 1413 + if (nic->t88 && nic->hw_tso) { 1414 + for_each_possible_cpu(cpu) { 1415 + drv_stats = per_cpu_ptr(nic->drv_stats, cpu); 1416 + tmp_stats += drv_stats->tx_tso; 1417 + } 1418 + stats->tx_drops = tmp_stats - stats->tx_drops; 1419 + } 1420 + stats->tx_frames = stats->tx_ucast_frames + 1421 + stats->tx_bcast_frames + 1422 + stats->tx_mcast_frames; 1423 + stats->rx_frames = stats->rx_ucast_frames + 1424 + stats->rx_bcast_frames + 1425 + stats->rx_mcast_frames; 1426 + stats->rx_drops = stats->rx_drop_red + 1427 + stats->rx_drop_overrun; 1403 1428 1404 1429 /* Update RQ and SQ stats */ 1405 1430 for (qidx = 0; qidx < qs->rq_cnt; qidx++) ··· 1425 1426 { 1426 1427 struct nicvf *nic = netdev_priv(netdev); 1427 1428 struct nicvf_hw_stats *hw_stats = &nic->hw_stats; 1428 - struct nicvf_drv_stats *drv_stats = &nic->drv_stats; 1429 1429 1430 1430 nicvf_update_stats(nic); 1431 1431 1432 1432 stats->rx_bytes = hw_stats->rx_bytes; 1433 - stats->rx_packets = drv_stats->rx_frames_ok; 1434 - stats->rx_dropped = drv_stats->rx_drops; 1433 + stats->rx_packets = hw_stats->rx_frames; 1434 + stats->rx_dropped = hw_stats->rx_drops; 1435 1435 stats->multicast = hw_stats->rx_mcast_frames; 1436 1436 1437 - stats->tx_bytes = hw_stats->tx_bytes_ok; 1438 - stats->tx_packets = drv_stats->tx_frames_ok; 1439 - stats->tx_dropped = drv_stats->tx_drops; 1437 + stats->tx_bytes = hw_stats->tx_bytes; 1438 + stats->tx_packets = hw_stats->tx_frames; 1439 + stats->tx_dropped = hw_stats->tx_drops; 1440 1440 1441 1441 return stats; 1442 1442 } ··· 1448 1450 netdev_warn(dev, "%s: Transmit timed out, resetting\n", 1449 1451 dev->name); 1450 1452 1451 - nic->drv_stats.tx_timeout++; 1453 + this_cpu_inc(nic->drv_stats->tx_timeout); 1452 1454 schedule_work(&nic->reset_task); 1453 1455 } 1454 1456 ··· 1582 1584 goto err_free_netdev; 1583 1585 } 1584 1586 1587 + nic->drv_stats = netdev_alloc_pcpu_stats(struct nicvf_drv_stats); 1588 + if (!nic->drv_stats) { 1589 + err = -ENOMEM; 1590 + goto 
err_free_netdev; 1591 + } 1592 + 1585 1593 err = nicvf_set_qset_resources(nic); 1586 1594 if (err) 1587 1595 goto err_free_netdev; ··· 1650 1646 nicvf_unregister_interrupts(nic); 1651 1647 err_free_netdev: 1652 1648 pci_set_drvdata(pdev, NULL); 1649 + if (nic->drv_stats) 1650 + free_percpu(nic->drv_stats); 1653 1651 free_netdev(netdev); 1654 1652 err_release_regions: 1655 1653 pci_release_regions(pdev); ··· 1679 1673 unregister_netdev(pnetdev); 1680 1674 nicvf_unregister_interrupts(nic); 1681 1675 pci_set_drvdata(pdev, NULL); 1676 + if (nic->drv_stats) 1677 + free_percpu(nic->drv_stats); 1682 1678 free_netdev(netdev); 1683 1679 pci_release_regions(pdev); 1684 1680 pci_disable_device(pdev);
+67 -51
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
··· 104 104 nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, 105 105 order); 106 106 if (!nic->rb_page) { 107 - nic->drv_stats.rcv_buffer_alloc_failures++; 107 + this_cpu_inc(nic->pnicvf->drv_stats-> 108 + rcv_buffer_alloc_failures); 108 109 return -ENOMEM; 109 110 } 110 111 nic->rb_page_offset = 0; ··· 271 270 rbdr_idx, new_rb); 272 271 next_rbdr: 273 272 /* Re-enable RBDR interrupts only if buffer allocation is success */ 274 - if (!nic->rb_alloc_fail && rbdr->enable) 273 + if (!nic->rb_alloc_fail && rbdr->enable && 274 + netif_running(nic->pnicvf->netdev)) 275 275 nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx); 276 276 277 277 if (rbdr_idx) ··· 363 361 364 362 static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq) 365 363 { 364 + struct sk_buff *skb; 365 + 366 366 if (!sq) 367 367 return; 368 368 if (!sq->dmem.base) ··· 375 371 sq->dmem.q_len * TSO_HEADER_SIZE, 376 372 sq->tso_hdrs, sq->tso_hdrs_phys); 377 373 374 + /* Free pending skbs in the queue */ 375 + smp_rmb(); 376 + while (sq->head != sq->tail) { 377 + skb = (struct sk_buff *)sq->skbuff[sq->head]; 378 + if (skb) 379 + dev_kfree_skb_any(skb); 380 + sq->head++; 381 + sq->head &= (sq->dmem.q_len - 1); 382 + } 378 383 kfree(sq->skbuff); 379 384 nicvf_free_q_desc_mem(nic, &sq->dmem); 380 385 } ··· 496 483 { 497 484 union nic_mbx mbx = {}; 498 485 499 - /* Reset all RXQ's stats */ 486 + /* Reset all RQ/SQ and VF stats */ 500 487 mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER; 488 + mbx.reset_stat.rx_stat_mask = 0x3FFF; 489 + mbx.reset_stat.tx_stat_mask = 0x1F; 501 490 mbx.reset_stat.rq_stat_mask = 0xFFFF; 491 + mbx.reset_stat.sq_stat_mask = 0xFFFF; 502 492 nicvf_send_msg_to_pf(nic, &mbx); 503 493 } 504 494 ··· 554 538 mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8); 555 539 nicvf_send_msg_to_pf(nic, &mbx); 556 540 557 - nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00); 558 - if (!nic->sqs_mode) 541 + if (!nic->sqs_mode && (qidx == 0)) { 542 + /* Enable checking 
L3/L4 length and TCP/UDP checksums */ 543 + nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 544 + (BIT(24) | BIT(23) | BIT(21))); 559 545 nicvf_config_vlan_stripping(nic, nic->netdev->features); 546 + } 560 547 561 548 /* Enable Receive queue */ 562 549 memset(&rq_cfg, 0, sizeof(struct rq_cfg)); ··· 1048 1029 hdr->tso_max_paysize = skb_shinfo(skb)->gso_size; 1049 1030 /* For non-tunneled pkts, point this to L2 ethertype */ 1050 1031 hdr->inner_l3_offset = skb_network_offset(skb) - 2; 1051 - nic->drv_stats.tx_tso++; 1032 + this_cpu_inc(nic->pnicvf->drv_stats->tx_tso); 1052 1033 } 1053 1034 } 1054 1035 ··· 1180 1161 1181 1162 nicvf_sq_doorbell(nic, skb, sq_num, desc_cnt); 1182 1163 1183 - nic->drv_stats.tx_tso++; 1164 + this_cpu_inc(nic->pnicvf->drv_stats->tx_tso); 1184 1165 return 1; 1185 1166 } 1186 1167 ··· 1441 1422 /* Check for errors in the receive cmp.queue entry */ 1442 1423 int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx) 1443 1424 { 1444 - struct nicvf_hw_stats *stats = &nic->hw_stats; 1445 - 1446 1425 if (!cqe_rx->err_level && !cqe_rx->err_opcode) 1447 1426 return 0; 1448 1427 ··· 1452 1435 1453 1436 switch (cqe_rx->err_opcode) { 1454 1437 case CQ_RX_ERROP_RE_PARTIAL: 1455 - stats->rx_bgx_truncated_pkts++; 1438 + this_cpu_inc(nic->drv_stats->rx_bgx_truncated_pkts); 1456 1439 break; 1457 1440 case CQ_RX_ERROP_RE_JABBER: 1458 - stats->rx_jabber_errs++; 1441 + this_cpu_inc(nic->drv_stats->rx_jabber_errs); 1459 1442 break; 1460 1443 case CQ_RX_ERROP_RE_FCS: 1461 - stats->rx_fcs_errs++; 1444 + this_cpu_inc(nic->drv_stats->rx_fcs_errs); 1462 1445 break; 1463 1446 case CQ_RX_ERROP_RE_RX_CTL: 1464 - stats->rx_bgx_errs++; 1447 + this_cpu_inc(nic->drv_stats->rx_bgx_errs); 1465 1448 break; 1466 1449 case CQ_RX_ERROP_PREL2_ERR: 1467 - stats->rx_prel2_errs++; 1450 + this_cpu_inc(nic->drv_stats->rx_prel2_errs); 1468 1451 break; 1469 1452 case CQ_RX_ERROP_L2_MAL: 1470 - stats->rx_l2_hdr_malformed++; 1453 + 
this_cpu_inc(nic->drv_stats->rx_l2_hdr_malformed); 1471 1454 break; 1472 1455 case CQ_RX_ERROP_L2_OVERSIZE: 1473 - stats->rx_oversize++; 1456 + this_cpu_inc(nic->drv_stats->rx_oversize); 1474 1457 break; 1475 1458 case CQ_RX_ERROP_L2_UNDERSIZE: 1476 - stats->rx_undersize++; 1459 + this_cpu_inc(nic->drv_stats->rx_undersize); 1477 1460 break; 1478 1461 case CQ_RX_ERROP_L2_LENMISM: 1479 - stats->rx_l2_len_mismatch++; 1462 + this_cpu_inc(nic->drv_stats->rx_l2_len_mismatch); 1480 1463 break; 1481 1464 case CQ_RX_ERROP_L2_PCLP: 1482 - stats->rx_l2_pclp++; 1465 + this_cpu_inc(nic->drv_stats->rx_l2_pclp); 1483 1466 break; 1484 1467 case CQ_RX_ERROP_IP_NOT: 1485 - stats->rx_ip_ver_errs++; 1468 + this_cpu_inc(nic->drv_stats->rx_ip_ver_errs); 1486 1469 break; 1487 1470 case CQ_RX_ERROP_IP_CSUM_ERR: 1488 - stats->rx_ip_csum_errs++; 1471 + this_cpu_inc(nic->drv_stats->rx_ip_csum_errs); 1489 1472 break; 1490 1473 case CQ_RX_ERROP_IP_MAL: 1491 - stats->rx_ip_hdr_malformed++; 1474 + this_cpu_inc(nic->drv_stats->rx_ip_hdr_malformed); 1492 1475 break; 1493 1476 case CQ_RX_ERROP_IP_MALD: 1494 - stats->rx_ip_payload_malformed++; 1477 + this_cpu_inc(nic->drv_stats->rx_ip_payload_malformed); 1495 1478 break; 1496 1479 case CQ_RX_ERROP_IP_HOP: 1497 - stats->rx_ip_ttl_errs++; 1480 + this_cpu_inc(nic->drv_stats->rx_ip_ttl_errs); 1498 1481 break; 1499 1482 case CQ_RX_ERROP_L3_PCLP: 1500 - stats->rx_l3_pclp++; 1483 + this_cpu_inc(nic->drv_stats->rx_l3_pclp); 1501 1484 break; 1502 1485 case CQ_RX_ERROP_L4_MAL: 1503 - stats->rx_l4_malformed++; 1486 + this_cpu_inc(nic->drv_stats->rx_l4_malformed); 1504 1487 break; 1505 1488 case CQ_RX_ERROP_L4_CHK: 1506 - stats->rx_l4_csum_errs++; 1489 + this_cpu_inc(nic->drv_stats->rx_l4_csum_errs); 1507 1490 break; 1508 1491 case CQ_RX_ERROP_UDP_LEN: 1509 - stats->rx_udp_len_errs++; 1492 + this_cpu_inc(nic->drv_stats->rx_udp_len_errs); 1510 1493 break; 1511 1494 case CQ_RX_ERROP_L4_PORT: 1512 - stats->rx_l4_port_errs++; 1495 + 
this_cpu_inc(nic->drv_stats->rx_l4_port_errs); 1513 1496 break; 1514 1497 case CQ_RX_ERROP_TCP_FLAG: 1515 - stats->rx_tcp_flag_errs++; 1498 + this_cpu_inc(nic->drv_stats->rx_tcp_flag_errs); 1516 1499 break; 1517 1500 case CQ_RX_ERROP_TCP_OFFSET: 1518 - stats->rx_tcp_offset_errs++; 1501 + this_cpu_inc(nic->drv_stats->rx_tcp_offset_errs); 1519 1502 break; 1520 1503 case CQ_RX_ERROP_L4_PCLP: 1521 - stats->rx_l4_pclp++; 1504 + this_cpu_inc(nic->drv_stats->rx_l4_pclp); 1522 1505 break; 1523 1506 case CQ_RX_ERROP_RBDR_TRUNC: 1524 - stats->rx_truncated_pkts++; 1507 + this_cpu_inc(nic->drv_stats->rx_truncated_pkts); 1525 1508 break; 1526 1509 } 1527 1510 ··· 1529 1512 } 1530 1513 1531 1514 /* Check for errors in the send cmp.queue entry */ 1532 - int nicvf_check_cqe_tx_errs(struct nicvf *nic, 1533 - struct cmp_queue *cq, struct cqe_send_t *cqe_tx) 1515 + int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx) 1534 1516 { 1535 - struct cmp_queue_stats *stats = &cq->stats; 1536 - 1537 1517 switch (cqe_tx->send_status) { 1538 1518 case CQ_TX_ERROP_GOOD: 1539 - stats->tx.good++; 1540 1519 return 0; 1541 1520 case CQ_TX_ERROP_DESC_FAULT: 1542 - stats->tx.desc_fault++; 1521 + this_cpu_inc(nic->drv_stats->tx_desc_fault); 1543 1522 break; 1544 1523 case CQ_TX_ERROP_HDR_CONS_ERR: 1545 - stats->tx.hdr_cons_err++; 1524 + this_cpu_inc(nic->drv_stats->tx_hdr_cons_err); 1546 1525 break; 1547 1526 case CQ_TX_ERROP_SUBDC_ERR: 1548 - stats->tx.subdesc_err++; 1527 + this_cpu_inc(nic->drv_stats->tx_subdesc_err); 1528 + break; 1529 + case CQ_TX_ERROP_MAX_SIZE_VIOL: 1530 + this_cpu_inc(nic->drv_stats->tx_max_size_exceeded); 1549 1531 break; 1550 1532 case CQ_TX_ERROP_IMM_SIZE_OFLOW: 1551 - stats->tx.imm_size_oflow++; 1533 + this_cpu_inc(nic->drv_stats->tx_imm_size_oflow); 1552 1534 break; 1553 1535 case CQ_TX_ERROP_DATA_SEQUENCE_ERR: 1554 - stats->tx.data_seq_err++; 1536 + this_cpu_inc(nic->drv_stats->tx_data_seq_err); 1555 1537 break; 1556 1538 case 
CQ_TX_ERROP_MEM_SEQUENCE_ERR: 1557 - stats->tx.mem_seq_err++; 1539 + this_cpu_inc(nic->drv_stats->tx_mem_seq_err); 1558 1540 break; 1559 1541 case CQ_TX_ERROP_LOCK_VIOL: 1560 - stats->tx.lock_viol++; 1542 + this_cpu_inc(nic->drv_stats->tx_lock_viol); 1561 1543 break; 1562 1544 case CQ_TX_ERROP_DATA_FAULT: 1563 - stats->tx.data_fault++; 1545 + this_cpu_inc(nic->drv_stats->tx_data_fault); 1564 1546 break; 1565 1547 case CQ_TX_ERROP_TSTMP_CONFLICT: 1566 - stats->tx.tstmp_conflict++; 1548 + this_cpu_inc(nic->drv_stats->tx_tstmp_conflict); 1567 1549 break; 1568 1550 case CQ_TX_ERROP_TSTMP_TIMEOUT: 1569 - stats->tx.tstmp_timeout++; 1551 + this_cpu_inc(nic->drv_stats->tx_tstmp_timeout); 1570 1552 break; 1571 1553 case CQ_TX_ERROP_MEM_FAULT: 1572 - stats->tx.mem_fault++; 1554 + this_cpu_inc(nic->drv_stats->tx_mem_fault); 1573 1555 break; 1574 1556 case CQ_TX_ERROP_CK_OVERLAP: 1575 - stats->tx.csum_overlap++; 1557 + this_cpu_inc(nic->drv_stats->tx_csum_overlap); 1576 1558 break; 1577 1559 case CQ_TX_ERROP_CK_OFLOW: 1578 - stats->tx.csum_overflow++; 1560 + this_cpu_inc(nic->drv_stats->tx_csum_overflow); 1579 1561 break; 1580 1562 } 1581 1563
+2 -22
drivers/net/ethernet/cavium/thunder/nicvf_queues.h
··· 158 158 CQ_TX_ERROP_DESC_FAULT = 0x10, 159 159 CQ_TX_ERROP_HDR_CONS_ERR = 0x11, 160 160 CQ_TX_ERROP_SUBDC_ERR = 0x12, 161 + CQ_TX_ERROP_MAX_SIZE_VIOL = 0x13, 161 162 CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80, 162 163 CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81, 163 164 CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82, ··· 171 170 CQ_TX_ERROP_CK_OFLOW = 0x89, 172 171 CQ_TX_ERROP_ENUM_LAST = 0x8a, 173 172 }; 174 - 175 - struct cmp_queue_stats { 176 - struct tx_stats { 177 - u64 good; 178 - u64 desc_fault; 179 - u64 hdr_cons_err; 180 - u64 subdesc_err; 181 - u64 imm_size_oflow; 182 - u64 data_seq_err; 183 - u64 mem_seq_err; 184 - u64 lock_viol; 185 - u64 data_fault; 186 - u64 tstmp_conflict; 187 - u64 tstmp_timeout; 188 - u64 mem_fault; 189 - u64 csum_overlap; 190 - u64 csum_overflow; 191 - } tx; 192 - } ____cacheline_aligned_in_smp; 193 173 194 174 enum RQ_SQ_STATS { 195 175 RQ_SQ_STATS_OCTS, ··· 223 241 spinlock_t lock; /* lock to serialize processing CQEs */ 224 242 void *desc; 225 243 struct q_desc_mem dmem; 226 - struct cmp_queue_stats stats; 227 244 int irq; 228 245 } ____cacheline_aligned_in_smp; 229 246 ··· 317 336 void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx); 318 337 void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx); 319 338 int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx); 320 - int nicvf_check_cqe_tx_errs(struct nicvf *nic, 321 - struct cmp_queue *cq, struct cqe_send_t *cqe_tx); 339 + int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx); 322 340 #endif /* NICVF_QUEUES_H */
+2 -2
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
··· 1242 1242 1243 1243 pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid); 1244 1244 if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) { 1245 - bgx->bgx_id = 1246 - (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & 1; 1245 + bgx->bgx_id = (pci_resource_start(pdev, 1246 + PCI_CFG_REG_BAR_NUM) >> 24) & BGX_ID_MASK; 1247 1247 bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_NODE; 1248 1248 bgx->max_lmac = MAX_LMAC_PER_BGX; 1249 1249 bgx_vnic[bgx->bgx_id] = bgx;
+2
drivers/net/ethernet/cavium/thunder/thunder_bgx.h
··· 28 28 #define MAX_DMAC_PER_LMAC 8 29 29 #define MAX_FRAME_SIZE 9216 30 30 31 + #define BGX_ID_MASK 0x3 32 + 31 33 #define MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE 2 32 34 33 35 /* Registers */
-1
drivers/net/ethernet/chelsio/cxgb4/sge.c
··· 2997 2997 rq->cntxt_id, fl_id, 0xffff); 2998 2998 dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len, 2999 2999 rq->desc, rq->phys_addr); 3000 - napi_hash_del(&rq->napi); 3001 3000 netif_napi_del(&rq->napi); 3002 3001 rq->netdev = NULL; 3003 3002 rq->cntxt_id = rq->abs_id = 0;
-1
drivers/net/ethernet/emulex/benet/be_main.c
··· 2796 2796 if (eqo->q.created) { 2797 2797 be_eq_clean(eqo); 2798 2798 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ); 2799 - napi_hash_del(&eqo->napi); 2800 2799 netif_napi_del(&eqo->napi); 2801 2800 free_cpumask_var(eqo->affinity_mask); 2802 2801 }
+13
drivers/net/ethernet/marvell/sky2.c
··· 5218 5218 5219 5219 static void sky2_shutdown(struct pci_dev *pdev) 5220 5220 { 5221 + struct sky2_hw *hw = pci_get_drvdata(pdev); 5222 + int port; 5223 + 5224 + for (port = 0; port < hw->ports; port++) { 5225 + struct net_device *ndev = hw->dev[port]; 5226 + 5227 + rtnl_lock(); 5228 + if (netif_running(ndev)) { 5229 + dev_close(ndev); 5230 + netif_device_detach(ndev); 5231 + } 5232 + rtnl_unlock(); 5233 + } 5221 5234 sky2_suspend(&pdev->dev); 5222 5235 pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev)); 5223 5236 pci_set_power_state(pdev, PCI_D3hot);
+1 -1
drivers/net/ethernet/stmicro/stmmac/Kconfig
··· 118 118 config DWMAC_STM32 119 119 tristate "STM32 DWMAC support" 120 120 default ARCH_STM32 121 - depends on OF && HAS_IOMEM 121 + depends on OF && HAS_IOMEM && (ARCH_STM32 || COMPILE_TEST) 122 122 select MFD_SYSCON 123 123 ---help--- 124 124 Support for ethernet controller on STM32 SOCs.
+2 -2
drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
··· 63 63 #define TSE_PCS_SGMII_LINK_TIMER_0 0x0D40 64 64 #define TSE_PCS_SGMII_LINK_TIMER_1 0x0003 65 65 #define TSE_PCS_SW_RESET_TIMEOUT 100 66 - #define TSE_PCS_USE_SGMII_AN_MASK BIT(2) 67 - #define TSE_PCS_USE_SGMII_ENA BIT(1) 66 + #define TSE_PCS_USE_SGMII_AN_MASK BIT(1) 67 + #define TSE_PCS_USE_SGMII_ENA BIT(0) 68 68 69 69 #define SGMII_ADAPTER_CTRL_REG 0x00 70 70 #define SGMII_ADAPTER_DISABLE 0x0001
+14 -10
drivers/net/ethernet/stmicro/stmmac/common.h
··· 120 120 unsigned long ip_csum_bypassed; 121 121 unsigned long ipv4_pkt_rcvd; 122 122 unsigned long ipv6_pkt_rcvd; 123 - unsigned long rx_msg_type_ext_no_ptp; 124 - unsigned long rx_msg_type_sync; 125 - unsigned long rx_msg_type_follow_up; 126 - unsigned long rx_msg_type_delay_req; 127 - unsigned long rx_msg_type_delay_resp; 128 - unsigned long rx_msg_type_pdelay_req; 129 - unsigned long rx_msg_type_pdelay_resp; 130 - unsigned long rx_msg_type_pdelay_follow_up; 123 + unsigned long no_ptp_rx_msg_type_ext; 124 + unsigned long ptp_rx_msg_type_sync; 125 + unsigned long ptp_rx_msg_type_follow_up; 126 + unsigned long ptp_rx_msg_type_delay_req; 127 + unsigned long ptp_rx_msg_type_delay_resp; 128 + unsigned long ptp_rx_msg_type_pdelay_req; 129 + unsigned long ptp_rx_msg_type_pdelay_resp; 130 + unsigned long ptp_rx_msg_type_pdelay_follow_up; 131 + unsigned long ptp_rx_msg_type_announce; 132 + unsigned long ptp_rx_msg_type_management; 133 + unsigned long ptp_rx_msg_pkt_reserved_type; 131 134 unsigned long ptp_frame_type; 132 135 unsigned long ptp_ver; 133 136 unsigned long timestamp_dropped; ··· 485 482 /* PTP and HW Timer helpers */ 486 483 struct stmmac_hwtimestamp { 487 484 void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data); 488 - u32 (*config_sub_second_increment) (void __iomem *ioaddr, u32 clk_rate); 485 + u32 (*config_sub_second_increment)(void __iomem *ioaddr, u32 ptp_clock, 486 + int gmac4); 489 487 int (*init_systime) (void __iomem *ioaddr, u32 sec, u32 nsec); 490 488 int (*config_addend) (void __iomem *ioaddr, u32 addend); 491 489 int (*adjust_systime) (void __iomem *ioaddr, u32 sec, u32 nsec, 492 - int add_sub); 490 + int add_sub, int gmac4); 493 491 u64(*get_systime) (void __iomem *ioaddr); 494 492 }; 495 493
+12 -8
drivers/net/ethernet/stmicro/stmmac/descs.h
··· 155 155 #define ERDES4_L3_L4_FILT_NO_MATCH_MASK GENMASK(27, 26) 156 156 157 157 /* Extended RDES4 message type definitions */ 158 - #define RDES_EXT_NO_PTP 0 159 - #define RDES_EXT_SYNC 1 160 - #define RDES_EXT_FOLLOW_UP 2 161 - #define RDES_EXT_DELAY_REQ 3 162 - #define RDES_EXT_DELAY_RESP 4 163 - #define RDES_EXT_PDELAY_REQ 5 164 - #define RDES_EXT_PDELAY_RESP 6 165 - #define RDES_EXT_PDELAY_FOLLOW_UP 7 158 + #define RDES_EXT_NO_PTP 0x0 159 + #define RDES_EXT_SYNC 0x1 160 + #define RDES_EXT_FOLLOW_UP 0x2 161 + #define RDES_EXT_DELAY_REQ 0x3 162 + #define RDES_EXT_DELAY_RESP 0x4 163 + #define RDES_EXT_PDELAY_REQ 0x5 164 + #define RDES_EXT_PDELAY_RESP 0x6 165 + #define RDES_EXT_PDELAY_FOLLOW_UP 0x7 166 + #define RDES_PTP_ANNOUNCE 0x8 167 + #define RDES_PTP_MANAGEMENT 0x9 168 + #define RDES_PTP_SIGNALING 0xa 169 + #define RDES_PTP_PKT_RESERVED_TYPE 0xf 166 170 167 171 /* Basic descriptor structure for normal and alternate descriptors */ 168 172 struct dma_desc {
+74 -21
drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
··· 123 123 x->ipv4_pkt_rcvd++; 124 124 if (rdes1 & RDES1_IPV6_HEADER) 125 125 x->ipv6_pkt_rcvd++; 126 - if (message_type == RDES_EXT_SYNC) 127 - x->rx_msg_type_sync++; 126 + 127 + if (message_type == RDES_EXT_NO_PTP) 128 + x->no_ptp_rx_msg_type_ext++; 129 + else if (message_type == RDES_EXT_SYNC) 130 + x->ptp_rx_msg_type_sync++; 128 131 else if (message_type == RDES_EXT_FOLLOW_UP) 129 - x->rx_msg_type_follow_up++; 132 + x->ptp_rx_msg_type_follow_up++; 130 133 else if (message_type == RDES_EXT_DELAY_REQ) 131 - x->rx_msg_type_delay_req++; 134 + x->ptp_rx_msg_type_delay_req++; 132 135 else if (message_type == RDES_EXT_DELAY_RESP) 133 - x->rx_msg_type_delay_resp++; 136 + x->ptp_rx_msg_type_delay_resp++; 134 137 else if (message_type == RDES_EXT_PDELAY_REQ) 135 - x->rx_msg_type_pdelay_req++; 138 + x->ptp_rx_msg_type_pdelay_req++; 136 139 else if (message_type == RDES_EXT_PDELAY_RESP) 137 - x->rx_msg_type_pdelay_resp++; 140 + x->ptp_rx_msg_type_pdelay_resp++; 138 141 else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP) 139 - x->rx_msg_type_pdelay_follow_up++; 140 - else 141 - x->rx_msg_type_ext_no_ptp++; 142 + x->ptp_rx_msg_type_pdelay_follow_up++; 143 + else if (message_type == RDES_PTP_ANNOUNCE) 144 + x->ptp_rx_msg_type_announce++; 145 + else if (message_type == RDES_PTP_MANAGEMENT) 146 + x->ptp_rx_msg_type_management++; 147 + else if (message_type == RDES_PTP_PKT_RESERVED_TYPE) 148 + x->ptp_rx_msg_pkt_reserved_type++; 142 149 143 150 if (rdes1 & RDES1_PTP_PACKET_TYPE) 144 151 x->ptp_frame_type++; ··· 212 205 213 206 static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p) 214 207 { 215 - return (le32_to_cpu(p->des3) & TDES3_TIMESTAMP_STATUS) 216 - >> TDES3_TIMESTAMP_STATUS_SHIFT; 208 + /* Context type from W/B descriptor must be zero */ 209 + if (le32_to_cpu(p->des3) & TDES3_CONTEXT_TYPE) 210 + return -EINVAL; 211 + 212 + /* Tx Timestamp Status is 1 so des0 and des1'll have valid values */ 213 + if (le32_to_cpu(p->des3) & TDES3_TIMESTAMP_STATUS) 214 + 
return 0; 215 + 216 + return 1; 217 217 } 218 218 219 - /* NOTE: For RX CTX bit has to be checked before 220 - * HAVE a specific function for TX and another one for RX 221 - */ 222 - static u64 dwmac4_wrback_get_timestamp(void *desc, u32 ats) 219 + static inline u64 dwmac4_get_timestamp(void *desc, u32 ats) 223 220 { 224 221 struct dma_desc *p = (struct dma_desc *)desc; 225 222 u64 ns; ··· 235 224 return ns; 236 225 } 237 226 238 - static int dwmac4_context_get_rx_timestamp_status(void *desc, u32 ats) 227 + static int dwmac4_rx_check_timestamp(void *desc) 239 228 { 240 229 struct dma_desc *p = (struct dma_desc *)desc; 230 + u32 own, ctxt; 231 + int ret = 1; 241 232 242 - return (le32_to_cpu(p->des1) & RDES1_TIMESTAMP_AVAILABLE) 243 - >> RDES1_TIMESTAMP_AVAILABLE_SHIFT; 233 + own = p->des3 & RDES3_OWN; 234 + ctxt = ((p->des3 & RDES3_CONTEXT_DESCRIPTOR) 235 + >> RDES3_CONTEXT_DESCRIPTOR_SHIFT); 236 + 237 + if (likely(!own && ctxt)) { 238 + if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff)) 239 + /* Corrupted value */ 240 + ret = -EINVAL; 241 + else 242 + /* A valid Timestamp is ready to be read */ 243 + ret = 0; 244 + } 245 + 246 + /* Timestamp not ready */ 247 + return ret; 248 + } 249 + 250 + static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats) 251 + { 252 + struct dma_desc *p = (struct dma_desc *)desc; 253 + int ret = -EINVAL; 254 + 255 + /* Get the status from normal w/b descriptor */ 256 + if (likely(p->des3 & TDES3_RS1V)) { 257 + if (likely(le32_to_cpu(p->des1) & RDES1_TIMESTAMP_AVAILABLE)) { 258 + int i = 0; 259 + 260 + /* Check if timestamp is OK from context descriptor */ 261 + do { 262 + ret = dwmac4_rx_check_timestamp(desc); 263 + if (ret < 0) 264 + goto exit; 265 + i++; 266 + 267 + } while ((ret == 1) || (i < 10)); 268 + 269 + if (i == 10) 270 + ret = -EBUSY; 271 + } 272 + } 273 + exit: 274 + return ret; 244 275 } 245 276 246 277 static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic, ··· 428 375 .get_rx_frame_len = 
dwmac4_wrback_get_rx_frame_len, 429 376 .enable_tx_timestamp = dwmac4_rd_enable_tx_timestamp, 430 377 .get_tx_timestamp_status = dwmac4_wrback_get_tx_timestamp_status, 431 - .get_timestamp = dwmac4_wrback_get_timestamp, 432 - .get_rx_timestamp_status = dwmac4_context_get_rx_timestamp_status, 378 + .get_rx_timestamp_status = dwmac4_wrback_get_rx_timestamp_status, 379 + .get_timestamp = dwmac4_get_timestamp, 433 380 .set_tx_ic = dwmac4_rd_set_tx_ic, 434 381 .prepare_tx_desc = dwmac4_rd_prepare_tx_desc, 435 382 .prepare_tso_tx_desc = dwmac4_rd_prepare_tso_tx_desc,
+4
drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
··· 59 59 #define TDES3_CTXT_TCMSSV BIT(26) 60 60 61 61 /* TDES3 Common */ 62 + #define TDES3_RS1V BIT(26) 63 + #define TDES3_RS1V_SHIFT 26 62 64 #define TDES3_LAST_DESCRIPTOR BIT(28) 63 65 #define TDES3_LAST_DESCRIPTOR_SHIFT 28 64 66 #define TDES3_FIRST_DESCRIPTOR BIT(29) 65 67 #define TDES3_CONTEXT_TYPE BIT(30) 68 + #define TDES3_CONTEXT_TYPE_SHIFT 30 66 69 67 70 /* TDS3 use for both format (read and write back) */ 68 71 #define TDES3_OWN BIT(31) ··· 120 117 #define RDES3_LAST_DESCRIPTOR BIT(28) 121 118 #define RDES3_FIRST_DESCRIPTOR BIT(29) 122 119 #define RDES3_CONTEXT_DESCRIPTOR BIT(30) 120 + #define RDES3_CONTEXT_DESCRIPTOR_SHIFT 30 123 121 124 122 /* RDES3 (read format) */ 125 123 #define RDES3_BUFFER1_VALID_ADDR BIT(24)
+18 -10
drivers/net/ethernet/stmicro/stmmac/enh_desc.c
··· 150 150 x->ipv4_pkt_rcvd++; 151 151 if (rdes4 & ERDES4_IPV6_PKT_RCVD) 152 152 x->ipv6_pkt_rcvd++; 153 - if (message_type == RDES_EXT_SYNC) 154 - x->rx_msg_type_sync++; 153 + 154 + if (message_type == RDES_EXT_NO_PTP) 155 + x->no_ptp_rx_msg_type_ext++; 156 + else if (message_type == RDES_EXT_SYNC) 157 + x->ptp_rx_msg_type_sync++; 155 158 else if (message_type == RDES_EXT_FOLLOW_UP) 156 - x->rx_msg_type_follow_up++; 159 + x->ptp_rx_msg_type_follow_up++; 157 160 else if (message_type == RDES_EXT_DELAY_REQ) 158 - x->rx_msg_type_delay_req++; 161 + x->ptp_rx_msg_type_delay_req++; 159 162 else if (message_type == RDES_EXT_DELAY_RESP) 160 - x->rx_msg_type_delay_resp++; 163 + x->ptp_rx_msg_type_delay_resp++; 161 164 else if (message_type == RDES_EXT_PDELAY_REQ) 162 - x->rx_msg_type_pdelay_req++; 165 + x->ptp_rx_msg_type_pdelay_req++; 163 166 else if (message_type == RDES_EXT_PDELAY_RESP) 164 - x->rx_msg_type_pdelay_resp++; 167 + x->ptp_rx_msg_type_pdelay_resp++; 165 168 else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP) 166 - x->rx_msg_type_pdelay_follow_up++; 167 - else 168 - x->rx_msg_type_ext_no_ptp++; 169 + x->ptp_rx_msg_type_pdelay_follow_up++; 170 + else if (message_type == RDES_PTP_ANNOUNCE) 171 + x->ptp_rx_msg_type_announce++; 172 + else if (message_type == RDES_PTP_MANAGEMENT) 173 + x->ptp_rx_msg_type_management++; 174 + else if (message_type == RDES_PTP_PKT_RESERVED_TYPE) 175 + x->ptp_rx_msg_pkt_reserved_type++; 176 + 169 177 if (rdes4 & ERDES4_PTP_FRAME_TYPE) 170 178 x->ptp_frame_type++; 171 179 if (rdes4 & ERDES4_PTP_VER)
+1
drivers/net/ethernet/stmicro/stmmac/stmmac.h
··· 128 128 int irq_wake; 129 129 spinlock_t ptp_lock; 130 130 void __iomem *mmcaddr; 131 + void __iomem *ptpaddr; 131 132 u32 rx_tail_addr; 132 133 u32 tx_tail_addr; 133 134 u32 mss;
+11 -8
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
··· 115 115 STMMAC_STAT(ip_csum_bypassed), 116 116 STMMAC_STAT(ipv4_pkt_rcvd), 117 117 STMMAC_STAT(ipv6_pkt_rcvd), 118 - STMMAC_STAT(rx_msg_type_ext_no_ptp), 119 - STMMAC_STAT(rx_msg_type_sync), 120 - STMMAC_STAT(rx_msg_type_follow_up), 121 - STMMAC_STAT(rx_msg_type_delay_req), 122 - STMMAC_STAT(rx_msg_type_delay_resp), 123 - STMMAC_STAT(rx_msg_type_pdelay_req), 124 - STMMAC_STAT(rx_msg_type_pdelay_resp), 125 - STMMAC_STAT(rx_msg_type_pdelay_follow_up), 118 + STMMAC_STAT(no_ptp_rx_msg_type_ext), 119 + STMMAC_STAT(ptp_rx_msg_type_sync), 120 + STMMAC_STAT(ptp_rx_msg_type_follow_up), 121 + STMMAC_STAT(ptp_rx_msg_type_delay_req), 122 + STMMAC_STAT(ptp_rx_msg_type_delay_resp), 123 + STMMAC_STAT(ptp_rx_msg_type_pdelay_req), 124 + STMMAC_STAT(ptp_rx_msg_type_pdelay_resp), 125 + STMMAC_STAT(ptp_rx_msg_type_pdelay_follow_up), 126 + STMMAC_STAT(ptp_rx_msg_type_announce), 127 + STMMAC_STAT(ptp_rx_msg_type_management), 128 + STMMAC_STAT(ptp_rx_msg_pkt_reserved_type), 126 129 STMMAC_STAT(ptp_frame_type), 127 130 STMMAC_STAT(ptp_ver), 128 131 STMMAC_STAT(timestamp_dropped),
+34 -9
drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
··· 34 34 } 35 35 36 36 static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr, 37 - u32 ptp_clock) 37 + u32 ptp_clock, int gmac4) 38 38 { 39 39 u32 value = readl(ioaddr + PTP_TCR); 40 40 unsigned long data; 41 41 42 - /* Convert the ptp_clock to nano second 43 - * formula = (2/ptp_clock) * 1000000000 44 - * where, ptp_clock = 50MHz. 42 + /* For GMAC3.x, 4.x versions, convert the ptp_clock to nano second 43 + * formula = (1/ptp_clock) * 1000000000 44 + * where ptp_clock is 50MHz if fine method is used to update system 45 45 */ 46 - data = (2000000000ULL / ptp_clock); 46 + if (value & PTP_TCR_TSCFUPDT) 47 + data = (1000000000ULL / 50000000); 48 + else 49 + data = (1000000000ULL / ptp_clock); 47 50 48 51 /* 0.465ns accuracy */ 49 52 if (!(value & PTP_TCR_TSCTRLSSR)) 50 53 data = (data * 1000) / 465; 54 + 55 + data &= PTP_SSIR_SSINC_MASK; 56 + 57 + if (gmac4) 58 + data = data << GMAC4_PTP_SSIR_SSINC_SHIFT; 51 59 52 60 writel(data, ioaddr + PTP_SSIR); 53 61 ··· 112 104 } 113 105 114 106 static int stmmac_adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec, 115 - int add_sub) 107 + int add_sub, int gmac4) 116 108 { 117 109 u32 value; 118 110 int limit; 119 111 112 + if (add_sub) { 113 + /* If the new sec value needs to be subtracted with 114 + * the system time, then MAC_STSUR reg should be 115 + * programmed with (2^32 – <new_sec_value>) 116 + */ 117 + if (gmac4) 118 + sec = (100000000ULL - sec); 119 + 120 + value = readl(ioaddr + PTP_TCR); 121 + if (value & PTP_TCR_TSCTRLSSR) 122 + nsec = (PTP_DIGITAL_ROLLOVER_MODE - nsec); 123 + else 124 + nsec = (PTP_BINARY_ROLLOVER_MODE - nsec); 125 + } 126 + 120 127 writel(sec, ioaddr + PTP_STSUR); 121 - writel(((add_sub << PTP_STNSUR_ADDSUB_SHIFT) | nsec), 122 - ioaddr + PTP_STNSUR); 128 + value = (add_sub << PTP_STNSUR_ADDSUB_SHIFT) | nsec; 129 + writel(value, ioaddr + PTP_STNSUR); 130 + 123 131 /* issue command to initialize the system time value */ 124 132 value = readl(ioaddr + PTP_TCR); 125 133 value |= 
PTP_TCR_TSUPDT; ··· 158 134 { 159 135 u64 ns; 160 136 137 + /* Get the TSSS value */ 161 138 ns = readl(ioaddr + PTP_STNSR); 162 - /* convert sec time value to nanosecond */ 139 + /* Get the TSS and convert sec time value to nanosecond */ 163 140 ns += readl(ioaddr + PTP_STSR) * 1000000000ULL; 164 141 165 142 return ns;
+48 -45
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 342 342 343 343 /* stmmac_get_tx_hwtstamp - get HW TX timestamps 344 344 * @priv: driver private structure 345 - * @entry : descriptor index to be used. 345 + * @p : descriptor pointer 346 346 * @skb : the socket buffer 347 347 * Description : 348 348 * This function will read timestamp from the descriptor & pass it to stack. 349 349 * and also perform some sanity checks. 350 350 */ 351 351 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, 352 - unsigned int entry, struct sk_buff *skb) 352 + struct dma_desc *p, struct sk_buff *skb) 353 353 { 354 354 struct skb_shared_hwtstamps shhwtstamp; 355 355 u64 ns; 356 - void *desc = NULL; 357 356 358 357 if (!priv->hwts_tx_en) 359 358 return; ··· 361 362 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))) 362 363 return; 363 364 364 - if (priv->adv_ts) 365 - desc = (priv->dma_etx + entry); 366 - else 367 - desc = (priv->dma_tx + entry); 368 - 369 365 /* check tx tstamp status */ 370 - if (!priv->hw->desc->get_tx_timestamp_status((struct dma_desc *)desc)) 371 - return; 366 + if (!priv->hw->desc->get_tx_timestamp_status(p)) { 367 + /* get the valid tstamp */ 368 + ns = priv->hw->desc->get_timestamp(p, priv->adv_ts); 372 369 373 - /* get the valid tstamp */ 374 - ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts); 370 + memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); 371 + shhwtstamp.hwtstamp = ns_to_ktime(ns); 375 372 376 - memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); 377 - shhwtstamp.hwtstamp = ns_to_ktime(ns); 378 - /* pass tstamp to stack */ 379 - skb_tstamp_tx(skb, &shhwtstamp); 373 + netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns); 374 + /* pass tstamp to stack */ 375 + skb_tstamp_tx(skb, &shhwtstamp); 376 + } 380 377 381 378 return; 382 379 } 383 380 384 381 /* stmmac_get_rx_hwtstamp - get HW RX timestamps 385 382 * @priv: driver private structure 386 - * @entry : descriptor index to be used. 
383 + * @p : descriptor pointer 384 + * @np : next descriptor pointer 387 385 * @skb : the socket buffer 388 386 * Description : 389 387 * This function will read received packet's timestamp from the descriptor 390 388 * and pass it to stack. It also perform some sanity checks. 391 389 */ 392 - static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, 393 - unsigned int entry, struct sk_buff *skb) 390 + static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, 391 + struct dma_desc *np, struct sk_buff *skb) 394 392 { 395 393 struct skb_shared_hwtstamps *shhwtstamp = NULL; 396 394 u64 ns; 397 - void *desc = NULL; 398 395 399 396 if (!priv->hwts_rx_en) 400 397 return; 401 398 402 - if (priv->adv_ts) 403 - desc = (priv->dma_erx + entry); 404 - else 405 - desc = (priv->dma_rx + entry); 399 + /* Check if timestamp is available */ 400 + if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) { 401 + /* For GMAC4, the valid timestamp is from CTX next desc. */ 402 + if (priv->plat->has_gmac4) 403 + ns = priv->hw->desc->get_timestamp(np, priv->adv_ts); 404 + else 405 + ns = priv->hw->desc->get_timestamp(p, priv->adv_ts); 406 406 407 - /* exit if rx tstamp is not valid */ 408 - if (!priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts)) 409 - return; 410 - 411 - /* get valid tstamp */ 412 - ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts); 413 - shhwtstamp = skb_hwtstamps(skb); 414 - memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); 415 - shhwtstamp->hwtstamp = ns_to_ktime(ns); 407 + netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns); 408 + shhwtstamp = skb_hwtstamps(skb); 409 + memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); 410 + shhwtstamp->hwtstamp = ns_to_ktime(ns); 411 + } else { 412 + netdev_err(priv->dev, "cannot get RX hw timestamp\n"); 413 + } 416 414 } 417 415 418 416 /** ··· 596 600 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON; 597 601 598 602 if (!priv->hwts_tx_en && 
!priv->hwts_rx_en) 599 - priv->hw->ptp->config_hw_tstamping(priv->ioaddr, 0); 603 + priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0); 600 604 else { 601 605 value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR | 602 606 tstamp_all | ptp_v2 | ptp_over_ethernet | 603 607 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en | 604 608 ts_master_en | snap_type_sel); 605 - priv->hw->ptp->config_hw_tstamping(priv->ioaddr, value); 609 + priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value); 606 610 607 611 /* program Sub Second Increment reg */ 608 612 sec_inc = priv->hw->ptp->config_sub_second_increment( 609 - priv->ioaddr, priv->clk_ptp_rate); 613 + priv->ptpaddr, priv->clk_ptp_rate, 614 + priv->plat->has_gmac4); 610 615 temp = div_u64(1000000000ULL, sec_inc); 611 616 612 617 /* calculate default added value: ··· 617 620 */ 618 621 temp = (u64)(temp << 32); 619 622 priv->default_addend = div_u64(temp, priv->clk_ptp_rate); 620 - priv->hw->ptp->config_addend(priv->ioaddr, 623 + priv->hw->ptp->config_addend(priv->ptpaddr, 621 624 priv->default_addend); 622 625 623 626 /* initialize system time */ 624 627 ktime_get_real_ts64(&now); 625 628 626 629 /* lower 32 bits of tv_sec are safe until y2106 */ 627 - priv->hw->ptp->init_systime(priv->ioaddr, (u32)now.tv_sec, 630 + priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec, 628 631 now.tv_nsec); 629 632 } 630 633 ··· 1338 1341 priv->dev->stats.tx_packets++; 1339 1342 priv->xstats.tx_pkt_n++; 1340 1343 } 1341 - stmmac_get_tx_hwtstamp(priv, entry, skb); 1344 + stmmac_get_tx_hwtstamp(priv, p, skb); 1342 1345 } 1343 1346 1344 1347 if (likely(priv->tx_skbuff_dma[entry].buf)) { ··· 1484 1487 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET | 1485 1488 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET; 1486 1489 1487 - if (priv->synopsys_id >= DWMAC_CORE_4_00) 1490 + if (priv->synopsys_id >= DWMAC_CORE_4_00) { 1491 + priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET; 1488 1492 priv->mmcaddr = 
priv->ioaddr + MMC_GMAC4_OFFSET; 1489 - else 1493 + } else { 1494 + priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET; 1490 1495 priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET; 1496 + } 1491 1497 1492 1498 dwmac_mmc_intr_all_mask(priv->mmcaddr); 1493 1499 ··· 2504 2504 while (count < limit) { 2505 2505 int status; 2506 2506 struct dma_desc *p; 2507 + struct dma_desc *np; 2507 2508 2508 2509 if (priv->extend_desc) 2509 2510 p = (struct dma_desc *)(priv->dma_erx + entry); ··· 2524 2523 next_entry = priv->cur_rx; 2525 2524 2526 2525 if (priv->extend_desc) 2527 - prefetch(priv->dma_erx + next_entry); 2526 + np = (struct dma_desc *)(priv->dma_erx + next_entry); 2528 2527 else 2529 - prefetch(priv->dma_rx + next_entry); 2528 + np = priv->dma_rx + next_entry; 2529 + 2530 + prefetch(np); 2530 2531 2531 2532 if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status)) 2532 2533 priv->hw->desc->rx_extended_status(&priv->dev->stats, ··· 2638 2635 DMA_FROM_DEVICE); 2639 2636 } 2640 2637 2641 - stmmac_get_rx_hwtstamp(priv, entry, skb); 2642 - 2643 2638 if (netif_msg_pktdata(priv)) { 2644 2639 netdev_dbg(priv->dev, "frame received (%dbytes)", 2645 2640 frame_len); 2646 2641 print_pkt(skb->data, frame_len); 2647 2642 } 2643 + 2644 + stmmac_get_rx_hwtstamp(priv, p, np, skb); 2648 2645 2649 2646 stmmac_rx_vlan(priv->dev, skb); 2650 2647
+5 -4
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
··· 54 54 55 55 spin_lock_irqsave(&priv->ptp_lock, flags); 56 56 57 - priv->hw->ptp->config_addend(priv->ioaddr, addend); 57 + priv->hw->ptp->config_addend(priv->ptpaddr, addend); 58 58 59 59 spin_unlock_irqrestore(&priv->ptp_lock, flags); 60 60 ··· 89 89 90 90 spin_lock_irqsave(&priv->ptp_lock, flags); 91 91 92 - priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj); 92 + priv->hw->ptp->adjust_systime(priv->ptpaddr, sec, nsec, neg_adj, 93 + priv->plat->has_gmac4); 93 94 94 95 spin_unlock_irqrestore(&priv->ptp_lock, flags); 95 96 ··· 115 114 116 115 spin_lock_irqsave(&priv->ptp_lock, flags); 117 116 118 - ns = priv->hw->ptp->get_systime(priv->ioaddr); 117 + ns = priv->hw->ptp->get_systime(priv->ptpaddr); 119 118 120 119 spin_unlock_irqrestore(&priv->ptp_lock, flags); 121 120 ··· 142 141 143 142 spin_lock_irqsave(&priv->ptp_lock, flags); 144 143 145 - priv->hw->ptp->init_systime(priv->ioaddr, ts->tv_sec, ts->tv_nsec); 144 + priv->hw->ptp->init_systime(priv->ptpaddr, ts->tv_sec, ts->tv_nsec); 146 145 147 146 spin_unlock_irqrestore(&priv->ptp_lock, flags); 148 147
+37 -35
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
··· 22 22 Author: Rayagond Kokatanur <rayagond@vayavyalabs.com> 23 23 ******************************************************************************/ 24 24 25 - #ifndef __STMMAC_PTP_H__ 26 - #define __STMMAC_PTP_H__ 25 + #ifndef __STMMAC_PTP_H__ 26 + #define __STMMAC_PTP_H__ 27 + 28 + #define PTP_GMAC4_OFFSET 0xb00 29 + #define PTP_GMAC3_X_OFFSET 0x700 27 30 28 31 /* IEEE 1588 PTP register offsets */ 29 - #define PTP_TCR 0x0700 /* Timestamp Control Reg */ 30 - #define PTP_SSIR 0x0704 /* Sub-Second Increment Reg */ 31 - #define PTP_STSR 0x0708 /* System Time – Seconds Regr */ 32 - #define PTP_STNSR 0x070C /* System Time – Nanoseconds Reg */ 33 - #define PTP_STSUR 0x0710 /* System Time – Seconds Update Reg */ 34 - #define PTP_STNSUR 0x0714 /* System Time – Nanoseconds Update Reg */ 35 - #define PTP_TAR 0x0718 /* Timestamp Addend Reg */ 36 - #define PTP_TTSR 0x071C /* Target Time Seconds Reg */ 37 - #define PTP_TTNSR 0x0720 /* Target Time Nanoseconds Reg */ 38 - #define PTP_STHWSR 0x0724 /* System Time - Higher Word Seconds Reg */ 39 - #define PTP_TSR 0x0728 /* Timestamp Status */ 32 + #define PTP_TCR 0x00 /* Timestamp Control Reg */ 33 + #define PTP_SSIR 0x04 /* Sub-Second Increment Reg */ 34 + #define PTP_STSR 0x08 /* System Time – Seconds Regr */ 35 + #define PTP_STNSR 0x0c /* System Time – Nanoseconds Reg */ 36 + #define PTP_STSUR 0x10 /* System Time – Seconds Update Reg */ 37 + #define PTP_STNSUR 0x14 /* System Time – Nanoseconds Update Reg */ 38 + #define PTP_TAR 0x18 /* Timestamp Addend Reg */ 40 39 41 - #define PTP_STNSUR_ADDSUB_SHIFT 31 40 + #define PTP_STNSUR_ADDSUB_SHIFT 31 41 + #define PTP_DIGITAL_ROLLOVER_MODE 0x3B9ACA00 /* 10e9-1 ns */ 42 + #define PTP_BINARY_ROLLOVER_MODE 0x80000000 /* ~0.466 ns */ 42 43 43 - /* PTP TCR defines */ 44 - #define PTP_TCR_TSENA 0x00000001 /* Timestamp Enable */ 45 - #define PTP_TCR_TSCFUPDT 0x00000002 /* Timestamp Fine/Coarse Update */ 46 - #define PTP_TCR_TSINIT 0x00000004 /* Timestamp Initialize */ 47 - #define 
PTP_TCR_TSUPDT 0x00000008 /* Timestamp Update */ 48 - /* Timestamp Interrupt Trigger Enable */ 49 - #define PTP_TCR_TSTRIG 0x00000010 50 - #define PTP_TCR_TSADDREG 0x00000020 /* Addend Reg Update */ 51 - #define PTP_TCR_TSENALL 0x00000100 /* Enable Timestamp for All Frames */ 52 - /* Timestamp Digital or Binary Rollover Control */ 53 - #define PTP_TCR_TSCTRLSSR 0x00000200 54 - 44 + /* PTP Timestamp control register defines */ 45 + #define PTP_TCR_TSENA BIT(0) /* Timestamp Enable */ 46 + #define PTP_TCR_TSCFUPDT BIT(1) /* Timestamp Fine/Coarse Update */ 47 + #define PTP_TCR_TSINIT BIT(2) /* Timestamp Initialize */ 48 + #define PTP_TCR_TSUPDT BIT(3) /* Timestamp Update */ 49 + #define PTP_TCR_TSTRIG BIT(4) /* Timestamp Interrupt Trigger Enable */ 50 + #define PTP_TCR_TSADDREG BIT(5) /* Addend Reg Update */ 51 + #define PTP_TCR_TSENALL BIT(8) /* Enable Timestamp for All Frames */ 52 + #define PTP_TCR_TSCTRLSSR BIT(9) /* Digital or Binary Rollover Control */ 55 53 /* Enable PTP packet Processing for Version 2 Format */ 56 - #define PTP_TCR_TSVER2ENA 0x00000400 54 + #define PTP_TCR_TSVER2ENA BIT(10) 57 55 /* Enable Processing of PTP over Ethernet Frames */ 58 - #define PTP_TCR_TSIPENA 0x00000800 56 + #define PTP_TCR_TSIPENA BIT(11) 59 57 /* Enable Processing of PTP Frames Sent over IPv6-UDP */ 60 - #define PTP_TCR_TSIPV6ENA 0x00001000 58 + #define PTP_TCR_TSIPV6ENA BIT(12) 61 59 /* Enable Processing of PTP Frames Sent over IPv4-UDP */ 62 - #define PTP_TCR_TSIPV4ENA 0x00002000 60 + #define PTP_TCR_TSIPV4ENA BIT(13) 63 61 /* Enable Timestamp Snapshot for Event Messages */ 64 - #define PTP_TCR_TSEVNTENA 0x00004000 62 + #define PTP_TCR_TSEVNTENA BIT(14) 65 63 /* Enable Snapshot for Messages Relevant to Master */ 66 - #define PTP_TCR_TSMSTRENA 0x00008000 64 + #define PTP_TCR_TSMSTRENA BIT(15) 67 65 /* Select PTP packets for Taking Snapshots */ 68 - #define PTP_TCR_SNAPTYPSEL_1 0x00010000 66 + #define PTP_TCR_SNAPTYPSEL_1 GENMASK(17, 16) 69 67 /* Enable MAC address for PTP 
Frame Filtering */ 70 - #define PTP_TCR_TSENMACADDR 0x00040000 68 + #define PTP_TCR_TSENMACADDR BIT(18) 71 69 72 - #endif /* __STMMAC_PTP_H__ */ 70 + /* SSIR defines */ 71 + #define PTP_SSIR_SSINC_MASK 0xff 72 + #define GMAC4_PTP_SSIR_SSINC_SHIFT 16 73 + 74 + #endif /* __STMMAC_PTP_H__ */
+3 -2
drivers/net/ethernet/sun/sunbmac.c
··· 623 623 void __iomem *gregs = bp->gregs; 624 624 void __iomem *cregs = bp->creg; 625 625 void __iomem *bregs = bp->bregs; 626 + __u32 bblk_dvma = (__u32)bp->bblock_dvma; 626 627 unsigned char *e = &bp->dev->dev_addr[0]; 627 628 628 629 /* Latch current counters into statistics. */ ··· 672 671 bregs + BMAC_XIFCFG); 673 672 674 673 /* Tell the QEC where the ring descriptors are. */ 675 - sbus_writel(bp->bblock_dvma + bib_offset(be_rxd, 0), 674 + sbus_writel(bblk_dvma + bib_offset(be_rxd, 0), 676 675 cregs + CREG_RXDS); 677 - sbus_writel(bp->bblock_dvma + bib_offset(be_txd, 0), 676 + sbus_writel(bblk_dvma + bib_offset(be_txd, 0), 678 677 cregs + CREG_TXDS); 679 678 680 679 /* Setup the FIFO pointers into QEC local memory. */
+1 -1
drivers/net/ethernet/sun/sunbmac.h
··· 291 291 void __iomem *bregs; /* BigMAC Registers */ 292 292 void __iomem *tregs; /* BigMAC Transceiver */ 293 293 struct bmac_init_block *bmac_block; /* RX and TX descriptors */ 294 - __u32 bblock_dvma; /* RX and TX descriptors */ 294 + dma_addr_t bblock_dvma; /* RX and TX descriptors */ 295 295 296 296 spinlock_t lock; 297 297
+6 -5
drivers/net/ethernet/sun/sunqe.c
··· 124 124 { 125 125 struct qe_init_block *qb = qep->qe_block; 126 126 struct sunqe_buffers *qbufs = qep->buffers; 127 - __u32 qbufs_dvma = qep->buffers_dvma; 127 + __u32 qbufs_dvma = (__u32)qep->buffers_dvma; 128 128 int i; 129 129 130 130 qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0; ··· 144 144 void __iomem *mregs = qep->mregs; 145 145 void __iomem *gregs = qecp->gregs; 146 146 unsigned char *e = &qep->dev->dev_addr[0]; 147 + __u32 qblk_dvma = (__u32)qep->qblock_dvma; 147 148 u32 tmp; 148 149 int i; 149 150 ··· 153 152 return -EAGAIN; 154 153 155 154 /* Setup initial rx/tx init block pointers. */ 156 - sbus_writel(qep->qblock_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS); 157 - sbus_writel(qep->qblock_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS); 155 + sbus_writel(qblk_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS); 156 + sbus_writel(qblk_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS); 158 157 159 158 /* Enable/mask the various irq's. */ 160 159 sbus_writel(0, cregs + CREG_RIMASK); ··· 414 413 struct net_device *dev = qep->dev; 415 414 struct qe_rxd *this; 416 415 struct sunqe_buffers *qbufs = qep->buffers; 417 - __u32 qbufs_dvma = qep->buffers_dvma; 416 + __u32 qbufs_dvma = (__u32)qep->buffers_dvma; 418 417 int elem = qep->rx_new; 419 418 u32 flags; 420 419 ··· 573 572 { 574 573 struct sunqe *qep = netdev_priv(dev); 575 574 struct sunqe_buffers *qbufs = qep->buffers; 576 - __u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma; 575 + __u32 txbuf_dvma, qbufs_dvma = (__u32)qep->buffers_dvma; 577 576 unsigned char *txbuf; 578 577 int len, entry; 579 578
+2 -2
drivers/net/ethernet/sun/sunqe.h
··· 334 334 void __iomem *qcregs; /* QEC per-channel Registers */ 335 335 void __iomem *mregs; /* Per-channel MACE Registers */ 336 336 struct qe_init_block *qe_block; /* RX and TX descriptors */ 337 - __u32 qblock_dvma; /* RX and TX descriptors */ 337 + dma_addr_t qblock_dvma; /* RX and TX descriptors */ 338 338 spinlock_t lock; /* Protects txfull state */ 339 339 int rx_new, rx_old; /* RX ring extents */ 340 340 int tx_new, tx_old; /* TX ring extents */ 341 341 struct sunqe_buffers *buffers; /* CPU visible address. */ 342 - __u32 buffers_dvma; /* DVMA visible address. */ 342 + dma_addr_t buffers_dvma; /* DVMA visible address. */ 343 343 struct sunqec *parent; 344 344 u8 mconfig; /* Base MACE mconfig value */ 345 345 struct platform_device *op; /* QE's OF device struct */
+74 -21
drivers/net/ethernet/ti/cpsw.c
··· 2373 2373 * to the PHY is the Ethernet MAC DT node. 2374 2374 */ 2375 2375 ret = of_phy_register_fixed_link(slave_node); 2376 - if (ret) 2376 + if (ret) { 2377 + if (ret != -EPROBE_DEFER) 2378 + dev_err(&pdev->dev, "failed to register fixed-link phy: %d\n", ret); 2377 2379 return ret; 2380 + } 2378 2381 slave_data->phy_node = of_node_get(slave_node); 2379 2382 } else if (parp) { 2380 2383 u32 phyid; ··· 2398 2395 } 2399 2396 snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), 2400 2397 PHY_ID_FMT, mdio->name, phyid); 2398 + put_device(&mdio->dev); 2401 2399 } else { 2402 2400 dev_err(&pdev->dev, 2403 2401 "No slave[%d] phy_id, phy-handle, or fixed-link property\n", ··· 2440 2436 } 2441 2437 2442 2438 return 0; 2439 + } 2440 + 2441 + static void cpsw_remove_dt(struct platform_device *pdev) 2442 + { 2443 + struct net_device *ndev = platform_get_drvdata(pdev); 2444 + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); 2445 + struct cpsw_platform_data *data = &cpsw->data; 2446 + struct device_node *node = pdev->dev.of_node; 2447 + struct device_node *slave_node; 2448 + int i = 0; 2449 + 2450 + for_each_available_child_of_node(node, slave_node) { 2451 + struct cpsw_slave_data *slave_data = &data->slave_data[i]; 2452 + 2453 + if (strcmp(slave_node->name, "slave")) 2454 + continue; 2455 + 2456 + if (of_phy_is_fixed_link(slave_node)) { 2457 + struct phy_device *phydev; 2458 + 2459 + phydev = of_phy_find_device(slave_node); 2460 + if (phydev) { 2461 + fixed_phy_unregister(phydev); 2462 + /* Put references taken by 2463 + * of_phy_find_device() and 2464 + * of_phy_register_fixed_link(). 
2465 + */ 2466 + phy_device_free(phydev); 2467 + phy_device_free(phydev); 2468 + } 2469 + } 2470 + 2471 + of_node_put(slave_data->phy_node); 2472 + 2473 + i++; 2474 + if (i == data->slaves) 2475 + break; 2476 + } 2477 + 2478 + of_platform_depopulate(&pdev->dev); 2443 2479 } 2444 2480 2445 2481 static int cpsw_probe_dual_emac(struct cpsw_priv *priv) ··· 2589 2545 int irq; 2590 2546 2591 2547 cpsw = devm_kzalloc(&pdev->dev, sizeof(struct cpsw_common), GFP_KERNEL); 2548 + if (!cpsw) 2549 + return -ENOMEM; 2550 + 2592 2551 cpsw->dev = &pdev->dev; 2593 2552 2594 2553 ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES); ··· 2629 2582 /* Select default pin state */ 2630 2583 pinctrl_pm_select_default_state(&pdev->dev); 2631 2584 2632 - if (cpsw_probe_dt(&cpsw->data, pdev)) { 2633 - dev_err(&pdev->dev, "cpsw: platform data missing\n"); 2634 - ret = -ENODEV; 2585 + /* Need to enable clocks with runtime PM api to access module 2586 + * registers 2587 + */ 2588 + ret = pm_runtime_get_sync(&pdev->dev); 2589 + if (ret < 0) { 2590 + pm_runtime_put_noidle(&pdev->dev); 2635 2591 goto clean_runtime_disable_ret; 2636 2592 } 2593 + 2594 + ret = cpsw_probe_dt(&cpsw->data, pdev); 2595 + if (ret) 2596 + goto clean_dt_ret; 2597 + 2637 2598 data = &cpsw->data; 2638 2599 cpsw->rx_ch_num = 1; 2639 2600 cpsw->tx_ch_num = 1; ··· 2661 2606 GFP_KERNEL); 2662 2607 if (!cpsw->slaves) { 2663 2608 ret = -ENOMEM; 2664 - goto clean_runtime_disable_ret; 2609 + goto clean_dt_ret; 2665 2610 } 2666 2611 for (i = 0; i < data->slaves; i++) 2667 2612 cpsw->slaves[i].slave_num = i; ··· 2673 2618 if (IS_ERR(clk)) { 2674 2619 dev_err(priv->dev, "fck is not found\n"); 2675 2620 ret = -ENODEV; 2676 - goto clean_runtime_disable_ret; 2621 + goto clean_dt_ret; 2677 2622 } 2678 2623 cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000; 2679 2624 ··· 2681 2626 ss_regs = devm_ioremap_resource(&pdev->dev, ss_res); 2682 2627 if (IS_ERR(ss_regs)) { 2683 2628 ret = PTR_ERR(ss_regs); 2684 - goto 
clean_runtime_disable_ret; 2629 + goto clean_dt_ret; 2685 2630 } 2686 2631 cpsw->regs = ss_regs; 2687 2632 2688 - /* Need to enable clocks with runtime PM api to access module 2689 - * registers 2690 - */ 2691 - ret = pm_runtime_get_sync(&pdev->dev); 2692 - if (ret < 0) { 2693 - pm_runtime_put_noidle(&pdev->dev); 2694 - goto clean_runtime_disable_ret; 2695 - } 2696 2633 cpsw->version = readl(&cpsw->regs->id_ver); 2697 - pm_runtime_put_sync(&pdev->dev); 2698 2634 2699 2635 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 2700 2636 cpsw->wr_regs = devm_ioremap_resource(&pdev->dev, res); 2701 2637 if (IS_ERR(cpsw->wr_regs)) { 2702 2638 ret = PTR_ERR(cpsw->wr_regs); 2703 - goto clean_runtime_disable_ret; 2639 + goto clean_dt_ret; 2704 2640 } 2705 2641 2706 2642 memset(&dma_params, 0, sizeof(dma_params)); ··· 2728 2682 default: 2729 2683 dev_err(priv->dev, "unknown version 0x%08x\n", cpsw->version); 2730 2684 ret = -ENODEV; 2731 - goto clean_runtime_disable_ret; 2685 + goto clean_dt_ret; 2732 2686 } 2733 2687 for (i = 0; i < cpsw->data.slaves; i++) { 2734 2688 struct cpsw_slave *slave = &cpsw->slaves[i]; ··· 2757 2711 if (!cpsw->dma) { 2758 2712 dev_err(priv->dev, "error initializing dma\n"); 2759 2713 ret = -ENOMEM; 2760 - goto clean_runtime_disable_ret; 2714 + goto clean_dt_ret; 2761 2715 } 2762 2716 2763 2717 cpsw->txch[0] = cpdma_chan_create(cpsw->dma, 0, cpsw_tx_handler, 0); ··· 2855 2809 ret = cpsw_probe_dual_emac(priv); 2856 2810 if (ret) { 2857 2811 cpsw_err(priv, probe, "error probe slave 2 emac interface\n"); 2858 - goto clean_ale_ret; 2812 + goto clean_unregister_netdev_ret; 2859 2813 } 2860 2814 } 2861 2815 2816 + pm_runtime_put(&pdev->dev); 2817 + 2862 2818 return 0; 2863 2819 2820 + clean_unregister_netdev_ret: 2821 + unregister_netdev(ndev); 2864 2822 clean_ale_ret: 2865 2823 cpsw_ale_destroy(cpsw->ale); 2866 2824 clean_dma_ret: 2867 2825 cpdma_ctlr_destroy(cpsw->dma); 2826 + clean_dt_ret: 2827 + cpsw_remove_dt(pdev); 2828 + 
pm_runtime_put_sync(&pdev->dev); 2868 2829 clean_runtime_disable_ret: 2869 2830 pm_runtime_disable(&pdev->dev); 2870 2831 clean_ndev_ret: ··· 2897 2844 2898 2845 cpsw_ale_destroy(cpsw->ale); 2899 2846 cpdma_ctlr_destroy(cpsw->dma); 2900 - of_platform_depopulate(&pdev->dev); 2847 + cpsw_remove_dt(pdev); 2901 2848 pm_runtime_put_sync(&pdev->dev); 2902 2849 pm_runtime_disable(&pdev->dev); 2903 2850 if (cpsw->data.dual_emac)
+1 -1
drivers/net/phy/fixed_phy.c
··· 279 279 void fixed_phy_unregister(struct phy_device *phy) 280 280 { 281 281 phy_device_remove(phy); 282 - 282 + of_node_put(phy->mdio.dev.of_node); 283 283 fixed_phy_del(phy->mdio.addr); 284 284 } 285 285 EXPORT_SYMBOL_GPL(fixed_phy_unregister);
+33 -1
drivers/net/phy/vitesse.c
··· 62 62 /* Vitesse Extended Page Access Register */ 63 63 #define MII_VSC82X4_EXT_PAGE_ACCESS 0x1f 64 64 65 + /* Vitesse VSC8601 Extended PHY Control Register 1 */ 66 + #define MII_VSC8601_EPHY_CTL 0x17 67 + #define MII_VSC8601_EPHY_CTL_RGMII_SKEW (1 << 8) 68 + 65 69 #define PHY_ID_VSC8234 0x000fc620 66 70 #define PHY_ID_VSC8244 0x000fc6c0 67 71 #define PHY_ID_VSC8514 0x00070670 ··· 113 109 err = vsc824x_add_skew(phydev); 114 110 115 111 return err; 112 + } 113 + 114 + /* This adds a skew for both TX and RX clocks, so the skew should only be 115 + * applied to "rgmii-id" interfaces. It may not work as expected 116 + * on "rgmii-txid", "rgmii-rxid" or "rgmii" interfaces. */ 117 + static int vsc8601_add_skew(struct phy_device *phydev) 118 + { 119 + int ret; 120 + 121 + ret = phy_read(phydev, MII_VSC8601_EPHY_CTL); 122 + if (ret < 0) 123 + return ret; 124 + 125 + ret |= MII_VSC8601_EPHY_CTL_RGMII_SKEW; 126 + return phy_write(phydev, MII_VSC8601_EPHY_CTL, ret); 127 + } 128 + 129 + static int vsc8601_config_init(struct phy_device *phydev) 130 + { 131 + int ret = 0; 132 + 133 + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) 134 + ret = vsc8601_add_skew(phydev); 135 + 136 + if (ret < 0) 137 + return ret; 138 + 139 + return genphy_config_init(phydev); 116 140 } 117 141 118 142 static int vsc824x_ack_interrupt(struct phy_device *phydev) ··· 307 275 .phy_id_mask = 0x000ffff0, 308 276 .features = PHY_GBIT_FEATURES, 309 277 .flags = PHY_HAS_INTERRUPT, 310 - .config_init = &genphy_config_init, 278 + .config_init = &vsc8601_config_init, 311 279 .config_aneg = &genphy_config_aneg, 312 280 .read_status = &genphy_read_status, 313 281 .ack_interrupt = &vsc824x_ack_interrupt,
+5
drivers/net/virtio_net.c
··· 1485 1485 netif_napi_del(&vi->rq[i].napi); 1486 1486 } 1487 1487 1488 + /* We called napi_hash_del() before netif_napi_del(), 1489 + * we need to respect an RCU grace period before freeing vi->rq 1490 + */ 1491 + synchronize_net(); 1492 + 1488 1493 kfree(vi->rq); 1489 1494 kfree(vi->sq); 1490 1495 }
+1 -1
drivers/net/wireless/mac80211_hwsim.c
··· 819 819 data->bcn_delta = do_div(delta, bcn_int); 820 820 } else { 821 821 data->tsf_offset -= delta; 822 - data->bcn_delta = -do_div(delta, bcn_int); 822 + data->bcn_delta = -(s64)do_div(delta, bcn_int); 823 823 } 824 824 } 825 825
+4 -14
drivers/nvme/host/pci.c
··· 1242 1242 1243 1243 result = nvme_enable_ctrl(&dev->ctrl, cap); 1244 1244 if (result) 1245 - goto free_nvmeq; 1245 + return result; 1246 1246 1247 1247 nvmeq->cq_vector = 0; 1248 1248 result = queue_request_irq(nvmeq); 1249 1249 if (result) { 1250 1250 nvmeq->cq_vector = -1; 1251 - goto free_nvmeq; 1251 + return result; 1252 1252 } 1253 1253 1254 - return result; 1255 - 1256 - free_nvmeq: 1257 - nvme_free_queues(dev, 0); 1258 1254 return result; 1259 1255 } 1260 1256 ··· 1313 1317 max = min(dev->max_qid, dev->queue_count - 1); 1314 1318 for (i = dev->online_queues; i <= max; i++) { 1315 1319 ret = nvme_create_queue(dev->queues[i], i); 1316 - if (ret) { 1317 - nvme_free_queues(dev, i); 1320 + if (ret) 1318 1321 break; 1319 - } 1320 1322 } 1321 1323 1322 1324 /* ··· 1454 1460 result = queue_request_irq(adminq); 1455 1461 if (result) { 1456 1462 adminq->cq_vector = -1; 1457 - goto free_queues; 1463 + return result; 1458 1464 } 1459 1465 return nvme_create_io_queues(dev); 1460 - 1461 - free_queues: 1462 - nvme_free_queues(dev, 1); 1463 - return result; 1464 1466 } 1465 1467 1466 1468 static void nvme_del_queue_end(struct request *req, int error)
+39 -3
drivers/nvme/host/rdma.c
··· 83 83 NVME_RDMA_Q_CONNECTED = (1 << 0), 84 84 NVME_RDMA_IB_QUEUE_ALLOCATED = (1 << 1), 85 85 NVME_RDMA_Q_DELETING = (1 << 2), 86 + NVME_RDMA_Q_LIVE = (1 << 3), 86 87 }; 87 88 88 89 struct nvme_rdma_queue { ··· 625 624 626 625 for (i = 1; i < ctrl->queue_count; i++) { 627 626 ret = nvmf_connect_io_queue(&ctrl->ctrl, i); 628 - if (ret) 629 - break; 627 + if (ret) { 628 + dev_info(ctrl->ctrl.device, 629 + "failed to connect i/o queue: %d\n", ret); 630 + goto out_free_queues; 631 + } 632 + set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags); 630 633 } 631 634 635 + return 0; 636 + 637 + out_free_queues: 638 + nvme_rdma_free_io_queues(ctrl); 632 639 return ret; 633 640 } 634 641 ··· 721 712 if (ret) 722 713 goto stop_admin_q; 723 714 715 + set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags); 716 + 724 717 ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap); 725 718 if (ret) 726 719 goto stop_admin_q; ··· 772 761 773 762 nvme_stop_keep_alive(&ctrl->ctrl); 774 763 775 - for (i = 0; i < ctrl->queue_count; i++) 764 + for (i = 0; i < ctrl->queue_count; i++) { 776 765 clear_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[i].flags); 766 + clear_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags); 767 + } 777 768 778 769 if (ctrl->queue_count > 1) 779 770 nvme_stop_queues(&ctrl->ctrl); ··· 1391 1378 return BLK_EH_HANDLED; 1392 1379 } 1393 1380 1381 + /* 1382 + * We cannot accept any other command until the Connect command has completed. 
1383 + */ 1384 + static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, 1385 + struct request *rq) 1386 + { 1387 + if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) { 1388 + struct nvme_command *cmd = (struct nvme_command *)rq->cmd; 1389 + 1390 + if (rq->cmd_type != REQ_TYPE_DRV_PRIV || 1391 + cmd->common.opcode != nvme_fabrics_command || 1392 + cmd->fabrics.fctype != nvme_fabrics_type_connect) 1393 + return false; 1394 + } 1395 + 1396 + return true; 1397 + } 1398 + 1394 1399 static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, 1395 1400 const struct blk_mq_queue_data *bd) 1396 1401 { ··· 1424 1393 int ret; 1425 1394 1426 1395 WARN_ON_ONCE(rq->tag < 0); 1396 + 1397 + if (!nvme_rdma_queue_is_ready(queue, rq)) 1398 + return BLK_MQ_RQ_QUEUE_BUSY; 1427 1399 1428 1400 dev = queue->device->dev; 1429 1401 ib_dma_sync_single_for_cpu(dev, sqe->dma, ··· 1577 1543 error = nvmf_connect_admin_queue(&ctrl->ctrl); 1578 1544 if (error) 1579 1545 goto out_cleanup_queue; 1546 + 1547 + set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags); 1580 1548 1581 1549 error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap); 1582 1550 if (error) {
+7 -3
drivers/nvme/target/core.c
··· 838 838 839 839 void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl) 840 840 { 841 - ctrl->csts |= NVME_CSTS_CFS; 842 - INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler); 843 - schedule_work(&ctrl->fatal_err_work); 841 + mutex_lock(&ctrl->lock); 842 + if (!(ctrl->csts & NVME_CSTS_CFS)) { 843 + ctrl->csts |= NVME_CSTS_CFS; 844 + INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler); 845 + schedule_work(&ctrl->fatal_err_work); 846 + } 847 + mutex_unlock(&ctrl->lock); 844 848 } 845 849 EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error); 846 850
+15 -3
drivers/nvme/target/rdma.c
··· 951 951 952 952 static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue) 953 953 { 954 + ib_drain_qp(queue->cm_id->qp); 954 955 rdma_destroy_qp(queue->cm_id); 955 956 ib_free_cq(queue->cq); 956 957 } ··· 1067 1066 spin_lock_init(&queue->rsp_wr_wait_lock); 1068 1067 INIT_LIST_HEAD(&queue->free_rsps); 1069 1068 spin_lock_init(&queue->rsps_lock); 1069 + INIT_LIST_HEAD(&queue->queue_list); 1070 1070 1071 1071 queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL); 1072 1072 if (queue->idx < 0) { ··· 1246 1244 1247 1245 if (disconnect) { 1248 1246 rdma_disconnect(queue->cm_id); 1249 - ib_drain_qp(queue->cm_id->qp); 1250 1247 schedule_work(&queue->release_work); 1251 1248 } 1252 1249 } ··· 1270 1269 { 1271 1270 WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING); 1272 1271 1273 - pr_err("failed to connect queue\n"); 1272 + mutex_lock(&nvmet_rdma_queue_mutex); 1273 + if (!list_empty(&queue->queue_list)) 1274 + list_del_init(&queue->queue_list); 1275 + mutex_unlock(&nvmet_rdma_queue_mutex); 1276 + 1277 + pr_err("failed to connect queue %d\n", queue->idx); 1274 1278 schedule_work(&queue->release_work); 1275 1279 } 1276 1280 ··· 1358 1352 case RDMA_CM_EVENT_ADDR_CHANGE: 1359 1353 case RDMA_CM_EVENT_DISCONNECTED: 1360 1354 case RDMA_CM_EVENT_TIMEWAIT_EXIT: 1361 - nvmet_rdma_queue_disconnect(queue); 1355 + /* 1356 + * We might end up here when we already freed the qp 1357 + * which means queue release sequence is in progress, 1358 + * so don't get in the way... 1359 + */ 1360 + if (queue) 1361 + nvmet_rdma_queue_disconnect(queue); 1362 1362 break; 1363 1363 case RDMA_CM_EVENT_DEVICE_REMOVAL: 1364 1364 ret = nvmet_rdma_device_removal(cm_id, queue);
+5 -1
drivers/of/of_mdio.c
··· 292 292 mdiodev = to_mdio_device(d); 293 293 if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY) 294 294 return to_phy_device(d); 295 + put_device(d); 295 296 } 296 297 297 298 return NULL; ··· 457 456 status.link = 1; 458 457 status.duplex = of_property_read_bool(fixed_link_node, 459 458 "full-duplex"); 460 - if (of_property_read_u32(fixed_link_node, "speed", &status.speed)) 459 + if (of_property_read_u32(fixed_link_node, "speed", 460 + &status.speed)) { 461 + of_node_put(fixed_link_node); 461 462 return -EINVAL; 463 + } 462 464 status.pause = of_property_read_bool(fixed_link_node, "pause"); 463 465 status.asym_pause = of_property_read_bool(fixed_link_node, 464 466 "asym-pause");
+2 -2
drivers/video/fbdev/amba-clcd-versatile.c
··· 526 526 np = of_find_matching_node_and_match(NULL, versatile_clcd_of_match, 527 527 &clcd_id); 528 528 if (!np) { 529 - dev_err(dev, "no Versatile syscon node\n"); 530 - return -ENODEV; 529 + /* Vexpress does not have this */ 530 + return 0; 531 531 } 532 532 versatile_clcd_type = (enum versatile_clcd)clcd_id->data; 533 533
+21 -32
fs/crypto/fname.c
··· 39 39 static int fname_encrypt(struct inode *inode, 40 40 const struct qstr *iname, struct fscrypt_str *oname) 41 41 { 42 - u32 ciphertext_len; 43 42 struct skcipher_request *req = NULL; 44 43 DECLARE_FS_COMPLETION_RESULT(ecr); 45 44 struct fscrypt_info *ci = inode->i_crypt_info; 46 45 struct crypto_skcipher *tfm = ci->ci_ctfm; 47 46 int res = 0; 48 47 char iv[FS_CRYPTO_BLOCK_SIZE]; 49 - struct scatterlist src_sg, dst_sg; 48 + struct scatterlist sg; 50 49 int padding = 4 << (ci->ci_flags & FS_POLICY_FLAGS_PAD_MASK); 51 - char *workbuf, buf[32], *alloc_buf = NULL; 52 - unsigned lim; 50 + unsigned int lim; 51 + unsigned int cryptlen; 53 52 54 53 lim = inode->i_sb->s_cop->max_namelen(inode); 55 54 if (iname->len <= 0 || iname->len > lim) 56 55 return -EIO; 57 56 58 - ciphertext_len = max(iname->len, (u32)FS_CRYPTO_BLOCK_SIZE); 59 - ciphertext_len = round_up(ciphertext_len, padding); 60 - ciphertext_len = min(ciphertext_len, lim); 57 + /* 58 + * Copy the filename to the output buffer for encrypting in-place and 59 + * pad it with the needed number of NUL bytes. 
60 + */ 61 + cryptlen = max_t(unsigned int, iname->len, FS_CRYPTO_BLOCK_SIZE); 62 + cryptlen = round_up(cryptlen, padding); 63 + cryptlen = min(cryptlen, lim); 64 + memcpy(oname->name, iname->name, iname->len); 65 + memset(oname->name + iname->len, 0, cryptlen - iname->len); 61 66 62 - if (ciphertext_len <= sizeof(buf)) { 63 - workbuf = buf; 64 - } else { 65 - alloc_buf = kmalloc(ciphertext_len, GFP_NOFS); 66 - if (!alloc_buf) 67 - return -ENOMEM; 68 - workbuf = alloc_buf; 69 - } 67 + /* Initialize the IV */ 68 + memset(iv, 0, FS_CRYPTO_BLOCK_SIZE); 70 69 71 - /* Allocate request */ 70 + /* Set up the encryption request */ 72 71 req = skcipher_request_alloc(tfm, GFP_NOFS); 73 72 if (!req) { 74 73 printk_ratelimited(KERN_ERR 75 - "%s: crypto_request_alloc() failed\n", __func__); 76 - kfree(alloc_buf); 74 + "%s: skcipher_request_alloc() failed\n", __func__); 77 75 return -ENOMEM; 78 76 } 79 77 skcipher_request_set_callback(req, 80 78 CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, 81 79 fname_crypt_complete, &ecr); 80 + sg_init_one(&sg, oname->name, cryptlen); 81 + skcipher_request_set_crypt(req, &sg, &sg, cryptlen, iv); 82 82 83 - /* Copy the input */ 84 - memcpy(workbuf, iname->name, iname->len); 85 - if (iname->len < ciphertext_len) 86 - memset(workbuf + iname->len, 0, ciphertext_len - iname->len); 87 - 88 - /* Initialize IV */ 89 - memset(iv, 0, FS_CRYPTO_BLOCK_SIZE); 90 - 91 - /* Create encryption request */ 92 - sg_init_one(&src_sg, workbuf, ciphertext_len); 93 - sg_init_one(&dst_sg, oname->name, ciphertext_len); 94 - skcipher_request_set_crypt(req, &src_sg, &dst_sg, ciphertext_len, iv); 83 + /* Do the encryption */ 95 84 res = crypto_skcipher_encrypt(req); 96 85 if (res == -EINPROGRESS || res == -EBUSY) { 86 + /* Request is being completed asynchronously; wait for it */ 97 87 wait_for_completion(&ecr.completion); 98 88 res = ecr.res; 99 89 } 100 - kfree(alloc_buf); 101 90 skcipher_request_free(req); 102 91 if (res < 0) { 103 92 
printk_ratelimited(KERN_ERR ··· 94 105 return res; 95 106 } 96 107 97 - oname->len = ciphertext_len; 108 + oname->len = cryptlen; 98 109 return 0; 99 110 } 100 111
+13 -3
fs/crypto/keyinfo.c
··· 185 185 struct crypto_skcipher *ctfm; 186 186 const char *cipher_str; 187 187 int keysize; 188 - u8 raw_key[FS_MAX_KEY_SIZE]; 188 + u8 *raw_key = NULL; 189 189 int res; 190 190 191 191 res = fscrypt_initialize(); ··· 238 238 if (res) 239 239 goto out; 240 240 241 + /* 242 + * This cannot be a stack buffer because it is passed to the scatterlist 243 + * crypto API as part of key derivation. 244 + */ 245 + res = -ENOMEM; 246 + raw_key = kmalloc(FS_MAX_KEY_SIZE, GFP_NOFS); 247 + if (!raw_key) 248 + goto out; 249 + 241 250 if (fscrypt_dummy_context_enabled(inode)) { 242 251 memset(raw_key, 0x42, FS_AES_256_XTS_KEY_SIZE); 243 252 goto got_key; ··· 285 276 if (res) 286 277 goto out; 287 278 288 - memzero_explicit(raw_key, sizeof(raw_key)); 279 + kzfree(raw_key); 280 + raw_key = NULL; 289 281 if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) != NULL) { 290 282 put_crypt_info(crypt_info); 291 283 goto retry; ··· 297 287 if (res == -ENOKEY) 298 288 res = 0; 299 289 put_crypt_info(crypt_info); 300 - memzero_explicit(raw_key, sizeof(raw_key)); 290 + kzfree(raw_key); 301 291 return res; 302 292 } 303 293
+1
fs/ext4/ext4.h
··· 235 235 #define EXT4_MAX_BLOCK_SIZE 65536 236 236 #define EXT4_MIN_BLOCK_LOG_SIZE 10 237 237 #define EXT4_MAX_BLOCK_LOG_SIZE 16 238 + #define EXT4_MAX_CLUSTER_LOG_SIZE 30 238 239 #ifdef __KERNEL__ 239 240 # define EXT4_BLOCK_SIZE(s) ((s)->s_blocksize) 240 241 #else
+16 -1
fs/ext4/super.c
··· 3565 3565 if (blocksize < EXT4_MIN_BLOCK_SIZE || 3566 3566 blocksize > EXT4_MAX_BLOCK_SIZE) { 3567 3567 ext4_msg(sb, KERN_ERR, 3568 - "Unsupported filesystem blocksize %d", blocksize); 3568 + "Unsupported filesystem blocksize %d (%d log_block_size)", 3569 + blocksize, le32_to_cpu(es->s_log_block_size)); 3570 + goto failed_mount; 3571 + } 3572 + if (le32_to_cpu(es->s_log_block_size) > 3573 + (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) { 3574 + ext4_msg(sb, KERN_ERR, 3575 + "Invalid log block size: %u", 3576 + le32_to_cpu(es->s_log_block_size)); 3569 3577 goto failed_mount; 3570 3578 } 3571 3579 ··· 3703 3695 ext4_msg(sb, KERN_ERR, 3704 3696 "cluster size (%d) smaller than " 3705 3697 "block size (%d)", clustersize, blocksize); 3698 + goto failed_mount; 3699 + } 3700 + if (le32_to_cpu(es->s_log_cluster_size) > 3701 + (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) { 3702 + ext4_msg(sb, KERN_ERR, 3703 + "Invalid log cluster size: %u", 3704 + le32_to_cpu(es->s_log_cluster_size)); 3706 3705 goto failed_mount; 3707 3706 } 3708 3707 sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
+5
fs/fuse/dir.c
··· 286 286 .d_release = fuse_dentry_release, 287 287 }; 288 288 289 + const struct dentry_operations fuse_root_dentry_operations = { 290 + .d_init = fuse_dentry_init, 291 + .d_release = fuse_dentry_release, 292 + }; 293 + 289 294 int fuse_valid_type(int m) 290 295 { 291 296 return S_ISREG(m) || S_ISDIR(m) || S_ISLNK(m) || S_ISCHR(m) ||
+6
fs/fuse/file.c
··· 1985 1985 { 1986 1986 struct inode *inode = page->mapping->host; 1987 1987 1988 + /* Haven't copied anything? Skip zeroing, size extending, dirtying. */ 1989 + if (!copied) 1990 + goto unlock; 1991 + 1988 1992 if (!PageUptodate(page)) { 1989 1993 /* Zero any unwritten bytes at the end of the page */ 1990 1994 size_t endoff = (pos + copied) & ~PAGE_MASK; ··· 1999 1995 2000 1996 fuse_write_update_size(inode, pos + copied); 2001 1997 set_page_dirty(page); 1998 + 1999 + unlock: 2002 2000 unlock_page(page); 2003 2001 put_page(page); 2004 2002
+1
fs/fuse/fuse_i.h
··· 692 692 extern const struct file_operations fuse_dev_operations; 693 693 694 694 extern const struct dentry_operations fuse_dentry_operations; 695 + extern const struct dentry_operations fuse_root_dentry_operations; 695 696 696 697 /** 697 698 * Inode to nodeid comparison.
+2 -1
fs/fuse/inode.c
··· 1131 1131 1132 1132 err = -ENOMEM; 1133 1133 root = fuse_get_root_inode(sb, d.rootmode); 1134 + sb->s_d_op = &fuse_root_dentry_operations; 1134 1135 root_dentry = d_make_root(root); 1135 1136 if (!root_dentry) 1136 1137 goto err_dev_free; 1137 - /* only now - we want root dentry with NULL ->d_op */ 1138 + /* Root dentry doesn't have .d_revalidate */ 1138 1139 sb->s_d_op = &fuse_dentry_operations; 1139 1140 1140 1141 init_req = fuse_request_alloc(0);
+2
fs/orangefs/orangefs-debugfs.c
··· 114 114 }; 115 115 116 116 const struct file_operations debug_help_fops = { 117 + .owner = THIS_MODULE, 117 118 .open = orangefs_debug_help_open, 118 119 .read = seq_read, 119 120 .release = seq_release, ··· 122 121 }; 123 122 124 123 static const struct file_operations kernel_debug_fops = { 124 + .owner = THIS_MODULE, 125 125 .open = orangefs_debug_open, 126 126 .read = orangefs_debug_read, 127 127 .write = orangefs_debug_write,
+14 -8
fs/xattr.c
··· 170 170 const void *value, size_t size, int flags) 171 171 { 172 172 struct inode *inode = dentry->d_inode; 173 - int error = -EOPNOTSUPP; 173 + int error = -EAGAIN; 174 174 int issec = !strncmp(name, XATTR_SECURITY_PREFIX, 175 175 XATTR_SECURITY_PREFIX_LEN); 176 176 ··· 183 183 security_inode_post_setxattr(dentry, name, value, 184 184 size, flags); 185 185 } 186 - } else if (issec) { 187 - const char *suffix = name + XATTR_SECURITY_PREFIX_LEN; 188 - 186 + } else { 189 187 if (unlikely(is_bad_inode(inode))) 190 188 return -EIO; 191 - error = security_inode_setsecurity(inode, suffix, value, 192 - size, flags); 193 - if (!error) 194 - fsnotify_xattr(dentry); 189 + } 190 + if (error == -EAGAIN) { 191 + error = -EOPNOTSUPP; 192 + 193 + if (issec) { 194 + const char *suffix = name + XATTR_SECURITY_PREFIX_LEN; 195 + 196 + error = security_inode_setsecurity(inode, suffix, value, 197 + size, flags); 198 + if (!error) 199 + fsnotify_xattr(dentry); 200 + } 195 201 } 196 202 197 203 return error;
+70 -94
include/acpi/actbl.h
··· 230 230 /* Fields common to all versions of the FADT */ 231 231 232 232 struct acpi_table_fadt { 233 - struct acpi_table_header header; /* [V1] Common ACPI table header */ 234 - u32 facs; /* [V1] 32-bit physical address of FACS */ 235 - u32 dsdt; /* [V1] 32-bit physical address of DSDT */ 236 - u8 model; /* [V1] System Interrupt Model (ACPI 1.0) - not used in ACPI 2.0+ */ 237 - u8 preferred_profile; /* [V1] Conveys preferred power management profile to OSPM. */ 238 - u16 sci_interrupt; /* [V1] System vector of SCI interrupt */ 239 - u32 smi_command; /* [V1] 32-bit Port address of SMI command port */ 240 - u8 acpi_enable; /* [V1] Value to write to SMI_CMD to enable ACPI */ 241 - u8 acpi_disable; /* [V1] Value to write to SMI_CMD to disable ACPI */ 242 - u8 s4_bios_request; /* [V1] Value to write to SMI_CMD to enter S4BIOS state */ 243 - u8 pstate_control; /* [V1] Processor performance state control */ 244 - u32 pm1a_event_block; /* [V1] 32-bit port address of Power Mgt 1a Event Reg Blk */ 245 - u32 pm1b_event_block; /* [V1] 32-bit port address of Power Mgt 1b Event Reg Blk */ 246 - u32 pm1a_control_block; /* [V1] 32-bit port address of Power Mgt 1a Control Reg Blk */ 247 - u32 pm1b_control_block; /* [V1] 32-bit port address of Power Mgt 1b Control Reg Blk */ 248 - u32 pm2_control_block; /* [V1] 32-bit port address of Power Mgt 2 Control Reg Blk */ 249 - u32 pm_timer_block; /* [V1] 32-bit port address of Power Mgt Timer Ctrl Reg Blk */ 250 - u32 gpe0_block; /* [V1] 32-bit port address of General Purpose Event 0 Reg Blk */ 251 - u32 gpe1_block; /* [V1] 32-bit port address of General Purpose Event 1 Reg Blk */ 252 - u8 pm1_event_length; /* [V1] Byte Length of ports at pm1x_event_block */ 253 - u8 pm1_control_length; /* [V1] Byte Length of ports at pm1x_control_block */ 254 - u8 pm2_control_length; /* [V1] Byte Length of ports at pm2_control_block */ 255 - u8 pm_timer_length; /* [V1] Byte Length of ports at pm_timer_block */ 256 - u8 gpe0_block_length; /* [V1] Byte 
Length of ports at gpe0_block */ 257 - u8 gpe1_block_length; /* [V1] Byte Length of ports at gpe1_block */ 258 - u8 gpe1_base; /* [V1] Offset in GPE number space where GPE1 events start */ 259 - u8 cst_control; /* [V1] Support for the _CST object and C-States change notification */ 260 - u16 c2_latency; /* [V1] Worst case HW latency to enter/exit C2 state */ 261 - u16 c3_latency; /* [V1] Worst case HW latency to enter/exit C3 state */ 262 - u16 flush_size; /* [V1] Processor memory cache line width, in bytes */ 263 - u16 flush_stride; /* [V1] Number of flush strides that need to be read */ 264 - u8 duty_offset; /* [V1] Processor duty cycle index in processor P_CNT reg */ 265 - u8 duty_width; /* [V1] Processor duty cycle value bit width in P_CNT register */ 266 - u8 day_alarm; /* [V1] Index to day-of-month alarm in RTC CMOS RAM */ 267 - u8 month_alarm; /* [V1] Index to month-of-year alarm in RTC CMOS RAM */ 268 - u8 century; /* [V1] Index to century in RTC CMOS RAM */ 269 - u16 boot_flags; /* [V3] IA-PC Boot Architecture Flags (see below for individual flags) */ 270 - u8 reserved; /* [V1] Reserved, must be zero */ 271 - u32 flags; /* [V1] Miscellaneous flag bits (see below for individual flags) */ 272 - /* End of Version 1 FADT fields (ACPI 1.0) */ 273 - 274 - struct acpi_generic_address reset_register; /* [V3] 64-bit address of the Reset register */ 275 - u8 reset_value; /* [V3] Value to write to the reset_register port to reset the system */ 276 - u16 arm_boot_flags; /* [V5] ARM-Specific Boot Flags (see below for individual flags) (ACPI 5.1) */ 277 - u8 minor_revision; /* [V5] FADT Minor Revision (ACPI 5.1) */ 278 - u64 Xfacs; /* [V3] 64-bit physical address of FACS */ 279 - u64 Xdsdt; /* [V3] 64-bit physical address of DSDT */ 280 - struct acpi_generic_address xpm1a_event_block; /* [V3] 64-bit Extended Power Mgt 1a Event Reg Blk address */ 281 - struct acpi_generic_address xpm1b_event_block; /* [V3] 64-bit Extended Power Mgt 1b Event Reg Blk address */ 282 - 
struct acpi_generic_address xpm1a_control_block; /* [V3] 64-bit Extended Power Mgt 1a Control Reg Blk address */ 283 - struct acpi_generic_address xpm1b_control_block; /* [V3] 64-bit Extended Power Mgt 1b Control Reg Blk address */ 284 - struct acpi_generic_address xpm2_control_block; /* [V3] 64-bit Extended Power Mgt 2 Control Reg Blk address */ 285 - struct acpi_generic_address xpm_timer_block; /* [V3] 64-bit Extended Power Mgt Timer Ctrl Reg Blk address */ 286 - struct acpi_generic_address xgpe0_block; /* [V3] 64-bit Extended General Purpose Event 0 Reg Blk address */ 287 - struct acpi_generic_address xgpe1_block; /* [V3] 64-bit Extended General Purpose Event 1 Reg Blk address */ 288 - /* End of Version 3 FADT fields (ACPI 2.0) */ 289 - 290 - struct acpi_generic_address sleep_control; /* [V4] 64-bit Sleep Control register (ACPI 5.0) */ 291 - /* End of Version 4 FADT fields (ACPI 3.0 and ACPI 4.0) (Field was originally reserved in ACPI 3.0) */ 292 - 293 - struct acpi_generic_address sleep_status; /* [V5] 64-bit Sleep Status register (ACPI 5.0) */ 294 - /* End of Version 5 FADT fields (ACPI 5.0) */ 295 - 296 - u64 hypervisor_id; /* [V6] Hypervisor Vendor ID (ACPI 6.0) */ 297 - /* End of Version 6 FADT fields (ACPI 6.0) */ 298 - 233 + struct acpi_table_header header; /* Common ACPI table header */ 234 + u32 facs; /* 32-bit physical address of FACS */ 235 + u32 dsdt; /* 32-bit physical address of DSDT */ 236 + u8 model; /* System Interrupt Model (ACPI 1.0) - not used in ACPI 2.0+ */ 237 + u8 preferred_profile; /* Conveys preferred power management profile to OSPM. 
*/ 238 + u16 sci_interrupt; /* System vector of SCI interrupt */ 239 + u32 smi_command; /* 32-bit Port address of SMI command port */ 240 + u8 acpi_enable; /* Value to write to SMI_CMD to enable ACPI */ 241 + u8 acpi_disable; /* Value to write to SMI_CMD to disable ACPI */ 242 + u8 s4_bios_request; /* Value to write to SMI_CMD to enter S4BIOS state */ 243 + u8 pstate_control; /* Processor performance state control */ 244 + u32 pm1a_event_block; /* 32-bit port address of Power Mgt 1a Event Reg Blk */ 245 + u32 pm1b_event_block; /* 32-bit port address of Power Mgt 1b Event Reg Blk */ 246 + u32 pm1a_control_block; /* 32-bit port address of Power Mgt 1a Control Reg Blk */ 247 + u32 pm1b_control_block; /* 32-bit port address of Power Mgt 1b Control Reg Blk */ 248 + u32 pm2_control_block; /* 32-bit port address of Power Mgt 2 Control Reg Blk */ 249 + u32 pm_timer_block; /* 32-bit port address of Power Mgt Timer Ctrl Reg Blk */ 250 + u32 gpe0_block; /* 32-bit port address of General Purpose Event 0 Reg Blk */ 251 + u32 gpe1_block; /* 32-bit port address of General Purpose Event 1 Reg Blk */ 252 + u8 pm1_event_length; /* Byte Length of ports at pm1x_event_block */ 253 + u8 pm1_control_length; /* Byte Length of ports at pm1x_control_block */ 254 + u8 pm2_control_length; /* Byte Length of ports at pm2_control_block */ 255 + u8 pm_timer_length; /* Byte Length of ports at pm_timer_block */ 256 + u8 gpe0_block_length; /* Byte Length of ports at gpe0_block */ 257 + u8 gpe1_block_length; /* Byte Length of ports at gpe1_block */ 258 + u8 gpe1_base; /* Offset in GPE number space where GPE1 events start */ 259 + u8 cst_control; /* Support for the _CST object and C-States change notification */ 260 + u16 c2_latency; /* Worst case HW latency to enter/exit C2 state */ 261 + u16 c3_latency; /* Worst case HW latency to enter/exit C3 state */ 262 + u16 flush_size; /* Processor memory cache line width, in bytes */ 263 + u16 flush_stride; /* Number of flush strides that need to be read */ 
264 + u8 duty_offset; /* Processor duty cycle index in processor P_CNT reg */ 265 + u8 duty_width; /* Processor duty cycle value bit width in P_CNT register */ 266 + u8 day_alarm; /* Index to day-of-month alarm in RTC CMOS RAM */ 267 + u8 month_alarm; /* Index to month-of-year alarm in RTC CMOS RAM */ 268 + u8 century; /* Index to century in RTC CMOS RAM */ 269 + u16 boot_flags; /* IA-PC Boot Architecture Flags (see below for individual flags) */ 270 + u8 reserved; /* Reserved, must be zero */ 271 + u32 flags; /* Miscellaneous flag bits (see below for individual flags) */ 272 + struct acpi_generic_address reset_register; /* 64-bit address of the Reset register */ 273 + u8 reset_value; /* Value to write to the reset_register port to reset the system */ 274 + u16 arm_boot_flags; /* ARM-Specific Boot Flags (see below for individual flags) (ACPI 5.1) */ 275 + u8 minor_revision; /* FADT Minor Revision (ACPI 5.1) */ 276 + u64 Xfacs; /* 64-bit physical address of FACS */ 277 + u64 Xdsdt; /* 64-bit physical address of DSDT */ 278 + struct acpi_generic_address xpm1a_event_block; /* 64-bit Extended Power Mgt 1a Event Reg Blk address */ 279 + struct acpi_generic_address xpm1b_event_block; /* 64-bit Extended Power Mgt 1b Event Reg Blk address */ 280 + struct acpi_generic_address xpm1a_control_block; /* 64-bit Extended Power Mgt 1a Control Reg Blk address */ 281 + struct acpi_generic_address xpm1b_control_block; /* 64-bit Extended Power Mgt 1b Control Reg Blk address */ 282 + struct acpi_generic_address xpm2_control_block; /* 64-bit Extended Power Mgt 2 Control Reg Blk address */ 283 + struct acpi_generic_address xpm_timer_block; /* 64-bit Extended Power Mgt Timer Ctrl Reg Blk address */ 284 + struct acpi_generic_address xgpe0_block; /* 64-bit Extended General Purpose Event 0 Reg Blk address */ 285 + struct acpi_generic_address xgpe1_block; /* 64-bit Extended General Purpose Event 1 Reg Blk address */ 286 + struct acpi_generic_address sleep_control; /* 64-bit Sleep Control 
register (ACPI 5.0) */ 287 + struct acpi_generic_address sleep_status; /* 64-bit Sleep Status register (ACPI 5.0) */ 288 + u64 hypervisor_id; /* Hypervisor Vendor ID (ACPI 6.0) */ 299 289 }; 300 290 301 291 /* Masks for FADT IA-PC Boot Architecture Flags (boot_flags) [Vx]=Introduced in this FADT revision */ ··· 301 311 302 312 /* Masks for FADT ARM Boot Architecture Flags (arm_boot_flags) ACPI 5.1 */ 303 313 304 - #define ACPI_FADT_PSCI_COMPLIANT (1) /* 00: [V5] PSCI 0.2+ is implemented */ 305 - #define ACPI_FADT_PSCI_USE_HVC (1<<1) /* 01: [V5] HVC must be used instead of SMC as the PSCI conduit */ 314 + #define ACPI_FADT_PSCI_COMPLIANT (1) /* 00: [V5+] PSCI 0.2+ is implemented */ 315 + #define ACPI_FADT_PSCI_USE_HVC (1<<1) /* 01: [V5+] HVC must be used instead of SMC as the PSCI conduit */ 306 316 307 317 /* Masks for FADT flags */ 308 318 ··· 399 409 * match the expected length. In other words, the length of the 400 410 * FADT is the bottom line as to what the version really is. 401 411 * 402 - * NOTE: There is no officialy released V2 of the FADT. This 403 - * version was used only for prototyping and testing during the 404 - * 32-bit to 64-bit transition. V3 was the first official 64-bit 405 - * version of the FADT. 406 - * 407 - * Update this list of defines when a new version of the FADT is 408 - * added to the ACPI specification. Note that the FADT version is 409 - * only incremented when new fields are appended to the existing 410 - * version. Therefore, the FADT version is competely independent 411 - * from the version of the ACPI specification where it is 412 - * defined. 
413 - * 414 - * For reference, the various FADT lengths are as follows: 415 - * FADT V1 size: 0x074 ACPI 1.0 416 - * FADT V3 size: 0x0F4 ACPI 2.0 417 - * FADT V4 size: 0x100 ACPI 3.0 and ACPI 4.0 418 - * FADT V5 size: 0x10C ACPI 5.0 419 - * FADT V6 size: 0x114 ACPI 6.0 412 + * For reference, the values below are as follows: 413 + * FADT V1 size: 0x074 414 + * FADT V2 size: 0x084 415 + * FADT V3 size: 0x0F4 416 + * FADT V4 size: 0x0F4 417 + * FADT V5 size: 0x10C 418 + * FADT V6 size: 0x114 420 419 */ 421 - #define ACPI_FADT_V1_SIZE (u32) (ACPI_FADT_OFFSET (flags) + 4) /* ACPI 1.0 */ 422 - #define ACPI_FADT_V3_SIZE (u32) (ACPI_FADT_OFFSET (sleep_control)) /* ACPI 2.0 */ 423 - #define ACPI_FADT_V4_SIZE (u32) (ACPI_FADT_OFFSET (sleep_status)) /* ACPI 3.0 and ACPI 4.0 */ 424 - #define ACPI_FADT_V5_SIZE (u32) (ACPI_FADT_OFFSET (hypervisor_id)) /* ACPI 5.0 */ 425 - #define ACPI_FADT_V6_SIZE (u32) (sizeof (struct acpi_table_fadt)) /* ACPI 6.0 */ 420 + #define ACPI_FADT_V1_SIZE (u32) (ACPI_FADT_OFFSET (flags) + 4) 421 + #define ACPI_FADT_V2_SIZE (u32) (ACPI_FADT_OFFSET (minor_revision) + 1) 422 + #define ACPI_FADT_V3_SIZE (u32) (ACPI_FADT_OFFSET (sleep_control)) 423 + #define ACPI_FADT_V5_SIZE (u32) (ACPI_FADT_OFFSET (hypervisor_id)) 424 + #define ACPI_FADT_V6_SIZE (u32) (sizeof (struct acpi_table_fadt)) 426 425 427 - /* Update these when new FADT versions are added */ 428 - 429 - #define ACPI_FADT_MAX_VERSION 6 430 426 #define ACPI_FADT_CONFORMANCE "ACPI 6.1 (FADT version 6)" 431 427 432 428 #endif /* __ACTBL_H__ */
+3
include/acpi/platform/aclinux.h
··· 191 191 #ifndef __init 192 192 #define __init 193 193 #endif 194 + #ifndef __iomem 195 + #define __iomem 196 + #endif 194 197 195 198 /* Host-dependent types and defines for user-space ACPICA */ 196 199
+3 -2
include/linux/bpf_verifier.h
··· 14 14 * are obviously wrong for any sort of memory access. 15 15 */ 16 16 #define BPF_REGISTER_MAX_RANGE (1024 * 1024 * 1024) 17 - #define BPF_REGISTER_MIN_RANGE -(1024 * 1024 * 1024) 17 + #define BPF_REGISTER_MIN_RANGE -1 18 18 19 19 struct bpf_reg_state { 20 20 enum bpf_reg_type type; ··· 22 22 * Used to determine if any memory access using this register will 23 23 * result in a bad access. 24 24 */ 25 - u64 min_value, max_value; 25 + s64 min_value; 26 + u64 max_value; 26 27 u32 id; 27 28 union { 28 29 /* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */
+1 -1
include/linux/huge_mm.h
··· 22 22 unsigned char *vec); 23 23 extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, 24 24 unsigned long new_addr, unsigned long old_end, 25 - pmd_t *old_pmd, pmd_t *new_pmd); 25 + pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush); 26 26 extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, 27 27 unsigned long addr, pgprot_t newprot, 28 28 int prot_numa);
+1
include/linux/sunrpc/svc_xprt.h
··· 25 25 void (*xpo_detach)(struct svc_xprt *); 26 26 void (*xpo_free)(struct svc_xprt *); 27 27 int (*xpo_secure_port)(struct svc_rqst *); 28 + void (*xpo_kill_temp_xprt)(struct svc_xprt *); 28 29 }; 29 30 30 31 struct svc_xprt_class {
+3
include/net/gro_cells.h
··· 68 68 struct gro_cell *cell = per_cpu_ptr(gcells->cells, i); 69 69 70 70 __skb_queue_head_init(&cell->napi_skbs); 71 + 72 + set_bit(NAPI_STATE_NO_BUSY_POLL, &cell->napi.state); 73 + 71 74 netif_napi_add(dev, &cell->napi, gro_cell_poll, 64); 72 75 napi_enable(&cell->napi); 73 76 }
+1
include/net/ip_fib.h
··· 243 243 struct netlink_callback *cb); 244 244 int fib_table_flush(struct net *net, struct fib_table *table); 245 245 struct fib_table *fib_trie_unmerge(struct fib_table *main_tb); 246 + void fib_table_flush_external(struct fib_table *table); 246 247 void fib_free_table(struct fib_table *tb); 247 248 248 249 #ifndef CONFIG_IP_MULTIPLE_TABLES
+1 -1
include/net/net_namespace.h
··· 170 170 extern struct list_head net_namespace_list; 171 171 172 172 struct net *get_net_ns_by_pid(pid_t pid); 173 - struct net *get_net_ns_by_fd(int pid); 173 + struct net *get_net_ns_by_fd(int fd); 174 174 175 175 #ifdef CONFIG_SYSCTL 176 176 void ipx_register_sysctl(void);
+7
include/uapi/linux/kvm.h
··· 972 972 __u8 pad[16]; 973 973 }; 974 974 975 + /* For KVM_CAP_ADJUST_CLOCK */ 976 + 977 + /* Do not use 1, KVM_CHECK_EXTENSION returned it before we had flags. */ 978 + #define KVM_CLOCK_TSC_STABLE 2 979 + 975 980 struct kvm_clock_data { 976 981 __u64 clock; 977 982 __u32 flags; 978 983 __u32 pad[9]; 979 984 }; 985 + 986 + /* For KVM_CAP_SW_TLB */ 980 987 981 988 #define KVM_MMU_FSL_BOOKE_NOHV 0 982 989 #define KVM_MMU_FSL_BOOKE_HV 1
+47 -23
kernel/bpf/verifier.c
··· 234 234 reg->map_ptr->value_size, 235 235 reg->id); 236 236 if (reg->min_value != BPF_REGISTER_MIN_RANGE) 237 - verbose(",min_value=%llu", 238 - (unsigned long long)reg->min_value); 237 + verbose(",min_value=%lld", 238 + (long long)reg->min_value); 239 239 if (reg->max_value != BPF_REGISTER_MAX_RANGE) 240 240 verbose(",max_value=%llu", 241 241 (unsigned long long)reg->max_value); ··· 778 778 * index'es we need to make sure that whatever we use 779 779 * will have a set floor within our range. 780 780 */ 781 - if ((s64)reg->min_value < 0) { 781 + if (reg->min_value < 0) { 782 782 verbose("R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", 783 783 regno); 784 784 return -EACCES; ··· 1490 1490 { 1491 1491 if (reg->max_value > BPF_REGISTER_MAX_RANGE) 1492 1492 reg->max_value = BPF_REGISTER_MAX_RANGE; 1493 - if ((s64)reg->min_value < BPF_REGISTER_MIN_RANGE) 1493 + if (reg->min_value < BPF_REGISTER_MIN_RANGE || 1494 + reg->min_value > BPF_REGISTER_MAX_RANGE) 1494 1495 reg->min_value = BPF_REGISTER_MIN_RANGE; 1495 1496 } 1496 1497 ··· 1499 1498 struct bpf_insn *insn) 1500 1499 { 1501 1500 struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg; 1502 - u64 min_val = BPF_REGISTER_MIN_RANGE, max_val = BPF_REGISTER_MAX_RANGE; 1501 + s64 min_val = BPF_REGISTER_MIN_RANGE; 1502 + u64 max_val = BPF_REGISTER_MAX_RANGE; 1503 1503 u8 opcode = BPF_OP(insn->code); 1504 1504 1505 1505 dst_reg = &regs[insn->dst_reg]; ··· 1534 1532 return; 1535 1533 } 1536 1534 1535 + /* If one of our values was at the end of our ranges then we can't just 1536 + * do our normal operations to the register, we need to set the values 1537 + * to the min/max since they are undefined. 
1538 + */ 1539 + if (min_val == BPF_REGISTER_MIN_RANGE) 1540 + dst_reg->min_value = BPF_REGISTER_MIN_RANGE; 1541 + if (max_val == BPF_REGISTER_MAX_RANGE) 1542 + dst_reg->max_value = BPF_REGISTER_MAX_RANGE; 1543 + 1537 1544 switch (opcode) { 1538 1545 case BPF_ADD: 1539 - dst_reg->min_value += min_val; 1540 - dst_reg->max_value += max_val; 1546 + if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) 1547 + dst_reg->min_value += min_val; 1548 + if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) 1549 + dst_reg->max_value += max_val; 1541 1550 break; 1542 1551 case BPF_SUB: 1543 - dst_reg->min_value -= min_val; 1544 - dst_reg->max_value -= max_val; 1552 + if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) 1553 + dst_reg->min_value -= min_val; 1554 + if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) 1555 + dst_reg->max_value -= max_val; 1545 1556 break; 1546 1557 case BPF_MUL: 1547 - dst_reg->min_value *= min_val; 1548 - dst_reg->max_value *= max_val; 1558 + if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) 1559 + dst_reg->min_value *= min_val; 1560 + if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) 1561 + dst_reg->max_value *= max_val; 1549 1562 break; 1550 1563 case BPF_AND: 1551 - /* & is special since it could end up with 0 bits set. */ 1552 - dst_reg->min_value &= min_val; 1564 + /* Disallow AND'ing of negative numbers, ain't nobody got time 1565 + * for that. Otherwise the minimum is 0 and the max is the max 1566 + * value we could AND against. 
1567 + */ 1568 + if (min_val < 0) 1569 + dst_reg->min_value = BPF_REGISTER_MIN_RANGE; 1570 + else 1571 + dst_reg->min_value = 0; 1553 1572 dst_reg->max_value = max_val; 1554 1573 break; 1555 1574 case BPF_LSH: ··· 1580 1557 */ 1581 1558 if (min_val > ilog2(BPF_REGISTER_MAX_RANGE)) 1582 1559 dst_reg->min_value = BPF_REGISTER_MIN_RANGE; 1583 - else 1560 + else if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) 1584 1561 dst_reg->min_value <<= min_val; 1585 1562 1586 1563 if (max_val > ilog2(BPF_REGISTER_MAX_RANGE)) 1587 1564 dst_reg->max_value = BPF_REGISTER_MAX_RANGE; 1588 - else 1565 + else if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) 1589 1566 dst_reg->max_value <<= max_val; 1590 1567 break; 1591 1568 case BPF_RSH: 1592 - dst_reg->min_value >>= min_val; 1593 - dst_reg->max_value >>= max_val; 1594 - break; 1595 - case BPF_MOD: 1596 - /* % is special since it is an unsigned modulus, so the floor 1597 - * will always be 0. 1569 + /* RSH by a negative number is undefined, and the BPF_RSH is an 1570 + * unsigned shift, so make the appropriate casts. 1598 1571 */ 1599 - dst_reg->min_value = 0; 1600 - dst_reg->max_value = max_val - 1; 1572 + if (min_val < 0 || dst_reg->min_value < 0) 1573 + dst_reg->min_value = BPF_REGISTER_MIN_RANGE; 1574 + else 1575 + dst_reg->min_value = 1576 + (u64)(dst_reg->min_value) >> min_val; 1577 + if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) 1578 + dst_reg->max_value >>= max_val; 1601 1579 break; 1602 1580 default: 1603 1581 reset_reg_range_values(regs, insn->dst_reg);
+17 -3
kernel/locking/lockdep_internals.h
··· 46 46 (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ) 47 47 48 48 /* 49 + * CONFIG_PROVE_LOCKING_SMALL is defined for sparc. Sparc requires .text, 50 + * .data and .bss to fit in required 32MB limit for the kernel. With 51 + * PROVE_LOCKING we could go over this limit and cause system boot-up problems. 52 + * So, reduce the static allocations for lockdeps related structures so that 53 + * everything fits in current required size limit. 54 + */ 55 + #ifdef CONFIG_PROVE_LOCKING_SMALL 56 + /* 49 57 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies 50 58 * we track. 51 59 * ··· 62 54 * table (if it's not there yet), and we check it for lock order 63 55 * conflicts and deadlocks. 64 56 */ 57 + #define MAX_LOCKDEP_ENTRIES 16384UL 58 + #define MAX_LOCKDEP_CHAINS_BITS 15 59 + #define MAX_STACK_TRACE_ENTRIES 262144UL 60 + #else 65 61 #define MAX_LOCKDEP_ENTRIES 32768UL 66 62 67 63 #define MAX_LOCKDEP_CHAINS_BITS 16 68 - #define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS) 69 - 70 - #define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5) 71 64 72 65 /* 73 66 * Stack-trace: tightly packed array of stack backtrace 74 67 * addresses. Protected by the hash_lock. 75 68 */ 76 69 #define MAX_STACK_TRACE_ENTRIES 524288UL 70 + #endif 71 + 72 + #define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS) 73 + 74 + #define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5) 77 75 78 76 extern struct list_head all_lock_classes; 79 77 extern struct lock_chain lock_chains[];
+23 -1
kernel/trace/ftrace.c
··· 1862 1862 1863 1863 /* Update rec->flags */ 1864 1864 do_for_each_ftrace_rec(pg, rec) { 1865 + 1866 + if (rec->flags & FTRACE_FL_DISABLED) 1867 + continue; 1868 + 1865 1869 /* We need to update only differences of filter_hash */ 1866 1870 in_old = !!ftrace_lookup_ip(old_hash, rec->ip); 1867 1871 in_new = !!ftrace_lookup_ip(new_hash, rec->ip); ··· 1888 1884 1889 1885 /* Roll back what we did above */ 1890 1886 do_for_each_ftrace_rec(pg, rec) { 1887 + 1888 + if (rec->flags & FTRACE_FL_DISABLED) 1889 + continue; 1890 + 1891 1891 if (rec == end) 1892 1892 goto err_out; 1893 1893 ··· 2405 2397 return; 2406 2398 2407 2399 do_for_each_ftrace_rec(pg, rec) { 2400 + 2401 + if (rec->flags & FTRACE_FL_DISABLED) 2402 + continue; 2403 + 2408 2404 failed = __ftrace_replace_code(rec, enable); 2409 2405 if (failed) { 2410 2406 ftrace_bug(failed, rec); ··· 2775 2763 struct dyn_ftrace *rec; 2776 2764 2777 2765 do_for_each_ftrace_rec(pg, rec) { 2778 - if (FTRACE_WARN_ON_ONCE(rec->flags)) 2766 + if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED)) 2779 2767 pr_warn(" %pS flags:%lx\n", 2780 2768 (void *)rec->ip, rec->flags); 2781 2769 } while_for_each_ftrace_rec(); ··· 3610 3598 goto out_unlock; 3611 3599 3612 3600 do_for_each_ftrace_rec(pg, rec) { 3601 + 3602 + if (rec->flags & FTRACE_FL_DISABLED) 3603 + continue; 3604 + 3613 3605 if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) { 3614 3606 ret = enter_record(hash, rec, clear_filter); 3615 3607 if (ret < 0) { ··· 3808 3792 mutex_lock(&ftrace_lock); 3809 3793 3810 3794 do_for_each_ftrace_rec(pg, rec) { 3795 + 3796 + if (rec->flags & FTRACE_FL_DISABLED) 3797 + continue; 3811 3798 3812 3799 if (!ftrace_match_record(rec, &func_g, NULL, 0)) 3813 3800 continue; ··· 4703 4684 } 4704 4685 4705 4686 do_for_each_ftrace_rec(pg, rec) { 4687 + 4688 + if (rec->flags & FTRACE_FL_DISABLED) 4689 + continue; 4706 4690 4707 4691 if (ftrace_match_record(rec, &func_g, NULL, 0)) { 4708 4692 /* if it is in the array */
+3
lib/Kconfig.debug
··· 1085 1085 1086 1086 For more details, see Documentation/locking/lockdep-design.txt. 1087 1087 1088 + config PROVE_LOCKING_SMALL 1089 + bool 1090 + 1088 1091 config LOCKDEP 1089 1092 bool 1090 1093 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
+3 -1
lib/iov_iter.c
··· 683 683 struct pipe_inode_info *pipe = i->pipe; 684 684 struct pipe_buffer *buf; 685 685 int idx = i->idx; 686 - size_t off = i->iov_offset; 686 + size_t off = i->iov_offset, orig_sz; 687 687 688 688 if (unlikely(i->count < size)) 689 689 size = i->count; 690 + orig_sz = size; 690 691 691 692 if (size) { 692 693 if (off) /* make it relative to the beginning of buffer */ ··· 714 713 pipe->nrbufs--; 715 714 } 716 715 } 716 + i->count -= orig_sz; 717 717 } 718 718 719 719 void iov_iter_advance(struct iov_iter *i, size_t size)
+8 -1
mm/huge_memory.c
··· 1426 1426 1427 1427 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, 1428 1428 unsigned long new_addr, unsigned long old_end, 1429 - pmd_t *old_pmd, pmd_t *new_pmd) 1429 + pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush) 1430 1430 { 1431 1431 spinlock_t *old_ptl, *new_ptl; 1432 1432 pmd_t pmd; 1433 1433 struct mm_struct *mm = vma->vm_mm; 1434 + bool force_flush = false; 1434 1435 1435 1436 if ((old_addr & ~HPAGE_PMD_MASK) || 1436 1437 (new_addr & ~HPAGE_PMD_MASK) || ··· 1456 1455 new_ptl = pmd_lockptr(mm, new_pmd); 1457 1456 if (new_ptl != old_ptl) 1458 1457 spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); 1458 + if (pmd_present(*old_pmd) && pmd_dirty(*old_pmd)) 1459 + force_flush = true; 1459 1460 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd); 1460 1461 VM_BUG_ON(!pmd_none(*new_pmd)); 1461 1462 ··· 1470 1467 set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd)); 1471 1468 if (new_ptl != old_ptl) 1472 1469 spin_unlock(new_ptl); 1470 + if (force_flush) 1471 + flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE); 1472 + else 1473 + *need_flush = true; 1473 1474 spin_unlock(old_ptl); 1474 1475 return true; 1475 1476 }
+21 -9
mm/mremap.c
··· 104 104 static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, 105 105 unsigned long old_addr, unsigned long old_end, 106 106 struct vm_area_struct *new_vma, pmd_t *new_pmd, 107 - unsigned long new_addr, bool need_rmap_locks) 107 + unsigned long new_addr, bool need_rmap_locks, bool *need_flush) 108 108 { 109 109 struct mm_struct *mm = vma->vm_mm; 110 110 pte_t *old_pte, *new_pte, pte; 111 111 spinlock_t *old_ptl, *new_ptl; 112 + bool force_flush = false; 113 + unsigned long len = old_end - old_addr; 112 114 113 115 /* 114 116 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma ··· 148 146 new_pte++, new_addr += PAGE_SIZE) { 149 147 if (pte_none(*old_pte)) 150 148 continue; 149 + 150 + /* 151 + * We are remapping a dirty PTE, make sure to 152 + * flush TLB before we drop the PTL for the 153 + * old PTE or we may race with page_mkclean(). 154 + */ 155 + if (pte_present(*old_pte) && pte_dirty(*old_pte)) 156 + force_flush = true; 151 157 pte = ptep_get_and_clear(mm, old_addr, old_pte); 152 158 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr); 153 159 pte = move_soft_dirty_pte(pte); ··· 166 156 if (new_ptl != old_ptl) 167 157 spin_unlock(new_ptl); 168 158 pte_unmap(new_pte - 1); 159 + if (force_flush) 160 + flush_tlb_range(vma, old_end - len, old_end); 161 + else 162 + *need_flush = true; 169 163 pte_unmap_unlock(old_pte - 1, old_ptl); 170 164 if (need_rmap_locks) 171 165 drop_rmap_locks(vma); ··· 215 201 if (need_rmap_locks) 216 202 take_rmap_locks(vma); 217 203 moved = move_huge_pmd(vma, old_addr, new_addr, 218 - old_end, old_pmd, new_pmd); 204 + old_end, old_pmd, new_pmd, 205 + &need_flush); 219 206 if (need_rmap_locks) 220 207 drop_rmap_locks(vma); 221 - if (moved) { 222 - need_flush = true; 208 + if (moved) 223 209 continue; 224 - } 225 210 } 226 211 split_huge_pmd(vma, old_pmd, old_addr); 227 212 if (pmd_trans_unstable(old_pmd)) ··· 233 220 extent = next - new_addr; 234 221 if (extent > LATENCY_LIMIT) 235 222 extent = 
LATENCY_LIMIT; 236 - move_ptes(vma, old_pmd, old_addr, old_addr + extent, 237 - new_vma, new_pmd, new_addr, need_rmap_locks); 238 - need_flush = true; 223 + move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma, 224 + new_pmd, new_addr, need_rmap_locks, &need_flush); 239 225 } 240 - if (likely(need_flush)) 226 + if (need_flush) 241 227 flush_tlb_range(vma, old_end-len, old_addr); 242 228 243 229 mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
+1
net/batman-adv/hard-interface.c
··· 851 851 batadv_softif_destroy_sysfs(hard_iface->soft_iface); 852 852 } 853 853 854 + hard_iface->soft_iface = NULL; 854 855 batadv_hardif_put(hard_iface); 855 856 856 857 out:
+1
net/batman-adv/tp_meter.c
··· 834 834 primary_if = batadv_primary_if_get_selected(bat_priv); 835 835 if (unlikely(!primary_if)) { 836 836 err = BATADV_TP_REASON_DST_UNREACHABLE; 837 + tp_vars->reason = err; 837 838 goto out; 838 839 } 839 840
+2
net/core/net_namespace.c
··· 218 218 bool alloc; 219 219 int id; 220 220 221 + if (atomic_read(&net->count) == 0) 222 + return NETNSA_NSID_NOT_ASSIGNED; 221 223 spin_lock_irqsave(&net->nsid_lock, flags); 222 224 alloc = atomic_read(&peer->count) == 0 ? false : true; 223 225 id = __peernet2id_alloc(net, peer, &alloc);
+14 -8
net/core/rtnetlink.c
··· 840 840 if (dev->dev.parent && dev_is_pci(dev->dev.parent) && 841 841 (ext_filter_mask & RTEXT_FILTER_VF)) { 842 842 int num_vfs = dev_num_vf(dev->dev.parent); 843 - size_t size = nla_total_size(sizeof(struct nlattr)); 844 - size += nla_total_size(num_vfs * sizeof(struct nlattr)); 843 + size_t size = nla_total_size(0); 845 844 size += num_vfs * 846 - (nla_total_size(sizeof(struct ifla_vf_mac)) + 847 - nla_total_size(MAX_VLAN_LIST_LEN * 848 - sizeof(struct nlattr)) + 845 + (nla_total_size(0) + 846 + nla_total_size(sizeof(struct ifla_vf_mac)) + 847 + nla_total_size(sizeof(struct ifla_vf_vlan)) + 848 + nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */ 849 849 nla_total_size(MAX_VLAN_LIST_LEN * 850 850 sizeof(struct ifla_vf_vlan_info)) + 851 851 nla_total_size(sizeof(struct ifla_vf_spoofchk)) + 852 + nla_total_size(sizeof(struct ifla_vf_tx_rate)) + 852 853 nla_total_size(sizeof(struct ifla_vf_rate)) + 853 854 nla_total_size(sizeof(struct ifla_vf_link_state)) + 854 855 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) + 856 + nla_total_size(0) + /* nest IFLA_VF_STATS */ 855 857 /* IFLA_VF_STATS_RX_PACKETS */ 856 858 nla_total_size_64bit(sizeof(__u64)) + 857 859 /* IFLA_VF_STATS_TX_PACKETS */ ··· 901 899 902 900 static size_t rtnl_xdp_size(const struct net_device *dev) 903 901 { 904 - size_t xdp_size = nla_total_size(1); /* XDP_ATTACHED */ 902 + size_t xdp_size = nla_total_size(0) + /* nest IFLA_XDP */ 903 + nla_total_size(1); /* XDP_ATTACHED */ 905 904 906 905 if (!dev->netdev_ops->ndo_xdp) 907 906 return 0; ··· 1609 1606 head = &net->dev_index_head[h]; 1610 1607 hlist_for_each_entry(dev, head, index_hlist) { 1611 1608 if (link_dump_filtered(dev, master_idx, kind_ops)) 1612 - continue; 1609 + goto cont; 1613 1610 if (idx < s_idx) 1614 1611 goto cont; 1615 1612 err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK, ··· 2852 2849 2853 2850 static inline size_t rtnl_fdb_nlmsg_size(void) 2854 2851 { 2855 - return NLMSG_ALIGN(sizeof(struct ndmsg)) + nla_total_size(ETH_ALEN); 
2852 + return NLMSG_ALIGN(sizeof(struct ndmsg)) + 2853 + nla_total_size(ETH_ALEN) + /* NDA_LLADDR */ 2854 + nla_total_size(sizeof(u16)) + /* NDA_VLAN */ 2855 + 0; 2856 2856 } 2857 2857 2858 2858 static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
+15 -5
net/ipv4/fib_frontend.c
··· 151 151 152 152 int fib_unmerge(struct net *net) 153 153 { 154 - struct fib_table *old, *new; 154 + struct fib_table *old, *new, *main_table; 155 155 156 156 /* attempt to fetch local table if it has been allocated */ 157 157 old = fib_get_table(net, RT_TABLE_LOCAL); ··· 162 162 if (!new) 163 163 return -ENOMEM; 164 164 165 + /* table is already unmerged */ 166 + if (new == old) 167 + return 0; 168 + 165 169 /* replace merged table with clean table */ 166 - if (new != old) { 167 - fib_replace_table(net, old, new); 168 - fib_free_table(old); 169 - } 170 + fib_replace_table(net, old, new); 171 + fib_free_table(old); 172 + 173 + /* attempt to fetch main table if it has been allocated */ 174 + main_table = fib_get_table(net, RT_TABLE_MAIN); 175 + if (!main_table) 176 + return 0; 177 + 178 + /* flush local entries from main table */ 179 + fib_table_flush_external(main_table); 170 180 171 181 return 0; 172 182 }
+68 -1
net/ipv4/fib_trie.c
··· 1743 1743 local_l = fib_find_node(lt, &local_tp, l->key); 1744 1744 1745 1745 if (fib_insert_alias(lt, local_tp, local_l, new_fa, 1746 - NULL, l->key)) 1746 + NULL, l->key)) { 1747 + kmem_cache_free(fn_alias_kmem, new_fa); 1747 1748 goto out; 1749 + } 1748 1750 } 1749 1751 1750 1752 /* stop loop if key wrapped back to 0 */ ··· 1760 1758 fib_trie_free(local_tb); 1761 1759 1762 1760 return NULL; 1761 + } 1762 + 1763 + /* Caller must hold RTNL */ 1764 + void fib_table_flush_external(struct fib_table *tb) 1765 + { 1766 + struct trie *t = (struct trie *)tb->tb_data; 1767 + struct key_vector *pn = t->kv; 1768 + unsigned long cindex = 1; 1769 + struct hlist_node *tmp; 1770 + struct fib_alias *fa; 1771 + 1772 + /* walk trie in reverse order */ 1773 + for (;;) { 1774 + unsigned char slen = 0; 1775 + struct key_vector *n; 1776 + 1777 + if (!(cindex--)) { 1778 + t_key pkey = pn->key; 1779 + 1780 + /* cannot resize the trie vector */ 1781 + if (IS_TRIE(pn)) 1782 + break; 1783 + 1784 + /* resize completed node */ 1785 + pn = resize(t, pn); 1786 + cindex = get_index(pkey, pn); 1787 + 1788 + continue; 1789 + } 1790 + 1791 + /* grab the next available node */ 1792 + n = get_child(pn, cindex); 1793 + if (!n) 1794 + continue; 1795 + 1796 + if (IS_TNODE(n)) { 1797 + /* record pn and cindex for leaf walking */ 1798 + pn = n; 1799 + cindex = 1ul << n->bits; 1800 + 1801 + continue; 1802 + } 1803 + 1804 + hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) { 1805 + /* if alias was cloned to local then we just 1806 + * need to remove the local copy from main 1807 + */ 1808 + if (tb->tb_id != fa->tb_id) { 1809 + hlist_del_rcu(&fa->fa_list); 1810 + alias_free_mem_rcu(fa); 1811 + continue; 1812 + } 1813 + 1814 + /* record local slen */ 1815 + slen = fa->fa_slen; 1816 + } 1817 + 1818 + /* update leaf slen */ 1819 + n->slen = slen; 1820 + 1821 + if (hlist_empty(&n->leaf)) { 1822 + put_child_root(pn, n->key, NULL); 1823 + node_free(n); 1824 + } 1825 + } 1763 1826 } 1764 1827 1765 1828 /* 
Caller must hold RTNL. */
+36 -14
net/ipv4/igmp.c
··· 162 162 } 163 163 164 164 static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im); 165 - static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr); 165 + static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im); 166 166 static void igmpv3_clear_delrec(struct in_device *in_dev); 167 167 static int sf_setstate(struct ip_mc_list *pmc); 168 168 static void sf_markstate(struct ip_mc_list *pmc); ··· 1130 1130 spin_unlock_bh(&in_dev->mc_tomb_lock); 1131 1131 } 1132 1132 1133 - static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr) 1133 + /* 1134 + * restore ip_mc_list deleted records 1135 + */ 1136 + static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im) 1134 1137 { 1135 1138 struct ip_mc_list *pmc, *pmc_prev; 1136 - struct ip_sf_list *psf, *psf_next; 1139 + struct ip_sf_list *psf; 1140 + struct net *net = dev_net(in_dev->dev); 1141 + __be32 multiaddr = im->multiaddr; 1137 1142 1138 1143 spin_lock_bh(&in_dev->mc_tomb_lock); 1139 1144 pmc_prev = NULL; ··· 1154 1149 in_dev->mc_tomb = pmc->next; 1155 1150 } 1156 1151 spin_unlock_bh(&in_dev->mc_tomb_lock); 1152 + 1153 + spin_lock_bh(&im->lock); 1157 1154 if (pmc) { 1158 - for (psf = pmc->tomb; psf; psf = psf_next) { 1159 - psf_next = psf->sf_next; 1160 - kfree(psf); 1155 + im->interface = pmc->interface; 1156 + im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; 1157 + im->sfmode = pmc->sfmode; 1158 + if (pmc->sfmode == MCAST_INCLUDE) { 1159 + im->tomb = pmc->tomb; 1160 + im->sources = pmc->sources; 1161 + for (psf = im->sources; psf; psf = psf->sf_next) 1162 + psf->sf_crcount = im->crcount; 1161 1163 } 1162 1164 in_dev_put(pmc->interface); 1163 - kfree(pmc); 1164 1165 } 1166 + spin_unlock_bh(&im->lock); 1165 1167 } 1166 1168 1169 + /* 1170 + * flush ip_mc_list deleted records 1171 + */ 1167 1172 static void igmpv3_clear_delrec(struct in_device *in_dev) 1168 1173 { 1169 1174 struct ip_mc_list *pmc, *nextpmc; ··· 
1381 1366 ip_mc_hash_add(in_dev, im); 1382 1367 1383 1368 #ifdef CONFIG_IP_MULTICAST 1384 - igmpv3_del_delrec(in_dev, im->multiaddr); 1369 + igmpv3_del_delrec(in_dev, im); 1385 1370 #endif 1386 1371 igmp_group_added(im); 1387 1372 if (!in_dev->dead) ··· 1641 1626 1642 1627 ASSERT_RTNL(); 1643 1628 1644 - for_each_pmc_rtnl(in_dev, pmc) 1629 + for_each_pmc_rtnl(in_dev, pmc) { 1630 + #ifdef CONFIG_IP_MULTICAST 1631 + igmpv3_del_delrec(in_dev, pmc); 1632 + #endif 1645 1633 igmp_group_added(pmc); 1634 + } 1646 1635 } 1647 1636 1648 1637 /* Device going down */ ··· 1667 1648 in_dev->mr_gq_running = 0; 1668 1649 if (del_timer(&in_dev->mr_gq_timer)) 1669 1650 __in_dev_put(in_dev); 1670 - igmpv3_clear_delrec(in_dev); 1671 1651 #endif 1672 1652 1673 1653 ip_mc_dec_group(in_dev, IGMP_ALL_HOSTS); ··· 1706 1688 #endif 1707 1689 ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS); 1708 1690 1709 - for_each_pmc_rtnl(in_dev, pmc) 1691 + for_each_pmc_rtnl(in_dev, pmc) { 1692 + #ifdef CONFIG_IP_MULTICAST 1693 + igmpv3_del_delrec(in_dev, pmc); 1694 + #endif 1710 1695 igmp_group_added(pmc); 1696 + } 1711 1697 } 1712 1698 1713 1699 /* ··· 1726 1704 1727 1705 /* Deactivate timers */ 1728 1706 ip_mc_down(in_dev); 1707 + #ifdef CONFIG_IP_MULTICAST 1708 + igmpv3_clear_delrec(in_dev); 1709 + #endif 1729 1710 1730 1711 while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) { 1731 1712 in_dev->mc_list = i->next_rcu; 1732 1713 in_dev->mc_count--; 1733 - 1734 - /* We've dropped the groups in ip_mc_down already */ 1735 - ip_mc_clear_src(i); 1736 1714 ip_ma_put(i); 1737 1715 } 1738 1716 }
+3 -1
net/ipv4/tcp_cong.c
··· 201 201 icsk->icsk_ca_ops = ca; 202 202 icsk->icsk_ca_setsockopt = 1; 203 203 204 - if (sk->sk_state != TCP_CLOSE) 204 + if (sk->sk_state != TCP_CLOSE) { 205 + memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv)); 205 206 tcp_init_congestion_control(sk); 207 + } 206 208 } 207 209 208 210 /* Manage refcounts on socket close. */
+3 -3
net/ipv4/udp.c
··· 1743 1743 1744 1744 if (use_hash2) { 1745 1745 hash2_any = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) & 1746 - udp_table.mask; 1747 - hash2 = udp4_portaddr_hash(net, daddr, hnum) & udp_table.mask; 1746 + udptable->mask; 1747 + hash2 = udp4_portaddr_hash(net, daddr, hnum) & udptable->mask; 1748 1748 start_lookup: 1749 - hslot = &udp_table.hash2[hash2]; 1749 + hslot = &udptable->hash2[hash2]; 1750 1750 offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node); 1751 1751 } 1752 1752
+11 -2
net/ipv6/ip6_tunnel.c
··· 1034 1034 int mtu; 1035 1035 unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen; 1036 1036 unsigned int max_headroom = psh_hlen; 1037 + bool use_cache = false; 1037 1038 u8 hop_limit; 1038 1039 int err = -1; 1039 1040 ··· 1067 1066 1068 1067 memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr)); 1069 1068 neigh_release(neigh); 1070 - } else if (!fl6->flowi6_mark) 1069 + } else if (!(t->parms.flags & 1070 + (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) { 1071 + /* enable the cache only only if the routing decision does 1072 + * not depend on the current inner header value 1073 + */ 1074 + use_cache = true; 1075 + } 1076 + 1077 + if (use_cache) 1071 1078 dst = dst_cache_get(&t->dst_cache); 1072 1079 1073 1080 if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr)) ··· 1159 1150 if (t->encap.type != TUNNEL_ENCAP_NONE) 1160 1151 goto tx_err_dst_release; 1161 1152 } else { 1162 - if (!fl6->flowi6_mark && ndst) 1153 + if (use_cache && ndst) 1163 1154 dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr); 1164 1155 } 1165 1156 skb_dst_set(skb, dst);
+3 -3
net/ipv6/udp.c
··· 691 691 692 692 if (use_hash2) { 693 693 hash2_any = udp6_portaddr_hash(net, &in6addr_any, hnum) & 694 - udp_table.mask; 695 - hash2 = udp6_portaddr_hash(net, daddr, hnum) & udp_table.mask; 694 + udptable->mask; 695 + hash2 = udp6_portaddr_hash(net, daddr, hnum) & udptable->mask; 696 696 start_lookup: 697 - hslot = &udp_table.hash2[hash2]; 697 + hslot = &udptable->hash2[hash2]; 698 698 offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node); 699 699 } 700 700
+1 -1
net/l2tp/l2tp_eth.c
··· 97 97 unsigned int len = skb->len; 98 98 int ret = l2tp_xmit_skb(session, skb, session->hdr_len); 99 99 100 - if (likely(ret == NET_XMIT_SUCCESS)) { 100 + if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { 101 101 atomic_long_add(len, &priv->tx_bytes); 102 102 atomic_long_inc(&priv->tx_packets); 103 103 } else {
+3 -2
net/l2tp/l2tp_ip.c
··· 251 251 int ret; 252 252 int chk_addr_ret; 253 253 254 - if (!sock_flag(sk, SOCK_ZAPPED)) 255 - return -EINVAL; 256 254 if (addr_len < sizeof(struct sockaddr_l2tpip)) 257 255 return -EINVAL; 258 256 if (addr->l2tp_family != AF_INET) ··· 265 267 read_unlock_bh(&l2tp_ip_lock); 266 268 267 269 lock_sock(sk); 270 + if (!sock_flag(sk, SOCK_ZAPPED)) 271 + goto out; 272 + 268 273 if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip)) 269 274 goto out; 270 275
+3 -2
net/l2tp/l2tp_ip6.c
··· 269 269 int addr_type; 270 270 int err; 271 271 272 - if (!sock_flag(sk, SOCK_ZAPPED)) 273 - return -EINVAL; 274 272 if (addr->l2tp_family != AF_INET6) 275 273 return -EINVAL; 276 274 if (addr_len < sizeof(*addr)) ··· 294 296 lock_sock(sk); 295 297 296 298 err = -EINVAL; 299 + if (!sock_flag(sk, SOCK_ZAPPED)) 300 + goto out_unlock; 301 + 297 302 if (sk->sk_state != TCP_CLOSE) 298 303 goto out_unlock; 299 304
+1 -1
net/mac80211/sta_info.c
··· 688 688 } 689 689 690 690 /* No need to do anything if the driver does all */ 691 - if (!local->ops->set_tim) 691 + if (ieee80211_hw_check(&local->hw, AP_LINK_PS)) 692 692 return; 693 693 694 694 if (sta->dead)
+10 -4
net/mac80211/tx.c
··· 1500 1500 struct sta_info *sta, 1501 1501 struct sk_buff *skb) 1502 1502 { 1503 - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1504 1503 struct fq *fq = &local->fq; 1505 1504 struct ieee80211_vif *vif; 1506 1505 struct txq_info *txqi; ··· 1523 1524 1524 1525 if (!txqi) 1525 1526 return false; 1526 - 1527 - info->control.vif = vif; 1528 1527 1529 1528 spin_lock_bh(&fq->lock); 1530 1529 ieee80211_txq_enqueue(local, txqi, skb); ··· 3235 3238 3236 3239 if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) { 3237 3240 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; 3238 - *ieee80211_get_qos_ctl(hdr) = tid; 3239 3241 hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid); 3240 3242 } else { 3241 3243 info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ; ··· 3359 3363 (tid_tx ? IEEE80211_TX_CTL_AMPDU : 0); 3360 3364 info->control.flags = IEEE80211_TX_CTRL_FAST_XMIT; 3361 3365 3366 + if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) { 3367 + tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; 3368 + *ieee80211_get_qos_ctl(hdr) = tid; 3369 + } 3370 + 3362 3371 __skb_queue_head_init(&tx.skbs); 3363 3372 3364 3373 tx.flags = IEEE80211_TX_UNICAST; ··· 3451 3450 ieee80211_free_txskb(&local->hw, skb); 3452 3451 goto begin; 3453 3452 } 3453 + 3454 + if (test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags)) 3455 + info->flags |= IEEE80211_TX_CTL_AMPDU; 3456 + else 3457 + info->flags &= ~IEEE80211_TX_CTL_AMPDU; 3454 3458 3455 3459 if (info->control.flags & IEEE80211_TX_CTRL_FAST_XMIT) { 3456 3460 struct sta_info *sta = container_of(txq->sta, struct sta_info,
+16
net/mac80211/vht.c
··· 270 270 vht_cap->vht_mcs.tx_mcs_map |= cpu_to_le16(peer_tx << i * 2); 271 271 } 272 272 273 + /* 274 + * This is a workaround for VHT-enabled STAs which break the spec 275 + * and have the VHT-MCS Rx map filled in with value 3 for all eight 276 + * spacial streams, an example is AR9462. 277 + * 278 + * As per spec, in section 22.1.1 Introduction to the VHT PHY 279 + * A VHT STA shall support at least single spactial stream VHT-MCSs 280 + * 0 to 7 (transmit and receive) in all supported channel widths. 281 + */ 282 + if (vht_cap->vht_mcs.rx_mcs_map == cpu_to_le16(0xFFFF)) { 283 + vht_cap->vht_supported = false; 284 + sdata_info(sdata, "Ignoring VHT IE from %pM due to invalid rx_mcs_map\n", 285 + sta->addr); 286 + return; 287 + } 288 + 273 289 /* finally set up the bandwidth */ 274 290 switch (vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { 275 291 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
+3 -2
net/sched/cls_api.c
··· 112 112 113 113 for (it_chain = chain; (tp = rtnl_dereference(*it_chain)) != NULL; 114 114 it_chain = &tp->next) 115 - tfilter_notify(net, oskb, n, tp, 0, event, false); 115 + tfilter_notify(net, oskb, n, tp, n->nlmsg_flags, event, false); 116 116 } 117 117 118 118 /* Select new prio value from the range, managed by kernel. */ ··· 430 430 if (!skb) 431 431 return -ENOBUFS; 432 432 433 - if (tcf_fill_node(net, skb, tp, fh, portid, n->nlmsg_seq, 0, event) <= 0) { 433 + if (tcf_fill_node(net, skb, tp, fh, portid, n->nlmsg_seq, 434 + n->nlmsg_flags, event) <= 0) { 434 435 kfree_skb(skb); 435 436 return -EINVAL; 436 437 }
+15
net/socket.c
··· 341 341 .get = sockfs_xattr_get, 342 342 }; 343 343 344 + static int sockfs_security_xattr_set(const struct xattr_handler *handler, 345 + struct dentry *dentry, struct inode *inode, 346 + const char *suffix, const void *value, 347 + size_t size, int flags) 348 + { 349 + /* Handled by LSM. */ 350 + return -EAGAIN; 351 + } 352 + 353 + static const struct xattr_handler sockfs_security_xattr_handler = { 354 + .prefix = XATTR_SECURITY_PREFIX, 355 + .set = sockfs_security_xattr_set, 356 + }; 357 + 344 358 static const struct xattr_handler *sockfs_xattr_handlers[] = { 345 359 &sockfs_xattr_handler, 360 + &sockfs_security_xattr_handler, 346 361 NULL 347 362 }; 348 363
+1 -10
net/sunrpc/svc_xprt.c
··· 1002 1002 void svc_age_temp_xprts_now(struct svc_serv *serv, struct sockaddr *server_addr) 1003 1003 { 1004 1004 struct svc_xprt *xprt; 1005 - struct svc_sock *svsk; 1006 - struct socket *sock; 1007 1005 struct list_head *le, *next; 1008 1006 LIST_HEAD(to_be_closed); 1009 - struct linger no_linger = { 1010 - .l_onoff = 1, 1011 - .l_linger = 0, 1012 - }; 1013 1007 1014 1008 spin_lock_bh(&serv->sv_lock); 1015 1009 list_for_each_safe(le, next, &serv->sv_tempsocks) { ··· 1021 1027 list_del_init(le); 1022 1028 xprt = list_entry(le, struct svc_xprt, xpt_list); 1023 1029 dprintk("svc_age_temp_xprts_now: closing %p\n", xprt); 1024 - svsk = container_of(xprt, struct svc_sock, sk_xprt); 1025 - sock = svsk->sk_sock; 1026 - kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER, 1027 - (char *)&no_linger, sizeof(no_linger)); 1030 + xprt->xpt_ops->xpo_kill_temp_xprt(xprt); 1028 1031 svc_close_xprt(xprt); 1029 1032 } 1030 1033 }
+21
net/sunrpc/svcsock.c
··· 451 451 return !test_bit(SOCK_NOSPACE, &svsk->sk_sock->flags); 452 452 } 453 453 454 + static void svc_tcp_kill_temp_xprt(struct svc_xprt *xprt) 455 + { 456 + struct svc_sock *svsk; 457 + struct socket *sock; 458 + struct linger no_linger = { 459 + .l_onoff = 1, 460 + .l_linger = 0, 461 + }; 462 + 463 + svsk = container_of(xprt, struct svc_sock, sk_xprt); 464 + sock = svsk->sk_sock; 465 + kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER, 466 + (char *)&no_linger, sizeof(no_linger)); 467 + } 468 + 454 469 /* 455 470 * See net/ipv6/ip_sockglue.c : ip_cmsg_recv_pktinfo 456 471 */ ··· 675 660 return NULL; 676 661 } 677 662 663 + static void svc_udp_kill_temp_xprt(struct svc_xprt *xprt) 664 + { 665 + } 666 + 678 667 static struct svc_xprt *svc_udp_create(struct svc_serv *serv, 679 668 struct net *net, 680 669 struct sockaddr *sa, int salen, ··· 698 679 .xpo_has_wspace = svc_udp_has_wspace, 699 680 .xpo_accept = svc_udp_accept, 700 681 .xpo_secure_port = svc_sock_secure_port, 682 + .xpo_kill_temp_xprt = svc_udp_kill_temp_xprt, 701 683 }; 702 684 703 685 static struct svc_xprt_class svc_udp_class = { ··· 1274 1254 .xpo_has_wspace = svc_tcp_has_wspace, 1275 1255 .xpo_accept = svc_tcp_accept, 1276 1256 .xpo_secure_port = svc_sock_secure_port, 1257 + .xpo_kill_temp_xprt = svc_tcp_kill_temp_xprt, 1277 1258 }; 1278 1259 1279 1260 static struct svc_xprt_class svc_tcp_class = {
+6
net/sunrpc/xprtrdma/svc_rdma_transport.c
··· 67 67 static void svc_rdma_free(struct svc_xprt *xprt); 68 68 static int svc_rdma_has_wspace(struct svc_xprt *xprt); 69 69 static int svc_rdma_secure_port(struct svc_rqst *); 70 + static void svc_rdma_kill_temp_xprt(struct svc_xprt *); 70 71 71 72 static struct svc_xprt_ops svc_rdma_ops = { 72 73 .xpo_create = svc_rdma_create, ··· 80 79 .xpo_has_wspace = svc_rdma_has_wspace, 81 80 .xpo_accept = svc_rdma_accept, 82 81 .xpo_secure_port = svc_rdma_secure_port, 82 + .xpo_kill_temp_xprt = svc_rdma_kill_temp_xprt, 83 83 }; 84 84 85 85 struct svc_xprt_class svc_rdma_class = { ··· 1317 1315 static int svc_rdma_secure_port(struct svc_rqst *rqstp) 1318 1316 { 1319 1317 return 1; 1318 + } 1319 + 1320 + static void svc_rdma_kill_temp_xprt(struct svc_xprt *xprt) 1321 + { 1320 1322 } 1321 1323 1322 1324 int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
+1 -47
net/tipc/socket.c
··· 1 1 /* 2 2 * net/tipc/socket.c: TIPC socket API 3 3 * 4 - * Copyright (c) 2001-2007, 2012-2015, Ericsson AB 4 + * Copyright (c) 2001-2007, 2012-2016, Ericsson AB 5 5 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems 6 6 * All rights reserved. 7 7 * ··· 127 127 static const struct proto_ops stream_ops; 128 128 static const struct proto_ops msg_ops; 129 129 static struct proto tipc_proto; 130 - 131 130 static const struct rhashtable_params tsk_rht_params; 132 - 133 - /* 134 - * Revised TIPC socket locking policy: 135 - * 136 - * Most socket operations take the standard socket lock when they start 137 - * and hold it until they finish (or until they need to sleep). Acquiring 138 - * this lock grants the owner exclusive access to the fields of the socket 139 - * data structures, with the exception of the backlog queue. A few socket 140 - * operations can be done without taking the socket lock because they only 141 - * read socket information that never changes during the life of the socket. 142 - * 143 - * Socket operations may acquire the lock for the associated TIPC port if they 144 - * need to perform an operation on the port. If any routine needs to acquire 145 - * both the socket lock and the port lock it must take the socket lock first 146 - * to avoid the risk of deadlock. 147 - * 148 - * The dispatcher handling incoming messages cannot grab the socket lock in 149 - * the standard fashion, since invoked it runs at the BH level and cannot block. 150 - * Instead, it checks to see if the socket lock is currently owned by someone, 151 - * and either handles the message itself or adds it to the socket's backlog 152 - * queue; in the latter case the queued message is processed once the process 153 - * owning the socket lock releases it. 154 - * 155 - * NOTE: Releasing the socket lock while an operation is sleeping overcomes 156 - * the problem of a blocked socket operation preventing any other operations 157 - * from occurring. 
However, applications must be careful if they have 158 - * multiple threads trying to send (or receive) on the same socket, as these 159 - * operations might interfere with each other. For example, doing a connect 160 - * and a receive at the same time might allow the receive to consume the 161 - * ACK message meant for the connect. While additional work could be done 162 - * to try and overcome this, it doesn't seem to be worthwhile at the present. 163 - * 164 - * NOTE: Releasing the socket lock while an operation is sleeping also ensures 165 - * that another operation that must be performed in a non-blocking manner is 166 - * not delayed for very long because the lock has already been taken. 167 - * 168 - * NOTE: This code assumes that certain fields of a port/socket pair are 169 - * constant over its lifetime; such fields can be examined without taking 170 - * the socket lock and/or port lock, and do not need to be re-read even 171 - * after resuming processing after waiting. These fields include: 172 - * - socket type 173 - * - pointer to socket sk structure (aka tipc_sock structure) 174 - * - pointer to port structure 175 - * - port reference 176 - */ 177 131 178 132 static u32 tsk_own_node(struct tipc_sock *tsk) 179 133 {
+11 -6
net/unix/af_unix.c
··· 2199 2199 * Sleep until more data has arrived. But check for races.. 2200 2200 */ 2201 2201 static long unix_stream_data_wait(struct sock *sk, long timeo, 2202 - struct sk_buff *last, unsigned int last_len) 2202 + struct sk_buff *last, unsigned int last_len, 2203 + bool freezable) 2203 2204 { 2204 2205 struct sk_buff *tail; 2205 2206 DEFINE_WAIT(wait); ··· 2221 2220 2222 2221 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); 2223 2222 unix_state_unlock(sk); 2224 - timeo = freezable_schedule_timeout(timeo); 2223 + if (freezable) 2224 + timeo = freezable_schedule_timeout(timeo); 2225 + else 2226 + timeo = schedule_timeout(timeo); 2225 2227 unix_state_lock(sk); 2226 2228 2227 2229 if (sock_flag(sk, SOCK_DEAD)) ··· 2254 2250 unsigned int splice_flags; 2255 2251 }; 2256 2252 2257 - static int unix_stream_read_generic(struct unix_stream_read_state *state) 2253 + static int unix_stream_read_generic(struct unix_stream_read_state *state, 2254 + bool freezable) 2258 2255 { 2259 2256 struct scm_cookie scm; 2260 2257 struct socket *sock = state->socket; ··· 2335 2330 mutex_unlock(&u->iolock); 2336 2331 2337 2332 timeo = unix_stream_data_wait(sk, timeo, last, 2338 - last_len); 2333 + last_len, freezable); 2339 2334 2340 2335 if (signal_pending(current)) { 2341 2336 err = sock_intr_errno(timeo); ··· 2477 2472 .flags = flags 2478 2473 }; 2479 2474 2480 - return unix_stream_read_generic(&state); 2475 + return unix_stream_read_generic(&state, true); 2481 2476 } 2482 2477 2483 2478 static int unix_stream_splice_actor(struct sk_buff *skb, ··· 2508 2503 flags & SPLICE_F_NONBLOCK) 2509 2504 state.flags = MSG_DONTWAIT; 2510 2505 2511 - return unix_stream_read_generic(&state); 2506 + return unix_stream_read_generic(&state, false); 2512 2507 } 2513 2508 2514 2509 static int unix_shutdown(struct socket *sock, int mode)
+1
net/wireless/core.h
··· 71 71 struct list_head bss_list; 72 72 struct rb_root bss_tree; 73 73 u32 bss_generation; 74 + u32 bss_entries; 74 75 struct cfg80211_scan_request *scan_req; /* protected by RTNL */ 75 76 struct sk_buff *scan_msg; 76 77 struct cfg80211_sched_scan_request __rcu *sched_scan_req;
+69
net/wireless/scan.c
··· 57 57 * also linked into the probe response struct. 58 58 */ 59 59 60 + /* 61 + * Limit the number of BSS entries stored in mac80211. Each one is 62 + * a bit over 4k at most, so this limits to roughly 4-5M of memory. 63 + * If somebody wants to really attack this though, they'd likely 64 + * use small beacons, and only one type of frame, limiting each of 65 + * the entries to a much smaller size (in order to generate more 66 + * entries in total, so overhead is bigger.) 67 + */ 68 + static int bss_entries_limit = 1000; 69 + module_param(bss_entries_limit, int, 0644); 70 + MODULE_PARM_DESC(bss_entries_limit, 71 + "limit to number of scan BSS entries (per wiphy, default 1000)"); 72 + 60 73 #define IEEE80211_SCAN_RESULT_EXPIRE (30 * HZ) 61 74 62 75 static void bss_free(struct cfg80211_internal_bss *bss) ··· 150 137 151 138 list_del_init(&bss->list); 152 139 rb_erase(&bss->rbn, &rdev->bss_tree); 140 + rdev->bss_entries--; 141 + WARN_ONCE((rdev->bss_entries == 0) ^ list_empty(&rdev->bss_list), 142 + "rdev bss entries[%d]/list[empty:%d] corruption\n", 143 + rdev->bss_entries, list_empty(&rdev->bss_list)); 153 144 bss_ref_put(rdev, bss); 154 145 return true; 155 146 } ··· 178 161 179 162 if (expired) 180 163 rdev->bss_generation++; 164 + } 165 + 166 + static bool cfg80211_bss_expire_oldest(struct cfg80211_registered_device *rdev) 167 + { 168 + struct cfg80211_internal_bss *bss, *oldest = NULL; 169 + bool ret; 170 + 171 + lockdep_assert_held(&rdev->bss_lock); 172 + 173 + list_for_each_entry(bss, &rdev->bss_list, list) { 174 + if (atomic_read(&bss->hold)) 175 + continue; 176 + 177 + if (!list_empty(&bss->hidden_list) && 178 + !bss->pub.hidden_beacon_bss) 179 + continue; 180 + 181 + if (oldest && time_before(oldest->ts, bss->ts)) 182 + continue; 183 + oldest = bss; 184 + } 185 + 186 + if (WARN_ON(!oldest)) 187 + return false; 188 + 189 + /* 190 + * The callers make sure to increase rdev->bss_generation if anything 191 + * gets removed (and a new entry added), so there's 
no need to also do 192 + * it here. 193 + */ 194 + 195 + ret = __cfg80211_unlink_bss(rdev, oldest); 196 + WARN_ON(!ret); 197 + return ret; 181 198 } 182 199 183 200 void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, ··· 740 689 const u8 *ie; 741 690 int i, ssidlen; 742 691 u8 fold = 0; 692 + u32 n_entries = 0; 743 693 744 694 ies = rcu_access_pointer(new->pub.beacon_ies); 745 695 if (WARN_ON(!ies)) ··· 764 712 /* This is the bad part ... */ 765 713 766 714 list_for_each_entry(bss, &rdev->bss_list, list) { 715 + /* 716 + * we're iterating all the entries anyway, so take the 717 + * opportunity to validate the list length accounting 718 + */ 719 + n_entries++; 720 + 767 721 if (!ether_addr_equal(bss->pub.bssid, new->pub.bssid)) 768 722 continue; 769 723 if (bss->pub.channel != new->pub.channel) ··· 797 739 rcu_assign_pointer(bss->pub.beacon_ies, 798 740 new->pub.beacon_ies); 799 741 } 742 + 743 + WARN_ONCE(n_entries != rdev->bss_entries, 744 + "rdev bss entries[%d]/list[len:%d] corruption\n", 745 + rdev->bss_entries, n_entries); 800 746 801 747 return true; 802 748 } ··· 956 894 } 957 895 } 958 896 897 + if (rdev->bss_entries >= bss_entries_limit && 898 + !cfg80211_bss_expire_oldest(rdev)) { 899 + kfree(new); 900 + goto drop; 901 + } 902 + 959 903 list_add_tail(&new->list, &rdev->bss_list); 904 + rdev->bss_entries++; 960 905 rb_insert_bss(rdev, new); 961 906 found = new; 962 907 }
+2 -1
net/wireless/util.c
··· 1159 1159 58500000, 1160 1160 65000000, 1161 1161 78000000, 1162 - 0, 1162 + /* not in the spec, but some devices use this: */ 1163 + 86500000, 1163 1164 }, 1164 1165 { 13500000, 1165 1166 27000000,
+75 -6
scripts/Makefile.build
··· 159 159 $(obj)/%.i: $(src)/%.c FORCE 160 160 $(call if_changed_dep,cpp_i_c) 161 161 162 - cmd_gensymtypes = \ 162 + # These mirror gensymtypes_S and co below, keep them in synch. 163 + cmd_gensymtypes_c = \ 163 164 $(CPP) -D__GENKSYMS__ $(c_flags) $< | \ 164 165 $(GENKSYMS) $(if $(1), -T $(2)) \ 165 166 $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX)) \ ··· 170 169 quiet_cmd_cc_symtypes_c = SYM $(quiet_modtag) $@ 171 170 cmd_cc_symtypes_c = \ 172 171 set -e; \ 173 - $(call cmd_gensymtypes,true,$@) >/dev/null; \ 172 + $(call cmd_gensymtypes_c,true,$@) >/dev/null; \ 174 173 test -s $@ || rm -f $@ 175 174 176 175 $(obj)/%.symtypes : $(src)/%.c FORCE ··· 199 198 # the actual value of the checksum generated by genksyms 200 199 201 200 cmd_cc_o_c = $(CC) $(c_flags) -c -o $(@D)/.tmp_$(@F) $< 202 - cmd_modversions = \ 201 + 202 + cmd_modversions_c = \ 203 203 if $(OBJDUMP) -h $(@D)/.tmp_$(@F) | grep -q __ksymtab; then \ 204 - $(call cmd_gensymtypes,$(KBUILD_SYMTYPES),$(@:.o=.symtypes)) \ 204 + $(call cmd_gensymtypes_c,$(KBUILD_SYMTYPES),$(@:.o=.symtypes)) \ 205 205 > $(@D)/.tmp_$(@F:.o=.ver); \ 206 206 \ 207 207 $(LD) $(LDFLAGS) -r -o $@ $(@D)/.tmp_$(@F) \ ··· 270 268 define rule_cc_o_c 271 269 $(call echo-cmd,checksrc) $(cmd_checksrc) \ 272 270 $(call cmd_and_fixdep,cc_o_c) \ 273 - $(cmd_modversions) \ 271 + $(cmd_modversions_c) \ 274 272 $(cmd_objtool) \ 275 273 $(call echo-cmd,record_mcount) $(cmd_record_mcount) 276 274 endef 277 275 278 276 define rule_as_o_S 279 277 $(call cmd_and_fixdep,as_o_S) \ 278 + $(cmd_modversions_S) \ 280 279 $(cmd_objtool) 281 280 endef 282 281 ··· 317 314 $(real-objs-m) : modkern_aflags := $(KBUILD_AFLAGS_MODULE) $(AFLAGS_MODULE) 318 315 $(real-objs-m:.o=.s): modkern_aflags := $(KBUILD_AFLAGS_MODULE) $(AFLAGS_MODULE) 319 316 317 + # .S file exports must have their C prototypes defined in asm/asm-prototypes.h 318 + # or a file that it includes, in order to get versioned symbols. 
We build a 319 + # dummy C file that includes asm-prototypes and the EXPORT_SYMBOL lines from 320 + # the .S file (with trailing ';'), and run genksyms on that, to extract vers. 321 + # 322 + # This is convoluted. The .S file must first be preprocessed to run guards and 323 + # expand names, then the resulting exports must be constructed into plain 324 + # EXPORT_SYMBOL(symbol); to build our dummy C file, and that gets preprocessed 325 + # to make the genksyms input. 326 + # 327 + # These mirror gensymtypes_c and co above, keep them in synch. 328 + cmd_gensymtypes_S = \ 329 + (echo "\#include <linux/kernel.h>" ; \ 330 + echo "\#include <asm/asm-prototypes.h>" ; \ 331 + $(CPP) $(a_flags) $< | \ 332 + grep "\<___EXPORT_SYMBOL\>" | \ 333 + sed 's/.*___EXPORT_SYMBOL[[:space:]]*\([a-zA-Z0-9_]*\)[[:space:]]*,.*/EXPORT_SYMBOL(\1);/' ) | \ 334 + $(CPP) -D__GENKSYMS__ $(c_flags) -xc - | \ 335 + $(GENKSYMS) $(if $(1), -T $(2)) \ 336 + $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX)) \ 337 + $(if $(KBUILD_PRESERVE),-p) \ 338 + -r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null)) 339 + 340 + quiet_cmd_cc_symtypes_S = SYM $(quiet_modtag) $@ 341 + cmd_cc_symtypes_S = \ 342 + set -e; \ 343 + $(call cmd_gensymtypes_S,true,$@) >/dev/null; \ 344 + test -s $@ || rm -f $@ 345 + 346 + $(obj)/%.symtypes : $(src)/%.S FORCE 347 + $(call cmd,cc_symtypes_S) 348 + 349 + 320 350 quiet_cmd_cpp_s_S = CPP $(quiet_modtag) $@ 321 351 cmd_cpp_s_S = $(CPP) $(a_flags) -o $@ $< 322 352 ··· 357 321 $(call if_changed_dep,cpp_s_S) 358 322 359 323 quiet_cmd_as_o_S = AS $(quiet_modtag) $@ 360 - cmd_as_o_S = $(CC) $(a_flags) -c -o $@ $< 324 + 325 + ifndef CONFIG_MODVERSIONS 326 + cmd_as_o_S = $(CC) $(a_flags) -c -o $@ $< 327 + 328 + else 329 + 330 + ASM_PROTOTYPES := $(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/asm-prototypes.h) 331 + 332 + ifeq ($(ASM_PROTOTYPES),) 333 + cmd_as_o_S = $(CC) $(a_flags) -c -o $@ $< 334 + 335 + else 336 + 337 + # versioning matches the C process 
described above, with difference that 338 + # we parse asm-prototypes.h C header to get function definitions. 339 + 340 + cmd_as_o_S = $(CC) $(a_flags) -c -o $(@D)/.tmp_$(@F) $< 341 + 342 + cmd_modversions_S = \ 343 + if $(OBJDUMP) -h $(@D)/.tmp_$(@F) | grep -q __ksymtab; then \ 344 + $(call cmd_gensymtypes_S,$(KBUILD_SYMTYPES),$(@:.o=.symtypes)) \ 345 + > $(@D)/.tmp_$(@F:.o=.ver); \ 346 + \ 347 + $(LD) $(LDFLAGS) -r -o $@ $(@D)/.tmp_$(@F) \ 348 + -T $(@D)/.tmp_$(@F:.o=.ver); \ 349 + rm -f $(@D)/.tmp_$(@F) $(@D)/.tmp_$(@F:.o=.ver); \ 350 + else \ 351 + mv -f $(@D)/.tmp_$(@F) $@; \ 352 + fi; 353 + endif 354 + endif 361 355 362 356 $(obj)/%.o: $(src)/%.S $(objtool_obj) FORCE 363 357 $(call if_changed_rule,as_o_S) ··· 496 430 497 431 $(obj)/lib-ksyms.o: $(lib-target) FORCE 498 432 $(call if_changed,export_list) 433 + 434 + targets += $(obj)/lib-ksyms.o 435 + 499 436 endif 500 437 501 438 #
+1 -1
scripts/gcc-x86_64-has-stack-protector.sh
··· 1 1 #!/bin/sh 2 2 3 - echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fstack-protector - -o - 2> /dev/null | grep -q "%gs" 3 + echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fno-PIE -fstack-protector - -o - 2> /dev/null | grep -q "%gs" 4 4 if [ "$?" -eq "0" ] ; then 5 5 echo y 6 6 else
+4 -2
security/apparmor/domain.c
··· 621 621 /* released below */ 622 622 cred = get_current_cred(); 623 623 cxt = cred_cxt(cred); 624 - profile = aa_cred_profile(cred); 625 - previous_profile = cxt->previous; 624 + profile = aa_get_newest_profile(aa_cred_profile(cred)); 625 + previous_profile = aa_get_newest_profile(cxt->previous); 626 626 627 627 if (unconfined(profile)) { 628 628 info = "unconfined"; ··· 718 718 out: 719 719 aa_put_profile(hat); 720 720 kfree(name); 721 + aa_put_profile(profile); 722 + aa_put_profile(previous_profile); 721 723 put_cred(cred); 722 724 723 725 return error;
-2
sound/pci/hda/patch_realtek.c
··· 6907 6907 .v.pins = (const struct hda_pintbl[]) { 6908 6908 { 0x15, 0x40f000f0 }, /* disabled */ 6909 6909 { 0x16, 0x40f000f0 }, /* disabled */ 6910 - { 0x18, 0x01014011 }, /* LO */ 6911 - { 0x1a, 0x01014012 }, /* LO */ 6912 6910 { } 6913 6911 } 6914 6912 },
+2 -1
sound/pci/hda/thinkpad_helper.c
··· 13 13 static bool is_thinkpad(struct hda_codec *codec) 14 14 { 15 15 return (codec->core.subsystem_id >> 16 == 0x17aa) && 16 - (acpi_dev_found("LEN0068") || acpi_dev_found("IBM0068")); 16 + (acpi_dev_found("LEN0068") || acpi_dev_found("LEN0268") || 17 + acpi_dev_found("IBM0068")); 17 18 } 18 19 19 20 static void update_tpacpi_mute_led(void *private_data, int enabled)
+2 -1
sound/usb/card.c
··· 315 315 snd_usb_endpoint_free(ep); 316 316 317 317 mutex_destroy(&chip->mutex); 318 - dev_set_drvdata(&chip->dev->dev, NULL); 318 + if (!atomic_read(&chip->shutdown)) 319 + dev_set_drvdata(&chip->dev->dev, NULL); 319 320 kfree(chip); 320 321 return 0; 321 322 }
+12 -11
tools/power/acpi/Makefile.config
··· 8 8 # as published by the Free Software Foundation; version 2 9 9 # of the License. 10 10 11 - include ../../../../scripts/Makefile.include 11 + ifeq ($(srctree),) 12 + srctree := $(patsubst %/,%,$(dir $(shell pwd))) 13 + srctree := $(patsubst %/,%,$(dir $(srctree))) 14 + #$(info Determined 'srctree' to be $(srctree)) 15 + endif 12 16 13 - OUTPUT=./ 17 + include $(srctree)/../../scripts/Makefile.include 18 + 19 + OUTPUT=$(srctree)/ 14 20 ifeq ("$(origin O)", "command line") 15 - OUTPUT := $(O)/ 21 + OUTPUT := $(O)/power/acpi/ 16 22 endif 17 - 18 - ifneq ($(OUTPUT),) 19 - # check that the output directory actually exists 20 - OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd) 21 - $(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist)) 22 - endif 23 + #$(info Determined 'OUTPUT' to be $(OUTPUT)) 23 24 24 25 # --- CONFIGURATION BEGIN --- 25 26 ··· 71 70 WARNINGS += $(call cc-supports,-Wstrict-prototypes) 72 71 WARNINGS += $(call cc-supports,-Wdeclaration-after-statement) 73 72 74 - KERNEL_INCLUDE := ../../../include 75 - ACPICA_INCLUDE := ../../../drivers/acpi/acpica 73 + KERNEL_INCLUDE := $(OUTPUT)include 74 + ACPICA_INCLUDE := $(srctree)/../../../drivers/acpi/acpica 76 75 CFLAGS += -D_LINUX -I$(KERNEL_INCLUDE) -I$(ACPICA_INCLUDE) 77 76 CFLAGS += $(WARNINGS) 78 77
+27 -13
tools/power/acpi/Makefile.rules
··· 8 8 # as published by the Free Software Foundation; version 2 9 9 # of the License. 10 10 11 - $(OUTPUT)$(TOOL): $(TOOL_OBJS) FORCE 12 - $(ECHO) " LD " $@ 13 - $(QUIET) $(LD) $(CFLAGS) $(LDFLAGS) $(TOOL_OBJS) -L$(OUTPUT) -o $@ 11 + objdir := $(OUTPUT)tools/$(TOOL)/ 12 + toolobjs := $(addprefix $(objdir),$(TOOL_OBJS)) 13 + $(OUTPUT)$(TOOL): $(toolobjs) FORCE 14 + $(ECHO) " LD " $(subst $(OUTPUT),,$@) 15 + $(QUIET) $(LD) $(CFLAGS) $(LDFLAGS) $(toolobjs) -L$(OUTPUT) -o $@ 16 + $(ECHO) " STRIP " $(subst $(OUTPUT),,$@) 14 17 $(QUIET) $(STRIPCMD) $@ 15 18 16 - $(OUTPUT)%.o: %.c 17 - $(ECHO) " CC " $@ 19 + $(KERNEL_INCLUDE): 20 + $(ECHO) " MKDIR " $(subst $(OUTPUT),,$@) 21 + $(QUIET) mkdir -p $(KERNEL_INCLUDE) 22 + $(ECHO) " CP " $(subst $(OUTPUT),,$@) 23 + $(QUIET) cp -rf $(srctree)/../../../include/acpi $(KERNEL_INCLUDE)/ 24 + 25 + $(objdir)%.o: %.c $(KERNEL_INCLUDE) 26 + $(ECHO) " CC " $(subst $(OUTPUT),,$@) 18 27 $(QUIET) $(CC) -c $(CFLAGS) -o $@ $< 19 28 20 29 all: $(OUTPUT)$(TOOL) 21 30 clean: 22 - -find $(OUTPUT) \( -not -type d \) \ 23 - -and \( -name '*~' -o -name '*.[oas]' \) \ 24 - -type f -print \ 25 - | xargs rm -f 26 - -rm -f $(OUTPUT)$(TOOL) 31 + $(ECHO) " RMOBJ " $(subst $(OUTPUT),,$(objdir)) 32 + $(QUIET) find $(objdir) \( -not -type d \)\ 33 + -and \( -name '*~' -o -name '*.[oas]' \)\ 34 + -type f -print | xargs rm -f 35 + $(ECHO) " RM " $(TOOL) 36 + $(QUIET) rm -f $(OUTPUT)$(TOOL) 37 + $(ECHO) " RMINC " $(subst $(OUTPUT),,$(KERNEL_INCLUDE)) 38 + $(QUIET) rm -rf $(KERNEL_INCLUDE) 27 39 28 40 install-tools: 29 - $(INSTALL) -d $(DESTDIR)${sbindir} 30 - $(INSTALL_PROGRAM) $(OUTPUT)$(TOOL) $(DESTDIR)${sbindir} 41 + $(ECHO) " INST " $(TOOL) 42 + $(QUIET) $(INSTALL) -d $(DESTDIR)$(sbindir) 43 + $(QUIET) $(INSTALL_PROGRAM) $(OUTPUT)$(TOOL) $(DESTDIR)$(sbindir) 31 44 uninstall-tools: 32 - - rm -f $(DESTDIR)${sbindir}/$(TOOL) 45 + $(ECHO) " UNINST " $(TOOL) 46 + $(QUIET) rm -f $(DESTDIR)$(sbindir)/$(TOOL) 33 47 34 48 install: all install-tools 
$(EXTRA_INSTALL) 35 49 uninstall: uninstall-tools $(EXTRA_UNINSTALL)
+1 -3
tools/power/acpi/tools/acpidbg/Makefile
··· 17 17 ../../os_specific/service_layers\ 18 18 . 19 19 CFLAGS += -DACPI_APPLICATION -DACPI_SINGLE_THREAD -DACPI_DEBUGGER\ 20 - -I.\ 21 - -I../../../../../drivers/acpi/acpica\ 22 - -I../../../../../include 20 + -I. 23 21 LDFLAGS += -lpthread 24 22 TOOL_OBJS = \ 25 23 acpidbg.o
+7 -1
tools/power/acpi/tools/acpidbg/acpidbg.c
··· 12 12 #include <acpi/acpi.h> 13 13 14 14 /* Headers not included by include/acpi/platform/aclinux.h */ 15 + #include <unistd.h> 16 + #include <stdio.h> 17 + #include <stdlib.h> 18 + #include <string.h> 19 + #include <error.h> 15 20 #include <stdbool.h> 16 21 #include <fcntl.h> 17 22 #include <assert.h> 18 - #include <linux/circ_buf.h> 23 + #include <sys/select.h> 24 + #include "../../../../../include/linux/circ_buf.h" 19 25 20 26 #define ACPI_AML_FILE "/sys/kernel/debug/acpi/acpidbg" 21 27 #define ACPI_AML_SEC_TICK 1
+6 -6
tools/power/acpi/tools/acpidump/Makefile
··· 19 19 ./\ 20 20 ../../common\ 21 21 ../../os_specific/service_layers 22 - CFLAGS += -DACPI_DUMP_APP -I.\ 23 - -I../../../../../drivers/acpi/acpica\ 24 - -I../../../../../include 22 + CFLAGS += -DACPI_DUMP_APP -I. 25 23 TOOL_OBJS = \ 26 24 apdump.o\ 27 25 apfiles.o\ ··· 47 49 48 50 include ../../Makefile.rules 49 51 50 - install-man: ../../man/acpidump.8 51 - $(INSTALL_DATA) -D $< $(DESTDIR)${mandir}/man8/acpidump.8 52 + install-man: $(srctree)/man/acpidump.8 53 + $(ECHO) " INST " acpidump.8 54 + $(QUIET) $(INSTALL_DATA) -D $< $(DESTDIR)$(mandir)/man8/acpidump.8 52 55 uninstall-man: 53 - - rm -f $(DESTDIR)${mandir}/man8/acpidump.8 56 + $(ECHO) " UNINST " acpidump.8 57 + $(QUIET) rm -f $(DESTDIR)$(mandir)/man8/acpidump.8
+5 -3
virt/kvm/arm/pmu.c
··· 305 305 continue; 306 306 type = vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i) 307 307 & ARMV8_PMU_EVTYPE_EVENT; 308 - if ((type == ARMV8_PMU_EVTYPE_EVENT_SW_INCR) 308 + if ((type == ARMV8_PMUV3_PERFCTR_SW_INCR) 309 309 && (enable & BIT(i))) { 310 310 reg = vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1; 311 311 reg = lower_32_bits(reg); ··· 379 379 eventsel = data & ARMV8_PMU_EVTYPE_EVENT; 380 380 381 381 /* Software increment event does't need to be backed by a perf event */ 382 - if (eventsel == ARMV8_PMU_EVTYPE_EVENT_SW_INCR) 382 + if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR && 383 + select_idx != ARMV8_PMU_CYCLE_IDX) 383 384 return; 384 385 385 386 memset(&attr, 0, sizeof(struct perf_event_attr)); ··· 392 391 attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0; 393 392 attr.exclude_hv = 1; /* Don't count EL2 events */ 394 393 attr.exclude_host = 1; /* Don't count host events */ 395 - attr.config = eventsel; 394 + attr.config = (select_idx == ARMV8_PMU_CYCLE_IDX) ? 395 + ARMV8_PMUV3_PERFCTR_CPU_CYCLES : eventsel; 396 396 397 397 counter = kvm_pmu_get_counter_value(vcpu, select_idx); 398 398 /* The initial sample period (overflow count) of an event. */
+12 -1
virt/kvm/async_pf.c
··· 91 91 92 92 spin_lock(&vcpu->async_pf.lock); 93 93 list_add_tail(&apf->link, &vcpu->async_pf.done); 94 + apf->vcpu = NULL; 94 95 spin_unlock(&vcpu->async_pf.lock); 95 96 96 97 /* ··· 114 113 115 114 void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu) 116 115 { 116 + spin_lock(&vcpu->async_pf.lock); 117 + 117 118 /* cancel outstanding work queue item */ 118 119 while (!list_empty(&vcpu->async_pf.queue)) { 119 120 struct kvm_async_pf *work = ··· 123 120 typeof(*work), queue); 124 121 list_del(&work->queue); 125 122 123 + /* 124 + * We know it's present in vcpu->async_pf.done, do 125 + * nothing here. 126 + */ 127 + if (!work->vcpu) 128 + continue; 129 + 130 + spin_unlock(&vcpu->async_pf.lock); 126 131 #ifdef CONFIG_KVM_ASYNC_PF_SYNC 127 132 flush_work(&work->work); 128 133 #else ··· 140 129 kmem_cache_free(async_pf_cache, work); 141 130 } 142 131 #endif 132 + spin_lock(&vcpu->async_pf.lock); 143 133 } 144 134 145 - spin_lock(&vcpu->async_pf.lock); 146 135 while (!list_empty(&vcpu->async_pf.done)) { 147 136 struct kvm_async_pf *work = 148 137 list_first_entry(&vcpu->async_pf.done,