Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'sched/urgent' into sched/core, to pick up fixes before applying new changes

Signed-off-by: Ingo Molnar <mingo@kernel.org>

+1951 -968
+5 -2
Documentation/devicetree/bindings/net/mediatek-net.txt
··· 9 9 Required properties: 10 10 - compatible: Should be "mediatek,mt7623-eth" 11 11 - reg: Address and length of the register set for the device 12 - - interrupts: Should contain the frame engines interrupt 12 + - interrupts: Should contain the three frame engines interrupts in numeric 13 + order. These are fe_int0, fe_int1 and fe_int2. 13 14 - clocks: the clock used by the core 14 15 - clock-names: the names of the clock listed in the clocks property. These are 15 16 "ethif", "esw", "gp2", "gp1" ··· 43 42 <&ethsys CLK_ETHSYS_GP2>, 44 43 <&ethsys CLK_ETHSYS_GP1>; 45 44 clock-names = "ethif", "esw", "gp2", "gp1"; 46 - interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_LOW>; 45 + interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_LOW 46 + GIC_SPI 199 IRQ_TYPE_LEVEL_LOW 47 + GIC_SPI 198 IRQ_TYPE_LEVEL_LOW>; 47 48 power-domains = <&scpsys MT2701_POWER_DOMAIN_ETH>; 48 49 resets = <&ethsys MT2701_ETHSYS_ETH_RST>; 49 50 reset-names = "eth";
+11 -7
Documentation/devicetree/bindings/phy/rockchip-dp-phy.txt
··· 8 8 of memory mapped region. 9 9 - clock-names: from common clock binding: 10 10 Required elements: "24m" 11 - - rockchip,grf: phandle to the syscon managing the "general register files" 12 11 - #phy-cells : from the generic PHY bindings, must be 0; 13 12 14 13 Example: 15 14 16 - edp_phy: edp-phy { 17 - compatible = "rockchip,rk3288-dp-phy"; 18 - rockchip,grf = <&grf>; 19 - clocks = <&cru SCLK_EDP_24M>; 20 - clock-names = "24m"; 21 - #phy-cells = <0>; 15 + grf: syscon@ff770000 { 16 + compatible = "rockchip,rk3288-grf", "syscon", "simple-mfd"; 17 + 18 + ... 19 + 20 + edp_phy: edp-phy { 21 + compatible = "rockchip,rk3288-dp-phy"; 22 + clocks = <&cru SCLK_EDP_24M>; 23 + clock-names = "24m"; 24 + #phy-cells = <0>; 25 + }; 22 26 };
+14 -8
Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt
··· 3 3 4 4 Required properties: 5 5 - compatible: rockchip,rk3399-emmc-phy 6 - - rockchip,grf : phandle to the syscon managing the "general 7 - register files" 8 6 - #phy-cells: must be 0 9 - - reg: PHY configure reg address offset in "general 7 + - reg: PHY register address offset and length in "general 10 8 register files" 11 9 12 10 Example: 13 11 14 - emmcphy: phy { 15 - compatible = "rockchip,rk3399-emmc-phy"; 16 - rockchip,grf = <&grf>; 17 - reg = <0xf780>; 18 - #phy-cells = <0>; 12 + 13 + grf: syscon@ff770000 { 14 + compatible = "rockchip,rk3399-grf", "syscon", "simple-mfd"; 15 + #address-cells = <1>; 16 + #size-cells = <1>; 17 + 18 + ... 19 + 20 + emmcphy: phy@f780 { 21 + compatible = "rockchip,rk3399-emmc-phy"; 22 + reg = <0xf780 0x20>; 23 + #phy-cells = <0>; 24 + }; 19 25 };
+4 -3
Documentation/devicetree/bindings/rtc/s3c-rtc.txt
··· 15 15 is the rtc tick interrupt. The number of cells representing a interrupt 16 16 depends on the parent interrupt controller. 17 17 - clocks: Must contain a list of phandle and clock specifier for the rtc 18 - and source clocks. 19 - - clock-names: Must contain "rtc" and "rtc_src" entries sorted in the 20 - same order as the clocks property. 18 + clock and in the case of a s3c6410 compatible controller, also 19 + a source clock. 20 + - clock-names: Must contain "rtc" and for a s3c6410 compatible controller, 21 + a "rtc_src" sorted in the same order as the clocks property. 21 22 22 23 Example: 23 24
+4
Documentation/input/event-codes.txt
··· 173 173 proximity of the device and while the value of the BTN_TOUCH code is 0. If 174 174 the input device may be used freely in three dimensions, consider ABS_Z 175 175 instead. 176 + - BTN_TOOL_<name> should be set to 1 when the tool comes into detectable 177 + proximity and set to 0 when the tool leaves detectable proximity. 178 + BTN_TOOL_<name> signals the type of tool that is currently detected by the 179 + hardware and is otherwise independent of ABS_DISTANCE and/or BTN_TOUCH. 176 180 177 181 * ABS_MT_<name>: 178 182 - Used to describe multitouch input events. Please see
+3 -3
Documentation/x86/x86_64/mm.txt
··· 19 19 ffffffef00000000 - ffffffff00000000 (=64 GB) EFI region mapping space 20 20 ... unused hole ... 21 21 ffffffff80000000 - ffffffffa0000000 (=512 MB) kernel text mapping, from phys 0 22 - ffffffffa0000000 - ffffffffff5fffff (=1525 MB) module mapping space 22 + ffffffffa0000000 - ffffffffff5fffff (=1526 MB) module mapping space 23 23 ffffffffff600000 - ffffffffffdfffff (=8 MB) vsyscalls 24 24 ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole 25 25 ··· 31 31 the processes using the page fault handler, with init_level4_pgt as 32 32 reference. 33 33 34 - Current X86-64 implementations only support 40 bits of address space, 35 - but we support up to 46 bits. This expands into MBZ space in the page tables. 34 + Current X86-64 implementations support up to 46 bits of address space (64 TB), 35 + which is our current limit. This expands into MBZ space in the page tables. 36 36 37 37 We map EFI runtime services in the 'efi_pgd' PGD in a 64Gb large virtual 38 38 memory window (this size is arbitrary, it can be raised later if needed).
+3 -2
Makefile
··· 1 1 VERSION = 4 2 2 PATCHLEVEL = 6 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc4 4 + EXTRAVERSION = -rc5 5 5 NAME = Blurry Fish Butt 6 6 7 7 # *DOCUMENTATION* ··· 1008 1008 prepare: prepare0 prepare-objtool 1009 1009 1010 1010 ifdef CONFIG_STACK_VALIDATION 1011 - has_libelf := $(shell echo "int main() {}" | $(HOSTCC) -xc -o /dev/null -lelf - &> /dev/null && echo 1 || echo 0) 1011 + has_libelf := $(call try-run,\ 1012 + echo "int main() {}" | $(HOSTCC) -xc -o /dev/null -lelf -,1,0) 1012 1013 ifeq ($(has_libelf),1) 1013 1014 objtool_target := tools/objtool FORCE 1014 1015 else
+1 -1
arch/arm/include/asm/cputype.h
··· 276 276 int feature = (features >> field) & 15; 277 277 278 278 /* feature registers are signed values */ 279 - if (feature > 8) 279 + if (feature > 7) 280 280 feature -= 16; 281 281 282 282 return feature;
+1 -1
arch/arm/kernel/setup.c
··· 512 512 */ 513 513 if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 || 514 514 (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 && 515 - cpuid_feature_extract(CPUID_EXT_ISAR3, 20) >= 3)) 515 + cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3)) 516 516 elf_hwcap &= ~HWCAP_SWP; 517 517 } 518 518
+2 -1
arch/arm/mm/dma-mapping.c
··· 762 762 if (!mask) 763 763 return NULL; 764 764 765 - buf = kzalloc(sizeof(*buf), gfp); 765 + buf = kzalloc(sizeof(*buf), 766 + gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM)); 766 767 if (!buf) 767 768 return NULL; 768 769
+12 -1
arch/arm64/kernel/head.S
··· 588 588 msr vpidr_el2, x0 589 589 msr vmpidr_el2, x1 590 590 591 + /* 592 + * When VHE is not in use, early init of EL2 and EL1 needs to be 593 + * done here. 594 + * When VHE _is_ in use, EL1 will not be used in the host and 595 + * requires no configuration, and all non-hyp-specific EL2 setup 596 + * will be done via the _EL1 system register aliases in __cpu_setup. 597 + */ 598 + cbnz x2, 1f 599 + 591 600 /* sctlr_el1 */ 592 601 mov x0, #0x0800 // Set/clear RES{1,0} bits 593 602 CPU_BE( movk x0, #0x33d0, lsl #16 ) // Set EE and E0E on BE systems ··· 606 597 /* Coprocessor traps. */ 607 598 mov x0, #0x33ff 608 599 msr cptr_el2, x0 // Disable copro. traps to EL2 600 + 1: 609 601 610 602 #ifdef CONFIG_COMPAT 611 603 msr hstr_el2, xzr // Disable CP15 traps to EL2 ··· 744 734 745 735 .macro update_early_cpu_boot_status status, tmp1, tmp2 746 736 mov \tmp2, #\status 747 - str_l \tmp2, __early_cpu_boot_status, \tmp1 737 + adr_l \tmp1, __early_cpu_boot_status 738 + str \tmp2, [\tmp1] 748 739 dmb sy 749 740 dc ivac, \tmp1 // Invalidate potentially stale cache line 750 741 .endm
+6 -5
arch/arm64/kernel/smp_spin_table.c
··· 52 52 static int smp_spin_table_cpu_init(unsigned int cpu) 53 53 { 54 54 struct device_node *dn; 55 + int ret; 55 56 56 57 dn = of_get_cpu_node(cpu, NULL); 57 58 if (!dn) ··· 61 60 /* 62 61 * Determine the address from which the CPU is polling. 63 62 */ 64 - if (of_property_read_u64(dn, "cpu-release-addr", 65 - &cpu_release_addr[cpu])) { 63 + ret = of_property_read_u64(dn, "cpu-release-addr", 64 + &cpu_release_addr[cpu]); 65 + if (ret) 66 66 pr_err("CPU %d: missing or invalid cpu-release-addr property\n", 67 67 cpu); 68 68 69 - return -1; 70 - } 69 + of_node_put(dn); 71 70 72 - return 0; 71 + return ret; 73 72 } 74 73 75 74 static int smp_spin_table_cpu_prepare(unsigned int cpu)
+1
arch/powerpc/include/uapi/asm/cputable.h
··· 31 31 #define PPC_FEATURE_PSERIES_PERFMON_COMPAT \ 32 32 0x00000040 33 33 34 + /* Reserved - do not use 0x00000004 */ 34 35 #define PPC_FEATURE_TRUE_LE 0x00000002 35 36 #define PPC_FEATURE_PPC_LE 0x00000001 36 37
+15 -11
arch/powerpc/kernel/prom.c
··· 148 148 unsigned long cpu_features; /* CPU_FTR_xxx bit */ 149 149 unsigned long mmu_features; /* MMU_FTR_xxx bit */ 150 150 unsigned int cpu_user_ftrs; /* PPC_FEATURE_xxx bit */ 151 + unsigned int cpu_user_ftrs2; /* PPC_FEATURE2_xxx bit */ 151 152 unsigned char pabyte; /* byte number in ibm,pa-features */ 152 153 unsigned char pabit; /* bit number (big-endian) */ 153 154 unsigned char invert; /* if 1, pa bit set => clear feature */ 154 155 } ibm_pa_features[] __initdata = { 155 - {0, 0, PPC_FEATURE_HAS_MMU, 0, 0, 0}, 156 - {0, 0, PPC_FEATURE_HAS_FPU, 0, 1, 0}, 157 - {CPU_FTR_CTRL, 0, 0, 0, 3, 0}, 158 - {CPU_FTR_NOEXECUTE, 0, 0, 0, 6, 0}, 159 - {CPU_FTR_NODSISRALIGN, 0, 0, 1, 1, 1}, 160 - {0, MMU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0}, 161 - {CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0}, 156 + {0, 0, PPC_FEATURE_HAS_MMU, 0, 0, 0, 0}, 157 + {0, 0, PPC_FEATURE_HAS_FPU, 0, 0, 1, 0}, 158 + {CPU_FTR_CTRL, 0, 0, 0, 0, 3, 0}, 159 + {CPU_FTR_NOEXECUTE, 0, 0, 0, 0, 6, 0}, 160 + {CPU_FTR_NODSISRALIGN, 0, 0, 0, 1, 1, 1}, 161 + {0, MMU_FTR_CI_LARGE_PAGE, 0, 0, 1, 2, 0}, 162 + {CPU_FTR_REAL_LE, 0, PPC_FEATURE_TRUE_LE, 0, 5, 0, 0}, 162 163 /* 163 - * If the kernel doesn't support TM (ie. CONFIG_PPC_TRANSACTIONAL_MEM=n), 164 - * we don't want to turn on CPU_FTR_TM here, so we use CPU_FTR_TM_COMP 165 - * which is 0 if the kernel doesn't support TM. 164 + * If the kernel doesn't support TM (ie CONFIG_PPC_TRANSACTIONAL_MEM=n), 165 + * we don't want to turn on TM here, so we use the *_COMP versions 166 + * which are 0 if the kernel doesn't support TM. 
166 167 */ 167 - {CPU_FTR_TM_COMP, 0, 0, 22, 0, 0}, 168 + {CPU_FTR_TM_COMP, 0, 0, 169 + PPC_FEATURE2_HTM_COMP|PPC_FEATURE2_HTM_NOSC_COMP, 22, 0, 0}, 168 170 }; 169 171 170 172 static void __init scan_features(unsigned long node, const unsigned char *ftrs, ··· 197 195 if (bit ^ fp->invert) { 198 196 cur_cpu_spec->cpu_features |= fp->cpu_features; 199 197 cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs; 198 + cur_cpu_spec->cpu_user_features2 |= fp->cpu_user_ftrs2; 200 199 cur_cpu_spec->mmu_features |= fp->mmu_features; 201 200 } else { 202 201 cur_cpu_spec->cpu_features &= ~fp->cpu_features; 203 202 cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs; 203 + cur_cpu_spec->cpu_user_features2 &= ~fp->cpu_user_ftrs2; 204 204 cur_cpu_spec->mmu_features &= ~fp->mmu_features; 205 205 } 206 206 }
+3
arch/s390/Kconfig
··· 4 4 config ZONE_DMA 5 5 def_bool y 6 6 7 + config CPU_BIG_ENDIAN 8 + def_bool y 9 + 7 10 config LOCKDEP_SUPPORT 8 11 def_bool y 9 12
+2 -1
arch/s390/include/asm/pci.h
··· 44 44 u64 rpcit_ops; 45 45 u64 dma_rbytes; 46 46 u64 dma_wbytes; 47 - } __packed __aligned(64); 47 + u64 pad[2]; 48 + } __packed __aligned(128); 48 49 49 50 enum zpci_state { 50 51 ZPCI_FN_STATE_RESERVED,
+2
arch/s390/include/asm/seccomp.h
··· 13 13 #define __NR_seccomp_exit_32 __NR_exit 14 14 #define __NR_seccomp_sigreturn_32 __NR_sigreturn 15 15 16 + #include <asm-generic/seccomp.h> 17 + 16 18 #endif /* _ASM_S390_SECCOMP_H */
+1
arch/s390/lib/spinlock.c
··· 105 105 if (_raw_compare_and_swap(&lp->lock, 0, cpu)) 106 106 return; 107 107 local_irq_restore(flags); 108 + continue; 108 109 } 109 110 /* Check if the lock owner is running. */ 110 111 if (first_diag && cpu_is_preempted(~owner)) {
+2 -2
arch/x86/crypto/sha-mb/sha1_mb.c
··· 453 453 454 454 req = cast_mcryptd_ctx_to_req(req_ctx); 455 455 if (irqs_disabled()) 456 - rctx->complete(&req->base, ret); 456 + req_ctx->complete(&req->base, ret); 457 457 else { 458 458 local_bh_disable(); 459 - rctx->complete(&req->base, ret); 459 + req_ctx->complete(&req->base, ret); 460 460 local_bh_enable(); 461 461 } 462 462 }
+1
arch/x86/include/asm/hugetlb.h
··· 4 4 #include <asm/page.h> 5 5 #include <asm-generic/hugetlb.h> 6 6 7 + #define hugepages_supported() cpu_has_pse 7 8 8 9 static inline int is_hugepage_only_range(struct mm_struct *mm, 9 10 unsigned long addr,
+12
arch/x86/kernel/cpu/mshyperv.c
··· 152 152 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 153 153 }; 154 154 155 + static unsigned char hv_get_nmi_reason(void) 156 + { 157 + return 0; 158 + } 159 + 155 160 static void __init ms_hyperv_init_platform(void) 156 161 { 157 162 /* ··· 196 191 machine_ops.crash_shutdown = hv_machine_crash_shutdown; 197 192 #endif 198 193 mark_tsc_unstable("running on Hyper-V"); 194 + 195 + /* 196 + * Generation 2 instances don't support reading the NMI status from 197 + * 0x61 port. 198 + */ 199 + if (efi_enabled(EFI_BOOT)) 200 + x86_platform.get_nmi_reason = hv_get_nmi_reason; 199 201 } 200 202 201 203 const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
+6 -6
crypto/rsa-pkcs1pad.c
··· 387 387 req_ctx->child_req.src = req->src; 388 388 req_ctx->child_req.src_len = req->src_len; 389 389 req_ctx->child_req.dst = req_ctx->out_sg; 390 - req_ctx->child_req.dst_len = ctx->key_size - 1; 390 + req_ctx->child_req.dst_len = ctx->key_size ; 391 391 392 - req_ctx->out_buf = kmalloc(ctx->key_size - 1, 392 + req_ctx->out_buf = kmalloc(ctx->key_size, 393 393 (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 394 394 GFP_KERNEL : GFP_ATOMIC); 395 395 if (!req_ctx->out_buf) 396 396 return -ENOMEM; 397 397 398 398 pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf, 399 - ctx->key_size - 1, NULL); 399 + ctx->key_size, NULL); 400 400 401 401 akcipher_request_set_tfm(&req_ctx->child_req, ctx->child); 402 402 akcipher_request_set_callback(&req_ctx->child_req, req->base.flags, ··· 595 595 req_ctx->child_req.src = req->src; 596 596 req_ctx->child_req.src_len = req->src_len; 597 597 req_ctx->child_req.dst = req_ctx->out_sg; 598 - req_ctx->child_req.dst_len = ctx->key_size - 1; 598 + req_ctx->child_req.dst_len = ctx->key_size; 599 599 600 - req_ctx->out_buf = kmalloc(ctx->key_size - 1, 600 + req_ctx->out_buf = kmalloc(ctx->key_size, 601 601 (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 602 602 GFP_KERNEL : GFP_ATOMIC); 603 603 if (!req_ctx->out_buf) 604 604 return -ENOMEM; 605 605 606 606 pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf, 607 - ctx->key_size - 1, NULL); 607 + ctx->key_size, NULL); 608 608 609 609 akcipher_request_set_tfm(&req_ctx->child_req, ctx->child); 610 610 akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
+4 -13
drivers/bcma/main.c
··· 136 136 return false; 137 137 } 138 138 139 - #if defined(CONFIG_OF) && defined(CONFIG_OF_ADDRESS) 140 139 static struct device_node *bcma_of_find_child_device(struct platform_device *parent, 141 140 struct bcma_device *core) 142 141 { ··· 183 184 struct of_phandle_args out_irq; 184 185 int ret; 185 186 186 - if (!parent || !parent->dev.of_node) 187 + if (!IS_ENABLED(CONFIG_OF_IRQ) || !parent || !parent->dev.of_node) 187 188 return 0; 188 189 189 190 ret = bcma_of_irq_parse(parent, core, &out_irq, num); ··· 201 202 { 202 203 struct device_node *node; 203 204 205 + if (!IS_ENABLED(CONFIG_OF_IRQ)) 206 + return; 207 + 204 208 node = bcma_of_find_child_device(parent, core); 205 209 if (node) 206 210 core->dev.of_node = node; 207 211 208 212 core->irq = bcma_of_get_irq(parent, core, 0); 209 213 } 210 - #else 211 - static void bcma_of_fill_device(struct platform_device *parent, 212 - struct bcma_device *core) 213 - { 214 - } 215 - static inline unsigned int bcma_of_get_irq(struct platform_device *parent, 216 - struct bcma_device *core, int num) 217 - { 218 - return 0; 219 - } 220 - #endif /* CONFIG_OF */ 221 214 222 215 unsigned int bcma_core_irq(struct bcma_device *core, int num) 223 216 {
+1 -1
drivers/clocksource/tango_xtal.c
··· 42 42 43 43 ret = clocksource_mmio_init(xtal_in_cnt, "tango-xtal", xtal_freq, 350, 44 44 32, clocksource_mmio_readl_up); 45 - if (!ret) { 45 + if (ret) { 46 46 pr_err("%s: registration failed\n", np->full_name); 47 47 return; 48 48 }
+3
drivers/cpufreq/cpufreq.c
··· 1491 1491 { 1492 1492 unsigned int new_freq; 1493 1493 1494 + if (cpufreq_suspended) 1495 + return 0; 1496 + 1494 1497 new_freq = cpufreq_driver->get(policy->cpu); 1495 1498 if (!new_freq) 1496 1499 return 0;
+4
drivers/cpufreq/intel_pstate.c
··· 1130 1130 sample_ratio = div_fp(int_tofp(pid_params.sample_rate_ns), 1131 1131 int_tofp(duration_ns)); 1132 1132 core_busy = mul_fp(core_busy, sample_ratio); 1133 + } else { 1134 + sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc); 1135 + if (sample_ratio < int_tofp(1)) 1136 + core_busy = 0; 1133 1137 } 1134 1138 1135 1139 cpu->sample.busy_scaled = core_busy;
+3
drivers/crypto/ccp/ccp-crypto-aes-cmac.c
··· 225 225 struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req); 226 226 struct ccp_aes_cmac_exp_ctx state; 227 227 228 + /* Don't let anything leak to 'out' */ 229 + memset(&state, 0, sizeof(state)); 230 + 228 231 state.null_msg = rctx->null_msg; 229 232 memcpy(state.iv, rctx->iv, sizeof(state.iv)); 230 233 state.buf_count = rctx->buf_count;
+3
drivers/crypto/ccp/ccp-crypto-sha.c
··· 212 212 struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req); 213 213 struct ccp_sha_exp_ctx state; 214 214 215 + /* Don't let anything leak to 'out' */ 216 + memset(&state, 0, sizeof(state)); 217 + 215 218 state.type = rctx->type; 216 219 state.msg_bits = rctx->msg_bits; 217 220 state.first = rctx->first;
+26 -4
drivers/edac/sb_edac.c
··· 362 362 363 363 /* Memory type detection */ 364 364 bool is_mirrored, is_lockstep, is_close_pg; 365 + bool is_chan_hash; 365 366 366 367 /* Fifo double buffers */ 367 368 struct mce mce_entry[MCE_LOG_LEN]; ··· 1061 1060 return (pkg >> 2) & 0x1; 1062 1061 } 1063 1062 1063 + static int haswell_chan_hash(int idx, u64 addr) 1064 + { 1065 + int i; 1066 + 1067 + /* 1068 + * XOR even bits from 12:26 to bit0 of idx, 1069 + * odd bits from 13:27 to bit1 1070 + */ 1071 + for (i = 12; i < 28; i += 2) 1072 + idx ^= (addr >> i) & 3; 1073 + 1074 + return idx; 1075 + } 1076 + 1064 1077 /**************************************************************************** 1065 1078 Memory check routines 1066 1079 ****************************************************************************/ ··· 1631 1616 KNL_MAX_CHANNELS : NUM_CHANNELS; 1632 1617 u64 knl_mc_sizes[KNL_MAX_CHANNELS]; 1633 1618 1619 + if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) { 1620 + pci_read_config_dword(pvt->pci_ha0, HASWELL_HASYSDEFEATURE2, &reg); 1621 + pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21); 1622 + } 1634 1623 if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL || 1635 1624 pvt->info.type == KNIGHTS_LANDING) 1636 1625 pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, &reg); ··· 2137 2118 } 2138 2119 2139 2120 ch_way = TAD_CH(reg) + 1; 2140 - sck_way = 1 << TAD_SOCK(reg); 2121 + sck_way = TAD_SOCK(reg); 2141 2122 2142 2123 if (ch_way == 3) 2143 2124 idx = addr >> 6; 2144 - else 2125 + else { 2145 2126 idx = (addr >> (6 + sck_way + shiftup)) & 0x3; 2127 + if (pvt->is_chan_hash) 2128 + idx = haswell_chan_hash(idx, addr); 2129 + } 2146 2130 idx = idx % ch_way; 2147 2131 2148 2132 /* ··· 2179 2157 switch(ch_way) { 2180 2158 case 2: 2181 2159 case 4: 2182 - sck_xch = 1 << sck_way * (ch_way >> 1); 2160 + sck_xch = (1 << sck_way) * (ch_way >> 1); 2183 2161 break; 2184 2162 default: 2185 2163 sprintf(msg, "Invalid mirror set. 
Can't decode addr"); ··· 2215 2193 2216 2194 ch_addr = addr - offset; 2217 2195 ch_addr >>= (6 + shiftup); 2218 - ch_addr /= ch_way * sck_way; 2196 + ch_addr /= sck_xch; 2219 2197 ch_addr <<= (6 + shiftup); 2220 2198 ch_addr |= addr & ((1 << (6 + shiftup)) - 1); 2221 2199
+1
drivers/gpu/drm/amd/amdgpu/amdgpu.h
··· 1591 1591 struct amdgpu_bo *vcpu_bo; 1592 1592 void *cpu_addr; 1593 1593 uint64_t gpu_addr; 1594 + unsigned fw_version; 1594 1595 void *saved_bo; 1595 1596 atomic_t handles[AMDGPU_MAX_UVD_HANDLES]; 1596 1597 struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
+4
drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
··· 425 425 struct acp_pm_domain *apd; 426 426 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 427 427 428 + /* return early if no ACP */ 429 + if (!adev->acp.acp_genpd) 430 + return 0; 431 + 428 432 /* SMU block will power on ACP irrespective of ACP runtime status. 429 433 * Power off explicitly based on genpd ACP runtime status so that ACP 430 434 * hw and ACP-genpd status are in sync.
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
··· 303 303 fw_info.feature = adev->vce.fb_version; 304 304 break; 305 305 case AMDGPU_INFO_FW_UVD: 306 - fw_info.ver = 0; 306 + fw_info.ver = adev->uvd.fw_version; 307 307 fw_info.feature = 0; 308 308 break; 309 309 case AMDGPU_INFO_FW_GMC:
+3 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
··· 53 53 54 54 #define AMDGPU_MAX_HPD_PINS 6 55 55 #define AMDGPU_MAX_CRTCS 6 56 - #define AMDGPU_MAX_AFMT_BLOCKS 7 56 + #define AMDGPU_MAX_AFMT_BLOCKS 9 57 57 58 58 enum amdgpu_rmx_type { 59 59 RMX_OFF, ··· 309 309 struct atom_context *atom_context; 310 310 struct card_info *atom_card_info; 311 311 bool mode_config_initialized; 312 - struct amdgpu_crtc *crtcs[6]; 313 - struct amdgpu_afmt *afmt[7]; 312 + struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS]; 313 + struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS]; 314 314 /* DVI-I properties */ 315 315 struct drm_property *coherent_mode_property; 316 316 /* DAC enable load detect */
+2
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
··· 223 223 { 224 224 struct amdgpu_bo *rbo = container_of(bo, struct amdgpu_bo, tbo); 225 225 226 + if (amdgpu_ttm_tt_get_usermm(bo->ttm)) 227 + return -EPERM; 226 228 return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp); 227 229 } 228 230
+5
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
··· 158 158 DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n", 159 159 version_major, version_minor, family_id); 160 160 161 + adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) | 162 + (family_id << 8)); 163 + 161 164 bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8) 162 165 + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE; 163 166 r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true, ··· 257 254 258 255 if (i == AMDGPU_MAX_UVD_HANDLES) 259 256 return 0; 257 + 258 + cancel_delayed_work_sync(&adev->uvd.idle_work); 260 259 261 260 size = amdgpu_bo_size(adev->uvd.vcpu_bo); 262 261 ptr = adev->uvd.cpu_addr;
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
··· 234 234 if (i == AMDGPU_MAX_VCE_HANDLES) 235 235 return 0; 236 236 237 + cancel_delayed_work_sync(&adev->vce.idle_work); 237 238 /* TODO: suspending running encoding sessions isn't supported */ 238 239 return -EINVAL; 239 240 }
+8 -1
drivers/gpu/drm/drm_dp_mst_topology.c
··· 1672 1672 u8 sinks[DRM_DP_MAX_SDP_STREAMS]; 1673 1673 int i; 1674 1674 1675 + port = drm_dp_get_validated_port_ref(mgr, port); 1676 + if (!port) 1677 + return -EINVAL; 1678 + 1675 1679 port_num = port->port_num; 1676 1680 mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); 1677 1681 if (!mstb) { 1678 1682 mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num); 1679 1683 1680 - if (!mstb) 1684 + if (!mstb) { 1685 + drm_dp_put_port(port); 1681 1686 return -EINVAL; 1687 + } 1682 1688 } 1683 1689 1684 1690 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); ··· 1713 1707 kfree(txmsg); 1714 1708 fail_put: 1715 1709 drm_dp_put_mst_branch_device(mstb); 1710 + drm_dp_put_port(port); 1716 1711 return ret; 1717 1712 } 1718 1713
+3 -2
drivers/gpu/drm/i915/i915_drv.h
··· 2634 2634 2635 2635 /* WaRsDisableCoarsePowerGating:skl,bxt */ 2636 2636 #define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \ 2637 - ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && \ 2638 - IS_SKL_REVID(dev, 0, SKL_REVID_F0))) 2637 + IS_SKL_GT3(dev) || \ 2638 + IS_SKL_GT4(dev)) 2639 + 2639 2640 /* 2640 2641 * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts 2641 2642 * even when in MSI mode. This results in spurious interrupt warnings if the
+16 -11
drivers/gpu/drm/i915/i915_gem_userptr.c
··· 501 501 if (pvec != NULL) { 502 502 struct mm_struct *mm = obj->userptr.mm->mm; 503 503 504 - down_read(&mm->mmap_sem); 505 - while (pinned < npages) { 506 - ret = get_user_pages_remote(work->task, mm, 507 - obj->userptr.ptr + pinned * PAGE_SIZE, 508 - npages - pinned, 509 - !obj->userptr.read_only, 0, 510 - pvec + pinned, NULL); 511 - if (ret < 0) 512 - break; 504 + ret = -EFAULT; 505 + if (atomic_inc_not_zero(&mm->mm_users)) { 506 + down_read(&mm->mmap_sem); 507 + while (pinned < npages) { 508 + ret = get_user_pages_remote 509 + (work->task, mm, 510 + obj->userptr.ptr + pinned * PAGE_SIZE, 511 + npages - pinned, 512 + !obj->userptr.read_only, 0, 513 + pvec + pinned, NULL); 514 + if (ret < 0) 515 + break; 513 516 514 - pinned += ret; 517 + pinned += ret; 518 + } 519 + up_read(&mm->mmap_sem); 520 + mmput(mm); 515 521 } 516 - up_read(&mm->mmap_sem); 517 522 } 518 523 519 524 mutex_lock(&dev->struct_mutex);
+11 -5
drivers/gpu/drm/i915/intel_lrc.c
··· 841 841 if (unlikely(total_bytes > remain_usable)) { 842 842 /* 843 843 * The base request will fit but the reserved space 844 - * falls off the end. So only need to to wait for the 845 - * reserved size after flushing out the remainder. 844 + * falls off the end. So don't need an immediate wrap 845 + * and only need to effectively wait for the reserved 846 + * size space from the start of ringbuffer. 846 847 */ 847 848 wait_bytes = remain_actual + ringbuf->reserved_size; 848 - need_wrap = true; 849 849 } else if (total_bytes > ringbuf->space) { 850 850 /* No wrapping required, just waiting. */ 851 851 wait_bytes = total_bytes; ··· 1913 1913 struct intel_ringbuffer *ringbuf = request->ringbuf; 1914 1914 int ret; 1915 1915 1916 - ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS); 1916 + ret = intel_logical_ring_begin(request, 8 + WA_TAIL_DWORDS); 1917 1917 if (ret) 1918 1918 return ret; 1919 + 1920 + /* We're using qword write, seqno should be aligned to 8 bytes. */ 1921 + BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1); 1919 1922 1920 1923 /* w/a for post sync ops following a GPGPU operation we 1921 1924 * need a prior CS_STALL, which is emitted by the flush 1922 1925 * following the batch. 1923 1926 */ 1924 - intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(5)); 1927 + intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6)); 1925 1928 intel_logical_ring_emit(ringbuf, 1926 1929 (PIPE_CONTROL_GLOBAL_GTT_IVB | 1927 1930 PIPE_CONTROL_CS_STALL | ··· 1932 1929 intel_logical_ring_emit(ringbuf, hws_seqno_address(request->ring)); 1933 1930 intel_logical_ring_emit(ringbuf, 0); 1934 1931 intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request)); 1932 + /* We're thrashing one dword of HWS. */ 1933 + intel_logical_ring_emit(ringbuf, 0); 1935 1934 intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT); 1935 + intel_logical_ring_emit(ringbuf, MI_NOOP); 1936 1936 return intel_logical_ring_advance_and_submit(request); 1937 1937 } 1938 1938
+28 -14
drivers/gpu/drm/i915/intel_pm.c
··· 2876 2876 const struct drm_plane_state *pstate, 2877 2877 int y) 2878 2878 { 2879 - struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); 2879 + struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate); 2880 2880 struct drm_framebuffer *fb = pstate->fb; 2881 + uint32_t width = 0, height = 0; 2882 + 2883 + width = drm_rect_width(&intel_pstate->src) >> 16; 2884 + height = drm_rect_height(&intel_pstate->src) >> 16; 2885 + 2886 + if (intel_rotation_90_or_270(pstate->rotation)) 2887 + swap(width, height); 2881 2888 2882 2889 /* for planar format */ 2883 2890 if (fb->pixel_format == DRM_FORMAT_NV12) { 2884 2891 if (y) /* y-plane data rate */ 2885 - return intel_crtc->config->pipe_src_w * 2886 - intel_crtc->config->pipe_src_h * 2892 + return width * height * 2887 2893 drm_format_plane_cpp(fb->pixel_format, 0); 2888 2894 else /* uv-plane data rate */ 2889 - return (intel_crtc->config->pipe_src_w/2) * 2890 - (intel_crtc->config->pipe_src_h/2) * 2895 + return (width / 2) * (height / 2) * 2891 2896 drm_format_plane_cpp(fb->pixel_format, 1); 2892 2897 } 2893 2898 2894 2899 /* for packed formats */ 2895 - return intel_crtc->config->pipe_src_w * 2896 - intel_crtc->config->pipe_src_h * 2897 - drm_format_plane_cpp(fb->pixel_format, 0); 2900 + return width * height * drm_format_plane_cpp(fb->pixel_format, 0); 2898 2901 } 2899 2902 2900 2903 /* ··· 2976 2973 struct drm_framebuffer *fb = plane->state->fb; 2977 2974 int id = skl_wm_plane_id(intel_plane); 2978 2975 2979 - if (fb == NULL) 2976 + if (!to_intel_plane_state(plane->state)->visible) 2980 2977 continue; 2978 + 2981 2979 if (plane->type == DRM_PLANE_TYPE_CURSOR) 2982 2980 continue; 2983 2981 ··· 3004 3000 uint16_t plane_blocks, y_plane_blocks = 0; 3005 3001 int id = skl_wm_plane_id(intel_plane); 3006 3002 3007 - if (pstate->fb == NULL) 3003 + if (!to_intel_plane_state(pstate)->visible) 3008 3004 continue; 3009 3005 if (plane->type == DRM_PLANE_TYPE_CURSOR) 3010 3006 continue; ··· 3127 3123 { 3128 
3124 struct drm_plane *plane = &intel_plane->base; 3129 3125 struct drm_framebuffer *fb = plane->state->fb; 3126 + struct intel_plane_state *intel_pstate = 3127 + to_intel_plane_state(plane->state); 3130 3128 uint32_t latency = dev_priv->wm.skl_latency[level]; 3131 3129 uint32_t method1, method2; 3132 3130 uint32_t plane_bytes_per_line, plane_blocks_per_line; 3133 3131 uint32_t res_blocks, res_lines; 3134 3132 uint32_t selected_result; 3135 3133 uint8_t cpp; 3134 + uint32_t width = 0, height = 0; 3136 3135 3137 - if (latency == 0 || !cstate->base.active || !fb) 3136 + if (latency == 0 || !cstate->base.active || !intel_pstate->visible) 3138 3137 return false; 3138 + 3139 + width = drm_rect_width(&intel_pstate->src) >> 16; 3140 + height = drm_rect_height(&intel_pstate->src) >> 16; 3141 + 3142 + if (intel_rotation_90_or_270(plane->state->rotation)) 3143 + swap(width, height); 3139 3144 3140 3145 cpp = drm_format_plane_cpp(fb->pixel_format, 0); 3141 3146 method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate), 3142 3147 cpp, latency); 3143 3148 method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate), 3144 3149 cstate->base.adjusted_mode.crtc_htotal, 3145 - cstate->pipe_src_w, 3146 - cpp, fb->modifier[0], 3150 + width, 3151 + cpp, 3152 + fb->modifier[0], 3147 3153 latency); 3148 3154 3149 - plane_bytes_per_line = cstate->pipe_src_w * cpp; 3155 + plane_bytes_per_line = width * cpp; 3150 3156 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); 3151 3157 3152 3158 if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
+11 -7
drivers/gpu/drm/i915/intel_ringbuffer.c
··· 968 968 969 969 /* WaForceContextSaveRestoreNonCoherent:skl,bxt */ 970 970 tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT; 971 - if (IS_SKL_REVID(dev, SKL_REVID_F0, SKL_REVID_F0) || 971 + if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) || 972 972 IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER)) 973 973 tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE; 974 974 WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp); ··· 1085 1085 WA_SET_BIT_MASKED(HIZ_CHICKEN, 1086 1086 BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE); 1087 1087 1088 - if (IS_SKL_REVID(dev, 0, SKL_REVID_F0)) { 1088 + /* This is tied to WaForceContextSaveRestoreNonCoherent */ 1089 + if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) { 1089 1090 /* 1090 1091 *Use Force Non-Coherent whenever executing a 3D context. This 1091 1092 * is a workaround for a possible hang in the unlikely event ··· 2091 2090 { 2092 2091 struct drm_i915_private *dev_priv = to_i915(dev); 2093 2092 struct drm_i915_gem_object *obj = ringbuf->obj; 2093 + /* Ring wraparound at offset 0 sometimes hangs. No idea why. */ 2094 + unsigned flags = PIN_OFFSET_BIAS | 4096; 2094 2095 int ret; 2095 2096 2096 2097 if (HAS_LLC(dev_priv) && !obj->stolen) { 2097 - ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, 0); 2098 + ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, flags); 2098 2099 if (ret) 2099 2100 return ret; 2100 2101 ··· 2112 2109 return -ENOMEM; 2113 2110 } 2114 2111 } else { 2115 - ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE); 2112 + ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, 2113 + flags | PIN_MAPPABLE); 2116 2114 if (ret) 2117 2115 return ret; 2118 2116 ··· 2458 2454 if (unlikely(total_bytes > remain_usable)) { 2459 2455 /* 2460 2456 * The base request will fit but the reserved space 2461 - * falls off the end. So only need to to wait for the 2462 - * reserved size after flushing out the remainder. 2457 + * falls off the end. 
So don't need an immediate wrap 2458 + * and only need to effectively wait for the reserved 2459 + * size space from the start of ringbuffer. 2463 2460 */ 2464 2461 wait_bytes = remain_actual + ringbuf->reserved_size; 2465 - need_wrap = true; 2466 2462 } else if (total_bytes > ringbuf->space) { 2467 2463 /* No wrapping required, just waiting. */ 2468 2464 wait_bytes = total_bytes;
+5 -1
drivers/gpu/drm/i915/intel_uncore.c
··· 1189 1189 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 1190 1190 dev_priv->uncore.funcs.force_wake_get = 1191 1191 fw_domains_get_with_thread_status; 1192 - dev_priv->uncore.funcs.force_wake_put = fw_domains_put; 1192 + if (IS_HASWELL(dev)) 1193 + dev_priv->uncore.funcs.force_wake_put = 1194 + fw_domains_put_with_fifo; 1195 + else 1196 + dev_priv->uncore.funcs.force_wake_put = fw_domains_put; 1193 1197 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, 1194 1198 FORCEWAKE_MT, FORCEWAKE_ACK_HSW); 1195 1199 } else if (IS_IVYBRIDGE(dev)) {
+2 -2
drivers/gpu/drm/nouveau/nouveau_connector.c
··· 1276 1276 break; 1277 1277 default: 1278 1278 if (disp->dithering_mode) { 1279 + nv_connector->dithering_mode = DITHERING_MODE_AUTO; 1279 1280 drm_object_attach_property(&connector->base, 1280 1281 disp->dithering_mode, 1281 1282 nv_connector-> 1282 1283 dithering_mode); 1283 - nv_connector->dithering_mode = DITHERING_MODE_AUTO; 1284 1284 } 1285 1285 if (disp->dithering_depth) { 1286 + nv_connector->dithering_depth = DITHERING_DEPTH_AUTO; 1286 1287 drm_object_attach_property(&connector->base, 1287 1288 disp->dithering_depth, 1288 1289 nv_connector-> 1289 1290 dithering_depth); 1290 - nv_connector->dithering_depth = DITHERING_DEPTH_AUTO; 1291 1291 } 1292 1292 break; 1293 1293 }
+2
drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
··· 1832 1832 1833 1833 gf100_gr_mmio(gr, gr->func->mmio); 1834 1834 1835 + nvkm_mask(device, TPC_UNIT(0, 0, 0x05c), 0x00000001, 0x00000001); 1836 + 1835 1837 memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr)); 1836 1838 for (i = 0, gpc = -1; i < gr->tpc_total; i++) { 1837 1839 do {
+7 -4
drivers/gpu/drm/radeon/radeon_atpx_handler.c
··· 62 62 return radeon_atpx_priv.atpx_detected; 63 63 } 64 64 65 - bool radeon_has_atpx_dgpu_power_cntl(void) { 66 - return radeon_atpx_priv.atpx.functions.power_cntl; 67 - } 68 - 69 65 /** 70 66 * radeon_atpx_call - call an ATPX method 71 67 * ··· 141 145 */ 142 146 static int radeon_atpx_validate(struct radeon_atpx *atpx) 143 147 { 148 + /* make sure required functions are enabled */ 149 + /* dGPU power control is required */ 150 + if (atpx->functions.power_cntl == false) { 151 + printk("ATPX dGPU power cntl not present, forcing\n"); 152 + atpx->functions.power_cntl = true; 153 + } 154 + 144 155 if (atpx->functions.px_params) { 145 156 union acpi_object *info; 146 157 struct atpx_px_params output;
+6 -1
drivers/gpu/drm/radeon/radeon_connectors.c
··· 2002 2002 rdev->mode_info.dither_property, 2003 2003 RADEON_FMT_DITHER_DISABLE); 2004 2004 2005 - if (radeon_audio != 0) 2005 + if (radeon_audio != 0) { 2006 2006 drm_object_attach_property(&radeon_connector->base.base, 2007 2007 rdev->mode_info.audio_property, 2008 2008 RADEON_AUDIO_AUTO); 2009 + radeon_connector->audio = RADEON_AUDIO_AUTO; 2010 + } 2009 2011 if (ASIC_IS_DCE5(rdev)) 2010 2012 drm_object_attach_property(&radeon_connector->base.base, 2011 2013 rdev->mode_info.output_csc_property, ··· 2132 2130 drm_object_attach_property(&radeon_connector->base.base, 2133 2131 rdev->mode_info.audio_property, 2134 2132 RADEON_AUDIO_AUTO); 2133 + radeon_connector->audio = RADEON_AUDIO_AUTO; 2135 2134 } 2136 2135 if (connector_type == DRM_MODE_CONNECTOR_DVII) { 2137 2136 radeon_connector->dac_load_detect = true; ··· 2188 2185 drm_object_attach_property(&radeon_connector->base.base, 2189 2186 rdev->mode_info.audio_property, 2190 2187 RADEON_AUDIO_AUTO); 2188 + radeon_connector->audio = RADEON_AUDIO_AUTO; 2191 2189 } 2192 2190 if (ASIC_IS_DCE5(rdev)) 2193 2191 drm_object_attach_property(&radeon_connector->base.base, ··· 2241 2237 drm_object_attach_property(&radeon_connector->base.base, 2242 2238 rdev->mode_info.audio_property, 2243 2239 RADEON_AUDIO_AUTO); 2240 + radeon_connector->audio = RADEON_AUDIO_AUTO; 2244 2241 } 2245 2242 if (ASIC_IS_DCE5(rdev)) 2246 2243 drm_object_attach_property(&radeon_connector->base.base,
+4 -10
drivers/gpu/drm/radeon/radeon_device.c
··· 103 103 "LAST", 104 104 }; 105 105 106 - #if defined(CONFIG_VGA_SWITCHEROO) 107 - bool radeon_has_atpx_dgpu_power_cntl(void); 108 - #else 109 - static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; } 110 - #endif 111 - 112 106 #define RADEON_PX_QUIRK_DISABLE_PX (1 << 0) 113 107 #define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1) 114 108 ··· 1299 1305 } 1300 1306 rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS); 1301 1307 1302 - DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n", 1303 - radeon_family_name[rdev->family], pdev->vendor, pdev->device, 1304 - pdev->subsystem_vendor, pdev->subsystem_device); 1308 + DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n", 1309 + radeon_family_name[rdev->family], pdev->vendor, pdev->device, 1310 + pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision); 1305 1311 1306 1312 /* mutex initialization are all done here so we 1307 1313 * can recall function without having locking issues */ ··· 1433 1439 * ignore it */ 1434 1440 vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); 1435 1441 1436 - if ((rdev->flags & RADEON_IS_PX) && radeon_has_atpx_dgpu_power_cntl()) 1442 + if (rdev->flags & RADEON_IS_PX) 1437 1443 runtime = true; 1438 1444 vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime); 1439 1445 if (runtime)
+2
drivers/gpu/drm/radeon/radeon_ttm.c
··· 235 235 { 236 236 struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo); 237 237 238 + if (radeon_ttm_tt_has_userptr(bo->ttm)) 239 + return -EPERM; 238 240 return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp); 239 241 } 240 242
+1
drivers/gpu/drm/radeon/si_dpm.c
··· 2931 2931 { PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 }, 2932 2932 { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 }, 2933 2933 { PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 }, 2934 + { PCI_VENDOR_ID_ATI, 0x6810, 0x1682, 0x9275, 0, 120000 }, 2934 2935 { 0, 0, 0, 0 }, 2935 2936 }; 2936 2937
+2
drivers/input/joystick/xpad.c
··· 153 153 { 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, 154 154 { 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, 155 155 { 0x0738, 0x4740, "Mad Catz Beat Pad", 0, XTYPE_XBOX360 }, 156 + { 0x0738, 0x4a01, "Mad Catz FightStick TE 2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE }, 156 157 { 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, 157 158 { 0x0738, 0xb726, "Mad Catz Xbox controller - MW2", 0, XTYPE_XBOX360 }, 158 159 { 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 }, ··· 305 304 XPAD_XBOX360_VENDOR(0x046d), /* Logitech X-Box 360 style controllers */ 306 305 XPAD_XBOX360_VENDOR(0x0738), /* Mad Catz X-Box 360 controllers */ 307 306 { USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */ 307 + XPAD_XBOXONE_VENDOR(0x0738), /* Mad Catz FightStick TE 2 */ 308 308 XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */ 309 309 XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */ 310 310 XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
-1
drivers/input/misc/arizona-haptics.c
··· 178 178 input_set_drvdata(haptics->input_dev, haptics); 179 179 180 180 haptics->input_dev->name = "arizona:haptics"; 181 - haptics->input_dev->dev.parent = pdev->dev.parent; 182 181 haptics->input_dev->close = arizona_haptics_close; 183 182 __set_bit(FF_RUMBLE, haptics->input_dev->ffbit); 184 183
+4 -3
drivers/input/misc/pmic8xxx-pwrkey.c
··· 353 353 if (of_property_read_u32(pdev->dev.of_node, "debounce", &kpd_delay)) 354 354 kpd_delay = 15625; 355 355 356 - if (kpd_delay > 62500 || kpd_delay == 0) { 356 + /* Valid range of pwr key trigger delay is 1/64 sec to 2 seconds. */ 357 + if (kpd_delay > USEC_PER_SEC * 2 || kpd_delay < USEC_PER_SEC / 64) { 357 358 dev_err(&pdev->dev, "invalid power key trigger delay\n"); 358 359 return -EINVAL; 359 360 } ··· 386 385 pwr->name = "pmic8xxx_pwrkey"; 387 386 pwr->phys = "pmic8xxx_pwrkey/input0"; 388 387 389 - delay = (kpd_delay << 10) / USEC_PER_SEC; 390 - delay = 1 + ilog2(delay); 388 + delay = (kpd_delay << 6) / USEC_PER_SEC; 389 + delay = ilog2(delay); 391 390 392 391 err = regmap_read(regmap, PON_CNTL_1, &pon_cntl); 393 392 if (err < 0) {
-1
drivers/input/misc/twl4030-vibra.c
··· 222 222 223 223 info->input_dev->name = "twl4030:vibrator"; 224 224 info->input_dev->id.version = 1; 225 - info->input_dev->dev.parent = pdev->dev.parent; 226 225 info->input_dev->close = twl4030_vibra_close; 227 226 __set_bit(FF_RUMBLE, info->input_dev->ffbit); 228 227
+1 -7
drivers/input/misc/twl6040-vibra.c
··· 45 45 struct vibra_info { 46 46 struct device *dev; 47 47 struct input_dev *input_dev; 48 - struct workqueue_struct *workqueue; 49 48 struct work_struct play_work; 50 49 struct mutex mutex; 51 50 int irq; ··· 212 213 info->strong_speed = effect->u.rumble.strong_magnitude; 213 214 info->direction = effect->direction < EFFECT_DIR_180_DEG ? 1 : -1; 214 215 215 - ret = queue_work(info->workqueue, &info->play_work); 216 - if (!ret) { 217 - dev_info(&input->dev, "work is already on queue\n"); 218 - return ret; 219 - } 216 + schedule_work(&info->play_work); 220 217 221 218 return 0; 222 219 } ··· 357 362 358 363 info->input_dev->name = "twl6040:vibrator"; 359 364 info->input_dev->id.version = 1; 360 - info->input_dev->dev.parent = pdev->dev.parent; 361 365 info->input_dev->close = twl6040_vibra_close; 362 366 __set_bit(FF_RUMBLE, info->input_dev->ffbit); 363 367
+9 -1
drivers/input/tablet/gtco.c
··· 858 858 goto err_free_buf; 859 859 } 860 860 861 + /* Sanity check that a device has an endpoint */ 862 + if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) { 863 + dev_err(&usbinterface->dev, 864 + "Invalid number of endpoints\n"); 865 + error = -EINVAL; 866 + goto err_free_urb; 867 + } 868 + 861 869 /* 862 870 * The endpoint is always altsetting 0, we know this since we know 863 871 * this device only has one interrupt endpoint ··· 887 879 * HID report descriptor 888 880 */ 889 881 if (usb_get_extra_descriptor(usbinterface->cur_altsetting, 890 - HID_DEVICE_TYPE, &hid_desc) != 0){ 882 + HID_DEVICE_TYPE, &hid_desc) != 0) { 891 883 dev_err(&usbinterface->dev, 892 884 "Can't retrieve exta USB descriptor to get hid report descriptor length\n"); 893 885 error = -EIO;
+76 -11
drivers/iommu/amd_iommu.c
··· 92 92 struct list_head dev_data_list; /* For global dev_data_list */ 93 93 struct protection_domain *domain; /* Domain the device is bound to */ 94 94 u16 devid; /* PCI Device ID */ 95 + u16 alias; /* Alias Device ID */ 95 96 bool iommu_v2; /* Device can make use of IOMMUv2 */ 96 97 bool passthrough; /* Device is identity mapped */ 97 98 struct { ··· 167 166 return container_of(dom, struct protection_domain, domain); 168 167 } 169 168 169 + static inline u16 get_device_id(struct device *dev) 170 + { 171 + struct pci_dev *pdev = to_pci_dev(dev); 172 + 173 + return PCI_DEVID(pdev->bus->number, pdev->devfn); 174 + } 175 + 170 176 static struct iommu_dev_data *alloc_dev_data(u16 devid) 171 177 { 172 178 struct iommu_dev_data *dev_data; ··· 211 203 return dev_data; 212 204 } 213 205 206 + static int __last_alias(struct pci_dev *pdev, u16 alias, void *data) 207 + { 208 + *(u16 *)data = alias; 209 + return 0; 210 + } 211 + 212 + static u16 get_alias(struct device *dev) 213 + { 214 + struct pci_dev *pdev = to_pci_dev(dev); 215 + u16 devid, ivrs_alias, pci_alias; 216 + 217 + devid = get_device_id(dev); 218 + ivrs_alias = amd_iommu_alias_table[devid]; 219 + pci_for_each_dma_alias(pdev, __last_alias, &pci_alias); 220 + 221 + if (ivrs_alias == pci_alias) 222 + return ivrs_alias; 223 + 224 + /* 225 + * DMA alias showdown 226 + * 227 + * The IVRS is fairly reliable in telling us about aliases, but it 228 + * can't know about every screwy device. If we don't have an IVRS 229 + * reported alias, use the PCI reported alias. In that case we may 230 + * still need to initialize the rlookup and dev_table entries if the 231 + * alias is to a non-existent device. 
232 + */ 233 + if (ivrs_alias == devid) { 234 + if (!amd_iommu_rlookup_table[pci_alias]) { 235 + amd_iommu_rlookup_table[pci_alias] = 236 + amd_iommu_rlookup_table[devid]; 237 + memcpy(amd_iommu_dev_table[pci_alias].data, 238 + amd_iommu_dev_table[devid].data, 239 + sizeof(amd_iommu_dev_table[pci_alias].data)); 240 + } 241 + 242 + return pci_alias; 243 + } 244 + 245 + pr_info("AMD-Vi: Using IVRS reported alias %02x:%02x.%d " 246 + "for device %s[%04x:%04x], kernel reported alias " 247 + "%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias), 248 + PCI_FUNC(ivrs_alias), dev_name(dev), pdev->vendor, pdev->device, 249 + PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias), 250 + PCI_FUNC(pci_alias)); 251 + 252 + /* 253 + * If we don't have a PCI DMA alias and the IVRS alias is on the same 254 + * bus, then the IVRS table may know about a quirk that we don't. 255 + */ 256 + if (pci_alias == devid && 257 + PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) { 258 + pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN; 259 + pdev->dma_alias_devfn = ivrs_alias & 0xff; 260 + pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n", 261 + PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias), 262 + dev_name(dev)); 263 + } 264 + 265 + return ivrs_alias; 266 + } 267 + 214 268 static struct iommu_dev_data *find_dev_data(u16 devid) 215 269 { 216 270 struct iommu_dev_data *dev_data; ··· 283 213 dev_data = alloc_dev_data(devid); 284 214 285 215 return dev_data; 286 - } 287 - 288 - static inline u16 get_device_id(struct device *dev) 289 - { 290 - struct pci_dev *pdev = to_pci_dev(dev); 291 - 292 - return PCI_DEVID(pdev->bus->number, pdev->devfn); 293 216 } 294 217 295 218 static struct iommu_dev_data *get_dev_data(struct device *dev) ··· 412 349 if (!dev_data) 413 350 return -ENOMEM; 414 351 352 + dev_data->alias = get_alias(dev); 353 + 415 354 if (pci_iommuv2_capable(pdev)) { 416 355 struct amd_iommu *iommu; 417 356 ··· 434 369 u16 devid, alias; 435 370 436 371 devid = get_device_id(dev); 437 - alias = 
amd_iommu_alias_table[devid]; 372 + alias = get_alias(dev); 438 373 439 374 memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry)); 440 375 memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry)); ··· 1126 1061 int ret; 1127 1062 1128 1063 iommu = amd_iommu_rlookup_table[dev_data->devid]; 1129 - alias = amd_iommu_alias_table[dev_data->devid]; 1064 + alias = dev_data->alias; 1130 1065 1131 1066 ret = iommu_flush_dte(iommu, dev_data->devid); 1132 1067 if (!ret && alias != dev_data->devid) ··· 2104 2039 bool ats; 2105 2040 2106 2041 iommu = amd_iommu_rlookup_table[dev_data->devid]; 2107 - alias = amd_iommu_alias_table[dev_data->devid]; 2042 + alias = dev_data->alias; 2108 2043 ats = dev_data->ats.enabled; 2109 2044 2110 2045 /* Update data structures */ ··· 2138 2073 return; 2139 2074 2140 2075 iommu = amd_iommu_rlookup_table[dev_data->devid]; 2141 - alias = amd_iommu_alias_table[dev_data->devid]; 2076 + alias = dev_data->alias; 2142 2077 2143 2078 /* decrease reference counters */ 2144 2079 dev_data->domain->dev_iommu[iommu->index] -= 1;
+16 -8
drivers/iommu/arm-smmu.c
··· 826 826 if (smmu_domain->smmu) 827 827 goto out_unlock; 828 828 829 + /* We're bypassing these SIDs, so don't allocate an actual context */ 830 + if (domain->type == IOMMU_DOMAIN_DMA) { 831 + smmu_domain->smmu = smmu; 832 + goto out_unlock; 833 + } 834 + 829 835 /* 830 836 * Mapping the requested stage onto what we support is surprisingly 831 837 * complicated, mainly because the spec allows S1+S2 SMMUs without ··· 954 948 void __iomem *cb_base; 955 949 int irq; 956 950 957 - if (!smmu) 951 + if (!smmu || domain->type == IOMMU_DOMAIN_DMA) 958 952 return; 959 953 960 954 /* ··· 1095 1089 struct arm_smmu_device *smmu = smmu_domain->smmu; 1096 1090 void __iomem *gr0_base = ARM_SMMU_GR0(smmu); 1097 1091 1092 + /* 1093 + * FIXME: This won't be needed once we have IOMMU-backed DMA ops 1094 + * for all devices behind the SMMU. Note that we need to take 1095 + * care configuring SMRs for devices both a platform_device and 1096 + * and a PCI device (i.e. a PCI host controller) 1097 + */ 1098 + if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA) 1099 + return 0; 1100 + 1098 1101 /* Devices in an IOMMU group may already be configured */ 1099 1102 ret = arm_smmu_master_configure_smrs(smmu, cfg); 1100 1103 if (ret) 1101 1104 return ret == -EEXIST ? 0 : ret; 1102 - 1103 - /* 1104 - * FIXME: This won't be needed once we have IOMMU-backed DMA ops 1105 - * for all devices behind the SMMU. 1106 - */ 1107 - if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA) 1108 - return 0; 1109 1105 1110 1106 for (i = 0; i < cfg->num_streamids; ++i) { 1111 1107 u32 idx, s2cr;
+2 -2
drivers/irqchip/irq-mips-gic.c
··· 467 467 gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp))); 468 468 469 469 /* Update the pcpu_masks */ 470 - for (i = 0; i < gic_vpes; i++) 470 + for (i = 0; i < min(gic_vpes, NR_CPUS); i++) 471 471 clear_bit(irq, pcpu_masks[i].pcpu_mask); 472 472 set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask); 473 473 ··· 707 707 spin_lock_irqsave(&gic_lock, flags); 708 708 gic_map_to_pin(intr, gic_cpu_pin); 709 709 gic_map_to_vpe(intr, vpe); 710 - for (i = 0; i < gic_vpes; i++) 710 + for (i = 0; i < min(gic_vpes, NR_CPUS); i++) 711 711 clear_bit(intr, pcpu_masks[i].pcpu_mask); 712 712 set_bit(intr, pcpu_masks[vpe].pcpu_mask); 713 713 spin_unlock_irqrestore(&gic_lock, flags);
+3
drivers/isdn/mISDN/socket.c
··· 715 715 if (!maddr || maddr->family != AF_ISDN) 716 716 return -EINVAL; 717 717 718 + if (addr_len < sizeof(struct sockaddr_mISDN)) 719 + return -EINVAL; 720 + 718 721 lock_sock(sk); 719 722 720 723 if (_pms(sk)->dev) {
+1
drivers/net/Kconfig
··· 195 195 196 196 config MACSEC 197 197 tristate "IEEE 802.1AE MAC-level encryption (MACsec)" 198 + select CRYPTO 198 199 select CRYPTO_AES 199 200 select CRYPTO_GCM 200 201 ---help---
+5 -29
drivers/net/dsa/mv88e6xxx.c
··· 2181 2181 struct net_device *bridge) 2182 2182 { 2183 2183 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 2184 - u16 fid; 2185 2184 int i, err; 2186 2185 2187 2186 mutex_lock(&ps->smi_mutex); 2188 - 2189 - /* Get or create the bridge FID and assign it to the port */ 2190 - for (i = 0; i < ps->num_ports; ++i) 2191 - if (ps->ports[i].bridge_dev == bridge) 2192 - break; 2193 - 2194 - if (i < ps->num_ports) 2195 - err = _mv88e6xxx_port_fid_get(ds, i, &fid); 2196 - else 2197 - err = _mv88e6xxx_fid_new(ds, &fid); 2198 - if (err) 2199 - goto unlock; 2200 - 2201 - err = _mv88e6xxx_port_fid_set(ds, port, fid); 2202 - if (err) 2203 - goto unlock; 2204 2187 2205 2188 /* Assign the bridge and remap each port's VLANTable */ 2206 2189 ps->ports[port].bridge_dev = bridge; ··· 2196 2213 } 2197 2214 } 2198 2215 2199 - unlock: 2200 2216 mutex_unlock(&ps->smi_mutex); 2201 2217 2202 2218 return err; ··· 2205 2223 { 2206 2224 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 2207 2225 struct net_device *bridge = ps->ports[port].bridge_dev; 2208 - u16 fid; 2209 2226 int i; 2210 2227 2211 2228 mutex_lock(&ps->smi_mutex); 2212 - 2213 - /* Give the port a fresh Filtering Information Database */ 2214 - if (_mv88e6xxx_fid_new(ds, &fid) || 2215 - _mv88e6xxx_port_fid_set(ds, port, fid)) 2216 - netdev_warn(ds->ports[port], "failed to assign a new FID\n"); 2217 2229 2218 2230 /* Unassign the bridge and remap each port's VLANTable */ 2219 2231 ps->ports[port].bridge_dev = NULL; ··· 2452 2476 * the other bits clear. 
2453 2477 */ 2454 2478 reg = 1 << port; 2455 - /* Disable learning for DSA and CPU ports */ 2456 - if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) 2457 - reg = PORT_ASSOC_VECTOR_LOCKED_PORT; 2479 + /* Disable learning for CPU port */ 2480 + if (dsa_is_cpu_port(ds, port)) 2481 + reg = 0; 2458 2482 2459 2483 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_ASSOC_VECTOR, reg); 2460 2484 if (ret) ··· 2534 2558 if (ret) 2535 2559 goto abort; 2536 2560 2537 - /* Port based VLAN map: give each port its own address 2561 + /* Port based VLAN map: give each port the same default address 2538 2562 * database, and allow bidirectional communication between the 2539 2563 * CPU and DSA port(s), and the other ports. 2540 2564 */ 2541 - ret = _mv88e6xxx_port_fid_set(ds, port, port + 1); 2565 + ret = _mv88e6xxx_port_fid_set(ds, port, 0); 2542 2566 if (ret) 2543 2567 goto abort; 2544 2568
+1 -1
drivers/net/ethernet/atheros/atlx/atl2.c
··· 1412 1412 1413 1413 err = -EIO; 1414 1414 1415 - netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX; 1415 + netdev->hw_features = NETIF_F_HW_VLAN_CTAG_RX; 1416 1416 netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX); 1417 1417 1418 1418 /* Init PHY as early as possible due to power saving issue */
+5
drivers/net/ethernet/broadcom/bgmac.c
··· 1572 1572 dev_warn(&core->dev, "Using random MAC: %pM\n", mac); 1573 1573 } 1574 1574 1575 + /* This (reset &) enable is not preset in specs or reference driver but 1576 + * Broadcom does it in arch PCI code when enabling fake PCI device. 1577 + */ 1578 + bcma_core_enable(core, 0); 1579 + 1575 1580 /* Allocation and references */ 1576 1581 net_dev = alloc_etherdev(sizeof(*bgmac)); 1577 1582 if (!net_dev)
+3 -3
drivers/net/ethernet/broadcom/bgmac.h
··· 199 199 #define BGMAC_CMDCFG_TAI 0x00000200 200 200 #define BGMAC_CMDCFG_HD 0x00000400 /* Set if in half duplex mode */ 201 201 #define BGMAC_CMDCFG_HD_SHIFT 10 202 - #define BGMAC_CMDCFG_SR_REV0 0x00000800 /* Set to reset mode, for other revs */ 203 - #define BGMAC_CMDCFG_SR_REV4 0x00002000 /* Set to reset mode, only for core rev 4 */ 204 - #define BGMAC_CMDCFG_SR(rev) ((rev == 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0) 202 + #define BGMAC_CMDCFG_SR_REV0 0x00000800 /* Set to reset mode, for core rev 0-3 */ 203 + #define BGMAC_CMDCFG_SR_REV4 0x00002000 /* Set to reset mode, for core rev >= 4 */ 204 + #define BGMAC_CMDCFG_SR(rev) ((rev >= 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0) 205 205 #define BGMAC_CMDCFG_ML 0x00008000 /* Set to activate mac loopback mode */ 206 206 #define BGMAC_CMDCFG_AE 0x00400000 207 207 #define BGMAC_CMDCFG_CFE 0x00800000
+5 -1
drivers/net/ethernet/broadcom/genet/bcmgenet.c
··· 878 878 else 879 879 p = (char *)priv; 880 880 p += s->stat_offset; 881 - data[i] = *(u32 *)p; 881 + if (sizeof(unsigned long) != sizeof(u32) && 882 + s->stat_sizeof == sizeof(unsigned long)) 883 + data[i] = *(unsigned long *)p; 884 + else 885 + data[i] = *(u32 *)p; 882 886 } 883 887 } 884 888
+3 -2
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
··· 1011 1011 } 1012 1012 1013 1013 lmac++; 1014 - if (lmac == MAX_LMAC_PER_BGX) 1014 + if (lmac == MAX_LMAC_PER_BGX) { 1015 + of_node_put(node); 1015 1016 break; 1017 + } 1016 1018 } 1017 - of_node_put(node); 1018 1019 return 0; 1019 1020 1020 1021 defer:
+3
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
··· 1451 1451 unsigned int mmd, unsigned int reg, u16 *valp); 1452 1452 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, 1453 1453 unsigned int mmd, unsigned int reg, u16 val); 1454 + int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf, 1455 + unsigned int vf, unsigned int iqtype, unsigned int iqid, 1456 + unsigned int fl0id, unsigned int fl1id); 1454 1457 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 1455 1458 unsigned int vf, unsigned int iqtype, unsigned int iqid, 1456 1459 unsigned int fl0id, unsigned int fl1id);
+17 -3
drivers/net/ethernet/chelsio/cxgb4/sge.c
··· 2981 2981 void t4_free_sge_resources(struct adapter *adap) 2982 2982 { 2983 2983 int i; 2984 - struct sge_eth_rxq *eq = adap->sge.ethrxq; 2985 - struct sge_eth_txq *etq = adap->sge.ethtxq; 2984 + struct sge_eth_rxq *eq; 2985 + struct sge_eth_txq *etq; 2986 + 2987 + /* stop all Rx queues in order to start them draining */ 2988 + for (i = 0; i < adap->sge.ethqsets; i++) { 2989 + eq = &adap->sge.ethrxq[i]; 2990 + if (eq->rspq.desc) 2991 + t4_iq_stop(adap, adap->mbox, adap->pf, 0, 2992 + FW_IQ_TYPE_FL_INT_CAP, 2993 + eq->rspq.cntxt_id, 2994 + eq->fl.size ? eq->fl.cntxt_id : 0xffff, 2995 + 0xffff); 2996 + } 2986 2997 2987 2998 /* clean up Ethernet Tx/Rx queues */ 2988 - for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) { 2999 + for (i = 0; i < adap->sge.ethqsets; i++) { 3000 + eq = &adap->sge.ethrxq[i]; 2989 3001 if (eq->rspq.desc) 2990 3002 free_rspq_fl(adap, &eq->rspq, 2991 3003 eq->fl.size ? &eq->fl : NULL); 3004 + 3005 + etq = &adap->sge.ethtxq[i]; 2992 3006 if (etq->q.desc) { 2993 3007 t4_eth_eq_free(adap, adap->mbox, adap->pf, 0, 2994 3008 etq->q.cntxt_id);
+43
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
··· 2557 2557 } 2558 2558 2559 2559 #define EEPROM_STAT_ADDR 0x7bfc 2560 + #define VPD_SIZE 0x800 2560 2561 #define VPD_BASE 0x400 2561 2562 #define VPD_BASE_OLD 0 2562 2563 #define VPD_LEN 1024 ··· 2594 2593 vpd = vmalloc(VPD_LEN); 2595 2594 if (!vpd) 2596 2595 return -ENOMEM; 2596 + 2597 + /* We have two VPD data structures stored in the adapter VPD area. 2598 + * By default, Linux calculates the size of the VPD area by traversing 2599 + * the first VPD area at offset 0x0, so we need to tell the OS what 2600 + * our real VPD size is. 2601 + */ 2602 + ret = pci_set_vpd_size(adapter->pdev, VPD_SIZE); 2603 + if (ret < 0) 2604 + goto out; 2597 2605 2598 2606 /* Card information normally starts at VPD_BASE but early cards had 2599 2607 * it at 0. ··· 6946 6936 FW_VI_ENABLE_CMD_VIID_V(viid)); 6947 6937 c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c)); 6948 6938 c.blinkdur = cpu_to_be16(nblinks); 6939 + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 6940 + } 6941 + 6942 + /** 6943 + * t4_iq_stop - stop an ingress queue and its FLs 6944 + * @adap: the adapter 6945 + * @mbox: mailbox to use for the FW command 6946 + * @pf: the PF owning the queues 6947 + * @vf: the VF owning the queues 6948 + * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.) 6949 + * @iqid: ingress queue id 6950 + * @fl0id: FL0 queue id or 0xffff if no attached FL0 6951 + * @fl1id: FL1 queue id or 0xffff if no attached FL1 6952 + * 6953 + * Stops an ingress queue and its associated FLs, if any. This causes 6954 + * any current or future data/messages destined for these queues to be 6955 + * tossed. 
6956 + */ 6957 + int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf, 6958 + unsigned int vf, unsigned int iqtype, unsigned int iqid, 6959 + unsigned int fl0id, unsigned int fl1id) 6960 + { 6961 + struct fw_iq_cmd c; 6962 + 6963 + memset(&c, 0, sizeof(c)); 6964 + c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F | 6965 + FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) | 6966 + FW_IQ_CMD_VFN_V(vf)); 6967 + c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_IQSTOP_F | FW_LEN16(c)); 6968 + c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype)); 6969 + c.iqid = cpu_to_be16(iqid); 6970 + c.fl0id = cpu_to_be16(fl0id); 6971 + c.fl1id = cpu_to_be16(fl1id); 6949 6972 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 6950 6973 } 6951 6974
+22 -8
drivers/net/ethernet/intel/fm10k/fm10k_pf.c
··· 1223 1223 if (err) 1224 1224 return err; 1225 1225 1226 - /* verify upper 16 bits are zero */ 1227 - if (vid >> 16) 1228 - return FM10K_ERR_PARAM; 1229 - 1230 1226 set = !(vid & FM10K_VLAN_CLEAR); 1231 1227 vid &= ~FM10K_VLAN_CLEAR; 1232 1228 1233 - err = fm10k_iov_select_vid(vf_info, (u16)vid); 1234 - if (err < 0) 1235 - return err; 1229 + /* if the length field has been set, this is a multi-bit 1230 + * update request. For multi-bit requests, simply disallow 1231 + * them when the pf_vid has been set. In this case, the PF 1232 + * should have already cleared the VLAN_TABLE, and if we 1233 + * allowed them, it could allow a rogue VF to receive traffic 1234 + * on a VLAN it was not assigned. In the single-bit case, we 1235 + * need to modify requests for VLAN 0 to use the default PF or 1236 + * SW vid when assigned. 1237 + */ 1236 1238 1237 - vid = err; 1239 + if (vid >> 16) { 1240 + /* prevent multi-bit requests when PF has 1241 + * administratively set the VLAN for this VF 1242 + */ 1243 + if (vf_info->pf_vid) 1244 + return FM10K_ERR_PARAM; 1245 + } else { 1246 + err = fm10k_iov_select_vid(vf_info, (u16)vid); 1247 + if (err < 0) 1248 + return err; 1249 + 1250 + vid = err; 1251 + } 1238 1252 1239 1253 /* update VSI info for VF in regards to VLAN table */ 1240 1254 err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
+24 -25
drivers/net/ethernet/intel/i40e/i40e_txrx.c
··· 2594 2594 } 2595 2595 2596 2596 /** 2597 - * __i40e_chk_linearize - Check if there are more than 8 fragments per packet 2597 + * __i40e_chk_linearize - Check if there are more than 8 buffers per packet 2598 2598 * @skb: send buffer 2599 2599 * 2600 - * Note: Our HW can't scatter-gather more than 8 fragments to build 2601 - * a packet on the wire and so we need to figure out the cases where we 2602 - * need to linearize the skb. 2600 + * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire 2601 + * and so we need to figure out the cases where we need to linearize the skb. 2602 + * 2603 + * For TSO we need to count the TSO header and segment payload separately. 2604 + * As such we need to check cases where we have 7 fragments or more as we 2605 + * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for 2606 + * the segment payload in the first descriptor, and another 7 for the 2607 + * fragments. 2603 2608 **/ 2604 2609 bool __i40e_chk_linearize(struct sk_buff *skb) 2605 2610 { 2606 2611 const struct skb_frag_struct *frag, *stale; 2607 - int gso_size, nr_frags, sum; 2612 + int nr_frags, sum; 2608 2613 2609 - /* check to see if TSO is enabled, if so we may get a repreive */ 2610 - gso_size = skb_shinfo(skb)->gso_size; 2611 - if (unlikely(!gso_size)) 2612 - return true; 2613 - 2614 - /* no need to check if number of frags is less than 8 */ 2614 + /* no need to check if number of frags is less than 7 */ 2615 2615 nr_frags = skb_shinfo(skb)->nr_frags; 2616 - if (nr_frags < I40E_MAX_BUFFER_TXD) 2616 + if (nr_frags < (I40E_MAX_BUFFER_TXD - 1)) 2617 2617 return false; 2618 2618 2619 2619 /* We need to walk through the list and validate that each group 2620 2620 * of 6 fragments totals at least gso_size. 
However we don't need 2621 - * to perform such validation on the first or last 6 since the first 2622 - * 6 cannot inherit any data from a descriptor before them, and the 2623 - * last 6 cannot inherit any data from a descriptor after them. 2621 + * to perform such validation on the last 6 since the last 6 cannot 2622 + * inherit any data from a descriptor after them. 2624 2623 */ 2625 - nr_frags -= I40E_MAX_BUFFER_TXD - 1; 2624 + nr_frags -= I40E_MAX_BUFFER_TXD - 2; 2626 2625 frag = &skb_shinfo(skb)->frags[0]; 2627 2626 2628 2627 /* Initialize size to the negative value of gso_size minus 1. We ··· 2630 2631 * descriptors for a single transmit as the header and previous 2631 2632 * fragment are already consuming 2 descriptors. 2632 2633 */ 2633 - sum = 1 - gso_size; 2634 + sum = 1 - skb_shinfo(skb)->gso_size; 2634 2635 2635 - /* Add size of frags 1 through 5 to create our initial sum */ 2636 - sum += skb_frag_size(++frag); 2637 - sum += skb_frag_size(++frag); 2638 - sum += skb_frag_size(++frag); 2639 - sum += skb_frag_size(++frag); 2640 - sum += skb_frag_size(++frag); 2636 + /* Add size of frags 0 through 4 to create our initial sum */ 2637 + sum += skb_frag_size(frag++); 2638 + sum += skb_frag_size(frag++); 2639 + sum += skb_frag_size(frag++); 2640 + sum += skb_frag_size(frag++); 2641 + sum += skb_frag_size(frag++); 2641 2642 2642 2643 /* Walk through fragments adding latest fragment, testing it, and 2643 2644 * then removing stale fragments from the sum. 2644 2645 */ 2645 2646 stale = &skb_shinfo(skb)->frags[0]; 2646 2647 for (;;) { 2647 - sum += skb_frag_size(++frag); 2648 + sum += skb_frag_size(frag++); 2648 2649 2649 2650 /* if sum is negative we failed to make sufficient progress */ 2650 2651 if (sum < 0) ··· 2654 2655 if (!--nr_frags) 2655 2656 break; 2656 2657 2657 - sum -= skb_frag_size(++stale); 2658 + sum -= skb_frag_size(stale++); 2658 2659 } 2659 2660 2660 2661 return false;
+7 -3
drivers/net/ethernet/intel/i40e/i40e_txrx.h
··· 413 413 **/ 414 414 static inline bool i40e_chk_linearize(struct sk_buff *skb, int count) 415 415 { 416 - /* we can only support up to 8 data buffers for a single send */ 417 - if (likely(count <= I40E_MAX_BUFFER_TXD)) 416 + /* Both TSO and single send will work if count is less than 8 */ 417 + if (likely(count < I40E_MAX_BUFFER_TXD)) 418 418 return false; 419 419 420 - return __i40e_chk_linearize(skb); 420 + if (skb_is_gso(skb)) 421 + return __i40e_chk_linearize(skb); 422 + 423 + /* we can support up to 8 data buffers for a single send */ 424 + return count != I40E_MAX_BUFFER_TXD; 421 425 } 422 426 #endif /* _I40E_TXRX_H_ */
+24 -25
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
··· 1796 1796 } 1797 1797 1798 1798 /** 1799 - * __i40evf_chk_linearize - Check if there are more than 8 fragments per packet 1799 + * __i40evf_chk_linearize - Check if there are more than 8 buffers per packet 1800 1800 * @skb: send buffer 1801 1801 * 1802 - * Note: Our HW can't scatter-gather more than 8 fragments to build 1803 - * a packet on the wire and so we need to figure out the cases where we 1804 - * need to linearize the skb. 1802 + * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire 1803 + * and so we need to figure out the cases where we need to linearize the skb. 1804 + * 1805 + * For TSO we need to count the TSO header and segment payload separately. 1806 + * As such we need to check cases where we have 7 fragments or more as we 1807 + * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for 1808 + * the segment payload in the first descriptor, and another 7 for the 1809 + * fragments. 1805 1810 **/ 1806 1811 bool __i40evf_chk_linearize(struct sk_buff *skb) 1807 1812 { 1808 1813 const struct skb_frag_struct *frag, *stale; 1809 - int gso_size, nr_frags, sum; 1814 + int nr_frags, sum; 1810 1815 1811 - /* check to see if TSO is enabled, if so we may get a repreive */ 1812 - gso_size = skb_shinfo(skb)->gso_size; 1813 - if (unlikely(!gso_size)) 1814 - return true; 1815 - 1816 - /* no need to check if number of frags is less than 8 */ 1816 + /* no need to check if number of frags is less than 7 */ 1817 1817 nr_frags = skb_shinfo(skb)->nr_frags; 1818 - if (nr_frags < I40E_MAX_BUFFER_TXD) 1818 + if (nr_frags < (I40E_MAX_BUFFER_TXD - 1)) 1819 1819 return false; 1820 1820 1821 1821 /* We need to walk through the list and validate that each group 1822 1822 * of 6 fragments totals at least gso_size. 
However we don't need 1823 - * to perform such validation on the first or last 6 since the first 1824 - * 6 cannot inherit any data from a descriptor before them, and the 1825 - * last 6 cannot inherit any data from a descriptor after them. 1823 + * to perform such validation on the last 6 since the last 6 cannot 1824 + * inherit any data from a descriptor after them. 1826 1825 */ 1827 - nr_frags -= I40E_MAX_BUFFER_TXD - 1; 1826 + nr_frags -= I40E_MAX_BUFFER_TXD - 2; 1828 1827 frag = &skb_shinfo(skb)->frags[0]; 1829 1828 1830 1829 /* Initialize size to the negative value of gso_size minus 1. We ··· 1832 1833 * descriptors for a single transmit as the header and previous 1833 1834 * fragment are already consuming 2 descriptors. 1834 1835 */ 1835 - sum = 1 - gso_size; 1836 + sum = 1 - skb_shinfo(skb)->gso_size; 1836 1837 1837 - /* Add size of frags 1 through 5 to create our initial sum */ 1838 - sum += skb_frag_size(++frag); 1839 - sum += skb_frag_size(++frag); 1840 - sum += skb_frag_size(++frag); 1841 - sum += skb_frag_size(++frag); 1842 - sum += skb_frag_size(++frag); 1838 + /* Add size of frags 0 through 4 to create our initial sum */ 1839 + sum += skb_frag_size(frag++); 1840 + sum += skb_frag_size(frag++); 1841 + sum += skb_frag_size(frag++); 1842 + sum += skb_frag_size(frag++); 1843 + sum += skb_frag_size(frag++); 1843 1844 1844 1845 /* Walk through fragments adding latest fragment, testing it, and 1845 1846 * then removing stale fragments from the sum. 1846 1847 */ 1847 1848 stale = &skb_shinfo(skb)->frags[0]; 1848 1849 for (;;) { 1849 - sum += skb_frag_size(++frag); 1850 + sum += skb_frag_size(frag++); 1850 1851 1851 1852 /* if sum is negative we failed to make sufficient progress */ 1852 1853 if (sum < 0) ··· 1856 1857 if (!--nr_frags) 1857 1858 break; 1858 1859 1859 - sum -= skb_frag_size(++stale); 1860 + sum -= skb_frag_size(stale++); 1860 1861 } 1861 1862 1862 1863 return false;
+7 -3
drivers/net/ethernet/intel/i40evf/i40e_txrx.h
··· 395 395 **/ 396 396 static inline bool i40e_chk_linearize(struct sk_buff *skb, int count) 397 397 { 398 - /* we can only support up to 8 data buffers for a single send */ 399 - if (likely(count <= I40E_MAX_BUFFER_TXD)) 398 + /* Both TSO and single send will work if count is less than 8 */ 399 + if (likely(count < I40E_MAX_BUFFER_TXD)) 400 400 return false; 401 401 402 - return __i40evf_chk_linearize(skb); 402 + if (skb_is_gso(skb)) 403 + return __i40evf_chk_linearize(skb); 404 + 405 + /* we can support up to 8 data buffers for a single send */ 406 + return count != I40E_MAX_BUFFER_TXD; 403 407 } 404 408 #endif /* _I40E_TXRX_H_ */
+4 -1
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
··· 337 337 case ETH_SS_STATS: 338 338 return bitmap_iterator_count(&it) + 339 339 (priv->tx_ring_num * 2) + 340 - (priv->rx_ring_num * 2); 340 + (priv->rx_ring_num * 3); 341 341 case ETH_SS_TEST: 342 342 return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags 343 343 & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2; ··· 404 404 for (i = 0; i < priv->rx_ring_num; i++) { 405 405 data[index++] = priv->rx_ring[i]->packets; 406 406 data[index++] = priv->rx_ring[i]->bytes; 407 + data[index++] = priv->rx_ring[i]->dropped; 407 408 } 408 409 spin_unlock_bh(&priv->stats_lock); 409 410 ··· 478 477 "rx%d_packets", i); 479 478 sprintf(data + (index++) * ETH_GSTRING_LEN, 480 479 "rx%d_bytes", i); 480 + sprintf(data + (index++) * ETH_GSTRING_LEN, 481 + "rx%d_dropped", i); 481 482 } 482 483 break; 483 484 case ETH_SS_PRIV_FLAGS:
+4 -1
drivers/net/ethernet/mellanox/mlx4/en_port.c
··· 158 158 u64 in_mod = reset << 8 | port; 159 159 int err; 160 160 int i, counter_index; 161 + unsigned long sw_rx_dropped = 0; 161 162 162 163 mailbox = mlx4_alloc_cmd_mailbox(mdev->dev); 163 164 if (IS_ERR(mailbox)) ··· 181 180 for (i = 0; i < priv->rx_ring_num; i++) { 182 181 stats->rx_packets += priv->rx_ring[i]->packets; 183 182 stats->rx_bytes += priv->rx_ring[i]->bytes; 183 + sw_rx_dropped += priv->rx_ring[i]->dropped; 184 184 priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok; 185 185 priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none; 186 186 priv->port_stats.rx_chksum_complete += priv->rx_ring[i]->csum_complete; ··· 238 236 &mlx4_en_stats->MCAST_prio_1, 239 237 NUM_PRIORITIES); 240 238 stats->collisions = 0; 241 - stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP); 239 + stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP) + 240 + sw_rx_dropped; 242 241 stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength); 243 242 stats->rx_over_errors = 0; 244 243 stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
+8 -4
drivers/net/ethernet/mellanox/mlx4/en_rx.c
··· 61 61 gfp_t gfp = _gfp; 62 62 63 63 if (order) 64 - gfp |= __GFP_COMP | __GFP_NOWARN; 64 + gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NOMEMALLOC; 65 65 page = alloc_pages(gfp, order); 66 66 if (likely(page)) 67 67 break; ··· 126 126 dma_unmap_page(priv->ddev, page_alloc[i].dma, 127 127 page_alloc[i].page_size, PCI_DMA_FROMDEVICE); 128 128 page = page_alloc[i].page; 129 - set_page_count(page, 1); 129 + /* Revert changes done by mlx4_alloc_pages */ 130 + page_ref_sub(page, page_alloc[i].page_size / 131 + priv->frag_info[i].frag_stride - 1); 130 132 put_page(page); 131 133 } 132 134 } ··· 178 176 dma_unmap_page(priv->ddev, page_alloc->dma, 179 177 page_alloc->page_size, PCI_DMA_FROMDEVICE); 180 178 page = page_alloc->page; 181 - set_page_count(page, 1); 179 + /* Revert changes done by mlx4_alloc_pages */ 180 + page_ref_sub(page, page_alloc->page_size / 181 + priv->frag_info[i].frag_stride - 1); 182 182 put_page(page); 183 183 page_alloc->page = NULL; 184 184 } ··· 943 939 /* GRO not possible, complete processing here */ 944 940 skb = mlx4_en_rx_skb(priv, rx_desc, frags, length); 945 941 if (!skb) { 946 - priv->stats.rx_dropped++; 942 + ring->dropped++; 947 943 goto next; 948 944 } 949 945
+57 -19
drivers/net/ethernet/mellanox/mlx4/main.c
··· 3172 3172 return 0; 3173 3173 } 3174 3174 3175 + static int mlx4_pci_enable_device(struct mlx4_dev *dev) 3176 + { 3177 + struct pci_dev *pdev = dev->persist->pdev; 3178 + int err = 0; 3179 + 3180 + mutex_lock(&dev->persist->pci_status_mutex); 3181 + if (dev->persist->pci_status == MLX4_PCI_STATUS_DISABLED) { 3182 + err = pci_enable_device(pdev); 3183 + if (!err) 3184 + dev->persist->pci_status = MLX4_PCI_STATUS_ENABLED; 3185 + } 3186 + mutex_unlock(&dev->persist->pci_status_mutex); 3187 + 3188 + return err; 3189 + } 3190 + 3191 + static void mlx4_pci_disable_device(struct mlx4_dev *dev) 3192 + { 3193 + struct pci_dev *pdev = dev->persist->pdev; 3194 + 3195 + mutex_lock(&dev->persist->pci_status_mutex); 3196 + if (dev->persist->pci_status == MLX4_PCI_STATUS_ENABLED) { 3197 + pci_disable_device(pdev); 3198 + dev->persist->pci_status = MLX4_PCI_STATUS_DISABLED; 3199 + } 3200 + mutex_unlock(&dev->persist->pci_status_mutex); 3201 + } 3202 + 3175 3203 static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, 3176 3204 int total_vfs, int *nvfs, struct mlx4_priv *priv, 3177 3205 int reset_flow) ··· 3610 3582 3611 3583 pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev)); 3612 3584 3613 - err = pci_enable_device(pdev); 3585 + err = mlx4_pci_enable_device(&priv->dev); 3614 3586 if (err) { 3615 3587 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 3616 3588 return err; ··· 3743 3715 pci_release_regions(pdev); 3744 3716 3745 3717 err_disable_pdev: 3746 - pci_disable_device(pdev); 3718 + mlx4_pci_disable_device(&priv->dev); 3747 3719 pci_set_drvdata(pdev, NULL); 3748 3720 return err; 3749 3721 } ··· 3803 3775 priv->pci_dev_data = id->driver_data; 3804 3776 mutex_init(&dev->persist->device_state_mutex); 3805 3777 mutex_init(&dev->persist->interface_state_mutex); 3778 + mutex_init(&dev->persist->pci_status_mutex); 3806 3779 3807 3780 ret = devlink_register(devlink, &pdev->dev); 3808 3781 if (ret) ··· 3952 3923 } 3953 3924 3954 3925 
pci_release_regions(pdev); 3955 - pci_disable_device(pdev); 3926 + mlx4_pci_disable_device(dev); 3956 3927 devlink_unregister(devlink); 3957 3928 kfree(dev->persist); 3958 3929 devlink_free(devlink); ··· 4071 4042 if (state == pci_channel_io_perm_failure) 4072 4043 return PCI_ERS_RESULT_DISCONNECT; 4073 4044 4074 - pci_disable_device(pdev); 4045 + mlx4_pci_disable_device(persist->dev); 4075 4046 return PCI_ERS_RESULT_NEED_RESET; 4076 4047 } 4077 4048 ··· 4079 4050 { 4080 4051 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 4081 4052 struct mlx4_dev *dev = persist->dev; 4082 - struct mlx4_priv *priv = mlx4_priv(dev); 4083 - int ret; 4084 - int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; 4085 - int total_vfs; 4053 + int err; 4086 4054 4087 4055 mlx4_err(dev, "mlx4_pci_slot_reset was called\n"); 4088 - ret = pci_enable_device(pdev); 4089 - if (ret) { 4090 - mlx4_err(dev, "Can not re-enable device, ret=%d\n", ret); 4056 + err = mlx4_pci_enable_device(dev); 4057 + if (err) { 4058 + mlx4_err(dev, "Can not re-enable device, err=%d\n", err); 4091 4059 return PCI_ERS_RESULT_DISCONNECT; 4092 4060 } 4093 4061 4094 4062 pci_set_master(pdev); 4095 4063 pci_restore_state(pdev); 4096 4064 pci_save_state(pdev); 4065 + return PCI_ERS_RESULT_RECOVERED; 4066 + } 4097 4067 4068 + static void mlx4_pci_resume(struct pci_dev *pdev) 4069 + { 4070 + struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 4071 + struct mlx4_dev *dev = persist->dev; 4072 + struct mlx4_priv *priv = mlx4_priv(dev); 4073 + int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; 4074 + int total_vfs; 4075 + int err; 4076 + 4077 + mlx4_err(dev, "%s was called\n", __func__); 4098 4078 total_vfs = dev->persist->num_vfs; 4099 4079 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs)); 4100 4080 4101 4081 mutex_lock(&persist->interface_state_mutex); 4102 4082 if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) { 4103 - ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs, 4083 + err = 
mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs, 4104 4084 priv, 1); 4105 - if (ret) { 4106 - mlx4_err(dev, "%s: mlx4_load_one failed, ret=%d\n", 4107 - __func__, ret); 4085 + if (err) { 4086 + mlx4_err(dev, "%s: mlx4_load_one failed, err=%d\n", 4087 + __func__, err); 4108 4088 goto end; 4109 4089 } 4110 4090 4111 - ret = restore_current_port_types(dev, dev->persist-> 4091 + err = restore_current_port_types(dev, dev->persist-> 4112 4092 curr_port_type, dev->persist-> 4113 4093 curr_port_poss_type); 4114 - if (ret) 4115 - mlx4_err(dev, "could not restore original port types (%d)\n", ret); 4094 + if (err) 4095 + mlx4_err(dev, "could not restore original port types (%d)\n", err); 4116 4096 } 4117 4097 end: 4118 4098 mutex_unlock(&persist->interface_state_mutex); 4119 4099 4120 - return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; 4121 4100 } 4122 4101 4123 4102 static void mlx4_shutdown(struct pci_dev *pdev) ··· 4142 4105 static const struct pci_error_handlers mlx4_err_handler = { 4143 4106 .error_detected = mlx4_pci_err_detected, 4144 4107 .slot_reset = mlx4_pci_slot_reset, 4108 + .resume = mlx4_pci_resume, 4145 4109 }; 4146 4110 4147 4111 static struct pci_driver mlx4_driver = {
+2
drivers/net/ethernet/mellanox/mlx4/mlx4.h
··· 586 586 struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1]; 587 587 int init_port_ref[MLX4_MAX_PORTS + 1]; 588 588 u16 max_mtu[MLX4_MAX_PORTS + 1]; 589 + u8 pptx; 590 + u8 pprx; 589 591 int disable_mcast_ref[MLX4_MAX_PORTS + 1]; 590 592 struct mlx4_resource_tracker res_tracker; 591 593 struct workqueue_struct *comm_wq;
+1
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
··· 323 323 unsigned long csum_ok; 324 324 unsigned long csum_none; 325 325 unsigned long csum_complete; 326 + unsigned long dropped; 326 327 int hwtstamp_rx_filter; 327 328 cpumask_var_t affinity_mask; 328 329 };
+13
drivers/net/ethernet/mellanox/mlx4/port.c
··· 1317 1317 } 1318 1318 1319 1319 gen_context->mtu = cpu_to_be16(master->max_mtu[port]); 1320 + /* Slave cannot change Global Pause configuration */ 1321 + if (slave != mlx4_master_func_num(dev) && 1322 + ((gen_context->pptx != master->pptx) || 1323 + (gen_context->pprx != master->pprx))) { 1324 + gen_context->pptx = master->pptx; 1325 + gen_context->pprx = master->pprx; 1326 + mlx4_warn(dev, 1327 + "denying Global Pause change for slave:%d\n", 1328 + slave); 1329 + } else { 1330 + master->pptx = gen_context->pptx; 1331 + master->pprx = gen_context->pprx; 1332 + } 1320 1333 break; 1321 1334 case MLX4_SET_PORT_GID_TABLE: 1322 1335 /* change to MULTIPLE entries: number of guest's gids
+101 -58
drivers/net/ethernet/qlogic/qede/qede_main.c
··· 750 750 return false; 751 751 } 752 752 753 + static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq) 754 + { 755 + qed_chain_consume(&rxq->rx_bd_ring); 756 + rxq->sw_rx_cons++; 757 + } 758 + 753 759 /* This function reuses the buffer(from an offset) from 754 760 * consumer index to producer index in the bd ring 755 761 */ ··· 779 773 curr_cons->data = NULL; 780 774 } 781 775 776 + /* In case of allocation failures reuse buffers 777 + * from consumer index to produce buffers for firmware 778 + */ 779 + static void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, 780 + struct qede_dev *edev, u8 count) 781 + { 782 + struct sw_rx_data *curr_cons; 783 + 784 + for (; count > 0; count--) { 785 + curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX]; 786 + qede_reuse_page(edev, rxq, curr_cons); 787 + qede_rx_bd_ring_consume(rxq); 788 + } 789 + } 790 + 782 791 static inline int qede_realloc_rx_buffer(struct qede_dev *edev, 783 792 struct qede_rx_queue *rxq, 784 793 struct sw_rx_data *curr_cons) ··· 802 781 curr_cons->page_offset += rxq->rx_buf_seg_size; 803 782 804 783 if (curr_cons->page_offset == PAGE_SIZE) { 805 - if (unlikely(qede_alloc_rx_buffer(edev, rxq))) 784 + if (unlikely(qede_alloc_rx_buffer(edev, rxq))) { 785 + /* Since we failed to allocate new buffer 786 + * current buffer can be used again. 787 + */ 788 + curr_cons->page_offset -= rxq->rx_buf_seg_size; 789 + 806 790 return -ENOMEM; 791 + } 807 792 808 793 dma_unmap_page(&edev->pdev->dev, curr_cons->mapping, 809 794 PAGE_SIZE, DMA_FROM_DEVICE); ··· 928 901 len_on_bd); 929 902 930 903 if (unlikely(qede_realloc_rx_buffer(edev, rxq, current_bd))) { 931 - tpa_info->agg_state = QEDE_AGG_STATE_ERROR; 904 + /* Incr page ref count to reuse on allocation failure 905 + * so that it doesn't get freed while freeing SKB. 
906 + */ 907 + atomic_inc(&current_bd->data->_count); 932 908 goto out; 933 909 } 934 910 ··· 945 915 return 0; 946 916 947 917 out: 918 + tpa_info->agg_state = QEDE_AGG_STATE_ERROR; 919 + qede_recycle_rx_bd_ring(rxq, edev, 1); 948 920 return -ENOMEM; 949 921 } 950 922 ··· 998 966 tpa_info->skb = netdev_alloc_skb(edev->ndev, 999 967 le16_to_cpu(cqe->len_on_first_bd)); 1000 968 if (unlikely(!tpa_info->skb)) { 969 + DP_NOTICE(edev, "Failed to allocate SKB for gro\n"); 1001 970 tpa_info->agg_state = QEDE_AGG_STATE_ERROR; 1002 - return; 971 + goto cons_buf; 1003 972 } 1004 973 1005 974 skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd)); ··· 1023 990 /* This is needed in order to enable forwarding support */ 1024 991 qede_set_gro_params(edev, tpa_info->skb, cqe); 1025 992 993 + cons_buf: /* We still need to handle bd_len_list to consume buffers */ 1026 994 if (likely(cqe->ext_bd_len_list[0])) 1027 995 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, 1028 996 le16_to_cpu(cqe->ext_bd_len_list[0])); ··· 1041 1007 const struct iphdr *iph = ip_hdr(skb); 1042 1008 struct tcphdr *th; 1043 1009 1044 - skb_set_network_header(skb, 0); 1045 1010 skb_set_transport_header(skb, sizeof(struct iphdr)); 1046 1011 th = tcp_hdr(skb); 1047 1012 ··· 1055 1022 struct ipv6hdr *iph = ipv6_hdr(skb); 1056 1023 struct tcphdr *th; 1057 1024 1058 - skb_set_network_header(skb, 0); 1059 1025 skb_set_transport_header(skb, sizeof(struct ipv6hdr)); 1060 1026 th = tcp_hdr(skb); 1061 1027 ··· 1069 1037 struct sk_buff *skb, 1070 1038 u16 vlan_tag) 1071 1039 { 1040 + /* FW can send a single MTU sized packet from gro flow 1041 + * due to aggregation timeout/last segment etc. which 1042 + * is not expected to be a gro packet. If a skb has zero 1043 + * frags then simply push it in the stack as non gso skb. 
1044 + */ 1045 + if (unlikely(!skb->data_len)) { 1046 + skb_shinfo(skb)->gso_type = 0; 1047 + skb_shinfo(skb)->gso_size = 0; 1048 + goto send_skb; 1049 + } 1050 + 1072 1051 #ifdef CONFIG_INET 1073 1052 if (skb_shinfo(skb)->gso_size) { 1053 + skb_set_network_header(skb, 0); 1054 + 1074 1055 switch (skb->protocol) { 1075 1056 case htons(ETH_P_IP): 1076 1057 qede_gro_ip_csum(skb); ··· 1098 1053 } 1099 1054 } 1100 1055 #endif 1056 + 1057 + send_skb: 1101 1058 skb_record_rx_queue(skb, fp->rss_id); 1102 1059 qede_skb_receive(edev, fp, skb, vlan_tag); 1103 1060 } ··· 1291 1244 "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n", 1292 1245 sw_comp_cons, parse_flag); 1293 1246 rxq->rx_hw_errors++; 1294 - qede_reuse_page(edev, rxq, sw_rx_data); 1295 - goto next_rx; 1247 + qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num); 1248 + goto next_cqe; 1296 1249 } 1297 1250 1298 1251 skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE); 1299 1252 if (unlikely(!skb)) { 1300 1253 DP_NOTICE(edev, 1301 1254 "Build_skb failed, dropping incoming packet\n"); 1302 - qede_reuse_page(edev, rxq, sw_rx_data); 1255 + qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num); 1303 1256 rxq->rx_alloc_errors++; 1304 - goto next_rx; 1257 + goto next_cqe; 1305 1258 } 1306 1259 1307 1260 /* Copy data into SKB */ ··· 1335 1288 if (unlikely(qede_realloc_rx_buffer(edev, rxq, 1336 1289 sw_rx_data))) { 1337 1290 DP_ERR(edev, "Failed to allocate rx buffer\n"); 1291 + /* Incr page ref count to reuse on allocation 1292 + * failure so that it doesn't get freed while 1293 + * freeing SKB. 
1294 + */ 1295 + 1296 + atomic_inc(&sw_rx_data->data->_count); 1338 1297 rxq->rx_alloc_errors++; 1298 + qede_recycle_rx_bd_ring(rxq, edev, 1299 + fp_cqe->bd_num); 1300 + dev_kfree_skb_any(skb); 1339 1301 goto next_cqe; 1340 1302 } 1341 1303 } 1304 + 1305 + qede_rx_bd_ring_consume(rxq); 1342 1306 1343 1307 if (fp_cqe->bd_num != 1) { 1344 1308 u16 pkt_len = le16_to_cpu(fp_cqe->pkt_len); ··· 1361 1303 num_frags--) { 1362 1304 u16 cur_size = pkt_len > rxq->rx_buf_size ? 1363 1305 rxq->rx_buf_size : pkt_len; 1364 - 1365 - WARN_ONCE(!cur_size, 1366 - "Still got %d BDs for mapping jumbo, but length became 0\n", 1367 - num_frags); 1368 - 1369 - if (unlikely(qede_alloc_rx_buffer(edev, rxq))) 1306 + if (unlikely(!cur_size)) { 1307 + DP_ERR(edev, 1308 + "Still got %d BDs for mapping jumbo, but length became 0\n", 1309 + num_frags); 1310 + qede_recycle_rx_bd_ring(rxq, edev, 1311 + num_frags); 1312 + dev_kfree_skb_any(skb); 1370 1313 goto next_cqe; 1314 + } 1371 1315 1372 - rxq->sw_rx_cons++; 1316 + if (unlikely(qede_alloc_rx_buffer(edev, rxq))) { 1317 + qede_recycle_rx_bd_ring(rxq, edev, 1318 + num_frags); 1319 + dev_kfree_skb_any(skb); 1320 + goto next_cqe; 1321 + } 1322 + 1373 1323 sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX; 1374 1324 sw_rx_data = &rxq->sw_rx_ring[sw_rx_index]; 1375 - qed_chain_consume(&rxq->rx_bd_ring); 1325 + qede_rx_bd_ring_consume(rxq); 1326 + 1376 1327 dma_unmap_page(&edev->pdev->dev, 1377 1328 sw_rx_data->mapping, 1378 1329 PAGE_SIZE, DMA_FROM_DEVICE); ··· 1397 1330 pkt_len -= cur_size; 1398 1331 } 1399 1332 1400 - if (pkt_len) 1333 + if (unlikely(pkt_len)) 1401 1334 DP_ERR(edev, 1402 1335 "Mapped all BDs of jumbo, but still have %d bytes\n", 1403 1336 pkt_len); ··· 1416 1349 skb_record_rx_queue(skb, fp->rss_id); 1417 1350 1418 1351 qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag)); 1419 - 1420 - qed_chain_consume(&rxq->rx_bd_ring); 1421 - next_rx: 1422 - rxq->sw_rx_cons++; 1423 1352 next_rx_only: 1424 1353 rx_pkt++; 1425 1354 ··· 2320 
2257 struct qede_agg_info *tpa_info = &rxq->tpa_info[i]; 2321 2258 struct sw_rx_data *replace_buf = &tpa_info->replace_buf; 2322 2259 2323 - if (replace_buf) { 2260 + if (replace_buf->data) { 2324 2261 dma_unmap_page(&edev->pdev->dev, 2325 2262 dma_unmap_addr(replace_buf, mapping), 2326 2263 PAGE_SIZE, DMA_FROM_DEVICE); ··· 2440 2377 static int qede_alloc_mem_rxq(struct qede_dev *edev, 2441 2378 struct qede_rx_queue *rxq) 2442 2379 { 2443 - int i, rc, size, num_allocated; 2380 + int i, rc, size; 2444 2381 2445 2382 rxq->num_rx_buffers = edev->q_num_rx_buffers; 2446 2383 ··· 2457 2394 rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL); 2458 2395 if (!rxq->sw_rx_ring) { 2459 2396 DP_ERR(edev, "Rx buffers ring allocation failed\n"); 2397 + rc = -ENOMEM; 2460 2398 goto err; 2461 2399 } 2462 2400 ··· 2485 2421 /* Allocate buffers for the Rx ring */ 2486 2422 for (i = 0; i < rxq->num_rx_buffers; i++) { 2487 2423 rc = qede_alloc_rx_buffer(edev, rxq); 2488 - if (rc) 2489 - break; 2490 - } 2491 - num_allocated = i; 2492 - if (!num_allocated) { 2493 - DP_ERR(edev, "Rx buffers allocation failed\n"); 2494 - goto err; 2495 - } else if (num_allocated < rxq->num_rx_buffers) { 2496 - DP_NOTICE(edev, 2497 - "Allocated less buffers than desired (%d allocated)\n", 2498 - num_allocated); 2424 + if (rc) { 2425 + DP_ERR(edev, 2426 + "Rx buffers allocation failed at index %d\n", i); 2427 + goto err; 2428 + } 2499 2429 } 2500 2430 2501 - qede_alloc_sge_mem(edev, rxq); 2502 - 2503 - return 0; 2504 - 2431 + rc = qede_alloc_sge_mem(edev, rxq); 2505 2432 err: 2506 - qede_free_mem_rxq(edev, rxq); 2507 - return -ENOMEM; 2433 + return rc; 2508 2434 } 2509 2435 2510 2436 static void qede_free_mem_txq(struct qede_dev *edev, ··· 2577 2523 } 2578 2524 2579 2525 return 0; 2580 - 2581 2526 err: 2582 - qede_free_mem_fp(edev, fp); 2583 - return -ENOMEM; 2527 + return rc; 2584 2528 } 2585 2529 2586 2530 static void qede_free_mem_load(struct qede_dev *edev) ··· 2601 2549 struct qede_fastpath *fp = 
&edev->fp_array[rss_id]; 2602 2550 2603 2551 rc = qede_alloc_mem_fp(edev, fp); 2604 - if (rc) 2605 - break; 2606 - } 2607 - 2608 - if (rss_id != QEDE_RSS_CNT(edev)) { 2609 - /* Failed allocating memory for all the queues */ 2610 - if (!rss_id) { 2552 + if (rc) { 2611 2553 DP_ERR(edev, 2612 - "Failed to allocate memory for the leading queue\n"); 2613 - rc = -ENOMEM; 2614 - } else { 2615 - DP_NOTICE(edev, 2616 - "Failed to allocate memory for all of RSS queues\n Desired: %d queues, allocated: %d queues\n", 2617 - QEDE_RSS_CNT(edev), rss_id); 2554 + "Failed to allocate memory for fastpath - rss id = %d\n", 2555 + rss_id); 2556 + qede_free_mem_load(edev); 2557 + return rc; 2618 2558 } 2619 - edev->num_rss = rss_id; 2620 2559 } 2621 2560 2622 2561 return 0;
+3
drivers/net/ethernet/renesas/ravb_main.c
··· 1691 1691 rate = clk_get_rate(clk); 1692 1692 clk_put(clk); 1693 1693 1694 + if (!rate) 1695 + return -EINVAL; 1696 + 1694 1697 inc = 1000000000ULL << 20; 1695 1698 do_div(inc, rate); 1696 1699
+1 -5
drivers/net/ethernet/renesas/sh_eth.c
··· 2194 2194 __func__); 2195 2195 return ret; 2196 2196 } 2197 - ret = sh_eth_dev_init(ndev, false); 2197 + ret = sh_eth_dev_init(ndev, true); 2198 2198 if (ret < 0) { 2199 2199 netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", 2200 2200 __func__); 2201 2201 return ret; 2202 2202 } 2203 2203 2204 - mdp->irq_enabled = true; 2205 - sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); 2206 - /* Setting the Rx mode will start the Rx process. */ 2207 - sh_eth_write(ndev, EDRRR_R, EDRRR); 2208 2204 netif_device_attach(ndev); 2209 2205 } 2210 2206
+13 -3
drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
··· 34 34 #define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK 0x00000003 35 35 #define SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK 0x00000010 36 36 37 + #define SYSMGR_FPGAGRP_MODULE_REG 0x00000028 38 + #define SYSMGR_FPGAGRP_MODULE_EMAC 0x00000004 39 + 37 40 #define EMAC_SPLITTER_CTRL_REG 0x0 38 41 #define EMAC_SPLITTER_CTRL_SPEED_MASK 0x3 39 42 #define EMAC_SPLITTER_CTRL_SPEED_10 0x2 ··· 151 148 int phymode = dwmac->interface; 152 149 u32 reg_offset = dwmac->reg_offset; 153 150 u32 reg_shift = dwmac->reg_shift; 154 - u32 ctrl, val; 151 + u32 ctrl, val, module; 155 152 156 153 switch (phymode) { 157 154 case PHY_INTERFACE_MODE_RGMII: ··· 178 175 ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift); 179 176 ctrl |= val << reg_shift; 180 177 181 - if (dwmac->f2h_ptp_ref_clk) 178 + if (dwmac->f2h_ptp_ref_clk) { 182 179 ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2); 183 - else 180 + regmap_read(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG, 181 + &module); 182 + module |= (SYSMGR_FPGAGRP_MODULE_EMAC << (reg_shift / 2)); 183 + regmap_write(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG, 184 + module); 185 + } else { 184 186 ctrl &= ~(SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2)); 187 + } 185 188 186 189 regmap_write(sys_mgr_base_addr, reg_offset, ctrl); 190 + 187 191 return 0; 188 192 } 189 193
+2 -2
drivers/net/ethernet/ti/cpsw.c
··· 1251 1251 int i, ret; 1252 1252 u32 reg; 1253 1253 1254 + pm_runtime_get_sync(&priv->pdev->dev); 1255 + 1254 1256 if (!cpsw_common_res_usage_state(priv)) 1255 1257 cpsw_intr_disable(priv); 1256 1258 netif_carrier_off(ndev); 1257 - 1258 - pm_runtime_get_sync(&priv->pdev->dev); 1259 1259 1260 1260 reg = priv->version; 1261 1261
+1 -2
drivers/net/ethernet/ti/davinci_emac.c
··· 1878 1878 pdata->hw_ram_addr = auxdata->hw_ram_addr; 1879 1879 } 1880 1880 1881 - pdev->dev.platform_data = pdata; 1882 - 1883 1881 return pdata; 1884 1882 } 1885 1883 ··· 2099 2101 cpdma_ctlr_destroy(priv->dma); 2100 2102 2101 2103 unregister_netdev(ndev); 2104 + pm_runtime_disable(&pdev->dev); 2102 2105 free_netdev(ndev); 2103 2106 2104 2107 return 0;
+1 -1
drivers/net/phy/spi_ks8995.c
··· 441 441 return -ENOMEM; 442 442 443 443 mutex_init(&ks->lock); 444 - ks->spi = spi_dev_get(spi); 444 + ks->spi = spi; 445 445 ks->chip = &ks8995_chip[variant]; 446 446 447 447 if (ks->spi->dev.of_node) {
+7 -2
drivers/net/usb/cdc_mbim.c
··· 617 617 { USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), 618 618 .driver_info = (unsigned long)&cdc_mbim_info, 619 619 }, 620 - /* Huawei E3372 fails unless NDP comes after the IP packets */ 621 - { USB_DEVICE_AND_INTERFACE_INFO(0x12d1, 0x157d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), 620 + 621 + /* Some Huawei devices, ME906s-158 (12d1:15c1) and E3372 622 + * (12d1:157d), are known to fail unless the NDP is placed 623 + * after the IP packets. Applying the quirk to all Huawei 624 + * devices is broader than necessary, but harmless. 625 + */ 626 + { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), 622 627 .driver_info = (unsigned long)&cdc_mbim_info_ndp_to_end, 623 628 }, 624 629 /* default entry */
+8 -4
drivers/net/vmxnet3/vmxnet3_drv.c
··· 1152 1152 union Vmxnet3_GenericDesc *gdesc) 1153 1153 { 1154 1154 if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) { 1155 - /* typical case: TCP/UDP over IP and both csums are correct */ 1156 - if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) == 1157 - VMXNET3_RCD_CSUM_OK) { 1155 + if (gdesc->rcd.v4 && 1156 + (le32_to_cpu(gdesc->dword[3]) & 1157 + VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) { 1158 1158 skb->ip_summed = CHECKSUM_UNNECESSARY; 1159 1159 BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp)); 1160 - BUG_ON(!(gdesc->rcd.v4 || gdesc->rcd.v6)); 1160 + BUG_ON(gdesc->rcd.frg); 1161 + } else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) & 1162 + (1 << VMXNET3_RCD_TUC_SHIFT))) { 1163 + skb->ip_summed = CHECKSUM_UNNECESSARY; 1164 + BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp)); 1161 1165 BUG_ON(gdesc->rcd.frg); 1162 1166 } else { 1163 1167 if (gdesc->rcd.csum) {
+2 -2
drivers/net/vmxnet3/vmxnet3_int.h
··· 69 69 /* 70 70 * Version numbers 71 71 */ 72 - #define VMXNET3_DRIVER_VERSION_STRING "1.4.6.0-k" 72 + #define VMXNET3_DRIVER_VERSION_STRING "1.4.7.0-k" 73 73 74 74 /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ 75 - #define VMXNET3_DRIVER_VERSION_NUM 0x01040600 75 + #define VMXNET3_DRIVER_VERSION_NUM 0x01040700 76 76 77 77 #if defined(CONFIG_PCI_MSI) 78 78 /* RSS only makes sense if MSI-X is supported. */
+16 -161
drivers/net/vrf.c
··· 60 60 struct u64_stats_sync syncp; 61 61 }; 62 62 63 - static struct dst_entry *vrf_ip_check(struct dst_entry *dst, u32 cookie) 64 - { 65 - return dst; 66 - } 67 - 68 - static int vrf_ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb) 69 - { 70 - return ip_local_out(net, sk, skb); 71 - } 72 - 73 - static unsigned int vrf_v4_mtu(const struct dst_entry *dst) 74 - { 75 - /* TO-DO: return max ethernet size? */ 76 - return dst->dev->mtu; 77 - } 78 - 79 - static void vrf_dst_destroy(struct dst_entry *dst) 80 - { 81 - /* our dst lives forever - or until the device is closed */ 82 - } 83 - 84 - static unsigned int vrf_default_advmss(const struct dst_entry *dst) 85 - { 86 - return 65535 - 40; 87 - } 88 - 89 - static struct dst_ops vrf_dst_ops = { 90 - .family = AF_INET, 91 - .local_out = vrf_ip_local_out, 92 - .check = vrf_ip_check, 93 - .mtu = vrf_v4_mtu, 94 - .destroy = vrf_dst_destroy, 95 - .default_advmss = vrf_default_advmss, 96 - }; 97 - 98 63 /* neighbor handling is done with actual device; do not want 99 64 * to flip skb->dev for those ndisc packets. This really fails 100 65 * for multiple next protocols (e.g., NEXTHDR_HOP). 
But it is ··· 314 349 } 315 350 316 351 #if IS_ENABLED(CONFIG_IPV6) 317 - static struct dst_entry *vrf_ip6_check(struct dst_entry *dst, u32 cookie) 318 - { 319 - return dst; 320 - } 321 - 322 - static struct dst_ops vrf_dst_ops6 = { 323 - .family = AF_INET6, 324 - .local_out = ip6_local_out, 325 - .check = vrf_ip6_check, 326 - .mtu = vrf_v4_mtu, 327 - .destroy = vrf_dst_destroy, 328 - .default_advmss = vrf_default_advmss, 329 - }; 330 - 331 - static int init_dst_ops6_kmem_cachep(void) 332 - { 333 - vrf_dst_ops6.kmem_cachep = kmem_cache_create("vrf_ip6_dst_cache", 334 - sizeof(struct rt6_info), 335 - 0, 336 - SLAB_HWCACHE_ALIGN, 337 - NULL); 338 - 339 - if (!vrf_dst_ops6.kmem_cachep) 340 - return -ENOMEM; 341 - 342 - return 0; 343 - } 344 - 345 - static void free_dst_ops6_kmem_cachep(void) 346 - { 347 - kmem_cache_destroy(vrf_dst_ops6.kmem_cachep); 348 - } 349 - 350 - static int vrf_input6(struct sk_buff *skb) 351 - { 352 - skb->dev->stats.rx_errors++; 353 - kfree_skb(skb); 354 - return 0; 355 - } 356 - 357 352 /* modelled after ip6_finish_output2 */ 358 353 static int vrf_finish_output6(struct net *net, struct sock *sk, 359 354 struct sk_buff *skb) ··· 354 429 !(IP6CB(skb)->flags & IP6SKB_REROUTED)); 355 430 } 356 431 357 - static void vrf_rt6_destroy(struct net_vrf *vrf) 432 + static void vrf_rt6_release(struct net_vrf *vrf) 358 433 { 359 - dst_destroy(&vrf->rt6->dst); 360 - free_percpu(vrf->rt6->rt6i_pcpu); 434 + dst_release(&vrf->rt6->dst); 361 435 vrf->rt6 = NULL; 362 436 } 363 437 364 438 static int vrf_rt6_create(struct net_device *dev) 365 439 { 366 440 struct net_vrf *vrf = netdev_priv(dev); 367 - struct dst_entry *dst; 441 + struct net *net = dev_net(dev); 368 442 struct rt6_info *rt6; 369 - int cpu; 370 443 int rc = -ENOMEM; 371 444 372 - rt6 = dst_alloc(&vrf_dst_ops6, dev, 0, 373 - DST_OBSOLETE_NONE, 374 - (DST_HOST | DST_NOPOLICY | DST_NOXFRM)); 445 + rt6 = ip6_dst_alloc(net, dev, 446 + DST_HOST | DST_NOPOLICY | DST_NOXFRM | DST_NOCACHE); 375 447 if 
(!rt6) 376 448 goto out; 377 449 378 - dst = &rt6->dst; 379 - 380 - rt6->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_KERNEL); 381 - if (!rt6->rt6i_pcpu) { 382 - dst_destroy(dst); 383 - goto out; 384 - } 385 - for_each_possible_cpu(cpu) { 386 - struct rt6_info **p = per_cpu_ptr(rt6->rt6i_pcpu, cpu); 387 - *p = NULL; 388 - } 389 - 390 - memset(dst + 1, 0, sizeof(*rt6) - sizeof(*dst)); 391 - 392 - INIT_LIST_HEAD(&rt6->rt6i_siblings); 393 - INIT_LIST_HEAD(&rt6->rt6i_uncached); 394 - 395 - rt6->dst.input = vrf_input6; 396 450 rt6->dst.output = vrf_output6; 397 - 398 - rt6->rt6i_table = fib6_get_table(dev_net(dev), vrf->tb_id); 399 - 400 - atomic_set(&rt6->dst.__refcnt, 2); 401 - 451 + rt6->rt6i_table = fib6_get_table(net, vrf->tb_id); 452 + dst_hold(&rt6->dst); 402 453 vrf->rt6 = rt6; 403 454 rc = 0; 404 455 out: 405 456 return rc; 406 457 } 407 458 #else 408 - static int init_dst_ops6_kmem_cachep(void) 409 - { 410 - return 0; 411 - } 412 - 413 - static void free_dst_ops6_kmem_cachep(void) 414 - { 415 - } 416 - 417 - static void vrf_rt6_destroy(struct net_vrf *vrf) 459 + static void vrf_rt6_release(struct net_vrf *vrf) 418 460 { 419 461 } 420 462 ··· 449 557 !(IPCB(skb)->flags & IPSKB_REROUTED)); 450 558 } 451 559 452 - static void vrf_rtable_destroy(struct net_vrf *vrf) 560 + static void vrf_rtable_release(struct net_vrf *vrf) 453 561 { 454 562 struct dst_entry *dst = (struct dst_entry *)vrf->rth; 455 563 456 - dst_destroy(dst); 564 + dst_release(dst); 457 565 vrf->rth = NULL; 458 566 } 459 567 ··· 462 570 struct net_vrf *vrf = netdev_priv(dev); 463 571 struct rtable *rth; 464 572 465 - rth = dst_alloc(&vrf_dst_ops, dev, 2, 466 - DST_OBSOLETE_NONE, 467 - (DST_HOST | DST_NOPOLICY | DST_NOXFRM)); 573 + rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0); 468 574 if (rth) { 469 575 rth->dst.output = vrf_output; 470 - rth->rt_genid = rt_genid_ipv4(dev_net(dev)); 471 - rth->rt_flags = 0; 472 - rth->rt_type = RTN_UNICAST; 473 - rth->rt_is_input = 0; 474 - rth->rt_iif = 
0; 475 - rth->rt_pmtu = 0; 476 - rth->rt_gateway = 0; 477 - rth->rt_uses_gateway = 0; 478 576 rth->rt_table_id = vrf->tb_id; 479 - INIT_LIST_HEAD(&rth->rt_uncached); 480 - rth->rt_uncached_list = NULL; 481 577 } 482 578 483 579 return rth; ··· 553 673 struct net_device *port_dev; 554 674 struct list_head *iter; 555 675 556 - vrf_rtable_destroy(vrf); 557 - vrf_rt6_destroy(vrf); 676 + vrf_rtable_release(vrf); 677 + vrf_rt6_release(vrf); 558 678 559 679 netdev_for_each_lower_dev(dev, port_dev, iter) 560 680 vrf_del_slave(dev, port_dev); ··· 584 704 return 0; 585 705 586 706 out_rth: 587 - vrf_rtable_destroy(vrf); 707 + vrf_rtable_release(vrf); 588 708 out_stats: 589 709 free_percpu(dev->dstats); 590 710 dev->dstats = NULL; ··· 617 737 struct net_vrf *vrf = netdev_priv(dev); 618 738 619 739 rth = vrf->rth; 620 - atomic_inc(&rth->dst.__refcnt); 740 + dst_hold(&rth->dst); 621 741 } 622 742 623 743 return rth; ··· 668 788 struct net_vrf *vrf = netdev_priv(dev); 669 789 670 790 rt = vrf->rt6; 671 - atomic_inc(&rt->dst.__refcnt); 791 + dst_hold(&rt->dst); 672 792 } 673 793 674 794 return (struct dst_entry *)rt; ··· 826 946 { 827 947 int rc; 828 948 829 - vrf_dst_ops.kmem_cachep = 830 - kmem_cache_create("vrf_ip_dst_cache", 831 - sizeof(struct rtable), 0, 832 - SLAB_HWCACHE_ALIGN, 833 - NULL); 834 - 835 - if (!vrf_dst_ops.kmem_cachep) 836 - return -ENOMEM; 837 - 838 - rc = init_dst_ops6_kmem_cachep(); 839 - if (rc != 0) 840 - goto error2; 841 - 842 949 register_netdevice_notifier(&vrf_notifier_block); 843 950 844 951 rc = rtnl_link_register(&vrf_link_ops); ··· 836 969 837 970 error: 838 971 unregister_netdevice_notifier(&vrf_notifier_block); 839 - free_dst_ops6_kmem_cachep(); 840 - error2: 841 - kmem_cache_destroy(vrf_dst_ops.kmem_cachep); 842 972 return rc; 843 973 } 844 974 845 - static void __exit vrf_cleanup_module(void) 846 - { 847 - rtnl_link_unregister(&vrf_link_ops); 848 - unregister_netdevice_notifier(&vrf_notifier_block); 849 - 
kmem_cache_destroy(vrf_dst_ops.kmem_cachep); 850 - free_dst_ops6_kmem_cachep(); 851 - } 852 - 853 975 module_init(vrf_init_module); 854 - module_exit(vrf_cleanup_module); 855 976 MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern"); 856 977 MODULE_DESCRIPTION("Device driver to instantiate VRF domains"); 857 978 MODULE_LICENSE("GPL");
+4 -2
drivers/net/wireless/broadcom/b43/main.c
··· 5680 5680 INIT_WORK(&wl->firmware_load, b43_request_firmware); 5681 5681 schedule_work(&wl->firmware_load); 5682 5682 5683 - bcma_out: 5684 5683 return err; 5685 5684 5686 5685 bcma_err_wireless_exit: 5687 5686 ieee80211_free_hw(wl->hw); 5687 + bcma_out: 5688 + kfree(dev); 5688 5689 return err; 5689 5690 } 5690 5691 ··· 5713 5712 b43_rng_exit(wl); 5714 5713 5715 5714 b43_leds_unregister(wl); 5716 - 5717 5715 ieee80211_free_hw(wl->hw); 5716 + kfree(wldev->dev); 5718 5717 } 5719 5718 5720 5719 static struct bcma_driver b43_bcma_driver = { ··· 5797 5796 5798 5797 b43_leds_unregister(wl); 5799 5798 b43_wireless_exit(dev, wl); 5799 + kfree(dev); 5800 5800 } 5801 5801 5802 5802 static struct ssb_driver b43_ssb_driver = {
+2
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
··· 1147 1147 /* the fw is stopped, the aux sta is dead: clean up driver state */ 1148 1148 iwl_mvm_del_aux_sta(mvm); 1149 1149 1150 + iwl_free_fw_paging(mvm); 1151 + 1150 1152 /* 1151 1153 * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete() 1152 1154 * won't be called in this case).
-2
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
··· 761 761 for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++) 762 762 kfree(mvm->nvm_sections[i].data); 763 763 764 - iwl_free_fw_paging(mvm); 765 - 766 764 iwl_mvm_tof_clean(mvm); 767 765 768 766 ieee80211_free_hw(mvm->hw);
+2 -2
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
··· 732 732 */ 733 733 val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0); 734 734 if (val & (BIT(1) | BIT(17))) { 735 - IWL_INFO(trans, 736 - "can't access the RSA semaphore it is write protected\n"); 735 + IWL_DEBUG_INFO(trans, 736 + "can't access the RSA semaphore it is write protected\n"); 737 737 return 0; 738 738 } 739 739
+3 -3
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
··· 2488 2488 for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++) 2489 2489 rtldm->swing_idx_ofdm_base[p] = rtldm->swing_idx_ofdm[p]; 2490 2490 2491 - RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, 2492 - "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n", 2493 - rtldm->thermalvalue, thermal_value); 2491 + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, 2492 + "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n", 2493 + rtldm->thermalvalue, thermal_value); 2494 2494 /*Record last Power Tracking Thermal Value*/ 2495 2495 rtldm->thermalvalue = thermal_value; 2496 2496 }
+42
drivers/pci/access.c
··· 275 275 } 276 276 EXPORT_SYMBOL(pci_write_vpd); 277 277 278 + /** 279 + * pci_set_vpd_size - Set size of Vital Product Data space 280 + * @dev: pci device struct 281 + * @len: size of vpd space 282 + */ 283 + int pci_set_vpd_size(struct pci_dev *dev, size_t len) 284 + { 285 + if (!dev->vpd || !dev->vpd->ops) 286 + return -ENODEV; 287 + return dev->vpd->ops->set_size(dev, len); 288 + } 289 + EXPORT_SYMBOL(pci_set_vpd_size); 290 + 278 291 #define PCI_VPD_MAX_SIZE (PCI_VPD_ADDR_MASK + 1) 279 292 280 293 /** ··· 511 498 return ret ? ret : count; 512 499 } 513 500 501 + static int pci_vpd_set_size(struct pci_dev *dev, size_t len) 502 + { 503 + struct pci_vpd *vpd = dev->vpd; 504 + 505 + if (len == 0 || len > PCI_VPD_MAX_SIZE) 506 + return -EIO; 507 + 508 + vpd->valid = 1; 509 + vpd->len = len; 510 + 511 + return 0; 512 + } 513 + 514 514 static const struct pci_vpd_ops pci_vpd_ops = { 515 515 .read = pci_vpd_read, 516 516 .write = pci_vpd_write, 517 + .set_size = pci_vpd_set_size, 517 518 }; 518 519 519 520 static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count, ··· 560 533 return ret; 561 534 } 562 535 536 + static int pci_vpd_f0_set_size(struct pci_dev *dev, size_t len) 537 + { 538 + struct pci_dev *tdev = pci_get_slot(dev->bus, 539 + PCI_DEVFN(PCI_SLOT(dev->devfn), 0)); 540 + int ret; 541 + 542 + if (!tdev) 543 + return -ENODEV; 544 + 545 + ret = pci_set_vpd_size(tdev, len); 546 + pci_dev_put(tdev); 547 + return ret; 548 + } 549 + 563 550 static const struct pci_vpd_ops pci_vpd_f0_ops = { 564 551 .read = pci_vpd_f0_read, 565 552 .write = pci_vpd_f0_write, 553 + .set_size = pci_vpd_f0_set_size, 566 554 }; 567 555 568 556 int pci_vpd_init(struct pci_dev *dev)
+14 -6
drivers/pci/host/pci-imx6.c
··· 32 32 #define to_imx6_pcie(x) container_of(x, struct imx6_pcie, pp) 33 33 34 34 struct imx6_pcie { 35 - struct gpio_desc *reset_gpio; 35 + int reset_gpio; 36 36 struct clk *pcie_bus; 37 37 struct clk *pcie_phy; 38 38 struct clk *pcie; ··· 309 309 usleep_range(200, 500); 310 310 311 311 /* Some boards don't have PCIe reset GPIO. */ 312 - if (imx6_pcie->reset_gpio) { 313 - gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 0); 312 + if (gpio_is_valid(imx6_pcie->reset_gpio)) { 313 + gpio_set_value_cansleep(imx6_pcie->reset_gpio, 0); 314 314 msleep(100); 315 - gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 1); 315 + gpio_set_value_cansleep(imx6_pcie->reset_gpio, 1); 316 316 } 317 317 return 0; 318 318 ··· 523 523 { 524 524 struct imx6_pcie *imx6_pcie; 525 525 struct pcie_port *pp; 526 + struct device_node *np = pdev->dev.of_node; 526 527 struct resource *dbi_base; 527 528 struct device_node *node = pdev->dev.of_node; 528 529 int ret; ··· 545 544 return PTR_ERR(pp->dbi_base); 546 545 547 546 /* Fetch GPIOs */ 548 - imx6_pcie->reset_gpio = devm_gpiod_get_optional(&pdev->dev, "reset", 549 - GPIOD_OUT_LOW); 547 + imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0); 548 + if (gpio_is_valid(imx6_pcie->reset_gpio)) { 549 + ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio, 550 + GPIOF_OUT_INIT_LOW, "PCIe reset"); 551 + if (ret) { 552 + dev_err(&pdev->dev, "unable to get reset gpio\n"); 553 + return ret; 554 + } 555 + } 550 556 551 557 /* Fetch clocks */ 552 558 imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy");
+1
drivers/pci/pci.h
··· 97 97 struct pci_vpd_ops { 98 98 ssize_t (*read)(struct pci_dev *dev, loff_t pos, size_t count, void *buf); 99 99 ssize_t (*write)(struct pci_dev *dev, loff_t pos, size_t count, const void *buf); 100 + int (*set_size)(struct pci_dev *dev, size_t len); 100 101 }; 101 102 102 103 struct pci_vpd {
+13 -2
drivers/perf/arm_pmu.c
··· 737 737 break; 738 738 case CPU_PM_EXIT: 739 739 case CPU_PM_ENTER_FAILED: 740 - /* Restore and enable the counter */ 741 - armpmu_start(event, PERF_EF_RELOAD); 740 + /* 741 + * Restore and enable the counter. 742 + * armpmu_start() indirectly calls 743 + * 744 + * perf_event_update_userpage() 745 + * 746 + * that requires RCU read locking to be functional, 747 + * wrap the call within RCU_NONIDLE to make the 748 + * RCU subsystem aware this cpu is not idle from 749 + * an RCU perspective for the armpmu_start() call 750 + * duration. 751 + */ 752 + RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD)); 742 753 break; 743 754 default: 744 755 break;
+5 -2
drivers/phy/phy-rockchip-dp.c
··· 86 86 if (!np) 87 87 return -ENODEV; 88 88 89 + if (!dev->parent || !dev->parent->of_node) 90 + return -ENODEV; 91 + 89 92 dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL); 90 93 if (IS_ERR(dp)) 91 94 return -ENOMEM; ··· 107 104 return ret; 108 105 } 109 106 110 - dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf"); 107 + dp->grf = syscon_node_to_regmap(dev->parent->of_node); 111 108 if (IS_ERR(dp->grf)) { 112 - dev_err(dev, "rk3288-dp needs rockchip,grf property\n"); 109 + dev_err(dev, "rk3288-dp needs the General Register Files syscon\n"); 113 110 return PTR_ERR(dp->grf); 114 111 } 115 112
+4 -1
drivers/phy/phy-rockchip-emmc.c
··· 176 176 struct regmap *grf; 177 177 unsigned int reg_offset; 178 178 179 - grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf"); 179 + if (!dev->parent || !dev->parent->of_node) 180 + return -ENODEV; 181 + 182 + grf = syscon_node_to_regmap(dev->parent->of_node); 180 183 if (IS_ERR(grf)) { 181 184 dev_err(dev, "Missing rockchip,grf property\n"); 182 185 return PTR_ERR(grf);
+1
drivers/pinctrl/freescale/Kconfig
··· 2 2 bool 3 3 select PINMUX 4 4 select PINCONF 5 + select REGMAP 5 6 6 7 config PINCTRL_IMX1_CORE 7 8 bool
+5 -4
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
··· 1004 1004 struct mtk_pinctrl *pctl = dev_get_drvdata(chip->parent); 1005 1005 int eint_num, virq, eint_offset; 1006 1006 unsigned int set_offset, bit, clr_bit, clr_offset, rst, i, unmask, dbnc; 1007 - static const unsigned int dbnc_arr[] = {0 , 1, 16, 32, 64, 128, 256}; 1007 + static const unsigned int debounce_time[] = {500, 1000, 16000, 32000, 64000, 1008 + 128000, 256000}; 1008 1009 const struct mtk_desc_pin *pin; 1009 1010 struct irq_data *d; 1010 1011 ··· 1023 1022 if (!mtk_eint_can_en_debounce(pctl, eint_num)) 1024 1023 return -ENOSYS; 1025 1024 1026 - dbnc = ARRAY_SIZE(dbnc_arr); 1027 - for (i = 0; i < ARRAY_SIZE(dbnc_arr); i++) { 1028 - if (debounce <= dbnc_arr[i]) { 1025 + dbnc = ARRAY_SIZE(debounce_time); 1026 + for (i = 0; i < ARRAY_SIZE(debounce_time); i++) { 1027 + if (debounce <= debounce_time[i]) { 1029 1028 dbnc = i; 1030 1029 break; 1031 1030 }
+3 -3
drivers/pinctrl/pinctrl-single.c
··· 1280 1280 1281 1281 /* Parse pins in each row from LSB */ 1282 1282 while (mask) { 1283 - bit_pos = ffs(mask); 1283 + bit_pos = __ffs(mask); 1284 1284 pin_num_from_lsb = bit_pos / pcs->bits_per_pin; 1285 - mask_pos = ((pcs->fmask) << (bit_pos - 1)); 1285 + mask_pos = ((pcs->fmask) << bit_pos); 1286 1286 val_pos = val & mask_pos; 1287 1287 submask = mask & mask_pos; 1288 1288 ··· 1852 1852 ret = of_property_read_u32(np, "pinctrl-single,function-mask", 1853 1853 &pcs->fmask); 1854 1854 if (!ret) { 1855 - pcs->fshift = ffs(pcs->fmask) - 1; 1855 + pcs->fshift = __ffs(pcs->fmask); 1856 1856 pcs->fmax = pcs->fmask >> pcs->fshift; 1857 1857 } else { 1858 1858 /* If mask property doesn't exist, function mux is invalid. */
+5 -1
drivers/platform/x86/hp_accel.c
··· 127 127 arg0.integer.value = reg; 128 128 129 129 status = acpi_evaluate_integer(dev->handle, "ALRD", &args, &lret); 130 + if (ACPI_FAILURE(status)) 131 + return -EINVAL; 130 132 *ret = lret; 131 - return (status != AE_OK) ? -EINVAL : 0; 133 + return 0; 132 134 } 133 135 134 136 /** ··· 175 173 DEFINE_CONV(normal, 1, 2, 3); 176 174 DEFINE_CONV(y_inverted, 1, -2, 3); 177 175 DEFINE_CONV(x_inverted, -1, 2, 3); 176 + DEFINE_CONV(x_inverted_usd, -1, 2, -3); 178 177 DEFINE_CONV(z_inverted, 1, 2, -3); 179 178 DEFINE_CONV(xy_swap, 2, 1, 3); 180 179 DEFINE_CONV(xy_rotated_left, -2, 1, 3); ··· 239 236 AXIS_DMI_MATCH("HP8710", "HP Compaq 8710", y_inverted), 240 237 AXIS_DMI_MATCH("HDX18", "HP HDX 18", x_inverted), 241 238 AXIS_DMI_MATCH("HPB432x", "HP ProBook 432", xy_rotated_left), 239 + AXIS_DMI_MATCH("HPB440G3", "HP ProBook 440 G3", x_inverted_usd), 242 240 AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left), 243 241 AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted), 244 242 AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap),
+2
drivers/platform/x86/intel-hid.c
··· 91 91 } 92 92 93 93 static const struct dev_pm_ops intel_hid_pl_pm_ops = { 94 + .freeze = intel_hid_pl_suspend_handler, 95 + .restore = intel_hid_pl_resume_handler, 94 96 .suspend = intel_hid_pl_suspend_handler, 95 97 .resume = intel_hid_pl_resume_handler, 96 98 };
+22 -26
drivers/platform/x86/intel_pmc_ipc.c
··· 687 687 ipcdev.acpi_io_size = size; 688 688 dev_info(&pdev->dev, "io res: %pR\n", res); 689 689 690 - /* This is index 0 to cover BIOS data register */ 691 690 punit_res = punit_res_array; 691 + /* This is index 0 to cover BIOS data register */ 692 692 res = platform_get_resource(pdev, IORESOURCE_MEM, 693 693 PLAT_RESOURCE_BIOS_DATA_INDEX); 694 694 if (!res) { ··· 698 698 *punit_res = *res; 699 699 dev_info(&pdev->dev, "punit BIOS data res: %pR\n", res); 700 700 701 + /* This is index 1 to cover BIOS interface register */ 701 702 res = platform_get_resource(pdev, IORESOURCE_MEM, 702 703 PLAT_RESOURCE_BIOS_IFACE_INDEX); 703 704 if (!res) { 704 705 dev_err(&pdev->dev, "Failed to get res of punit BIOS iface\n"); 705 706 return -ENXIO; 706 707 } 707 - /* This is index 1 to cover BIOS interface register */ 708 708 *++punit_res = *res; 709 709 dev_info(&pdev->dev, "punit BIOS interface res: %pR\n", res); 710 710 711 + /* This is index 2 to cover ISP data register, optional */ 711 712 res = platform_get_resource(pdev, IORESOURCE_MEM, 712 713 PLAT_RESOURCE_ISP_DATA_INDEX); 713 - if (!res) { 714 - dev_err(&pdev->dev, "Failed to get res of punit ISP data\n"); 715 - return -ENXIO; 714 + ++punit_res; 715 + if (res) { 716 + *punit_res = *res; 717 + dev_info(&pdev->dev, "punit ISP data res: %pR\n", res); 716 718 } 717 - /* This is index 2 to cover ISP data register */ 718 - *++punit_res = *res; 719 - dev_info(&pdev->dev, "punit ISP data res: %pR\n", res); 720 719 720 + /* This is index 3 to cover ISP interface register, optional */ 721 721 res = platform_get_resource(pdev, IORESOURCE_MEM, 722 722 PLAT_RESOURCE_ISP_IFACE_INDEX); 723 - if (!res) { 724 - dev_err(&pdev->dev, "Failed to get res of punit ISP iface\n"); 725 - return -ENXIO; 723 + ++punit_res; 724 + if (res) { 725 + *punit_res = *res; 726 + dev_info(&pdev->dev, "punit ISP interface res: %pR\n", res); 726 727 } 727 - /* This is index 3 to cover ISP interface register */ 728 - *++punit_res = *res; 729 - 
dev_info(&pdev->dev, "punit ISP interface res: %pR\n", res); 730 728 729 + /* This is index 4 to cover GTD data register, optional */ 731 730 res = platform_get_resource(pdev, IORESOURCE_MEM, 732 731 PLAT_RESOURCE_GTD_DATA_INDEX); 733 - if (!res) { 734 - dev_err(&pdev->dev, "Failed to get res of punit GTD data\n"); 735 - return -ENXIO; 732 + ++punit_res; 733 + if (res) { 734 + *punit_res = *res; 735 + dev_info(&pdev->dev, "punit GTD data res: %pR\n", res); 736 736 } 737 - /* This is index 4 to cover GTD data register */ 738 - *++punit_res = *res; 739 - dev_info(&pdev->dev, "punit GTD data res: %pR\n", res); 740 737 738 + /* This is index 5 to cover GTD interface register, optional */ 741 739 res = platform_get_resource(pdev, IORESOURCE_MEM, 742 740 PLAT_RESOURCE_GTD_IFACE_INDEX); 743 - if (!res) { 744 - dev_err(&pdev->dev, "Failed to get res of punit GTD iface\n"); 745 - return -ENXIO; 741 + ++punit_res; 742 + if (res) { 743 + *punit_res = *res; 744 + dev_info(&pdev->dev, "punit GTD interface res: %pR\n", res); 746 745 } 747 - /* This is index 5 to cover GTD interface register */ 748 - *++punit_res = *res; 749 - dev_info(&pdev->dev, "punit GTD interface res: %pR\n", res); 750 746 751 747 res = platform_get_resource(pdev, IORESOURCE_MEM, 752 748 PLAT_RESOURCE_IPC_INDEX);
+32 -16
drivers/platform/x86/intel_punit_ipc.c
··· 227 227 struct resource *res; 228 228 void __iomem *addr; 229 229 230 + /* 231 + * The following resources are required 232 + * - BIOS_IPC BASE_DATA 233 + * - BIOS_IPC BASE_IFACE 234 + */ 230 235 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 231 236 addr = devm_ioremap_resource(&pdev->dev, res); 232 237 if (IS_ERR(addr)) ··· 244 239 return PTR_ERR(addr); 245 240 punit_ipcdev->base[BIOS_IPC][BASE_IFACE] = addr; 246 241 242 + /* 243 + * The following resources are optional 244 + * - ISPDRIVER_IPC BASE_DATA 245 + * - ISPDRIVER_IPC BASE_IFACE 246 + * - GTDRIVER_IPC BASE_DATA 247 + * - GTDRIVER_IPC BASE_IFACE 248 + */ 247 249 res = platform_get_resource(pdev, IORESOURCE_MEM, 2); 248 - addr = devm_ioremap_resource(&pdev->dev, res); 249 - if (IS_ERR(addr)) 250 - return PTR_ERR(addr); 251 - punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr; 250 + if (res) { 251 + addr = devm_ioremap_resource(&pdev->dev, res); 252 + if (!IS_ERR(addr)) 253 + punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr; 254 + } 252 255 253 256 res = platform_get_resource(pdev, IORESOURCE_MEM, 3); 254 - addr = devm_ioremap_resource(&pdev->dev, res); 255 - if (IS_ERR(addr)) 256 - return PTR_ERR(addr); 257 - punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr; 257 + if (res) { 258 + addr = devm_ioremap_resource(&pdev->dev, res); 259 + if (!IS_ERR(addr)) 260 + punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr; 261 + } 258 262 259 263 res = platform_get_resource(pdev, IORESOURCE_MEM, 4); 260 - addr = devm_ioremap_resource(&pdev->dev, res); 261 - if (IS_ERR(addr)) 262 - return PTR_ERR(addr); 263 - punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr; 264 + if (res) { 265 + addr = devm_ioremap_resource(&pdev->dev, res); 266 + if (!IS_ERR(addr)) 267 + punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr; 268 + } 264 269 265 270 res = platform_get_resource(pdev, IORESOURCE_MEM, 5); 266 - addr = devm_ioremap_resource(&pdev->dev, res); 267 - if (IS_ERR(addr)) 268 - return PTR_ERR(addr); 269 - 
punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr; 271 + if (res) { 272 + addr = devm_ioremap_resource(&pdev->dev, res); 273 + if (!IS_ERR(addr)) 274 + punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr; 275 + } 270 276 271 277 return 0; 272 278 }
+1 -1
drivers/platform/x86/intel_telemetry_pltdrv.c
··· 659 659 static int telemetry_plt_set_sampling_period(u8 pss_period, u8 ioss_period) 660 660 { 661 661 u32 telem_ctrl = 0; 662 - int ret; 662 + int ret = 0; 663 663 664 664 mutex_lock(&(telm_conf->telem_lock)); 665 665 if (ioss_period) {
+3 -1
drivers/platform/x86/thinkpad_acpi.c
··· 7972 7972 fan_update_desired_level(s); 7973 7973 mutex_unlock(&fan_mutex); 7974 7974 7975 + if (rc) 7976 + return rc; 7975 7977 if (status) 7976 7978 *status = s; 7977 7979 7978 - return rc; 7980 + return 0; 7979 7981 } 7980 7982 7981 7983 static int fan_get_speed(unsigned int *speed)
+3 -3
drivers/rtc/rtc-ds1307.c
··· 863 863 * A user-initiated temperature conversion is not started by this function, 864 864 * so the temperature is updated once every 64 seconds. 865 865 */ 866 - static int ds3231_hwmon_read_temp(struct device *dev, s16 *mC) 866 + static int ds3231_hwmon_read_temp(struct device *dev, s32 *mC) 867 867 { 868 868 struct ds1307 *ds1307 = dev_get_drvdata(dev); 869 869 u8 temp_buf[2]; ··· 892 892 struct device_attribute *attr, char *buf) 893 893 { 894 894 int ret; 895 - s16 temp; 895 + s32 temp; 896 896 897 897 ret = ds3231_hwmon_read_temp(dev, &temp); 898 898 if (ret) ··· 1531 1531 return PTR_ERR(ds1307->rtc); 1532 1532 } 1533 1533 1534 - if (ds1307_can_wakeup_device) { 1534 + if (ds1307_can_wakeup_device && ds1307->client->irq <= 0) { 1535 1535 /* Disable request for an IRQ */ 1536 1536 want_irq = false; 1537 1537 dev_info(&client->dev, "'wakeup-source' is set, request for an IRQ is disabled!\n");
+3 -2
drivers/s390/block/dcssblk.c
··· 756 756 blk_cleanup_queue(dev_info->dcssblk_queue); 757 757 dev_info->gd->queue = NULL; 758 758 put_disk(dev_info->gd); 759 - device_unregister(&dev_info->dev); 760 759 761 760 /* unload all related segments */ 762 761 list_for_each_entry(entry, &dev_info->seg_list, lh) 763 762 segment_unload(entry->segment_name); 764 763 765 - put_device(&dev_info->dev); 766 764 up_write(&dcssblk_devices_sem); 765 + 766 + device_unregister(&dev_info->dev); 767 + put_device(&dev_info->dev); 767 768 768 769 rc = count; 769 770 out_buf:
+1 -1
drivers/s390/block/scm_blk.c
··· 303 303 if (req->cmd_type != REQ_TYPE_FS) { 304 304 blk_start_request(req); 305 305 blk_dump_rq_flags(req, KMSG_COMPONENT " bad request"); 306 - blk_end_request_all(req, -EIO); 306 + __blk_end_request_all(req, -EIO); 307 307 continue; 308 308 } 309 309
+2
drivers/thermal/Kconfig
··· 376 376 tristate "Temperature sensor driver for mediatek SoCs" 377 377 depends on ARCH_MEDIATEK || COMPILE_TEST 378 378 depends on HAS_IOMEM 379 + depends on NVMEM || NVMEM=n 380 + depends on RESET_CONTROLLER 379 381 default y 380 382 help 381 383 Enable this option if you want to have support for thermal management
+1 -2
drivers/thermal/mtk_thermal.c
··· 27 27 #include <linux/thermal.h> 28 28 #include <linux/reset.h> 29 29 #include <linux/types.h> 30 - #include <linux/nvmem-consumer.h> 31 30 32 31 /* AUXADC Registers */ 33 32 #define AUXADC_CON0_V 0x000 ··· 618 619 619 620 module_platform_driver(mtk_thermal_driver); 620 621 621 - MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de"); 622 + MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>"); 622 623 MODULE_AUTHOR("Hanyi Wu <hanyi.wu@mediatek.com>"); 623 624 MODULE_DESCRIPTION("Mediatek thermal driver"); 624 625 MODULE_LICENSE("GPL v2");
+2 -2
drivers/thermal/of-thermal.c
··· 803 803 * otherwise, it returns a corresponding ERR_PTR(). Caller must 804 804 * check the return value with help of IS_ERR() helper. 805 805 */ 806 - static struct __thermal_zone * 807 - thermal_of_build_thermal_zone(struct device_node *np) 806 + static struct __thermal_zone 807 + __init *thermal_of_build_thermal_zone(struct device_node *np) 808 808 { 809 809 struct device_node *child = NULL, *gchild; 810 810 struct __thermal_zone *tz;
+1 -1
drivers/thermal/power_allocator.c
··· 301 301 capped_extra_power = 0; 302 302 extra_power = 0; 303 303 for (i = 0; i < num_actors; i++) { 304 - u64 req_range = req_power[i] * power_range; 304 + u64 req_range = (u64)req_power[i] * power_range; 305 305 306 306 granted_power[i] = DIV_ROUND_CLOSEST_ULL(req_range, 307 307 total_req_power);
+4 -4
drivers/thermal/thermal_core.c
··· 688 688 { 689 689 struct thermal_zone_device *tz = to_thermal_zone(dev); 690 690 int trip, ret; 691 - unsigned long temperature; 691 + int temperature; 692 692 693 693 if (!tz->ops->set_trip_temp) 694 694 return -EPERM; ··· 696 696 if (!sscanf(attr->attr.name, "trip_point_%d_temp", &trip)) 697 697 return -EINVAL; 698 698 699 - if (kstrtoul(buf, 10, &temperature)) 699 + if (kstrtoint(buf, 10, &temperature)) 700 700 return -EINVAL; 701 701 702 702 ret = tz->ops->set_trip_temp(tz, trip, temperature); ··· 899 899 { 900 900 struct thermal_zone_device *tz = to_thermal_zone(dev); 901 901 int ret = 0; 902 - unsigned long temperature; 902 + int temperature; 903 903 904 - if (kstrtoul(buf, 10, &temperature)) 904 + if (kstrtoint(buf, 10, &temperature)) 905 905 return -EINVAL; 906 906 907 907 if (!tz->ops->set_emul_temp) {
+30 -33
drivers/tty/pty.c
··· 663 663 /* this is called once with whichever end is closed last */ 664 664 static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty) 665 665 { 666 - struct inode *ptmx_inode; 666 + struct pts_fs_info *fsi; 667 667 668 668 if (tty->driver->subtype == PTY_TYPE_MASTER) 669 - ptmx_inode = tty->driver_data; 669 + fsi = tty->driver_data; 670 670 else 671 - ptmx_inode = tty->link->driver_data; 672 - devpts_kill_index(ptmx_inode, tty->index); 673 - devpts_del_ref(ptmx_inode); 671 + fsi = tty->link->driver_data; 672 + devpts_kill_index(fsi, tty->index); 673 + devpts_put_ref(fsi); 674 674 } 675 675 676 676 static const struct tty_operations ptm_unix98_ops = { ··· 720 720 721 721 static int ptmx_open(struct inode *inode, struct file *filp) 722 722 { 723 + struct pts_fs_info *fsi; 723 724 struct tty_struct *tty; 724 725 struct inode *slave_inode; 725 726 int retval; ··· 735 734 if (retval) 736 735 return retval; 737 736 737 + fsi = devpts_get_ref(inode, filp); 738 + retval = -ENODEV; 739 + if (!fsi) 740 + goto out_free_file; 741 + 738 742 /* find a device that is not in use. 
*/ 739 743 mutex_lock(&devpts_mutex); 740 - index = devpts_new_index(inode); 741 - if (index < 0) { 742 - retval = index; 743 - mutex_unlock(&devpts_mutex); 744 - goto err_file; 745 - } 746 - 744 + index = devpts_new_index(fsi); 747 745 mutex_unlock(&devpts_mutex); 746 + 747 + retval = index; 748 + if (index < 0) 749 + goto out_put_ref; 750 + 748 751 749 752 mutex_lock(&tty_mutex); 750 753 tty = tty_init_dev(ptm_driver, index); 751 - 752 - if (IS_ERR(tty)) { 753 - retval = PTR_ERR(tty); 754 - goto out; 755 - } 756 - 757 754 /* The tty returned here is locked so we can safely 758 755 drop the mutex */ 759 756 mutex_unlock(&tty_mutex); 760 757 761 - set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */ 762 - tty->driver_data = inode; 758 + retval = PTR_ERR(tty); 759 + if (IS_ERR(tty)) 760 + goto out; 763 761 764 762 /* 765 - * In the case where all references to ptmx inode are dropped and we 766 - * still have /dev/tty opened pointing to the master/slave pair (ptmx 767 - * is closed/released before /dev/tty), we must make sure that the inode 768 - * is still valid when we call the final pty_unix98_shutdown, thus we 769 - * hold an additional reference to the ptmx inode. For the same /dev/tty 770 - * last close case, we also need to make sure the super_block isn't 771 - * destroyed (devpts instance unmounted), before /dev/tty is closed and 772 - * on its release devpts_kill_index is called. 
763 + * From here on out, the tty is "live", and the index and 764 + * fsi will be killed/put by the tty_release() 773 765 */ 774 - devpts_add_ref(inode); 766 + set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */ 767 + tty->driver_data = fsi; 775 768 776 769 tty_add_file(tty, filp); 777 770 778 - slave_inode = devpts_pty_new(inode, 771 + slave_inode = devpts_pty_new(fsi, 779 772 MKDEV(UNIX98_PTY_SLAVE_MAJOR, index), index, 780 773 tty->link); 781 774 if (IS_ERR(slave_inode)) { ··· 788 793 return 0; 789 794 err_release: 790 795 tty_unlock(tty); 796 + // This will also put-ref the fsi 791 797 tty_release(inode, filp); 792 798 return retval; 793 799 out: 794 - mutex_unlock(&tty_mutex); 795 - devpts_kill_index(inode, index); 796 - err_file: 800 + devpts_kill_index(fsi, index); 801 + out_put_ref: 802 + devpts_put_ref(fsi); 803 + out_free_file: 797 804 tty_free_file(filp); 798 805 return retval; 799 806 }
+10 -1
drivers/tty/serial/8250/8250_port.c
··· 1403 1403 /* 1404 1404 * Empty the RX FIFO, we are not interested in anything 1405 1405 * received during the half-duplex transmission. 1406 + * Enable previously disabled RX interrupts. 1406 1407 */ 1407 - if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX)) 1408 + if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX)) { 1408 1409 serial8250_clear_fifos(p); 1410 + 1411 + serial8250_rpm_get(p); 1412 + 1413 + p->ier |= UART_IER_RLSI | UART_IER_RDI; 1414 + serial_port_out(&p->port, UART_IER, p->ier); 1415 + 1416 + serial8250_rpm_put(p); 1417 + } 1409 1418 } 1410 1419 1411 1420 static void serial8250_em485_handle_stop_tx(unsigned long arg)
-1
drivers/tty/serial/8250/Kconfig
··· 324 324 config SERIAL_8250_RT288X 325 325 bool "Ralink RT288x/RT305x/RT3662/RT3883 serial port support" 326 326 depends on SERIAL_8250 327 - depends on MIPS || COMPILE_TEST 328 327 default y if MIPS_ALCHEMY || SOC_RT288X || SOC_RT305X || SOC_RT3883 || SOC_MT7620 329 328 help 330 329 Selecting this option will add support for the alternate register
+4 -4
drivers/tty/serial/uartlite.c
··· 72 72 iowrite32be(val, addr); 73 73 } 74 74 75 - static const struct uartlite_reg_ops uartlite_be = { 75 + static struct uartlite_reg_ops uartlite_be = { 76 76 .in = uartlite_inbe32, 77 77 .out = uartlite_outbe32, 78 78 }; ··· 87 87 iowrite32(val, addr); 88 88 } 89 89 90 - static const struct uartlite_reg_ops uartlite_le = { 90 + static struct uartlite_reg_ops uartlite_le = { 91 91 .in = uartlite_inle32, 92 92 .out = uartlite_outle32, 93 93 }; 94 94 95 95 static inline u32 uart_in32(u32 offset, struct uart_port *port) 96 96 { 97 - const struct uartlite_reg_ops *reg_ops = port->private_data; 97 + struct uartlite_reg_ops *reg_ops = port->private_data; 98 98 99 99 return reg_ops->in(port->membase + offset); 100 100 } 101 101 102 102 static inline void uart_out32(u32 val, u32 offset, struct uart_port *port) 103 103 { 104 - const struct uartlite_reg_ops *reg_ops = port->private_data; 104 + struct uartlite_reg_ops *reg_ops = port->private_data; 105 105 106 106 reg_ops->out(val, port->membase + offset); 107 107 }
+22 -1
drivers/usb/dwc3/core.c
··· 1150 1150 phy_exit(dwc->usb2_generic_phy); 1151 1151 phy_exit(dwc->usb3_generic_phy); 1152 1152 1153 + usb_phy_set_suspend(dwc->usb2_phy, 1); 1154 + usb_phy_set_suspend(dwc->usb3_phy, 1); 1155 + WARN_ON(phy_power_off(dwc->usb2_generic_phy) < 0); 1156 + WARN_ON(phy_power_off(dwc->usb3_generic_phy) < 0); 1157 + 1153 1158 pinctrl_pm_select_sleep_state(dev); 1154 1159 1155 1160 return 0; ··· 1168 1163 1169 1164 pinctrl_pm_select_default_state(dev); 1170 1165 1166 + usb_phy_set_suspend(dwc->usb2_phy, 0); 1167 + usb_phy_set_suspend(dwc->usb3_phy, 0); 1168 + ret = phy_power_on(dwc->usb2_generic_phy); 1169 + if (ret < 0) 1170 + return ret; 1171 + 1172 + ret = phy_power_on(dwc->usb3_generic_phy); 1173 + if (ret < 0) 1174 + goto err_usb2phy_power; 1175 + 1171 1176 usb_phy_init(dwc->usb3_phy); 1172 1177 usb_phy_init(dwc->usb2_phy); 1173 1178 ret = phy_init(dwc->usb2_generic_phy); 1174 1179 if (ret < 0) 1175 - return ret; 1180 + goto err_usb3phy_power; 1176 1181 1177 1182 ret = phy_init(dwc->usb3_generic_phy); 1178 1183 if (ret < 0) ··· 1214 1199 1215 1200 err_usb2phy_init: 1216 1201 phy_exit(dwc->usb2_generic_phy); 1202 + 1203 + err_usb3phy_power: 1204 + phy_power_off(dwc->usb3_generic_phy); 1205 + 1206 + err_usb2phy_power: 1207 + phy_power_off(dwc->usb2_generic_phy); 1217 1208 1218 1209 return ret; 1219 1210 }
+8 -5
drivers/usb/dwc3/debugfs.c
··· 645 645 file = debugfs_create_regset32("regdump", S_IRUGO, root, dwc->regset); 646 646 if (!file) { 647 647 ret = -ENOMEM; 648 - goto err1; 648 + goto err2; 649 649 } 650 650 651 651 if (IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)) { ··· 653 653 dwc, &dwc3_mode_fops); 654 654 if (!file) { 655 655 ret = -ENOMEM; 656 - goto err1; 656 + goto err2; 657 657 } 658 658 } 659 659 ··· 663 663 dwc, &dwc3_testmode_fops); 664 664 if (!file) { 665 665 ret = -ENOMEM; 666 - goto err1; 666 + goto err2; 667 667 } 668 668 669 669 file = debugfs_create_file("link_state", S_IRUGO | S_IWUSR, root, 670 670 dwc, &dwc3_link_state_fops); 671 671 if (!file) { 672 672 ret = -ENOMEM; 673 - goto err1; 673 + goto err2; 674 674 } 675 675 } 676 676 677 677 return 0; 678 + 679 + err2: 680 + kfree(dwc->regset); 678 681 679 682 err1: 680 683 debugfs_remove_recursive(root); ··· 689 686 void dwc3_debugfs_exit(struct dwc3 *dwc) 690 687 { 691 688 debugfs_remove_recursive(dwc->root); 692 - dwc->root = NULL; 689 + kfree(dwc->regset); 693 690 }
+4 -8
drivers/usb/dwc3/dwc3-omap.c
··· 496 496 ret = pm_runtime_get_sync(dev); 497 497 if (ret < 0) { 498 498 dev_err(dev, "get_sync failed with err %d\n", ret); 499 - goto err0; 499 + goto err1; 500 500 } 501 501 502 502 dwc3_omap_map_offset(omap); ··· 516 516 517 517 ret = dwc3_omap_extcon_register(omap); 518 518 if (ret < 0) 519 - goto err2; 519 + goto err1; 520 520 521 521 ret = of_platform_populate(node, NULL, NULL, dev); 522 522 if (ret) { 523 523 dev_err(&pdev->dev, "failed to create dwc3 core\n"); 524 - goto err3; 524 + goto err2; 525 525 } 526 526 527 527 dwc3_omap_enable_irqs(omap); 528 528 529 529 return 0; 530 530 531 - err3: 531 + err2: 532 532 extcon_unregister_notifier(omap->edev, EXTCON_USB, &omap->vbus_nb); 533 533 extcon_unregister_notifier(omap->edev, EXTCON_USB_HOST, &omap->id_nb); 534 - err2: 535 - dwc3_omap_disable_irqs(omap); 536 534 537 535 err1: 538 536 pm_runtime_put_sync(dev); 539 - 540 - err0: 541 537 pm_runtime_disable(dev); 542 538 543 539 return ret;
+6
drivers/usb/dwc3/gadget.c
··· 2936 2936 2937 2937 int dwc3_gadget_suspend(struct dwc3 *dwc) 2938 2938 { 2939 + if (!dwc->gadget_driver) 2940 + return 0; 2941 + 2939 2942 if (dwc->pullups_connected) { 2940 2943 dwc3_gadget_disable_irq(dwc); 2941 2944 dwc3_gadget_run_stop(dwc, true, true); ··· 2956 2953 { 2957 2954 struct dwc3_ep *dep; 2958 2955 int ret; 2956 + 2957 + if (!dwc->gadget_driver) 2958 + return 0; 2959 2959 2960 2960 /* Start with SuperSpeed Default */ 2961 2961 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
+2
drivers/usb/gadget/composite.c
··· 651 651 ssp_cap->bLength = USB_DT_USB_SSP_CAP_SIZE(1); 652 652 ssp_cap->bDescriptorType = USB_DT_DEVICE_CAPABILITY; 653 653 ssp_cap->bDevCapabilityType = USB_SSP_CAP_TYPE; 654 + ssp_cap->bReserved = 0; 655 + ssp_cap->wReserved = 0; 654 656 655 657 /* SSAC = 1 (2 attributes) */ 656 658 ssp_cap->bmAttributes = cpu_to_le32(1);
+2 -3
drivers/usb/gadget/function/f_fs.c
··· 646 646 work); 647 647 int ret = io_data->req->status ? io_data->req->status : 648 648 io_data->req->actual; 649 + bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD; 649 650 650 651 if (io_data->read && ret > 0) { 651 652 use_mm(io_data->mm); ··· 658 657 659 658 io_data->kiocb->ki_complete(io_data->kiocb, ret, ret); 660 659 661 - if (io_data->ffs->ffs_eventfd && 662 - !(io_data->kiocb->ki_flags & IOCB_EVENTFD)) 660 + if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd) 663 661 eventfd_signal(io_data->ffs->ffs_eventfd, 1); 664 662 665 663 usb_ep_free_request(io_data->ep, io_data->req); 666 664 667 - io_data->kiocb->private = NULL; 668 665 if (io_data->read) 669 666 kfree(io_data->to_free); 670 667 kfree(io_data->buf);
+8 -7
drivers/video/fbdev/amba-clcd.c
··· 440 440 fb->off_ienb = CLCD_PL111_IENB; 441 441 fb->off_cntl = CLCD_PL111_CNTL; 442 442 } else { 443 - #ifdef CONFIG_ARCH_VERSATILE 444 - fb->off_ienb = CLCD_PL111_IENB; 445 - fb->off_cntl = CLCD_PL111_CNTL; 446 - #else 447 - fb->off_ienb = CLCD_PL110_IENB; 448 - fb->off_cntl = CLCD_PL110_CNTL; 449 - #endif 443 + if (of_machine_is_compatible("arm,versatile-ab") || 444 + of_machine_is_compatible("arm,versatile-pb")) { 445 + fb->off_ienb = CLCD_PL111_IENB; 446 + fb->off_cntl = CLCD_PL111_CNTL; 447 + } else { 448 + fb->off_ienb = CLCD_PL110_IENB; 449 + fb->off_cntl = CLCD_PL110_CNTL; 450 + } 450 451 } 451 452 452 453 fb->clk = clk_get(&fb->dev->dev, NULL);
+4 -8
drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
··· 200 200 static int sharp_ls_get_gpio(struct device *dev, int gpio, unsigned long flags, 201 201 char *desc, struct gpio_desc **gpiod) 202 202 { 203 - struct gpio_desc *gd; 204 203 int r; 205 204 206 - *gpiod = NULL; 207 - 208 205 r = devm_gpio_request_one(dev, gpio, flags, desc); 209 - if (r) 206 + if (r) { 207 + *gpiod = NULL; 210 208 return r == -ENOENT ? 0 : r; 209 + } 211 210 212 - gd = gpio_to_desc(gpio); 213 - if (IS_ERR(gd)) 214 - return PTR_ERR(gd) == -ENOENT ? 0 : PTR_ERR(gd); 211 + *gpiod = gpio_to_desc(gpio); 215 212 216 - *gpiod = gd; 217 213 return 0; 218 214 } 219 215
+24 -25
fs/devpts/inode.c
··· 128 128 struct pts_fs_info { 129 129 struct ida allocated_ptys; 130 130 struct pts_mount_opts mount_opts; 131 + struct super_block *sb; 131 132 struct dentry *ptmx_dentry; 132 133 }; 133 134 ··· 359 358 .show_options = devpts_show_options, 360 359 }; 361 360 362 - static void *new_pts_fs_info(void) 361 + static void *new_pts_fs_info(struct super_block *sb) 363 362 { 364 363 struct pts_fs_info *fsi; 365 364 ··· 370 369 ida_init(&fsi->allocated_ptys); 371 370 fsi->mount_opts.mode = DEVPTS_DEFAULT_MODE; 372 371 fsi->mount_opts.ptmxmode = DEVPTS_DEFAULT_PTMX_MODE; 372 + fsi->sb = sb; 373 373 374 374 return fsi; 375 375 } ··· 386 384 s->s_op = &devpts_sops; 387 385 s->s_time_gran = 1; 388 386 389 - s->s_fs_info = new_pts_fs_info(); 387 + s->s_fs_info = new_pts_fs_info(s); 390 388 if (!s->s_fs_info) 391 389 goto fail; 392 390 ··· 526 524 * to the System V naming convention 527 525 */ 528 526 529 - int devpts_new_index(struct inode *ptmx_inode) 527 + int devpts_new_index(struct pts_fs_info *fsi) 530 528 { 531 - struct super_block *sb = pts_sb_from_inode(ptmx_inode); 532 - struct pts_fs_info *fsi; 533 529 int index; 534 530 int ida_ret; 535 531 536 - if (!sb) 532 + if (!fsi) 537 533 return -ENODEV; 538 534 539 - fsi = DEVPTS_SB(sb); 540 535 retry: 541 536 if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL)) 542 537 return -ENOMEM; ··· 563 564 return index; 564 565 } 565 566 566 - void devpts_kill_index(struct inode *ptmx_inode, int idx) 567 + void devpts_kill_index(struct pts_fs_info *fsi, int idx) 567 568 { 568 - struct super_block *sb = pts_sb_from_inode(ptmx_inode); 569 - struct pts_fs_info *fsi = DEVPTS_SB(sb); 570 - 571 569 mutex_lock(&allocated_ptys_lock); 572 570 ida_remove(&fsi->allocated_ptys, idx); 573 571 pty_count--; ··· 574 578 /* 575 579 * pty code needs to hold extra references in case of last /dev/tty close 576 580 */ 577 - 578 - void devpts_add_ref(struct inode *ptmx_inode) 581 + struct pts_fs_info *devpts_get_ref(struct inode *ptmx_inode, struct file 
*file) 579 582 { 580 - struct super_block *sb = pts_sb_from_inode(ptmx_inode); 583 + struct super_block *sb; 584 + struct pts_fs_info *fsi; 585 + 586 + sb = pts_sb_from_inode(ptmx_inode); 587 + if (!sb) 588 + return NULL; 589 + fsi = DEVPTS_SB(sb); 590 + if (!fsi) 591 + return NULL; 581 592 582 593 atomic_inc(&sb->s_active); 583 - ihold(ptmx_inode); 594 + return fsi; 584 595 } 585 596 586 - void devpts_del_ref(struct inode *ptmx_inode) 597 + void devpts_put_ref(struct pts_fs_info *fsi) 587 598 { 588 - struct super_block *sb = pts_sb_from_inode(ptmx_inode); 589 - 590 - iput(ptmx_inode); 591 - deactivate_super(sb); 599 + deactivate_super(fsi->sb); 592 600 } 593 601 594 602 /** ··· 604 604 * 605 605 * The created inode is returned. Remove it from /dev/pts/ by devpts_pty_kill. 606 606 */ 607 - struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index, 607 + struct inode *devpts_pty_new(struct pts_fs_info *fsi, dev_t device, int index, 608 608 void *priv) 609 609 { 610 610 struct dentry *dentry; 611 - struct super_block *sb = pts_sb_from_inode(ptmx_inode); 611 + struct super_block *sb; 612 612 struct inode *inode; 613 613 struct dentry *root; 614 - struct pts_fs_info *fsi; 615 614 struct pts_mount_opts *opts; 616 615 char s[12]; 617 616 618 - if (!sb) 617 + if (!fsi) 619 618 return ERR_PTR(-ENODEV); 620 619 620 + sb = fsi->sb; 621 621 root = sb->s_root; 622 - fsi = DEVPTS_SB(sb); 623 622 opts = &fsi->mount_opts; 624 623 625 624 inode = new_inode(sb);
+6 -2
include/asm-generic/futex.h
··· 108 108 u32 val; 109 109 110 110 preempt_disable(); 111 - if (unlikely(get_user(val, uaddr) != 0)) 111 + if (unlikely(get_user(val, uaddr) != 0)) { 112 + preempt_enable(); 112 113 return -EFAULT; 114 + } 113 115 114 - if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) 116 + if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) { 117 + preempt_enable(); 115 118 return -EFAULT; 119 + } 116 120 117 121 *uval = val; 118 122 preempt_enable();
+2
include/drm/drm_cache.h
··· 39 39 { 40 40 #if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE) 41 41 return false; 42 + #elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON3) 43 + return false; 42 44 #else 43 45 return true; 44 46 #endif
+10 -24
include/linux/devpts_fs.h
··· 15 15 16 16 #include <linux/errno.h> 17 17 18 + struct pts_fs_info; 19 + 18 20 #ifdef CONFIG_UNIX98_PTYS 19 21 20 - int devpts_new_index(struct inode *ptmx_inode); 21 - void devpts_kill_index(struct inode *ptmx_inode, int idx); 22 - void devpts_add_ref(struct inode *ptmx_inode); 23 - void devpts_del_ref(struct inode *ptmx_inode); 22 + /* Look up a pts fs info and get a ref to it */ 23 + struct pts_fs_info *devpts_get_ref(struct inode *, struct file *); 24 + void devpts_put_ref(struct pts_fs_info *); 25 + 26 + int devpts_new_index(struct pts_fs_info *); 27 + void devpts_kill_index(struct pts_fs_info *, int); 28 + 24 29 /* mknod in devpts */ 25 - struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index, 26 - void *priv); 30 + struct inode *devpts_pty_new(struct pts_fs_info *, dev_t, int, void *); 27 31 /* get private structure */ 28 32 void *devpts_get_priv(struct inode *pts_inode); 29 33 /* unlink */ 30 34 void devpts_pty_kill(struct inode *inode); 31 - 32 - #else 33 - 34 - /* Dummy stubs in the no-pty case */ 35 - static inline int devpts_new_index(struct inode *ptmx_inode) { return -EINVAL; } 36 - static inline void devpts_kill_index(struct inode *ptmx_inode, int idx) { } 37 - static inline void devpts_add_ref(struct inode *ptmx_inode) { } 38 - static inline void devpts_del_ref(struct inode *ptmx_inode) { } 39 - static inline struct inode *devpts_pty_new(struct inode *ptmx_inode, 40 - dev_t device, int index, void *priv) 41 - { 42 - return ERR_PTR(-EINVAL); 43 - } 44 - static inline void *devpts_get_priv(struct inode *pts_inode) 45 - { 46 - return NULL; 47 - } 48 - static inline void devpts_pty_kill(struct inode *inode) { } 49 35 50 36 #endif 51 37
+7
include/linux/mlx4/device.h
··· 828 828 u8 n_ports; 829 829 }; 830 830 831 + enum mlx4_pci_status { 832 + MLX4_PCI_STATUS_DISABLED, 833 + MLX4_PCI_STATUS_ENABLED, 834 + }; 835 + 831 836 struct mlx4_dev_persistent { 832 837 struct pci_dev *pdev; 833 838 struct mlx4_dev *dev; ··· 846 841 u8 state; 847 842 struct mutex interface_state_mutex; /* protect SW state */ 848 843 u8 interface_state; 844 + struct mutex pci_status_mutex; /* sync pci state */ 845 + enum mlx4_pci_status pci_status; 849 846 }; 850 847 851 848 struct mlx4_dev {
+1
include/linux/pci.h
··· 1111 1111 /* Vital product data routines */ 1112 1112 ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf); 1113 1113 ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf); 1114 + int pci_set_vpd_size(struct pci_dev *dev, size_t len); 1114 1115 1115 1116 /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */ 1116 1117 resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
+39
include/linux/rculist_nulls.h
··· 98 98 if (!is_a_nulls(first)) 99 99 first->pprev = &n->next; 100 100 } 101 + 102 + /** 103 + * hlist_nulls_add_tail_rcu 104 + * @n: the element to add to the hash list. 105 + * @h: the list to add to. 106 + * 107 + * Description: 108 + * Adds the specified element to the end of the specified hlist_nulls, 109 + * while permitting racing traversals. NOTE: tail insertion requires 110 + * list traversal. 111 + * 112 + * The caller must take whatever precautions are necessary 113 + * (such as holding appropriate locks) to avoid racing 114 + * with another list-mutation primitive, such as hlist_nulls_add_head_rcu() 115 + * or hlist_nulls_del_rcu(), running on this same list. 116 + * However, it is perfectly legal to run concurrently with 117 + * the _rcu list-traversal primitives, such as 118 + * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency 119 + * problems on Alpha CPUs. Regardless of the type of CPU, the 120 + * list-traversal primitive must be guarded by rcu_read_lock(). 121 + */ 122 + static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n, 123 + struct hlist_nulls_head *h) 124 + { 125 + struct hlist_nulls_node *i, *last = NULL; 126 + 127 + for (i = hlist_nulls_first_rcu(h); !is_a_nulls(i); 128 + i = hlist_nulls_next_rcu(i)) 129 + last = i; 130 + 131 + if (last) { 132 + n->next = last->next; 133 + n->pprev = &last->next; 134 + rcu_assign_pointer(hlist_nulls_next_rcu(last), n); 135 + } else { 136 + hlist_nulls_add_head_rcu(n, h); 137 + } 138 + } 139 + 101 140 /** 102 141 * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type 103 142 * @tpos: the type * to use as a loop cursor.
+2 -2
include/linux/thermal.h
··· 352 352 353 353 struct thermal_trip { 354 354 struct device_node *np; 355 - unsigned long int temperature; 356 - unsigned long int hysteresis; 355 + int temperature; 356 + int hysteresis; 357 357 enum thermal_trip_type type; 358 358 }; 359 359
+5 -2
include/net/cls_cgroup.h
··· 17 17 #include <linux/hardirq.h> 18 18 #include <linux/rcupdate.h> 19 19 #include <net/sock.h> 20 + #include <net/inet_sock.h> 20 21 21 22 #ifdef CONFIG_CGROUP_NET_CLASSID 22 23 struct cgroup_cls_state { ··· 64 63 * softirqs always disables bh. 65 64 */ 66 65 if (in_serving_softirq()) { 66 + struct sock *sk = skb_to_full_sk(skb); 67 + 67 68 /* If there is an sock_cgroup_classid we'll use that. */ 68 - if (!skb->sk) 69 + if (!sk || !sk_fullsock(sk)) 69 70 return 0; 70 71 71 - classid = sock_cgroup_classid(&skb->sk->sk_cgrp_data); 72 + classid = sock_cgroup_classid(&sk->sk_cgrp_data); 72 73 } 73 74 74 75 return classid;
+3
include/net/ip6_route.h
··· 101 101 struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, 102 102 const struct in6_addr *addr, bool anycast); 103 103 104 + struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev, 105 + int flags); 106 + 104 107 /* 105 108 * support functions for ND 106 109 *
+2
include/net/ipv6.h
··· 959 959 int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len); 960 960 int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *addr, 961 961 int addr_len); 962 + int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr); 963 + void ip6_datagram_release_cb(struct sock *sk); 962 964 963 965 int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, 964 966 int *addr_len);
+3
include/net/route.h
··· 209 209 void ip_rt_multicast_event(struct in_device *); 210 210 int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg); 211 211 void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt); 212 + struct rtable *rt_dst_alloc(struct net_device *dev, 213 + unsigned int flags, u16 type, 214 + bool nopolicy, bool noxfrm, bool will_cache); 212 215 213 216 struct in_ifaddr; 214 217 void fib_add_ifaddr(struct in_ifaddr *);
+7 -1
include/net/sctp/structs.h
··· 847 847 */ 848 848 ktime_t last_time_heard; 849 849 850 + /* When was the last time that we sent a chunk using this 851 + * transport? We use this to check for idle transports 852 + */ 853 + unsigned long last_time_sent; 854 + 850 855 /* Last time(in jiffies) when cwnd is reduced due to the congestion 851 856 * indication based on ECNE chunk. 852 857 */ ··· 957 952 struct sctp_sock *); 958 953 void sctp_transport_pmtu(struct sctp_transport *, struct sock *sk); 959 954 void sctp_transport_free(struct sctp_transport *); 960 - void sctp_transport_reset_timers(struct sctp_transport *); 955 + void sctp_transport_reset_t3_rtx(struct sctp_transport *); 956 + void sctp_transport_reset_hb_timer(struct sctp_transport *); 961 957 int sctp_transport_hold(struct sctp_transport *); 962 958 void sctp_transport_put(struct sctp_transport *); 963 959 void sctp_transport_update_rto(struct sctp_transport *, __u32);
+5 -1
include/net/sock.h
··· 630 630 631 631 static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) 632 632 { 633 - hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list); 633 + if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport && 634 + sk->sk_family == AF_INET6) 635 + hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list); 636 + else 637 + hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list); 634 638 } 635 639 636 640 static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
+2
include/net/tcp.h
··· 552 552 void tcp_send_delayed_ack(struct sock *sk); 553 553 void tcp_send_loss_probe(struct sock *sk); 554 554 bool tcp_schedule_loss_probe(struct sock *sk); 555 + void tcp_skb_collapse_tstamp(struct sk_buff *skb, 556 + const struct sk_buff *next_skb); 555 557 556 558 /* tcp_input.c */ 557 559 void tcp_resume_early_retransmit(struct sock *sk);
+2
include/sound/hda_regmap.h
··· 17 17 unsigned int verb); 18 18 int snd_hdac_regmap_read_raw(struct hdac_device *codec, unsigned int reg, 19 19 unsigned int *val); 20 + int snd_hdac_regmap_read_raw_uncached(struct hdac_device *codec, 21 + unsigned int reg, unsigned int *val); 20 22 int snd_hdac_regmap_write_raw(struct hdac_device *codec, unsigned int reg, 21 23 unsigned int val); 22 24 int snd_hdac_regmap_update_raw(struct hdac_device *codec, unsigned int reg,
+5 -1
include/uapi/asm-generic/unistd.h
··· 717 717 __SYSCALL(__NR_mlock2, sys_mlock2) 718 718 #define __NR_copy_file_range 285 719 719 __SYSCALL(__NR_copy_file_range, sys_copy_file_range) 720 + #define __NR_preadv2 286 721 + __SYSCALL(__NR_preadv2, sys_preadv2) 722 + #define __NR_pwritev2 287 723 + __SYSCALL(__NR_pwritev2, sys_pwritev2) 720 724 721 725 #undef __NR_syscalls 722 - #define __NR_syscalls 286 726 + #define __NR_syscalls 288 723 727 724 728 /* 725 729 * All syscalls below here should go away really,
+1
include/uapi/linux/Kbuild
··· 96 96 header-y += cycx_cfm.h 97 97 header-y += dcbnl.h 98 98 header-y += dccp.h 99 + header-y += devlink.h 99 100 header-y += dlmconstants.h 100 101 header-y += dlm_device.h 101 102 header-y += dlm.h
+1
kernel/bpf/verifier.c
··· 1374 1374 } 1375 1375 1376 1376 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 || 1377 + BPF_SIZE(insn->code) == BPF_DW || 1377 1378 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) { 1378 1379 verbose("BPF_LD_ABS uses reserved fields\n"); 1379 1380 return -EINVAL;
+26 -7
kernel/cpu.c
··· 36 36 * @target: The target state 37 37 * @thread: Pointer to the hotplug thread 38 38 * @should_run: Thread should execute 39 + * @rollback: Perform a rollback 39 40 * @cb_stat: The state for a single callback (install/uninstall) 40 41 * @cb: Single callback function (install/uninstall) 41 42 * @result: Result of the operation ··· 48 47 #ifdef CONFIG_SMP 49 48 struct task_struct *thread; 50 49 bool should_run; 50 + bool rollback; 51 51 enum cpuhp_state cb_state; 52 52 int (*cb)(unsigned int cpu); 53 53 int result; ··· 303 301 return __cpu_notify(val, cpu, -1, NULL); 304 302 } 305 303 304 + static void cpu_notify_nofail(unsigned long val, unsigned int cpu) 305 + { 306 + BUG_ON(cpu_notify(val, cpu)); 307 + } 308 + 306 309 /* Notifier wrappers for transitioning to state machine */ 307 310 static int notify_prepare(unsigned int cpu) 308 311 { ··· 484 477 } else { 485 478 ret = cpuhp_invoke_callback(cpu, st->cb_state, st->cb); 486 479 } 480 + } else if (st->rollback) { 481 + BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE); 482 + 483 + undo_cpu_down(cpu, st, cpuhp_ap_states); 484 + /* 485 + * This is a momentary workaround to keep the notifier users 486 + * happy. Will go away once we got rid of the notifiers. 487 + */ 488 + cpu_notify_nofail(CPU_DOWN_FAILED, cpu); 489 + st->rollback = false; 487 490 } else { 488 491 /* Cannot happen .... */ 489 492 BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE); ··· 653 636 read_unlock(&tasklist_lock); 654 637 } 655 638 656 - static void cpu_notify_nofail(unsigned long val, unsigned int cpu) 657 - { 658 - BUG_ON(cpu_notify(val, cpu)); 659 - } 660 - 661 639 static int notify_down_prepare(unsigned int cpu) 662 640 { 663 641 int err, nr_calls = 0; ··· 733 721 */ 734 722 err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu)); 735 723 if (err) { 736 - /* CPU didn't die: tell everyone. Can't complain. 
*/ 737 - cpu_notify_nofail(CPU_DOWN_FAILED, cpu); 724 + /* CPU refused to die */ 738 725 irq_unlock_sparse(); 726 + /* Unpark the hotplug thread so we can rollback there */ 727 + kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread); 739 728 return err; 740 729 } 741 730 BUG_ON(cpu_online(cpu)); ··· 845 832 * to do the further cleanups. 846 833 */ 847 834 ret = cpuhp_down_callbacks(cpu, st, cpuhp_bp_states, target); 835 + if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) { 836 + st->target = prev_state; 837 + st->rollback = true; 838 + cpuhp_kick_ap_work(cpu); 839 + } 848 840 849 841 hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE; 850 842 out: ··· 1267 1249 .name = "notify:online", 1268 1250 .startup = notify_online, 1269 1251 .teardown = notify_down_prepare, 1252 + .skip_onerr = true, 1270 1253 }, 1271 1254 #endif 1272 1255 /*
+23 -4
kernel/futex.c
··· 1295 1295 if (unlikely(should_fail_futex(true))) 1296 1296 ret = -EFAULT; 1297 1297 1298 - if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) 1298 + if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) { 1299 1299 ret = -EFAULT; 1300 - else if (curval != uval) 1301 - ret = -EINVAL; 1300 + } else if (curval != uval) { 1301 + /* 1302 + * If a unconditional UNLOCK_PI operation (user space did not 1303 + * try the TID->0 transition) raced with a waiter setting the 1304 + * FUTEX_WAITERS flag between get_user() and locking the hash 1305 + * bucket lock, retry the operation. 1306 + */ 1307 + if ((FUTEX_TID_MASK & curval) == uval) 1308 + ret = -EAGAIN; 1309 + else 1310 + ret = -EINVAL; 1311 + } 1302 1312 if (ret) { 1303 1313 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); 1304 1314 return ret; ··· 1535 1525 if (likely(&hb1->chain != &hb2->chain)) { 1536 1526 plist_del(&q->list, &hb1->chain); 1537 1527 hb_waiters_dec(hb1); 1538 - plist_add(&q->list, &hb2->chain); 1539 1528 hb_waiters_inc(hb2); 1529 + plist_add(&q->list, &hb2->chain); 1540 1530 q->lock_ptr = &hb2->lock; 1541 1531 } 1542 1532 get_futex_key_refs(key2); ··· 2632 2622 */ 2633 2623 if (ret == -EFAULT) 2634 2624 goto pi_faulted; 2625 + /* 2626 + * A unconditional UNLOCK_PI op raced against a waiter 2627 + * setting the FUTEX_WAITERS bit. Try again. 2628 + */ 2629 + if (ret == -EAGAIN) { 2630 + spin_unlock(&hb->lock); 2631 + put_futex_key(&key); 2632 + goto retry; 2633 + } 2635 2634 /* 2636 2635 * wake_futex_pi has detected invalid state. Tell user 2637 2636 * space.
+1
kernel/irq/ipi.c
··· 94 94 data = irq_get_irq_data(virq + i); 95 95 cpumask_copy(data->common->affinity, dest); 96 96 data->common->ipi_offset = offset; 97 + irq_set_status_flags(virq + i, IRQ_NO_BALANCING); 97 98 } 98 99 return virq; 99 100
+5 -3
kernel/locking/qspinlock_stat.h
··· 136 136 } 137 137 138 138 if (counter == qstat_pv_hash_hops) { 139 - u64 frac; 139 + u64 frac = 0; 140 140 141 - frac = 100ULL * do_div(stat, kicks); 142 - frac = DIV_ROUND_CLOSEST_ULL(frac, kicks); 141 + if (kicks) { 142 + frac = 100ULL * do_div(stat, kicks); 143 + frac = DIV_ROUND_CLOSEST_ULL(frac, kicks); 144 + } 143 145 144 146 /* 145 147 * Return a X.XX decimal number
+16 -13
kernel/sched/core.c
··· 596 596 return false; 597 597 598 598 /* 599 - * FIFO realtime policy runs the highest priority task (after DEADLINE). 600 - * Other runnable tasks are of a lower priority. The scheduler tick 601 - * isn't needed. 602 - */ 603 - fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running; 604 - if (fifo_nr_running) 605 - return true; 606 - 607 - /* 608 - * Round-robin realtime tasks time slice with other tasks at the same 609 - * realtime priority. 599 + * If there are more than one RR tasks, we need the tick to effect the 600 + * actual RR behaviour. 610 601 */ 611 602 if (rq->rt.rr_nr_running) { 612 603 if (rq->rt.rr_nr_running == 1) ··· 606 615 return false; 607 616 } 608 617 609 - /* Normal multitasking need periodic preemption checks */ 610 - if (rq->cfs.nr_running > 1) 618 + /* 619 + * If there's no RR tasks, but FIFO tasks, we can skip the tick, no 620 + * forced preemption between FIFO tasks. 621 + */ 622 + fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running; 623 + if (fifo_nr_running) 624 + return true; 625 + 626 + /* 627 + * If there are no DL,RR/FIFO tasks, there must only be CFS tasks left; 628 + * if there's more than one we need the tick for involuntary 629 + * preemption. 630 + */ 631 + if (rq->nr_running > 1) 611 632 return false; 612 633 613 634 return true;
+5 -1
net/bridge/netfilter/ebtables.c
··· 370 370 left - sizeof(struct ebt_entry_match) < m->match_size) 371 371 return -EINVAL; 372 372 373 - match = xt_request_find_match(NFPROTO_BRIDGE, m->u.name, 0); 373 + match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0); 374 + if (IS_ERR(match) || match->family != NFPROTO_BRIDGE) { 375 + request_module("ebt_%s", m->u.name); 376 + match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0); 377 + } 374 378 if (IS_ERR(match)) 375 379 return PTR_ERR(match); 376 380 m->u.match = match;
+5 -2
net/core/skbuff.c
··· 4502 4502 __skb_push(skb, offset); 4503 4503 err = __vlan_insert_tag(skb, skb->vlan_proto, 4504 4504 skb_vlan_tag_get(skb)); 4505 - if (err) 4505 + if (err) { 4506 + __skb_pull(skb, offset); 4506 4507 return err; 4508 + } 4509 + 4507 4510 skb->protocol = skb->vlan_proto; 4508 4511 skb->mac_len += VLAN_HLEN; 4509 - __skb_pull(skb, offset); 4510 4512 4511 4513 skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); 4514 + __skb_pull(skb, offset); 4512 4515 } 4513 4516 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); 4514 4517 return 0;
+8 -1
net/decnet/dn_route.c
··· 1034 1034 if (!fld.daddr) { 1035 1035 fld.daddr = fld.saddr; 1036 1036 1037 - err = -EADDRNOTAVAIL; 1038 1037 if (dev_out) 1039 1038 dev_put(dev_out); 1039 + err = -EINVAL; 1040 1040 dev_out = init_net.loopback_dev; 1041 + if (!dev_out->dn_ptr) 1042 + goto out; 1043 + err = -EADDRNOTAVAIL; 1041 1044 dev_hold(dev_out); 1042 1045 if (!fld.daddr) { 1043 1046 fld.daddr = ··· 1113 1110 if (dev_out == NULL) 1114 1111 goto out; 1115 1112 dn_db = rcu_dereference_raw(dev_out->dn_ptr); 1113 + if (!dn_db) 1114 + goto e_inval; 1116 1115 /* Possible improvement - check all devices for local addr */ 1117 1116 if (dn_dev_islocal(dev_out, fld.daddr)) { 1118 1117 dev_put(dev_out); ··· 1156 1151 dev_put(dev_out); 1157 1152 dev_out = init_net.loopback_dev; 1158 1153 dev_hold(dev_out); 1154 + if (!dev_out->dn_ptr) 1155 + goto e_inval; 1159 1156 fld.flowidn_oif = dev_out->ifindex; 1160 1157 if (res.fi) 1161 1158 dn_fib_info_put(res.fi);
+6
net/ipv4/netfilter/arptable_filter.c
··· 81 81 return ret; 82 82 } 83 83 84 + ret = arptable_filter_table_init(&init_net); 85 + if (ret) { 86 + unregister_pernet_subsys(&arptable_filter_net_ops); 87 + kfree(arpfilter_ops); 88 + } 89 + 84 90 return ret; 85 91 } 86 92
+16 -3
net/ipv4/route.c
··· 1438 1438 #endif 1439 1439 } 1440 1440 1441 - static struct rtable *rt_dst_alloc(struct net_device *dev, 1442 - unsigned int flags, u16 type, 1443 - bool nopolicy, bool noxfrm, bool will_cache) 1441 + struct rtable *rt_dst_alloc(struct net_device *dev, 1442 + unsigned int flags, u16 type, 1443 + bool nopolicy, bool noxfrm, bool will_cache) 1444 1444 { 1445 1445 struct rtable *rt; 1446 1446 ··· 1468 1468 1469 1469 return rt; 1470 1470 } 1471 + EXPORT_SYMBOL(rt_dst_alloc); 1471 1472 1472 1473 /* called in rcu_read_lock() section */ 1473 1474 static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, ··· 2046 2045 */ 2047 2046 if (fi && res->prefixlen < 4) 2048 2047 fi = NULL; 2048 + } else if ((type == RTN_LOCAL) && (orig_oif != 0) && 2049 + (orig_oif != dev_out->ifindex)) { 2050 + /* For local routes that require a particular output interface 2051 + * we do not want to cache the result. Caching the result 2052 + * causes incorrect behaviour when there are multiple source 2053 + * addresses on the interface, the end result being that if the 2054 + * intended recipient is waiting on that interface for the 2055 + * packet he won't receive it because it will be delivered on 2056 + * the loopback interface and the IP_PKTINFO ipi_ifindex will 2057 + * be set to the loopback interface as well. 2058 + */ 2059 + fi = NULL; 2049 2060 } 2050 2061 2051 2062 fnhe = NULL;
+3 -1
net/ipv4/tcp_input.c
··· 1309 1309 if (skb == tcp_highest_sack(sk)) 1310 1310 tcp_advance_highest_sack(sk, skb); 1311 1311 1312 + tcp_skb_collapse_tstamp(prev, skb); 1312 1313 tcp_unlink_write_queue(skb, sk); 1313 1314 sk_wmem_free_skb(sk, skb); 1314 1315 ··· 3099 3098 3100 3099 shinfo = skb_shinfo(skb); 3101 3100 if ((shinfo->tx_flags & SKBTX_ACK_TSTAMP) && 3102 - between(shinfo->tskey, prior_snd_una, tcp_sk(sk)->snd_una - 1)) 3101 + !before(shinfo->tskey, prior_snd_una) && 3102 + before(shinfo->tskey, tcp_sk(sk)->snd_una)) 3103 3103 __skb_tstamp_tx(skb, NULL, sk, SCM_TSTAMP_ACK); 3104 3104 } 3105 3105
+16
net/ipv4/tcp_output.c
··· 2441 2441 return window; 2442 2442 } 2443 2443 2444 + void tcp_skb_collapse_tstamp(struct sk_buff *skb, 2445 + const struct sk_buff *next_skb) 2446 + { 2447 + const struct skb_shared_info *next_shinfo = skb_shinfo(next_skb); 2448 + u8 tsflags = next_shinfo->tx_flags & SKBTX_ANY_TSTAMP; 2449 + 2450 + if (unlikely(tsflags)) { 2451 + struct skb_shared_info *shinfo = skb_shinfo(skb); 2452 + 2453 + shinfo->tx_flags |= tsflags; 2454 + shinfo->tskey = next_shinfo->tskey; 2455 + } 2456 + } 2457 + 2444 2458 /* Collapses two adjacent SKB's during retransmission. */ 2445 2459 static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb) 2446 2460 { ··· 2497 2483 tp->retransmit_skb_hint = skb; 2498 2484 2499 2485 tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb)); 2486 + 2487 + tcp_skb_collapse_tstamp(skb, next_skb); 2500 2488 2501 2489 sk_wmem_free_skb(sk, next_skb); 2502 2490 }
+7 -2
net/ipv4/udp.c
··· 339 339 340 340 hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); 341 341 spin_lock(&hslot2->lock); 342 - hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node, 343 - &hslot2->head); 342 + if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport && 343 + sk->sk_family == AF_INET6) 344 + hlist_nulls_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node, 345 + &hslot2->head); 346 + else 347 + hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node, 348 + &hslot2->head); 344 349 hslot2->count++; 345 350 spin_unlock(&hslot2->lock); 346 351 }
+20 -2
net/ipv6/addrconf.c
··· 3255 3255 void *ptr) 3256 3256 { 3257 3257 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 3258 + struct netdev_notifier_changeupper_info *info; 3258 3259 struct inet6_dev *idev = __in6_dev_get(dev); 3259 3260 int run_pending = 0; 3260 3261 int err; ··· 3414 3413 if (idev) 3415 3414 addrconf_type_change(dev, event); 3416 3415 break; 3416 + 3417 + case NETDEV_CHANGEUPPER: 3418 + info = ptr; 3419 + 3420 + /* flush all routes if dev is linked to or unlinked from 3421 + * an L3 master device (e.g., VRF) 3422 + */ 3423 + if (info->upper_dev && netif_is_l3_master(info->upper_dev)) 3424 + addrconf_ifdown(dev, 0); 3417 3425 } 3418 3426 3419 3427 return NOTIFY_OK; ··· 3446 3436 ipv6_mc_remap(idev); 3447 3437 else if (event == NETDEV_PRE_TYPE_CHANGE) 3448 3438 ipv6_mc_unmap(idev); 3439 + } 3440 + 3441 + static bool addr_is_local(const struct in6_addr *addr) 3442 + { 3443 + return ipv6_addr_type(addr) & 3444 + (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK); 3449 3445 } 3450 3446 3451 3447 static int addrconf_ifdown(struct net_device *dev, int how) ··· 3511 3495 * address is retained on a down event 3512 3496 */ 3513 3497 if (!keep_addr || 3514 - !(ifa->flags & IFA_F_PERMANENT)) { 3498 + !(ifa->flags & IFA_F_PERMANENT) || 3499 + addr_is_local(&ifa->addr)) { 3515 3500 hlist_del_init_rcu(&ifa->addr_lst); 3516 3501 goto restart; 3517 3502 } ··· 3561 3544 write_unlock_bh(&idev->lock); 3562 3545 spin_lock_bh(&ifa->lock); 3563 3546 3564 - if (keep_addr && (ifa->flags & IFA_F_PERMANENT)) { 3547 + if (keep_addr && (ifa->flags & IFA_F_PERMANENT) && 3548 + !addr_is_local(&ifa->addr)) { 3565 3549 /* set state to skip the notifier below */ 3566 3550 state = INET6_IFADDR_STATE_DEAD; 3567 3551 ifa->state = 0;
+106 -63
net/ipv6/datagram.c
··· 40 40 return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0); 41 41 } 42 42 43 + static void ip6_datagram_flow_key_init(struct flowi6 *fl6, struct sock *sk) 44 + { 45 + struct inet_sock *inet = inet_sk(sk); 46 + struct ipv6_pinfo *np = inet6_sk(sk); 47 + 48 + memset(fl6, 0, sizeof(*fl6)); 49 + fl6->flowi6_proto = sk->sk_protocol; 50 + fl6->daddr = sk->sk_v6_daddr; 51 + fl6->saddr = np->saddr; 52 + fl6->flowi6_oif = sk->sk_bound_dev_if; 53 + fl6->flowi6_mark = sk->sk_mark; 54 + fl6->fl6_dport = inet->inet_dport; 55 + fl6->fl6_sport = inet->inet_sport; 56 + fl6->flowlabel = np->flow_label; 57 + 58 + if (!fl6->flowi6_oif) 59 + fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex; 60 + 61 + if (!fl6->flowi6_oif && ipv6_addr_is_multicast(&fl6->daddr)) 62 + fl6->flowi6_oif = np->mcast_oif; 63 + 64 + security_sk_classify_flow(sk, flowi6_to_flowi(fl6)); 65 + } 66 + 67 + int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr) 68 + { 69 + struct ip6_flowlabel *flowlabel = NULL; 70 + struct in6_addr *final_p, final; 71 + struct ipv6_txoptions *opt; 72 + struct dst_entry *dst; 73 + struct inet_sock *inet = inet_sk(sk); 74 + struct ipv6_pinfo *np = inet6_sk(sk); 75 + struct flowi6 fl6; 76 + int err = 0; 77 + 78 + if (np->sndflow && (np->flow_label & IPV6_FLOWLABEL_MASK)) { 79 + flowlabel = fl6_sock_lookup(sk, np->flow_label); 80 + if (!flowlabel) 81 + return -EINVAL; 82 + } 83 + ip6_datagram_flow_key_init(&fl6, sk); 84 + 85 + rcu_read_lock(); 86 + opt = flowlabel ? 
flowlabel->opt : rcu_dereference(np->opt); 87 + final_p = fl6_update_dst(&fl6, opt, &final); 88 + rcu_read_unlock(); 89 + 90 + dst = ip6_dst_lookup_flow(sk, &fl6, final_p); 91 + if (IS_ERR(dst)) { 92 + err = PTR_ERR(dst); 93 + goto out; 94 + } 95 + 96 + if (fix_sk_saddr) { 97 + if (ipv6_addr_any(&np->saddr)) 98 + np->saddr = fl6.saddr; 99 + 100 + if (ipv6_addr_any(&sk->sk_v6_rcv_saddr)) { 101 + sk->sk_v6_rcv_saddr = fl6.saddr; 102 + inet->inet_rcv_saddr = LOOPBACK4_IPV6; 103 + if (sk->sk_prot->rehash) 104 + sk->sk_prot->rehash(sk); 105 + } 106 + } 107 + 108 + ip6_dst_store(sk, dst, 109 + ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ? 110 + &sk->sk_v6_daddr : NULL, 111 + #ifdef CONFIG_IPV6_SUBTREES 112 + ipv6_addr_equal(&fl6.saddr, &np->saddr) ? 113 + &np->saddr : 114 + #endif 115 + NULL); 116 + 117 + out: 118 + fl6_sock_release(flowlabel); 119 + return err; 120 + } 121 + 122 + void ip6_datagram_release_cb(struct sock *sk) 123 + { 124 + struct dst_entry *dst; 125 + 126 + if (ipv6_addr_v4mapped(&sk->sk_v6_daddr)) 127 + return; 128 + 129 + rcu_read_lock(); 130 + dst = __sk_dst_get(sk); 131 + if (!dst || !dst->obsolete || 132 + dst->ops->check(dst, inet6_sk(sk)->dst_cookie)) { 133 + rcu_read_unlock(); 134 + return; 135 + } 136 + rcu_read_unlock(); 137 + 138 + ip6_datagram_dst_update(sk, false); 139 + } 140 + EXPORT_SYMBOL_GPL(ip6_datagram_release_cb); 141 + 43 142 static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 44 143 { 45 144 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; 46 145 struct inet_sock *inet = inet_sk(sk); 47 146 struct ipv6_pinfo *np = inet6_sk(sk); 48 - struct in6_addr *daddr, *final_p, final; 49 - struct dst_entry *dst; 50 - struct flowi6 fl6; 51 - struct ip6_flowlabel *flowlabel = NULL; 52 - struct ipv6_txoptions *opt; 147 + struct in6_addr *daddr; 53 148 int addr_type; 54 149 int err; 150 + __be32 fl6_flowlabel = 0; 55 151 56 152 if (usin->sin6_family == AF_INET) { 57 153 if (__ipv6_only_sock(sk)) 
··· 162 66 if (usin->sin6_family != AF_INET6) 163 67 return -EAFNOSUPPORT; 164 68 165 - memset(&fl6, 0, sizeof(fl6)); 166 - if (np->sndflow) { 167 - fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK; 168 - if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) { 169 - flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); 170 - if (!flowlabel) 171 - return -EINVAL; 172 - } 173 - } 69 + if (np->sndflow) 70 + fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK; 174 71 175 72 addr_type = ipv6_addr_type(&usin->sin6_addr); 176 73 ··· 234 145 } 235 146 236 147 sk->sk_v6_daddr = *daddr; 237 - np->flow_label = fl6.flowlabel; 148 + np->flow_label = fl6_flowlabel; 238 149 239 150 inet->inet_dport = usin->sin6_port; 240 151 ··· 243 154 * destination cache for it. 244 155 */ 245 156 246 - fl6.flowi6_proto = sk->sk_protocol; 247 - fl6.daddr = sk->sk_v6_daddr; 248 - fl6.saddr = np->saddr; 249 - fl6.flowi6_oif = sk->sk_bound_dev_if; 250 - fl6.flowi6_mark = sk->sk_mark; 251 - fl6.fl6_dport = inet->inet_dport; 252 - fl6.fl6_sport = inet->inet_sport; 253 - 254 - if (!fl6.flowi6_oif) 255 - fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex; 256 - 257 - if (!fl6.flowi6_oif && (addr_type&IPV6_ADDR_MULTICAST)) 258 - fl6.flowi6_oif = np->mcast_oif; 259 - 260 - security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); 261 - 262 - rcu_read_lock(); 263 - opt = flowlabel ? 
flowlabel->opt : rcu_dereference(np->opt); 264 - final_p = fl6_update_dst(&fl6, opt, &final); 265 - rcu_read_unlock(); 266 - 267 - dst = ip6_dst_lookup_flow(sk, &fl6, final_p); 268 - err = 0; 269 - if (IS_ERR(dst)) { 270 - err = PTR_ERR(dst); 157 + err = ip6_datagram_dst_update(sk, true); 158 + if (err) 271 159 goto out; 272 - } 273 - 274 - /* source address lookup done in ip6_dst_lookup */ 275 - 276 - if (ipv6_addr_any(&np->saddr)) 277 - np->saddr = fl6.saddr; 278 - 279 - if (ipv6_addr_any(&sk->sk_v6_rcv_saddr)) { 280 - sk->sk_v6_rcv_saddr = fl6.saddr; 281 - inet->inet_rcv_saddr = LOOPBACK4_IPV6; 282 - if (sk->sk_prot->rehash) 283 - sk->sk_prot->rehash(sk); 284 - } 285 - 286 - ip6_dst_store(sk, dst, 287 - ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ? 288 - &sk->sk_v6_daddr : NULL, 289 - #ifdef CONFIG_IPV6_SUBTREES 290 - ipv6_addr_equal(&fl6.saddr, &np->saddr) ? 291 - &np->saddr : 292 - #endif 293 - NULL); 294 160 295 161 sk->sk_state = TCP_ESTABLISHED; 296 162 sk_set_txhash(sk); 297 163 out: 298 - fl6_sock_release(flowlabel); 299 164 return err; 300 165 } 301 166
+16 -3
net/ipv6/route.c
··· 338 338 return rt; 339 339 } 340 340 341 - static struct rt6_info *ip6_dst_alloc(struct net *net, 342 - struct net_device *dev, 343 - int flags) 341 + struct rt6_info *ip6_dst_alloc(struct net *net, 342 + struct net_device *dev, 343 + int flags) 344 344 { 345 345 struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags); 346 346 ··· 364 364 365 365 return rt; 366 366 } 367 + EXPORT_SYMBOL(ip6_dst_alloc); 367 368 368 369 static void ip6_dst_destroy(struct dst_entry *dst) 369 370 { ··· 1418 1417 1419 1418 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu) 1420 1419 { 1420 + struct dst_entry *dst; 1421 + 1421 1422 ip6_update_pmtu(skb, sock_net(sk), mtu, 1422 1423 sk->sk_bound_dev_if, sk->sk_mark); 1424 + 1425 + dst = __sk_dst_get(sk); 1426 + if (!dst || !dst->obsolete || 1427 + dst->ops->check(dst, inet6_sk(sk)->dst_cookie)) 1428 + return; 1429 + 1430 + bh_lock_sock(sk); 1431 + if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr)) 1432 + ip6_datagram_dst_update(sk, false); 1433 + bh_unlock_sock(sk); 1423 1434 } 1424 1435 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu); 1425 1436
+1
net/ipv6/udp.c
··· 1539 1539 .sendmsg = udpv6_sendmsg, 1540 1540 .recvmsg = udpv6_recvmsg, 1541 1541 .backlog_rcv = __udpv6_queue_rcv_skb, 1542 + .release_cb = ip6_datagram_release_cb, 1542 1543 .hash = udp_lib_hash, 1543 1544 .unhash = udp_lib_unhash, 1544 1545 .rehash = udp_v6_rehash,
+4
net/netfilter/nf_conntrack_proto_tcp.c
··· 410 410 length--; 411 411 continue; 412 412 default: 413 + if (length < 2) 414 + return; 413 415 opsize=*ptr++; 414 416 if (opsize < 2) /* "silly options" */ 415 417 return; ··· 472 470 length--; 473 471 continue; 474 472 default: 473 + if (length < 2) 474 + return; 475 475 opsize = *ptr++; 476 476 if (opsize < 2) /* "silly options" */ 477 477 return;
+1 -1
net/netlink/af_netlink.c
··· 688 688 689 689 skb_queue_purge(&sk->sk_write_queue); 690 690 691 - if (nlk->portid) { 691 + if (nlk->portid && nlk->bound) { 692 692 struct netlink_notify n = { 693 693 .net = sock_net(sk), 694 694 .protocol = sk->sk_protocol,
+2 -2
net/openvswitch/actions.c
··· 461 461 mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked); 462 462 463 463 if (unlikely(memcmp(saddr, masked, sizeof(masked)))) { 464 - set_ipv6_addr(skb, key->ipv6_proto, saddr, masked, 464 + set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked, 465 465 true); 466 466 memcpy(&flow_key->ipv6.addr.src, masked, 467 467 sizeof(flow_key->ipv6.addr.src)); ··· 483 483 NULL, &flags) 484 484 != NEXTHDR_ROUTING); 485 485 486 - set_ipv6_addr(skb, key->ipv6_proto, daddr, masked, 486 + set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked, 487 487 recalc_csum); 488 488 memcpy(&flow_key->ipv6.addr.dst, masked, 489 489 sizeof(flow_key->ipv6.addr.dst));
+1
net/openvswitch/conntrack.c
··· 367 367 } else if (key->eth.type == htons(ETH_P_IPV6)) { 368 368 enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone; 369 369 370 + skb_orphan(skb); 370 371 memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm)); 371 372 err = nf_ct_frag6_gather(net, skb, user); 372 373 if (err)
+1
net/packet/af_packet.c
··· 3521 3521 i->ifindex = mreq->mr_ifindex; 3522 3522 i->alen = mreq->mr_alen; 3523 3523 memcpy(i->addr, mreq->mr_address, i->alen); 3524 + memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen); 3524 3525 i->count = 1; 3525 3526 i->next = po->mclist; 3526 3527 po->mclist = i;
+2 -2
net/rds/cong.c
··· 299 299 i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS; 300 300 off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS; 301 301 302 - __set_bit_le(off, (void *)map->m_page_addrs[i]); 302 + set_bit_le(off, (void *)map->m_page_addrs[i]); 303 303 } 304 304 305 305 void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port) ··· 313 313 i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS; 314 314 off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS; 315 315 316 - __clear_bit_le(off, (void *)map->m_page_addrs[i]); 316 + clear_bit_le(off, (void *)map->m_page_addrs[i]); 317 317 } 318 318 319 319 static int rds_cong_test_bit(struct rds_cong_map *map, __be16 port)
+1 -1
net/rds/ib_cm.c
··· 194 194 dp->dp_protocol_major = RDS_PROTOCOL_MAJOR(protocol_version); 195 195 dp->dp_protocol_minor = RDS_PROTOCOL_MINOR(protocol_version); 196 196 dp->dp_protocol_minor_mask = cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS); 197 - dp->dp_ack_seq = rds_ib_piggyb_ack(ic); 197 + dp->dp_ack_seq = cpu_to_be64(rds_ib_piggyb_ack(ic)); 198 198 199 199 /* Advertise flow control */ 200 200 if (ic->i_flowctl) {
+4 -1
net/sched/sch_generic.c
··· 159 159 if (validate) 160 160 skb = validate_xmit_skb_list(skb, dev); 161 161 162 - if (skb) { 162 + if (likely(skb)) { 163 163 HARD_TX_LOCK(dev, txq, smp_processor_id()); 164 164 if (!netif_xmit_frozen_or_stopped(txq)) 165 165 skb = dev_hard_start_xmit(skb, dev, txq, &ret); 166 166 167 167 HARD_TX_UNLOCK(dev, txq); 168 + } else { 169 + spin_lock(root_lock); 170 + return qdisc_qlen(q); 168 171 } 169 172 spin_lock(root_lock); 170 173
+10 -5
net/sctp/outqueue.c
··· 866 866 * sender MUST assure that at least one T3-rtx 867 867 * timer is running. 868 868 */ 869 - if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) 870 - sctp_transport_reset_timers(transport); 869 + if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) { 870 + sctp_transport_reset_t3_rtx(transport); 871 + transport->last_time_sent = jiffies; 872 + } 871 873 } 872 874 break; 873 875 ··· 926 924 error = sctp_outq_flush_rtx(q, packet, 927 925 rtx_timeout, &start_timer); 928 926 929 - if (start_timer) 930 - sctp_transport_reset_timers(transport); 927 + if (start_timer) { 928 + sctp_transport_reset_t3_rtx(transport); 929 + transport->last_time_sent = jiffies; 930 + } 931 931 932 932 /* This can happen on COOKIE-ECHO resend. Only 933 933 * one chunk can get bundled with a COOKIE-ECHO. ··· 1066 1062 list_add_tail(&chunk->transmitted_list, 1067 1063 &transport->transmitted); 1068 1064 1069 - sctp_transport_reset_timers(transport); 1065 + sctp_transport_reset_t3_rtx(transport); 1066 + transport->last_time_sent = jiffies; 1070 1067 1071 1068 /* Only let one DATA chunk get bundled with a 1072 1069 * COOKIE-ECHO chunk.
+1 -2
net/sctp/sm_make_chunk.c
··· 3080 3080 return SCTP_ERROR_RSRC_LOW; 3081 3081 3082 3082 /* Start the heartbeat timer. */ 3083 - if (!mod_timer(&peer->hb_timer, sctp_transport_timeout(peer))) 3084 - sctp_transport_hold(peer); 3083 + sctp_transport_reset_hb_timer(peer); 3085 3084 asoc->new_transport = peer; 3086 3085 break; 3087 3086 case SCTP_PARAM_DEL_IP:
+16 -20
net/sctp/sm_sideeffect.c
··· 69 69 sctp_cmd_seq_t *commands, 70 70 gfp_t gfp); 71 71 72 - static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds, 73 - struct sctp_transport *t); 74 72 /******************************************************************** 75 73 * Helper functions 76 74 ********************************************************************/ ··· 365 367 struct sctp_association *asoc = transport->asoc; 366 368 struct sock *sk = asoc->base.sk; 367 369 struct net *net = sock_net(sk); 370 + u32 elapsed, timeout; 368 371 369 372 bh_lock_sock(sk); 370 373 if (sock_owned_by_user(sk)) { ··· 373 374 374 375 /* Try again later. */ 375 376 if (!mod_timer(&transport->hb_timer, jiffies + (HZ/20))) 377 + sctp_transport_hold(transport); 378 + goto out_unlock; 379 + } 380 + 381 + /* Check if we should still send the heartbeat or reschedule */ 382 + elapsed = jiffies - transport->last_time_sent; 383 + timeout = sctp_transport_timeout(transport); 384 + if (elapsed < timeout) { 385 + elapsed = timeout - elapsed; 386 + if (!mod_timer(&transport->hb_timer, jiffies + elapsed)) 376 387 sctp_transport_hold(transport); 377 388 goto out_unlock; 378 389 } ··· 516 507 0); 517 508 518 509 /* Update the hb timer to resend a heartbeat every rto */ 519 - sctp_cmd_hb_timer_update(commands, transport); 510 + sctp_transport_reset_hb_timer(transport); 520 511 } 521 512 522 513 if (transport->state != SCTP_INACTIVE && ··· 643 634 * hold a reference on the transport to make sure none of 644 635 * the needed data structures go away. 645 636 */ 646 - list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) { 647 - 648 - if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t))) 649 - sctp_transport_hold(t); 650 - } 637 + list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) 638 + sctp_transport_reset_hb_timer(t); 651 639 } 652 640 653 641 static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds, ··· 674 668 } 675 669 } 676 670 677 - 678 - /* Helper function to update the heartbeat timer. 
*/ 679 - static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds, 680 - struct sctp_transport *t) 681 - { 682 - /* Update the heartbeat timer. */ 683 - if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t))) 684 - sctp_transport_hold(t); 685 - } 686 671 687 672 /* Helper function to handle the reception of an HEARTBEAT ACK. */ 688 673 static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds, ··· 739 742 sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at)); 740 743 741 744 /* Update the heartbeat timer. */ 742 - if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t))) 743 - sctp_transport_hold(t); 745 + sctp_transport_reset_hb_timer(t); 744 746 745 747 if (was_unconfirmed && asoc->peer.transport_count == 1) 746 748 sctp_transport_immediate_rtx(t); ··· 1610 1614 1611 1615 case SCTP_CMD_HB_TIMER_UPDATE: 1612 1616 t = cmd->obj.transport; 1613 - sctp_cmd_hb_timer_update(commands, t); 1617 + sctp_transport_reset_hb_timer(t); 1614 1618 break; 1615 1619 1616 1620 case SCTP_CMD_HB_TIMERS_STOP:
+13 -6
net/sctp/transport.c
··· 183 183 /* Start T3_rtx timer if it is not already running and update the heartbeat 184 184 * timer. This routine is called every time a DATA chunk is sent. 185 185 */ 186 - void sctp_transport_reset_timers(struct sctp_transport *transport) 186 + void sctp_transport_reset_t3_rtx(struct sctp_transport *transport) 187 187 { 188 188 /* RFC 2960 6.3.2 Retransmission Timer Rules 189 189 * ··· 197 197 if (!mod_timer(&transport->T3_rtx_timer, 198 198 jiffies + transport->rto)) 199 199 sctp_transport_hold(transport); 200 + } 201 + 202 + void sctp_transport_reset_hb_timer(struct sctp_transport *transport) 203 + { 204 + unsigned long expires; 200 205 201 206 /* When a data chunk is sent, reset the heartbeat interval. */ 202 - if (!mod_timer(&transport->hb_timer, 203 - sctp_transport_timeout(transport))) 204 - sctp_transport_hold(transport); 207 + expires = jiffies + sctp_transport_timeout(transport); 208 + if (time_before(transport->hb_timer.expires, expires) && 209 + !mod_timer(&transport->hb_timer, 210 + expires + prandom_u32_max(transport->rto))) 211 + sctp_transport_hold(transport); 205 212 } 206 213 207 214 /* This transport has been assigned to an association. ··· 602 595 unsigned long sctp_transport_timeout(struct sctp_transport *trans) 603 596 { 604 597 /* RTO + timer slack +/- 50% of RTO */ 605 - unsigned long timeout = (trans->rto >> 1) + prandom_u32_max(trans->rto); 598 + unsigned long timeout = trans->rto >> 1; 606 599 607 600 if (trans->state != SCTP_UNCONFIRMED && 608 601 trans->state != SCTP_PF) 609 602 timeout += trans->hbinterval; 610 603 611 - return timeout + jiffies; 604 + return timeout; 612 605 } 613 606 614 607 /* Reset transport variables to their initial values */
+1
net/tipc/core.c
··· 69 69 if (err) 70 70 goto out_nametbl; 71 71 72 + INIT_LIST_HEAD(&tn->dist_queue); 72 73 err = tipc_topsrv_start(net); 73 74 if (err) 74 75 goto out_subscr;
+3
net/tipc/core.h
··· 103 103 spinlock_t nametbl_lock; 104 104 struct name_table *nametbl; 105 105 106 + /* Name dist queue */ 107 + struct list_head dist_queue; 108 + 106 109 /* Topology subscription server */ 107 110 struct tipc_server *topsrv; 108 111 atomic_t subscription_count;
+26 -9
net/tipc/name_distr.c
··· 40 40 41 41 int sysctl_tipc_named_timeout __read_mostly = 2000; 42 42 43 - /** 44 - * struct tipc_dist_queue - queue holding deferred name table updates 45 - */ 46 - static struct list_head tipc_dist_queue = LIST_HEAD_INIT(tipc_dist_queue); 47 - 48 43 struct distr_queue_item { 49 44 struct distr_item i; 50 45 u32 dtype; ··· 224 229 kfree_rcu(p, rcu); 225 230 } 226 231 232 + /** 233 + * tipc_dist_queue_purge - remove deferred updates from a node that went down 234 + */ 235 + static void tipc_dist_queue_purge(struct net *net, u32 addr) 236 + { 237 + struct tipc_net *tn = net_generic(net, tipc_net_id); 238 + struct distr_queue_item *e, *tmp; 239 + 240 + spin_lock_bh(&tn->nametbl_lock); 241 + list_for_each_entry_safe(e, tmp, &tn->dist_queue, next) { 242 + if (e->node != addr) 243 + continue; 244 + list_del(&e->next); 245 + kfree(e); 246 + } 247 + spin_unlock_bh(&tn->nametbl_lock); 248 + } 249 + 227 250 void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr) 228 251 { 229 252 struct publication *publ, *tmp; 230 253 231 254 list_for_each_entry_safe(publ, tmp, nsub_list, nodesub_list) 232 255 tipc_publ_purge(net, publ, addr); 256 + tipc_dist_queue_purge(net, addr); 233 257 } 234 258 235 259 /** ··· 293 279 * tipc_named_add_backlog - add a failed name table update to the backlog 294 280 * 295 281 */ 296 - static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node) 282 + static void tipc_named_add_backlog(struct net *net, struct distr_item *i, 283 + u32 type, u32 node) 297 284 { 298 285 struct distr_queue_item *e; 286 + struct tipc_net *tn = net_generic(net, tipc_net_id); 299 287 unsigned long now = get_jiffies_64(); 300 288 301 289 e = kzalloc(sizeof(*e), GFP_ATOMIC); ··· 307 291 e->node = node; 308 292 e->expires = now + msecs_to_jiffies(sysctl_tipc_named_timeout); 309 293 memcpy(e, i, sizeof(*i)); 310 - list_add_tail(&e->next, &tipc_dist_queue); 294 + list_add_tail(&e->next, &tn->dist_queue); 311 295 } 312 296 313 297 /** ··· 
317 301 void tipc_named_process_backlog(struct net *net) 318 302 { 319 303 struct distr_queue_item *e, *tmp; 304 + struct tipc_net *tn = net_generic(net, tipc_net_id); 320 305 char addr[16]; 321 306 unsigned long now = get_jiffies_64(); 322 307 323 - list_for_each_entry_safe(e, tmp, &tipc_dist_queue, next) { 308 + list_for_each_entry_safe(e, tmp, &tn->dist_queue, next) { 324 309 if (time_after(e->expires, now)) { 325 310 if (!tipc_update_nametbl(net, &e->i, e->node, e->dtype)) 326 311 continue; ··· 361 344 node = msg_orignode(msg); 362 345 while (count--) { 363 346 if (!tipc_update_nametbl(net, item, node, mtype)) 364 - tipc_named_add_backlog(item, mtype, node); 347 + tipc_named_add_backlog(net, item, mtype, node); 365 348 item++; 366 349 } 367 350 kfree_skb(skb);
+2 -5
net/vmw_vsock/vmci_transport.c
··· 1735 1735 /* Retrieve the head sk_buff from the socket's receive queue. */ 1736 1736 err = 0; 1737 1737 skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err); 1738 - if (err) 1739 - return err; 1740 - 1741 1738 if (!skb) 1742 - return -EAGAIN; 1739 + return err; 1743 1740 1744 1741 dg = (struct vmci_datagram *)skb->data; 1745 1742 if (!dg) ··· 2151 2154 2152 2155 MODULE_AUTHOR("VMware, Inc."); 2153 2156 MODULE_DESCRIPTION("VMCI transport for Virtual Sockets"); 2154 - MODULE_VERSION("1.0.3.0-k"); 2157 + MODULE_VERSION("1.0.4.0-k"); 2155 2158 MODULE_LICENSE("GPL v2"); 2156 2159 MODULE_ALIAS("vmware_vsock"); 2157 2160 MODULE_ALIAS_NETPROTO(PF_VSOCK);
+1 -1
net/wireless/nl80211.c
··· 13216 13216 struct wireless_dev *wdev; 13217 13217 struct cfg80211_beacon_registration *reg, *tmp; 13218 13218 13219 - if (state != NETLINK_URELEASE) 13219 + if (state != NETLINK_URELEASE || notify->protocol != NETLINK_GENERIC) 13220 13220 return NOTIFY_DONE; 13221 13221 13222 13222 rcu_read_lock();
+4 -6
sound/hda/hdac_device.c
··· 299 299 int snd_hdac_read_parm_uncached(struct hdac_device *codec, hda_nid_t nid, 300 300 int parm) 301 301 { 302 - int val; 302 + unsigned int cmd, val; 303 303 304 - if (codec->regmap) 305 - regcache_cache_bypass(codec->regmap, true); 306 - val = snd_hdac_read_parm(codec, nid, parm); 307 - if (codec->regmap) 308 - regcache_cache_bypass(codec->regmap, false); 304 + cmd = snd_hdac_regmap_encode_verb(nid, AC_VERB_PARAMETERS) | parm; 305 + if (snd_hdac_regmap_read_raw_uncached(codec, cmd, &val) < 0) 306 + return -1; 309 307 return val; 310 308 } 311 309 EXPORT_SYMBOL_GPL(snd_hdac_read_parm_uncached);
+28 -12
sound/hda/hdac_regmap.c
··· 453 453 EXPORT_SYMBOL_GPL(snd_hdac_regmap_write_raw); 454 454 455 455 static int reg_raw_read(struct hdac_device *codec, unsigned int reg, 456 - unsigned int *val) 456 + unsigned int *val, bool uncached) 457 457 { 458 - if (!codec->regmap) 458 + if (uncached || !codec->regmap) 459 459 return hda_reg_read(codec, reg, val); 460 460 else 461 461 return regmap_read(codec->regmap, reg, val); 462 + } 463 + 464 + static int __snd_hdac_regmap_read_raw(struct hdac_device *codec, 465 + unsigned int reg, unsigned int *val, 466 + bool uncached) 467 + { 468 + int err; 469 + 470 + err = reg_raw_read(codec, reg, val, uncached); 471 + if (err == -EAGAIN) { 472 + err = snd_hdac_power_up_pm(codec); 473 + if (!err) 474 + err = reg_raw_read(codec, reg, val, uncached); 475 + snd_hdac_power_down_pm(codec); 476 + } 477 + return err; 462 478 } 463 479 464 480 /** ··· 488 472 int snd_hdac_regmap_read_raw(struct hdac_device *codec, unsigned int reg, 489 473 unsigned int *val) 490 474 { 491 - int err; 492 - 493 - err = reg_raw_read(codec, reg, val); 494 - if (err == -EAGAIN) { 495 - err = snd_hdac_power_up_pm(codec); 496 - if (!err) 497 - err = reg_raw_read(codec, reg, val); 498 - snd_hdac_power_down_pm(codec); 499 - } 500 - return err; 475 + return __snd_hdac_regmap_read_raw(codec, reg, val, false); 501 476 } 502 477 EXPORT_SYMBOL_GPL(snd_hdac_regmap_read_raw); 478 + 479 + /* Works like snd_hdac_regmap_read_raw(), but this doesn't read from the 480 + * cache but always via hda verbs. 481 + */ 482 + int snd_hdac_regmap_read_raw_uncached(struct hdac_device *codec, 483 + unsigned int reg, unsigned int *val) 484 + { 485 + return __snd_hdac_regmap_read_raw(codec, reg, val, true); 486 + } 503 487 504 488 /** 505 489 * snd_hdac_regmap_update_raw - update a pseudo register with power mgmt
+4 -2
sound/pci/hda/hda_generic.c
··· 826 826 bool allow_powerdown) 827 827 { 828 828 hda_nid_t nid, changed = 0; 829 - int i, state; 829 + int i, state, power; 830 830 831 831 for (i = 0; i < path->depth; i++) { 832 832 nid = path->path[i]; ··· 838 838 state = AC_PWRST_D0; 839 839 else 840 840 state = AC_PWRST_D3; 841 - if (!snd_hda_check_power_state(codec, nid, state)) { 841 + power = snd_hda_codec_read(codec, nid, 0, 842 + AC_VERB_GET_POWER_STATE, 0); 843 + if (power != (state | (state << 4))) { 842 844 snd_hda_codec_write(codec, nid, 0, 843 845 AC_VERB_SET_POWER_STATE, state); 844 846 changed = nid;
+3
sound/pci/hda/hda_intel.c
··· 2232 2232 /* Broxton-P(Apollolake) */ 2233 2233 { PCI_DEVICE(0x8086, 0x5a98), 2234 2234 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON }, 2235 + /* Broxton-T */ 2236 + { PCI_DEVICE(0x8086, 0x1a98), 2237 + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON }, 2235 2238 /* Haswell */ 2236 2239 { PCI_DEVICE(0x8086, 0x0a0c), 2237 2240 .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
+14
sound/pci/hda/patch_cirrus.c
··· 361 361 { 362 362 struct cs_spec *spec = codec->spec; 363 363 int err; 364 + int i; 364 365 365 366 err = snd_hda_parse_pin_defcfg(codec, &spec->gen.autocfg, NULL, 0); 366 367 if (err < 0) ··· 370 369 err = snd_hda_gen_parse_auto_config(codec, &spec->gen.autocfg); 371 370 if (err < 0) 372 371 return err; 372 + 373 + /* keep the ADCs powered up when it's dynamically switchable */ 374 + if (spec->gen.dyn_adc_switch) { 375 + unsigned int done = 0; 376 + for (i = 0; i < spec->gen.input_mux.num_items; i++) { 377 + int idx = spec->gen.dyn_adc_idx[i]; 378 + if (done & (1 << idx)) 379 + continue; 380 + snd_hda_gen_fix_pin_power(codec, 381 + spec->gen.adc_nids[idx]); 382 + done |= 1 << idx; 383 + } 384 + } 373 385 374 386 return 0; 375 387 }
+2
sound/pci/hda/patch_hdmi.c
··· 1858 1858 struct hdmi_spec *spec = codec->spec; 1859 1859 struct hdmi_spec_per_pin *per_pin = pcm_idx_to_pin(spec, pcm_idx); 1860 1860 1861 + if (!per_pin) 1862 + return; 1861 1863 mutex_lock(&per_pin->lock); 1862 1864 per_pin->chmap_set = true; 1863 1865 memcpy(per_pin->chmap, chmap, ARRAY_SIZE(per_pin->chmap));
+1
sound/pci/hda/patch_realtek.c
··· 5449 5449 SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5450 5450 SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5451 5451 SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC288_FIXUP_DELL_XPS_13), 5452 + SND_PCI_QUIRK(0x1028, 0x0669, "Dell Optiplex 9020m", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 5452 5453 SND_PCI_QUIRK(0x1028, 0x069a, "Dell Vostro 5480", ALC290_FIXUP_SUBWOOFER_HSJACK), 5453 5454 SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 5454 5455 SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+1
sound/pci/pcxhr/pcxhr_core.c
··· 1341 1341 } 1342 1342 1343 1343 pcxhr_msg_thread(mgr); 1344 + mutex_unlock(&mgr->lock); 1344 1345 return IRQ_HANDLED; 1345 1346 }
+29 -9
tools/objtool/Documentation/stack-validation.txt
··· 299 299 Errors in .c files 300 300 ------------------ 301 301 302 - If you're getting an objtool error in a compiled .c file, chances are 303 - the file uses an asm() statement which has a "call" instruction. An 304 - asm() statement with a call instruction must declare the use of the 305 - stack pointer in its output operand. For example, on x86_64: 302 + 1. c_file.o: warning: objtool: funcA() falls through to next function funcB() 306 303 307 - register void *__sp asm("rsp"); 308 - asm volatile("call func" : "+r" (__sp)); 304 + This means that funcA() doesn't end with a return instruction or an 305 + unconditional jump, and that objtool has determined that the function 306 + can fall through into the next function. There could be different 307 + reasons for this: 309 308 310 - Otherwise the stack frame may not get created before the call. 309 + 1) funcA()'s last instruction is a call to a "noreturn" function like 310 + panic(). In this case the noreturn function needs to be added to 311 + objtool's hard-coded global_noreturns array. Feel free to bug the 312 + objtool maintainer, or you can submit a patch. 311 313 312 - Another possible cause for errors in C code is if the Makefile removes 313 - -fno-omit-frame-pointer or adds -fomit-frame-pointer to the gcc options. 314 + 2) funcA() uses the unreachable() annotation in a section of code 315 + that is actually reachable. 316 + 317 + 3) If funcA() calls an inline function, the object code for funcA() 318 + might be corrupt due to a gcc bug. For more details, see: 319 + https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70646 320 + 321 + 2. If you're getting any other objtool error in a compiled .c file, it 322 + may be because the file uses an asm() statement which has a "call" 323 + instruction. An asm() statement with a call instruction must declare 324 + the use of the stack pointer in its output operand. 
For example, on 325 + x86_64: 326 + 327 + register void *__sp asm("rsp"); 328 + asm volatile("call func" : "+r" (__sp)); 329 + 330 + Otherwise the stack frame may not get created before the call. 331 + 332 + 3. Another possible cause for errors in C code is if the Makefile removes 333 + -fno-omit-frame-pointer or adds -fomit-frame-pointer to the gcc options. 314 334 315 335 Also see the above section for .S file errors for more information what 316 336 the individual error messages mean.
+72 -25
tools/objtool/builtin-check.c
··· 54 54 struct symbol *call_dest; 55 55 struct instruction *jump_dest; 56 56 struct list_head alts; 57 + struct symbol *func; 57 58 }; 58 59 59 60 struct alternative { ··· 67 66 struct list_head insn_list; 68 67 DECLARE_HASHTABLE(insn_hash, 16); 69 68 struct section *rodata, *whitelist; 69 + bool ignore_unreachables, c_file; 70 70 }; 71 71 72 72 const char *objname; ··· 230 228 } 231 229 } 232 230 233 - if (insn->type == INSN_JUMP_DYNAMIC) 231 + if (insn->type == INSN_JUMP_DYNAMIC && list_empty(&insn->alts)) 234 232 /* sibling call */ 235 233 return 0; 236 234 } ··· 250 248 static int decode_instructions(struct objtool_file *file) 251 249 { 252 250 struct section *sec; 251 + struct symbol *func; 253 252 unsigned long offset; 254 253 struct instruction *insn; 255 254 int ret; ··· 283 280 284 281 hash_add(file->insn_hash, &insn->hash, insn->offset); 285 282 list_add_tail(&insn->list, &file->insn_list); 283 + } 284 + 285 + list_for_each_entry(func, &sec->symbol_list, list) { 286 + if (func->type != STT_FUNC) 287 + continue; 288 + 289 + if (!find_insn(file, sec, func->offset)) { 290 + WARN("%s(): can't find starting instruction", 291 + func->name); 292 + return -1; 293 + } 294 + 295 + func_for_each_insn(file, func, insn) 296 + if (!insn->func) 297 + insn->func = func; 286 298 } 287 299 } 288 300 ··· 682 664 text_rela->addend); 683 665 684 666 /* 685 - * TODO: Document where this is needed, or get rid of it. 686 - * 687 667 * rare case: jmpq *[addr](%rip) 668 + * 669 + * This check is for a rare gcc quirk, currently only seen in 670 + * three driver functions in the kernel, only with certain 671 + * obscure non-distro configs. 672 + * 673 + * As part of an optimization, gcc makes a copy of an existing 674 + * switch jump table, modifies it, and then hard-codes the jump 675 + * (albeit with an indirect jump) to use a single entry in the 676 + * table. The rest of the jump table and some of its jump 677 + * targets remain as dead code. 
678 + * 679 + * In such a case we can just crudely ignore all unreachable 680 + * instruction warnings for the entire object file. Ideally we 681 + * would just ignore them for the function, but that would 682 + * require redesigning the code quite a bit. And honestly 683 + * that's just not worth doing: unreachable instruction 684 + * warnings are of questionable value anyway, and this is such 685 + * a rare issue. 686 + * 687 + * kbuild reports: 688 + * - https://lkml.kernel.org/r/201603231906.LWcVUpxm%25fengguang.wu@intel.com 689 + * - https://lkml.kernel.org/r/201603271114.K9i45biy%25fengguang.wu@intel.com 690 + * - https://lkml.kernel.org/r/201603291058.zuJ6ben1%25fengguang.wu@intel.com 691 + * 692 + * gcc bug: 693 + * - https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70604 688 694 */ 689 - if (!rodata_rela) 695 + if (!rodata_rela) { 690 696 rodata_rela = find_rela_by_dest(file->rodata, 691 697 text_rela->addend + 4); 698 + if (rodata_rela) 699 + file->ignore_unreachables = true; 700 + } 692 701 693 702 if (!rodata_rela) 694 703 continue; ··· 776 731 static int decode_sections(struct objtool_file *file) 777 732 { 778 733 int ret; 779 - 780 - file->whitelist = find_section_by_name(file->elf, "__func_stack_frame_non_standard"); 781 - file->rodata = find_section_by_name(file->elf, ".rodata"); 782 734 783 735 ret = decode_instructions(file); 784 736 if (ret) ··· 841 799 struct alternative *alt; 842 800 struct instruction *insn; 843 801 struct section *sec; 802 + struct symbol *func = NULL; 844 803 unsigned char state; 845 804 int ret; 846 805 ··· 856 813 } 857 814 858 815 while (1) { 816 + if (file->c_file && insn->func) { 817 + if (func && func != insn->func) { 818 + WARN("%s() falls through to next function %s()", 819 + func->name, insn->func->name); 820 + return 1; 821 + } 822 + 823 + func = insn->func; 824 + } 825 + 859 826 if (insn->visited) { 860 827 if (frame_state(insn->state) != frame_state(state)) { 861 828 WARN_FUNC("frame pointer state mismatch", ··· 875 
822 876 823 return 0; 877 824 } 878 - 879 - /* 880 - * Catch a rare case where a noreturn function falls through to 881 - * the next function. 882 - */ 883 - if (is_fentry_call(insn) && (state & STATE_FENTRY)) 884 - return 0; 885 825 886 826 insn->visited = true; 887 827 insn->state = state; ··· 1081 1035 continue; 1082 1036 1083 1037 insn = find_insn(file, sec, func->offset); 1084 - if (!insn) { 1085 - WARN("%s(): can't find starting instruction", 1086 - func->name); 1087 - warnings++; 1038 + if (!insn) 1088 1039 continue; 1089 - } 1090 1040 1091 1041 ret = validate_branch(file, insn, 0); 1092 1042 warnings += ret; ··· 1098 1056 if (insn->visited) 1099 1057 continue; 1100 1058 1101 - if (!ignore_unreachable_insn(func, insn) && 1102 - !warnings) { 1103 - WARN_FUNC("function has unreachable instruction", insn->sec, insn->offset); 1104 - warnings++; 1105 - } 1106 - 1107 1059 insn->visited = true; 1060 + 1061 + if (file->ignore_unreachables || warnings || 1062 + ignore_unreachable_insn(func, insn)) 1063 + continue; 1064 + 1065 + WARN_FUNC("function has unreachable instruction", insn->sec, insn->offset); 1066 + warnings++; 1108 1067 } 1109 1068 } 1110 1069 } ··· 1176 1133 1177 1134 INIT_LIST_HEAD(&file.insn_list); 1178 1135 hash_init(file.insn_hash); 1136 + file.whitelist = find_section_by_name(file.elf, "__func_stack_frame_non_standard"); 1137 + file.rodata = find_section_by_name(file.elf, ".rodata"); 1138 + file.ignore_unreachables = false; 1139 + file.c_file = find_section_by_name(file.elf, ".comment"); 1179 1140 1180 1141 ret = decode_sections(&file); 1181 1142 if (ret < 0)
+1 -1
tools/perf/util/intel-pt.c
··· 1130 1130 pr_err("Intel Processor Trace: failed to deliver transaction event, error %d\n", 1131 1131 ret); 1132 1132 1133 - if (pt->synth_opts.callchain) 1133 + if (pt->synth_opts.last_branch) 1134 1134 intel_pt_reset_last_branch_rb(ptq); 1135 1135 1136 1136 return ret;
+1
tools/testing/selftests/net/.gitignore
··· 3 3 psock_tpacket 4 4 reuseport_bpf 5 5 reuseport_bpf_cpu 6 + reuseport_dualstack
+1 -1
tools/testing/selftests/net/Makefile
··· 4 4 5 5 CFLAGS += -I../../../../usr/include/ 6 6 7 - NET_PROGS = socket psock_fanout psock_tpacket reuseport_bpf reuseport_bpf_cpu 7 + NET_PROGS = socket psock_fanout psock_tpacket reuseport_bpf reuseport_bpf_cpu reuseport_dualstack 8 8 9 9 all: $(NET_PROGS) 10 10 %: %.c
+208
tools/testing/selftests/net/reuseport_dualstack.c
/*
 * It is possible to use SO_REUSEPORT to open multiple sockets bound to
 * equivalent local addresses using AF_INET and AF_INET6 at the same time. If
 * the AF_INET6 socket has IPV6_V6ONLY set, it's clear which socket should
 * receive a given incoming packet. However, when it is not set, incoming v4
 * packets should prefer the AF_INET socket(s). This behavior was defined with
 * the original SO_REUSEPORT implementation, but broke with
 * e32ea7e74727 ("soreuseport: fast reuseport UDP socket selection")
 * This test creates these mixed AF_INET/AF_INET6 sockets and asserts the
 * AF_INET preference for v4 packets.
 */

#define _GNU_SOURCE

#include <arpa/inet.h>
#include <errno.h>
#include <error.h>
#include <linux/in.h>
#include <linux/unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <unistd.h>

static const int PORT = 8888;

/*
 * Create 'count' receive sockets of the given address family and socket
 * type, all bound to the wildcard address on PORT with SO_REUSEPORT set.
 * SOCK_STREAM sockets are also put into the listening state.
 * Any failure aborts the test via error(3).
 */
static void build_rcv_fd(int family, int proto, int *rcv_fds, int count)
{
	struct sockaddr_storage addr;
	struct sockaddr_in *addr4;
	struct sockaddr_in6 *addr6;
	int opt, i;

	switch (family) {
	case AF_INET:
		addr4 = (struct sockaddr_in *)&addr;
		addr4->sin_family = AF_INET;
		addr4->sin_addr.s_addr = htonl(INADDR_ANY);
		addr4->sin_port = htons(PORT);
		break;
	case AF_INET6:
		addr6 = (struct sockaddr_in6 *)&addr;
		addr6->sin6_family = AF_INET6;
		addr6->sin6_addr = in6addr_any;
		addr6->sin6_port = htons(PORT);
		break;
	default:
		error(1, 0, "Unsupported family %d", family);
	}

	for (i = 0; i < count; ++i) {
		rcv_fds[i] = socket(family, proto, 0);
		if (rcv_fds[i] < 0)
			error(1, errno, "failed to create receive socket");

		/* SO_REUSEPORT must be set before bind() to join the group. */
		opt = 1;
		if (setsockopt(rcv_fds[i], SOL_SOCKET, SO_REUSEPORT, &opt,
			       sizeof(opt)))
			error(1, errno, "failed to set SO_REUSEPORT");

		if (bind(rcv_fds[i], (struct sockaddr *)&addr, sizeof(addr)))
			error(1, errno, "failed to bind receive socket");

		if (proto == SOCK_STREAM && listen(rcv_fds[i], 10))
			error(1, errno, "failed to listen on receive port");
	}
}

/*
 * Send a single one-byte message ("a") over IPv4 loopback to PORT using a
 * freshly created socket of the given type, then close it.  This is the
 * v4 packet whose delivery the test inspects.
 */
static void send_from_v4(int proto)
{
	struct sockaddr_in saddr, daddr;
	int fd;

	saddr.sin_family = AF_INET;
	saddr.sin_addr.s_addr = htonl(INADDR_ANY);
	saddr.sin_port = 0;	/* let the kernel pick an ephemeral port */

	daddr.sin_family = AF_INET;
	daddr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	daddr.sin_port = htons(PORT);

	fd = socket(AF_INET, proto, 0);
	if (fd < 0)
		error(1, errno, "failed to create send socket");

	if (bind(fd, (struct sockaddr *)&saddr, sizeof(saddr)))
		error(1, errno, "failed to bind send socket");

	if (connect(fd, (struct sockaddr *)&daddr, sizeof(daddr)))
		error(1, errno, "failed to connect send socket");

	if (send(fd, "a", 1, 0) < 0)
		error(1, errno, "failed to send message");

	close(fd);
}

/*
 * Wait (indefinitely) for one receive socket in the epoll set to become
 * readable, consume the pending message (accept()ing first for TCP), and
 * return the fd of the listening/receiving socket that got the packet.
 */
static int receive_once(int epfd, int proto)
{
	struct epoll_event ev;
	int i, fd;
	char buf[8];

	i = epoll_wait(epfd, &ev, 1, -1);
	if (i < 0)
		error(1, errno, "epoll_wait failed");

	if (proto == SOCK_STREAM) {
		fd = accept(ev.data.fd, NULL, NULL);
		if (fd < 0)
			error(1, errno, "failed to accept");
		i = recv(fd, buf, sizeof(buf), 0);
		close(fd);
	} else {
		i = recv(ev.data.fd, buf, sizeof(buf), 0);
	}

	if (i < 0)
		error(1, errno, "failed to recv");

	return ev.data.fd;
}

/*
 * Register all 'count' receive sockets with epoll, inject one v4 packet,
 * and assert that the socket which received it belongs to AF_INET (i.e.
 * the v4 sockets are preferred over unbound-v6 sockets for v4 traffic).
 */
static void test(int *rcv_fds, int count, int proto)
{
	struct epoll_event ev;
	int epfd, i, test_fd;
	int test_family;
	socklen_t len;

	epfd = epoll_create(1);
	if (epfd < 0)
		error(1, errno, "failed to create epoll");

	ev.events = EPOLLIN;
	for (i = 0; i < count; ++i) {
		ev.data.fd = rcv_fds[i];
		if (epoll_ctl(epfd, EPOLL_CTL_ADD, rcv_fds[i], &ev))
			error(1, errno, "failed to register sock epoll");
	}

	send_from_v4(proto);

	test_fd = receive_once(epfd, proto);
	/*
	 * SO_DOMAIN yields an int; 'len' is a value-result argument and
	 * must be initialized to the buffer size before the call.
	 */
	len = sizeof(test_family);
	if (getsockopt(test_fd, SOL_SOCKET, SO_DOMAIN, &test_family, &len))
		error(1, errno, "failed to read socket domain");
	if (test_family != AF_INET)
		error(1, 0, "expected to receive on v4 socket but got v6 (%d)",
		      test_family);

	close(epfd);
}

int main(void)
{
	int rcv_fds[32], i;

	fprintf(stderr, "---- UDP IPv4 created before IPv6 ----\n");
	build_rcv_fd(AF_INET, SOCK_DGRAM, rcv_fds, 5);
	build_rcv_fd(AF_INET6, SOCK_DGRAM, &(rcv_fds[5]), 5);
	test(rcv_fds, 10, SOCK_DGRAM);
	for (i = 0; i < 10; ++i)
		close(rcv_fds[i]);

	fprintf(stderr, "---- UDP IPv6 created before IPv4 ----\n");
	build_rcv_fd(AF_INET6, SOCK_DGRAM, rcv_fds, 5);
	build_rcv_fd(AF_INET, SOCK_DGRAM, &(rcv_fds[5]), 5);
	test(rcv_fds, 10, SOCK_DGRAM);
	for (i = 0; i < 10; ++i)
		close(rcv_fds[i]);

	/* NOTE: UDP socket lookups traverse a different code path when there
	 * are > 10 sockets in a group.
	 */
	fprintf(stderr, "---- UDP IPv4 created before IPv6 (large) ----\n");
	build_rcv_fd(AF_INET, SOCK_DGRAM, rcv_fds, 16);
	build_rcv_fd(AF_INET6, SOCK_DGRAM, &(rcv_fds[16]), 16);
	test(rcv_fds, 32, SOCK_DGRAM);
	for (i = 0; i < 32; ++i)
		close(rcv_fds[i]);

	fprintf(stderr, "---- UDP IPv6 created before IPv4 (large) ----\n");
	build_rcv_fd(AF_INET6, SOCK_DGRAM, rcv_fds, 16);
	build_rcv_fd(AF_INET, SOCK_DGRAM, &(rcv_fds[16]), 16);
	test(rcv_fds, 32, SOCK_DGRAM);
	for (i = 0; i < 32; ++i)
		close(rcv_fds[i]);

	fprintf(stderr, "---- TCP IPv4 created before IPv6 ----\n");
	build_rcv_fd(AF_INET, SOCK_STREAM, rcv_fds, 5);
	build_rcv_fd(AF_INET6, SOCK_STREAM, &(rcv_fds[5]), 5);
	test(rcv_fds, 10, SOCK_STREAM);
	for (i = 0; i < 10; ++i)
		close(rcv_fds[i]);

	fprintf(stderr, "---- TCP IPv6 created before IPv4 ----\n");
	build_rcv_fd(AF_INET6, SOCK_STREAM, rcv_fds, 5);
	build_rcv_fd(AF_INET, SOCK_STREAM, &(rcv_fds[5]), 5);
	test(rcv_fds, 10, SOCK_STREAM);
	for (i = 0; i < 10; ++i)
		close(rcv_fds[i]);

	fprintf(stderr, "SUCCESS\n");
	return 0;
}