Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge 4.4-rc6 into usb-next

We want the USB and PHY fixes in here as well to make things easier for
testing and development.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

+2277 -1241
+4 -6
Documentation/devicetree/bindings/dma/ti-edma.txt
··· 22 22 Optional properties: 23 23 - ti,hwmods: Name of the hwmods associated to the eDMA CC 24 24 - ti,edma-memcpy-channels: List of channels allocated to be used for memcpy, iow 25 - these channels will be SW triggered channels. The list must 26 - contain 16 bits numbers, see example. 25 + these channels will be SW triggered channels. See example. 27 26 - ti,edma-reserved-slot-ranges: PaRAM slot ranges which should not be used by 28 27 the driver, they are allocated to be used by for example the 29 28 DSP. See example. ··· 55 56 ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 7>, <&edma_tptc2 0>; 56 57 57 58 /* Channel 20 and 21 is allocated for memcpy */ 58 - ti,edma-memcpy-channels = /bits/ 16 <20 21>; 59 - /* The following PaRAM slots are reserved: 35-45 and 100-110 */ 60 - ti,edma-reserved-slot-ranges = /bits/ 16 <35 10>, 61 - /bits/ 16 <100 10>; 59 + ti,edma-memcpy-channels = <20 21>; 60 + /* The following PaRAM slots are reserved: 35-44 and 100-109 */ 61 + ti,edma-reserved-slot-ranges = <35 10>, <100 10>; 62 62 }; 63 63 64 64 edma_tptc0: tptc@49800000 {
+1 -1
Documentation/devicetree/bindings/input/sun4i-lradc-keys.txt
··· 12 12 Required subnode-properties: 13 13 - label: Descriptive name of the key. 14 14 - linux,code: Keycode to emit. 15 - - channel: Channel this key is attached to, mut be 0 or 1. 15 + - channel: Channel this key is attached to, must be 0 or 1. 16 16 - voltage: Voltage in µV at lradc input when this key is pressed. 17 17 18 18 Example:
+6 -1
Documentation/devicetree/bindings/mtd/partition.txt
··· 6 6 as RedBoot. 7 7 8 8 The partition table should be a subnode of the mtd node and should be named 9 - 'partitions'. Partitions are defined in subnodes of the partitions node. 9 + 'partitions'. This node should have the following property: 10 + - compatible : (required) must be "fixed-partitions" 11 + Partitions are then defined in subnodes of the partitions node. 10 12 11 13 For backwards compatibility partitions as direct subnodes of the mtd device are 12 14 supported. This use is discouraged. ··· 38 36 39 37 flash@0 { 40 38 partitions { 39 + compatible = "fixed-partitions"; 41 40 #address-cells = <1>; 42 41 #size-cells = <1>; 43 42 ··· 56 53 57 54 flash@1 { 58 55 partitions { 56 + compatible = "fixed-partitions"; 59 57 #address-cells = <1>; 60 58 #size-cells = <2>; 61 59 ··· 70 66 71 67 flash@2 { 72 68 partitions { 69 + compatible = "fixed-partitions"; 73 70 #address-cells = <2>; 74 71 #size-cells = <2>; 75 72
-14
Documentation/networking/e100.txt
··· 181 181 If an issue is identified with the released source code on the supported 182 182 kernel with a supported adapter, email the specific information related to the 183 183 issue to e1000-devel@lists.sourceforge.net. 184 - 185 - 186 - License 187 - ======= 188 - 189 - This software program is released under the terms of a license agreement 190 - between you ('Licensee') and Intel. Do not use or load this software or any 191 - associated materials (collectively, the 'Software') until you have carefully 192 - read the full terms and conditions of the file COPYING located in this software 193 - package. By loading or using the Software, you agree to the terms of this 194 - Agreement. If you do not agree with the terms of this Agreement, do not install 195 - or use the Software. 196 - 197 - * Other names and brands may be claimed as the property of others.
+16 -1
MAINTAINERS
··· 5578 5578 R: Shannon Nelson <shannon.nelson@intel.com> 5579 5579 R: Carolyn Wyborny <carolyn.wyborny@intel.com> 5580 5580 R: Don Skidmore <donald.c.skidmore@intel.com> 5581 - R: Matthew Vick <matthew.vick@intel.com> 5581 + R: Bruce Allan <bruce.w.allan@intel.com> 5582 5582 R: John Ronciak <john.ronciak@intel.com> 5583 5583 R: Mitch Williams <mitch.a.williams@intel.com> 5584 5584 L: intel-wired-lan@lists.osuosl.org ··· 8380 8380 S: Maintained 8381 8381 F: drivers/pinctrl/samsung/ 8382 8382 8383 + PIN CONTROLLER - SINGLE 8384 + M: Tony Lindgren <tony@atomide.com> 8385 + M: Haojian Zhuang <haojian.zhuang@linaro.org> 8386 + L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 8387 + L: linux-omap@vger.kernel.org 8388 + S: Maintained 8389 + F: drivers/pinctrl/pinctrl-single.c 8390 + 8383 8391 PIN CONTROLLER - ST SPEAR 8384 8392 M: Viresh Kumar <vireshk@kernel.org> 8385 8393 L: spear-devel@list.st.com ··· 8953 8945 F: drivers/rpmsg/ 8954 8946 F: Documentation/rpmsg.txt 8955 8947 F: include/linux/rpmsg.h 8948 + 8949 + RENESAS ETHERNET DRIVERS 8950 + R: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com> 8951 + L: netdev@vger.kernel.org 8952 + L: linux-sh@vger.kernel.org 8953 + F: drivers/net/ethernet/renesas/ 8954 + F: include/linux/sh_eth.h 8956 8955 8957 8956 RESET CONTROLLER FRAMEWORK 8958 8957 M: Philipp Zabel <p.zabel@pengutronix.de>
+1 -1
Makefile
··· 1 1 VERSION = 4 2 2 PATCHLEVEL = 4 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc5 4 + EXTRAVERSION = -rc6 5 5 NAME = Blurry Fish Butt 6 6 7 7 # *DOCUMENTATION*
+1
arch/arc/Kconfig
··· 445 445 However some customers have peripherals mapped at this addr, so 446 446 Linux needs to be scooted a bit. 447 447 If you don't know what the above means, leave this setting alone. 448 + This needs to match memory start address specified in Device Tree 448 449 449 450 config HIGHMEM 450 451 bool "High Memory Support"
+1
arch/arc/boot/dts/axs10x_mb.dtsi
··· 46 46 snps,pbl = < 32 >; 47 47 clocks = <&apbclk>; 48 48 clock-names = "stmmaceth"; 49 + max-speed = <100>; 49 50 }; 50 51 51 52 ehci@0x40000 {
+2 -1
arch/arc/boot/dts/nsim_hs.dts
··· 17 17 18 18 memory { 19 19 device_type = "memory"; 20 - reg = <0x0 0x80000000 0x0 0x40000000 /* 1 GB low mem */ 20 + /* CONFIG_LINUX_LINK_BASE needs to match low mem start */ 21 + reg = <0x0 0x80000000 0x0 0x20000000 /* 512 MB low mem */ 21 22 0x1 0x00000000 0x0 0x40000000>; /* 1 GB highmem */ 22 23 }; 23 24
+2 -2
arch/arc/include/asm/mach_desc.h
··· 23 23 * @dt_compat: Array of device tree 'compatible' strings 24 24 * (XXX: although only 1st entry is looked at) 25 25 * @init_early: Very early callback [called from setup_arch()] 26 - * @init_cpu_smp: for each CPU as it is coming up (SMP as well as UP) 26 + * @init_per_cpu: for each CPU as it is coming up (SMP as well as UP) 27 27 * [(M):init_IRQ(), (o):start_kernel_secondary()] 28 28 * @init_machine: arch initcall level callback (e.g. populate static 29 29 * platform devices or parse Devicetree) ··· 35 35 const char **dt_compat; 36 36 void (*init_early)(void); 37 37 #ifdef CONFIG_SMP 38 - void (*init_cpu_smp)(unsigned int); 38 + void (*init_per_cpu)(unsigned int); 39 39 #endif 40 40 void (*init_machine)(void); 41 41 void (*init_late)(void);
+2 -2
arch/arc/include/asm/smp.h
··· 48 48 * @init_early_smp: A SMP specific h/w block can init itself 49 49 * Could be common across platforms so not covered by 50 50 * mach_desc->init_early() 51 - * @init_irq_cpu: Called for each core so SMP h/w block driver can do 51 + * @init_per_cpu: Called for each core so SMP h/w block driver can do 52 52 * any needed setup per cpu (e.g. IPI request) 53 53 * @cpu_kick: For Master to kickstart a cpu (optionally at a PC) 54 54 * @ipi_send: To send IPI to a @cpu ··· 57 57 struct plat_smp_ops { 58 58 const char *info; 59 59 void (*init_early_smp)(void); 60 - void (*init_irq_cpu)(int cpu); 60 + void (*init_per_cpu)(int cpu); 61 61 void (*cpu_kick)(int cpu, unsigned long pc); 62 62 void (*ipi_send)(int cpu); 63 63 void (*ipi_clear)(int irq);
-4
arch/arc/include/asm/unwind.h
··· 112 112 113 113 extern int arc_unwind(struct unwind_frame_info *frame); 114 114 extern void arc_unwind_init(void); 115 - extern void arc_unwind_setup(void); 116 115 extern void *unwind_add_table(struct module *module, const void *table_start, 117 116 unsigned long table_size); 118 117 extern void unwind_remove_table(void *handle, int init_only); ··· 151 152 { 152 153 } 153 154 154 - static inline void arc_unwind_setup(void) 155 - { 156 - } 157 155 #define unwind_add_table(a, b, c) 158 156 #define unwind_remove_table(a, b) 159 157
+13 -2
arch/arc/kernel/intc-arcv2.c
··· 106 106 static int arcv2_irq_map(struct irq_domain *d, unsigned int irq, 107 107 irq_hw_number_t hw) 108 108 { 109 - if (irq == TIMER0_IRQ || irq == IPI_IRQ) 109 + /* 110 + * core intc IRQs [16, 23]: 111 + * Statically assigned always private-per-core (Timers, WDT, IPI, PCT) 112 + */ 113 + if (hw < 24) { 114 + /* 115 + * A subsequent request_percpu_irq() fails if percpu_devid is 116 + * not set. That in turns sets NOAUTOEN, meaning each core needs 117 + * to call enable_percpu_irq() 118 + */ 119 + irq_set_percpu_devid(irq); 110 120 irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_percpu_irq); 111 - else 121 + } else { 112 122 irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_level_irq); 123 + } 113 124 114 125 return 0; 115 126 }
+24 -9
arch/arc/kernel/irq.c
··· 29 29 30 30 #ifdef CONFIG_SMP 31 31 /* a SMP H/w block could do IPI IRQ request here */ 32 - if (plat_smp_ops.init_irq_cpu) 33 - plat_smp_ops.init_irq_cpu(smp_processor_id()); 32 + if (plat_smp_ops.init_per_cpu) 33 + plat_smp_ops.init_per_cpu(smp_processor_id()); 34 34 35 - if (machine_desc->init_cpu_smp) 36 - machine_desc->init_cpu_smp(smp_processor_id()); 35 + if (machine_desc->init_per_cpu) 36 + machine_desc->init_per_cpu(smp_processor_id()); 37 37 #endif 38 38 } 39 39 ··· 51 51 set_irq_regs(old_regs); 52 52 } 53 53 54 + /* 55 + * API called for requesting percpu interrupts - called by each CPU 56 + * - For boot CPU, actually request the IRQ with genirq core + enables 57 + * - For subsequent callers only enable called locally 58 + * 59 + * Relies on being called by boot cpu first (i.e. request called ahead) of 60 + * any enable as expected by genirq. Hence Suitable only for TIMER, IPI 61 + * which are guaranteed to be setup on boot core first. 62 + * Late probed peripherals such as perf can't use this as there no guarantee 63 + * of being called on boot CPU first. 64 + */ 65 + 54 66 void arc_request_percpu_irq(int irq, int cpu, 55 67 irqreturn_t (*isr)(int irq, void *dev), 56 68 const char *irq_nm, ··· 72 60 if (!cpu) { 73 61 int rc; 74 62 63 + #ifdef CONFIG_ISA_ARCOMPACT 75 64 /* 76 - * These 2 calls are essential to making percpu IRQ APIs work 77 - * Ideally these details could be hidden in irq chip map function 78 - * but the issue is IPIs IRQs being static (non-DT) and platform 79 - * specific, so we can't identify them there. 65 + * A subsequent request_percpu_irq() fails if percpu_devid is 66 + * not set. 
That in turns sets NOAUTOEN, meaning each core needs 67 + * to call enable_percpu_irq() 68 + * 69 + * For ARCv2, this is done in irq map function since we know 70 + * which irqs are strictly per cpu 80 71 */ 81 72 irq_set_percpu_devid(irq); 82 - irq_modify_status(irq, IRQ_NOAUTOEN, 0); /* @irq, @clr, @set */ 73 + #endif 83 74 84 75 rc = request_percpu_irq(irq, isr, irq_nm, percpu_dev); 85 76 if (rc)
+1 -1
arch/arc/kernel/mcip.c
··· 132 132 struct plat_smp_ops plat_smp_ops = { 133 133 .info = smp_cpuinfo_buf, 134 134 .init_early_smp = mcip_probe_n_setup, 135 - .init_irq_cpu = mcip_setup_per_cpu, 135 + .init_per_cpu = mcip_setup_per_cpu, 136 136 .ipi_send = mcip_ipi_send, 137 137 .ipi_clear = mcip_ipi_clear, 138 138 };
+8 -22
arch/arc/kernel/perf_event.c
··· 428 428 429 429 #endif /* CONFIG_ISA_ARCV2 */ 430 430 431 - void arc_cpu_pmu_irq_init(void) 431 + static void arc_cpu_pmu_irq_init(void *data) 432 432 { 433 - struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu); 433 + int irq = *(int *)data; 434 434 435 - arc_request_percpu_irq(arc_pmu->irq, smp_processor_id(), arc_pmu_intr, 436 - "ARC perf counters", pmu_cpu); 435 + enable_percpu_irq(irq, IRQ_TYPE_NONE); 437 436 438 437 /* Clear all pending interrupt flags */ 439 438 write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff); ··· 514 515 515 516 if (has_interrupts) { 516 517 int irq = platform_get_irq(pdev, 0); 517 - unsigned long flags; 518 518 519 519 if (irq < 0) { 520 520 pr_err("Cannot get IRQ number for the platform\n"); ··· 522 524 523 525 arc_pmu->irq = irq; 524 526 525 - /* 526 - * arc_cpu_pmu_irq_init() needs to be called on all cores for 527 - * their respective local PMU. 528 - * However we use opencoded on_each_cpu() to ensure it is called 529 - * on core0 first, so that arc_request_percpu_irq() sets up 530 - * AUTOEN etc. Otherwise enable_percpu_irq() fails to enable 531 - * perf IRQ on non master cores. 532 - * see arc_request_percpu_irq() 533 - */ 534 - preempt_disable(); 535 - local_irq_save(flags); 536 - arc_cpu_pmu_irq_init(); 537 - local_irq_restore(flags); 538 - smp_call_function((smp_call_func_t)arc_cpu_pmu_irq_init, 0, 1); 539 - preempt_enable(); 527 + /* intc map function ensures irq_set_percpu_devid() called */ 528 + request_percpu_irq(irq, arc_pmu_intr, "ARC perf counters", 529 + this_cpu_ptr(&arc_pmu_cpu)); 540 530 541 - /* Clean all pending interrupt flags */ 542 - write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff); 531 + on_each_cpu(arc_cpu_pmu_irq_init, &irq, 1); 532 + 543 533 } else 544 534 arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; 545 535
-1
arch/arc/kernel/setup.c
··· 429 429 #endif 430 430 431 431 arc_unwind_init(); 432 - arc_unwind_setup(); 433 432 } 434 433 435 434 static int __init customize_machine(void)
+4 -4
arch/arc/kernel/smp.c
··· 132 132 pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu); 133 133 134 134 /* Some SMP H/w setup - for each cpu */ 135 - if (plat_smp_ops.init_irq_cpu) 136 - plat_smp_ops.init_irq_cpu(cpu); 135 + if (plat_smp_ops.init_per_cpu) 136 + plat_smp_ops.init_per_cpu(cpu); 137 137 138 - if (machine_desc->init_cpu_smp) 139 - machine_desc->init_cpu_smp(cpu); 138 + if (machine_desc->init_per_cpu) 139 + machine_desc->init_per_cpu(cpu); 140 140 141 141 arc_local_timer_setup(); 142 142
+35 -18
arch/arc/kernel/unwind.c
··· 170 170 171 171 static unsigned long read_pointer(const u8 **pLoc, 172 172 const void *end, signed ptrType); 173 + static void init_unwind_hdr(struct unwind_table *table, 174 + void *(*alloc) (unsigned long)); 175 + 176 + /* 177 + * wrappers for header alloc (vs. calling one vs. other at call site) 178 + * to elide section mismatches warnings 179 + */ 180 + static void *__init unw_hdr_alloc_early(unsigned long sz) 181 + { 182 + return __alloc_bootmem_nopanic(sz, sizeof(unsigned int), 183 + MAX_DMA_ADDRESS); 184 + } 185 + 186 + static void *unw_hdr_alloc(unsigned long sz) 187 + { 188 + return kmalloc(sz, GFP_KERNEL); 189 + } 173 190 174 191 static void init_unwind_table(struct unwind_table *table, const char *name, 175 192 const void *core_start, unsigned long core_size, ··· 226 209 __start_unwind, __end_unwind - __start_unwind, 227 210 NULL, 0); 228 211 /*__start_unwind_hdr, __end_unwind_hdr - __start_unwind_hdr);*/ 212 + 213 + init_unwind_hdr(&root_table, unw_hdr_alloc_early); 229 214 } 230 215 231 216 static const u32 bad_cie, not_fde; ··· 260 241 e2->fde = v; 261 242 } 262 243 263 - static void __init setup_unwind_table(struct unwind_table *table, 264 - void *(*alloc) (unsigned long)) 244 + static void init_unwind_hdr(struct unwind_table *table, 245 + void *(*alloc) (unsigned long)) 265 246 { 266 247 const u8 *ptr; 267 248 unsigned long tableSize = table->size, hdrSize; ··· 293 274 const u32 *cie = cie_for_fde(fde, table); 294 275 signed ptrType; 295 276 296 - if (cie == &not_fde) 277 + if (cie == &not_fde) /* only process FDE here */ 297 278 continue; 298 279 if (cie == NULL || cie == &bad_cie) 299 - return; 280 + continue; /* say FDE->CIE.version != 1 */ 300 281 ptrType = fde_pointer_type(cie); 301 282 if (ptrType < 0) 302 - return; 283 + continue; 303 284 304 285 ptr = (const u8 *)(fde + 2); 305 286 if (!read_pointer(&ptr, (const u8 *)(fde + 1) + *fde, ··· 319 300 320 301 hdrSize = 4 + sizeof(unsigned long) + sizeof(unsigned int) 321 302 + 2 * n * 
sizeof(unsigned long); 303 + 322 304 header = alloc(hdrSize); 323 305 if (!header) 324 306 return; 307 + 325 308 header->version = 1; 326 309 header->eh_frame_ptr_enc = DW_EH_PE_abs | DW_EH_PE_native; 327 310 header->fde_count_enc = DW_EH_PE_abs | DW_EH_PE_data4; ··· 343 322 344 323 if (fde[1] == 0xffffffff) 345 324 continue; /* this is a CIE */ 325 + 326 + if (*(u8 *)(cie + 2) != 1) 327 + continue; /* FDE->CIE.version not supported */ 328 + 346 329 ptr = (const u8 *)(fde + 2); 347 330 header->table[n].start = read_pointer(&ptr, 348 331 (const u8 *)(fde + 1) + ··· 365 340 table->hdrsz = hdrSize; 366 341 smp_wmb(); 367 342 table->header = (const void *)header; 368 - } 369 - 370 - static void *__init balloc(unsigned long sz) 371 - { 372 - return __alloc_bootmem_nopanic(sz, 373 - sizeof(unsigned int), 374 - __pa(MAX_DMA_ADDRESS)); 375 - } 376 - 377 - void __init arc_unwind_setup(void) 378 - { 379 - setup_unwind_table(&root_table, balloc); 380 343 } 381 344 382 345 #ifdef CONFIG_MODULES ··· 389 376 module->module_init, module->init_size, 390 377 table_start, table_size, 391 378 NULL, 0); 379 + 380 + init_unwind_hdr(table, unw_hdr_alloc); 392 381 393 382 #ifdef UNWIND_DEBUG 394 383 unw_debug("Table added for [%s] %lx %lx\n", ··· 454 439 info.init_only = init_only; 455 440 456 441 unlink_table(&info); /* XXX: SMP */ 442 + kfree(table->header); 457 443 kfree(table); 458 444 } 459 445 ··· 523 507 524 508 if (*cie <= sizeof(*cie) + 4 || *cie >= fde[1] - sizeof(*fde) 525 509 || (*cie & (sizeof(*cie) - 1)) 526 - || (cie[1] != 0xffffffff)) 510 + || (cie[1] != 0xffffffff) 511 + || ( *(u8 *)(cie + 2) != 1)) /* version 1 supported */ 527 512 return NULL; /* this is not a (valid) CIE */ 528 513 return cie; 529 514 }
+3 -1
arch/arc/mm/init.c
··· 51 51 int in_use = 0; 52 52 53 53 if (!low_mem_sz) { 54 - BUG_ON(base != low_mem_start); 54 + if (base != low_mem_start) 55 + panic("CONFIG_LINUX_LINK_BASE != DT memory { }"); 56 + 55 57 low_mem_sz = size; 56 58 in_use = 1; 57 59 } else {
+4
arch/arm/include/asm/uaccess.h
··· 510 510 static inline unsigned long __must_check 511 511 __copy_to_user(void __user *to, const void *from, unsigned long n) 512 512 { 513 + #ifndef CONFIG_UACCESS_WITH_MEMCPY 513 514 unsigned int __ua_flags = uaccess_save_and_enable(); 514 515 n = arm_copy_to_user(to, from, n); 515 516 uaccess_restore(__ua_flags); 516 517 return n; 518 + #else 519 + return arm_copy_to_user(to, from, n); 520 + #endif 517 521 } 518 522 519 523 extern unsigned long __must_check
+18 -15
arch/arm/kernel/process.c
··· 95 95 { 96 96 unsigned long flags; 97 97 char buf[64]; 98 + #ifndef CONFIG_CPU_V7M 99 + unsigned int domain; 100 + #ifdef CONFIG_CPU_SW_DOMAIN_PAN 101 + /* 102 + * Get the domain register for the parent context. In user 103 + * mode, we don't save the DACR, so lets use what it should 104 + * be. For other modes, we place it after the pt_regs struct. 105 + */ 106 + if (user_mode(regs)) 107 + domain = DACR_UACCESS_ENABLE; 108 + else 109 + domain = *(unsigned int *)(regs + 1); 110 + #else 111 + domain = get_domain(); 112 + #endif 113 + #endif 98 114 99 115 show_regs_print_info(KERN_DEFAULT); 100 116 ··· 139 123 140 124 #ifndef CONFIG_CPU_V7M 141 125 { 142 - unsigned int domain = get_domain(); 143 126 const char *segment; 144 - 145 - #ifdef CONFIG_CPU_SW_DOMAIN_PAN 146 - /* 147 - * Get the domain register for the parent context. In user 148 - * mode, we don't save the DACR, so lets use what it should 149 - * be. For other modes, we place it after the pt_regs struct. 150 - */ 151 - if (user_mode(regs)) 152 - domain = DACR_UACCESS_ENABLE; 153 - else 154 - domain = *(unsigned int *)(regs + 1); 155 - #endif 156 127 157 128 if ((domain & domain_mask(DOMAIN_USER)) == 158 129 domain_val(DOMAIN_USER, DOMAIN_NOACCESS)) ··· 166 163 buf[0] = '\0'; 167 164 #ifdef CONFIG_CPU_CP15_MMU 168 165 { 169 - unsigned int transbase, dac = get_domain(); 166 + unsigned int transbase; 170 167 asm("mrc p15, 0, %0, c2, c0\n\t" 171 168 : "=r" (transbase)); 172 169 snprintf(buf, sizeof(buf), " Table: %08x DAC: %08x", 173 - transbase, dac); 170 + transbase, domain); 174 171 } 175 172 #endif 176 173 asm("mrc p15, 0, %0, c1, c0\n" : "=r" (ctrl));
+3 -3
arch/arm/kernel/swp_emulate.c
··· 36 36 */ 37 37 #define __user_swpX_asm(data, addr, res, temp, B) \ 38 38 __asm__ __volatile__( \ 39 - " mov %2, %1\n" \ 40 - "0: ldrex"B" %1, [%3]\n" \ 41 - "1: strex"B" %0, %2, [%3]\n" \ 39 + "0: ldrex"B" %2, [%3]\n" \ 40 + "1: strex"B" %0, %1, [%3]\n" \ 42 41 " cmp %0, #0\n" \ 42 + " moveq %1, %2\n" \ 43 43 " movne %0, %4\n" \ 44 44 "2:\n" \ 45 45 " .section .text.fixup,\"ax\"\n" \
+23 -6
arch/arm/lib/uaccess_with_memcpy.c
··· 88 88 static unsigned long noinline 89 89 __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n) 90 90 { 91 + unsigned long ua_flags; 91 92 int atomic; 92 93 93 94 if (unlikely(segment_eq(get_fs(), KERNEL_DS))) { ··· 119 118 if (tocopy > n) 120 119 tocopy = n; 121 120 121 + ua_flags = uaccess_save_and_enable(); 122 122 memcpy((void *)to, from, tocopy); 123 + uaccess_restore(ua_flags); 123 124 to += tocopy; 124 125 from += tocopy; 125 126 n -= tocopy; ··· 148 145 * With frame pointer disabled, tail call optimization kicks in 149 146 * as well making this test almost invisible. 150 147 */ 151 - if (n < 64) 152 - return __copy_to_user_std(to, from, n); 153 - return __copy_to_user_memcpy(to, from, n); 148 + if (n < 64) { 149 + unsigned long ua_flags = uaccess_save_and_enable(); 150 + n = __copy_to_user_std(to, from, n); 151 + uaccess_restore(ua_flags); 152 + } else { 153 + n = __copy_to_user_memcpy(to, from, n); 154 + } 155 + return n; 154 156 } 155 157 156 158 static unsigned long noinline 157 159 __clear_user_memset(void __user *addr, unsigned long n) 158 160 { 161 + unsigned long ua_flags; 162 + 159 163 if (unlikely(segment_eq(get_fs(), KERNEL_DS))) { 160 164 memset((void *)addr, 0, n); 161 165 return 0; ··· 185 175 if (tocopy > n) 186 176 tocopy = n; 187 177 178 + ua_flags = uaccess_save_and_enable(); 188 179 memset((void *)addr, 0, tocopy); 180 + uaccess_restore(ua_flags); 189 181 addr += tocopy; 190 182 n -= tocopy; 191 183 ··· 205 193 unsigned long arm_clear_user(void __user *addr, unsigned long n) 206 194 { 207 195 /* See rational for this in __copy_to_user() above. */ 208 - if (n < 64) 209 - return __clear_user_std(addr, n); 210 - return __clear_user_memset(addr, n); 196 + if (n < 64) { 197 + unsigned long ua_flags = uaccess_save_and_enable(); 198 + n = __clear_user_std(addr, n); 199 + uaccess_restore(ua_flags); 200 + } else { 201 + n = __clear_user_memset(addr, n); 202 + } 203 + return n; 211 204 } 212 205 213 206 #if 0
+26 -12
arch/arm/mm/context.c
··· 165 165 __flush_icache_all(); 166 166 } 167 167 168 - static int is_reserved_asid(u64 asid) 168 + static bool check_update_reserved_asid(u64 asid, u64 newasid) 169 169 { 170 170 int cpu; 171 - for_each_possible_cpu(cpu) 172 - if (per_cpu(reserved_asids, cpu) == asid) 173 - return 1; 174 - return 0; 171 + bool hit = false; 172 + 173 + /* 174 + * Iterate over the set of reserved ASIDs looking for a match. 175 + * If we find one, then we can update our mm to use newasid 176 + * (i.e. the same ASID in the current generation) but we can't 177 + * exit the loop early, since we need to ensure that all copies 178 + * of the old ASID are updated to reflect the mm. Failure to do 179 + * so could result in us missing the reserved ASID in a future 180 + * generation. 181 + */ 182 + for_each_possible_cpu(cpu) { 183 + if (per_cpu(reserved_asids, cpu) == asid) { 184 + hit = true; 185 + per_cpu(reserved_asids, cpu) = newasid; 186 + } 187 + } 188 + 189 + return hit; 175 190 } 176 191 177 192 static u64 new_context(struct mm_struct *mm, unsigned int cpu) ··· 196 181 u64 generation = atomic64_read(&asid_generation); 197 182 198 183 if (asid != 0) { 184 + u64 newasid = generation | (asid & ~ASID_MASK); 185 + 199 186 /* 200 187 * If our current ASID was active during a rollover, we 201 188 * can continue to use it and this was just a false alarm. 
202 189 */ 203 - if (is_reserved_asid(asid)) 204 - return generation | (asid & ~ASID_MASK); 190 + if (check_update_reserved_asid(asid, newasid)) 191 + return newasid; 205 192 206 193 /* 207 194 * We had a valid ASID in a previous life, so try to re-use ··· 211 194 */ 212 195 asid &= ~ASID_MASK; 213 196 if (!__test_and_set_bit(asid, asid_map)) 214 - goto bump_gen; 197 + return newasid; 215 198 } 216 199 217 200 /* ··· 233 216 234 217 __set_bit(asid, asid_map); 235 218 cur_idx = asid; 236 - 237 - bump_gen: 238 - asid |= generation; 239 219 cpumask_clear(mm_cpumask(mm)); 240 - return asid; 220 + return asid | generation; 241 221 } 242 222 243 223 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
+1 -1
arch/arm/mm/dma-mapping.c
··· 1521 1521 return -ENOMEM; 1522 1522 1523 1523 for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) { 1524 - phys_addr_t phys = sg_phys(s) & PAGE_MASK; 1524 + phys_addr_t phys = page_to_phys(sg_page(s)); 1525 1525 unsigned int len = PAGE_ALIGN(s->offset + s->length); 1526 1526 1527 1527 if (!is_coherent &&
+62 -30
arch/arm/mm/init.c
··· 22 22 #include <linux/memblock.h> 23 23 #include <linux/dma-contiguous.h> 24 24 #include <linux/sizes.h> 25 + #include <linux/stop_machine.h> 25 26 26 27 #include <asm/cp15.h> 27 28 #include <asm/mach-types.h> ··· 628 627 * safe to be called with preemption disabled, as under stop_machine(). 629 628 */ 630 629 static inline void section_update(unsigned long addr, pmdval_t mask, 631 - pmdval_t prot) 630 + pmdval_t prot, struct mm_struct *mm) 632 631 { 633 - struct mm_struct *mm; 634 632 pmd_t *pmd; 635 633 636 - mm = current->active_mm; 637 634 pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr); 638 635 639 636 #ifdef CONFIG_ARM_LPAE ··· 655 656 return !!(get_cr() & CR_XP); 656 657 } 657 658 658 - #define set_section_perms(perms, field) { \ 659 - size_t i; \ 660 - unsigned long addr; \ 661 - \ 662 - if (!arch_has_strict_perms()) \ 663 - return; \ 664 - \ 665 - for (i = 0; i < ARRAY_SIZE(perms); i++) { \ 666 - if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) || \ 667 - !IS_ALIGNED(perms[i].end, SECTION_SIZE)) { \ 668 - pr_err("BUG: section %lx-%lx not aligned to %lx\n", \ 669 - perms[i].start, perms[i].end, \ 670 - SECTION_SIZE); \ 671 - continue; \ 672 - } \ 673 - \ 674 - for (addr = perms[i].start; \ 675 - addr < perms[i].end; \ 676 - addr += SECTION_SIZE) \ 677 - section_update(addr, perms[i].mask, \ 678 - perms[i].field); \ 679 - } \ 659 + void set_section_perms(struct section_perm *perms, int n, bool set, 660 + struct mm_struct *mm) 661 + { 662 + size_t i; 663 + unsigned long addr; 664 + 665 + if (!arch_has_strict_perms()) 666 + return; 667 + 668 + for (i = 0; i < n; i++) { 669 + if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) || 670 + !IS_ALIGNED(perms[i].end, SECTION_SIZE)) { 671 + pr_err("BUG: section %lx-%lx not aligned to %lx\n", 672 + perms[i].start, perms[i].end, 673 + SECTION_SIZE); 674 + continue; 675 + } 676 + 677 + for (addr = perms[i].start; 678 + addr < perms[i].end; 679 + addr += SECTION_SIZE) 680 + section_update(addr, perms[i].mask, 
681 + set ? perms[i].prot : perms[i].clear, mm); 682 + } 683 + 680 684 } 681 685 682 - static inline void fix_kernmem_perms(void) 686 + static void update_sections_early(struct section_perm perms[], int n) 683 687 { 684 - set_section_perms(nx_perms, prot); 688 + struct task_struct *t, *s; 689 + 690 + read_lock(&tasklist_lock); 691 + for_each_process(t) { 692 + if (t->flags & PF_KTHREAD) 693 + continue; 694 + for_each_thread(t, s) 695 + set_section_perms(perms, n, true, s->mm); 696 + } 697 + read_unlock(&tasklist_lock); 698 + set_section_perms(perms, n, true, current->active_mm); 699 + set_section_perms(perms, n, true, &init_mm); 700 + } 701 + 702 + int __fix_kernmem_perms(void *unused) 703 + { 704 + update_sections_early(nx_perms, ARRAY_SIZE(nx_perms)); 705 + return 0; 706 + } 707 + 708 + void fix_kernmem_perms(void) 709 + { 710 + stop_machine(__fix_kernmem_perms, NULL, NULL); 685 711 } 686 712 687 713 #ifdef CONFIG_DEBUG_RODATA 714 + int __mark_rodata_ro(void *unused) 715 + { 716 + update_sections_early(ro_perms, ARRAY_SIZE(ro_perms)); 717 + return 0; 718 + } 719 + 688 720 void mark_rodata_ro(void) 689 721 { 690 - set_section_perms(ro_perms, prot); 722 + stop_machine(__mark_rodata_ro, NULL, NULL); 691 723 } 692 724 693 725 void set_kernel_text_rw(void) 694 726 { 695 - set_section_perms(ro_perms, clear); 727 + set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false, 728 + current->active_mm); 696 729 } 697 730 698 731 void set_kernel_text_ro(void) 699 732 { 700 - set_section_perms(ro_perms, prot); 733 + set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true, 734 + current->active_mm); 701 735 } 702 736 #endif /* CONFIG_DEBUG_RODATA */ 703 737
+2 -2
arch/arm/mm/proc-v7.S
··· 95 95 .equ cpu_v7_suspend_size, 4 * 9 96 96 #ifdef CONFIG_ARM_CPU_SUSPEND 97 97 ENTRY(cpu_v7_do_suspend) 98 - stmfd sp!, {r4 - r10, lr} 98 + stmfd sp!, {r4 - r11, lr} 99 99 mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID 100 100 mrc p15, 0, r5, c13, c0, 3 @ User r/o thread ID 101 101 stmia r0!, {r4 - r5} ··· 112 112 mrc p15, 0, r9, c1, c0, 1 @ Auxiliary control register 113 113 mrc p15, 0, r10, c1, c0, 2 @ Co-processor access control 114 114 stmia r0, {r5 - r11} 115 - ldmfd sp!, {r4 - r10, pc} 115 + ldmfd sp!, {r4 - r11, pc} 116 116 ENDPROC(cpu_v7_do_suspend) 117 117 118 118 ENTRY(cpu_v7_do_resume)
+1 -1
arch/ia64/include/asm/unistd.h
··· 11 11 12 12 13 13 14 - #define NR_syscalls 322 /* length of syscall table */ 14 + #define NR_syscalls 323 /* length of syscall table */ 15 15 16 16 /* 17 17 * The following defines stop scripts/checksyscalls.sh from complaining about
+1
arch/ia64/include/uapi/asm/unistd.h
··· 335 335 #define __NR_userfaultfd 1343 336 336 #define __NR_membarrier 1344 337 337 #define __NR_kcmp 1345 338 + #define __NR_mlock2 1346 338 339 339 340 #endif /* _UAPI_ASM_IA64_UNISTD_H */
+1
arch/ia64/kernel/entry.S
··· 1771 1771 data8 sys_userfaultfd 1772 1772 data8 sys_membarrier 1773 1773 data8 sys_kcmp // 1345 1774 + data8 sys_mlock2 1774 1775 1775 1776 .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
+2 -1
arch/microblaze/kernel/dma.c
··· 61 61 /* FIXME this part of code is untested */ 62 62 for_each_sg(sgl, sg, nents, i) { 63 63 sg->dma_address = sg_phys(sg); 64 - __dma_sync(sg_phys(sg), sg->length, direction); 64 + __dma_sync(page_to_phys(sg_page(sg)) + sg->offset, 65 + sg->length, direction); 65 66 } 66 67 67 68 return nents;
+12 -12
arch/powerpc/include/asm/systbl.h
··· 370 370 PPC64ONLY(switch_endian) 371 371 SYSCALL_SPU(userfaultfd) 372 372 SYSCALL_SPU(membarrier) 373 - SYSCALL(semop) 374 - SYSCALL(semget) 375 - COMPAT_SYS(semctl) 376 - COMPAT_SYS(semtimedop) 377 - COMPAT_SYS(msgsnd) 378 - COMPAT_SYS(msgrcv) 379 - SYSCALL(msgget) 380 - COMPAT_SYS(msgctl) 381 - COMPAT_SYS(shmat) 382 - SYSCALL(shmdt) 383 - SYSCALL(shmget) 384 - COMPAT_SYS(shmctl) 373 + SYSCALL(ni_syscall) 374 + SYSCALL(ni_syscall) 375 + SYSCALL(ni_syscall) 376 + SYSCALL(ni_syscall) 377 + SYSCALL(ni_syscall) 378 + SYSCALL(ni_syscall) 379 + SYSCALL(ni_syscall) 380 + SYSCALL(ni_syscall) 381 + SYSCALL(ni_syscall) 382 + SYSCALL(ni_syscall) 383 + SYSCALL(ni_syscall) 384 + SYSCALL(ni_syscall) 385 385 SYSCALL(mlock2)
-12
arch/powerpc/include/uapi/asm/unistd.h
··· 388 388 #define __NR_switch_endian 363 389 389 #define __NR_userfaultfd 364 390 390 #define __NR_membarrier 365 391 - #define __NR_semop 366 392 - #define __NR_semget 367 393 - #define __NR_semctl 368 394 - #define __NR_semtimedop 369 395 - #define __NR_msgsnd 370 396 - #define __NR_msgrcv 371 397 - #define __NR_msgget 372 398 - #define __NR_msgctl 373 399 - #define __NR_shmat 374 400 - #define __NR_shmdt 375 401 - #define __NR_shmget 376 402 - #define __NR_shmctl 377 403 391 #define __NR_mlock2 378 404 392 405 393 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
+13 -1
arch/powerpc/platforms/powernv/opal-irqchip.c
··· 83 83 set_bit(d->hwirq, &opal_event_irqchip.mask); 84 84 85 85 opal_poll_events(&events); 86 - opal_handle_events(be64_to_cpu(events)); 86 + last_outstanding_events = be64_to_cpu(events); 87 + 88 + /* 89 + * We can't just handle the events now with opal_handle_events(). 90 + * If we did we would deadlock when opal_event_unmask() is called from 91 + * handle_level_irq() with the irq descriptor lock held, because 92 + * calling opal_handle_events() would call generic_handle_irq() and 93 + * then handle_level_irq() which would try to take the descriptor lock 94 + * again. Instead queue the events for later. 95 + */ 96 + if (last_outstanding_events & opal_event_irqchip.mask) 97 + /* Need to retrigger the interrupt */ 98 + irq_work_queue(&opal_event_irq_work); 87 99 } 88 100 89 101 static int opal_event_set_type(struct irq_data *d, unsigned int flow_type)
+1 -1
arch/powerpc/platforms/powernv/opal.c
··· 278 278 279 279 /* Sanity check */ 280 280 if (type >= OPAL_MSG_TYPE_MAX) { 281 - pr_warning("%s: Unknown message type: %u\n", __func__, type); 281 + pr_warn_once("%s: Unknown message type: %u\n", __func__, type); 282 282 return; 283 283 } 284 284 opal_message_do_notify(type, (void *)&msg);
+1 -1
arch/x86/mm/dump_pagetables.c
··· 89 89 { 0/* VMALLOC_START */, "vmalloc() Area" }, 90 90 { 0/*VMALLOC_END*/, "vmalloc() End" }, 91 91 # ifdef CONFIG_HIGHMEM 92 - { 0/*PKMAP_BASE*/, "Persisent kmap() Area" }, 92 + { 0/*PKMAP_BASE*/, "Persistent kmap() Area" }, 93 93 # endif 94 94 { 0/*FIXADDR_START*/, "Fixmap Area" }, 95 95 #endif
+2 -7
arch/x86/xen/mmu.c
··· 2495 2495 { 2496 2496 x86_init.paging.pagetable_init = xen_pagetable_init; 2497 2497 2498 - /* Optimization - we can use the HVM one but it has no idea which 2499 - * VCPUs are descheduled - which means that it will needlessly IPI 2500 - * them. Xen knows so let it do the job. 2501 - */ 2502 - if (xen_feature(XENFEAT_auto_translated_physmap)) { 2503 - pv_mmu_ops.flush_tlb_others = xen_flush_tlb_others; 2498 + if (xen_feature(XENFEAT_auto_translated_physmap)) 2504 2499 return; 2505 - } 2500 + 2506 2501 pv_mmu_ops = xen_mmu_ops; 2507 2502 2508 2503 memset(dummy_mapping, 0xff, PAGE_SIZE);
+10 -10
arch/x86/xen/suspend.c
··· 68 68 69 69 void xen_arch_pre_suspend(void) 70 70 { 71 - int cpu; 72 - 73 - for_each_online_cpu(cpu) 74 - xen_pmu_finish(cpu); 75 - 76 71 if (xen_pv_domain()) 77 72 xen_pv_pre_suspend(); 78 73 } 79 74 80 75 void xen_arch_post_suspend(int cancelled) 81 76 { 82 - int cpu; 83 - 84 77 if (xen_pv_domain()) 85 78 xen_pv_post_suspend(cancelled); 86 79 else 87 80 xen_hvm_post_suspend(cancelled); 88 - 89 - for_each_online_cpu(cpu) 90 - xen_pmu_init(cpu); 91 81 } 92 82 93 83 static void xen_vcpu_notify_restore(void *data) ··· 96 106 97 107 void xen_arch_resume(void) 98 108 { 109 + int cpu; 110 + 99 111 on_each_cpu(xen_vcpu_notify_restore, NULL, 1); 112 + 113 + for_each_online_cpu(cpu) 114 + xen_pmu_init(cpu); 100 115 } 101 116 102 117 void xen_arch_suspend(void) 103 118 { 119 + int cpu; 120 + 121 + for_each_online_cpu(cpu) 122 + xen_pmu_finish(cpu); 123 + 104 124 on_each_cpu(xen_vcpu_notify_suspend, NULL, 1); 105 125 }
+1 -1
crypto/ablkcipher.c
··· 277 277 if (WARN_ON_ONCE(in_irq())) 278 278 return -EDEADLK; 279 279 280 + walk->iv = req->info; 280 281 walk->nbytes = walk->total; 281 282 if (unlikely(!walk->total)) 282 283 return 0; 283 284 284 285 walk->iv_buffer = NULL; 285 - walk->iv = req->info; 286 286 if (unlikely(((unsigned long)walk->iv & alignmask))) { 287 287 int err = ablkcipher_copy_iv(walk, tfm, alignmask); 288 288
+1 -1
crypto/blkcipher.c
··· 326 326 if (WARN_ON_ONCE(in_irq())) 327 327 return -EDEADLK; 328 328 329 + walk->iv = desc->info; 329 330 walk->nbytes = walk->total; 330 331 if (unlikely(!walk->total)) 331 332 return 0; 332 333 333 334 walk->buffer = NULL; 334 - walk->iv = desc->info; 335 335 if (unlikely(((unsigned long)walk->iv & walk->alignmask))) { 336 336 int err = blkcipher_copy_iv(walk); 337 337 if (err)
+1 -1
drivers/acpi/nfit.c
··· 1810 1810 if (!dev->driver) { 1811 1811 /* dev->driver may be null if we're being removed */ 1812 1812 dev_dbg(dev, "%s: no driver found for dev\n", __func__); 1813 - return; 1813 + goto out_unlock; 1814 1814 } 1815 1815 1816 1816 if (!acpi_desc) {
+22 -11
drivers/base/power/domain.c
··· 390 390 struct generic_pm_domain *genpd; 391 391 bool (*stop_ok)(struct device *__dev); 392 392 struct gpd_timing_data *td = &dev_gpd_data(dev)->td; 393 + bool runtime_pm = pm_runtime_enabled(dev); 393 394 ktime_t time_start; 394 395 s64 elapsed_ns; 395 396 int ret; ··· 401 400 if (IS_ERR(genpd)) 402 401 return -EINVAL; 403 402 403 + /* 404 + * A runtime PM centric subsystem/driver may re-use the runtime PM 405 + * callbacks for other purposes than runtime PM. In those scenarios 406 + * runtime PM is disabled. Under these circumstances, we shall skip 407 + * validating/measuring the PM QoS latency. 408 + */ 404 409 stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL; 405 - if (stop_ok && !stop_ok(dev)) 410 + if (runtime_pm && stop_ok && !stop_ok(dev)) 406 411 return -EBUSY; 407 412 408 413 /* Measure suspend latency. */ 409 - time_start = ktime_get(); 414 + if (runtime_pm) 415 + time_start = ktime_get(); 410 416 411 417 ret = genpd_save_dev(genpd, dev); 412 418 if (ret) ··· 426 418 } 427 419 428 420 /* Update suspend latency value if the measured time exceeds it. 
*/ 429 - elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); 430 - if (elapsed_ns > td->suspend_latency_ns) { 431 - td->suspend_latency_ns = elapsed_ns; 432 - dev_dbg(dev, "suspend latency exceeded, %lld ns\n", 433 - elapsed_ns); 434 - genpd->max_off_time_changed = true; 435 - td->constraint_changed = true; 421 + if (runtime_pm) { 422 + elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); 423 + if (elapsed_ns > td->suspend_latency_ns) { 424 + td->suspend_latency_ns = elapsed_ns; 425 + dev_dbg(dev, "suspend latency exceeded, %lld ns\n", 426 + elapsed_ns); 427 + genpd->max_off_time_changed = true; 428 + td->constraint_changed = true; 429 + } 436 430 } 437 431 438 432 /* ··· 463 453 { 464 454 struct generic_pm_domain *genpd; 465 455 struct gpd_timing_data *td = &dev_gpd_data(dev)->td; 456 + bool runtime_pm = pm_runtime_enabled(dev); 466 457 ktime_t time_start; 467 458 s64 elapsed_ns; 468 459 int ret; ··· 490 479 491 480 out: 492 481 /* Measure resume latency. */ 493 - if (timed) 482 + if (timed && runtime_pm) 494 483 time_start = ktime_get(); 495 484 496 485 genpd_start_dev(genpd, dev); 497 486 genpd_restore_dev(genpd, dev); 498 487 499 488 /* Update resume latency value if the measured time exceeds it. */ 500 - if (timed) { 489 + if (timed && runtime_pm) { 501 490 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); 502 491 if (elapsed_ns > td->resume_latency_ns) { 503 492 td->resume_latency_ns = elapsed_ns;
+10 -5
drivers/block/xen-blkback/blkback.c
··· 950 950 goto unmap; 951 951 952 952 for (n = 0, i = 0; n < nseg; n++) { 953 + uint8_t first_sect, last_sect; 954 + 953 955 if ((n % SEGS_PER_INDIRECT_FRAME) == 0) { 954 956 /* Map indirect segments */ 955 957 if (segments) ··· 959 957 segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page); 960 958 } 961 959 i = n % SEGS_PER_INDIRECT_FRAME; 960 + 962 961 pending_req->segments[n]->gref = segments[i].gref; 963 - seg[n].nsec = segments[i].last_sect - 964 - segments[i].first_sect + 1; 965 - seg[n].offset = (segments[i].first_sect << 9); 966 - if ((segments[i].last_sect >= (XEN_PAGE_SIZE >> 9)) || 967 - (segments[i].last_sect < segments[i].first_sect)) { 962 + 963 + first_sect = READ_ONCE(segments[i].first_sect); 964 + last_sect = READ_ONCE(segments[i].last_sect); 965 + if (last_sect >= (XEN_PAGE_SIZE >> 9) || last_sect < first_sect) { 968 966 rc = -EINVAL; 969 967 goto unmap; 970 968 } 969 + 970 + seg[n].nsec = last_sect - first_sect + 1; 971 + seg[n].offset = first_sect << 9; 971 972 preq->nr_sects += seg[n].nsec; 972 973 } 973 974
+4 -4
drivers/block/xen-blkback/common.h
··· 408 408 struct blkif_x86_32_request *src) 409 409 { 410 410 int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j; 411 - dst->operation = src->operation; 412 - switch (src->operation) { 411 + dst->operation = READ_ONCE(src->operation); 412 + switch (dst->operation) { 413 413 case BLKIF_OP_READ: 414 414 case BLKIF_OP_WRITE: 415 415 case BLKIF_OP_WRITE_BARRIER: ··· 456 456 struct blkif_x86_64_request *src) 457 457 { 458 458 int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j; 459 - dst->operation = src->operation; 460 - switch (src->operation) { 459 + dst->operation = READ_ONCE(src->operation); 460 + switch (dst->operation) { 461 461 case BLKIF_OP_READ: 462 462 case BLKIF_OP_WRITE: 463 463 case BLKIF_OP_WRITE_BARRIER:
+1 -1
drivers/cpufreq/Kconfig.arm
··· 226 226 227 227 config ARM_TEGRA124_CPUFREQ 228 228 tristate "Tegra124 CPUFreq support" 229 - depends on ARCH_TEGRA && CPUFREQ_DT 229 + depends on ARCH_TEGRA && CPUFREQ_DT && REGULATOR 230 230 default y 231 231 help 232 232 This adds the CPUFreq driver support for Tegra124 SOCs.
+1 -1
drivers/cpufreq/intel_pstate.c
··· 1123 1123 limits->max_sysfs_pct); 1124 1124 limits->max_perf_pct = max(limits->min_policy_pct, 1125 1125 limits->max_perf_pct); 1126 - limits->max_perf = round_up(limits->max_perf, 8); 1126 + limits->max_perf = round_up(limits->max_perf, FRAC_BITS); 1127 1127 1128 1128 /* Make sure min_perf_pct <= max_perf_pct */ 1129 1129 limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
+6 -3
drivers/dma/at_xdmac.c
··· 156 156 #define AT_XDMAC_CC_WRIP (0x1 << 23) /* Write in Progress (read only) */ 157 157 #define AT_XDMAC_CC_WRIP_DONE (0x0 << 23) 158 158 #define AT_XDMAC_CC_WRIP_IN_PROGRESS (0x1 << 23) 159 - #define AT_XDMAC_CC_PERID(i) (0x7f & (h) << 24) /* Channel Peripheral Identifier */ 159 + #define AT_XDMAC_CC_PERID(i) (0x7f & (i) << 24) /* Channel Peripheral Identifier */ 160 160 #define AT_XDMAC_CDS_MSP 0x2C /* Channel Data Stride Memory Set Pattern */ 161 161 #define AT_XDMAC_CSUS 0x30 /* Channel Source Microblock Stride */ 162 162 #define AT_XDMAC_CDUS 0x34 /* Channel Destination Microblock Stride */ ··· 965 965 NULL, 966 966 src_addr, dst_addr, 967 967 xt, xt->sgl); 968 - for (i = 0; i < xt->numf; i++) 968 + 969 + /* Length of the block is (BLEN+1) microblocks. */ 970 + for (i = 0; i < xt->numf - 1; i++) 969 971 at_xdmac_increment_block_count(chan, first); 970 972 971 973 dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n", ··· 1088 1086 /* Check remaining length and change data width if needed. */ 1089 1087 dwidth = at_xdmac_align_width(chan, 1090 1088 src_addr | dst_addr | xfer_size); 1089 + chan_cc &= ~AT_XDMAC_CC_DWIDTH_MASK; 1091 1090 chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth); 1092 1091 1093 1092 ublen = xfer_size >> dwidth; ··· 1336 1333 * since we don't care about the stride anymore. 1337 1334 */ 1338 1335 if ((i == (sg_len - 1)) && 1339 - sg_dma_len(ppsg) == sg_dma_len(psg)) { 1336 + sg_dma_len(psg) == sg_dma_len(sg)) { 1340 1337 dev_dbg(chan2dev(chan), 1341 1338 "%s: desc 0x%p can be merged with desc 0x%p\n", 1342 1339 __func__, desc, pdesc);
+54 -24
drivers/dma/bcm2835-dma.c
··· 31 31 */ 32 32 #include <linux/dmaengine.h> 33 33 #include <linux/dma-mapping.h> 34 + #include <linux/dmapool.h> 34 35 #include <linux/err.h> 35 36 #include <linux/init.h> 36 37 #include <linux/interrupt.h> ··· 63 62 uint32_t pad[2]; 64 63 }; 65 64 65 + struct bcm2835_cb_entry { 66 + struct bcm2835_dma_cb *cb; 67 + dma_addr_t paddr; 68 + }; 69 + 66 70 struct bcm2835_chan { 67 71 struct virt_dma_chan vc; 68 72 struct list_head node; ··· 78 72 79 73 int ch; 80 74 struct bcm2835_desc *desc; 75 + struct dma_pool *cb_pool; 81 76 82 77 void __iomem *chan_base; 83 78 int irq_number; 84 79 }; 85 80 86 81 struct bcm2835_desc { 82 + struct bcm2835_chan *c; 87 83 struct virt_dma_desc vd; 88 84 enum dma_transfer_direction dir; 89 85 90 - unsigned int control_block_size; 91 - struct bcm2835_dma_cb *control_block_base; 92 - dma_addr_t control_block_base_phys; 86 + struct bcm2835_cb_entry *cb_list; 93 87 94 88 unsigned int frames; 95 89 size_t size; ··· 149 143 static void bcm2835_dma_desc_free(struct virt_dma_desc *vd) 150 144 { 151 145 struct bcm2835_desc *desc = container_of(vd, struct bcm2835_desc, vd); 152 - dma_free_coherent(desc->vd.tx.chan->device->dev, 153 - desc->control_block_size, 154 - desc->control_block_base, 155 - desc->control_block_base_phys); 146 + int i; 147 + 148 + for (i = 0; i < desc->frames; i++) 149 + dma_pool_free(desc->c->cb_pool, desc->cb_list[i].cb, 150 + desc->cb_list[i].paddr); 151 + 152 + kfree(desc->cb_list); 156 153 kfree(desc); 157 154 } 158 155 ··· 208 199 209 200 c->desc = d = to_bcm2835_dma_desc(&vd->tx); 210 201 211 - writel(d->control_block_base_phys, c->chan_base + BCM2835_DMA_ADDR); 202 + writel(d->cb_list[0].paddr, c->chan_base + BCM2835_DMA_ADDR); 212 203 writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS); 213 204 } 214 205 ··· 241 232 static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan) 242 233 { 243 234 struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); 235 + struct device *dev = c->vc.chan.device->dev; 244 
236 245 - dev_dbg(c->vc.chan.device->dev, 246 - "Allocating DMA channel %d\n", c->ch); 237 + dev_dbg(dev, "Allocating DMA channel %d\n", c->ch); 238 + 239 + c->cb_pool = dma_pool_create(dev_name(dev), dev, 240 + sizeof(struct bcm2835_dma_cb), 0, 0); 241 + if (!c->cb_pool) { 242 + dev_err(dev, "unable to allocate descriptor pool\n"); 243 + return -ENOMEM; 244 + } 247 245 248 246 return request_irq(c->irq_number, 249 247 bcm2835_dma_callback, 0, "DMA IRQ", c); ··· 262 246 263 247 vchan_free_chan_resources(&c->vc); 264 248 free_irq(c->irq_number, c); 249 + dma_pool_destroy(c->cb_pool); 265 250 266 251 dev_dbg(c->vc.chan.device->dev, "Freeing DMA channel %u\n", c->ch); 267 252 } ··· 278 261 size_t size; 279 262 280 263 for (size = i = 0; i < d->frames; i++) { 281 - struct bcm2835_dma_cb *control_block = 282 - &d->control_block_base[i]; 264 + struct bcm2835_dma_cb *control_block = d->cb_list[i].cb; 283 265 size_t this_size = control_block->length; 284 266 dma_addr_t dma; 285 267 ··· 359 343 dma_addr_t dev_addr; 360 344 unsigned int es, sync_type; 361 345 unsigned int frame; 346 + int i; 362 347 363 348 /* Grab configuration */ 364 349 if (!is_slave_direction(direction)) { ··· 391 374 if (!d) 392 375 return NULL; 393 376 377 + d->c = c; 394 378 d->dir = direction; 395 379 d->frames = buf_len / period_len; 396 380 397 - /* Allocate memory for control blocks */ 398 - d->control_block_size = d->frames * sizeof(struct bcm2835_dma_cb); 399 - d->control_block_base = dma_zalloc_coherent(chan->device->dev, 400 - d->control_block_size, &d->control_block_base_phys, 401 - GFP_NOWAIT); 402 - 403 - if (!d->control_block_base) { 381 + d->cb_list = kcalloc(d->frames, sizeof(*d->cb_list), GFP_KERNEL); 382 + if (!d->cb_list) { 404 383 kfree(d); 405 384 return NULL; 385 + } 386 + /* Allocate memory for control blocks */ 387 + for (i = 0; i < d->frames; i++) { 388 + struct bcm2835_cb_entry *cb_entry = &d->cb_list[i]; 389 + 390 + cb_entry->cb = dma_pool_zalloc(c->cb_pool, GFP_ATOMIC, 391 + 
&cb_entry->paddr); 392 + if (!cb_entry->cb) 393 + goto error_cb; 406 394 } 407 395 408 396 /* ··· 415 393 * for each frame and link them together. 416 394 */ 417 395 for (frame = 0; frame < d->frames; frame++) { 418 - struct bcm2835_dma_cb *control_block = 419 - &d->control_block_base[frame]; 396 + struct bcm2835_dma_cb *control_block = d->cb_list[frame].cb; 420 397 421 398 /* Setup adresses */ 422 399 if (d->dir == DMA_DEV_TO_MEM) { ··· 449 428 * This DMA engine driver currently only supports cyclic DMA. 450 429 * Therefore, wrap around at number of frames. 451 430 */ 452 - control_block->next = d->control_block_base_phys + 453 - sizeof(struct bcm2835_dma_cb) 454 - * ((frame + 1) % d->frames); 431 + control_block->next = d->cb_list[((frame + 1) % d->frames)].paddr; 455 432 } 456 433 457 434 return vchan_tx_prep(&c->vc, &d->vd, flags); 435 + error_cb: 436 + i--; 437 + for (; i >= 0; i--) { 438 + struct bcm2835_cb_entry *cb_entry = &d->cb_list[i]; 439 + 440 + dma_pool_free(c->cb_pool, cb_entry->cb, cb_entry->paddr); 441 + } 442 + 443 + kfree(d->cb_list); 444 + kfree(d); 445 + return NULL; 458 446 } 459 447 460 448 static int bcm2835_dma_slave_config(struct dma_chan *chan,
+36 -19
drivers/dma/edma.c
··· 1752 1752 return ret; 1753 1753 } 1754 1754 1755 - static bool edma_is_memcpy_channel(int ch_num, u16 *memcpy_channels) 1755 + static bool edma_is_memcpy_channel(int ch_num, s32 *memcpy_channels) 1756 1756 { 1757 - s16 *memcpy_ch = memcpy_channels; 1758 - 1759 1757 if (!memcpy_channels) 1760 1758 return false; 1761 - while (*memcpy_ch != -1) { 1762 - if (*memcpy_ch == ch_num) 1759 + while (*memcpy_channels != -1) { 1760 + if (*memcpy_channels == ch_num) 1763 1761 return true; 1764 - memcpy_ch++; 1762 + memcpy_channels++; 1765 1763 } 1766 1764 return false; 1767 1765 } ··· 1773 1775 { 1774 1776 struct dma_device *s_ddev = &ecc->dma_slave; 1775 1777 struct dma_device *m_ddev = NULL; 1776 - s16 *memcpy_channels = ecc->info->memcpy_channels; 1778 + s32 *memcpy_channels = ecc->info->memcpy_channels; 1777 1779 int i, j; 1778 1780 1779 1781 dma_cap_zero(s_ddev->cap_mask); ··· 1994 1996 prop = of_find_property(dev->of_node, "ti,edma-memcpy-channels", &sz); 1995 1997 if (prop) { 1996 1998 const char pname[] = "ti,edma-memcpy-channels"; 1997 - size_t nelm = sz / sizeof(s16); 1998 - s16 *memcpy_ch; 1999 + size_t nelm = sz / sizeof(s32); 2000 + s32 *memcpy_ch; 1999 2001 2000 - memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s16), 2002 + memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s32), 2001 2003 GFP_KERNEL); 2002 2004 if (!memcpy_ch) 2003 2005 return ERR_PTR(-ENOMEM); 2004 2006 2005 - ret = of_property_read_u16_array(dev->of_node, pname, 2006 - (u16 *)memcpy_ch, nelm); 2007 + ret = of_property_read_u32_array(dev->of_node, pname, 2008 + (u32 *)memcpy_ch, nelm); 2007 2009 if (ret) 2008 2010 return ERR_PTR(ret); 2009 2011 ··· 2015 2017 &sz); 2016 2018 if (prop) { 2017 2019 const char pname[] = "ti,edma-reserved-slot-ranges"; 2020 + u32 (*tmp)[2]; 2018 2021 s16 (*rsv_slots)[2]; 2019 - size_t nelm = sz / sizeof(*rsv_slots); 2022 + size_t nelm = sz / sizeof(*tmp); 2020 2023 struct edma_rsv_info *rsv_info; 2024 + int i; 2021 2025 2022 2026 if (!nelm) 2023 2027 return info; 2024 
2028 2025 - rsv_info = devm_kzalloc(dev, sizeof(*rsv_info), GFP_KERNEL); 2026 - if (!rsv_info) 2029 + tmp = kcalloc(nelm, sizeof(*tmp), GFP_KERNEL); 2030 + if (!tmp) 2027 2031 return ERR_PTR(-ENOMEM); 2032 + 2033 + rsv_info = devm_kzalloc(dev, sizeof(*rsv_info), GFP_KERNEL); 2034 + if (!rsv_info) { 2035 + kfree(tmp); 2036 + return ERR_PTR(-ENOMEM); 2037 + } 2028 2038 2029 2039 rsv_slots = devm_kcalloc(dev, nelm + 1, sizeof(*rsv_slots), 2030 2040 GFP_KERNEL); 2031 - if (!rsv_slots) 2041 + if (!rsv_slots) { 2042 + kfree(tmp); 2032 2043 return ERR_PTR(-ENOMEM); 2044 + } 2033 2045 2034 - ret = of_property_read_u16_array(dev->of_node, pname, 2035 - (u16 *)rsv_slots, nelm * 2); 2036 - if (ret) 2046 + ret = of_property_read_u32_array(dev->of_node, pname, 2047 + (u32 *)tmp, nelm * 2); 2048 + if (ret) { 2049 + kfree(tmp); 2037 2050 return ERR_PTR(ret); 2051 + } 2038 2052 2053 + for (i = 0; i < nelm; i++) { 2054 + rsv_slots[i][0] = tmp[i][0]; 2055 + rsv_slots[i][1] = tmp[i][1]; 2056 + } 2039 2057 rsv_slots[nelm][0] = -1; 2040 2058 rsv_slots[nelm][1] = -1; 2059 + 2041 2060 info->rsv = rsv_info; 2042 2061 info->rsv->rsv_slots = (const s16 (*)[2])rsv_slots; 2062 + 2063 + kfree(tmp); 2043 2064 } 2044 2065 2045 2066 return info;
+10 -5
drivers/dma/mic_x100_dma.c
··· 317 317 struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch); 318 318 struct device *dev = mic_dma_ch_to_device(mic_ch); 319 319 int result; 320 + struct dma_async_tx_descriptor *tx = NULL; 320 321 321 322 if (!len && !flags) 322 323 return NULL; ··· 325 324 spin_lock(&mic_ch->prep_lock); 326 325 result = mic_dma_do_dma(mic_ch, flags, dma_src, dma_dest, len); 327 326 if (result >= 0) 328 - return allocate_tx(mic_ch); 329 - dev_err(dev, "Error enqueueing dma, error=%d\n", result); 327 + tx = allocate_tx(mic_ch); 328 + 329 + if (!tx) 330 + dev_err(dev, "Error enqueueing dma, error=%d\n", result); 331 + 330 332 spin_unlock(&mic_ch->prep_lock); 331 - return NULL; 333 + return tx; 332 334 } 333 335 334 336 static struct dma_async_tx_descriptor * ··· 339 335 { 340 336 struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch); 341 337 int ret; 338 + struct dma_async_tx_descriptor *tx = NULL; 342 339 343 340 spin_lock(&mic_ch->prep_lock); 344 341 ret = mic_dma_do_dma(mic_ch, flags, 0, 0, 0); 345 342 if (!ret) 346 - return allocate_tx(mic_ch); 343 + tx = allocate_tx(mic_ch); 347 344 spin_unlock(&mic_ch->prep_lock); 348 - return NULL; 345 + return tx; 349 346 } 350 347 351 348 /* Return the status of the transaction */
+1 -1
drivers/gpio/gpio-ath79.c
··· 113 113 __raw_writel(BIT(offset), ctrl->base + AR71XX_GPIO_REG_CLEAR); 114 114 115 115 __raw_writel( 116 - __raw_readl(ctrl->base + AR71XX_GPIO_REG_OE) & BIT(offset), 116 + __raw_readl(ctrl->base + AR71XX_GPIO_REG_OE) & ~BIT(offset), 117 117 ctrl->base + AR71XX_GPIO_REG_OE); 118 118 119 119 spin_unlock_irqrestore(&ctrl->lock, flags);
+2 -2
drivers/gpio/gpio-generic.c
··· 141 141 unsigned long pinmask = bgc->pin2mask(bgc, gpio); 142 142 143 143 if (bgc->dir & pinmask) 144 - return bgc->read_reg(bgc->reg_set) & pinmask; 144 + return !!(bgc->read_reg(bgc->reg_set) & pinmask); 145 145 else 146 - return bgc->read_reg(bgc->reg_dat) & pinmask; 146 + return !!(bgc->read_reg(bgc->reg_dat) & pinmask); 147 147 } 148 148 149 149 static int bgpio_get(struct gpio_chip *gc, unsigned int gpio)
+7 -1
drivers/gpio/gpiolib.c
··· 1279 1279 chip = desc->chip; 1280 1280 offset = gpio_chip_hwgpio(desc); 1281 1281 value = chip->get ? chip->get(chip, offset) : -EIO; 1282 - value = value < 0 ? value : !!value; 1282 + /* 1283 + * FIXME: fix all drivers to clamp to [0,1] or return negative, 1284 + * then change this to: 1285 + * value = value < 0 ? value : !!value; 1286 + * so we can properly propagate error codes. 1287 + */ 1288 + value = !!value; 1283 1289 trace_gpio_value(desc_to_gpio(desc), 1, value); 1284 1290 return value; 1285 1291 }
+2 -1
drivers/gpu/drm/drm_probe_helper.c
··· 229 229 mode_flags |= DRM_MODE_FLAG_3D_MASK; 230 230 231 231 list_for_each_entry(mode, &connector->modes, head) { 232 - mode->status = drm_mode_validate_basic(mode); 232 + if (mode->status == MODE_OK) 233 + mode->status = drm_mode_validate_basic(mode); 233 234 234 235 if (mode->status == MODE_OK) 235 236 mode->status = drm_mode_validate_size(mode, maxX, maxY);
-2
drivers/gpu/drm/i915/i915_gem_context.c
··· 141 141 if (!ppgtt) 142 142 return; 143 143 144 - WARN_ON(!list_empty(&ppgtt->base.active_list)); 145 - 146 144 list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list, 147 145 mm_list) { 148 146 if (WARN_ON(__i915_vma_unbind_no_wait(vma)))
+3 -1
drivers/gpu/drm/i915/intel_display.c
··· 6309 6309 if (to_intel_plane_state(crtc->primary->state)->visible) { 6310 6310 intel_crtc_wait_for_pending_flips(crtc); 6311 6311 intel_pre_disable_primary(crtc); 6312 + 6313 + intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary)); 6314 + to_intel_plane_state(crtc->primary->state)->visible = false; 6312 6315 } 6313 6316 6314 - intel_crtc_disable_planes(crtc, crtc->state->plane_mask); 6315 6317 dev_priv->display.crtc_disable(crtc); 6316 6318 intel_crtc->active = false; 6317 6319 intel_update_watermarks(crtc);
+2 -3
drivers/gpu/drm/i915/intel_pm.c
··· 4782 4782 /* 2b: Program RC6 thresholds.*/ 4783 4783 4784 4784 /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */ 4785 - if (IS_SKYLAKE(dev) && !((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && 4786 - (INTEL_REVID(dev) <= SKL_REVID_E0))) 4785 + if (IS_SKYLAKE(dev)) 4787 4786 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16); 4788 4787 else 4789 4788 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16); ··· 4824 4825 * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6. 4825 4826 */ 4826 4827 if ((IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) || 4827 - ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_E0))) 4828 + ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_F0))) 4828 4829 I915_WRITE(GEN9_PG_ENABLE, 0); 4829 4830 else 4830 4831 I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
+1 -4
drivers/gpu/drm/omapdrm/omap_fbdev.c
··· 112 112 dma_addr_t paddr; 113 113 int ret; 114 114 115 - /* only doing ARGB32 since this is what is needed to alpha-blend 116 - * with video overlays: 117 - */ 118 115 sizes->surface_bpp = 32; 119 - sizes->surface_depth = 32; 116 + sizes->surface_depth = 24; 120 117 121 118 DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width, 122 119 sizes->surface_height, sizes->surface_bpp,
+1
drivers/hwmon/Kconfig
··· 1217 1217 config SENSORS_SHT15 1218 1218 tristate "Sensiron humidity and temperature sensors. SHT15 and compat." 1219 1219 depends on GPIOLIB || COMPILE_TEST 1220 + select BITREVERSE 1220 1221 help 1221 1222 If you say yes here you get support for the Sensiron SHT10, SHT11, 1222 1223 SHT15, SHT71, SHT75 humidity and temperature sensors.
+15 -1
drivers/hwmon/tmp102.c
··· 58 58 u16 config_orig; 59 59 unsigned long last_update; 60 60 int temp[3]; 61 + bool first_time; 61 62 }; 62 63 63 64 /* convert left adjusted 13-bit TMP102 register value to milliCelsius */ ··· 94 93 tmp102->temp[i] = tmp102_reg_to_mC(status); 95 94 } 96 95 tmp102->last_update = jiffies; 96 + tmp102->first_time = false; 97 97 } 98 98 mutex_unlock(&tmp102->lock); 99 99 return tmp102; ··· 103 101 static int tmp102_read_temp(void *dev, int *temp) 104 102 { 105 103 struct tmp102 *tmp102 = tmp102_update_device(dev); 104 + 105 + /* Is it too early even to return a conversion? */ 106 + if (tmp102->first_time) { 107 + dev_dbg(dev, "%s: Conversion not ready yet..\n", __func__); 108 + return -EAGAIN; 109 + } 106 110 107 111 *temp = tmp102->temp[0]; 108 112 ··· 121 113 { 122 114 struct sensor_device_attribute *sda = to_sensor_dev_attr(attr); 123 115 struct tmp102 *tmp102 = tmp102_update_device(dev); 116 + 117 + /* Is it too early even to return a read? */ 118 + if (tmp102->first_time) 119 + return -EAGAIN; 124 120 125 121 return sprintf(buf, "%d\n", tmp102->temp[sda->index]); 126 122 } ··· 219 207 status = -ENODEV; 220 208 goto fail_restore_config; 221 209 } 222 - tmp102->last_update = jiffies - HZ; 210 + tmp102->last_update = jiffies; 211 + /* Mark that we are not ready with data until conversion is complete */ 212 + tmp102->first_time = true; 223 213 mutex_init(&tmp102->lock); 224 214 225 215 hwmon_dev = hwmon_device_register_with_groups(dev, client->name,
+9 -2
drivers/i2c/busses/i2c-davinci.c
··· 202 202 * d is always 6 on Keystone I2C controller 203 203 */ 204 204 205 - /* get minimum of 7 MHz clock, but max of 12 MHz */ 206 - psc = (input_clock / 7000000) - 1; 205 + /* 206 + * Both Davinci and current Keystone User Guides recommend a value 207 + * between 7MHz and 12MHz. In reality 7MHz module clock doesn't 208 + * always produce enough margin between SDA and SCL transitions. 209 + * Measurements show that the higher the module clock is, the 210 + * bigger is the margin, providing more reliable communication. 211 + * So we better target for 12MHz. 212 + */ 213 + psc = (input_clock / 12000000) - 1; 207 214 if ((input_clock / (psc + 1)) > 12000000) 208 215 psc++; /* better to run under spec than over */ 209 216 d = (psc >= 2) ? 5 : 7 - psc;
+6
drivers/i2c/busses/i2c-designware-core.c
··· 813 813 tx_aborted: 814 814 if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err) 815 815 complete(&dev->cmd_complete); 816 + else if (unlikely(dev->accessor_flags & ACCESS_INTR_MASK)) { 817 + /* workaround to trigger pending interrupt */ 818 + stat = dw_readl(dev, DW_IC_INTR_MASK); 819 + i2c_dw_disable_int(dev); 820 + dw_writel(dev, stat, DW_IC_INTR_MASK); 821 + } 816 822 817 823 return IRQ_HANDLED; 818 824 }
+1
drivers/i2c/busses/i2c-designware-core.h
··· 111 111 112 112 #define ACCESS_SWAP 0x00000001 113 113 #define ACCESS_16BIT 0x00000002 114 + #define ACCESS_INTR_MASK 0x00000004 114 115 115 116 extern int i2c_dw_init(struct dw_i2c_dev *dev); 116 117 extern void i2c_dw_disable(struct dw_i2c_dev *dev);
+10 -6
drivers/i2c/busses/i2c-designware-platdrv.c
··· 93 93 static int dw_i2c_acpi_configure(struct platform_device *pdev) 94 94 { 95 95 struct dw_i2c_dev *dev = platform_get_drvdata(pdev); 96 + const struct acpi_device_id *id; 96 97 97 98 dev->adapter.nr = -1; 98 99 dev->tx_fifo_depth = 32; ··· 107 106 dw_i2c_acpi_params(pdev, "FMCN", &dev->fs_hcnt, &dev->fs_lcnt, 108 107 &dev->sda_hold_time); 109 108 109 + id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev); 110 + if (id && id->driver_data) 111 + dev->accessor_flags |= (u32)id->driver_data; 112 + 110 113 return 0; 111 114 } 112 115 ··· 121 116 { "INT3433", 0 }, 122 117 { "80860F41", 0 }, 123 118 { "808622C1", 0 }, 124 - { "AMD0010", 0 }, 119 + { "AMD0010", ACCESS_INTR_MASK }, 125 120 { } 126 121 }; 127 122 MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match); ··· 245 240 } 246 241 247 242 r = i2c_dw_probe(dev); 248 - if (r) { 243 + if (r && !dev->pm_runtime_disabled) 249 244 pm_runtime_disable(&pdev->dev); 250 - return r; 251 - } 252 245 253 - return 0; 246 + return r; 254 247 } 255 248 256 249 static int dw_i2c_plat_remove(struct platform_device *pdev) ··· 263 260 264 261 pm_runtime_dont_use_autosuspend(&pdev->dev); 265 262 pm_runtime_put_sync(&pdev->dev); 266 - pm_runtime_disable(&pdev->dev); 263 + if (!dev->pm_runtime_disabled) 264 + pm_runtime_disable(&pdev->dev); 267 265 268 266 return 0; 269 267 }
+2 -2
drivers/i2c/busses/i2c-imx.c
··· 1119 1119 i2c_imx, IMX_I2C_I2CR); 1120 1120 imx_i2c_write_reg(i2c_imx->hwdata->i2sr_clr_opcode, i2c_imx, IMX_I2C_I2SR); 1121 1121 1122 + i2c_imx_init_recovery_info(i2c_imx, pdev); 1123 + 1122 1124 /* Add I2C adapter */ 1123 1125 ret = i2c_add_numbered_adapter(&i2c_imx->adapter); 1124 1126 if (ret < 0) { 1125 1127 dev_err(&pdev->dev, "registration failed\n"); 1126 1128 goto clk_disable; 1127 1129 } 1128 - 1129 - i2c_imx_init_recovery_info(i2c_imx, pdev); 1130 1130 1131 1131 /* Set up platform driver data */ 1132 1132 platform_set_drvdata(pdev, i2c_imx);
+18 -9
drivers/i2c/busses/i2c-mv64xxx.c
··· 146 146 bool errata_delay; 147 147 struct reset_control *rstc; 148 148 bool irq_clear_inverted; 149 + /* Clk div is 2 to the power n, not 2 to the power n + 1 */ 150 + bool clk_n_base_0; 149 151 }; 150 152 151 153 static struct mv64xxx_i2c_regs mv64xxx_i2c_regs_mv64xxx = { ··· 759 757 #ifdef CONFIG_OF 760 758 #ifdef CONFIG_HAVE_CLK 761 759 static int 762 - mv64xxx_calc_freq(const int tclk, const int n, const int m) 760 + mv64xxx_calc_freq(struct mv64xxx_i2c_data *drv_data, 761 + const int tclk, const int n, const int m) 763 762 { 764 - return tclk / (10 * (m + 1) * (2 << n)); 763 + if (drv_data->clk_n_base_0) 764 + return tclk / (10 * (m + 1) * (1 << n)); 765 + else 766 + return tclk / (10 * (m + 1) * (2 << n)); 765 767 } 766 768 767 769 static bool 768 - mv64xxx_find_baud_factors(const u32 req_freq, const u32 tclk, u32 *best_n, 769 - u32 *best_m) 770 + mv64xxx_find_baud_factors(struct mv64xxx_i2c_data *drv_data, 771 + const u32 req_freq, const u32 tclk) 770 772 { 771 773 int freq, delta, best_delta = INT_MAX; 772 774 int m, n; 773 775 774 776 for (n = 0; n <= 7; n++) 775 777 for (m = 0; m <= 15; m++) { 776 - freq = mv64xxx_calc_freq(tclk, n, m); 778 + freq = mv64xxx_calc_freq(drv_data, tclk, n, m); 777 779 delta = req_freq - freq; 778 780 if (delta >= 0 && delta < best_delta) { 779 - *best_m = m; 780 - *best_n = n; 781 + drv_data->freq_m = m; 782 + drv_data->freq_n = n; 781 783 best_delta = delta; 782 784 } 783 785 if (best_delta == 0) ··· 819 813 if (of_property_read_u32(np, "clock-frequency", &bus_freq)) 820 814 bus_freq = 100000; /* 100kHz by default */ 821 815 822 - if (!mv64xxx_find_baud_factors(bus_freq, tclk, 823 - &drv_data->freq_n, &drv_data->freq_m)) { 816 + if (of_device_is_compatible(np, "allwinner,sun4i-a10-i2c") || 817 + of_device_is_compatible(np, "allwinner,sun6i-a31-i2c")) 818 + drv_data->clk_n_base_0 = true; 819 + 820 + if (!mv64xxx_find_baud_factors(drv_data, bus_freq, tclk)) { 824 821 rc = -EINVAL; 825 822 goto out; 826 823 }
+2 -2
drivers/i2c/busses/i2c-rcar.c
··· 576 576 if (slave->flags & I2C_CLIENT_TEN) 577 577 return -EAFNOSUPPORT; 578 578 579 - pm_runtime_forbid(rcar_i2c_priv_to_dev(priv)); 579 + pm_runtime_get_sync(rcar_i2c_priv_to_dev(priv)); 580 580 581 581 priv->slave = slave; 582 582 rcar_i2c_write(priv, ICSAR, slave->addr); ··· 598 598 599 599 priv->slave = NULL; 600 600 601 - pm_runtime_allow(rcar_i2c_priv_to_dev(priv)); 601 + pm_runtime_put(rcar_i2c_priv_to_dev(priv)); 602 602 603 603 return 0; 604 604 }
+1 -1
drivers/i2c/busses/i2c-rk3x.c
··· 908 908 &i2c->scl_fall_ns)) 909 909 i2c->scl_fall_ns = 300; 910 910 if (of_property_read_u32(pdev->dev.of_node, "i2c-sda-falling-time-ns", 911 - &i2c->scl_fall_ns)) 911 + &i2c->sda_fall_ns)) 912 912 i2c->sda_fall_ns = i2c->scl_fall_ns; 913 913 914 914 strlcpy(i2c->adap.name, "rk3x-i2c", sizeof(i2c->adap.name));
+1 -1
drivers/i2c/busses/i2c-st.c
··· 822 822 823 823 adap = &i2c_dev->adap; 824 824 i2c_set_adapdata(adap, i2c_dev); 825 - snprintf(adap->name, sizeof(adap->name), "ST I2C(0x%pa)", &res->start); 825 + snprintf(adap->name, sizeof(adap->name), "ST I2C(%pa)", &res->start); 826 826 adap->owner = THIS_MODULE; 827 827 adap->timeout = 2 * HZ; 828 828 adap->retries = 0;
+1
drivers/input/joystick/db9.c
··· 592 592 return; 593 593 } 594 594 595 + memset(&db9_parport_cb, 0, sizeof(db9_parport_cb)); 595 596 db9_parport_cb.flags = PARPORT_FLAG_EXCL; 596 597 597 598 pd = parport_register_dev_model(pp, "db9", &db9_parport_cb, port_idx);
+1
drivers/input/joystick/gamecon.c
··· 951 951 pads = gc_cfg[port_idx].args + 1; 952 952 n_pads = gc_cfg[port_idx].nargs - 1; 953 953 954 + memset(&gc_parport_cb, 0, sizeof(gc_parport_cb)); 954 955 gc_parport_cb.flags = PARPORT_FLAG_EXCL; 955 956 956 957 pd = parport_register_dev_model(pp, "gamecon", &gc_parport_cb,
+1
drivers/input/joystick/turbografx.c
··· 181 181 n_buttons = tgfx_cfg[port_idx].args + 1; 182 182 n_devs = tgfx_cfg[port_idx].nargs - 1; 183 183 184 + memset(&tgfx_parport_cb, 0, sizeof(tgfx_parport_cb)); 184 185 tgfx_parport_cb.flags = PARPORT_FLAG_EXCL; 185 186 186 187 pd = parport_register_dev_model(pp, "turbografx", &tgfx_parport_cb,
+1
drivers/input/joystick/walkera0701.c
··· 218 218 219 219 w->parport = pp; 220 220 221 + memset(&walkera0701_parport_cb, 0, sizeof(walkera0701_parport_cb)); 221 222 walkera0701_parport_cb.flags = PARPORT_FLAG_EXCL; 222 223 walkera0701_parport_cb.irq_func = walkera0701_irq_handler; 223 224 walkera0701_parport_cb.private = w;
+1 -2
drivers/input/misc/arizona-haptics.c
··· 97 97 98 98 ret = regmap_update_bits(arizona->regmap, 99 99 ARIZONA_HAPTICS_CONTROL_1, 100 - ARIZONA_HAP_CTRL_MASK, 101 - 1 << ARIZONA_HAP_CTRL_SHIFT); 100 + ARIZONA_HAP_CTRL_MASK, 0); 102 101 if (ret != 0) { 103 102 dev_err(arizona->dev, "Failed to stop haptics: %d\n", 104 103 ret);
+3
drivers/input/mouse/elan_i2c_core.c
··· 41 41 42 42 #define DRIVER_NAME "elan_i2c" 43 43 #define ELAN_DRIVER_VERSION "1.6.1" 44 + #define ELAN_VENDOR_ID 0x04f3 44 45 #define ETP_MAX_PRESSURE 255 45 46 #define ETP_FWIDTH_REDUCE 90 46 47 #define ETP_FINGER_WIDTH 15 ··· 915 914 916 915 input->name = "Elan Touchpad"; 917 916 input->id.bustype = BUS_I2C; 917 + input->id.vendor = ELAN_VENDOR_ID; 918 + input->id.product = data->product_id; 918 919 input_set_drvdata(input, data); 919 920 920 921 error = input_mt_init_slots(input, ETP_MAX_FINGERS,
+1
drivers/input/serio/parkbd.c
··· 145 145 { 146 146 struct pardev_cb parkbd_parport_cb; 147 147 148 + memset(&parkbd_parport_cb, 0, sizeof(parkbd_parport_cb)); 148 149 parkbd_parport_cb.irq_func = parkbd_interrupt; 149 150 parkbd_parport_cb.flags = PARPORT_FLAG_EXCL; 150 151
+9
drivers/input/tablet/aiptek.c
··· 1819 1819 input_set_abs_params(inputdev, ABS_TILT_Y, AIPTEK_TILT_MIN, AIPTEK_TILT_MAX, 0, 0); 1820 1820 input_set_abs_params(inputdev, ABS_WHEEL, AIPTEK_WHEEL_MIN, AIPTEK_WHEEL_MAX - 1, 0, 0); 1821 1821 1822 + /* Verify that a device really has an endpoint */ 1823 + if (intf->altsetting[0].desc.bNumEndpoints < 1) { 1824 + dev_err(&intf->dev, 1825 + "interface has %d endpoints, but must have minimum 1\n", 1826 + intf->altsetting[0].desc.bNumEndpoints); 1827 + err = -EINVAL; 1828 + goto fail3; 1829 + } 1822 1830 endpoint = &intf->altsetting[0].endpoint[0].desc; 1823 1831 1824 1832 /* Go set up our URB, which is called when the tablet receives ··· 1869 1861 if (i == ARRAY_SIZE(speeds)) { 1870 1862 dev_info(&intf->dev, 1871 1863 "Aiptek tried all speeds, no sane response\n"); 1864 + err = -EINVAL; 1872 1865 goto fail3; 1873 1866 } 1874 1867
+34
drivers/input/touchscreen/atmel_mxt_ts.c
··· 2487 2487 { } 2488 2488 }; 2489 2489 2490 + static unsigned int chromebook_tp_buttons[] = { 2491 + KEY_RESERVED, 2492 + KEY_RESERVED, 2493 + KEY_RESERVED, 2494 + KEY_RESERVED, 2495 + KEY_RESERVED, 2496 + BTN_LEFT 2497 + }; 2498 + 2499 + static struct mxt_acpi_platform_data chromebook_platform_data[] = { 2500 + { 2501 + /* Touchpad */ 2502 + .hid = "ATML0000", 2503 + .pdata = { 2504 + .t19_num_keys = ARRAY_SIZE(chromebook_tp_buttons), 2505 + .t19_keymap = chromebook_tp_buttons, 2506 + }, 2507 + }, 2508 + { 2509 + /* Touchscreen */ 2510 + .hid = "ATML0001", 2511 + }, 2512 + { } 2513 + }; 2514 + 2490 2515 static const struct dmi_system_id mxt_dmi_table[] = { 2491 2516 { 2492 2517 /* 2015 Google Pixel */ ··· 2521 2496 DMI_MATCH(DMI_PRODUCT_NAME, "Samus"), 2522 2497 }, 2523 2498 .driver_data = samus_platform_data, 2499 + }, 2500 + { 2501 + /* Other Google Chromebooks */ 2502 + .ident = "Chromebook", 2503 + .matches = { 2504 + DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), 2505 + }, 2506 + .driver_data = chromebook_platform_data, 2524 2507 }, 2525 2508 { } 2526 2509 }; ··· 2734 2701 { "qt602240_ts", 0 }, 2735 2702 { "atmel_mxt_ts", 0 }, 2736 2703 { "atmel_mxt_tp", 0 }, 2704 + { "maxtouch", 0 }, 2737 2705 { "mXT224", 0 }, 2738 2706 { } 2739 2707 };
+12 -9
drivers/input/touchscreen/elants_i2c.c
··· 1316 1316 1317 1317 disable_irq(client->irq); 1318 1318 1319 - if (device_may_wakeup(dev) || ts->keep_power_in_suspend) { 1319 + if (device_may_wakeup(dev)) { 1320 + /* 1321 + * The device will automatically enter idle mode 1322 + * that has reduced power consumption. 1323 + */ 1324 + ts->wake_irq_enabled = (enable_irq_wake(client->irq) == 0); 1325 + } else if (ts->keep_power_in_suspend) { 1320 1326 for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) { 1321 1327 error = elants_i2c_send(client, set_sleep_cmd, 1322 1328 sizeof(set_sleep_cmd)); ··· 1332 1326 dev_err(&client->dev, 1333 1327 "suspend command failed: %d\n", error); 1334 1328 } 1335 - 1336 - if (device_may_wakeup(dev)) 1337 - ts->wake_irq_enabled = 1338 - (enable_irq_wake(client->irq) == 0); 1339 1329 } else { 1340 1330 elants_i2c_power_off(ts); 1341 1331 } ··· 1347 1345 int retry_cnt; 1348 1346 int error; 1349 1347 1350 - if (device_may_wakeup(dev) && ts->wake_irq_enabled) 1351 - disable_irq_wake(client->irq); 1352 - 1353 - if (ts->keep_power_in_suspend) { 1348 + if (device_may_wakeup(dev)) { 1349 + if (ts->wake_irq_enabled) 1350 + disable_irq_wake(client->irq); 1351 + elants_i2c_sw_reset(client); 1352 + } else if (ts->keep_power_in_suspend) { 1354 1353 for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) { 1355 1354 error = elants_i2c_send(client, set_active_cmd, 1356 1355 sizeof(set_active_cmd));
+18 -2
drivers/iommu/amd_iommu_v2.c
··· 494 494 } 495 495 } 496 496 497 + static bool access_error(struct vm_area_struct *vma, struct fault *fault) 498 + { 499 + unsigned long requested = 0; 500 + 501 + if (fault->flags & PPR_FAULT_EXEC) 502 + requested |= VM_EXEC; 503 + 504 + if (fault->flags & PPR_FAULT_READ) 505 + requested |= VM_READ; 506 + 507 + if (fault->flags & PPR_FAULT_WRITE) 508 + requested |= VM_WRITE; 509 + 510 + return (requested & ~vma->vm_flags) != 0; 511 + } 512 + 497 513 static void do_fault(struct work_struct *work) 498 514 { 499 515 struct fault *fault = container_of(work, struct fault, work); ··· 532 516 goto out; 533 517 } 534 518 535 - if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) { 536 - /* handle_mm_fault would BUG_ON() */ 519 + /* Check if we have the right permissions on the vma */ 520 + if (access_error(vma, fault)) { 537 521 up_read(&mm->mmap_sem); 538 522 handle_fault_error(fault); 539 523 goto out;
+2 -2
drivers/iommu/intel-iommu.c
··· 2159 2159 sg_res = aligned_nrpages(sg->offset, sg->length); 2160 2160 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset; 2161 2161 sg->dma_length = sg->length; 2162 - pteval = (sg_phys(sg) & PAGE_MASK) | prot; 2162 + pteval = page_to_phys(sg_page(sg)) | prot; 2163 2163 phys_pfn = pteval >> VTD_PAGE_SHIFT; 2164 2164 } 2165 2165 ··· 3704 3704 3705 3705 for_each_sg(sglist, sg, nelems, i) { 3706 3706 BUG_ON(!sg_page(sg)); 3707 - sg->dma_address = sg_phys(sg); 3707 + sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset; 3708 3708 sg->dma_length = sg->length; 3709 3709 } 3710 3710 return nelems;
+20
drivers/iommu/intel-svm.c
··· 484 484 }; 485 485 486 486 #define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x10) 487 + 488 + static bool access_error(struct vm_area_struct *vma, struct page_req_dsc *req) 489 + { 490 + unsigned long requested = 0; 491 + 492 + if (req->exe_req) 493 + requested |= VM_EXEC; 494 + 495 + if (req->rd_req) 496 + requested |= VM_READ; 497 + 498 + if (req->wr_req) 499 + requested |= VM_WRITE; 500 + 501 + return (requested & ~vma->vm_flags) != 0; 502 + } 503 + 487 504 static irqreturn_t prq_event_thread(int irq, void *d) 488 505 { 489 506 struct intel_iommu *iommu = d; ··· 554 537 down_read(&svm->mm->mmap_sem); 555 538 vma = find_extend_vma(svm->mm, address); 556 539 if (!vma || address < vma->vm_start) 540 + goto invalid; 541 + 542 + if (access_error(vma, req)) 557 543 goto invalid; 558 544 559 545 ret = handle_mm_fault(svm->mm, vma, address,
+1 -1
drivers/iommu/iommu.c
··· 1430 1430 min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap); 1431 1431 1432 1432 for_each_sg(sg, s, nents, i) { 1433 - phys_addr_t phys = sg_phys(s); 1433 + phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset; 1434 1434 1435 1435 /* 1436 1436 * We are mapping on IOMMU page boundaries, so offset within
+11 -12
drivers/isdn/gigaset/ser-gigaset.c
··· 67 67 struct sk_buff *skb = bcs->tx_skb; 68 68 int sent = -EOPNOTSUPP; 69 69 70 - if (!tty || !tty->driver || !skb) 71 - return -EINVAL; 70 + WARN_ON(!tty || !tty->ops || !skb); 72 71 73 72 if (!skb->len) { 74 73 dev_kfree_skb_any(skb); ··· 108 109 unsigned long flags; 109 110 int sent = 0; 110 111 111 - if (!tty || !tty->driver) 112 - return -EFAULT; 112 + WARN_ON(!tty || !tty->ops); 113 113 114 114 cb = cs->cmdbuf; 115 115 if (!cb) ··· 368 370 tasklet_kill(&cs->write_tasklet); 369 371 if (!cs->hw.ser) 370 372 return; 371 - dev_set_drvdata(&cs->hw.ser->dev.dev, NULL); 372 373 platform_device_unregister(&cs->hw.ser->dev); 373 - kfree(cs->hw.ser); 374 - cs->hw.ser = NULL; 375 374 } 376 375 377 376 static void gigaset_device_release(struct device *dev) 378 377 { 379 - struct platform_device *pdev = to_platform_device(dev); 378 + struct cardstate *cs = dev_get_drvdata(dev); 380 379 381 - /* adapted from platform_device_release() in drivers/base/platform.c */ 382 - kfree(dev->platform_data); 383 - kfree(pdev->resource); 380 + if (!cs) 381 + return; 382 + dev_set_drvdata(dev, NULL); 383 + kfree(cs->hw.ser); 384 + cs->hw.ser = NULL; 384 385 } 385 386 386 387 /* ··· 429 432 struct tty_struct *tty = cs->hw.ser->tty; 430 433 unsigned int set, clear; 431 434 432 - if (!tty || !tty->driver || !tty->ops->tiocmset) 435 + WARN_ON(!tty || !tty->ops); 436 + /* tiocmset is an optional tty driver method */ 437 + if (!tty->ops->tiocmset) 433 438 return -EINVAL; 434 439 set = new_state & ~old_state; 435 440 clear = old_state & ~new_state;
+3 -4
drivers/isdn/hardware/mISDN/mISDNipac.c
··· 1170 1170 1171 1171 if (ipac->type & IPAC_TYPE_IPACX) { 1172 1172 ista = ReadIPAC(ipac, ISACX_ISTA); 1173 - while (ista && cnt--) { 1173 + while (ista && --cnt) { 1174 1174 pr_debug("%s: ISTA %02x\n", ipac->name, ista); 1175 1175 if (ista & IPACX__ICA) 1176 1176 ipac_irq(&ipac->hscx[0], ista); ··· 1182 1182 } 1183 1183 } else if (ipac->type & IPAC_TYPE_IPAC) { 1184 1184 ista = ReadIPAC(ipac, IPAC_ISTA); 1185 - while (ista && cnt--) { 1185 + while (ista && --cnt) { 1186 1186 pr_debug("%s: ISTA %02x\n", ipac->name, ista); 1187 1187 if (ista & (IPAC__ICD | IPAC__EXD)) { 1188 1188 istad = ReadISAC(isac, ISAC_ISTA); ··· 1200 1200 ista = ReadIPAC(ipac, IPAC_ISTA); 1201 1201 } 1202 1202 } else if (ipac->type & IPAC_TYPE_HSCX) { 1203 - while (cnt) { 1203 + while (--cnt) { 1204 1204 ista = ReadIPAC(ipac, IPAC_ISTAB + ipac->hscx[1].off); 1205 1205 pr_debug("%s: B2 ISTA %02x\n", ipac->name, ista); 1206 1206 if (ista) ··· 1211 1211 mISDNisac_irq(isac, istad); 1212 1212 if (0 == (ista | istad)) 1213 1213 break; 1214 - cnt--; 1215 1214 } 1216 1215 } 1217 1216 if (cnt > maxloop) /* only for ISAC/HSCX without PCI IRQ test */
+15 -7
drivers/md/md.c
··· 314 314 */ 315 315 void mddev_suspend(struct mddev *mddev) 316 316 { 317 - BUG_ON(mddev->suspended); 318 - mddev->suspended = 1; 317 + if (mddev->suspended++) 318 + return; 319 319 synchronize_rcu(); 320 320 wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0); 321 321 mddev->pers->quiesce(mddev, 1); ··· 326 326 327 327 void mddev_resume(struct mddev *mddev) 328 328 { 329 - mddev->suspended = 0; 329 + if (--mddev->suspended) 330 + return; 330 331 wake_up(&mddev->sb_wait); 331 332 mddev->pers->quiesce(mddev, 0); 332 333 ··· 1653 1652 rdev->journal_tail = le64_to_cpu(sb->journal_tail); 1654 1653 if (mddev->recovery_cp == MaxSector) 1655 1654 set_bit(MD_JOURNAL_CLEAN, &mddev->flags); 1656 - rdev->raid_disk = mddev->raid_disks; 1655 + rdev->raid_disk = 0; 1657 1656 break; 1658 1657 default: 1659 1658 rdev->saved_raid_disk = role; ··· 2774 2773 /* Activating a spare .. or possibly reactivating 2775 2774 * if we ever get bitmaps working here. 2776 2775 */ 2776 + int err; 2777 2777 2778 2778 if (rdev->raid_disk != -1) 2779 2779 return -EBUSY; ··· 2796 2794 rdev->saved_raid_disk = -1; 2797 2795 clear_bit(In_sync, &rdev->flags); 2798 2796 clear_bit(Bitmap_sync, &rdev->flags); 2799 - remove_and_add_spares(rdev->mddev, rdev); 2800 - if (rdev->raid_disk == -1) 2801 - return -EBUSY; 2797 + err = rdev->mddev->pers-> 2798 + hot_add_disk(rdev->mddev, rdev); 2799 + if (err) { 2800 + rdev->raid_disk = -1; 2801 + return err; 2802 + } else 2803 + sysfs_notify_dirent_safe(rdev->sysfs_state); 2804 + if (sysfs_link_rdev(rdev->mddev, rdev)) 2805 + /* failure here is OK */; 2802 2806 /* don't wakeup anyone, leave that to userspace. */ 2803 2807 } else { 2804 2808 if (slot >= rdev->mddev->raid_disks &&
+6 -2
drivers/md/md.h
··· 566 566 static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev) 567 567 { 568 568 char nm[20]; 569 - if (!test_bit(Replacement, &rdev->flags) && mddev->kobj.sd) { 569 + if (!test_bit(Replacement, &rdev->flags) && 570 + !test_bit(Journal, &rdev->flags) && 571 + mddev->kobj.sd) { 570 572 sprintf(nm, "rd%d", rdev->raid_disk); 571 573 return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm); 572 574 } else ··· 578 576 static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev) 579 577 { 580 578 char nm[20]; 581 - if (!test_bit(Replacement, &rdev->flags) && mddev->kobj.sd) { 579 + if (!test_bit(Replacement, &rdev->flags) && 580 + !test_bit(Journal, &rdev->flags) && 581 + mddev->kobj.sd) { 582 582 sprintf(nm, "rd%d", rdev->raid_disk); 583 583 sysfs_remove_link(&mddev->kobj, nm); 584 584 }
+3 -1
drivers/md/raid10.c
··· 1946 1946 1947 1947 first = i; 1948 1948 fbio = r10_bio->devs[i].bio; 1949 + fbio->bi_iter.bi_size = r10_bio->sectors << 9; 1950 + fbio->bi_iter.bi_idx = 0; 1949 1951 1950 1952 vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9); 1951 1953 /* now find blocks with errors */ ··· 1991 1989 bio_reset(tbio); 1992 1990 1993 1991 tbio->bi_vcnt = vcnt; 1994 - tbio->bi_iter.bi_size = r10_bio->sectors << 9; 1992 + tbio->bi_iter.bi_size = fbio->bi_iter.bi_size; 1995 1993 tbio->bi_rw = WRITE; 1996 1994 tbio->bi_private = r10_bio; 1997 1995 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
+2 -2
drivers/media/pci/ivtv/ivtv-driver.c
··· 805 805 { 806 806 int i; 807 807 808 - for (i = 0; i < IVTV_CARD_MAX_VIDEO_INPUTS - 1; i++) 808 + for (i = 0; i < IVTV_CARD_MAX_VIDEO_INPUTS; i++) 809 809 if (itv->card->video_inputs[i].video_type == 0) 810 810 break; 811 811 itv->nof_inputs = i; 812 - for (i = 0; i < IVTV_CARD_MAX_AUDIO_INPUTS - 1; i++) 812 + for (i = 0; i < IVTV_CARD_MAX_AUDIO_INPUTS; i++) 813 813 if (itv->card->audio_inputs[i].audio_type == 0) 814 814 break; 815 815 itv->nof_audio_inputs = i;
+1 -1
drivers/media/usb/airspy/airspy.c
··· 134 134 int urbs_submitted; 135 135 136 136 /* USB control message buffer */ 137 - #define BUF_SIZE 24 137 + #define BUF_SIZE 128 138 138 u8 buf[BUF_SIZE]; 139 139 140 140 /* Current configuration */
+12 -1
drivers/media/usb/hackrf/hackrf.c
··· 24 24 #include <media/videobuf2-v4l2.h> 25 25 #include <media/videobuf2-vmalloc.h> 26 26 27 + /* 28 + * Used Avago MGA-81563 RF amplifier could be destroyed pretty easily with too 29 + * strong signal or transmitting to bad antenna. 30 + * Set RF gain control to 'grabbed' state by default for sure. 31 + */ 32 + static bool hackrf_enable_rf_gain_ctrl; 33 + module_param_named(enable_rf_gain_ctrl, hackrf_enable_rf_gain_ctrl, bool, 0644); 34 + MODULE_PARM_DESC(enable_rf_gain_ctrl, "enable RX/TX RF amplifier control (warn: could damage amplifier)"); 35 + 27 36 /* HackRF USB API commands (from HackRF Library) */ 28 37 enum { 29 38 CMD_SET_TRANSCEIVER_MODE = 0x01, ··· 1460 1451 dev_err(dev->dev, "Could not initialize controls\n"); 1461 1452 goto err_v4l2_ctrl_handler_free_rx; 1462 1453 } 1454 + v4l2_ctrl_grab(dev->rx_rf_gain, !hackrf_enable_rf_gain_ctrl); 1463 1455 v4l2_ctrl_handler_setup(&dev->rx_ctrl_handler); 1464 1456 1465 1457 /* Register controls for transmitter */ ··· 1481 1471 dev_err(dev->dev, "Could not initialize controls\n"); 1482 1472 goto err_v4l2_ctrl_handler_free_tx; 1483 1473 } 1474 + v4l2_ctrl_grab(dev->tx_rf_gain, !hackrf_enable_rf_gain_ctrl); 1484 1475 v4l2_ctrl_handler_setup(&dev->tx_ctrl_handler); 1485 1476 1486 1477 /* Register the v4l2_device structure */ ··· 1541 1530 err_kfree: 1542 1531 kfree(dev); 1543 1532 err: 1544 - dev_dbg(dev->dev, "failed=%d\n", ret); 1533 + dev_dbg(&intf->dev, "failed=%d\n", ret); 1545 1534 return ret; 1546 1535 } 1547 1536
+10 -2
drivers/mtd/ofpart.c
··· 46 46 47 47 ofpart_node = of_get_child_by_name(mtd_node, "partitions"); 48 48 if (!ofpart_node) { 49 - pr_warn("%s: 'partitions' subnode not found on %s. Trying to parse direct subnodes as partitions.\n", 50 - master->name, mtd_node->full_name); 49 + /* 50 + * We might get here even when ofpart isn't used at all (e.g., 51 + * when using another parser), so don't be louder than 52 + * KERN_DEBUG 53 + */ 54 + pr_debug("%s: 'partitions' subnode not found on %s. Trying to parse direct subnodes as partitions.\n", 55 + master->name, mtd_node->full_name); 51 56 ofpart_node = mtd_node; 52 57 dedicated = false; 58 + } else if (!of_device_is_compatible(ofpart_node, "fixed-partitions")) { 59 + /* The 'partitions' subnode might be used by another parser */ 60 + return 0; 53 61 } 54 62 55 63 /* First count the subnodes */
+2 -2
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
··· 1849 1849 usleep_range(10, 15); 1850 1850 1851 1851 /* Poll Until Poll Condition */ 1852 - while (count-- && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR)) 1852 + while (--count && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR)) 1853 1853 usleep_range(500, 600); 1854 1854 1855 1855 if (!count) ··· 1873 1873 /* Poll Until Poll Condition */ 1874 1874 for (i = 0; i < pdata->tx_q_count; i++) { 1875 1875 count = 2000; 1876 - while (count-- && XGMAC_MTL_IOREAD_BITS(pdata, i, 1876 + while (--count && XGMAC_MTL_IOREAD_BITS(pdata, i, 1877 1877 MTL_Q_TQOMR, FTQ)) 1878 1878 usleep_range(500, 600); 1879 1879
+22 -16
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
··· 289 289 struct sk_buff *skb) 290 290 { 291 291 struct device *dev = ndev_to_dev(tx_ring->ndev); 292 + struct xgene_enet_pdata *pdata = netdev_priv(tx_ring->ndev); 292 293 struct xgene_enet_raw_desc *raw_desc; 293 294 __le64 *exp_desc = NULL, *exp_bufs = NULL; 294 295 dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr; ··· 420 419 raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) | 421 420 SET_VAL(USERINFO, tx_ring->tail)); 422 421 tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb; 422 + pdata->tx_level += count; 423 423 tx_ring->tail = tail; 424 424 425 425 return count; ··· 431 429 { 432 430 struct xgene_enet_pdata *pdata = netdev_priv(ndev); 433 431 struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring; 434 - struct xgene_enet_desc_ring *cp_ring = tx_ring->cp_ring; 435 - u32 tx_level, cq_level; 432 + u32 tx_level = pdata->tx_level; 436 433 int count; 437 434 438 - tx_level = pdata->ring_ops->len(tx_ring); 439 - cq_level = pdata->ring_ops->len(cp_ring); 440 - if (unlikely(tx_level > pdata->tx_qcnt_hi || 441 - cq_level > pdata->cp_qcnt_hi)) { 435 + if (tx_level < pdata->txc_level) 436 + tx_level += ((typeof(pdata->tx_level))~0U); 437 + 438 + if ((tx_level - pdata->txc_level) > pdata->tx_qcnt_hi) { 442 439 netif_stop_queue(ndev); 443 440 return NETDEV_TX_BUSY; 444 441 } ··· 540 539 struct xgene_enet_raw_desc *raw_desc, *exp_desc; 541 540 u16 head = ring->head; 542 541 u16 slots = ring->slots - 1; 543 - int ret, count = 0, processed = 0; 542 + int ret, desc_count, count = 0, processed = 0; 543 + bool is_completion; 544 544 545 545 do { 546 546 raw_desc = &ring->raw_desc[head]; 547 + desc_count = 0; 548 + is_completion = false; 547 549 exp_desc = NULL; 548 550 if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc))) 549 551 break; ··· 563 559 } 564 560 dma_rmb(); 565 561 count++; 562 + desc_count++; 566 563 } 567 - if (is_rx_desc(raw_desc)) 564 + if (is_rx_desc(raw_desc)) { 568 565 ret = xgene_enet_rx_frame(ring, raw_desc); 569 - else 566 + } else { 570 567 ret = 
xgene_enet_tx_completion(ring, raw_desc); 568 + is_completion = true; 569 + } 571 570 xgene_enet_mark_desc_slot_empty(raw_desc); 572 571 if (exp_desc) 573 572 xgene_enet_mark_desc_slot_empty(exp_desc); 574 573 575 574 head = (head + 1) & slots; 576 575 count++; 576 + desc_count++; 577 577 processed++; 578 + if (is_completion) 579 + pdata->txc_level += desc_count; 578 580 579 581 if (ret) 580 582 break; ··· 590 580 pdata->ring_ops->wr_cmd(ring, -count); 591 581 ring->head = head; 592 582 593 - if (netif_queue_stopped(ring->ndev)) { 594 - if (pdata->ring_ops->len(ring) < pdata->cp_qcnt_low) 595 - netif_wake_queue(ring->ndev); 596 - } 583 + if (netif_queue_stopped(ring->ndev)) 584 + netif_start_queue(ring->ndev); 597 585 } 598 586 599 587 return processed; ··· 1041 1033 pdata->tx_ring->cp_ring = cp_ring; 1042 1034 pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring); 1043 1035 1044 - pdata->tx_qcnt_hi = pdata->tx_ring->slots / 2; 1045 - pdata->cp_qcnt_hi = pdata->rx_ring->slots / 2; 1046 - pdata->cp_qcnt_low = pdata->cp_qcnt_hi / 2; 1036 + pdata->tx_qcnt_hi = pdata->tx_ring->slots - 128; 1047 1037 1048 1038 return 0; 1049 1039
+2 -2
drivers/net/ethernet/apm/xgene/xgene_enet_main.h
··· 155 155 enum xgene_enet_id enet_id; 156 156 struct xgene_enet_desc_ring *tx_ring; 157 157 struct xgene_enet_desc_ring *rx_ring; 158 + u16 tx_level; 159 + u16 txc_level; 158 160 char *dev_name; 159 161 u32 rx_buff_cnt; 160 162 u32 tx_qcnt_hi; 161 - u32 cp_qcnt_hi; 162 - u32 cp_qcnt_low; 163 163 u32 rx_irq; 164 164 u32 txc_irq; 165 165 u8 cq_cnt;
+3 -4
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
··· 1016 1016 sizeof(struct atl1c_recv_ret_status) * rx_desc_count + 1017 1017 8 * 4; 1018 1018 1019 - ring_header->desc = pci_alloc_consistent(pdev, ring_header->size, 1020 - &ring_header->dma); 1019 + ring_header->desc = dma_zalloc_coherent(&pdev->dev, ring_header->size, 1020 + &ring_header->dma, GFP_KERNEL); 1021 1021 if (unlikely(!ring_header->desc)) { 1022 - dev_err(&pdev->dev, "pci_alloc_consistend failed\n"); 1022 + dev_err(&pdev->dev, "could not get memory for DMA buffer\n"); 1023 1023 goto err_nomem; 1024 1024 } 1025 - memset(ring_header->desc, 0, ring_header->size); 1026 1025 /* init TPD ring */ 1027 1026 1028 1027 tpd_ring[0].dma = roundup(ring_header->dma, 8);
+1
drivers/net/ethernet/aurora/Kconfig
··· 13 13 14 14 config AURORA_NB8800 15 15 tristate "Aurora AU-NB8800 support" 16 + depends on HAS_DMA 16 17 select PHYLIB 17 18 help 18 19 Support for the AU-NB8800 gigabit Ethernet controller.
+32 -14
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 2693 2693 req.ver_upd = DRV_VER_UPD; 2694 2694 2695 2695 if (BNXT_PF(bp)) { 2696 - unsigned long vf_req_snif_bmap[4]; 2696 + DECLARE_BITMAP(vf_req_snif_bmap, 256); 2697 2697 u32 *data = (u32 *)vf_req_snif_bmap; 2698 2698 2699 - memset(vf_req_snif_bmap, 0, 32); 2699 + memset(vf_req_snif_bmap, 0, sizeof(vf_req_snif_bmap)); 2700 2700 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) 2701 2701 __set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap); 2702 2702 2703 - for (i = 0; i < 8; i++) { 2704 - req.vf_req_fwd[i] = cpu_to_le32(*data); 2705 - data++; 2706 - } 2703 + for (i = 0; i < 8; i++) 2704 + req.vf_req_fwd[i] = cpu_to_le32(data[i]); 2705 + 2707 2706 req.enables |= 2708 2707 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD); 2709 2708 } ··· 4602 4603 bp->nge_port_cnt = 1; 4603 4604 } 4604 4605 4605 - bp->state = BNXT_STATE_OPEN; 4606 + set_bit(BNXT_STATE_OPEN, &bp->state); 4606 4607 bnxt_enable_int(bp); 4607 4608 /* Enable TX queues */ 4608 4609 bnxt_tx_enable(bp); ··· 4678 4679 /* Change device state to avoid TX queue wake up's */ 4679 4680 bnxt_tx_disable(bp); 4680 4681 4681 - bp->state = BNXT_STATE_CLOSED; 4682 - cancel_work_sync(&bp->sp_task); 4682 + clear_bit(BNXT_STATE_OPEN, &bp->state); 4683 + smp_mb__after_atomic(); 4684 + while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state)) 4685 + msleep(20); 4683 4686 4684 4687 /* Flush rings before disabling interrupts */ 4685 4688 bnxt_shutdown_nic(bp, irq_re_init); ··· 5031 5030 static void bnxt_reset_task(struct bnxt *bp) 5032 5031 { 5033 5032 bnxt_dbg_dump_states(bp); 5034 - if (netif_running(bp->dev)) 5035 - bnxt_tx_disable(bp); /* prevent tx timout again */ 5033 + if (netif_running(bp->dev)) { 5034 + bnxt_close_nic(bp, false, false); 5035 + bnxt_open_nic(bp, false, false); 5036 + } 5036 5037 } 5037 5038 5038 5039 static void bnxt_tx_timeout(struct net_device *dev) ··· 5084 5081 struct bnxt *bp = container_of(work, struct bnxt, sp_task); 5085 5082 int rc; 5086 5083 5087 - if (bp->state != BNXT_STATE_OPEN) 5084 + 
set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 5085 + smp_mb__after_atomic(); 5086 + if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 5087 + clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 5088 5088 return; 5089 + } 5089 5090 5090 5091 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event)) 5091 5092 bnxt_cfg_rx_mode(bp); ··· 5113 5106 bnxt_hwrm_tunnel_dst_port_free( 5114 5107 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); 5115 5108 } 5116 - if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) 5109 + if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) { 5110 + /* bnxt_reset_task() calls bnxt_close_nic() which waits 5111 + * for BNXT_STATE_IN_SP_TASK to clear. 5112 + */ 5113 + clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 5114 + rtnl_lock(); 5117 5115 bnxt_reset_task(bp); 5116 + set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 5117 + rtnl_unlock(); 5118 + } 5119 + 5120 + smp_mb__before_atomic(); 5121 + clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 5118 5122 } 5119 5123 5120 5124 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) ··· 5204 5186 bp->timer.function = bnxt_timer; 5205 5187 bp->current_interval = BNXT_TIMER_INTERVAL; 5206 5188 5207 - bp->state = BNXT_STATE_CLOSED; 5189 + clear_bit(BNXT_STATE_OPEN, &bp->state); 5208 5190 5209 5191 return 0; 5210 5192
+3 -3
drivers/net/ethernet/broadcom/bnxt/bnxt.h
··· 925 925 926 926 struct timer_list timer; 927 927 928 - int state; 929 - #define BNXT_STATE_CLOSED 0 930 - #define BNXT_STATE_OPEN 1 928 + unsigned long state; 929 + #define BNXT_STATE_OPEN 0 930 + #define BNXT_STATE_IN_SP_TASK 1 931 931 932 932 struct bnxt_irq *irq_tbl; 933 933 u8 mac_addr[ETH_ALEN];
+1 -1
drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
··· 21 21 #ifdef CONFIG_BNXT_SRIOV 22 22 static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id) 23 23 { 24 - if (bp->state != BNXT_STATE_OPEN) { 24 + if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 25 25 netdev_err(bp->dev, "vf ndo called though PF is down\n"); 26 26 return -EINVAL; 27 27 }
+18 -21
drivers/net/ethernet/cavium/thunder/nic_main.c
··· 37 37 #define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF) 38 38 #define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF) 39 39 u8 vf_lmac_map[MAX_LMAC]; 40 - u8 lmac_cnt; 41 40 struct delayed_work dwork; 42 41 struct workqueue_struct *check_link; 43 42 u8 link[MAX_LMAC]; ··· 279 280 u64 lmac_credit; 280 281 281 282 nic->num_vf_en = 0; 282 - nic->lmac_cnt = 0; 283 283 284 284 for (bgx = 0; bgx < NIC_MAX_BGX; bgx++) { 285 285 if (!(bgx_map & (1 << bgx))) ··· 288 290 nic->vf_lmac_map[next_bgx_lmac++] = 289 291 NIC_SET_VF_LMAC_MAP(bgx, lmac); 290 292 nic->num_vf_en += lmac_cnt; 291 - nic->lmac_cnt += lmac_cnt; 292 293 293 294 /* Program LMAC credits */ 294 295 lmac_credit = (1ull << 1); /* channel credit enable */ ··· 615 618 return 0; 616 619 } 617 620 621 + static void nic_enable_vf(struct nicpf *nic, int vf, bool enable) 622 + { 623 + int bgx, lmac; 624 + 625 + nic->vf_enabled[vf] = enable; 626 + 627 + if (vf >= nic->num_vf_en) 628 + return; 629 + 630 + bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); 631 + lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); 632 + 633 + bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, enable); 634 + } 635 + 618 636 /* Interrupt handler to handle mailbox messages from VFs */ 619 637 static void nic_handle_mbx_intr(struct nicpf *nic, int vf) 620 638 { ··· 729 717 break; 730 718 case NIC_MBOX_MSG_CFG_DONE: 731 719 /* Last message of VF config msg sequence */ 732 - nic->vf_enabled[vf] = true; 733 - if (vf >= nic->lmac_cnt) 734 - goto unlock; 735 - 736 - bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); 737 - lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); 738 - 739 - bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, true); 720 + nic_enable_vf(nic, vf, true); 740 721 goto unlock; 741 722 case NIC_MBOX_MSG_SHUTDOWN: 742 723 /* First msg in VF teardown sequence */ 743 - nic->vf_enabled[vf] = false; 744 724 if (vf >= nic->num_vf_en) 745 725 nic->sqs_used[vf - nic->num_vf_en] = false; 746 726 
nic->pqs_vf[vf] = 0; 747 - 748 - if (vf >= nic->lmac_cnt) 749 - break; 750 - 751 - bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); 752 - lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); 753 - 754 - bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, false); 727 + nic_enable_vf(nic, vf, false); 755 728 break; 756 729 case NIC_MBOX_MSG_ALLOC_SQS: 757 730 nic_alloc_sqs(nic, &mbx.sqs_alloc); ··· 955 958 956 959 mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE; 957 960 958 - for (vf = 0; vf < nic->lmac_cnt; vf++) { 961 + for (vf = 0; vf < nic->num_vf_en; vf++) { 959 962 /* Poll only if VF is UP */ 960 963 if (!nic->vf_enabled[vf]) 961 964 continue;
+8 -20
drivers/net/ethernet/ezchip/nps_enet.c
··· 48 48 *reg = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF); 49 49 else { /* !dst_is_aligned */ 50 50 for (i = 0; i < len; i++, reg++) { 51 - u32 buf = 52 - nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF); 53 - 54 - /* to accommodate word-unaligned address of "reg" 55 - * we have to do memcpy_toio() instead of simple "=". 56 - */ 57 - memcpy_toio((void __iomem *)reg, &buf, sizeof(buf)); 51 + u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF); 52 + put_unaligned(buf, reg); 58 53 } 59 54 } 60 55 61 56 /* copy last bytes (if any) */ 62 57 if (last) { 63 58 u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF); 64 - 65 - memcpy_toio((void __iomem *)reg, &buf, last); 59 + memcpy((u8*)reg, &buf, last); 66 60 } 67 61 } 68 62 ··· 361 367 struct nps_enet_tx_ctl tx_ctrl; 362 368 short length = skb->len; 363 369 u32 i, len = DIV_ROUND_UP(length, sizeof(u32)); 364 - u32 *src = (u32 *)virt_to_phys(skb->data); 370 + u32 *src = (void *)skb->data; 365 371 bool src_is_aligned = IS_ALIGNED((unsigned long)src, sizeof(u32)); 366 372 367 373 tx_ctrl.value = 0; ··· 369 375 if (src_is_aligned) 370 376 for (i = 0; i < len; i++, src++) 371 377 nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF, *src); 372 - else { /* !src_is_aligned */ 373 - for (i = 0; i < len; i++, src++) { 374 - u32 buf; 378 + else /* !src_is_aligned */ 379 + for (i = 0; i < len; i++, src++) 380 + nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF, 381 + get_unaligned(src)); 375 382 376 - /* to accommodate word-unaligned address of "src" 377 - * we have to do memcpy_fromio() instead of simple "=" 378 - */ 379 - memcpy_fromio(&buf, (void __iomem *)src, sizeof(buf)); 380 - nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF, buf); 381 - } 382 - } 383 383 /* Write the length of the Frame */ 384 384 tx_ctrl.nt = length; 385 385
+1 -1
drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
··· 552 552 cbd_t __iomem *prev_bd; 553 553 cbd_t __iomem *last_tx_bd; 554 554 555 - last_tx_bd = fep->tx_bd_base + (fpi->tx_ring * sizeof(cbd_t)); 555 + last_tx_bd = fep->tx_bd_base + ((fpi->tx_ring - 1) * sizeof(cbd_t)); 556 556 557 557 /* get the current bd held in TBPTR and scan back from this point */ 558 558 recheck_bd = curr_tbptr = (cbd_t __iomem *)
+1 -1
drivers/net/ethernet/freescale/fsl_pq_mdio.c
··· 464 464 * address). Print error message but continue anyway. 465 465 */ 466 466 if ((void *)tbipa > priv->map + resource_size(&res) - 4) 467 - dev_err(&pdev->dev, "invalid register map (should be at least 0x%04x to contain TBI address)\n", 467 + dev_err(&pdev->dev, "invalid register map (should be at least 0x%04zx to contain TBI address)\n", 468 468 ((void *)tbipa - priv->map) + 4); 469 469 470 470 iowrite32be(be32_to_cpup(prop), tbipa);
+5 -3
drivers/net/ethernet/freescale/gianfar.c
··· 894 894 FSL_GIANFAR_DEV_HAS_VLAN | 895 895 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET | 896 896 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH | 897 - FSL_GIANFAR_DEV_HAS_TIMER; 897 + FSL_GIANFAR_DEV_HAS_TIMER | 898 + FSL_GIANFAR_DEV_HAS_RX_FILER; 898 899 899 900 err = of_property_read_string(np, "phy-connection-type", &ctype); 900 901 ··· 1397 1396 priv->rx_queue[i]->rxic = DEFAULT_RXIC; 1398 1397 } 1399 1398 1400 - /* always enable rx filer */ 1401 - priv->rx_filer_enable = 1; 1399 + /* Always enable rx filer if available */ 1400 + priv->rx_filer_enable = 1401 + (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0; 1402 1402 /* Enable most messages by default */ 1403 1403 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; 1404 1404 /* use pritority h/w tx queue scheduling for single queue devices */
+1
drivers/net/ethernet/freescale/gianfar.h
··· 923 923 #define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400 924 924 #define FSL_GIANFAR_DEV_HAS_TIMER 0x00000800 925 925 #define FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER 0x00001000 926 + #define FSL_GIANFAR_DEV_HAS_RX_FILER 0x00002000 926 927 927 928 #if (MAXGROUPS == 2) 928 929 #define DEFAULT_MAPPING 0xAA
+14 -35
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
··· 1259 1259 if (MAC_IS_ALL_ZEROS(mac_entry->addr) || 1260 1260 MAC_IS_BROADCAST(mac_entry->addr) || 1261 1261 MAC_IS_MULTICAST(mac_entry->addr)) { 1262 - dev_err(dsaf_dev->dev, 1263 - "set_uc %s Mac %02x:%02x:%02x:%02x:%02x:%02x err!\n", 1264 - dsaf_dev->ae_dev.name, mac_entry->addr[0], 1265 - mac_entry->addr[1], mac_entry->addr[2], 1266 - mac_entry->addr[3], mac_entry->addr[4], 1267 - mac_entry->addr[5]); 1262 + dev_err(dsaf_dev->dev, "set_uc %s Mac %pM err!\n", 1263 + dsaf_dev->ae_dev.name, mac_entry->addr); 1268 1264 return -EINVAL; 1269 1265 } 1270 1266 ··· 1327 1331 1328 1332 /* mac addr check */ 1329 1333 if (MAC_IS_ALL_ZEROS(mac_entry->addr)) { 1330 - dev_err(dsaf_dev->dev, 1331 - "set uc %s Mac %02x:%02x:%02x:%02x:%02x:%02x err!\n", 1332 - dsaf_dev->ae_dev.name, mac_entry->addr[0], 1333 - mac_entry->addr[1], mac_entry->addr[2], 1334 - mac_entry->addr[3], 1335 - mac_entry->addr[4], mac_entry->addr[5]); 1334 + dev_err(dsaf_dev->dev, "set uc %s Mac %pM err!\n", 1335 + dsaf_dev->ae_dev.name, mac_entry->addr); 1336 1336 return -EINVAL; 1337 1337 } 1338 1338 ··· 1402 1410 1403 1411 /*chechk mac addr */ 1404 1412 if (MAC_IS_ALL_ZEROS(mac_entry->addr)) { 1405 - dev_err(dsaf_dev->dev, 1406 - "set_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x!\n", 1407 - mac_entry->addr[0], mac_entry->addr[1], 1408 - mac_entry->addr[2], mac_entry->addr[3], 1409 - mac_entry->addr[4], mac_entry->addr[5]); 1413 + dev_err(dsaf_dev->dev, "set_entry failed,addr %pM!\n", 1414 + mac_entry->addr); 1410 1415 return -EINVAL; 1411 1416 } 1412 1417 ··· 1486 1497 1487 1498 /*check mac addr */ 1488 1499 if (MAC_IS_ALL_ZEROS(addr) || MAC_IS_BROADCAST(addr)) { 1489 - dev_err(dsaf_dev->dev, 1490 - "del_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x!\n", 1491 - addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); 1500 + dev_err(dsaf_dev->dev, "del_entry failed,addr %pM!\n", 1501 + addr); 1492 1502 return -EINVAL; 1493 1503 } 1494 1504 ··· 1551 1563 1552 1564 /*check mac addr */ 1553 1565 if 
(MAC_IS_ALL_ZEROS(mac_entry->addr)) { 1554 - dev_err(dsaf_dev->dev, 1555 - "del_port failed, addr %02x:%02x:%02x:%02x:%02x:%02x!\n", 1556 - mac_entry->addr[0], mac_entry->addr[1], 1557 - mac_entry->addr[2], mac_entry->addr[3], 1558 - mac_entry->addr[4], mac_entry->addr[5]); 1566 + dev_err(dsaf_dev->dev, "del_port failed, addr %pM!\n", 1567 + mac_entry->addr); 1559 1568 return -EINVAL; 1560 1569 } 1561 1570 ··· 1629 1644 /* check macaddr */ 1630 1645 if (MAC_IS_ALL_ZEROS(mac_entry->addr) || 1631 1646 MAC_IS_BROADCAST(mac_entry->addr)) { 1632 - dev_err(dsaf_dev->dev, 1633 - "get_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x\n", 1634 - mac_entry->addr[0], mac_entry->addr[1], 1635 - mac_entry->addr[2], mac_entry->addr[3], 1636 - mac_entry->addr[4], mac_entry->addr[5]); 1647 + dev_err(dsaf_dev->dev, "get_entry failed,addr %pM\n", 1648 + mac_entry->addr); 1637 1649 return -EINVAL; 1638 1650 } 1639 1651 ··· 1677 1695 /*check mac addr */ 1678 1696 if (MAC_IS_ALL_ZEROS(mac_entry->addr) || 1679 1697 MAC_IS_BROADCAST(mac_entry->addr)) { 1680 - dev_err(dsaf_dev->dev, 1681 - "get_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x\n", 1682 - mac_entry->addr[0], mac_entry->addr[1], 1683 - mac_entry->addr[2], mac_entry->addr[3], 1684 - mac_entry->addr[4], mac_entry->addr[5]); 1698 + dev_err(dsaf_dev->dev, "get_entry failed,addr %pM\n", 1699 + mac_entry->addr); 1685 1700 return -EINVAL; 1686 1701 } 1687 1702
+6 -5
drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
··· 898 898 #define XGMAC_PAUSE_CTL_RSP_MODE_B 2 899 899 #define XGMAC_PAUSE_CTL_TX_XOFF_B 3 900 900 901 - static inline void dsaf_write_reg(void *base, u32 reg, u32 value) 901 + static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value) 902 902 { 903 903 u8 __iomem *reg_addr = ACCESS_ONCE(base); 904 904 ··· 908 908 #define dsaf_write_dev(a, reg, value) \ 909 909 dsaf_write_reg((a)->io_base, (reg), (value)) 910 910 911 - static inline u32 dsaf_read_reg(u8 *base, u32 reg) 911 + static inline u32 dsaf_read_reg(u8 __iomem *base, u32 reg) 912 912 { 913 913 u8 __iomem *reg_addr = ACCESS_ONCE(base); 914 914 ··· 927 927 #define dsaf_set_bit(origin, shift, val) \ 928 928 dsaf_set_field((origin), (1ull << (shift)), (shift), (val)) 929 929 930 - static inline void dsaf_set_reg_field(void *base, u32 reg, u32 mask, u32 shift, 931 - u32 val) 930 + static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask, 931 + u32 shift, u32 val) 932 932 { 933 933 u32 origin = dsaf_read_reg(base, reg); 934 934 ··· 947 947 #define dsaf_get_bit(origin, shift) \ 948 948 dsaf_get_field((origin), (1ull << (shift)), (shift)) 949 949 950 - static inline u32 dsaf_get_reg_field(void *base, u32 reg, u32 mask, u32 shift) 950 + static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask, 951 + u32 shift) 951 952 { 952 953 u32 origin; 953 954
-6
drivers/net/ethernet/intel/i40e/i40e_adminq.c
··· 567 567 goto init_adminq_exit; 568 568 } 569 569 570 - /* initialize locks */ 571 - mutex_init(&hw->aq.asq_mutex); 572 - mutex_init(&hw->aq.arq_mutex); 573 - 574 570 /* Set up register offsets */ 575 571 i40e_adminq_init_regs(hw); 576 572 ··· 659 663 660 664 i40e_shutdown_asq(hw); 661 665 i40e_shutdown_arq(hw); 662 - 663 - /* destroy the locks */ 664 666 665 667 if (hw->nvm_buff.va) 666 668 i40e_free_virt_mem(hw, &hw->nvm_buff);
+10 -1
drivers/net/ethernet/intel/i40e/i40e_main.c
··· 10295 10295 /* set up a default setting for link flow control */ 10296 10296 pf->hw.fc.requested_mode = I40E_FC_NONE; 10297 10297 10298 + /* set up the locks for the AQ, do this only once in probe 10299 + * and destroy them only once in remove 10300 + */ 10301 + mutex_init(&hw->aq.asq_mutex); 10302 + mutex_init(&hw->aq.arq_mutex); 10303 + 10298 10304 err = i40e_init_adminq(hw); 10299 10305 10300 10306 /* provide nvm, fw, api versions */ ··· 10703 10697 set_bit(__I40E_DOWN, &pf->state); 10704 10698 del_timer_sync(&pf->service_timer); 10705 10699 cancel_work_sync(&pf->service_task); 10706 - i40e_fdir_teardown(pf); 10707 10700 10708 10701 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { 10709 10702 i40e_free_vfs(pf); ··· 10744 10739 dev_warn(&pdev->dev, 10745 10740 "Failed to destroy the Admin Queue resources: %d\n", 10746 10741 ret_code); 10742 + 10743 + /* destroy the locks only once, here */ 10744 + mutex_destroy(&hw->aq.arq_mutex); 10745 + mutex_destroy(&hw->aq.asq_mutex); 10747 10746 10748 10747 /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */ 10749 10748 i40e_clear_interrupt_scheme(pf);
-6
drivers/net/ethernet/intel/i40evf/i40e_adminq.c
··· 551 551 goto init_adminq_exit; 552 552 } 553 553 554 - /* initialize locks */ 555 - mutex_init(&hw->aq.asq_mutex); 556 - mutex_init(&hw->aq.arq_mutex); 557 - 558 554 /* Set up register offsets */ 559 555 i40e_adminq_init_regs(hw); 560 556 ··· 591 595 592 596 i40e_shutdown_asq(hw); 593 597 i40e_shutdown_arq(hw); 594 - 595 - /* destroy the locks */ 596 598 597 599 if (hw->nvm_buff.va) 598 600 i40e_free_virt_mem(hw, &hw->nvm_buff);
+10
drivers/net/ethernet/intel/i40evf/i40evf_main.c
··· 2476 2476 hw->bus.device = PCI_SLOT(pdev->devfn); 2477 2477 hw->bus.func = PCI_FUNC(pdev->devfn); 2478 2478 2479 + /* set up the locks for the AQ, do this only once in probe 2480 + * and destroy them only once in remove 2481 + */ 2482 + mutex_init(&hw->aq.asq_mutex); 2483 + mutex_init(&hw->aq.arq_mutex); 2484 + 2479 2485 INIT_LIST_HEAD(&adapter->mac_filter_list); 2480 2486 INIT_LIST_HEAD(&adapter->vlan_filter_list); 2481 2487 ··· 2634 2628 2635 2629 if (hw->aq.asq.count) 2636 2630 i40evf_shutdown_adminq(hw); 2631 + 2632 + /* destroy the locks only once, here */ 2633 + mutex_destroy(&hw->aq.arq_mutex); 2634 + mutex_destroy(&hw->aq.asq_mutex); 2637 2635 2638 2636 iounmap(hw->hw_addr); 2639 2637 pci_release_regions(pdev);
+3
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
··· 7920 7920 */ 7921 7921 if (netif_running(dev)) 7922 7922 ixgbe_close(dev); 7923 + else 7924 + ixgbe_reset(adapter); 7925 + 7923 7926 ixgbe_clear_interrupt_scheme(adapter); 7924 7927 7925 7928 #ifdef CONFIG_IXGBE_DCB
+33 -19
drivers/net/ethernet/marvell/mvpp2.c
··· 3413 3413 } 3414 3414 3415 3415 /* Free all buffers from the pool */ 3416 - static void mvpp2_bm_bufs_free(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool) 3416 + static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv, 3417 + struct mvpp2_bm_pool *bm_pool) 3417 3418 { 3418 3419 int i; 3419 3420 3420 3421 for (i = 0; i < bm_pool->buf_num; i++) { 3422 + dma_addr_t buf_phys_addr; 3421 3423 u32 vaddr; 3422 3424 3423 3425 /* Get buffer virtual address (indirect access) */ 3424 - mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id)); 3426 + buf_phys_addr = mvpp2_read(priv, 3427 + MVPP2_BM_PHY_ALLOC_REG(bm_pool->id)); 3425 3428 vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG); 3429 + 3430 + dma_unmap_single(dev, buf_phys_addr, 3431 + bm_pool->buf_size, DMA_FROM_DEVICE); 3432 + 3426 3433 if (!vaddr) 3427 3434 break; 3428 3435 dev_kfree_skb_any((struct sk_buff *)vaddr); ··· 3446 3439 { 3447 3440 u32 val; 3448 3441 3449 - mvpp2_bm_bufs_free(priv, bm_pool); 3442 + mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool); 3450 3443 if (bm_pool->buf_num) { 3451 3444 WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id); 3452 3445 return 0; ··· 3699 3692 MVPP2_BM_LONG_BUF_NUM : 3700 3693 MVPP2_BM_SHORT_BUF_NUM; 3701 3694 else 3702 - mvpp2_bm_bufs_free(port->priv, new_pool); 3695 + mvpp2_bm_bufs_free(port->dev->dev.parent, 3696 + port->priv, new_pool); 3703 3697 3704 3698 new_pool->pkt_size = pkt_size; 3705 3699 ··· 3764 3756 int pkt_size = MVPP2_RX_PKT_SIZE(mtu); 3765 3757 3766 3758 /* Update BM pool with new buffer size */ 3767 - mvpp2_bm_bufs_free(port->priv, port_pool); 3759 + mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool); 3768 3760 if (port_pool->buf_num) { 3769 3761 WARN(1, "cannot free all buffers in pool %d\n", port_pool->id); 3770 3762 return -EIO; ··· 4409 4401 4410 4402 mvpp2_txq_inc_get(txq_pcpu); 4411 4403 4412 - if (!skb) 4413 - continue; 4414 - 4415 4404 dma_unmap_single(port->dev->dev.parent, buf_phys_addr, 4416 4405 
skb_headlen(skb), DMA_TO_DEVICE); 4406 + if (!skb) 4407 + continue; 4417 4408 dev_kfree_skb_any(skb); 4418 4409 } 4419 4410 } ··· 5099 5092 struct mvpp2_rx_queue *rxq) 5100 5093 { 5101 5094 struct net_device *dev = port->dev; 5102 - int rx_received, rx_filled, i; 5095 + int rx_received; 5096 + int rx_done = 0; 5103 5097 u32 rcvd_pkts = 0; 5104 5098 u32 rcvd_bytes = 0; 5105 5099 ··· 5109 5101 if (rx_todo > rx_received) 5110 5102 rx_todo = rx_received; 5111 5103 5112 - rx_filled = 0; 5113 - for (i = 0; i < rx_todo; i++) { 5104 + while (rx_done < rx_todo) { 5114 5105 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); 5115 5106 struct mvpp2_bm_pool *bm_pool; 5116 5107 struct sk_buff *skb; 5108 + dma_addr_t phys_addr; 5117 5109 u32 bm, rx_status; 5118 5110 int pool, rx_bytes, err; 5119 5111 5120 - rx_filled++; 5112 + rx_done++; 5121 5113 rx_status = rx_desc->status; 5122 5114 rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE; 5115 + phys_addr = rx_desc->buf_phys_addr; 5123 5116 5124 5117 bm = mvpp2_bm_cookie_build(rx_desc); 5125 5118 pool = mvpp2_bm_cookie_pool_get(bm); ··· 5137 5128 * comprised by the RX descriptor. 
5138 5129 */ 5139 5130 if (rx_status & MVPP2_RXD_ERR_SUMMARY) { 5131 + err_drop_frame: 5140 5132 dev->stats.rx_errors++; 5141 5133 mvpp2_rx_error(port, rx_desc); 5134 + /* Return the buffer to the pool */ 5142 5135 mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr, 5143 5136 rx_desc->buf_cookie); 5144 5137 continue; 5145 5138 } 5146 5139 5147 5140 skb = (struct sk_buff *)rx_desc->buf_cookie; 5141 + 5142 + err = mvpp2_rx_refill(port, bm_pool, bm, 0); 5143 + if (err) { 5144 + netdev_err(port->dev, "failed to refill BM pools\n"); 5145 + goto err_drop_frame; 5146 + } 5147 + 5148 + dma_unmap_single(dev->dev.parent, phys_addr, 5149 + bm_pool->buf_size, DMA_FROM_DEVICE); 5148 5150 5149 5151 rcvd_pkts++; 5150 5152 rcvd_bytes += rx_bytes; ··· 5167 5147 mvpp2_rx_csum(port, rx_status, skb); 5168 5148 5169 5149 napi_gro_receive(&port->napi, skb); 5170 - 5171 - err = mvpp2_rx_refill(port, bm_pool, bm, 0); 5172 - if (err) { 5173 - netdev_err(port->dev, "failed to refill BM pools\n"); 5174 - rx_filled--; 5175 - } 5176 5150 } 5177 5151 5178 5152 if (rcvd_pkts) { ··· 5180 5166 5181 5167 /* Update Rx queue management counters */ 5182 5168 wmb(); 5183 - mvpp2_rxq_status_update(port, rxq->id, rx_todo, rx_filled); 5169 + mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done); 5184 5170 5185 5171 return rx_todo; 5186 5172 }
+3 -2
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
··· 4306 4306 return -EOPNOTSUPP; 4307 4307 4308 4308 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf; 4309 - ctrl->port = mlx4_slave_convert_port(dev, slave, ctrl->port); 4310 - if (ctrl->port <= 0) 4309 + err = mlx4_slave_convert_port(dev, slave, ctrl->port); 4310 + if (err <= 0) 4311 4311 return -EINVAL; 4312 + ctrl->port = err; 4312 4313 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff; 4313 4314 err = get_res(dev, slave, qpn, RES_QP, &rqp); 4314 4315 if (err) {
+3
drivers/net/ethernet/qlogic/qed/qed.h
··· 299 299 300 300 /* Flag indicating whether interrupts are enabled or not*/ 301 301 bool b_int_enabled; 302 + bool b_int_requested; 302 303 303 304 struct qed_mcp_info *mcp_info; 304 305 ··· 491 490 u32 qed_unzip_data(struct qed_hwfn *p_hwfn, 492 491 u32 input_len, u8 *input_buf, 493 492 u32 max_size, u8 *unzip_buf); 493 + 494 + int qed_slowpath_irq_req(struct qed_hwfn *hwfn); 494 495 495 496 #define QED_ETH_INTERFACE_VERSION 300 496 497
+32 -21
drivers/net/ethernet/qlogic/qed/qed_dev.c
··· 1385 1385 return rc; 1386 1386 } 1387 1387 1388 - static u32 qed_hw_bar_size(struct qed_dev *cdev, 1389 - u8 bar_id) 1388 + static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, 1389 + u8 bar_id) 1390 1390 { 1391 - u32 size = pci_resource_len(cdev->pdev, (bar_id > 0) ? 2 : 0); 1391 + u32 bar_reg = (bar_id == 0 ? PGLUE_B_REG_PF_BAR0_SIZE 1392 + : PGLUE_B_REG_PF_BAR1_SIZE); 1393 + u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg); 1392 1394 1393 - return size / cdev->num_hwfns; 1395 + /* Get the BAR size(in KB) from hardware given val */ 1396 + return 1 << (val + 15); 1394 1397 } 1395 1398 1396 1399 int qed_hw_prepare(struct qed_dev *cdev, 1397 1400 int personality) 1398 1401 { 1399 - int rc, i; 1402 + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); 1403 + int rc; 1400 1404 1401 1405 /* Store the precompiled init data ptrs */ 1402 1406 qed_init_iro_array(cdev); 1403 1407 1404 1408 /* Initialize the first hwfn - will learn number of hwfns */ 1405 - rc = qed_hw_prepare_single(&cdev->hwfns[0], cdev->regview, 1409 + rc = qed_hw_prepare_single(p_hwfn, 1410 + cdev->regview, 1406 1411 cdev->doorbells, personality); 1407 1412 if (rc) 1408 1413 return rc; 1409 1414 1410 - personality = cdev->hwfns[0].hw_info.personality; 1415 + personality = p_hwfn->hw_info.personality; 1411 1416 1412 1417 /* Initialize the rest of the hwfns */ 1413 - for (i = 1; i < cdev->num_hwfns; i++) { 1418 + if (cdev->num_hwfns > 1) { 1414 1419 void __iomem *p_regview, *p_doorbell; 1420 + u8 __iomem *addr; 1415 1421 1416 - p_regview = cdev->regview + 1417 - i * qed_hw_bar_size(cdev, 0); 1418 - p_doorbell = cdev->doorbells + 1419 - i * qed_hw_bar_size(cdev, 1); 1420 - rc = qed_hw_prepare_single(&cdev->hwfns[i], p_regview, 1422 + /* adjust bar offset for second engine */ 1423 + addr = cdev->regview + qed_hw_bar_size(p_hwfn, 0) / 2; 1424 + p_regview = addr; 1425 + 1426 + /* adjust doorbell bar offset for second engine */ 1427 + addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, 1) / 2; 1428 + 
p_doorbell = addr; 1429 + 1430 + /* prepare second hw function */ 1431 + rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview, 1421 1432 p_doorbell, personality); 1433 + 1434 + /* in case of error, need to free the previously 1435 + * initiliazed hwfn 0. 1436 + */ 1422 1437 if (rc) { 1423 - /* Cleanup previously initialized hwfns */ 1424 - while (--i >= 0) { 1425 - qed_init_free(&cdev->hwfns[i]); 1426 - qed_mcp_free(&cdev->hwfns[i]); 1427 - qed_hw_hwfn_free(&cdev->hwfns[i]); 1428 - } 1429 - return rc; 1438 + qed_init_free(p_hwfn); 1439 + qed_mcp_free(p_hwfn); 1440 + qed_hw_hwfn_free(p_hwfn); 1430 1441 } 1431 1442 } 1432 1443 1433 - return 0; 1444 + return rc; 1434 1445 } 1435 1446 1436 1447 void qed_hw_remove(struct qed_dev *cdev)
+24 -9
drivers/net/ethernet/qlogic/qed/qed_int.c
··· 783 783 qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf); 784 784 } 785 785 786 - void qed_int_igu_enable(struct qed_hwfn *p_hwfn, 787 - struct qed_ptt *p_ptt, 788 - enum qed_int_mode int_mode) 786 + int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, 787 + enum qed_int_mode int_mode) 789 788 { 790 - int i; 791 - 792 - p_hwfn->b_int_enabled = 1; 789 + int rc, i; 793 790 794 791 /* Mask non-link attentions */ 795 792 for (i = 0; i < 9; i++) 796 793 qed_wr(p_hwfn, p_ptt, 797 794 MISC_REG_AEU_ENABLE1_IGU_OUT_0 + (i << 2), 0); 798 - 799 - /* Enable interrupt Generation */ 800 - qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode); 801 795 802 796 /* Configure AEU signal change to produce attentions for link */ 803 797 qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff); ··· 802 808 803 809 /* Unmask AEU signals toward IGU */ 804 810 qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff); 811 + if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) { 812 + rc = qed_slowpath_irq_req(p_hwfn); 813 + if (rc != 0) { 814 + DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n"); 815 + return -EINVAL; 816 + } 817 + p_hwfn->b_int_requested = true; 818 + } 819 + /* Enable interrupt Generation */ 820 + qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode); 821 + p_hwfn->b_int_enabled = 1; 822 + 823 + return rc; 805 824 } 806 825 807 826 void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, ··· 1133 1126 *p_iov_blks = info->free_blks; 1134 1127 1135 1128 return info->igu_sb_cnt; 1129 + } 1130 + 1131 + void qed_int_disable_post_isr_release(struct qed_dev *cdev) 1132 + { 1133 + int i; 1134 + 1135 + for_each_hwfn(cdev, i) 1136 + cdev->hwfns[i].b_int_requested = false; 1136 1137 }
+10 -5
drivers/net/ethernet/qlogic/qed/qed_int.h
··· 169 169 int *p_iov_blks); 170 170 171 171 /** 172 - * @file 172 + * @brief qed_int_disable_post_isr_release - performs the cleanup post ISR 173 + * release. The API need to be called after releasing all slowpath IRQs 174 + * of the device. 173 175 * 174 - * @brief Interrupt handler 176 + * @param cdev 177 + * 175 178 */ 179 + void qed_int_disable_post_isr_release(struct qed_dev *cdev); 176 180 177 181 #define QED_CAU_DEF_RX_TIMER_RES 0 178 182 #define QED_CAU_DEF_TX_TIMER_RES 0 ··· 370 366 * @param p_hwfn 371 367 * @param p_ptt 372 368 * @param int_mode 369 + * 370 + * @return int 373 371 */ 374 - void qed_int_igu_enable(struct qed_hwfn *p_hwfn, 375 - struct qed_ptt *p_ptt, 376 - enum qed_int_mode int_mode); 372 + int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, 373 + enum qed_int_mode int_mode); 377 374 378 375 /** 379 376 * @brief - Initialize CAU status block entry
+18 -38
drivers/net/ethernet/qlogic/qed/qed_main.c
··· 476 476 return rc; 477 477 } 478 478 479 - static int qed_slowpath_irq_req(struct qed_dev *cdev) 479 + int qed_slowpath_irq_req(struct qed_hwfn *hwfn) 480 480 { 481 - int i = 0, rc = 0; 481 + struct qed_dev *cdev = hwfn->cdev; 482 + int rc = 0; 483 + u8 id; 482 484 483 485 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { 484 - /* Request all the slowpath MSI-X vectors */ 485 - for (i = 0; i < cdev->num_hwfns; i++) { 486 - snprintf(cdev->hwfns[i].name, NAME_SIZE, 487 - "sp-%d-%02x:%02x.%02x", 488 - i, cdev->pdev->bus->number, 489 - PCI_SLOT(cdev->pdev->devfn), 490 - cdev->hwfns[i].abs_pf_id); 491 - 492 - rc = request_irq(cdev->int_params.msix_table[i].vector, 493 - qed_msix_sp_int, 0, 494 - cdev->hwfns[i].name, 495 - cdev->hwfns[i].sp_dpc); 496 - if (rc) 497 - break; 498 - 499 - DP_VERBOSE(&cdev->hwfns[i], 500 - (NETIF_MSG_INTR | QED_MSG_SP), 486 + id = hwfn->my_id; 487 + snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x", 488 + id, cdev->pdev->bus->number, 489 + PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id); 490 + rc = request_irq(cdev->int_params.msix_table[id].vector, 491 + qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc); 492 + if (!rc) 493 + DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP), 501 494 "Requested slowpath MSI-X\n"); 502 - } 503 - 504 - if (i != cdev->num_hwfns) { 505 - /* Free already request MSI-X vectors */ 506 - for (i--; i >= 0; i--) { 507 - unsigned int vec = 508 - cdev->int_params.msix_table[i].vector; 509 - synchronize_irq(vec); 510 - free_irq(cdev->int_params.msix_table[i].vector, 511 - cdev->hwfns[i].sp_dpc); 512 - } 513 - } 514 495 } else { 515 496 unsigned long flags = 0; 516 497 ··· 515 534 516 535 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { 517 536 for_each_hwfn(cdev, i) { 537 + if (!cdev->hwfns[i].b_int_requested) 538 + break; 518 539 synchronize_irq(cdev->int_params.msix_table[i].vector); 519 540 free_irq(cdev->int_params.msix_table[i].vector, 520 541 cdev->hwfns[i].sp_dpc); 521 542 } 522 543 } else { 523 - 
free_irq(cdev->pdev->irq, cdev); 544 + if (QED_LEADING_HWFN(cdev)->b_int_requested) 545 + free_irq(cdev->pdev->irq, cdev); 524 546 } 547 + qed_int_disable_post_isr_release(cdev); 525 548 } 526 549 527 550 static int qed_nic_stop(struct qed_dev *cdev) ··· 750 765 if (rc) 751 766 goto err1; 752 767 753 - /* Request the slowpath IRQ */ 754 - rc = qed_slowpath_irq_req(cdev); 755 - if (rc) 756 - goto err2; 757 - 758 768 /* Allocate stream for unzipping */ 759 769 rc = qed_alloc_stream_mem(cdev); 760 770 if (rc) { 761 771 DP_NOTICE(cdev, "Failed to allocate stream memory\n"); 762 - goto err3; 772 + goto err2; 763 773 } 764 774 765 775 /* Start the slowpath */
+4
drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
··· 363 363 0x7 << 0) 364 364 #define MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT \ 365 365 0 366 + #define PGLUE_B_REG_PF_BAR0_SIZE \ 367 + 0x2aae60UL 368 + #define PGLUE_B_REG_PF_BAR1_SIZE \ 369 + 0x2aae64UL 366 370 #endif
+6 -2
drivers/net/ethernet/qlogic/qed/qed_sp.h
··· 124 124 dma_addr_t p_phys; 125 125 struct qed_spq_entry *p_virt; 126 126 127 - /* Used as index for completions (returns on EQ by FW) */ 128 - u16 echo_idx; 127 + #define SPQ_RING_SIZE \ 128 + (CORE_SPQE_PAGE_SIZE_BYTES / sizeof(struct slow_path_element)) 129 + 130 + /* Bitmap for handling out-of-order completions */ 131 + DECLARE_BITMAP(p_comp_bitmap, SPQ_RING_SIZE); 132 + u8 comp_bitmap_idx; 129 133 130 134 /* Statistics */ 131 135 u32 unlimited_pending_count;
+51 -18
drivers/net/ethernet/qlogic/qed/qed_spq.c
··· 112 112 qed_spq_fill_entry(struct qed_hwfn *p_hwfn, 113 113 struct qed_spq_entry *p_ent) 114 114 { 115 - p_ent->elem.hdr.echo = 0; 116 - p_hwfn->p_spq->echo_idx++; 117 115 p_ent->flags = 0; 118 116 119 117 switch (p_ent->comp_mode) { ··· 193 195 struct qed_spq *p_spq, 194 196 struct qed_spq_entry *p_ent) 195 197 { 196 - struct qed_chain *p_chain = &p_hwfn->p_spq->chain; 198 + struct qed_chain *p_chain = &p_hwfn->p_spq->chain; 199 + u16 echo = qed_chain_get_prod_idx(p_chain); 197 200 struct slow_path_element *elem; 198 201 struct core_db_data db; 199 202 203 + p_ent->elem.hdr.echo = cpu_to_le16(echo); 200 204 elem = qed_chain_produce(p_chain); 201 205 if (!elem) { 202 206 DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n"); ··· 437 437 p_spq->comp_count = 0; 438 438 p_spq->comp_sent_count = 0; 439 439 p_spq->unlimited_pending_count = 0; 440 - p_spq->echo_idx = 0; 440 + 441 + bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE); 442 + p_spq->comp_bitmap_idx = 0; 441 443 442 444 /* SPQ cid, cannot fail */ 443 445 qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid); ··· 584 582 struct qed_spq *p_spq = p_hwfn->p_spq; 585 583 586 584 if (p_ent->queue == &p_spq->unlimited_pending) { 587 - struct qed_spq_entry *p_en2; 588 585 589 586 if (list_empty(&p_spq->free_pool)) { 590 587 list_add_tail(&p_ent->list, &p_spq->unlimited_pending); 591 588 p_spq->unlimited_pending_count++; 592 589 593 590 return 0; 591 + } else { 592 + struct qed_spq_entry *p_en2; 593 + 594 + p_en2 = list_first_entry(&p_spq->free_pool, 595 + struct qed_spq_entry, 596 + list); 597 + list_del(&p_en2->list); 598 + 599 + /* Copy the ring element physical pointer to the new 600 + * entry, since we are about to override the entire ring 601 + * entry and don't want to lose the pointer. 
602 + */ 603 + p_ent->elem.data_ptr = p_en2->elem.data_ptr; 604 + 605 + *p_en2 = *p_ent; 606 + 607 + kfree(p_ent); 608 + 609 + p_ent = p_en2; 594 610 } 595 - 596 - p_en2 = list_first_entry(&p_spq->free_pool, 597 - struct qed_spq_entry, 598 - list); 599 - list_del(&p_en2->list); 600 - 601 - /* Strcut assignment */ 602 - *p_en2 = *p_ent; 603 - 604 - kfree(p_ent); 605 - 606 - p_ent = p_en2; 607 611 } 608 612 609 613 /* entry is to be placed in 'pending' queue */ ··· 785 777 list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, 786 778 list) { 787 779 if (p_ent->elem.hdr.echo == echo) { 780 + u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE; 781 + 788 782 list_del(&p_ent->list); 789 783 790 - qed_chain_return_produced(&p_spq->chain); 784 + /* Avoid overriding of SPQ entries when getting 785 + * out-of-order completions, by marking the completions 786 + * in a bitmap and increasing the chain consumer only 787 + * for the first successive completed entries. 788 + */ 789 + bitmap_set(p_spq->p_comp_bitmap, pos, SPQ_RING_SIZE); 790 + 791 + while (test_bit(p_spq->comp_bitmap_idx, 792 + p_spq->p_comp_bitmap)) { 793 + bitmap_clear(p_spq->p_comp_bitmap, 794 + p_spq->comp_bitmap_idx, 795 + SPQ_RING_SIZE); 796 + p_spq->comp_bitmap_idx++; 797 + qed_chain_return_produced(&p_spq->chain); 798 + } 799 + 791 800 p_spq->comp_count++; 792 801 found = p_ent; 793 802 break; 794 803 } 804 + 805 + /* This is relatively uncommon - depends on scenarios 806 + * which have mutliple per-PF sent ramrods. 807 + */ 808 + DP_VERBOSE(p_hwfn, QED_MSG_SPQ, 809 + "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n", 810 + le16_to_cpu(echo), 811 + le16_to_cpu(p_ent->elem.hdr.echo)); 795 812 } 796 813 797 814 /* Release lock before callback, as callback may post
+2 -1
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
··· 246 246 u32 state; 247 247 248 248 state = QLCRDX(ahw, QLC_83XX_VNIC_STATE); 249 - while (state != QLCNIC_DEV_NPAR_OPER && idc->vnic_wait_limit--) { 249 + while (state != QLCNIC_DEV_NPAR_OPER && idc->vnic_wait_limit) { 250 + idc->vnic_wait_limit--; 250 251 msleep(1000); 251 252 state = QLCRDX(ahw, QLC_83XX_VNIC_STATE); 252 253 }
+3 -2
drivers/net/ethernet/qlogic/qlge/qlge_main.c
··· 4211 4211 4212 4212 /* Wait for an outstanding reset to complete. */ 4213 4213 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) { 4214 - int i = 3; 4215 - while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) { 4214 + int i = 4; 4215 + 4216 + while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) { 4216 4217 netif_err(qdev, ifup, qdev->ndev, 4217 4218 "Waiting for adapter UP...\n"); 4218 4219 ssleep(1);
+2 -3
drivers/net/ethernet/qualcomm/qca_spi.c
··· 736 736 netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n", 737 737 jiffies, jiffies - dev->trans_start); 738 738 qca->net_dev->stats.tx_errors++; 739 - /* wake the queue if there is room */ 740 - if (qcaspi_tx_ring_has_space(&qca->txr)) 741 - netif_wake_queue(dev); 739 + /* Trigger tx queue flush and QCA7000 reset */ 740 + qca->sync = QCASPI_SYNC_UNKNOWN; 742 741 } 743 742 744 743 static int
+4 -1
drivers/net/ethernet/renesas/ravb_main.c
··· 905 905 netdev_info(ndev, "limited PHY to 100Mbit/s\n"); 906 906 } 907 907 908 + /* 10BASE is not supported */ 909 + phydev->supported &= ~PHY_10BT_FEATURES; 910 + 908 911 netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n", 909 912 phydev->addr, phydev->irq, phydev->drv->name); 910 913 ··· 1040 1037 "rx_queue_1_mcast_packets", 1041 1038 "rx_queue_1_errors", 1042 1039 "rx_queue_1_crc_errors", 1043 - "rx_queue_1_frame_errors_", 1040 + "rx_queue_1_frame_errors", 1044 1041 "rx_queue_1_length_errors", 1045 1042 "rx_queue_1_missed_errors", 1046 1043 "rx_queue_1_over_errors",
+41 -14
drivers/net/ethernet/renesas/sh_eth.c
··· 52 52 NETIF_MSG_RX_ERR| \ 53 53 NETIF_MSG_TX_ERR) 54 54 55 + #define SH_ETH_OFFSET_INVALID ((u16)~0) 56 + 55 57 #define SH_ETH_OFFSET_DEFAULTS \ 56 58 [0 ... SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID 57 59 ··· 405 403 406 404 static void sh_eth_rcv_snd_disable(struct net_device *ndev); 407 405 static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev); 406 + 407 + static void sh_eth_write(struct net_device *ndev, u32 data, int enum_index) 408 + { 409 + struct sh_eth_private *mdp = netdev_priv(ndev); 410 + u16 offset = mdp->reg_offset[enum_index]; 411 + 412 + if (WARN_ON(offset == SH_ETH_OFFSET_INVALID)) 413 + return; 414 + 415 + iowrite32(data, mdp->addr + offset); 416 + } 417 + 418 + static u32 sh_eth_read(struct net_device *ndev, int enum_index) 419 + { 420 + struct sh_eth_private *mdp = netdev_priv(ndev); 421 + u16 offset = mdp->reg_offset[enum_index]; 422 + 423 + if (WARN_ON(offset == SH_ETH_OFFSET_INVALID)) 424 + return ~0U; 425 + 426 + return ioread32(mdp->addr + offset); 427 + } 408 428 409 429 static bool sh_eth_is_gether(struct sh_eth_private *mdp) 410 430 { ··· 1196 1172 break; 1197 1173 } 1198 1174 mdp->rx_skbuff[i] = skb; 1199 - rxdesc->addr = dma_addr; 1175 + rxdesc->addr = cpu_to_edmac(mdp, dma_addr); 1200 1176 rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); 1201 1177 1202 1178 /* Rx descriptor address set */ ··· 1427 1403 entry, edmac_to_cpu(mdp, txdesc->status)); 1428 1404 /* Free the original skb. 
*/ 1429 1405 if (mdp->tx_skbuff[entry]) { 1430 - dma_unmap_single(&ndev->dev, txdesc->addr, 1406 + dma_unmap_single(&ndev->dev, 1407 + edmac_to_cpu(mdp, txdesc->addr), 1431 1408 txdesc->buffer_length, DMA_TO_DEVICE); 1432 1409 dev_kfree_skb_irq(mdp->tx_skbuff[entry]); 1433 1410 mdp->tx_skbuff[entry] = NULL; ··· 1487 1462 if (mdp->cd->shift_rd0) 1488 1463 desc_status >>= 16; 1489 1464 1465 + skb = mdp->rx_skbuff[entry]; 1490 1466 if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 | 1491 1467 RD_RFS5 | RD_RFS6 | RD_RFS10)) { 1492 1468 ndev->stats.rx_errors++; ··· 1503 1477 ndev->stats.rx_missed_errors++; 1504 1478 if (desc_status & RD_RFS10) 1505 1479 ndev->stats.rx_over_errors++; 1506 - } else { 1480 + } else if (skb) { 1481 + dma_addr = edmac_to_cpu(mdp, rxdesc->addr); 1507 1482 if (!mdp->cd->hw_swap) 1508 1483 sh_eth_soft_swap( 1509 - phys_to_virt(ALIGN(rxdesc->addr, 4)), 1484 + phys_to_virt(ALIGN(dma_addr, 4)), 1510 1485 pkt_len + 2); 1511 - skb = mdp->rx_skbuff[entry]; 1512 1486 mdp->rx_skbuff[entry] = NULL; 1513 1487 if (mdp->cd->rpadir) 1514 1488 skb_reserve(skb, NET_IP_ALIGN); 1515 - dma_unmap_single(&ndev->dev, rxdesc->addr, 1489 + dma_unmap_single(&ndev->dev, dma_addr, 1516 1490 ALIGN(mdp->rx_buf_sz, 32), 1517 1491 DMA_FROM_DEVICE); 1518 1492 skb_put(skb, pkt_len); ··· 1549 1523 mdp->rx_skbuff[entry] = skb; 1550 1524 1551 1525 skb_checksum_none_assert(skb); 1552 - rxdesc->addr = dma_addr; 1526 + rxdesc->addr = cpu_to_edmac(mdp, dma_addr); 1553 1527 } 1554 1528 dma_wmb(); /* RACT bit must be set after all the above writes */ 1555 1529 if (entry >= mdp->num_rx_ring - 1) ··· 2357 2331 /* Free all the skbuffs in the Rx queue. 
*/ 2358 2332 for (i = 0; i < mdp->num_rx_ring; i++) { 2359 2333 rxdesc = &mdp->rx_ring[i]; 2360 - rxdesc->status = 0; 2361 - rxdesc->addr = 0xBADF00D0; 2334 + rxdesc->status = cpu_to_edmac(mdp, 0); 2335 + rxdesc->addr = cpu_to_edmac(mdp, 0xBADF00D0); 2362 2336 dev_kfree_skb(mdp->rx_skbuff[i]); 2363 2337 mdp->rx_skbuff[i] = NULL; 2364 2338 } ··· 2376 2350 { 2377 2351 struct sh_eth_private *mdp = netdev_priv(ndev); 2378 2352 struct sh_eth_txdesc *txdesc; 2353 + dma_addr_t dma_addr; 2379 2354 u32 entry; 2380 2355 unsigned long flags; 2381 2356 ··· 2399 2372 txdesc = &mdp->tx_ring[entry]; 2400 2373 /* soft swap. */ 2401 2374 if (!mdp->cd->hw_swap) 2402 - sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)), 2403 - skb->len + 2); 2404 - txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len, 2405 - DMA_TO_DEVICE); 2406 - if (dma_mapping_error(&ndev->dev, txdesc->addr)) { 2375 + sh_eth_soft_swap(PTR_ALIGN(skb->data, 4), skb->len + 2); 2376 + dma_addr = dma_map_single(&ndev->dev, skb->data, skb->len, 2377 + DMA_TO_DEVICE); 2378 + if (dma_mapping_error(&ndev->dev, dma_addr)) { 2407 2379 kfree_skb(skb); 2408 2380 return NETDEV_TX_OK; 2409 2381 } 2382 + txdesc->addr = cpu_to_edmac(mdp, dma_addr); 2410 2383 txdesc->buffer_length = skb->len; 2411 2384 2412 2385 dma_wmb(); /* TACT bit must be set after all the above writes */
-25
drivers/net/ethernet/renesas/sh_eth.h
··· 546 546 #endif 547 547 } 548 548 549 - #define SH_ETH_OFFSET_INVALID ((u16) ~0) 550 - 551 - static inline void sh_eth_write(struct net_device *ndev, u32 data, 552 - int enum_index) 553 - { 554 - struct sh_eth_private *mdp = netdev_priv(ndev); 555 - u16 offset = mdp->reg_offset[enum_index]; 556 - 557 - if (WARN_ON(offset == SH_ETH_OFFSET_INVALID)) 558 - return; 559 - 560 - iowrite32(data, mdp->addr + offset); 561 - } 562 - 563 - static inline u32 sh_eth_read(struct net_device *ndev, int enum_index) 564 - { 565 - struct sh_eth_private *mdp = netdev_priv(ndev); 566 - u16 offset = mdp->reg_offset[enum_index]; 567 - 568 - if (WARN_ON(offset == SH_ETH_OFFSET_INVALID)) 569 - return ~0U; 570 - 571 - return ioread32(mdp->addr + offset); 572 - } 573 - 574 549 static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp, 575 550 int enum_index) 576 551 {
+12 -12
drivers/net/ethernet/sfc/ef10.c
··· 3299 3299 3300 3300 new_spec.priority = EFX_FILTER_PRI_AUTO; 3301 3301 new_spec.flags = (EFX_FILTER_FLAG_RX | 3302 - EFX_FILTER_FLAG_RX_RSS); 3302 + (efx_rss_enabled(efx) ? 3303 + EFX_FILTER_FLAG_RX_RSS : 0)); 3303 3304 new_spec.dmaq_id = 0; 3304 3305 new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT; 3305 3306 rc = efx_ef10_filter_push(efx, &new_spec, ··· 3922 3921 { 3923 3922 struct efx_ef10_filter_table *table = efx->filter_state; 3924 3923 struct efx_ef10_dev_addr *addr_list; 3924 + enum efx_filter_flags filter_flags; 3925 3925 struct efx_filter_spec spec; 3926 3926 u8 baddr[ETH_ALEN]; 3927 3927 unsigned int i, j; ··· 3937 3935 addr_count = table->dev_uc_count; 3938 3936 } 3939 3937 3938 + filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0; 3939 + 3940 3940 /* Insert/renew filters */ 3941 3941 for (i = 0; i < addr_count; i++) { 3942 - efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, 3943 - EFX_FILTER_FLAG_RX_RSS, 3944 - 0); 3942 + efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); 3945 3943 efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, 3946 3944 addr_list[i].addr); 3947 3945 rc = efx_ef10_filter_insert(efx, &spec, true); ··· 3970 3968 3971 3969 if (multicast && rollback) { 3972 3970 /* Also need an Ethernet broadcast filter */ 3973 - efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, 3974 - EFX_FILTER_FLAG_RX_RSS, 3975 - 0); 3971 + efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); 3976 3972 eth_broadcast_addr(baddr); 3977 3973 efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, baddr); 3978 3974 rc = efx_ef10_filter_insert(efx, &spec, true); ··· 4000 4000 { 4001 4001 struct efx_ef10_filter_table *table = efx->filter_state; 4002 4002 struct efx_ef10_nic_data *nic_data = efx->nic_data; 4003 + enum efx_filter_flags filter_flags; 4003 4004 struct efx_filter_spec spec; 4004 4005 u8 baddr[ETH_ALEN]; 4005 4006 int rc; 4006 4007 4007 - efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, 4008 - EFX_FILTER_FLAG_RX_RSS, 
4009 - 0); 4008 + filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0; 4009 + 4010 + efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); 4010 4011 4011 4012 if (multicast) 4012 4013 efx_filter_set_mc_def(&spec); ··· 4024 4023 if (!nic_data->workaround_26807) { 4025 4024 /* Also need an Ethernet broadcast filter */ 4026 4025 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, 4027 - EFX_FILTER_FLAG_RX_RSS, 4028 - 0); 4026 + filter_flags, 0); 4029 4027 eth_broadcast_addr(baddr); 4030 4028 efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, 4031 4029 baddr);
+5
drivers/net/ethernet/sfc/efx.h
··· 76 76 #define EFX_TXQ_MAX_ENT(efx) (EFX_WORKAROUND_35388(efx) ? \ 77 77 EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE) 78 78 79 + static inline bool efx_rss_enabled(struct efx_nic *efx) 80 + { 81 + return efx->rss_spread > 1; 82 + } 83 + 79 84 /* Filters */ 80 85 81 86 void efx_mac_reconfigure(struct efx_nic *efx);
+1 -1
drivers/net/ethernet/sfc/farch.c
··· 2242 2242 */ 2243 2243 spec->priority = EFX_FILTER_PRI_AUTO; 2244 2244 spec->flags = (EFX_FILTER_FLAG_RX | 2245 - (efx->n_rx_channels > 1 ? EFX_FILTER_FLAG_RX_RSS : 0) | 2245 + (efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0) | 2246 2246 (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0)); 2247 2247 spec->dmaq_id = 0; 2248 2248 }
+1 -1
drivers/net/ethernet/sfc/txc43128_phy.c
··· 418 418 419 419 val |= (1 << TXC_GLCMD_LMTSWRST_LBN); 420 420 efx_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, val); 421 - while (tries--) { 421 + while (--tries) { 422 422 val = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD); 423 423 if (!(val & (1 << TXC_GLCMD_LMTSWRST_LBN))) 424 424 break;
+5 -1
drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
··· 153 153 if (ret) 154 154 return ret; 155 155 156 - return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); 156 + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); 157 + if (ret) 158 + sun7i_gmac_exit(pdev, plat_dat->bsp_priv); 159 + 160 + return ret; 157 161 } 158 162 159 163 static const struct of_device_id sun7i_dwmac_match[] = {
+6 -3
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 3046 3046 priv->hw->dma->stop_tx(priv->ioaddr); 3047 3047 priv->hw->dma->stop_rx(priv->ioaddr); 3048 3048 3049 - stmmac_clear_descriptors(priv); 3050 - 3051 3049 /* Enable Power down mode by programming the PMT regs */ 3052 3050 if (device_may_wakeup(priv->device)) { 3053 3051 priv->hw->mac->pmt(priv->hw, priv->wolopts); ··· 3103 3105 3104 3106 netif_device_attach(ndev); 3105 3107 3106 - init_dma_desc_rings(ndev, GFP_ATOMIC); 3108 + priv->cur_rx = 0; 3109 + priv->dirty_rx = 0; 3110 + priv->dirty_tx = 0; 3111 + priv->cur_tx = 0; 3112 + stmmac_clear_descriptors(priv); 3113 + 3107 3114 stmmac_hw_setup(ndev, false); 3108 3115 stmmac_init_tx_coalesce(priv); 3109 3116 stmmac_set_rx_mode(ndev);
-2
drivers/net/geneve.c
··· 967 967 err = udp_tunnel6_xmit_skb(dst, gs6->sock->sk, skb, dev, 968 968 &fl6.saddr, &fl6.daddr, prio, ttl, 969 969 sport, geneve->dst_port, !udp_csum); 970 - 971 - iptunnel_xmit_stats(err, &dev->stats, dev->tstats); 972 970 return NETDEV_TX_OK; 973 971 974 972 tx_error:
+7 -2
drivers/net/phy/mdio-mux.c
··· 149 149 } 150 150 cb->bus_number = v; 151 151 cb->parent = pb; 152 - cb->mii_bus = mdiobus_alloc(); 153 - cb->mii_bus->priv = cb; 154 152 153 + cb->mii_bus = mdiobus_alloc(); 154 + if (!cb->mii_bus) { 155 + ret_val = -ENOMEM; 156 + of_node_put(child_bus_node); 157 + break; 158 + } 159 + cb->mii_bus->priv = cb; 155 160 cb->mii_bus->irq = cb->phy_irq; 156 161 cb->mii_bus->name = "mdio_mux"; 157 162 snprintf(cb->mii_bus->id, MII_BUS_ID_SIZE, "%x.%x",
+11 -2
drivers/net/phy/micrel.c
··· 339 339 { 340 340 const struct device *dev = &phydev->dev; 341 341 const struct device_node *of_node = dev->of_node; 342 + const struct device *dev_walker; 342 343 343 - if (!of_node && dev->parent->of_node) 344 - of_node = dev->parent->of_node; 344 + /* The Micrel driver has a deprecated option to place phy OF 345 + * properties in the MAC node. Walk up the tree of devices to 346 + * find a device with an OF node. 347 + */ 348 + dev_walker = &phydev->dev; 349 + do { 350 + of_node = dev_walker->of_node; 351 + dev_walker = dev_walker->parent; 352 + 353 + } while (!of_node && dev_walker); 345 354 346 355 if (of_node) { 347 356 ksz9021_load_values_from_of(phydev, of_node,
+10 -4
drivers/net/ppp/pppoe.c
··· 568 568 sk->sk_family = PF_PPPOX; 569 569 sk->sk_protocol = PX_PROTO_OE; 570 570 571 + INIT_WORK(&pppox_sk(sk)->proto.pppoe.padt_work, 572 + pppoe_unbind_sock_work); 573 + 571 574 return 0; 572 575 } 573 576 ··· 635 632 636 633 lock_sock(sk); 637 634 638 - INIT_WORK(&po->proto.pppoe.padt_work, pppoe_unbind_sock_work); 639 - 640 635 error = -EINVAL; 641 636 if (sp->sa_protocol != PX_PROTO_OE) 642 637 goto end; ··· 664 663 po->pppoe_dev = NULL; 665 664 } 666 665 667 - memset(sk_pppox(po) + 1, 0, 668 - sizeof(struct pppox_sock) - sizeof(struct sock)); 666 + po->pppoe_ifindex = 0; 667 + memset(&po->pppoe_pa, 0, sizeof(po->pppoe_pa)); 668 + memset(&po->pppoe_relay, 0, sizeof(po->pppoe_relay)); 669 + memset(&po->chan, 0, sizeof(po->chan)); 670 + po->next = NULL; 671 + po->num = 0; 672 + 669 673 sk->sk_state = PPPOX_NONE; 670 674 } 671 675
+6
drivers/net/ppp/pptp.c
··· 419 419 struct pptp_opt *opt = &po->proto.pptp; 420 420 int error = 0; 421 421 422 + if (sockaddr_len < sizeof(struct sockaddr_pppox)) 423 + return -EINVAL; 424 + 422 425 lock_sock(sk); 423 426 424 427 opt->src_addr = sp->sa_addr.pptp; ··· 442 439 struct rtable *rt; 443 440 struct flowi4 fl4; 444 441 int error = 0; 442 + 443 + if (sockaddr_len < sizeof(struct sockaddr_pppox)) 444 + return -EINVAL; 445 445 446 446 if (sp->sa_protocol != PX_PROTO_PPTP) 447 447 return -EINVAL;
+25 -1
drivers/net/usb/cdc_mbim.c
··· 158 158 if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) 159 159 goto err; 160 160 161 - ret = cdc_ncm_bind_common(dev, intf, data_altsetting, 0); 161 + ret = cdc_ncm_bind_common(dev, intf, data_altsetting, dev->driver_info->data); 162 162 if (ret) 163 163 goto err; 164 164 ··· 582 582 .tx_fixup = cdc_mbim_tx_fixup, 583 583 }; 584 584 585 + /* The specification explicitly allows NDPs to be placed anywhere in the 586 + * frame, but some devices fail unless the NDP is placed after the IP 587 + * packets. Using the CDC_NCM_FLAG_NDP_TO_END flag to force this 588 + * behaviour. 589 + * 590 + * Note: The current implementation of this feature restricts each NTB 591 + * to a single NDP, implying that multiplexed sessions cannot share an 592 + * NTB. This might affect performance for multiplexed sessions. 593 + */ 594 + static const struct driver_info cdc_mbim_info_ndp_to_end = { 595 + .description = "CDC MBIM", 596 + .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN, 597 + .bind = cdc_mbim_bind, 598 + .unbind = cdc_mbim_unbind, 599 + .manage_power = cdc_mbim_manage_power, 600 + .rx_fixup = cdc_mbim_rx_fixup, 601 + .tx_fixup = cdc_mbim_tx_fixup, 602 + .data = CDC_NCM_FLAG_NDP_TO_END, 603 + }; 604 + 585 605 static const struct usb_device_id mbim_devs[] = { 586 606 /* This duplicate NCM entry is intentional. 
MBIM devices can 587 607 * be disguised as NCM by default, and this is necessary to ··· 616 596 /* ZLP conformance whitelist: All Ericsson MBIM devices */ 617 597 { USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), 618 598 .driver_info = (unsigned long)&cdc_mbim_info, 599 + }, 600 + /* Huawei E3372 fails unless NDP comes after the IP packets */ 601 + { USB_DEVICE_AND_INTERFACE_INFO(0x12d1, 0x157d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), 602 + .driver_info = (unsigned long)&cdc_mbim_info_ndp_to_end, 619 603 }, 620 604 /* default entry */ 621 605 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+9 -1
drivers/net/usb/cdc_ncm.c
··· 955 955 * NTH16 header as we would normally do. NDP isn't written to the SKB yet, and 956 956 * the wNdpIndex field in the header is actually not consistent with reality. It will be later. 957 957 */ 958 - if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) 958 + if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) { 959 959 if (ctx->delayed_ndp16->dwSignature == sign) 960 960 return ctx->delayed_ndp16; 961 + 962 + /* We can only push a single NDP to the end. Return 963 + * NULL to send what we've already got and queue this 964 + * skb for later. 965 + */ 966 + else if (ctx->delayed_ndp16->dwSignature) 967 + return NULL; 968 + } 961 969 962 970 /* follow the chain of NDPs, looking for a match */ 963 971 while (ndpoffset) {
+3 -18
drivers/net/usb/r8152.c
··· 3067 3067 3068 3068 mutex_lock(&tp->control); 3069 3069 3070 - /* The WORK_ENABLE may be set when autoresume occurs */ 3071 - if (test_bit(WORK_ENABLE, &tp->flags)) { 3072 - clear_bit(WORK_ENABLE, &tp->flags); 3073 - usb_kill_urb(tp->intr_urb); 3074 - cancel_delayed_work_sync(&tp->schedule); 3075 - 3076 - /* disable the tx/rx, if the workqueue has enabled them. */ 3077 - if (netif_carrier_ok(netdev)) 3078 - tp->rtl_ops.disable(tp); 3079 - } 3080 - 3081 3070 tp->rtl_ops.up(tp); 3082 3071 3083 3072 rtl8152_set_speed(tp, AUTONEG_ENABLE, ··· 3112 3123 rtl_stop_rx(tp); 3113 3124 } else { 3114 3125 mutex_lock(&tp->control); 3115 - 3116 - /* The autosuspend may have been enabled and wouldn't 3117 - * be disable when autoresume occurs, because the 3118 - * netif_running() would be false. 3119 - */ 3120 - rtl_runtime_suspend_enable(tp, false); 3121 3126 3122 3127 tp->rtl_ops.down(tp); 3123 3128 ··· 3495 3512 netif_device_attach(tp->netdev); 3496 3513 } 3497 3514 3498 - if (netif_running(tp->netdev)) { 3515 + if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) { 3499 3516 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { 3500 3517 rtl_runtime_suspend_enable(tp, false); 3501 3518 clear_bit(SELECTIVE_SUSPEND, &tp->flags); ··· 3515 3532 } 3516 3533 usb_submit_urb(tp->intr_urb, GFP_KERNEL); 3517 3534 } else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { 3535 + if (tp->netdev->flags & IFF_UP) 3536 + rtl_runtime_suspend_enable(tp, false); 3518 3537 clear_bit(SELECTIVE_SUSPEND, &tp->flags); 3519 3538 } 3520 3539
+59 -16
drivers/net/vxlan.c
··· 1158 1158 struct pcpu_sw_netstats *stats; 1159 1159 union vxlan_addr saddr; 1160 1160 int err = 0; 1161 - union vxlan_addr *remote_ip; 1162 1161 1163 1162 /* For flow based devices, map all packets to VNI 0 */ 1164 1163 if (vs->flags & VXLAN_F_COLLECT_METADATA) ··· 1168 1169 if (!vxlan) 1169 1170 goto drop; 1170 1171 1171 - remote_ip = &vxlan->default_dst.remote_ip; 1172 1172 skb_reset_mac_header(skb); 1173 1173 skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev))); 1174 1174 skb->protocol = eth_type_trans(skb, vxlan->dev); ··· 1177 1179 if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr)) 1178 1180 goto drop; 1179 1181 1180 - /* Re-examine inner Ethernet packet */ 1181 - if (remote_ip->sa.sa_family == AF_INET) { 1182 + /* Get data from the outer IP header */ 1183 + if (vxlan_get_sk_family(vs) == AF_INET) { 1182 1184 oip = ip_hdr(skb); 1183 1185 saddr.sin.sin_addr.s_addr = oip->saddr; 1184 1186 saddr.sa.sa_family = AF_INET; ··· 1846 1848 !(vxflags & VXLAN_F_UDP_CSUM)); 1847 1849 } 1848 1850 1851 + #if IS_ENABLED(CONFIG_IPV6) 1852 + static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, 1853 + struct sk_buff *skb, int oif, 1854 + const struct in6_addr *daddr, 1855 + struct in6_addr *saddr) 1856 + { 1857 + struct dst_entry *ndst; 1858 + struct flowi6 fl6; 1859 + int err; 1860 + 1861 + memset(&fl6, 0, sizeof(fl6)); 1862 + fl6.flowi6_oif = oif; 1863 + fl6.daddr = *daddr; 1864 + fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr; 1865 + fl6.flowi6_mark = skb->mark; 1866 + fl6.flowi6_proto = IPPROTO_UDP; 1867 + 1868 + err = ipv6_stub->ipv6_dst_lookup(vxlan->net, 1869 + vxlan->vn6_sock->sock->sk, 1870 + &ndst, &fl6); 1871 + if (err < 0) 1872 + return ERR_PTR(err); 1873 + 1874 + *saddr = fl6.saddr; 1875 + return ndst; 1876 + } 1877 + #endif 1878 + 1849 1879 /* Bypass encapsulation if the destination is local */ 1850 1880 static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, 1851 1881 struct vxlan_dev *dst_vxlan) 
··· 2061 2035 #if IS_ENABLED(CONFIG_IPV6) 2062 2036 } else { 2063 2037 struct dst_entry *ndst; 2064 - struct flowi6 fl6; 2038 + struct in6_addr saddr; 2065 2039 u32 rt6i_flags; 2066 2040 2067 2041 if (!vxlan->vn6_sock) 2068 2042 goto drop; 2069 2043 sk = vxlan->vn6_sock->sock->sk; 2070 2044 2071 - memset(&fl6, 0, sizeof(fl6)); 2072 - fl6.flowi6_oif = rdst ? rdst->remote_ifindex : 0; 2073 - fl6.daddr = dst->sin6.sin6_addr; 2074 - fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr; 2075 - fl6.flowi6_mark = skb->mark; 2076 - fl6.flowi6_proto = IPPROTO_UDP; 2077 - 2078 - if (ipv6_stub->ipv6_dst_lookup(vxlan->net, sk, &ndst, &fl6)) { 2045 + ndst = vxlan6_get_route(vxlan, skb, 2046 + rdst ? rdst->remote_ifindex : 0, 2047 + &dst->sin6.sin6_addr, &saddr); 2048 + if (IS_ERR(ndst)) { 2079 2049 netdev_dbg(dev, "no route to %pI6\n", 2080 2050 &dst->sin6.sin6_addr); 2081 2051 dev->stats.tx_carrier_errors++; ··· 2103 2081 } 2104 2082 2105 2083 ttl = ttl ? : ip6_dst_hoplimit(ndst); 2106 - err = vxlan6_xmit_skb(ndst, sk, skb, dev, &fl6.saddr, &fl6.daddr, 2084 + err = vxlan6_xmit_skb(ndst, sk, skb, dev, &saddr, &dst->sin6.sin6_addr, 2107 2085 0, ttl, src_port, dst_port, htonl(vni << 8), md, 2108 2086 !net_eq(vxlan->net, dev_net(vxlan->dev)), 2109 2087 flags); ··· 2417 2395 vxlan->cfg.port_max, true); 2418 2396 dport = info->key.tp_dst ? 
: vxlan->cfg.dst_port; 2419 2397 2420 - if (ip_tunnel_info_af(info) == AF_INET) 2398 + if (ip_tunnel_info_af(info) == AF_INET) { 2399 + if (!vxlan->vn4_sock) 2400 + return -EINVAL; 2421 2401 return egress_ipv4_tun_info(dev, skb, info, sport, dport); 2422 - return -EINVAL; 2402 + } else { 2403 + #if IS_ENABLED(CONFIG_IPV6) 2404 + struct dst_entry *ndst; 2405 + 2406 + if (!vxlan->vn6_sock) 2407 + return -EINVAL; 2408 + ndst = vxlan6_get_route(vxlan, skb, 0, 2409 + &info->key.u.ipv6.dst, 2410 + &info->key.u.ipv6.src); 2411 + if (IS_ERR(ndst)) 2412 + return PTR_ERR(ndst); 2413 + dst_release(ndst); 2414 + 2415 + info->key.tp_src = sport; 2416 + info->key.tp_dst = dport; 2417 + #else /* !CONFIG_IPV6 */ 2418 + return -EPFNOSUPPORT; 2419 + #endif 2420 + } 2421 + return 0; 2423 2422 } 2424 2423 2425 2424 static const struct net_device_ops vxlan_netdev_ops = {
+15 -19
drivers/net/xen-netback/netback.c
··· 258 258 struct netrx_pending_operations *npo) 259 259 { 260 260 struct xenvif_rx_meta *meta; 261 - struct xen_netif_rx_request *req; 261 + struct xen_netif_rx_request req; 262 262 263 - req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++); 263 + RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req); 264 264 265 265 meta = npo->meta + npo->meta_prod++; 266 266 meta->gso_type = XEN_NETIF_GSO_TYPE_NONE; 267 267 meta->gso_size = 0; 268 268 meta->size = 0; 269 - meta->id = req->id; 269 + meta->id = req.id; 270 270 271 271 npo->copy_off = 0; 272 - npo->copy_gref = req->gref; 272 + npo->copy_gref = req.gref; 273 273 274 274 return meta; 275 275 } ··· 424 424 struct xenvif *vif = netdev_priv(skb->dev); 425 425 int nr_frags = skb_shinfo(skb)->nr_frags; 426 426 int i; 427 - struct xen_netif_rx_request *req; 427 + struct xen_netif_rx_request req; 428 428 struct xenvif_rx_meta *meta; 429 429 unsigned char *data; 430 430 int head = 1; ··· 443 443 444 444 /* Set up a GSO prefix descriptor, if necessary */ 445 445 if ((1 << gso_type) & vif->gso_prefix_mask) { 446 - req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++); 446 + RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req); 447 447 meta = npo->meta + npo->meta_prod++; 448 448 meta->gso_type = gso_type; 449 449 meta->gso_size = skb_shinfo(skb)->gso_size; 450 450 meta->size = 0; 451 - meta->id = req->id; 451 + meta->id = req.id; 452 452 } 453 453 454 - req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++); 454 + RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req); 455 455 meta = npo->meta + npo->meta_prod++; 456 456 457 457 if ((1 << gso_type) & vif->gso_mask) { ··· 463 463 } 464 464 465 465 meta->size = 0; 466 - meta->id = req->id; 466 + meta->id = req.id; 467 467 npo->copy_off = 0; 468 - npo->copy_gref = req->gref; 468 + npo->copy_gref = req.gref; 469 469 470 470 data = skb->data; 471 471 while (data < skb_tail_pointer(skb)) { ··· 679 679 * Allow a burst big enough to transmit a jumbo packet of up 
to 128kB. 680 680 * Otherwise the interface can seize up due to insufficient credit. 681 681 */ 682 - max_burst = RING_GET_REQUEST(&queue->tx, queue->tx.req_cons)->size; 683 - max_burst = min(max_burst, 131072UL); 684 - max_burst = max(max_burst, queue->credit_bytes); 682 + max_burst = max(131072UL, queue->credit_bytes); 685 683 686 684 /* Take care that adding a new chunk of credit doesn't wrap to zero. */ 687 685 max_credit = queue->remaining_credit + queue->credit_bytes; ··· 709 711 spin_unlock_irqrestore(&queue->response_lock, flags); 710 712 if (cons == end) 711 713 break; 712 - txp = RING_GET_REQUEST(&queue->tx, cons++); 714 + RING_COPY_REQUEST(&queue->tx, cons++, txp); 713 715 } while (1); 714 716 queue->tx.req_cons = cons; 715 717 } ··· 776 778 if (drop_err) 777 779 txp = &dropped_tx; 778 780 779 - memcpy(txp, RING_GET_REQUEST(&queue->tx, cons + slots), 780 - sizeof(*txp)); 781 + RING_COPY_REQUEST(&queue->tx, cons + slots, txp); 781 782 782 783 /* If the guest submitted a frame >= 64 KiB then 783 784 * first->size overflowed and following slots will ··· 1109 1112 return -EBADR; 1110 1113 } 1111 1114 1112 - memcpy(&extra, RING_GET_REQUEST(&queue->tx, cons), 1113 - sizeof(extra)); 1115 + RING_COPY_REQUEST(&queue->tx, cons, &extra); 1114 1116 if (unlikely(!extra.type || 1115 1117 extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) { 1116 1118 queue->tx.req_cons = ++cons; ··· 1318 1322 1319 1323 idx = queue->tx.req_cons; 1320 1324 rmb(); /* Ensure that we see the request before we copy it. */ 1321 - memcpy(&txreq, RING_GET_REQUEST(&queue->tx, idx), sizeof(txreq)); 1325 + RING_COPY_REQUEST(&queue->tx, idx, &txreq); 1322 1326 1323 1327 /* Credit-based scheduling. */ 1324 1328 if (txreq.size > queue->remaining_credit &&
+1
drivers/phy/Kconfig
··· 233 233 tristate "Allwinner sun9i SoC USB PHY driver" 234 234 depends on ARCH_SUNXI && HAS_IOMEM && OF 235 235 depends on RESET_CONTROLLER 236 + depends on USB_COMMON 236 237 select GENERIC_PHY 237 238 help 238 239 Enable this to support the transceiver that is part of Allwinner
+12 -4
drivers/phy/phy-bcm-cygnus-pcie.c
··· 128 128 struct phy_provider *provider; 129 129 struct resource *res; 130 130 unsigned cnt = 0; 131 + int ret; 131 132 132 133 if (of_get_child_count(node) == 0) { 133 134 dev_err(dev, "PHY no child node\n"); ··· 155 154 if (of_property_read_u32(child, "reg", &id)) { 156 155 dev_err(dev, "missing reg property for %s\n", 157 156 child->name); 158 - return -EINVAL; 157 + ret = -EINVAL; 158 + goto put_child; 159 159 } 160 160 161 161 if (id >= MAX_NUM_PHYS) { 162 162 dev_err(dev, "invalid PHY id: %u\n", id); 163 - return -EINVAL; 163 + ret = -EINVAL; 164 + goto put_child; 164 165 } 165 166 166 167 if (core->phys[id].phy) { 167 168 dev_err(dev, "duplicated PHY id: %u\n", id); 168 - return -EINVAL; 169 + ret = -EINVAL; 170 + goto put_child; 169 171 } 170 172 171 173 p = &core->phys[id]; 172 174 p->phy = devm_phy_create(dev, child, &cygnus_pcie_phy_ops); 173 175 if (IS_ERR(p->phy)) { 174 176 dev_err(dev, "failed to create PHY\n"); 175 - return PTR_ERR(p->phy); 177 + ret = PTR_ERR(p->phy); 178 + goto put_child; 176 179 } 177 180 178 181 p->core = core; ··· 196 191 dev_dbg(dev, "registered %u PCIe PHY(s)\n", cnt); 197 192 198 193 return 0; 194 + put_child: 195 + of_node_put(child); 196 + return ret; 199 197 } 200 198 201 199 static const struct of_device_id cygnus_pcie_phy_match_table[] = {
+14 -6
drivers/phy/phy-berlin-sata.c
··· 195 195 struct phy_provider *phy_provider; 196 196 struct phy_berlin_priv *priv; 197 197 struct resource *res; 198 - int i = 0; 198 + int ret, i = 0; 199 199 u32 phy_id; 200 200 201 201 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); ··· 237 237 if (of_property_read_u32(child, "reg", &phy_id)) { 238 238 dev_err(dev, "missing reg property in node %s\n", 239 239 child->name); 240 - return -EINVAL; 240 + ret = -EINVAL; 241 + goto put_child; 241 242 } 242 243 243 244 if (phy_id >= ARRAY_SIZE(phy_berlin_power_down_bits)) { 244 245 dev_err(dev, "invalid reg in node %s\n", child->name); 245 - return -EINVAL; 246 + ret = -EINVAL; 247 + goto put_child; 246 248 } 247 249 248 250 phy_desc = devm_kzalloc(dev, sizeof(*phy_desc), GFP_KERNEL); 249 - if (!phy_desc) 250 - return -ENOMEM; 251 + if (!phy_desc) { 252 + ret = -ENOMEM; 253 + goto put_child; 254 + } 251 255 252 256 phy = devm_phy_create(dev, NULL, &phy_berlin_sata_ops); 253 257 if (IS_ERR(phy)) { 254 258 dev_err(dev, "failed to create PHY %d\n", phy_id); 255 - return PTR_ERR(phy); 259 + ret = PTR_ERR(phy); 260 + goto put_child; 256 261 } 257 262 258 263 phy_desc->phy = phy; ··· 274 269 phy_provider = 275 270 devm_of_phy_provider_register(dev, phy_berlin_sata_phy_xlate); 276 271 return PTR_ERR_OR_ZERO(phy_provider); 272 + put_child: 273 + of_node_put(child); 274 + return ret; 277 275 } 278 276 279 277 static const struct of_device_id phy_berlin_sata_of_match[] = {
+12 -5
drivers/phy/phy-brcmstb-sata.c
··· 140 140 struct brcm_sata_phy *priv; 141 141 struct resource *res; 142 142 struct phy_provider *provider; 143 - int count = 0; 143 + int ret, count = 0; 144 144 145 145 if (of_get_child_count(dn) == 0) 146 146 return -ENODEV; ··· 163 163 if (of_property_read_u32(child, "reg", &id)) { 164 164 dev_err(dev, "missing reg property in node %s\n", 165 165 child->name); 166 - return -EINVAL; 166 + ret = -EINVAL; 167 + goto put_child; 167 168 } 168 169 169 170 if (id >= MAX_PORTS) { 170 171 dev_err(dev, "invalid reg: %u\n", id); 171 - return -EINVAL; 172 + ret = -EINVAL; 173 + goto put_child; 172 174 } 173 175 if (priv->phys[id].phy) { 174 176 dev_err(dev, "already registered port %u\n", id); 175 - return -EINVAL; 177 + ret = -EINVAL; 178 + goto put_child; 176 179 } 177 180 178 181 port = &priv->phys[id]; ··· 185 182 port->ssc_en = of_property_read_bool(child, "brcm,enable-ssc"); 186 183 if (IS_ERR(port->phy)) { 187 184 dev_err(dev, "failed to create PHY\n"); 188 - return PTR_ERR(port->phy); 185 + ret = PTR_ERR(port->phy); 186 + goto put_child; 189 187 } 190 188 191 189 phy_set_drvdata(port->phy, port); ··· 202 198 dev_info(dev, "registered %d port(s)\n", count); 203 199 204 200 return 0; 201 + put_child: 202 + of_node_put(child); 203 + return ret; 205 204 } 206 205 207 206 static struct platform_driver brcm_sata_phy_driver = {
+15 -6
drivers/phy/phy-core.c
··· 636 636 * @np: node containing the phy 637 637 * @index: index of the phy 638 638 * 639 - * Gets the phy using _of_phy_get(), and associates a device with it using 640 - * devres. On driver detach, release function is invoked on the devres data, 639 + * Gets the phy using _of_phy_get(), then gets a refcount to it, 640 + * and associates a device with it using devres. On driver detach, 641 + * release function is invoked on the devres data, 641 642 * then, devres data is freed. 642 643 * 643 644 */ ··· 652 651 return ERR_PTR(-ENOMEM); 653 652 654 653 phy = _of_phy_get(np, index); 655 - if (!IS_ERR(phy)) { 656 - *ptr = phy; 657 - devres_add(dev, ptr); 658 - } else { 654 + if (IS_ERR(phy)) { 659 655 devres_free(ptr); 656 + return phy; 660 657 } 658 + 659 + if (!try_module_get(phy->ops->owner)) { 660 + devres_free(ptr); 661 + return ERR_PTR(-EPROBE_DEFER); 662 + } 663 + 664 + get_device(&phy->dev); 665 + 666 + *ptr = phy; 667 + devres_add(dev, ptr); 661 668 662 669 return phy; 663 670 }
+11 -5
drivers/phy/phy-miphy28lp.c
··· 1226 1226 1227 1227 miphy_phy = devm_kzalloc(&pdev->dev, sizeof(*miphy_phy), 1228 1228 GFP_KERNEL); 1229 - if (!miphy_phy) 1230 - return -ENOMEM; 1229 + if (!miphy_phy) { 1230 + ret = -ENOMEM; 1231 + goto put_child; 1232 + } 1231 1233 1232 1234 miphy_dev->phys[port] = miphy_phy; 1233 1235 1234 1236 phy = devm_phy_create(&pdev->dev, child, &miphy28lp_ops); 1235 1237 if (IS_ERR(phy)) { 1236 1238 dev_err(&pdev->dev, "failed to create PHY\n"); 1237 - return PTR_ERR(phy); 1239 + ret = PTR_ERR(phy); 1240 + goto put_child; 1238 1241 } 1239 1242 1240 1243 miphy_dev->phys[port]->phy = phy; ··· 1245 1242 1246 1243 ret = miphy28lp_of_probe(child, miphy_phy); 1247 1244 if (ret) 1248 - return ret; 1245 + goto put_child; 1249 1246 1250 1247 ret = miphy28lp_probe_resets(child, miphy_dev->phys[port]); 1251 1248 if (ret) 1252 - return ret; 1249 + goto put_child; 1253 1250 1254 1251 phy_set_drvdata(phy, miphy_dev->phys[port]); 1255 1252 port++; ··· 1258 1255 1259 1256 provider = devm_of_phy_provider_register(&pdev->dev, miphy28lp_xlate); 1260 1257 return PTR_ERR_OR_ZERO(provider); 1258 + put_child: 1259 + of_node_put(child); 1260 + return ret; 1261 1261 } 1262 1262 1263 1263 static const struct of_device_id miphy28lp_of_match[] = {
+11 -5
drivers/phy/phy-miphy365x.c
··· 566 566 567 567 miphy_phy = devm_kzalloc(&pdev->dev, sizeof(*miphy_phy), 568 568 GFP_KERNEL); 569 - if (!miphy_phy) 570 - return -ENOMEM; 569 + if (!miphy_phy) { 570 + ret = -ENOMEM; 571 + goto put_child; 572 + } 571 573 572 574 miphy_dev->phys[port] = miphy_phy; 573 575 574 576 phy = devm_phy_create(&pdev->dev, child, &miphy365x_ops); 575 577 if (IS_ERR(phy)) { 576 578 dev_err(&pdev->dev, "failed to create PHY\n"); 577 - return PTR_ERR(phy); 579 + ret = PTR_ERR(phy); 580 + goto put_child; 578 581 } 579 582 580 583 miphy_dev->phys[port]->phy = phy; 581 584 582 585 ret = miphy365x_of_probe(child, miphy_phy); 583 586 if (ret) 584 - return ret; 587 + goto put_child; 585 588 586 589 phy_set_drvdata(phy, miphy_dev->phys[port]); 587 590 ··· 594 591 &miphy_phy->ctrlreg); 595 592 if (ret) { 596 593 dev_err(&pdev->dev, "No sysconfig offset found\n"); 597 - return ret; 594 + goto put_child; 598 595 } 599 596 } 600 597 601 598 provider = devm_of_phy_provider_register(&pdev->dev, miphy365x_xlate); 602 599 return PTR_ERR_OR_ZERO(provider); 600 + put_child: 601 + of_node_put(child); 602 + return ret; 603 603 } 604 604 605 605 static const struct of_device_id miphy365x_of_match[] = {
+13 -7
drivers/phy/phy-mt65xx-usb3.c
··· 415 415 struct resource *sif_res; 416 416 struct mt65xx_u3phy *u3phy; 417 417 struct resource res; 418 - int port; 418 + int port, retval; 419 419 420 420 u3phy = devm_kzalloc(dev, sizeof(*u3phy), GFP_KERNEL); 421 421 if (!u3phy) ··· 447 447 for_each_child_of_node(np, child_np) { 448 448 struct mt65xx_phy_instance *instance; 449 449 struct phy *phy; 450 - int retval; 451 450 452 451 instance = devm_kzalloc(dev, sizeof(*instance), GFP_KERNEL); 453 - if (!instance) 454 - return -ENOMEM; 452 + if (!instance) { 453 + retval = -ENOMEM; 454 + goto put_child; 455 + } 455 456 456 457 u3phy->phys[port] = instance; 457 458 458 459 phy = devm_phy_create(dev, child_np, &mt65xx_u3phy_ops); 459 460 if (IS_ERR(phy)) { 460 461 dev_err(dev, "failed to create phy\n"); 461 - return PTR_ERR(phy); 462 + retval = PTR_ERR(phy); 463 + goto put_child; 462 464 } 463 465 464 466 retval = of_address_to_resource(child_np, 0, &res); 465 467 if (retval) { 466 468 dev_err(dev, "failed to get address resource(id-%d)\n", 467 469 port); 468 - return retval; 470 + goto put_child; 469 471 } 470 472 471 473 instance->port_base = devm_ioremap_resource(&phy->dev, &res); 472 474 if (IS_ERR(instance->port_base)) { 473 475 dev_err(dev, "failed to remap phy regs\n"); 474 - return PTR_ERR(instance->port_base); 476 + retval = PTR_ERR(instance->port_base); 477 + goto put_child; 475 478 } 476 479 477 480 instance->phy = phy; ··· 486 483 provider = devm_of_phy_provider_register(dev, mt65xx_phy_xlate); 487 484 488 485 return PTR_ERR_OR_ZERO(provider); 486 + put_child: 487 + of_node_put(child_np); 488 + return retval; 489 489 } 490 490 491 491 static const struct of_device_id mt65xx_u3phy_id_table[] = {
+12 -5
drivers/phy/phy-rockchip-usb.c
··· 108 108 109 109 for_each_available_child_of_node(dev->of_node, child) { 110 110 rk_phy = devm_kzalloc(dev, sizeof(*rk_phy), GFP_KERNEL); 111 - if (!rk_phy) 112 - return -ENOMEM; 111 + if (!rk_phy) { 112 + err = -ENOMEM; 113 + goto put_child; 114 + } 113 115 114 116 if (of_property_read_u32(child, "reg", &reg_offset)) { 115 117 dev_err(dev, "missing reg property in node %s\n", 116 118 child->name); 117 - return -EINVAL; 119 + err = -EINVAL; 120 + goto put_child; 118 121 } 119 122 120 123 rk_phy->reg_offset = reg_offset; ··· 130 127 rk_phy->phy = devm_phy_create(dev, child, &ops); 131 128 if (IS_ERR(rk_phy->phy)) { 132 129 dev_err(dev, "failed to create PHY\n"); 133 - return PTR_ERR(rk_phy->phy); 130 + err = PTR_ERR(rk_phy->phy); 131 + goto put_child; 134 132 } 135 133 phy_set_drvdata(rk_phy->phy, rk_phy); 136 134 137 135 /* only power up usb phy when it use, so disable it when init*/ 138 136 err = rockchip_usb_phy_power(rk_phy, 1); 139 137 if (err) 140 - return err; 138 + goto put_child; 141 139 } 142 140 143 141 phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); 144 142 return PTR_ERR_OR_ZERO(phy_provider); 143 + put_child: 144 + of_node_put(child); 145 + return err; 145 146 } 146 147 147 148 static const struct of_device_id rockchip_usb_phy_dt_ids[] = {
+7 -6
drivers/pinctrl/bcm/pinctrl-bcm2835.c
··· 342 342 return bcm2835_gpio_get_bit(pc, GPLEV0, offset); 343 343 } 344 344 345 - static int bcm2835_gpio_direction_output(struct gpio_chip *chip, 346 - unsigned offset, int value) 347 - { 348 - return pinctrl_gpio_direction_output(chip->base + offset); 349 - } 350 - 351 345 static void bcm2835_gpio_set(struct gpio_chip *chip, unsigned offset, int value) 352 346 { 353 347 struct bcm2835_pinctrl *pc = dev_get_drvdata(chip->dev); 354 348 355 349 bcm2835_gpio_set_bit(pc, value ? GPSET0 : GPCLR0, offset); 350 + } 351 + 352 + static int bcm2835_gpio_direction_output(struct gpio_chip *chip, 353 + unsigned offset, int value) 354 + { 355 + bcm2835_gpio_set(chip, offset, value); 356 + return pinctrl_gpio_direction_output(chip->base + offset); 356 357 } 357 358 358 359 static int bcm2835_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+1 -1
drivers/pinctrl/freescale/pinctrl-vf610.c
··· 299 299 static struct imx_pinctrl_soc_info vf610_pinctrl_info = { 300 300 .pins = vf610_pinctrl_pads, 301 301 .npins = ARRAY_SIZE(vf610_pinctrl_pads), 302 - .flags = SHARE_MUX_CONF_REG, 302 + .flags = SHARE_MUX_CONF_REG | ZERO_OFFSET_VALID, 303 303 }; 304 304 305 305 static const struct of_device_id vf610_pinctrl_of_match[] = {
+1
drivers/pinctrl/intel/pinctrl-broxton.c
··· 28 28 .padcfglock_offset = BXT_PADCFGLOCK, \ 29 29 .hostown_offset = BXT_HOSTSW_OWN, \ 30 30 .ie_offset = BXT_GPI_IE, \ 31 + .gpp_size = 32, \ 31 32 .pin_base = (s), \ 32 33 .npins = ((e) - (s) + 1), \ 33 34 }
+20 -21
drivers/pinctrl/intel/pinctrl-intel.c
··· 25 25 26 26 #include "pinctrl-intel.h" 27 27 28 - /* Maximum number of pads in each group */ 29 - #define NPADS_IN_GPP 24 30 - 31 28 /* Offset from regs */ 32 29 #define PADBAR 0x00c 33 30 #define GPI_IS 0x100 ··· 34 37 #define PADOWN_BITS 4 35 38 #define PADOWN_SHIFT(p) ((p) % 8 * PADOWN_BITS) 36 39 #define PADOWN_MASK(p) (0xf << PADOWN_SHIFT(p)) 40 + #define PADOWN_GPP(p) ((p) / 8) 37 41 38 42 /* Offset from pad_regs */ 39 43 #define PADCFG0 0x000 ··· 140 142 static bool intel_pad_owned_by_host(struct intel_pinctrl *pctrl, unsigned pin) 141 143 { 142 144 const struct intel_community *community; 143 - unsigned padno, gpp, gpp_offset, offset; 145 + unsigned padno, gpp, offset, group; 144 146 void __iomem *padown; 145 147 146 148 community = intel_get_community(pctrl, pin); ··· 150 152 return true; 151 153 152 154 padno = pin_to_padno(community, pin); 153 - gpp = padno / NPADS_IN_GPP; 154 - gpp_offset = padno % NPADS_IN_GPP; 155 - offset = community->padown_offset + gpp * 16 + (gpp_offset / 8) * 4; 155 + group = padno / community->gpp_size; 156 + gpp = PADOWN_GPP(padno % community->gpp_size); 157 + offset = community->padown_offset + 0x10 * group + gpp * 4; 156 158 padown = community->regs + offset; 157 159 158 160 return !(readl(padown) & PADOWN_MASK(padno)); ··· 171 173 return false; 172 174 173 175 padno = pin_to_padno(community, pin); 174 - gpp = padno / NPADS_IN_GPP; 176 + gpp = padno / community->gpp_size; 175 177 offset = community->hostown_offset + gpp * 4; 176 178 hostown = community->regs + offset; 177 179 178 - return !(readl(hostown) & BIT(padno % NPADS_IN_GPP)); 180 + return !(readl(hostown) & BIT(padno % community->gpp_size)); 179 181 } 180 182 181 183 static bool intel_pad_locked(struct intel_pinctrl *pctrl, unsigned pin) ··· 191 193 return false; 192 194 193 195 padno = pin_to_padno(community, pin); 194 - gpp = padno / NPADS_IN_GPP; 196 + gpp = padno / community->gpp_size; 195 197 196 198 /* 197 199 * If PADCFGLOCK and PADCFGLOCKTX bits are both 
clear for this pad, ··· 200 202 */ 201 203 offset = community->padcfglock_offset + gpp * 8; 202 204 value = readl(community->regs + offset); 203 - if (value & BIT(pin % NPADS_IN_GPP)) 205 + if (value & BIT(pin % community->gpp_size)) 204 206 return true; 205 207 206 208 offset = community->padcfglock_offset + 4 + gpp * 8; 207 209 value = readl(community->regs + offset); 208 - if (value & BIT(pin % NPADS_IN_GPP)) 210 + if (value & BIT(pin % community->gpp_size)) 209 211 return true; 210 212 211 213 return false; ··· 661 663 community = intel_get_community(pctrl, pin); 662 664 if (community) { 663 665 unsigned padno = pin_to_padno(community, pin); 664 - unsigned gpp_offset = padno % NPADS_IN_GPP; 665 - unsigned gpp = padno / NPADS_IN_GPP; 666 + unsigned gpp_offset = padno % community->gpp_size; 667 + unsigned gpp = padno / community->gpp_size; 666 668 667 669 writel(BIT(gpp_offset), community->regs + GPI_IS + gpp * 4); 668 670 } ··· 683 685 community = intel_get_community(pctrl, pin); 684 686 if (community) { 685 687 unsigned padno = pin_to_padno(community, pin); 686 - unsigned gpp_offset = padno % NPADS_IN_GPP; 687 - unsigned gpp = padno / NPADS_IN_GPP; 688 + unsigned gpp_offset = padno % community->gpp_size; 689 + unsigned gpp = padno / community->gpp_size; 688 690 void __iomem *reg; 689 691 u32 value; 690 692 ··· 778 780 return -EINVAL; 779 781 780 782 padno = pin_to_padno(community, pin); 781 - gpp = padno / NPADS_IN_GPP; 782 - gpp_offset = padno % NPADS_IN_GPP; 783 + gpp = padno / community->gpp_size; 784 + gpp_offset = padno % community->gpp_size; 783 785 784 786 /* Clear the existing wake status */ 785 787 writel(BIT(gpp_offset), community->regs + GPI_GPE_STS + gpp * 4); ··· 817 819 /* Only interrupts that are enabled */ 818 820 pending &= enabled; 819 821 820 - for_each_set_bit(gpp_offset, &pending, NPADS_IN_GPP) { 822 + for_each_set_bit(gpp_offset, &pending, community->gpp_size) { 821 823 unsigned padno, irq; 822 824 823 825 /* 824 826 * The last group in 
community can have less pins 825 827 * than NPADS_IN_GPP. 826 828 */ 827 - padno = gpp_offset + gpp * NPADS_IN_GPP; 829 + padno = gpp_offset + gpp * community->gpp_size; 828 830 if (padno >= community->npins) 829 831 break; 830 832 ··· 1000 1002 1001 1003 community->regs = regs; 1002 1004 community->pad_regs = regs + padbar; 1003 - community->ngpps = DIV_ROUND_UP(community->npins, NPADS_IN_GPP); 1005 + community->ngpps = DIV_ROUND_UP(community->npins, 1006 + community->gpp_size); 1004 1007 } 1005 1008 1006 1009 irq = platform_get_irq(pdev, 0);
+3
drivers/pinctrl/intel/pinctrl-intel.h
··· 55 55 * ACPI). 56 56 * @ie_offset: Register offset of GPI_IE from @regs. 57 57 * @pin_base: Starting pin of pins in this community 58 + * @gpp_size: Maximum number of pads in each group, such as PADCFGLOCK, 59 + * HOSTSW_OWN, GPI_IS, GPI_IE, etc. 58 60 * @npins: Number of pins in this community 59 61 * @regs: Community specific common registers (reserved for core driver) 60 62 * @pad_regs: Community specific pad registers (reserved for core driver) ··· 70 68 unsigned hostown_offset; 71 69 unsigned ie_offset; 72 70 unsigned pin_base; 71 + unsigned gpp_size; 73 72 size_t npins; 74 73 void __iomem *regs; 75 74 void __iomem *pad_regs;
+1
drivers/pinctrl/intel/pinctrl-sunrisepoint.c
··· 30 30 .padcfglock_offset = SPT_PADCFGLOCK, \ 31 31 .hostown_offset = SPT_HOSTSW_OWN, \ 32 32 .ie_offset = SPT_GPI_IE, \ 33 + .gpp_size = 24, \ 33 34 .pin_base = (s), \ 34 35 .npins = ((e) - (s) + 1), \ 35 36 }
+5 -2
drivers/powercap/intel_rapl.c
··· 1341 1341 1342 1342 for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) { 1343 1343 /* check if the domain is locked by BIOS */ 1344 - if (rapl_read_data_raw(rd, FW_LOCK, false, &locked)) { 1344 + ret = rapl_read_data_raw(rd, FW_LOCK, false, &locked); 1345 + if (ret) 1346 + return ret; 1347 + if (locked) { 1345 1348 pr_info("RAPL package %d domain %s locked by BIOS\n", 1346 1349 rp->id, rd->name); 1347 - rd->state |= DOMAIN_STATE_BIOS_LOCKED; 1350 + rd->state |= DOMAIN_STATE_BIOS_LOCKED; 1348 1351 } 1349 1352 } 1350 1353
+10 -11
drivers/rtc/rtc-da9063.c
··· 483 483 484 484 platform_set_drvdata(pdev, rtc); 485 485 486 - irq_alarm = platform_get_irq_byname(pdev, "ALARM"); 487 - ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL, 488 - da9063_alarm_event, 489 - IRQF_TRIGGER_LOW | IRQF_ONESHOT, 490 - "ALARM", rtc); 491 - if (ret) { 492 - dev_err(&pdev->dev, "Failed to request ALARM IRQ %d: %d\n", 493 - irq_alarm, ret); 494 - return ret; 495 - } 496 - 497 486 rtc->rtc_dev = devm_rtc_device_register(&pdev->dev, DA9063_DRVNAME_RTC, 498 487 &da9063_rtc_ops, THIS_MODULE); 499 488 if (IS_ERR(rtc->rtc_dev)) ··· 490 501 491 502 da9063_data_to_tm(data, &rtc->alarm_time, rtc); 492 503 rtc->rtc_sync = false; 504 + 505 + irq_alarm = platform_get_irq_byname(pdev, "ALARM"); 506 + ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL, 507 + da9063_alarm_event, 508 + IRQF_TRIGGER_LOW | IRQF_ONESHOT, 509 + "ALARM", rtc); 510 + if (ret) 511 + dev_err(&pdev->dev, "Failed to request ALARM IRQ %d: %d\n", 512 + irq_alarm, ret); 513 + 493 514 return ret; 494 515 } 495 516
+44 -4
drivers/rtc/rtc-rk808.c
··· 56 56 int irq; 57 57 }; 58 58 59 + /* 60 + * The Rockchip calendar used by the RK808 counts November with 31 days. We use 61 + * these translation functions to convert its dates to/from the Gregorian 62 + * calendar used by the rest of the world. We arbitrarily define Jan 1st, 2016 63 + * as the day when both calendars were in sync, and treat all other dates 64 + * relative to that. 65 + * NOTE: Other system software (e.g. firmware) that reads the same hardware must 66 + * implement this exact same conversion algorithm, with the same anchor date. 67 + */ 68 + static time64_t nov2dec_transitions(struct rtc_time *tm) 69 + { 70 + return (tm->tm_year + 1900) - 2016 + (tm->tm_mon + 1 > 11 ? 1 : 0); 71 + } 72 + 73 + static void rockchip_to_gregorian(struct rtc_time *tm) 74 + { 75 + /* If it's Nov 31st, rtc_tm_to_time64() will count that like Dec 1st */ 76 + time64_t time = rtc_tm_to_time64(tm); 77 + rtc_time64_to_tm(time + nov2dec_transitions(tm) * 86400, tm); 78 + } 79 + 80 + static void gregorian_to_rockchip(struct rtc_time *tm) 81 + { 82 + time64_t extra_days = nov2dec_transitions(tm); 83 + time64_t time = rtc_tm_to_time64(tm); 84 + rtc_time64_to_tm(time - extra_days * 86400, tm); 85 + 86 + /* Compensate if we went back over Nov 31st (will work up to 2381) */ 87 + if (nov2dec_transitions(tm) < extra_days) { 88 + if (tm->tm_mon + 1 == 11) 89 + tm->tm_mday++; /* This may result in 31! 
*/ 90 + else 91 + rtc_time64_to_tm(time - (extra_days - 1) * 86400, tm); 92 + } 93 + } 94 + 59 95 /* Read current time and date in RTC */ 60 96 static int rk808_rtc_readtime(struct device *dev, struct rtc_time *tm) 61 97 { ··· 137 101 tm->tm_mon = (bcd2bin(rtc_data[4] & MONTHS_REG_MSK)) - 1; 138 102 tm->tm_year = (bcd2bin(rtc_data[5] & YEARS_REG_MSK)) + 100; 139 103 tm->tm_wday = bcd2bin(rtc_data[6] & WEEKS_REG_MSK); 104 + rockchip_to_gregorian(tm); 140 105 dev_dbg(dev, "RTC date/time %4d-%02d-%02d(%d) %02d:%02d:%02d\n", 141 106 1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday, 142 - tm->tm_wday, tm->tm_hour , tm->tm_min, tm->tm_sec); 107 + tm->tm_wday, tm->tm_hour, tm->tm_min, tm->tm_sec); 143 108 144 109 return ret; 145 110 } ··· 153 116 u8 rtc_data[NUM_TIME_REGS]; 154 117 int ret; 155 118 119 + dev_dbg(dev, "set RTC date/time %4d-%02d-%02d(%d) %02d:%02d:%02d\n", 120 + 1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday, 121 + tm->tm_wday, tm->tm_hour, tm->tm_min, tm->tm_sec); 122 + gregorian_to_rockchip(tm); 156 123 rtc_data[0] = bin2bcd(tm->tm_sec); 157 124 rtc_data[1] = bin2bcd(tm->tm_min); 158 125 rtc_data[2] = bin2bcd(tm->tm_hour); ··· 164 123 rtc_data[4] = bin2bcd(tm->tm_mon + 1); 165 124 rtc_data[5] = bin2bcd(tm->tm_year - 100); 166 125 rtc_data[6] = bin2bcd(tm->tm_wday); 167 - dev_dbg(dev, "set RTC date/time %4d-%02d-%02d(%d) %02d:%02d:%02d\n", 168 - 1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday, 169 - tm->tm_wday, tm->tm_hour , tm->tm_min, tm->tm_sec); 170 126 171 127 /* Stop RTC while updating the RTC registers */ 172 128 ret = regmap_update_bits(rk808->regmap, RK808_RTC_CTRL_REG, ··· 208 170 alrm->time.tm_mday = bcd2bin(alrm_data[3] & DAYS_REG_MSK); 209 171 alrm->time.tm_mon = (bcd2bin(alrm_data[4] & MONTHS_REG_MSK)) - 1; 210 172 alrm->time.tm_year = (bcd2bin(alrm_data[5] & YEARS_REG_MSK)) + 100; 173 + rockchip_to_gregorian(&alrm->time); 211 174 212 175 ret = regmap_read(rk808->regmap, RK808_RTC_INT_REG, &int_reg); 213 176 if (ret) { ··· 266 227 
alrm->time.tm_mday, alrm->time.tm_wday, alrm->time.tm_hour, 267 228 alrm->time.tm_min, alrm->time.tm_sec); 268 229 230 + gregorian_to_rockchip(&alrm->time); 269 231 alrm_data[0] = bin2bcd(alrm->time.tm_sec); 270 232 alrm_data[1] = bin2bcd(alrm->time.tm_min); 271 233 alrm_data[2] = bin2bcd(alrm->time.tm_hour);
+10 -10
drivers/scsi/scsi_pm.c
··· 219 219 struct scsi_device *sdev = to_scsi_device(dev); 220 220 int err = 0; 221 221 222 - if (pm && pm->runtime_suspend) { 223 - err = blk_pre_runtime_suspend(sdev->request_queue); 224 - if (err) 225 - return err; 222 + err = blk_pre_runtime_suspend(sdev->request_queue); 223 + if (err) 224 + return err; 225 + if (pm && pm->runtime_suspend) 226 226 err = pm->runtime_suspend(dev); 227 - blk_post_runtime_suspend(sdev->request_queue, err); 228 - } 227 + blk_post_runtime_suspend(sdev->request_queue, err); 228 + 229 229 return err; 230 230 } 231 231 ··· 248 248 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 249 249 int err = 0; 250 250 251 - if (pm && pm->runtime_resume) { 252 - blk_pre_runtime_resume(sdev->request_queue); 251 + blk_pre_runtime_resume(sdev->request_queue); 252 + if (pm && pm->runtime_resume) 253 253 err = pm->runtime_resume(dev); 254 - blk_post_runtime_resume(sdev->request_queue, err); 255 - } 254 + blk_post_runtime_resume(sdev->request_queue, err); 255 + 256 256 return err; 257 257 } 258 258
+28 -2
drivers/scsi/ses.c
··· 84 84 static int ses_recv_diag(struct scsi_device *sdev, int page_code, 85 85 void *buf, int bufflen) 86 86 { 87 + int ret; 87 88 unsigned char cmd[] = { 88 89 RECEIVE_DIAGNOSTIC, 89 90 1, /* Set PCV bit */ ··· 93 92 bufflen & 0xff, 94 93 0 95 94 }; 95 + unsigned char recv_page_code; 96 96 97 - return scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen, 97 + ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen, 98 98 NULL, SES_TIMEOUT, SES_RETRIES, NULL); 99 + if (unlikely(!ret)) 100 + return ret; 101 + 102 + recv_page_code = ((unsigned char *)buf)[0]; 103 + 104 + if (likely(recv_page_code == page_code)) 105 + return ret; 106 + 107 + /* successful diagnostic but wrong page code. This happens to some 108 + * USB devices, just print a message and pretend there was an error */ 109 + 110 + sdev_printk(KERN_ERR, sdev, 111 + "Wrong diagnostic page; asked for %d got %u\n", 112 + page_code, recv_page_code); 113 + 114 + return -EINVAL; 99 115 } 100 116 101 117 static int ses_send_diag(struct scsi_device *sdev, int page_code, ··· 559 541 if (desc_ptr) 560 542 desc_ptr += len; 561 543 562 - if (addl_desc_ptr) 544 + if (addl_desc_ptr && 545 + /* only find additional descriptions for specific devices */ 546 + (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE || 547 + type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE || 548 + type_ptr[0] == ENCLOSURE_COMPONENT_SAS_EXPANDER || 549 + /* these elements are optional */ 550 + type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_TARGET_PORT || 551 + type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT || 552 + type_ptr[0] == ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS)) 563 553 addl_desc_ptr += addl_desc_ptr[1] + 2; 564 554 565 555 }
+6 -6
drivers/spi/spi-fsl-dspi.c
··· 167 167 { 168 168 unsigned int val; 169 169 170 - regmap_read(dspi->regmap, SPI_CTAR(dspi->cs), &val); 170 + regmap_read(dspi->regmap, SPI_CTAR(0), &val); 171 171 172 172 return ((val & SPI_FRAME_BITS_MASK) == SPI_FRAME_BITS(8)) ? 0 : 1; 173 173 } ··· 257 257 258 258 return SPI_PUSHR_TXDATA(d16) | 259 259 SPI_PUSHR_PCS(dspi->cs) | 260 - SPI_PUSHR_CTAS(dspi->cs) | 260 + SPI_PUSHR_CTAS(0) | 261 261 SPI_PUSHR_CONT; 262 262 } 263 263 ··· 290 290 */ 291 291 if (tx_word && (dspi->len == 1)) { 292 292 dspi->dataflags |= TRAN_STATE_WORD_ODD_NUM; 293 - regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs), 293 + regmap_update_bits(dspi->regmap, SPI_CTAR(0), 294 294 SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(8)); 295 295 tx_word = 0; 296 296 } ··· 339 339 340 340 if (tx_word && (dspi->len == 1)) { 341 341 dspi->dataflags |= TRAN_STATE_WORD_ODD_NUM; 342 - regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs), 342 + regmap_update_bits(dspi->regmap, SPI_CTAR(0), 343 343 SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(8)); 344 344 tx_word = 0; 345 345 } ··· 407 407 regmap_update_bits(dspi->regmap, SPI_MCR, 408 408 SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF, 409 409 SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF); 410 - regmap_write(dspi->regmap, SPI_CTAR(dspi->cs), 410 + regmap_write(dspi->regmap, SPI_CTAR(0), 411 411 dspi->cur_chip->ctar_val); 412 412 413 413 trans_mode = dspi->devtype_data->trans_mode; ··· 566 566 if (!dspi->len) { 567 567 if (dspi->dataflags & TRAN_STATE_WORD_ODD_NUM) { 568 568 regmap_update_bits(dspi->regmap, 569 - SPI_CTAR(dspi->cs), 569 + SPI_CTAR(0), 570 570 SPI_FRAME_BITS_MASK, 571 571 SPI_FRAME_BITS(16)); 572 572 dspi->dataflags &= ~TRAN_STATE_WORD_ODD_NUM;
+1 -1
drivers/spi/spi.c
··· 1705 1705 master->bus_num = -1; 1706 1706 master->num_chipselect = 1; 1707 1707 master->dev.class = &spi_master_class; 1708 - master->dev.parent = get_device(dev); 1708 + master->dev.parent = dev; 1709 1709 spi_master_set_devdata(master, &master[1]); 1710 1710 1711 1711 return master;
+1 -1
drivers/spi/spidev.c
··· 651 651 kfree(spidev->rx_buffer); 652 652 spidev->rx_buffer = NULL; 653 653 654 + spin_lock_irq(&spidev->spi_lock); 654 655 if (spidev->spi) 655 656 spidev->speed_hz = spidev->spi->max_speed_hz; 656 657 657 658 /* ... after we unbound from the underlying device? */ 658 - spin_lock_irq(&spidev->spi_lock); 659 659 dofree = (spidev->spi == NULL); 660 660 spin_unlock_irq(&spidev->spi_lock); 661 661
+2 -2
drivers/staging/android/ion/ion_chunk_heap.c
··· 81 81 err: 82 82 sg = table->sgl; 83 83 for (i -= 1; i >= 0; i--) { 84 - gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK, 84 + gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)), 85 85 sg->length); 86 86 sg = sg_next(sg); 87 87 } ··· 109 109 DMA_BIDIRECTIONAL); 110 110 111 111 for_each_sg(table->sgl, sg, table->nents, i) { 112 - gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK, 112 + gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)), 113 113 sg->length); 114 114 } 115 115 chunk_heap->allocated -= allocated_size;
+9 -13
drivers/tty/n_tty.c
··· 2054 2054 size_t eol; 2055 2055 size_t tail; 2056 2056 int ret, found = 0; 2057 - bool eof_push = 0; 2058 2057 2059 2058 /* N.B. avoid overrun if nr == 0 */ 2060 - n = min(*nr, smp_load_acquire(&ldata->canon_head) - ldata->read_tail); 2061 - if (!n) 2059 + if (!*nr) 2062 2060 return 0; 2061 + 2062 + n = min(*nr + 1, smp_load_acquire(&ldata->canon_head) - ldata->read_tail); 2063 2063 2064 2064 tail = ldata->read_tail & (N_TTY_BUF_SIZE - 1); 2065 2065 size = min_t(size_t, tail + n, N_TTY_BUF_SIZE); ··· 2081 2081 n = eol - tail; 2082 2082 if (n > N_TTY_BUF_SIZE) 2083 2083 n += N_TTY_BUF_SIZE; 2084 - n += found; 2085 - c = n; 2084 + c = n + found; 2086 2085 2087 - if (found && !ldata->push && read_buf(ldata, eol) == __DISABLED_CHAR) { 2088 - n--; 2089 - eof_push = !n && ldata->read_tail != ldata->line_start; 2086 + if (!found || read_buf(ldata, eol) != __DISABLED_CHAR) { 2087 + c = min(*nr, c); 2088 + n = c; 2090 2089 } 2091 2090 2092 2091 n_tty_trace("%s: eol:%zu found:%d n:%zu c:%zu size:%zu more:%zu\n", ··· 2115 2116 ldata->push = 0; 2116 2117 tty_audit_push(tty); 2117 2118 } 2118 - return eof_push ? -EAGAIN : 0; 2119 + return 0; 2119 2120 } 2120 2121 2121 2122 extern ssize_t redirected_tty_write(struct file *, const char __user *, ··· 2272 2273 2273 2274 if (ldata->icanon && !L_EXTPROC(tty)) { 2274 2275 retval = canon_copy_from_read_buf(tty, &b, &nr); 2275 - if (retval == -EAGAIN) { 2276 - retval = 0; 2277 - continue; 2278 - } else if (retval) 2276 + if (retval) 2279 2277 break; 2280 2278 } else { 2281 2279 int uncopied;
+6 -2
drivers/tty/serial/8250/8250_uniphier.c
··· 115 115 */ 116 116 static int uniphier_serial_dl_read(struct uart_8250_port *up) 117 117 { 118 - return readl(up->port.membase + UNIPHIER_UART_DLR); 118 + int offset = UNIPHIER_UART_DLR << up->port.regshift; 119 + 120 + return readl(up->port.membase + offset); 119 121 } 120 122 121 123 static void uniphier_serial_dl_write(struct uart_8250_port *up, int value) 122 124 { 123 - writel(value, up->port.membase + UNIPHIER_UART_DLR); 125 + int offset = UNIPHIER_UART_DLR << up->port.regshift; 126 + 127 + writel(value, up->port.membase + offset); 124 128 } 125 129 126 130 static int uniphier_of_serial_setup(struct device *dev, struct uart_port *port,
+2
drivers/tty/serial/earlycon.c
··· 115 115 if (buf && !parse_options(&early_console_dev, buf)) 116 116 buf = NULL; 117 117 118 + spin_lock_init(&port->lock); 118 119 port->uartclk = BASE_BAUD * 16; 119 120 if (port->mapbase) 120 121 port->membase = earlycon_map(port->mapbase, 64); ··· 203 202 int err; 204 203 struct uart_port *port = &early_console_dev.port; 205 204 205 + spin_lock_init(&port->lock); 206 206 port->iotype = UPIO_MEM; 207 207 port->mapbase = addr; 208 208 port->uartclk = BASE_BAUD * 16;
+1 -1
drivers/tty/serial/sh-sci.c
··· 1437 1437 sg_init_table(sg, 1); 1438 1438 s->rx_buf[i] = buf; 1439 1439 sg_dma_address(sg) = dma; 1440 - sg->length = s->buf_len_rx; 1440 + sg_dma_len(sg) = s->buf_len_rx; 1441 1441 1442 1442 buf += s->buf_len_rx; 1443 1443 dma += s->buf_len_rx;
+1 -1
drivers/tty/tty_buffer.c
··· 450 450 count = disc->ops->receive_buf2(tty, p, f, count); 451 451 else { 452 452 count = min_t(int, count, tty->receive_room); 453 - if (count) 453 + if (count && disc->ops->receive_buf) 454 454 disc->ops->receive_buf(tty, p, f, count); 455 455 } 456 456 return count;
+19 -3
drivers/usb/core/hub.c
··· 1035 1035 unsigned delay; 1036 1036 1037 1037 /* Continue a partial initialization */ 1038 - if (type == HUB_INIT2) 1039 - goto init2; 1040 - if (type == HUB_INIT3) 1038 + if (type == HUB_INIT2 || type == HUB_INIT3) { 1039 + device_lock(hub->intfdev); 1040 + 1041 + /* Was the hub disconnected while we were waiting? */ 1042 + if (hub->disconnected) { 1043 + device_unlock(hub->intfdev); 1044 + kref_put(&hub->kref, hub_release); 1045 + return; 1046 + } 1047 + if (type == HUB_INIT2) 1048 + goto init2; 1041 1049 goto init3; 1050 + } 1051 + kref_get(&hub->kref); 1042 1052 1043 1053 /* The superspeed hub except for root hub has to use Hub Depth 1044 1054 * value as an offset into the route string to locate the bits ··· 1246 1236 queue_delayed_work(system_power_efficient_wq, 1247 1237 &hub->init_work, 1248 1238 msecs_to_jiffies(delay)); 1239 + device_unlock(hub->intfdev); 1249 1240 return; /* Continues at init3: below */ 1250 1241 } else { 1251 1242 msleep(delay); ··· 1268 1257 /* Allow autosuspend if it was suppressed */ 1269 1258 if (type <= HUB_INIT3) 1270 1259 usb_autopm_put_interface_async(to_usb_interface(hub->intfdev)); 1260 + 1261 + if (type == HUB_INIT2 || type == HUB_INIT3) 1262 + device_unlock(hub->intfdev); 1263 + 1264 + kref_put(&hub->kref, hub_release); 1271 1265 } 1272 1266 1273 1267 /* Implement the continuations for the delays above */
+2 -1
drivers/usb/serial/ipaq.c
··· 531 531 * through. Since this has a reasonably high failure rate, we retry 532 532 * several times. 533 533 */ 534 - while (retries--) { 534 + while (retries) { 535 + retries--; 535 536 result = usb_control_msg(serial->dev, 536 537 usb_sndctrlpipe(serial->dev, 0), 0x22, 0x21, 537 538 0x1, 0, NULL, 0, 100);
+12 -1
drivers/video/fbdev/fsl-diu-fb.c
··· 479 479 port = FSL_DIU_PORT_DLVDS; 480 480 } 481 481 482 - return diu_ops.valid_monitor_port(port); 482 + if (diu_ops.valid_monitor_port) 483 + port = diu_ops.valid_monitor_port(port); 484 + 485 + return port; 483 486 } 484 487 485 488 /* ··· 1918 1915 #else 1919 1916 monitor_port = fsl_diu_name_to_port(monitor_string); 1920 1917 #endif 1918 + 1919 + /* 1920 + * Must to verify set_pixel_clock. If not implement on platform, 1921 + * then that means that there is no platform support for the DIU. 1922 + */ 1923 + if (!diu_ops.set_pixel_clock) 1924 + return -ENODEV; 1925 + 1921 1926 pr_info("Freescale Display Interface Unit (DIU) framebuffer driver\n"); 1922 1927 1923 1928 #ifdef CONFIG_NOT_COHERENT_CACHE
+12
drivers/video/fbdev/omap2/dss/venc.c
··· 275 275 .vbp = 41, 276 276 277 277 .interlace = true, 278 + 279 + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, 280 + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, 281 + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, 282 + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, 283 + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE, 278 284 }; 279 285 EXPORT_SYMBOL(omap_dss_pal_timings); 280 286 ··· 296 290 .vbp = 31, 297 291 298 292 .interlace = true, 293 + 294 + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, 295 + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, 296 + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, 297 + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, 298 + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE, 299 299 }; 300 300 EXPORT_SYMBOL(omap_dss_ntsc_timings); 301 301
+18 -5
drivers/xen/events/events_fifo.c
··· 281 281 282 282 static void consume_one_event(unsigned cpu, 283 283 struct evtchn_fifo_control_block *control_block, 284 - unsigned priority, unsigned long *ready) 284 + unsigned priority, unsigned long *ready, 285 + bool drop) 285 286 { 286 287 struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu); 287 288 uint32_t head; ··· 314 313 if (head == 0) 315 314 clear_bit(priority, ready); 316 315 317 - if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) 318 - handle_irq_for_port(port); 316 + if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) { 317 + if (unlikely(drop)) 318 + pr_warn("Dropping pending event for port %u\n", port); 319 + else 320 + handle_irq_for_port(port); 321 + } 319 322 320 323 q->head[priority] = head; 321 324 } 322 325 323 - static void evtchn_fifo_handle_events(unsigned cpu) 326 + static void __evtchn_fifo_handle_events(unsigned cpu, bool drop) 324 327 { 325 328 struct evtchn_fifo_control_block *control_block; 326 329 unsigned long ready; ··· 336 331 337 332 while (ready) { 338 333 q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES); 339 - consume_one_event(cpu, control_block, q, &ready); 334 + consume_one_event(cpu, control_block, q, &ready, drop); 340 335 ready |= xchg(&control_block->ready, 0); 341 336 } 337 + } 338 + 339 + static void evtchn_fifo_handle_events(unsigned cpu) 340 + { 341 + __evtchn_fifo_handle_events(cpu, false); 342 342 } 343 343 344 344 static void evtchn_fifo_resume(void) ··· 429 419 case CPU_UP_PREPARE: 430 420 if (!per_cpu(cpu_control_block, cpu)) 431 421 ret = evtchn_fifo_alloc_control_block(cpu); 422 + break; 423 + case CPU_DEAD: 424 + __evtchn_fifo_handle_events(cpu, true); 432 425 break; 433 426 default: 434 427 break;
+1
drivers/xen/xen-pciback/pciback.h
··· 37 37 struct xen_pci_sharedinfo *sh_info; 38 38 unsigned long flags; 39 39 struct work_struct op_work; 40 + struct xen_pci_op op; 40 41 }; 41 42 42 43 struct xen_pcibk_dev_data {
+60 -15
drivers/xen/xen-pciback/pciback_ops.c
··· 70 70 enable ? "enable" : "disable"); 71 71 72 72 if (enable) { 73 + /* 74 + * The MSI or MSI-X should not have an IRQ handler. Otherwise 75 + * if the guest terminates we BUG_ON in free_msi_irqs. 76 + */ 77 + if (dev->msi_enabled || dev->msix_enabled) 78 + goto out; 79 + 73 80 rc = request_irq(dev_data->irq, 74 81 xen_pcibk_guest_interrupt, IRQF_SHARED, 75 82 dev_data->irq_name, dev); ··· 151 144 if (unlikely(verbose_request)) 152 145 printk(KERN_DEBUG DRV_NAME ": %s: enable MSI\n", pci_name(dev)); 153 146 154 - status = pci_enable_msi(dev); 147 + if (dev->msi_enabled) 148 + status = -EALREADY; 149 + else if (dev->msix_enabled) 150 + status = -ENXIO; 151 + else 152 + status = pci_enable_msi(dev); 155 153 156 154 if (status) { 157 155 pr_warn_ratelimited("%s: error enabling MSI for guest %u: err %d\n", ··· 185 173 int xen_pcibk_disable_msi(struct xen_pcibk_device *pdev, 186 174 struct pci_dev *dev, struct xen_pci_op *op) 187 175 { 188 - struct xen_pcibk_dev_data *dev_data; 189 - 190 176 if (unlikely(verbose_request)) 191 177 printk(KERN_DEBUG DRV_NAME ": %s: disable MSI\n", 192 178 pci_name(dev)); 193 - pci_disable_msi(dev); 194 179 180 + if (dev->msi_enabled) { 181 + struct xen_pcibk_dev_data *dev_data; 182 + 183 + pci_disable_msi(dev); 184 + 185 + dev_data = pci_get_drvdata(dev); 186 + if (dev_data) 187 + dev_data->ack_intr = 1; 188 + } 195 189 op->value = dev->irq ? 
xen_pirq_from_irq(dev->irq) : 0; 196 190 if (unlikely(verbose_request)) 197 191 printk(KERN_DEBUG DRV_NAME ": %s: MSI: %d\n", pci_name(dev), 198 192 op->value); 199 - dev_data = pci_get_drvdata(dev); 200 - if (dev_data) 201 - dev_data->ack_intr = 1; 202 193 return 0; 203 194 } 204 195 ··· 212 197 struct xen_pcibk_dev_data *dev_data; 213 198 int i, result; 214 199 struct msix_entry *entries; 200 + u16 cmd; 215 201 216 202 if (unlikely(verbose_request)) 217 203 printk(KERN_DEBUG DRV_NAME ": %s: enable MSI-X\n", 218 204 pci_name(dev)); 205 + 219 206 if (op->value > SH_INFO_MAX_VEC) 220 207 return -EINVAL; 208 + 209 + if (dev->msix_enabled) 210 + return -EALREADY; 211 + 212 + /* 213 + * PCI_COMMAND_MEMORY must be enabled, otherwise we may not be able 214 + * to access the BARs where the MSI-X entries reside. 215 + */ 216 + pci_read_config_word(dev, PCI_COMMAND, &cmd); 217 + if (dev->msi_enabled || !(cmd & PCI_COMMAND_MEMORY)) 218 + return -ENXIO; 221 219 222 220 entries = kmalloc(op->value * sizeof(*entries), GFP_KERNEL); 223 221 if (entries == NULL) ··· 273 245 int xen_pcibk_disable_msix(struct xen_pcibk_device *pdev, 274 246 struct pci_dev *dev, struct xen_pci_op *op) 275 247 { 276 - struct xen_pcibk_dev_data *dev_data; 277 248 if (unlikely(verbose_request)) 278 249 printk(KERN_DEBUG DRV_NAME ": %s: disable MSI-X\n", 279 250 pci_name(dev)); 280 - pci_disable_msix(dev); 281 251 252 + if (dev->msix_enabled) { 253 + struct xen_pcibk_dev_data *dev_data; 254 + 255 + pci_disable_msix(dev); 256 + 257 + dev_data = pci_get_drvdata(dev); 258 + if (dev_data) 259 + dev_data->ack_intr = 1; 260 + } 282 261 /* 283 262 * SR-IOV devices (which don't have any legacy IRQ) have 284 263 * an undefined IRQ value of zero. 285 264 */ 286 265 op->value = dev->irq ? 
xen_pirq_from_irq(dev->irq) : 0; 287 266 if (unlikely(verbose_request)) 288 - printk(KERN_DEBUG DRV_NAME ": %s: MSI-X: %d\n", pci_name(dev), 289 - op->value); 290 - dev_data = pci_get_drvdata(dev); 291 - if (dev_data) 292 - dev_data->ack_intr = 1; 267 + printk(KERN_DEBUG DRV_NAME ": %s: MSI-X: %d\n", 268 + pci_name(dev), op->value); 293 269 return 0; 294 270 } 295 271 #endif ··· 330 298 container_of(data, struct xen_pcibk_device, op_work); 331 299 struct pci_dev *dev; 332 300 struct xen_pcibk_dev_data *dev_data = NULL; 333 - struct xen_pci_op *op = &pdev->sh_info->op; 301 + struct xen_pci_op *op = &pdev->op; 334 302 int test_intx = 0; 335 303 304 + *op = pdev->sh_info->op; 305 + barrier(); 336 306 dev = xen_pcibk_get_pci_dev(pdev, op->domain, op->bus, op->devfn); 337 307 338 308 if (dev == NULL) ··· 376 342 if ((dev_data->enable_intx != test_intx)) 377 343 xen_pcibk_control_isr(dev, 0 /* no reset */); 378 344 } 345 + pdev->sh_info->op.err = op->err; 346 + pdev->sh_info->op.value = op->value; 347 + #ifdef CONFIG_PCI_MSI 348 + if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) { 349 + unsigned int i; 350 + 351 + for (i = 0; i < op->value; i++) 352 + pdev->sh_info->op.msix_entries[i].vector = 353 + op->msix_entries[i].vector; 354 + } 355 + #endif 379 356 /* Tell the driver domain that we're done. */ 380 357 wmb(); 381 358 clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
+3 -1
drivers/xen/xen-pciback/xenbus.c
··· 44 44 dev_dbg(&xdev->dev, "allocated pdev @ 0x%p\n", pdev); 45 45 46 46 pdev->xdev = xdev; 47 - dev_set_drvdata(&xdev->dev, pdev); 48 47 49 48 mutex_init(&pdev->dev_lock); 50 49 ··· 57 58 kfree(pdev); 58 59 pdev = NULL; 59 60 } 61 + 62 + dev_set_drvdata(&xdev->dev, pdev); 63 + 60 64 out: 61 65 return pdev; 62 66 }
+1 -1
drivers/xen/xen-scsiback.c
··· 726 726 if (!pending_req) 727 727 return 1; 728 728 729 - ring_req = *RING_GET_REQUEST(ring, rc); 729 + RING_COPY_REQUEST(ring, rc, &ring_req); 730 730 ring->req_cons = ++rc; 731 731 732 732 err = prepare_pending_reqs(info, &ring_req, pending_req);
+7 -3
fs/btrfs/extent-tree.c
··· 10480 10480 * until transaction commit to do the actual discard. 10481 10481 */ 10482 10482 if (trimming) { 10483 - WARN_ON(!list_empty(&block_group->bg_list)); 10484 - spin_lock(&trans->transaction->deleted_bgs_lock); 10483 + spin_lock(&fs_info->unused_bgs_lock); 10484 + /* 10485 + * A concurrent scrub might have added us to the list 10486 + * fs_info->unused_bgs, so use a list_move operation 10487 + * to add the block group to the deleted_bgs list. 10488 + */ 10485 10489 list_move(&block_group->bg_list, 10486 10490 &trans->transaction->deleted_bgs); 10487 - spin_unlock(&trans->transaction->deleted_bgs_lock); 10491 + spin_unlock(&fs_info->unused_bgs_lock); 10488 10492 btrfs_get_block_group(block_group); 10489 10493 } 10490 10494 end_trans:
+14 -4
fs/btrfs/file.c
··· 1291 1291 * on error we return an unlocked page and the error value 1292 1292 * on success we return a locked page and 0 1293 1293 */ 1294 - static int prepare_uptodate_page(struct page *page, u64 pos, 1294 + static int prepare_uptodate_page(struct inode *inode, 1295 + struct page *page, u64 pos, 1295 1296 bool force_uptodate) 1296 1297 { 1297 1298 int ret = 0; ··· 1306 1305 if (!PageUptodate(page)) { 1307 1306 unlock_page(page); 1308 1307 return -EIO; 1308 + } 1309 + if (page->mapping != inode->i_mapping) { 1310 + unlock_page(page); 1311 + return -EAGAIN; 1309 1312 } 1310 1313 } 1311 1314 return 0; ··· 1329 1324 int faili; 1330 1325 1331 1326 for (i = 0; i < num_pages; i++) { 1327 + again: 1332 1328 pages[i] = find_or_create_page(inode->i_mapping, index + i, 1333 1329 mask | __GFP_WRITE); 1334 1330 if (!pages[i]) { ··· 1339 1333 } 1340 1334 1341 1335 if (i == 0) 1342 - err = prepare_uptodate_page(pages[i], pos, 1336 + err = prepare_uptodate_page(inode, pages[i], pos, 1343 1337 force_uptodate); 1344 - if (i == num_pages - 1) 1345 - err = prepare_uptodate_page(pages[i], 1338 + if (!err && i == num_pages - 1) 1339 + err = prepare_uptodate_page(inode, pages[i], 1346 1340 pos + write_bytes, false); 1347 1341 if (err) { 1348 1342 page_cache_release(pages[i]); 1343 + if (err == -EAGAIN) { 1344 + err = 0; 1345 + goto again; 1346 + } 1349 1347 faili = i - 1; 1350 1348 goto fail; 1351 1349 }
+6 -4
fs/btrfs/free-space-cache.c
··· 891 891 spin_unlock(&block_group->lock); 892 892 ret = 0; 893 893 894 - btrfs_warn(fs_info, "failed to load free space cache for block group %llu, rebuild it now", 894 + btrfs_warn(fs_info, "failed to load free space cache for block group %llu, rebuilding it now", 895 895 block_group->key.objectid); 896 896 } 897 897 ··· 2972 2972 u64 cont1_bytes, u64 min_bytes) 2973 2973 { 2974 2974 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 2975 - struct btrfs_free_space *entry; 2975 + struct btrfs_free_space *entry = NULL; 2976 2976 int ret = -ENOSPC; 2977 2977 u64 bitmap_offset = offset_to_bitmap(ctl, offset); 2978 2978 ··· 2983 2983 * The bitmap that covers offset won't be in the list unless offset 2984 2984 * is just its start offset. 2985 2985 */ 2986 - entry = list_first_entry(bitmaps, struct btrfs_free_space, list); 2987 - if (entry->offset != bitmap_offset) { 2986 + if (!list_empty(bitmaps)) 2987 + entry = list_first_entry(bitmaps, struct btrfs_free_space, list); 2988 + 2989 + if (!entry || entry->offset != bitmap_offset) { 2988 2990 entry = tree_search_offset(ctl, bitmap_offset, 1, 0); 2989 2991 if (entry && list_empty(&entry->list)) 2990 2992 list_add(&entry->list, bitmaps);
-1
fs/btrfs/transaction.c
··· 274 274 cur_trans->num_dirty_bgs = 0; 275 275 spin_lock_init(&cur_trans->dirty_bgs_lock); 276 276 INIT_LIST_HEAD(&cur_trans->deleted_bgs); 277 - spin_lock_init(&cur_trans->deleted_bgs_lock); 278 277 spin_lock_init(&cur_trans->dropped_roots_lock); 279 278 list_add_tail(&cur_trans->list, &fs_info->trans_list); 280 279 extent_io_tree_init(&cur_trans->dirty_pages,
+1 -1
fs/btrfs/transaction.h
··· 77 77 */ 78 78 struct mutex cache_write_mutex; 79 79 spinlock_t dirty_bgs_lock; 80 + /* Protected by spin lock fs_info->unused_bgs_lock. */ 80 81 struct list_head deleted_bgs; 81 - spinlock_t deleted_bgs_lock; 82 82 spinlock_t dropped_roots_lock; 83 83 struct btrfs_delayed_ref_root delayed_refs; 84 84 int aborted;
+1 -2
fs/btrfs/volumes.c
··· 3548 3548 3549 3549 ret = btrfs_force_chunk_alloc(trans, chunk_root, 3550 3550 BTRFS_BLOCK_GROUP_DATA); 3551 + btrfs_end_transaction(trans, chunk_root); 3551 3552 if (ret < 0) { 3552 3553 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3553 3554 goto error; 3554 3555 } 3555 - 3556 - btrfs_end_transaction(trans, chunk_root); 3557 3556 chunk_reserved = 1; 3558 3557 } 3559 3558
+1
fs/proc/base.c
··· 2494 2494 mm = get_task_mm(task); 2495 2495 if (!mm) 2496 2496 goto out_no_mm; 2497 + ret = 0; 2497 2498 2498 2499 for (i = 0, mask = 1; i < MMF_DUMP_FILTER_BITS; i++, mask <<= 1) { 2499 2500 if (val & mask)
+4
include/linux/enclosure.h
··· 29 29 /* A few generic types ... taken from ses-2 */ 30 30 enum enclosure_component_type { 31 31 ENCLOSURE_COMPONENT_DEVICE = 0x01, 32 + ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS = 0x07, 33 + ENCLOSURE_COMPONENT_SCSI_TARGET_PORT = 0x14, 34 + ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT = 0x15, 32 35 ENCLOSURE_COMPONENT_ARRAY_DEVICE = 0x17, 36 + ENCLOSURE_COMPONENT_SAS_EXPANDER = 0x18, 33 37 }; 34 38 35 39 /* ses-2 common element status */
+1
include/linux/mmdebug.h
··· 1 1 #ifndef LINUX_MM_DEBUG_H 2 2 #define LINUX_MM_DEBUG_H 1 3 3 4 + #include <linux/bug.h> 4 5 #include <linux/stringify.h> 5 6 6 7 struct page;
+1 -1
include/linux/netdevice.h
··· 2084 2084 }) 2085 2085 2086 2086 #define netdev_alloc_pcpu_stats(type) \ 2087 - __netdev_alloc_pcpu_stats(type, GFP_KERNEL); 2087 + __netdev_alloc_pcpu_stats(type, GFP_KERNEL) 2088 2088 2089 2089 #include <linux/notifier.h> 2090 2090
+1 -1
include/linux/netfilter/nfnetlink.h
··· 14 14 int (*call_rcu)(struct sock *nl, struct sk_buff *skb, 15 15 const struct nlmsghdr *nlh, 16 16 const struct nlattr * const cda[]); 17 - int (*call_batch)(struct sock *nl, struct sk_buff *skb, 17 + int (*call_batch)(struct net *net, struct sock *nl, struct sk_buff *skb, 18 18 const struct nlmsghdr *nlh, 19 19 const struct nlattr * const cda[]); 20 20 const struct nla_policy *policy; /* netlink attribute policy */
+1 -1
include/linux/platform_data/edma.h
··· 72 72 struct edma_rsv_info *rsv; 73 73 74 74 /* List of channels allocated for memcpy, terminated with -1 */ 75 - s16 *memcpy_channels; 75 + s32 *memcpy_channels; 76 76 77 77 s8 (*queue_priority_mapping)[2]; 78 78 const s16 (*xbar_chans)[2];
+2
include/linux/qed/common_hsi.h
··· 9 9 #ifndef __COMMON_HSI__ 10 10 #define __COMMON_HSI__ 11 11 12 + #define CORE_SPQE_PAGE_SIZE_BYTES 4096 13 + 12 14 #define FW_MAJOR_VERSION 8 13 15 #define FW_MINOR_VERSION 4 14 16 #define FW_REVISION_VERSION 2
+2 -1
include/linux/qed/qed_chain.h
··· 111 111 used = ((u32)0x10000u + (u32)(p_chain->prod_idx)) - 112 112 (u32)p_chain->cons_idx; 113 113 if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR) 114 - used -= (used / p_chain->elem_per_page); 114 + used -= p_chain->prod_idx / p_chain->elem_per_page - 115 + p_chain->cons_idx / p_chain->elem_per_page; 115 116 116 117 return p_chain->capacity - used; 117 118 }
+11 -7
include/linux/rhashtable.h
··· 19 19 20 20 #include <linux/atomic.h> 21 21 #include <linux/compiler.h> 22 + #include <linux/err.h> 22 23 #include <linux/errno.h> 23 24 #include <linux/jhash.h> 24 25 #include <linux/list_nulls.h> ··· 340 339 int rhashtable_init(struct rhashtable *ht, 341 340 const struct rhashtable_params *params); 342 341 343 - int rhashtable_insert_slow(struct rhashtable *ht, const void *key, 344 - struct rhash_head *obj, 345 - struct bucket_table *old_tbl); 346 - int rhashtable_insert_rehash(struct rhashtable *ht); 342 + struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht, 343 + const void *key, 344 + struct rhash_head *obj, 345 + struct bucket_table *old_tbl); 346 + int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl); 347 347 348 348 int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter); 349 349 void rhashtable_walk_exit(struct rhashtable_iter *iter); ··· 600 598 601 599 new_tbl = rht_dereference_rcu(tbl->future_tbl, ht); 602 600 if (unlikely(new_tbl)) { 603 - err = rhashtable_insert_slow(ht, key, obj, new_tbl); 604 - if (err == -EAGAIN) 601 + tbl = rhashtable_insert_slow(ht, key, obj, new_tbl); 602 + if (!IS_ERR_OR_NULL(tbl)) 605 603 goto slow_path; 604 + 605 + err = PTR_ERR(tbl); 606 606 goto out; 607 607 } 608 608 ··· 615 611 if (unlikely(rht_grow_above_100(ht, tbl))) { 616 612 slow_path: 617 613 spin_unlock_bh(lock); 618 - err = rhashtable_insert_rehash(ht); 614 + err = rhashtable_insert_rehash(ht, tbl); 619 615 rcu_read_unlock(); 620 616 if (err) 621 617 return err;
+33
include/net/dst.h
··· 322 322 } 323 323 } 324 324 325 + /** 326 + * dst_hold_safe - Take a reference on a dst if possible 327 + * @dst: pointer to dst entry 328 + * 329 + * This helper returns false if it could not safely 330 + * take a reference on a dst. 331 + */ 332 + static inline bool dst_hold_safe(struct dst_entry *dst) 333 + { 334 + if (dst->flags & DST_NOCACHE) 335 + return atomic_inc_not_zero(&dst->__refcnt); 336 + dst_hold(dst); 337 + return true; 338 + } 339 + 340 + /** 341 + * skb_dst_force_safe - makes sure skb dst is refcounted 342 + * @skb: buffer 343 + * 344 + * If dst is not yet refcounted and not destroyed, grab a ref on it. 345 + */ 346 + static inline void skb_dst_force_safe(struct sk_buff *skb) 347 + { 348 + if (skb_dst_is_noref(skb)) { 349 + struct dst_entry *dst = skb_dst(skb); 350 + 351 + if (!dst_hold_safe(dst)) 352 + dst = NULL; 353 + 354 + skb->_skb_refdst = (unsigned long)dst; 355 + } 356 + } 357 + 325 358 326 359 /** 327 360 * __skb_tunnel_rx - prepare skb for rx reinsert
+23 -4
include/net/inet_sock.h
··· 210 210 #define IP_CMSG_ORIGDSTADDR BIT(6) 211 211 #define IP_CMSG_CHECKSUM BIT(7) 212 212 213 - /* SYNACK messages might be attached to request sockets. 213 + /** 214 + * sk_to_full_sk - Access to a full socket 215 + * @sk: pointer to a socket 216 + * 217 + * SYNACK messages might be attached to request sockets. 214 218 * Some places want to reach the listener in this case. 215 219 */ 216 - static inline struct sock *skb_to_full_sk(const struct sk_buff *skb) 220 + static inline struct sock *sk_to_full_sk(struct sock *sk) 217 221 { 218 - struct sock *sk = skb->sk; 219 - 222 + #ifdef CONFIG_INET 220 223 if (sk && sk->sk_state == TCP_NEW_SYN_RECV) 221 224 sk = inet_reqsk(sk)->rsk_listener; 225 + #endif 222 226 return sk; 227 + } 228 + 229 + /* sk_to_full_sk() variant with a const argument */ 230 + static inline const struct sock *sk_const_to_full_sk(const struct sock *sk) 231 + { 232 + #ifdef CONFIG_INET 233 + if (sk && sk->sk_state == TCP_NEW_SYN_RECV) 234 + sk = ((const struct request_sock *)sk)->rsk_listener; 235 + #endif 236 + return sk; 237 + } 238 + 239 + static inline struct sock *skb_to_full_sk(const struct sk_buff *skb) 240 + { 241 + return sk_to_full_sk(skb->sk); 223 242 } 224 243 225 244 static inline struct inet_sock *inet_sk(const struct sock *sk)
+1
include/net/inetpeer.h
··· 78 78 static inline void inetpeer_set_addr_v4(struct inetpeer_addr *iaddr, __be32 ip) 79 79 { 80 80 iaddr->a4.addr = ip; 81 + iaddr->a4.vif = 0; 81 82 iaddr->family = AF_INET; 82 83 } 83 84
+2 -1
include/net/sctp/structs.h
··· 1493 1493 * : SACK's are not delayed (see Section 6). 1494 1494 */ 1495 1495 __u8 sack_needed:1, /* Do we need to sack the peer? */ 1496 - sack_generation:1; 1496 + sack_generation:1, 1497 + zero_window_announced:1; 1497 1498 __u32 sack_cnt; 1498 1499 1499 1500 __u32 adaptation_ind; /* Adaptation Code point. */
+5 -2
include/net/sock.h
··· 388 388 struct socket_wq *sk_wq_raw; 389 389 }; 390 390 #ifdef CONFIG_XFRM 391 - struct xfrm_policy *sk_policy[2]; 391 + struct xfrm_policy __rcu *sk_policy[2]; 392 392 #endif 393 393 struct dst_entry *sk_rx_dst; 394 394 struct dst_entry __rcu *sk_dst_cache; ··· 404 404 sk_userlocks : 4, 405 405 sk_protocol : 8, 406 406 sk_type : 16; 407 + #define SK_PROTOCOL_MAX U8_MAX 407 408 kmemcheck_bitfield_end(flags); 408 409 int sk_wmem_queued; 409 410 gfp_t sk_allocation; ··· 741 740 SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */ 742 741 }; 743 742 743 + #define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE)) 744 + 744 745 static inline void sock_copy_flags(struct sock *nsk, struct sock *osk) 745 746 { 746 747 nsk->sk_flags = osk->sk_flags; ··· 817 814 static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb) 818 815 { 819 816 /* dont let skb dst not refcounted, we are going to leave rcu lock */ 820 - skb_dst_force(skb); 817 + skb_dst_force_safe(skb); 821 818 822 819 if (!sk->sk_backlog.tail) 823 820 sk->sk_backlog.head = skb;
+1 -1
include/net/vxlan.h
··· 79 79 }; 80 80 81 81 /* VXLAN header flags. */ 82 - #define VXLAN_HF_RCO BIT(24) 82 + #define VXLAN_HF_RCO BIT(21) 83 83 #define VXLAN_HF_VNI BIT(27) 84 84 #define VXLAN_HF_GBP BIT(31) 85 85
+16 -9
include/net/xfrm.h
··· 548 548 u16 family; 549 549 struct xfrm_sec_ctx *security; 550 550 struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH]; 551 + struct rcu_head rcu; 551 552 }; 552 553 553 554 static inline struct net *xp_net(const struct xfrm_policy *xp) ··· 1142 1141 return xfrm_route_forward(skb, AF_INET6); 1143 1142 } 1144 1143 1145 - int __xfrm_sk_clone_policy(struct sock *sk); 1144 + int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk); 1146 1145 1147 - static inline int xfrm_sk_clone_policy(struct sock *sk) 1146 + static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk) 1148 1147 { 1149 - if (unlikely(sk->sk_policy[0] || sk->sk_policy[1])) 1150 - return __xfrm_sk_clone_policy(sk); 1148 + sk->sk_policy[0] = NULL; 1149 + sk->sk_policy[1] = NULL; 1150 + if (unlikely(osk->sk_policy[0] || osk->sk_policy[1])) 1151 + return __xfrm_sk_clone_policy(sk, osk); 1151 1152 return 0; 1152 1153 } 1153 1154 ··· 1157 1154 1158 1155 static inline void xfrm_sk_free_policy(struct sock *sk) 1159 1156 { 1160 - if (unlikely(sk->sk_policy[0] != NULL)) { 1161 - xfrm_policy_delete(sk->sk_policy[0], XFRM_POLICY_MAX); 1157 + struct xfrm_policy *pol; 1158 + 1159 + pol = rcu_dereference_protected(sk->sk_policy[0], 1); 1160 + if (unlikely(pol != NULL)) { 1161 + xfrm_policy_delete(pol, XFRM_POLICY_MAX); 1162 1162 sk->sk_policy[0] = NULL; 1163 1163 } 1164 - if (unlikely(sk->sk_policy[1] != NULL)) { 1165 - xfrm_policy_delete(sk->sk_policy[1], XFRM_POLICY_MAX+1); 1164 + pol = rcu_dereference_protected(sk->sk_policy[1], 1); 1165 + if (unlikely(pol != NULL)) { 1166 + xfrm_policy_delete(pol, XFRM_POLICY_MAX+1); 1166 1167 sk->sk_policy[1] = NULL; 1167 1168 } 1168 1169 } ··· 1176 1169 #else 1177 1170 1178 1171 static inline void xfrm_sk_free_policy(struct sock *sk) {} 1179 - static inline int xfrm_sk_clone_policy(struct sock *sk) { return 0; } 1172 + static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk) { return 0; } 1180 1173 static inline int 
xfrm6_route_forward(struct sk_buff *skb) { return 1; } 1181 1174 static inline int xfrm4_route_forward(struct sk_buff *skb) { return 1; } 1182 1175 static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
+1
include/uapi/linux/Kbuild
··· 186 186 header-y += if_vlan.h 187 187 header-y += if_x25.h 188 188 header-y += igmp.h 189 + header-y += ila.h 189 190 header-y += in6.h 190 191 header-y += inet_diag.h 191 192 header-y += in.h
+1 -1
include/uapi/linux/openvswitch.h
··· 628 628 * @OVS_CT_ATTR_MARK: u32 value followed by u32 mask. For each bit set in the 629 629 * mask, the corresponding bit in the value is copied to the connection 630 630 * tracking mark field in the connection. 631 - * @OVS_CT_ATTR_LABEL: %OVS_CT_LABELS_LEN value followed by %OVS_CT_LABELS_LEN 631 + * @OVS_CT_ATTR_LABELS: %OVS_CT_LABELS_LEN value followed by %OVS_CT_LABELS_LEN 632 632 * mask. For each bit set in the mask, the corresponding bit in the value is 633 633 * copied to the connection tracking label field in the connection. 634 634 * @OVS_CT_ATTR_HELPER: variable length string defining conntrack ALG.
+14
include/xen/interface/io/ring.h
··· 181 181 #define RING_GET_REQUEST(_r, _idx) \ 182 182 (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req)) 183 183 184 + /* 185 + * Get a local copy of a request. 186 + * 187 + * Use this in preference to RING_GET_REQUEST() so all processing is 188 + * done on a local copy that cannot be modified by the other end. 189 + * 190 + * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this 191 + * to be ineffective where _req is a struct which consists of only bitfields. 192 + */ 193 + #define RING_COPY_REQUEST(_r, _idx, _req) do { \ 194 + /* Use volatile to force the copy into _req. */ \ 195 + *(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx); \ 196 + } while (0) 197 + 184 198 #define RING_GET_RESPONSE(_r, _idx) \ 185 199 (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp)) 186 200
+5 -3
kernel/locking/osq_lock.c
··· 93 93 node->cpu = curr; 94 94 95 95 /* 96 - * ACQUIRE semantics, pairs with corresponding RELEASE 97 - * in unlock() uncontended, or fastpath. 96 + * We need both ACQUIRE (pairs with corresponding RELEASE in 97 + * unlock() uncontended, or fastpath) and RELEASE (to publish 98 + * the node fields we just initialised) semantics when updating 99 + * the lock tail. 98 100 */ 99 - old = atomic_xchg_acquire(&lock->tail, curr); 101 + old = atomic_xchg(&lock->tail, curr); 100 102 if (old == OSQ_UNLOCKED_VAL) 101 103 return true; 102 104
+2 -2
lib/dma-debug.c
··· 1464 1464 entry->type = dma_debug_coherent; 1465 1465 entry->dev = dev; 1466 1466 entry->pfn = page_to_pfn(virt_to_page(virt)); 1467 - entry->offset = (size_t) virt & PAGE_MASK; 1467 + entry->offset = (size_t) virt & ~PAGE_MASK; 1468 1468 entry->size = size; 1469 1469 entry->dev_addr = dma_addr; 1470 1470 entry->direction = DMA_BIDIRECTIONAL; ··· 1480 1480 .type = dma_debug_coherent, 1481 1481 .dev = dev, 1482 1482 .pfn = page_to_pfn(virt_to_page(virt)), 1483 - .offset = (size_t) virt & PAGE_MASK, 1483 + .offset = (size_t) virt & ~PAGE_MASK, 1484 1484 .dev_addr = addr, 1485 1485 .size = size, 1486 1486 .direction = DMA_BIDIRECTIONAL,
+41 -28
lib/rhashtable.c
··· 389 389 return false; 390 390 } 391 391 392 - int rhashtable_insert_rehash(struct rhashtable *ht) 392 + int rhashtable_insert_rehash(struct rhashtable *ht, 393 + struct bucket_table *tbl) 393 394 { 394 395 struct bucket_table *old_tbl; 395 396 struct bucket_table *new_tbl; 396 - struct bucket_table *tbl; 397 397 unsigned int size; 398 398 int err; 399 399 400 400 old_tbl = rht_dereference_rcu(ht->tbl, ht); 401 - tbl = rhashtable_last_table(ht, old_tbl); 402 401 403 402 size = tbl->size; 403 + 404 + err = -EBUSY; 404 405 405 406 if (rht_grow_above_75(ht, tbl)) 406 407 size *= 2; 407 408 /* Do not schedule more than one rehash */ 408 409 else if (old_tbl != tbl) 409 - return -EBUSY; 410 + goto fail; 411 + 412 + err = -ENOMEM; 410 413 411 414 new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC); 412 - if (new_tbl == NULL) { 413 - /* Schedule async resize/rehash to try allocation 414 - * non-atomic context. 415 - */ 416 - schedule_work(&ht->run_work); 417 - return -ENOMEM; 418 - } 415 + if (new_tbl == NULL) 416 + goto fail; 419 417 420 418 err = rhashtable_rehash_attach(ht, tbl, new_tbl); 421 419 if (err) { ··· 424 426 schedule_work(&ht->run_work); 425 427 426 428 return err; 429 + 430 + fail: 431 + /* Do not fail the insert if someone else did a rehash. */ 432 + if (likely(rcu_dereference_raw(tbl->future_tbl))) 433 + return 0; 434 + 435 + /* Schedule async rehash to retry allocation in process context. 
*/ 436 + if (err == -ENOMEM) 437 + schedule_work(&ht->run_work); 438 + 439 + return err; 427 440 } 428 441 EXPORT_SYMBOL_GPL(rhashtable_insert_rehash); 429 442 430 - int rhashtable_insert_slow(struct rhashtable *ht, const void *key, 431 - struct rhash_head *obj, 432 - struct bucket_table *tbl) 443 + struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht, 444 + const void *key, 445 + struct rhash_head *obj, 446 + struct bucket_table *tbl) 433 447 { 434 448 struct rhash_head *head; 435 449 unsigned int hash; ··· 477 467 exit: 478 468 spin_unlock(rht_bucket_lock(tbl, hash)); 479 469 480 - return err; 470 + if (err == 0) 471 + return NULL; 472 + else if (err == -EAGAIN) 473 + return tbl; 474 + else 475 + return ERR_PTR(err); 481 476 } 482 477 EXPORT_SYMBOL_GPL(rhashtable_insert_slow); 483 478 ··· 518 503 if (!iter->walker) 519 504 return -ENOMEM; 520 505 521 - mutex_lock(&ht->mutex); 506 + spin_lock(&ht->lock); 522 507 iter->walker->tbl = rht_dereference(ht->tbl, ht); 523 508 list_add(&iter->walker->list, &iter->walker->tbl->walkers); 524 - mutex_unlock(&ht->mutex); 509 + spin_unlock(&ht->lock); 525 510 526 511 return 0; 527 512 } ··· 535 520 */ 536 521 void rhashtable_walk_exit(struct rhashtable_iter *iter) 537 522 { 538 - mutex_lock(&iter->ht->mutex); 523 + spin_lock(&iter->ht->lock); 539 524 if (iter->walker->tbl) 540 525 list_del(&iter->walker->list); 541 - mutex_unlock(&iter->ht->mutex); 526 + spin_unlock(&iter->ht->lock); 542 527 kfree(iter->walker); 543 528 } 544 529 EXPORT_SYMBOL_GPL(rhashtable_walk_exit); ··· 562 547 { 563 548 struct rhashtable *ht = iter->ht; 564 549 565 - mutex_lock(&ht->mutex); 566 - 567 - if (iter->walker->tbl) 568 - list_del(&iter->walker->list); 569 - 570 550 rcu_read_lock(); 571 551 572 - mutex_unlock(&ht->mutex); 552 + spin_lock(&ht->lock); 553 + if (iter->walker->tbl) 554 + list_del(&iter->walker->list); 555 + spin_unlock(&ht->lock); 573 556 574 557 if (!iter->walker->tbl) { 575 558 iter->walker->tbl = 
rht_dereference_rcu(ht->tbl, ht); ··· 736 723 if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT)) 737 724 return -EINVAL; 738 725 739 - if (params->nelem_hint) 740 - size = rounded_hashtable_size(params); 741 - 742 726 memset(ht, 0, sizeof(*ht)); 743 727 mutex_init(&ht->mutex); 744 728 spin_lock_init(&ht->lock); ··· 754 744 ht->p.insecure_max_entries = ht->p.max_size * 2; 755 745 756 746 ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE); 747 + 748 + if (params->nelem_hint) 749 + size = rounded_hashtable_size(&ht->p); 757 750 758 751 /* The maximum (not average) chain length grows with the 759 752 * size of the hash table, at a rate of (log N)/(log log N).
+3 -3
mm/zswap.c
··· 541 541 return last; 542 542 } 543 543 544 + /* type and compressor must be null-terminated */ 544 545 static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor) 545 546 { 546 547 struct zswap_pool *pool; ··· 549 548 assert_spin_locked(&zswap_pools_lock); 550 549 551 550 list_for_each_entry_rcu(pool, &zswap_pools, list) { 552 - if (strncmp(pool->tfm_name, compressor, sizeof(pool->tfm_name))) 551 + if (strcmp(pool->tfm_name, compressor)) 553 552 continue; 554 - if (strncmp(zpool_get_type(pool->zpool), type, 555 - sizeof(zswap_zpool_type))) 553 + if (strcmp(zpool_get_type(pool->zpool), type)) 556 554 continue; 557 555 /* if we can't get it, it's about to be destroyed */ 558 556 if (!zswap_pool_get(pool))
+3
net/ax25/af_ax25.c
··· 805 805 struct sock *sk; 806 806 ax25_cb *ax25; 807 807 808 + if (protocol < 0 || protocol > SK_PROTOCOL_MAX) 809 + return -EINVAL; 810 + 808 811 if (!net_eq(net, &init_net)) 809 812 return -EAFNOSUPPORT; 810 813
+4 -1
net/batman-adv/distributed-arp-table.c
··· 566 566 int select; 567 567 batadv_dat_addr_t last_max = BATADV_DAT_ADDR_MAX, ip_key; 568 568 struct batadv_dat_candidate *res; 569 + struct batadv_dat_entry dat; 569 570 570 571 if (!bat_priv->orig_hash) 571 572 return NULL; ··· 576 575 if (!res) 577 576 return NULL; 578 577 579 - ip_key = (batadv_dat_addr_t)batadv_hash_dat(&ip_dst, 578 + dat.ip = ip_dst; 579 + dat.vid = 0; 580 + ip_key = (batadv_dat_addr_t)batadv_hash_dat(&dat, 580 581 BATADV_DAT_ADDR_MAX); 581 582 582 583 batadv_dbg(BATADV_DBG_DAT, bat_priv,
+15 -4
net/batman-adv/routing.c
··· 836 836 u8 *orig_addr; 837 837 struct batadv_orig_node *orig_node = NULL; 838 838 int check, hdr_size = sizeof(*unicast_packet); 839 + enum batadv_subtype subtype; 839 840 bool is4addr; 840 841 841 842 unicast_packet = (struct batadv_unicast_packet *)skb->data; ··· 864 863 /* packet for me */ 865 864 if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) { 866 865 if (is4addr) { 867 - batadv_dat_inc_counter(bat_priv, 868 - unicast_4addr_packet->subtype); 869 - orig_addr = unicast_4addr_packet->src; 870 - orig_node = batadv_orig_hash_find(bat_priv, orig_addr); 866 + subtype = unicast_4addr_packet->subtype; 867 + batadv_dat_inc_counter(bat_priv, subtype); 868 + 869 + /* Only payload data should be considered for speedy 870 + * join. For example, DAT also uses unicast 4addr 871 + * types, but those packets should not be considered 872 + * for speedy join, since the clients do not actually 873 + * reside at the sending originator. 874 + */ 875 + if (subtype == BATADV_P_DATA) { 876 + orig_addr = unicast_4addr_packet->src; 877 + orig_node = batadv_orig_hash_find(bat_priv, 878 + orig_addr); 879 + } 871 880 } 872 881 873 882 if (batadv_dat_snoop_incoming_arp_request(bat_priv, skb,
+12 -4
net/batman-adv/translation-table.c
··· 68 68 unsigned short vid, const char *message, 69 69 bool roaming); 70 70 71 - /* returns 1 if they are the same mac addr */ 71 + /* returns 1 if they are the same mac addr and vid */ 72 72 static int batadv_compare_tt(const struct hlist_node *node, const void *data2) 73 73 { 74 74 const void *data1 = container_of(node, struct batadv_tt_common_entry, 75 75 hash_entry); 76 + const struct batadv_tt_common_entry *tt1 = data1; 77 + const struct batadv_tt_common_entry *tt2 = data2; 76 78 77 - return batadv_compare_eth(data1, data2); 79 + return (tt1->vid == tt2->vid) && batadv_compare_eth(data1, data2); 78 80 } 79 81 80 82 /** ··· 1429 1427 } 1430 1428 1431 1429 /* if the client was temporary added before receiving the first 1432 - * OGM announcing it, we have to clear the TEMP flag 1430 + * OGM announcing it, we have to clear the TEMP flag. Also, 1431 + * remove the previous temporary orig node and re-add it 1432 + * if required. If the orig entry changed, the new one which 1433 + * is a non-temporary entry is preferred. 1433 1434 */ 1434 - common->flags &= ~BATADV_TT_CLIENT_TEMP; 1435 + if (common->flags & BATADV_TT_CLIENT_TEMP) { 1436 + batadv_tt_global_del_orig_list(tt_global_entry); 1437 + common->flags &= ~BATADV_TT_CLIENT_TEMP; 1438 + } 1435 1439 1436 1440 /* the change can carry possible "attribute" flags like the 1437 1441 * TT_CLIENT_WIFI, therefore they have to be copied in the
+3
net/bluetooth/sco.c
··· 526 526 if (!addr || addr->sa_family != AF_BLUETOOTH) 527 527 return -EINVAL; 528 528 529 + if (addr_len < sizeof(struct sockaddr_sco)) 530 + return -EINVAL; 531 + 529 532 lock_sock(sk); 530 533 531 534 if (sk->sk_state != BT_OPEN) {
+3 -2
net/core/skbuff.c
··· 3643 3643 serr->ee.ee_info = tstype; 3644 3644 if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) { 3645 3645 serr->ee.ee_data = skb_shinfo(skb)->tskey; 3646 - if (sk->sk_protocol == IPPROTO_TCP) 3646 + if (sk->sk_protocol == IPPROTO_TCP && 3647 + sk->sk_type == SOCK_STREAM) 3647 3648 serr->ee.ee_data -= sk->sk_tskey; 3648 3649 } 3649 3650 ··· 4269 4268 return NULL; 4270 4269 } 4271 4270 4272 - memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len, 4271 + memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len - VLAN_HLEN, 4273 4272 2 * ETH_ALEN); 4274 4273 skb->mac_header += VLAN_HLEN; 4275 4274 return skb;
+3 -4
net/core/sock.c
··· 433 433 } 434 434 } 435 435 436 - #define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE)) 437 - 438 436 static void sock_disable_timestamp(struct sock *sk, unsigned long flags) 439 437 { 440 438 if (sk->sk_flags & flags) { ··· 872 874 873 875 if (val & SOF_TIMESTAMPING_OPT_ID && 874 876 !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) { 875 - if (sk->sk_protocol == IPPROTO_TCP) { 877 + if (sk->sk_protocol == IPPROTO_TCP && 878 + sk->sk_type == SOCK_STREAM) { 876 879 if (sk->sk_state != TCP_ESTABLISHED) { 877 880 ret = -EINVAL; 878 881 break; ··· 1551 1552 */ 1552 1553 is_charged = sk_filter_charge(newsk, filter); 1553 1554 1554 - if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk))) { 1555 + if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) { 1555 1556 /* It is still raw copy of parent, so invalidate 1556 1557 * destructor and make plain sk_free() */ 1557 1558 newsk->sk_destruct = NULL;
+3
net/decnet/af_decnet.c
··· 678 678 { 679 679 struct sock *sk; 680 680 681 + if (protocol < 0 || protocol > SK_PROTOCOL_MAX) 682 + return -EINVAL; 683 + 681 684 if (!net_eq(net, &init_net)) 682 685 return -EAFNOSUPPORT; 683 686
+3
net/ipv4/af_inet.c
··· 257 257 int try_loading_module = 0; 258 258 int err; 259 259 260 + if (protocol < 0 || protocol >= IPPROTO_MAX) 261 + return -EINVAL; 262 + 260 263 sock->state = SS_UNCONNECTED; 261 264 262 265 /* Look for the requested type/protocol pair. */
+9
net/ipv4/fib_frontend.c
··· 1155 1155 static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) 1156 1156 { 1157 1157 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 1158 + struct netdev_notifier_changeupper_info *info; 1158 1159 struct in_device *in_dev; 1159 1160 struct net *net = dev_net(dev); 1160 1161 unsigned int flags; ··· 1193 1192 /* fall through */ 1194 1193 case NETDEV_CHANGEMTU: 1195 1194 rt_cache_flush(net); 1195 + break; 1196 + case NETDEV_CHANGEUPPER: 1197 + info = ptr; 1198 + /* flush all routes if dev is linked to or unlinked from 1199 + * an L3 master device (e.g., VRF) 1200 + */ 1201 + if (info->upper_dev && netif_is_l3_master(info->upper_dev)) 1202 + fib_disable_ip(dev, NETDEV_DOWN, true); 1196 1203 break; 1197 1204 } 1198 1205 return NOTIFY_DONE;
+2 -1
net/ipv4/fou.c
··· 24 24 u16 type; 25 25 struct udp_offload udp_offloads; 26 26 struct list_head list; 27 + struct rcu_head rcu; 27 28 }; 28 29 29 30 #define FOU_F_REMCSUM_NOPARTIAL BIT(0) ··· 418 417 list_del(&fou->list); 419 418 udp_tunnel_sock_release(sock); 420 419 421 - kfree(fou); 420 + kfree_rcu(fou, rcu); 422 421 } 423 422 424 423 static int fou_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
+1
net/ipv4/netfilter/Kconfig
··· 60 60 61 61 config NFT_DUP_IPV4 62 62 tristate "IPv4 nf_tables packet duplication support" 63 + depends on !NF_CONNTRACK || NF_CONNTRACK 63 64 select NF_DUP_IPV4 64 65 help 65 66 This module enables IPv4 packet duplication support for nf_tables.
+2 -3
net/ipv4/tcp_ipv4.c
··· 1493 1493 if (likely(sk->sk_rx_dst)) 1494 1494 skb_dst_drop(skb); 1495 1495 else 1496 - skb_dst_force(skb); 1496 + skb_dst_force_safe(skb); 1497 1497 1498 1498 __skb_queue_tail(&tp->ucopy.prequeue, skb); 1499 1499 tp->ucopy.memory += skb->truesize; ··· 1721 1721 { 1722 1722 struct dst_entry *dst = skb_dst(skb); 1723 1723 1724 - if (dst) { 1725 - dst_hold(dst); 1724 + if (dst && dst_hold_safe(dst)) { 1726 1725 sk->sk_rx_dst = dst; 1727 1726 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; 1728 1727 }
+12 -11
net/ipv4/tcp_output.c
··· 3150 3150 { 3151 3151 struct tcp_sock *tp = tcp_sk(sk); 3152 3152 struct tcp_fastopen_request *fo = tp->fastopen_req; 3153 - int syn_loss = 0, space, err = 0, copied; 3153 + int syn_loss = 0, space, err = 0; 3154 3154 unsigned long last_syn_loss = 0; 3155 3155 struct sk_buff *syn_data; 3156 3156 ··· 3188 3188 goto fallback; 3189 3189 syn_data->ip_summed = CHECKSUM_PARTIAL; 3190 3190 memcpy(syn_data->cb, syn->cb, sizeof(syn->cb)); 3191 - copied = copy_from_iter(skb_put(syn_data, space), space, 3192 - &fo->data->msg_iter); 3193 - if (unlikely(!copied)) { 3194 - kfree_skb(syn_data); 3195 - goto fallback; 3191 + if (space) { 3192 + int copied = copy_from_iter(skb_put(syn_data, space), space, 3193 + &fo->data->msg_iter); 3194 + if (unlikely(!copied)) { 3195 + kfree_skb(syn_data); 3196 + goto fallback; 3197 + } 3198 + if (copied != space) { 3199 + skb_trim(syn_data, copied); 3200 + space = copied; 3201 + } 3196 3202 } 3197 - if (copied != space) { 3198 - skb_trim(syn_data, copied); 3199 - space = copied; 3200 - } 3201 - 3202 3203 /* No more data pending in inet_wait_for_connect() */ 3203 3204 if (space == fo->size) 3204 3205 fo->data = NULL;
+7 -1
net/ipv6/addrconf.c
··· 350 350 setup_timer(&ndev->rs_timer, addrconf_rs_timer, 351 351 (unsigned long)ndev); 352 352 memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf)); 353 + 354 + if (ndev->cnf.stable_secret.initialized) 355 + ndev->addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY; 356 + else 357 + ndev->addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64; 358 + 353 359 ndev->cnf.mtu6 = dev->mtu; 354 360 ndev->cnf.sysctl = NULL; 355 361 ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl); ··· 2461 2455 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD 2462 2456 if (in6_dev->cnf.optimistic_dad && 2463 2457 !net->ipv6.devconf_all->forwarding && sllao) 2464 - addr_flags = IFA_F_OPTIMISTIC; 2458 + addr_flags |= IFA_F_OPTIMISTIC; 2465 2459 #endif 2466 2460 2467 2461 /* Do not allow to create too much of autoconfigured
+3
net/ipv6/af_inet6.c
··· 109 109 int try_loading_module = 0; 110 110 int err; 111 111 112 + if (protocol < 0 || protocol >= IPPROTO_MAX) 113 + return -EINVAL; 114 + 112 115 /* Look for the requested type/protocol pair. */ 113 116 lookup_protocol: 114 117 err = -ESOCKTNOSUPPORT;
+3 -5
net/ipv6/ip6_gre.c
··· 1571 1571 return -EEXIST; 1572 1572 } else { 1573 1573 t = nt; 1574 - 1575 - ip6gre_tunnel_unlink(ign, t); 1576 - ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]); 1577 - ip6gre_tunnel_link(ign, t); 1578 - netdev_state_change(dev); 1579 1574 } 1580 1575 1576 + ip6gre_tunnel_unlink(ign, t); 1577 + ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]); 1578 + ip6gre_tunnel_link(ign, t); 1581 1579 return 0; 1582 1580 } 1583 1581
+1
net/ipv6/netfilter/Kconfig
··· 49 49 50 50 config NFT_DUP_IPV6 51 51 tristate "IPv6 nf_tables packet duplication support" 52 + depends on !NF_CONNTRACK || NF_CONNTRACK 52 53 select NF_DUP_IPV6 53 54 help 54 55 This module enables IPv6 packet duplication support for nf_tables.
+1 -2
net/ipv6/tcp_ipv6.c
··· 93 93 { 94 94 struct dst_entry *dst = skb_dst(skb); 95 95 96 - if (dst) { 96 + if (dst && dst_hold_safe(dst)) { 97 97 const struct rt6_info *rt = (const struct rt6_info *)dst; 98 98 99 - dst_hold(dst); 100 99 sk->sk_rx_dst = dst; 101 100 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; 102 101 inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
+3
net/irda/af_irda.c
··· 1086 1086 struct sock *sk; 1087 1087 struct irda_sock *self; 1088 1088 1089 + if (protocol < 0 || protocol > SK_PROTOCOL_MAX) 1090 + return -EINVAL; 1091 + 1089 1092 if (net != &init_net) 1090 1093 return -EAFNOSUPPORT; 1091 1094
+1 -2
net/mac80211/cfg.c
··· 1169 1169 * rc isn't initialized here yet, so ignore it 1170 1170 */ 1171 1171 __ieee80211_vht_handle_opmode(sdata, sta, 1172 - params->opmode_notif, 1173 - band, false); 1172 + params->opmode_notif, band); 1174 1173 } 1175 1174 1176 1175 if (ieee80211_vif_is_mesh(&sdata->vif))
+2 -2
net/mac80211/ieee80211_i.h
··· 1709 1709 void ieee80211_sta_set_rx_nss(struct sta_info *sta); 1710 1710 u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, 1711 1711 struct sta_info *sta, u8 opmode, 1712 - enum ieee80211_band band, bool nss_only); 1712 + enum ieee80211_band band); 1713 1713 void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, 1714 1714 struct sta_info *sta, u8 opmode, 1715 - enum ieee80211_band band, bool nss_only); 1715 + enum ieee80211_band band); 1716 1716 void ieee80211_apply_vhtcap_overrides(struct ieee80211_sub_if_data *sdata, 1717 1717 struct ieee80211_sta_vht_cap *vht_cap); 1718 1718 void ieee80211_get_vht_mask_from_cap(__le16 vht_cap,
+11 -6
net/mac80211/mlme.c
··· 1379 1379 */ 1380 1380 if (has_80211h_pwr && 1381 1381 (!has_cisco_pwr || pwr_level_80211h <= pwr_level_cisco)) { 1382 + new_ap_level = pwr_level_80211h; 1383 + 1384 + if (sdata->ap_power_level == new_ap_level) 1385 + return 0; 1386 + 1382 1387 sdata_dbg(sdata, 1383 1388 "Limiting TX power to %d (%d - %d) dBm as advertised by %pM\n", 1384 1389 pwr_level_80211h, chan_pwr, pwr_reduction_80211h, 1385 1390 sdata->u.mgd.bssid); 1386 - new_ap_level = pwr_level_80211h; 1387 1391 } else { /* has_cisco_pwr is always true here. */ 1392 + new_ap_level = pwr_level_cisco; 1393 + 1394 + if (sdata->ap_power_level == new_ap_level) 1395 + return 0; 1396 + 1388 1397 sdata_dbg(sdata, 1389 1398 "Limiting TX power to %d dBm as advertised by %pM\n", 1390 1399 pwr_level_cisco, sdata->u.mgd.bssid); 1391 - new_ap_level = pwr_level_cisco; 1392 1400 } 1393 - 1394 - if (sdata->ap_power_level == new_ap_level) 1395 - return 0; 1396 1401 1397 1402 sdata->ap_power_level = new_ap_level; 1398 1403 if (__ieee80211_recalc_txpower(sdata)) ··· 3580 3575 3581 3576 if (sta && elems.opmode_notif) 3582 3577 ieee80211_vht_handle_opmode(sdata, sta, *elems.opmode_notif, 3583 - rx_status->band, true); 3578 + rx_status->band); 3584 3579 mutex_unlock(&local->sta_mtx); 3585 3580 3586 3581 changed |= ieee80211_handle_pwr_constr(sdata, chan, mgmt,
+1 -2
net/mac80211/rx.c
··· 2736 2736 opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode; 2737 2737 2738 2738 ieee80211_vht_handle_opmode(rx->sdata, rx->sta, 2739 - opmode, status->band, 2740 - false); 2739 + opmode, status->band); 2741 2740 goto handled; 2742 2741 } 2743 2742 default:
+63 -50
net/mac80211/util.c
··· 1641 1641 drv_stop(local); 1642 1642 } 1643 1643 1644 + static void ieee80211_flush_completed_scan(struct ieee80211_local *local, 1645 + bool aborted) 1646 + { 1647 + /* It's possible that we don't handle the scan completion in 1648 + * time during suspend, so if it's still marked as completed 1649 + * here, queue the work and flush it to clean things up. 1650 + * Instead of calling the worker function directly here, we 1651 + * really queue it to avoid potential races with other flows 1652 + * scheduling the same work. 1653 + */ 1654 + if (test_bit(SCAN_COMPLETED, &local->scanning)) { 1655 + /* If coming from reconfiguration failure, abort the scan so 1656 + * we don't attempt to continue a partial HW scan - which is 1657 + * possible otherwise if (e.g.) the 2.4 GHz portion was the 1658 + * completed scan, and a 5 GHz portion is still pending. 1659 + */ 1660 + if (aborted) 1661 + set_bit(SCAN_ABORTED, &local->scanning); 1662 + ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0); 1663 + flush_delayed_work(&local->scan_work); 1664 + } 1665 + } 1666 + 1644 1667 static void ieee80211_handle_reconfig_failure(struct ieee80211_local *local) 1645 1668 { 1646 1669 struct ieee80211_sub_if_data *sdata; ··· 1682 1659 local->resuming = false; 1683 1660 local->suspended = false; 1684 1661 local->in_reconfig = false; 1662 + 1663 + ieee80211_flush_completed_scan(local, true); 1685 1664 1686 1665 /* scheduled scan clearly can't be running any more, but tell 1687 1666 * cfg80211 and clear local state ··· 1721 1696 drv_assign_vif_chanctx(local, sdata, ctx); 1722 1697 } 1723 1698 mutex_unlock(&local->chanctx_mtx); 1699 + } 1700 + 1701 + static void ieee80211_reconfig_stations(struct ieee80211_sub_if_data *sdata) 1702 + { 1703 + struct ieee80211_local *local = sdata->local; 1704 + struct sta_info *sta; 1705 + 1706 + /* add STAs back */ 1707 + mutex_lock(&local->sta_mtx); 1708 + list_for_each_entry(sta, &local->sta_list, list) { 1709 + enum ieee80211_sta_state state; 
1710 + 1711 + if (!sta->uploaded || sta->sdata != sdata) 1712 + continue; 1713 + 1714 + for (state = IEEE80211_STA_NOTEXIST; 1715 + state < sta->sta_state; state++) 1716 + WARN_ON(drv_sta_state(local, sta->sdata, sta, state, 1717 + state + 1)); 1718 + } 1719 + mutex_unlock(&local->sta_mtx); 1724 1720 } 1725 1721 1726 1722 int ieee80211_reconfig(struct ieee80211_local *local) ··· 1879 1833 WARN_ON(drv_add_chanctx(local, ctx)); 1880 1834 mutex_unlock(&local->chanctx_mtx); 1881 1835 1882 - list_for_each_entry(sdata, &local->interfaces, list) { 1883 - if (!ieee80211_sdata_running(sdata)) 1884 - continue; 1885 - ieee80211_assign_chanctx(local, sdata); 1886 - } 1887 - 1888 1836 sdata = rtnl_dereference(local->monitor_sdata); 1889 1837 if (sdata && ieee80211_sdata_running(sdata)) 1890 1838 ieee80211_assign_chanctx(local, sdata); 1891 - } 1892 - 1893 - /* add STAs back */ 1894 - mutex_lock(&local->sta_mtx); 1895 - list_for_each_entry(sta, &local->sta_list, list) { 1896 - enum ieee80211_sta_state state; 1897 - 1898 - if (!sta->uploaded) 1899 - continue; 1900 - 1901 - /* AP-mode stations will be added later */ 1902 - if (sta->sdata->vif.type == NL80211_IFTYPE_AP) 1903 - continue; 1904 - 1905 - for (state = IEEE80211_STA_NOTEXIST; 1906 - state < sta->sta_state; state++) 1907 - WARN_ON(drv_sta_state(local, sta->sdata, sta, state, 1908 - state + 1)); 1909 - } 1910 - mutex_unlock(&local->sta_mtx); 1911 - 1912 - /* reconfigure tx conf */ 1913 - if (hw->queues >= IEEE80211_NUM_ACS) { 1914 - list_for_each_entry(sdata, &local->interfaces, list) { 1915 - if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN || 1916 - sdata->vif.type == NL80211_IFTYPE_MONITOR || 1917 - !ieee80211_sdata_running(sdata)) 1918 - continue; 1919 - 1920 - for (i = 0; i < IEEE80211_NUM_ACS; i++) 1921 - drv_conf_tx(local, sdata, i, 1922 - &sdata->tx_conf[i]); 1923 - } 1924 1839 } 1925 1840 1926 1841 /* reconfigure hardware */ ··· 1895 1888 1896 1889 if (!ieee80211_sdata_running(sdata)) 1897 1890 continue; 1891 + 1892 
+ ieee80211_assign_chanctx(local, sdata); 1893 + 1894 + switch (sdata->vif.type) { 1895 + case NL80211_IFTYPE_AP_VLAN: 1896 + case NL80211_IFTYPE_MONITOR: 1897 + break; 1898 + default: 1899 + ieee80211_reconfig_stations(sdata); 1900 + /* fall through */ 1901 + case NL80211_IFTYPE_AP: /* AP stations are handled later */ 1902 + for (i = 0; i < IEEE80211_NUM_ACS; i++) 1903 + drv_conf_tx(local, sdata, i, 1904 + &sdata->tx_conf[i]); 1905 + break; 1906 + } 1898 1907 1899 1908 /* common change flags for all interface types */ 1900 1909 changed = BSS_CHANGED_ERP_CTS_PROT | ··· 2097 2074 mb(); 2098 2075 local->resuming = false; 2099 2076 2100 - /* It's possible that we don't handle the scan completion in 2101 - * time during suspend, so if it's still marked as completed 2102 - * here, queue the work and flush it to clean things up. 2103 - * Instead of calling the worker function directly here, we 2104 - * really queue it to avoid potential races with other flows 2105 - * scheduling the same work. 2106 - */ 2107 - if (test_bit(SCAN_COMPLETED, &local->scanning)) { 2108 - ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0); 2109 - flush_delayed_work(&local->scan_work); 2110 - } 2077 + ieee80211_flush_completed_scan(local, false); 2111 2078 2112 2079 if (local->open_count && !reconfig_due_to_wowlan) 2113 2080 drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_SUSPEND);
+3 -7
net/mac80211/vht.c
··· 378 378 379 379 u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, 380 380 struct sta_info *sta, u8 opmode, 381 - enum ieee80211_band band, bool nss_only) 381 + enum ieee80211_band band) 382 382 { 383 383 struct ieee80211_local *local = sdata->local; 384 384 struct ieee80211_supported_band *sband; ··· 400 400 sta->sta.rx_nss = nss; 401 401 changed |= IEEE80211_RC_NSS_CHANGED; 402 402 } 403 - 404 - if (nss_only) 405 - return changed; 406 403 407 404 switch (opmode & IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK) { 408 405 case IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ: ··· 427 430 428 431 void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, 429 432 struct sta_info *sta, u8 opmode, 430 - enum ieee80211_band band, bool nss_only) 433 + enum ieee80211_band band) 431 434 { 432 435 struct ieee80211_local *local = sdata->local; 433 436 struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band]; 434 437 435 - u32 changed = __ieee80211_vht_handle_opmode(sdata, sta, opmode, 436 - band, nss_only); 438 + u32 changed = __ieee80211_vht_handle_opmode(sdata, sta, opmode, band); 437 439 438 440 if (changed > 0) 439 441 rate_control_rate_update(local, sband, sta, changed);
+31 -12
net/mpls/af_mpls.c
··· 27 27 */ 28 28 #define MAX_MP_SELECT_LABELS 4 29 29 30 + #define MPLS_NEIGH_TABLE_UNSPEC (NEIGH_LINK_TABLE + 1) 31 + 30 32 static int zero = 0; 31 33 static int label_limit = (1 << 20) - 1; 32 34 ··· 319 317 } 320 318 } 321 319 322 - err = neigh_xmit(nh->nh_via_table, out_dev, mpls_nh_via(rt, nh), skb); 320 + /* If via wasn't specified then send out using device address */ 321 + if (nh->nh_via_table == MPLS_NEIGH_TABLE_UNSPEC) 322 + err = neigh_xmit(NEIGH_LINK_TABLE, out_dev, 323 + out_dev->dev_addr, skb); 324 + else 325 + err = neigh_xmit(nh->nh_via_table, out_dev, 326 + mpls_nh_via(rt, nh), skb); 323 327 if (err) 324 328 net_dbg_ratelimited("%s: packet transmission failed: %d\n", 325 329 __func__, err); ··· 542 534 if (!mpls_dev_get(dev)) 543 535 goto errout; 544 536 537 + if ((nh->nh_via_table == NEIGH_LINK_TABLE) && 538 + (dev->addr_len != nh->nh_via_alen)) 539 + goto errout; 540 + 545 541 RCU_INIT_POINTER(nh->nh_dev, dev); 546 542 547 543 return 0; ··· 604 592 goto errout; 605 593 } 606 594 607 - err = nla_get_via(via, &nh->nh_via_alen, &nh->nh_via_table, 608 - __mpls_nh_via(rt, nh)); 609 - if (err) 610 - goto errout; 595 + if (via) { 596 + err = nla_get_via(via, &nh->nh_via_alen, &nh->nh_via_table, 597 + __mpls_nh_via(rt, nh)); 598 + if (err) 599 + goto errout; 600 + } else { 601 + nh->nh_via_table = MPLS_NEIGH_TABLE_UNSPEC; 602 + } 611 603 612 604 err = mpls_nh_assign_dev(net, rt, nh, oif); 613 605 if (err) ··· 692 676 nla_via = nla_find(attrs, attrlen, RTA_VIA); 693 677 nla_newdst = nla_find(attrs, attrlen, RTA_NEWDST); 694 678 } 695 - 696 - if (!nla_via) 697 - goto errout; 698 679 699 680 err = mpls_nh_build(cfg->rc_nlinfo.nl_net, rt, nh, 700 681 rtnh->rtnh_ifindex, nla_via, ··· 1131 1118 1132 1119 cfg->rc_label = LABEL_NOT_SPECIFIED; 1133 1120 cfg->rc_protocol = rtm->rtm_protocol; 1121 + cfg->rc_via_table = MPLS_NEIGH_TABLE_UNSPEC; 1134 1122 cfg->rc_nlflags = nlh->nlmsg_flags; 1135 1123 cfg->rc_nlinfo.portid = NETLINK_CB(skb).portid; 1136 1124 
cfg->rc_nlinfo.nlh = nlh; ··· 1245 1231 nla_put_labels(skb, RTA_NEWDST, nh->nh_labels, 1246 1232 nh->nh_label)) 1247 1233 goto nla_put_failure; 1248 - if (nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh), 1234 + if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC && 1235 + nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh), 1249 1236 nh->nh_via_alen)) 1250 1237 goto nla_put_failure; 1251 1238 dev = rtnl_dereference(nh->nh_dev); ··· 1272 1257 nh->nh_labels, 1273 1258 nh->nh_label)) 1274 1259 goto nla_put_failure; 1275 - if (nla_put_via(skb, nh->nh_via_table, 1260 + if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC && 1261 + nla_put_via(skb, nh->nh_via_table, 1276 1262 mpls_nh_via(rt, nh), 1277 1263 nh->nh_via_alen)) 1278 1264 goto nla_put_failure; ··· 1335 1319 1336 1320 if (nh->nh_dev) 1337 1321 payload += nla_total_size(4); /* RTA_OIF */ 1338 - payload += nla_total_size(2 + nh->nh_via_alen); /* RTA_VIA */ 1322 + if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC) /* RTA_VIA */ 1323 + payload += nla_total_size(2 + nh->nh_via_alen); 1339 1324 if (nh->nh_labels) /* RTA_NEWDST */ 1340 1325 payload += nla_total_size(nh->nh_labels * 4); 1341 1326 } else { ··· 1345 1328 1346 1329 for_nexthops(rt) { 1347 1330 nhsize += nla_total_size(sizeof(struct rtnexthop)); 1348 - nhsize += nla_total_size(2 + nh->nh_via_alen); 1331 + /* RTA_VIA */ 1332 + if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC) 1333 + nhsize += nla_total_size(2 + nh->nh_via_alen); 1349 1334 if (nh->nh_labels) 1350 1335 nhsize += nla_total_size(nh->nh_labels * 4); 1351 1336 } endfor_nexthops(rt);
+2 -2
net/mpls/mpls_iptunnel.c
··· 54 54 unsigned int ttl; 55 55 56 56 /* Obtain the ttl */ 57 - if (skb->protocol == htons(ETH_P_IP)) { 57 + if (dst->ops->family == AF_INET) { 58 58 ttl = ip_hdr(skb)->ttl; 59 59 rt = (struct rtable *)dst; 60 - } else if (skb->protocol == htons(ETH_P_IPV6)) { 60 + } else if (dst->ops->family == AF_INET6) { 61 61 ttl = ipv6_hdr(skb)->hop_limit; 62 62 rt6 = (struct rt6_info *)dst; 63 63 } else {
+47 -52
net/netfilter/nf_tables_api.c
··· 89 89 } 90 90 91 91 static void nft_ctx_init(struct nft_ctx *ctx, 92 + struct net *net, 92 93 const struct sk_buff *skb, 93 94 const struct nlmsghdr *nlh, 94 95 struct nft_af_info *afi, ··· 97 96 struct nft_chain *chain, 98 97 const struct nlattr * const *nla) 99 98 { 100 - ctx->net = sock_net(skb->sk); 99 + ctx->net = net; 101 100 ctx->afi = afi; 102 101 ctx->table = table; 103 102 ctx->chain = chain; ··· 673 672 return ret; 674 673 } 675 674 676 - static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb, 677 - const struct nlmsghdr *nlh, 675 + static int nf_tables_newtable(struct net *net, struct sock *nlsk, 676 + struct sk_buff *skb, const struct nlmsghdr *nlh, 678 677 const struct nlattr * const nla[]) 679 678 { 680 679 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 681 680 const struct nlattr *name; 682 681 struct nft_af_info *afi; 683 682 struct nft_table *table; 684 - struct net *net = sock_net(skb->sk); 685 683 int family = nfmsg->nfgen_family; 686 684 u32 flags = 0; 687 685 struct nft_ctx ctx; ··· 706 706 if (nlh->nlmsg_flags & NLM_F_REPLACE) 707 707 return -EOPNOTSUPP; 708 708 709 - nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla); 709 + nft_ctx_init(&ctx, net, skb, nlh, afi, table, NULL, nla); 710 710 return nf_tables_updtable(&ctx); 711 711 } 712 712 ··· 730 730 INIT_LIST_HEAD(&table->sets); 731 731 table->flags = flags; 732 732 733 - nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla); 733 + nft_ctx_init(&ctx, net, skb, nlh, afi, table, NULL, nla); 734 734 err = nft_trans_table_add(&ctx, NFT_MSG_NEWTABLE); 735 735 if (err < 0) 736 736 goto err3; ··· 810 810 return err; 811 811 } 812 812 813 - static int nf_tables_deltable(struct sock *nlsk, struct sk_buff *skb, 814 - const struct nlmsghdr *nlh, 813 + static int nf_tables_deltable(struct net *net, struct sock *nlsk, 814 + struct sk_buff *skb, const struct nlmsghdr *nlh, 815 815 const struct nlattr * const nla[]) 816 816 { 817 817 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 818 
818 struct nft_af_info *afi; 819 819 struct nft_table *table; 820 - struct net *net = sock_net(skb->sk); 821 820 int family = nfmsg->nfgen_family; 822 821 struct nft_ctx ctx; 823 822 824 - nft_ctx_init(&ctx, skb, nlh, NULL, NULL, NULL, nla); 823 + nft_ctx_init(&ctx, net, skb, nlh, NULL, NULL, NULL, nla); 825 824 if (family == AF_UNSPEC || nla[NFTA_TABLE_NAME] == NULL) 826 825 return nft_flush(&ctx, family); 827 826 ··· 1220 1221 } 1221 1222 } 1222 1223 1223 - static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb, 1224 - const struct nlmsghdr *nlh, 1224 + static int nf_tables_newchain(struct net *net, struct sock *nlsk, 1225 + struct sk_buff *skb, const struct nlmsghdr *nlh, 1225 1226 const struct nlattr * const nla[]) 1226 1227 { 1227 1228 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); ··· 1231 1232 struct nft_chain *chain; 1232 1233 struct nft_base_chain *basechain = NULL; 1233 1234 struct nlattr *ha[NFTA_HOOK_MAX + 1]; 1234 - struct net *net = sock_net(skb->sk); 1235 1235 int family = nfmsg->nfgen_family; 1236 1236 struct net_device *dev = NULL; 1237 1237 u8 policy = NF_ACCEPT; ··· 1311 1313 return PTR_ERR(stats); 1312 1314 } 1313 1315 1314 - nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla); 1316 + nft_ctx_init(&ctx, net, skb, nlh, afi, table, chain, nla); 1315 1317 trans = nft_trans_alloc(&ctx, NFT_MSG_NEWCHAIN, 1316 1318 sizeof(struct nft_trans_chain)); 1317 1319 if (trans == NULL) { ··· 1459 1461 if (err < 0) 1460 1462 goto err1; 1461 1463 1462 - nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla); 1464 + nft_ctx_init(&ctx, net, skb, nlh, afi, table, chain, nla); 1463 1465 err = nft_trans_chain_add(&ctx, NFT_MSG_NEWCHAIN); 1464 1466 if (err < 0) 1465 1467 goto err2; ··· 1474 1476 return err; 1475 1477 } 1476 1478 1477 - static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb, 1478 - const struct nlmsghdr *nlh, 1479 + static int nf_tables_delchain(struct net *net, struct sock *nlsk, 1480 + struct sk_buff *skb, const struct 
nlmsghdr *nlh, 1479 1481 const struct nlattr * const nla[]) 1480 1482 { 1481 1483 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 1482 1484 struct nft_af_info *afi; 1483 1485 struct nft_table *table; 1484 1486 struct nft_chain *chain; 1485 - struct net *net = sock_net(skb->sk); 1486 1487 int family = nfmsg->nfgen_family; 1487 1488 struct nft_ctx ctx; 1488 1489 ··· 1503 1506 if (chain->use > 0) 1504 1507 return -EBUSY; 1505 1508 1506 - nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla); 1509 + nft_ctx_init(&ctx, net, skb, nlh, afi, table, chain, nla); 1507 1510 1508 1511 return nft_delchain(&ctx); 1509 1512 } ··· 2007 2010 2008 2011 static struct nft_expr_info *info; 2009 2012 2010 - static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb, 2011 - const struct nlmsghdr *nlh, 2013 + static int nf_tables_newrule(struct net *net, struct sock *nlsk, 2014 + struct sk_buff *skb, const struct nlmsghdr *nlh, 2012 2015 const struct nlattr * const nla[]) 2013 2016 { 2014 2017 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 2015 2018 struct nft_af_info *afi; 2016 - struct net *net = sock_net(skb->sk); 2017 2019 struct nft_table *table; 2018 2020 struct nft_chain *chain; 2019 2021 struct nft_rule *rule, *old_rule = NULL; ··· 2071 2075 return PTR_ERR(old_rule); 2072 2076 } 2073 2077 2074 - nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla); 2078 + nft_ctx_init(&ctx, net, skb, nlh, afi, table, chain, nla); 2075 2079 2076 2080 n = 0; 2077 2081 size = 0; ··· 2172 2176 return err; 2173 2177 } 2174 2178 2175 - static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb, 2176 - const struct nlmsghdr *nlh, 2179 + static int nf_tables_delrule(struct net *net, struct sock *nlsk, 2180 + struct sk_buff *skb, const struct nlmsghdr *nlh, 2177 2181 const struct nlattr * const nla[]) 2178 2182 { 2179 2183 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 2180 2184 struct nft_af_info *afi; 2181 - struct net *net = sock_net(skb->sk); 2182 2185 struct nft_table *table; 2183 
2186 struct nft_chain *chain = NULL; 2184 2187 struct nft_rule *rule; ··· 2200 2205 return PTR_ERR(chain); 2201 2206 } 2202 2207 2203 - nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla); 2208 + nft_ctx_init(&ctx, net, skb, nlh, afi, table, chain, nla); 2204 2209 2205 2210 if (chain) { 2206 2211 if (nla[NFTA_RULE_HANDLE]) { ··· 2339 2344 [NFTA_SET_DESC_SIZE] = { .type = NLA_U32 }, 2340 2345 }; 2341 2346 2342 - static int nft_ctx_init_from_setattr(struct nft_ctx *ctx, 2347 + static int nft_ctx_init_from_setattr(struct nft_ctx *ctx, struct net *net, 2343 2348 const struct sk_buff *skb, 2344 2349 const struct nlmsghdr *nlh, 2345 2350 const struct nlattr * const nla[]) 2346 2351 { 2347 - struct net *net = sock_net(skb->sk); 2348 2352 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 2349 2353 struct nft_af_info *afi = NULL; 2350 2354 struct nft_table *table = NULL; ··· 2365 2371 return -ENOENT; 2366 2372 } 2367 2373 2368 - nft_ctx_init(ctx, skb, nlh, afi, table, NULL, nla); 2374 + nft_ctx_init(ctx, net, skb, nlh, afi, table, NULL, nla); 2369 2375 return 0; 2370 2376 } 2371 2377 ··· 2617 2623 const struct nlmsghdr *nlh, 2618 2624 const struct nlattr * const nla[]) 2619 2625 { 2626 + struct net *net = sock_net(skb->sk); 2620 2627 const struct nft_set *set; 2621 2628 struct nft_ctx ctx; 2622 2629 struct sk_buff *skb2; ··· 2625 2630 int err; 2626 2631 2627 2632 /* Verify existence before starting dump */ 2628 - err = nft_ctx_init_from_setattr(&ctx, skb, nlh, nla); 2633 + err = nft_ctx_init_from_setattr(&ctx, net, skb, nlh, nla); 2629 2634 if (err < 0) 2630 2635 return err; 2631 2636 ··· 2688 2693 return 0; 2689 2694 } 2690 2695 2691 - static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb, 2692 - const struct nlmsghdr *nlh, 2696 + static int nf_tables_newset(struct net *net, struct sock *nlsk, 2697 + struct sk_buff *skb, const struct nlmsghdr *nlh, 2693 2698 const struct nlattr * const nla[]) 2694 2699 { 2695 2700 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 
2696 2701 const struct nft_set_ops *ops; 2697 2702 struct nft_af_info *afi; 2698 - struct net *net = sock_net(skb->sk); 2699 2703 struct nft_table *table; 2700 2704 struct nft_set *set; 2701 2705 struct nft_ctx ctx; ··· 2792 2798 if (IS_ERR(table)) 2793 2799 return PTR_ERR(table); 2794 2800 2795 - nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla); 2801 + nft_ctx_init(&ctx, net, skb, nlh, afi, table, NULL, nla); 2796 2802 2797 2803 set = nf_tables_set_lookup(table, nla[NFTA_SET_NAME]); 2798 2804 if (IS_ERR(set)) { ··· 2876 2882 nft_set_destroy(set); 2877 2883 } 2878 2884 2879 - static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb, 2880 - const struct nlmsghdr *nlh, 2885 + static int nf_tables_delset(struct net *net, struct sock *nlsk, 2886 + struct sk_buff *skb, const struct nlmsghdr *nlh, 2881 2887 const struct nlattr * const nla[]) 2882 2888 { 2883 2889 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); ··· 2890 2896 if (nla[NFTA_SET_TABLE] == NULL) 2891 2897 return -EINVAL; 2892 2898 2893 - err = nft_ctx_init_from_setattr(&ctx, skb, nlh, nla); 2899 + err = nft_ctx_init_from_setattr(&ctx, net, skb, nlh, nla); 2894 2900 if (err < 0) 2895 2901 return err; 2896 2902 ··· 3018 3024 [NFTA_SET_ELEM_LIST_SET_ID] = { .type = NLA_U32 }, 3019 3025 }; 3020 3026 3021 - static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx, 3027 + static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx, struct net *net, 3022 3028 const struct sk_buff *skb, 3023 3029 const struct nlmsghdr *nlh, 3024 3030 const struct nlattr * const nla[], ··· 3027 3033 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 3028 3034 struct nft_af_info *afi; 3029 3035 struct nft_table *table; 3030 - struct net *net = sock_net(skb->sk); 3031 3036 3032 3037 afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, false); 3033 3038 if (IS_ERR(afi)) ··· 3038 3045 if (!trans && (table->flags & NFT_TABLE_INACTIVE)) 3039 3046 return -ENOENT; 3040 3047 3041 - nft_ctx_init(ctx, skb, nlh, afi, table, NULL, nla); 
3048 + nft_ctx_init(ctx, net, skb, nlh, afi, table, NULL, nla); 3042 3049 return 0; 3043 3050 } 3044 3051 ··· 3128 3135 3129 3136 static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb) 3130 3137 { 3138 + struct net *net = sock_net(skb->sk); 3131 3139 const struct nft_set *set; 3132 3140 struct nft_set_dump_args args; 3133 3141 struct nft_ctx ctx; ··· 3144 3150 if (err < 0) 3145 3151 return err; 3146 3152 3147 - err = nft_ctx_init_from_elemattr(&ctx, cb->skb, cb->nlh, (void *)nla, 3148 - false); 3153 + err = nft_ctx_init_from_elemattr(&ctx, net, cb->skb, cb->nlh, 3154 + (void *)nla, false); 3149 3155 if (err < 0) 3150 3156 return err; 3151 3157 ··· 3206 3212 const struct nlmsghdr *nlh, 3207 3213 const struct nlattr * const nla[]) 3208 3214 { 3215 + struct net *net = sock_net(skb->sk); 3209 3216 const struct nft_set *set; 3210 3217 struct nft_ctx ctx; 3211 3218 int err; 3212 3219 3213 - err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, false); 3220 + err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla, false); 3214 3221 if (err < 0) 3215 3222 return err; 3216 3223 ··· 3523 3528 return err; 3524 3529 } 3525 3530 3526 - static int nf_tables_newsetelem(struct sock *nlsk, struct sk_buff *skb, 3527 - const struct nlmsghdr *nlh, 3531 + static int nf_tables_newsetelem(struct net *net, struct sock *nlsk, 3532 + struct sk_buff *skb, const struct nlmsghdr *nlh, 3528 3533 const struct nlattr * const nla[]) 3529 3534 { 3530 - struct net *net = sock_net(skb->sk); 3531 3535 const struct nlattr *attr; 3532 3536 struct nft_set *set; 3533 3537 struct nft_ctx ctx; ··· 3535 3541 if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL) 3536 3542 return -EINVAL; 3537 3543 3538 - err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, true); 3544 + err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla, true); 3539 3545 if (err < 0) 3540 3546 return err; 3541 3547 ··· 3617 3623 return err; 3618 3624 } 3619 3625 3620 - static int nf_tables_delsetelem(struct sock 
*nlsk, struct sk_buff *skb, 3621 - const struct nlmsghdr *nlh, 3626 + static int nf_tables_delsetelem(struct net *net, struct sock *nlsk, 3627 + struct sk_buff *skb, const struct nlmsghdr *nlh, 3622 3628 const struct nlattr * const nla[]) 3623 3629 { 3624 3630 const struct nlattr *attr; ··· 3629 3635 if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL) 3630 3636 return -EINVAL; 3631 3637 3632 - err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, false); 3638 + err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla, false); 3633 3639 if (err < 0) 3634 3640 return err; 3635 3641 ··· 4024 4030 struct nft_trans *trans, *next; 4025 4031 struct nft_trans_elem *te; 4026 4032 4027 - list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) { 4033 + list_for_each_entry_safe_reverse(trans, next, &net->nft.commit_list, 4034 + list) { 4028 4035 switch (trans->msg_type) { 4029 4036 case NFT_MSG_NEWTABLE: 4030 4037 if (nft_trans_table_update(trans)) {
+1 -3
net/netfilter/nfnetlink.c
··· 295 295 if (!skb) 296 296 return netlink_ack(oskb, nlh, -ENOMEM); 297 297 298 - skb->sk = oskb->sk; 299 - 300 298 nfnl_lock(subsys_id); 301 299 ss = rcu_dereference_protected(table[subsys_id].subsys, 302 300 lockdep_is_held(&table[subsys_id].mutex)); ··· 379 381 goto ack; 380 382 381 383 if (nc->call_batch) { 382 - err = nc->call_batch(net->nfnl, skb, nlh, 384 + err = nc->call_batch(net, net->nfnl, skb, nlh, 383 385 (const struct nlattr **)cda); 384 386 } 385 387
+13 -3
net/openvswitch/conntrack.c
··· 53 53 struct md_labels labels; 54 54 }; 55 55 56 + static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info); 57 + 56 58 static u16 key_to_nfproto(const struct sw_flow_key *key) 57 59 { 58 60 switch (ntohs(key->eth.type)) { ··· 143 141 * previously sent the packet to conntrack via the ct action. 144 142 */ 145 143 static void ovs_ct_update_key(const struct sk_buff *skb, 144 + const struct ovs_conntrack_info *info, 146 145 struct sw_flow_key *key, bool post_ct) 147 146 { 148 147 const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt; ··· 161 158 zone = nf_ct_zone(ct); 162 159 } else if (post_ct) { 163 160 state = OVS_CS_F_TRACKED | OVS_CS_F_INVALID; 161 + if (info) 162 + zone = &info->zone; 164 163 } 165 164 __ovs_ct_update_key(key, state, zone, ct); 166 165 } 167 166 168 167 void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key) 169 168 { 170 - ovs_ct_update_key(skb, key, false); 169 + ovs_ct_update_key(skb, NULL, key, false); 171 170 } 172 171 173 172 int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb) ··· 423 418 } 424 419 } 425 420 426 - ovs_ct_update_key(skb, key, true); 421 + ovs_ct_update_key(skb, info, key, true); 427 422 428 423 return 0; 429 424 } ··· 713 708 nf_conntrack_get(&ct_info.ct->ct_general); 714 709 return 0; 715 710 err_free_ct: 716 - nf_conntrack_free(ct_info.ct); 711 + __ovs_ct_free_action(&ct_info); 717 712 return err; 718 713 } 719 714 ··· 755 750 { 756 751 struct ovs_conntrack_info *ct_info = nla_data(a); 757 752 753 + __ovs_ct_free_action(ct_info); 754 + } 755 + 756 + static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info) 757 + { 758 758 if (ct_info->helper) 759 759 module_put(ct_info->helper->me); 760 760 if (ct_info->ct)
+3 -3
net/rfkill/core.c
··· 49 49 struct rfkill { 50 50 spinlock_t lock; 51 51 52 - const char *name; 53 52 enum rfkill_type type; 54 53 55 54 unsigned long state; ··· 72 73 struct delayed_work poll_work; 73 74 struct work_struct uevent_work; 74 75 struct work_struct sync_work; 76 + char name[]; 75 77 }; 76 78 #define to_rfkill(d) container_of(d, struct rfkill, dev) 77 79 ··· 876 876 if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES)) 877 877 return NULL; 878 878 879 - rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL); 879 + rfkill = kzalloc(sizeof(*rfkill) + strlen(name) + 1, GFP_KERNEL); 880 880 if (!rfkill) 881 881 return NULL; 882 882 883 883 spin_lock_init(&rfkill->lock); 884 884 INIT_LIST_HEAD(&rfkill->node); 885 885 rfkill->type = type; 886 - rfkill->name = name; 886 + strcpy(rfkill->name, name); 887 887 rfkill->ops = ops; 888 888 rfkill->data = ops_data; 889 889
+1 -1
net/sched/sch_api.c
··· 950 950 } 951 951 lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock); 952 952 if (!netif_is_multiqueue(dev)) 953 - sch->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; 953 + sch->flags |= TCQ_F_ONETXQUEUE; 954 954 } 955 955 956 956 sch->handle = handle;
+9 -2
net/sctp/ipv6.c
··· 323 323 } 324 324 } 325 325 } 326 - rcu_read_unlock(); 327 - 328 326 if (baddr) { 329 327 fl6->saddr = baddr->v6.sin6_addr; 330 328 fl6->fl6_sport = baddr->v6.sin6_port; 331 329 final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final); 332 330 dst = ip6_dst_lookup_flow(sk, fl6, final_p); 333 331 } 332 + rcu_read_unlock(); 334 333 335 334 out: 336 335 if (!IS_ERR_OR_NULL(dst)) { ··· 641 642 struct sock *newsk; 642 643 struct ipv6_pinfo *newnp, *np = inet6_sk(sk); 643 644 struct sctp6_sock *newsctp6sk; 645 + struct ipv6_txoptions *opt; 644 646 645 647 newsk = sk_alloc(sock_net(sk), PF_INET6, GFP_KERNEL, sk->sk_prot, 0); 646 648 if (!newsk) ··· 660 660 newnp = inet6_sk(newsk); 661 661 662 662 memcpy(newnp, np, sizeof(struct ipv6_pinfo)); 663 + 664 + rcu_read_lock(); 665 + opt = rcu_dereference(np->opt); 666 + if (opt) 667 + opt = ipv6_dup_options(newsk, opt); 668 + RCU_INIT_POINTER(newnp->opt, opt); 669 + rcu_read_unlock(); 663 670 664 671 /* Initialize sk's sport, dport, rcv_saddr and daddr for getsockname() 665 672 * and getpeername().
+2
net/sctp/outqueue.c
··· 324 324 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) : 325 325 "illegal chunk"); 326 326 327 + sctp_chunk_hold(chunk); 327 328 sctp_outq_tail_data(q, chunk); 328 329 if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) 329 330 SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS); ··· 1252 1251 */ 1253 1252 1254 1253 sack_a_rwnd = ntohl(sack->a_rwnd); 1254 + asoc->peer.zero_window_announced = !sack_a_rwnd; 1255 1255 outstanding = q->outstanding_bytes; 1256 1256 1257 1257 if (outstanding < sack_a_rwnd)
+2 -2
net/sctp/sm_make_chunk.c
··· 1652 1652 1653 1653 /* Set an expiration time for the cookie. */ 1654 1654 cookie->c.expiration = ktime_add(asoc->cookie_life, 1655 - ktime_get()); 1655 + ktime_get_real()); 1656 1656 1657 1657 /* Copy the peer's init packet. */ 1658 1658 memcpy(&cookie->c.peer_init[0], init_chunk->chunk_hdr, ··· 1780 1780 if (sock_flag(ep->base.sk, SOCK_TIMESTAMP)) 1781 1781 kt = skb_get_ktime(skb); 1782 1782 else 1783 - kt = ktime_get(); 1783 + kt = ktime_get_real(); 1784 1784 1785 1785 if (!asoc && ktime_before(bear_cookie->expiration, kt)) { 1786 1786 /*
+2 -1
net/sctp/sm_statefuns.c
··· 5412 5412 SCTP_INC_STATS(net, SCTP_MIB_T3_RTX_EXPIREDS); 5413 5413 5414 5414 if (asoc->overall_error_count >= asoc->max_retrans) { 5415 - if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING) { 5415 + if (asoc->peer.zero_window_announced && 5416 + asoc->state == SCTP_STATE_SHUTDOWN_PENDING) { 5416 5417 /* 5417 5418 * We are here likely because the receiver had its rwnd 5418 5419 * closed for a while and we have not been able to
+6 -6
net/sctp/socket.c
··· 1952 1952 1953 1953 /* Now send the (possibly) fragmented message. */ 1954 1954 list_for_each_entry(chunk, &datamsg->chunks, frag_list) { 1955 - sctp_chunk_hold(chunk); 1956 - 1957 1955 /* Do accounting for the write space. */ 1958 1956 sctp_set_owner_w(chunk); 1959 1957 ··· 1964 1966 * breaks. 1965 1967 */ 1966 1968 err = sctp_primitive_SEND(net, asoc, datamsg); 1969 + sctp_datamsg_put(datamsg); 1967 1970 /* Did the lower layer accept the chunk? */ 1968 - if (err) { 1969 - sctp_datamsg_free(datamsg); 1971 + if (err) 1970 1972 goto out_free; 1971 - } 1972 1973 1973 1974 pr_debug("%s: we sent primitively\n", __func__); 1974 1975 1975 - sctp_datamsg_put(datamsg); 1976 1976 err = msg_len; 1977 1977 1978 1978 if (unlikely(wait_connect)) { ··· 7163 7167 newsk->sk_type = sk->sk_type; 7164 7168 newsk->sk_bound_dev_if = sk->sk_bound_dev_if; 7165 7169 newsk->sk_flags = sk->sk_flags; 7170 + newsk->sk_tsflags = sk->sk_tsflags; 7166 7171 newsk->sk_no_check_tx = sk->sk_no_check_tx; 7167 7172 newsk->sk_no_check_rx = sk->sk_no_check_rx; 7168 7173 newsk->sk_reuse = sk->sk_reuse; ··· 7196 7199 newinet->mc_ttl = 1; 7197 7200 newinet->mc_index = 0; 7198 7201 newinet->mc_list = NULL; 7202 + 7203 + if (newsk->sk_flags & SK_FLAGS_TIMESTAMP) 7204 + net_enable_timestamp(); 7199 7205 } 7200 7206 7201 7207 static inline void sctp_copy_descendant(struct sock *sk_to,
+1
net/socket.c
··· 1695 1695 msg.msg_name = addr ? (struct sockaddr *)&address : NULL; 1696 1696 /* We assume all kernel code knows the size of sockaddr_storage */ 1697 1697 msg.msg_namelen = 0; 1698 + msg.msg_iocb = NULL; 1698 1699 if (sock->file->f_flags & O_NONBLOCK) 1699 1700 flags |= MSG_DONTWAIT; 1700 1701 err = sock_recvmsg(sock, &msg, iov_iter_count(&msg.msg_iter), flags);
+3 -10
net/unix/af_unix.c
··· 2256 2256 /* Lock the socket to prevent queue disordering 2257 2257 * while sleeps in memcpy_tomsg 2258 2258 */ 2259 - err = mutex_lock_interruptible(&u->readlock); 2260 - if (unlikely(err)) { 2261 - /* recvmsg() in non blocking mode is supposed to return -EAGAIN 2262 - * sk_rcvtimeo is not honored by mutex_lock_interruptible() 2263 - */ 2264 - err = noblock ? -EAGAIN : -ERESTARTSYS; 2265 - goto out; 2266 - } 2259 + mutex_lock(&u->readlock); 2267 2260 2268 2261 if (flags & MSG_PEEK) 2269 2262 skip = sk_peek_offset(sk, flags); ··· 2300 2307 timeo = unix_stream_data_wait(sk, timeo, last, 2301 2308 last_len); 2302 2309 2303 - if (signal_pending(current) || 2304 - mutex_lock_interruptible(&u->readlock)) { 2310 + if (signal_pending(current)) { 2305 2311 err = sock_intr_errno(timeo); 2306 2312 goto out; 2307 2313 } 2308 2314 2315 + mutex_lock(&u->readlock); 2309 2316 continue; 2310 2317 unlock: 2311 2318 unix_state_unlock(sk);
+4 -1
net/wireless/nl80211.c
··· 7941 7941 if (nla_get_flag(info->attrs[NL80211_ATTR_USE_RRM])) { 7942 7942 if (!(rdev->wiphy.features & 7943 7943 NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES) || 7944 - !(rdev->wiphy.features & NL80211_FEATURE_QUIET)) 7944 + !(rdev->wiphy.features & NL80211_FEATURE_QUIET)) { 7945 + kzfree(connkeys); 7945 7946 return -EINVAL; 7947 + } 7946 7948 connect.flags |= ASSOC_REQ_USE_RRM; 7947 7949 } 7948 7950 ··· 9505 9503 if (new_triggers.tcp && new_triggers.tcp->sock) 9506 9504 sock_release(new_triggers.tcp->sock); 9507 9505 kfree(new_triggers.tcp); 9506 + kfree(new_triggers.nd_config); 9508 9507 return err; 9509 9508 } 9510 9509 #endif
+4 -1
net/wireless/reg.c
··· 3029 3029 break; 3030 3030 default: 3031 3031 WARN(1, "invalid initiator %d\n", lr->initiator); 3032 + kfree(rd); 3032 3033 return -EINVAL; 3033 3034 } 3034 3035 ··· 3222 3221 /* We always try to get an update for the static regdomain */ 3223 3222 err = regulatory_hint_core(cfg80211_world_regdom->alpha2); 3224 3223 if (err) { 3225 - if (err == -ENOMEM) 3224 + if (err == -ENOMEM) { 3225 + platform_device_unregister(reg_pdev); 3226 3226 return err; 3227 + } 3227 3228 /* 3228 3229 * N.B. kobject_uevent_env() can fail mainly for when we're out 3229 3230 * memory which is handled and propagated appropriately above
+36 -14
net/xfrm/xfrm_policy.c
··· 303 303 } 304 304 EXPORT_SYMBOL(xfrm_policy_alloc); 305 305 306 + static void xfrm_policy_destroy_rcu(struct rcu_head *head) 307 + { 308 + struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu); 309 + 310 + security_xfrm_policy_free(policy->security); 311 + kfree(policy); 312 + } 313 + 306 314 /* Destroy xfrm_policy: descendant resources must be released to this moment. */ 307 315 308 316 void xfrm_policy_destroy(struct xfrm_policy *policy) ··· 320 312 if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer)) 321 313 BUG(); 322 314 323 - security_xfrm_policy_free(policy->security); 324 - kfree(policy); 315 + call_rcu(&policy->rcu, xfrm_policy_destroy_rcu); 325 316 } 326 317 EXPORT_SYMBOL(xfrm_policy_destroy); 327 318 ··· 1221 1214 struct xfrm_policy *pol; 1222 1215 struct net *net = sock_net(sk); 1223 1216 1217 + rcu_read_lock(); 1224 1218 read_lock_bh(&net->xfrm.xfrm_policy_lock); 1225 - if ((pol = sk->sk_policy[dir]) != NULL) { 1219 + pol = rcu_dereference(sk->sk_policy[dir]); 1220 + if (pol != NULL) { 1226 1221 bool match = xfrm_selector_match(&pol->selector, fl, 1227 1222 sk->sk_family); 1228 1223 int err = 0; ··· 1248 1239 } 1249 1240 out: 1250 1241 read_unlock_bh(&net->xfrm.xfrm_policy_lock); 1242 + rcu_read_unlock(); 1251 1243 return pol; 1252 1244 } 1253 1245 ··· 1317 1307 #endif 1318 1308 1319 1309 write_lock_bh(&net->xfrm.xfrm_policy_lock); 1320 - old_pol = sk->sk_policy[dir]; 1321 - sk->sk_policy[dir] = pol; 1310 + old_pol = rcu_dereference_protected(sk->sk_policy[dir], 1311 + lockdep_is_held(&net->xfrm.xfrm_policy_lock)); 1322 1312 if (pol) { 1323 1313 pol->curlft.add_time = get_seconds(); 1324 1314 pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0); 1325 1315 xfrm_sk_policy_link(pol, dir); 1326 1316 } 1317 + rcu_assign_pointer(sk->sk_policy[dir], pol); 1327 1318 if (old_pol) { 1328 1319 if (pol) 1329 1320 xfrm_policy_requeue(old_pol, pol); ··· 1372 1361 return newp; 1373 1362 } 1374 1363 1375 - int 
__xfrm_sk_clone_policy(struct sock *sk) 1364 + int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk) 1376 1365 { 1377 - struct xfrm_policy *p0 = sk->sk_policy[0], 1378 - *p1 = sk->sk_policy[1]; 1366 + const struct xfrm_policy *p; 1367 + struct xfrm_policy *np; 1368 + int i, ret = 0; 1379 1369 1380 - sk->sk_policy[0] = sk->sk_policy[1] = NULL; 1381 - if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL) 1382 - return -ENOMEM; 1383 - if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL) 1384 - return -ENOMEM; 1385 - return 0; 1370 + rcu_read_lock(); 1371 + for (i = 0; i < 2; i++) { 1372 + p = rcu_dereference(osk->sk_policy[i]); 1373 + if (p) { 1374 + np = clone_policy(p, i); 1375 + if (unlikely(!np)) { 1376 + ret = -ENOMEM; 1377 + break; 1378 + } 1379 + rcu_assign_pointer(sk->sk_policy[i], np); 1380 + } 1381 + } 1382 + rcu_read_unlock(); 1383 + return ret; 1386 1384 } 1387 1385 1388 1386 static int ··· 2218 2198 xdst = NULL; 2219 2199 route = NULL; 2220 2200 2201 + sk = sk_const_to_full_sk(sk); 2221 2202 if (sk && sk->sk_policy[XFRM_POLICY_OUT]) { 2222 2203 num_pols = 1; 2223 2204 pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl); ··· 2498 2477 } 2499 2478 2500 2479 pol = NULL; 2480 + sk = sk_to_full_sk(sk); 2501 2481 if (sk && sk->sk_policy[dir]) { 2502 2482 pol = xfrm_sk_policy_lookup(sk, dir, &fl); 2503 2483 if (IS_ERR(pol)) {
+37 -19
sound/pci/hda/patch_realtek.c
··· 111 111 void (*power_hook)(struct hda_codec *codec); 112 112 #endif 113 113 void (*shutup)(struct hda_codec *codec); 114 + void (*reboot_notify)(struct hda_codec *codec); 114 115 115 116 int init_amp; 116 117 int codec_variant; /* flag for other variants */ ··· 774 773 snd_hda_shutup_pins(codec); 775 774 } 776 775 776 + static void alc_reboot_notify(struct hda_codec *codec) 777 + { 778 + struct alc_spec *spec = codec->spec; 779 + 780 + if (spec && spec->reboot_notify) 781 + spec->reboot_notify(codec); 782 + else 783 + alc_shutup(codec); 784 + } 785 + 786 + /* power down codec to D3 at reboot/shutdown; set as reboot_notify ops */ 787 + static void alc_d3_at_reboot(struct hda_codec *codec) 788 + { 789 + snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3); 790 + snd_hda_codec_write(codec, codec->core.afg, 0, 791 + AC_VERB_SET_POWER_STATE, AC_PWRST_D3); 792 + msleep(10); 793 + } 794 + 777 795 #define alc_free snd_hda_gen_free 778 796 779 797 #ifdef CONFIG_PM ··· 838 818 .suspend = alc_suspend, 839 819 .check_power_status = snd_hda_gen_check_power_status, 840 820 #endif 841 - .reboot_notify = alc_shutup, 821 + .reboot_notify = alc_reboot_notify, 842 822 }; 843 823 844 824 ··· 4218 4198 struct alc_spec *spec = codec->spec; 4219 4199 4220 4200 if (action == HDA_FIXUP_ACT_PRE_PROBE) { 4201 + spec->shutup = alc_no_shutup; /* reduce click noise */ 4202 + spec->reboot_notify = alc_d3_at_reboot; /* reduce noise */ 4221 4203 spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP; 4222 4204 codec->power_save_node = 0; /* avoid click noises */ 4223 4205 snd_hda_apply_pincfgs(codec, pincfgs); 4224 - } 4225 - } 4226 - 4227 - /* additional fixup for Thinkpad T440s noise problem */ 4228 - static void alc_fixup_tpt440(struct hda_codec *codec, 4229 - const struct hda_fixup *fix, int action) 4230 - { 4231 - struct alc_spec *spec = codec->spec; 4232 - 4233 - if (action == HDA_FIXUP_ACT_PRE_PROBE) { 4234 - spec->shutup = alc_no_shutup; /* reduce click noise */ 4235 - 
spec->gen.mixer_nid = 0; /* reduce background noise */ 4236 4206 } 4237 4207 } 4238 4208 ··· 4616 4606 ALC288_FIXUP_DISABLE_AAMIX, 4617 4607 ALC292_FIXUP_DELL_E7X, 4618 4608 ALC292_FIXUP_DISABLE_AAMIX, 4609 + ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK, 4619 4610 ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, 4620 4611 ALC275_FIXUP_DELL_XPS, 4621 4612 ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE, ··· 5077 5066 }, 5078 5067 [ALC292_FIXUP_TPT440] = { 5079 5068 .type = HDA_FIXUP_FUNC, 5080 - .v.func = alc_fixup_tpt440, 5069 + .v.func = alc_fixup_disable_aamix, 5081 5070 .chained = true, 5082 5071 .chain_id = ALC292_FIXUP_TPT440_DOCK, 5083 5072 }, ··· 5180 5169 .chained = true, 5181 5170 .chain_id = ALC269_FIXUP_DELL2_MIC_NO_PRESENCE 5182 5171 }, 5172 + [ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK] = { 5173 + .type = HDA_FIXUP_FUNC, 5174 + .v.func = alc_fixup_disable_aamix, 5175 + .chained = true, 5176 + .chain_id = ALC293_FIXUP_DELL1_MIC_NO_PRESENCE 5177 + }, 5183 5178 [ALC292_FIXUP_DELL_E7X] = { 5184 5179 .type = HDA_FIXUP_FUNC, 5185 5180 .v.func = alc_fixup_dell_xps13, ··· 5264 5247 SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 5265 5248 SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5266 5249 SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5267 - SND_PCI_QUIRK(0x1028, 0x06db, "Dell", ALC292_FIXUP_DISABLE_AAMIX), 5268 - SND_PCI_QUIRK(0x1028, 0x06dd, "Dell", ALC292_FIXUP_DISABLE_AAMIX), 5269 - SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX), 5270 - SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC292_FIXUP_DISABLE_AAMIX), 5271 - SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC292_FIXUP_DISABLE_AAMIX), 5250 + SND_PCI_QUIRK(0x1028, 0x06db, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK), 5251 + SND_PCI_QUIRK(0x1028, 0x06dd, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK), 5252 + SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK), 5253 + SND_PCI_QUIRK(0x1028, 0x06df, "Dell", 
ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK), 5254 + SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK), 5272 5255 SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), 5273 5256 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5274 5257 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), ··· 5375 5358 SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK), 5376 5359 SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK), 5377 5360 SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 5361 + SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK), 5378 5362 SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK), 5379 5363 SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK), 5380 5364 SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
+2
sound/usb/mixer.c
··· 1354 1354 } 1355 1355 } 1356 1356 1357 + snd_usb_mixer_fu_apply_quirk(state->mixer, cval, unitid, kctl); 1358 + 1357 1359 range = (cval->max - cval->min) / cval->res; 1358 1360 /* 1359 1361 * Are there devices with volume range more than 255? I use a bit more
-12
sound/usb/mixer_maps.c
··· 348 348 { 0 } /* terminator */ 349 349 }; 350 350 351 - /* Dragonfly DAC 1.2, the dB conversion factor is 1 instead of 256 */ 352 - static struct usbmix_dB_map dragonfly_1_2_dB = {0, 5000}; 353 - static struct usbmix_name_map dragonfly_1_2_map[] = { 354 - { 7, NULL, .dB = &dragonfly_1_2_dB }, 355 - { 0 } /* terminator */ 356 - }; 357 - 358 351 /* 359 352 * Control map entries 360 353 */ ··· 462 469 /* Bose Companion 5 */ 463 470 .id = USB_ID(0x05a7, 0x1020), 464 471 .map = bose_companion5_map, 465 - }, 466 - { 467 - /* Dragonfly DAC 1.2 */ 468 - .id = USB_ID(0x21b4, 0x0081), 469 - .map = dragonfly_1_2_map, 470 472 }, 471 473 { 0 } /* terminator */ 472 474 };
+37
sound/usb/mixer_quirks.c
··· 37 37 #include <sound/control.h> 38 38 #include <sound/hwdep.h> 39 39 #include <sound/info.h> 40 + #include <sound/tlv.h> 40 41 41 42 #include "usbaudio.h" 42 43 #include "mixer.h" ··· 1822 1821 break; 1823 1822 default: 1824 1823 usb_audio_dbg(mixer->chip, "memory change in unknown unit %d\n", unitid); 1824 + break; 1825 + } 1826 + } 1827 + 1828 + static void snd_dragonfly_quirk_db_scale(struct usb_mixer_interface *mixer, 1829 + struct snd_kcontrol *kctl) 1830 + { 1831 + /* Approximation using 10 ranges based on output measurement on hw v1.2. 1832 + * This seems close to the cubic mapping e.g. alsamixer uses. */ 1833 + static const DECLARE_TLV_DB_RANGE(scale, 1834 + 0, 1, TLV_DB_MINMAX_ITEM(-5300, -4970), 1835 + 2, 5, TLV_DB_MINMAX_ITEM(-4710, -4160), 1836 + 6, 7, TLV_DB_MINMAX_ITEM(-3884, -3710), 1837 + 8, 14, TLV_DB_MINMAX_ITEM(-3443, -2560), 1838 + 15, 16, TLV_DB_MINMAX_ITEM(-2475, -2324), 1839 + 17, 19, TLV_DB_MINMAX_ITEM(-2228, -2031), 1840 + 20, 26, TLV_DB_MINMAX_ITEM(-1910, -1393), 1841 + 27, 31, TLV_DB_MINMAX_ITEM(-1322, -1032), 1842 + 32, 40, TLV_DB_MINMAX_ITEM(-968, -490), 1843 + 41, 50, TLV_DB_MINMAX_ITEM(-441, 0), 1844 + ); 1845 + 1846 + usb_audio_info(mixer->chip, "applying DragonFly dB scale quirk\n"); 1847 + kctl->tlv.p = scale; 1848 + kctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_TLV_READ; 1849 + kctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK; 1850 + } 1851 + 1852 + void snd_usb_mixer_fu_apply_quirk(struct usb_mixer_interface *mixer, 1853 + struct usb_mixer_elem_info *cval, int unitid, 1854 + struct snd_kcontrol *kctl) 1855 + { 1856 + switch (mixer->chip->usb_id) { 1857 + case USB_ID(0x21b4, 0x0081): /* AudioQuest DragonFly */ 1858 + if (unitid == 7 && cval->min == 0 && cval->max == 50) 1859 + snd_dragonfly_quirk_db_scale(mixer, kctl); 1825 1860 break; 1826 1861 } 1827 1862 }
+4
sound/usb/mixer_quirks.h
··· 9 9 void snd_usb_mixer_rc_memory_change(struct usb_mixer_interface *mixer, 10 10 int unitid); 11 11 12 + void snd_usb_mixer_fu_apply_quirk(struct usb_mixer_interface *mixer, 13 + struct usb_mixer_elem_info *cval, int unitid, 14 + struct snd_kcontrol *kctl); 15 + 12 16 #endif /* SND_USB_MIXER_QUIRKS_H */ 13 17
+1
sound/usb/quirks.c
··· 1125 1125 case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */ 1126 1126 case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */ 1127 1127 case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */ 1128 + case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */ 1128 1129 return true; 1129 1130 } 1130 1131 return false;