Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

+2586 -1267
+1 -1
Documentation/devicetree/bindings/input/sun4i-lradc-keys.txt
··· 12 12 Required subnode-properties: 13 13 - label: Descriptive name of the key. 14 14 - linux,code: Keycode to emit. 15 - - channel: Channel this key is attached to, mut be 0 or 1. 15 + - channel: Channel this key is attached to, must be 0 or 1. 16 16 - voltage: Voltage in µV at lradc input when this key is pressed. 17 17 18 18 Example:
+6 -1
Documentation/devicetree/bindings/mtd/partition.txt
··· 6 6 as RedBoot. 7 7 8 8 The partition table should be a subnode of the mtd node and should be named 9 - 'partitions'. Partitions are defined in subnodes of the partitions node. 9 + 'partitions'. This node should have the following property: 10 + - compatible : (required) must be "fixed-partitions" 11 + Partitions are then defined in subnodes of the partitions node. 10 12 11 13 For backwards compatibility partitions as direct subnodes of the mtd device are 12 14 supported. This use is discouraged. ··· 38 36 39 37 flash@0 { 40 38 partitions { 39 + compatible = "fixed-partitions"; 41 40 #address-cells = <1>; 42 41 #size-cells = <1>; 43 42 ··· 56 53 57 54 flash@1 { 58 55 partitions { 56 + compatible = "fixed-partitions"; 59 57 #address-cells = <1>; 60 58 #size-cells = <2>; 61 59 ··· 70 66 71 67 flash@2 { 72 68 partitions { 69 + compatible = "fixed-partitions"; 73 70 #address-cells = <2>; 74 71 #size-cells = <2>; 75 72
+3 -3
Documentation/devicetree/bindings/net/cpsw.txt
··· 40 40 41 41 Slave Properties: 42 42 Required properties: 43 - - phy_id : Specifies slave phy id 44 43 - phy-mode : See ethernet.txt file in the same directory 45 44 46 45 Optional properties: 47 46 - dual_emac_res_vlan : Specifies VID to be used to segregate the ports 48 47 - mac-address : See ethernet.txt file in the same directory 48 + - phy_id : Specifies slave phy id 49 49 - phy-handle : See ethernet.txt file in the same directory 50 50 51 51 Slave sub-nodes: 52 52 - fixed-link : See fixed-link.txt file in the same directory 53 - Either the properties phy_id and phy-mode, 54 - or the sub-node fixed-link can be specified 53 + Either the property phy_id, or the sub-node 54 + fixed-link can be specified 55 55 56 56 Note: "ti,hwmods" field is used to fetch the base address and irq 57 57 resources from TI, omap hwmod data base during device registration.
+8
MAINTAINERS
··· 8403 8403 S: Maintained 8404 8404 F: drivers/pinctrl/samsung/ 8405 8405 8406 + PIN CONTROLLER - SINGLE 8407 + M: Tony Lindgren <tony@atomide.com> 8408 + M: Haojian Zhuang <haojian.zhuang@linaro.org> 8409 + L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 8410 + L: linux-omap@vger.kernel.org 8411 + S: Maintained 8412 + F: drivers/pinctrl/pinctrl-single.c 8413 + 8406 8414 PIN CONTROLLER - ST SPEAR 8407 8415 M: Viresh Kumar <vireshk@kernel.org> 8408 8416 L: spear-devel@list.st.com
+1 -1
Makefile
··· 1 1 VERSION = 4 2 2 PATCHLEVEL = 4 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc5 4 + EXTRAVERSION = -rc7 5 5 NAME = Blurry Fish Butt 6 6 7 7 # *DOCUMENTATION*
+1
arch/arc/Kconfig
··· 445 445 However some customers have peripherals mapped at this addr, so 446 446 Linux needs to be scooted a bit. 447 447 If you don't know what the above means, leave this setting alone. 448 + This needs to match memory start address specified in Device Tree 448 449 449 450 config HIGHMEM 450 451 bool "High Memory Support"
+1 -1
arch/arc/Makefile
··· 81 81 LIBGCC := $(shell $(CC) $(cflags-y) --print-libgcc-file-name) 82 82 83 83 # Modules with short calls might break for calls into builtin-kernel 84 - KBUILD_CFLAGS_MODULE += -mlong-calls 84 + KBUILD_CFLAGS_MODULE += -mlong-calls -mno-millicode 85 85 86 86 # Finally dump eveything into kernel build system 87 87 KBUILD_CFLAGS += $(cflags-y)
+1
arch/arc/boot/dts/axs10x_mb.dtsi
··· 46 46 snps,pbl = < 32 >; 47 47 clocks = <&apbclk>; 48 48 clock-names = "stmmaceth"; 49 + max-speed = <100>; 49 50 }; 50 51 51 52 ehci@0x40000 {
+2 -1
arch/arc/boot/dts/nsim_hs.dts
··· 17 17 18 18 memory { 19 19 device_type = "memory"; 20 - reg = <0x0 0x80000000 0x0 0x40000000 /* 1 GB low mem */ 20 + /* CONFIG_LINUX_LINK_BASE needs to match low mem start */ 21 + reg = <0x0 0x80000000 0x0 0x20000000 /* 512 MB low mem */ 21 22 0x1 0x00000000 0x0 0x40000000>; /* 1 GB highmem */ 22 23 }; 23 24
-2
arch/arc/include/asm/cache.h
··· 62 62 #define ARC_REG_IC_IVIC 0x10 63 63 #define ARC_REG_IC_CTRL 0x11 64 64 #define ARC_REG_IC_IVIL 0x19 65 - #if defined(CONFIG_ARC_MMU_V3) || defined(CONFIG_ARC_MMU_V4) 66 65 #define ARC_REG_IC_PTAG 0x1E 67 - #endif 68 66 #define ARC_REG_IC_PTAG_HI 0x1F 69 67 70 68 /* Bit val in IC_CTRL */
+2 -2
arch/arc/include/asm/mach_desc.h
··· 23 23 * @dt_compat: Array of device tree 'compatible' strings 24 24 * (XXX: although only 1st entry is looked at) 25 25 * @init_early: Very early callback [called from setup_arch()] 26 - * @init_cpu_smp: for each CPU as it is coming up (SMP as well as UP) 26 + * @init_per_cpu: for each CPU as it is coming up (SMP as well as UP) 27 27 * [(M):init_IRQ(), (o):start_kernel_secondary()] 28 28 * @init_machine: arch initcall level callback (e.g. populate static 29 29 * platform devices or parse Devicetree) ··· 35 35 const char **dt_compat; 36 36 void (*init_early)(void); 37 37 #ifdef CONFIG_SMP 38 - void (*init_cpu_smp)(unsigned int); 38 + void (*init_per_cpu)(unsigned int); 39 39 #endif 40 40 void (*init_machine)(void); 41 41 void (*init_late)(void);
+2 -2
arch/arc/include/asm/smp.h
··· 48 48 * @init_early_smp: A SMP specific h/w block can init itself 49 49 * Could be common across platforms so not covered by 50 50 * mach_desc->init_early() 51 - * @init_irq_cpu: Called for each core so SMP h/w block driver can do 51 + * @init_per_cpu: Called for each core so SMP h/w block driver can do 52 52 * any needed setup per cpu (e.g. IPI request) 53 53 * @cpu_kick: For Master to kickstart a cpu (optionally at a PC) 54 54 * @ipi_send: To send IPI to a @cpu ··· 57 57 struct plat_smp_ops { 58 58 const char *info; 59 59 void (*init_early_smp)(void); 60 - void (*init_irq_cpu)(int cpu); 60 + void (*init_per_cpu)(int cpu); 61 61 void (*cpu_kick)(int cpu, unsigned long pc); 62 62 void (*ipi_send)(int cpu); 63 63 void (*ipi_clear)(int irq);
-4
arch/arc/include/asm/unwind.h
··· 112 112 113 113 extern int arc_unwind(struct unwind_frame_info *frame); 114 114 extern void arc_unwind_init(void); 115 - extern void arc_unwind_setup(void); 116 115 extern void *unwind_add_table(struct module *module, const void *table_start, 117 116 unsigned long table_size); 118 117 extern void unwind_remove_table(void *handle, int init_only); ··· 151 152 { 152 153 } 153 154 154 - static inline void arc_unwind_setup(void) 155 - { 156 - } 157 155 #define unwind_add_table(a, b, c) 158 156 #define unwind_remove_table(a, b) 159 157
+13 -2
arch/arc/kernel/intc-arcv2.c
··· 106 106 static int arcv2_irq_map(struct irq_domain *d, unsigned int irq, 107 107 irq_hw_number_t hw) 108 108 { 109 - if (irq == TIMER0_IRQ || irq == IPI_IRQ) 109 + /* 110 + * core intc IRQs [16, 23]: 111 + * Statically assigned always private-per-core (Timers, WDT, IPI, PCT) 112 + */ 113 + if (hw < 24) { 114 + /* 115 + * A subsequent request_percpu_irq() fails if percpu_devid is 116 + * not set. That in turns sets NOAUTOEN, meaning each core needs 117 + * to call enable_percpu_irq() 118 + */ 119 + irq_set_percpu_devid(irq); 110 120 irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_percpu_irq); 111 - else 121 + } else { 112 122 irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_level_irq); 123 + } 113 124 114 125 return 0; 115 126 }
+24 -9
arch/arc/kernel/irq.c
··· 29 29 30 30 #ifdef CONFIG_SMP 31 31 /* a SMP H/w block could do IPI IRQ request here */ 32 - if (plat_smp_ops.init_irq_cpu) 33 - plat_smp_ops.init_irq_cpu(smp_processor_id()); 32 + if (plat_smp_ops.init_per_cpu) 33 + plat_smp_ops.init_per_cpu(smp_processor_id()); 34 34 35 - if (machine_desc->init_cpu_smp) 36 - machine_desc->init_cpu_smp(smp_processor_id()); 35 + if (machine_desc->init_per_cpu) 36 + machine_desc->init_per_cpu(smp_processor_id()); 37 37 #endif 38 38 } 39 39 ··· 51 51 set_irq_regs(old_regs); 52 52 } 53 53 54 + /* 55 + * API called for requesting percpu interrupts - called by each CPU 56 + * - For boot CPU, actually request the IRQ with genirq core + enables 57 + * - For subsequent callers only enable called locally 58 + * 59 + * Relies on being called by boot cpu first (i.e. request called ahead) of 60 + * any enable as expected by genirq. Hence Suitable only for TIMER, IPI 61 + * which are guaranteed to be setup on boot core first. 62 + * Late probed peripherals such as perf can't use this as there no guarantee 63 + * of being called on boot CPU first. 64 + */ 65 + 54 66 void arc_request_percpu_irq(int irq, int cpu, 55 67 irqreturn_t (*isr)(int irq, void *dev), 56 68 const char *irq_nm, ··· 72 60 if (!cpu) { 73 61 int rc; 74 62 63 + #ifdef CONFIG_ISA_ARCOMPACT 75 64 /* 76 - * These 2 calls are essential to making percpu IRQ APIs work 77 - * Ideally these details could be hidden in irq chip map function 78 - * but the issue is IPIs IRQs being static (non-DT) and platform 79 - * specific, so we can't identify them there. 65 + * A subsequent request_percpu_irq() fails if percpu_devid is 66 + * not set. That in turns sets NOAUTOEN, meaning each core needs 67 + * to call enable_percpu_irq() 68 + * 69 + * For ARCv2, this is done in irq map function since we know 70 + * which irqs are strictly per cpu 80 71 */ 81 72 irq_set_percpu_devid(irq); 82 - irq_modify_status(irq, IRQ_NOAUTOEN, 0); /* @irq, @clr, @set */ 73 + #endif 83 74 84 75 rc = request_percpu_irq(irq, isr, irq_nm, percpu_dev); 85 76 if (rc)
+1 -1
arch/arc/kernel/mcip.c
··· 132 132 struct plat_smp_ops plat_smp_ops = { 133 133 .info = smp_cpuinfo_buf, 134 134 .init_early_smp = mcip_probe_n_setup, 135 - .init_irq_cpu = mcip_setup_per_cpu, 135 + .init_per_cpu = mcip_setup_per_cpu, 136 136 .ipi_send = mcip_ipi_send, 137 137 .ipi_clear = mcip_ipi_clear, 138 138 };
+8 -22
arch/arc/kernel/perf_event.c
··· 428 428 429 429 #endif /* CONFIG_ISA_ARCV2 */ 430 430 431 - void arc_cpu_pmu_irq_init(void) 431 + static void arc_cpu_pmu_irq_init(void *data) 432 432 { 433 - struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu); 433 + int irq = *(int *)data; 434 434 435 - arc_request_percpu_irq(arc_pmu->irq, smp_processor_id(), arc_pmu_intr, 436 - "ARC perf counters", pmu_cpu); 435 + enable_percpu_irq(irq, IRQ_TYPE_NONE); 437 436 438 437 /* Clear all pending interrupt flags */ 439 438 write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff); ··· 514 515 515 516 if (has_interrupts) { 516 517 int irq = platform_get_irq(pdev, 0); 517 - unsigned long flags; 518 518 519 519 if (irq < 0) { 520 520 pr_err("Cannot get IRQ number for the platform\n"); ··· 522 524 523 525 arc_pmu->irq = irq; 524 526 525 - /* 526 - * arc_cpu_pmu_irq_init() needs to be called on all cores for 527 - * their respective local PMU. 528 - * However we use opencoded on_each_cpu() to ensure it is called 529 - * on core0 first, so that arc_request_percpu_irq() sets up 530 - * AUTOEN etc. Otherwise enable_percpu_irq() fails to enable 531 - * perf IRQ on non master cores. 532 - * see arc_request_percpu_irq() 533 - */ 534 - preempt_disable(); 535 - local_irq_save(flags); 536 - arc_cpu_pmu_irq_init(); 537 - local_irq_restore(flags); 538 - smp_call_function((smp_call_func_t)arc_cpu_pmu_irq_init, 0, 1); 539 - preempt_enable(); 527 + /* intc map function ensures irq_set_percpu_devid() called */ 528 + request_percpu_irq(irq, arc_pmu_intr, "ARC perf counters", 529 + this_cpu_ptr(&arc_pmu_cpu)); 540 530 541 - /* Clean all pending interrupt flags */ 542 - write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff); 531 + on_each_cpu(arc_cpu_pmu_irq_init, &irq, 1); 532 + 543 533 } else 544 534 arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; 545 535
-1
arch/arc/kernel/setup.c
··· 429 429 #endif 430 430 431 431 arc_unwind_init(); 432 - arc_unwind_setup(); 433 432 } 434 433 435 434 static int __init customize_machine(void)
+4 -4
arch/arc/kernel/smp.c
··· 132 132 pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu); 133 133 134 134 /* Some SMP H/w setup - for each cpu */ 135 - if (plat_smp_ops.init_irq_cpu) 136 - plat_smp_ops.init_irq_cpu(cpu); 135 + if (plat_smp_ops.init_per_cpu) 136 + plat_smp_ops.init_per_cpu(cpu); 137 137 138 - if (machine_desc->init_cpu_smp) 139 - machine_desc->init_cpu_smp(cpu); 138 + if (machine_desc->init_per_cpu) 139 + machine_desc->init_per_cpu(cpu); 140 140 141 141 arc_local_timer_setup(); 142 142
+34 -23
arch/arc/kernel/unwind.c
··· 170 170 171 171 static unsigned long read_pointer(const u8 **pLoc, 172 172 const void *end, signed ptrType); 173 + static void init_unwind_hdr(struct unwind_table *table, 174 + void *(*alloc) (unsigned long)); 175 + 176 + /* 177 + * wrappers for header alloc (vs. calling one vs. other at call site) 178 + * to elide section mismatches warnings 179 + */ 180 + static void *__init unw_hdr_alloc_early(unsigned long sz) 181 + { 182 + return __alloc_bootmem_nopanic(sz, sizeof(unsigned int), 183 + MAX_DMA_ADDRESS); 184 + } 185 + 186 + static void *unw_hdr_alloc(unsigned long sz) 187 + { 188 + return kmalloc(sz, GFP_KERNEL); 189 + } 173 190 174 191 static void init_unwind_table(struct unwind_table *table, const char *name, 175 192 const void *core_start, unsigned long core_size, ··· 226 209 __start_unwind, __end_unwind - __start_unwind, 227 210 NULL, 0); 228 211 /*__start_unwind_hdr, __end_unwind_hdr - __start_unwind_hdr);*/ 212 + 213 + init_unwind_hdr(&root_table, unw_hdr_alloc_early); 229 214 } 230 215 231 216 static const u32 bad_cie, not_fde; ··· 260 241 e2->fde = v; 261 242 } 262 243 263 - static void __init setup_unwind_table(struct unwind_table *table, 264 - void *(*alloc) (unsigned long)) 244 + static void init_unwind_hdr(struct unwind_table *table, 245 + void *(*alloc) (unsigned long)) 265 246 { 266 247 const u8 *ptr; 267 248 unsigned long tableSize = table->size, hdrSize; ··· 296 277 if (cie == &not_fde) 297 278 continue; 298 279 if (cie == NULL || cie == &bad_cie) 299 - return; 280 + goto ret_err; 300 281 ptrType = fde_pointer_type(cie); 301 282 if (ptrType < 0) 302 - return; 283 + goto ret_err; 303 284 304 285 ptr = (const u8 *)(fde + 2); 305 286 if (!read_pointer(&ptr, (const u8 *)(fde + 1) + *fde, ··· 315 296 } 316 297 317 298 if (tableSize || !n) 318 - return; 299 + goto ret_err; 319 300 320 301 hdrSize = 4 + sizeof(unsigned long) + sizeof(unsigned int) 321 302 + 2 * n * sizeof(unsigned long); 303 + 322 304 header = alloc(hdrSize); 323 305 if (!header) 324 - return 306 + goto ret_err; 307 + 325 308 header->version = 1; 326 309 header->eh_frame_ptr_enc = DW_EH_PE_abs | DW_EH_PE_native; 327 310 header->fde_count_enc = DW_EH_PE_abs | DW_EH_PE_data4; ··· 361 340 table->hdrsz = hdrSize; 362 341 smp_wmb(); 363 342 table->header = (const void *)header; 364 - } 343 + return; 365 344 366 - static void *__init balloc(unsigned long sz) 367 - { 368 - return __alloc_bootmem_nopanic(sz, 369 - sizeof(unsigned int), 370 - __pa(MAX_DMA_ADDRESS)); 371 - } 372 - 373 - void __init arc_unwind_setup(void) 374 - { 375 - setup_unwind_table(&root_table, balloc); 345 + ret_err: 346 + panic("Attention !!! Dwarf FDE parsing errors\n");; 376 347 } 377 348 ··· 389 376 module->module_init, module->init_size, 390 377 table_start, table_size, 391 378 NULL, 0); 379 + 380 + init_unwind_hdr(table, unw_hdr_alloc); 392 381 393 382 #ifdef UNWIND_DEBUG 394 383 unw_debug("Table added for [%s] %lx %lx\n", ··· 454 439 info.init_only = init_only; 455 440 456 441 unlink_table(&info); /* XXX: SMP */ 442 + kfree(table->header); 457 443 kfree(table); 458 444 } 459 445 ··· 603 587 { 604 588 const u8 *ptr = (const u8 *)(cie + 2); 605 589 unsigned version = *ptr; 606 - 607 - if (version != 1) 608 - return -1; /* unsupported */ 609 590 610 591 if (*++ptr) { 611 592 const char *aug; ··· 1015 1002 ptr = (const u8 *)(cie + 2); 1016 1003 end = (const u8 *)(cie + 1) + *cie; 1017 1004 frame->call_frame = 1; 1018 - if ((state.version = *ptr) != 1) 1019 - cie = NULL; /* unsupported version */ 1020 - else if (*++ptr) { 1005 + if (*++ptr) { 1021 1006 /* check if augmentation size is first (thus present) */ 1022 1007 if (*ptr == 'z') { 1023 1008 while (++ptr < end && *ptr) {
+2 -2
arch/arc/mm/highmem.c
··· 111 111 } 112 112 EXPORT_SYMBOL(__kunmap_atomic); 113 113 114 - noinline pte_t *alloc_kmap_pgtable(unsigned long kvaddr) 114 + static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr) 115 115 { 116 116 pgd_t *pgd_k; 117 117 pud_t *pud_k; ··· 127 127 return pte_k; 128 128 } 129 129 130 - void kmap_init(void) 130 + void __init kmap_init(void) 131 131 { 132 132 /* Due to recursive include hell, we can't do this in processor.h */ 133 133 BUILD_BUG_ON(PAGE_OFFSET < (VMALLOC_END + FIXMAP_SIZE + PKMAP_SIZE));
+3 -1
arch/arc/mm/init.c
··· 51 51 int in_use = 0; 52 52 53 53 if (!low_mem_sz) { 54 - BUG_ON(base != low_mem_start); 54 + if (base != low_mem_start) 55 + panic("CONFIG_LINUX_LINK_BASE != DT memory { }"); 56 + 55 57 low_mem_sz = size; 56 58 in_use = 1; 57 59 } else {
+1 -1
arch/arm/boot/dts/imx6q-gw5400-a.dts
··· 154 154 &fec { 155 155 pinctrl-names = "default"; 156 156 pinctrl-0 = <&pinctrl_enet>; 157 - phy-mode = "rgmii"; 157 + phy-mode = "rgmii-id"; 158 158 phy-reset-gpios = <&gpio1 30 GPIO_ACTIVE_HIGH>; 159 159 status = "okay"; 160 160 };
+1 -1
arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
··· 94 94 &fec { 95 95 pinctrl-names = "default"; 96 96 pinctrl-0 = <&pinctrl_enet>; 97 - phy-mode = "rgmii"; 97 + phy-mode = "rgmii-id"; 98 98 phy-reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>; 99 99 status = "okay"; 100 100 };
+1 -1
arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
··· 154 154 &fec { 155 155 pinctrl-names = "default"; 156 156 pinctrl-0 = <&pinctrl_enet>; 157 - phy-mode = "rgmii"; 157 + phy-mode = "rgmii-id"; 158 158 phy-reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>; 159 159 status = "okay"; 160 160 };
+1 -1
arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
··· 155 155 &fec { 156 156 pinctrl-names = "default"; 157 157 pinctrl-0 = <&pinctrl_enet>; 158 - phy-mode = "rgmii"; 158 + phy-mode = "rgmii-id"; 159 159 phy-reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>; 160 160 status = "okay"; 161 161 };
+1 -1
arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
··· 145 145 &fec { 146 146 pinctrl-names = "default"; 147 147 pinctrl-0 = <&pinctrl_enet>; 148 - phy-mode = "rgmii"; 148 + phy-mode = "rgmii-id"; 149 149 phy-reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>; 150 150 status = "okay"; 151 151 };
+3 -3
arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
··· 113 113 &clks { 114 114 assigned-clocks = <&clks IMX6QDL_PLL4_BYPASS_SRC>, 115 115 <&clks IMX6QDL_PLL4_BYPASS>, 116 - <&clks IMX6QDL_CLK_PLL4_POST_DIV>, 117 116 <&clks IMX6QDL_CLK_LDB_DI0_SEL>, 118 - <&clks IMX6QDL_CLK_LDB_DI1_SEL>; 117 + <&clks IMX6QDL_CLK_LDB_DI1_SEL>, 118 + <&clks IMX6QDL_CLK_PLL4_POST_DIV>; 119 119 assigned-clock-parents = <&clks IMX6QDL_CLK_LVDS2_IN>, 120 120 <&clks IMX6QDL_PLL4_BYPASS_SRC>, 121 121 <&clks IMX6QDL_CLK_PLL3_USB_OTG>, 122 122 <&clks IMX6QDL_CLK_PLL3_USB_OTG>; 123 - assigned-clock-rates = <0>, <0>, <24576000>; 123 + assigned-clock-rates = <0>, <0>, <0>, <0>, <24576000>; 124 124 }; 125 125 126 126 &ecspi1 {
+4
arch/arm/boot/dts/omap4-duovero-parlor.dts
··· 189 189 }; 190 190 }; 191 191 192 + &uart3 { 193 + interrupts-extended = <&wakeupgen GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH 194 + &omap4_pmx_core OMAP4_UART3_RX>; 195 + };
+1
arch/arm/boot/dts/sun6i-a31s-primo81.dts
··· 83 83 reg = <0x5d>; 84 84 interrupt-parent = <&pio>; 85 85 interrupts = <0 3 IRQ_TYPE_LEVEL_HIGH>; /* PA3 */ 86 + touchscreen-swapped-x-y; 86 87 }; 87 88 }; 88 89
+1 -1
arch/arm/boot/dts/tegra124-nyan.dtsi
··· 399 399 400 400 /* CPU DFLL clock */ 401 401 clock@0,70110000 { 402 - status = "okay"; 402 + status = "disabled"; 403 403 vdd-cpu-supply = <&vdd_cpu>; 404 404 nvidia,i2c-fs-rate = <400000>; 405 405 };
+39 -38
arch/arm/kernel/sys_oabi-compat.c
··· 193 193 pid_t l_pid; 194 194 } __attribute__ ((packed,aligned(4))); 195 195 196 + static long do_locks(unsigned int fd, unsigned int cmd, 197 + unsigned long arg) 198 + { 199 + struct flock64 kernel; 200 + struct oabi_flock64 user; 201 + mm_segment_t fs; 202 + long ret; 203 + 204 + if (copy_from_user(&user, (struct oabi_flock64 __user *)arg, 205 + sizeof(user))) 206 + return -EFAULT; 207 + kernel.l_type = user.l_type; 208 + kernel.l_whence = user.l_whence; 209 + kernel.l_start = user.l_start; 210 + kernel.l_len = user.l_len; 211 + kernel.l_pid = user.l_pid; 212 + 213 + fs = get_fs(); 214 + set_fs(KERNEL_DS); 215 + ret = sys_fcntl64(fd, cmd, (unsigned long)&kernel); 216 + set_fs(fs); 217 + 218 + if (!ret && (cmd == F_GETLK64 || cmd == F_OFD_GETLK)) { 219 + user.l_type = kernel.l_type; 220 + user.l_whence = kernel.l_whence; 221 + user.l_start = kernel.l_start; 222 + user.l_len = kernel.l_len; 223 + user.l_pid = kernel.l_pid; 224 + if (copy_to_user((struct oabi_flock64 __user *)arg, 225 + &user, sizeof(user))) 226 + ret = -EFAULT; 227 + } 228 + return ret; 229 + } 230 + 196 231 asmlinkage long sys_oabi_fcntl64(unsigned int fd, unsigned int cmd, 197 232 unsigned long arg) 198 233 { 199 - struct oabi_flock64 user; 200 - struct flock64 kernel; 201 - mm_segment_t fs = USER_DS; /* initialized to kill a warning */ 202 - unsigned long local_arg = arg; 203 - int ret; 204 - 205 234 switch (cmd) { 206 235 case F_OFD_GETLK: 207 236 case F_OFD_SETLK: ··· 238 209 case F_GETLK64: 239 210 case F_SETLK64: 240 211 case F_SETLKW64: 241 - if (copy_from_user(&user, (struct oabi_flock64 __user *)arg, 242 - sizeof(user))) 243 - return -EFAULT; 244 - kernel.l_type = user.l_type; 245 - kernel.l_whence = user.l_whence; 246 - kernel.l_start = user.l_start; 247 - kernel.l_len = user.l_len; 248 - kernel.l_pid = user.l_pid; 249 - local_arg = (unsigned long)&kernel; 250 - fs = get_fs(); 251 - set_fs(KERNEL_DS); 212 + return do_locks(fd, cmd, arg); 213 + 214 + default: 215 + return sys_fcntl64(fd, cmd, arg); 252 216 } 253 - 254 - ret = sys_fcntl64(fd, cmd, local_arg); 255 - 256 - switch (cmd) { 257 - case F_GETLK64: 258 - if (!ret) { 259 - user.l_type = kernel.l_type; 260 - user.l_whence = kernel.l_whence; 261 - user.l_start = kernel.l_start; 262 - user.l_len = kernel.l_len; 263 - user.l_pid = kernel.l_pid; 264 - if (copy_to_user((struct oabi_flock64 __user *)arg, 265 - &user, sizeof(user))) 266 - ret = -EFAULT; 267 - } 268 - case F_SETLK64: 269 - case F_SETLKW64: 270 - set_fs(fs); 271 - } 272 - 273 - return ret; 274 217 } 275 218 276 219 struct oabi_epoll_event {
+2
arch/arm/mach-omap2/Kconfig
··· 65 65 select MACH_OMAP_GENERIC 66 66 select MIGHT_HAVE_CACHE_L2X0 67 67 select HAVE_ARM_SCU 68 + select GENERIC_CLOCKEVENTS_BROADCAST 69 + select HAVE_ARM_TWD 68 70 69 71 config SOC_DRA7XX 70 72 bool "TI DRA7XX"
+6
arch/arm/mach-omap2/timer.c
··· 320 320 return r; 321 321 } 322 322 323 + #if !defined(CONFIG_SMP) && defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) 324 + void tick_broadcast(const struct cpumask *mask) 325 + { 326 + } 327 + #endif 328 + 323 329 static void __init omap2_gp_clockevent_init(int gptimer_id, 324 330 const char *fck_source, 325 331 const char *property)
+1
arch/m32r/include/asm/Kbuild
··· 3 3 generic-y += cputime.h 4 4 generic-y += exec.h 5 5 generic-y += irq_work.h 6 + generic-y += kvm_para.h 6 7 generic-y += mcs_spinlock.h 7 8 generic-y += mm-arch-hooks.h 8 9 generic-y += module.h
+9 -1
arch/m32r/include/asm/io.h
··· 168 168 #define writew_relaxed writew 169 169 #define writel_relaxed writel 170 170 171 - #define ioread8 read 171 + #define ioread8 readb 172 172 #define ioread16 readw 173 173 #define ioread32 readl 174 174 #define iowrite8 writeb 175 175 #define iowrite16 writew 176 176 #define iowrite32 writel 177 + 178 + #define ioread8_rep(p, dst, count) insb((unsigned long)(p), (dst), (count)) 179 + #define ioread16_rep(p, dst, count) insw((unsigned long)(p), (dst), (count)) 180 + #define ioread32_rep(p, dst, count) insl((unsigned long)(p), (dst), (count)) 181 + 182 + #define iowrite8_rep(p, src, count) outsb((unsigned long)(p), (src), (count)) 183 + #define iowrite16_rep(p, src, count) outsw((unsigned long)(p), (src), (count)) 184 + #define iowrite32_rep(p, src, count) outsl((unsigned long)(p), (src), (count)) 177 185 178 186 #define ioread16be(addr) be16_to_cpu(readw(addr)) 179 187 #define ioread32be(addr) be32_to_cpu(readl(addr))
+35 -17
arch/mips/include/asm/uaccess.h
··· 599 599 * On error, the variable @x is set to zero. 600 600 */ 601 601 #define __get_user_unaligned(x,ptr) \ 602 - __get_user__unalignednocheck((x),(ptr),sizeof(*(ptr))) 602 + __get_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr))) 603 603 604 604 /* 605 605 * Yuck. We need two variants, one for 64bit operation and one ··· 620 620 do { \ 621 621 switch (size) { \ 622 622 case 1: __get_data_asm(val, "lb", ptr); break; \ 623 - case 2: __get_user_unaligned_asm(val, "ulh", ptr); break; \ 624 - case 4: __get_user_unaligned_asm(val, "ulw", ptr); break; \ 623 + case 2: __get_data_unaligned_asm(val, "ulh", ptr); break; \ 624 + case 4: __get_data_unaligned_asm(val, "ulw", ptr); break; \ 625 625 case 8: __GET_USER_UNALIGNED_DW(val, ptr); break; \ 626 626 default: __get_user_unaligned_unknown(); break; \ 627 627 } \ ··· 1122 1122 __cu_to = (to); \ 1123 1123 __cu_from = (from); \ 1124 1124 __cu_len = (n); \ 1125 - might_fault(); \ 1126 - __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \ 1127 - __cu_len); \ 1125 + if (eva_kernel_access()) { \ 1126 + __cu_len = __invoke_copy_from_kernel(__cu_to, \ 1127 + __cu_from, \ 1128 + __cu_len); \ 1129 + } else { \ 1130 + might_fault(); \ 1131 + __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \ 1132 + __cu_len); \ 1133 + } \ 1128 1134 __cu_len; \ 1129 1135 }) 1130 1136 ··· 1235 1229 { 1236 1230 __kernel_size_t res; 1237 1231 1238 - might_fault(); 1239 - __asm__ __volatile__( 1240 - "move\t$4, %1\n\t" 1241 - "move\t$5, $0\n\t" 1242 - "move\t$6, %2\n\t" 1243 - __MODULE_JAL(__bzero) 1244 - "move\t%0, $6" 1245 - : "=r" (res) 1246 - : "r" (addr), "r" (size) 1247 - : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"); 1232 + if (eva_kernel_access()) { 1233 + __asm__ __volatile__( 1234 + "move\t$4, %1\n\t" 1235 + "move\t$5, $0\n\t" 1236 + "move\t$6, %2\n\t" 1237 + __MODULE_JAL(__bzero_kernel) 1238 + "move\t%0, $6" 1239 + : "=r" (res) 1240 + : "r" (addr), "r" (size) 1241 + : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"); 1242 + } else { 1243 + might_fault(); 1244 + __asm__ __volatile__( 1245 + "move\t$4, %1\n\t" 1246 + "move\t$5, $0\n\t" 1247 + "move\t$6, %2\n\t" 1248 + __MODULE_JAL(__bzero) 1249 + "move\t%0, $6" 1250 + : "=r" (res) 1251 + : "r" (addr), "r" (size) 1252 + : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"); 1253 + } 1248 1254 1249 1255 return res; 1250 1256 } ··· 1402 1384 might_fault(); 1403 1385 __asm__ __volatile__( 1404 1386 "move\t$4, %1\n\t" 1405 - __MODULE_JAL(__strlen_kernel_asm) 1387 + __MODULE_JAL(__strlen_user_asm) 1406 1388 "move\t%0, $2" 1407 1389 : "=r" (res) 1408 1390 : "r" (s)
-2
arch/mips/kernel/cps-vec.S
··· 257 257 has_mt t0, 3f 258 258 259 259 .set push 260 - .set mips64r2 261 260 .set mt 262 261 263 262 /* Only allow 1 TC per VPE to execute... */ ··· 375 376 nop 376 377 377 378 .set push 378 - .set mips64r2 379 379 .set mt 380 380 381 381 1: /* Enter VPE configuration state */
+2
arch/mips/kernel/mips_ksyms.c
··· 17 17 #include <asm/fpu.h> 18 18 #include <asm/msa.h> 19 19 20 + extern void *__bzero_kernel(void *__s, size_t __count); 20 21 extern void *__bzero(void *__s, size_t __count); 21 22 extern long __strncpy_from_kernel_nocheck_asm(char *__to, 22 23 const char *__from, long __len); ··· 65 64 EXPORT_SYMBOL(__copy_in_user_eva); 66 65 EXPORT_SYMBOL(__copy_to_user_eva); 67 66 EXPORT_SYMBOL(__copy_user_inatomic_eva); 67 + EXPORT_SYMBOL(__bzero_kernel); 68 68 #endif 69 69 EXPORT_SYMBOL(__bzero); 70 70 EXPORT_SYMBOL(__strncpy_from_kernel_nocheck_asm);
+2
arch/mips/lib/memset.S
··· 283 283 1: 284 284 #ifndef CONFIG_EVA 285 285 FEXPORT(__bzero) 286 + #else 287 + FEXPORT(__bzero_kernel) 286 288 #endif 287 289 __BUILD_BZERO LEGACY_MODE 288 290
-1
arch/mips/pci/pci-rt2880.c
··· 221 221 static int rt288x_pci_probe(struct platform_device *pdev) 222 222 { 223 223 void __iomem *io_map_base; 224 - int i; 225 224 226 225 rt2880_pci_base = ioremap_nocache(RT2880_PCI_BASE, PAGE_SIZE); 227 226
-1
arch/mips/pmcs-msp71xx/msp_setup.c
··· 39 39 void msp7120_reset(void) 40 40 { 41 41 void *start, *end, *iptr; 42 - register int i; 43 42 44 43 /* Diasble all interrupts */ 45 44 local_irq_disable();
+1 -1
arch/mips/sni/reset.c
··· 26 26 /* XXX This ends up at the ARC firmware prompt ... */ 27 27 void sni_machine_restart(char *command) 28 28 { 29 - int i, j; 29 + int i; 30 30 31 31 /* This does a normal via the keyboard controller like a PC. 32 32 We can do that easier ... */
+2 -2
arch/mips/vdso/Makefile
··· 26 26 # the comments on that file. 27 27 # 28 28 ifndef CONFIG_CPU_MIPSR6 29 - ifeq ($(call ld-ifversion, -gt, 22400000, y),) 30 - $(warning MIPS VDSO requires binutils > 2.24) 29 + ifeq ($(call ld-ifversion, -lt, 22500000, y),) 30 + $(warning MIPS VDSO requires binutils >= 2.25) 31 31 obj-vdso-y := $(filter-out gettimeofday.o, $(obj-vdso-y)) 32 32 ccflags-vdso += -DDISABLE_MIPS_VDSO 33 33 endif
+52 -12
arch/parisc/kernel/signal.c
··· 435 435 regs->gr[28]); 436 436 } 437 437 438 + /* 439 + * Check how the syscall number gets loaded into %r20 within 440 + * the delay branch in userspace and adjust as needed. 441 + */ 442 + 443 + static void check_syscallno_in_delay_branch(struct pt_regs *regs) 444 + { 445 + u32 opcode, source_reg; 446 + u32 __user *uaddr; 447 + int err; 448 + 449 + /* Usually we don't have to restore %r20 (the system call number) 450 + * because it gets loaded in the delay slot of the branch external 451 + * instruction via the ldi instruction. 452 + * In some cases a register-to-register copy instruction might have 453 + * been used instead, in which case we need to copy the syscall 454 + * number into the source register before returning to userspace. 455 + */ 456 + 457 + /* A syscall is just a branch, so all we have to do is fiddle the 458 + * return pointer so that the ble instruction gets executed again. 459 + */ 460 + regs->gr[31] -= 8; /* delayed branching */ 461 + 462 + /* Get assembler opcode of code in delay branch */ 463 + uaddr = (unsigned int *) ((regs->gr[31] & ~3) + 4); 464 + err = get_user(opcode, uaddr); 465 + if (err) 466 + return; 467 + 468 + /* Check if delay branch uses "ldi int,%r20" */ 469 + if ((opcode & 0xffff0000) == 0x34140000) 470 + return; /* everything ok, just return */ 471 + 472 + /* Check if delay branch uses "nop" */ 473 + if (opcode == INSN_NOP) 474 + return; 475 + 476 + /* Check if delay branch uses "copy %rX,%r20" */ 477 + if ((opcode & 0xffe0ffff) == 0x08000254) { 478 + source_reg = (opcode >> 16) & 31; 479 + regs->gr[source_reg] = regs->gr[20]; 480 + return; 481 + } 482 + 483 + pr_warn("syscall restart: %s (pid %d): unexpected opcode 0x%08x\n", 484 + current->comm, task_pid_nr(current), opcode); 485 + } 486 + 438 487 static inline void 439 488 syscall_restart(struct pt_regs *regs, struct k_sigaction *ka) 440 489 { ··· 506 457 } 507 458 /* fallthrough */ 508 459 case -ERESTARTNOINTR: 509 - /* A syscall is just a branch, so all 510 - * we have to do is fiddle the return pointer. 511 - */ 512 - regs->gr[31] -= 8; /* delayed branching */ 460 + check_syscallno_in_delay_branch(regs); 513 461 break; 514 462 } 515 463 } ··· 556 510 } 557 511 } 558 512 case -ERESTARTNOHAND: 559 513 case -ERESTARTSYS: 560 - case -ERESTARTNOINTR: { 561 - /* Hooray for delayed branching. We don't 562 - * have to restore %r20 (the system call 563 - * number) because it gets loaded in the delay 564 - * slot of the branch external instruction. 565 - */ 566 - regs->gr[31] -= 8; 513 + case -ERESTARTNOINTR: 514 + check_syscallno_in_delay_branch(regs); 567 515 return; 568 - } 516 default: 569 517 break; 570 518 }
+12 -12
arch/powerpc/include/asm/systbl.h
··· 370 370 PPC64ONLY(switch_endian) 371 371 SYSCALL_SPU(userfaultfd) 372 372 SYSCALL_SPU(membarrier) 373 - SYSCALL(semop) 374 - SYSCALL(semget) 375 - COMPAT_SYS(semctl) 376 - COMPAT_SYS(semtimedop) 377 - COMPAT_SYS(msgsnd) 378 - COMPAT_SYS(msgrcv) 379 - SYSCALL(msgget) 380 - COMPAT_SYS(msgctl) 381 - COMPAT_SYS(shmat) 382 - SYSCALL(shmdt) 383 - SYSCALL(shmget) 384 - COMPAT_SYS(shmctl) 373 + SYSCALL(ni_syscall) 374 + SYSCALL(ni_syscall) 375 + SYSCALL(ni_syscall) 376 + SYSCALL(ni_syscall) 377 + SYSCALL(ni_syscall) 378 + SYSCALL(ni_syscall) 379 + SYSCALL(ni_syscall) 380 + SYSCALL(ni_syscall) 381 + SYSCALL(ni_syscall) 382 + SYSCALL(ni_syscall) 383 + SYSCALL(ni_syscall) 384 + SYSCALL(ni_syscall) 385 385 SYSCALL(mlock2)
-12
arch/powerpc/include/uapi/asm/unistd.h
··· 388 388 #define __NR_switch_endian 363 389 389 #define __NR_userfaultfd 364 390 390 #define __NR_membarrier 365 391 - #define __NR_semop 366 392 - #define __NR_semget 367 393 - #define __NR_semctl 368 394 - #define __NR_semtimedop 369 395 - #define __NR_msgsnd 370 396 - #define __NR_msgrcv 371 397 - #define __NR_msgget 372 398 - #define __NR_msgctl 373 399 - #define __NR_shmat 374 400 - #define __NR_shmdt 375 401 - #define __NR_shmget 376 402 - #define __NR_shmctl 377 403 391 #define __NR_mlock2 378 404 392 405 393 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
+6
arch/powerpc/kvm/book3s_hv.c
··· 224 224 225 225 static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr) 226 226 { 227 + /* 228 + * Check for illegal transactional state bit combination 229 + * and if we find it, force the TS field to a safe state. 230 + */ 231 + if ((msr & MSR_TS_MASK) == MSR_TS_MASK) 232 + msr &= ~MSR_TS_MASK; 227 233 vcpu->arch.shregs.msr = msr; 228 234 kvmppc_end_cede(vcpu); 229 235 }
+13 -1
arch/powerpc/platforms/powernv/opal-irqchip.c
··· 83 83 set_bit(d->hwirq, &opal_event_irqchip.mask); 84 84 85 85 opal_poll_events(&events); 86 - opal_handle_events(be64_to_cpu(events)); 86 + last_outstanding_events = be64_to_cpu(events); 87 + 88 + /* 89 + * We can't just handle the events now with opal_handle_events(). 90 + * If we did we would deadlock when opal_event_unmask() is called from 91 + * handle_level_irq() with the irq descriptor lock held, because 92 + * calling opal_handle_events() would call generic_handle_irq() and 93 + * then handle_level_irq() which would try to take the descriptor lock 94 + * again. Instead queue the events for later. 95 + */ 96 + if (last_outstanding_events & opal_event_irqchip.mask) 97 + /* Need to retrigger the interrupt */ 98 + irq_work_queue(&opal_event_irq_work); 87 99 } 88 100 89 101 static int opal_event_set_type(struct irq_data *d, unsigned int flow_type)
+1 -1
arch/powerpc/platforms/powernv/opal.c
··· 278 278 279 279 /* Sanity check */ 280 280 if (type >= OPAL_MSG_TYPE_MAX) { 281 - pr_warning("%s: Unknown message type: %u\n", __func__, type); 281 + pr_warn_once("%s: Unknown message type: %u\n", __func__, type); 282 282 return; 283 283 } 284 284 opal_message_do_notify(type, (void *)&msg);
+12 -5
arch/s390/kernel/dis.c
··· 1920 1920 } 1921 1921 if (separator) 1922 1922 ptr += sprintf(ptr, "%c", separator); 1923 + /* 1924 + * Use four '%' characters below because of the 1925 + * following two conversions: 1926 + * 1927 + * 1) sprintf: %%%%r -> %%r 1928 + * 2) printk : %%r -> %r 1929 + */ 1923 1930 if (operand->flags & OPERAND_GPR) 1924 - ptr += sprintf(ptr, "%%r%i", value); 1931 + ptr += sprintf(ptr, "%%%%r%i", value); 1925 1932 else if (operand->flags & OPERAND_FPR) 1926 - ptr += sprintf(ptr, "%%f%i", value); 1933 + ptr += sprintf(ptr, "%%%%f%i", value); 1927 1934 else if (operand->flags & OPERAND_AR) 1928 - ptr += sprintf(ptr, "%%a%i", value); 1935 + ptr += sprintf(ptr, "%%%%a%i", value); 1929 1936 else if (operand->flags & OPERAND_CR) 1930 - ptr += sprintf(ptr, "%%c%i", value); 1937 + ptr += sprintf(ptr, "%%%%c%i", value); 1931 1938 else if (operand->flags & OPERAND_VR) 1932 - ptr += sprintf(ptr, "%%v%i", value); 1939 + ptr += sprintf(ptr, "%%%%v%i", value); 1933 1940 else if (operand->flags & OPERAND_PCREL) 1934 1941 ptr += sprintf(ptr, "%lx", (signed int) value 1935 1942 + addr);
+1
arch/sparc/include/asm/elf_64.h
··· 95 95 * really available. So we simply advertise only "crypto" support. 96 96 */ 97 97 #define HWCAP_SPARC_CRYPTO 0x04000000 /* CRYPTO insns available */ 98 + #define HWCAP_SPARC_ADI 0x08000000 /* ADI available */ 98 99 99 100 #define CORE_DUMP_USE_REGSET 100 101
+6 -1
arch/sparc/include/uapi/asm/unistd.h
··· 417 417 #define __NR_bpf 349 418 418 #define __NR_execveat 350 419 419 #define __NR_membarrier 351 420 + #define __NR_userfaultfd 352 421 + #define __NR_bind 353 422 + #define __NR_listen 354 423 + #define __NR_setsockopt 355 424 + #define __NR_mlock2 356 420 425 421 - #define NR_syscalls 352 426 + #define NR_syscalls 357 422 427 423 428 /* Bitmask values returned from kern_features system call. */ 424 429 #define KERN_FEATURE_MIXED_MODE_STACK 0x00000001
+13
arch/sparc/kernel/head_64.S
··· 946 946 mov 1, %o0 947 947 ENDPROC(__retl_one) 948 948 949 + ENTRY(__retl_one_fp) 950 + VISExitHalf 951 + retl 952 + mov 1, %o0 953 + ENDPROC(__retl_one_fp) 954 + 949 955 ENTRY(__ret_one_asi) 950 956 wr %g0, ASI_AIUS, %asi 951 957 ret ··· 963 957 retl 964 958 mov 1, %o0 965 959 ENDPROC(__retl_one_asi) 960 + 961 + ENTRY(__retl_one_asi_fp) 962 + wr %g0, ASI_AIUS, %asi 963 + VISExitHalf 964 + retl 965 + mov 1, %o0 966 + ENDPROC(__retl_one_asi_fp) 966 967 967 968 ENTRY(__retl_o1) 968 969 retl
+11
arch/sparc/kernel/perf_event.c
··· 1828 1828 void 1829 1829 perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) 1830 1830 { 1831 + u64 saved_fault_address = current_thread_info()->fault_address; 1832 + u8 saved_fault_code = get_thread_fault_code(); 1833 + mm_segment_t old_fs; 1834 + 1831 1835 perf_callchain_store(entry, regs->tpc); 1832 1836 1833 1837 if (!current->mm) 1834 1838 return; 1839 + 1840 + old_fs = get_fs(); 1841 + set_fs(USER_DS); 1835 1842 1836 1843 flushw_user(); 1837 1844 ··· 1850 1843 perf_callchain_user_64(entry, regs); 1851 1844 1852 1845 pagefault_enable(); 1846 + 1847 + set_fs(old_fs); 1848 + set_thread_fault_code(saved_fault_code); 1849 + current_thread_info()->fault_address = saved_fault_address; 1853 1850 }
+7 -1
arch/sparc/kernel/rtrap_64.S
··· 73 73 andn %l1, %l4, %l1 74 74 srl %l4, 20, %l4 75 75 ba,pt %xcc, rtrap_no_irq_enable 76 - wrpr %l4, %pil 76 + nop 77 + /* Do not actually set the %pil here. We will do that 78 + * below after we clear PSTATE_IE in the %pstate register. 79 + * If we re-enable interrupts here, we can recurse down 80 + * the hardirq stack potentially endlessly, causing a 81 + * stack overflow. 82 + */ 77 83 78 84 .align 64 79 85 .globl rtrap_irq, rtrap, irqsz_patchme, rtrap_xcall
+5 -4
arch/sparc/kernel/setup_64.c
··· 380 380 */ 381 381 "mul32", "div32", "fsmuld", "v8plus", "popc", "vis", "vis2", 382 382 "ASIBlkInit", "fmaf", "vis3", "hpc", "random", "trans", "fjfmau", 383 - "ima", "cspare", "pause", "cbcond", 383 + "ima", "cspare", "pause", "cbcond", NULL /*reserved for crypto */, 384 + "adp", 384 385 }; 385 386 386 387 static const char *crypto_hwcaps[] = { ··· 397 396 seq_puts(m, "cpucaps\t\t: "); 398 397 for (i = 0; i < ARRAY_SIZE(hwcaps); i++) { 399 398 unsigned long bit = 1UL << i; 400 - if (caps & bit) { 399 + if (hwcaps[i] && (caps & bit)) { 401 400 seq_printf(m, "%s%s", 402 401 printed ? "," : "", hwcaps[i]); 403 402 printed++; ··· 451 450 452 451 for (i = 0; i < ARRAY_SIZE(hwcaps); i++) { 453 452 unsigned long bit = 1UL << i; 454 - if (caps & bit) 453 + if (hwcaps[i] && (caps & bit)) 455 454 report_one_hwcap(&printed, hwcaps[i]); 456 455 } 457 456 if (caps & HWCAP_SPARC_CRYPTO) ··· 486 485 for (i = 0; i < ARRAY_SIZE(hwcaps); i++) { 487 486 unsigned long bit = 1UL << i; 488 487 489 - if (!strcmp(prop, hwcaps[i])) { 488 + if (hwcaps[i] && !strcmp(prop, hwcaps[i])) { 490 489 caps |= bit; 491 490 break; 492 491 }
+10 -9
arch/sparc/kernel/systbls_32.S
··· 35 35 /*80*/ .long sys_setgroups16, sys_getpgrp, sys_setgroups, sys_setitimer, sys_ftruncate64 36 36 /*85*/ .long sys_swapon, sys_getitimer, sys_setuid, sys_sethostname, sys_setgid 37 37 /*90*/ .long sys_dup2, sys_setfsuid, sys_fcntl, sys_select, sys_setfsgid 38 - /*95*/ .long sys_fsync, sys_setpriority, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall 38 + /*95*/ .long sys_fsync, sys_setpriority, sys_socket, sys_connect, sys_accept 39 39 /*100*/ .long sys_getpriority, sys_rt_sigreturn, sys_rt_sigaction, sys_rt_sigprocmask, sys_rt_sigpending 40 40 /*105*/ .long sys_rt_sigtimedwait, sys_rt_sigqueueinfo, sys_rt_sigsuspend, sys_setresuid, sys_getresuid 41 - /*110*/ .long sys_setresgid, sys_getresgid, sys_setregid, sys_nis_syscall, sys_nis_syscall 42 - /*115*/ .long sys_getgroups, sys_gettimeofday, sys_getrusage, sys_nis_syscall, sys_getcwd 41 + /*110*/ .long sys_setresgid, sys_getresgid, sys_setregid, sys_recvmsg, sys_sendmsg 42 + /*115*/ .long sys_getgroups, sys_gettimeofday, sys_getrusage, sys_getsockopt, sys_getcwd 43 43 /*120*/ .long sys_readv, sys_writev, sys_settimeofday, sys_fchown16, sys_fchmod 44 - /*125*/ .long sys_nis_syscall, sys_setreuid16, sys_setregid16, sys_rename, sys_truncate 45 - /*130*/ .long sys_ftruncate, sys_flock, sys_lstat64, sys_nis_syscall, sys_nis_syscall 46 - /*135*/ .long sys_nis_syscall, sys_mkdir, sys_rmdir, sys_utimes, sys_stat64 47 - /*140*/ .long sys_sendfile64, sys_nis_syscall, sys_futex, sys_gettid, sys_getrlimit 44 + /*125*/ .long sys_recvfrom, sys_setreuid16, sys_setregid16, sys_rename, sys_truncate 45 + /*130*/ .long sys_ftruncate, sys_flock, sys_lstat64, sys_sendto, sys_shutdown 46 + /*135*/ .long sys_socketpair, sys_mkdir, sys_rmdir, sys_utimes, sys_stat64 47 + /*140*/ .long sys_sendfile64, sys_getpeername, sys_futex, sys_gettid, sys_getrlimit 48 48 /*145*/ .long sys_setrlimit, sys_pivot_root, sys_prctl, sys_pciconfig_read, sys_pciconfig_write 49 - /*150*/ .long sys_nis_syscall, sys_inotify_init, sys_inotify_add_watch, 
sys_poll, sys_getdents64 49 + /*150*/ .long sys_getsockname, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64 50 50 /*155*/ .long sys_fcntl64, sys_inotify_rm_watch, sys_statfs, sys_fstatfs, sys_oldumount 51 51 /*160*/ .long sys_sched_setaffinity, sys_sched_getaffinity, sys_getdomainname, sys_setdomainname, sys_nis_syscall 52 52 /*165*/ .long sys_quotactl, sys_set_tid_address, sys_mount, sys_ustat, sys_setxattr ··· 87 87 /*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev 88 88 /*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr 89 89 /*345*/ .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf 90 - /*350*/ .long sys_execveat, sys_membarrier 90 + /*350*/ .long sys_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen 91 + /*355*/ .long sys_setsockopt, sys_mlock2
+10 -8
arch/sparc/kernel/systbls_64.S
··· 37 37 /*80*/ .word sys_setgroups16, sys_getpgrp, sys_setgroups, compat_sys_setitimer, sys32_ftruncate64 38 38 .word sys_swapon, compat_sys_getitimer, sys_setuid, sys_sethostname, sys_setgid 39 39 /*90*/ .word sys_dup2, sys_setfsuid, compat_sys_fcntl, sys32_select, sys_setfsgid 40 - .word sys_fsync, sys_setpriority, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall 40 + .word sys_fsync, sys_setpriority, sys_socket, sys_connect, sys_accept 41 41 /*100*/ .word sys_getpriority, sys32_rt_sigreturn, compat_sys_rt_sigaction, compat_sys_rt_sigprocmask, compat_sys_rt_sigpending 42 42 .word compat_sys_rt_sigtimedwait, compat_sys_rt_sigqueueinfo, compat_sys_rt_sigsuspend, sys_setresuid, sys_getresuid 43 - /*110*/ .word sys_setresgid, sys_getresgid, sys_setregid, sys_nis_syscall, sys_nis_syscall 44 - .word sys_getgroups, compat_sys_gettimeofday, compat_sys_getrusage, sys_nis_syscall, sys_getcwd 43 + /*110*/ .word sys_setresgid, sys_getresgid, sys_setregid, compat_sys_recvmsg, compat_sys_sendmsg 44 + .word sys_getgroups, compat_sys_gettimeofday, compat_sys_getrusage, compat_sys_getsockopt, sys_getcwd 45 45 /*120*/ .word compat_sys_readv, compat_sys_writev, compat_sys_settimeofday, sys_fchown16, sys_fchmod 46 - .word sys_nis_syscall, sys_setreuid16, sys_setregid16, sys_rename, compat_sys_truncate 47 - /*130*/ .word compat_sys_ftruncate, sys_flock, compat_sys_lstat64, sys_nis_syscall, sys_nis_syscall 48 - .word sys_nis_syscall, sys_mkdir, sys_rmdir, compat_sys_utimes, compat_sys_stat64 46 + .word sys_recvfrom, sys_setreuid16, sys_setregid16, sys_rename, compat_sys_truncate 47 + /*130*/ .word compat_sys_ftruncate, sys_flock, compat_sys_lstat64, sys_sendto, sys_shutdown 48 + .word sys_socketpair, sys_mkdir, sys_rmdir, compat_sys_utimes, compat_sys_stat64 49 49 /*140*/ .word sys_sendfile64, sys_nis_syscall, sys32_futex, sys_gettid, compat_sys_getrlimit 50 50 .word compat_sys_setrlimit, sys_pivot_root, sys_prctl, sys_pciconfig_read, sys_pciconfig_write 51 51 /*150*/ .word 
sys_nis_syscall, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64 ··· 88 88 .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev 89 89 /*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr 90 90 .word sys32_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf 91 - /*350*/ .word sys32_execveat, sys_membarrier 91 + /*350*/ .word sys32_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen 92 + .word compat_sys_setsockopt, sys_mlock2 92 93 93 94 #endif /* CONFIG_COMPAT */ 94 95 ··· 169 168 .word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev 170 169 /*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr 171 170 .word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf 172 - /*350*/ .word sys64_execveat, sys_membarrier 171 + /*350*/ .word sys64_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen 172 + .word sys_setsockopt, sys_mlock2
+8
arch/sparc/lib/NG2copy_from_user.S
··· 11 11 .text; \ 12 12 .align 4; 13 13 14 + #define EX_LD_FP(x) \ 15 + 98: x; \ 16 + .section __ex_table,"a";\ 17 + .align 4; \ 18 + .word 98b, __retl_one_asi_fp;\ 19 + .text; \ 20 + .align 4; 21 + 14 22 #ifndef ASI_AIUS 15 23 #define ASI_AIUS 0x11 16 24 #endif
+8
arch/sparc/lib/NG2copy_to_user.S
··· 11 11 .text; \ 12 12 .align 4; 13 13 14 + #define EX_ST_FP(x) \ 15 + 98: x; \ 16 + .section __ex_table,"a";\ 17 + .align 4; \ 18 + .word 98b, __retl_one_asi_fp;\ 19 + .text; \ 20 + .align 4; 21 + 14 22 #ifndef ASI_AIUS 15 23 #define ASI_AIUS 0x11 16 24 #endif
+62 -56
arch/sparc/lib/NG2memcpy.S
··· 34 34 #ifndef EX_LD 35 35 #define EX_LD(x) x 36 36 #endif 37 + #ifndef EX_LD_FP 38 + #define EX_LD_FP(x) x 39 + #endif 37 40 38 41 #ifndef EX_ST 39 42 #define EX_ST(x) x 43 + #endif 44 + #ifndef EX_ST_FP 45 + #define EX_ST_FP(x) x 40 46 #endif 41 47 42 48 #ifndef EX_RETVAL ··· 140 134 fsrc2 %x6, %f12; \ 141 135 fsrc2 %x7, %f14; 142 136 #define FREG_LOAD_1(base, x0) \ 143 - EX_LD(LOAD(ldd, base + 0x00, %x0)) 137 + EX_LD_FP(LOAD(ldd, base + 0x00, %x0)) 144 138 #define FREG_LOAD_2(base, x0, x1) \ 145 - EX_LD(LOAD(ldd, base + 0x00, %x0)); \ 146 - EX_LD(LOAD(ldd, base + 0x08, %x1)); 139 + EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \ 140 + EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); 147 141 #define FREG_LOAD_3(base, x0, x1, x2) \ 148 - EX_LD(LOAD(ldd, base + 0x00, %x0)); \ 149 - EX_LD(LOAD(ldd, base + 0x08, %x1)); \ 150 - EX_LD(LOAD(ldd, base + 0x10, %x2)); 142 + EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \ 143 + EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \ 144 + EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); 151 145 #define FREG_LOAD_4(base, x0, x1, x2, x3) \ 152 - EX_LD(LOAD(ldd, base + 0x00, %x0)); \ 153 - EX_LD(LOAD(ldd, base + 0x08, %x1)); \ 154 - EX_LD(LOAD(ldd, base + 0x10, %x2)); \ 155 - EX_LD(LOAD(ldd, base + 0x18, %x3)); 146 + EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \ 147 + EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \ 148 + EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \ 149 + EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); 156 150 #define FREG_LOAD_5(base, x0, x1, x2, x3, x4) \ 157 - EX_LD(LOAD(ldd, base + 0x00, %x0)); \ 158 - EX_LD(LOAD(ldd, base + 0x08, %x1)); \ 159 - EX_LD(LOAD(ldd, base + 0x10, %x2)); \ 160 - EX_LD(LOAD(ldd, base + 0x18, %x3)); \ 161 - EX_LD(LOAD(ldd, base + 0x20, %x4)); 151 + EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \ 152 + EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \ 153 + EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \ 154 + EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); \ 155 + EX_LD_FP(LOAD(ldd, base + 0x20, %x4)); 162 156 #define FREG_LOAD_6(base, x0, x1, x2, x3, x4, x5) \ 163 - EX_LD(LOAD(ldd, 
base + 0x00, %x0)); \ 164 - EX_LD(LOAD(ldd, base + 0x08, %x1)); \ 165 - EX_LD(LOAD(ldd, base + 0x10, %x2)); \ 166 - EX_LD(LOAD(ldd, base + 0x18, %x3)); \ 167 - EX_LD(LOAD(ldd, base + 0x20, %x4)); \ 168 - EX_LD(LOAD(ldd, base + 0x28, %x5)); 157 + EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \ 158 + EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \ 159 + EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \ 160 + EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); \ 161 + EX_LD_FP(LOAD(ldd, base + 0x20, %x4)); \ 162 + EX_LD_FP(LOAD(ldd, base + 0x28, %x5)); 169 163 #define FREG_LOAD_7(base, x0, x1, x2, x3, x4, x5, x6) \ 170 - EX_LD(LOAD(ldd, base + 0x00, %x0)); \ 171 - EX_LD(LOAD(ldd, base + 0x08, %x1)); \ 172 - EX_LD(LOAD(ldd, base + 0x10, %x2)); \ 173 - EX_LD(LOAD(ldd, base + 0x18, %x3)); \ 174 - EX_LD(LOAD(ldd, base + 0x20, %x4)); \ 175 - EX_LD(LOAD(ldd, base + 0x28, %x5)); \ 176 - EX_LD(LOAD(ldd, base + 0x30, %x6)); 164 + EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \ 165 + EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \ 166 + EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \ 167 + EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); \ 168 + EX_LD_FP(LOAD(ldd, base + 0x20, %x4)); \ 169 + EX_LD_FP(LOAD(ldd, base + 0x28, %x5)); \ 170 + EX_LD_FP(LOAD(ldd, base + 0x30, %x6)); 177 171 178 172 .register %g2,#scratch 179 173 .register %g3,#scratch ··· 281 275 nop 282 276 /* fall through for 0 < low bits < 8 */ 283 277 110: sub %o4, 64, %g2 284 - EX_LD(LOAD_BLK(%g2, %f0)) 285 - 1: EX_ST(STORE_INIT(%g0, %o4 + %g3)) 286 - EX_LD(LOAD_BLK(%o4, %f16)) 278 + EX_LD_FP(LOAD_BLK(%g2, %f0)) 279 + 1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) 280 + EX_LD_FP(LOAD_BLK(%o4, %f16)) 287 281 FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f14, f16) 288 - EX_ST(STORE_BLK(%f0, %o4 + %g3)) 282 + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) 289 283 FREG_MOVE_8(f16, f18, f20, f22, f24, f26, f28, f30) 290 284 subcc %g1, 64, %g1 291 285 add %o4, 64, %o4 ··· 296 290 297 291 120: sub %o4, 56, %g2 298 292 FREG_LOAD_7(%g2, f0, f2, f4, f6, f8, f10, f12) 299 - 1: EX_ST(STORE_INIT(%g0, %o4 + %g3)) 
300 - EX_LD(LOAD_BLK(%o4, %f16)) 293 + 1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) 294 + EX_LD_FP(LOAD_BLK(%o4, %f16)) 301 295 FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f16, f18) 302 - EX_ST(STORE_BLK(%f0, %o4 + %g3)) 296 + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) 303 297 FREG_MOVE_7(f18, f20, f22, f24, f26, f28, f30) 304 298 subcc %g1, 64, %g1 305 299 add %o4, 64, %o4 ··· 310 304 311 305 130: sub %o4, 48, %g2 312 306 FREG_LOAD_6(%g2, f0, f2, f4, f6, f8, f10) 313 - 1: EX_ST(STORE_INIT(%g0, %o4 + %g3)) 314 - EX_LD(LOAD_BLK(%o4, %f16)) 307 + 1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) 308 + EX_LD_FP(LOAD_BLK(%o4, %f16)) 315 309 FREG_FROB(f0, f2, f4, f6, f8, f10, f16, f18, f20) 316 - EX_ST(STORE_BLK(%f0, %o4 + %g3)) 310 + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) 317 311 FREG_MOVE_6(f20, f22, f24, f26, f28, f30) 318 312 subcc %g1, 64, %g1 319 313 add %o4, 64, %o4 ··· 324 318 325 319 140: sub %o4, 40, %g2 326 320 FREG_LOAD_5(%g2, f0, f2, f4, f6, f8) 327 - 1: EX_ST(STORE_INIT(%g0, %o4 + %g3)) 328 - EX_LD(LOAD_BLK(%o4, %f16)) 321 + 1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) 322 + EX_LD_FP(LOAD_BLK(%o4, %f16)) 329 323 FREG_FROB(f0, f2, f4, f6, f8, f16, f18, f20, f22) 330 - EX_ST(STORE_BLK(%f0, %o4 + %g3)) 324 + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) 331 325 FREG_MOVE_5(f22, f24, f26, f28, f30) 332 326 subcc %g1, 64, %g1 333 327 add %o4, 64, %o4 ··· 338 332 339 333 150: sub %o4, 32, %g2 340 334 FREG_LOAD_4(%g2, f0, f2, f4, f6) 341 - 1: EX_ST(STORE_INIT(%g0, %o4 + %g3)) 342 - EX_LD(LOAD_BLK(%o4, %f16)) 335 + 1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) 336 + EX_LD_FP(LOAD_BLK(%o4, %f16)) 343 337 FREG_FROB(f0, f2, f4, f6, f16, f18, f20, f22, f24) 344 - EX_ST(STORE_BLK(%f0, %o4 + %g3)) 338 + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) 345 339 FREG_MOVE_4(f24, f26, f28, f30) 346 340 subcc %g1, 64, %g1 347 341 add %o4, 64, %o4 ··· 352 346 353 347 160: sub %o4, 24, %g2 354 348 FREG_LOAD_3(%g2, f0, f2, f4) 355 - 1: EX_ST(STORE_INIT(%g0, %o4 + %g3)) 356 - EX_LD(LOAD_BLK(%o4, %f16)) 349 + 1: EX_ST_FP(STORE_INIT(%g0, %o4 
+ %g3)) 350 + EX_LD_FP(LOAD_BLK(%o4, %f16)) 357 351 FREG_FROB(f0, f2, f4, f16, f18, f20, f22, f24, f26) 358 - EX_ST(STORE_BLK(%f0, %o4 + %g3)) 352 + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) 359 353 FREG_MOVE_3(f26, f28, f30) 360 354 subcc %g1, 64, %g1 361 355 add %o4, 64, %o4 ··· 366 360 367 361 170: sub %o4, 16, %g2 368 362 FREG_LOAD_2(%g2, f0, f2) 369 - 1: EX_ST(STORE_INIT(%g0, %o4 + %g3)) 370 - EX_LD(LOAD_BLK(%o4, %f16)) 363 + 1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) 364 + EX_LD_FP(LOAD_BLK(%o4, %f16)) 371 365 FREG_FROB(f0, f2, f16, f18, f20, f22, f24, f26, f28) 372 - EX_ST(STORE_BLK(%f0, %o4 + %g3)) 366 + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) 373 367 FREG_MOVE_2(f28, f30) 374 368 subcc %g1, 64, %g1 375 369 add %o4, 64, %o4 ··· 380 374 381 375 180: sub %o4, 8, %g2 382 376 FREG_LOAD_1(%g2, f0) 383 - 1: EX_ST(STORE_INIT(%g0, %o4 + %g3)) 384 - EX_LD(LOAD_BLK(%o4, %f16)) 377 + 1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) 378 + EX_LD_FP(LOAD_BLK(%o4, %f16)) 385 379 FREG_FROB(f0, f16, f18, f20, f22, f24, f26, f28, f30) 386 - EX_ST(STORE_BLK(%f0, %o4 + %g3)) 380 + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) 387 381 FREG_MOVE_1(f30) 388 382 subcc %g1, 64, %g1 389 383 add %o4, 64, %o4 ··· 393 387 nop 394 388 395 389 190: 396 - 1: EX_ST(STORE_INIT(%g0, %o4 + %g3)) 390 + 1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) 397 391 subcc %g1, 64, %g1 398 - EX_LD(LOAD_BLK(%o4, %f0)) 399 - EX_ST(STORE_BLK(%f0, %o4 + %g3)) 392 + EX_LD_FP(LOAD_BLK(%o4, %f0)) 393 + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) 400 394 add %o4, 64, %o4 401 395 bne,pt %xcc, 1b 402 396 LOAD(prefetch, %o4 + 64, #one_read)
+8
arch/sparc/lib/NG4copy_from_user.S
··· 11 11 .text; \ 12 12 .align 4; 13 13 14 + #define EX_LD_FP(x) \ 15 + 98: x; \ 16 + .section __ex_table,"a";\ 17 + .align 4; \ 18 + .word 98b, __retl_one_asi_fp;\ 19 + .text; \ 20 + .align 4; 21 + 14 22 #ifndef ASI_AIUS 15 23 #define ASI_AIUS 0x11 16 24 #endif
+8
arch/sparc/lib/NG4copy_to_user.S
··· 11 11 .text; \ 12 12 .align 4; 13 13 14 + #define EX_ST_FP(x) \ 15 + 98: x; \ 16 + .section __ex_table,"a";\ 17 + .align 4; \ 18 + .word 98b, __retl_one_asi_fp;\ 19 + .text; \ 20 + .align 4; 21 + 14 22 #ifndef ASI_AIUS 15 23 #define ASI_AIUS 0x11 16 24 #endif
+23 -17
arch/sparc/lib/NG4memcpy.S
··· 48 48 #ifndef EX_LD 49 49 #define EX_LD(x) x 50 50 #endif 51 + #ifndef EX_LD_FP 52 + #define EX_LD_FP(x) x 53 + #endif 51 54 52 55 #ifndef EX_ST 53 56 #define EX_ST(x) x 57 + #endif 58 + #ifndef EX_ST_FP 59 + #define EX_ST_FP(x) x 54 60 #endif 55 61 56 62 #ifndef EX_RETVAL ··· 216 210 sub %o2, %o4, %o2 217 211 alignaddr %o1, %g0, %g1 218 212 add %o1, %o4, %o1 219 - EX_LD(LOAD(ldd, %g1 + 0x00, %f0)) 220 - 1: EX_LD(LOAD(ldd, %g1 + 0x08, %f2)) 213 + EX_LD_FP(LOAD(ldd, %g1 + 0x00, %f0)) 214 + 1: EX_LD_FP(LOAD(ldd, %g1 + 0x08, %f2)) 221 215 subcc %o4, 0x40, %o4 222 - EX_LD(LOAD(ldd, %g1 + 0x10, %f4)) 223 - EX_LD(LOAD(ldd, %g1 + 0x18, %f6)) 224 - EX_LD(LOAD(ldd, %g1 + 0x20, %f8)) 225 - EX_LD(LOAD(ldd, %g1 + 0x28, %f10)) 226 - EX_LD(LOAD(ldd, %g1 + 0x30, %f12)) 227 - EX_LD(LOAD(ldd, %g1 + 0x38, %f14)) 216 + EX_LD_FP(LOAD(ldd, %g1 + 0x10, %f4)) 217 + EX_LD_FP(LOAD(ldd, %g1 + 0x18, %f6)) 218 + EX_LD_FP(LOAD(ldd, %g1 + 0x20, %f8)) 219 + EX_LD_FP(LOAD(ldd, %g1 + 0x28, %f10)) 220 + EX_LD_FP(LOAD(ldd, %g1 + 0x30, %f12)) 221 + EX_LD_FP(LOAD(ldd, %g1 + 0x38, %f14)) 228 222 faligndata %f0, %f2, %f16 229 - EX_LD(LOAD(ldd, %g1 + 0x40, %f0)) 223 + EX_LD_FP(LOAD(ldd, %g1 + 0x40, %f0)) 230 224 faligndata %f2, %f4, %f18 231 225 add %g1, 0x40, %g1 232 226 faligndata %f4, %f6, %f20 ··· 235 229 faligndata %f10, %f12, %f26 236 230 faligndata %f12, %f14, %f28 237 231 faligndata %f14, %f0, %f30 238 - EX_ST(STORE(std, %f16, %o0 + 0x00)) 239 - EX_ST(STORE(std, %f18, %o0 + 0x08)) 240 - EX_ST(STORE(std, %f20, %o0 + 0x10)) 241 - EX_ST(STORE(std, %f22, %o0 + 0x18)) 242 - EX_ST(STORE(std, %f24, %o0 + 0x20)) 243 - EX_ST(STORE(std, %f26, %o0 + 0x28)) 244 - EX_ST(STORE(std, %f28, %o0 + 0x30)) 245 - EX_ST(STORE(std, %f30, %o0 + 0x38)) 232 + EX_ST_FP(STORE(std, %f16, %o0 + 0x00)) 233 + EX_ST_FP(STORE(std, %f18, %o0 + 0x08)) 234 + EX_ST_FP(STORE(std, %f20, %o0 + 0x10)) 235 + EX_ST_FP(STORE(std, %f22, %o0 + 0x18)) 236 + EX_ST_FP(STORE(std, %f24, %o0 + 0x20)) 237 + EX_ST_FP(STORE(std, %f26, %o0 + 0x28)) 
238 + EX_ST_FP(STORE(std, %f28, %o0 + 0x30)) 239 + EX_ST_FP(STORE(std, %f30, %o0 + 0x38)) 246 240 add %o0, 0x40, %o0 247 241 bne,pt %icc, 1b 248 242 LOAD(prefetch, %g1 + 0x200, #n_reads_strong)
+8
arch/sparc/lib/U1copy_from_user.S
··· 11 11 .text; \ 12 12 .align 4; 13 13 14 + #define EX_LD_FP(x) \ 15 + 98: x; \ 16 + .section __ex_table,"a";\ 17 + .align 4; \ 18 + .word 98b, __retl_one_fp;\ 19 + .text; \ 20 + .align 4; 21 + 14 22 #define FUNC_NAME ___copy_from_user 15 23 #define LOAD(type,addr,dest) type##a [addr] %asi, dest 16 24 #define LOAD_BLK(addr,dest) ldda [addr] ASI_BLK_AIUS, dest
+8
arch/sparc/lib/U1copy_to_user.S
··· 11 11 .text; \ 12 12 .align 4; 13 13 14 + #define EX_ST_FP(x) \ 15 + 98: x; \ 16 + .section __ex_table,"a";\ 17 + .align 4; \ 18 + .word 98b, __retl_one_fp;\ 19 + .text; \ 20 + .align 4; 21 + 14 22 #define FUNC_NAME ___copy_to_user 15 23 #define STORE(type,src,addr) type##a src, [addr] ASI_AIUS 16 24 #define STORE_BLK(src,addr) stda src, [addr] ASI_BLK_AIUS
+27 -21
arch/sparc/lib/U1memcpy.S
··· 25 25 #ifndef EX_LD 26 26 #define EX_LD(x) x 27 27 #endif 28 + #ifndef EX_LD_FP 29 + #define EX_LD_FP(x) x 30 + #endif 28 31 29 32 #ifndef EX_ST 30 33 #define EX_ST(x) x 34 + #endif 35 + #ifndef EX_ST_FP 36 + #define EX_ST_FP(x) x 31 37 #endif 32 38 33 39 #ifndef EX_RETVAL ··· 79 73 faligndata %f8, %f9, %f62; 80 74 81 75 #define MAIN_LOOP_CHUNK(src, dest, fdest, fsrc, len, jmptgt) \ 82 - EX_LD(LOAD_BLK(%src, %fdest)); \ 83 - EX_ST(STORE_BLK(%fsrc, %dest)); \ 76 + EX_LD_FP(LOAD_BLK(%src, %fdest)); \ 77 + EX_ST_FP(STORE_BLK(%fsrc, %dest)); \ 84 78 add %src, 0x40, %src; \ 85 79 subcc %len, 0x40, %len; \ 86 80 be,pn %xcc, jmptgt; \ ··· 95 89 96 90 #define DO_SYNC membar #Sync; 97 91 #define STORE_SYNC(dest, fsrc) \ 98 - EX_ST(STORE_BLK(%fsrc, %dest)); \ 92 + EX_ST_FP(STORE_BLK(%fsrc, %dest)); \ 99 93 add %dest, 0x40, %dest; \ 100 94 DO_SYNC 101 95 102 96 #define STORE_JUMP(dest, fsrc, target) \ 103 - EX_ST(STORE_BLK(%fsrc, %dest)); \ 97 + EX_ST_FP(STORE_BLK(%fsrc, %dest)); \ 104 98 add %dest, 0x40, %dest; \ 105 99 ba,pt %xcc, target; \ 106 100 nop; ··· 109 103 subcc %left, 8, %left;\ 110 104 bl,pn %xcc, 95f; \ 111 105 faligndata %f0, %f1, %f48; \ 112 - EX_ST(STORE(std, %f48, %dest)); \ 106 + EX_ST_FP(STORE(std, %f48, %dest)); \ 113 107 add %dest, 8, %dest; 114 108 115 109 #define UNEVEN_VISCHUNK_LAST(dest, f0, f1, left) \ ··· 166 160 and %g2, 0x38, %g2 167 161 168 162 1: subcc %g1, 0x1, %g1 169 - EX_LD(LOAD(ldub, %o1 + 0x00, %o3)) 170 - EX_ST(STORE(stb, %o3, %o1 + %GLOBAL_SPARE)) 163 + EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3)) 164 + EX_ST_FP(STORE(stb, %o3, %o1 + %GLOBAL_SPARE)) 171 165 bgu,pt %XCC, 1b 172 166 add %o1, 0x1, %o1 173 167 ··· 178 172 be,pt %icc, 3f 179 173 alignaddr %o1, %g0, %o1 180 174 181 - EX_LD(LOAD(ldd, %o1, %f4)) 182 - 1: EX_LD(LOAD(ldd, %o1 + 0x8, %f6)) 175 + EX_LD_FP(LOAD(ldd, %o1, %f4)) 176 + 1: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6)) 183 177 add %o1, 0x8, %o1 184 178 subcc %g2, 0x8, %g2 185 179 faligndata %f4, %f6, %f0 186 - EX_ST(STORE(std, %f0, 
%o0)) 180 + EX_ST_FP(STORE(std, %f0, %o0)) 187 181 be,pn %icc, 3f 188 182 add %o0, 0x8, %o0 189 183 190 - EX_LD(LOAD(ldd, %o1 + 0x8, %f4)) 184 + EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4)) 191 185 add %o1, 0x8, %o1 192 186 subcc %g2, 0x8, %g2 193 187 faligndata %f6, %f4, %f0 194 - EX_ST(STORE(std, %f0, %o0)) 188 + EX_ST_FP(STORE(std, %f0, %o0)) 195 189 bne,pt %icc, 1b 196 190 add %o0, 0x8, %o0 197 191 ··· 214 208 add %g1, %GLOBAL_SPARE, %g1 215 209 subcc %o2, %g3, %o2 216 210 217 - EX_LD(LOAD_BLK(%o1, %f0)) 211 + EX_LD_FP(LOAD_BLK(%o1, %f0)) 218 212 add %o1, 0x40, %o1 219 213 add %g1, %g3, %g1 220 - EX_LD(LOAD_BLK(%o1, %f16)) 214 + EX_LD_FP(LOAD_BLK(%o1, %f16)) 221 215 add %o1, 0x40, %o1 222 216 sub %GLOBAL_SPARE, 0x80, %GLOBAL_SPARE 223 - EX_LD(LOAD_BLK(%o1, %f32)) 217 + EX_LD_FP(LOAD_BLK(%o1, %f32)) 224 218 add %o1, 0x40, %o1 225 219 226 220 /* There are 8 instances of the unrolled loop, ··· 432 426 62: FINISH_VISCHUNK(o0, f44, f46, g3) 433 427 63: UNEVEN_VISCHUNK_LAST(o0, f46, f0, g3) 434 428 435 - 93: EX_LD(LOAD(ldd, %o1, %f2)) 429 + 93: EX_LD_FP(LOAD(ldd, %o1, %f2)) 436 430 add %o1, 8, %o1 437 431 subcc %g3, 8, %g3 438 432 faligndata %f0, %f2, %f8 439 - EX_ST(STORE(std, %f8, %o0)) 433 + EX_ST_FP(STORE(std, %f8, %o0)) 440 434 bl,pn %xcc, 95f 441 435 add %o0, 8, %o0 442 - EX_LD(LOAD(ldd, %o1, %f0)) 436 + EX_LD_FP(LOAD(ldd, %o1, %f0)) 443 437 add %o1, 8, %o1 444 438 subcc %g3, 8, %g3 445 439 faligndata %f2, %f0, %f8 446 - EX_ST(STORE(std, %f8, %o0)) 440 + EX_ST_FP(STORE(std, %f8, %o0)) 447 441 bge,pt %xcc, 93b 448 442 add %o0, 8, %o0 449 443 450 444 95: brz,pt %o2, 2f 451 445 mov %g1, %o1 452 446 453 - 1: EX_LD(LOAD(ldub, %o1, %o3)) 447 + 1: EX_LD_FP(LOAD(ldub, %o1, %o3)) 454 448 add %o1, 1, %o1 455 449 subcc %o2, 1, %o2 456 - EX_ST(STORE(stb, %o3, %o0)) 450 + EX_ST_FP(STORE(stb, %o3, %o0)) 457 451 bne,pt %xcc, 1b 458 452 add %o0, 1, %o0 459 453
+8
arch/sparc/lib/U3copy_from_user.S
··· 11 11 .text; \ 12 12 .align 4; 13 13 14 + #define EX_LD_FP(x) \ 15 + 98: x; \ 16 + .section __ex_table,"a";\ 17 + .align 4; \ 18 + .word 98b, __retl_one_fp;\ 19 + .text; \ 20 + .align 4; 21 + 14 22 #define FUNC_NAME U3copy_from_user 15 23 #define LOAD(type,addr,dest) type##a [addr] %asi, dest 16 24 #define EX_RETVAL(x) 0
+8
arch/sparc/lib/U3copy_to_user.S
··· 11 11 .text; \ 12 12 .align 4; 13 13 14 + #define EX_ST_FP(x) \ 15 + 98: x; \ 16 + .section __ex_table,"a";\ 17 + .align 4; \ 18 + .word 98b, __retl_one_fp;\ 19 + .text; \ 20 + .align 4; 21 + 14 22 #define FUNC_NAME U3copy_to_user 15 23 #define STORE(type,src,addr) type##a src, [addr] ASI_AIUS 16 24 #define STORE_BLK(src,addr) stda src, [addr] ASI_BLK_AIUS
+46 -40
arch/sparc/lib/U3memcpy.S
··· 24 24 #ifndef EX_LD 25 25 #define EX_LD(x) x 26 26 #endif 27 + #ifndef EX_LD_FP 28 + #define EX_LD_FP(x) x 29 + #endif 27 30 28 31 #ifndef EX_ST 29 32 #define EX_ST(x) x 33 + #endif 34 + #ifndef EX_ST_FP 35 + #define EX_ST_FP(x) x 30 36 #endif 31 37 32 38 #ifndef EX_RETVAL ··· 126 120 and %g2, 0x38, %g2 127 121 128 122 1: subcc %g1, 0x1, %g1 129 - EX_LD(LOAD(ldub, %o1 + 0x00, %o3)) 130 - EX_ST(STORE(stb, %o3, %o1 + GLOBAL_SPARE)) 123 + EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3)) 124 + EX_ST_FP(STORE(stb, %o3, %o1 + GLOBAL_SPARE)) 131 125 bgu,pt %XCC, 1b 132 126 add %o1, 0x1, %o1 133 127 ··· 138 132 be,pt %icc, 3f 139 133 alignaddr %o1, %g0, %o1 140 134 141 - EX_LD(LOAD(ldd, %o1, %f4)) 142 - 1: EX_LD(LOAD(ldd, %o1 + 0x8, %f6)) 135 + EX_LD_FP(LOAD(ldd, %o1, %f4)) 136 + 1: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6)) 143 137 add %o1, 0x8, %o1 144 138 subcc %g2, 0x8, %g2 145 139 faligndata %f4, %f6, %f0 146 - EX_ST(STORE(std, %f0, %o0)) 140 + EX_ST_FP(STORE(std, %f0, %o0)) 147 141 be,pn %icc, 3f 148 142 add %o0, 0x8, %o0 149 143 150 - EX_LD(LOAD(ldd, %o1 + 0x8, %f4)) 144 + EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4)) 151 145 add %o1, 0x8, %o1 152 146 subcc %g2, 0x8, %g2 153 147 faligndata %f6, %f4, %f2 154 - EX_ST(STORE(std, %f2, %o0)) 148 + EX_ST_FP(STORE(std, %f2, %o0)) 155 149 bne,pt %icc, 1b 156 150 add %o0, 0x8, %o0 157 151 ··· 161 155 LOAD(prefetch, %o1 + 0x080, #one_read) 162 156 LOAD(prefetch, %o1 + 0x0c0, #one_read) 163 157 LOAD(prefetch, %o1 + 0x100, #one_read) 164 - EX_LD(LOAD(ldd, %o1 + 0x000, %f0)) 158 + EX_LD_FP(LOAD(ldd, %o1 + 0x000, %f0)) 165 159 LOAD(prefetch, %o1 + 0x140, #one_read) 166 - EX_LD(LOAD(ldd, %o1 + 0x008, %f2)) 160 + EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2)) 167 161 LOAD(prefetch, %o1 + 0x180, #one_read) 168 - EX_LD(LOAD(ldd, %o1 + 0x010, %f4)) 162 + EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4)) 169 163 LOAD(prefetch, %o1 + 0x1c0, #one_read) 170 164 faligndata %f0, %f2, %f16 171 - EX_LD(LOAD(ldd, %o1 + 0x018, %f6)) 165 + EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6)) 172 166 
faligndata %f2, %f4, %f18 173 - EX_LD(LOAD(ldd, %o1 + 0x020, %f8)) 167 + EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8)) 174 168 faligndata %f4, %f6, %f20 175 - EX_LD(LOAD(ldd, %o1 + 0x028, %f10)) 169 + EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10)) 176 170 faligndata %f6, %f8, %f22 177 171 178 - EX_LD(LOAD(ldd, %o1 + 0x030, %f12)) 172 + EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12)) 179 173 faligndata %f8, %f10, %f24 180 - EX_LD(LOAD(ldd, %o1 + 0x038, %f14)) 174 + EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14)) 181 175 faligndata %f10, %f12, %f26 182 - EX_LD(LOAD(ldd, %o1 + 0x040, %f0)) 176 + EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0)) 183 177 184 178 subcc GLOBAL_SPARE, 0x80, GLOBAL_SPARE 185 179 add %o1, 0x40, %o1 ··· 190 184 191 185 .align 64 192 186 1: 193 - EX_LD(LOAD(ldd, %o1 + 0x008, %f2)) 187 + EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2)) 194 188 faligndata %f12, %f14, %f28 195 - EX_LD(LOAD(ldd, %o1 + 0x010, %f4)) 189 + EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4)) 196 190 faligndata %f14, %f0, %f30 197 - EX_ST(STORE_BLK(%f16, %o0)) 198 - EX_LD(LOAD(ldd, %o1 + 0x018, %f6)) 191 + EX_ST_FP(STORE_BLK(%f16, %o0)) 192 + EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6)) 199 193 faligndata %f0, %f2, %f16 200 194 add %o0, 0x40, %o0 201 195 202 - EX_LD(LOAD(ldd, %o1 + 0x020, %f8)) 196 + EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8)) 203 197 faligndata %f2, %f4, %f18 204 - EX_LD(LOAD(ldd, %o1 + 0x028, %f10)) 198 + EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10)) 205 199 faligndata %f4, %f6, %f20 206 - EX_LD(LOAD(ldd, %o1 + 0x030, %f12)) 200 + EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12)) 207 201 subcc %o3, 0x01, %o3 208 202 faligndata %f6, %f8, %f22 209 - EX_LD(LOAD(ldd, %o1 + 0x038, %f14)) 203 + EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14)) 210 204 211 205 faligndata %f8, %f10, %f24 212 - EX_LD(LOAD(ldd, %o1 + 0x040, %f0)) 206 + EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0)) 213 207 LOAD(prefetch, %o1 + 0x1c0, #one_read) 214 208 faligndata %f10, %f12, %f26 215 209 bg,pt %XCC, 1b ··· 217 211 218 212 /* Finally we copy the last full 64-byte block. 
*/ 219 213 2: 220 - EX_LD(LOAD(ldd, %o1 + 0x008, %f2)) 214 + EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2)) 221 215 faligndata %f12, %f14, %f28 222 - EX_LD(LOAD(ldd, %o1 + 0x010, %f4)) 216 + EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4)) 223 217 faligndata %f14, %f0, %f30 224 - EX_ST(STORE_BLK(%f16, %o0)) 225 - EX_LD(LOAD(ldd, %o1 + 0x018, %f6)) 218 + EX_ST_FP(STORE_BLK(%f16, %o0)) 219 + EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6)) 226 220 faligndata %f0, %f2, %f16 227 - EX_LD(LOAD(ldd, %o1 + 0x020, %f8)) 221 + EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8)) 228 222 faligndata %f2, %f4, %f18 229 - EX_LD(LOAD(ldd, %o1 + 0x028, %f10)) 223 + EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10)) 230 224 faligndata %f4, %f6, %f20 231 - EX_LD(LOAD(ldd, %o1 + 0x030, %f12)) 225 + EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12)) 232 226 faligndata %f6, %f8, %f22 233 - EX_LD(LOAD(ldd, %o1 + 0x038, %f14)) 227 + EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14)) 234 228 faligndata %f8, %f10, %f24 235 229 cmp %g1, 0 236 230 be,pt %XCC, 1f 237 231 add %o0, 0x40, %o0 238 - EX_LD(LOAD(ldd, %o1 + 0x040, %f0)) 232 + EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0)) 239 233 1: faligndata %f10, %f12, %f26 240 234 faligndata %f12, %f14, %f28 241 235 faligndata %f14, %f0, %f30 242 - EX_ST(STORE_BLK(%f16, %o0)) 236 + EX_ST_FP(STORE_BLK(%f16, %o0)) 243 237 add %o0, 0x40, %o0 244 238 add %o1, 0x40, %o1 245 239 membar #Sync ··· 259 253 260 254 sub %o2, %g2, %o2 261 255 be,a,pt %XCC, 1f 262 - EX_LD(LOAD(ldd, %o1 + 0x00, %f0)) 256 + EX_LD_FP(LOAD(ldd, %o1 + 0x00, %f0)) 263 257 264 - 1: EX_LD(LOAD(ldd, %o1 + 0x08, %f2)) 258 + 1: EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f2)) 265 259 add %o1, 0x8, %o1 266 260 subcc %g2, 0x8, %g2 267 261 faligndata %f0, %f2, %f8 268 - EX_ST(STORE(std, %f8, %o0)) 262 + EX_ST_FP(STORE(std, %f8, %o0)) 269 263 be,pn %XCC, 2f 270 264 add %o0, 0x8, %o0 271 - EX_LD(LOAD(ldd, %o1 + 0x08, %f0)) 265 + EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f0)) 272 266 add %o1, 0x8, %o1 273 267 subcc %g2, 0x8, %g2 274 268 faligndata %f2, %f0, %f8 275 - EX_ST(STORE(std, %f8, %o0)) 269 + 
EX_ST_FP(STORE(std, %f8, %o0)) 276 270 bne,pn %XCC, 1b 277 271 add %o0, 0x8, %o0 278 272
+8
arch/x86/kvm/cpuid.h
··· 38 38 return best && (best->ecx & bit(X86_FEATURE_XSAVE)); 39 39 } 40 40 41 + static inline bool guest_cpuid_has_mtrr(struct kvm_vcpu *vcpu) 42 + { 43 + struct kvm_cpuid_entry2 *best; 44 + 45 + best = kvm_find_cpuid_entry(vcpu, 1, 0); 46 + return best && (best->edx & bit(X86_FEATURE_MTRR)); 47 + } 48 + 41 49 static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu) 42 50 { 43 51 struct kvm_cpuid_entry2 *best;
+19 -6
arch/x86/kvm/mtrr.c
··· 120 120 return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK; 121 121 } 122 122 123 - static u8 mtrr_disabled_type(void) 123 + static u8 mtrr_disabled_type(struct kvm_vcpu *vcpu) 124 124 { 125 125 /* 126 126 * Intel SDM 11.11.2.2: all MTRRs are disabled when 127 127 * IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC 128 128 * memory type is applied to all of physical memory. 129 + * 130 + * However, virtual machines can be run with CPUID such that 131 + * there are no MTRRs. In that case, the firmware will never 132 + * enable MTRRs and it is obviously undesirable to run the 133 + * guest entirely with UC memory and we use WB. 129 134 */ 130 - return MTRR_TYPE_UNCACHABLE; 135 + if (guest_cpuid_has_mtrr(vcpu)) 136 + return MTRR_TYPE_UNCACHABLE; 137 + else 138 + return MTRR_TYPE_WRBACK; 131 139 } 132 140 133 141 /* ··· 275 267 276 268 for (seg = 0; seg < seg_num; seg++) { 277 269 mtrr_seg = &fixed_seg_table[seg]; 278 - if (mtrr_seg->start >= addr && addr < mtrr_seg->end) 270 + if (mtrr_seg->start <= addr && addr < mtrr_seg->end) 279 271 return seg; 280 272 } 281 273 ··· 308 300 *start = range->base & PAGE_MASK; 309 301 310 302 mask = range->mask & PAGE_MASK; 311 - mask |= ~0ULL << boot_cpu_data.x86_phys_bits; 312 303 313 304 /* This cannot overflow because writing to the reserved bits of 314 305 * variable MTRRs causes a #GP. ··· 363 356 if (var_mtrr_range_is_valid(cur)) 364 357 list_del(&mtrr_state->var_ranges[index].node); 365 358 359 + /* Extend the mask with all 1 bits to the left, since those 360 + * bits must implicitly be 0. The bits are then cleared 361 + * when reading them. 362 + */ 366 363 if (!is_mtrr_mask) 367 364 cur->base = data; 368 365 else 369 - cur->mask = data; 366 + cur->mask = data | (-1LL << cpuid_maxphyaddr(vcpu)); 370 367 371 368 /* add it to the list if it's enabled. 
*/ 372 369 if (var_mtrr_range_is_valid(cur)) { ··· 437 426 *pdata = vcpu->arch.mtrr_state.var_ranges[index].base; 438 427 else 439 428 *pdata = vcpu->arch.mtrr_state.var_ranges[index].mask; 429 + 430 + *pdata &= (1ULL << cpuid_maxphyaddr(vcpu)) - 1; 440 431 } 441 432 442 433 return 0; ··· 683 670 } 684 671 685 672 if (iter.mtrr_disabled) 686 - return mtrr_disabled_type(); 673 + return mtrr_disabled_type(vcpu); 687 674 688 675 /* not contained in any MTRRs. */ 689 676 if (type == -1)
+2 -2
arch/x86/kvm/svm.c
··· 3422 3422 struct kvm_run *kvm_run = vcpu->run; 3423 3423 u32 exit_code = svm->vmcb->control.exit_code; 3424 3424 3425 + trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM); 3426 + 3425 3427 if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE)) 3426 3428 vcpu->arch.cr0 = svm->vmcb->save.cr0; 3427 3429 if (npt_enabled) ··· 3893 3891 vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax; 3894 3892 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; 3895 3893 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; 3896 - 3897 - trace_kvm_exit(svm->vmcb->control.exit_code, vcpu, KVM_ISA_SVM); 3898 3894 3899 3895 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) 3900 3896 kvm_before_handle_nmi(&svm->vcpu);
+4 -3
arch/x86/kvm/vmx.c
··· 2803 2803 msr_info->data = vcpu->arch.ia32_xss; 2804 2804 break; 2805 2805 case MSR_TSC_AUX: 2806 - if (!guest_cpuid_has_rdtscp(vcpu)) 2806 + if (!guest_cpuid_has_rdtscp(vcpu) && !msr_info->host_initiated) 2807 2807 return 1; 2808 2808 /* Otherwise falls through */ 2809 2809 default: ··· 2909 2909 clear_atomic_switch_msr(vmx, MSR_IA32_XSS); 2910 2910 break; 2911 2911 case MSR_TSC_AUX: 2912 - if (!guest_cpuid_has_rdtscp(vcpu)) 2912 + if (!guest_cpuid_has_rdtscp(vcpu) && !msr_info->host_initiated) 2913 2913 return 1; 2914 2914 /* Check reserved bit, higher 32 bits should be zero */ 2915 2915 if ((data >> 32) != 0) ··· 8042 8042 u32 exit_reason = vmx->exit_reason; 8043 8043 u32 vectoring_info = vmx->idt_vectoring_info; 8044 8044 8045 + trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX); 8046 + 8045 8047 /* 8046 8048 * Flush logged GPAs PML buffer, this will make dirty_bitmap more 8047 8049 * updated. Another good is, in kvm_vm_ioctl_get_dirty_log, before ··· 8670 8668 vmx->loaded_vmcs->launched = 1; 8671 8669 8672 8670 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON); 8673 - trace_kvm_exit(vmx->exit_reason, vcpu, KVM_ISA_VMX); 8674 8671 8675 8672 /* 8676 8673 * the KVM_REQ_EVENT optimization bit is only on for one entry, and if
+8 -4
arch/x86/kvm/x86.c
··· 3572 3572 3573 3573 static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps) 3574 3574 { 3575 + int i; 3575 3576 mutex_lock(&kvm->arch.vpit->pit_state.lock); 3576 3577 memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state)); 3577 - kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0); 3578 + for (i = 0; i < 3; i++) 3579 + kvm_pit_load_count(kvm, i, ps->channels[i].count, 0); 3578 3580 mutex_unlock(&kvm->arch.vpit->pit_state.lock); 3579 3581 return 0; 3580 3582 } ··· 3595 3593 static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) 3596 3594 { 3597 3595 int start = 0; 3596 + int i; 3598 3597 u32 prev_legacy, cur_legacy; 3599 3598 mutex_lock(&kvm->arch.vpit->pit_state.lock); 3600 3599 prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY; ··· 3605 3602 memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels, 3606 3603 sizeof(kvm->arch.vpit->pit_state.channels)); 3607 3604 kvm->arch.vpit->pit_state.flags = ps->flags; 3608 - kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start); 3605 + for (i = 0; i < 3; i++) 3606 + kvm_pit_load_count(kvm, i, kvm->arch.vpit->pit_state.channels[i].count, start); 3609 3607 mutex_unlock(&kvm->arch.vpit->pit_state.lock); 3610 3608 return 0; 3611 3609 } ··· 6519 6515 if (req_immediate_exit) 6520 6516 smp_send_reschedule(vcpu->cpu); 6521 6517 6518 + trace_kvm_entry(vcpu->vcpu_id); 6519 + wait_lapic_expire(vcpu); 6522 6520 __kvm_guest_enter(); 6523 6521 6524 6522 if (unlikely(vcpu->arch.switch_db_regs)) { ··· 6533 6527 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; 6534 6528 } 6535 6529 6536 - trace_kvm_entry(vcpu->vcpu_id); 6537 - wait_lapic_expire(vcpu); 6538 6530 kvm_x86_ops->run(vcpu); 6539 6531 6540 6532 /*
+1 -1
arch/x86/um/signal.c
··· 470 470 struct sigcontext __user *sc = &frame->sc; 471 471 int sig_size = (_NSIG_WORDS - 1) * sizeof(unsigned long); 472 472 473 - if (copy_from_user(&set.sig[0], (void *)sc->oldmask, sizeof(set.sig[0])) || 473 + if (copy_from_user(&set.sig[0], &sc->oldmask, sizeof(set.sig[0])) || 474 474 copy_from_user(&set.sig[1], frame->extramask, sig_size)) 475 475 goto segfault; 476 476
+2 -7
arch/x86/xen/mmu.c
··· 2495 2495 { 2496 2496 x86_init.paging.pagetable_init = xen_pagetable_init; 2497 2497 2498 - /* Optimization - we can use the HVM one but it has no idea which 2499 - * VCPUs are descheduled - which means that it will needlessly IPI 2500 - * them. Xen knows so let it do the job. 2501 - */ 2502 - if (xen_feature(XENFEAT_auto_translated_physmap)) { 2503 - pv_mmu_ops.flush_tlb_others = xen_flush_tlb_others; 2498 + if (xen_feature(XENFEAT_auto_translated_physmap)) 2504 2499 return; 2505 - } 2500 + 2506 2501 pv_mmu_ops = xen_mmu_ops; 2507 2502 2508 2503 memset(dummy_mapping, 0xff, PAGE_SIZE);
+11 -10
arch/x86/xen/suspend.c
··· 1 1 #include <linux/types.h> 2 2 #include <linux/tick.h> 3 3 4 + #include <xen/xen.h> 4 5 #include <xen/interface/xen.h> 5 6 #include <xen/grant_table.h> 6 7 #include <xen/events.h> ··· 69 68 70 69 void xen_arch_pre_suspend(void) 71 70 { 72 - int cpu; 73 - 74 - for_each_online_cpu(cpu) 75 - xen_pmu_finish(cpu); 76 - 77 71 if (xen_pv_domain()) 78 72 xen_pv_pre_suspend(); 79 73 } 80 74 81 75 void xen_arch_post_suspend(int cancelled) 82 76 { 83 - int cpu; 84 - 85 77 if (xen_pv_domain()) 86 78 xen_pv_post_suspend(cancelled); 87 79 else 88 80 xen_hvm_post_suspend(cancelled); 89 - 90 - for_each_online_cpu(cpu) 91 - xen_pmu_init(cpu); 92 81 } 93 82 94 83 static void xen_vcpu_notify_restore(void *data) ··· 97 106 98 107 void xen_arch_resume(void) 99 108 { 109 + int cpu; 110 + 100 111 on_each_cpu(xen_vcpu_notify_restore, NULL, 1); 112 + 113 + for_each_online_cpu(cpu) 114 + xen_pmu_init(cpu); 101 115 } 102 116 103 117 void xen_arch_suspend(void) 104 118 { 119 + int cpu; 120 + 121 + for_each_online_cpu(cpu) 122 + xen_pmu_finish(cpu); 123 + 105 124 on_each_cpu(xen_vcpu_notify_suspend, NULL, 1); 106 125 }
+18 -2
block/blk-core.c
··· 207 207 EXPORT_SYMBOL(blk_delay_queue); 208 208 209 209 /** 210 + * blk_start_queue_async - asynchronously restart a previously stopped queue 211 + * @q: The &struct request_queue in question 212 + * 213 + * Description: 214 + * blk_start_queue_async() will clear the stop flag on the queue, and 215 + * ensure that the request_fn for the queue is run from an async 216 + * context. 217 + **/ 218 + void blk_start_queue_async(struct request_queue *q) 219 + { 220 + queue_flag_clear(QUEUE_FLAG_STOPPED, q); 221 + blk_run_queue_async(q); 222 + } 223 + EXPORT_SYMBOL(blk_start_queue_async); 224 + 225 + /** 210 226 * blk_start_queue - restart a previously stopped queue 211 227 * @q: The &struct request_queue in question 212 228 * ··· 1705 1689 struct request *req; 1706 1690 unsigned int request_count = 0; 1707 1691 1708 - blk_queue_split(q, &bio, q->bio_split); 1709 - 1710 1692 /* 1711 1693 * low level driver can indicate that it wants pages above a 1712 1694 * certain limit bounced to low memory (ie for highmem, or even 1713 1695 * ISA dma in theory) 1714 1696 */ 1715 1697 blk_queue_bounce(q, &bio); 1698 + 1699 + blk_queue_split(q, &bio, q->bio_split); 1716 1700 1717 1701 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { 1718 1702 bio->bi_error = -EIO;
+1 -1
block/blk-merge.c
··· 81 81 struct bio *new = NULL; 82 82 83 83 bio_for_each_segment(bv, bio, iter) { 84 - if (sectors + (bv.bv_len >> 9) > queue_max_sectors(q)) 84 + if (sectors + (bv.bv_len >> 9) > blk_max_size_offset(q, bio->bi_iter.bi_sector)) 85 85 goto split; 86 86 87 87 /*
+30 -31
crypto/algif_skcipher.c
··· 47 47 bool merge; 48 48 bool enc; 49 49 50 - struct ablkcipher_request req; 50 + struct skcipher_request req; 51 51 }; 52 52 53 53 struct skcipher_async_rsgl { ··· 64 64 }; 65 65 66 66 #define GET_SREQ(areq, ctx) (struct skcipher_async_req *)((char *)areq + \ 67 - crypto_ablkcipher_reqsize(crypto_ablkcipher_reqtfm(&ctx->req))) 67 + crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req))) 68 68 69 69 #define GET_REQ_SIZE(ctx) \ 70 - crypto_ablkcipher_reqsize(crypto_ablkcipher_reqtfm(&ctx->req)) 70 + crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req)) 71 71 72 72 #define GET_IV_SIZE(ctx) \ 73 - crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(&ctx->req)) 73 + crypto_skcipher_ivsize(crypto_skcipher_reqtfm(&ctx->req)) 74 74 75 75 #define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \ 76 76 sizeof(struct scatterlist) - 1) ··· 302 302 struct sock *sk = sock->sk; 303 303 struct alg_sock *ask = alg_sk(sk); 304 304 struct skcipher_ctx *ctx = ask->private; 305 - struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req); 306 - unsigned ivsize = crypto_ablkcipher_ivsize(tfm); 305 + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req); 306 + unsigned ivsize = crypto_skcipher_ivsize(tfm); 307 307 struct skcipher_sg_list *sgl; 308 308 struct af_alg_control con = {}; 309 309 long copied = 0; ··· 507 507 struct skcipher_sg_list *sgl; 508 508 struct scatterlist *sg; 509 509 struct skcipher_async_req *sreq; 510 - struct ablkcipher_request *req; 510 + struct skcipher_request *req; 511 511 struct skcipher_async_rsgl *last_rsgl = NULL; 512 512 unsigned int txbufs = 0, len = 0, tx_nents = skcipher_all_sg_nents(ctx); 513 513 unsigned int reqlen = sizeof(struct skcipher_async_req) + ··· 531 531 } 532 532 sg_init_table(sreq->tsg, tx_nents); 533 533 memcpy(sreq->iv, ctx->iv, GET_IV_SIZE(ctx)); 534 - ablkcipher_request_set_tfm(req, crypto_ablkcipher_reqtfm(&ctx->req)); 535 - ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, 536 - 
skcipher_async_cb, sk); 534 + skcipher_request_set_tfm(req, crypto_skcipher_reqtfm(&ctx->req)); 535 + skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, 536 + skcipher_async_cb, sk); 537 537 538 538 while (iov_iter_count(&msg->msg_iter)) { 539 539 struct skcipher_async_rsgl *rsgl; ··· 608 608 if (mark) 609 609 sg_mark_end(sreq->tsg + txbufs - 1); 610 610 611 - ablkcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg, 612 - len, sreq->iv); 613 - err = ctx->enc ? crypto_ablkcipher_encrypt(req) : 614 - crypto_ablkcipher_decrypt(req); 611 + skcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg, 612 + len, sreq->iv); 613 + err = ctx->enc ? crypto_skcipher_encrypt(req) : 614 + crypto_skcipher_decrypt(req); 615 615 if (err == -EINPROGRESS) { 616 616 atomic_inc(&ctx->inflight); 617 617 err = -EIOCBQUEUED; ··· 632 632 struct sock *sk = sock->sk; 633 633 struct alg_sock *ask = alg_sk(sk); 634 634 struct skcipher_ctx *ctx = ask->private; 635 - unsigned bs = crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm( 635 + unsigned bs = crypto_skcipher_blocksize(crypto_skcipher_reqtfm( 636 636 &ctx->req)); 637 637 struct skcipher_sg_list *sgl; 638 638 struct scatterlist *sg; ··· 669 669 if (!used) 670 670 goto free; 671 671 672 - ablkcipher_request_set_crypt(&ctx->req, sg, 673 - ctx->rsgl.sg, used, 674 - ctx->iv); 672 + skcipher_request_set_crypt(&ctx->req, sg, ctx->rsgl.sg, used, 673 + ctx->iv); 675 674 676 675 err = af_alg_wait_for_completion( 677 676 ctx->enc ? 
678 - crypto_ablkcipher_encrypt(&ctx->req) : 679 - crypto_ablkcipher_decrypt(&ctx->req), 677 + crypto_skcipher_encrypt(&ctx->req) : 678 + crypto_skcipher_decrypt(&ctx->req), 680 679 &ctx->completion); 681 680 682 681 free: ··· 750 751 751 752 static void *skcipher_bind(const char *name, u32 type, u32 mask) 752 753 { 753 - return crypto_alloc_ablkcipher(name, type, mask); 754 + return crypto_alloc_skcipher(name, type, mask); 754 755 } 755 756 756 757 static void skcipher_release(void *private) 757 758 { 758 - crypto_free_ablkcipher(private); 759 + crypto_free_skcipher(private); 759 760 } 760 761 761 762 static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen) 762 763 { 763 - return crypto_ablkcipher_setkey(private, key, keylen); 764 + return crypto_skcipher_setkey(private, key, keylen); 764 765 } 765 766 766 767 static void skcipher_wait(struct sock *sk) ··· 777 778 { 778 779 struct alg_sock *ask = alg_sk(sk); 779 780 struct skcipher_ctx *ctx = ask->private; 780 - struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req); 781 + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req); 781 782 782 783 if (atomic_read(&ctx->inflight)) 783 784 skcipher_wait(sk); 784 785 785 786 skcipher_free_sgl(sk); 786 - sock_kzfree_s(sk, ctx->iv, crypto_ablkcipher_ivsize(tfm)); 787 + sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm)); 787 788 sock_kfree_s(sk, ctx, ctx->len); 788 789 af_alg_release_parent(sk); 789 790 } ··· 792 793 { 793 794 struct skcipher_ctx *ctx; 794 795 struct alg_sock *ask = alg_sk(sk); 795 - unsigned int len = sizeof(*ctx) + crypto_ablkcipher_reqsize(private); 796 + unsigned int len = sizeof(*ctx) + crypto_skcipher_reqsize(private); 796 797 797 798 ctx = sock_kmalloc(sk, len, GFP_KERNEL); 798 799 if (!ctx) 799 800 return -ENOMEM; 800 801 801 - ctx->iv = sock_kmalloc(sk, crypto_ablkcipher_ivsize(private), 802 + ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(private), 802 803 GFP_KERNEL); 803 804 if (!ctx->iv) { 804 
805 sock_kfree_s(sk, ctx, len); 805 806 return -ENOMEM; 806 807 } 807 808 808 - memset(ctx->iv, 0, crypto_ablkcipher_ivsize(private)); 809 + memset(ctx->iv, 0, crypto_skcipher_ivsize(private)); 809 810 810 811 INIT_LIST_HEAD(&ctx->tsgl); 811 812 ctx->len = len; ··· 818 819 819 820 ask->private = ctx; 820 821 821 - ablkcipher_request_set_tfm(&ctx->req, private); 822 - ablkcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG, 823 - af_alg_complete, &ctx->completion); 822 + skcipher_request_set_tfm(&ctx->req, private); 823 + skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG, 824 + af_alg_complete, &ctx->completion); 824 825 825 826 sk->sk_destruct = skcipher_sock_destruct; 826 827
+2 -1
drivers/acpi/processor_driver.c
··· 200 200 goto err_remove_sysfs_thermal; 201 201 } 202 202 203 - sysfs_remove_link(&pr->cdev->device.kobj, "device"); 203 + return 0; 204 + 204 205 err_remove_sysfs_thermal: 205 206 sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); 206 207 err_thermal_unregister:
+22 -11
drivers/base/power/domain.c
··· 390 390 struct generic_pm_domain *genpd; 391 391 bool (*stop_ok)(struct device *__dev); 392 392 struct gpd_timing_data *td = &dev_gpd_data(dev)->td; 393 + bool runtime_pm = pm_runtime_enabled(dev); 393 394 ktime_t time_start; 394 395 s64 elapsed_ns; 395 396 int ret; ··· 401 400 if (IS_ERR(genpd)) 402 401 return -EINVAL; 403 402 403 + /* 404 + * A runtime PM centric subsystem/driver may re-use the runtime PM 405 + * callbacks for other purposes than runtime PM. In those scenarios 406 + * runtime PM is disabled. Under these circumstances, we shall skip 407 + * validating/measuring the PM QoS latency. 408 + */ 404 409 stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL; 405 - if (stop_ok && !stop_ok(dev)) 410 + if (runtime_pm && stop_ok && !stop_ok(dev)) 406 411 return -EBUSY; 407 412 408 413 /* Measure suspend latency. */ 409 - time_start = ktime_get(); 414 + if (runtime_pm) 415 + time_start = ktime_get(); 410 416 411 417 ret = genpd_save_dev(genpd, dev); 412 418 if (ret) ··· 426 418 } 427 419 428 420 /* Update suspend latency value if the measured time exceeds it. 
*/ 429 - elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); 430 - if (elapsed_ns > td->suspend_latency_ns) { 431 - td->suspend_latency_ns = elapsed_ns; 432 - dev_dbg(dev, "suspend latency exceeded, %lld ns\n", 433 - elapsed_ns); 434 - genpd->max_off_time_changed = true; 435 - td->constraint_changed = true; 421 + if (runtime_pm) { 422 + elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); 423 + if (elapsed_ns > td->suspend_latency_ns) { 424 + td->suspend_latency_ns = elapsed_ns; 425 + dev_dbg(dev, "suspend latency exceeded, %lld ns\n", 426 + elapsed_ns); 427 + genpd->max_off_time_changed = true; 428 + td->constraint_changed = true; 429 + } 436 430 } 437 431 438 432 /* ··· 463 453 { 464 454 struct generic_pm_domain *genpd; 465 455 struct gpd_timing_data *td = &dev_gpd_data(dev)->td; 456 + bool runtime_pm = pm_runtime_enabled(dev); 466 457 ktime_t time_start; 467 458 s64 elapsed_ns; 468 459 int ret; ··· 490 479 491 480 out: 492 481 /* Measure resume latency. */ 493 - if (timed) 482 + if (timed && runtime_pm) 494 483 time_start = ktime_get(); 495 484 496 485 genpd_start_dev(genpd, dev); 497 486 genpd_restore_dev(genpd, dev); 498 487 499 488 /* Update resume latency value if the measured time exceeds it. */ 500 - if (timed) { 489 + if (timed && runtime_pm) { 501 490 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); 502 491 if (elapsed_ns > td->resume_latency_ns) { 503 492 td->resume_latency_ns = elapsed_ns;
+7 -8
drivers/block/null_blk.c
··· 219 219 { 220 220 struct request_queue *q = NULL; 221 221 222 + if (cmd->rq) 223 + q = cmd->rq->q; 224 + 222 225 switch (queue_mode) { 223 226 case NULL_Q_MQ: 224 227 blk_mq_end_request(cmd->rq, 0); ··· 232 229 break; 233 230 case NULL_Q_BIO: 234 231 bio_endio(cmd->bio); 235 - goto free_cmd; 232 + break; 236 233 } 237 234 238 - if (cmd->rq) 239 - q = cmd->rq->q; 235 + free_cmd(cmd); 240 236 241 237 /* Restart queue if needed, as we are freeing a tag */ 242 - if (q && !q->mq_ops && blk_queue_stopped(q)) { 238 + if (queue_mode == NULL_Q_RQ && blk_queue_stopped(q)) { 243 239 unsigned long flags; 244 240 245 241 spin_lock_irqsave(q->queue_lock, flags); 246 - if (blk_queue_stopped(q)) 247 - blk_start_queue(q); 242 + blk_start_queue_async(q); 248 243 spin_unlock_irqrestore(q->queue_lock, flags); 249 244 } 250 - free_cmd: 251 - free_cmd(cmd); 252 245 } 253 246 254 247 static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
+10 -5
drivers/block/xen-blkback/blkback.c
··· 950 950 goto unmap; 951 951 952 952 for (n = 0, i = 0; n < nseg; n++) { 953 + uint8_t first_sect, last_sect; 954 + 953 955 if ((n % SEGS_PER_INDIRECT_FRAME) == 0) { 954 956 /* Map indirect segments */ 955 957 if (segments) ··· 959 957 segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page); 960 958 } 961 959 i = n % SEGS_PER_INDIRECT_FRAME; 960 + 962 961 pending_req->segments[n]->gref = segments[i].gref; 963 - seg[n].nsec = segments[i].last_sect - 964 - segments[i].first_sect + 1; 965 - seg[n].offset = (segments[i].first_sect << 9); 966 - if ((segments[i].last_sect >= (XEN_PAGE_SIZE >> 9)) || 967 - (segments[i].last_sect < segments[i].first_sect)) { 962 + 963 + first_sect = READ_ONCE(segments[i].first_sect); 964 + last_sect = READ_ONCE(segments[i].last_sect); 965 + if (last_sect >= (XEN_PAGE_SIZE >> 9) || last_sect < first_sect) { 968 966 rc = -EINVAL; 969 967 goto unmap; 970 968 } 969 + 970 + seg[n].nsec = last_sect - first_sect + 1; 971 + seg[n].offset = first_sect << 9; 971 972 preq->nr_sects += seg[n].nsec; 972 973 } 973 974
+4 -4
drivers/block/xen-blkback/common.h
··· 408 408 struct blkif_x86_32_request *src) 409 409 { 410 410 int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j; 411 - dst->operation = src->operation; 412 - switch (src->operation) { 411 + dst->operation = READ_ONCE(src->operation); 412 + switch (dst->operation) { 413 413 case BLKIF_OP_READ: 414 414 case BLKIF_OP_WRITE: 415 415 case BLKIF_OP_WRITE_BARRIER: ··· 456 456 struct blkif_x86_64_request *src) 457 457 { 458 458 int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j; 459 - dst->operation = src->operation; 460 - switch (src->operation) { 459 + dst->operation = READ_ONCE(src->operation); 460 + switch (dst->operation) { 461 461 case BLKIF_OP_READ: 462 462 case BLKIF_OP_WRITE: 463 463 case BLKIF_OP_WRITE_BARRIER:
+4 -4
drivers/bus/sunxi-rsb.c
··· 342 342 343 343 ret = _sunxi_rsb_run_xfer(rsb); 344 344 if (ret) 345 - goto out; 345 + goto unlock; 346 346 347 347 *buf = readl(rsb->regs + RSB_DATA); 348 348 349 + unlock: 349 350 mutex_unlock(&rsb->lock); 350 351 351 - out: 352 352 return ret; 353 353 } 354 354 ··· 527 527 */ 528 528 529 529 static const struct sunxi_rsb_addr_map sunxi_rsb_addr_maps[] = { 530 - { 0x3e3, 0x2d }, /* Primary PMIC: AXP223, AXP809, AXP81X, ... */ 530 + { 0x3a3, 0x2d }, /* Primary PMIC: AXP223, AXP809, AXP81X, ... */ 531 531 { 0x745, 0x3a }, /* Secondary PMIC: AXP806, ... */ 532 - { 0xe89, 0x45 }, /* Peripheral IC: AC100, ... */ 532 + { 0xe89, 0x4e }, /* Peripheral IC: AC100, ... */ 533 533 }; 534 534 535 535 static u8 sunxi_rsb_get_rtaddr(u16 hwaddr)
+1 -1
drivers/cpufreq/Kconfig.arm
··· 226 226 227 227 config ARM_TEGRA124_CPUFREQ 228 228 tristate "Tegra124 CPUFreq support" 229 - depends on ARCH_TEGRA && CPUFREQ_DT 229 + depends on ARCH_TEGRA && CPUFREQ_DT && REGULATOR 230 230 default y 231 231 help 232 232 This adds the CPUFreq driver support for Tegra124 SOCs.
+1 -1
drivers/cpufreq/intel_pstate.c
··· 1123 1123 limits->max_sysfs_pct); 1124 1124 limits->max_perf_pct = max(limits->min_policy_pct, 1125 1125 limits->max_perf_pct); 1126 - limits->max_perf = round_up(limits->max_perf, 8); 1126 + limits->max_perf = round_up(limits->max_perf, FRAC_BITS); 1127 1127 1128 1128 /* Make sure min_perf_pct <= max_perf_pct */ 1129 1129 limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
+1 -1
drivers/cpufreq/scpi-cpufreq.c
··· 31 31 32 32 static struct scpi_dvfs_info *scpi_get_dvfs_info(struct device *cpu_dev) 33 33 { 34 - u8 domain = topology_physical_package_id(cpu_dev->id); 34 + int domain = topology_physical_package_id(cpu_dev->id); 35 35 36 36 if (domain < 0) 37 37 return ERR_PTR(-EINVAL);
+1 -1
drivers/gpio/gpio-ath79.c
··· 113 113 __raw_writel(BIT(offset), ctrl->base + AR71XX_GPIO_REG_CLEAR); 114 114 115 115 __raw_writel( 116 - __raw_readl(ctrl->base + AR71XX_GPIO_REG_OE) & BIT(offset), 116 + __raw_readl(ctrl->base + AR71XX_GPIO_REG_OE) & ~BIT(offset), 117 117 ctrl->base + AR71XX_GPIO_REG_OE); 118 118 119 119 spin_unlock_irqrestore(&ctrl->lock, flags);
+2 -2
drivers/gpio/gpio-generic.c
··· 141 141 unsigned long pinmask = bgc->pin2mask(bgc, gpio); 142 142 143 143 if (bgc->dir & pinmask) 144 - return bgc->read_reg(bgc->reg_set) & pinmask; 144 + return !!(bgc->read_reg(bgc->reg_set) & pinmask); 145 145 else 146 - return bgc->read_reg(bgc->reg_dat) & pinmask; 146 + return !!(bgc->read_reg(bgc->reg_dat) & pinmask); 147 147 } 148 148 149 149 static int bgpio_get(struct gpio_chip *gc, unsigned int gpio)
+7 -1
drivers/gpio/gpiolib.c
··· 1279 1279 chip = desc->chip; 1280 1280 offset = gpio_chip_hwgpio(desc); 1281 1281 value = chip->get ? chip->get(chip, offset) : -EIO; 1282 - value = value < 0 ? value : !!value; 1282 + /* 1283 + * FIXME: fix all drivers to clamp to [0,1] or return negative, 1284 + * then change this to: 1285 + * value = value < 0 ? value : !!value; 1286 + * so we can properly propagate error codes. 1287 + */ 1288 + value = !!value; 1283 1289 trace_gpio_value(desc_to_gpio(desc), 1, value); 1284 1290 return value; 1285 1291 }
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu.h
··· 1264 1264 struct ww_acquire_ctx ticket; 1265 1265 1266 1266 /* user fence */ 1267 - struct amdgpu_user_fence uf; 1267 + struct amdgpu_user_fence uf; 1268 + struct amdgpu_bo_list_entry uf_entry; 1268 1269 }; 1269 1270 1270 1271 struct amdgpu_job {
+42 -21
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
··· 127 127 return 0; 128 128 } 129 129 130 + static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, 131 + struct drm_amdgpu_cs_chunk_fence *fence_data) 132 + { 133 + struct drm_gem_object *gobj; 134 + uint32_t handle; 135 + 136 + handle = fence_data->handle; 137 + gobj = drm_gem_object_lookup(p->adev->ddev, p->filp, 138 + fence_data->handle); 139 + if (gobj == NULL) 140 + return -EINVAL; 141 + 142 + p->uf.bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); 143 + p->uf.offset = fence_data->offset; 144 + 145 + if (amdgpu_ttm_tt_has_userptr(p->uf.bo->tbo.ttm)) { 146 + drm_gem_object_unreference_unlocked(gobj); 147 + return -EINVAL; 148 + } 149 + 150 + p->uf_entry.robj = amdgpu_bo_ref(p->uf.bo); 151 + p->uf_entry.prefered_domains = AMDGPU_GEM_DOMAIN_GTT; 152 + p->uf_entry.allowed_domains = AMDGPU_GEM_DOMAIN_GTT; 153 + p->uf_entry.priority = 0; 154 + p->uf_entry.tv.bo = &p->uf_entry.robj->tbo; 155 + p->uf_entry.tv.shared = true; 156 + 157 + drm_gem_object_unreference_unlocked(gobj); 158 + return 0; 159 + } 160 + 130 161 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) 131 162 { 132 163 union drm_amdgpu_cs *cs = data; ··· 238 207 239 208 case AMDGPU_CHUNK_ID_FENCE: 240 209 size = sizeof(struct drm_amdgpu_cs_chunk_fence); 241 - if (p->chunks[i].length_dw * sizeof(uint32_t) >= size) { 242 - uint32_t handle; 243 - struct drm_gem_object *gobj; 244 - struct drm_amdgpu_cs_chunk_fence *fence_data; 245 - 246 - fence_data = (void *)p->chunks[i].kdata; 247 - handle = fence_data->handle; 248 - gobj = drm_gem_object_lookup(p->adev->ddev, 249 - p->filp, handle); 250 - if (gobj == NULL) { 251 - ret = -EINVAL; 252 - goto free_partial_kdata; 253 - } 254 - 255 - p->uf.bo = gem_to_amdgpu_bo(gobj); 256 - amdgpu_bo_ref(p->uf.bo); 257 - drm_gem_object_unreference_unlocked(gobj); 258 - p->uf.offset = fence_data->offset; 259 - } else { 210 + if (p->chunks[i].length_dw * sizeof(uint32_t) < size) { 260 211 ret = -EINVAL; 261 212 goto free_partial_kdata; 262 213 } 214 + 215 + 
ret = amdgpu_cs_user_fence_chunk(p, (void *)p->chunks[i].kdata); 216 + if (ret) 217 + goto free_partial_kdata; 218 + 263 219 break; 264 220 265 221 case AMDGPU_CHUNK_ID_DEPENDENCIES: ··· 409 391 p->vm_bos = amdgpu_vm_get_bos(p->adev, &fpriv->vm, 410 392 &p->validated); 411 393 394 + if (p->uf.bo) 395 + list_add(&p->uf_entry.tv.head, &p->validated); 396 + 412 397 if (need_mmap_lock) 413 398 down_read(&current->mm->mmap_sem); 414 399 ··· 509 488 for (i = 0; i < parser->num_ibs; i++) 510 489 amdgpu_ib_free(parser->adev, &parser->ibs[i]); 511 490 kfree(parser->ibs); 512 - if (parser->uf.bo) 513 - amdgpu_bo_unref(&parser->uf.bo); 491 + amdgpu_bo_unref(&parser->uf.bo); 492 + amdgpu_bo_unref(&parser->uf_entry.robj); 514 493 } 515 494 516 495 static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
+3
drivers/gpu/drm/exynos/exynos_drm_crtc.c
··· 55 55 { 56 56 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); 57 57 58 + if (!state->enable) 59 + return 0; 60 + 58 61 if (exynos_crtc->ops->atomic_check) 59 62 return exynos_crtc->ops->atomic_check(exynos_crtc, state); 60 63
+20 -8
drivers/gpu/drm/i915/i915_drv.h
··· 2193 2193 struct drm_i915_private *i915; 2194 2194 struct intel_engine_cs *ring; 2195 2195 2196 - /** GEM sequence number associated with this request. */ 2197 - uint32_t seqno; 2196 + /** GEM sequence number associated with the previous request, 2197 + * when the HWS breadcrumb is equal to this the GPU is processing 2198 + * this request. 2199 + */ 2200 + u32 previous_seqno; 2201 + 2202 + /** GEM sequence number associated with this request, 2203 + * when the HWS breadcrumb is equal or greater than this the GPU 2204 + * has finished processing this request. 2205 + */ 2206 + u32 seqno; 2198 2207 2199 2208 /** Position in the ringbuffer of the start of the request */ 2200 2209 u32 head; ··· 2848 2839 2849 2840 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, 2850 2841 u32 flags); 2842 + void __i915_vma_set_map_and_fenceable(struct i915_vma *vma); 2851 2843 int __must_check i915_vma_unbind(struct i915_vma *vma); 2852 2844 /* 2853 2845 * BEWARE: Do not use the function below unless you can _absolutely_ ··· 2920 2910 return (int32_t)(seq1 - seq2) >= 0; 2921 2911 } 2922 2912 2913 + static inline bool i915_gem_request_started(struct drm_i915_gem_request *req, 2914 + bool lazy_coherency) 2915 + { 2916 + u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency); 2917 + return i915_seqno_passed(seqno, req->previous_seqno); 2918 + } 2919 + 2923 2920 static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req, 2924 2921 bool lazy_coherency) 2925 2922 { 2926 - u32 seqno; 2927 - 2928 - BUG_ON(req == NULL); 2929 - 2930 - seqno = req->ring->get_seqno(req->ring, lazy_coherency); 2931 - 2923 + u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency); 2932 2924 return i915_seqno_passed(seqno, req->seqno); 2933 2925 } 2934 2926
+84 -27
drivers/gpu/drm/i915/i915_gem.c
··· 1146 1146 return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings); 1147 1147 } 1148 1148 1149 - static int __i915_spin_request(struct drm_i915_gem_request *req) 1149 + static unsigned long local_clock_us(unsigned *cpu) 1150 + { 1151 + unsigned long t; 1152 + 1153 + /* Cheaply and approximately convert from nanoseconds to microseconds. 1154 + * The result and subsequent calculations are also defined in the same 1155 + * approximate microseconds units. The principal source of timing 1156 + * error here is from the simple truncation. 1157 + * 1158 + * Note that local_clock() is only defined wrt to the current CPU; 1159 + * the comparisons are no longer valid if we switch CPUs. Instead of 1160 + * blocking preemption for the entire busywait, we can detect the CPU 1161 + * switch and use that as indicator of system load and a reason to 1162 + * stop busywaiting, see busywait_stop(). 1163 + */ 1164 + *cpu = get_cpu(); 1165 + t = local_clock() >> 10; 1166 + put_cpu(); 1167 + 1168 + return t; 1169 + } 1170 + 1171 + static bool busywait_stop(unsigned long timeout, unsigned cpu) 1172 + { 1173 + unsigned this_cpu; 1174 + 1175 + if (time_after(local_clock_us(&this_cpu), timeout)) 1176 + return true; 1177 + 1178 + return this_cpu != cpu; 1179 + } 1180 + 1181 + static int __i915_spin_request(struct drm_i915_gem_request *req, int state) 1150 1182 { 1151 1183 unsigned long timeout; 1184 + unsigned cpu; 1152 1185 1153 - if (i915_gem_request_get_ring(req)->irq_refcount) 1186 + /* When waiting for high frequency requests, e.g. during synchronous 1187 + * rendering split between the CPU and GPU, the finite amount of time 1188 + * required to set up the irq and wait upon it limits the response 1189 + * rate. By busywaiting on the request completion for a short while we 1190 + * can service the high frequency waits as quick as possible. However, 1191 + * if it is a slow request, we want to sleep as quickly as possible. 
1192 + * The tradeoff between waiting and sleeping is roughly the time it 1193 + * takes to sleep on a request, on the order of a microsecond. 1194 + */ 1195 + 1196 + if (req->ring->irq_refcount) 1154 1197 return -EBUSY; 1155 1198 1156 - timeout = jiffies + 1; 1199 + /* Only spin if we know the GPU is processing this request */ 1200 + if (!i915_gem_request_started(req, true)) 1201 + return -EAGAIN; 1202 + 1203 + timeout = local_clock_us(&cpu) + 5; 1157 1204 while (!need_resched()) { 1158 1205 if (i915_gem_request_completed(req, true)) 1159 1206 return 0; 1160 1207 1161 - if (time_after_eq(jiffies, timeout)) 1208 + if (signal_pending_state(state, current)) 1209 + break; 1210 + 1211 + if (busywait_stop(timeout, cpu)) 1162 1212 break; 1163 1213 1164 1214 cpu_relax_lowlatency(); 1165 1215 } 1216 + 1166 1217 if (i915_gem_request_completed(req, false)) 1167 1218 return 0; 1168 1219 ··· 1248 1197 struct drm_i915_private *dev_priv = dev->dev_private; 1249 1198 const bool irq_test_in_progress = 1250 1199 ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring); 1200 + int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE; 1251 1201 DEFINE_WAIT(wait); 1252 1202 unsigned long timeout_expire; 1253 1203 s64 before, now; ··· 1281 1229 before = ktime_get_raw_ns(); 1282 1230 1283 1231 /* Optimistic spin for the next jiffie before touching IRQs */ 1284 - ret = __i915_spin_request(req); 1232 + ret = __i915_spin_request(req, state); 1285 1233 if (ret == 0) 1286 1234 goto out; 1287 1235 ··· 1293 1241 for (;;) { 1294 1242 struct timer_list timer; 1295 1243 1296 - prepare_to_wait(&ring->irq_queue, &wait, 1297 - interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); 1244 + prepare_to_wait(&ring->irq_queue, &wait, state); 1298 1245 1299 1246 /* We need to check whether any gpu reset happened in between 1300 1247 * the caller grabbing the seqno and now ... 
*/ ··· 1311 1260 break; 1312 1261 } 1313 1262 1314 - if (interruptible && signal_pending(current)) { 1263 + if (signal_pending_state(state, current)) { 1315 1264 ret = -ERESTARTSYS; 1316 1265 break; 1317 1266 } ··· 2605 2554 request->batch_obj = obj; 2606 2555 2607 2556 request->emitted_jiffies = jiffies; 2557 + request->previous_seqno = ring->last_submitted_seqno; 2608 2558 ring->last_submitted_seqno = request->seqno; 2609 2559 list_add_tail(&request->list, &ring->request_list); 2610 2560 ··· 4132 4080 return false; 4133 4081 } 4134 4082 4083 + void __i915_vma_set_map_and_fenceable(struct i915_vma *vma) 4084 + { 4085 + struct drm_i915_gem_object *obj = vma->obj; 4086 + bool mappable, fenceable; 4087 + u32 fence_size, fence_alignment; 4088 + 4089 + fence_size = i915_gem_get_gtt_size(obj->base.dev, 4090 + obj->base.size, 4091 + obj->tiling_mode); 4092 + fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev, 4093 + obj->base.size, 4094 + obj->tiling_mode, 4095 + true); 4096 + 4097 + fenceable = (vma->node.size == fence_size && 4098 + (vma->node.start & (fence_alignment - 1)) == 0); 4099 + 4100 + mappable = (vma->node.start + fence_size <= 4101 + to_i915(obj->base.dev)->gtt.mappable_end); 4102 + 4103 + obj->map_and_fenceable = mappable && fenceable; 4104 + } 4105 + 4135 4106 static int 4136 4107 i915_gem_object_do_pin(struct drm_i915_gem_object *obj, 4137 4108 struct i915_address_space *vm, ··· 4222 4147 4223 4148 if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL && 4224 4149 (bound ^ vma->bound) & GLOBAL_BIND) { 4225 - bool mappable, fenceable; 4226 - u32 fence_size, fence_alignment; 4227 - 4228 - fence_size = i915_gem_get_gtt_size(obj->base.dev, 4229 - obj->base.size, 4230 - obj->tiling_mode); 4231 - fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev, 4232 - obj->base.size, 4233 - obj->tiling_mode, 4234 - true); 4235 - 4236 - fenceable = (vma->node.size == fence_size && 4237 - (vma->node.start & (fence_alignment - 1)) == 0); 4238 - 4239 - 
mappable = (vma->node.start + fence_size <= 4240 - dev_priv->gtt.mappable_end); 4241 - 4242 - obj->map_and_fenceable = mappable && fenceable; 4243 - 4150 + __i915_vma_set_map_and_fenceable(vma); 4244 4151 WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable); 4245 4152 } 4246 4153
+1
drivers/gpu/drm/i915/i915_gem_gtt.c
··· 2676 2676 return ret; 2677 2677 } 2678 2678 vma->bound |= GLOBAL_BIND; 2679 + __i915_vma_set_map_and_fenceable(vma); 2679 2680 list_add_tail(&vma->mm_list, &ggtt_vm->inactive_list); 2680 2681 } 2681 2682
+1
drivers/gpu/drm/i915/i915_gem_stolen.c
··· 687 687 } 688 688 689 689 vma->bound |= GLOBAL_BIND; 690 + __i915_vma_set_map_and_fenceable(vma); 690 691 list_add_tail(&vma->mm_list, &ggtt->inactive_list); 691 692 } 692 693
+44 -22
drivers/gpu/drm/i915/intel_display.c
··· 116 116 static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force); 117 117 static void ironlake_pfit_enable(struct intel_crtc *crtc); 118 118 static void intel_modeset_setup_hw_state(struct drm_device *dev); 119 + static void intel_pre_disable_primary(struct drm_crtc *crtc); 119 120 120 121 typedef struct { 121 122 int min, max; ··· 2608 2607 struct drm_i915_gem_object *obj; 2609 2608 struct drm_plane *primary = intel_crtc->base.primary; 2610 2609 struct drm_plane_state *plane_state = primary->state; 2610 + struct drm_crtc_state *crtc_state = intel_crtc->base.state; 2611 + struct intel_plane *intel_plane = to_intel_plane(primary); 2611 2612 struct drm_framebuffer *fb; 2612 2613 2613 2614 if (!plane_config->fb) ··· 2645 2642 goto valid_fb; 2646 2643 } 2647 2644 } 2645 + 2646 + /* 2647 + * We've failed to reconstruct the BIOS FB. Current display state 2648 + * indicates that the primary plane is visible, but has a NULL FB, 2649 + * which will lead to problems later if we don't fix it up. The 2650 + * simplest solution is to just disable the primary plane now and 2651 + * pretend the BIOS never had it enabled. 
2652 + */ 2653 + to_intel_plane_state(plane_state)->visible = false; 2654 + crtc_state->plane_mask &= ~(1 << drm_plane_index(primary)); 2655 + intel_pre_disable_primary(&intel_crtc->base); 2656 + intel_plane->disable_plane(primary, &intel_crtc->base); 2648 2657 2649 2658 return; 2650 2659 ··· 9925 9910 return true; 9926 9911 } 9927 9912 9928 - static void i845_update_cursor(struct drm_crtc *crtc, u32 base) 9913 + static void i845_update_cursor(struct drm_crtc *crtc, u32 base, bool on) 9929 9914 { 9930 9915 struct drm_device *dev = crtc->dev; 9931 9916 struct drm_i915_private *dev_priv = dev->dev_private; 9932 9917 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 9933 9918 uint32_t cntl = 0, size = 0; 9934 9919 9935 - if (base) { 9920 + if (on) { 9936 9921 unsigned int width = intel_crtc->base.cursor->state->crtc_w; 9937 9922 unsigned int height = intel_crtc->base.cursor->state->crtc_h; 9938 9923 unsigned int stride = roundup_pow_of_two(width) * 4; ··· 9987 9972 } 9988 9973 } 9989 9974 9990 - static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) 9975 + static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, bool on) 9991 9976 { 9992 9977 struct drm_device *dev = crtc->dev; 9993 9978 struct drm_i915_private *dev_priv = dev->dev_private; 9994 9979 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 9995 9980 int pipe = intel_crtc->pipe; 9996 - uint32_t cntl; 9981 + uint32_t cntl = 0; 9997 9982 9998 - cntl = 0; 9999 - if (base) { 9983 + if (on) { 10000 9984 cntl = MCURSOR_GAMMA_ENABLE; 10001 9985 switch (intel_crtc->base.cursor->state->crtc_w) { 10002 9986 case 64: ··· 10046 10032 int y = cursor_state->crtc_y; 10047 10033 u32 base = 0, pos = 0; 10048 10034 10049 - if (on) 10050 - base = intel_crtc->cursor_addr; 10035 + base = intel_crtc->cursor_addr; 10051 10036 10052 10037 if (x >= intel_crtc->config->pipe_src_w) 10053 - base = 0; 10038 + on = false; 10054 10039 10055 10040 if (y >= intel_crtc->config->pipe_src_h) 10056 - base = 0; 10041 + on = 
false; 10057 10042 10058 10043 if (x < 0) { 10059 10044 if (x + cursor_state->crtc_w <= 0) 10060 - base = 0; 10045 + on = false; 10061 10046 10062 10047 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; 10063 10048 x = -x; ··· 10065 10052 10066 10053 if (y < 0) { 10067 10054 if (y + cursor_state->crtc_h <= 0) 10068 - base = 0; 10055 + on = false; 10069 10056 10070 10057 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; 10071 10058 y = -y; 10072 10059 } 10073 10060 pos |= y << CURSOR_Y_SHIFT; 10074 - 10075 - if (base == 0 && intel_crtc->cursor_base == 0) 10076 - return; 10077 10061 10078 10062 I915_WRITE(CURPOS(pipe), pos); 10079 10063 ··· 10082 10072 } 10083 10073 10084 10074 if (IS_845G(dev) || IS_I865G(dev)) 10085 - i845_update_cursor(crtc, base); 10075 + i845_update_cursor(crtc, base, on); 10086 10076 else 10087 - i9xx_update_cursor(crtc, base); 10077 + i9xx_update_cursor(crtc, base, on); 10088 10078 } 10089 10079 10090 10080 static bool cursor_size_ok(struct drm_device *dev, ··· 13728 13718 struct drm_crtc *crtc = crtc_state->base.crtc; 13729 13719 struct drm_framebuffer *fb = state->base.fb; 13730 13720 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 13721 + enum pipe pipe = to_intel_plane(plane)->pipe; 13731 13722 unsigned stride; 13732 13723 int ret; 13733 13724 ··· 13762 13751 return -EINVAL; 13763 13752 } 13764 13753 13754 + /* 13755 + * There's something wrong with the cursor on CHV pipe C. 13756 + * If it straddles the left edge of the screen then 13757 + * moving it away from the edge or disabling it often 13758 + * results in a pipe underrun, and often that can lead to 13759 + * dead pipe (constant underrun reported, and it scans 13760 + * out just a solid color). To recover from that, the 13761 + * display power well must be turned off and on again. 13762 + * Refuse the put the cursor into that compromised position. 
13763 + */ 13764 + if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C && 13765 + state->visible && state->base.crtc_x < 0) { 13766 + DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n"); 13767 + return -EINVAL; 13768 + } 13769 + 13765 13770 return 0; 13766 13771 } 13767 13772 ··· 13801 13774 crtc = crtc ? crtc : plane->crtc; 13802 13775 intel_crtc = to_intel_crtc(crtc); 13803 13776 13804 - if (intel_crtc->cursor_bo == obj) 13805 - goto update; 13806 - 13807 13777 if (!obj) 13808 13778 addr = 0; 13809 13779 else if (!INTEL_INFO(dev)->cursor_needs_physical) ··· 13809 13785 addr = obj->phys_handle->busaddr; 13810 13786 13811 13787 intel_crtc->cursor_addr = addr; 13812 - intel_crtc->cursor_bo = obj; 13813 13788 13814 - update: 13815 13789 if (crtc->state->active) 13816 13790 intel_crtc_update_cursor(crtc, state->visible); 13817 13791 }
-1
drivers/gpu/drm/i915/intel_drv.h
··· 550 550 int adjusted_x; 551 551 int adjusted_y; 552 552 553 - struct drm_i915_gem_object *cursor_bo; 554 553 uint32_t cursor_addr; 555 554 uint32_t cursor_cntl; 556 555 uint32_t cursor_size;
+4 -3
drivers/gpu/drm/i915/intel_hdmi.c
··· 1374 1374 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); 1375 1375 struct drm_i915_private *dev_priv = to_i915(connector->dev); 1376 1376 bool live_status = false; 1377 - unsigned int retry = 3; 1377 + unsigned int try; 1378 1378 1379 1379 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 1380 1380 connector->base.id, connector->name); 1381 1381 1382 1382 intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); 1383 1383 1384 - while (!live_status && --retry) { 1384 + for (try = 0; !live_status && try < 4; try++) { 1385 + if (try) 1386 + msleep(10); 1385 1387 live_status = intel_digital_port_connected(dev_priv, 1386 1388 hdmi_to_dig_port(intel_hdmi)); 1387 - mdelay(10); 1388 1389 } 1389 1390 1390 1391 if (!live_status)
+1
drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
··· 83 83 fan->type = NVBIOS_THERM_FAN_UNK; 84 84 } 85 85 86 + fan->fan_mode = NVBIOS_THERM_FAN_LINEAR; 86 87 fan->min_duty = nvbios_rd08(bios, data + 0x02); 87 88 fan->max_duty = nvbios_rd08(bios, data + 0x03); 88 89
+1
drivers/hwmon/Kconfig
··· 1217 1217 config SENSORS_SHT15 1218 1218 tristate "Sensiron humidity and temperature sensors. SHT15 and compat." 1219 1219 depends on GPIOLIB || COMPILE_TEST 1220 + select BITREVERSE 1220 1221 help 1221 1222 If you say yes here you get support for the Sensiron SHT10, SHT11, 1222 1223 SHT15, SHT71, SHT75 humidity and temperature sensors.
+15 -1
drivers/hwmon/tmp102.c
··· 58 58 u16 config_orig; 59 59 unsigned long last_update; 60 60 int temp[3]; 61 + bool first_time; 61 62 }; 62 63 63 64 /* convert left adjusted 13-bit TMP102 register value to milliCelsius */ ··· 94 93 tmp102->temp[i] = tmp102_reg_to_mC(status); 95 94 } 96 95 tmp102->last_update = jiffies; 96 + tmp102->first_time = false; 97 97 } 98 98 mutex_unlock(&tmp102->lock); 99 99 return tmp102; ··· 103 101 static int tmp102_read_temp(void *dev, int *temp) 104 102 { 105 103 struct tmp102 *tmp102 = tmp102_update_device(dev); 104 + 105 + /* Is it too early even to return a conversion? */ 106 + if (tmp102->first_time) { 107 + dev_dbg(dev, "%s: Conversion not ready yet..\n", __func__); 108 + return -EAGAIN; 109 + } 106 110 107 111 *temp = tmp102->temp[0]; 108 112 ··· 121 113 { 122 114 struct sensor_device_attribute *sda = to_sensor_dev_attr(attr); 123 115 struct tmp102 *tmp102 = tmp102_update_device(dev); 116 + 117 + /* Is it too early even to return a read? */ 118 + if (tmp102->first_time) 119 + return -EAGAIN; 124 120 125 121 return sprintf(buf, "%d\n", tmp102->temp[sda->index]); 126 122 } ··· 219 207 status = -ENODEV; 220 208 goto fail_restore_config; 221 209 } 222 - tmp102->last_update = jiffies - HZ; 210 + tmp102->last_update = jiffies; 211 + /* Mark that we are not ready with data until conversion is complete */ 212 + tmp102->first_time = true; 223 213 mutex_init(&tmp102->lock); 224 214 225 215 hwmon_dev = hwmon_device_register_with_groups(dev, client->name,
+9 -2
drivers/i2c/busses/i2c-davinci.c
··· 202 202 * d is always 6 on Keystone I2C controller 203 203 */ 204 204 205 - /* get minimum of 7 MHz clock, but max of 12 MHz */ 206 - psc = (input_clock / 7000000) - 1; 205 + /* 206 + * Both Davinci and current Keystone User Guides recommend a value 207 + * between 7MHz and 12MHz. In reality 7MHz module clock doesn't 208 + * always produce enough margin between SDA and SCL transitions. 209 + * Measurements show that the higher the module clock is, the 210 + * bigger is the margin, providing more reliable communication. 211 + * So we better target for 12MHz. 212 + */ 213 + psc = (input_clock / 12000000) - 1; 207 214 if ((input_clock / (psc + 1)) > 12000000) 208 215 psc++; /* better to run under spec than over */ 209 216 d = (psc >= 2) ? 5 : 7 - psc;
+6
drivers/i2c/busses/i2c-designware-core.c
··· 813 813 tx_aborted: 814 814 if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err) 815 815 complete(&dev->cmd_complete); 816 + else if (unlikely(dev->accessor_flags & ACCESS_INTR_MASK)) { 817 + /* workaround to trigger pending interrupt */ 818 + stat = dw_readl(dev, DW_IC_INTR_MASK); 819 + i2c_dw_disable_int(dev); 820 + dw_writel(dev, stat, DW_IC_INTR_MASK); 821 + } 816 822 817 823 return IRQ_HANDLED; 818 824 }
+1
drivers/i2c/busses/i2c-designware-core.h
··· 111 111 112 112 #define ACCESS_SWAP 0x00000001 113 113 #define ACCESS_16BIT 0x00000002 114 + #define ACCESS_INTR_MASK 0x00000004 114 115 115 116 extern int i2c_dw_init(struct dw_i2c_dev *dev); 116 117 extern void i2c_dw_disable(struct dw_i2c_dev *dev);
+10 -6
drivers/i2c/busses/i2c-designware-platdrv.c
··· 93 93 static int dw_i2c_acpi_configure(struct platform_device *pdev) 94 94 { 95 95 struct dw_i2c_dev *dev = platform_get_drvdata(pdev); 96 + const struct acpi_device_id *id; 96 97 97 98 dev->adapter.nr = -1; 98 99 dev->tx_fifo_depth = 32; ··· 107 106 dw_i2c_acpi_params(pdev, "FMCN", &dev->fs_hcnt, &dev->fs_lcnt, 108 107 &dev->sda_hold_time); 109 108 109 + id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev); 110 + if (id && id->driver_data) 111 + dev->accessor_flags |= (u32)id->driver_data; 112 + 110 113 return 0; 111 114 } 112 115 ··· 121 116 { "INT3433", 0 }, 122 117 { "80860F41", 0 }, 123 118 { "808622C1", 0 }, 124 - { "AMD0010", 0 }, 119 + { "AMD0010", ACCESS_INTR_MASK }, 125 120 { } 126 121 }; 127 122 MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match); ··· 245 240 } 246 241 247 242 r = i2c_dw_probe(dev); 248 - if (r) { 243 + if (r && !dev->pm_runtime_disabled) 249 244 pm_runtime_disable(&pdev->dev); 250 - return r; 251 - } 252 245 253 - return 0; 246 + return r; 254 247 } 255 248 256 249 static int dw_i2c_plat_remove(struct platform_device *pdev) ··· 263 260 264 261 pm_runtime_dont_use_autosuspend(&pdev->dev); 265 262 pm_runtime_put_sync(&pdev->dev); 266 - pm_runtime_disable(&pdev->dev); 263 + if (!dev->pm_runtime_disabled) 264 + pm_runtime_disable(&pdev->dev); 267 265 268 266 return 0; 269 267 }
+2 -2
drivers/i2c/busses/i2c-imx.c
··· 1119 1119 i2c_imx, IMX_I2C_I2CR); 1120 1120 imx_i2c_write_reg(i2c_imx->hwdata->i2sr_clr_opcode, i2c_imx, IMX_I2C_I2SR); 1121 1121 1122 + i2c_imx_init_recovery_info(i2c_imx, pdev); 1123 + 1122 1124 /* Add I2C adapter */ 1123 1125 ret = i2c_add_numbered_adapter(&i2c_imx->adapter); 1124 1126 if (ret < 0) { 1125 1127 dev_err(&pdev->dev, "registration failed\n"); 1126 1128 goto clk_disable; 1127 1129 } 1128 - 1129 - i2c_imx_init_recovery_info(i2c_imx, pdev); 1130 1130 1131 1131 /* Set up platform driver data */ 1132 1132 platform_set_drvdata(pdev, i2c_imx);
+18 -9
drivers/i2c/busses/i2c-mv64xxx.c
··· 146 146 bool errata_delay; 147 147 struct reset_control *rstc; 148 148 bool irq_clear_inverted; 149 + /* Clk div is 2 to the power n, not 2 to the power n + 1 */ 150 + bool clk_n_base_0; 149 151 }; 150 152 151 153 static struct mv64xxx_i2c_regs mv64xxx_i2c_regs_mv64xxx = { ··· 759 757 #ifdef CONFIG_OF 760 758 #ifdef CONFIG_HAVE_CLK 761 759 static int 762 - mv64xxx_calc_freq(const int tclk, const int n, const int m) 760 + mv64xxx_calc_freq(struct mv64xxx_i2c_data *drv_data, 761 + const int tclk, const int n, const int m) 763 762 { 764 - return tclk / (10 * (m + 1) * (2 << n)); 763 + if (drv_data->clk_n_base_0) 764 + return tclk / (10 * (m + 1) * (1 << n)); 765 + else 766 + return tclk / (10 * (m + 1) * (2 << n)); 765 767 } 766 768 767 769 static bool 768 - mv64xxx_find_baud_factors(const u32 req_freq, const u32 tclk, u32 *best_n, 769 - u32 *best_m) 770 + mv64xxx_find_baud_factors(struct mv64xxx_i2c_data *drv_data, 771 + const u32 req_freq, const u32 tclk) 770 772 { 771 773 int freq, delta, best_delta = INT_MAX; 772 774 int m, n; 773 775 774 776 for (n = 0; n <= 7; n++) 775 777 for (m = 0; m <= 15; m++) { 776 - freq = mv64xxx_calc_freq(tclk, n, m); 778 + freq = mv64xxx_calc_freq(drv_data, tclk, n, m); 777 779 delta = req_freq - freq; 778 780 if (delta >= 0 && delta < best_delta) { 779 - *best_m = m; 780 - *best_n = n; 781 + drv_data->freq_m = m; 782 + drv_data->freq_n = n; 781 783 best_delta = delta; 782 784 } 783 785 if (best_delta == 0) ··· 819 813 if (of_property_read_u32(np, "clock-frequency", &bus_freq)) 820 814 bus_freq = 100000; /* 100kHz by default */ 821 815 822 - if (!mv64xxx_find_baud_factors(bus_freq, tclk, 823 - &drv_data->freq_n, &drv_data->freq_m)) { 816 + if (of_device_is_compatible(np, "allwinner,sun4i-a10-i2c") || 817 + of_device_is_compatible(np, "allwinner,sun6i-a31-i2c")) 818 + drv_data->clk_n_base_0 = true; 819 + 820 + if (!mv64xxx_find_baud_factors(drv_data, bus_freq, tclk)) { 824 821 rc = -EINVAL; 825 822 goto out; 826 823 }
+2 -2
drivers/i2c/busses/i2c-rcar.c
··· 576 576 if (slave->flags & I2C_CLIENT_TEN) 577 577 return -EAFNOSUPPORT; 578 578 579 - pm_runtime_forbid(rcar_i2c_priv_to_dev(priv)); 579 + pm_runtime_get_sync(rcar_i2c_priv_to_dev(priv)); 580 580 581 581 priv->slave = slave; 582 582 rcar_i2c_write(priv, ICSAR, slave->addr); ··· 598 598 599 599 priv->slave = NULL; 600 600 601 - pm_runtime_allow(rcar_i2c_priv_to_dev(priv)); 601 + pm_runtime_put(rcar_i2c_priv_to_dev(priv)); 602 602 603 603 return 0; 604 604 }
+1 -1
drivers/i2c/busses/i2c-rk3x.c
··· 908 908 &i2c->scl_fall_ns)) 909 909 i2c->scl_fall_ns = 300; 910 910 if (of_property_read_u32(pdev->dev.of_node, "i2c-sda-falling-time-ns", 911 - &i2c->scl_fall_ns)) 911 + &i2c->sda_fall_ns)) 912 912 i2c->sda_fall_ns = i2c->scl_fall_ns; 913 913 914 914 strlcpy(i2c->adap.name, "rk3x-i2c", sizeof(i2c->adap.name));
+1 -1
drivers/i2c/busses/i2c-st.c
··· 822 822 823 823 adap = &i2c_dev->adap; 824 824 i2c_set_adapdata(adap, i2c_dev); 825 - snprintf(adap->name, sizeof(adap->name), "ST I2C(0x%pa)", &res->start); 825 + snprintf(adap->name, sizeof(adap->name), "ST I2C(%pa)", &res->start); 826 826 adap->owner = THIS_MODULE; 827 827 adap->timeout = 2 * HZ; 828 828 adap->retries = 0;
+9 -7
drivers/infiniband/core/cma.c
··· 1265 1265 return cma_protocol_roce_dev_port(device, port_num); 1266 1266 } 1267 1267 1268 - static bool cma_match_net_dev(const struct rdma_id_private *id_priv, 1269 - const struct net_device *net_dev) 1268 + static bool cma_match_net_dev(const struct rdma_cm_id *id, 1269 + const struct net_device *net_dev, 1270 + u8 port_num) 1270 1271 { 1271 - const struct rdma_addr *addr = &id_priv->id.route.addr; 1272 + const struct rdma_addr *addr = &id->route.addr; 1272 1273 1273 1274 if (!net_dev) 1274 1275 /* This request is an AF_IB request or a RoCE request */ 1275 - return addr->src_addr.ss_family == AF_IB || 1276 - cma_protocol_roce(&id_priv->id); 1276 + return (!id->port_num || id->port_num == port_num) && 1277 + (addr->src_addr.ss_family == AF_IB || 1278 + cma_protocol_roce_dev_port(id->device, port_num)); 1277 1279 1278 1280 return !addr->dev_addr.bound_dev_if || 1279 1281 (net_eq(dev_net(net_dev), addr->dev_addr.net) && ··· 1297 1295 hlist_for_each_entry(id_priv, &bind_list->owners, node) { 1298 1296 if (cma_match_private_data(id_priv, ib_event->private_data)) { 1299 1297 if (id_priv->id.device == cm_id->device && 1300 - cma_match_net_dev(id_priv, net_dev)) 1298 + cma_match_net_dev(&id_priv->id, net_dev, req->port)) 1301 1299 return id_priv; 1302 1300 list_for_each_entry(id_priv_dev, 1303 1301 &id_priv->listen_list, 1304 1302 listen_list) { 1305 1303 if (id_priv_dev->id.device == cm_id->device && 1306 - cma_match_net_dev(id_priv_dev, net_dev)) 1304 + cma_match_net_dev(&id_priv_dev->id, net_dev, req->port)) 1307 1305 return id_priv_dev; 1308 1306 } 1309 1307 }
+1 -1
drivers/infiniband/hw/mlx4/srq.c
··· 286 286 mlx4_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db); 287 287 ib_umem_release(msrq->umem); 288 288 } else { 289 - kfree(msrq->wrid); 289 + kvfree(msrq->wrid); 290 290 mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift, 291 291 &msrq->buf); 292 292 mlx4_db_free(dev->dev, &msrq->db);
+10
drivers/infiniband/hw/ocrdma/ocrdma.h
··· 232 232 u16 interface_type; 233 233 }; 234 234 235 + enum ocrdma_flags { 236 + OCRDMA_FLAGS_LINK_STATUS_INIT = 0x01 237 + }; 238 + 235 239 struct ocrdma_dev { 236 240 struct ib_device ibdev; 237 241 struct ocrdma_dev_attr attr; ··· 291 287 atomic_t update_sl; 292 288 u16 pvid; 293 289 u32 asic_id; 290 + u32 flags; 294 291 295 292 ulong last_stats_time; 296 293 struct mutex stats_lock; /* provide synch for debugfs operations */ ··· 594 589 */ 595 590 return (state & OCRDMA_STATE_FLAG_ENABLED) && 596 591 (state & OCRDMA_STATE_FLAG_SYNC); 592 + } 593 + 594 + static inline u8 ocrdma_get_ae_link_state(u32 ae_state) 595 + { 596 + return ((ae_state & OCRDMA_AE_LSC_LS_MASK) >> OCRDMA_AE_LSC_LS_SHIFT); 597 597 } 598 598 599 599 #endif
+39 -10
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
··· 579 579 580 580 cmd->async_event_bitmap = BIT(OCRDMA_ASYNC_GRP5_EVE_CODE); 581 581 cmd->async_event_bitmap |= BIT(OCRDMA_ASYNC_RDMA_EVE_CODE); 582 + /* Request link events on this MQ. */ 583 + cmd->async_event_bitmap |= BIT(OCRDMA_ASYNC_LINK_EVE_CODE); 582 584 583 585 cmd->async_cqid_ringsize = cq->id; 584 586 cmd->async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) << ··· 821 819 } 822 820 } 823 821 822 + static void ocrdma_process_link_state(struct ocrdma_dev *dev, 823 + struct ocrdma_ae_mcqe *cqe) 824 + { 825 + struct ocrdma_ae_lnkst_mcqe *evt; 826 + u8 lstate; 827 + 828 + evt = (struct ocrdma_ae_lnkst_mcqe *)cqe; 829 + lstate = ocrdma_get_ae_link_state(evt->speed_state_ptn); 830 + 831 + if (!(lstate & OCRDMA_AE_LSC_LLINK_MASK)) 832 + return; 833 + 834 + if (dev->flags & OCRDMA_FLAGS_LINK_STATUS_INIT) 835 + ocrdma_update_link_state(dev, (lstate & OCRDMA_LINK_ST_MASK)); 836 + } 837 + 824 838 static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe) 825 839 { 826 840 /* async CQE processing */ 827 841 struct ocrdma_ae_mcqe *cqe = ae_cqe; 828 842 u32 evt_code = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_CODE_MASK) >> 829 843 OCRDMA_AE_MCQE_EVENT_CODE_SHIFT; 830 - 831 - if (evt_code == OCRDMA_ASYNC_RDMA_EVE_CODE) 844 + switch (evt_code) { 845 + case OCRDMA_ASYNC_LINK_EVE_CODE: 846 + ocrdma_process_link_state(dev, cqe); 847 + break; 848 + case OCRDMA_ASYNC_RDMA_EVE_CODE: 832 849 ocrdma_dispatch_ibevent(dev, cqe); 833 - else if (evt_code == OCRDMA_ASYNC_GRP5_EVE_CODE) 850 + break; 851 + case OCRDMA_ASYNC_GRP5_EVE_CODE: 834 852 ocrdma_process_grp5_aync(dev, cqe); 835 - else 853 + break; 854 + default: 836 855 pr_err("%s(%d) invalid evt code=0x%x\n", __func__, 837 856 dev->id, evt_code); 857 + } 838 858 } 839 859 840 860 static void ocrdma_process_mcqe(struct ocrdma_dev *dev, struct ocrdma_mcqe *cqe) ··· 1387 1363 return status; 1388 1364 } 1389 1365 1390 - int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed) 1366 + int 
ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed, 1367 + u8 *lnk_state) 1391 1368 { 1392 1369 int status = -ENOMEM; 1393 1370 struct ocrdma_get_link_speed_rsp *rsp; ··· 1409 1384 goto mbx_err; 1410 1385 1411 1386 rsp = (struct ocrdma_get_link_speed_rsp *)cmd; 1412 - *lnk_speed = (rsp->pflt_pps_ld_pnum & OCRDMA_PHY_PS_MASK) 1413 - >> OCRDMA_PHY_PS_SHIFT; 1387 + if (lnk_speed) 1388 + *lnk_speed = (rsp->pflt_pps_ld_pnum & OCRDMA_PHY_PS_MASK) 1389 + >> OCRDMA_PHY_PS_SHIFT; 1390 + if (lnk_state) 1391 + *lnk_state = (rsp->res_lnk_st & OCRDMA_LINK_ST_MASK); 1414 1392 1415 1393 mbx_err: 1416 1394 kfree(cmd); ··· 2543 2515 ocrdma_cpu_to_le32(&cmd->params.sgid[0], sizeof(cmd->params.sgid)); 2544 2516 cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8); 2545 2517 2546 - if (vlan_id < 0x1000) { 2547 - if (dev->pfc_state) { 2548 - vlan_id = 0; 2518 + if (vlan_id == 0xFFFF) 2519 + vlan_id = 0; 2520 + if (vlan_id || dev->pfc_state) { 2521 + if (!vlan_id) { 2549 2522 pr_err("ocrdma%d:Using VLAN with PFC is recommended\n", 2550 2523 dev->id); 2551 2524 pr_err("ocrdma%d:Using VLAN 0 for this connection\n",
+3 -1
drivers/infiniband/hw/ocrdma/ocrdma_hw.h
··· 106 106 bool solicited, u16 cqe_popped); 107 107 108 108 /* verbs specific mailbox commands */ 109 - int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed); 109 + int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed, 110 + u8 *lnk_st); 110 111 int ocrdma_query_config(struct ocrdma_dev *, 111 112 struct ocrdma_mbx_query_config *config); 112 113 ··· 154 153 void ocrdma_init_service_level(struct ocrdma_dev *); 155 154 void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev); 156 155 void ocrdma_free_pd_range(struct ocrdma_dev *dev); 156 + void ocrdma_update_link_state(struct ocrdma_dev *dev, u8 lstate); 157 157 158 158 #endif /* __OCRDMA_HW_H__ */
+25 -32
drivers/infiniband/hw/ocrdma/ocrdma_main.c
··· 290 290 static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info) 291 291 { 292 292 int status = 0, i; 293 + u8 lstate = 0; 293 294 struct ocrdma_dev *dev; 294 295 295 296 dev = (struct ocrdma_dev *)ib_alloc_device(sizeof(struct ocrdma_dev)); ··· 319 318 status = ocrdma_register_device(dev); 320 319 if (status) 321 320 goto alloc_err; 321 + 322 + /* Query Link state and update */ 323 + status = ocrdma_mbx_get_link_speed(dev, NULL, &lstate); 324 + if (!status) 325 + ocrdma_update_link_state(dev, lstate); 322 326 323 327 for (i = 0; i < ARRAY_SIZE(ocrdma_attributes); i++) 324 328 if (device_create_file(&dev->ibdev.dev, ocrdma_attributes[i])) ··· 379 373 ocrdma_remove_free(dev); 380 374 } 381 375 382 - static int ocrdma_open(struct ocrdma_dev *dev) 376 + static int ocrdma_dispatch_port_active(struct ocrdma_dev *dev) 383 377 { 384 378 struct ib_event port_event; 385 379 ··· 390 384 return 0; 391 385 } 392 386 393 - static int ocrdma_close(struct ocrdma_dev *dev) 387 + static int ocrdma_dispatch_port_error(struct ocrdma_dev *dev) 394 388 { 395 - int i; 396 - struct ocrdma_qp *qp, **cur_qp; 397 389 struct ib_event err_event; 398 - struct ib_qp_attr attrs; 399 - int attr_mask = IB_QP_STATE; 400 - 401 - attrs.qp_state = IB_QPS_ERR; 402 - mutex_lock(&dev->dev_lock); 403 - if (dev->qp_tbl) { 404 - cur_qp = dev->qp_tbl; 405 - for (i = 0; i < OCRDMA_MAX_QP; i++) { 406 - qp = cur_qp[i]; 407 - if (qp && qp->ibqp.qp_type != IB_QPT_GSI) { 408 - /* change the QP state to ERROR */ 409 - _ocrdma_modify_qp(&qp->ibqp, &attrs, attr_mask); 410 - 411 - err_event.event = IB_EVENT_QP_FATAL; 412 - err_event.element.qp = &qp->ibqp; 413 - err_event.device = &dev->ibdev; 414 - ib_dispatch_event(&err_event); 415 - } 416 - } 417 - } 418 - mutex_unlock(&dev->dev_lock); 419 390 420 391 err_event.event = IB_EVENT_PORT_ERR; 421 392 err_event.element.port_num = 1; ··· 403 420 404 421 static void ocrdma_shutdown(struct ocrdma_dev *dev) 405 422 { 406 - ocrdma_close(dev); 423 + 
ocrdma_dispatch_port_error(dev); 407 424 ocrdma_remove(dev); 408 425 } 409 426 ··· 414 431 static void ocrdma_event_handler(struct ocrdma_dev *dev, u32 event) 415 432 { 416 433 switch (event) { 417 - case BE_DEV_UP: 418 - ocrdma_open(dev); 419 - break; 420 - case BE_DEV_DOWN: 421 - ocrdma_close(dev); 422 - break; 423 434 case BE_DEV_SHUTDOWN: 424 435 ocrdma_shutdown(dev); 425 436 break; 437 + default: 438 + break; 426 439 } 440 + } 441 + 442 + void ocrdma_update_link_state(struct ocrdma_dev *dev, u8 lstate) 443 + { 444 + if (!(dev->flags & OCRDMA_FLAGS_LINK_STATUS_INIT)) { 445 + dev->flags |= OCRDMA_FLAGS_LINK_STATUS_INIT; 446 + if (!lstate) 447 + return; 448 + } 449 + 450 + if (!lstate) 451 + ocrdma_dispatch_port_error(dev); 452 + else 453 + ocrdma_dispatch_port_active(dev); 427 454 } 428 455 429 456 static struct ocrdma_driver ocrdma_drv = {
+45 -4
drivers/infiniband/hw/ocrdma/ocrdma_sli.h
··· 465 465 u32 valid_ae_event; 466 466 }; 467 467 468 - #define OCRDMA_ASYNC_RDMA_EVE_CODE 0x14 469 - #define OCRDMA_ASYNC_GRP5_EVE_CODE 0x5 468 + enum ocrdma_async_event_code { 469 + OCRDMA_ASYNC_LINK_EVE_CODE = 0x01, 470 + OCRDMA_ASYNC_GRP5_EVE_CODE = 0x05, 471 + OCRDMA_ASYNC_RDMA_EVE_CODE = 0x14 472 + }; 470 473 471 474 enum ocrdma_async_grp5_events { 472 475 OCRDMA_ASYNC_EVENT_QOS_VALUE = 0x01, ··· 490 487 OCRDMA_QP_LAST_WQE_EVENT = 0x10, 491 488 492 489 OCRDMA_MAX_ASYNC_ERRORS 490 + }; 491 + 492 + struct ocrdma_ae_lnkst_mcqe { 493 + u32 speed_state_ptn; 494 + u32 qos_reason_falut; 495 + u32 evt_tag; 496 + u32 valid_ae_event; 497 + }; 498 + 499 + enum { 500 + OCRDMA_AE_LSC_PORT_NUM_MASK = 0x3F, 501 + OCRDMA_AE_LSC_PT_SHIFT = 0x06, 502 + OCRDMA_AE_LSC_PT_MASK = (0x03 << 503 + OCRDMA_AE_LSC_PT_SHIFT), 504 + OCRDMA_AE_LSC_LS_SHIFT = 0x08, 505 + OCRDMA_AE_LSC_LS_MASK = (0xFF << 506 + OCRDMA_AE_LSC_LS_SHIFT), 507 + OCRDMA_AE_LSC_LD_SHIFT = 0x10, 508 + OCRDMA_AE_LSC_LD_MASK = (0xFF << 509 + OCRDMA_AE_LSC_LD_SHIFT), 510 + OCRDMA_AE_LSC_PPS_SHIFT = 0x18, 511 + OCRDMA_AE_LSC_PPS_MASK = (0xFF << 512 + OCRDMA_AE_LSC_PPS_SHIFT), 513 + OCRDMA_AE_LSC_PPF_MASK = 0xFF, 514 + OCRDMA_AE_LSC_ER_SHIFT = 0x08, 515 + OCRDMA_AE_LSC_ER_MASK = (0xFF << 516 + OCRDMA_AE_LSC_ER_SHIFT), 517 + OCRDMA_AE_LSC_QOS_SHIFT = 0x10, 518 + OCRDMA_AE_LSC_QOS_MASK = (0xFFFF << 519 + OCRDMA_AE_LSC_QOS_SHIFT) 520 + }; 521 + 522 + enum { 523 + OCRDMA_AE_LSC_PLINK_DOWN = 0x00, 524 + OCRDMA_AE_LSC_PLINK_UP = 0x01, 525 + OCRDMA_AE_LSC_LLINK_DOWN = 0x02, 526 + OCRDMA_AE_LSC_LLINK_MASK = 0x02, 527 + OCRDMA_AE_LSC_LLINK_UP = 0x03 493 528 }; 494 529 495 530 /* mailbox command request and responses */ ··· 717 676 OCRDMA_PHY_PFLT_SHIFT = 0x18, 718 677 OCRDMA_QOS_LNKSP_MASK = 0xFFFF0000, 719 678 OCRDMA_QOS_LNKSP_SHIFT = 0x10, 720 - OCRDMA_LLST_MASK = 0xFF, 679 + OCRDMA_LINK_ST_MASK = 0x01, 721 680 OCRDMA_PLFC_MASK = 0x00000400, 722 681 OCRDMA_PLFC_SHIFT = 0x8, 723 682 OCRDMA_PLRFC_MASK = 0x00000200, ··· 732 691 
733 692 u32 pflt_pps_ld_pnum; 734 693 u32 qos_lsp; 735 - u32 res_lls; 694 + u32 res_lnk_st; 736 695 }; 737 696 738 697 enum {
+1 -1
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
··· 171 171 int status; 172 172 u8 speed; 173 173 174 - status = ocrdma_mbx_get_link_speed(dev, &speed); 174 + status = ocrdma_mbx_get_link_speed(dev, &speed, NULL); 175 175 if (status) 176 176 speed = OCRDMA_PHYS_LINK_SPEED_ZERO; 177 177
+1
drivers/input/joystick/db9.c
··· 592 592 return; 593 593 } 594 594 595 + memset(&db9_parport_cb, 0, sizeof(db9_parport_cb)); 595 596 db9_parport_cb.flags = PARPORT_FLAG_EXCL; 596 597 597 598 pd = parport_register_dev_model(pp, "db9", &db9_parport_cb, port_idx);
+1
drivers/input/joystick/gamecon.c
··· 951 951 pads = gc_cfg[port_idx].args + 1; 952 952 n_pads = gc_cfg[port_idx].nargs - 1; 953 953 954 + memset(&gc_parport_cb, 0, sizeof(gc_parport_cb)); 954 955 gc_parport_cb.flags = PARPORT_FLAG_EXCL; 955 956 956 957 pd = parport_register_dev_model(pp, "gamecon", &gc_parport_cb,
+1
drivers/input/joystick/turbografx.c
··· 181 181 n_buttons = tgfx_cfg[port_idx].args + 1; 182 182 n_devs = tgfx_cfg[port_idx].nargs - 1; 183 183 184 + memset(&tgfx_parport_cb, 0, sizeof(tgfx_parport_cb)); 184 185 tgfx_parport_cb.flags = PARPORT_FLAG_EXCL; 185 186 186 187 pd = parport_register_dev_model(pp, "turbografx", &tgfx_parport_cb,
+1
drivers/input/joystick/walkera0701.c
··· 218 218 219 219 w->parport = pp; 220 220 221 + memset(&walkera0701_parport_cb, 0, sizeof(walkera0701_parport_cb)); 221 222 walkera0701_parport_cb.flags = PARPORT_FLAG_EXCL; 222 223 walkera0701_parport_cb.irq_func = walkera0701_irq_handler; 223 224 walkera0701_parport_cb.private = w;
+1 -2
drivers/input/misc/arizona-haptics.c
··· 97 97 98 98 ret = regmap_update_bits(arizona->regmap, 99 99 ARIZONA_HAPTICS_CONTROL_1, 100 - ARIZONA_HAP_CTRL_MASK, 101 - 1 << ARIZONA_HAP_CTRL_SHIFT); 100 + ARIZONA_HAP_CTRL_MASK, 0); 102 101 if (ret != 0) { 103 102 dev_err(arizona->dev, "Failed to stop haptics: %d\n", 104 103 ret);
+3
drivers/input/mouse/elan_i2c_core.c
··· 41 41 42 42 #define DRIVER_NAME "elan_i2c" 43 43 #define ELAN_DRIVER_VERSION "1.6.1" 44 + #define ELAN_VENDOR_ID 0x04f3 44 45 #define ETP_MAX_PRESSURE 255 45 46 #define ETP_FWIDTH_REDUCE 90 46 47 #define ETP_FINGER_WIDTH 15 ··· 915 914 916 915 input->name = "Elan Touchpad"; 917 916 input->id.bustype = BUS_I2C; 917 + input->id.vendor = ELAN_VENDOR_ID; 918 + input->id.product = data->product_id; 918 919 input_set_drvdata(input, data); 919 920 920 921 error = input_mt_init_slots(input, ETP_MAX_FINGERS,
+1
drivers/input/serio/parkbd.c
··· 145 145 { 146 146 struct pardev_cb parkbd_parport_cb; 147 147 148 + memset(&parkbd_parport_cb, 0, sizeof(parkbd_parport_cb)); 148 149 parkbd_parport_cb.irq_func = parkbd_interrupt; 149 150 parkbd_parport_cb.flags = PARPORT_FLAG_EXCL; 150 151
+9
drivers/input/tablet/aiptek.c
··· 1819 1819 input_set_abs_params(inputdev, ABS_TILT_Y, AIPTEK_TILT_MIN, AIPTEK_TILT_MAX, 0, 0); 1820 1820 input_set_abs_params(inputdev, ABS_WHEEL, AIPTEK_WHEEL_MIN, AIPTEK_WHEEL_MAX - 1, 0, 0); 1821 1821 1822 + /* Verify that a device really has an endpoint */ 1823 + if (intf->altsetting[0].desc.bNumEndpoints < 1) { 1824 + dev_err(&intf->dev, 1825 + "interface has %d endpoints, but must have minimum 1\n", 1826 + intf->altsetting[0].desc.bNumEndpoints); 1827 + err = -EINVAL; 1828 + goto fail3; 1829 + } 1822 1830 endpoint = &intf->altsetting[0].endpoint[0].desc; 1823 1831 1824 1832 /* Go set up our URB, which is called when the tablet receives ··· 1869 1861 if (i == ARRAY_SIZE(speeds)) { 1870 1862 dev_info(&intf->dev, 1871 1863 "Aiptek tried all speeds, no sane response\n"); 1864 + err = -EINVAL; 1872 1865 goto fail3; 1873 1866 } 1874 1867
+34
drivers/input/touchscreen/atmel_mxt_ts.c
··· 2487 2487 { } 2488 2488 }; 2489 2489 2490 + static unsigned int chromebook_tp_buttons[] = { 2491 + KEY_RESERVED, 2492 + KEY_RESERVED, 2493 + KEY_RESERVED, 2494 + KEY_RESERVED, 2495 + KEY_RESERVED, 2496 + BTN_LEFT 2497 + }; 2498 + 2499 + static struct mxt_acpi_platform_data chromebook_platform_data[] = { 2500 + { 2501 + /* Touchpad */ 2502 + .hid = "ATML0000", 2503 + .pdata = { 2504 + .t19_num_keys = ARRAY_SIZE(chromebook_tp_buttons), 2505 + .t19_keymap = chromebook_tp_buttons, 2506 + }, 2507 + }, 2508 + { 2509 + /* Touchscreen */ 2510 + .hid = "ATML0001", 2511 + }, 2512 + { } 2513 + }; 2514 + 2490 2515 static const struct dmi_system_id mxt_dmi_table[] = { 2491 2516 { 2492 2517 /* 2015 Google Pixel */ ··· 2521 2496 DMI_MATCH(DMI_PRODUCT_NAME, "Samus"), 2522 2497 }, 2523 2498 .driver_data = samus_platform_data, 2499 + }, 2500 + { 2501 + /* Other Google Chromebooks */ 2502 + .ident = "Chromebook", 2503 + .matches = { 2504 + DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), 2505 + }, 2506 + .driver_data = chromebook_platform_data, 2524 2507 }, 2525 2508 { } 2526 2509 }; ··· 2734 2701 { "qt602240_ts", 0 }, 2735 2702 { "atmel_mxt_ts", 0 }, 2736 2703 { "atmel_mxt_tp", 0 }, 2704 + { "maxtouch", 0 }, 2737 2705 { "mXT224", 0 }, 2738 2706 { } 2739 2707 };
+12 -9
drivers/input/touchscreen/elants_i2c.c
··· 1316 1316 1317 1317 disable_irq(client->irq); 1318 1318 1319 - if (device_may_wakeup(dev) || ts->keep_power_in_suspend) { 1319 + if (device_may_wakeup(dev)) { 1320 + /* 1321 + * The device will automatically enter idle mode 1322 + * that has reduced power consumption. 1323 + */ 1324 + ts->wake_irq_enabled = (enable_irq_wake(client->irq) == 0); 1325 + } else if (ts->keep_power_in_suspend) { 1320 1326 for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) { 1321 1327 error = elants_i2c_send(client, set_sleep_cmd, 1322 1328 sizeof(set_sleep_cmd)); ··· 1332 1326 dev_err(&client->dev, 1333 1327 "suspend command failed: %d\n", error); 1334 1328 } 1335 - 1336 - if (device_may_wakeup(dev)) 1337 - ts->wake_irq_enabled = 1338 - (enable_irq_wake(client->irq) == 0); 1339 1329 } else { 1340 1330 elants_i2c_power_off(ts); 1341 1331 } ··· 1347 1345 int retry_cnt; 1348 1346 int error; 1349 1347 1350 - if (device_may_wakeup(dev) && ts->wake_irq_enabled) 1351 - disable_irq_wake(client->irq); 1352 - 1353 - if (ts->keep_power_in_suspend) { 1348 + if (device_may_wakeup(dev)) { 1349 + if (ts->wake_irq_enabled) 1350 + disable_irq_wake(client->irq); 1351 + elants_i2c_sw_reset(client); 1352 + } else if (ts->keep_power_in_suspend) { 1354 1353 for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) { 1355 1354 error = elants_i2c_send(client, set_active_cmd, 1356 1355 sizeof(set_active_cmd));
+18 -2
drivers/iommu/amd_iommu_v2.c
··· 494 494 } 495 495 } 496 496 497 + static bool access_error(struct vm_area_struct *vma, struct fault *fault) 498 + { 499 + unsigned long requested = 0; 500 + 501 + if (fault->flags & PPR_FAULT_EXEC) 502 + requested |= VM_EXEC; 503 + 504 + if (fault->flags & PPR_FAULT_READ) 505 + requested |= VM_READ; 506 + 507 + if (fault->flags & PPR_FAULT_WRITE) 508 + requested |= VM_WRITE; 509 + 510 + return (requested & ~vma->vm_flags) != 0; 511 + } 512 + 497 513 static void do_fault(struct work_struct *work) 498 514 { 499 515 struct fault *fault = container_of(work, struct fault, work); ··· 532 516 goto out; 533 517 } 534 518 535 - if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) { 536 - /* handle_mm_fault would BUG_ON() */ 519 + /* Check if we have the right permissions on the vma */ 520 + if (access_error(vma, fault)) { 537 521 up_read(&mm->mmap_sem); 538 522 handle_fault_error(fault); 539 523 goto out;
+20
drivers/iommu/intel-svm.c
··· 484 484 }; 485 485 486 486 #define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x10) 487 + 488 + static bool access_error(struct vm_area_struct *vma, struct page_req_dsc *req) 489 + { 490 + unsigned long requested = 0; 491 + 492 + if (req->exe_req) 493 + requested |= VM_EXEC; 494 + 495 + if (req->rd_req) 496 + requested |= VM_READ; 497 + 498 + if (req->wr_req) 499 + requested |= VM_WRITE; 500 + 501 + return (requested & ~vma->vm_flags) != 0; 502 + } 503 + 487 504 static irqreturn_t prq_event_thread(int irq, void *d) 488 505 { 489 506 struct intel_iommu *iommu = d; ··· 554 537 down_read(&svm->mm->mmap_sem); 555 538 vma = find_extend_vma(svm->mm, address); 556 539 if (!vma || address < vma->vm_start) 540 + goto invalid; 541 + 542 + if (access_error(vma, req)) 557 543 goto invalid; 558 544 559 545 ret = handle_mm_fault(svm->mm, vma, address,
+1 -1
drivers/lightnvm/gennvm.c
··· 75 75 struct nvm_block *blk; 76 76 int i; 77 77 78 - lun = &gn->luns[(dev->nr_luns * ppa.g.ch) + ppa.g.lun]; 78 + lun = &gn->luns[(dev->luns_per_chnl * ppa.g.ch) + ppa.g.lun]; 79 79 80 80 for (i = 0; i < nr_blocks; i++) { 81 81 if (blks[i] == 0)
+22 -11
drivers/md/md.c
··· 314 314 */ 315 315 void mddev_suspend(struct mddev *mddev) 316 316 { 317 - BUG_ON(mddev->suspended); 318 - mddev->suspended = 1; 317 + if (mddev->suspended++) 318 + return; 319 319 synchronize_rcu(); 320 320 wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0); 321 321 mddev->pers->quiesce(mddev, 1); ··· 326 326 327 327 void mddev_resume(struct mddev *mddev) 328 328 { 329 - mddev->suspended = 0; 329 + if (--mddev->suspended) 330 + return; 330 331 wake_up(&mddev->sb_wait); 331 332 mddev->pers->quiesce(mddev, 0); 332 333 ··· 1653 1652 rdev->journal_tail = le64_to_cpu(sb->journal_tail); 1654 1653 if (mddev->recovery_cp == MaxSector) 1655 1654 set_bit(MD_JOURNAL_CLEAN, &mddev->flags); 1656 - rdev->raid_disk = mddev->raid_disks; 1655 + rdev->raid_disk = 0; 1657 1656 break; 1658 1657 default: 1659 1658 rdev->saved_raid_disk = role; ··· 2774 2773 /* Activating a spare .. or possibly reactivating 2775 2774 * if we ever get bitmaps working here. 2776 2775 */ 2776 + int err; 2777 2777 2778 2778 if (rdev->raid_disk != -1) 2779 2779 return -EBUSY; ··· 2796 2794 rdev->saved_raid_disk = -1; 2797 2795 clear_bit(In_sync, &rdev->flags); 2798 2796 clear_bit(Bitmap_sync, &rdev->flags); 2799 - remove_and_add_spares(rdev->mddev, rdev); 2800 - if (rdev->raid_disk == -1) 2801 - return -EBUSY; 2797 + err = rdev->mddev->pers-> 2798 + hot_add_disk(rdev->mddev, rdev); 2799 + if (err) { 2800 + rdev->raid_disk = -1; 2801 + return err; 2802 + } else 2803 + sysfs_notify_dirent_safe(rdev->sysfs_state); 2804 + if (sysfs_link_rdev(rdev->mddev, rdev)) 2805 + /* failure here is OK */; 2802 2806 /* don't wakeup anyone, leave that to userspace. 
*/ 2803 2807 } else { 2804 2808 if (slot >= rdev->mddev->raid_disks && ··· 4326 4318 } 4327 4319 mddev_unlock(mddev); 4328 4320 } 4329 - } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 4330 - test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) 4321 + } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4331 4322 return -EBUSY; 4332 4323 else if (cmd_match(page, "resync")) 4333 4324 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); ··· 4339 4332 return -EINVAL; 4340 4333 err = mddev_lock(mddev); 4341 4334 if (!err) { 4342 - clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4343 - err = mddev->pers->start_reshape(mddev); 4335 + if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4336 + err = -EBUSY; 4337 + else { 4338 + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4339 + err = mddev->pers->start_reshape(mddev); 4340 + } 4344 4341 mddev_unlock(mddev); 4345 4342 } 4346 4343 if (err)
+6 -2
drivers/md/md.h
··· 566 566 static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev) 567 567 { 568 568 char nm[20]; 569 - if (!test_bit(Replacement, &rdev->flags) && mddev->kobj.sd) { 569 + if (!test_bit(Replacement, &rdev->flags) && 570 + !test_bit(Journal, &rdev->flags) && 571 + mddev->kobj.sd) { 570 572 sprintf(nm, "rd%d", rdev->raid_disk); 571 573 return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm); 572 574 } else ··· 578 576 static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev) 579 577 { 580 578 char nm[20]; 581 - if (!test_bit(Replacement, &rdev->flags) && mddev->kobj.sd) { 579 + if (!test_bit(Replacement, &rdev->flags) && 580 + !test_bit(Journal, &rdev->flags) && 581 + mddev->kobj.sd) { 582 582 sprintf(nm, "rd%d", rdev->raid_disk); 583 583 sysfs_remove_link(&mddev->kobj, nm); 584 584 }
+3 -1
drivers/md/raid10.c
··· 1946 1946 1947 1947 first = i; 1948 1948 fbio = r10_bio->devs[i].bio; 1949 + fbio->bi_iter.bi_size = r10_bio->sectors << 9; 1950 + fbio->bi_iter.bi_idx = 0; 1949 1951 1950 1952 vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9); 1951 1953 /* now find blocks with errors */ ··· 1991 1989 bio_reset(tbio); 1992 1990 1993 1991 tbio->bi_vcnt = vcnt; 1994 - tbio->bi_iter.bi_size = r10_bio->sectors << 9; 1992 + tbio->bi_iter.bi_size = fbio->bi_iter.bi_size; 1995 1993 tbio->bi_rw = WRITE; 1996 1994 tbio->bi_private = r10_bio; 1997 1995 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
+2 -2
drivers/media/pci/ivtv/ivtv-driver.c
··· 805 805 { 806 806 int i; 807 807 808 - for (i = 0; i < IVTV_CARD_MAX_VIDEO_INPUTS - 1; i++) 808 + for (i = 0; i < IVTV_CARD_MAX_VIDEO_INPUTS; i++) 809 809 if (itv->card->video_inputs[i].video_type == 0) 810 810 break; 811 811 itv->nof_inputs = i; 812 - for (i = 0; i < IVTV_CARD_MAX_AUDIO_INPUTS - 1; i++) 812 + for (i = 0; i < IVTV_CARD_MAX_AUDIO_INPUTS; i++) 813 813 if (itv->card->audio_inputs[i].audio_type == 0) 814 814 break; 815 815 itv->nof_audio_inputs = i;
+1 -1
drivers/media/usb/airspy/airspy.c
··· 134 134 int urbs_submitted; 135 135 136 136 /* USB control message buffer */ 137 - #define BUF_SIZE 24 137 + #define BUF_SIZE 128 138 138 u8 buf[BUF_SIZE]; 139 139 140 140 /* Current configuration */
+12 -1
drivers/media/usb/hackrf/hackrf.c
··· 24 24 #include <media/videobuf2-v4l2.h> 25 25 #include <media/videobuf2-vmalloc.h> 26 26 27 + /* 28 + * Used Avago MGA-81563 RF amplifier could be destroyed pretty easily with too 29 + * strong signal or transmitting to bad antenna. 30 + * Set RF gain control to 'grabbed' state by default for sure. 31 + */ 32 + static bool hackrf_enable_rf_gain_ctrl; 33 + module_param_named(enable_rf_gain_ctrl, hackrf_enable_rf_gain_ctrl, bool, 0644); 34 + MODULE_PARM_DESC(enable_rf_gain_ctrl, "enable RX/TX RF amplifier control (warn: could damage amplifier)"); 35 + 27 36 /* HackRF USB API commands (from HackRF Library) */ 28 37 enum { 29 38 CMD_SET_TRANSCEIVER_MODE = 0x01, ··· 1460 1451 dev_err(dev->dev, "Could not initialize controls\n"); 1461 1452 goto err_v4l2_ctrl_handler_free_rx; 1462 1453 } 1454 + v4l2_ctrl_grab(dev->rx_rf_gain, !hackrf_enable_rf_gain_ctrl); 1463 1455 v4l2_ctrl_handler_setup(&dev->rx_ctrl_handler); 1464 1456 1465 1457 /* Register controls for transmitter */ ··· 1481 1471 dev_err(dev->dev, "Could not initialize controls\n"); 1482 1472 goto err_v4l2_ctrl_handler_free_tx; 1483 1473 } 1474 + v4l2_ctrl_grab(dev->tx_rf_gain, !hackrf_enable_rf_gain_ctrl); 1484 1475 v4l2_ctrl_handler_setup(&dev->tx_ctrl_handler); 1485 1476 1486 1477 /* Register the v4l2_device structure */ ··· 1541 1530 err_kfree: 1542 1531 kfree(dev); 1543 1532 err: 1544 - dev_dbg(dev->dev, "failed=%d\n", ret); 1533 + dev_dbg(&intf->dev, "failed=%d\n", ret); 1545 1534 return ret; 1546 1535 } 1547 1536
+1
drivers/memory/fsl_ifc.c
··· 22 22 #include <linux/module.h> 23 23 #include <linux/kernel.h> 24 24 #include <linux/compiler.h> 25 + #include <linux/sched.h> 25 26 #include <linux/spinlock.h> 26 27 #include <linux/types.h> 27 28 #include <linux/slab.h>
+10 -2
drivers/mtd/ofpart.c
··· 46 46 47 47 ofpart_node = of_get_child_by_name(mtd_node, "partitions"); 48 48 if (!ofpart_node) { 49 - pr_warn("%s: 'partitions' subnode not found on %s. Trying to parse direct subnodes as partitions.\n", 50 - master->name, mtd_node->full_name); 49 + /* 50 + * We might get here even when ofpart isn't used at all (e.g., 51 + * when using another parser), so don't be louder than 52 + * KERN_DEBUG 53 + */ 54 + pr_debug("%s: 'partitions' subnode not found on %s. Trying to parse direct subnodes as partitions.\n", 55 + master->name, mtd_node->full_name); 51 56 ofpart_node = mtd_node; 52 57 dedicated = false; 58 + } else if (!of_device_is_compatible(ofpart_node, "fixed-partitions")) { 59 + /* The 'partitions' subnode might be used by another parser */ 60 + return 0; 53 61 } 54 62 55 63 /* First count the subnodes */
+1 -1
drivers/mtd/ubi/debug.c
··· 236 236 237 237 dfs_rootdir = debugfs_create_dir("ubi", NULL); 238 238 if (IS_ERR_OR_NULL(dfs_rootdir)) { 239 - int err = dfs_rootdir ? -ENODEV : PTR_ERR(dfs_rootdir); 239 + int err = dfs_rootdir ? PTR_ERR(dfs_rootdir) : -ENODEV; 240 240 241 241 pr_err("UBI error: cannot create \"ubi\" debugfs directory, error %d\n", 242 242 err);
+1 -1
drivers/mtd/ubi/io.c
··· 1299 1299 if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err)) 1300 1300 goto exit; 1301 1301 1302 - crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_EC_HDR_SIZE_CRC); 1302 + crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC); 1303 1303 hdr_crc = be32_to_cpu(vid_hdr->hdr_crc); 1304 1304 if (hdr_crc != crc) { 1305 1305 ubi_err(ubi, "bad VID header CRC at PEB %d, calculated %#08x, read %#08x",
+29 -24
drivers/mtd/ubi/wl.c
··· 603 603 return 0; 604 604 } 605 605 606 + static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk); 606 607 /** 607 608 * do_sync_erase - run the erase worker synchronously. 608 609 * @ubi: UBI device description object ··· 616 615 static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, 617 616 int vol_id, int lnum, int torture) 618 617 { 619 - struct ubi_work *wl_wrk; 618 + struct ubi_work wl_wrk; 620 619 621 620 dbg_wl("sync erase of PEB %i", e->pnum); 622 621 623 - wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS); 624 - if (!wl_wrk) 625 - return -ENOMEM; 622 + wl_wrk.e = e; 623 + wl_wrk.vol_id = vol_id; 624 + wl_wrk.lnum = lnum; 625 + wl_wrk.torture = torture; 626 626 627 - wl_wrk->e = e; 628 - wl_wrk->vol_id = vol_id; 629 - wl_wrk->lnum = lnum; 630 - wl_wrk->torture = torture; 631 - 632 - return erase_worker(ubi, wl_wrk, 0); 627 + return __erase_worker(ubi, &wl_wrk); 633 628 } 634 629 635 630 /** ··· 1011 1014 } 1012 1015 1013 1016 /** 1014 - * erase_worker - physical eraseblock erase worker function. 1017 + * __erase_worker - physical eraseblock erase worker function. 1015 1018 * @ubi: UBI device description object 1016 1019 * @wl_wrk: the work object 1017 1020 * @shutdown: non-zero if the worker has to free memory and exit ··· 1022 1025 * needed. Returns zero in case of success and a negative error code in case of 1023 1026 * failure. 
1024 1027 */ 1025 - static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, 1026 - int shutdown) 1028 + static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk) 1027 1029 { 1028 1030 struct ubi_wl_entry *e = wl_wrk->e; 1029 1031 int pnum = e->pnum; ··· 1030 1034 int lnum = wl_wrk->lnum; 1031 1035 int err, available_consumed = 0; 1032 1036 1033 - if (shutdown) { 1034 - dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec); 1035 - kfree(wl_wrk); 1036 - wl_entry_destroy(ubi, e); 1037 - return 0; 1038 - } 1039 - 1040 1037 dbg_wl("erase PEB %d EC %d LEB %d:%d", 1041 1038 pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum); 1042 1039 1043 1040 err = sync_erase(ubi, e, wl_wrk->torture); 1044 1041 if (!err) { 1045 - /* Fine, we've erased it successfully */ 1046 - kfree(wl_wrk); 1047 - 1048 1042 spin_lock(&ubi->wl_lock); 1049 1043 wl_tree_add(e, &ubi->free); 1050 1044 ubi->free_count++; ··· 1052 1066 } 1053 1067 1054 1068 ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err); 1055 - kfree(wl_wrk); 1056 1069 1057 1070 if (err == -EINTR || err == -ENOMEM || err == -EAGAIN || 1058 1071 err == -EBUSY) { ··· 1060 1075 /* Re-schedule the LEB for erasure */ 1061 1076 err1 = schedule_erase(ubi, e, vol_id, lnum, 0); 1062 1077 if (err1) { 1078 + wl_entry_destroy(ubi, e); 1063 1079 err = err1; 1064 1080 goto out_ro; 1065 1081 } ··· 1134 1148 } 1135 1149 ubi_ro_mode(ubi); 1136 1150 return err; 1151 + } 1152 + 1153 + static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, 1154 + int shutdown) 1155 + { 1156 + int ret; 1157 + 1158 + if (shutdown) { 1159 + struct ubi_wl_entry *e = wl_wrk->e; 1160 + 1161 + dbg_wl("cancel erasure of PEB %d EC %d", e->pnum, e->ec); 1162 + kfree(wl_wrk); 1163 + wl_entry_destroy(ubi, e); 1164 + return 0; 1165 + } 1166 + 1167 + ret = __erase_worker(ubi, wl_wrk); 1168 + kfree(wl_wrk); 1169 + return ret; 1137 1170 } 1138 1171 1139 1172 /**
+13 -9
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
··· 3370 3370 return rc; 3371 3371 } 3372 3372 3373 - #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3) 3373 + /* VXLAN: 4 = 1 (for linear data BD) + 3 (2 for PBD and last BD) */ 3374 + #define BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS 4 3375 + 3376 + /* Regular: 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */ 3377 + #define BNX2X_NUM_TSO_WIN_SUB_BDS 3 3378 + 3379 + #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT) 3374 3380 /* check if packet requires linearization (packet is too fragmented) 3375 3381 no need to check fragmentation if page size > 8K (there will be no 3376 3382 violation to FW restrictions) */ 3377 3383 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb, 3378 3384 u32 xmit_type) 3379 3385 { 3380 - int to_copy = 0; 3381 - int hlen = 0; 3382 - int first_bd_sz = 0; 3386 + int first_bd_sz = 0, num_tso_win_sub = BNX2X_NUM_TSO_WIN_SUB_BDS; 3387 + int to_copy = 0, hlen = 0; 3383 3388 3384 - /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */ 3385 - if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) { 3389 + if (xmit_type & XMIT_GSO_ENC) 3390 + num_tso_win_sub = BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS; 3386 3391 3392 + if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - num_tso_win_sub)) { 3387 3393 if (xmit_type & XMIT_GSO) { 3388 3394 unsigned short lso_mss = skb_shinfo(skb)->gso_size; 3389 - /* Check if LSO packet needs to be copied: 3390 - 3 = 1 (for headers BD) + 2 (for PBD and last BD) */ 3391 - int wnd_size = MAX_FETCH_BD - 3; 3395 + int wnd_size = MAX_FETCH_BD - num_tso_win_sub; 3392 3396 /* Number of windows to check */ 3393 3397 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size; 3394 3398 int wnd_idx = 0;
-2
drivers/net/ethernet/emulex/benet/be.h
··· 845 845 /* 846 846 * internal function to open-close roce device during ifup-ifdown. 847 847 */ 848 - void be_roce_dev_open(struct be_adapter *); 849 - void be_roce_dev_close(struct be_adapter *); 850 848 void be_roce_dev_shutdown(struct be_adapter *); 851 849 852 850 #endif /* BE_H */
+3 -5
drivers/net/ethernet/emulex/benet/be_main.c
··· 3297 3297 3298 3298 return 0; 3299 3299 err_msix: 3300 - for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--) 3300 + for (i--; i >= 0; i--) { 3301 + eqo = &adapter->eq_obj[i]; 3301 3302 free_irq(be_msix_vec_get(adapter, eqo), eqo); 3303 + } 3302 3304 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n", 3303 3305 status); 3304 3306 be_msix_disable(adapter); ··· 3431 3429 return 0; 3432 3430 3433 3431 be_disable_if_filters(adapter); 3434 - 3435 - be_roce_dev_close(adapter); 3436 3432 3437 3433 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) { 3438 3434 for_all_evt_queues(adapter, eqo, i) { ··· 3599 3599 be_link_status_update(adapter, link_status); 3600 3600 3601 3601 netif_tx_start_all_queues(netdev); 3602 - be_roce_dev_open(adapter); 3603 - 3604 3602 #ifdef CONFIG_BE2NET_VXLAN 3605 3603 if (skyhawk_chip(adapter)) 3606 3604 vxlan_get_rx_port(netdev);
-36
drivers/net/ethernet/emulex/benet/be_roce.c
··· 116 116 } 117 117 } 118 118 119 - static void _be_roce_dev_open(struct be_adapter *adapter) 120 - { 121 - if (ocrdma_drv && adapter->ocrdma_dev && 122 - ocrdma_drv->state_change_handler) 123 - ocrdma_drv->state_change_handler(adapter->ocrdma_dev, 124 - BE_DEV_UP); 125 - } 126 - 127 - void be_roce_dev_open(struct be_adapter *adapter) 128 - { 129 - if (be_roce_supported(adapter)) { 130 - mutex_lock(&be_adapter_list_lock); 131 - _be_roce_dev_open(adapter); 132 - mutex_unlock(&be_adapter_list_lock); 133 - } 134 - } 135 - 136 - static void _be_roce_dev_close(struct be_adapter *adapter) 137 - { 138 - if (ocrdma_drv && adapter->ocrdma_dev && 139 - ocrdma_drv->state_change_handler) 140 - ocrdma_drv->state_change_handler(adapter->ocrdma_dev, 141 - BE_DEV_DOWN); 142 - } 143 - 144 - void be_roce_dev_close(struct be_adapter *adapter) 145 - { 146 - if (be_roce_supported(adapter)) { 147 - mutex_lock(&be_adapter_list_lock); 148 - _be_roce_dev_close(adapter); 149 - mutex_unlock(&be_adapter_list_lock); 150 - } 151 - } 152 - 153 119 void be_roce_dev_shutdown(struct be_adapter *adapter) 154 120 { 155 121 if (be_roce_supported(adapter)) { ··· 143 177 144 178 _be_roce_dev_add(dev); 145 179 netdev = dev->netdev; 146 - if (netif_running(netdev) && netif_oper_up(netdev)) 147 - _be_roce_dev_open(dev); 148 180 } 149 181 mutex_unlock(&be_adapter_list_lock); 150 182 return 0;
+1 -3
drivers/net/ethernet/emulex/benet/be_roce.h
··· 60 60 void (*state_change_handler) (struct ocrdma_dev *, u32 new_state); 61 61 }; 62 62 63 - enum { 64 - BE_DEV_UP = 0, 65 - BE_DEV_DOWN = 1, 63 + enum be_roce_event { 66 64 BE_DEV_SHUTDOWN = 2 67 65 }; 68 66
+7
drivers/net/ethernet/mellanox/mlx4/en_clock.c
··· 242 242 unsigned long flags; 243 243 u64 ns, zero = 0; 244 244 245 + /* mlx4_en_init_timestamp is called for each netdev. 246 + * mdev->ptp_clock is common for all ports, skip initialization if 247 + * was done for other port. 248 + */ 249 + if (mdev->ptp_clock) 250 + return; 251 + 245 252 rwlock_init(&mdev->clock_lock); 246 253 247 254 memset(&mdev->cycles, 0, sizeof(mdev->cycles));
-7
drivers/net/ethernet/mellanox/mlx4/en_main.c
··· 232 232 if (mdev->pndev[i]) 233 233 mlx4_en_destroy_netdev(mdev->pndev[i]); 234 234 235 - if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) 236 - mlx4_en_remove_timestamp(mdev); 237 - 238 235 flush_workqueue(mdev->workqueue); 239 236 destroy_workqueue(mdev->workqueue); 240 237 (void) mlx4_mr_free(dev, &mdev->mr); ··· 316 319 mdev->port_cnt = 0; 317 320 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) 318 321 mdev->port_cnt++; 319 - 320 - /* Initialize time stamp mechanism */ 321 - if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) 322 - mlx4_en_init_timestamp(mdev); 323 322 324 323 /* Set default number of RX rings*/ 325 324 mlx4_en_set_num_rx_rings(mdev);
+8 -2
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
··· 2035 2035 /* flush any pending task for this netdev */ 2036 2036 flush_workqueue(mdev->workqueue); 2037 2037 2038 + if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) 2039 + mlx4_en_remove_timestamp(mdev); 2040 + 2038 2041 /* Detach the netdev so tasks would not attempt to access it */ 2039 2042 mutex_lock(&mdev->state_lock); 2040 2043 mdev->pndev[priv->port] = NULL; ··· 3021 3018 } 3022 3019 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); 3023 3020 3021 + /* Initialize time stamp mechanism */ 3024 3022 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) 3025 - queue_delayed_work(mdev->workqueue, &priv->service_task, 3026 - SERVICE_TASK_DELAY); 3023 + mlx4_en_init_timestamp(mdev); 3024 + 3025 + queue_delayed_work(mdev->workqueue, &priv->service_task, 3026 + SERVICE_TASK_DELAY); 3027 3027 3028 3028 mlx4_en_set_stats_bitmap(mdev->dev, &priv->stats_bitmap, 3029 3029 mdev->profile.prof[priv->port].rx_ppp,
+12
drivers/net/ethernet/natsemi/natsemi.c
··· 1937 1937 break; /* Better luck next round. */ 1938 1938 np->rx_dma[entry] = pci_map_single(np->pci_dev, 1939 1939 skb->data, buflen, PCI_DMA_FROMDEVICE); 1940 + if (pci_dma_mapping_error(np->pci_dev, 1941 + np->rx_dma[entry])) { 1942 + dev_kfree_skb_any(skb); 1943 + np->rx_skbuff[entry] = NULL; 1944 + break; /* Better luck next round. */ 1945 + } 1940 1946 np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]); 1941 1947 } 1942 1948 np->rx_ring[entry].cmd_status = cpu_to_le32(np->rx_buf_sz); ··· 2099 2093 np->tx_skbuff[entry] = skb; 2100 2094 np->tx_dma[entry] = pci_map_single(np->pci_dev, 2101 2095 skb->data,skb->len, PCI_DMA_TODEVICE); 2096 + if (pci_dma_mapping_error(np->pci_dev, np->tx_dma[entry])) { 2097 + np->tx_skbuff[entry] = NULL; 2098 + dev_kfree_skb_irq(skb); 2099 + dev->stats.tx_dropped++; 2100 + return NETDEV_TX_OK; 2101 + } 2102 2102 2103 2103 np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]); 2104 2104
+1 -1
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
··· 252 252 state = QLCRDX(ahw, QLC_83XX_VNIC_STATE); 253 253 } 254 254 255 - if (!idc->vnic_wait_limit) { 255 + if (state != QLCNIC_DEV_NPAR_OPER) { 256 256 dev_err(&adapter->pdev->dev, 257 257 "vNIC mode not operational, state check timed out.\n"); 258 258 return -EIO;
+14 -11
drivers/net/ethernet/renesas/sh_eth.c
··· 1142 1142 int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring; 1143 1143 int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1; 1144 1144 dma_addr_t dma_addr; 1145 + u32 buf_len; 1145 1146 1146 1147 mdp->cur_rx = 0; 1147 1148 mdp->cur_tx = 0; ··· 1163 1162 /* RX descriptor */ 1164 1163 rxdesc = &mdp->rx_ring[i]; 1165 1164 /* The size of the buffer is a multiple of 32 bytes. */ 1166 - rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 32); 1167 - dma_addr = dma_map_single(&ndev->dev, skb->data, 1168 - rxdesc->buffer_length, 1165 + buf_len = ALIGN(mdp->rx_buf_sz, 32); 1166 + rxdesc->len = cpu_to_edmac(mdp, buf_len << 16); 1167 + dma_addr = dma_map_single(&ndev->dev, skb->data, buf_len, 1169 1168 DMA_FROM_DEVICE); 1170 1169 if (dma_mapping_error(&ndev->dev, dma_addr)) { 1171 1170 kfree_skb(skb); ··· 1196 1195 mdp->tx_skbuff[i] = NULL; 1197 1196 txdesc = &mdp->tx_ring[i]; 1198 1197 txdesc->status = cpu_to_edmac(mdp, TD_TFP); 1199 - txdesc->buffer_length = 0; 1198 + txdesc->len = cpu_to_edmac(mdp, 0); 1200 1199 if (i == 0) { 1201 1200 /* Tx descriptor address set */ 1202 1201 sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR); ··· 1405 1404 if (mdp->tx_skbuff[entry]) { 1406 1405 dma_unmap_single(&ndev->dev, 1407 1406 edmac_to_cpu(mdp, txdesc->addr), 1408 - txdesc->buffer_length, DMA_TO_DEVICE); 1407 + edmac_to_cpu(mdp, txdesc->len) >> 16, 1408 + DMA_TO_DEVICE); 1409 1409 dev_kfree_skb_irq(mdp->tx_skbuff[entry]); 1410 1410 mdp->tx_skbuff[entry] = NULL; 1411 1411 free_num++; ··· 1416 1414 txdesc->status |= cpu_to_edmac(mdp, TD_TDLE); 1417 1415 1418 1416 ndev->stats.tx_packets++; 1419 - ndev->stats.tx_bytes += txdesc->buffer_length; 1417 + ndev->stats.tx_bytes += edmac_to_cpu(mdp, txdesc->len) >> 16; 1420 1418 } 1421 1419 return free_num; 1422 1420 } ··· 1435 1433 u32 desc_status; 1436 1434 int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1; 1437 1435 dma_addr_t dma_addr; 1436 + u32 buf_len; 1438 1437 1439 1438 boguscnt = min(boguscnt, *quota); 1440 1439 limit = 
boguscnt; ··· 1444 1441 /* RACT bit must be checked before all the following reads */ 1445 1442 dma_rmb(); 1446 1443 desc_status = edmac_to_cpu(mdp, rxdesc->status); 1447 - pkt_len = rxdesc->frame_length; 1444 + pkt_len = edmac_to_cpu(mdp, rxdesc->len) & RD_RFL; 1448 1445 1449 1446 if (--boguscnt < 0) 1450 1447 break; ··· 1510 1507 entry = mdp->dirty_rx % mdp->num_rx_ring; 1511 1508 rxdesc = &mdp->rx_ring[entry]; 1512 1509 /* The size of the buffer is 32 byte boundary. */ 1513 - rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 32); 1510 + buf_len = ALIGN(mdp->rx_buf_sz, 32); 1511 + rxdesc->len = cpu_to_edmac(mdp, buf_len << 16); 1514 1512 1515 1513 if (mdp->rx_skbuff[entry] == NULL) { 1516 1514 skb = netdev_alloc_skb(ndev, skbuff_size); ··· 1519 1515 break; /* Better luck next round. */ 1520 1516 sh_eth_set_receive_align(skb); 1521 1517 dma_addr = dma_map_single(&ndev->dev, skb->data, 1522 - rxdesc->buffer_length, 1523 - DMA_FROM_DEVICE); 1518 + buf_len, DMA_FROM_DEVICE); 1524 1519 if (dma_mapping_error(&ndev->dev, dma_addr)) { 1525 1520 kfree_skb(skb); 1526 1521 break; ··· 2385 2382 return NETDEV_TX_OK; 2386 2383 } 2387 2384 txdesc->addr = cpu_to_edmac(mdp, dma_addr); 2388 - txdesc->buffer_length = skb->len; 2385 + txdesc->len = cpu_to_edmac(mdp, skb->len << 16); 2389 2386 2390 2387 dma_wmb(); /* TACT bit must be set after all the above writes */ 2391 2388 if (entry >= mdp->num_tx_ring - 1)
+16 -17
drivers/net/ethernet/renesas/sh_eth.h
··· 283 283 DMAC_M_RINT1 = 0x00000001, 284 284 }; 285 285 286 - /* Receive descriptor bit */ 286 + /* Receive descriptor 0 bits */ 287 287 enum RD_STS_BIT { 288 288 RD_RACT = 0x80000000, RD_RDLE = 0x40000000, 289 289 RD_RFP1 = 0x20000000, RD_RFP0 = 0x10000000, ··· 298 298 #define RDFEND RD_RFP0 299 299 #define RD_RFP (RD_RFP1|RD_RFP0) 300 300 301 + /* Receive descriptor 1 bits */ 302 + enum RD_LEN_BIT { 303 + RD_RFL = 0x0000ffff, /* receive frame length */ 304 + RD_RBL = 0xffff0000, /* receive buffer length */ 305 + }; 306 + 301 307 /* FCFTR */ 302 308 enum FCFTR_BIT { 303 309 FCFTR_RFF2 = 0x00040000, FCFTR_RFF1 = 0x00020000, ··· 313 307 #define DEFAULT_FIFO_F_D_RFF (FCFTR_RFF2 | FCFTR_RFF1 | FCFTR_RFF0) 314 308 #define DEFAULT_FIFO_F_D_RFD (FCFTR_RFD2 | FCFTR_RFD1 | FCFTR_RFD0) 315 309 316 - /* Transmit descriptor bit */ 310 + /* Transmit descriptor 0 bits */ 317 311 enum TD_STS_BIT { 318 312 TD_TACT = 0x80000000, TD_TDLE = 0x40000000, 319 313 TD_TFP1 = 0x20000000, TD_TFP0 = 0x10000000, ··· 322 316 #define TDF1ST TD_TFP1 323 317 #define TDFEND TD_TFP0 324 318 #define TD_TFP (TD_TFP1|TD_TFP0) 319 + 320 + /* Transmit descriptor 1 bits */ 321 + enum TD_LEN_BIT { 322 + TD_TBL = 0xffff0000, /* transmit buffer length */ 323 + }; 325 324 326 325 /* RMCR */ 327 326 enum RMCR_BIT { ··· 436 425 */ 437 426 struct sh_eth_txdesc { 438 427 u32 status; /* TD0 */ 439 - #if defined(__LITTLE_ENDIAN) 440 - u16 pad0; /* TD1 */ 441 - u16 buffer_length; /* TD1 */ 442 - #else 443 - u16 buffer_length; /* TD1 */ 444 - u16 pad0; /* TD1 */ 445 - #endif 428 + u32 len; /* TD1 */ 446 429 u32 addr; /* TD2 */ 447 - u32 pad1; /* padding data */ 430 + u32 pad0; /* padding data */ 448 431 } __aligned(2) __packed; 449 432 450 433 /* The sh ether Rx buffer descriptors. 
··· 446 441 */ 447 442 struct sh_eth_rxdesc { 448 443 u32 status; /* RD0 */ 449 - #if defined(__LITTLE_ENDIAN) 450 - u16 frame_length; /* RD1 */ 451 - u16 buffer_length; /* RD1 */ 452 - #else 453 - u16 buffer_length; /* RD1 */ 454 - u16 frame_length; /* RD1 */ 455 - #endif 444 + u32 len; /* RD1 */ 456 445 u32 addr; /* RD2 */ 457 446 u32 pad0; /* padding data */ 458 447 } __aligned(2) __packed;
+38 -25
drivers/net/ethernet/ti/cpsw.c
··· 2026 2026 for_each_child_of_node(node, slave_node) { 2027 2027 struct cpsw_slave_data *slave_data = data->slave_data + i; 2028 2028 const void *mac_addr = NULL; 2029 - u32 phyid; 2030 2029 int lenp; 2031 2030 const __be32 *parp; 2032 - struct device_node *mdio_node; 2033 - struct platform_device *mdio; 2034 2031 2035 2032 /* This is no slave child node, continue */ 2036 2033 if (strcmp(slave_node->name, "slave")) 2037 2034 continue; 2038 2035 2039 2036 priv->phy_node = of_parse_phandle(slave_node, "phy-handle", 0); 2037 + parp = of_get_property(slave_node, "phy_id", &lenp); 2040 2038 if (of_phy_is_fixed_link(slave_node)) { 2041 - struct phy_device *pd; 2039 + struct device_node *phy_node; 2040 + struct phy_device *phy_dev; 2042 2041 2042 + /* In the case of a fixed PHY, the DT node associated 2043 + * to the PHY is the Ethernet MAC DT node. 2044 + */ 2043 2045 ret = of_phy_register_fixed_link(slave_node); 2044 2046 if (ret) 2045 2047 return ret; 2046 - pd = of_phy_find_device(slave_node); 2047 - if (!pd) 2048 + phy_node = of_node_get(slave_node); 2049 + phy_dev = of_phy_find_device(phy_node); 2050 + if (!phy_dev) 2048 2051 return -ENODEV; 2049 2052 snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), 2050 - PHY_ID_FMT, pd->bus->id, pd->phy_id); 2053 + PHY_ID_FMT, phy_dev->bus->id, phy_dev->addr); 2054 + } else if (parp) { 2055 + u32 phyid; 2056 + struct device_node *mdio_node; 2057 + struct platform_device *mdio; 2058 + 2059 + if (lenp != (sizeof(__be32) * 2)) { 2060 + dev_err(&pdev->dev, "Invalid slave[%d] phy_id property\n", i); 2061 + goto no_phy_slave; 2062 + } 2063 + mdio_node = of_find_node_by_phandle(be32_to_cpup(parp)); 2064 + phyid = be32_to_cpup(parp+1); 2065 + mdio = of_find_device_by_node(mdio_node); 2066 + of_node_put(mdio_node); 2067 + if (!mdio) { 2068 + dev_err(&pdev->dev, "Missing mdio platform device\n"); 2069 + return -EINVAL; 2070 + } 2071 + snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), 2072 + PHY_ID_FMT, mdio->name, phyid); 
2073 + } else { 2074 + dev_err(&pdev->dev, "No slave[%d] phy_id or fixed-link property\n", i); 2051 2075 goto no_phy_slave; 2052 2076 } 2053 - parp = of_get_property(slave_node, "phy_id", &lenp); 2054 - if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) { 2055 - dev_err(&pdev->dev, "Missing slave[%d] phy_id property\n", i); 2056 - goto no_phy_slave; 2057 - } 2058 - mdio_node = of_find_node_by_phandle(be32_to_cpup(parp)); 2059 - phyid = be32_to_cpup(parp+1); 2060 - mdio = of_find_device_by_node(mdio_node); 2061 - of_node_put(mdio_node); 2062 - if (!mdio) { 2063 - dev_err(&pdev->dev, "Missing mdio platform device\n"); 2064 - return -EINVAL; 2065 - } 2066 - snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), 2067 - PHY_ID_FMT, mdio->name, phyid); 2068 2077 slave_data->phy_if = of_get_phy_mode(slave_node); 2069 2078 if (slave_data->phy_if < 0) { 2070 2079 dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n", ··· 2427 2418 ndev->irq = platform_get_irq(pdev, 1); 2428 2419 if (ndev->irq < 0) { 2429 2420 dev_err(priv->dev, "error getting irq resource\n"); 2430 - ret = -ENOENT; 2421 + ret = ndev->irq; 2431 2422 goto clean_ale_ret; 2432 2423 } 2433 2424 ··· 2448 2439 2449 2440 /* RX IRQ */ 2450 2441 irq = platform_get_irq(pdev, 1); 2451 - if (irq < 0) 2442 + if (irq < 0) { 2443 + ret = irq; 2452 2444 goto clean_ale_ret; 2445 + } 2453 2446 2454 2447 priv->irqs_table[0] = irq; 2455 2448 ret = devm_request_irq(&pdev->dev, irq, cpsw_rx_interrupt, ··· 2463 2452 2464 2453 /* TX IRQ */ 2465 2454 irq = platform_get_irq(pdev, 2); 2466 - if (irq < 0) 2455 + if (irq < 0) { 2456 + ret = irq; 2467 2457 goto clean_ale_ret; 2458 + } 2468 2459 2469 2460 priv->irqs_table[1] = irq; 2470 2461 ret = devm_request_irq(&pdev->dev, irq, cpsw_tx_interrupt,
+9 -1
drivers/net/geneve.c
··· 1223 1223 struct geneve_net *gn = net_generic(net, geneve_net_id); 1224 1224 struct geneve_dev *t, *geneve = netdev_priv(dev); 1225 1225 bool tun_collect_md, tun_on_same_port; 1226 - int err; 1226 + int err, encap_len; 1227 1227 1228 1228 if (!remote) 1229 1229 return -EINVAL; ··· 1255 1255 &tun_on_same_port, &tun_collect_md); 1256 1256 if (t) 1257 1257 return -EBUSY; 1258 + 1259 + /* make enough headroom for basic scenario */ 1260 + encap_len = GENEVE_BASE_HLEN + ETH_HLEN; 1261 + if (remote->sa.sa_family == AF_INET) 1262 + encap_len += sizeof(struct iphdr); 1263 + else 1264 + encap_len += sizeof(struct ipv6hdr); 1265 + dev->needed_headroom = encap_len + ETH_HLEN; 1258 1266 1259 1267 if (metadata) { 1260 1268 if (tun_on_same_port)
+4 -4
drivers/net/hamradio/6pack.c
··· 683 683 if (!atomic_dec_and_test(&sp->refcnt)) 684 684 down(&sp->dead_sem); 685 685 686 - unregister_netdev(sp->dev); 687 - 688 - del_timer(&sp->tx_t); 689 - del_timer(&sp->resync_t); 686 + del_timer_sync(&sp->tx_t); 687 + del_timer_sync(&sp->resync_t); 690 688 691 689 /* Free all 6pack frame buffers. */ 692 690 kfree(sp->rbuff); 693 691 kfree(sp->xbuff); 692 + 693 + unregister_netdev(sp->dev); 694 694 } 695 695 696 696 /* Perform I/O control on an active 6pack channel. */
+2 -2
drivers/net/hamradio/mkiss.c
··· 798 798 if (!atomic_dec_and_test(&ax->refcnt)) 799 799 down(&ax->dead_sem); 800 800 801 - unregister_netdev(ax->dev); 802 - 803 801 /* Free all AX25 frame buffers. */ 804 802 kfree(ax->rbuff); 805 803 kfree(ax->xbuff); 806 804 807 805 ax->tty = NULL; 806 + 807 + unregister_netdev(ax->dev); 808 808 } 809 809 810 810 /* Perform I/O control on an active ax25 channel. */
+1 -1
drivers/net/usb/cdc_mbim.c
··· 100 100 .ndo_stop = usbnet_stop, 101 101 .ndo_start_xmit = usbnet_start_xmit, 102 102 .ndo_tx_timeout = usbnet_tx_timeout, 103 - .ndo_change_mtu = usbnet_change_mtu, 103 + .ndo_change_mtu = cdc_ncm_change_mtu, 104 104 .ndo_set_mac_address = eth_mac_addr, 105 105 .ndo_validate_addr = eth_validate_addr, 106 106 .ndo_vlan_rx_add_vid = cdc_mbim_rx_add_vid,
+49
drivers/net/usb/cdc_ncm.c
··· 41 41 #include <linux/module.h> 42 42 #include <linux/netdevice.h> 43 43 #include <linux/ctype.h> 44 + #include <linux/etherdevice.h> 44 45 #include <linux/ethtool.h> 45 46 #include <linux/workqueue.h> 46 47 #include <linux/mii.h> ··· 733 732 kfree(ctx); 734 733 } 735 734 735 + /* we need to override the usbnet change_mtu ndo for two reasons: 736 + * - respect the negotiated maximum datagram size 737 + * - avoid unwanted changes to rx and tx buffers 738 + */ 739 + int cdc_ncm_change_mtu(struct net_device *net, int new_mtu) 740 + { 741 + struct usbnet *dev = netdev_priv(net); 742 + struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; 743 + int maxmtu = ctx->max_datagram_size - cdc_ncm_eth_hlen(dev); 744 + 745 + if (new_mtu <= 0 || new_mtu > maxmtu) 746 + return -EINVAL; 747 + net->mtu = new_mtu; 748 + return 0; 749 + } 750 + EXPORT_SYMBOL_GPL(cdc_ncm_change_mtu); 751 + 752 + static const struct net_device_ops cdc_ncm_netdev_ops = { 753 + .ndo_open = usbnet_open, 754 + .ndo_stop = usbnet_stop, 755 + .ndo_start_xmit = usbnet_start_xmit, 756 + .ndo_tx_timeout = usbnet_tx_timeout, 757 + .ndo_change_mtu = cdc_ncm_change_mtu, 758 + .ndo_set_mac_address = eth_mac_addr, 759 + .ndo_validate_addr = eth_validate_addr, 760 + }; 761 + 736 762 int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int drvflags) 737 763 { 738 764 struct cdc_ncm_ctx *ctx; ··· 893 865 894 866 /* add our sysfs attrs */ 895 867 dev->net->sysfs_groups[0] = &cdc_ncm_sysfs_attr_group; 868 + 869 + /* must handle MTU changes */ 870 + dev->net->netdev_ops = &cdc_ncm_netdev_ops; 896 871 897 872 return 0; 898 873 ··· 1630 1599 .bInterfaceSubClass = USB_CDC_SUBCLASS_NCM, 1631 1600 .bInterfaceProtocol = USB_CDC_PROTO_NONE, 1632 1601 .driver_info = (unsigned long) &wwan_info, 1602 + }, 1603 + 1604 + /* DW5812 LTE Verizon Mobile Broadband Card 1605 + * Unlike DW5550 this device requires FLAG_NOARP 1606 + */ 1607 + { USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x81bb, 
1608 + USB_CLASS_COMM, 1609 + USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE), 1610 + .driver_info = (unsigned long)&wwan_noarp_info, 1611 + }, 1612 + 1613 + /* DW5813 LTE AT&T Mobile Broadband Card 1614 + * Unlike DW5550 this device requires FLAG_NOARP 1615 + */ 1616 + { USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x81bc, 1617 + USB_CLASS_COMM, 1618 + USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE), 1619 + .driver_info = (unsigned long)&wwan_noarp_info, 1633 1620 }, 1634 1621 1635 1622 /* Dell branded MBM devices like DW5550 */
-6
drivers/net/veth.c
··· 117 117 kfree_skb(skb); 118 118 goto drop; 119 119 } 120 - /* don't change ip_summed == CHECKSUM_PARTIAL, as that 121 - * will cause bad checksum on forwarded packets 122 - */ 123 - if (skb->ip_summed == CHECKSUM_NONE && 124 - rcv->features & NETIF_F_RXCSUM) 125 - skb->ip_summed = CHECKSUM_UNNECESSARY; 126 120 127 121 if (likely(dev_forward_skb(rcv, skb) == NET_RX_SUCCESS)) { 128 122 struct pcpu_vstats *stats = this_cpu_ptr(dev->vstats);
+35 -14
drivers/net/wireless/intel/iwlwifi/iwl-7000.c
··· 71 71 #include "iwl-agn-hw.h" 72 72 73 73 /* Highest firmware API version supported */ 74 - #define IWL7260_UCODE_API_MAX 19 74 + #define IWL7260_UCODE_API_MAX 17 75 + #define IWL7265_UCODE_API_MAX 19 76 + #define IWL7265D_UCODE_API_MAX 19 75 77 76 78 /* Oldest version we won't warn about */ 77 79 #define IWL7260_UCODE_API_OK 13 80 + #define IWL7265_UCODE_API_OK 13 81 + #define IWL7265D_UCODE_API_OK 13 78 82 79 83 /* Lowest firmware API version supported */ 80 84 #define IWL7260_UCODE_API_MIN 13 85 + #define IWL7265_UCODE_API_MIN 13 86 + #define IWL7265D_UCODE_API_MIN 13 81 87 82 88 /* NVM versions */ 83 89 #define IWL7260_NVM_VERSION 0x0a1d ··· 157 151 .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ), 158 152 }; 159 153 160 - #define IWL_DEVICE_7000 \ 161 - .ucode_api_max = IWL7260_UCODE_API_MAX, \ 162 - .ucode_api_ok = IWL7260_UCODE_API_OK, \ 163 - .ucode_api_min = IWL7260_UCODE_API_MIN, \ 154 + #define IWL_DEVICE_7000_COMMON \ 164 155 .device_family = IWL_DEVICE_FAMILY_7000, \ 165 156 .max_inst_size = IWL60_RTC_INST_SIZE, \ 166 157 .max_data_size = IWL60_RTC_DATA_SIZE, \ ··· 167 164 .non_shared_ant = ANT_A, \ 168 165 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ 169 166 .dccm_offset = IWL7000_DCCM_OFFSET 167 + 168 + #define IWL_DEVICE_7000 \ 169 + IWL_DEVICE_7000_COMMON, \ 170 + .ucode_api_max = IWL7260_UCODE_API_MAX, \ 171 + .ucode_api_ok = IWL7260_UCODE_API_OK, \ 172 + .ucode_api_min = IWL7260_UCODE_API_MIN 173 + 174 + #define IWL_DEVICE_7005 \ 175 + IWL_DEVICE_7000_COMMON, \ 176 + .ucode_api_max = IWL7265_UCODE_API_MAX, \ 177 + .ucode_api_ok = IWL7265_UCODE_API_OK, \ 178 + .ucode_api_min = IWL7265_UCODE_API_MIN 179 + 180 + #define IWL_DEVICE_7005D \ 181 + IWL_DEVICE_7000_COMMON, \ 182 + .ucode_api_max = IWL7265D_UCODE_API_MAX, \ 183 + .ucode_api_ok = IWL7265D_UCODE_API_OK, \ 184 + .ucode_api_min = IWL7265D_UCODE_API_MIN 170 185 171 186 const struct iwl_cfg iwl7260_2ac_cfg = { 172 187 .name = "Intel(R) Dual Band Wireless AC 7260", 
··· 289 268 const struct iwl_cfg iwl3165_2ac_cfg = { 290 269 .name = "Intel(R) Dual Band Wireless AC 3165", 291 270 .fw_name_pre = IWL7265D_FW_PRE, 292 - IWL_DEVICE_7000, 271 + IWL_DEVICE_7005D, 293 272 .ht_params = &iwl7000_ht_params, 294 273 .nvm_ver = IWL3165_NVM_VERSION, 295 274 .nvm_calib_ver = IWL3165_TX_POWER_VERSION, ··· 311 290 const struct iwl_cfg iwl7265_2ac_cfg = { 312 291 .name = "Intel(R) Dual Band Wireless AC 7265", 313 292 .fw_name_pre = IWL7265_FW_PRE, 314 - IWL_DEVICE_7000, 293 + IWL_DEVICE_7005, 315 294 .ht_params = &iwl7265_ht_params, 316 295 .nvm_ver = IWL7265_NVM_VERSION, 317 296 .nvm_calib_ver = IWL7265_TX_POWER_VERSION, ··· 322 301 const struct iwl_cfg iwl7265_2n_cfg = { 323 302 .name = "Intel(R) Dual Band Wireless N 7265", 324 303 .fw_name_pre = IWL7265_FW_PRE, 325 - IWL_DEVICE_7000, 304 + IWL_DEVICE_7005, 326 305 .ht_params = &iwl7265_ht_params, 327 306 .nvm_ver = IWL7265_NVM_VERSION, 328 307 .nvm_calib_ver = IWL7265_TX_POWER_VERSION, ··· 333 312 const struct iwl_cfg iwl7265_n_cfg = { 334 313 .name = "Intel(R) Wireless N 7265", 335 314 .fw_name_pre = IWL7265_FW_PRE, 336 - IWL_DEVICE_7000, 315 + IWL_DEVICE_7005, 337 316 .ht_params = &iwl7265_ht_params, 338 317 .nvm_ver = IWL7265_NVM_VERSION, 339 318 .nvm_calib_ver = IWL7265_TX_POWER_VERSION, ··· 344 323 const struct iwl_cfg iwl7265d_2ac_cfg = { 345 324 .name = "Intel(R) Dual Band Wireless AC 7265", 346 325 .fw_name_pre = IWL7265D_FW_PRE, 347 - IWL_DEVICE_7000, 326 + IWL_DEVICE_7005D, 348 327 .ht_params = &iwl7265_ht_params, 349 328 .nvm_ver = IWL7265D_NVM_VERSION, 350 329 .nvm_calib_ver = IWL7265_TX_POWER_VERSION, ··· 355 334 const struct iwl_cfg iwl7265d_2n_cfg = { 356 335 .name = "Intel(R) Dual Band Wireless N 7265", 357 336 .fw_name_pre = IWL7265D_FW_PRE, 358 - IWL_DEVICE_7000, 337 + IWL_DEVICE_7005D, 359 338 .ht_params = &iwl7265_ht_params, 360 339 .nvm_ver = IWL7265D_NVM_VERSION, 361 340 .nvm_calib_ver = IWL7265_TX_POWER_VERSION, ··· 366 345 const struct iwl_cfg iwl7265d_n_cfg = { 367 
346 .name = "Intel(R) Wireless N 7265", 368 347 .fw_name_pre = IWL7265D_FW_PRE, 369 - IWL_DEVICE_7000, 348 + IWL_DEVICE_7005D, 370 349 .ht_params = &iwl7265_ht_params, 371 350 .nvm_ver = IWL7265D_NVM_VERSION, 372 351 .nvm_calib_ver = IWL7265_TX_POWER_VERSION, ··· 376 355 377 356 MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); 378 357 MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); 379 - MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); 380 - MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); 358 + MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7265_UCODE_API_OK)); 359 + MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7265D_UCODE_API_OK));
+9 -6
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
··· 1222 1222 mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) { 1223 1223 u8 sta_id = mvmvif->ap_sta_id; 1224 1224 1225 - sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], 1226 - lockdep_is_held(&mvm->mutex)); 1225 + sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id], 1226 + lockdep_is_held(&mvm->mutex)); 1227 1227 /* 1228 1228 * It is possible that the 'sta' parameter is NULL, 1229 1229 * for example when a GTK is removed - the sta_id will then ··· 1590 1590 u16 *phase1key) 1591 1591 { 1592 1592 struct iwl_mvm_sta *mvm_sta; 1593 - u8 sta_id = iwl_mvm_get_key_sta_id(mvm, vif, sta); 1593 + u8 sta_id; 1594 1594 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); 1595 1595 1596 - if (WARN_ON_ONCE(sta_id == IWL_MVM_STATION_COUNT)) 1597 - return; 1598 - 1599 1596 rcu_read_lock(); 1597 + 1598 + sta_id = iwl_mvm_get_key_sta_id(mvm, vif, sta); 1599 + if (WARN_ON_ONCE(sta_id == IWL_MVM_STATION_COUNT)) 1600 + goto unlock; 1600 1601 1601 1602 if (!sta) { 1602 1603 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); ··· 1610 1609 mvm_sta = iwl_mvm_sta_from_mac80211(sta); 1611 1610 iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, 1612 1611 iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx); 1612 + 1613 + unlock: 1613 1614 rcu_read_unlock(); 1614 1615 } 1615 1616
+15 -19
drivers/net/xen-netback/netback.c
··· 258 258 struct netrx_pending_operations *npo) 259 259 { 260 260 struct xenvif_rx_meta *meta; 261 - struct xen_netif_rx_request *req; 261 + struct xen_netif_rx_request req; 262 262 263 - req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++); 263 + RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req); 264 264 265 265 meta = npo->meta + npo->meta_prod++; 266 266 meta->gso_type = XEN_NETIF_GSO_TYPE_NONE; 267 267 meta->gso_size = 0; 268 268 meta->size = 0; 269 - meta->id = req->id; 269 + meta->id = req.id; 270 270 271 271 npo->copy_off = 0; 272 - npo->copy_gref = req->gref; 272 + npo->copy_gref = req.gref; 273 273 274 274 return meta; 275 275 } ··· 424 424 struct xenvif *vif = netdev_priv(skb->dev); 425 425 int nr_frags = skb_shinfo(skb)->nr_frags; 426 426 int i; 427 - struct xen_netif_rx_request *req; 427 + struct xen_netif_rx_request req; 428 428 struct xenvif_rx_meta *meta; 429 429 unsigned char *data; 430 430 int head = 1; ··· 443 443 444 444 /* Set up a GSO prefix descriptor, if necessary */ 445 445 if ((1 << gso_type) & vif->gso_prefix_mask) { 446 - req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++); 446 + RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req); 447 447 meta = npo->meta + npo->meta_prod++; 448 448 meta->gso_type = gso_type; 449 449 meta->gso_size = skb_shinfo(skb)->gso_size; 450 450 meta->size = 0; 451 - meta->id = req->id; 451 + meta->id = req.id; 452 452 } 453 453 454 - req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++); 454 + RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req); 455 455 meta = npo->meta + npo->meta_prod++; 456 456 457 457 if ((1 << gso_type) & vif->gso_mask) { ··· 463 463 } 464 464 465 465 meta->size = 0; 466 - meta->id = req->id; 466 + meta->id = req.id; 467 467 npo->copy_off = 0; 468 - npo->copy_gref = req->gref; 468 + npo->copy_gref = req.gref; 469 469 470 470 data = skb->data; 471 471 while (data < skb_tail_pointer(skb)) { ··· 679 679 * Allow a burst big enough to transmit a jumbo packet of up 
to 128kB. 680 680 * Otherwise the interface can seize up due to insufficient credit. 681 681 */ 682 - max_burst = RING_GET_REQUEST(&queue->tx, queue->tx.req_cons)->size; 683 - max_burst = min(max_burst, 131072UL); 684 - max_burst = max(max_burst, queue->credit_bytes); 682 + max_burst = max(131072UL, queue->credit_bytes); 685 683 686 684 /* Take care that adding a new chunk of credit doesn't wrap to zero. */ 687 685 max_credit = queue->remaining_credit + queue->credit_bytes; ··· 709 711 spin_unlock_irqrestore(&queue->response_lock, flags); 710 712 if (cons == end) 711 713 break; 712 - txp = RING_GET_REQUEST(&queue->tx, cons++); 714 + RING_COPY_REQUEST(&queue->tx, cons++, txp); 713 715 } while (1); 714 716 queue->tx.req_cons = cons; 715 717 } ··· 776 778 if (drop_err) 777 779 txp = &dropped_tx; 778 780 779 - memcpy(txp, RING_GET_REQUEST(&queue->tx, cons + slots), 780 - sizeof(*txp)); 781 + RING_COPY_REQUEST(&queue->tx, cons + slots, txp); 781 782 782 783 /* If the guest submitted a frame >= 64 KiB then 783 784 * first->size overflowed and following slots will ··· 1109 1112 return -EBADR; 1110 1113 } 1111 1114 1112 - memcpy(&extra, RING_GET_REQUEST(&queue->tx, cons), 1113 - sizeof(extra)); 1115 + RING_COPY_REQUEST(&queue->tx, cons, &extra); 1114 1116 if (unlikely(!extra.type || 1115 1117 extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) { 1116 1118 queue->tx.req_cons = ++cons; ··· 1318 1322 1319 1323 idx = queue->tx.req_cons; 1320 1324 rmb(); /* Ensure that we see the request before we copy it. */ 1321 - memcpy(&txreq, RING_GET_REQUEST(&queue->tx, idx), sizeof(txreq)); 1325 + RING_COPY_REQUEST(&queue->tx, idx, &txreq); 1322 1326 1323 1327 /* Credit-based scheduling. */ 1324 1328 if (txreq.size > queue->remaining_credit &&
+19 -1
drivers/nvme/host/pci.c
··· 2540 2540 { 2541 2541 bool kill = nvme_io_incapable(ns->dev) && !blk_queue_dying(ns->queue); 2542 2542 2543 - if (kill) 2543 + if (kill) { 2544 2544 blk_set_queue_dying(ns->queue); 2545 + 2546 + /* 2547 + * The controller was shutdown first if we got here through 2548 + * device removal. The shutdown may requeue outstanding 2549 + * requests. These need to be aborted immediately so 2550 + * del_gendisk doesn't block indefinitely for their completion. 2551 + */ 2552 + blk_mq_abort_requeue_list(ns->queue); 2553 + } 2545 2554 if (ns->disk->flags & GENHD_FL_UP) 2546 2555 del_gendisk(ns->disk); 2547 2556 if (kill || !blk_queue_dying(ns->queue)) { ··· 2986 2977 { 2987 2978 struct nvme_ns *ns, *next; 2988 2979 2980 + if (nvme_io_incapable(dev)) { 2981 + /* 2982 + * If the device is not capable of IO (surprise hot-removal, 2983 + * for example), we need to quiesce prior to deleting the 2984 + * namespaces. This will end outstanding requests and prevent 2985 + * attempts to sync dirty data. 2986 + */ 2987 + nvme_dev_shutdown(dev); 2988 + } 2989 2989 list_for_each_entry_safe(ns, next, &dev->namespaces, list) 2990 2990 nvme_ns_remove(ns); 2991 2991 }
+3 -1
drivers/pci/host/pcie-hisi.c
··· 61 61 *val = *(u8 __force *) walker; 62 62 else if (size == 2) 63 63 *val = *(u16 __force *) walker; 64 - else if (size != 4) 64 + else if (size == 4) 65 + *val = reg_val; 66 + else 65 67 return PCIBIOS_BAD_REGISTER_NUMBER; 66 68 67 69 return PCIBIOS_SUCCESSFUL;
+1
drivers/phy/Kconfig
··· 233 233 tristate "Allwinner sun9i SoC USB PHY driver" 234 234 depends on ARCH_SUNXI && HAS_IOMEM && OF 235 235 depends on RESET_CONTROLLER 236 + depends on USB_COMMON 236 237 select GENERIC_PHY 237 238 help 238 239 Enable this to support the transceiver that is part of Allwinner
+12 -4
drivers/phy/phy-bcm-cygnus-pcie.c
··· 128 128 struct phy_provider *provider; 129 129 struct resource *res; 130 130 unsigned cnt = 0; 131 + int ret; 131 132 132 133 if (of_get_child_count(node) == 0) { 133 134 dev_err(dev, "PHY no child node\n"); ··· 155 154 if (of_property_read_u32(child, "reg", &id)) { 156 155 dev_err(dev, "missing reg property for %s\n", 157 156 child->name); 158 - return -EINVAL; 157 + ret = -EINVAL; 158 + goto put_child; 159 159 } 160 160 161 161 if (id >= MAX_NUM_PHYS) { 162 162 dev_err(dev, "invalid PHY id: %u\n", id); 163 - return -EINVAL; 163 + ret = -EINVAL; 164 + goto put_child; 164 165 } 165 166 166 167 if (core->phys[id].phy) { 167 168 dev_err(dev, "duplicated PHY id: %u\n", id); 168 - return -EINVAL; 169 + ret = -EINVAL; 170 + goto put_child; 169 171 } 170 172 171 173 p = &core->phys[id]; 172 174 p->phy = devm_phy_create(dev, child, &cygnus_pcie_phy_ops); 173 175 if (IS_ERR(p->phy)) { 174 176 dev_err(dev, "failed to create PHY\n"); 175 - return PTR_ERR(p->phy); 177 + ret = PTR_ERR(p->phy); 178 + goto put_child; 176 179 } 177 180 178 181 p->core = core; ··· 196 191 dev_dbg(dev, "registered %u PCIe PHY(s)\n", cnt); 197 192 198 193 return 0; 194 + put_child: 195 + of_node_put(child); 196 + return ret; 199 197 } 200 198 201 199 static const struct of_device_id cygnus_pcie_phy_match_table[] = {
+14 -6
drivers/phy/phy-berlin-sata.c
··· 195 195 struct phy_provider *phy_provider; 196 196 struct phy_berlin_priv *priv; 197 197 struct resource *res; 198 - int i = 0; 198 + int ret, i = 0; 199 199 u32 phy_id; 200 200 201 201 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); ··· 237 237 if (of_property_read_u32(child, "reg", &phy_id)) { 238 238 dev_err(dev, "missing reg property in node %s\n", 239 239 child->name); 240 - return -EINVAL; 240 + ret = -EINVAL; 241 + goto put_child; 241 242 } 242 243 243 244 if (phy_id >= ARRAY_SIZE(phy_berlin_power_down_bits)) { 244 245 dev_err(dev, "invalid reg in node %s\n", child->name); 245 - return -EINVAL; 246 + ret = -EINVAL; 247 + goto put_child; 246 248 } 247 249 248 250 phy_desc = devm_kzalloc(dev, sizeof(*phy_desc), GFP_KERNEL); 249 - if (!phy_desc) 250 - return -ENOMEM; 251 + if (!phy_desc) { 252 + ret = -ENOMEM; 253 + goto put_child; 254 + } 251 255 252 256 phy = devm_phy_create(dev, NULL, &phy_berlin_sata_ops); 253 257 if (IS_ERR(phy)) { 254 258 dev_err(dev, "failed to create PHY %d\n", phy_id); 255 - return PTR_ERR(phy); 259 + ret = PTR_ERR(phy); 260 + goto put_child; 256 261 } 257 262 258 263 phy_desc->phy = phy; ··· 274 269 phy_provider = 275 270 devm_of_phy_provider_register(dev, phy_berlin_sata_phy_xlate); 276 271 return PTR_ERR_OR_ZERO(phy_provider); 272 + put_child: 273 + of_node_put(child); 274 + return ret; 277 275 } 278 276 279 277 static const struct of_device_id phy_berlin_sata_of_match[] = {
+12 -5
drivers/phy/phy-brcmstb-sata.c
··· 140 140 struct brcm_sata_phy *priv; 141 141 struct resource *res; 142 142 struct phy_provider *provider; 143 - int count = 0; 143 + int ret, count = 0; 144 144 145 145 if (of_get_child_count(dn) == 0) 146 146 return -ENODEV; ··· 163 163 if (of_property_read_u32(child, "reg", &id)) { 164 164 dev_err(dev, "missing reg property in node %s\n", 165 165 child->name); 166 - return -EINVAL; 166 + ret = -EINVAL; 167 + goto put_child; 167 168 } 168 169 169 170 if (id >= MAX_PORTS) { 170 171 dev_err(dev, "invalid reg: %u\n", id); 171 - return -EINVAL; 172 + ret = -EINVAL; 173 + goto put_child; 172 174 } 173 175 if (priv->phys[id].phy) { 174 176 dev_err(dev, "already registered port %u\n", id); 175 - return -EINVAL; 177 + ret = -EINVAL; 178 + goto put_child; 176 179 } 177 180 178 181 port = &priv->phys[id]; ··· 185 182 port->ssc_en = of_property_read_bool(child, "brcm,enable-ssc"); 186 183 if (IS_ERR(port->phy)) { 187 184 dev_err(dev, "failed to create PHY\n"); 188 - return PTR_ERR(port->phy); 185 + ret = PTR_ERR(port->phy); 186 + goto put_child; 189 187 } 190 188 191 189 phy_set_drvdata(port->phy, port); ··· 202 198 dev_info(dev, "registered %d port(s)\n", count); 203 199 204 200 return 0; 201 + put_child: 202 + of_node_put(child); 203 + return ret; 205 204 } 206 205 207 206 static struct platform_driver brcm_sata_phy_driver = {
+15 -6
drivers/phy/phy-core.c
··· 636 636 * @np: node containing the phy 637 637 * @index: index of the phy 638 638 * 639 - * Gets the phy using _of_phy_get(), and associates a device with it using 640 - * devres. On driver detach, release function is invoked on the devres data, 639 + * Gets the phy using _of_phy_get(), then gets a refcount to it, 640 + * and associates a device with it using devres. On driver detach, 641 + * release function is invoked on the devres data, 641 642 * then, devres data is freed. 642 643 * 643 644 */ ··· 652 651 return ERR_PTR(-ENOMEM); 653 652 654 653 phy = _of_phy_get(np, index); 655 - if (!IS_ERR(phy)) { 656 - *ptr = phy; 657 - devres_add(dev, ptr); 658 - } else { 654 + if (IS_ERR(phy)) { 659 655 devres_free(ptr); 656 + return phy; 660 657 } 658 + 659 + if (!try_module_get(phy->ops->owner)) { 660 + devres_free(ptr); 661 + return ERR_PTR(-EPROBE_DEFER); 662 + } 663 + 664 + get_device(&phy->dev); 665 + 666 + *ptr = phy; 667 + devres_add(dev, ptr); 661 668 662 669 return phy; 663 670 }
+11 -5
drivers/phy/phy-miphy28lp.c
··· 1226 1226 1227 1227 miphy_phy = devm_kzalloc(&pdev->dev, sizeof(*miphy_phy), 1228 1228 GFP_KERNEL); 1229 - if (!miphy_phy) 1230 - return -ENOMEM; 1229 + if (!miphy_phy) { 1230 + ret = -ENOMEM; 1231 + goto put_child; 1232 + } 1231 1233 1232 1234 miphy_dev->phys[port] = miphy_phy; 1233 1235 1234 1236 phy = devm_phy_create(&pdev->dev, child, &miphy28lp_ops); 1235 1237 if (IS_ERR(phy)) { 1236 1238 dev_err(&pdev->dev, "failed to create PHY\n"); 1237 - return PTR_ERR(phy); 1239 + ret = PTR_ERR(phy); 1240 + goto put_child; 1238 1241 } 1239 1242 1240 1243 miphy_dev->phys[port]->phy = phy; ··· 1245 1242 1246 1243 ret = miphy28lp_of_probe(child, miphy_phy); 1247 1244 if (ret) 1248 - return ret; 1245 + goto put_child; 1249 1246 1250 1247 ret = miphy28lp_probe_resets(child, miphy_dev->phys[port]); 1251 1248 if (ret) 1252 - return ret; 1249 + goto put_child; 1253 1250 1254 1251 phy_set_drvdata(phy, miphy_dev->phys[port]); 1255 1252 port++; ··· 1258 1255 1259 1256 provider = devm_of_phy_provider_register(&pdev->dev, miphy28lp_xlate); 1260 1257 return PTR_ERR_OR_ZERO(provider); 1258 + put_child: 1259 + of_node_put(child); 1260 + return ret; 1261 1261 } 1262 1262 1263 1263 static const struct of_device_id miphy28lp_of_match[] = {
+11 -5
drivers/phy/phy-miphy365x.c
··· 566 566 567 567 miphy_phy = devm_kzalloc(&pdev->dev, sizeof(*miphy_phy), 568 568 GFP_KERNEL); 569 - if (!miphy_phy) 570 - return -ENOMEM; 569 + if (!miphy_phy) { 570 + ret = -ENOMEM; 571 + goto put_child; 572 + } 571 573 572 574 miphy_dev->phys[port] = miphy_phy; 573 575 574 576 phy = devm_phy_create(&pdev->dev, child, &miphy365x_ops); 575 577 if (IS_ERR(phy)) { 576 578 dev_err(&pdev->dev, "failed to create PHY\n"); 577 - return PTR_ERR(phy); 579 + ret = PTR_ERR(phy); 580 + goto put_child; 578 581 } 579 582 580 583 miphy_dev->phys[port]->phy = phy; 581 584 582 585 ret = miphy365x_of_probe(child, miphy_phy); 583 586 if (ret) 584 - return ret; 587 + goto put_child; 585 588 586 589 phy_set_drvdata(phy, miphy_dev->phys[port]); 587 590 ··· 594 591 &miphy_phy->ctrlreg); 595 592 if (ret) { 596 593 dev_err(&pdev->dev, "No sysconfig offset found\n"); 597 - return ret; 594 + goto put_child; 598 595 } 599 596 } 600 597 601 598 provider = devm_of_phy_provider_register(&pdev->dev, miphy365x_xlate); 602 599 return PTR_ERR_OR_ZERO(provider); 600 + put_child: 601 + of_node_put(child); 602 + return ret; 603 603 } 604 604 605 605 static const struct of_device_id miphy365x_of_match[] = {
+13 -7
drivers/phy/phy-mt65xx-usb3.c
··· 415 415 struct resource *sif_res; 416 416 struct mt65xx_u3phy *u3phy; 417 417 struct resource res; 418 - int port; 418 + int port, retval; 419 419 420 420 u3phy = devm_kzalloc(dev, sizeof(*u3phy), GFP_KERNEL); 421 421 if (!u3phy) ··· 447 447 for_each_child_of_node(np, child_np) { 448 448 struct mt65xx_phy_instance *instance; 449 449 struct phy *phy; 450 - int retval; 451 450 452 451 instance = devm_kzalloc(dev, sizeof(*instance), GFP_KERNEL); 453 - if (!instance) 454 - return -ENOMEM; 452 + if (!instance) { 453 + retval = -ENOMEM; 454 + goto put_child; 455 + } 455 456 456 457 u3phy->phys[port] = instance; 457 458 458 459 phy = devm_phy_create(dev, child_np, &mt65xx_u3phy_ops); 459 460 if (IS_ERR(phy)) { 460 461 dev_err(dev, "failed to create phy\n"); 461 - return PTR_ERR(phy); 462 + retval = PTR_ERR(phy); 463 + goto put_child; 462 464 } 463 465 464 466 retval = of_address_to_resource(child_np, 0, &res); 465 467 if (retval) { 466 468 dev_err(dev, "failed to get address resource(id-%d)\n", 467 469 port); 468 - return retval; 470 + goto put_child; 469 471 } 470 472 471 473 instance->port_base = devm_ioremap_resource(&phy->dev, &res); 472 474 if (IS_ERR(instance->port_base)) { 473 475 dev_err(dev, "failed to remap phy regs\n"); 474 - return PTR_ERR(instance->port_base); 476 + retval = PTR_ERR(instance->port_base); 477 + goto put_child; 475 478 } 476 479 477 480 instance->phy = phy; ··· 486 483 provider = devm_of_phy_provider_register(dev, mt65xx_phy_xlate); 487 484 488 485 return PTR_ERR_OR_ZERO(provider); 486 + put_child: 487 + of_node_put(child_np); 488 + return retval; 489 489 } 490 490 491 491 static const struct of_device_id mt65xx_u3phy_id_table[] = {
+12 -5
drivers/phy/phy-rockchip-usb.c
··· 108 108 109 109 for_each_available_child_of_node(dev->of_node, child) { 110 110 rk_phy = devm_kzalloc(dev, sizeof(*rk_phy), GFP_KERNEL); 111 - if (!rk_phy) 112 - return -ENOMEM; 111 + if (!rk_phy) { 112 + err = -ENOMEM; 113 + goto put_child; 114 + } 113 115 114 116 if (of_property_read_u32(child, "reg", &reg_offset)) { 115 117 dev_err(dev, "missing reg property in node %s\n", 116 118 child->name); 117 - return -EINVAL; 119 + err = -EINVAL; 120 + goto put_child; 118 121 } 119 122 120 123 rk_phy->reg_offset = reg_offset; ··· 130 127 rk_phy->phy = devm_phy_create(dev, child, &ops); 131 128 if (IS_ERR(rk_phy->phy)) { 132 129 dev_err(dev, "failed to create PHY\n"); 133 - return PTR_ERR(rk_phy->phy); 130 + err = PTR_ERR(rk_phy->phy); 131 + goto put_child; 134 132 } 135 133 phy_set_drvdata(rk_phy->phy, rk_phy); 136 134 137 135 /* only power up usb phy when it use, so disable it when init*/ 138 136 err = rockchip_usb_phy_power(rk_phy, 1); 139 137 if (err) 140 - return err; 138 + goto put_child; 141 139 } 142 140 143 141 phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); 144 142 return PTR_ERR_OR_ZERO(phy_provider); 143 + put_child: 144 + of_node_put(child); 145 + return err; 145 146 } 146 147 147 148 static const struct of_device_id rockchip_usb_phy_dt_ids[] = {
+7 -6
drivers/pinctrl/bcm/pinctrl-bcm2835.c
··· 342 342 return bcm2835_gpio_get_bit(pc, GPLEV0, offset); 343 343 } 344 344 345 - static int bcm2835_gpio_direction_output(struct gpio_chip *chip, 346 - unsigned offset, int value) 347 - { 348 - return pinctrl_gpio_direction_output(chip->base + offset); 349 - } 350 - 351 345 static void bcm2835_gpio_set(struct gpio_chip *chip, unsigned offset, int value) 352 346 { 353 347 struct bcm2835_pinctrl *pc = dev_get_drvdata(chip->dev); 354 348 355 349 bcm2835_gpio_set_bit(pc, value ? GPSET0 : GPCLR0, offset); 350 + } 351 + 352 + static int bcm2835_gpio_direction_output(struct gpio_chip *chip, 353 + unsigned offset, int value) 354 + { 355 + bcm2835_gpio_set(chip, offset, value); 356 + return pinctrl_gpio_direction_output(chip->base + offset); 356 357 } 357 358 358 359 static int bcm2835_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+1 -1
drivers/pinctrl/freescale/pinctrl-vf610.c
··· 299 299 static struct imx_pinctrl_soc_info vf610_pinctrl_info = { 300 300 .pins = vf610_pinctrl_pads, 301 301 .npins = ARRAY_SIZE(vf610_pinctrl_pads), 302 - .flags = SHARE_MUX_CONF_REG, 302 + .flags = SHARE_MUX_CONF_REG | ZERO_OFFSET_VALID, 303 303 }; 304 304 305 305 static const struct of_device_id vf610_pinctrl_of_match[] = {
+1
drivers/pinctrl/intel/pinctrl-broxton.c
··· 28 28 .padcfglock_offset = BXT_PADCFGLOCK, \ 29 29 .hostown_offset = BXT_HOSTSW_OWN, \ 30 30 .ie_offset = BXT_GPI_IE, \ 31 + .gpp_size = 32, \ 31 32 .pin_base = (s), \ 32 33 .npins = ((e) - (s) + 1), \ 33 34 }
+20 -21
drivers/pinctrl/intel/pinctrl-intel.c
··· 25 25 26 26 #include "pinctrl-intel.h" 27 27 28 - /* Maximum number of pads in each group */ 29 - #define NPADS_IN_GPP 24 30 - 31 28 /* Offset from regs */ 32 29 #define PADBAR 0x00c 33 30 #define GPI_IS 0x100 ··· 34 37 #define PADOWN_BITS 4 35 38 #define PADOWN_SHIFT(p) ((p) % 8 * PADOWN_BITS) 36 39 #define PADOWN_MASK(p) (0xf << PADOWN_SHIFT(p)) 40 + #define PADOWN_GPP(p) ((p) / 8) 37 41 38 42 /* Offset from pad_regs */ 39 43 #define PADCFG0 0x000 ··· 140 142 static bool intel_pad_owned_by_host(struct intel_pinctrl *pctrl, unsigned pin) 141 143 { 142 144 const struct intel_community *community; 143 - unsigned padno, gpp, gpp_offset, offset; 145 + unsigned padno, gpp, offset, group; 144 146 void __iomem *padown; 145 147 146 148 community = intel_get_community(pctrl, pin); ··· 150 152 return true; 151 153 152 154 padno = pin_to_padno(community, pin); 153 - gpp = padno / NPADS_IN_GPP; 154 - gpp_offset = padno % NPADS_IN_GPP; 155 - offset = community->padown_offset + gpp * 16 + (gpp_offset / 8) * 4; 155 + group = padno / community->gpp_size; 156 + gpp = PADOWN_GPP(padno % community->gpp_size); 157 + offset = community->padown_offset + 0x10 * group + gpp * 4; 156 158 padown = community->regs + offset; 157 159 158 160 return !(readl(padown) & PADOWN_MASK(padno)); ··· 171 173 return false; 172 174 173 175 padno = pin_to_padno(community, pin); 174 - gpp = padno / NPADS_IN_GPP; 176 + gpp = padno / community->gpp_size; 175 177 offset = community->hostown_offset + gpp * 4; 176 178 hostown = community->regs + offset; 177 179 178 - return !(readl(hostown) & BIT(padno % NPADS_IN_GPP)); 180 + return !(readl(hostown) & BIT(padno % community->gpp_size)); 179 181 } 180 182 181 183 static bool intel_pad_locked(struct intel_pinctrl *pctrl, unsigned pin) ··· 191 193 return false; 192 194 193 195 padno = pin_to_padno(community, pin); 194 - gpp = padno / NPADS_IN_GPP; 196 + gpp = padno / community->gpp_size; 195 197 196 198 /* 197 199 * If PADCFGLOCK and PADCFGLOCKTX bits are both 
clear for this pad, ··· 200 202 */ 201 203 offset = community->padcfglock_offset + gpp * 8; 202 204 value = readl(community->regs + offset); 203 - if (value & BIT(pin % NPADS_IN_GPP)) 205 + if (value & BIT(pin % community->gpp_size)) 204 206 return true; 205 207 206 208 offset = community->padcfglock_offset + 4 + gpp * 8; 207 209 value = readl(community->regs + offset); 208 - if (value & BIT(pin % NPADS_IN_GPP)) 210 + if (value & BIT(pin % community->gpp_size)) 209 211 return true; 210 212 211 213 return false; ··· 661 663 community = intel_get_community(pctrl, pin); 662 664 if (community) { 663 665 unsigned padno = pin_to_padno(community, pin); 664 - unsigned gpp_offset = padno % NPADS_IN_GPP; 665 - unsigned gpp = padno / NPADS_IN_GPP; 666 + unsigned gpp_offset = padno % community->gpp_size; 667 + unsigned gpp = padno / community->gpp_size; 666 668 667 669 writel(BIT(gpp_offset), community->regs + GPI_IS + gpp * 4); 668 670 } ··· 683 685 community = intel_get_community(pctrl, pin); 684 686 if (community) { 685 687 unsigned padno = pin_to_padno(community, pin); 686 - unsigned gpp_offset = padno % NPADS_IN_GPP; 687 - unsigned gpp = padno / NPADS_IN_GPP; 688 + unsigned gpp_offset = padno % community->gpp_size; 689 + unsigned gpp = padno / community->gpp_size; 688 690 void __iomem *reg; 689 691 u32 value; 690 692 ··· 778 780 return -EINVAL; 779 781 780 782 padno = pin_to_padno(community, pin); 781 - gpp = padno / NPADS_IN_GPP; 782 - gpp_offset = padno % NPADS_IN_GPP; 783 + gpp = padno / community->gpp_size; 784 + gpp_offset = padno % community->gpp_size; 783 785 784 786 /* Clear the existing wake status */ 785 787 writel(BIT(gpp_offset), community->regs + GPI_GPE_STS + gpp * 4); ··· 817 819 /* Only interrupts that are enabled */ 818 820 pending &= enabled; 819 821 820 - for_each_set_bit(gpp_offset, &pending, NPADS_IN_GPP) { 822 + for_each_set_bit(gpp_offset, &pending, community->gpp_size) { 821 823 unsigned padno, irq; 822 824 823 825 /* 824 826 * The last group in 
community can have less pins 825 827 * than NPADS_IN_GPP. 826 828 */ 827 - padno = gpp_offset + gpp * NPADS_IN_GPP; 829 + padno = gpp_offset + gpp * community->gpp_size; 828 830 if (padno >= community->npins) 829 831 break; 830 832 ··· 1000 1002 1001 1003 community->regs = regs; 1002 1004 community->pad_regs = regs + padbar; 1003 - community->ngpps = DIV_ROUND_UP(community->npins, NPADS_IN_GPP); 1005 + community->ngpps = DIV_ROUND_UP(community->npins, 1006 + community->gpp_size); 1004 1007 } 1005 1008 1006 1009 irq = platform_get_irq(pdev, 0);
+3
drivers/pinctrl/intel/pinctrl-intel.h
··· 55 55 * ACPI). 56 56 * @ie_offset: Register offset of GPI_IE from @regs. 57 57 * @pin_base: Starting pin of pins in this community 58 + * @gpp_size: Maximum number of pads in each group, such as PADCFGLOCK, 59 + * HOSTSW_OWN, GPI_IS, GPI_IE, etc. 58 60 * @npins: Number of pins in this community 59 61 * @regs: Community specific common registers (reserved for core driver) 60 62 * @pad_regs: Community specific pad registers (reserved for core driver) ··· 70 68 unsigned hostown_offset; 71 69 unsigned ie_offset; 72 70 unsigned pin_base; 71 + unsigned gpp_size; 73 72 size_t npins; 74 73 void __iomem *regs; 75 74 void __iomem *pad_regs;
+1
drivers/pinctrl/intel/pinctrl-sunrisepoint.c
··· 30 30 .padcfglock_offset = SPT_PADCFGLOCK, \ 31 31 .hostown_offset = SPT_HOSTSW_OWN, \ 32 32 .ie_offset = SPT_GPI_IE, \ 33 + .gpp_size = 24, \ 33 34 .pin_base = (s), \ 34 35 .npins = ((e) - (s) + 1), \ 35 36 }
+5 -2
drivers/powercap/intel_rapl.c
··· 1341 1341 1342 1342 for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) { 1343 1343 /* check if the domain is locked by BIOS */ 1344 - if (rapl_read_data_raw(rd, FW_LOCK, false, &locked)) { 1344 + ret = rapl_read_data_raw(rd, FW_LOCK, false, &locked); 1345 + if (ret) 1346 + return ret; 1347 + if (locked) { 1345 1348 pr_info("RAPL package %d domain %s locked by BIOS\n", 1346 1349 rp->id, rd->name); 1347 - rd->state |= DOMAIN_STATE_BIOS_LOCKED; 1350 + rd->state |= DOMAIN_STATE_BIOS_LOCKED; 1348 1351 } 1349 1352 } 1350 1353
+10 -11
drivers/rtc/rtc-da9063.c
··· 483 483 484 484 platform_set_drvdata(pdev, rtc); 485 485 486 - irq_alarm = platform_get_irq_byname(pdev, "ALARM"); 487 - ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL, 488 - da9063_alarm_event, 489 - IRQF_TRIGGER_LOW | IRQF_ONESHOT, 490 - "ALARM", rtc); 491 - if (ret) { 492 - dev_err(&pdev->dev, "Failed to request ALARM IRQ %d: %d\n", 493 - irq_alarm, ret); 494 - return ret; 495 - } 496 - 497 486 rtc->rtc_dev = devm_rtc_device_register(&pdev->dev, DA9063_DRVNAME_RTC, 498 487 &da9063_rtc_ops, THIS_MODULE); 499 488 if (IS_ERR(rtc->rtc_dev)) ··· 490 501 491 502 da9063_data_to_tm(data, &rtc->alarm_time, rtc); 492 503 rtc->rtc_sync = false; 504 + 505 + irq_alarm = platform_get_irq_byname(pdev, "ALARM"); 506 + ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL, 507 + da9063_alarm_event, 508 + IRQF_TRIGGER_LOW | IRQF_ONESHOT, 509 + "ALARM", rtc); 510 + if (ret) 511 + dev_err(&pdev->dev, "Failed to request ALARM IRQ %d: %d\n", 512 + irq_alarm, ret); 513 + 493 514 return ret; 494 515 } 495 516
+44 -4
drivers/rtc/rtc-rk808.c
··· 56 56 int irq; 57 57 }; 58 58 59 + /* 60 + * The Rockchip calendar used by the RK808 counts November with 31 days. We use 61 + * these translation functions to convert its dates to/from the Gregorian 62 + * calendar used by the rest of the world. We arbitrarily define Jan 1st, 2016 63 + * as the day when both calendars were in sync, and treat all other dates 64 + * relative to that. 65 + * NOTE: Other system software (e.g. firmware) that reads the same hardware must 66 + * implement this exact same conversion algorithm, with the same anchor date. 67 + */ 68 + static time64_t nov2dec_transitions(struct rtc_time *tm) 69 + { 70 + return (tm->tm_year + 1900) - 2016 + (tm->tm_mon + 1 > 11 ? 1 : 0); 71 + } 72 + 73 + static void rockchip_to_gregorian(struct rtc_time *tm) 74 + { 75 + /* If it's Nov 31st, rtc_tm_to_time64() will count that like Dec 1st */ 76 + time64_t time = rtc_tm_to_time64(tm); 77 + rtc_time64_to_tm(time + nov2dec_transitions(tm) * 86400, tm); 78 + } 79 + 80 + static void gregorian_to_rockchip(struct rtc_time *tm) 81 + { 82 + time64_t extra_days = nov2dec_transitions(tm); 83 + time64_t time = rtc_tm_to_time64(tm); 84 + rtc_time64_to_tm(time - extra_days * 86400, tm); 85 + 86 + /* Compensate if we went back over Nov 31st (will work up to 2381) */ 87 + if (nov2dec_transitions(tm) < extra_days) { 88 + if (tm->tm_mon + 1 == 11) 89 + tm->tm_mday++; /* This may result in 31! 
*/ 90 + else 91 + rtc_time64_to_tm(time - (extra_days - 1) * 86400, tm); 92 + } 93 + } 94 + 59 95 /* Read current time and date in RTC */ 60 96 static int rk808_rtc_readtime(struct device *dev, struct rtc_time *tm) 61 97 { ··· 137 101 tm->tm_mon = (bcd2bin(rtc_data[4] & MONTHS_REG_MSK)) - 1; 138 102 tm->tm_year = (bcd2bin(rtc_data[5] & YEARS_REG_MSK)) + 100; 139 103 tm->tm_wday = bcd2bin(rtc_data[6] & WEEKS_REG_MSK); 104 + rockchip_to_gregorian(tm); 140 105 dev_dbg(dev, "RTC date/time %4d-%02d-%02d(%d) %02d:%02d:%02d\n", 141 106 1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday, 142 - tm->tm_wday, tm->tm_hour , tm->tm_min, tm->tm_sec); 107 + tm->tm_wday, tm->tm_hour, tm->tm_min, tm->tm_sec); 143 108 144 109 return ret; 145 110 } ··· 153 116 u8 rtc_data[NUM_TIME_REGS]; 154 117 int ret; 155 118 119 + dev_dbg(dev, "set RTC date/time %4d-%02d-%02d(%d) %02d:%02d:%02d\n", 120 + 1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday, 121 + tm->tm_wday, tm->tm_hour, tm->tm_min, tm->tm_sec); 122 + gregorian_to_rockchip(tm); 156 123 rtc_data[0] = bin2bcd(tm->tm_sec); 157 124 rtc_data[1] = bin2bcd(tm->tm_min); 158 125 rtc_data[2] = bin2bcd(tm->tm_hour); ··· 164 123 rtc_data[4] = bin2bcd(tm->tm_mon + 1); 165 124 rtc_data[5] = bin2bcd(tm->tm_year - 100); 166 125 rtc_data[6] = bin2bcd(tm->tm_wday); 167 - dev_dbg(dev, "set RTC date/time %4d-%02d-%02d(%d) %02d:%02d:%02d\n", 168 - 1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday, 169 - tm->tm_wday, tm->tm_hour , tm->tm_min, tm->tm_sec); 170 126 171 127 /* Stop RTC while updating the RTC registers */ 172 128 ret = regmap_update_bits(rk808->regmap, RK808_RTC_CTRL_REG, ··· 208 170 alrm->time.tm_mday = bcd2bin(alrm_data[3] & DAYS_REG_MSK); 209 171 alrm->time.tm_mon = (bcd2bin(alrm_data[4] & MONTHS_REG_MSK)) - 1; 210 172 alrm->time.tm_year = (bcd2bin(alrm_data[5] & YEARS_REG_MSK)) + 100; 173 + rockchip_to_gregorian(&alrm->time); 211 174 212 175 ret = regmap_read(rk808->regmap, RK808_RTC_INT_REG, &int_reg); 213 176 if (ret) { ··· 266 227 
alrm->time.tm_mday, alrm->time.tm_wday, alrm->time.tm_hour, 267 228 alrm->time.tm_min, alrm->time.tm_sec); 268 229 230 + gregorian_to_rockchip(&alrm->time); 269 231 alrm_data[0] = bin2bcd(alrm->time.tm_sec); 270 232 alrm_data[1] = bin2bcd(alrm->time.tm_min); 271 233 alrm_data[2] = bin2bcd(alrm->time.tm_hour);
+3 -1
drivers/s390/crypto/ap_bus.c
··· 599 599 status = ap_sm_recv(ap_dev); 600 600 switch (status.response_code) { 601 601 case AP_RESPONSE_NORMAL: 602 - if (ap_dev->queue_count > 0) 602 + if (ap_dev->queue_count > 0) { 603 + ap_dev->state = AP_STATE_WORKING; 603 604 return AP_WAIT_AGAIN; 605 + } 604 606 ap_dev->state = AP_STATE_IDLE; 605 607 return AP_WAIT_NONE; 606 608 case AP_RESPONSE_NO_PENDING_REPLY:
+39 -27
drivers/s390/virtio/virtio_ccw.c
··· 984 984 return vq; 985 985 } 986 986 987 - static void virtio_ccw_int_handler(struct ccw_device *cdev, 988 - unsigned long intparm, 989 - struct irb *irb) 987 + static void virtio_ccw_check_activity(struct virtio_ccw_device *vcdev, 988 + __u32 activity) 990 989 { 991 - __u32 activity = intparm & VIRTIO_CCW_INTPARM_MASK; 992 - struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev); 993 - int i; 994 - struct virtqueue *vq; 995 - 996 - if (!vcdev) 997 - return; 998 - /* Check if it's a notification from the host. */ 999 - if ((intparm == 0) && 1000 - (scsw_stctl(&irb->scsw) == 1001 - (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) { 1002 - /* OK */ 1003 - } 1004 - if (irb_is_error(irb)) { 1005 - /* Command reject? */ 1006 - if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) && 1007 - (irb->ecw[0] & SNS0_CMD_REJECT)) 1008 - vcdev->err = -EOPNOTSUPP; 1009 - else 1010 - /* Map everything else to -EIO. */ 1011 - vcdev->err = -EIO; 1012 - } 1013 990 if (vcdev->curr_io & activity) { 1014 991 switch (activity) { 1015 992 case VIRTIO_CCW_DOING_READ_FEAT: ··· 1006 1029 break; 1007 1030 default: 1008 1031 /* don't know what to do... */ 1009 - dev_warn(&cdev->dev, "Suspicious activity '%08x'\n", 1010 - activity); 1032 + dev_warn(&vcdev->cdev->dev, 1033 + "Suspicious activity '%08x'\n", activity); 1011 1034 WARN_ON(1); 1012 1035 break; 1013 1036 } 1014 1037 } 1038 + } 1039 + 1040 + static void virtio_ccw_int_handler(struct ccw_device *cdev, 1041 + unsigned long intparm, 1042 + struct irb *irb) 1043 + { 1044 + __u32 activity = intparm & VIRTIO_CCW_INTPARM_MASK; 1045 + struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev); 1046 + int i; 1047 + struct virtqueue *vq; 1048 + 1049 + if (!vcdev) 1050 + return; 1051 + if (IS_ERR(irb)) { 1052 + vcdev->err = PTR_ERR(irb); 1053 + virtio_ccw_check_activity(vcdev, activity); 1054 + /* Don't poke around indicators, something's wrong. */ 1055 + return; 1056 + } 1057 + /* Check if it's a notification from the host. 
*/ 1058 + if ((intparm == 0) && 1059 + (scsw_stctl(&irb->scsw) == 1060 + (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) { 1061 + /* OK */ 1062 + } 1063 + if (irb_is_error(irb)) { 1064 + /* Command reject? */ 1065 + if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) && 1066 + (irb->ecw[0] & SNS0_CMD_REJECT)) 1067 + vcdev->err = -EOPNOTSUPP; 1068 + else 1069 + /* Map everything else to -EIO. */ 1070 + vcdev->err = -EIO; 1071 + } 1072 + virtio_ccw_check_activity(vcdev, activity); 1015 1073 for_each_set_bit(i, &vcdev->indicators, 1016 1074 sizeof(vcdev->indicators) * BITS_PER_BYTE) { 1017 1075 /* The bit clear must happen before the vring kick. */
+10 -10
drivers/scsi/scsi_pm.c
··· 219 219 struct scsi_device *sdev = to_scsi_device(dev); 220 220 int err = 0; 221 221 222 - if (pm && pm->runtime_suspend) { 223 - err = blk_pre_runtime_suspend(sdev->request_queue); 224 - if (err) 225 - return err; 222 + err = blk_pre_runtime_suspend(sdev->request_queue); 223 + if (err) 224 + return err; 225 + if (pm && pm->runtime_suspend) 226 226 err = pm->runtime_suspend(dev); 227 - blk_post_runtime_suspend(sdev->request_queue, err); 228 - } 227 + blk_post_runtime_suspend(sdev->request_queue, err); 228 + 229 229 return err; 230 230 } 231 231 ··· 248 248 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 249 249 int err = 0; 250 250 251 - if (pm && pm->runtime_resume) { 252 - blk_pre_runtime_resume(sdev->request_queue); 251 + blk_pre_runtime_resume(sdev->request_queue); 252 + if (pm && pm->runtime_resume) 253 253 err = pm->runtime_resume(dev); 254 - blk_post_runtime_resume(sdev->request_queue, err); 255 - } 254 + blk_post_runtime_resume(sdev->request_queue, err); 255 + 256 256 return err; 257 257 } 258 258
+28 -2
drivers/scsi/ses.c
··· 84 84 static int ses_recv_diag(struct scsi_device *sdev, int page_code, 85 85 void *buf, int bufflen) 86 86 { 87 + int ret; 87 88 unsigned char cmd[] = { 88 89 RECEIVE_DIAGNOSTIC, 89 90 1, /* Set PCV bit */ ··· 93 92 bufflen & 0xff, 94 93 0 95 94 }; 95 + unsigned char recv_page_code; 96 96 97 - return scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen, 97 + ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen, 98 98 NULL, SES_TIMEOUT, SES_RETRIES, NULL); 99 + if (unlikely(!ret)) 100 + return ret; 101 + 102 + recv_page_code = ((unsigned char *)buf)[0]; 103 + 104 + if (likely(recv_page_code == page_code)) 105 + return ret; 106 + 107 + /* successful diagnostic but wrong page code. This happens to some 108 + * USB devices, just print a message and pretend there was an error */ 109 + 110 + sdev_printk(KERN_ERR, sdev, 111 + "Wrong diagnostic page; asked for %d got %u\n", 112 + page_code, recv_page_code); 113 + 114 + return -EINVAL; 99 115 } 100 116 101 117 static int ses_send_diag(struct scsi_device *sdev, int page_code, ··· 559 541 if (desc_ptr) 560 542 desc_ptr += len; 561 543 562 - if (addl_desc_ptr) 544 + if (addl_desc_ptr && 545 + /* only find additional descriptions for specific devices */ 546 + (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE || 547 + type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE || 548 + type_ptr[0] == ENCLOSURE_COMPONENT_SAS_EXPANDER || 549 + /* these elements are optional */ 550 + type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_TARGET_PORT || 551 + type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT || 552 + type_ptr[0] == ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS)) 563 553 addl_desc_ptr += addl_desc_ptr[1] + 2; 564 554 565 555 }
+6 -6
drivers/spi/spi-fsl-dspi.c
··· 167 167 { 168 168 unsigned int val; 169 169 170 - regmap_read(dspi->regmap, SPI_CTAR(dspi->cs), &val); 170 + regmap_read(dspi->regmap, SPI_CTAR(0), &val); 171 171 172 172 return ((val & SPI_FRAME_BITS_MASK) == SPI_FRAME_BITS(8)) ? 0 : 1; 173 173 } ··· 257 257 258 258 return SPI_PUSHR_TXDATA(d16) | 259 259 SPI_PUSHR_PCS(dspi->cs) | 260 - SPI_PUSHR_CTAS(dspi->cs) | 260 + SPI_PUSHR_CTAS(0) | 261 261 SPI_PUSHR_CONT; 262 262 } 263 263 ··· 290 290 */ 291 291 if (tx_word && (dspi->len == 1)) { 292 292 dspi->dataflags |= TRAN_STATE_WORD_ODD_NUM; 293 - regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs), 293 + regmap_update_bits(dspi->regmap, SPI_CTAR(0), 294 294 SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(8)); 295 295 tx_word = 0; 296 296 } ··· 339 339 340 340 if (tx_word && (dspi->len == 1)) { 341 341 dspi->dataflags |= TRAN_STATE_WORD_ODD_NUM; 342 - regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs), 342 + regmap_update_bits(dspi->regmap, SPI_CTAR(0), 343 343 SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(8)); 344 344 tx_word = 0; 345 345 } ··· 407 407 regmap_update_bits(dspi->regmap, SPI_MCR, 408 408 SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF, 409 409 SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF); 410 - regmap_write(dspi->regmap, SPI_CTAR(dspi->cs), 410 + regmap_write(dspi->regmap, SPI_CTAR(0), 411 411 dspi->cur_chip->ctar_val); 412 412 413 413 trans_mode = dspi->devtype_data->trans_mode; ··· 566 566 if (!dspi->len) { 567 567 if (dspi->dataflags & TRAN_STATE_WORD_ODD_NUM) { 568 568 regmap_update_bits(dspi->regmap, 569 - SPI_CTAR(dspi->cs), 569 + SPI_CTAR(0), 570 570 SPI_FRAME_BITS_MASK, 571 571 SPI_FRAME_BITS(16)); 572 572 dspi->dataflags &= ~TRAN_STATE_WORD_ODD_NUM;
+1 -1
drivers/spi/spi.c
··· 1705 1705 master->bus_num = -1; 1706 1706 master->num_chipselect = 1; 1707 1707 master->dev.class = &spi_master_class; 1708 - master->dev.parent = get_device(dev); 1708 + master->dev.parent = dev; 1709 1709 spi_master_set_devdata(master, &master[1]); 1710 1710 1711 1711 return master;
+1 -1
drivers/spi/spidev.c
··· 651 651 kfree(spidev->rx_buffer); 652 652 spidev->rx_buffer = NULL; 653 653 654 + spin_lock_irq(&spidev->spi_lock); 654 655 if (spidev->spi) 655 656 spidev->speed_hz = spidev->spi->max_speed_hz; 656 657 657 658 /* ... after we unbound from the underlying device? */ 658 - spin_lock_irq(&spidev->spi_lock); 659 659 dofree = (spidev->spi == NULL); 660 660 spin_unlock_irq(&spidev->spi_lock); 661 661
+9 -13
drivers/tty/n_tty.c
··· 2054 2054 size_t eol; 2055 2055 size_t tail; 2056 2056 int ret, found = 0; 2057 - bool eof_push = 0; 2058 2057 2059 2058 /* N.B. avoid overrun if nr == 0 */ 2060 - n = min(*nr, smp_load_acquire(&ldata->canon_head) - ldata->read_tail); 2061 - if (!n) 2059 + if (!*nr) 2062 2060 return 0; 2061 + 2062 + n = min(*nr + 1, smp_load_acquire(&ldata->canon_head) - ldata->read_tail); 2063 2063 2064 2064 tail = ldata->read_tail & (N_TTY_BUF_SIZE - 1); 2065 2065 size = min_t(size_t, tail + n, N_TTY_BUF_SIZE); ··· 2081 2081 n = eol - tail; 2082 2082 if (n > N_TTY_BUF_SIZE) 2083 2083 n += N_TTY_BUF_SIZE; 2084 - n += found; 2085 - c = n; 2084 + c = n + found; 2086 2085 2087 - if (found && !ldata->push && read_buf(ldata, eol) == __DISABLED_CHAR) { 2088 - n--; 2089 - eof_push = !n && ldata->read_tail != ldata->line_start; 2086 + if (!found || read_buf(ldata, eol) != __DISABLED_CHAR) { 2087 + c = min(*nr, c); 2088 + n = c; 2090 2089 } 2091 2090 2092 2091 n_tty_trace("%s: eol:%zu found:%d n:%zu c:%zu size:%zu more:%zu\n", ··· 2115 2116 ldata->push = 0; 2116 2117 tty_audit_push(tty); 2117 2118 } 2118 - return eof_push ? -EAGAIN : 0; 2119 + return 0; 2119 2120 } 2120 2121 2121 2122 extern ssize_t redirected_tty_write(struct file *, const char __user *, ··· 2272 2273 2273 2274 if (ldata->icanon && !L_EXTPROC(tty)) { 2274 2275 retval = canon_copy_from_read_buf(tty, &b, &nr); 2275 - if (retval == -EAGAIN) { 2276 - retval = 0; 2277 - continue; 2278 - } else if (retval) 2276 + if (retval) 2279 2277 break; 2280 2278 } else { 2281 2279 int uncopied;
+6 -2
drivers/tty/serial/8250/8250_uniphier.c
··· 115 115 */ 116 116 static int uniphier_serial_dl_read(struct uart_8250_port *up) 117 117 { 118 - return readl(up->port.membase + UNIPHIER_UART_DLR); 118 + int offset = UNIPHIER_UART_DLR << up->port.regshift; 119 + 120 + return readl(up->port.membase + offset); 119 121 } 120 122 121 123 static void uniphier_serial_dl_write(struct uart_8250_port *up, int value) 122 124 { 123 - writel(value, up->port.membase + UNIPHIER_UART_DLR); 125 + int offset = UNIPHIER_UART_DLR << up->port.regshift; 126 + 127 + writel(value, up->port.membase + offset); 124 128 } 125 129 126 130 static int uniphier_of_serial_setup(struct device *dev, struct uart_port *port,
+2
drivers/tty/serial/earlycon.c
··· 115 115 if (buf && !parse_options(&early_console_dev, buf)) 116 116 buf = NULL; 117 117 118 + spin_lock_init(&port->lock); 118 119 port->uartclk = BASE_BAUD * 16; 119 120 if (port->mapbase) 120 121 port->membase = earlycon_map(port->mapbase, 64); ··· 203 202 int err; 204 203 struct uart_port *port = &early_console_dev.port; 205 204 205 + spin_lock_init(&port->lock); 206 206 port->iotype = UPIO_MEM; 207 207 port->mapbase = addr; 208 208 port->uartclk = BASE_BAUD * 16;
+1 -1
drivers/tty/serial/sh-sci.c
··· 1437 1437 sg_init_table(sg, 1); 1438 1438 s->rx_buf[i] = buf; 1439 1439 sg_dma_address(sg) = dma; 1440 - sg->length = s->buf_len_rx; 1440 + sg_dma_len(sg) = s->buf_len_rx; 1441 1441 1442 1442 buf += s->buf_len_rx; 1443 1443 dma += s->buf_len_rx;
+7 -5
drivers/tty/serial/sunhv.c
··· 148 148 uart_handle_dcd_change(port, 1); 149 149 } 150 150 151 - for (i = 0; i < bytes_read; i++) 152 - uart_handle_sysrq_char(port, con_read_page[i]); 151 + if (port->sysrq != 0 && *con_read_page) { 152 + for (i = 0; i < bytes_read; i++) 153 + uart_handle_sysrq_char(port, con_read_page[i]); 154 + } 153 155 154 156 if (port->state == NULL) 155 157 continue; ··· 170 168 int (*receive_chars)(struct uart_port *port); 171 169 }; 172 170 173 - static struct sunhv_ops bychar_ops = { 171 + static const struct sunhv_ops bychar_ops = { 174 172 .transmit_chars = transmit_chars_putchar, 175 173 .receive_chars = receive_chars_getchar, 176 174 }; 177 175 178 - static struct sunhv_ops bywrite_ops = { 176 + static const struct sunhv_ops bywrite_ops = { 179 177 .transmit_chars = transmit_chars_write, 180 178 .receive_chars = receive_chars_read, 181 179 }; 182 180 183 - static struct sunhv_ops *sunhv_ops = &bychar_ops; 181 + static const struct sunhv_ops *sunhv_ops = &bychar_ops; 184 182 185 183 static struct tty_port *receive_chars(struct uart_port *port) 186 184 {
+1 -1
drivers/tty/tty_buffer.c
··· 450 450 count = disc->ops->receive_buf2(tty, p, f, count); 451 451 else { 452 452 count = min_t(int, count, tty->receive_room); 453 - if (count) 453 + if (count && disc->ops->receive_buf) 454 454 disc->ops->receive_buf(tty, p, f, count); 455 455 } 456 456 return count;
+19 -3
drivers/usb/core/hub.c
··· 1035 1035 unsigned delay; 1036 1036 1037 1037 /* Continue a partial initialization */ 1038 - if (type == HUB_INIT2) 1039 - goto init2; 1040 - if (type == HUB_INIT3) 1038 + if (type == HUB_INIT2 || type == HUB_INIT3) { 1039 + device_lock(hub->intfdev); 1040 + 1041 + /* Was the hub disconnected while we were waiting? */ 1042 + if (hub->disconnected) { 1043 + device_unlock(hub->intfdev); 1044 + kref_put(&hub->kref, hub_release); 1045 + return; 1046 + } 1047 + if (type == HUB_INIT2) 1048 + goto init2; 1041 1049 goto init3; 1050 + } 1051 + kref_get(&hub->kref); 1042 1052 1043 1053 /* The superspeed hub except for root hub has to use Hub Depth 1044 1054 * value as an offset into the route string to locate the bits ··· 1246 1236 queue_delayed_work(system_power_efficient_wq, 1247 1237 &hub->init_work, 1248 1238 msecs_to_jiffies(delay)); 1239 + device_unlock(hub->intfdev); 1249 1240 return; /* Continues at init3: below */ 1250 1241 } else { 1251 1242 msleep(delay); ··· 1268 1257 /* Allow autosuspend if it was suppressed */ 1269 1258 if (type <= HUB_INIT3) 1270 1259 usb_autopm_put_interface_async(to_usb_interface(hub->intfdev)); 1260 + 1261 + if (type == HUB_INIT2 || type == HUB_INIT3) 1262 + device_unlock(hub->intfdev); 1263 + 1264 + kref_put(&hub->kref, hub_release); 1271 1265 } 1272 1266 1273 1267 /* Implement the continuations for the delays above */
+2 -1
drivers/usb/serial/ipaq.c
··· 531 531 * through. Since this has a reasonably high failure rate, we retry 532 532 * several times. 533 533 */ 534 - while (retries--) { 534 + while (retries) { 535 + retries--; 535 536 result = usb_control_msg(serial->dev, 536 537 usb_sndctrlpipe(serial->dev, 0), 0x22, 0x21, 537 538 0x1, 0, NULL, 0, 100);
+18 -5
drivers/xen/events/events_fifo.c
··· 281 281 282 282 static void consume_one_event(unsigned cpu, 283 283 struct evtchn_fifo_control_block *control_block, 284 - unsigned priority, unsigned long *ready) 284 + unsigned priority, unsigned long *ready, 285 + bool drop) 285 286 { 286 287 struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu); 287 288 uint32_t head; ··· 314 313 if (head == 0) 315 314 clear_bit(priority, ready); 316 315 317 - if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) 318 - handle_irq_for_port(port); 316 + if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) { 317 + if (unlikely(drop)) 318 + pr_warn("Dropping pending event for port %u\n", port); 319 + else 320 + handle_irq_for_port(port); 321 + } 319 322 320 323 q->head[priority] = head; 321 324 } 322 325 323 - static void evtchn_fifo_handle_events(unsigned cpu) 326 + static void __evtchn_fifo_handle_events(unsigned cpu, bool drop) 324 327 { 325 328 struct evtchn_fifo_control_block *control_block; 326 329 unsigned long ready; ··· 336 331 337 332 while (ready) { 338 333 q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES); 339 - consume_one_event(cpu, control_block, q, &ready); 334 + consume_one_event(cpu, control_block, q, &ready, drop); 340 335 ready |= xchg(&control_block->ready, 0); 341 336 } 337 + } 338 + 339 + static void evtchn_fifo_handle_events(unsigned cpu) 340 + { 341 + __evtchn_fifo_handle_events(cpu, false); 342 342 } 343 343 344 344 static void evtchn_fifo_resume(void) ··· 429 419 case CPU_UP_PREPARE: 430 420 if (!per_cpu(cpu_control_block, cpu)) 431 421 ret = evtchn_fifo_alloc_control_block(cpu); 422 + break; 423 + case CPU_DEAD: 424 + __evtchn_fifo_handle_events(cpu, true); 432 425 break; 433 426 default: 434 427 break;
+1
drivers/xen/xen-pciback/pciback.h
··· 37 37 struct xen_pci_sharedinfo *sh_info; 38 38 unsigned long flags; 39 39 struct work_struct op_work; 40 + struct xen_pci_op op; 40 41 }; 41 42 42 43 struct xen_pcibk_dev_data {
+60 -15
drivers/xen/xen-pciback/pciback_ops.c
··· 70 70 enable ? "enable" : "disable"); 71 71 72 72 if (enable) { 73 + /* 74 + * The MSI or MSI-X should not have an IRQ handler. Otherwise 75 + * if the guest terminates we BUG_ON in free_msi_irqs. 76 + */ 77 + if (dev->msi_enabled || dev->msix_enabled) 78 + goto out; 79 + 73 80 rc = request_irq(dev_data->irq, 74 81 xen_pcibk_guest_interrupt, IRQF_SHARED, 75 82 dev_data->irq_name, dev); ··· 151 144 if (unlikely(verbose_request)) 152 145 printk(KERN_DEBUG DRV_NAME ": %s: enable MSI\n", pci_name(dev)); 153 146 154 - status = pci_enable_msi(dev); 147 + if (dev->msi_enabled) 148 + status = -EALREADY; 149 + else if (dev->msix_enabled) 150 + status = -ENXIO; 151 + else 152 + status = pci_enable_msi(dev); 155 153 156 154 if (status) { 157 155 pr_warn_ratelimited("%s: error enabling MSI for guest %u: err %d\n", ··· 185 173 int xen_pcibk_disable_msi(struct xen_pcibk_device *pdev, 186 174 struct pci_dev *dev, struct xen_pci_op *op) 187 175 { 188 - struct xen_pcibk_dev_data *dev_data; 189 - 190 176 if (unlikely(verbose_request)) 191 177 printk(KERN_DEBUG DRV_NAME ": %s: disable MSI\n", 192 178 pci_name(dev)); 193 - pci_disable_msi(dev); 194 179 180 + if (dev->msi_enabled) { 181 + struct xen_pcibk_dev_data *dev_data; 182 + 183 + pci_disable_msi(dev); 184 + 185 + dev_data = pci_get_drvdata(dev); 186 + if (dev_data) 187 + dev_data->ack_intr = 1; 188 + } 195 189 op->value = dev->irq ? 
xen_pirq_from_irq(dev->irq) : 0; 196 190 if (unlikely(verbose_request)) 197 191 printk(KERN_DEBUG DRV_NAME ": %s: MSI: %d\n", pci_name(dev), 198 192 op->value); 199 - dev_data = pci_get_drvdata(dev); 200 - if (dev_data) 201 - dev_data->ack_intr = 1; 202 193 return 0; 203 194 } 204 195 ··· 212 197 struct xen_pcibk_dev_data *dev_data; 213 198 int i, result; 214 199 struct msix_entry *entries; 200 + u16 cmd; 215 201 216 202 if (unlikely(verbose_request)) 217 203 printk(KERN_DEBUG DRV_NAME ": %s: enable MSI-X\n", 218 204 pci_name(dev)); 205 + 219 206 if (op->value > SH_INFO_MAX_VEC) 220 207 return -EINVAL; 208 + 209 + if (dev->msix_enabled) 210 + return -EALREADY; 211 + 212 + /* 213 + * PCI_COMMAND_MEMORY must be enabled, otherwise we may not be able 214 + * to access the BARs where the MSI-X entries reside. 215 + */ 216 + pci_read_config_word(dev, PCI_COMMAND, &cmd); 217 + if (dev->msi_enabled || !(cmd & PCI_COMMAND_MEMORY)) 218 + return -ENXIO; 221 219 222 220 entries = kmalloc(op->value * sizeof(*entries), GFP_KERNEL); 223 221 if (entries == NULL) ··· 273 245 int xen_pcibk_disable_msix(struct xen_pcibk_device *pdev, 274 246 struct pci_dev *dev, struct xen_pci_op *op) 275 247 { 276 - struct xen_pcibk_dev_data *dev_data; 277 248 if (unlikely(verbose_request)) 278 249 printk(KERN_DEBUG DRV_NAME ": %s: disable MSI-X\n", 279 250 pci_name(dev)); 280 - pci_disable_msix(dev); 281 251 252 + if (dev->msix_enabled) { 253 + struct xen_pcibk_dev_data *dev_data; 254 + 255 + pci_disable_msix(dev); 256 + 257 + dev_data = pci_get_drvdata(dev); 258 + if (dev_data) 259 + dev_data->ack_intr = 1; 260 + } 282 261 /* 283 262 * SR-IOV devices (which don't have any legacy IRQ) have 284 263 * an undefined IRQ value of zero. 285 264 */ 286 265 op->value = dev->irq ? 
xen_pirq_from_irq(dev->irq) : 0; 287 266 if (unlikely(verbose_request)) 288 - printk(KERN_DEBUG DRV_NAME ": %s: MSI-X: %d\n", pci_name(dev), 289 - op->value); 290 - dev_data = pci_get_drvdata(dev); 291 - if (dev_data) 292 - dev_data->ack_intr = 1; 267 + printk(KERN_DEBUG DRV_NAME ": %s: MSI-X: %d\n", 268 + pci_name(dev), op->value); 293 269 return 0; 294 270 } 295 271 #endif ··· 330 298 container_of(data, struct xen_pcibk_device, op_work); 331 299 struct pci_dev *dev; 332 300 struct xen_pcibk_dev_data *dev_data = NULL; 333 - struct xen_pci_op *op = &pdev->sh_info->op; 301 + struct xen_pci_op *op = &pdev->op; 334 302 int test_intx = 0; 335 303 304 + *op = pdev->sh_info->op; 305 + barrier(); 336 306 dev = xen_pcibk_get_pci_dev(pdev, op->domain, op->bus, op->devfn); 337 307 338 308 if (dev == NULL) ··· 376 342 if ((dev_data->enable_intx != test_intx)) 377 343 xen_pcibk_control_isr(dev, 0 /* no reset */); 378 344 } 345 + pdev->sh_info->op.err = op->err; 346 + pdev->sh_info->op.value = op->value; 347 + #ifdef CONFIG_PCI_MSI 348 + if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) { 349 + unsigned int i; 350 + 351 + for (i = 0; i < op->value; i++) 352 + pdev->sh_info->op.msix_entries[i].vector = 353 + op->msix_entries[i].vector; 354 + } 355 + #endif 379 356 /* Tell the driver domain that we're done. */ 380 357 wmb(); 381 358 clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
+3 -1
drivers/xen/xen-pciback/xenbus.c
··· 44 44 dev_dbg(&xdev->dev, "allocated pdev @ 0x%p\n", pdev); 45 45 46 46 pdev->xdev = xdev; 47 - dev_set_drvdata(&xdev->dev, pdev); 48 47 49 48 mutex_init(&pdev->dev_lock); 50 49 ··· 57 58 kfree(pdev); 58 59 pdev = NULL; 59 60 } 61 + 62 + dev_set_drvdata(&xdev->dev, pdev); 63 + 60 64 out: 61 65 return pdev; 62 66 }
+1 -1
drivers/xen/xen-scsiback.c
··· 726 726 if (!pending_req) 727 727 return 1; 728 728 729 - ring_req = *RING_GET_REQUEST(ring, rc); 729 + RING_COPY_REQUEST(ring, rc, &ring_req); 730 730 ring->req_cons = ++rc; 731 731 732 732 err = prepare_pending_reqs(info, &ring_req, pending_req);
+7 -3
fs/btrfs/extent-tree.c
··· 10480 10480 * until transaction commit to do the actual discard. 10481 10481 */ 10482 10482 if (trimming) { 10483 - WARN_ON(!list_empty(&block_group->bg_list)); 10484 - spin_lock(&trans->transaction->deleted_bgs_lock); 10483 + spin_lock(&fs_info->unused_bgs_lock); 10484 + /* 10485 + * A concurrent scrub might have added us to the list 10486 + * fs_info->unused_bgs, so use a list_move operation 10487 + * to add the block group to the deleted_bgs list. 10488 + */ 10485 10489 list_move(&block_group->bg_list, 10486 10490 &trans->transaction->deleted_bgs); 10487 - spin_unlock(&trans->transaction->deleted_bgs_lock); 10491 + spin_unlock(&fs_info->unused_bgs_lock); 10488 10492 btrfs_get_block_group(block_group); 10489 10493 } 10490 10494 end_trans:
+14 -4
fs/btrfs/file.c
··· 1291 1291 * on error we return an unlocked page and the error value 1292 1292 * on success we return a locked page and 0 1293 1293 */ 1294 - static int prepare_uptodate_page(struct page *page, u64 pos, 1294 + static int prepare_uptodate_page(struct inode *inode, 1295 + struct page *page, u64 pos, 1295 1296 bool force_uptodate) 1296 1297 { 1297 1298 int ret = 0; ··· 1306 1305 if (!PageUptodate(page)) { 1307 1306 unlock_page(page); 1308 1307 return -EIO; 1308 + } 1309 + if (page->mapping != inode->i_mapping) { 1310 + unlock_page(page); 1311 + return -EAGAIN; 1309 1312 } 1310 1313 } 1311 1314 return 0; ··· 1329 1324 int faili; 1330 1325 1331 1326 for (i = 0; i < num_pages; i++) { 1327 + again: 1332 1328 pages[i] = find_or_create_page(inode->i_mapping, index + i, 1333 1329 mask | __GFP_WRITE); 1334 1330 if (!pages[i]) { ··· 1339 1333 } 1340 1334 1341 1335 if (i == 0) 1342 - err = prepare_uptodate_page(pages[i], pos, 1336 + err = prepare_uptodate_page(inode, pages[i], pos, 1343 1337 force_uptodate); 1344 - if (i == num_pages - 1) 1345 - err = prepare_uptodate_page(pages[i], 1338 + if (!err && i == num_pages - 1) 1339 + err = prepare_uptodate_page(inode, pages[i], 1346 1340 pos + write_bytes, false); 1347 1341 if (err) { 1348 1342 page_cache_release(pages[i]); 1343 + if (err == -EAGAIN) { 1344 + err = 0; 1345 + goto again; 1346 + } 1349 1347 faili = i - 1; 1350 1348 goto fail; 1351 1349 }
+6 -4
fs/btrfs/free-space-cache.c
··· 891 891 spin_unlock(&block_group->lock); 892 892 ret = 0; 893 893 894 - btrfs_warn(fs_info, "failed to load free space cache for block group %llu, rebuild it now", 894 + btrfs_warn(fs_info, "failed to load free space cache for block group %llu, rebuilding it now", 895 895 block_group->key.objectid); 896 896 } 897 897 ··· 2972 2972 u64 cont1_bytes, u64 min_bytes) 2973 2973 { 2974 2974 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 2975 - struct btrfs_free_space *entry; 2975 + struct btrfs_free_space *entry = NULL; 2976 2976 int ret = -ENOSPC; 2977 2977 u64 bitmap_offset = offset_to_bitmap(ctl, offset); 2978 2978 ··· 2983 2983 * The bitmap that covers offset won't be in the list unless offset 2984 2984 * is just its start offset. 2985 2985 */ 2986 - entry = list_first_entry(bitmaps, struct btrfs_free_space, list); 2987 - if (entry->offset != bitmap_offset) { 2986 + if (!list_empty(bitmaps)) 2987 + entry = list_first_entry(bitmaps, struct btrfs_free_space, list); 2988 + 2989 + if (!entry || entry->offset != bitmap_offset) { 2988 2990 entry = tree_search_offset(ctl, bitmap_offset, 1, 0); 2989 2991 if (entry && list_empty(&entry->list)) 2990 2992 list_add(&entry->list, bitmaps);
-1
fs/btrfs/transaction.c
··· 274 274 cur_trans->num_dirty_bgs = 0; 275 275 spin_lock_init(&cur_trans->dirty_bgs_lock); 276 276 INIT_LIST_HEAD(&cur_trans->deleted_bgs); 277 - spin_lock_init(&cur_trans->deleted_bgs_lock); 278 277 spin_lock_init(&cur_trans->dropped_roots_lock); 279 278 list_add_tail(&cur_trans->list, &fs_info->trans_list); 280 279 extent_io_tree_init(&cur_trans->dirty_pages,
+1 -1
fs/btrfs/transaction.h
··· 77 77 */ 78 78 struct mutex cache_write_mutex; 79 79 spinlock_t dirty_bgs_lock; 80 + /* Protected by spin lock fs_info->unused_bgs_lock. */ 80 81 struct list_head deleted_bgs; 81 - spinlock_t deleted_bgs_lock; 82 82 spinlock_t dropped_roots_lock; 83 83 struct btrfs_delayed_ref_root delayed_refs; 84 84 int aborted;
+1 -2
fs/btrfs/volumes.c
··· 3548 3548 3549 3549 ret = btrfs_force_chunk_alloc(trans, chunk_root, 3550 3550 BTRFS_BLOCK_GROUP_DATA); 3551 + btrfs_end_transaction(trans, chunk_root); 3551 3552 if (ret < 0) { 3552 3553 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3553 3554 goto error; 3554 3555 } 3555 - 3556 - btrfs_end_transaction(trans, chunk_root); 3557 3556 chunk_reserved = 1; 3558 3557 } 3559 3558
+1 -1
fs/nfsd/nfs4layouts.c
··· 616 616 617 617 mutex_lock(&ls->ls_mutex); 618 618 nfs4_inc_and_copy_stateid(&ls->ls_recall_sid, &ls->ls_stid); 619 + mutex_unlock(&ls->ls_mutex); 619 620 } 620 621 621 622 static int ··· 660 659 661 660 trace_layout_recall_release(&ls->ls_stid.sc_stateid); 662 661 663 - mutex_unlock(&ls->ls_mutex); 664 662 nfsd4_return_all_layouts(ls, &reaplist); 665 663 nfsd4_free_layouts(&reaplist); 666 664 nfs4_put_stid(&ls->ls_stid);
+2
fs/ocfs2/dlm/dlmmaster.c
··· 2843 2843 res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY; 2844 2844 if (!ret) 2845 2845 BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING)); 2846 + else 2847 + res->migration_pending = 0; 2846 2848 spin_unlock(&res->spinlock); 2847 2849 2848 2850 /*
+4 -1
fs/ocfs2/locks.c
··· 67 67 */ 68 68 69 69 locks_lock_file_wait(file, 70 - &(struct file_lock){.fl_type = F_UNLCK}); 70 + &(struct file_lock) { 71 + .fl_type = F_UNLCK, 72 + .fl_flags = FL_FLOCK 73 + }); 71 74 72 75 ocfs2_file_unlock(file); 73 76 }
+12 -3
fs/ocfs2/resize.c
··· 54 54 static u16 ocfs2_calc_new_backup_super(struct inode *inode, 55 55 struct ocfs2_group_desc *gd, 56 56 u16 cl_cpg, 57 + u16 old_bg_clusters, 57 58 int set) 58 59 { 59 60 int i; 60 61 u16 backups = 0; 61 - u32 cluster; 62 + u32 cluster, lgd_cluster; 62 63 u64 blkno, gd_blkno, lgd_blkno = le64_to_cpu(gd->bg_blkno); 63 64 64 65 for (i = 0; i < OCFS2_MAX_BACKUP_SUPERBLOCKS; i++) { ··· 71 70 continue; 72 71 else if (gd_blkno > lgd_blkno) 73 72 break; 73 + 74 + /* check if already done backup super */ 75 + lgd_cluster = ocfs2_blocks_to_clusters(inode->i_sb, lgd_blkno); 76 + lgd_cluster += old_bg_clusters; 77 + if (lgd_cluster >= cluster) 78 + continue; 74 79 75 80 if (set) 76 81 ocfs2_set_bit(cluster % cl_cpg, ··· 106 99 u16 chain, num_bits, backups = 0; 107 100 u16 cl_bpc = le16_to_cpu(cl->cl_bpc); 108 101 u16 cl_cpg = le16_to_cpu(cl->cl_cpg); 102 + u16 old_bg_clusters; 109 103 110 104 trace_ocfs2_update_last_group_and_inode(new_clusters, 111 105 first_new_cluster); ··· 120 112 121 113 group = (struct ocfs2_group_desc *)group_bh->b_data; 122 114 115 + old_bg_clusters = le16_to_cpu(group->bg_bits) / cl_bpc; 123 116 /* update the group first. */ 124 117 num_bits = new_clusters * cl_bpc; 125 118 le16_add_cpu(&group->bg_bits, num_bits); ··· 134 125 OCFS2_FEATURE_COMPAT_BACKUP_SB)) { 135 126 backups = ocfs2_calc_new_backup_super(bm_inode, 136 127 group, 137 - cl_cpg, 1); 128 + cl_cpg, old_bg_clusters, 1); 138 129 le16_add_cpu(&group->bg_free_bits_count, -1 * backups); 139 130 } 140 131 ··· 172 163 if (ret < 0) { 173 164 ocfs2_calc_new_backup_super(bm_inode, 174 165 group, 175 - cl_cpg, 0); 166 + cl_cpg, old_bg_clusters, 0); 176 167 le16_add_cpu(&group->bg_free_bits_count, backups); 177 168 le16_add_cpu(&group->bg_bits, -1 * num_bits); 178 169 le16_add_cpu(&group->bg_free_bits_count, -1 * num_bits);
+1
fs/proc/base.c
··· 2494 2494 mm = get_task_mm(task); 2495 2495 if (!mm) 2496 2496 goto out_no_mm; 2497 + ret = 0; 2497 2498 2498 2499 for (i = 0, mask = 1; i < MMF_DUMP_FILTER_BITS; i++, mask <<= 1) { 2499 2500 if (val & mask)
+1
include/linux/blkdev.h
··· 797 797 extern int blk_queue_enter(struct request_queue *q, gfp_t gfp); 798 798 extern void blk_queue_exit(struct request_queue *q); 799 799 extern void blk_start_queue(struct request_queue *q); 800 + extern void blk_start_queue_async(struct request_queue *q); 800 801 extern void blk_stop_queue(struct request_queue *q); 801 802 extern void blk_sync_queue(struct request_queue *q); 802 803 extern void __blk_stop_queue(struct request_queue *q);
+4
include/linux/enclosure.h
··· 29 29 /* A few generic types ... taken from ses-2 */ 30 30 enum enclosure_component_type { 31 31 ENCLOSURE_COMPONENT_DEVICE = 0x01, 32 + ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS = 0x07, 33 + ENCLOSURE_COMPONENT_SCSI_TARGET_PORT = 0x14, 34 + ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT = 0x15, 32 35 ENCLOSURE_COMPONENT_ARRAY_DEVICE = 0x17, 36 + ENCLOSURE_COMPONENT_SAS_EXPANDER = 0x18, 33 37 }; 34 38 35 39 /* ses-2 common element status */
+1
include/linux/mmdebug.h
··· 1 1 #ifndef LINUX_MM_DEBUG_H 2 2 #define LINUX_MM_DEBUG_H 1 3 3 4 + #include <linux/bug.h> 4 5 #include <linux/stringify.h> 5 6 6 7 struct page;
+1
include/linux/usb/cdc_ncm.h
··· 138 138 }; 139 139 140 140 u8 cdc_ncm_select_altsetting(struct usb_interface *intf); 141 + int cdc_ncm_change_mtu(struct net_device *net, int new_mtu); 141 142 int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int drvflags); 142 143 void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf); 143 144 struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign);
+3 -3
include/linux/vmstat.h
··· 176 176 #define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d)) 177 177 178 178 #ifdef CONFIG_SMP 179 - void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int); 179 + void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long); 180 180 void __inc_zone_page_state(struct page *, enum zone_stat_item); 181 181 void __dec_zone_page_state(struct page *, enum zone_stat_item); 182 182 183 - void mod_zone_page_state(struct zone *, enum zone_stat_item, int); 183 + void mod_zone_page_state(struct zone *, enum zone_stat_item, long); 184 184 void inc_zone_page_state(struct page *, enum zone_stat_item); 185 185 void dec_zone_page_state(struct page *, enum zone_stat_item); 186 186 ··· 205 205 * The functions directly modify the zone and global counters. 206 206 */ 207 207 static inline void __mod_zone_page_state(struct zone *zone, 208 - enum zone_stat_item item, int delta) 208 + enum zone_stat_item item, long delta) 209 209 { 210 210 zone_page_state_add(delta, zone, item); 211 211 }
+14
include/xen/interface/io/ring.h
··· 181 181 #define RING_GET_REQUEST(_r, _idx) \ 182 182 (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req)) 183 183 184 + /* 185 + * Get a local copy of a request. 186 + * 187 + * Use this in preference to RING_GET_REQUEST() so all processing is 188 + * done on a local copy that cannot be modified by the other end. 189 + * 190 + * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this 191 + * to be ineffective where _req is a struct which consists of only bitfields. 192 + */ 193 + #define RING_COPY_REQUEST(_r, _idx, _req) do { \ 194 + /* Use volatile to force the copy into _req. */ \ 195 + *(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx); \ 196 + } while (0) 197 + 184 198 #define RING_GET_RESPONSE(_r, _idx) \ 185 199 (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp)) 186 200
+2 -1
lib/rhashtable.c
··· 516 516 return -ENOMEM; 517 517 518 518 spin_lock(&ht->lock); 519 - iter->walker->tbl = rht_dereference(ht->tbl, ht); 519 + iter->walker->tbl = 520 + rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock)); 520 521 list_add(&iter->walker->list, &iter->walker->tbl->walkers); 521 522 spin_unlock(&ht->lock); 522 523
+46 -14
mm/memcontrol.c
··· 903 903 if (prev && reclaim->generation != iter->generation) 904 904 goto out_unlock; 905 905 906 - do { 906 + while (1) { 907 907 pos = READ_ONCE(iter->position); 908 + if (!pos || css_tryget(&pos->css)) 909 + break; 908 910 /* 909 - * A racing update may change the position and 910 - * put the last reference, hence css_tryget(), 911 - * or retry to see the updated position. 911 + * css reference reached zero, so iter->position will 912 + * be cleared by ->css_released. However, we should not 913 + * rely on this happening soon, because ->css_released 914 + * is called from a work queue, and by busy-waiting we 915 + * might block it. So we clear iter->position right 916 + * away. 912 917 */ 913 - } while (pos && !css_tryget(&pos->css)); 918 + (void)cmpxchg(&iter->position, pos, NULL); 919 + } 914 920 } 915 921 916 922 if (pos) ··· 962 956 } 963 957 964 958 if (reclaim) { 965 - if (cmpxchg(&iter->position, pos, memcg) == pos) { 966 - if (memcg) 967 - css_get(&memcg->css); 968 - if (pos) 969 - css_put(&pos->css); 970 - } 971 - 972 959 /* 973 - * pairs with css_tryget when dereferencing iter->position 974 - * above. 960 + * The position could have already been updated by a competing 961 + * thread, so check that the value hasn't changed since we read 962 + * it to avoid reclaiming from the same cgroup twice. 
975 963 */ 964 + (void)cmpxchg(&iter->position, pos, memcg); 965 + 976 966 if (pos) 977 967 css_put(&pos->css); 978 968 ··· 999 997 root = root_mem_cgroup; 1000 998 if (prev && prev != root) 1001 999 css_put(&prev->css); 1000 + } 1001 + 1002 + static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg) 1003 + { 1004 + struct mem_cgroup *memcg = dead_memcg; 1005 + struct mem_cgroup_reclaim_iter *iter; 1006 + struct mem_cgroup_per_zone *mz; 1007 + int nid, zid; 1008 + int i; 1009 + 1010 + while ((memcg = parent_mem_cgroup(memcg))) { 1011 + for_each_node(nid) { 1012 + for (zid = 0; zid < MAX_NR_ZONES; zid++) { 1013 + mz = &memcg->nodeinfo[nid]->zoneinfo[zid]; 1014 + for (i = 0; i <= DEF_PRIORITY; i++) { 1015 + iter = &mz->iter[i]; 1016 + cmpxchg(&iter->position, 1017 + dead_memcg, NULL); 1018 + } 1019 + } 1020 + } 1021 + } 1002 1022 } 1003 1023 1004 1024 /* ··· 4348 4324 wb_memcg_offline(memcg); 4349 4325 } 4350 4326 4327 + static void mem_cgroup_css_released(struct cgroup_subsys_state *css) 4328 + { 4329 + struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4330 + 4331 + invalidate_reclaim_iterators(memcg); 4332 + } 4333 + 4351 4334 static void mem_cgroup_css_free(struct cgroup_subsys_state *css) 4352 4335 { 4353 4336 struct mem_cgroup *memcg = mem_cgroup_from_css(css); ··· 5216 5185 .css_alloc = mem_cgroup_css_alloc, 5217 5186 .css_online = mem_cgroup_css_online, 5218 5187 .css_offline = mem_cgroup_css_offline, 5188 + .css_released = mem_cgroup_css_released, 5219 5189 .css_free = mem_cgroup_css_free, 5220 5190 .css_reset = mem_cgroup_css_reset, 5221 5191 .can_attach = mem_cgroup_can_attach,
+19 -12
mm/memory_hotplug.c
··· 1375 1375 */ 1376 1376 int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn) 1377 1377 { 1378 - unsigned long pfn; 1378 + unsigned long pfn, sec_end_pfn; 1379 1379 struct zone *zone = NULL; 1380 1380 struct page *page; 1381 1381 int i; 1382 - for (pfn = start_pfn; 1382 + for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn); 1383 1383 pfn < end_pfn; 1384 - pfn += MAX_ORDER_NR_PAGES) { 1385 - i = 0; 1386 - /* This is just a CONFIG_HOLES_IN_ZONE check.*/ 1387 - while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i)) 1388 - i++; 1389 - if (i == MAX_ORDER_NR_PAGES) 1384 + pfn = sec_end_pfn + 1, sec_end_pfn += PAGES_PER_SECTION) { 1385 + /* Make sure the memory section is present first */ 1386 + if (!present_section_nr(pfn_to_section_nr(pfn))) 1390 1387 continue; 1391 - page = pfn_to_page(pfn + i); 1392 - if (zone && page_zone(page) != zone) 1393 - return 0; 1394 - zone = page_zone(page); 1388 + for (; pfn < sec_end_pfn && pfn < end_pfn; 1389 + pfn += MAX_ORDER_NR_PAGES) { 1390 + i = 0; 1391 + /* This is just a CONFIG_HOLES_IN_ZONE check.*/ 1392 + while ((i < MAX_ORDER_NR_PAGES) && 1393 + !pfn_valid_within(pfn + i)) 1394 + i++; 1395 + if (i == MAX_ORDER_NR_PAGES) 1396 + continue; 1397 + page = pfn_to_page(pfn + i); 1398 + if (zone && page_zone(page) != zone) 1399 + return 0; 1400 + zone = page_zone(page); 1401 + } 1395 1402 } 1396 1403 return 1; 1397 1404 }
+5 -5
mm/vmstat.c
··· 219 219 * particular counter cannot be updated from interrupt context. 220 220 */ 221 221 void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item, 222 - int delta) 222 + long delta) 223 223 { 224 224 struct per_cpu_pageset __percpu *pcp = zone->pageset; 225 225 s8 __percpu *p = pcp->vm_stat_diff + item; ··· 318 318 * 1 Overstepping half of threshold 319 319 * -1 Overstepping minus half of threshold 320 320 */ 321 - static inline void mod_state(struct zone *zone, 322 - enum zone_stat_item item, int delta, int overstep_mode) 321 + static inline void mod_state(struct zone *zone, enum zone_stat_item item, 322 + long delta, int overstep_mode) 323 323 { 324 324 struct per_cpu_pageset __percpu *pcp = zone->pageset; 325 325 s8 __percpu *p = pcp->vm_stat_diff + item; ··· 357 357 } 358 358 359 359 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item, 360 - int delta) 360 + long delta) 361 361 { 362 362 mod_state(zone, item, delta, 0); 363 363 } ··· 384 384 * Use interrupt disable to serialize counter updates 385 385 */ 386 386 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item, 387 - int delta) 387 + long delta) 388 388 { 389 389 unsigned long flags; 390 390
+3 -3
mm/zswap.c
··· 541 541 return last; 542 542 } 543 543 544 + /* type and compressor must be null-terminated */ 544 545 static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor) 545 546 { 546 547 struct zswap_pool *pool; ··· 549 548 assert_spin_locked(&zswap_pools_lock); 550 549 551 550 list_for_each_entry_rcu(pool, &zswap_pools, list) { 552 - if (strncmp(pool->tfm_name, compressor, sizeof(pool->tfm_name))) 551 + if (strcmp(pool->tfm_name, compressor)) 553 552 continue; 554 - if (strncmp(zpool_get_type(pool->zpool), type, 555 - sizeof(zswap_zpool_type))) 553 + if (strcmp(zpool_get_type(pool->zpool), type)) 556 554 continue; 557 555 /* if we can't get it, it's about to be destroyed */ 558 556 if (!zswap_pool_get(pool))
+1 -1
net/bridge/br_stp_if.c
··· 40 40 .orig_dev = p->dev, 41 41 .id = SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME, 42 42 .flags = SWITCHDEV_F_SKIP_EOPNOTSUPP | SWITCHDEV_F_DEFER, 43 - .u.ageing_time = p->br->ageing_time, 43 + .u.ageing_time = jiffies_to_clock_t(p->br->ageing_time), 44 44 }; 45 45 int err; 46 46
-3
net/ipv4/ipip.c
··· 252 252 253 253 p.i_key = p.o_key = 0; 254 254 p.i_flags = p.o_flags = 0; 255 - if (p.iph.ttl) 256 - p.iph.frag_off |= htons(IP_DF); 257 - 258 255 err = ip_tunnel_ioctl(dev, &p, cmd); 259 256 if (err) 260 257 return err;
+37 -9
net/ipv4/xfrm4_policy.c
··· 259 259 xfrm_dst_ifdown(dst, dev); 260 260 } 261 261 262 - static struct dst_ops xfrm4_dst_ops = { 262 + static struct dst_ops xfrm4_dst_ops_template = { 263 263 .family = AF_INET, 264 264 .gc = xfrm4_garbage_collect, 265 265 .update_pmtu = xfrm4_update_pmtu, ··· 273 273 274 274 static struct xfrm_policy_afinfo xfrm4_policy_afinfo = { 275 275 .family = AF_INET, 276 - .dst_ops = &xfrm4_dst_ops, 276 + .dst_ops = &xfrm4_dst_ops_template, 277 277 .dst_lookup = xfrm4_dst_lookup, 278 278 .get_saddr = xfrm4_get_saddr, 279 279 .decode_session = _decode_session4, ··· 295 295 { } 296 296 }; 297 297 298 - static int __net_init xfrm4_net_init(struct net *net) 298 + static int __net_init xfrm4_net_sysctl_init(struct net *net) 299 299 { 300 300 struct ctl_table *table; 301 301 struct ctl_table_header *hdr; ··· 323 323 return -ENOMEM; 324 324 } 325 325 326 - static void __net_exit xfrm4_net_exit(struct net *net) 326 + static void __net_exit xfrm4_net_sysctl_exit(struct net *net) 327 327 { 328 328 struct ctl_table *table; 329 329 ··· 335 335 if (!net_eq(net, &init_net)) 336 336 kfree(table); 337 337 } 338 + #else /* CONFIG_SYSCTL */ 339 + static int inline xfrm4_net_sysctl_init(struct net *net) 340 + { 341 + return 0; 342 + } 343 + 344 + static void inline xfrm4_net_sysctl_exit(struct net *net) 345 + { 346 + } 347 + #endif 348 + 349 + static int __net_init xfrm4_net_init(struct net *net) 350 + { 351 + int ret; 352 + 353 + memcpy(&net->xfrm.xfrm4_dst_ops, &xfrm4_dst_ops_template, 354 + sizeof(xfrm4_dst_ops_template)); 355 + ret = dst_entries_init(&net->xfrm.xfrm4_dst_ops); 356 + if (ret) 357 + return ret; 358 + 359 + ret = xfrm4_net_sysctl_init(net); 360 + if (ret) 361 + dst_entries_destroy(&net->xfrm.xfrm4_dst_ops); 362 + 363 + return ret; 364 + } 365 + 366 + static void __net_exit xfrm4_net_exit(struct net *net) 367 + { 368 + xfrm4_net_sysctl_exit(net); 369 + dst_entries_destroy(&net->xfrm.xfrm4_dst_ops); 370 + } 338 371 339 372 static struct pernet_operations __net_initdata 
xfrm4_net_ops = { 340 373 .init = xfrm4_net_init, 341 374 .exit = xfrm4_net_exit, 342 375 }; 343 - #endif 344 376 345 377 static void __init xfrm4_policy_init(void) 346 378 { ··· 381 349 382 350 void __init xfrm4_init(void) 383 351 { 384 - dst_entries_init(&xfrm4_dst_ops); 385 - 386 352 xfrm4_state_init(); 387 353 xfrm4_policy_init(); 388 354 xfrm4_protocol_init(); 389 - #ifdef CONFIG_SYSCTL 390 355 register_pernet_subsys(&xfrm4_net_ops); 391 - #endif 392 356 } 393 357
+4 -7
net/ipv6/addrconf.c
··· 5415 5415 goto out; 5416 5416 } 5417 5417 5418 - if (!write) { 5419 - err = snprintf(str, sizeof(str), "%pI6", 5420 - &secret->secret); 5421 - if (err >= sizeof(str)) { 5422 - err = -EIO; 5423 - goto out; 5424 - } 5418 + err = snprintf(str, sizeof(str), "%pI6", &secret->secret); 5419 + if (err >= sizeof(str)) { 5420 + err = -EIO; 5421 + goto out; 5425 5422 } 5426 5423 5427 5424 err = proc_dostring(&lctl, write, buffer, lenp, ppos);
+1 -1
net/ipv6/addrlabel.c
··· 552 552 553 553 rcu_read_lock(); 554 554 p = __ipv6_addr_label(net, addr, ipv6_addr_type(addr), ifal->ifal_index); 555 - if (p && ip6addrlbl_hold(p)) 555 + if (p && !ip6addrlbl_hold(p)) 556 556 p = NULL; 557 557 lseq = ip6addrlbl_table.seq; 558 558 rcu_read_unlock();
+2 -2
net/ipv6/ndisc.c
··· 1183 1183 */ 1184 1184 if (!in6_dev->cnf.accept_ra_from_local && 1185 1185 ipv6_chk_addr(dev_net(in6_dev->dev), &ipv6_hdr(skb)->saddr, 1186 - NULL, 0)) { 1186 + in6_dev->dev, 0)) { 1187 1187 ND_PRINTK(2, info, 1188 1188 "RA from local address detected on dev: %s: default router ignored\n", 1189 1189 skb->dev->name); ··· 1337 1337 #ifdef CONFIG_IPV6_ROUTE_INFO 1338 1338 if (!in6_dev->cnf.accept_ra_from_local && 1339 1339 ipv6_chk_addr(dev_net(in6_dev->dev), &ipv6_hdr(skb)->saddr, 1340 - NULL, 0)) { 1340 + in6_dev->dev, 0)) { 1341 1341 ND_PRINTK(2, info, 1342 1342 "RA from local address detected on dev: %s: router info ignored.\n", 1343 1343 skb->dev->name);
+38 -15
net/ipv6/xfrm6_policy.c
··· 279 279 xfrm_dst_ifdown(dst, dev); 280 280 } 281 281 282 - static struct dst_ops xfrm6_dst_ops = { 282 + static struct dst_ops xfrm6_dst_ops_template = { 283 283 .family = AF_INET6, 284 284 .gc = xfrm6_garbage_collect, 285 285 .update_pmtu = xfrm6_update_pmtu, ··· 293 293 294 294 static struct xfrm_policy_afinfo xfrm6_policy_afinfo = { 295 295 .family = AF_INET6, 296 - .dst_ops = &xfrm6_dst_ops, 296 + .dst_ops = &xfrm6_dst_ops_template, 297 297 .dst_lookup = xfrm6_dst_lookup, 298 298 .get_saddr = xfrm6_get_saddr, 299 299 .decode_session = _decode_session6, ··· 325 325 { } 326 326 }; 327 327 328 - static int __net_init xfrm6_net_init(struct net *net) 328 + static int __net_init xfrm6_net_sysctl_init(struct net *net) 329 329 { 330 330 struct ctl_table *table; 331 331 struct ctl_table_header *hdr; ··· 353 353 return -ENOMEM; 354 354 } 355 355 356 - static void __net_exit xfrm6_net_exit(struct net *net) 356 + static void __net_exit xfrm6_net_sysctl_exit(struct net *net) 357 357 { 358 358 struct ctl_table *table; 359 359 ··· 365 365 if (!net_eq(net, &init_net)) 366 366 kfree(table); 367 367 } 368 + #else /* CONFIG_SYSCTL */ 369 + static int inline xfrm6_net_sysctl_init(struct net *net) 370 + { 371 + return 0; 372 + } 373 + 374 + static void inline xfrm6_net_sysctl_exit(struct net *net) 375 + { 376 + } 377 + #endif 378 + 379 + static int __net_init xfrm6_net_init(struct net *net) 380 + { 381 + int ret; 382 + 383 + memcpy(&net->xfrm.xfrm6_dst_ops, &xfrm6_dst_ops_template, 384 + sizeof(xfrm6_dst_ops_template)); 385 + ret = dst_entries_init(&net->xfrm.xfrm6_dst_ops); 386 + if (ret) 387 + return ret; 388 + 389 + ret = xfrm6_net_sysctl_init(net); 390 + if (ret) 391 + dst_entries_destroy(&net->xfrm.xfrm6_dst_ops); 392 + 393 + return ret; 394 + } 395 + 396 + static void __net_exit xfrm6_net_exit(struct net *net) 397 + { 398 + xfrm6_net_sysctl_exit(net); 399 + dst_entries_destroy(&net->xfrm.xfrm6_dst_ops); 400 + } 368 401 369 402 static struct pernet_operations xfrm6_net_ops 
= { 370 403 .init = xfrm6_net_init, 371 404 .exit = xfrm6_net_exit, 372 405 }; 373 - #endif 374 406 375 407 int __init xfrm6_init(void) 376 408 { 377 409 int ret; 378 410 379 - dst_entries_init(&xfrm6_dst_ops); 380 - 381 411 ret = xfrm6_policy_init(); 382 - if (ret) { 383 - dst_entries_destroy(&xfrm6_dst_ops); 412 + if (ret) 384 413 goto out; 385 - } 386 414 ret = xfrm6_state_init(); 387 415 if (ret) 388 416 goto out_policy; ··· 419 391 if (ret) 420 392 goto out_state; 421 393 422 - #ifdef CONFIG_SYSCTL 423 394 register_pernet_subsys(&xfrm6_net_ops); 424 - #endif 425 395 out: 426 396 return ret; 427 397 out_state: ··· 431 405 432 406 void xfrm6_fini(void) 433 407 { 434 - #ifdef CONFIG_SYSCTL 435 408 unregister_pernet_subsys(&xfrm6_net_ops); 436 - #endif 437 409 xfrm6_protocol_fini(); 438 410 xfrm6_policy_fini(); 439 411 xfrm6_state_fini(); 440 - dst_entries_destroy(&xfrm6_dst_ops); 441 412 }
+1 -1
net/netfilter/nf_tables_netdev.c
··· 94 94 { 95 95 struct nft_pktinfo pkt; 96 96 97 - switch (eth_hdr(skb)->h_proto) { 97 + switch (skb->protocol) { 98 98 case htons(ETH_P_IP): 99 99 nft_netdev_set_pktinfo_ipv4(&pkt, skb, state); 100 100 break;
+1
net/netfilter/nft_ct.c
··· 366 366 goto nla_put_failure; 367 367 368 368 switch (priv->key) { 369 + case NFT_CT_L3PROTOCOL: 369 370 case NFT_CT_PROTOCOL: 370 371 case NFT_CT_SRC: 371 372 case NFT_CT_DST:
+4 -2
net/openvswitch/conntrack.c
··· 683 683 OVS_NLERR(log, "Failed to allocate conntrack template"); 684 684 return -ENOMEM; 685 685 } 686 + 687 + __set_bit(IPS_CONFIRMED_BIT, &ct_info.ct->status); 688 + nf_conntrack_get(&ct_info.ct->ct_general); 689 + 686 690 if (helper) { 687 691 err = ovs_ct_add_helper(&ct_info, helper, key, log); 688 692 if (err) ··· 698 694 if (err) 699 695 goto err_free_ct; 700 696 701 - __set_bit(IPS_CONFIRMED_BIT, &ct_info.ct->status); 702 - nf_conntrack_get(&ct_info.ct->ct_general); 703 697 return 0; 704 698 err_free_ct: 705 699 __ovs_ct_free_action(&ct_info);
+4 -1
net/openvswitch/flow_netlink.c
··· 2434 2434 if (!start) 2435 2435 return -EMSGSIZE; 2436 2436 2437 - err = ovs_nla_put_tunnel_info(skb, tun_info); 2437 + err = ip_tun_to_nlattr(skb, &tun_info->key, 2438 + ip_tunnel_info_opts(tun_info), 2439 + tun_info->options_len, 2440 + ip_tunnel_info_af(tun_info)); 2438 2441 if (err) 2439 2442 return err; 2440 2443 nla_nest_end(skb, start);
+4 -2
net/sctp/sm_statefuns.c
··· 4829 4829 4830 4830 retval = SCTP_DISPOSITION_CONSUME; 4831 4831 4832 - sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); 4832 + if (abort) 4833 + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); 4833 4834 4834 4835 /* Even if we can't send the ABORT due to low memory delete the 4835 4836 * TCB. This is a departure from our typical NOMEM handling. ··· 4967 4966 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); 4968 4967 retval = SCTP_DISPOSITION_CONSUME; 4969 4968 4970 - sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); 4969 + if (abort) 4970 + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); 4971 4971 4972 4972 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, 4973 4973 SCTP_STATE(SCTP_STATE_CLOSED));
+9 -5
net/sctp/socket.c
··· 1301 1301 int addrs_size, 1302 1302 sctp_assoc_t *assoc_id) 1303 1303 { 1304 - int err = 0; 1305 1304 struct sockaddr *kaddrs; 1305 + gfp_t gfp = GFP_KERNEL; 1306 + int err = 0; 1306 1307 1307 1308 pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n", 1308 1309 __func__, sk, addrs, addrs_size); ··· 1316 1315 return -EFAULT; 1317 1316 1318 1317 /* Alloc space for the address array in kernel memory. */ 1319 - kaddrs = kmalloc(addrs_size, GFP_KERNEL); 1318 + if (sk->sk_socket->file) 1319 + gfp = GFP_USER | __GFP_NOWARN; 1320 + kaddrs = kmalloc(addrs_size, gfp); 1320 1321 if (unlikely(!kaddrs)) 1321 1322 return -ENOMEM; 1322 1323 ··· 1516 1513 struct sctp_chunk *chunk; 1517 1514 1518 1515 chunk = sctp_make_abort_user(asoc, NULL, 0); 1519 - if (chunk) 1520 - sctp_primitive_ABORT(net, asoc, chunk); 1516 + sctp_primitive_ABORT(net, asoc, chunk); 1521 1517 } else 1522 1518 sctp_primitive_SHUTDOWN(net, asoc, NULL); 1523 1519 } ··· 5775 5773 5776 5774 len = sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num; 5777 5775 5778 - ids = kmalloc(len, GFP_KERNEL); 5776 + ids = kmalloc(len, GFP_USER | __GFP_NOWARN); 5779 5777 if (unlikely(!ids)) 5780 5778 return -ENOMEM; 5781 5779 ··· 7201 7199 7202 7200 if (newsk->sk_flags & SK_FLAGS_TIMESTAMP) 7203 7201 net_enable_timestamp(); 7202 + 7203 + security_sk_clone(sk, newsk); 7204 7204 } 7205 7205 7206 7206 static inline void sctp_copy_descendant(struct sock *sk_to,
+1
net/socket.c
··· 257 257 } 258 258 init_waitqueue_head(&wq->wait); 259 259 wq->fasync_list = NULL; 260 + wq->flags = 0; 260 261 RCU_INIT_POINTER(ei->socket.wq, wq); 261 262 262 263 ei->socket.state = SS_UNCONNECTED;
-38
net/xfrm/xfrm_policy.c
··· 2826 2826 2827 2827 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) 2828 2828 { 2829 - struct net *net; 2830 2829 int err = 0; 2831 2830 if (unlikely(afinfo == NULL)) 2832 2831 return -EINVAL; ··· 2855 2856 rcu_assign_pointer(xfrm_policy_afinfo[afinfo->family], afinfo); 2856 2857 } 2857 2858 spin_unlock(&xfrm_policy_afinfo_lock); 2858 - 2859 - rtnl_lock(); 2860 - for_each_net(net) { 2861 - struct dst_ops *xfrm_dst_ops; 2862 - 2863 - switch (afinfo->family) { 2864 - case AF_INET: 2865 - xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops; 2866 - break; 2867 - #if IS_ENABLED(CONFIG_IPV6) 2868 - case AF_INET6: 2869 - xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops; 2870 - break; 2871 - #endif 2872 - default: 2873 - BUG(); 2874 - } 2875 - *xfrm_dst_ops = *afinfo->dst_ops; 2876 - } 2877 - rtnl_unlock(); 2878 2859 2879 2860 return err; 2880 2861 } ··· 2890 2911 return err; 2891 2912 } 2892 2913 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo); 2893 - 2894 - static void __net_init xfrm_dst_ops_init(struct net *net) 2895 - { 2896 - struct xfrm_policy_afinfo *afinfo; 2897 - 2898 - rcu_read_lock(); 2899 - afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET]); 2900 - if (afinfo) 2901 - net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops; 2902 - #if IS_ENABLED(CONFIG_IPV6) 2903 - afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET6]); 2904 - if (afinfo) 2905 - net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops; 2906 - #endif 2907 - rcu_read_unlock(); 2908 - } 2909 2914 2910 2915 static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr) 2911 2916 { ··· 3039 3076 rv = xfrm_policy_init(net); 3040 3077 if (rv < 0) 3041 3078 goto out_policy; 3042 - xfrm_dst_ops_init(net); 3043 3079 rv = xfrm_sysctl_init(net); 3044 3080 if (rv < 0) 3045 3081 goto out_sysctl;
+113 -24
scripts/recordmcount.c
··· 48 48 49 49 static int fd_map; /* File descriptor for file being modified. */ 50 50 static int mmap_failed; /* Boolean flag. */ 51 - static void *ehdr_curr; /* current ElfXX_Ehdr * for resource cleanup */ 52 51 static char gpfx; /* prefix for global symbol name (sometimes '_') */ 53 52 static struct stat sb; /* Remember .st_size, etc. */ 54 53 static jmp_buf jmpenv; /* setjmp/longjmp per-file error escape */ 55 54 static const char *altmcount; /* alternate mcount symbol name */ 56 55 static int warn_on_notrace_sect; /* warn when section has mcount not being recorded */ 56 + static void *file_map; /* pointer of the mapped file */ 57 + static void *file_end; /* pointer to the end of the mapped file */ 58 + static int file_updated; /* flag to state file was changed */ 59 + static void *file_ptr; /* current file pointer location */ 60 + static void *file_append; /* added to the end of the file */ 61 + static size_t file_append_size; /* how much is added to end of file */ 57 62 58 63 /* setjmp() return values */ 59 64 enum { ··· 72 67 cleanup(void) 73 68 { 74 69 if (!mmap_failed) 75 - munmap(ehdr_curr, sb.st_size); 70 + munmap(file_map, sb.st_size); 76 71 else 77 - free(ehdr_curr); 78 - close(fd_map); 72 + free(file_map); 73 + file_map = NULL; 74 + free(file_append); 75 + file_append = NULL; 76 + file_append_size = 0; 77 + file_updated = 0; 79 78 } 80 79 81 80 static void __attribute__((noreturn)) ··· 101 92 static off_t 102 93 ulseek(int const fd, off_t const offset, int const whence) 103 94 { 104 - off_t const w = lseek(fd, offset, whence); 105 - if (w == (off_t)-1) { 106 - perror("lseek"); 95 + switch (whence) { 96 + case SEEK_SET: 97 + file_ptr = file_map + offset; 98 + break; 99 + case SEEK_CUR: 100 + file_ptr += offset; 101 + break; 102 + case SEEK_END: 103 + file_ptr = file_map + (sb.st_size - offset); 104 + break; 105 + } 106 + if (file_ptr < file_map) { 107 + fprintf(stderr, "lseek: seek before file\n"); 107 108 fail_file(); 108 109 } 109 - return w; 110 + 
return file_ptr - file_map; 110 111 } 111 112 112 113 static size_t ··· 133 114 static size_t 134 115 uwrite(int const fd, void const *const buf, size_t const count) 135 116 { 136 - size_t const n = write(fd, buf, count); 137 - if (n != count) { 138 - perror("write"); 139 - fail_file(); 117 + size_t cnt = count; 118 + off_t idx = 0; 119 + 120 + file_updated = 1; 121 + 122 + if (file_ptr + count >= file_end) { 123 + off_t aoffset = (file_ptr + count) - file_end; 124 + 125 + if (aoffset > file_append_size) { 126 + file_append = realloc(file_append, aoffset); 127 + file_append_size = aoffset; 128 + } 129 + if (!file_append) { 130 + perror("write"); 131 + fail_file(); 132 + } 133 + if (file_ptr < file_end) { 134 + cnt = file_end - file_ptr; 135 + } else { 136 + cnt = 0; 137 + idx = aoffset - count; 138 + } 140 139 } 141 - return n; 140 + 141 + if (cnt) 142 + memcpy(file_ptr, buf, cnt); 143 + 144 + if (cnt < count) 145 + memcpy(file_append + idx, buf + cnt, count - cnt); 146 + 147 + file_ptr += count; 148 + return count; 142 149 } 143 150 144 151 static void * ··· 237 192 */ 238 193 static void *mmap_file(char const *fname) 239 194 { 240 - void *addr; 241 - 242 - fd_map = open(fname, O_RDWR); 195 + fd_map = open(fname, O_RDONLY); 243 196 if (fd_map < 0 || fstat(fd_map, &sb) < 0) { 244 197 perror(fname); 245 198 fail_file(); ··· 246 203 fprintf(stderr, "not a regular file: %s\n", fname); 247 204 fail_file(); 248 205 } 249 - addr = mmap(0, sb.st_size, PROT_READ|PROT_WRITE, MAP_PRIVATE, 250 - fd_map, 0); 206 + file_map = mmap(0, sb.st_size, PROT_READ|PROT_WRITE, MAP_PRIVATE, 207 + fd_map, 0); 251 208 mmap_failed = 0; 252 - if (addr == MAP_FAILED) { 209 + if (file_map == MAP_FAILED) { 253 210 mmap_failed = 1; 254 - addr = umalloc(sb.st_size); 255 - uread(fd_map, addr, sb.st_size); 211 + file_map = umalloc(sb.st_size); 212 + uread(fd_map, file_map, sb.st_size); 256 213 } 257 - return addr; 214 + close(fd_map); 215 + 216 + file_end = file_map + sb.st_size; 217 + 218 + return 
file_map; 219 + } 220 + 221 + static void write_file(const char *fname) 222 + { 223 + char tmp_file[strlen(fname) + 4]; 224 + size_t n; 225 + 226 + if (!file_updated) 227 + return; 228 + 229 + sprintf(tmp_file, "%s.rc", fname); 230 + 231 + /* 232 + * After reading the entire file into memory, delete it 233 + * and write it back, to prevent weird side effects of modifying 234 + * an object file in place. 235 + */ 236 + fd_map = open(tmp_file, O_WRONLY | O_TRUNC | O_CREAT, sb.st_mode); 237 + if (fd_map < 0) { 238 + perror(fname); 239 + fail_file(); 240 + } 241 + n = write(fd_map, file_map, sb.st_size); 242 + if (n != sb.st_size) { 243 + perror("write"); 244 + fail_file(); 245 + } 246 + if (file_append_size) { 247 + n = write(fd_map, file_append, file_append_size); 248 + if (n != file_append_size) { 249 + perror("write"); 250 + fail_file(); 251 + } 252 + } 253 + close(fd_map); 254 + if (rename(tmp_file, fname) < 0) { 255 + perror(fname); 256 + fail_file(); 257 + } 258 258 } 259 259 260 260 /* w8rev, w8nat, ...: Handle endianness. */ ··· 404 318 Elf32_Ehdr *const ehdr = mmap_file(fname); 405 319 unsigned int reltype = 0; 406 320 407 - ehdr_curr = ehdr; 408 321 w = w4nat; 409 322 w2 = w2nat; 410 323 w8 = w8nat; ··· 526 441 } 527 442 } /* end switch */ 528 443 444 + write_file(fname); 529 445 cleanup(); 530 446 } 531 447 ··· 579 493 case SJ_SETJMP: /* normal sequence */ 580 494 /* Avoid problems if early cleanup() */ 581 495 fd_map = -1; 582 - ehdr_curr = NULL; 583 496 mmap_failed = 1; 497 + file_map = NULL; 498 + file_ptr = NULL; 499 + file_updated = 0; 584 500 do_file(file); 585 501 break; 586 502 case SJ_FAIL: /* error in do_file or below */ 503 + sprintf("%s: failed\n", file); 587 504 ++n_error; 588 505 break; 589 506 case SJ_SUCCEED: /* premature success */
+9 -9
security/keys/keyctl.c
··· 751 751 752 752 /* the key is probably readable - now try to read it */ 753 753 can_read_key: 754 - ret = key_validate(key); 755 - if (ret == 0) { 756 - ret = -EOPNOTSUPP; 757 - if (key->type->read) { 758 - /* read the data with the semaphore held (since we 759 - * might sleep) */ 760 - down_read(&key->sem); 754 + ret = -EOPNOTSUPP; 755 + if (key->type->read) { 756 + /* Read the data with the semaphore held (since we might sleep) 757 + * to protect against the key being updated or revoked. 758 + */ 759 + down_read(&key->sem); 760 + ret = key_validate(key); 761 + if (ret == 0) 761 762 ret = key->type->read(key, buffer, buflen); 762 - up_read(&key->sem); 763 - } 763 + up_read(&key->sem); 764 764 } 765 765 766 766 error2:
+34
sound/pci/hda/hda_intel.c
··· 954 954 } 955 955 #endif /* CONFIG_PM_SLEEP || SUPPORT_VGA_SWITCHEROO */ 956 956 957 + #ifdef CONFIG_PM_SLEEP 958 + /* put codec down to D3 at hibernation for Intel SKL+; 959 + * otherwise BIOS may still access the codec and screw up the driver 960 + */ 961 + #define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170) 962 + #define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70) 963 + #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98) 964 + #define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci)) 965 + 966 + static int azx_freeze_noirq(struct device *dev) 967 + { 968 + struct pci_dev *pci = to_pci_dev(dev); 969 + 970 + if (IS_SKL_PLUS(pci)) 971 + pci_set_power_state(pci, PCI_D3hot); 972 + 973 + return 0; 974 + } 975 + 976 + static int azx_thaw_noirq(struct device *dev) 977 + { 978 + struct pci_dev *pci = to_pci_dev(dev); 979 + 980 + if (IS_SKL_PLUS(pci)) 981 + pci_set_power_state(pci, PCI_D0); 982 + 983 + return 0; 984 + } 985 + #endif /* CONFIG_PM_SLEEP */ 986 + 957 987 #ifdef CONFIG_PM 958 988 static int azx_runtime_suspend(struct device *dev) 959 989 { ··· 1093 1063 1094 1064 static const struct dev_pm_ops azx_pm = { 1095 1065 SET_SYSTEM_SLEEP_PM_OPS(azx_suspend, azx_resume) 1066 + #ifdef CONFIG_PM_SLEEP 1067 + .freeze_noirq = azx_freeze_noirq, 1068 + .thaw_noirq = azx_thaw_noirq, 1069 + #endif 1096 1070 SET_RUNTIME_PM_OPS(azx_runtime_suspend, azx_runtime_resume, azx_runtime_idle) 1097 1071 }; 1098 1072
+46 -21
sound/pci/hda/patch_realtek.c
··· 111 111 void (*power_hook)(struct hda_codec *codec); 112 112 #endif 113 113 void (*shutup)(struct hda_codec *codec); 114 + void (*reboot_notify)(struct hda_codec *codec); 114 115 115 116 int init_amp; 116 117 int codec_variant; /* flag for other variants */ ··· 774 773 snd_hda_shutup_pins(codec); 775 774 } 776 775 776 + static void alc_reboot_notify(struct hda_codec *codec) 777 + { 778 + struct alc_spec *spec = codec->spec; 779 + 780 + if (spec && spec->reboot_notify) 781 + spec->reboot_notify(codec); 782 + else 783 + alc_shutup(codec); 784 + } 785 + 786 + /* power down codec to D3 at reboot/shutdown; set as reboot_notify ops */ 787 + static void alc_d3_at_reboot(struct hda_codec *codec) 788 + { 789 + snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3); 790 + snd_hda_codec_write(codec, codec->core.afg, 0, 791 + AC_VERB_SET_POWER_STATE, AC_PWRST_D3); 792 + msleep(10); 793 + } 794 + 777 795 #define alc_free snd_hda_gen_free 778 796 779 797 #ifdef CONFIG_PM ··· 838 818 .suspend = alc_suspend, 839 819 .check_power_status = snd_hda_gen_check_power_status, 840 820 #endif 841 - .reboot_notify = alc_shutup, 821 + .reboot_notify = alc_reboot_notify, 842 822 }; 843 823 844 824 ··· 1775 1755 ALC889_FIXUP_MBA11_VREF, 1776 1756 ALC889_FIXUP_MBA21_VREF, 1777 1757 ALC889_FIXUP_MP11_VREF, 1758 + ALC889_FIXUP_MP41_VREF, 1778 1759 ALC882_FIXUP_INV_DMIC, 1779 1760 ALC882_FIXUP_NO_PRIMARY_HP, 1780 1761 ALC887_FIXUP_ASUS_BASS, ··· 1864 1843 const struct hda_fixup *fix, int action) 1865 1844 { 1866 1845 struct alc_spec *spec = codec->spec; 1867 - static hda_nid_t nids[2] = { 0x14, 0x15 }; 1846 + static hda_nid_t nids[3] = { 0x14, 0x15, 0x19 }; 1868 1847 int i; 1869 1848 1870 1849 if (action != HDA_FIXUP_ACT_INIT) ··· 2154 2133 .chained = true, 2155 2134 .chain_id = ALC885_FIXUP_MACPRO_GPIO, 2156 2135 }, 2136 + [ALC889_FIXUP_MP41_VREF] = { 2137 + .type = HDA_FIXUP_FUNC, 2138 + .v.func = alc889_fixup_mbp_vref, 2139 + .chained = true, 2140 + .chain_id = 
ALC885_FIXUP_MACPRO_GPIO, 2141 + }, 2157 2142 [ALC882_FIXUP_INV_DMIC] = { 2158 2143 .type = HDA_FIXUP_FUNC, 2159 2144 .v.func = alc_fixup_inv_dmic, ··· 2242 2215 SND_PCI_QUIRK(0x106b, 0x3f00, "Macbook 5,1", ALC889_FIXUP_IMAC91_VREF), 2243 2216 SND_PCI_QUIRK(0x106b, 0x4000, "MacbookPro 5,1", ALC889_FIXUP_IMAC91_VREF), 2244 2217 SND_PCI_QUIRK(0x106b, 0x4100, "Macmini 3,1", ALC889_FIXUP_IMAC91_VREF), 2245 - SND_PCI_QUIRK(0x106b, 0x4200, "Mac Pro 5,1", ALC885_FIXUP_MACPRO_GPIO), 2218 + SND_PCI_QUIRK(0x106b, 0x4200, "Mac Pro 4,1/5,1", ALC889_FIXUP_MP41_VREF), 2246 2219 SND_PCI_QUIRK(0x106b, 0x4300, "iMac 9,1", ALC889_FIXUP_IMAC91_VREF), 2247 2220 SND_PCI_QUIRK(0x106b, 0x4600, "MacbookPro 5,2", ALC889_FIXUP_IMAC91_VREF), 2248 2221 SND_PCI_QUIRK(0x106b, 0x4900, "iMac 9,1 Aluminum", ALC889_FIXUP_IMAC91_VREF), ··· 4225 4198 struct alc_spec *spec = codec->spec; 4226 4199 4227 4200 if (action == HDA_FIXUP_ACT_PRE_PROBE) { 4201 + spec->shutup = alc_no_shutup; /* reduce click noise */ 4202 + spec->reboot_notify = alc_d3_at_reboot; /* reduce noise */ 4228 4203 spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP; 4229 4204 codec->power_save_node = 0; /* avoid click noises */ 4230 4205 snd_hda_apply_pincfgs(codec, pincfgs); 4231 - } 4232 - } 4233 - 4234 - /* additional fixup for Thinkpad T440s noise problem */ 4235 - static void alc_fixup_tpt440(struct hda_codec *codec, 4236 - const struct hda_fixup *fix, int action) 4237 - { 4238 - struct alc_spec *spec = codec->spec; 4239 - 4240 - if (action == HDA_FIXUP_ACT_PRE_PROBE) { 4241 - spec->shutup = alc_no_shutup; /* reduce click noise */ 4242 - spec->gen.mixer_nid = 0; /* reduce background noise */ 4243 4206 } 4244 4207 } 4245 4208 ··· 4623 4606 ALC288_FIXUP_DISABLE_AAMIX, 4624 4607 ALC292_FIXUP_DELL_E7X, 4625 4608 ALC292_FIXUP_DISABLE_AAMIX, 4609 + ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK, 4626 4610 ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, 4627 4611 ALC275_FIXUP_DELL_XPS, 4628 4612 ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE, ··· 5084 5066 }, 5085 5067 
[ALC292_FIXUP_TPT440] = { 5086 5068 .type = HDA_FIXUP_FUNC, 5087 - .v.func = alc_fixup_tpt440, 5069 + .v.func = alc_fixup_disable_aamix, 5088 5070 .chained = true, 5089 5071 .chain_id = ALC292_FIXUP_TPT440_DOCK, 5090 5072 }, ··· 5187 5169 .chained = true, 5188 5170 .chain_id = ALC269_FIXUP_DELL2_MIC_NO_PRESENCE 5189 5171 }, 5172 + [ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK] = { 5173 + .type = HDA_FIXUP_FUNC, 5174 + .v.func = alc_fixup_disable_aamix, 5175 + .chained = true, 5176 + .chain_id = ALC293_FIXUP_DELL1_MIC_NO_PRESENCE 5177 + }, 5190 5178 [ALC292_FIXUP_DELL_E7X] = { 5191 5179 .type = HDA_FIXUP_FUNC, 5192 5180 .v.func = alc_fixup_dell_xps13, ··· 5271 5247 SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 5272 5248 SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5273 5249 SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5274 - SND_PCI_QUIRK(0x1028, 0x06db, "Dell", ALC292_FIXUP_DISABLE_AAMIX), 5275 - SND_PCI_QUIRK(0x1028, 0x06dd, "Dell", ALC292_FIXUP_DISABLE_AAMIX), 5276 - SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX), 5277 - SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC292_FIXUP_DISABLE_AAMIX), 5278 - SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC292_FIXUP_DISABLE_AAMIX), 5250 + SND_PCI_QUIRK(0x1028, 0x06db, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK), 5251 + SND_PCI_QUIRK(0x1028, 0x06dd, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK), 5252 + SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK), 5253 + SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK), 5254 + SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK), 5279 5255 SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), 5280 5256 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5281 5257 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), ··· 5382 5358 
SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK), 5383 5359 SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK), 5384 5360 SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 5361 + SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK), 5385 5362 SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK), 5386 5363 SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK), 5387 5364 SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
+17 -8
sound/soc/codecs/es8328.c
··· 85 85 static const DECLARE_TLV_DB_SCALE(bypass_tlv, -1500, 300, 0); 86 86 static const DECLARE_TLV_DB_SCALE(mic_tlv, 0, 300, 0); 87 87 88 - static const int deemph_settings[] = { 0, 32000, 44100, 48000 }; 88 + static const struct { 89 + int rate; 90 + unsigned int val; 91 + } deemph_settings[] = { 92 + { 0, ES8328_DACCONTROL6_DEEMPH_OFF }, 93 + { 32000, ES8328_DACCONTROL6_DEEMPH_32k }, 94 + { 44100, ES8328_DACCONTROL6_DEEMPH_44_1k }, 95 + { 48000, ES8328_DACCONTROL6_DEEMPH_48k }, 96 + }; 89 97 90 98 static int es8328_set_deemph(struct snd_soc_codec *codec) 91 99 { ··· 105 97 * rate. 106 98 */ 107 99 if (es8328->deemph) { 108 - best = 1; 109 - for (i = 2; i < ARRAY_SIZE(deemph_settings); i++) { 110 - if (abs(deemph_settings[i] - es8328->playback_fs) < 111 - abs(deemph_settings[best] - es8328->playback_fs)) 100 + best = 0; 101 + for (i = 1; i < ARRAY_SIZE(deemph_settings); i++) { 102 + if (abs(deemph_settings[i].rate - es8328->playback_fs) < 103 + abs(deemph_settings[best].rate - es8328->playback_fs)) 112 104 best = i; 113 105 } 114 106 115 - val = best << 1; 107 + val = deemph_settings[best].val; 116 108 } else { 117 - val = 0; 109 + val = ES8328_DACCONTROL6_DEEMPH_OFF; 118 110 } 119 111 120 112 dev_dbg(codec->dev, "Set deemphasis %d\n", val); 121 113 122 - return snd_soc_update_bits(codec, ES8328_DACCONTROL6, 0x6, val); 114 + return snd_soc_update_bits(codec, ES8328_DACCONTROL6, 115 + ES8328_DACCONTROL6_DEEMPH_MASK, val); 123 116 } 124 117 125 118 static int es8328_get_deemph(struct snd_kcontrol *kcontrol,
+1
sound/soc/codecs/es8328.h
··· 153 153 #define ES8328_DACCONTROL6_CLICKFREE (1 << 3) 154 154 #define ES8328_DACCONTROL6_DAC_INVR (1 << 4) 155 155 #define ES8328_DACCONTROL6_DAC_INVL (1 << 5) 156 + #define ES8328_DACCONTROL6_DEEMPH_MASK (3 << 6) 156 157 #define ES8328_DACCONTROL6_DEEMPH_OFF (0 << 6) 157 158 #define ES8328_DACCONTROL6_DEEMPH_32k (1 << 6) 158 159 #define ES8328_DACCONTROL6_DEEMPH_44_1k (2 << 6)
+1
sound/soc/codecs/sgtl5000.c
··· 189 189 case SND_SOC_DAPM_POST_PMU: 190 190 snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER, 191 191 SGTL5000_VAG_POWERUP, SGTL5000_VAG_POWERUP); 192 + msleep(400); 192 193 break; 193 194 194 195 case SND_SOC_DAPM_PRE_PMD:
+1
sound/soc/codecs/wm8974.c
··· 574 574 .max_register = WM8974_MONOMIX, 575 575 .reg_defaults = wm8974_reg_defaults, 576 576 .num_reg_defaults = ARRAY_SIZE(wm8974_reg_defaults), 577 + .cache_type = REGCACHE_FLAT, 577 578 }; 578 579 579 580 static int wm8974_probe(struct snd_soc_codec *codec)
+2 -2
sound/soc/davinci/davinci-mcasp.c
··· 223 223 224 224 /* wait for XDATA to be cleared */ 225 225 cnt = 0; 226 - while (!(mcasp_get_reg(mcasp, DAVINCI_MCASP_TXSTAT_REG) & 227 - ~XRDATA) && (cnt < 100000)) 226 + while ((mcasp_get_reg(mcasp, DAVINCI_MCASP_TXSTAT_REG) & XRDATA) && 227 + (cnt < 100000)) 228 228 cnt++; 229 229 230 230 /* Release TX state machine */
+18
sound/soc/fsl/fsl_sai.c
··· 505 505 FSL_SAI_CSR_FR, FSL_SAI_CSR_FR); 506 506 regmap_update_bits(sai->regmap, FSL_SAI_RCSR, 507 507 FSL_SAI_CSR_FR, FSL_SAI_CSR_FR); 508 + 509 + /* 510 + * For sai master mode, after several open/close sai, 511 + * there will be no frame clock, and can't recover 512 + * anymore. Add software reset to fix this issue. 513 + * This is a hardware bug, and will be fix in the 514 + * next sai version. 515 + */ 516 + if (!sai->is_slave_mode) { 517 + /* Software Reset for both Tx and Rx */ 518 + regmap_write(sai->regmap, 519 + FSL_SAI_TCSR, FSL_SAI_CSR_SR); 520 + regmap_write(sai->regmap, 521 + FSL_SAI_RCSR, FSL_SAI_CSR_SR); 522 + /* Clear SR bit to finish the reset */ 523 + regmap_write(sai->regmap, FSL_SAI_TCSR, 0); 524 + regmap_write(sai->regmap, FSL_SAI_RCSR, 0); 525 + } 508 526 } 509 527 break; 510 528 default:
+4 -2
sound/soc/rockchip/rockchip_spdif.c
··· 152 152 case SNDRV_PCM_TRIGGER_RESUME: 153 153 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: 154 154 ret = regmap_update_bits(spdif->regmap, SPDIF_DMACR, 155 - SPDIF_DMACR_TDE_ENABLE, 156 - SPDIF_DMACR_TDE_ENABLE); 155 + SPDIF_DMACR_TDE_ENABLE | 156 + SPDIF_DMACR_TDL_MASK, 157 + SPDIF_DMACR_TDE_ENABLE | 158 + SPDIF_DMACR_TDL(16)); 157 159 158 160 if (ret != 0) 159 161 return ret;
+1 -1
sound/soc/rockchip/rockchip_spdif.h
··· 42 42 43 43 #define SPDIF_DMACR_TDL_SHIFT 0 44 44 #define SPDIF_DMACR_TDL(x) ((x) << SPDIF_DMACR_TDL_SHIFT) 45 - #define SPDIF_DMACR_TDL_MASK (0x1f << SDPIF_DMACR_TDL_SHIFT) 45 + #define SPDIF_DMACR_TDL_MASK (0x1f << SPDIF_DMACR_TDL_SHIFT) 46 46 47 47 /* 48 48 * XFER
+2
sound/usb/mixer.c
··· 1354 1354 } 1355 1355 } 1356 1356 1357 + snd_usb_mixer_fu_apply_quirk(state->mixer, cval, unitid, kctl); 1358 + 1357 1359 range = (cval->max - cval->min) / cval->res; 1358 1360 /* 1359 1361 * Are there devices with volume range more than 255? I use a bit more
-12
sound/usb/mixer_maps.c
··· 348 348 { 0 } /* terminator */ 349 349 }; 350 350 351 - /* Dragonfly DAC 1.2, the dB conversion factor is 1 instead of 256 */ 352 - static struct usbmix_dB_map dragonfly_1_2_dB = {0, 5000}; 353 - static struct usbmix_name_map dragonfly_1_2_map[] = { 354 - { 7, NULL, .dB = &dragonfly_1_2_dB }, 355 - { 0 } /* terminator */ 356 - }; 357 - 358 351 /* 359 352 * Control map entries 360 353 */ ··· 462 469 /* Bose Companion 5 */ 463 470 .id = USB_ID(0x05a7, 0x1020), 464 471 .map = bose_companion5_map, 465 - }, 466 - { 467 - /* Dragonfly DAC 1.2 */ 468 - .id = USB_ID(0x21b4, 0x0081), 469 - .map = dragonfly_1_2_map, 470 472 }, 471 473 { 0 } /* terminator */ 472 474 };
+37
sound/usb/mixer_quirks.c
··· 37 37 #include <sound/control.h> 38 38 #include <sound/hwdep.h> 39 39 #include <sound/info.h> 40 + #include <sound/tlv.h> 40 41 41 42 #include "usbaudio.h" 42 43 #include "mixer.h" ··· 1822 1821 break; 1823 1822 default: 1824 1823 usb_audio_dbg(mixer->chip, "memory change in unknown unit %d\n", unitid); 1824 + break; 1825 + } 1826 + } 1827 + 1828 + static void snd_dragonfly_quirk_db_scale(struct usb_mixer_interface *mixer, 1829 + struct snd_kcontrol *kctl) 1830 + { 1831 + /* Approximation using 10 ranges based on output measurement on hw v1.2. 1832 + * This seems close to the cubic mapping e.g. alsamixer uses. */ 1833 + static const DECLARE_TLV_DB_RANGE(scale, 1834 + 0, 1, TLV_DB_MINMAX_ITEM(-5300, -4970), 1835 + 2, 5, TLV_DB_MINMAX_ITEM(-4710, -4160), 1836 + 6, 7, TLV_DB_MINMAX_ITEM(-3884, -3710), 1837 + 8, 14, TLV_DB_MINMAX_ITEM(-3443, -2560), 1838 + 15, 16, TLV_DB_MINMAX_ITEM(-2475, -2324), 1839 + 17, 19, TLV_DB_MINMAX_ITEM(-2228, -2031), 1840 + 20, 26, TLV_DB_MINMAX_ITEM(-1910, -1393), 1841 + 27, 31, TLV_DB_MINMAX_ITEM(-1322, -1032), 1842 + 32, 40, TLV_DB_MINMAX_ITEM(-968, -490), 1843 + 41, 50, TLV_DB_MINMAX_ITEM(-441, 0), 1844 + ); 1845 + 1846 + usb_audio_info(mixer->chip, "applying DragonFly dB scale quirk\n"); 1847 + kctl->tlv.p = scale; 1848 + kctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_TLV_READ; 1849 + kctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK; 1850 + } 1851 + 1852 + void snd_usb_mixer_fu_apply_quirk(struct usb_mixer_interface *mixer, 1853 + struct usb_mixer_elem_info *cval, int unitid, 1854 + struct snd_kcontrol *kctl) 1855 + { 1856 + switch (mixer->chip->usb_id) { 1857 + case USB_ID(0x21b4, 0x0081): /* AudioQuest DragonFly */ 1858 + if (unitid == 7 && cval->min == 0 && cval->max == 50) 1859 + snd_dragonfly_quirk_db_scale(mixer, kctl); 1825 1860 break; 1826 1861 } 1827 1862 }
+4
sound/usb/mixer_quirks.h
··· 9 9 void snd_usb_mixer_rc_memory_change(struct usb_mixer_interface *mixer, 10 10 int unitid); 11 11 12 + void snd_usb_mixer_fu_apply_quirk(struct usb_mixer_interface *mixer, 13 + struct usb_mixer_elem_info *cval, int unitid, 14 + struct snd_kcontrol *kctl); 15 + 12 16 #endif /* SND_USB_MIXER_QUIRKS_H */ 13 17
+1
sound/usb/quirks.c
··· 1125 1125 case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */ 1126 1126 case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */ 1127 1127 case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */ 1128 + case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */ 1128 1129 return true; 1129 1130 } 1130 1131 return false;
+1 -1
virt/kvm/arm/vgic.c
··· 1114 1114 return true; 1115 1115 } 1116 1116 1117 - return dist_active_irq(vcpu); 1117 + return vgic_irq_is_active(vcpu, map->virt_irq); 1118 1118 } 1119 1119 1120 1120 /*