Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Two entries were being added at the same time to the IFLA
policy table, whilst parallel bug fixes to decnet
routing dst handling overlapped with the dst gc removal
in net-next.

Signed-off-by: David S. Miller <davem@davemloft.net>

+2106 -1707
+7
Documentation/admin-guide/kernel-parameters.txt
··· 3811 3811 expediting. Set to zero to disable automatic 3812 3812 expediting. 3813 3813 3814 + stack_guard_gap= [MM] 3815 + override the default stack gap protection. The value 3816 + is in page units and it defines how many pages prior 3817 + to (for stacks growing down) resp. after (for stacks 3818 + growing up) the main stack are reserved for no other 3819 + mapping. Default value is 256 pages. 3820 + 3814 3821 stacktrace [FTRACE] 3815 3822 Enabled the stack tracer on boot up. 3816 3823
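As a worked example of the new parameter (the value is in page units, per the text above; the byte figures assume 4 KiB pages): the default of 256 pages keeps a 1 MiB hole next to the stack, while booting with

    stack_guard_gap=1

restores the single-page gap that earlier kernels used.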
+4 -3
Documentation/devicetree/bindings/clock/sunxi-ccu.txt
··· 22 22 - #clock-cells : must contain 1 23 23 - #reset-cells : must contain 1 24 24 25 - For the PRCM CCUs on H3/A64, one more clock is needed: 25 + For the PRCM CCUs on H3/A64, two more clocks are needed: 26 + - "pll-periph": the SoC's peripheral PLL from the main CCU 26 27 - "iosc": the SoC's internal frequency oscillator 27 28 28 29 Example for generic CCU: ··· 40 39 r_ccu: clock@01f01400 { 41 40 compatible = "allwinner,sun50i-a64-r-ccu"; 42 41 reg = <0x01f01400 0x100>; 43 - clocks = <&osc24M>, <&osc32k>, <&iosc>; 44 - clock-names = "hosc", "losc", "iosc"; 42 + clocks = <&osc24M>, <&osc32k>, <&iosc>, <&ccu CLK_PLL_PERIPH0>; 43 + clock-names = "hosc", "losc", "iosc", "pll-periph"; 45 44 #clock-cells = <1>; 46 45 #reset-cells = <1>; 47 46 };
+1 -1
Documentation/devicetree/bindings/net/dsa/b53.txt
··· 37 37 "brcm,bcm6328-switch" 38 38 "brcm,bcm6368-switch" and the mandatory "brcm,bcm63xx-switch" 39 39 40 - See Documentation/devicetree/bindings/dsa/dsa.txt for a list of additional 40 + See Documentation/devicetree/bindings/net/dsa/dsa.txt for a list of additional 41 41 required and optional properties. 42 42 43 43 Examples:
+1
Documentation/devicetree/bindings/net/smsc911x.txt
··· 27 27 of the device. On many systems this is wired high so the device goes 28 28 out of reset at power-on, but if it is under program control, this 29 29 optional GPIO can wake up in response to it. 30 + - vdd33a-supply, vddvario-supply : 3.3V analog and IO logic power supplies 30 31 31 32 Examples: 32 33
+1 -1
Makefile
··· 1 1 VERSION = 4 2 2 PATCHLEVEL = 12 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc5 4 + EXTRAVERSION = -rc6 5 5 NAME = Fearless Coyote 6 6 7 7 # *DOCUMENTATION*
+1 -1
arch/arc/mm/mmap.c
··· 65 65 66 66 vma = find_vma(mm, addr); 67 67 if (TASK_SIZE - len >= addr && 68 - (!vma || addr + len <= vma->vm_start)) 68 + (!vma || addr + len <= vm_start_gap(vma))) 69 69 return addr; 70 70 } 71 71
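This one-line pattern repeats across the arch mmap and hugetlb files below: the free-hole check now compares against vm_start_gap(vma) (and, where a previous VMA matters, vm_end_gap(prev)) so the stack guard gap counts as occupied space. A condensed sketch of the helpers this series adds to include/linux/mm.h, with the clamping commented (assumption: stack_guard_gap defaults to 256 pages, matching the documentation hunk above):

    extern unsigned long stack_guard_gap;   /* bytes; default 256 << PAGE_SHIFT */

    static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
    {
            unsigned long vm_start = vma->vm_start;

            if (vma->vm_flags & VM_GROWSDOWN) {
                    vm_start -= stack_guard_gap;
                    if (vm_start > vma->vm_start)   /* clamp on underflow */
                            vm_start = 0;
            }
            return vm_start;
    }

    static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
    {
            unsigned long vm_end = vma->vm_end;

            if (vma->vm_flags & VM_GROWSUP) {
                    vm_end += stack_guard_gap;
                    if (vm_end < vma->vm_end)       /* clamp on overflow */
                            vm_end = -PAGE_SIZE;
            }
            return vm_end;
    }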
+2 -6
arch/arm/boot/dts/am335x-sl50.dts
··· 220 220 221 221 mmc1_pins: pinmux_mmc1_pins { 222 222 pinctrl-single,pins = < 223 - AM33XX_IOPAD(0x960, PIN_INPUT | MUX_MODE7) /* spi0_cs1.gpio0_6 */ 223 + AM33XX_IOPAD(0x96c, PIN_INPUT | MUX_MODE7) /* uart0_rtsn.gpio1_9 */ 224 224 >; 225 225 }; 226 226 ··· 280 280 AM33XX_IOPAD(0x834, PIN_INPUT_PULLUP | MUX_MODE7) /* nKbdReset - gpmc_ad13.gpio1_13 */ 281 281 AM33XX_IOPAD(0x838, PIN_INPUT_PULLUP | MUX_MODE7) /* nDispReset - gpmc_ad14.gpio1_14 */ 282 282 AM33XX_IOPAD(0x844, PIN_INPUT_PULLUP | MUX_MODE7) /* USB1_enPower - gpmc_a1.gpio1_17 */ 283 - /* AVR Programming - SPI Bus (bit bang) - Screen and Keyboard */ 284 - AM33XX_IOPAD(0x954, PIN_INPUT_PULLUP | MUX_MODE7) /* Kbd/Disp/BattMOSI spi0_d0.gpio0_3 */ 285 - AM33XX_IOPAD(0x958, PIN_INPUT_PULLUP | MUX_MODE7) /* Kbd/Disp/BattMISO spi0_d1.gpio0_4 */ 286 - AM33XX_IOPAD(0x950, PIN_INPUT_PULLUP | MUX_MODE7) /* Kbd/Disp/BattSCLK spi0_clk.gpio0_2 */ 287 283 /* PDI Bus - Battery system */ 288 284 AM33XX_IOPAD(0x840, PIN_INPUT_PULLUP | MUX_MODE7) /* nBattReset gpmc_a0.gpio1_16 */ 289 285 AM33XX_IOPAD(0x83c, PIN_INPUT_PULLUP | MUX_MODE7) /* BattPDIData gpmc_ad15.gpio1_15 */ ··· 380 384 pinctrl-names = "default"; 381 385 pinctrl-0 = <&mmc1_pins>; 382 386 bus-width = <4>; 383 - cd-gpios = <&gpio0 6 GPIO_ACTIVE_LOW>; 387 + cd-gpios = <&gpio1 9 GPIO_ACTIVE_LOW>; 384 388 vmmc-supply = <&vmmcsd_fixed>; 385 389 }; 386 390
+4 -3
arch/arm/boot/dts/sunxi-h3-h5.dtsi
··· 598 598 }; 599 599 600 600 r_ccu: clock@1f01400 { 601 - compatible = "allwinner,sun50i-a64-r-ccu"; 601 + compatible = "allwinner,sun8i-h3-r-ccu"; 602 602 reg = <0x01f01400 0x100>; 603 - clocks = <&osc24M>, <&osc32k>, <&iosc>; 604 - clock-names = "hosc", "losc", "iosc"; 603 + clocks = <&osc24M>, <&osc32k>, <&iosc>, 604 + <&ccu 9>; 605 + clock-names = "hosc", "losc", "iosc", "pll-periph"; 605 606 #clock-cells = <1>; 606 607 #reset-cells = <1>; 607 608 };
+2 -2
arch/arm/mm/mmap.c
··· 90 90 91 91 vma = find_vma(mm, addr); 92 92 if (TASK_SIZE - len >= addr && 93 - (!vma || addr + len <= vma->vm_start)) 93 + (!vma || addr + len <= vm_start_gap(vma))) 94 94 return addr; 95 95 } 96 96 ··· 141 141 addr = PAGE_ALIGN(addr); 142 142 vma = find_vma(mm, addr); 143 143 if (TASK_SIZE - len >= addr && 144 - (!vma || addr + len <= vma->vm_start)) 144 + (!vma || addr + len <= vm_start_gap(vma))) 145 145 return addr; 146 146 } 147 147
+3 -2
arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
··· 447 447 r_ccu: clock@1f01400 { 448 448 compatible = "allwinner,sun50i-a64-r-ccu"; 449 449 reg = <0x01f01400 0x100>; 450 - clocks = <&osc24M>, <&osc32k>, <&iosc>; 451 - clock-names = "hosc", "losc", "iosc"; 450 + clocks = <&osc24M>, <&osc32k>, <&iosc>, 451 + <&ccu 11>; 452 + clock-names = "hosc", "losc", "iosc", "pll-periph"; 452 453 #clock-cells = <1>; 453 454 #reset-cells = <1>; 454 455 };
+1 -1
arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
··· 40 40 * OTHER DEALINGS IN THE SOFTWARE. 41 41 */ 42 42 43 - #include "sunxi-h3-h5.dtsi" 43 + #include <arm/sunxi-h3-h5.dtsi> 44 44 45 45 / { 46 46 cpus {
+1 -1
arch/frv/mm/elf-fdpic.c
··· 75 75 addr = PAGE_ALIGN(addr); 76 76 vma = find_vma(current->mm, addr); 77 77 if (TASK_SIZE - len >= addr && 78 - (!vma || addr + len <= vma->vm_start)) 78 + (!vma || addr + len <= vm_start_gap(vma))) 79 79 goto success; 80 80 } 81 81
+5 -5
arch/mips/boot/Makefile
··· 128 128 -DADDR_BITS=$(ADDR_BITS) \ 129 129 -DADDR_CELLS=$(itb_addr_cells) 130 130 131 - $(obj)/vmlinux.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE 131 + $(obj)/vmlinux.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE 132 132 $(call if_changed_dep,cpp_its_S,none,vmlinux.bin) 133 133 134 - $(obj)/vmlinux.gz.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE 134 + $(obj)/vmlinux.gz.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE 135 135 $(call if_changed_dep,cpp_its_S,gzip,vmlinux.bin.gz) 136 136 137 - $(obj)/vmlinux.bz2.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE 137 + $(obj)/vmlinux.bz2.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE 138 138 $(call if_changed_dep,cpp_its_S,bzip2,vmlinux.bin.bz2) 139 139 140 - $(obj)/vmlinux.lzma.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE 140 + $(obj)/vmlinux.lzma.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE 141 141 $(call if_changed_dep,cpp_its_S,lzma,vmlinux.bin.lzma) 142 142 143 - $(obj)/vmlinux.lzo.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE 143 + $(obj)/vmlinux.lzo.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE 144 144 $(call if_changed_dep,cpp_its_S,lzo,vmlinux.bin.lzo) 145 145 146 146 quiet_cmd_itb-image = ITB $@
+5
arch/mips/include/asm/highmem.h
··· 35 35 * easily, subsequent pte tables have to be allocated in one physical 36 36 * chunk of RAM. 37 37 */ 38 + #ifdef CONFIG_PHYS_ADDR_T_64BIT 39 + #define LAST_PKMAP 512 40 + #else 38 41 #define LAST_PKMAP 1024 42 + #endif 43 + 39 44 #define LAST_PKMAP_MASK (LAST_PKMAP-1) 40 45 #define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT) 41 46 #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
+2 -1
arch/mips/include/asm/kprobes.h
··· 43 43 44 44 #define flush_insn_slot(p) \ 45 45 do { \ 46 - flush_icache_range((unsigned long)p->addr, \ 46 + if (p->addr) \ 47 + flush_icache_range((unsigned long)p->addr, \ 47 48 (unsigned long)p->addr + \ 48 49 (MAX_INSN_SIZE * sizeof(kprobe_opcode_t))); \ 49 50 } while (0)
+6 -1
arch/mips/include/asm/pgtable-32.h
··· 19 19 #define __ARCH_USE_5LEVEL_HACK 20 20 #include <asm-generic/pgtable-nopmd.h> 21 21 22 + #ifdef CONFIG_HIGHMEM 23 + #include <asm/highmem.h> 24 + #endif 25 + 22 26 extern int temp_tlb_entry; 23 27 24 28 /* ··· 66 62 67 63 #define VMALLOC_START MAP_BASE 68 64 69 - #define PKMAP_BASE (0xfe000000UL) 65 + #define PKMAP_END ((FIXADDR_START) & ~((LAST_PKMAP << PAGE_SHIFT)-1)) 66 + #define PKMAP_BASE (PKMAP_END - PAGE_SIZE * LAST_PKMAP) 70 67 71 68 #ifdef CONFIG_HIGHMEM 72 69 # define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
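With 64-bit physical addresses each PTE doubles to 8 bytes, so only 512 pkmap entries fit in the single page table reserved for them (hence the LAST_PKMAP change in asm/highmem.h above), and the old hardcoded PKMAP_BASE of 0xfe000000 could collide with the fixmap. The new definitions derive the window from FIXADDR_START instead. A worked example, assuming 4 KiB pages, LAST_PKMAP = 512, and a hypothetical FIXADDR_START of 0xffe9d000:

    window size = LAST_PKMAP << PAGE_SHIFT = 512 * 4096 = 0x200000 (2 MiB)
    PKMAP_END   = 0xffe9d000 & ~0x1fffff   = 0xffe00000
    PKMAP_BASE  = 0xffe00000 - 0x200000    = 0xffc00000

so the pkmap area always lands on a window-aligned region just below the fixmap.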
+3 -1
arch/mips/kernel/branch.c
··· 804 804 break; 805 805 } 806 806 /* Compact branch: BNEZC || JIALC */ 807 - if (insn.i_format.rs) 807 + if (!insn.i_format.rs) { 808 + /* JIALC: set $31/ra */ 808 809 regs->regs[31] = epc + 4; 810 + } 809 811 regs->cp0_epc += 8; 810 812 break; 811 813 #endif
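The predicate was inverted: in the MIPSr6 encoding shared by these two compact branches, an all-zero rs field selects JIALC (which must write the link register), while a non-zero rs selects BNEZC (which must not). A condensed decode sketch, using the same i_format fields as above:

    if (insn.i_format.rs == 0) {
            /* JIALC rt, offset: target = GPR[rt] + offset, ra = epc + 4 */
    } else {
            /* BNEZC rs, offset: compact branch if GPR[rs] != 0, no link */
    }

The old test linked for BNEZC and skipped the link for JIALC, clobbering $ra in one case and losing the return address in the other.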
+5 -19
arch/mips/kernel/ftrace.c
··· 38 38 39 39 #endif 40 40 41 - /* 42 - * Check if the address is in kernel space 43 - * 44 - * Clone core_kernel_text() from kernel/extable.c, but doesn't call 45 - * init_kernel_text() for Ftrace doesn't trace functions in init sections. 46 - */ 47 - static inline int in_kernel_space(unsigned long ip) 48 - { 49 - if (ip >= (unsigned long)_stext && 50 - ip <= (unsigned long)_etext) 51 - return 1; 52 - return 0; 53 - } 54 - 55 41 #ifdef CONFIG_DYNAMIC_FTRACE 56 42 57 43 #define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */ ··· 184 198 * If ip is in kernel space, no long call, otherwise, long call is 185 199 * needed. 186 200 */ 187 - new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F; 201 + new = core_kernel_text(ip) ? INSN_NOP : INSN_B_1F; 188 202 #ifdef CONFIG_64BIT 189 203 return ftrace_modify_code(ip, new); 190 204 #else ··· 204 218 unsigned int new; 205 219 unsigned long ip = rec->ip; 206 220 207 - new = in_kernel_space(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0]; 221 + new = core_kernel_text(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0]; 208 222 209 223 #ifdef CONFIG_64BIT 210 224 return ftrace_modify_code(ip, new); 211 225 #else 212 - return ftrace_modify_code_2r(ip, new, in_kernel_space(ip) ? 226 + return ftrace_modify_code_2r(ip, new, core_kernel_text(ip) ? 213 227 INSN_NOP : insn_la_mcount[1]); 214 228 #endif 215 229 } ··· 275 289 * instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for 276 290 * kernel, move after the instruction "move ra, at"(offset is 16) 277 291 */ 278 - ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24); 292 + ip = self_ra - (core_kernel_text(self_ra) ? 16 : 24); 279 293 280 294 /* 281 295 * search the text until finding the non-store instruction or "s{d,w} ··· 380 394 * entries configured through the tracing/set_graph_function interface. 381 395 */ 382 396 383 - insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1; 397 + insns = core_kernel_text(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1; 384 398 trace.func = self_ra - (MCOUNT_INSN_SIZE * insns); 385 399 386 400 /* Only trace if the calling function expects to */
+5 -1
arch/mips/kernel/perf_event_mipsxx.c
··· 1597 1597 break; 1598 1598 case CPU_P5600: 1599 1599 case CPU_P6600: 1600 - case CPU_I6400: 1601 1600 /* 8-bit event numbers */ 1602 1601 raw_id = config & 0x1ff; 1603 1602 base_id = raw_id & 0xff; ··· 1608 1609 #ifdef CONFIG_MIPS_MT_SMP 1609 1610 raw_event.range = P; 1610 1611 #endif 1612 + break; 1613 + case CPU_I6400: 1614 + /* 8-bit event numbers */ 1615 + base_id = config & 0xff; 1616 + raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; 1611 1617 break; 1612 1618 case CPU_1004K: 1613 1619 if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
+1 -1
arch/mips/mm/mmap.c
··· 93 93 94 94 vma = find_vma(mm, addr); 95 95 if (TASK_SIZE - len >= addr && 96 - (!vma || addr + len <= vma->vm_start)) 96 + (!vma || addr + len <= vm_start_gap(vma))) 97 97 return addr; 98 98 } 99 99
+3 -3
arch/mips/mm/pgtable-32.c
··· 51 51 /* 52 52 * Fixed mappings: 53 53 */ 54 - vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; 55 - fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base); 54 + vaddr = __fix_to_virt(__end_of_fixed_addresses - 1); 55 + fixrange_init(vaddr & PMD_MASK, vaddr + FIXADDR_SIZE, pgd_base); 56 56 57 57 #ifdef CONFIG_HIGHMEM 58 58 /* 59 59 * Permanent kmaps: 60 60 */ 61 61 vaddr = PKMAP_BASE; 62 - fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base); 62 + fixrange_init(vaddr & PMD_MASK, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base); 63 63 64 64 pgd = swapper_pg_dir + __pgd_offset(vaddr); 65 65 pud = pud_offset(pgd, vaddr);
+9 -6
arch/parisc/kernel/sys_parisc.c
··· 90 90 unsigned long len, unsigned long pgoff, unsigned long flags) 91 91 { 92 92 struct mm_struct *mm = current->mm; 93 - struct vm_area_struct *vma; 93 + struct vm_area_struct *vma, *prev; 94 94 unsigned long task_size = TASK_SIZE; 95 95 int do_color_align, last_mmap; 96 96 struct vm_unmapped_area_info info; ··· 117 117 else 118 118 addr = PAGE_ALIGN(addr); 119 119 120 - vma = find_vma(mm, addr); 120 + vma = find_vma_prev(mm, addr, &prev); 121 121 if (task_size - len >= addr && 122 - (!vma || addr + len <= vma->vm_start)) 122 + (!vma || addr + len <= vm_start_gap(vma)) && 123 + (!prev || addr >= vm_end_gap(prev))) 123 124 goto found_addr; 124 125 } 125 126 ··· 144 143 const unsigned long len, const unsigned long pgoff, 145 144 const unsigned long flags) 146 145 { 147 - struct vm_area_struct *vma; 146 + struct vm_area_struct *vma, *prev; 148 147 struct mm_struct *mm = current->mm; 149 148 unsigned long addr = addr0; 150 149 int do_color_align, last_mmap; ··· 178 177 addr = COLOR_ALIGN(addr, last_mmap, pgoff); 179 178 else 180 179 addr = PAGE_ALIGN(addr); 181 - vma = find_vma(mm, addr); 180 + 181 + vma = find_vma_prev(mm, addr, &prev); 182 182 if (TASK_SIZE - len >= addr && 183 - (!vma || addr + len <= vma->vm_start)) 183 + (!vma || addr + len <= vm_start_gap(vma)) && 184 + (!prev || addr >= vm_end_gap(prev))) 184 185 goto found_addr; 185 186 } 186 187
+1 -1
arch/powerpc/include/asm/bug.h
··· 104 104 "1: "PPC_TLNEI" %4,0\n" \ 105 105 _EMIT_BUG_ENTRY \ 106 106 : : "i" (__FILE__), "i" (__LINE__), \ 107 - "i" (BUGFLAG_TAINT(TAINT_WARN)), \ 107 + "i" (BUGFLAG_WARNING|BUGFLAG_TAINT(TAINT_WARN)),\ 108 108 "i" (sizeof(struct bug_entry)), \ 109 109 "r" (__ret_warn_on)); \ 110 110 } \
+7 -5
arch/powerpc/include/asm/xive.h
··· 94 94 * store at 0 and some ESBs support doing a trigger via a 95 95 * separate trigger page. 96 96 */ 97 - #define XIVE_ESB_GET 0x800 98 - #define XIVE_ESB_SET_PQ_00 0xc00 99 - #define XIVE_ESB_SET_PQ_01 0xd00 100 - #define XIVE_ESB_SET_PQ_10 0xe00 101 - #define XIVE_ESB_SET_PQ_11 0xf00 97 + #define XIVE_ESB_STORE_EOI 0x400 /* Store */ 98 + #define XIVE_ESB_LOAD_EOI 0x000 /* Load */ 99 + #define XIVE_ESB_GET 0x800 /* Load */ 100 + #define XIVE_ESB_SET_PQ_00 0xc00 /* Load */ 101 + #define XIVE_ESB_SET_PQ_01 0xd00 /* Load */ 102 + #define XIVE_ESB_SET_PQ_10 0xe00 /* Load */ 103 + #define XIVE_ESB_SET_PQ_11 0xf00 /* Load */ 102 104 103 105 #define XIVE_ESB_VAL_P 0x2 104 106 #define XIVE_ESB_VAL_Q 0x1
+2 -2
arch/powerpc/kvm/book3s_xive_template.c
··· 69 69 { 70 70 /* If the XIVE supports the new "store EOI facility, use it */ 71 71 if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI) 72 - __x_writeq(0, __x_eoi_page(xd)); 72 + __x_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI); 73 73 else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) { 74 74 opal_int_eoi(hw_irq); 75 75 } else { ··· 89 89 * properly. 90 90 */ 91 91 if (xd->flags & XIVE_IRQ_FLAG_LSI) 92 - __x_readq(__x_eoi_page(xd)); 92 + __x_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI); 93 93 else { 94 94 eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00); 95 95
+1 -1
arch/powerpc/mm/hugetlbpage-radix.c
··· 68 68 addr = ALIGN(addr, huge_page_size(h)); 69 69 vma = find_vma(mm, addr); 70 70 if (mm->task_size - len >= addr && 71 - (!vma || addr + len <= vma->vm_start)) 71 + (!vma || addr + len <= vm_start_gap(vma))) 72 72 return addr; 73 73 } 74 74 /*
+2 -2
arch/powerpc/mm/mmap.c
··· 112 112 addr = PAGE_ALIGN(addr); 113 113 vma = find_vma(mm, addr); 114 114 if (mm->task_size - len >= addr && addr >= mmap_min_addr && 115 - (!vma || addr + len <= vma->vm_start)) 115 + (!vma || addr + len <= vm_start_gap(vma))) 116 116 return addr; 117 117 } 118 118 ··· 157 157 addr = PAGE_ALIGN(addr); 158 158 vma = find_vma(mm, addr); 159 159 if (mm->task_size - len >= addr && addr >= mmap_min_addr && 160 - (!vma || addr + len <= vma->vm_start)) 160 + (!vma || addr + len <= vm_start_gap(vma))) 161 161 return addr; 162 162 } 163 163
+1 -1
arch/powerpc/mm/slice.c
··· 99 99 if ((mm->task_size - len) < addr) 100 100 return 0; 101 101 vma = find_vma(mm, addr); 102 - return (!vma || (addr + len) <= vma->vm_start); 102 + return (!vma || (addr + len) <= vm_start_gap(vma)); 103 103 } 104 104 105 105 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
+2 -1
arch/powerpc/platforms/powernv/npu-dma.c
··· 75 75 if (WARN_ON(!gpdev)) 76 76 return NULL; 77 77 78 - if (WARN_ON(!gpdev->dev.of_node)) 78 + /* Not all PCI devices have device-tree nodes */ 79 + if (!gpdev->dev.of_node) 79 80 return NULL; 80 81 81 82 /* Get assoicated PCI device */
+1 -1
arch/powerpc/sysdev/xive/common.c
··· 297 297 { 298 298 /* If the XIVE supports the new "store EOI facility, use it */ 299 299 if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI) 300 - out_be64(xd->eoi_mmio, 0); 300 + out_be64(xd->eoi_mmio + XIVE_ESB_STORE_EOI, 0); 301 301 else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) { 302 302 /* 303 303 * The FW told us to call it. This happens for some
+2 -2
arch/s390/mm/mmap.c
··· 101 101 addr = PAGE_ALIGN(addr); 102 102 vma = find_vma(mm, addr); 103 103 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && 104 - (!vma || addr + len <= vma->vm_start)) 104 + (!vma || addr + len <= vm_start_gap(vma))) 105 105 goto check_asce_limit; 106 106 } 107 107 ··· 151 151 addr = PAGE_ALIGN(addr); 152 152 vma = find_vma(mm, addr); 153 153 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && 154 - (!vma || addr + len <= vma->vm_start)) 154 + (!vma || addr + len <= vm_start_gap(vma))) 155 155 goto check_asce_limit; 156 156 } 157 157
+2 -2
arch/sh/mm/mmap.c
··· 64 64 65 65 vma = find_vma(mm, addr); 66 66 if (TASK_SIZE - len >= addr && 67 - (!vma || addr + len <= vma->vm_start)) 67 + (!vma || addr + len <= vm_start_gap(vma))) 68 68 return addr; 69 69 } 70 70 ··· 114 114 115 115 vma = find_vma(mm, addr); 116 116 if (TASK_SIZE - len >= addr && 117 - (!vma || addr + len <= vma->vm_start)) 117 + (!vma || addr + len <= vm_start_gap(vma))) 118 118 return addr; 119 119 } 120 120
+2 -2
arch/sparc/kernel/sys_sparc_64.c
··· 120 120 121 121 vma = find_vma(mm, addr); 122 122 if (task_size - len >= addr && 123 - (!vma || addr + len <= vma->vm_start)) 123 + (!vma || addr + len <= vm_start_gap(vma))) 124 124 return addr; 125 125 } 126 126 ··· 183 183 184 184 vma = find_vma(mm, addr); 185 185 if (task_size - len >= addr && 186 - (!vma || addr + len <= vma->vm_start)) 186 + (!vma || addr + len <= vm_start_gap(vma))) 187 187 return addr; 188 188 } 189 189
+1 -1
arch/sparc/mm/hugetlbpage.c
··· 120 120 addr = ALIGN(addr, huge_page_size(h)); 121 121 vma = find_vma(mm, addr); 122 122 if (task_size - len >= addr && 123 - (!vma || addr + len <= vma->vm_start)) 123 + (!vma || addr + len <= vm_start_gap(vma))) 124 124 return addr; 125 125 } 126 126 if (mm->get_unmapped_area == arch_get_unmapped_area)
+1 -1
arch/tile/mm/hugetlbpage.c
··· 233 233 addr = ALIGN(addr, huge_page_size(h)); 234 234 vma = find_vma(mm, addr); 235 235 if (TASK_SIZE - len >= addr && 236 - (!vma || addr + len <= vma->vm_start)) 236 + (!vma || addr + len <= vm_start_gap(vma))) 237 237 return addr; 238 238 } 239 239 if (current->mm->get_unmapped_area == arch_get_unmapped_area)
+1
arch/x86/include/asm/extable.h
··· 29 29 } while (0) 30 30 31 31 extern int fixup_exception(struct pt_regs *regs, int trapnr); 32 + extern int fixup_bug(struct pt_regs *regs, int trapnr); 32 33 extern bool ex_has_fault_handler(unsigned long ip); 33 34 extern void early_fixup_exception(struct pt_regs *regs, int trapnr); 34 35
+2 -2
arch/x86/kernel/sys_x86_64.c
··· 144 144 addr = PAGE_ALIGN(addr); 145 145 vma = find_vma(mm, addr); 146 146 if (end - len >= addr && 147 - (!vma || addr + len <= vma->vm_start)) 147 + (!vma || addr + len <= vm_start_gap(vma))) 148 148 return addr; 149 149 } 150 150 ··· 187 187 addr = PAGE_ALIGN(addr); 188 188 vma = find_vma(mm, addr); 189 189 if (TASK_SIZE - len >= addr && 190 - (!vma || addr + len <= vma->vm_start)) 190 + (!vma || addr + len <= vm_start_gap(vma))) 191 191 return addr; 192 192 } 193 193
+1 -1
arch/x86/kernel/traps.c
··· 182 182 return ud == INSN_UD0 || ud == INSN_UD2; 183 183 } 184 184 185 - static int fixup_bug(struct pt_regs *regs, int trapnr) 185 + int fixup_bug(struct pt_regs *regs, int trapnr) 186 186 { 187 187 if (trapnr != X86_TRAP_UD) 188 188 return 0;
+3
arch/x86/mm/extable.c
··· 162 162 if (fixup_exception(regs, trapnr)) 163 163 return; 164 164 165 + if (fixup_bug(regs, trapnr)) 166 + return; 167 + 165 168 fail: 166 169 early_printk("PANIC: early exception 0x%02x IP %lx:%lx error %lx cr2 0x%lx\n", 167 170 (unsigned)trapnr, (unsigned long)regs->cs, regs->ip,
+1 -1
arch/x86/mm/hugetlbpage.c
··· 148 148 addr = ALIGN(addr, huge_page_size(h)); 149 149 vma = find_vma(mm, addr); 150 150 if (TASK_SIZE - len >= addr && 151 - (!vma || addr + len <= vma->vm_start)) 151 + (!vma || addr + len <= vm_start_gap(vma))) 152 152 return addr; 153 153 } 154 154 if (mm->get_unmapped_area == arch_get_unmapped_area)
+3 -3
arch/x86/mm/init.c
··· 161 161 162 162 static void __init probe_page_size_mask(void) 163 163 { 164 - #if !defined(CONFIG_KMEMCHECK) 165 164 /* 166 165 * For CONFIG_KMEMCHECK or pagealloc debugging, identity mapping will 167 166 * use small pages. 168 167 * This will simplify cpa(), which otherwise needs to support splitting 169 168 * large pages into small in interrupt context, etc. 170 169 */ 171 - if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled()) 170 + if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled() && !IS_ENABLED(CONFIG_KMEMCHECK)) 172 171 page_size_mask |= 1 << PG_LEVEL_2M; 173 - #endif 172 + else 173 + direct_gbpages = 0; 174 174 175 175 /* Enable PSE if available */ 176 176 if (boot_cpu_has(X86_FEATURE_PSE))
+1 -1
arch/xtensa/kernel/syscall.c
··· 88 88 /* At this point: (!vmm || addr < vmm->vm_end). */ 89 89 if (TASK_SIZE - len < addr) 90 90 return -ENOMEM; 91 - if (!vmm || addr + len <= vmm->vm_start) 91 + if (!vmm || addr + len <= vm_start_gap(vmm)) 92 92 return addr; 93 93 addr = vmm->vm_end; 94 94 if (flags & MAP_SHARED)
+22 -12
block/blk-sysfs.c
··· 777 777 } 778 778 779 779 /** 780 - * blk_release_queue: - release a &struct request_queue when it is no longer needed 781 - * @kobj: the kobj belonging to the request queue to be released 780 + * __blk_release_queue - release a request queue when it is no longer needed 781 + * @work: pointer to the release_work member of the request queue to be released 782 782 * 783 783 * Description: 784 - * blk_release_queue is the pair to blk_init_queue() or 785 - * blk_queue_make_request(). It should be called when a request queue is 786 - * being released; typically when a block device is being de-registered. 787 - * Currently, its primary task it to free all the &struct request 788 - * structures that were allocated to the queue and the queue itself. 784 + * blk_release_queue is the counterpart of blk_init_queue(). It should be 785 + * called when a request queue is being released; typically when a block 786 + * device is being de-registered. Its primary task it to free the queue 787 + * itself. 789 788 * 790 - * Note: 789 + * Notes: 791 790 * The low level driver must have finished any outstanding requests first 792 791 * via blk_cleanup_queue(). 793 - **/ 794 - static void blk_release_queue(struct kobject *kobj) 792 + * 793 + * Although blk_release_queue() may be called with preemption disabled, 794 + * __blk_release_queue() may sleep. 795 + */ 796 + static void __blk_release_queue(struct work_struct *work) 795 797 { 796 - struct request_queue *q = 797 - container_of(kobj, struct request_queue, kobj); 798 + struct request_queue *q = container_of(work, typeof(*q), release_work); 798 799 799 800 if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags)) 800 801 blk_stat_remove_callback(q, q->poll_cb); ··· 833 832 834 833 ida_simple_remove(&blk_queue_ida, q->id); 835 834 call_rcu(&q->rcu_head, blk_free_queue_rcu); 835 + } 836 + 837 + static void blk_release_queue(struct kobject *kobj) 838 + { 839 + struct request_queue *q = 840 + container_of(kobj, struct request_queue, kobj); 841 + 842 + INIT_WORK(&q->release_work, __blk_release_queue); 843 + schedule_work(&q->release_work); 836 844 } 837 845 838 846 static const struct sysfs_ops queue_sysfs_ops = {
+1
drivers/clk/meson/Kconfig
··· 14 14 config COMMON_CLK_GXBB 15 15 bool 16 16 depends on COMMON_CLK_AMLOGIC 17 + select RESET_CONTROLLER 17 18 help 18 19 Support for the clock controller on AmLogic S905 devices, aka gxbb. 19 20 Say Y if you want peripherals and CPU frequency scaling to work.
+1
drivers/clk/sunxi-ng/Kconfig
··· 156 156 bool "Support for Allwinner SoCs' PRCM CCUs" 157 157 select SUNXI_CCU_DIV 158 158 select SUNXI_CCU_GATE 159 + select SUNXI_CCU_MP 159 160 default MACH_SUN8I || (ARCH_SUNXI && ARM64) 160 161 161 162 endif
+3 -1
drivers/clk/sunxi-ng/ccu-sun50i-a64.h
··· 31 31 #define CLK_PLL_VIDEO0_2X 8 32 32 #define CLK_PLL_VE 9 33 33 #define CLK_PLL_DDR0 10 34 - #define CLK_PLL_PERIPH0 11 34 + 35 + /* PLL_PERIPH0 exported for PRCM */ 36 + 35 37 #define CLK_PLL_PERIPH0_2X 12 36 38 #define CLK_PLL_PERIPH1 13 37 39 #define CLK_PLL_PERIPH1_2X 14
+1 -1
drivers/clk/sunxi-ng/ccu-sun5i.c
··· 243 243 static SUNXI_CCU_GATE(ahb_dma_clk, "ahb-dma", "ahb", 244 244 0x060, BIT(6), 0); 245 245 static SUNXI_CCU_GATE(ahb_bist_clk, "ahb-bist", "ahb", 246 - 0x060, BIT(6), 0); 246 + 0x060, BIT(7), 0); 247 247 static SUNXI_CCU_GATE(ahb_mmc0_clk, "ahb-mmc0", "ahb", 248 248 0x060, BIT(8), 0); 249 249 static SUNXI_CCU_GATE(ahb_mmc1_clk, "ahb-mmc1", "ahb",
+1 -1
drivers/clk/sunxi-ng/ccu-sun6i-a31.c
··· 556 556 0x12c, 0, 4, 24, 3, BIT(31), 557 557 CLK_SET_RATE_PARENT); 558 558 static SUNXI_CCU_M_WITH_MUX_GATE(lcd1_ch1_clk, "lcd1-ch1", lcd_ch1_parents, 559 - 0x12c, 0, 4, 24, 3, BIT(31), 559 + 0x130, 0, 4, 24, 3, BIT(31), 560 560 CLK_SET_RATE_PARENT); 561 561 562 562 static const char * const csi_sclk_parents[] = { "pll-video0", "pll-video1",
+3 -1
drivers/clk/sunxi-ng/ccu-sun8i-h3.h
··· 29 29 #define CLK_PLL_VIDEO 6 30 30 #define CLK_PLL_VE 7 31 31 #define CLK_PLL_DDR 8 32 - #define CLK_PLL_PERIPH0 9 32 + 33 + /* PLL_PERIPH0 exported for PRCM */ 34 + 33 35 #define CLK_PLL_PERIPH0_2X 10 34 36 #define CLK_PLL_GPU 11 35 37 #define CLK_PLL_PERIPH1 12
+1 -1
drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
··· 537 537 [RST_BUS_EMAC] = { 0x2c0, BIT(17) }, 538 538 [RST_BUS_HSTIMER] = { 0x2c0, BIT(19) }, 539 539 [RST_BUS_SPI0] = { 0x2c0, BIT(20) }, 540 - [RST_BUS_OTG] = { 0x2c0, BIT(23) }, 540 + [RST_BUS_OTG] = { 0x2c0, BIT(24) }, 541 541 [RST_BUS_EHCI0] = { 0x2c0, BIT(26) }, 542 542 [RST_BUS_OHCI0] = { 0x2c0, BIT(29) }, 543 543
+2 -2
drivers/firmware/dmi-id.c
··· 47 47 DEFINE_DMI_ATTR_WITH_SHOW(product_version, 0444, DMI_PRODUCT_VERSION); 48 48 DEFINE_DMI_ATTR_WITH_SHOW(product_serial, 0400, DMI_PRODUCT_SERIAL); 49 49 DEFINE_DMI_ATTR_WITH_SHOW(product_uuid, 0400, DMI_PRODUCT_UUID); 50 - DEFINE_DMI_ATTR_WITH_SHOW(product_family, 0400, DMI_PRODUCT_FAMILY); 50 + DEFINE_DMI_ATTR_WITH_SHOW(product_family, 0444, DMI_PRODUCT_FAMILY); 51 51 DEFINE_DMI_ATTR_WITH_SHOW(board_vendor, 0444, DMI_BOARD_VENDOR); 52 52 DEFINE_DMI_ATTR_WITH_SHOW(board_name, 0444, DMI_BOARD_NAME); 53 53 DEFINE_DMI_ATTR_WITH_SHOW(board_version, 0444, DMI_BOARD_VERSION); ··· 192 192 ADD_DMI_ATTR(product_version, DMI_PRODUCT_VERSION); 193 193 ADD_DMI_ATTR(product_serial, DMI_PRODUCT_SERIAL); 194 194 ADD_DMI_ATTR(product_uuid, DMI_PRODUCT_UUID); 195 - ADD_DMI_ATTR(product_family, DMI_PRODUCT_FAMILY); 195 + ADD_DMI_ATTR(product_family, DMI_PRODUCT_FAMILY); 196 196 ADD_DMI_ATTR(board_vendor, DMI_BOARD_VENDOR); 197 197 ADD_DMI_ATTR(board_name, DMI_BOARD_NAME); 198 198 ADD_DMI_ATTR(board_version, DMI_BOARD_VERSION);
+37 -12
drivers/firmware/dmi_scan.c
··· 144 144 145 145 buf = dmi_early_remap(dmi_base, orig_dmi_len); 146 146 if (buf == NULL) 147 - return -1; 147 + return -ENOMEM; 148 148 149 149 dmi_decode_table(buf, decode, NULL); 150 150 ··· 178 178 const char *d = (const char *) dm; 179 179 const char *p; 180 180 181 - if (dmi_ident[slot]) 181 + if (dmi_ident[slot] || dm->length <= string) 182 182 return; 183 183 184 184 p = dmi_string(dm, d[string]); ··· 191 191 static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, 192 192 int index) 193 193 { 194 - const u8 *d = (u8 *) dm + index; 194 + const u8 *d; 195 195 char *s; 196 196 int is_ff = 1, is_00 = 1, i; 197 197 198 - if (dmi_ident[slot]) 198 + if (dmi_ident[slot] || dm->length <= index + 16) 199 199 return; 200 200 201 + d = (u8 *) dm + index; 201 202 for (i = 0; i < 16 && (is_ff || is_00); i++) { 202 203 if (d[i] != 0x00) 203 204 is_00 = 0; ··· 229 228 static void __init dmi_save_type(const struct dmi_header *dm, int slot, 230 229 int index) 231 230 { 232 - const u8 *d = (u8 *) dm + index; 231 + const u8 *d; 233 232 char *s; 234 233 235 - if (dmi_ident[slot]) 234 + if (dmi_ident[slot] || dm->length <= index) 236 235 return; 237 236 238 237 s = dmi_alloc(4); 239 238 if (!s) 240 239 return; 241 240 241 + d = (u8 *) dm + index; 242 242 sprintf(s, "%u", *d & 0x7F); 243 243 dmi_ident[slot] = s; 244 244 } ··· 280 278 281 279 static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm) 282 280 { 283 - int i, count = *(u8 *)(dm + 1); 281 + int i, count; 284 282 struct dmi_device *dev; 285 283 284 + if (dm->length < 0x05) 285 + return; 286 + 287 + count = *(u8 *)(dm + 1); 286 288 for (i = 1; i <= count; i++) { 287 289 const char *devname = dmi_string(dm, i); 288 290 ··· 359 353 const char *name; 360 354 const u8 *d = (u8 *)dm; 361 355 356 + if (dm->length < 0x0B) 357 + return; 358 + 362 359 /* Skip disabled device */ 363 360 if ((d[0x5] & 0x80) == 0) 364 361 return; ··· 396 387 const char *d = (const char *)dm; 397 388 static int nr; 398 389 399 - if (dm->type != DMI_ENTRY_MEM_DEVICE) 390 + if (dm->type != DMI_ENTRY_MEM_DEVICE || dm->length < 0x12) 400 391 return; 401 392 if (nr >= dmi_memdev_nr) { 402 393 pr_warn(FW_BUG "Too many DIMM entries in SMBIOS table\n"); ··· 659 650 goto error; 660 651 661 652 /* 653 + * Same logic as above, look for a 64-bit entry point 654 + * first, and if not found, fall back to 32-bit entry point. 655 + */ 656 + memcpy_fromio(buf, p, 16); 657 + for (q = p + 16; q < p + 0x10000; q += 16) { 658 + memcpy_fromio(buf + 16, q, 16); 659 + if (!dmi_smbios3_present(buf)) { 660 + dmi_available = 1; 661 + dmi_early_unmap(p, 0x10000); 662 + goto out; 663 + } 664 + memcpy(buf, buf + 16, 16); 665 + } 666 + 667 + /* 662 668 * Iterate over all possible DMI header addresses q. 663 669 * Maintain the 32 bytes around q in buf. On the 664 670 * first iteration, substitute zero for the ··· 683 659 memset(buf, 0, 16); 684 660 for (q = p; q < p + 0x10000; q += 16) { 685 661 memcpy_fromio(buf + 16, q, 16); 686 - if (!dmi_smbios3_present(buf) || !dmi_present(buf)) { 662 + if (!dmi_present(buf)) { 687 663 dmi_available = 1; 688 664 dmi_early_unmap(p, 0x10000); 689 665 goto out; ··· 1017 993 * @decode: Callback function 1018 994 * @private_data: Private data to be passed to the callback function 1019 995 * 1020 - * Returns -1 when the DMI table can't be reached, 0 on success. 996 + * Returns 0 on success, -ENXIO if DMI is not selected or not present, 997 + * or a different negative error code if DMI walking fails. 
1021 998 */ 1022 999 int dmi_walk(void (*decode)(const struct dmi_header *, void *), 1023 1000 void *private_data) ··· 1026 1001 u8 *buf; 1027 1002 1028 1003 if (!dmi_available) 1029 - return -1; 1004 + return -ENXIO; 1030 1005 1031 1006 buf = dmi_remap(dmi_base, dmi_len); 1032 1007 if (buf == NULL) 1033 - return -1; 1008 + return -ENOMEM; 1034 1009 1035 1010 dmi_decode_table(buf, decode, private_data); 1036 1011
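With distinct error codes, callers can now tell "no DMI on this machine" (-ENXIO) from a mapping failure (-ENOMEM). A minimal sketch of a caller; the counting callback and its threshold mirror the dmi_memdev length check above but are illustrative only:

    static void count_memdev(const struct dmi_header *dm, void *priv)
    {
            int *count = priv;

            if (dm->type == DMI_ENTRY_MEM_DEVICE && dm->length >= 0x12)
                    (*count)++;
    }

    static int probe_dimms(void)
    {
            int count = 0;
            int ret = dmi_walk(count_memdev, &count);

            if (ret)        /* -ENXIO or -ENOMEM, no longer a bare -1 */
                    return ret;
            pr_info("%d SMBIOS memory-device entries\n", count);
            return 0;
    }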
+5 -2
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
··· 1207 1207 u32 tmp, wm_mask, lb_vblank_lead_lines = 0; 1208 1208 1209 1209 if (amdgpu_crtc->base.enabled && num_heads && mode) { 1210 - active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; 1211 - line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); 1210 + active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, 1211 + (u32)mode->clock); 1212 + line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, 1213 + (u32)mode->clock); 1214 + line_time = min(line_time, (u32)65535); 1212 1215 1213 1216 /* watermark for high clocks */ 1214 1217 if (adev->pm.dpm_enabled) {
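The same conversion appears in dce_v11_0, dce_v6_0, dce_v8_0 and in the radeon cik/evergreen/si files below. The motivation is overflow on 32-bit builds, where the old 1000000UL multiplier is only 32 bits wide: for a large mode with crtc_htotal = 4400, the product 4400 * 1000000 = 4,400,000,000 exceeds U32_MAX (4,294,967,295) and wraps to roughly 105 million, yielding a bogus line time. Widening the product to u64 and dividing with div_u64() (plain 64-bit division is unavailable in 32-bit kernels) keeps the intermediate exact before the result is clamped to 65535.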
+5 -2
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
··· 1176 1176 u32 tmp, wm_mask, lb_vblank_lead_lines = 0; 1177 1177 1178 1178 if (amdgpu_crtc->base.enabled && num_heads && mode) { 1179 - active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; 1180 - line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); 1179 + active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, 1180 + (u32)mode->clock); 1181 + line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, 1182 + (u32)mode->clock); 1183 + line_time = min(line_time, (u32)65535); 1181 1184 1182 1185 /* watermark for high clocks */ 1183 1186 if (adev->pm.dpm_enabled) {
+5 -2
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
··· 983 983 fixed20_12 a, b, c; 984 984 985 985 if (amdgpu_crtc->base.enabled && num_heads && mode) { 986 - active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; 987 - line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); 986 + active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, 987 + (u32)mode->clock); 988 + line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, 989 + (u32)mode->clock); 990 + line_time = min(line_time, (u32)65535); 988 991 priority_a_cnt = 0; 989 992 priority_b_cnt = 0; 990 993
+5 -2
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
··· 1091 1091 u32 tmp, wm_mask, lb_vblank_lead_lines = 0; 1092 1092 1093 1093 if (amdgpu_crtc->base.enabled && num_heads && mode) { 1094 - active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; 1095 - line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); 1094 + active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, 1095 + (u32)mode->clock); 1096 + line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, 1097 + (u32)mode->clock); 1098 + line_time = min(line_time, (u32)65535); 1096 1099 1097 1100 /* watermark for high clocks */ 1098 1101 if (adev->pm.dpm_enabled) {
+1
drivers/gpu/drm/bridge/synopsys/Kconfig
··· 1 1 config DRM_DW_HDMI 2 2 tristate 3 3 select DRM_KMS_HELPER 4 + select REGMAP_MMIO 4 5 5 6 config DRM_DW_HDMI_AHB_AUDIO 6 7 tristate "Synopsys Designware AHB Audio interface"
+2 -6
drivers/gpu/drm/i915/i915_pvinfo.h
··· 36 36 #define VGT_VERSION_MAJOR 1 37 37 #define VGT_VERSION_MINOR 0 38 38 39 - #define INTEL_VGT_IF_VERSION_ENCODE(major, minor) ((major) << 16 | (minor)) 40 - #define INTEL_VGT_IF_VERSION \ 41 - INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR) 42 - 43 39 /* 44 40 * notifications from guest to vgpu device model 45 41 */ ··· 51 55 52 56 struct vgt_if { 53 57 u64 magic; /* VGT_MAGIC */ 54 - uint16_t version_major; 55 - uint16_t version_minor; 58 + u16 version_major; 59 + u16 version_minor; 56 60 u32 vgt_id; /* ID of vGT instance */ 57 61 u32 rsv1[12]; /* pad to offset 0x40 */ 58 62 /*
+4 -6
drivers/gpu/drm/i915/i915_vgpu.c
··· 60 60 */ 61 61 void i915_check_vgpu(struct drm_i915_private *dev_priv) 62 62 { 63 - uint64_t magic; 64 - uint32_t version; 63 + u64 magic; 64 + u16 version_major; 65 65 66 66 BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE); 67 67 ··· 69 69 if (magic != VGT_MAGIC) 70 70 return; 71 71 72 - version = INTEL_VGT_IF_VERSION_ENCODE( 73 - __raw_i915_read16(dev_priv, vgtif_reg(version_major)), 74 - __raw_i915_read16(dev_priv, vgtif_reg(version_minor))); 75 - if (version != INTEL_VGT_IF_VERSION) { 72 + version_major = __raw_i915_read16(dev_priv, vgtif_reg(version_major)); 73 + if (version_major < VGT_VERSION_MAJOR) { 76 74 DRM_INFO("VGT interface version mismatch!\n"); 77 75 return; 78 76 }
+8 -6
drivers/gpu/drm/i915/intel_display.c
··· 4598 4598 4599 4599 static int 4600 4600 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, 4601 - unsigned scaler_user, int *scaler_id, unsigned int rotation, 4601 + unsigned int scaler_user, int *scaler_id, 4602 4602 int src_w, int src_h, int dst_w, int dst_h) 4603 4603 { 4604 4604 struct intel_crtc_scaler_state *scaler_state = ··· 4607 4607 to_intel_crtc(crtc_state->base.crtc); 4608 4608 int need_scaling; 4609 4609 4610 - need_scaling = drm_rotation_90_or_270(rotation) ? 4611 - (src_h != dst_w || src_w != dst_h): 4612 - (src_w != dst_w || src_h != dst_h); 4610 + /* 4611 + * Src coordinates are already rotated by 270 degrees for 4612 + * the 90/270 degree plane rotation cases (to match the 4613 + * GTT mapping), hence no need to account for rotation here. 4614 + */ 4615 + need_scaling = src_w != dst_w || src_h != dst_h; 4613 4616 4614 4617 /* 4615 4618 * if plane is being disabled or scaler is no more required or force detach ··· 4674 4671 const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode; 4675 4672 4676 4673 return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX, 4677 - &state->scaler_state.scaler_id, DRM_ROTATE_0, 4674 + &state->scaler_state.scaler_id, 4678 4675 state->pipe_src_w, state->pipe_src_h, 4679 4676 adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay); 4680 4677 } ··· 4703 4700 ret = skl_update_scaler(crtc_state, force_detach, 4704 4701 drm_plane_index(&intel_plane->base), 4705 4702 &plane_state->scaler_id, 4706 - plane_state->base.rotation, 4707 4703 drm_rect_width(&plane_state->base.src) >> 16, 4708 4704 drm_rect_height(&plane_state->base.src) >> 16, 4709 4705 drm_rect_width(&plane_state->base.dst),
+24 -12
drivers/gpu/drm/i915/intel_pm.c
··· 3373 3373 3374 3374 /* n.b., src is 16.16 fixed point, dst is whole integer */ 3375 3375 if (plane->id == PLANE_CURSOR) { 3376 + /* 3377 + * Cursors only support 0/180 degree rotation, 3378 + * hence no need to account for rotation here. 3379 + */ 3376 3380 src_w = pstate->base.src_w; 3377 3381 src_h = pstate->base.src_h; 3378 3382 dst_w = pstate->base.crtc_w; 3379 3383 dst_h = pstate->base.crtc_h; 3380 3384 } else { 3385 + /* 3386 + * Src coordinates are already rotated by 270 degrees for 3387 + * the 90/270 degree plane rotation cases (to match the 3388 + * GTT mapping), hence no need to account for rotation here. 3389 + */ 3381 3390 src_w = drm_rect_width(&pstate->base.src); 3382 3391 src_h = drm_rect_height(&pstate->base.src); 3383 3392 dst_w = drm_rect_width(&pstate->base.dst); 3384 3393 dst_h = drm_rect_height(&pstate->base.dst); 3385 3394 } 3386 - 3387 - if (drm_rotation_90_or_270(pstate->base.rotation)) 3388 - swap(dst_w, dst_h); 3389 3395 3390 3396 downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING); 3391 3397 downscale_w = max(src_w / dst_w, (uint32_t)DRM_PLANE_HELPER_NO_SCALING); ··· 3423 3417 if (y && format != DRM_FORMAT_NV12) 3424 3418 return 0; 3425 3419 3420 + /* 3421 + * Src coordinates are already rotated by 270 degrees for 3422 + * the 90/270 degree plane rotation cases (to match the 3423 + * GTT mapping), hence no need to account for rotation here. 3424 + */ 3426 3425 width = drm_rect_width(&intel_pstate->base.src) >> 16; 3427 3426 height = drm_rect_height(&intel_pstate->base.src) >> 16; 3428 - 3429 - if (drm_rotation_90_or_270(pstate->rotation)) 3430 - swap(width, height); 3431 3427 3432 3428 /* for planar format */ 3433 3429 if (format == DRM_FORMAT_NV12) { ··· 3513 3505 fb->modifier != I915_FORMAT_MOD_Yf_TILED) 3514 3506 return 8; 3515 3507 3508 + /* 3509 + * Src coordinates are already rotated by 270 degrees for 3510 + * the 90/270 degree plane rotation cases (to match the 3511 + * GTT mapping), hence no need to account for rotation here. 3512 + */ 3516 3513 src_w = drm_rect_width(&intel_pstate->base.src) >> 16; 3517 3514 src_h = drm_rect_height(&intel_pstate->base.src) >> 16; 3518 - 3519 - if (drm_rotation_90_or_270(pstate->rotation)) 3520 - swap(src_w, src_h); 3521 3515 3522 3516 /* Halve UV plane width and height for NV12 */ 3523 3517 if (fb->format->format == DRM_FORMAT_NV12 && !y) { ··· 3804 3794 width = intel_pstate->base.crtc_w; 3805 3795 height = intel_pstate->base.crtc_h; 3806 3796 } else { 3797 + /* 3798 + * Src coordinates are already rotated by 270 degrees for 3799 + * the 90/270 degree plane rotation cases (to match the 3800 + * GTT mapping), hence no need to account for rotation here. 3801 + */ 3807 3802 width = drm_rect_width(&intel_pstate->base.src) >> 16; 3808 3803 height = drm_rect_height(&intel_pstate->base.src) >> 16; 3809 3804 } 3810 - 3811 - if (drm_rotation_90_or_270(pstate->rotation)) 3812 - swap(width, height); 3813 3805 3814 3806 cpp = fb->format->cpp[0]; 3815 3807 plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
+8 -1
drivers/gpu/drm/mgag200/mgag200_mode.c
··· 1173 1173 1174 1174 1175 1175 if (IS_G200_SE(mdev)) { 1176 - if (mdev->unique_rev_id >= 0x02) { 1176 + if (mdev->unique_rev_id >= 0x04) { 1177 + WREG8(MGAREG_CRTCEXT_INDEX, 0x06); 1178 + WREG8(MGAREG_CRTCEXT_DATA, 0); 1179 + } else if (mdev->unique_rev_id >= 0x02) { 1177 1180 u8 hi_pri_lvl; 1178 1181 u32 bpp; 1179 1182 u32 mb; ··· 1641 1638 return MODE_VIRTUAL_Y; 1642 1639 if (mga_vga_calculate_mode_bandwidth(mode, bpp) 1643 1640 > (30100 * 1024)) 1641 + return MODE_BANDWIDTH; 1642 + } else { 1643 + if (mga_vga_calculate_mode_bandwidth(mode, bpp) 1644 + > (55000 * 1024)) 1644 1645 return MODE_BANDWIDTH; 1645 1646 } 1646 1647 } else if (mdev->type == G200_WB) {
+42
drivers/gpu/drm/mxsfb/mxsfb_crtc.c
··· 35 35 #include "mxsfb_drv.h" 36 36 #include "mxsfb_regs.h" 37 37 38 + #define MXS_SET_ADDR 0x4 39 + #define MXS_CLR_ADDR 0x8 40 + #define MODULE_CLKGATE BIT(30) 41 + #define MODULE_SFTRST BIT(31) 42 + /* 1 second delay should be plenty of time for block reset */ 43 + #define RESET_TIMEOUT 1000000 44 + 38 45 static u32 set_hsync_pulse_width(struct mxsfb_drm_private *mxsfb, u32 val) 39 46 { 40 47 return (val & mxsfb->devdata->hs_wdth_mask) << ··· 166 159 clk_disable_unprepare(mxsfb->clk_disp_axi); 167 160 } 168 161 162 + /* 163 + * Clear the bit and poll it cleared. This is usually called with 164 + * a reset address and mask being either SFTRST(bit 31) or CLKGATE 165 + * (bit 30). 166 + */ 167 + static int clear_poll_bit(void __iomem *addr, u32 mask) 168 + { 169 + u32 reg; 170 + 171 + writel(mask, addr + MXS_CLR_ADDR); 172 + return readl_poll_timeout(addr, reg, !(reg & mask), 0, RESET_TIMEOUT); 173 + } 174 + 175 + static int mxsfb_reset_block(void __iomem *reset_addr) 176 + { 177 + int ret; 178 + 179 + ret = clear_poll_bit(reset_addr, MODULE_SFTRST); 180 + if (ret) 181 + return ret; 182 + 183 + writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR); 184 + 185 + ret = clear_poll_bit(reset_addr, MODULE_SFTRST); 186 + if (ret) 187 + return ret; 188 + 189 + return clear_poll_bit(reset_addr, MODULE_CLKGATE); 190 + } 191 + 169 192 static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb) 170 193 { 171 194 struct drm_display_mode *m = &mxsfb->pipe.crtc.state->adjusted_mode; ··· 209 172 * first stop the controller and drain its FIFOs. 210 173 */ 211 174 mxsfb_enable_axi_clk(mxsfb); 175 + 176 + /* Mandatory eLCDIF reset as per the Reference Manual */ 177 + err = mxsfb_reset_block(mxsfb->base); 178 + if (err) 179 + return; 212 180 213 181 /* Clear the FIFOs */ 214 182 writel(CTRL1_FIFO_CLEAR, mxsfb->base + LCDC_CTRL1 + REG_SET);
+5 -2
drivers/gpu/drm/radeon/cik.c
··· 9267 9267 u32 tmp, wm_mask; 9268 9268 9269 9269 if (radeon_crtc->base.enabled && num_heads && mode) { 9270 - active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; 9271 - line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); 9270 + active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, 9271 + (u32)mode->clock); 9272 + line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, 9273 + (u32)mode->clock); 9274 + line_time = min(line_time, (u32)65535); 9272 9275 9273 9276 /* watermark for high clocks */ 9274 9277 if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
+5 -2
drivers/gpu/drm/radeon/evergreen.c
··· 2266 2266 fixed20_12 a, b, c; 2267 2267 2268 2268 if (radeon_crtc->base.enabled && num_heads && mode) { 2269 - active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; 2270 - line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); 2269 + active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, 2270 + (u32)mode->clock); 2271 + line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, 2272 + (u32)mode->clock); 2273 + line_time = min(line_time, (u32)65535); 2271 2274 priority_a_cnt = 0; 2272 2275 priority_b_cnt = 0; 2273 2276 dram_channels = evergreen_get_number_of_dram_channels(rdev);
+1 -1
drivers/gpu/drm/radeon/radeon_uvd.c
··· 621 621 } 622 622 623 623 /* TODO: is this still necessary on NI+ ? */ 624 - if ((cmd == 0 || cmd == 1 || cmd == 0x3) && 624 + if ((cmd == 0 || cmd == 0x3) && 625 625 (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) { 626 626 DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n", 627 627 start, end);
+5 -2
drivers/gpu/drm/radeon/si.c
··· 2284 2284 fixed20_12 a, b, c; 2285 2285 2286 2286 if (radeon_crtc->base.enabled && num_heads && mode) { 2287 - active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; 2288 - line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); 2287 + active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, 2288 + (u32)mode->clock); 2289 + line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, 2290 + (u32)mode->clock); 2291 + line_time = min(line_time, (u32)65535); 2289 2292 priority_a_cnt = 0; 2290 2293 priority_b_cnt = 0; 2291 2294
+5 -17
drivers/gpu/drm/tegra/drm.c
··· 451 451 452 452 453 453 #ifdef CONFIG_DRM_TEGRA_STAGING 454 - static struct tegra_drm_context * 455 - tegra_drm_file_get_context(struct tegra_drm_file *file, u32 id) 456 - { 457 - struct tegra_drm_context *context; 458 - 459 - mutex_lock(&file->lock); 460 - context = idr_find(&file->contexts, id); 461 - mutex_unlock(&file->lock); 462 - 463 - return context; 464 - } 465 - 466 454 static int tegra_gem_create(struct drm_device *drm, void *data, 467 455 struct drm_file *file) 468 456 { ··· 539 551 if (err < 0) 540 552 return err; 541 553 542 - err = idr_alloc(&fpriv->contexts, context, 0, 0, GFP_KERNEL); 554 + err = idr_alloc(&fpriv->contexts, context, 1, 0, GFP_KERNEL); 543 555 if (err < 0) { 544 556 client->ops->close_channel(context); 545 557 return err; ··· 594 606 595 607 mutex_lock(&fpriv->lock); 596 608 597 - context = tegra_drm_file_get_context(fpriv, args->context); 609 + context = idr_find(&fpriv->contexts, args->context); 598 610 if (!context) { 599 611 err = -EINVAL; 600 612 goto unlock; ··· 619 631 620 632 mutex_lock(&fpriv->lock); 621 633 622 - context = tegra_drm_file_get_context(fpriv, args->context); 634 + context = idr_find(&fpriv->contexts, args->context); 623 635 if (!context) { 624 636 err = -ENODEV; 625 637 goto unlock; ··· 648 660 649 661 mutex_lock(&fpriv->lock); 650 662 651 - context = tegra_drm_file_get_context(fpriv, args->context); 663 + context = idr_find(&fpriv->contexts, args->context); 652 664 if (!context) { 653 665 err = -ENODEV; 654 666 goto unlock; ··· 673 685 674 686 mutex_lock(&fpriv->lock); 675 687 676 - context = tegra_drm_file_get_context(fpriv, args->context); 688 + context = idr_find(&fpriv->contexts, args->context); 677 689 if (!context) { 678 690 err = -ENODEV; 679 691 goto unlock;
+1 -1
drivers/gpu/host1x/dev.c
··· 172 172 173 173 host->rst = devm_reset_control_get(&pdev->dev, "host1x"); 174 174 if (IS_ERR(host->rst)) { 175 - err = PTR_ERR(host->clk); 175 + err = PTR_ERR(host->rst); 176 176 dev_err(&pdev->dev, "failed to get reset: %d\n", err); 177 177 return err; 178 178 }
+3
drivers/hid/hid-ids.h
··· 319 319 #define USB_VENDOR_ID_DELCOM 0x0fc5 320 320 #define USB_DEVICE_ID_DELCOM_VISUAL_IND 0xb080 321 321 322 + #define USB_VENDOR_ID_DELL 0x413c 323 + #define USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE 0x301a 324 + 322 325 #define USB_VENDOR_ID_DELORME 0x1163 323 326 #define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100 324 327 #define USB_DEVICE_ID_DELORME_EM_LT20 0x0200
+7 -8
drivers/hid/hid-magicmouse.c
··· 349 349 350 350 if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) { 351 351 magicmouse_emit_buttons(msc, clicks & 3); 352 - input_mt_report_pointer_emulation(input, true); 353 352 input_report_rel(input, REL_X, x); 354 353 input_report_rel(input, REL_Y, y); 355 354 } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */ ··· 388 389 __clear_bit(BTN_RIGHT, input->keybit); 389 390 __clear_bit(BTN_MIDDLE, input->keybit); 390 391 __set_bit(BTN_MOUSE, input->keybit); 392 + __set_bit(BTN_TOOL_FINGER, input->keybit); 393 + __set_bit(BTN_TOOL_DOUBLETAP, input->keybit); 394 + __set_bit(BTN_TOOL_TRIPLETAP, input->keybit); 395 + __set_bit(BTN_TOOL_QUADTAP, input->keybit); 396 + __set_bit(BTN_TOOL_QUINTTAP, input->keybit); 397 + __set_bit(BTN_TOUCH, input->keybit); 398 + __set_bit(INPUT_PROP_POINTER, input->propbit); 391 399 __set_bit(INPUT_PROP_BUTTONPAD, input->propbit); 392 400 } 393 401 394 - __set_bit(BTN_TOOL_FINGER, input->keybit); 395 - __set_bit(BTN_TOOL_DOUBLETAP, input->keybit); 396 - __set_bit(BTN_TOOL_TRIPLETAP, input->keybit); 397 - __set_bit(BTN_TOOL_QUADTAP, input->keybit); 398 - __set_bit(BTN_TOOL_QUINTTAP, input->keybit); 399 - __set_bit(BTN_TOUCH, input->keybit); 400 - __set_bit(INPUT_PROP_POINTER, input->propbit); 401 402 402 403 __set_bit(EV_ABS, input->evbit); 403 404
+1
drivers/hid/usbhid/hid-quirks.c
··· 85 85 { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, 86 86 { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, 87 87 { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET }, 88 + { USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL }, 88 89 { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, 89 90 { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT }, 90 91 { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3, HID_QUIRK_MULTI_INPUT },
+1 -1
drivers/i2c/busses/i2c-ismt.c
··· 584 584 585 585 /* unmap the data buffer */ 586 586 if (dma_size != 0) 587 - dma_unmap_single(&adap->dev, dma_addr, dma_size, dma_direction); 587 + dma_unmap_single(dev, dma_addr, dma_size, dma_direction); 588 588 589 589 if (unlikely(!time_left)) { 590 590 dev_err(dev, "completion wait timed out\n");
+1 -1
drivers/i2c/busses/i2c-rcar.c
··· 319 319 rcar_i2c_write(priv, ICFBSCR, TCYC06); 320 320 321 321 dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg), 322 - priv->msg->len, priv->dma_direction); 322 + sg_dma_len(&priv->sg), priv->dma_direction); 323 323 324 324 priv->dma_direction = DMA_NONE; 325 325 }
+2 -2
drivers/iio/adc/meson_saradc.c
··· 468 468 static void meson_sar_adc_clear_fifo(struct iio_dev *indio_dev) 469 469 { 470 470 struct meson_sar_adc_priv *priv = iio_priv(indio_dev); 471 - int count; 471 + unsigned int count, tmp; 472 472 473 473 for (count = 0; count < MESON_SAR_ADC_MAX_FIFO_SIZE; count++) { 474 474 if (!meson_sar_adc_get_fifo_count(indio_dev)) 475 475 break; 476 476 477 - regmap_read(priv->regmap, MESON_SAR_ADC_FIFO_RD, 0); 477 + regmap_read(priv->regmap, MESON_SAR_ADC_FIFO_RD, &tmp); 478 478 } 479 479 } 480 480
+5 -2
drivers/iio/adc/mxs-lradc-adc.c
··· 718 718 adc->dev = dev; 719 719 720 720 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); 721 + if (!iores) 722 + return -EINVAL; 723 + 721 724 adc->base = devm_ioremap(dev, iores->start, resource_size(iores)); 722 - if (IS_ERR(adc->base)) 723 - return PTR_ERR(adc->base); 725 + if (!adc->base) 726 + return -ENOMEM; 724 727 725 728 init_completion(&adc->completion); 726 729 spin_lock_init(&adc->lock);
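devm_ioremap() returns NULL on failure rather than an ERR_PTR, and platform_get_resource() can itself return NULL, so both checks above were wrong or missing. A sketch of a common alternative (not what the driver does here) that folds both checks into devm_ioremap_resource(), which does use the ERR_PTR convention:

    iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    adc->base = devm_ioremap_resource(dev, iores);   /* validates iores too */
    if (IS_ERR(adc->base))
            return PTR_ERR(adc->base);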
+1
drivers/iio/buffer/industrialio-buffer-dma.c
··· 14 14 #include <linux/sched.h> 15 15 #include <linux/poll.h> 16 16 #include <linux/iio/buffer.h> 17 + #include <linux/iio/buffer_impl.h> 17 18 #include <linux/iio/buffer-dma.h> 18 19 #include <linux/dma-mapping.h> 19 20 #include <linux/sizes.h>
+1
drivers/iio/buffer/industrialio-buffer-dmaengine.c
··· 14 14 15 15 #include <linux/iio/iio.h> 16 16 #include <linux/iio/buffer.h> 17 + #include <linux/iio/buffer_impl.h> 17 18 #include <linux/iio/buffer-dma.h> 18 19 #include <linux/iio/buffer-dmaengine.h> 19 20
+36 -3
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
··· 41 41 static const struct inv_mpu6050_reg_map reg_set_6500 = { 42 42 .sample_rate_div = INV_MPU6050_REG_SAMPLE_RATE_DIV, 43 43 .lpf = INV_MPU6050_REG_CONFIG, 44 + .accel_lpf = INV_MPU6500_REG_ACCEL_CONFIG_2, 44 45 .user_ctrl = INV_MPU6050_REG_USER_CTRL, 45 46 .fifo_en = INV_MPU6050_REG_FIFO_EN, 46 47 .gyro_config = INV_MPU6050_REG_GYRO_CONFIG, ··· 212 211 EXPORT_SYMBOL_GPL(inv_mpu6050_set_power_itg); 213 212 214 213 /** 214 + * inv_mpu6050_set_lpf_regs() - set low pass filter registers, chip dependent 215 + * 216 + * MPU60xx/MPU9150 use only 1 register for accelerometer + gyroscope 217 + * MPU6500 and above have a dedicated register for accelerometer 218 + */ 219 + static int inv_mpu6050_set_lpf_regs(struct inv_mpu6050_state *st, 220 + enum inv_mpu6050_filter_e val) 221 + { 222 + int result; 223 + 224 + result = regmap_write(st->map, st->reg->lpf, val); 225 + if (result) 226 + return result; 227 + 228 + switch (st->chip_type) { 229 + case INV_MPU6050: 230 + case INV_MPU6000: 231 + case INV_MPU9150: 232 + /* old chips, nothing to do */ 233 + result = 0; 234 + break; 235 + default: 236 + /* set accel lpf */ 237 + result = regmap_write(st->map, st->reg->accel_lpf, val); 238 + break; 239 + } 240 + 241 + return result; 242 + } 243 + 244 + /** 215 245 * inv_mpu6050_init_config() - Initialize hardware, disable FIFO. 216 246 * 217 247 * Initial configuration: ··· 265 233 if (result) 266 234 return result; 267 235 268 - d = INV_MPU6050_FILTER_20HZ; 269 - result = regmap_write(st->map, st->reg->lpf, d); 236 + result = inv_mpu6050_set_lpf_regs(st, INV_MPU6050_FILTER_20HZ); 270 237 if (result) 271 238 return result; 272 239 ··· 568 537 * would be alising. This function basically search for the 569 538 * correct low pass parameters based on the fifo rate, e.g, 570 539 * sampling frequency. 540 + * 541 + * lpf is set automatically when setting sampling rate to avoid any aliases. 571 542 */ 572 543 static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate) 573 544 { ··· 585 552 while ((h < hz[i]) && (i < ARRAY_SIZE(d) - 1)) 586 553 i++; 587 554 data = d[i]; 588 - result = regmap_write(st->map, st->reg->lpf, data); 555 + result = inv_mpu6050_set_lpf_regs(st, data); 589 556 if (result) 590 557 return result; 591 558 st->chip_config.lpf = data;
+3
drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
··· 28 28 * struct inv_mpu6050_reg_map - Notable registers. 29 29 * @sample_rate_div: Divider applied to gyro output rate. 30 30 * @lpf: Configures internal low pass filter. 31 + * @accel_lpf: Configures accelerometer low pass filter. 31 32 * @user_ctrl: Enables/resets the FIFO. 32 33 * @fifo_en: Determines which data will appear in FIFO. 33 34 * @gyro_config: gyro config register. ··· 48 47 struct inv_mpu6050_reg_map { 49 48 u8 sample_rate_div; 50 49 u8 lpf; 50 + u8 accel_lpf; 51 51 u8 user_ctrl; 52 52 u8 fifo_en; 53 53 u8 gyro_config; ··· 190 188 #define INV_MPU6050_FIFO_THRESHOLD 500 191 189 192 190 /* mpu6500 registers */ 191 + #define INV_MPU6500_REG_ACCEL_CONFIG_2 0x1D 193 192 #define INV_MPU6500_REG_ACCEL_OFFSET 0x77 194 193 195 194 /* delay time in milliseconds */
+1 -9
drivers/infiniband/core/addr.c
··· 448 448 return ret; 449 449 450 450 rt = (struct rt6_info *)dst; 451 - if (ipv6_addr_any(&fl6.saddr)) { 452 - ret = ipv6_dev_get_saddr(addr->net, ip6_dst_idev(dst)->dev, 453 - &fl6.daddr, 0, &fl6.saddr); 454 - if (ret) 455 - goto put; 456 - 451 + if (ipv6_addr_any(&src_in->sin6_addr)) { 457 452 src_in->sin6_family = AF_INET6; 458 453 src_in->sin6_addr = fl6.saddr; 459 454 } ··· 465 470 466 471 *pdst = dst; 467 472 return 0; 468 - put: 469 - dst_release(dst); 470 - return ret; 471 473 } 472 474 #else 473 475 static int addr6_resolve(struct sockaddr_in6 *src_in,
+4
drivers/infiniband/hw/bnxt_re/bnxt_re.h
··· 56 56 #define BNXT_RE_MAX_SRQC_COUNT (64 * 1024) 57 57 #define BNXT_RE_MAX_CQ_COUNT (64 * 1024) 58 58 59 + #define BNXT_RE_UD_QP_HW_STALL 0x400000 60 + 61 + #define BNXT_RE_RQ_WQE_THRESHOLD 32 62 + 59 63 struct bnxt_re_work { 60 64 struct work_struct work; 61 65 unsigned long event;
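BNXT_RE_RQ_WQE_THRESHOLD feeds the doorbell batching added to bnxt_re_post_recv() further down: ring once per batch of posted RQEs plus once for the tail, instead of once per call. A minimal sketch of that pattern, with ring_db() standing in for bnxt_qplib_post_recv_db().

#include <stdio.h>

#define RQ_WQE_THRESHOLD 32

static void ring_db(void) { puts("doorbell"); }

static void post_recv_batch(int nr_wqes)
{
	int count = 0;

	for (int i = 0; i < nr_wqes; i++) {
		/* ...queue one RQE to the hardware ring here... */
		if (++count >= RQ_WQE_THRESHOLD) {
			ring_db();	/* publish a full batch */
			count = 0;
		}
	}
	if (count)
		ring_db();		/* publish the tail */
}

int main(void)
{
	post_recv_batch(70);		/* rings the doorbell 3 times */
	return 0;
}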
+347 -124
drivers/infiniband/hw/bnxt_re/ib_verbs.c
··· 61 61 #include "ib_verbs.h" 62 62 #include <rdma/bnxt_re-abi.h> 63 63 64 + static int __from_ib_access_flags(int iflags) 65 + { 66 + int qflags = 0; 67 + 68 + if (iflags & IB_ACCESS_LOCAL_WRITE) 69 + qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE; 70 + if (iflags & IB_ACCESS_REMOTE_READ) 71 + qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ; 72 + if (iflags & IB_ACCESS_REMOTE_WRITE) 73 + qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE; 74 + if (iflags & IB_ACCESS_REMOTE_ATOMIC) 75 + qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC; 76 + if (iflags & IB_ACCESS_MW_BIND) 77 + qflags |= BNXT_QPLIB_ACCESS_MW_BIND; 78 + if (iflags & IB_ZERO_BASED) 79 + qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED; 80 + if (iflags & IB_ACCESS_ON_DEMAND) 81 + qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND; 82 + return qflags; 83 + }; 84 + 85 + static enum ib_access_flags __to_ib_access_flags(int qflags) 86 + { 87 + enum ib_access_flags iflags = 0; 88 + 89 + if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE) 90 + iflags |= IB_ACCESS_LOCAL_WRITE; 91 + if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE) 92 + iflags |= IB_ACCESS_REMOTE_WRITE; 93 + if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ) 94 + iflags |= IB_ACCESS_REMOTE_READ; 95 + if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC) 96 + iflags |= IB_ACCESS_REMOTE_ATOMIC; 97 + if (qflags & BNXT_QPLIB_ACCESS_MW_BIND) 98 + iflags |= IB_ACCESS_MW_BIND; 99 + if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED) 100 + iflags |= IB_ZERO_BASED; 101 + if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND) 102 + iflags |= IB_ACCESS_ON_DEMAND; 103 + return iflags; 104 + }; 105 + 64 106 static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list, 65 107 struct bnxt_qplib_sge *sg_list, int num) 66 108 { ··· 191 149 ib_attr->max_total_mcast_qp_attach = 0; 192 150 ib_attr->max_ah = dev_attr->max_ah; 193 151 194 - ib_attr->max_fmr = dev_attr->max_fmr; 195 - ib_attr->max_map_per_fmr = 1; /* ? */ 152 + ib_attr->max_fmr = 0; 153 + ib_attr->max_map_per_fmr = 0; 196 154 197 155 ib_attr->max_srq = dev_attr->max_srq; 198 156 ib_attr->max_srq_wr = dev_attr->max_srq_wqes; ··· 452 410 return IB_LINK_LAYER_ETHERNET; 453 411 } 454 412 413 + #define BNXT_RE_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE) 414 + 415 + static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd) 416 + { 417 + struct bnxt_re_fence_data *fence = &pd->fence; 418 + struct ib_mr *ib_mr = &fence->mr->ib_mr; 419 + struct bnxt_qplib_swqe *wqe = &fence->bind_wqe; 420 + 421 + memset(wqe, 0, sizeof(*wqe)); 422 + wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW; 423 + wqe->wr_id = BNXT_QPLIB_FENCE_WRID; 424 + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; 425 + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; 426 + wqe->bind.zero_based = false; 427 + wqe->bind.parent_l_key = ib_mr->lkey; 428 + wqe->bind.va = (u64)(unsigned long)fence->va; 429 + wqe->bind.length = fence->size; 430 + wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ); 431 + wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1; 432 + 433 + /* Save the initial rkey in fence structure for now; 434 + * wqe->bind.r_key will be set at (re)bind time. 
435 + */ 436 + fence->bind_rkey = ib_inc_rkey(fence->mw->rkey); 437 + } 438 + 439 + static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp) 440 + { 441 + struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp, 442 + qplib_qp); 443 + struct ib_pd *ib_pd = qp->ib_qp.pd; 444 + struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); 445 + struct bnxt_re_fence_data *fence = &pd->fence; 446 + struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe; 447 + struct bnxt_qplib_swqe wqe; 448 + int rc; 449 + 450 + memcpy(&wqe, fence_wqe, sizeof(wqe)); 451 + wqe.bind.r_key = fence->bind_rkey; 452 + fence->bind_rkey = ib_inc_rkey(fence->bind_rkey); 453 + 454 + dev_dbg(rdev_to_dev(qp->rdev), 455 + "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n", 456 + wqe.bind.r_key, qp->qplib_qp.id, pd); 457 + rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe); 458 + if (rc) { 459 + dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n"); 460 + return rc; 461 + } 462 + bnxt_qplib_post_send_db(&qp->qplib_qp); 463 + 464 + return rc; 465 + } 466 + 467 + static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd) 468 + { 469 + struct bnxt_re_fence_data *fence = &pd->fence; 470 + struct bnxt_re_dev *rdev = pd->rdev; 471 + struct device *dev = &rdev->en_dev->pdev->dev; 472 + struct bnxt_re_mr *mr = fence->mr; 473 + 474 + if (fence->mw) { 475 + bnxt_re_dealloc_mw(fence->mw); 476 + fence->mw = NULL; 477 + } 478 + if (mr) { 479 + if (mr->ib_mr.rkey) 480 + bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr, 481 + true); 482 + if (mr->ib_mr.lkey) 483 + bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); 484 + kfree(mr); 485 + fence->mr = NULL; 486 + } 487 + if (fence->dma_addr) { 488 + dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES, 489 + DMA_BIDIRECTIONAL); 490 + fence->dma_addr = 0; 491 + } 492 + } 493 + 494 + static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd) 495 + { 496 + int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND; 497 + struct bnxt_re_fence_data *fence = &pd->fence; 498 + struct bnxt_re_dev *rdev = pd->rdev; 499 + struct device *dev = &rdev->en_dev->pdev->dev; 500 + struct bnxt_re_mr *mr = NULL; 501 + dma_addr_t dma_addr = 0; 502 + struct ib_mw *mw; 503 + u64 pbl_tbl; 504 + int rc; 505 + 506 + dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES, 507 + DMA_BIDIRECTIONAL); 508 + rc = dma_mapping_error(dev, dma_addr); 509 + if (rc) { 510 + dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n"); 511 + rc = -EIO; 512 + fence->dma_addr = 0; 513 + goto fail; 514 + } 515 + fence->dma_addr = dma_addr; 516 + 517 + /* Allocate a MR */ 518 + mr = kzalloc(sizeof(*mr), GFP_KERNEL); 519 + if (!mr) { 520 + rc = -ENOMEM; 521 + goto fail; 522 + } 523 + fence->mr = mr; 524 + mr->rdev = rdev; 525 + mr->qplib_mr.pd = &pd->qplib_pd; 526 + mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR; 527 + mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags); 528 + rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr); 529 + if (rc) { 530 + dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n"); 531 + goto fail; 532 + } 533 + 534 + /* Register MR */ 535 + mr->ib_mr.lkey = mr->qplib_mr.lkey; 536 + mr->qplib_mr.va = (u64)(unsigned long)fence->va; 537 + mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES; 538 + pbl_tbl = dma_addr; 539 + rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl, 540 + BNXT_RE_FENCE_PBL_SIZE, false); 541 + if (rc) { 542 + dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n"); 543 + goto fail; 544 + } 545 + 
mr->ib_mr.rkey = mr->qplib_mr.rkey; 546 + 547 + /* Create a fence MW only for kernel consumers */ 548 + mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL); 549 + if (!mw) { 550 + dev_err(rdev_to_dev(rdev), 551 + "Failed to create fence-MW for PD: %p\n", pd); 552 + rc = -EINVAL; 553 + goto fail; 554 + } 555 + fence->mw = mw; 556 + 557 + bnxt_re_create_fence_wqe(pd); 558 + return 0; 559 + 560 + fail: 561 + bnxt_re_destroy_fence_mr(pd); 562 + return rc; 563 + } 564 + 455 565 /* Protection Domains */ 456 566 int bnxt_re_dealloc_pd(struct ib_pd *ib_pd) 457 567 { ··· 611 417 struct bnxt_re_dev *rdev = pd->rdev; 612 418 int rc; 613 419 420 + bnxt_re_destroy_fence_mr(pd); 614 421 if (ib_pd->uobject && pd->dpi.dbr) { 615 422 struct ib_ucontext *ib_uctx = ib_pd->uobject->context; 616 423 struct bnxt_re_ucontext *ucntx; ··· 693 498 } 694 499 } 695 500 501 + if (!udata) 502 + if (bnxt_re_create_fence_mr(pd)) 503 + dev_warn(rdev_to_dev(rdev), 504 + "Failed to create Fence-MR\n"); 696 505 return &pd->ib_pd; 697 506 dbfail: 698 507 (void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl, ··· 1048 849 /* Shadow QP SQ depth should be same as QP1 RQ depth */ 1049 850 qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe; 1050 851 qp->qplib_qp.sq.max_sge = 2; 852 + /* Q full delta can be 1 since it is internal QP */ 853 + qp->qplib_qp.sq.q_full_delta = 1; 1051 854 1052 855 qp->qplib_qp.scq = qp1_qp->scq; 1053 856 qp->qplib_qp.rcq = qp1_qp->rcq; 1054 857 1055 858 qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe; 1056 859 qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge; 860 + /* Q full delta can be 1 since it is internal QP */ 861 + qp->qplib_qp.rq.q_full_delta = 1; 1057 862 1058 863 qp->qplib_qp.mtu = qp1_qp->mtu; 1059 864 ··· 1120 917 qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type == 1121 918 IB_SIGNAL_ALL_WR) ? 
true : false); 1122 919 1123 - entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1); 1124 - qp->qplib_qp.sq.max_wqe = min_t(u32, entries, 1125 - dev_attr->max_qp_wqes + 1); 1126 - 1127 920 qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge; 1128 921 if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges) 1129 922 qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges; ··· 1158 959 qp->qplib_qp.rq.max_wqe = min_t(u32, entries, 1159 960 dev_attr->max_qp_wqes + 1); 1160 961 962 + qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe - 963 + qp_init_attr->cap.max_recv_wr; 964 + 1161 965 qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge; 1162 966 if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges) 1163 967 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; ··· 1169 967 qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu)); 1170 968 1171 969 if (qp_init_attr->qp_type == IB_QPT_GSI) { 970 + /* Allocate 1 more than what's provided */ 971 + entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1); 972 + qp->qplib_qp.sq.max_wqe = min_t(u32, entries, 973 + dev_attr->max_qp_wqes + 1); 974 + qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe - 975 + qp_init_attr->cap.max_send_wr; 1172 976 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; 1173 977 if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges) 1174 978 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; ··· 1214 1006 } 1215 1007 1216 1008 } else { 1009 + /* Allocate 128 + 1 more than what's provided */ 1010 + entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1011 + BNXT_QPLIB_RESERVED_QP_WRS + 1); 1012 + qp->qplib_qp.sq.max_wqe = min_t(u32, entries, 1013 + dev_attr->max_qp_wqes + 1014 + BNXT_QPLIB_RESERVED_QP_WRS + 1); 1015 + qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1; 1016 + 1017 + /* 1018 + * Reserving one slot for Phantom WQE. Application can 1019 + * post one extra entry in this case. 
But allowing this to avoid 1020 + * unexpected Queue full condition 1021 + */ 1022 + 1023 + qp->qplib_qp.sq.q_full_delta -= 1; 1024 + 1217 1025 qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom; 1218 1026 qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom; 1219 1027 if (udata) { ··· 1249 1025 1250 1026 qp->ib_qp.qp_num = qp->qplib_qp.id; 1251 1027 spin_lock_init(&qp->sq_lock); 1028 + spin_lock_init(&qp->rq_lock); 1252 1029 1253 1030 if (udata) { 1254 1031 struct bnxt_re_qp_resp resp; ··· 1353 1128 return IB_MTU_2048; 1354 1129 } 1355 1130 } 1356 - 1357 - static int __from_ib_access_flags(int iflags) 1358 - { 1359 - int qflags = 0; 1360 - 1361 - if (iflags & IB_ACCESS_LOCAL_WRITE) 1362 - qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE; 1363 - if (iflags & IB_ACCESS_REMOTE_READ) 1364 - qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ; 1365 - if (iflags & IB_ACCESS_REMOTE_WRITE) 1366 - qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE; 1367 - if (iflags & IB_ACCESS_REMOTE_ATOMIC) 1368 - qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC; 1369 - if (iflags & IB_ACCESS_MW_BIND) 1370 - qflags |= BNXT_QPLIB_ACCESS_MW_BIND; 1371 - if (iflags & IB_ZERO_BASED) 1372 - qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED; 1373 - if (iflags & IB_ACCESS_ON_DEMAND) 1374 - qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND; 1375 - return qflags; 1376 - }; 1377 - 1378 - static enum ib_access_flags __to_ib_access_flags(int qflags) 1379 - { 1380 - enum ib_access_flags iflags = 0; 1381 - 1382 - if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE) 1383 - iflags |= IB_ACCESS_LOCAL_WRITE; 1384 - if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE) 1385 - iflags |= IB_ACCESS_REMOTE_WRITE; 1386 - if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ) 1387 - iflags |= IB_ACCESS_REMOTE_READ; 1388 - if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC) 1389 - iflags |= IB_ACCESS_REMOTE_ATOMIC; 1390 - if (qflags & BNXT_QPLIB_ACCESS_MW_BIND) 1391 - iflags |= IB_ACCESS_MW_BIND; 1392 - if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED) 1393 - iflags |= IB_ZERO_BASED; 1394 - if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND) 1395 - iflags |= IB_ACCESS_ON_DEMAND; 1396 - return iflags; 1397 - }; 1398 1131 1399 1132 static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev, 1400 1133 struct bnxt_re_qp *qp1_qp, ··· 1561 1378 entries = roundup_pow_of_two(qp_attr->cap.max_send_wr); 1562 1379 qp->qplib_qp.sq.max_wqe = min_t(u32, entries, 1563 1380 dev_attr->max_qp_wqes + 1); 1381 + qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe - 1382 + qp_attr->cap.max_send_wr; 1383 + /* 1384 + * Reserving one slot for Phantom WQE. Some application can 1385 + * post one extra entry in this case. 
Allowing this to avoid 1386 + * unexpected Queue full condition 1387 + */ 1388 + qp->qplib_qp.sq.q_full_delta -= 1; 1564 1389 qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge; 1565 1390 if (qp->qplib_qp.rq.max_wqe) { 1566 1391 entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr); 1567 1392 qp->qplib_qp.rq.max_wqe = 1568 1393 min_t(u32, entries, dev_attr->max_qp_wqes + 1); 1394 + qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe - 1395 + qp_attr->cap.max_recv_wr; 1569 1396 qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge; 1570 1397 } else { 1571 1398 /* SRQ was used prior, just ignore the RQ caps */ ··· 2076 1883 return payload_sz; 2077 1884 } 2078 1885 1886 + static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp) 1887 + { 1888 + if ((qp->ib_qp.qp_type == IB_QPT_UD || 1889 + qp->ib_qp.qp_type == IB_QPT_GSI || 1890 + qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) && 1891 + qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) { 1892 + int qp_attr_mask; 1893 + struct ib_qp_attr qp_attr; 1894 + 1895 + qp_attr_mask = IB_QP_STATE; 1896 + qp_attr.qp_state = IB_QPS_RTS; 1897 + bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL); 1898 + qp->qplib_qp.wqe_cnt = 0; 1899 + } 1900 + } 1901 + 2079 1902 static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev, 2080 1903 struct bnxt_re_qp *qp, 2081 1904 struct ib_send_wr *wr) ··· 2137 1928 wr = wr->next; 2138 1929 } 2139 1930 bnxt_qplib_post_send_db(&qp->qplib_qp); 1931 + bnxt_ud_qp_hw_stall_workaround(qp); 2140 1932 spin_unlock_irqrestore(&qp->sq_lock, flags); 2141 1933 return rc; 2142 1934 } ··· 2234 2024 wr = wr->next; 2235 2025 } 2236 2026 bnxt_qplib_post_send_db(&qp->qplib_qp); 2027 + bnxt_ud_qp_hw_stall_workaround(qp); 2237 2028 spin_unlock_irqrestore(&qp->sq_lock, flags); 2238 2029 2239 2030 return rc; ··· 2282 2071 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); 2283 2072 struct bnxt_qplib_swqe wqe; 2284 2073 int rc = 0, payload_sz = 0; 2074 + unsigned long flags; 2075 + u32 count = 0; 2285 2076 2077 + spin_lock_irqsave(&qp->rq_lock, flags); 2286 2078 while (wr) { 2287 2079 /* House keeping */ 2288 2080 memset(&wqe, 0, sizeof(wqe)); ··· 2314 2100 *bad_wr = wr; 2315 2101 break; 2316 2102 } 2103 + 2104 + /* Ring DB if the RQEs posted reaches a threshold value */ 2105 + if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) { 2106 + bnxt_qplib_post_recv_db(&qp->qplib_qp); 2107 + count = 0; 2108 + } 2109 + 2317 2110 wr = wr->next; 2318 2111 } 2319 - bnxt_qplib_post_recv_db(&qp->qplib_qp); 2112 + 2113 + if (count) 2114 + bnxt_qplib_post_recv_db(&qp->qplib_qp); 2115 + 2116 + spin_unlock_irqrestore(&qp->rq_lock, flags); 2117 + 2320 2118 return rc; 2321 2119 } 2322 2120 ··· 2869 2643 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM; 2870 2644 } 2871 2645 2646 + static int send_phantom_wqe(struct bnxt_re_qp *qp) 2647 + { 2648 + struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp; 2649 + unsigned long flags; 2650 + int rc = 0; 2651 + 2652 + spin_lock_irqsave(&qp->sq_lock, flags); 2653 + 2654 + rc = bnxt_re_bind_fence_mw(lib_qp); 2655 + if (!rc) { 2656 + lib_qp->sq.phantom_wqe_cnt++; 2657 + dev_dbg(&lib_qp->sq.hwq.pdev->dev, 2658 + "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n", 2659 + lib_qp->id, lib_qp->sq.hwq.prod, 2660 + HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq), 2661 + lib_qp->sq.phantom_wqe_cnt); 2662 + } 2663 + 2664 + spin_unlock_irqrestore(&qp->sq_lock, flags); 2665 + return rc; 2666 + } 2667 + 2872 2668 int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc) 2873 2669 { 2874 2670 struct bnxt_re_cq 
*cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq); 2875 2671 struct bnxt_re_qp *qp; 2876 2672 struct bnxt_qplib_cqe *cqe; 2877 2673 int i, ncqe, budget; 2674 + struct bnxt_qplib_q *sq; 2675 + struct bnxt_qplib_qp *lib_qp; 2878 2676 u32 tbl_idx; 2879 2677 struct bnxt_re_sqp_entries *sqp_entry = NULL; 2880 2678 unsigned long flags; ··· 2911 2661 } 2912 2662 cqe = &cq->cql[0]; 2913 2663 while (budget) { 2914 - ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget); 2664 + lib_qp = NULL; 2665 + ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp); 2666 + if (lib_qp) { 2667 + sq = &lib_qp->sq; 2668 + if (sq->send_phantom) { 2669 + qp = container_of(lib_qp, 2670 + struct bnxt_re_qp, qplib_qp); 2671 + if (send_phantom_wqe(qp) == -ENOMEM) 2672 + dev_err(rdev_to_dev(cq->rdev), 2673 + "Phantom failed! Scheduled to send again\n"); 2674 + else 2675 + sq->send_phantom = false; 2676 + } 2677 + } 2678 + 2915 2679 if (!ncqe) 2916 2680 break; 2917 2681 ··· 3086 2822 struct bnxt_re_dev *rdev = mr->rdev; 3087 2823 int rc; 3088 2824 2825 + rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); 2826 + if (rc) { 2827 + dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc); 2828 + return rc; 2829 + } 2830 + 3089 2831 if (mr->npages && mr->pages) { 3090 2832 rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res, 3091 2833 &mr->qplib_frpl); ··· 3099 2829 mr->npages = 0; 3100 2830 mr->pages = NULL; 3101 2831 } 3102 - rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); 3103 - 3104 2832 if (!IS_ERR_OR_NULL(mr->ib_umem)) 3105 2833 ib_umem_release(mr->ib_umem); 3106 2834 ··· 3182 2914 return ERR_PTR(rc); 3183 2915 } 3184 2916 3185 - /* Fast Memory Regions */ 3186 - struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *ib_pd, int mr_access_flags, 3187 - struct ib_fmr_attr *fmr_attr) 2917 + struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type, 2918 + struct ib_udata *udata) 3188 2919 { 3189 2920 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); 3190 2921 struct bnxt_re_dev *rdev = pd->rdev; 3191 - struct bnxt_re_fmr *fmr; 2922 + struct bnxt_re_mw *mw; 3192 2923 int rc; 3193 2924 3194 - if (fmr_attr->max_pages > MAX_PBL_LVL_2_PGS || 3195 - fmr_attr->max_maps > rdev->dev_attr.max_map_per_fmr) { 3196 - dev_err(rdev_to_dev(rdev), "Allocate FMR exceeded Max limit"); 2925 + mw = kzalloc(sizeof(*mw), GFP_KERNEL); 2926 + if (!mw) 3197 2927 return ERR_PTR(-ENOMEM); 3198 - } 3199 - fmr = kzalloc(sizeof(*fmr), GFP_KERNEL); 3200 - if (!fmr) 3201 - return ERR_PTR(-ENOMEM); 2928 + mw->rdev = rdev; 2929 + mw->qplib_mw.pd = &pd->qplib_pd; 3202 2930 3203 - fmr->rdev = rdev; 3204 - fmr->qplib_fmr.pd = &pd->qplib_pd; 3205 - fmr->qplib_fmr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR; 3206 - 3207 - rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &fmr->qplib_fmr); 3208 - if (rc) 2931 + mw->qplib_mw.type = (type == IB_MW_TYPE_1 ? 
2932 + CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 : 2933 + CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B); 2934 + rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw); 2935 + if (rc) { 2936 + dev_err(rdev_to_dev(rdev), "Allocate MW failed!"); 3209 2937 goto fail; 2938 + } 2939 + mw->ib_mw.rkey = mw->qplib_mw.rkey; 3210 2940 3211 - fmr->qplib_fmr.flags = __from_ib_access_flags(mr_access_flags); 3212 - fmr->ib_fmr.lkey = fmr->qplib_fmr.lkey; 3213 - fmr->ib_fmr.rkey = fmr->ib_fmr.lkey; 2941 + atomic_inc(&rdev->mw_count); 2942 + return &mw->ib_mw; 3214 2943 3215 - atomic_inc(&rdev->mr_count); 3216 - return &fmr->ib_fmr; 3217 2944 fail: 3218 - kfree(fmr); 2945 + kfree(mw); 3219 2946 return ERR_PTR(rc); 3220 2947 } 3221 2948 3222 - int bnxt_re_map_phys_fmr(struct ib_fmr *ib_fmr, u64 *page_list, int list_len, 3223 - u64 iova) 2949 + int bnxt_re_dealloc_mw(struct ib_mw *ib_mw) 3224 2950 { 3225 - struct bnxt_re_fmr *fmr = container_of(ib_fmr, struct bnxt_re_fmr, 3226 - ib_fmr); 3227 - struct bnxt_re_dev *rdev = fmr->rdev; 2951 + struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw); 2952 + struct bnxt_re_dev *rdev = mw->rdev; 3228 2953 int rc; 3229 2954 3230 - fmr->qplib_fmr.va = iova; 3231 - fmr->qplib_fmr.total_size = list_len * PAGE_SIZE; 3232 - 3233 - rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &fmr->qplib_fmr, page_list, 3234 - list_len, true); 3235 - if (rc) 3236 - dev_err(rdev_to_dev(rdev), "Failed to map FMR for lkey = 0x%x!", 3237 - fmr->ib_fmr.lkey); 3238 - return rc; 3239 - } 3240 - 3241 - int bnxt_re_unmap_fmr(struct list_head *fmr_list) 3242 - { 3243 - struct bnxt_re_dev *rdev; 3244 - struct bnxt_re_fmr *fmr; 3245 - struct ib_fmr *ib_fmr; 3246 - int rc = 0; 3247 - 3248 - /* Validate each FMRs inside the fmr_list */ 3249 - list_for_each_entry(ib_fmr, fmr_list, list) { 3250 - fmr = container_of(ib_fmr, struct bnxt_re_fmr, ib_fmr); 3251 - rdev = fmr->rdev; 3252 - 3253 - if (rdev) { 3254 - rc = bnxt_qplib_dereg_mrw(&rdev->qplib_res, 3255 - &fmr->qplib_fmr, true); 3256 - if (rc) 3257 - break; 3258 - } 2955 + rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw); 2956 + if (rc) { 2957 + dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc); 2958 + return rc; 3259 2959 } 3260 - return rc; 3261 - } 3262 2960 3263 - int bnxt_re_dealloc_fmr(struct ib_fmr *ib_fmr) 3264 - { 3265 - struct bnxt_re_fmr *fmr = container_of(ib_fmr, struct bnxt_re_fmr, 3266 - ib_fmr); 3267 - struct bnxt_re_dev *rdev = fmr->rdev; 3268 - int rc; 3269 - 3270 - rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &fmr->qplib_fmr); 3271 - if (rc) 3272 - dev_err(rdev_to_dev(rdev), "Failed to free FMR"); 3273 - 3274 - kfree(fmr); 3275 - atomic_dec(&rdev->mr_count); 2961 + kfree(mw); 2962 + atomic_dec(&rdev->mw_count); 3276 2963 return rc; 3277 2964 } 3278 2965
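Much of the new sizing arithmetic above follows one pattern: round the requested depth up to a power of two, remember the surplus as q_full_delta, and carve one slot out of it for the phantom WQE. A worked sketch, taking BNXT_QPLIB_RESERVED_QP_WRS as 128 per the "128 + 1" comment in the hunk and ignoring the device caps clamping:

#include <stdio.h>

static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int max_send_wr = 100;	/* what the application asked for */
	unsigned int reserved = 128;	/* BNXT_QPLIB_RESERVED_QP_WRS */
	unsigned int max_wqe, q_full_delta;

	max_wqe = roundup_pow_of_two(max_send_wr + reserved + 1);
	q_full_delta = reserved + 1;
	q_full_delta -= 1;		/* one slot kept for the phantom WQE */
	printf("ring %u deep, reports full %u slots early\n",
	       max_wqe, q_full_delta);	/* -> 256 deep, delta 128 */
	return 0;
}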
+16 -6
drivers/infiniband/hw/bnxt_re/ib_verbs.h
··· 44 44 u32 refcnt; 45 45 }; 46 46 47 + #define BNXT_RE_FENCE_BYTES 64 48 + struct bnxt_re_fence_data { 49 + u32 size; 50 + u8 va[BNXT_RE_FENCE_BYTES]; 51 + dma_addr_t dma_addr; 52 + struct bnxt_re_mr *mr; 53 + struct ib_mw *mw; 54 + struct bnxt_qplib_swqe bind_wqe; 55 + u32 bind_rkey; 56 + }; 57 + 47 58 struct bnxt_re_pd { 48 59 struct bnxt_re_dev *rdev; 49 60 struct ib_pd ib_pd; 50 61 struct bnxt_qplib_pd qplib_pd; 51 62 struct bnxt_qplib_dpi dpi; 63 + struct bnxt_re_fence_data fence; 52 64 }; 53 65 54 66 struct bnxt_re_ah { ··· 74 62 struct bnxt_re_dev *rdev; 75 63 struct ib_qp ib_qp; 76 64 spinlock_t sq_lock; /* protect sq */ 65 + spinlock_t rq_lock; /* protect rq */ 77 66 struct bnxt_qplib_qp qplib_qp; 78 67 struct ib_umem *sumem; 79 68 struct ib_umem *rumem; ··· 194 181 struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type mr_type, 195 182 u32 max_num_sg); 196 183 int bnxt_re_dereg_mr(struct ib_mr *mr); 197 - struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *pd, int mr_access_flags, 198 - struct ib_fmr_attr *fmr_attr); 199 - int bnxt_re_map_phys_fmr(struct ib_fmr *fmr, u64 *page_list, int list_len, 200 - u64 iova); 201 - int bnxt_re_unmap_fmr(struct list_head *fmr_list); 202 - int bnxt_re_dealloc_fmr(struct ib_fmr *fmr); 184 + struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type, 185 + struct ib_udata *udata); 186 + int bnxt_re_dealloc_mw(struct ib_mw *mw); 203 187 struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, 204 188 u64 virt_addr, int mr_access_flags, 205 189 struct ib_udata *udata);
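fence.bind_rkey above is cycled with ib_inc_rkey() on every rebind so a stale rkey stops working. To my understanding that helper increments only the low eight consumer-owned "key" bits and leaves the index bits alone; a sketch of that behaviour:

#include <stdint.h>
#include <stdio.h>

static uint32_t inc_rkey(uint32_t rkey)
{
	const uint32_t mask = 0x000000ff;	/* consumer-owned key byte */

	return ((rkey + 1) & mask) | (rkey & ~mask);
}

int main(void)
{
	uint32_t rkey = 0x123456ff;

	rkey = inc_rkey(rkey);		/* wraps the key byte only */
	printf("%#x\n", rkey);		/* -> 0x12345600 */
	return 0;
}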
-4
drivers/infiniband/hw/bnxt_re/main.c
··· 507 507 ibdev->dereg_mr = bnxt_re_dereg_mr; 508 508 ibdev->alloc_mr = bnxt_re_alloc_mr; 509 509 ibdev->map_mr_sg = bnxt_re_map_mr_sg; 510 - ibdev->alloc_fmr = bnxt_re_alloc_fmr; 511 - ibdev->map_phys_fmr = bnxt_re_map_phys_fmr; 512 - ibdev->unmap_fmr = bnxt_re_unmap_fmr; 513 - ibdev->dealloc_fmr = bnxt_re_dealloc_fmr; 514 510 515 511 ibdev->reg_user_mr = bnxt_re_reg_user_mr; 516 512 ibdev->alloc_ucontext = bnxt_re_alloc_ucontext;
+199 -185
drivers/infiniband/hw/bnxt_re/qplib_fp.c
··· 284 284 { 285 285 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 286 286 struct cmdq_create_qp1 req; 287 - struct creq_create_qp1_resp *resp; 287 + struct creq_create_qp1_resp resp; 288 288 struct bnxt_qplib_pbl *pbl; 289 289 struct bnxt_qplib_q *sq = &qp->sq; 290 290 struct bnxt_qplib_q *rq = &qp->rq; ··· 394 394 395 395 req.pd_id = cpu_to_le32(qp->pd->id); 396 396 397 - resp = (struct creq_create_qp1_resp *) 398 - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 399 - NULL, 0); 400 - if (!resp) { 401 - dev_err(&res->pdev->dev, "QPLIB: FP: CREATE_QP1 send failed"); 402 - rc = -EINVAL; 397 + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 398 + (void *)&resp, NULL, 0); 399 + if (rc) 403 400 goto fail; 404 - } 405 - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { 406 - /* Cmd timed out */ 407 - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP1 timed out"); 408 - rc = -ETIMEDOUT; 409 - goto fail; 410 - } 411 - if (resp->status || 412 - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { 413 - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP1 failed "); 414 - dev_err(&rcfw->pdev->dev, 415 - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", 416 - resp->status, le16_to_cpu(req.cookie), 417 - le16_to_cpu(resp->cookie)); 418 - rc = -EINVAL; 419 - goto fail; 420 - } 421 - qp->id = le32_to_cpu(resp->xid); 401 + 402 + qp->id = le32_to_cpu(resp.xid); 422 403 qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET; 423 404 sq->flush_in_progress = false; 424 405 rq->flush_in_progress = false; ··· 423 442 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 424 443 struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr; 425 444 struct cmdq_create_qp req; 426 - struct creq_create_qp_resp *resp; 445 + struct creq_create_qp_resp resp; 427 446 struct bnxt_qplib_pbl *pbl; 428 447 struct sq_psn_search **psn_search_ptr; 429 448 unsigned long int psn_search, poff = 0; ··· 608 627 } 609 628 req.pd_id = cpu_to_le32(qp->pd->id); 610 629 611 - resp = (struct creq_create_qp_resp *) 612 - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 613 - NULL, 0); 614 - if (!resp) { 615 - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP send failed"); 616 - rc = -EINVAL; 630 + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 631 + (void *)&resp, NULL, 0); 632 + if (rc) 617 633 goto fail; 618 - } 619 - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { 620 - /* Cmd timed out */ 621 - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP timed out"); 622 - rc = -ETIMEDOUT; 623 - goto fail; 624 - } 625 - if (resp->status || 626 - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { 627 - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP failed "); 628 - dev_err(&rcfw->pdev->dev, 629 - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", 630 - resp->status, le16_to_cpu(req.cookie), 631 - le16_to_cpu(resp->cookie)); 632 - rc = -EINVAL; 633 - goto fail; 634 - } 635 - qp->id = le32_to_cpu(resp->xid); 634 + 635 + qp->id = le32_to_cpu(resp.xid); 636 636 qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET; 637 637 sq->flush_in_progress = false; 638 638 rq->flush_in_progress = false; ··· 731 769 { 732 770 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 733 771 struct cmdq_modify_qp req; 734 - struct creq_modify_qp_resp *resp; 772 + struct creq_modify_qp_resp resp; 735 773 u16 cmd_flags = 0, pkey; 736 774 u32 temp32[4]; 737 775 u32 bmask; 776 + int rc; 738 777 739 778 RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags); 740 779 ··· 825 862 826 863 req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id); 827 864 828 - resp = (struct 
creq_modify_qp_resp *) 829 - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 830 - NULL, 0); 831 - if (!resp) { 832 - dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP send failed"); 833 - return -EINVAL; 834 - } 835 - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { 836 - /* Cmd timed out */ 837 - dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP timed out"); 838 - return -ETIMEDOUT; 839 - } 840 - if (resp->status || 841 - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { 842 - dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP failed "); 843 - dev_err(&rcfw->pdev->dev, 844 - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", 845 - resp->status, le16_to_cpu(req.cookie), 846 - le16_to_cpu(resp->cookie)); 847 - return -EINVAL; 848 - } 865 + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 866 + (void *)&resp, NULL, 0); 867 + if (rc) 868 + return rc; 849 869 qp->cur_qp_state = qp->state; 850 870 return 0; 851 871 } ··· 837 891 { 838 892 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 839 893 struct cmdq_query_qp req; 840 - struct creq_query_qp_resp *resp; 894 + struct creq_query_qp_resp resp; 895 + struct bnxt_qplib_rcfw_sbuf *sbuf; 841 896 struct creq_query_qp_resp_sb *sb; 842 897 u16 cmd_flags = 0; 843 898 u32 temp32[4]; 844 - int i; 899 + int i, rc = 0; 845 900 846 901 RCFW_CMD_PREP(req, QUERY_QP, cmd_flags); 847 902 903 + sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb)); 904 + if (!sbuf) 905 + return -ENOMEM; 906 + sb = sbuf->sb; 907 + 848 908 req.qp_cid = cpu_to_le32(qp->id); 849 909 req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS; 850 - resp = (struct creq_query_qp_resp *) 851 - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 852 - (void **)&sb, 0); 853 - if (!resp) { 854 - dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP send failed"); 855 - return -EINVAL; 856 - } 857 - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { 858 - /* Cmd timed out */ 859 - dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP timed out"); 860 - return -ETIMEDOUT; 861 - } 862 - if (resp->status || 863 - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { 864 - dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP failed "); 865 - dev_err(&rcfw->pdev->dev, 866 - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", 867 - resp->status, le16_to_cpu(req.cookie), 868 - le16_to_cpu(resp->cookie)); 869 - return -EINVAL; 870 - } 910 + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, 911 + (void *)sbuf, 0); 912 + if (rc) 913 + goto bail; 871 914 /* Extract the context from the side buffer */ 872 915 qp->state = sb->en_sqd_async_notify_state & 873 916 CREQ_QUERY_QP_RESP_SB_STATE_MASK; ··· 911 976 qp->dest_qpn = le32_to_cpu(sb->dest_qp_id); 912 977 memcpy(qp->smac, sb->src_mac, 6); 913 978 qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id); 914 - return 0; 979 + bail: 980 + bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf); 981 + return rc; 915 982 } 916 983 917 984 static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp) ··· 958 1021 { 959 1022 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 960 1023 struct cmdq_destroy_qp req; 961 - struct creq_destroy_qp_resp *resp; 1024 + struct creq_destroy_qp_resp resp; 962 1025 unsigned long flags; 963 1026 u16 cmd_flags = 0; 1027 + int rc; 964 1028 965 1029 RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags); 966 1030 967 1031 req.qp_cid = cpu_to_le32(qp->id); 968 - resp = (struct creq_destroy_qp_resp *) 969 - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 970 - NULL, 0); 971 - if (!resp) { 972 - dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP send 
failed"); 973 - return -EINVAL; 974 - } 975 - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { 976 - /* Cmd timed out */ 977 - dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP timed out"); 978 - return -ETIMEDOUT; 979 - } 980 - if (resp->status || 981 - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { 982 - dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP failed "); 983 - dev_err(&rcfw->pdev->dev, 984 - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", 985 - resp->status, le16_to_cpu(req.cookie), 986 - le16_to_cpu(resp->cookie)); 987 - return -EINVAL; 988 - } 1032 + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 1033 + (void *)&resp, NULL, 0); 1034 + if (rc) 1035 + return rc; 989 1036 990 1037 /* Must walk the associated CQs to nullified the QP ptr */ 991 1038 spin_lock_irqsave(&qp->scq->hwq.lock, flags); ··· 1083 1162 rc = -EINVAL; 1084 1163 goto done; 1085 1164 } 1086 - if (HWQ_CMP((sq->hwq.prod + 1), &sq->hwq) == 1087 - HWQ_CMP(sq->hwq.cons, &sq->hwq)) { 1165 + 1166 + if (bnxt_qplib_queue_full(sq)) { 1167 + dev_err(&sq->hwq.pdev->dev, 1168 + "QPLIB: prod = %#x cons = %#x qdepth = %#x delta = %#x", 1169 + sq->hwq.prod, sq->hwq.cons, sq->hwq.max_elements, 1170 + sq->q_full_delta); 1088 1171 rc = -ENOMEM; 1089 1172 goto done; 1090 1173 } ··· 1298 1373 } 1299 1374 1300 1375 sq->hwq.prod++; 1376 + 1377 + qp->wqe_cnt++; 1378 + 1301 1379 done: 1302 1380 return rc; 1303 1381 } ··· 1339 1411 rc = -EINVAL; 1340 1412 goto done; 1341 1413 } 1342 - if (HWQ_CMP((rq->hwq.prod + 1), &rq->hwq) == 1343 - HWQ_CMP(rq->hwq.cons, &rq->hwq)) { 1414 + if (bnxt_qplib_queue_full(rq)) { 1344 1415 dev_err(&rq->hwq.pdev->dev, 1345 1416 "QPLIB: FP: QP (0x%x) RQ is full!", qp->id); 1346 1417 rc = -EINVAL; ··· 1410 1483 { 1411 1484 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 1412 1485 struct cmdq_create_cq req; 1413 - struct creq_create_cq_resp *resp; 1486 + struct creq_create_cq_resp resp; 1414 1487 struct bnxt_qplib_pbl *pbl; 1415 1488 u16 cmd_flags = 0; 1416 1489 int rc; ··· 1452 1525 (cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) << 1453 1526 CMDQ_CREATE_CQ_CNQ_ID_SFT); 1454 1527 1455 - resp = (struct creq_create_cq_resp *) 1456 - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 1457 - NULL, 0); 1458 - if (!resp) { 1459 - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ send failed"); 1460 - return -EINVAL; 1461 - } 1462 - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { 1463 - /* Cmd timed out */ 1464 - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ timed out"); 1465 - rc = -ETIMEDOUT; 1528 + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 1529 + (void *)&resp, NULL, 0); 1530 + if (rc) 1466 1531 goto fail; 1467 - } 1468 - if (resp->status || 1469 - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { 1470 - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ failed "); 1471 - dev_err(&rcfw->pdev->dev, 1472 - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", 1473 - resp->status, le16_to_cpu(req.cookie), 1474 - le16_to_cpu(resp->cookie)); 1475 - rc = -EINVAL; 1476 - goto fail; 1477 - } 1478 - cq->id = le32_to_cpu(resp->xid); 1532 + 1533 + cq->id = le32_to_cpu(resp.xid); 1479 1534 cq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem; 1480 1535 cq->period = BNXT_QPLIB_QUEUE_START_PERIOD; 1481 1536 init_waitqueue_head(&cq->waitq); ··· 1475 1566 { 1476 1567 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 1477 1568 struct cmdq_destroy_cq req; 1478 - struct creq_destroy_cq_resp *resp; 1569 + struct creq_destroy_cq_resp resp; 1479 1570 u16 cmd_flags = 0; 1571 + int rc; 1480 1572 
1481 1573 RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags); 1482 1574 1483 1575 req.cq_cid = cpu_to_le32(cq->id); 1484 - resp = (struct creq_destroy_cq_resp *) 1485 - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 1486 - NULL, 0); 1487 - if (!resp) { 1488 - dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ send failed"); 1489 - return -EINVAL; 1490 - } 1491 - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { 1492 - /* Cmd timed out */ 1493 - dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ timed out"); 1494 - return -ETIMEDOUT; 1495 - } 1496 - if (resp->status || 1497 - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { 1498 - dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ failed "); 1499 - dev_err(&rcfw->pdev->dev, 1500 - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", 1501 - resp->status, le16_to_cpu(req.cookie), 1502 - le16_to_cpu(resp->cookie)); 1503 - return -EINVAL; 1504 - } 1576 + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 1577 + (void *)&resp, NULL, 0); 1578 + if (rc) 1579 + return rc; 1505 1580 bnxt_qplib_free_hwq(res->pdev, &cq->hwq); 1506 1581 return 0; 1507 1582 } ··· 1557 1664 return rc; 1558 1665 } 1559 1666 1667 + /* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive) 1668 + * CQE is track from sw_cq_cons to max_element but valid only if VALID=1 1669 + */ 1670 + static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq, 1671 + u32 cq_cons, u32 sw_sq_cons, u32 cqe_sq_cons) 1672 + { 1673 + struct bnxt_qplib_q *sq = &qp->sq; 1674 + struct bnxt_qplib_swq *swq; 1675 + u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx; 1676 + struct cq_base *peek_hwcqe, **peek_hw_cqe_ptr; 1677 + struct cq_req *peek_req_hwcqe; 1678 + struct bnxt_qplib_qp *peek_qp; 1679 + struct bnxt_qplib_q *peek_sq; 1680 + int i, rc = 0; 1681 + 1682 + /* Normal mode */ 1683 + /* Check for the psn_search marking before completing */ 1684 + swq = &sq->swq[sw_sq_cons]; 1685 + if (swq->psn_search && 1686 + le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) { 1687 + /* Unmark */ 1688 + swq->psn_search->flags_next_psn = cpu_to_le32 1689 + (le32_to_cpu(swq->psn_search->flags_next_psn) 1690 + & ~0x80000000); 1691 + dev_dbg(&cq->hwq.pdev->dev, 1692 + "FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n", 1693 + cq_cons, qp->id, sw_sq_cons, cqe_sq_cons); 1694 + sq->condition = true; 1695 + sq->send_phantom = true; 1696 + 1697 + /* TODO: Only ARM if the previous SQE is ARMALL */ 1698 + bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ_ARMALL); 1699 + 1700 + rc = -EAGAIN; 1701 + goto out; 1702 + } 1703 + if (sq->condition) { 1704 + /* Peek at the completions */ 1705 + peek_raw_cq_cons = cq->hwq.cons; 1706 + peek_sw_cq_cons = cq_cons; 1707 + i = cq->hwq.max_elements; 1708 + while (i--) { 1709 + peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq); 1710 + peek_hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr; 1711 + peek_hwcqe = &peek_hw_cqe_ptr[CQE_PG(peek_sw_cq_cons)] 1712 + [CQE_IDX(peek_sw_cq_cons)]; 1713 + /* If the next hwcqe is VALID */ 1714 + if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons, 1715 + cq->hwq.max_elements)) { 1716 + /* If the next hwcqe is a REQ */ 1717 + if ((peek_hwcqe->cqe_type_toggle & 1718 + CQ_BASE_CQE_TYPE_MASK) == 1719 + CQ_BASE_CQE_TYPE_REQ) { 1720 + peek_req_hwcqe = (struct cq_req *) 1721 + peek_hwcqe; 1722 + peek_qp = (struct bnxt_qplib_qp *) 1723 + ((unsigned long) 1724 + le64_to_cpu 1725 + (peek_req_hwcqe->qp_handle)); 1726 + peek_sq = &peek_qp->sq; 1727 + peek_sq_cons_idx = HWQ_CMP(le16_to_cpu( 1728 + 
peek_req_hwcqe->sq_cons_idx) - 1 1729 + , &sq->hwq); 1730 + /* If the hwcqe's sq's wr_id matches */ 1731 + if (peek_sq == sq && 1732 + sq->swq[peek_sq_cons_idx].wr_id == 1733 + BNXT_QPLIB_FENCE_WRID) { 1734 + /* 1735 + * Unbreak only if the phantom 1736 + * comes back 1737 + */ 1738 + dev_dbg(&cq->hwq.pdev->dev, 1739 + "FP:Got Phantom CQE"); 1740 + sq->condition = false; 1741 + sq->single = true; 1742 + rc = 0; 1743 + goto out; 1744 + } 1745 + } 1746 + /* Valid but not the phantom, so keep looping */ 1747 + } else { 1748 + /* Not valid yet, just exit and wait */ 1749 + rc = -EINVAL; 1750 + goto out; 1751 + } 1752 + peek_sw_cq_cons++; 1753 + peek_raw_cq_cons++; 1754 + } 1755 + dev_err(&cq->hwq.pdev->dev, 1756 + "Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x", 1757 + cq_cons, qp->id, sw_sq_cons, cqe_sq_cons); 1758 + rc = -EINVAL; 1759 + } 1760 + out: 1761 + return rc; 1762 + } 1763 + 1560 1764 static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq, 1561 1765 struct cq_req *hwcqe, 1562 - struct bnxt_qplib_cqe **pcqe, int *budget) 1766 + struct bnxt_qplib_cqe **pcqe, int *budget, 1767 + u32 cq_cons, struct bnxt_qplib_qp **lib_qp) 1563 1768 { 1564 1769 struct bnxt_qplib_qp *qp; 1565 1770 struct bnxt_qplib_q *sq; 1566 1771 struct bnxt_qplib_cqe *cqe; 1567 - u32 sw_cons, cqe_cons; 1772 + u32 sw_sq_cons, cqe_sq_cons; 1773 + struct bnxt_qplib_swq *swq; 1568 1774 int rc = 0; 1569 1775 1570 1776 qp = (struct bnxt_qplib_qp *)((unsigned long) ··· 1675 1683 } 1676 1684 sq = &qp->sq; 1677 1685 1678 - cqe_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq); 1679 - if (cqe_cons > sq->hwq.max_elements) { 1686 + cqe_sq_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq); 1687 + if (cqe_sq_cons > sq->hwq.max_elements) { 1680 1688 dev_err(&cq->hwq.pdev->dev, 1681 1689 "QPLIB: FP: CQ Process req reported "); 1682 1690 dev_err(&cq->hwq.pdev->dev, 1683 1691 "QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x", 1684 - cqe_cons, sq->hwq.max_elements); 1692 + cqe_sq_cons, sq->hwq.max_elements); 1685 1693 return -EINVAL; 1686 1694 } 1687 1695 /* If we were in the middle of flushing the SQ, continue */ ··· 1690 1698 1691 1699 /* Require to walk the sq's swq to fabricate CQEs for all previously 1692 1700 * signaled SWQEs due to CQE aggregation from the current sq cons 1693 - * to the cqe_cons 1701 + * to the cqe_sq_cons 1694 1702 */ 1695 1703 cqe = *pcqe; 1696 1704 while (*budget) { 1697 - sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq); 1698 - if (sw_cons == cqe_cons) 1705 + sw_sq_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq); 1706 + if (sw_sq_cons == cqe_sq_cons) 1707 + /* Done */ 1699 1708 break; 1709 + 1710 + swq = &sq->swq[sw_sq_cons]; 1700 1711 memset(cqe, 0, sizeof(*cqe)); 1701 1712 cqe->opcode = CQ_BASE_CQE_TYPE_REQ; 1702 1713 cqe->qp_handle = (u64)(unsigned long)qp; 1703 1714 cqe->src_qp = qp->id; 1704 - cqe->wr_id = sq->swq[sw_cons].wr_id; 1705 - cqe->type = sq->swq[sw_cons].type; 1715 + cqe->wr_id = swq->wr_id; 1716 + if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID) 1717 + goto skip; 1718 + cqe->type = swq->type; 1706 1719 1707 1720 /* For the last CQE, check for status. 
For errors, regardless 1708 1721 * of the request being signaled or not, it must complete with 1709 1722 * the hwcqe error status 1710 1723 */ 1711 - if (HWQ_CMP((sw_cons + 1), &sq->hwq) == cqe_cons && 1724 + if (HWQ_CMP((sw_sq_cons + 1), &sq->hwq) == cqe_sq_cons && 1712 1725 hwcqe->status != CQ_REQ_STATUS_OK) { 1713 1726 cqe->status = hwcqe->status; 1714 1727 dev_err(&cq->hwq.pdev->dev, 1715 1728 "QPLIB: FP: CQ Processed Req "); 1716 1729 dev_err(&cq->hwq.pdev->dev, 1717 1730 "QPLIB: wr_id[%d] = 0x%llx with status 0x%x", 1718 - sw_cons, cqe->wr_id, cqe->status); 1731 + sw_sq_cons, cqe->wr_id, cqe->status); 1719 1732 cqe++; 1720 1733 (*budget)--; 1721 1734 sq->flush_in_progress = true; 1722 1735 /* Must block new posting of SQ and RQ */ 1723 1736 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; 1737 + sq->condition = false; 1738 + sq->single = false; 1724 1739 } else { 1725 - if (sq->swq[sw_cons].flags & 1726 - SQ_SEND_FLAGS_SIGNAL_COMP) { 1740 + if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) { 1741 + /* Before we complete, do WA 9060 */ 1742 + if (do_wa9060(qp, cq, cq_cons, sw_sq_cons, 1743 + cqe_sq_cons)) { 1744 + *lib_qp = qp; 1745 + goto out; 1746 + } 1727 1747 cqe->status = CQ_REQ_STATUS_OK; 1728 1748 cqe++; 1729 1749 (*budget)--; 1730 1750 } 1731 1751 } 1752 + skip: 1732 1753 sq->hwq.cons++; 1754 + if (sq->single) 1755 + break; 1733 1756 } 1757 + out: 1734 1758 *pcqe = cqe; 1735 - if (!*budget && HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_cons) { 1759 + if (HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_sq_cons) { 1736 1760 /* Out of budget */ 1737 1761 rc = -EAGAIN; 1738 1762 goto done; 1739 1763 } 1764 + /* 1765 + * Back to normal completion mode only after it has completed all of 1766 + * the WC for this CQE 1767 + */ 1768 + sq->single = false; 1740 1769 if (!sq->flush_in_progress) 1741 1770 goto done; 1742 1771 flush: ··· 2087 2074 } 2088 2075 2089 2076 int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, 2090 - int num_cqes) 2077 + int num_cqes, struct bnxt_qplib_qp **lib_qp) 2091 2078 { 2092 2079 struct cq_base *hw_cqe, **hw_cqe_ptr; 2093 2080 unsigned long flags; ··· 2112 2099 case CQ_BASE_CQE_TYPE_REQ: 2113 2100 rc = bnxt_qplib_cq_process_req(cq, 2114 2101 (struct cq_req *)hw_cqe, 2115 - &cqe, &budget); 2102 + &cqe, &budget, 2103 + sw_cons, lib_qp); 2116 2104 break; 2117 2105 case CQ_BASE_CQE_TYPE_RES_RC: 2118 2106 rc = bnxt_qplib_cq_process_res_rc(cq,
+17 -1
drivers/infiniband/hw/bnxt_re/qplib_fp.h
··· 88 88 89 89 struct bnxt_qplib_swqe { 90 90 /* General */ 91 + #define BNXT_QPLIB_FENCE_WRID 0x46454E43 /* "FENC" */ 91 92 u64 wr_id; 92 93 u8 reqs_type; 93 94 u8 type; ··· 217 216 struct scatterlist *sglist; 218 217 u32 nmap; 219 218 u32 max_wqe; 219 + u16 q_full_delta; 220 220 u16 max_sge; 221 221 u32 psn; 222 222 bool flush_in_progress; 223 + bool condition; 224 + bool single; 225 + bool send_phantom; 226 + u32 phantom_wqe_cnt; 227 + u32 phantom_cqe_cnt; 228 + u32 next_cq_cons; 223 229 }; 224 230 225 231 struct bnxt_qplib_qp { ··· 250 242 u8 timeout; 251 243 u8 retry_cnt; 252 244 u8 rnr_retry; 245 + u64 wqe_cnt; 253 246 u32 min_rnr_timer; 254 247 u32 max_rd_atomic; 255 248 u32 max_dest_rd_atomic; ··· 309 300 #define CQE_CMP_VALID(hdr, raw_cons, cp_bit) \ 310 301 (!!((hdr)->cqe_type_toggle & CQ_BASE_TOGGLE) == \ 311 302 !((raw_cons) & (cp_bit))) 303 + 304 + static inline bool bnxt_qplib_queue_full(struct bnxt_qplib_q *qplib_q) 305 + { 306 + return HWQ_CMP((qplib_q->hwq.prod + qplib_q->q_full_delta), 307 + &qplib_q->hwq) == HWQ_CMP(qplib_q->hwq.cons, 308 + &qplib_q->hwq); 309 + } 312 310 313 311 struct bnxt_qplib_cqe { 314 312 u8 status; ··· 448 432 int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq); 449 433 int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq); 450 434 int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, 451 - int num); 435 + int num, struct bnxt_qplib_qp **qp); 452 436 void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type); 453 437 void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq); 454 438 int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq);
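bnxt_qplib_queue_full() above declares the ring full q_full_delta slots early, which is what keeps the reserved entries (phantom WQE, shadow-QP headroom) out of the application's hands. A sketch of the same test with a 16-entry ring and a delta of 2:

#include <stdbool.h>
#include <stdio.h>

#define MAX_ELEMENTS 16			/* power-of-two ring size */
#define HWQ_CMP(idx) ((idx) & (MAX_ELEMENTS - 1))

struct q {
	unsigned int prod, cons, q_full_delta;
};

static bool queue_full(const struct q *q)
{
	return HWQ_CMP(q->prod + q->q_full_delta) == HWQ_CMP(q->cons);
}

int main(void)
{
	struct q q = { .prod = 0, .cons = 0, .q_full_delta = 2 };
	unsigned int posted = 0;

	while (!queue_full(&q)) {	/* stops 2 slots early */
		q.prod++;
		posted++;
	}
	printf("posted %u of %u slots\n", posted, MAX_ELEMENTS);  /* 14 */
	return 0;
}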
+163 -149
drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
··· 39 39 #include <linux/spinlock.h> 40 40 #include <linux/pci.h> 41 41 #include <linux/prefetch.h> 42 + #include <linux/delay.h> 43 + 42 44 #include "roce_hsi.h" 43 45 #include "qplib_res.h" 44 46 #include "qplib_rcfw.h" 45 47 static void bnxt_qplib_service_creq(unsigned long data); 46 48 47 49 /* Hardware communication channel */ 48 - int bnxt_qplib_rcfw_wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie) 50 + static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie) 49 51 { 50 52 u16 cbit; 51 53 int rc; 52 54 53 - cookie &= RCFW_MAX_COOKIE_VALUE; 54 55 cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; 55 - if (!test_bit(cbit, rcfw->cmdq_bitmap)) 56 - dev_warn(&rcfw->pdev->dev, 57 - "QPLIB: CMD bit %d for cookie 0x%x is not set?", 58 - cbit, cookie); 59 - 60 56 rc = wait_event_timeout(rcfw->waitq, 61 57 !test_bit(cbit, rcfw->cmdq_bitmap), 62 58 msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS)); 63 - if (!rc) { 64 - dev_warn(&rcfw->pdev->dev, 65 - "QPLIB: Bono Error: timeout %d msec, msg {0x%x}\n", 66 - RCFW_CMD_WAIT_TIME_MS, cookie); 67 - } 68 - 69 - return rc; 59 + return rc ? 0 : -ETIMEDOUT; 70 60 }; 71 61 72 - int bnxt_qplib_rcfw_block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie) 62 + static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie) 73 63 { 74 - u32 count = -1; 64 + u32 count = RCFW_BLOCKED_CMD_WAIT_COUNT; 75 65 u16 cbit; 76 66 77 - cookie &= RCFW_MAX_COOKIE_VALUE; 78 67 cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; 79 68 if (!test_bit(cbit, rcfw->cmdq_bitmap)) 80 69 goto done; 81 70 do { 71 + mdelay(1); /* 1m sec */ 82 72 bnxt_qplib_service_creq((unsigned long)rcfw); 83 73 } while (test_bit(cbit, rcfw->cmdq_bitmap) && --count); 84 74 done: 85 - return count; 75 + return count ? 0 : -ETIMEDOUT; 86 76 }; 87 77 88 - void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, 89 - struct cmdq_base *req, void **crsbe, 90 - u8 is_block) 78 + static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req, 79 + struct creq_base *resp, void *sb, u8 is_block) 91 80 { 92 - struct bnxt_qplib_crsq *crsq = &rcfw->crsq; 93 81 struct bnxt_qplib_cmdqe *cmdqe, **cmdq_ptr; 94 82 struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq; 95 - struct bnxt_qplib_hwq *crsb = &rcfw->crsb; 96 - struct bnxt_qplib_crsqe *crsqe = NULL; 97 - struct bnxt_qplib_crsbe **crsb_ptr; 83 + struct bnxt_qplib_crsq *crsqe; 98 84 u32 sw_prod, cmdq_prod; 99 - u8 retry_cnt = 0xFF; 100 - dma_addr_t dma_addr; 101 85 unsigned long flags; 102 86 u32 size, opcode; 103 87 u16 cookie, cbit; 104 88 int pg, idx; 105 89 u8 *preq; 106 90 107 - retry: 108 91 opcode = req->opcode; 109 92 if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) && 110 93 (opcode != CMDQ_BASE_OPCODE_QUERY_FUNC && ··· 95 112 dev_err(&rcfw->pdev->dev, 96 113 "QPLIB: RCFW not initialized, reject opcode 0x%x", 97 114 opcode); 98 - return NULL; 115 + return -EINVAL; 99 116 } 100 117 101 118 if (test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) && 102 119 opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) { 103 120 dev_err(&rcfw->pdev->dev, "QPLIB: RCFW already initialized!"); 104 - return NULL; 121 + return -EINVAL; 105 122 } 106 123 107 124 /* Cmdq are in 16-byte units, each request can consume 1 or more 108 125 * cmdqe 109 126 */ 110 127 spin_lock_irqsave(&cmdq->lock, flags); 111 - if (req->cmd_size > cmdq->max_elements - 112 - ((HWQ_CMP(cmdq->prod, cmdq) - HWQ_CMP(cmdq->cons, cmdq)) & 113 - (cmdq->max_elements - 1))) { 128 + if (req->cmd_size >= HWQ_FREE_SLOTS(cmdq)) { 114 129 dev_err(&rcfw->pdev->dev, "QPLIB: RCFW: CMDQ is full!"); 115 130 
spin_unlock_irqrestore(&cmdq->lock, flags); 116 - 117 - if (!retry_cnt--) 118 - return NULL; 119 - goto retry; 131 + return -EAGAIN; 120 132 } 121 133 122 - retry_cnt = 0xFF; 123 134 124 - cookie = atomic_inc_return(&rcfw->seq_num) & RCFW_MAX_COOKIE_VALUE; 135 + cookie = rcfw->seq_num & RCFW_MAX_COOKIE_VALUE; 125 136 cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; 126 137 if (is_block) 127 138 cookie |= RCFW_CMD_IS_BLOCKING; 139 + 140 + set_bit(cbit, rcfw->cmdq_bitmap); 128 141 req->cookie = cpu_to_le16(cookie); 129 - if (test_and_set_bit(cbit, rcfw->cmdq_bitmap)) { 130 - dev_err(&rcfw->pdev->dev, 131 - "QPLIB: RCFW MAX outstanding cmd reached!"); 132 - atomic_dec(&rcfw->seq_num); 142 + crsqe = &rcfw->crsqe_tbl[cbit]; 143 + if (crsqe->resp) { 133 144 spin_unlock_irqrestore(&cmdq->lock, flags); 134 - 135 - if (!retry_cnt--) 136 - return NULL; 137 - goto retry; 145 + return -EBUSY; 138 146 } 139 - /* Reserve a resp buffer slot if requested */ 140 - if (req->resp_size && crsbe) { 141 - spin_lock(&crsb->lock); 142 - sw_prod = HWQ_CMP(crsb->prod, crsb); 143 - crsb_ptr = (struct bnxt_qplib_crsbe **)crsb->pbl_ptr; 144 - *crsbe = (void *)&crsb_ptr[get_crsb_pg(sw_prod)] 145 - [get_crsb_idx(sw_prod)]; 146 - bnxt_qplib_crsb_dma_next(crsb->pbl_dma_ptr, sw_prod, &dma_addr); 147 - req->resp_addr = cpu_to_le64(dma_addr); 148 - crsb->prod++; 149 - spin_unlock(&crsb->lock); 147 + memset(resp, 0, sizeof(*resp)); 148 + crsqe->resp = (struct creq_qp_event *)resp; 149 + crsqe->resp->cookie = req->cookie; 150 + crsqe->req_size = req->cmd_size; 151 + if (req->resp_size && sb) { 152 + struct bnxt_qplib_rcfw_sbuf *sbuf = sb; 150 153 151 - req->resp_size = (sizeof(struct bnxt_qplib_crsbe) + 152 - BNXT_QPLIB_CMDQE_UNITS - 1) / 153 - BNXT_QPLIB_CMDQE_UNITS; 154 + req->resp_addr = cpu_to_le64(sbuf->dma_addr); 155 + req->resp_size = (sbuf->size + BNXT_QPLIB_CMDQE_UNITS - 1) / 156 + BNXT_QPLIB_CMDQE_UNITS; 154 157 } 158 + 155 159 cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr; 156 160 preq = (u8 *)req; 157 161 size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS; ··· 160 190 preq += min_t(u32, size, sizeof(*cmdqe)); 161 191 size -= min_t(u32, size, sizeof(*cmdqe)); 162 192 cmdq->prod++; 193 + rcfw->seq_num++; 163 194 } while (size > 0); 195 + 196 + rcfw->seq_num++; 164 197 165 198 cmdq_prod = cmdq->prod; 166 199 if (rcfw->flags & FIRMWARE_FIRST_FLAG) { 167 - /* The very first doorbell write is required to set this flag 168 - * which prompts the FW to reset its internal pointers 200 + /* The very first doorbell write 201 + * is required to set this flag 202 + * which prompts the FW to reset 203 + * its internal pointers 169 204 */ 170 205 cmdq_prod |= FIRMWARE_FIRST_FLAG; 171 206 rcfw->flags &= ~FIRMWARE_FIRST_FLAG; 172 207 } 173 - sw_prod = HWQ_CMP(crsq->prod, crsq); 174 - crsqe = &crsq->crsq[sw_prod]; 175 - memset(crsqe, 0, sizeof(*crsqe)); 176 - crsq->prod++; 177 - crsqe->req_size = req->cmd_size; 178 208 179 209 /* ring CMDQ DB */ 210 + wmb(); 180 211 writel(cmdq_prod, rcfw->cmdq_bar_reg_iomem + 181 212 rcfw->cmdq_bar_reg_prod_off); 182 213 writel(RCFW_CMDQ_TRIG_VAL, rcfw->cmdq_bar_reg_iomem + ··· 185 214 done: 186 215 spin_unlock_irqrestore(&cmdq->lock, flags); 187 216 /* Return the CREQ response pointer */ 188 - return crsqe ? 
&crsqe->qp_event : NULL; 217 + return 0; 189 218 } 190 219 220 + int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, 221 + struct cmdq_base *req, 222 + struct creq_base *resp, 223 + void *sb, u8 is_block) 224 + { 225 + struct creq_qp_event *evnt = (struct creq_qp_event *)resp; 226 + u16 cookie; 227 + u8 opcode, retry_cnt = 0xFF; 228 + int rc = 0; 229 + 230 + do { 231 + opcode = req->opcode; 232 + rc = __send_message(rcfw, req, resp, sb, is_block); 233 + cookie = le16_to_cpu(req->cookie) & RCFW_MAX_COOKIE_VALUE; 234 + if (!rc) 235 + break; 236 + 237 + if (!retry_cnt || (rc != -EAGAIN && rc != -EBUSY)) { 238 + /* send failed */ 239 + dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x send failed", 240 + cookie, opcode); 241 + return rc; 242 + } 243 + is_block ? mdelay(1) : usleep_range(500, 1000); 244 + 245 + } while (retry_cnt--); 246 + 247 + if (is_block) 248 + rc = __block_for_resp(rcfw, cookie); 249 + else 250 + rc = __wait_for_resp(rcfw, cookie); 251 + if (rc) { 252 + /* timed out */ 253 + dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x timedout (%d)msec", 254 + cookie, opcode, RCFW_CMD_WAIT_TIME_MS); 255 + return rc; 256 + } 257 + 258 + if (evnt->status) { 259 + /* failed with status */ 260 + dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x status %#x", 261 + cookie, opcode, evnt->status); 262 + rc = -EFAULT; 263 + } 264 + 265 + return rc; 266 + } 191 267 /* Completions */ 192 268 static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw, 193 269 struct creq_func_event *func_event) ··· 278 260 static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, 279 261 struct creq_qp_event *qp_event) 280 262 { 281 - struct bnxt_qplib_crsq *crsq = &rcfw->crsq; 282 263 struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq; 283 - struct bnxt_qplib_crsqe *crsqe; 284 - u16 cbit, cookie, blocked = 0; 264 + struct bnxt_qplib_crsq *crsqe; 285 265 unsigned long flags; 286 - u32 sw_cons; 266 + u16 cbit, blocked = 0; 267 + u16 cookie; 268 + __le16 mcookie; 287 269 288 270 switch (qp_event->event) { 289 271 case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION: ··· 293 275 default: 294 276 /* Command Response */ 295 277 spin_lock_irqsave(&cmdq->lock, flags); 296 - sw_cons = HWQ_CMP(crsq->cons, crsq); 297 - crsqe = &crsq->crsq[sw_cons]; 298 - crsq->cons++; 299 - memcpy(&crsqe->qp_event, qp_event, sizeof(crsqe->qp_event)); 300 - 301 - cookie = le16_to_cpu(crsqe->qp_event.cookie); 278 + cookie = le16_to_cpu(qp_event->cookie); 279 + mcookie = qp_event->cookie; 302 280 blocked = cookie & RCFW_CMD_IS_BLOCKING; 303 281 cookie &= RCFW_MAX_COOKIE_VALUE; 304 282 cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; 283 + crsqe = &rcfw->crsqe_tbl[cbit]; 284 + if (crsqe->resp && 285 + crsqe->resp->cookie == mcookie) { 286 + memcpy(crsqe->resp, qp_event, sizeof(*qp_event)); 287 + crsqe->resp = NULL; 288 + } else { 289 + dev_err(&rcfw->pdev->dev, 290 + "QPLIB: CMD %s resp->cookie = %#x, evnt->cookie = %#x", 291 + crsqe->resp ? "mismatch" : "collision", 292 + crsqe->resp ? 
crsqe->resp->cookie : 0, mcookie); 293 + } 305 294 if (!test_and_clear_bit(cbit, rcfw->cmdq_bitmap)) 306 295 dev_warn(&rcfw->pdev->dev, 307 296 "QPLIB: CMD bit %d was not requested", cbit); 308 - 309 297 cmdq->cons += crsqe->req_size; 310 - spin_unlock_irqrestore(&cmdq->lock, flags); 298 + crsqe->req_size = 0; 299 + 311 300 if (!blocked) 312 301 wake_up(&rcfw->waitq); 313 - break; 302 + spin_unlock_irqrestore(&cmdq->lock, flags); 314 303 } 315 304 return 0; 316 305 } ··· 330 305 struct creq_base *creqe, **creq_ptr; 331 306 u32 sw_cons, raw_cons; 332 307 unsigned long flags; 333 - u32 type; 308 + u32 type, budget = CREQ_ENTRY_POLL_BUDGET; 334 309 335 - /* Service the CREQ until empty */ 310 + /* Service the CREQ until budget is over */ 336 311 spin_lock_irqsave(&creq->lock, flags); 337 312 raw_cons = creq->cons; 338 - while (1) { 313 + while (budget > 0) { 339 314 sw_cons = HWQ_CMP(raw_cons, creq); 340 315 creq_ptr = (struct creq_base **)creq->pbl_ptr; 341 316 creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)]; ··· 345 320 type = creqe->type & CREQ_BASE_TYPE_MASK; 346 321 switch (type) { 347 322 case CREQ_BASE_TYPE_QP_EVENT: 348 - if (!bnxt_qplib_process_qp_event 349 - (rcfw, (struct creq_qp_event *)creqe)) 350 - rcfw->creq_qp_event_processed++; 351 - else { 352 - dev_warn(&rcfw->pdev->dev, "QPLIB: crsqe with"); 353 - dev_warn(&rcfw->pdev->dev, 354 - "QPLIB: type = 0x%x not handled", 355 - type); 356 - } 323 + bnxt_qplib_process_qp_event 324 + (rcfw, (struct creq_qp_event *)creqe); 325 + rcfw->creq_qp_event_processed++; 357 326 break; 358 327 case CREQ_BASE_TYPE_FUNC_EVENT: 359 328 if (!bnxt_qplib_process_func_event ··· 365 346 break; 366 347 } 367 348 raw_cons++; 349 + budget--; 368 350 } 351 + 369 352 if (creq->cons != raw_cons) { 370 353 creq->cons = raw_cons; 371 354 CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, raw_cons, ··· 396 375 /* RCFW */ 397 376 int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw) 398 377 { 399 - struct creq_deinitialize_fw_resp *resp; 400 378 struct cmdq_deinitialize_fw req; 379 + struct creq_deinitialize_fw_resp resp; 401 380 u16 cmd_flags = 0; 381 + int rc; 402 382 403 383 RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags); 404 - resp = (struct creq_deinitialize_fw_resp *) 405 - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 406 - NULL, 0); 407 - if (!resp) 408 - return -EINVAL; 409 - 410 - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) 411 - return -ETIMEDOUT; 412 - 413 - if (resp->status || 414 - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) 415 - return -EFAULT; 384 + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, 385 + NULL, 0); 386 + if (rc) 387 + return rc; 416 388 417 389 clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags); 418 390 return 0; ··· 431 417 int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, 432 418 struct bnxt_qplib_ctx *ctx, int is_virtfn) 433 419 { 434 - struct creq_initialize_fw_resp *resp; 435 420 struct cmdq_initialize_fw req; 421 + struct creq_initialize_fw_resp resp; 436 422 u16 cmd_flags = 0, level; 423 + int rc; 437 424 438 425 RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags); 439 426 ··· 497 482 498 483 skip_ctx_setup: 499 484 req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id); 500 - resp = (struct creq_initialize_fw_resp *) 501 - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 502 - NULL, 0); 503 - if (!resp) { 504 - dev_err(&rcfw->pdev->dev, 505 - "QPLIB: RCFW: INITIALIZE_FW send failed"); 506 - return -EINVAL; 507 - } 508 - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, 
le16_to_cpu(req.cookie))) { 509 - /* Cmd timed out */ 510 - dev_err(&rcfw->pdev->dev, 511 - "QPLIB: RCFW: INITIALIZE_FW timed out"); 512 - return -ETIMEDOUT; 513 - } 514 - if (resp->status || 515 - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { 516 - dev_err(&rcfw->pdev->dev, 517 - "QPLIB: RCFW: INITIALIZE_FW failed"); 518 - return -EINVAL; 519 - } 485 + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, 486 + NULL, 0); 487 + if (rc) 488 + return rc; 520 489 set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags); 521 490 return 0; 522 491 } 523 492 524 493 void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw) 525 494 { 526 - bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->crsb); 527 - kfree(rcfw->crsq.crsq); 495 + kfree(rcfw->crsqe_tbl); 528 496 bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq); 529 497 bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->creq); 530 - 531 498 rcfw->pdev = NULL; 532 499 } 533 500 ··· 536 539 goto fail; 537 540 } 538 541 539 - rcfw->crsq.max_elements = rcfw->cmdq.max_elements; 540 - rcfw->crsq.crsq = kcalloc(rcfw->crsq.max_elements, 541 - sizeof(*rcfw->crsq.crsq), GFP_KERNEL); 542 - if (!rcfw->crsq.crsq) 542 + rcfw->crsqe_tbl = kcalloc(rcfw->cmdq.max_elements, 543 + sizeof(*rcfw->crsqe_tbl), GFP_KERNEL); 544 + if (!rcfw->crsqe_tbl) 543 545 goto fail; 544 546 545 - rcfw->crsb.max_elements = BNXT_QPLIB_CRSBE_MAX_CNT; 546 - if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->crsb, NULL, 0, 547 - &rcfw->crsb.max_elements, 548 - BNXT_QPLIB_CRSBE_UNITS, 0, PAGE_SIZE, 549 - HWQ_TYPE_CTX)) { 550 - dev_err(&rcfw->pdev->dev, 551 - "QPLIB: HW channel CRSB allocation failed"); 552 - goto fail; 553 - } 554 547 return 0; 555 548 556 549 fail: ··· 593 606 int rc; 594 607 595 608 /* General */ 596 - atomic_set(&rcfw->seq_num, 0); 609 + rcfw->seq_num = 0; 597 610 rcfw->flags = FIRMWARE_FIRST_FLAG; 598 611 bmap_size = BITS_TO_LONGS(RCFW_MAX_OUTSTANDING_CMD * 599 612 sizeof(unsigned long)); ··· 622 635 RCFW_PF_COMM_PROD_OFFSET; 623 636 624 637 rcfw->cmdq_bar_reg_trig_off = RCFW_COMM_TRIG_OFFSET; 625 - 626 - /* CRSQ */ 627 - rcfw->crsq.prod = 0; 628 - rcfw->crsq.cons = 0; 629 638 630 639 /* CREQ */ 631 640 rcfw->creq_bar_reg = RCFW_COMM_CONS_PCI_BAR_REGION; ··· 674 691 /* Write to the Bono mailbox register */ 675 692 __iowrite32_copy(rcfw->cmdq_bar_reg_iomem, &init, sizeof(init) / 4); 676 693 return 0; 694 + } 695 + 696 + struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf( 697 + struct bnxt_qplib_rcfw *rcfw, 698 + u32 size) 699 + { 700 + struct bnxt_qplib_rcfw_sbuf *sbuf; 701 + 702 + sbuf = kzalloc(sizeof(*sbuf), GFP_ATOMIC); 703 + if (!sbuf) 704 + return NULL; 705 + 706 + sbuf->size = size; 707 + sbuf->sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf->size, 708 + &sbuf->dma_addr, GFP_ATOMIC); 709 + if (!sbuf->sb) 710 + goto bail; 711 + 712 + return sbuf; 713 + bail: 714 + kfree(sbuf); 715 + return NULL; 716 + } 717 + 718 + void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw, 719 + struct bnxt_qplib_rcfw_sbuf *sbuf) 720 + { 721 + if (sbuf->sb) 722 + dma_free_coherent(&rcfw->pdev->dev, sbuf->size, 723 + sbuf->sb, sbuf->dma_addr); 724 + kfree(sbuf); 677 725 }
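The reworked bnxt_re command path above replaces the old send-then-poll helpers with a single bnxt_qplib_rcfw_send_message() that retries __send_message() only on -EAGAIN/-EBUSY (a full command queue), backing off with mdelay() for blocking callers and usleep_range() otherwise, and only then waits on the cookie. A minimal userspace sketch of that retry shape; fake_send() and the retry budget are illustrative stand-ins:

    #include <errno.h>
    #include <stdio.h>
    #include <unistd.h>

    static int attempts;

    /* Pretend the command queue is full for the first three tries. */
    static int fake_send(void)
    {
            return (++attempts < 4) ? -EAGAIN : 0;
    }

    static int send_with_retry(unsigned int retry_cnt)
    {
            int rc;

            do {
                    rc = fake_send();
                    if (!rc)
                            return 0;       /* queued; caller now waits on the cookie */
                    if (!retry_cnt || (rc != -EAGAIN && rc != -EBUSY))
                            return rc;      /* hard failure or retries exhausted */
                    usleep(500);            /* sleeping back-off; atomic callers spin instead */
            } while (retry_cnt--);

            return rc;
    }

    int main(void)
    {
            printf("rc=%d after %d attempts\n", send_with_retry(0xFF), attempts);
            return 0;
    }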
+20 -41
drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
··· 73 73 #define RCFW_MAX_OUTSTANDING_CMD BNXT_QPLIB_CMDQE_MAX_CNT 74 74 #define RCFW_MAX_COOKIE_VALUE 0x7FFF 75 75 #define RCFW_CMD_IS_BLOCKING 0x8000 76 + #define RCFW_BLOCKED_CMD_WAIT_COUNT 0x4E20 76 77 77 78 /* Cmdq contains a fix number of a 16-Byte slots */ 78 79 struct bnxt_qplib_cmdqe { ··· 94 93 struct bnxt_qplib_crsbe { 95 94 u8 data[1024]; 96 95 }; 97 - 98 - /* CRSQ SB */ 99 - #define BNXT_QPLIB_CRSBE_MAX_CNT 4 100 - #define BNXT_QPLIB_CRSBE_UNITS sizeof(struct bnxt_qplib_crsbe) 101 - #define BNXT_QPLIB_CRSBE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_CRSBE_UNITS) 102 - 103 - #define MAX_CRSB_IDX (BNXT_QPLIB_CRSBE_MAX_CNT - 1) 104 - #define MAX_CRSB_IDX_PER_PG (BNXT_QPLIB_CRSBE_CNT_PER_PG - 1) 105 - 106 - static inline u32 get_crsb_pg(u32 val) 107 - { 108 - return (val & ~MAX_CRSB_IDX_PER_PG) / BNXT_QPLIB_CRSBE_CNT_PER_PG; 109 - } 110 - 111 - static inline u32 get_crsb_idx(u32 val) 112 - { 113 - return val & MAX_CRSB_IDX_PER_PG; 114 - } 115 - 116 - static inline void bnxt_qplib_crsb_dma_next(dma_addr_t *pg_map_arr, 117 - u32 prod, dma_addr_t *dma_addr) 118 - { 119 - *dma_addr = pg_map_arr[(prod) / BNXT_QPLIB_CRSBE_CNT_PER_PG]; 120 - *dma_addr += ((prod) % BNXT_QPLIB_CRSBE_CNT_PER_PG) * 121 - BNXT_QPLIB_CRSBE_UNITS; 122 - } 123 96 124 97 /* CREQ */ 125 98 /* Allocate 1 per QP for async error notification for now */ ··· 133 158 #define CREQ_DB(db, raw_cons, cp_bit) \ 134 159 writel(CREQ_DB_CP_FLAGS | ((raw_cons) & ((cp_bit) - 1)), db) 135 160 161 + #define CREQ_ENTRY_POLL_BUDGET 0x100 162 + 136 163 /* HWQ */ 137 - struct bnxt_qplib_crsqe { 138 - struct creq_qp_event qp_event; 164 + 165 + struct bnxt_qplib_crsq { 166 + struct creq_qp_event *resp; 139 167 u32 req_size; 140 168 }; 141 169 142 - struct bnxt_qplib_crsq { 143 - struct bnxt_qplib_crsqe *crsq; 144 - u32 prod; 145 - u32 cons; 146 - u32 max_elements; 170 + struct bnxt_qplib_rcfw_sbuf { 171 + void *sb; 172 + dma_addr_t dma_addr; 173 + u32 size; 147 174 }; 148 175 149 176 /* RCFW Communication Channels */ ··· 162 185 wait_queue_head_t waitq; 163 186 int (*aeq_handler)(struct bnxt_qplib_rcfw *, 164 187 struct creq_func_event *); 165 - atomic_t seq_num; 188 + u32 seq_num; 166 189 167 190 /* Bar region info */ 168 191 void __iomem *cmdq_bar_reg_iomem; ··· 180 203 181 204 /* Actual Cmd and Resp Queues */ 182 205 struct bnxt_qplib_hwq cmdq; 183 - struct bnxt_qplib_crsq crsq; 184 - struct bnxt_qplib_hwq crsb; 206 + struct bnxt_qplib_crsq *crsqe_tbl; 185 207 }; 186 208 187 209 void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw); ··· 195 219 (struct bnxt_qplib_rcfw *, 196 220 struct creq_func_event *)); 197 221 198 - int bnxt_qplib_rcfw_block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie); 199 - int bnxt_qplib_rcfw_wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie); 200 - void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, 201 - struct cmdq_base *req, void **crsbe, 202 - u8 is_block); 222 + struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf( 223 + struct bnxt_qplib_rcfw *rcfw, 224 + u32 size); 225 + void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw, 226 + struct bnxt_qplib_rcfw_sbuf *sbuf); 227 + int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, 228 + struct cmdq_base *req, struct creq_base *resp, 229 + void *sbuf, u8 is_block); 203 230 204 231 int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw); 205 232 int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
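The header above keeps the cookie layout the new completion matching relies on: bit 15 (RCFW_CMD_IS_BLOCKING) marks a blocking command, the low 15 bits (RCFW_MAX_COOKIE_VALUE) carry the sequence number, and the sequence number modulo the table size selects the crsqe_tbl slot. A standalone illustration of the unpacking; the table size here is an arbitrary stand-in for RCFW_MAX_OUTSTANDING_CMD:

    #include <stdint.h>
    #include <stdio.h>

    #define RCFW_MAX_COOKIE_VALUE   0x7FFF   /* low 15 bits: sequence number */
    #define RCFW_CMD_IS_BLOCKING    0x8000   /* bit 15: blocking command */
    #define MAX_OUTSTANDING         256      /* illustrative slot-table size */

    int main(void)
    {
            uint16_t raw = 0x8123;           /* cookie as carried in the event */
            int blocked = raw & RCFW_CMD_IS_BLOCKING;
            uint16_t cookie = raw & RCFW_MAX_COOKIE_VALUE;
            unsigned int cbit = cookie % MAX_OUTSTANDING;

            printf("blocked=%d cookie=%#x slot=%u\n", !!blocked, cookie, cbit);
            return 0;                        /* blocked=1 cookie=0x123 slot=35 */
    }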
+4
drivers/infiniband/hw/bnxt_re/qplib_res.h
··· 48 48 49 49 #define HWQ_CMP(idx, hwq) ((idx) & ((hwq)->max_elements - 1)) 50 50 51 + #define HWQ_FREE_SLOTS(hwq) (hwq->max_elements - \ 52 + ((HWQ_CMP(hwq->prod, hwq)\ 53 + - HWQ_CMP(hwq->cons, hwq))\ 54 + & (hwq->max_elements - 1))) 51 55 enum bnxt_qplib_hwq_type { 52 56 HWQ_TYPE_CTX, 53 57 HWQ_TYPE_QUEUE,
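HWQ_FREE_SLOTS is the usual power-of-two ring arithmetic: entries in flight are (prod - cons) masked to the ring size, and the free count is the complement. Worked standalone, with a producer index that has already wrapped:

    #include <stdio.h>

    #define RING_SIZE   8                        /* must be a power of two */
    #define CMP(idx)    ((idx) & (RING_SIZE - 1))

    static unsigned int free_slots(unsigned int prod, unsigned int cons)
    {
            return RING_SIZE - ((CMP(prod) - CMP(cons)) & (RING_SIZE - 1));
    }

    int main(void)
    {
            /* prod=10 sits in slot 2, cons=7 in slot 7: 3 entries in flight */
            printf("free=%u\n", free_slots(10, 7));   /* 8 - ((2-7) & 7) = 5 */
            return 0;
    }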
+83 -250
drivers/infiniband/hw/bnxt_re/qplib_sp.c
··· 55 55 struct bnxt_qplib_dev_attr *attr) 56 56 { 57 57 struct cmdq_query_func req; 58 - struct creq_query_func_resp *resp; 58 + struct creq_query_func_resp resp; 59 + struct bnxt_qplib_rcfw_sbuf *sbuf; 59 60 struct creq_query_func_resp_sb *sb; 60 61 u16 cmd_flags = 0; 61 62 u32 temp; 62 63 u8 *tqm_alloc; 63 - int i; 64 + int i, rc = 0; 64 65 65 66 RCFW_CMD_PREP(req, QUERY_FUNC, cmd_flags); 66 67 67 - req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS; 68 - resp = (struct creq_query_func_resp *) 69 - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void **)&sb, 70 - 0); 71 - if (!resp) { 72 - dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC send failed"); 73 - return -EINVAL; 74 - } 75 - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { 76 - /* Cmd timed out */ 77 - dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC timed out"); 78 - return -ETIMEDOUT; 79 - } 80 - if (resp->status || 81 - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { 82 - dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC failed "); 68 + sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb)); 69 + if (!sbuf) { 83 70 dev_err(&rcfw->pdev->dev, 84 - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", 85 - resp->status, le16_to_cpu(req.cookie), 86 - le16_to_cpu(resp->cookie)); 87 - return -EINVAL; 71 + "QPLIB: SP: QUERY_FUNC alloc side buffer failed"); 72 + return -ENOMEM; 88 73 } 74 + 75 + sb = sbuf->sb; 76 + req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS; 77 + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, 78 + (void *)sbuf, 0); 79 + if (rc) 80 + goto bail; 81 + 89 82 /* Extract the context from the side buffer */ 90 83 attr->max_qp = le32_to_cpu(sb->max_qp); 91 84 attr->max_qp_rd_atom = ··· 88 95 sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ? 89 96 BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom; 90 97 attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr); 98 + /* 99 + * 128 WQEs needs to be reserved for the HW (8916). 
Prevent 100 + * reporting the max number 101 + */ 102 + attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS; 91 103 attr->max_qp_sges = sb->max_sge; 92 104 attr->max_cq = le32_to_cpu(sb->max_cq); 93 105 attr->max_cq_wqes = le32_to_cpu(sb->max_cqe); ··· 128 130 attr->tqm_alloc_reqs[i * 4 + 2] = *(++tqm_alloc); 129 131 attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc); 130 132 } 131 - return 0; 133 + 134 + bail: 135 + bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf); 136 + return rc; 132 137 } 133 138 134 139 /* SGID */ ··· 179 178 /* Remove GID from the SGID table */ 180 179 if (update) { 181 180 struct cmdq_delete_gid req; 182 - struct creq_delete_gid_resp *resp; 181 + struct creq_delete_gid_resp resp; 183 182 u16 cmd_flags = 0; 183 + int rc; 184 184 185 185 RCFW_CMD_PREP(req, DELETE_GID, cmd_flags); 186 186 if (sgid_tbl->hw_id[index] == 0xFFFF) { ··· 190 188 return -EINVAL; 191 189 } 192 190 req.gid_index = cpu_to_le16(sgid_tbl->hw_id[index]); 193 - resp = (struct creq_delete_gid_resp *) 194 - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, NULL, 195 - 0); 196 - if (!resp) { 197 - dev_err(&res->pdev->dev, 198 - "QPLIB: SP: DELETE_GID send failed"); 199 - return -EINVAL; 200 - } 201 - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, 202 - le16_to_cpu(req.cookie))) { 203 - /* Cmd timed out */ 204 - dev_err(&res->pdev->dev, 205 - "QPLIB: SP: DELETE_GID timed out"); 206 - return -ETIMEDOUT; 207 - } 208 - if (resp->status || 209 - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { 210 - dev_err(&res->pdev->dev, 211 - "QPLIB: SP: DELETE_GID failed "); 212 - dev_err(&res->pdev->dev, 213 - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", 214 - resp->status, le16_to_cpu(req.cookie), 215 - le16_to_cpu(resp->cookie)); 216 - return -EINVAL; 217 - } 191 + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 192 + (void *)&resp, NULL, 0); 193 + if (rc) 194 + return rc; 218 195 } 219 196 memcpy(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero, 220 197 sizeof(bnxt_qplib_gid_zero)); ··· 215 234 struct bnxt_qplib_res, 216 235 sgid_tbl); 217 236 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 218 - int i, free_idx, rc = 0; 237 + int i, free_idx; 219 238 220 239 if (!sgid_tbl) { 221 240 dev_err(&res->pdev->dev, "QPLIB: SGID table not allocated"); ··· 247 266 } 248 267 if (update) { 249 268 struct cmdq_add_gid req; 250 - struct creq_add_gid_resp *resp; 269 + struct creq_add_gid_resp resp; 251 270 u16 cmd_flags = 0; 252 271 u32 temp32[4]; 253 272 u16 temp16[3]; 273 + int rc; 254 274 255 275 RCFW_CMD_PREP(req, ADD_GID, cmd_flags); 256 276 ··· 272 290 req.src_mac[1] = cpu_to_be16(temp16[1]); 273 291 req.src_mac[2] = cpu_to_be16(temp16[2]); 274 292 275 - resp = (struct creq_add_gid_resp *) 276 - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 277 - NULL, 0); 278 - if (!resp) { 279 - dev_err(&res->pdev->dev, 280 - "QPLIB: SP: ADD_GID send failed"); 281 - return -EINVAL; 282 - } 283 - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, 284 - le16_to_cpu(req.cookie))) { 285 - /* Cmd timed out */ 286 - dev_err(&res->pdev->dev, 287 - "QPIB: SP: ADD_GID timed out"); 288 - return -ETIMEDOUT; 289 - } 290 - if (resp->status || 291 - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { 292 - dev_err(&res->pdev->dev, "QPLIB: SP: ADD_GID failed "); 293 - dev_err(&res->pdev->dev, 294 - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", 295 - resp->status, le16_to_cpu(req.cookie), 296 - le16_to_cpu(resp->cookie)); 297 - return -EINVAL; 298 - } 299 - sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp->xid); 293 + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 294 + 
(void *)&resp, NULL, 0); 295 + if (rc) 296 + return rc; 297 + sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp.xid); 300 298 } 301 299 /* Add GID to the sgid_tbl */ 302 300 memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid)); ··· 287 325 288 326 *index = free_idx; 289 327 /* unlock */ 290 - return rc; 328 + return 0; 291 329 } 292 330 293 331 /* pkeys */ ··· 384 422 { 385 423 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 386 424 struct cmdq_create_ah req; 387 - struct creq_create_ah_resp *resp; 425 + struct creq_create_ah_resp resp; 388 426 u16 cmd_flags = 0; 389 427 u32 temp32[4]; 390 428 u16 temp16[3]; 429 + int rc; 391 430 392 431 RCFW_CMD_PREP(req, CREATE_AH, cmd_flags); 393 432 ··· 413 450 req.dest_mac[1] = cpu_to_le16(temp16[1]); 414 451 req.dest_mac[2] = cpu_to_le16(temp16[2]); 415 452 416 - resp = (struct creq_create_ah_resp *) 417 - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 418 - NULL, 1); 419 - if (!resp) { 420 - dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH send failed"); 421 - return -EINVAL; 422 - } 423 - if (!bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie))) { 424 - /* Cmd timed out */ 425 - dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH timed out"); 426 - return -ETIMEDOUT; 427 - } 428 - if (resp->status || 429 - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { 430 - dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH failed "); 431 - dev_err(&rcfw->pdev->dev, 432 - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", 433 - resp->status, le16_to_cpu(req.cookie), 434 - le16_to_cpu(resp->cookie)); 435 - return -EINVAL; 436 - } 437 - ah->id = le32_to_cpu(resp->xid); 453 + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, 454 + NULL, 1); 455 + if (rc) 456 + return rc; 457 + 458 + ah->id = le32_to_cpu(resp.xid); 438 459 return 0; 439 460 } 440 461 ··· 426 479 { 427 480 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 428 481 struct cmdq_destroy_ah req; 429 - struct creq_destroy_ah_resp *resp; 482 + struct creq_destroy_ah_resp resp; 430 483 u16 cmd_flags = 0; 484 + int rc; 431 485 432 486 /* Clean up the AH table in the device */ 433 487 RCFW_CMD_PREP(req, DESTROY_AH, cmd_flags); 434 488 435 489 req.ah_cid = cpu_to_le32(ah->id); 436 490 437 - resp = (struct creq_destroy_ah_resp *) 438 - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 439 - NULL, 1); 440 - if (!resp) { 441 - dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH send failed"); 442 - return -EINVAL; 443 - } 444 - if (!bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie))) { 445 - /* Cmd timed out */ 446 - dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH timed out"); 447 - return -ETIMEDOUT; 448 - } 449 - if (resp->status || 450 - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { 451 - dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH failed "); 452 - dev_err(&rcfw->pdev->dev, 453 - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", 454 - resp->status, le16_to_cpu(req.cookie), 455 - le16_to_cpu(resp->cookie)); 456 - return -EINVAL; 457 - } 491 + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, 492 + NULL, 1); 493 + if (rc) 494 + return rc; 458 495 return 0; 459 496 } 460 497 ··· 447 516 { 448 517 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 449 518 struct cmdq_deallocate_key req; 450 - struct creq_deallocate_key_resp *resp; 519 + struct creq_deallocate_key_resp resp; 451 520 u16 cmd_flags = 0; 521 + int rc; 452 522 453 523 if (mrw->lkey == 0xFFFFFFFF) { 454 524 dev_info(&res->pdev->dev, ··· 468 536 else 469 537 req.key = cpu_to_le32(mrw->lkey); 470 538 471 - resp = (struct 
creq_deallocate_key_resp *) 472 - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 473 - NULL, 0); 474 - if (!resp) { 475 - dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR send failed"); 476 - return -EINVAL; 477 - } 478 - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { 479 - /* Cmd timed out */ 480 - dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR timed out"); 481 - return -ETIMEDOUT; 482 - } 483 - if (resp->status || 484 - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { 485 - dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR failed "); 486 - dev_err(&res->pdev->dev, 487 - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", 488 - resp->status, le16_to_cpu(req.cookie), 489 - le16_to_cpu(resp->cookie)); 490 - return -EINVAL; 491 - } 539 + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, 540 + NULL, 0); 541 + if (rc) 542 + return rc; 543 + 492 544 /* Free the qplib's MRW memory */ 493 545 if (mrw->hwq.max_elements) 494 546 bnxt_qplib_free_hwq(res->pdev, &mrw->hwq); ··· 484 568 { 485 569 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 486 570 struct cmdq_allocate_mrw req; 487 - struct creq_allocate_mrw_resp *resp; 571 + struct creq_allocate_mrw_resp resp; 488 572 u16 cmd_flags = 0; 489 573 unsigned long tmp; 574 + int rc; 490 575 491 576 RCFW_CMD_PREP(req, ALLOCATE_MRW, cmd_flags); 492 577 ··· 501 584 tmp = (unsigned long)mrw; 502 585 req.mrw_handle = cpu_to_le64(tmp); 503 586 504 - resp = (struct creq_allocate_mrw_resp *) 505 - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 506 - NULL, 0); 507 - if (!resp) { 508 - dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW send failed"); 509 - return -EINVAL; 510 - } 511 - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { 512 - /* Cmd timed out */ 513 - dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW timed out"); 514 - return -ETIMEDOUT; 515 - } 516 - if (resp->status || 517 - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { 518 - dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW failed "); 519 - dev_err(&rcfw->pdev->dev, 520 - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", 521 - resp->status, le16_to_cpu(req.cookie), 522 - le16_to_cpu(resp->cookie)); 523 - return -EINVAL; 524 - } 587 + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 588 + (void *)&resp, NULL, 0); 589 + if (rc) 590 + return rc; 591 + 525 592 if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1) || 526 593 (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) || 527 594 (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B)) 528 - mrw->rkey = le32_to_cpu(resp->xid); 595 + mrw->rkey = le32_to_cpu(resp.xid); 529 596 else 530 - mrw->lkey = le32_to_cpu(resp->xid); 597 + mrw->lkey = le32_to_cpu(resp.xid); 531 598 return 0; 532 599 } 533 600 ··· 520 619 { 521 620 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 522 621 struct cmdq_deregister_mr req; 523 - struct creq_deregister_mr_resp *resp; 622 + struct creq_deregister_mr_resp resp; 524 623 u16 cmd_flags = 0; 525 624 int rc; 526 625 527 626 RCFW_CMD_PREP(req, DEREGISTER_MR, cmd_flags); 528 627 529 628 req.lkey = cpu_to_le32(mrw->lkey); 530 - resp = (struct creq_deregister_mr_resp *) 531 - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 532 - NULL, block); 533 - if (!resp) { 534 - dev_err(&rcfw->pdev->dev, "QPLIB: SP: DEREG_MR send failed"); 535 - return -EINVAL; 536 - } 537 - if (block) 538 - rc = bnxt_qplib_rcfw_block_for_resp(rcfw, 539 - le16_to_cpu(req.cookie)); 540 - else 541 - rc = bnxt_qplib_rcfw_wait_for_resp(rcfw, 542 - le16_to_cpu(req.cookie)); 543 - if (!rc) { 544 - /* Cmd timed 
out */ 545 - dev_err(&res->pdev->dev, "QPLIB: SP: DEREG_MR timed out"); 546 - return -ETIMEDOUT; 547 - } 548 - if (resp->status || 549 - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { 550 - dev_err(&rcfw->pdev->dev, "QPLIB: SP: DEREG_MR failed "); 551 - dev_err(&rcfw->pdev->dev, 552 - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", 553 - resp->status, le16_to_cpu(req.cookie), 554 - le16_to_cpu(resp->cookie)); 555 - return -EINVAL; 556 - } 629 + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 630 + (void *)&resp, NULL, block); 631 + if (rc) 632 + return rc; 557 633 558 634 /* Free the qplib's MR memory */ 559 635 if (mrw->hwq.max_elements) { ··· 547 669 { 548 670 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 549 671 struct cmdq_register_mr req; 550 - struct creq_register_mr_resp *resp; 672 + struct creq_register_mr_resp resp; 551 673 u16 cmd_flags = 0, level; 552 674 int pg_ptrs, pages, i, rc; 553 675 dma_addr_t **pbl_ptr; ··· 608 730 req.key = cpu_to_le32(mr->lkey); 609 731 req.mr_size = cpu_to_le64(mr->total_size); 610 732 611 - resp = (struct creq_register_mr_resp *) 612 - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 613 - NULL, block); 614 - if (!resp) { 615 - dev_err(&res->pdev->dev, "SP: REG_MR send failed"); 616 - rc = -EINVAL; 733 + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 734 + (void *)&resp, NULL, block); 735 + if (rc) 617 736 goto fail; 618 - } 619 - if (block) 620 - rc = bnxt_qplib_rcfw_block_for_resp(rcfw, 621 - le16_to_cpu(req.cookie)); 622 - else 623 - rc = bnxt_qplib_rcfw_wait_for_resp(rcfw, 624 - le16_to_cpu(req.cookie)); 625 - if (!rc) { 626 - /* Cmd timed out */ 627 - dev_err(&res->pdev->dev, "SP: REG_MR timed out"); 628 - rc = -ETIMEDOUT; 629 - goto fail; 630 - } 631 - if (resp->status || 632 - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { 633 - dev_err(&res->pdev->dev, "QPLIB: SP: REG_MR failed "); 634 - dev_err(&res->pdev->dev, 635 - "QPLIB: SP: with status 0x%x cmdq 0x%x resp 0x%x", 636 - resp->status, le16_to_cpu(req.cookie), 637 - le16_to_cpu(resp->cookie)); 638 - rc = -EINVAL; 639 - goto fail; 640 - } 737 + 641 738 return 0; 642 739 643 740 fail: ··· 657 804 { 658 805 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 659 806 struct cmdq_map_tc_to_cos req; 660 - struct creq_map_tc_to_cos_resp *resp; 807 + struct creq_map_tc_to_cos_resp resp; 661 808 u16 cmd_flags = 0; 662 - int tleft; 809 + int rc = 0; 663 810 664 811 RCFW_CMD_PREP(req, MAP_TC_TO_COS, cmd_flags); 665 812 req.cos0 = cpu_to_le16(cids[0]); 666 813 req.cos1 = cpu_to_le16(cids[1]); 667 814 668 - resp = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, NULL, 0); 669 - if (!resp) { 670 - dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS send failed"); 671 - return -EINVAL; 672 - } 673 - 674 - tleft = bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie)); 675 - if (!tleft) { 676 - dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS timed out"); 677 - return -ETIMEDOUT; 678 - } 679 - 680 - if (resp->status || 681 - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { 682 - dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS failed "); 683 - dev_err(&res->pdev->dev, 684 - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", 685 - resp->status, le16_to_cpu(req.cookie), 686 - le16_to_cpu(resp->cookie)); 687 - return -EINVAL; 688 - } 689 - 815 + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 816 + (void *)&resp, NULL, 0); 690 817 return 0; 691 818 }
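All the qplib_sp.c conversions above share one shape: allocate a DMA side buffer where the command returns bulk data, send through bnxt_qplib_rcfw_send_message(), copy the results out, and release everything on a single exit path, as bnxt_qplib_get_dev_attr() now does. A stripped-down userspace analogue of that control flow; send_cmd() and the calloc() buffer are stand-ins for the real command and DMA allocation:

    #include <errno.h>
    #include <stdlib.h>

    static int send_cmd(void *sb) { (void)sb; return 0; }   /* stand-in */

    static int query_func(unsigned int *max_qp)
    {
            unsigned int *sb = calloc(1, sizeof(*sb));      /* side-buffer stand-in */
            int rc;

            if (!sb)
                    return -ENOMEM;

            rc = send_cmd(sb);
            if (rc)
                    goto bail;            /* error path still frees the buffer */

            *max_qp = *sb;                /* copy results out before freeing */
    bail:
            free(sb);
            return rc;
    }

    int main(void)
    {
            unsigned int max_qp = 0;

            return query_func(&max_qp) ? 1 : 0;
    }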
+2
drivers/infiniband/hw/bnxt_re/qplib_sp.h
··· 40 40 #ifndef __BNXT_QPLIB_SP_H__ 41 41 #define __BNXT_QPLIB_SP_H__ 42 42 43 + #define BNXT_QPLIB_RESERVED_QP_WRS 128 44 + 43 45 struct bnxt_qplib_dev_attr { 44 46 char fw_ver[32]; 45 47 u16 max_sgid;
+7 -3
drivers/infiniband/hw/cxgb4/device.c
··· 767 767 kfree(entry); 768 768 } 769 769 770 - list_for_each_safe(pos, nxt, &uctx->qpids) { 770 + list_for_each_safe(pos, nxt, &uctx->cqids) { 771 771 entry = list_entry(pos, struct c4iw_qid_list, entry); 772 772 list_del_init(&entry->entry); 773 773 kfree(entry); ··· 880 880 rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free"); 881 881 if (!rdev->free_workq) { 882 882 err = -ENOMEM; 883 - goto err_free_status_page; 883 + goto err_free_status_page_and_wr_log; 884 884 } 885 885 886 886 rdev->status_page->db_off = 0; 887 887 888 888 return 0; 889 - err_free_status_page: 889 + err_free_status_page_and_wr_log: 890 + if (c4iw_wr_log && rdev->wr_log) 891 + kfree(rdev->wr_log); 890 892 free_page((unsigned long)rdev->status_page); 891 893 destroy_ocqp_pool: 892 894 c4iw_ocqp_pool_destroy(rdev); ··· 905 903 { 906 904 destroy_workqueue(rdev->free_workq); 907 905 kfree(rdev->wr_log); 906 + c4iw_release_dev_ucontext(rdev, &rdev->uctx); 908 907 free_page((unsigned long)rdev->status_page); 909 908 c4iw_pblpool_destroy(rdev); 910 909 c4iw_rqtpool_destroy(rdev); 910 + c4iw_ocqp_pool_destroy(rdev); 911 911 c4iw_destroy_resource(&rdev->resource); 912 912 } 913 913
+4 -2
drivers/infiniband/hw/mlx5/main.c
··· 3691 3691 dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status; 3692 3692 dev->ib_dev.get_port_immutable = mlx5_port_immutable; 3693 3693 dev->ib_dev.get_dev_fw_str = get_dev_fw_str; 3694 - dev->ib_dev.alloc_rdma_netdev = mlx5_ib_alloc_rdma_netdev; 3695 - dev->ib_dev.free_rdma_netdev = mlx5_ib_free_rdma_netdev; 3694 + if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads)) { 3695 + dev->ib_dev.alloc_rdma_netdev = mlx5_ib_alloc_rdma_netdev; 3696 + dev->ib_dev.free_rdma_netdev = mlx5_ib_free_rdma_netdev; 3697 + } 3696 3698 if (mlx5_core_is_pf(mdev)) { 3697 3699 dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config; 3698 3700 dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state;
+4 -1
drivers/infiniband/hw/qedr/qedr.h
··· 58 58 #define QEDR_MSG_QP " QP" 59 59 #define QEDR_MSG_GSI " GSI" 60 60 61 - #define QEDR_CQ_MAGIC_NUMBER (0x11223344) 61 + #define QEDR_CQ_MAGIC_NUMBER (0x11223344) 62 + 63 + #define FW_PAGE_SIZE (RDMA_RING_PAGE_SIZE) 64 + #define FW_PAGE_SHIFT (12) 62 65 63 66 struct qedr_dev; 64 67
+39 -25
drivers/infiniband/hw/qedr/verbs.c
··· 653 653 654 654 static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem, 655 655 struct qedr_pbl *pbl, 656 - struct qedr_pbl_info *pbl_info) 656 + struct qedr_pbl_info *pbl_info, u32 pg_shift) 657 657 { 658 658 int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0; 659 + u32 fw_pg_cnt, fw_pg_per_umem_pg; 659 660 struct qedr_pbl *pbl_tbl; 660 661 struct scatterlist *sg; 661 662 struct regpair *pbe; 663 + u64 pg_addr; 662 664 int entry; 663 - u32 addr; 664 665 665 666 if (!pbl_info->num_pbes) 666 667 return; ··· 684 683 685 684 shift = umem->page_shift; 686 685 686 + fw_pg_per_umem_pg = BIT(umem->page_shift - pg_shift); 687 + 687 688 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { 688 689 pages = sg_dma_len(sg) >> shift; 690 + pg_addr = sg_dma_address(sg); 689 691 for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) { 690 - /* store the page address in pbe */ 691 - pbe->lo = cpu_to_le32(sg_dma_address(sg) + 692 - (pg_cnt << shift)); 693 - addr = upper_32_bits(sg_dma_address(sg) + 694 - (pg_cnt << shift)); 695 - pbe->hi = cpu_to_le32(addr); 696 - pbe_cnt++; 697 - total_num_pbes++; 698 - pbe++; 692 + for (fw_pg_cnt = 0; fw_pg_cnt < fw_pg_per_umem_pg;) { 693 + pbe->lo = cpu_to_le32(pg_addr); 694 + pbe->hi = cpu_to_le32(upper_32_bits(pg_addr)); 699 695 700 - if (total_num_pbes == pbl_info->num_pbes) 701 - return; 696 + pg_addr += BIT(pg_shift); 697 + pbe_cnt++; 698 + total_num_pbes++; 699 + pbe++; 702 700 703 - /* If the given pbl is full storing the pbes, 704 - * move to next pbl. 705 - */ 706 - if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) { 707 - pbl_tbl++; 708 - pbe = (struct regpair *)pbl_tbl->va; 709 - pbe_cnt = 0; 701 + if (total_num_pbes == pbl_info->num_pbes) 702 + return; 703 + 704 + /* If the given pbl is full storing the pbes, 705 + * move to next pbl. 706 + */ 707 + if (pbe_cnt == 708 + (pbl_info->pbl_size / sizeof(u64))) { 709 + pbl_tbl++; 710 + pbe = (struct regpair *)pbl_tbl->va; 711 + pbe_cnt = 0; 712 + } 713 + 714 + fw_pg_cnt++; 710 715 } 711 716 } 712 717 } ··· 761 754 u64 buf_addr, size_t buf_len, 762 755 int access, int dmasync) 763 756 { 764 - int page_cnt; 757 + u32 fw_pages; 765 758 int rc; 766 759 767 760 q->buf_addr = buf_addr; ··· 773 766 return PTR_ERR(q->umem); 774 767 } 775 768 776 - page_cnt = ib_umem_page_count(q->umem); 777 - rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, page_cnt, 0); 769 + fw_pages = ib_umem_page_count(q->umem) << 770 + (q->umem->page_shift - FW_PAGE_SHIFT); 771 + 772 + rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0); 778 773 if (rc) 779 774 goto err0; 780 775 ··· 786 777 goto err0; 787 778 } 788 779 789 - qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info); 780 + qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info, 781 + FW_PAGE_SHIFT); 790 782 791 783 return 0; 792 784 ··· 2236 2226 goto err1; 2237 2227 2238 2228 qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table, 2239 - &mr->info.pbl_info); 2229 + &mr->info.pbl_info, mr->umem->page_shift); 2240 2230 2241 2231 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid); 2242 2232 if (rc) { ··· 3218 3208 break; 3219 3209 case IB_WC_REG_MR: 3220 3210 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++; 3211 + break; 3212 + case IB_WC_RDMA_READ: 3213 + case IB_WC_SEND: 3214 + wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len; 3221 3215 break; 3222 3216 default: 3223 3217 break;
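The qedr fix decouples the host page size from the firmware's 4K pages: a host page of 2^page_shift bytes is split into 2^(page_shift - FW_PAGE_SHIFT) firmware PBEs, both when sizing the PBL and when populating it. The arithmetic, worked standalone for a hypothetical 64K-page host:

    #include <stdio.h>

    #define FW_PAGE_SHIFT 12                 /* firmware always uses 4K pages */

    int main(void)
    {
            unsigned int page_shift = 16;    /* e.g. a 64K-page host */
            unsigned int umem_pages = 8;

            /* each host page yields 2^(16 - 12) = 16 firmware pages */
            unsigned int fw_pg_per_umem_pg = 1U << (page_shift - FW_PAGE_SHIFT);
            unsigned int fw_pages = umem_pages << (page_shift - FW_PAGE_SHIFT);

            printf("%u host pages -> %u fw pages (%u per host page)\n",
                   umem_pages, fw_pages, fw_pg_per_umem_pg);
            return 0;                        /* 8 host pages -> 128 fw pages */
    }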
+2 -7
drivers/infiniband/sw/rxe/rxe_verbs.c
··· 740 740 741 741 sge = ibwr->sg_list; 742 742 for (i = 0; i < num_sge; i++, sge++) { 743 - if (qp->is_user && copy_from_user(p, (__user void *) 744 - (uintptr_t)sge->addr, sge->length)) 745 - return -EFAULT; 746 - 747 - else if (!qp->is_user) 748 - memcpy(p, (void *)(uintptr_t)sge->addr, 749 - sge->length); 743 + memcpy(p, (void *)(uintptr_t)sge->addr, 744 + sge->length); 750 745 751 746 p += sge->length; 752 747 }
-1
drivers/infiniband/ulp/ipoib/ipoib_ib.c
··· 863 863 set_bit(IPOIB_STOP_REAPER, &priv->flags); 864 864 cancel_delayed_work(&priv->ah_reap_task); 865 865 set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags); 866 - napi_enable(&priv->napi); 867 866 ipoib_ib_dev_stop(dev); 868 867 return -1; 869 868 }
+13 -2
drivers/infiniband/ulp/ipoib/ipoib_main.c
··· 1596 1596 1597 1597 ipoib_transport_dev_cleanup(dev); 1598 1598 1599 + netif_napi_del(&priv->napi); 1600 + 1599 1601 ipoib_cm_dev_cleanup(dev); 1600 1602 1601 1603 kfree(priv->rx_ring); ··· 1651 1649 kfree(priv->rx_ring); 1652 1650 1653 1651 out: 1652 + netif_napi_del(&priv->napi); 1654 1653 return -ENOMEM; 1655 1654 } 1656 1655 ··· 2240 2237 2241 2238 device_init_failed: 2242 2239 free_netdev(priv->dev); 2240 + kfree(priv); 2243 2241 2244 2242 alloc_mem_failed: 2245 2243 return ERR_PTR(result); ··· 2281 2277 2282 2278 static void ipoib_remove_one(struct ib_device *device, void *client_data) 2283 2279 { 2284 - struct ipoib_dev_priv *priv, *tmp; 2280 + struct ipoib_dev_priv *priv, *tmp, *cpriv, *tcpriv; 2285 2281 struct list_head *dev_list = client_data; 2286 2282 2287 2283 if (!dev_list) ··· 2304 2300 flush_workqueue(priv->wq); 2305 2301 2306 2302 unregister_netdev(priv->dev); 2307 - free_netdev(priv->dev); 2303 + if (device->free_rdma_netdev) 2304 + device->free_rdma_netdev(priv->dev); 2305 + else 2306 + free_netdev(priv->dev); 2307 + 2308 + list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) 2309 + kfree(cpriv); 2310 + 2308 2311 kfree(priv); 2309 2312 } 2310 2313
+7 -4
drivers/infiniband/ulp/ipoib/ipoib_vlan.c
··· 133 133 snprintf(intf_name, sizeof intf_name, "%s.%04x", 134 134 ppriv->dev->name, pkey); 135 135 136 + if (!rtnl_trylock()) 137 + return restart_syscall(); 138 + 136 139 priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name); 137 140 if (!priv) 138 141 return -ENOMEM; 139 - 140 - if (!rtnl_trylock()) 141 - return restart_syscall(); 142 142 143 143 down_write(&ppriv->vlan_rwsem); 144 144 ··· 167 167 168 168 rtnl_unlock(); 169 169 170 - if (result) 170 + if (result) { 171 171 free_netdev(priv->dev); 172 + kfree(priv); 173 + } 172 174 173 175 return result; 174 176 } ··· 211 209 212 210 if (dev) { 213 211 free_netdev(dev); 212 + kfree(priv); 214 213 return 0; 215 214 } 216 215
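Moving rtnl_trylock() ahead of ipoib_intf_alloc() matters because the trylock path returns restart_syscall(): taken after the allocation, that early return leaked the freshly allocated interface (note the !priv path above now returns -ENOMEM with the RTNL still held, apparently left for a follow-up). A userspace analogue of the corrected ordering, with pthread_mutex_trylock() standing in for rtnl_trylock():

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;

    static int vlan_add(void)
    {
            void *priv;

            if (pthread_mutex_trylock(&rtnl))
                    return -EAGAIN;          /* bail before anything is allocated */

            priv = malloc(128);              /* the allocation now happens locked */
            if (!priv) {
                    pthread_mutex_unlock(&rtnl);
                    return -ENOMEM;
            }

            /* ... register the child interface ... */

            pthread_mutex_unlock(&rtnl);
            free(priv);                      /* illustration only; real code keeps priv */
            return 0;
    }

    int main(void)
    {
            printf("vlan_add: %d\n", vlan_add());
            return 0;
    }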
+2 -2
drivers/leds/leds-bcm6328.c
··· 242 242 243 243 spin_lock_irqsave(lock, flags); 244 244 val = bcm6328_led_read(addr); 245 - val |= (BIT(reg) << (((sel % 4) * 4) + 16)); 245 + val |= (BIT(reg % 4) << (((sel % 4) * 4) + 16)); 246 246 bcm6328_led_write(addr, val); 247 247 spin_unlock_irqrestore(lock, flags); 248 248 } ··· 269 269 270 270 spin_lock_irqsave(lock, flags); 271 271 val = bcm6328_led_read(addr); 272 - val |= (BIT(reg) << ((sel % 4) * 4)); 272 + val |= (BIT(reg % 4) << ((sel % 4) * 4)); 273 273 bcm6328_led_write(addr, val); 274 274 spin_unlock_irqrestore(lock, flags); 275 275 }
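In the shift math above, (sel % 4) * 4 picks one of four 4-bit fields in the register and reg % 4 the bit within that field; without the mask, BIT(reg) can land outside the intended field. A worked check for reg = 5, sel = 6:

    #include <stdio.h>

    #define BIT(n) (1UL << (n))

    int main(void)
    {
            unsigned int reg = 5, sel = 6;

            unsigned long buggy = BIT(reg) << (((sel % 4) * 4) + 16);
            unsigned long fixed = BIT(reg % 4) << (((sel % 4) * 4) + 16);

            /* sel=6 selects the field at bits 24..27; BIT(5) overshoots it */
            printf("buggy=%#010lx fixed=%#010lx\n", buggy, fixed);
            /* buggy=0x20000000 (bit 29), fixed=0x02000000 (bit 25) */
            return 0;
    }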
-31
drivers/leds/trigger/ledtrig-heartbeat.c
··· 20 20 #include <linux/sched/loadavg.h> 21 21 #include <linux/leds.h> 22 22 #include <linux/reboot.h> 23 - #include <linux/suspend.h> 24 23 #include "../leds.h" 25 24 26 25 static int panic_heartbeats; ··· 162 163 .deactivate = heartbeat_trig_deactivate, 163 164 }; 164 165 165 - static int heartbeat_pm_notifier(struct notifier_block *nb, 166 - unsigned long pm_event, void *unused) 167 - { 168 - int rc; 169 - 170 - switch (pm_event) { 171 - case PM_SUSPEND_PREPARE: 172 - case PM_HIBERNATION_PREPARE: 173 - case PM_RESTORE_PREPARE: 174 - led_trigger_unregister(&heartbeat_led_trigger); 175 - break; 176 - case PM_POST_SUSPEND: 177 - case PM_POST_HIBERNATION: 178 - case PM_POST_RESTORE: 179 - rc = led_trigger_register(&heartbeat_led_trigger); 180 - if (rc) 181 - pr_err("could not re-register heartbeat trigger\n"); 182 - break; 183 - default: 184 - break; 185 - } 186 - return NOTIFY_DONE; 187 - } 188 - 189 166 static int heartbeat_reboot_notifier(struct notifier_block *nb, 190 167 unsigned long code, void *unused) 191 168 { ··· 175 200 panic_heartbeats = 1; 176 201 return NOTIFY_DONE; 177 202 } 178 - 179 - static struct notifier_block heartbeat_pm_nb = { 180 - .notifier_call = heartbeat_pm_notifier, 181 - }; 182 203 183 204 static struct notifier_block heartbeat_reboot_nb = { 184 205 .notifier_call = heartbeat_reboot_notifier, ··· 192 221 atomic_notifier_chain_register(&panic_notifier_list, 193 222 &heartbeat_panic_nb); 194 223 register_reboot_notifier(&heartbeat_reboot_nb); 195 - register_pm_notifier(&heartbeat_pm_nb); 196 224 } 197 225 return rc; 198 226 } 199 227 200 228 static void __exit heartbeat_trig_exit(void) 201 229 { 202 - unregister_pm_notifier(&heartbeat_pm_nb); 203 230 unregister_reboot_notifier(&heartbeat_reboot_nb); 204 231 atomic_notifier_chain_unregister(&panic_notifier_list, 205 232 &heartbeat_panic_nb);
+9
drivers/mmc/host/meson-gx-mmc.c
··· 210 210 int i; 211 211 bool use_desc_chain_mode = true; 212 212 213 + /* 214 + * Broken SDIO with AP6255-based WiFi on Khadas VIM Pro has been 215 + * reported. For some strange reason this occurs in descriptor 216 + * chain mode only. So let's fall back to bounce buffer mode 217 + * for command SD_IO_RW_EXTENDED. 218 + */ 219 + if (mrq->cmd->opcode == SD_IO_RW_EXTENDED) 220 + return; 221 + 213 222 for_each_sg(data->sg, sg, data->sg_len, i) 214 223 /* check for 8 byte alignment */ 215 224 if (sg->offset & 7) {
+1 -1
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
··· 2647 2647 priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */ 2648 2648 2649 2649 /* device used for DMA mapping */ 2650 - arch_setup_dma_ops(dev, 0, 0, NULL, false); 2650 + set_dma_ops(dev, get_dma_ops(&pdev->dev)); 2651 2651 err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40)); 2652 2652 if (err) { 2653 2653 dev_err(dev, "dma_coerce_mask_and_coherent() failed\n");
+2
drivers/net/ethernet/freescale/fman/mac.c
··· 623 623 goto no_mem; 624 624 } 625 625 626 + set_dma_ops(&pdev->dev, get_dma_ops(priv->dev)); 627 + 626 628 ret = platform_device_add_data(pdev, &data, sizeof(data)); 627 629 if (ret) 628 630 goto err;
+14 -2
drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
··· 288 288 289 289 /* Force 1000M Link, Default is 0x0200 */ 290 290 phy_write(phy_dev, 7, 0x20C); 291 - phy_write(phy_dev, HNS_PHY_PAGE_REG, 0); 292 291 293 - /* Enable PHY loop-back */ 292 + /* Powerup Fiber */ 293 + phy_write(phy_dev, HNS_PHY_PAGE_REG, 1); 294 + val = phy_read(phy_dev, COPPER_CONTROL_REG); 295 + val &= ~PHY_POWER_DOWN; 296 + phy_write(phy_dev, COPPER_CONTROL_REG, val); 297 + 298 + /* Enable Phy Loopback */ 299 + phy_write(phy_dev, HNS_PHY_PAGE_REG, 0); 294 300 val = phy_read(phy_dev, COPPER_CONTROL_REG); 295 301 val |= PHY_LOOP_BACK; 296 302 val &= ~PHY_POWER_DOWN; ··· 305 299 phy_write(phy_dev, HNS_PHY_PAGE_REG, 0xFA); 306 300 phy_write(phy_dev, 1, 0x400); 307 301 phy_write(phy_dev, 7, 0x200); 302 + 303 + phy_write(phy_dev, HNS_PHY_PAGE_REG, 1); 304 + val = phy_read(phy_dev, COPPER_CONTROL_REG); 305 + val |= PHY_POWER_DOWN; 306 + phy_write(phy_dev, COPPER_CONTROL_REG, val); 307 + 308 308 phy_write(phy_dev, HNS_PHY_PAGE_REG, 0); 309 309 phy_write(phy_dev, 9, 0xF00); 310 310
+4 -4
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
··· 1391 1391 SOF_TIMESTAMPING_RX_HARDWARE | 1392 1392 SOF_TIMESTAMPING_RAW_HARDWARE; 1393 1393 1394 - info->tx_types = (BIT(1) << HWTSTAMP_TX_OFF) | 1395 - (BIT(1) << HWTSTAMP_TX_ON); 1394 + info->tx_types = BIT(HWTSTAMP_TX_OFF) | 1395 + BIT(HWTSTAMP_TX_ON); 1396 1396 1397 - info->rx_filters = (BIT(1) << HWTSTAMP_FILTER_NONE) | 1398 - (BIT(1) << HWTSTAMP_FILTER_ALL); 1397 + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | 1398 + BIT(HWTSTAMP_FILTER_ALL); 1399 1399 1400 1400 return 0; 1401 1401 }
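The old expressions shifted BIT(1), i.e. the constant 2, by the enum value, so the advertised capability masks were off by one bit position; HWTSTAMP_TX_OFF is 0 and HWTSTAMP_TX_ON is 1 in the UAPI, and the mask must set exactly those bit numbers. Worked out:

    #include <stdio.h>

    #define BIT(n) (1U << (n))

    enum { HWTSTAMP_TX_OFF = 0, HWTSTAMP_TX_ON = 1 };  /* as in linux/net_tstamp.h */

    int main(void)
    {
            unsigned int buggy = (BIT(1) << HWTSTAMP_TX_OFF) | (BIT(1) << HWTSTAMP_TX_ON);
            unsigned int fixed = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);

            /* BIT(1) << n is 2 << n: the old mask advertised modes 1 and 2 */
            printf("buggy=%#x fixed=%#x\n", buggy, fixed);   /* buggy=0x6 fixed=0x3 */
            return 0;
    }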
+2 -1
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 4265 4265 return netdev; 4266 4266 4267 4267 err_cleanup_nic: 4268 - profile->cleanup(priv); 4268 + if (profile->cleanup) 4269 + profile->cleanup(priv); 4269 4270 free_netdev(netdev); 4270 4271 4271 4272 return NULL;
+2
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
··· 796 796 params->tx_max_inline = mlx5e_get_max_inline_cap(mdev); 797 797 params->num_tc = 1; 798 798 params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; 799 + 800 + mlx5_query_min_inline(mdev, &params->tx_min_inline_mode); 799 801 } 800 802 801 803 static void mlx5e_build_rep_netdev(struct net_device *netdev)
-1
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 1060 1060 {MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0, 2, offsetof(struct pedit_headers, eth.h_source[4])}, 1061 1061 {MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE, 2, offsetof(struct pedit_headers, eth.h_proto)}, 1062 1062 1063 - {MLX5_ACTION_IN_FIELD_OUT_IP_DSCP, 1, offsetof(struct pedit_headers, ip4.tos)}, 1064 1063 {MLX5_ACTION_IN_FIELD_OUT_IP_TTL, 1, offsetof(struct pedit_headers, ip4.ttl)}, 1065 1064 {MLX5_ACTION_IN_FIELD_OUT_SIPV4, 4, offsetof(struct pedit_headers, ip4.saddr)}, 1066 1065 {MLX5_ACTION_IN_FIELD_OUT_DIPV4, 4, offsetof(struct pedit_headers, ip4.daddr)},
+40 -37
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
··· 903 903 return 0; 904 904 } 905 905 906 - int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode) 906 + static int mlx5_devlink_eswitch_check(struct devlink *devlink) 907 907 { 908 - struct mlx5_core_dev *dev; 909 - u16 cur_mlx5_mode, mlx5_mode = 0; 908 + struct mlx5_core_dev *dev = devlink_priv(devlink); 910 909 911 - dev = devlink_priv(devlink); 910 + if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) 911 + return -EOPNOTSUPP; 912 912 913 913 if (!MLX5_CAP_GEN(dev, vport_group_manager)) 914 914 return -EOPNOTSUPP; 915 915 916 - cur_mlx5_mode = dev->priv.eswitch->mode; 917 - 918 - if (cur_mlx5_mode == SRIOV_NONE) 916 + if (dev->priv.eswitch->mode == SRIOV_NONE) 919 917 return -EOPNOTSUPP; 918 + 919 + return 0; 920 + } 921 + 922 + int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode) 923 + { 924 + struct mlx5_core_dev *dev = devlink_priv(devlink); 925 + u16 cur_mlx5_mode, mlx5_mode = 0; 926 + int err; 927 + 928 + err = mlx5_devlink_eswitch_check(devlink); 929 + if (err) 930 + return err; 931 + 932 + cur_mlx5_mode = dev->priv.eswitch->mode; 920 933 921 934 if (esw_mode_from_devlink(mode, &mlx5_mode)) 922 935 return -EINVAL; ··· 947 934 948 935 int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) 949 936 { 950 - struct mlx5_core_dev *dev; 937 + struct mlx5_core_dev *dev = devlink_priv(devlink); 938 + int err; 951 939 952 - dev = devlink_priv(devlink); 953 - 954 - if (!MLX5_CAP_GEN(dev, vport_group_manager)) 955 - return -EOPNOTSUPP; 956 - 957 - if (dev->priv.eswitch->mode == SRIOV_NONE) 958 - return -EOPNOTSUPP; 940 + err = mlx5_devlink_eswitch_check(devlink); 941 + if (err) 942 + return err; 959 943 960 944 return esw_mode_to_devlink(dev->priv.eswitch->mode, mode); 961 945 } ··· 961 951 { 962 952 struct mlx5_core_dev *dev = devlink_priv(devlink); 963 953 struct mlx5_eswitch *esw = dev->priv.eswitch; 964 - int num_vports = esw->enabled_vports; 965 954 int err, vport; 966 955 u8 mlx5_mode; 967 956 968 - if (!MLX5_CAP_GEN(dev, vport_group_manager)) 969 - return -EOPNOTSUPP; 970 - 971 - if (esw->mode == SRIOV_NONE) 972 - return -EOPNOTSUPP; 957 + err = mlx5_devlink_eswitch_check(devlink); 958 + if (err) 959 + return err; 973 960 974 961 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) { 975 962 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: ··· 989 982 if (err) 990 983 goto out; 991 984 992 - for (vport = 1; vport < num_vports; vport++) { 985 + for (vport = 1; vport < esw->enabled_vports; vport++) { 993 986 err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode); 994 987 if (err) { 995 988 esw_warn(dev, "Failed to set min inline on vport %d\n", ··· 1014 1007 { 1015 1008 struct mlx5_core_dev *dev = devlink_priv(devlink); 1016 1009 struct mlx5_eswitch *esw = dev->priv.eswitch; 1010 + int err; 1017 1011 1018 - if (!MLX5_CAP_GEN(dev, vport_group_manager)) 1019 - return -EOPNOTSUPP; 1020 - 1021 - if (esw->mode == SRIOV_NONE) 1022 - return -EOPNOTSUPP; 1012 + err = mlx5_devlink_eswitch_check(devlink); 1013 + if (err) 1014 + return err; 1023 1015 1024 1016 return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode); 1025 1017 } ··· 1065 1059 struct mlx5_eswitch *esw = dev->priv.eswitch; 1066 1060 int err; 1067 1061 1068 - if (!MLX5_CAP_GEN(dev, vport_group_manager)) 1069 - return -EOPNOTSUPP; 1070 - 1071 - if (esw->mode == SRIOV_NONE) 1072 - return -EOPNOTSUPP; 1062 + err = mlx5_devlink_eswitch_check(devlink); 1063 + if (err) 1064 + return err; 1073 1065 1074 1066 if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE && 1075 1067 
(!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) || ··· 1106 1102 { 1107 1103 struct mlx5_core_dev *dev = devlink_priv(devlink); 1108 1104 struct mlx5_eswitch *esw = dev->priv.eswitch; 1105 + int err; 1109 1106 1110 - if (!MLX5_CAP_GEN(dev, vport_group_manager)) 1111 - return -EOPNOTSUPP; 1112 - 1113 - if (esw->mode == SRIOV_NONE) 1114 - return -EOPNOTSUPP; 1107 + err = mlx5_devlink_eswitch_check(devlink); 1108 + if (err) 1109 + return err; 1115 1110 1116 1111 *encap = esw->offloads.encap; 1117 1112 return 0;
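The eswitch refactor folds the vport_group_manager and SRIOV_NONE tests duplicated across five devlink ops into one mlx5_devlink_eswitch_check(), and adds a port-type-is-Ethernet test while at it. The shape, reduced to illustrative stubs:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct dev { bool eth_port; bool vport_mgr; bool sriov_enabled; };

    static int eswitch_check(const struct dev *d)
    {
            if (!d->eth_port || !d->vport_mgr || !d->sriov_enabled)
                    return -EOPNOTSUPP;
            return 0;
    }

    static int mode_get(const struct dev *d, int *mode)
    {
            int err = eswitch_check(d);      /* every op starts the same way */

            if (err)
                    return err;
            *mode = 1;                       /* ... the actual query ... */
            return 0;
    }

    int main(void)
    {
            struct dev d = { true, true, false };
            int mode;

            printf("mode_get: %d\n", mode_get(&d, &mode));   /* -EOPNOTSUPP */
            return 0;
    }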
+12 -2
drivers/net/ethernet/mellanox/mlx5/core/main.c
··· 176 176 }, 177 177 }; 178 178 179 - #define FW_INIT_TIMEOUT_MILI 2000 180 - #define FW_INIT_WAIT_MS 2 179 + #define FW_INIT_TIMEOUT_MILI 2000 180 + #define FW_INIT_WAIT_MS 2 181 + #define FW_PRE_INIT_TIMEOUT_MILI 10000 181 182 182 183 static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili) 183 184 { ··· 1013 1012 * up 1014 1013 */ 1015 1014 dev->state = MLX5_DEVICE_STATE_UP; 1015 + 1016 + /* wait for firmware to accept initialization segments configurations 1017 + */ 1018 + err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI); 1019 + if (err) { 1020 + dev_err(&dev->pdev->dev, "Firmware over %d MS in pre-initializing state, aborting\n", 1021 + FW_PRE_INIT_TIMEOUT_MILI); 1022 + goto out; 1023 + } 1016 1024 1017 1025 err = mlx5_cmd_init(dev); 1018 1026 if (err) {
-2
drivers/net/ethernet/sfc/ef10_sriov.c
··· 661 661 up_write(&vf->efx->filter_sem); 662 662 mutex_unlock(&vf->efx->mac_lock); 663 663 664 - up_write(&vf->efx->filter_sem); 665 - 666 664 rc2 = efx_net_open(vf->efx->net_dev); 667 665 if (rc2) 668 666 goto reset_nic;
+16 -4
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 2830 2830 2831 2831 tx_q->tx_skbuff_dma[first_entry].buf = des; 2832 2832 tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb); 2833 - tx_q->tx_skbuff[first_entry] = skb; 2834 2833 2835 2834 first->des0 = cpu_to_le32(des); 2836 2835 ··· 2863 2864 2864 2865 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true; 2865 2866 2867 + /* Only the last descriptor gets to point to the skb. */ 2868 + tx_q->tx_skbuff[tx_q->cur_tx] = skb; 2869 + 2870 + /* We've used all descriptors we need for this skb, however, 2871 + * advance cur_tx so that it references a fresh descriptor. 2872 + * ndo_start_xmit will fill this descriptor the next time it's 2873 + * called and stmmac_tx_clean may clean up to this descriptor. 2874 + */ 2866 2875 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); 2867 2876 2868 2877 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { ··· 3003 2996 3004 2997 first = desc; 3005 2998 3006 - tx_q->tx_skbuff[first_entry] = skb; 3007 - 3008 2999 enh_desc = priv->plat->enh_desc; 3009 3000 /* To program the descriptors according to the size of the frame */ 3010 3001 if (enh_desc) ··· 3050 3045 skb->len); 3051 3046 } 3052 3047 3053 - entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); 3048 + /* Only the last descriptor gets to point to the skb. */ 3049 + tx_q->tx_skbuff[entry] = skb; 3054 3050 3051 + /* We've used all descriptors we need for this skb, however, 3052 + * advance cur_tx so that it references a fresh descriptor. 3053 + * ndo_start_xmit will fill this descriptor the next time it's 3054 + * called and stmmac_tx_clean may clean up to this descriptor. 3055 + */ 3056 + entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); 3055 3057 tx_q->cur_tx = entry; 3056 3058 3057 3059 if (netif_msg_pktdata(priv)) {
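The skb pointer now lands on the last descriptor of a frame instead of the first; per the comments added above, stmmac_tx_clean() may clean every descriptor up to cur_tx, so anchoring the skb on the final fragment ensures it is only released once the whole frame has completed. A tiny model of that ownership rule:

    #include <stdio.h>

    struct tx_entry { const char *skb; };

    int main(void)
    {
            struct tx_entry ring[8] = { { NULL } };
            unsigned int first = 5, nfrags = 3, size = 8;
            unsigned int last = (first + nfrags - 1) & (size - 1);

            ring[last].skb = "skb";          /* entries 5 and 6 stay NULL */

            for (unsigned int i = 0; i < size; i++)
                    printf("desc %u: %s\n", i, ring[i].skb ? ring[i].skb : "-");
            return 0;                        /* only desc 7 carries the skb */
    }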
+17 -18
drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
··· 442 442 const char *nvram_name; 443 443 u16 domain_nr; 444 444 u16 bus_nr; 445 - void (*done)(struct device *dev, const struct firmware *fw, 445 + void (*done)(struct device *dev, int err, const struct firmware *fw, 446 446 void *nvram_image, u32 nvram_len); 447 447 }; 448 448 ··· 477 477 if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL)) 478 478 goto fail; 479 479 480 - fwctx->done(fwctx->dev, fwctx->code, nvram, nvram_length); 480 + fwctx->done(fwctx->dev, 0, fwctx->code, nvram, nvram_length); 481 481 kfree(fwctx); 482 482 return; 483 483 484 484 fail: 485 485 brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev)); 486 486 release_firmware(fwctx->code); 487 - device_release_driver(fwctx->dev); 487 + fwctx->done(fwctx->dev, -ENOENT, NULL, NULL, 0); 488 488 kfree(fwctx); 489 489 } 490 490 491 491 static void brcmf_fw_request_code_done(const struct firmware *fw, void *ctx) 492 492 { 493 493 struct brcmf_fw *fwctx = ctx; 494 - int ret; 494 + int ret = 0; 495 495 496 496 brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev)); 497 - if (!fw) 497 + if (!fw) { 498 + ret = -ENOENT; 498 499 goto fail; 499 - 500 - /* only requested code so done here */ 501 - if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM)) { 502 - fwctx->done(fwctx->dev, fw, NULL, 0); 503 - kfree(fwctx); 504 - return; 505 500 } 501 + /* only requested code so done here */ 502 + if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM)) 503 + goto done; 504 + 506 505 fwctx->code = fw; 507 506 ret = request_firmware_nowait(THIS_MODULE, true, fwctx->nvram_name, 508 507 fwctx->dev, GFP_KERNEL, fwctx, 509 508 brcmf_fw_request_nvram_done); 510 509 511 - if (!ret) 512 - return; 513 - 514 - brcmf_fw_request_nvram_done(NULL, fwctx); 510 + /* pass NULL to nvram callback for bcm47xx fallback */ 511 + if (ret) 512 + brcmf_fw_request_nvram_done(NULL, fwctx); 515 513 return; 516 514 517 515 fail: 518 516 brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev)); 519 - device_release_driver(fwctx->dev); 517 + done: 518 + fwctx->done(fwctx->dev, ret, fw, NULL, 0); 520 519 kfree(fwctx); 521 520 } 522 521 523 522 int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags, 524 523 const char *code, const char *nvram, 525 - void (*fw_cb)(struct device *dev, 524 + void (*fw_cb)(struct device *dev, int err, 526 525 const struct firmware *fw, 527 526 void *nvram_image, u32 nvram_len), 528 527 u16 domain_nr, u16 bus_nr) ··· 554 555 555 556 int brcmf_fw_get_firmwares(struct device *dev, u16 flags, 556 557 const char *code, const char *nvram, 557 - void (*fw_cb)(struct device *dev, 558 + void (*fw_cb)(struct device *dev, int err, 558 559 const struct firmware *fw, 559 560 void *nvram_image, u32 nvram_len)) 560 561 {
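The contract change running through firmware.c, pcie.c, sdio.c and usb.c above is that a firmware request now always completes through the done() callback, which carries the error, rather than the helper silently calling device_release_driver() on failure; each bus driver unwinds for itself. A compact sketch of such a callback contract, all names illustrative:

    #include <errno.h>
    #include <stdio.h>

    typedef void (*done_fn)(void *ctx, int err, const void *fw, unsigned int len);

    /* Stand-in for the firmware request: failure still completes via done(). */
    static void request_fw_sketch(const char *name, void *ctx, done_fn done)
    {
            if (!name)
                    done(ctx, -ENOENT, NULL, 0);
            else
                    done(ctx, 0, "fw-blob", 7);
    }

    static void setup_done(void *ctx, int err, const void *fw, unsigned int len)
    {
            (void)ctx; (void)fw;
            if (err)
                    printf("setup aborted: %d\n", err);  /* bus driver unwinds here */
            else
                    printf("firmware ready (%u bytes)\n", len);
    }

    int main(void)
    {
            request_fw_sketch(NULL, NULL, setup_done);
            request_fw_sketch("brcm/fw.bin", NULL, setup_done);
            return 0;
    }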
+2 -2
drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
··· 73 73 */ 74 74 int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags, 75 75 const char *code, const char *nvram, 76 - void (*fw_cb)(struct device *dev, 76 + void (*fw_cb)(struct device *dev, int err, 77 77 const struct firmware *fw, 78 78 void *nvram_image, u32 nvram_len), 79 79 u16 domain_nr, u16 bus_nr); 80 80 int brcmf_fw_get_firmwares(struct device *dev, u16 flags, 81 81 const char *code, const char *nvram, 82 - void (*fw_cb)(struct device *dev, 82 + void (*fw_cb)(struct device *dev, int err, 83 83 const struct firmware *fw, 84 84 void *nvram_image, u32 nvram_len)); 85 85
+1 -1
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
··· 2145 2145 struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr); 2146 2146 struct brcmf_fws_mac_descriptor *entry; 2147 2147 2148 - if (!ifp->ndev || fws->fcmode == BRCMF_FWS_FCMODE_NONE) 2148 + if (!ifp->ndev || !brcmf_fws_queue_skbs(fws)) 2149 2149 return; 2150 2150 2151 2151 entry = &fws->desc.iface[ifp->ifidx];
+12 -5
drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
··· 1650 1650 .write32 = brcmf_pcie_buscore_write32, 1651 1651 }; 1652 1652 1653 - static void brcmf_pcie_setup(struct device *dev, const struct firmware *fw, 1653 + static void brcmf_pcie_setup(struct device *dev, int ret, 1654 + const struct firmware *fw, 1654 1655 void *nvram, u32 nvram_len) 1655 1656 { 1656 - struct brcmf_bus *bus = dev_get_drvdata(dev); 1657 - struct brcmf_pciedev *pcie_bus_dev = bus->bus_priv.pcie; 1658 - struct brcmf_pciedev_info *devinfo = pcie_bus_dev->devinfo; 1657 + struct brcmf_bus *bus; 1658 + struct brcmf_pciedev *pcie_bus_dev; 1659 + struct brcmf_pciedev_info *devinfo; 1659 1660 struct brcmf_commonring **flowrings; 1660 - int ret; 1661 1661 u32 i; 1662 1662 1663 + /* check firmware loading result */ 1664 + if (ret) 1665 + goto fail; 1666 + 1667 + bus = dev_get_drvdata(dev); 1668 + pcie_bus_dev = bus->bus_priv.pcie; 1669 + devinfo = pcie_bus_dev->devinfo; 1663 1670 brcmf_pcie_attach(devinfo); 1664 1671 1665 1672 /* Some of the firmwares have the size of the memory of the device
+12 -6
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
··· 3982 3982 .get_memdump = brcmf_sdio_bus_get_memdump, 3983 3983 }; 3984 3984 3985 - static void brcmf_sdio_firmware_callback(struct device *dev, 3985 + static void brcmf_sdio_firmware_callback(struct device *dev, int err, 3986 3986 const struct firmware *code, 3987 3987 void *nvram, u32 nvram_len) 3988 3988 { 3989 - struct brcmf_bus *bus_if = dev_get_drvdata(dev); 3990 - struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; 3991 - struct brcmf_sdio *bus = sdiodev->bus; 3992 - int err = 0; 3989 + struct brcmf_bus *bus_if; 3990 + struct brcmf_sdio_dev *sdiodev; 3991 + struct brcmf_sdio *bus; 3993 3992 u8 saveclk; 3994 3993 3995 - brcmf_dbg(TRACE, "Enter: dev=%s\n", dev_name(dev)); 3994 + brcmf_dbg(TRACE, "Enter: dev=%s, err=%d\n", dev_name(dev), err); 3995 + bus_if = dev_get_drvdata(dev); 3996 + sdiodev = bus_if->bus_priv.sdio; 3997 + if (err) 3998 + goto fail; 3996 3999 3997 4000 if (!bus_if->drvr) 3998 4001 return; 4002 + 4003 + bus = sdiodev->bus; 3999 4004 4000 4005 /* try to download image and nvram to the dongle */ 4001 4006 bus->alp_only = true; ··· 4088 4083 fail: 4089 4084 brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err); 4090 4085 device_release_driver(dev); 4086 + device_release_driver(&sdiodev->func[2]->dev); 4091 4087 } 4092 4088 4093 4089 struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
+5 -4
drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
··· 1159 1159 return ret; 1160 1160 } 1161 1161 1162 - static void brcmf_usb_probe_phase2(struct device *dev, 1162 + static void brcmf_usb_probe_phase2(struct device *dev, int ret, 1163 1163 const struct firmware *fw, 1164 1164 void *nvram, u32 nvlen) 1165 1165 { 1166 1166 struct brcmf_bus *bus = dev_get_drvdata(dev); 1167 - struct brcmf_usbdev_info *devinfo; 1168 - int ret; 1167 + struct brcmf_usbdev_info *devinfo = bus->bus_priv.usb->devinfo; 1168 + 1169 + if (ret) 1170 + goto error; 1169 1171 1170 1172 brcmf_dbg(USB, "Start fw downloading\n"); 1171 1173 1172 - devinfo = bus->bus_priv.usb->devinfo; 1173 1174 ret = check_file(fw->data); 1174 1175 if (ret < 0) { 1175 1176 brcmf_err("invalid firmware\n");
+1 -1
drivers/ntb/hw/intel/ntb_hw_intel.c
··· 2878 2878 .link_is_up = xeon_link_is_up, 2879 2879 .db_ioread = skx_db_ioread, 2880 2880 .db_iowrite = skx_db_iowrite, 2881 - .db_size = sizeof(u64), 2881 + .db_size = sizeof(u32), 2882 2882 .ntb_ctl = SKX_NTBCNTL_OFFSET, 2883 2883 .mw_bar = {2, 4}, 2884 2884 };
+11 -47
drivers/ntb/ntb_transport.c
··· 177 177 u64 rx_err_ver; 178 178 u64 rx_memcpy; 179 179 u64 rx_async; 180 - u64 dma_rx_prep_err; 181 180 u64 tx_bytes; 182 181 u64 tx_pkts; 183 182 u64 tx_ring_full; 184 183 u64 tx_err_no_buf; 185 184 u64 tx_memcpy; 186 185 u64 tx_async; 187 - u64 dma_tx_prep_err; 188 186 }; 189 187 190 188 struct ntb_transport_mw { ··· 252 254 #define QP_TO_MW(nt, qp) ((qp) % nt->mw_count) 253 255 #define NTB_QP_DEF_NUM_ENTRIES 100 254 256 #define NTB_LINK_DOWN_TIMEOUT 10 255 - #define DMA_RETRIES 20 256 - #define DMA_OUT_RESOURCE_TO msecs_to_jiffies(50) 257 257 258 258 static void ntb_transport_rxc_db(unsigned long data); 259 259 static const struct ntb_ctx_ops ntb_transport_ops; ··· 512 516 out_offset += snprintf(buf + out_offset, out_count - out_offset, 513 517 "free tx - \t%u\n", 514 518 ntb_transport_tx_free_entry(qp)); 515 - out_offset += snprintf(buf + out_offset, out_count - out_offset, 516 - "DMA tx prep err - \t%llu\n", 517 - qp->dma_tx_prep_err); 518 - out_offset += snprintf(buf + out_offset, out_count - out_offset, 519 - "DMA rx prep err - \t%llu\n", 520 - qp->dma_rx_prep_err); 521 519 522 520 out_offset += snprintf(buf + out_offset, out_count - out_offset, 523 521 "\n"); ··· 613 623 if (!mw->virt_addr) 614 624 return -ENOMEM; 615 625 616 - if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count) 626 + if (mw_num < qp_count % mw_count) 617 627 num_qps_mw = qp_count / mw_count + 1; 618 628 else 619 629 num_qps_mw = qp_count / mw_count; ··· 758 768 qp->tx_err_no_buf = 0; 759 769 qp->tx_memcpy = 0; 760 770 qp->tx_async = 0; 761 - qp->dma_tx_prep_err = 0; 762 - qp->dma_rx_prep_err = 0; 763 771 } 764 772 765 773 static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp) ··· 988 1000 qp->event_handler = NULL; 989 1001 ntb_qp_link_down_reset(qp); 990 1002 991 - if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count) 1003 + if (mw_num < qp_count % mw_count) 992 1004 num_qps_mw = qp_count / mw_count + 1; 993 1005 else 994 1006 num_qps_mw = qp_count / mw_count; ··· 1116 1128 qp_count = ilog2(qp_bitmap); 1117 1129 if (max_num_clients && max_num_clients < qp_count) 1118 1130 qp_count = max_num_clients; 1119 - else if (mw_count < qp_count) 1120 - qp_count = mw_count; 1131 + else if (nt->mw_count < qp_count) 1132 + qp_count = nt->mw_count; 1121 1133 1122 1134 qp_bitmap &= BIT_ULL(qp_count) - 1; 1123 1135 ··· 1305 1317 struct dmaengine_unmap_data *unmap; 1306 1318 dma_cookie_t cookie; 1307 1319 void *buf = entry->buf; 1308 - int retries = 0; 1309 1320 1310 1321 len = entry->len; 1311 1322 device = chan->device; ··· 1333 1346 1334 1347 unmap->from_cnt = 1; 1335 1348 1336 - for (retries = 0; retries < DMA_RETRIES; retries++) { 1337 - txd = device->device_prep_dma_memcpy(chan, 1338 - unmap->addr[1], 1339 - unmap->addr[0], len, 1340 - DMA_PREP_INTERRUPT); 1341 - if (txd) 1342 - break; 1343 - 1344 - set_current_state(TASK_INTERRUPTIBLE); 1345 - schedule_timeout(DMA_OUT_RESOURCE_TO); 1346 - } 1347 - 1348 - if (!txd) { 1349 - qp->dma_rx_prep_err++; 1349 + txd = device->device_prep_dma_memcpy(chan, unmap->addr[1], 1350 + unmap->addr[0], len, 1351 + DMA_PREP_INTERRUPT); 1352 + if (!txd) 1350 1353 goto err_get_unmap; 1351 - } 1352 1354 1353 1355 txd->callback_result = ntb_rx_copy_callback; 1354 1356 txd->callback_param = entry; ··· 1582 1606 struct dmaengine_unmap_data *unmap; 1583 1607 dma_addr_t dest; 1584 1608 dma_cookie_t cookie; 1585 - int retries = 0; 1586 1609 1587 1610 device = chan->device; 1588 1611 dest = qp->tx_mw_phys + qp->tx_max_frame * entry->tx_index; ··· 1603 1628 1604 1629 
unmap->to_cnt = 1; 1605 1630 1606 - for (retries = 0; retries < DMA_RETRIES; retries++) { 1607 - txd = device->device_prep_dma_memcpy(chan, dest, 1608 - unmap->addr[0], len, 1609 - DMA_PREP_INTERRUPT); 1610 - if (txd) 1611 - break; 1612 - 1613 - set_current_state(TASK_INTERRUPTIBLE); 1614 - schedule_timeout(DMA_OUT_RESOURCE_TO); 1615 - } 1616 - 1617 - if (!txd) { 1618 - qp->dma_tx_prep_err++; 1631 + txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len, 1632 + DMA_PREP_INTERRUPT); 1633 + if (!txd) 1619 1634 goto err_get_unmap; 1620 - } 1621 1635 1622 1636 txd->callback_result = ntb_tx_copy_callback; 1623 1637 txd->callback_param = entry;
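Besides dropping the DMA-prep retry loops (a failed device_prep_dma_memcpy() now simply falls through to the synchronous memcpy path), the series corrects how queues are spread over memory windows: the first qp_count % mw_count windows carry one extra queue. The fixed distribution, worked standalone:

    #include <stdio.h>

    int main(void)
    {
            unsigned int qp_count = 5, mw_count = 2;

            for (unsigned int mw_num = 0; mw_num < mw_count; mw_num++) {
                    unsigned int num_qps_mw;

                    if (mw_num < qp_count % mw_count)          /* fixed test */
                            num_qps_mw = qp_count / mw_count + 1;
                    else
                            num_qps_mw = qp_count / mw_count;

                    printf("mw %u -> %u qps\n", mw_num, num_qps_mw);
            }
            /* prints: mw 0 -> 3 qps, mw 1 -> 2 qps; totals match qp_count */
            return 0;
    }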
+2 -2
drivers/ntb/test/ntb_perf.c
··· 90 90 91 91 static unsigned int seg_order = 19; /* 512K */ 92 92 module_param(seg_order, uint, 0644); 93 - MODULE_PARM_DESC(seg_order, "size order [n^2] of buffer segment for testing"); 93 + MODULE_PARM_DESC(seg_order, "size order [2^n] of buffer segment for testing"); 94 94 95 95 static unsigned int run_order = 32; /* 4G */ 96 96 module_param(run_order, uint, 0644); 97 - MODULE_PARM_DESC(run_order, "size order [n^2] of total data to transfer"); 97 + MODULE_PARM_DESC(run_order, "size order [2^n] of total data to transfer"); 98 98 99 99 static bool use_dma; /* default to 0 */ 100 100 module_param(use_dma, bool, 0644);
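
The corrected descriptions matter because seg_order and run_order are log2 sizes, as the in-source comments (512K, 4G) already indicate; a two-line check of the arithmetic:

#include <stdio.h>

int main(void)
{
        unsigned int seg_order = 19, run_order = 32;

        printf("segment: %lu bytes\n", 1UL << seg_order);    /* 524288, i.e. 512K */
        printf("total:   %llu bytes\n", 1ULL << run_order);  /* 4294967296, i.e. 4G */
        return 0;
}
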
+6 -6
drivers/pci/access.c
··· 896 896 { 897 897 if (pci_dev_is_disconnected(dev)) { 898 898 *val = ~0; 899 - return -ENODEV; 899 + return PCIBIOS_DEVICE_NOT_FOUND; 900 900 } 901 901 return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val); 902 902 } ··· 906 906 { 907 907 if (pci_dev_is_disconnected(dev)) { 908 908 *val = ~0; 909 - return -ENODEV; 909 + return PCIBIOS_DEVICE_NOT_FOUND; 910 910 } 911 911 return pci_bus_read_config_word(dev->bus, dev->devfn, where, val); 912 912 } ··· 917 917 { 918 918 if (pci_dev_is_disconnected(dev)) { 919 919 *val = ~0; 920 - return -ENODEV; 920 + return PCIBIOS_DEVICE_NOT_FOUND; 921 921 } 922 922 return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val); 923 923 } ··· 926 926 int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val) 927 927 { 928 928 if (pci_dev_is_disconnected(dev)) 929 - return -ENODEV; 929 + return PCIBIOS_DEVICE_NOT_FOUND; 930 930 return pci_bus_write_config_byte(dev->bus, dev->devfn, where, val); 931 931 } 932 932 EXPORT_SYMBOL(pci_write_config_byte); ··· 934 934 int pci_write_config_word(const struct pci_dev *dev, int where, u16 val) 935 935 { 936 936 if (pci_dev_is_disconnected(dev)) 937 - return -ENODEV; 937 + return PCIBIOS_DEVICE_NOT_FOUND; 938 938 return pci_bus_write_config_word(dev->bus, dev->devfn, where, val); 939 939 } 940 940 EXPORT_SYMBOL(pci_write_config_word); ··· 943 943 u32 val) 944 944 { 945 945 if (pci_dev_is_disconnected(dev)) 946 - return -ENODEV; 946 + return PCIBIOS_DEVICE_NOT_FOUND; 947 947 return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val); 948 948 } 949 949 EXPORT_SYMBOL(pci_write_config_dword);
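
The accessors above belong to the PCI config-space API, whose contract is to return PCIBIOS_* codes rather than -errno values, hence the switch away from -ENODEV. Callers translate with pcibios_err_to_errno(); a condensed userspace rendering of that mapping, with the constants copied from the kernel headers and the remaining PCIBIOS_* cases elided:

#include <errno.h>
#include <stdio.h>

#define PCIBIOS_SUCCESSFUL              0x00
#define PCIBIOS_DEVICE_NOT_FOUND        0x86

/* Simplified from the kernel's pcibios_err_to_errno(). */
static int pcibios_err_to_errno(int err)
{
        switch (err) {
        case PCIBIOS_SUCCESSFUL:
                return 0;
        case PCIBIOS_DEVICE_NOT_FOUND:
                return -ENODEV;
        default:
                return -ERANGE;         /* other PCIBIOS_* codes elided */
        }
}

int main(void)
{
        printf("%d\n", pcibios_err_to_errno(PCIBIOS_DEVICE_NOT_FOUND)); /* -19 */
        return 0;
}
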
+1
drivers/pci/endpoint/functions/Kconfig
··· 5 5 config PCI_EPF_TEST 6 6 tristate "PCI Endpoint Test driver" 7 7 depends on PCI_ENDPOINT 8 + select CRC32 8 9 help 9 10 Enable this configuration option to enable the test driver 10 11 for PCI Endpoint.
+41 -50
drivers/pinctrl/pinctrl-amd.c
··· 495 495 .flags = IRQCHIP_SKIP_SET_WAKE, 496 496 }; 497 497 498 - static void amd_gpio_irq_handler(struct irq_desc *desc) 498 + #define PIN_IRQ_PENDING (BIT(INTERRUPT_STS_OFF) | BIT(WAKE_STS_OFF)) 499 + 500 + static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id) 499 501 { 500 - u32 i; 501 - u32 off; 502 - u32 reg; 503 - u32 pin_reg; 504 - u64 reg64; 505 - int handled = 0; 506 - unsigned int irq; 502 + struct amd_gpio *gpio_dev = dev_id; 503 + struct gpio_chip *gc = &gpio_dev->gc; 504 + irqreturn_t ret = IRQ_NONE; 505 + unsigned int i, irqnr; 507 506 unsigned long flags; 508 - struct irq_chip *chip = irq_desc_get_chip(desc); 509 - struct gpio_chip *gc = irq_desc_get_handler_data(desc); 510 - struct amd_gpio *gpio_dev = gpiochip_get_data(gc); 507 + u32 *regs, regval; 508 + u64 status, mask; 511 509 512 - chained_irq_enter(chip, desc); 513 - /*enable GPIO interrupt again*/ 510 + /* Read the wake status */ 514 511 raw_spin_lock_irqsave(&gpio_dev->lock, flags); 515 - reg = readl(gpio_dev->base + WAKE_INT_STATUS_REG1); 516 - reg64 = reg; 517 - reg64 = reg64 << 32; 518 - 519 - reg = readl(gpio_dev->base + WAKE_INT_STATUS_REG0); 520 - reg64 |= reg; 512 + status = readl(gpio_dev->base + WAKE_INT_STATUS_REG1); 513 + status <<= 32; 514 + status |= readl(gpio_dev->base + WAKE_INT_STATUS_REG0); 521 515 raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); 522 516 523 - /* 524 - * first 46 bits indicates interrupt status. 525 - * one bit represents four interrupt sources. 526 - */ 527 - for (off = 0; off < 46 ; off++) { 528 - if (reg64 & BIT(off)) { 529 - for (i = 0; i < 4; i++) { 530 - pin_reg = readl(gpio_dev->base + 531 - (off * 4 + i) * 4); 532 - if ((pin_reg & BIT(INTERRUPT_STS_OFF)) || 533 - (pin_reg & BIT(WAKE_STS_OFF))) { 534 - irq = irq_find_mapping(gc->irqdomain, 535 - off * 4 + i); 536 - generic_handle_irq(irq); 537 - writel(pin_reg, 538 - gpio_dev->base 539 - + (off * 4 + i) * 4); 540 - handled++; 541 - } 542 - } 517 + /* Bit 0-45 contain the relevant status bits */ 518 + status &= (1ULL << 46) - 1; 519 + regs = gpio_dev->base; 520 + for (mask = 1, irqnr = 0; status; mask <<= 1, regs += 4, irqnr += 4) { 521 + if (!(status & mask)) 522 + continue; 523 + status &= ~mask; 524 + 525 + /* Each status bit covers four pins */ 526 + for (i = 0; i < 4; i++) { 527 + regval = readl(regs + i); 528 + if (!(regval & PIN_IRQ_PENDING)) 529 + continue; 530 + irq = irq_find_mapping(gc->irqdomain, irqnr + i); 531 + generic_handle_irq(irq); 532 + /* Clear interrupt */ 533 + writel(regval, regs + i); 534 + ret = IRQ_HANDLED; 543 535 } 544 536 } 545 537 546 - if (handled == 0) 547 - handle_bad_irq(desc); 548 - 538 + /* Signal EOI to the GPIO unit */ 549 539 raw_spin_lock_irqsave(&gpio_dev->lock, flags); 550 - reg = readl(gpio_dev->base + WAKE_INT_MASTER_REG); 551 - reg |= EOI_MASK; 552 - writel(reg, gpio_dev->base + WAKE_INT_MASTER_REG); 540 + regval = readl(gpio_dev->base + WAKE_INT_MASTER_REG); 541 + regval |= EOI_MASK; 542 + writel(regval, gpio_dev->base + WAKE_INT_MASTER_REG); 553 543 raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); 554 544 555 - chained_irq_exit(chip, desc); 545 + return ret; 556 546 } 557 547 558 548 static int amd_get_groups_count(struct pinctrl_dev *pctldev) ··· 811 821 goto out2; 812 822 } 813 823 814 - gpiochip_set_chained_irqchip(&gpio_dev->gc, 815 - &amd_gpio_irqchip, 816 - irq_base, 817 - amd_gpio_irq_handler); 824 + ret = devm_request_irq(&pdev->dev, irq_base, amd_gpio_irq_handler, 0, 825 + KBUILD_MODNAME, gpio_dev); 826 + if (ret) 827 + goto out2; 828 + 818 829 
platform_set_drvdata(pdev, gpio_dev); 819 830 820 831 dev_dbg(&pdev->dev, "amd gpio driver loaded\n");
+1 -1
drivers/pinctrl/stm32/pinctrl-stm32.c
··· 798 798 break; 799 799 case PIN_CONFIG_OUTPUT: 800 800 __stm32_gpio_set(bank, offset, arg); 801 - ret = stm32_pmx_gpio_set_direction(pctldev, NULL, pin, false); 801 + ret = stm32_pmx_gpio_set_direction(pctldev, range, pin, false); 802 802 break; 803 803 default: 804 804 ret = -EINVAL;
+7 -9
drivers/platform/x86/intel_telemetry_debugfs.c
··· 97 97 } \ 98 98 } 99 99 100 - #ifdef CONFIG_PM_SLEEP 101 100 static u8 suspend_prep_ok; 102 101 static u32 suspend_shlw_ctr_temp, suspend_deep_ctr_temp; 103 102 static u64 suspend_shlw_res_temp, suspend_deep_res_temp; 104 - #endif 105 103 106 104 struct telemetry_susp_stats { 107 105 u32 shlw_swake_ctr; ··· 805 807 .release = single_release, 806 808 }; 807 809 808 - #ifdef CONFIG_PM_SLEEP 809 810 static int pm_suspend_prep_cb(void) 810 811 { 811 812 struct telemetry_evtlog evtlog[TELEM_MAX_OS_ALLOCATED_EVENTS]; ··· 934 937 static struct notifier_block pm_notifier = { 935 938 .notifier_call = pm_notification, 936 939 }; 937 - #endif /* CONFIG_PM_SLEEP */ 938 940 939 941 static int __init telemetry_debugfs_init(void) 940 942 { ··· 956 960 if (err < 0) 957 961 return -EINVAL; 958 962 959 - 960 - #ifdef CONFIG_PM_SLEEP 961 963 register_pm_notifier(&pm_notifier); 962 - #endif /* CONFIG_PM_SLEEP */ 963 964 964 965 debugfs_conf->telemetry_dbg_dir = debugfs_create_dir("telemetry", NULL); 965 - if (!debugfs_conf->telemetry_dbg_dir) 966 - return -ENOMEM; 966 + if (!debugfs_conf->telemetry_dbg_dir) { 967 + err = -ENOMEM; 968 + goto out_pm; 969 + } 967 970 968 971 f = debugfs_create_file("pss_info", S_IFREG | S_IRUGO, 969 972 debugfs_conf->telemetry_dbg_dir, NULL, ··· 1009 1014 out: 1010 1015 debugfs_remove_recursive(debugfs_conf->telemetry_dbg_dir); 1011 1016 debugfs_conf->telemetry_dbg_dir = NULL; 1017 + out_pm: 1018 + unregister_pm_notifier(&pm_notifier); 1012 1019 1013 1020 return err; 1014 1021 } ··· 1019 1022 { 1020 1023 debugfs_remove_recursive(debugfs_conf->telemetry_dbg_dir); 1021 1024 debugfs_conf->telemetry_dbg_dir = NULL; 1025 + unregister_pm_notifier(&pm_notifier); 1022 1026 } 1023 1027 1024 1028 late_initcall(telemetry_debugfs_init);
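
The telemetry change converts a bare early return into the usual kernel unwind idiom: each failure point jumps to a label that releases what was set up before it, in reverse order, so the PM notifier registered earlier is no longer leaked. A compilable skeleton of the idiom, with hypothetical setup and teardown names:

#include <stdio.h>

/* Hypothetical resources, named only for the skeleton. */
static int register_notifier(void) { return 0; }
static void unregister_notifier(void) { }
static int create_debugfs_dir(void) { return -1; /* pretend this fails */ }

static int init(void)
{
        int err;

        err = register_notifier();
        if (err)
                return err;

        err = create_debugfs_dir();
        if (err)
                goto out_notifier;      /* undo the earlier step before bailing */

        return 0;

out_notifier:
        unregister_notifier();
        return err;
}

int main(void)
{
        printf("init: %d\n", init());   /* -1, with the notifier unwound */
        return 0;
}
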
+1 -5
drivers/staging/iio/cdc/ad7152.c
··· 231 231 if (i >= ARRAY_SIZE(ad7152_filter_rate_table)) 232 232 i = ARRAY_SIZE(ad7152_filter_rate_table) - 1; 233 233 234 - mutex_lock(&chip->state_lock); 235 234 ret = i2c_smbus_write_byte_data(chip->client, 236 235 AD7152_REG_CFG2, AD7152_CFG2_OSR(i)); 237 - if (ret < 0) { 238 - mutex_unlock(&chip->state_lock); 236 + if (ret < 0) 239 237 return ret; 240 - } 241 238 242 239 chip->filter_rate_setup = i; 243 - mutex_unlock(&chip->state_lock); 244 240 245 241 return ret; 246 242 }
+1 -1
drivers/staging/rtl8723bs/os_dep/osdep_service.c
··· 160 160 oldfs = get_fs(); set_fs(get_ds()); 161 161 162 162 if (1!=readFile(fp, &buf, 1)) 163 - ret = PTR_ERR(fp); 163 + ret = -EINVAL; 164 164 165 165 set_fs(oldfs); 166 166 filp_close(fp, NULL);
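
The rtl8723bs fix is about the ERR_PTR convention: PTR_ERR(fp) only encodes an error when IS_ERR(fp) is true, and here fp was a successfully opened file, so the old code could return a garbage value derived from a valid pointer. A userspace re-implementation of the idiom for illustration; MAX_ERRNO matches the kernel's bound of 4095:

#include <stdio.h>

#define MAX_ERRNO       4095
#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr) { return IS_ERR_VALUE((unsigned long)ptr); }

int main(void)
{
        void *fp = ERR_PTR(-22);        /* what a failed open looks like */

        if (IS_ERR(fp))
                printf("error: %ld\n", PTR_ERR(fp));    /* -22 */
        return 0;
}
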
+5 -6
drivers/usb/gadget/composite.c
··· 315 315 list_del(&f->list); 316 316 if (f->unbind) 317 317 f->unbind(c, f); 318 + 319 + if (f->bind_deactivated) 320 + usb_function_activate(f); 318 321 } 319 322 EXPORT_SYMBOL_GPL(usb_remove_function); 320 323 ··· 959 956 960 957 f = list_first_entry(&config->functions, 961 958 struct usb_function, list); 962 - list_del(&f->list); 963 - if (f->unbind) { 964 - DBG(cdev, "unbind function '%s'/%p\n", f->name, f); 965 - f->unbind(config, f); 966 - /* may free memory for "f" */ 967 - } 959 + 960 + usb_remove_function(config, f); 968 961 } 969 962 list_del(&config->list); 970 963 if (config->unbind) {
+6 -3
drivers/usb/gadget/legacy/inode.c
··· 1183 1183 1184 1184 /* closing ep0 === shutdown all */ 1185 1185 1186 - if (dev->gadget_registered) 1186 + if (dev->gadget_registered) { 1187 1187 usb_gadget_unregister_driver (&gadgetfs_driver); 1188 + dev->gadget_registered = false; 1189 + } 1188 1190 1189 1191 /* at this point "good" hardware has disconnected the 1190 1192 * device from USB; the host won't see it any more. ··· 1679 1677 gadgetfs_suspend (struct usb_gadget *gadget) 1680 1678 { 1681 1679 struct dev_data *dev = get_gadget_data (gadget); 1680 + unsigned long flags; 1682 1681 1683 1682 INFO (dev, "suspended from state %d\n", dev->state); 1684 - spin_lock (&dev->lock); 1683 + spin_lock_irqsave(&dev->lock, flags); 1685 1684 switch (dev->state) { 1686 1685 case STATE_DEV_SETUP: // VERY odd... host died?? 1687 1686 case STATE_DEV_CONNECTED: ··· 1693 1690 default: 1694 1691 break; 1695 1692 } 1696 - spin_unlock (&dev->lock); 1693 + spin_unlock_irqrestore(&dev->lock, flags); 1697 1694 } 1698 1695 1699 1696 static struct usb_gadget_driver gadgetfs_driver = {
+4 -9
drivers/usb/gadget/udc/dummy_hcd.c
··· 442 442 /* Report reset and disconnect events to the driver */ 443 443 if (dum->driver && (disconnect || reset)) { 444 444 stop_activity(dum); 445 - spin_unlock(&dum->lock); 446 445 if (reset) 447 446 usb_gadget_udc_reset(&dum->gadget, dum->driver); 448 447 else 449 448 dum->driver->disconnect(&dum->gadget); 450 - spin_lock(&dum->lock); 451 449 } 452 450 } else if (dum_hcd->active != dum_hcd->old_active) { 453 - if (dum_hcd->old_active && dum->driver->suspend) { 454 - spin_unlock(&dum->lock); 451 + if (dum_hcd->old_active && dum->driver->suspend) 455 452 dum->driver->suspend(&dum->gadget); 456 - spin_lock(&dum->lock); 457 - } else if (!dum_hcd->old_active && dum->driver->resume) { 458 - spin_unlock(&dum->lock); 453 + else if (!dum_hcd->old_active && dum->driver->resume) 459 454 dum->driver->resume(&dum->gadget); 460 - spin_lock(&dum->lock); 461 - } 462 455 } 463 456 464 457 dum_hcd->old_status = dum_hcd->port_status; ··· 976 983 struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g); 977 984 struct dummy *dum = dum_hcd->dum; 978 985 986 + spin_lock_irq(&dum->lock); 979 987 dum->driver = NULL; 988 + spin_unlock_irq(&dum->lock); 980 989 981 990 return 0; 982 991 }
+1 -8
drivers/usb/gadget/udc/net2280.c
··· 2470 2470 nuke(&dev->ep[i]); 2471 2471 2472 2472 /* report disconnect; the driver is already quiesced */ 2473 - if (driver) { 2474 - spin_unlock(&dev->lock); 2473 + if (driver) 2475 2474 driver->disconnect(&dev->gadget); 2476 - spin_lock(&dev->lock); 2477 - } 2478 2475 2479 2476 usb_reinit(dev); 2480 2477 } ··· 3345 3348 BIT(PCI_RETRY_ABORT_INTERRUPT)) 3346 3349 3347 3350 static void handle_stat1_irqs(struct net2280 *dev, u32 stat) 3348 - __releases(dev->lock) 3349 - __acquires(dev->lock) 3350 3351 { 3351 3352 struct net2280_ep *ep; 3352 3353 u32 tmp, num, mask, scratch; ··· 3385 3390 if (disconnect || reset) { 3386 3391 stop_activity(dev, dev->driver); 3387 3392 ep0_start(dev); 3388 - spin_unlock(&dev->lock); 3389 3393 if (reset) 3390 3394 usb_gadget_udc_reset 3391 3395 (&dev->gadget, dev->driver); 3392 3396 else 3393 3397 (dev->driver->disconnect) 3394 3398 (&dev->gadget); 3395 - spin_lock(&dev->lock); 3396 3399 return; 3397 3400 } 3398 3401 }
+5 -2
drivers/usb/host/xhci-mem.c
··· 2119 2119 { 2120 2120 u32 temp, port_offset, port_count; 2121 2121 int i; 2122 - u8 major_revision; 2122 + u8 major_revision, minor_revision; 2123 2123 struct xhci_hub *rhub; 2124 2124 2125 2125 temp = readl(addr); 2126 2126 major_revision = XHCI_EXT_PORT_MAJOR(temp); 2127 + minor_revision = XHCI_EXT_PORT_MINOR(temp); 2127 2128 2128 2129 if (major_revision == 0x03) { 2129 2130 rhub = &xhci->usb3_rhub; ··· 2138 2137 return; 2139 2138 } 2140 2139 rhub->maj_rev = XHCI_EXT_PORT_MAJOR(temp); 2141 - rhub->min_rev = XHCI_EXT_PORT_MINOR(temp); 2140 + 2141 + if (rhub->min_rev < minor_revision) 2142 + rhub->min_rev = minor_revision; 2142 2143 2143 2144 /* Port offset and count in the third dword, see section 7.2 */ 2144 2145 temp = readl(addr + 2);
+3
drivers/usb/host/xhci-pci.c
··· 201 201 if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && 202 202 pdev->device == 0x1042) 203 203 xhci->quirks |= XHCI_BROKEN_STREAMS; 204 + if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && 205 + pdev->device == 0x1142) 206 + xhci->quirks |= XHCI_TRUST_TX_LENGTH; 204 207 205 208 if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241) 206 209 xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7;
+1 -1
drivers/video/fbdev/core/fbmon.c
··· 1048 1048 1049 1049 for (i = 0; i < (128 - edid[2]) / DETAILED_TIMING_DESCRIPTION_SIZE; 1050 1050 i++, block += DETAILED_TIMING_DESCRIPTION_SIZE) 1051 - if (PIXEL_CLOCK) 1051 + if (PIXEL_CLOCK != 0) 1052 1052 edt[num++] = block - edid; 1053 1053 1054 1054 /* Yikes, EDID data is totally useless */
+3 -2
drivers/video/fbdev/smscufx.c
··· 1646 1646 dev_dbg(dev->gdev, "%s %s - serial #%s\n", 1647 1647 usbdev->manufacturer, usbdev->product, usbdev->serial); 1648 1648 dev_dbg(dev->gdev, "vid_%04x&pid_%04x&rev_%04x driver's ufx_data struct at %p\n", 1649 - usbdev->descriptor.idVendor, usbdev->descriptor.idProduct, 1650 - usbdev->descriptor.bcdDevice, dev); 1649 + le16_to_cpu(usbdev->descriptor.idVendor), 1650 + le16_to_cpu(usbdev->descriptor.idProduct), 1651 + le16_to_cpu(usbdev->descriptor.bcdDevice), dev); 1651 1652 dev_dbg(dev->gdev, "console enable=%d\n", console); 1652 1653 dev_dbg(dev->gdev, "fb_defio enable=%d\n", fb_defio); 1653 1654
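
Multi-byte USB descriptor fields such as idVendor are little-endian on the wire, so this driver (and udlfb just below) now passes them through le16_to_cpu() before printing; on a big-endian host the raw values would appear byte-swapped. The userspace equivalent, assuming glibc's <endian.h>:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t wire = htole16(0x1d6b);        /* Linux Foundation vendor ID */

        /* le16toh() is the userspace counterpart of le16_to_cpu(): a no-op
         * on little-endian hosts, a byte swap on big-endian ones. */
        printf("vid_%04x\n", le16toh(wire));    /* 1d6b either way */
        return 0;
}
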
+5 -4
drivers/video/fbdev/udlfb.c
··· 1105 1105 char *bufptr; 1106 1106 struct urb *urb; 1107 1107 1108 - pr_info("/dev/fb%d FB_BLANK mode %d --> %d\n", 1109 - info->node, dev->blank_mode, blank_mode); 1108 + pr_debug("/dev/fb%d FB_BLANK mode %d --> %d\n", 1109 + info->node, dev->blank_mode, blank_mode); 1110 1110 1111 1111 if ((dev->blank_mode == FB_BLANK_POWERDOWN) && 1112 1112 (blank_mode != FB_BLANK_POWERDOWN)) { ··· 1613 1613 pr_info("%s %s - serial #%s\n", 1614 1614 usbdev->manufacturer, usbdev->product, usbdev->serial); 1615 1615 pr_info("vid_%04x&pid_%04x&rev_%04x driver's dlfb_data struct at %p\n", 1616 - usbdev->descriptor.idVendor, usbdev->descriptor.idProduct, 1617 - usbdev->descriptor.bcdDevice, dev); 1616 + le16_to_cpu(usbdev->descriptor.idVendor), 1617 + le16_to_cpu(usbdev->descriptor.idProduct), 1618 + le16_to_cpu(usbdev->descriptor.bcdDevice), dev); 1618 1619 pr_info("console enable=%d\n", console); 1619 1620 pr_info("fb_defio enable=%d\n", fb_defio); 1620 1621 pr_info("shadow enable=%d\n", shadow);
+3 -5
drivers/video/fbdev/via/viafbdev.c
··· 1630 1630 } 1631 1631 static void viafb_remove_proc(struct viafb_shared *shared) 1632 1632 { 1633 - struct proc_dir_entry *viafb_entry = shared->proc_entry, 1634 - *iga1_entry = shared->iga1_proc_entry, 1635 - *iga2_entry = shared->iga2_proc_entry; 1633 + struct proc_dir_entry *viafb_entry = shared->proc_entry; 1636 1634 1637 1635 if (!viafb_entry) 1638 1636 return; 1639 1637 1640 - remove_proc_entry("output_devices", iga2_entry); 1638 + remove_proc_entry("output_devices", shared->iga2_proc_entry); 1641 1639 remove_proc_entry("iga2", viafb_entry); 1642 - remove_proc_entry("output_devices", iga1_entry); 1640 + remove_proc_entry("output_devices", shared->iga1_proc_entry); 1643 1641 remove_proc_entry("iga1", viafb_entry); 1644 1642 remove_proc_entry("supported_output_devices", viafb_entry); 1645 1643
+7
drivers/virtio/virtio_balloon.c
··· 663 663 } 664 664 #endif 665 665 666 + static int virtballoon_validate(struct virtio_device *vdev) 667 + { 668 + __virtio_clear_bit(vdev, VIRTIO_F_IOMMU_PLATFORM); 669 + return 0; 670 + } 671 + 666 672 static unsigned int features[] = { 667 673 VIRTIO_BALLOON_F_MUST_TELL_HOST, 668 674 VIRTIO_BALLOON_F_STATS_VQ, ··· 681 675 .driver.name = KBUILD_MODNAME, 682 676 .driver.owner = THIS_MODULE, 683 677 .id_table = id_table, 678 + .validate = virtballoon_validate, 684 679 .probe = virtballoon_probe, 685 680 .remove = virtballoon_remove, 686 681 .config_changed = virtballoon_changed,
+1
fs/ceph/acl.c
··· 131 131 } 132 132 133 133 if (new_mode != old_mode) { 134 + newattrs.ia_ctime = current_time(inode); 134 135 newattrs.ia_mode = new_mode; 135 136 newattrs.ia_valid = ATTR_MODE; 136 137 ret = __ceph_setattr(inode, &newattrs);
+4
fs/ceph/export.c
··· 91 91 ceph_mdsc_put_request(req); 92 92 if (!inode) 93 93 return ERR_PTR(-ESTALE); 94 + if (inode->i_nlink == 0) { 95 + iput(inode); 96 + return ERR_PTR(-ESTALE); 97 + } 94 98 } 95 99 96 100 return d_obtain_alias(inode);
+2 -3
fs/ceph/inode.c
··· 2022 2022 attr->ia_size > inode->i_size) { 2023 2023 i_size_write(inode, attr->ia_size); 2024 2024 inode->i_blocks = calc_inode_blocks(attr->ia_size); 2025 - inode->i_ctime = attr->ia_ctime; 2026 2025 ci->i_reported_size = attr->ia_size; 2027 2026 dirtied |= CEPH_CAP_FILE_EXCL; 2028 2027 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 || ··· 2043 2044 inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec, 2044 2045 attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec, 2045 2046 only ? "ctime only" : "ignored"); 2046 - inode->i_ctime = attr->ia_ctime; 2047 2047 if (only) { 2048 2048 /* 2049 2049 * if kernel wants to dirty ctime but nothing else, ··· 2065 2067 if (dirtied) { 2066 2068 inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied, 2067 2069 &prealloc_cf); 2068 - inode->i_ctime = current_time(inode); 2070 + inode->i_ctime = attr->ia_ctime; 2069 2071 } 2070 2072 2071 2073 release &= issued; ··· 2083 2085 req->r_inode_drop = release; 2084 2086 req->r_args.setattr.mask = cpu_to_le32(mask); 2085 2087 req->r_num_caps = 1; 2088 + req->r_stamp = attr->ia_ctime; 2086 2089 err = ceph_mdsc_do_request(mdsc, NULL, req); 2087 2090 } 2088 2091 dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
+1 -3
fs/ceph/mds_client.c
··· 1687 1687 ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode) 1688 1688 { 1689 1689 struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS); 1690 - struct timespec ts; 1691 1690 1692 1691 if (!req) 1693 1692 return ERR_PTR(-ENOMEM); ··· 1705 1706 init_completion(&req->r_safe_completion); 1706 1707 INIT_LIST_HEAD(&req->r_unsafe_item); 1707 1708 1708 - ktime_get_real_ts(&ts); 1709 - req->r_stamp = timespec_trunc(ts, mdsc->fsc->sb->s_time_gran); 1709 + req->r_stamp = timespec_trunc(current_kernel_time(), mdsc->fsc->sb->s_time_gran); 1710 1710 1711 1711 req->r_op = op; 1712 1712 req->r_direct_mode = mode;
+8
fs/configfs/item.c
··· 138 138 } 139 139 EXPORT_SYMBOL(config_item_get); 140 140 141 + struct config_item *config_item_get_unless_zero(struct config_item *item) 142 + { 143 + if (item && kref_get_unless_zero(&item->ci_kref)) 144 + return item; 145 + return NULL; 146 + } 147 + EXPORT_SYMBOL(config_item_get_unless_zero); 148 + 141 149 static void config_item_cleanup(struct config_item *item) 142 150 { 143 151 struct config_item_type *t = item->ci_type;
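
config_item_get_unless_zero() is the familiar kref_get_unless_zero() pattern: take a reference only while the count is still non-zero, so a lookup racing against the final put cannot resurrect an item that is already being torn down. A userspace sketch of the compare-and-swap loop using C11 atomics, with obj as a stand-in type:

#include <stdatomic.h>
#include <stdio.h>

struct obj { atomic_uint refcount; };

static int get_unless_zero(struct obj *o)
{
        unsigned int c = atomic_load(&o->refcount);

        while (c) {
                /* Increment only if the count is still the value we saw;
                 * on failure the CAS reloads c and we retry. */
                if (atomic_compare_exchange_weak(&o->refcount, &c, c + 1))
                        return 1;
        }
        return 0;       /* already on its way to destruction */
}

int main(void)
{
        struct obj live = { 1 }, dying = { 0 };

        printf("%d %d\n", get_unless_zero(&live), get_unless_zero(&dying)); /* 1 0 */
        return 0;
}
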
+1 -2
fs/configfs/symlink.c
··· 83 83 ret = -ENOMEM; 84 84 sl = kmalloc(sizeof(struct configfs_symlink), GFP_KERNEL); 85 85 if (sl) { 86 - sl->sl_target = config_item_get(item); 87 86 spin_lock(&configfs_dirent_lock); 88 87 if (target_sd->s_type & CONFIGFS_USET_DROPPING) { 89 88 spin_unlock(&configfs_dirent_lock); 90 - config_item_put(item); 91 89 kfree(sl); 92 90 return -ENOENT; 93 91 } 92 + sl->sl_target = config_item_get(item); 94 93 list_add(&sl->sl_list, &target_sd->s_links); 95 94 spin_unlock(&configfs_dirent_lock); 96 95 ret = configfs_create_link(sl, parent_item->ci_dentry,
+4 -6
fs/dcache.c
··· 1494 1494 { 1495 1495 struct detach_data *data = _data; 1496 1496 1497 - if (!data->mountpoint && !data->select.found) 1497 + if (!data->mountpoint && list_empty(&data->select.dispose)) 1498 1498 __d_drop(data->select.start); 1499 1499 } 1500 1500 ··· 1536 1536 1537 1537 d_walk(dentry, &data, detach_and_collect, check_and_drop); 1538 1538 1539 - if (data.select.found) 1539 + if (!list_empty(&data.select.dispose)) 1540 1540 shrink_dentry_list(&data.select.dispose); 1541 + else if (!data.mountpoint) 1542 + return; 1541 1543 1542 1544 if (data.mountpoint) { 1543 1545 detach_mounts(data.mountpoint); 1544 1546 dput(data.mountpoint); 1545 1547 } 1546 - 1547 - if (!data.mountpoint && !data.select.found) 1548 - break; 1549 - 1550 1548 cond_resched(); 1551 1549 } 1552 1550 }
+1 -1
fs/hugetlbfs/inode.c
··· 200 200 addr = ALIGN(addr, huge_page_size(h)); 201 201 vma = find_vma(mm, addr); 202 202 if (TASK_SIZE - len >= addr && 203 - (!vma || addr + len <= vma->vm_start)) 203 + (!vma || addr + len <= vm_start_gap(vma))) 204 204 return addr; 205 205 } 206 206
+2
fs/namespace.c
··· 3488 3488 return err; 3489 3489 } 3490 3490 3491 + put_mnt_ns(old_mnt_ns); 3492 + 3491 3493 /* Update the pwd and root */ 3492 3494 set_fs_pwd(fs, &root); 3493 3495 set_fs_root(fs, &root);
-4
fs/proc/task_mmu.c
··· 300 300 301 301 /* We don't show the stack guard page in /proc/maps */ 302 302 start = vma->vm_start; 303 - if (stack_guard_page_start(vma, start)) 304 - start += PAGE_SIZE; 305 303 end = vma->vm_end; 306 - if (stack_guard_page_end(vma, end)) 307 - end -= PAGE_SIZE; 308 304 309 305 seq_setwidth(m, 25 + sizeof(void *) * 6 - 1); 310 306 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
+1 -1
fs/read_write.c
··· 1285 1285 if (!(file->f_mode & FMODE_CAN_WRITE)) 1286 1286 goto out; 1287 1287 1288 - ret = compat_do_readv_writev(WRITE, file, vec, vlen, pos, 0); 1288 + ret = compat_do_readv_writev(WRITE, file, vec, vlen, pos, flags); 1289 1289 1290 1290 out: 1291 1291 if (ret > 0)
+19 -25
fs/ufs/balloc.c
··· 400 400 /* 401 401 * There is not enough space for user on the device 402 402 */ 403 - if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) { 404 - mutex_unlock(&UFS_SB(sb)->s_lock); 405 - UFSD("EXIT (FAILED)\n"); 406 - return 0; 403 + if (unlikely(ufs_freefrags(uspi) <= uspi->s_root_blocks)) { 404 + if (!capable(CAP_SYS_RESOURCE)) { 405 + mutex_unlock(&UFS_SB(sb)->s_lock); 406 + UFSD("EXIT (FAILED)\n"); 407 + return 0; 408 + } 407 409 } 408 410 409 411 if (goal >= uspi->s_size) ··· 423 421 if (result) { 424 422 ufs_clear_frags(inode, result + oldcount, 425 423 newcount - oldcount, locked_page != NULL); 424 + *err = 0; 426 425 write_seqlock(&UFS_I(inode)->meta_lock); 427 426 ufs_cpu_to_data_ptr(sb, p, result); 428 - write_sequnlock(&UFS_I(inode)->meta_lock); 429 - *err = 0; 430 427 UFS_I(inode)->i_lastfrag = 431 428 max(UFS_I(inode)->i_lastfrag, fragment + count); 429 + write_sequnlock(&UFS_I(inode)->meta_lock); 432 430 } 433 431 mutex_unlock(&UFS_SB(sb)->s_lock); 434 432 UFSD("EXIT, result %llu\n", (unsigned long long)result); ··· 441 439 result = ufs_add_fragments(inode, tmp, oldcount, newcount); 442 440 if (result) { 443 441 *err = 0; 442 + read_seqlock_excl(&UFS_I(inode)->meta_lock); 444 443 UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag, 445 444 fragment + count); 445 + read_sequnlock_excl(&UFS_I(inode)->meta_lock); 446 446 ufs_clear_frags(inode, result + oldcount, newcount - oldcount, 447 447 locked_page != NULL); 448 448 mutex_unlock(&UFS_SB(sb)->s_lock); ··· 455 451 /* 456 452 * allocate new block and move data 457 453 */ 458 - switch (fs32_to_cpu(sb, usb1->fs_optim)) { 459 - case UFS_OPTSPACE: 454 + if (fs32_to_cpu(sb, usb1->fs_optim) == UFS_OPTSPACE) { 460 455 request = newcount; 461 - if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree 462 - > uspi->s_dsize * uspi->s_minfree / (2 * 100)) 463 - break; 464 - usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME); 465 - break; 466 - default: 467 - usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME); 468 - 469 - case UFS_OPTTIME: 456 + if (uspi->cs_total.cs_nffree < uspi->s_space_to_time) 457 + usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME); 458 + } else { 470 459 request = uspi->s_fpb; 471 - if (uspi->cs_total.cs_nffree < uspi->s_dsize * 472 - (uspi->s_minfree - 2) / 100) 473 - break; 474 - usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME); 475 - break; 460 + if (uspi->cs_total.cs_nffree > uspi->s_time_to_space) 461 + usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE); 476 462 } 477 463 result = ufs_alloc_fragments (inode, cgno, goal, request, err); 478 464 if (result) { 479 465 ufs_clear_frags(inode, result + oldcount, newcount - oldcount, 480 466 locked_page != NULL); 467 + mutex_unlock(&UFS_SB(sb)->s_lock); 481 468 ufs_change_blocknr(inode, fragment - oldcount, oldcount, 482 469 uspi->s_sbbase + tmp, 483 470 uspi->s_sbbase + result, locked_page); 471 + *err = 0; 484 472 write_seqlock(&UFS_I(inode)->meta_lock); 485 473 ufs_cpu_to_data_ptr(sb, p, result); 486 - write_sequnlock(&UFS_I(inode)->meta_lock); 487 - *err = 0; 488 474 UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag, 489 475 fragment + count); 490 - mutex_unlock(&UFS_SB(sb)->s_lock); 476 + write_sequnlock(&UFS_I(inode)->meta_lock); 491 477 if (newcount < request) 492 478 ufs_free_fragments (inode, result + newcount, request - newcount); 493 479 ufs_free_fragments (inode, tmp, oldcount);
+43 -31
fs/ufs/inode.c
··· 401 401 u64 phys64 = 0; 402 402 unsigned frag = fragment & uspi->s_fpbmask; 403 403 404 - if (!create) { 405 - phys64 = ufs_frag_map(inode, offsets, depth); 406 - if (phys64) 407 - map_bh(bh_result, sb, phys64 + frag); 408 - return 0; 409 - } 404 + phys64 = ufs_frag_map(inode, offsets, depth); 405 + if (!create) 406 + goto done; 410 407 408 + if (phys64) { 409 + if (fragment >= UFS_NDIR_FRAGMENT) 410 + goto done; 411 + read_seqlock_excl(&UFS_I(inode)->meta_lock); 412 + if (fragment < UFS_I(inode)->i_lastfrag) { 413 + read_sequnlock_excl(&UFS_I(inode)->meta_lock); 414 + goto done; 415 + } 416 + read_sequnlock_excl(&UFS_I(inode)->meta_lock); 417 + } 411 418 /* This code entered only while writing ....? */ 412 419 413 420 mutex_lock(&UFS_I(inode)->truncate_mutex); ··· 458 451 } 459 452 mutex_unlock(&UFS_I(inode)->truncate_mutex); 460 453 return err; 454 + 455 + done: 456 + if (phys64) 457 + map_bh(bh_result, sb, phys64 + frag); 458 + return 0; 461 459 } 462 460 463 461 static int ufs_writepage(struct page *page, struct writeback_control *wbc) ··· 566 554 */ 567 555 inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode); 568 556 set_nlink(inode, fs16_to_cpu(sb, ufs_inode->ui_nlink)); 569 - if (inode->i_nlink == 0) { 570 - ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino); 571 - return -1; 572 - } 557 + if (inode->i_nlink == 0) 558 + return -ESTALE; 573 559 574 560 /* 575 561 * Linux now has 32-bit uid and gid, so we can support EFT. ··· 576 566 i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode)); 577 567 578 568 inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size); 579 - inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec); 580 - inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec); 581 - inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec); 569 + inode->i_atime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec); 570 + inode->i_ctime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec); 571 + inode->i_mtime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec); 582 572 inode->i_mtime.tv_nsec = 0; 583 573 inode->i_atime.tv_nsec = 0; 584 574 inode->i_ctime.tv_nsec = 0; ··· 612 602 */ 613 603 inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode); 614 604 set_nlink(inode, fs16_to_cpu(sb, ufs2_inode->ui_nlink)); 615 - if (inode->i_nlink == 0) { 616 - ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino); 617 - return -1; 618 - } 605 + if (inode->i_nlink == 0) 606 + return -ESTALE; 619 607 620 608 /* 621 609 * Linux now has 32-bit uid and gid, so we can support EFT. 
··· 653 645 struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; 654 646 struct buffer_head * bh; 655 647 struct inode *inode; 656 - int err; 648 + int err = -EIO; 657 649 658 650 UFSD("ENTER, ino %lu\n", ino); 659 651 ··· 688 680 err = ufs1_read_inode(inode, 689 681 ufs_inode + ufs_inotofsbo(inode->i_ino)); 690 682 } 691 - 683 + brelse(bh); 692 684 if (err) 693 685 goto bad_inode; 686 + 694 687 inode->i_version++; 695 688 ufsi->i_lastfrag = 696 689 (inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift; ··· 700 691 701 692 ufs_set_inode_ops(inode); 702 693 703 - brelse(bh); 704 - 705 694 UFSD("EXIT\n"); 706 695 unlock_new_inode(inode); 707 696 return inode; 708 697 709 698 bad_inode: 710 699 iget_failed(inode); 711 - return ERR_PTR(-EIO); 700 + return ERR_PTR(err); 712 701 } 713 702 714 703 static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode) ··· 881 874 ctx->to = from + count; 882 875 } 883 876 884 - #define DIRECT_BLOCK ((inode->i_size + uspi->s_bsize - 1) >> uspi->s_bshift) 885 877 #define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift) 886 878 887 879 static void ufs_trunc_direct(struct inode *inode) ··· 1118 1112 struct super_block *sb = inode->i_sb; 1119 1113 struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; 1120 1114 unsigned offsets[4]; 1121 - int depth = ufs_block_to_path(inode, DIRECT_BLOCK, offsets); 1115 + int depth; 1122 1116 int depth2; 1123 1117 unsigned i; 1124 1118 struct ufs_buffer_head *ubh[3]; 1125 1119 void *p; 1126 1120 u64 block; 1127 1121 1128 - if (!depth) 1129 - return; 1122 + if (inode->i_size) { 1123 + sector_t last = (inode->i_size - 1) >> uspi->s_bshift; 1124 + depth = ufs_block_to_path(inode, last, offsets); 1125 + if (!depth) 1126 + return; 1127 + } else { 1128 + depth = 1; 1129 + } 1130 1130 1131 - /* find the last non-zero in offsets[] */ 1132 1131 for (depth2 = depth - 1; depth2; depth2--) 1133 - if (offsets[depth2]) 1132 + if (offsets[depth2] != uspi->s_apb - 1) 1134 1133 break; 1135 1134 1136 1135 mutex_lock(&ufsi->truncate_mutex); ··· 1144 1133 offsets[0] = UFS_IND_BLOCK; 1145 1134 } else { 1146 1135 /* get the blocks that should be partially emptied */ 1147 - p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]); 1136 + p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]++); 1148 1137 for (i = 0; i < depth2; i++) { 1149 - offsets[i]++; /* next branch is fully freed */ 1150 1138 block = ufs_data_ptr_to_cpu(sb, p); 1151 1139 if (!block) 1152 1140 break; ··· 1156 1146 write_sequnlock(&ufsi->meta_lock); 1157 1147 break; 1158 1148 } 1159 - p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]); 1149 + p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]++); 1160 1150 } 1161 1151 while (i--) 1162 1152 free_branch_tail(inode, offsets[i + 1], ubh[i], depth - i - 1); ··· 1171 1161 free_full_branch(inode, block, i - UFS_IND_BLOCK + 1); 1172 1162 } 1173 1163 } 1164 + read_seqlock_excl(&ufsi->meta_lock); 1174 1165 ufsi->i_lastfrag = DIRECT_FRAGMENT; 1166 + read_sequnlock_excl(&ufsi->meta_lock); 1175 1167 mark_inode_dirty(inode); 1176 1168 mutex_unlock(&ufsi->truncate_mutex); 1177 1169 }
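
Among the ufs_read_inode changes, the (signed) casts are the subtle one: the on-disk timestamps are 32-bit, and widening them unsigned into a 64-bit tv_sec would turn pre-1970 dates into early-2106 dates instead of negative epoch offsets. A quick demonstration of the difference:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t on_disk = 0xfffffff0;  /* -16, i.e. 16 seconds before the epoch */

        long long unsigned_ext = (long long)on_disk;            /* 4294967280 */
        long long signed_ext   = (long long)(int32_t)on_disk;   /* -16 */

        printf("%lld vs %lld\n", unsigned_ext, signed_ext);
        return 0;
}
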
+49 -24
fs/ufs/super.c
··· 480 480 usb3 = ubh_get_usb_third(uspi); 481 481 482 482 if ((mtype == UFS_MOUNT_UFSTYPE_44BSD && 483 - (usb1->fs_flags & UFS_FLAGS_UPDATED)) || 483 + (usb2->fs_un.fs_u2.fs_maxbsize == usb1->fs_bsize)) || 484 484 mtype == UFS_MOUNT_UFSTYPE_UFS2) { 485 485 /*we have statistic in different place, then usual*/ 486 486 uspi->cs_total.cs_ndir = fs64_to_cpu(sb, usb2->fs_un.fs_u2.cs_ndir); ··· 596 596 usb2 = ubh_get_usb_second(uspi); 597 597 usb3 = ubh_get_usb_third(uspi); 598 598 599 - if ((mtype == UFS_MOUNT_UFSTYPE_44BSD && 600 - (usb1->fs_flags & UFS_FLAGS_UPDATED)) || 601 - mtype == UFS_MOUNT_UFSTYPE_UFS2) { 599 + if (mtype == UFS_MOUNT_UFSTYPE_UFS2) { 602 600 /*we have statistic in different place, then usual*/ 603 601 usb2->fs_un.fs_u2.cs_ndir = 604 602 cpu_to_fs64(sb, uspi->cs_total.cs_ndir); ··· 606 608 cpu_to_fs64(sb, uspi->cs_total.cs_nifree); 607 609 usb3->fs_un1.fs_u2.cs_nffree = 608 610 cpu_to_fs64(sb, uspi->cs_total.cs_nffree); 609 - } else { 610 - usb1->fs_cstotal.cs_ndir = 611 - cpu_to_fs32(sb, uspi->cs_total.cs_ndir); 612 - usb1->fs_cstotal.cs_nbfree = 613 - cpu_to_fs32(sb, uspi->cs_total.cs_nbfree); 614 - usb1->fs_cstotal.cs_nifree = 615 - cpu_to_fs32(sb, uspi->cs_total.cs_nifree); 616 - usb1->fs_cstotal.cs_nffree = 617 - cpu_to_fs32(sb, uspi->cs_total.cs_nffree); 611 + goto out; 618 612 } 613 + 614 + if (mtype == UFS_MOUNT_UFSTYPE_44BSD && 615 + (usb2->fs_un.fs_u2.fs_maxbsize == usb1->fs_bsize)) { 616 + /* store stats in both old and new places */ 617 + usb2->fs_un.fs_u2.cs_ndir = 618 + cpu_to_fs64(sb, uspi->cs_total.cs_ndir); 619 + usb2->fs_un.fs_u2.cs_nbfree = 620 + cpu_to_fs64(sb, uspi->cs_total.cs_nbfree); 621 + usb3->fs_un1.fs_u2.cs_nifree = 622 + cpu_to_fs64(sb, uspi->cs_total.cs_nifree); 623 + usb3->fs_un1.fs_u2.cs_nffree = 624 + cpu_to_fs64(sb, uspi->cs_total.cs_nffree); 625 + } 626 + usb1->fs_cstotal.cs_ndir = cpu_to_fs32(sb, uspi->cs_total.cs_ndir); 627 + usb1->fs_cstotal.cs_nbfree = cpu_to_fs32(sb, uspi->cs_total.cs_nbfree); 628 + usb1->fs_cstotal.cs_nifree = cpu_to_fs32(sb, uspi->cs_total.cs_nifree); 629 + usb1->fs_cstotal.cs_nffree = cpu_to_fs32(sb, uspi->cs_total.cs_nffree); 630 + out: 619 631 ubh_mark_buffer_dirty(USPI_UBH(uspi)); 620 632 ufs_print_super_stuff(sb, usb1, usb2, usb3); 621 633 UFSD("EXIT\n"); ··· 1004 996 flags |= UFS_ST_SUN; 1005 997 } 1006 998 999 + if ((flags & UFS_ST_MASK) == UFS_ST_44BSD && 1000 + uspi->s_postblformat == UFS_42POSTBLFMT) { 1001 + if (!silent) 1002 + pr_err("this is not a 44bsd filesystem"); 1003 + goto failed; 1004 + } 1005 + 1007 1006 /* 1008 1007 * Check ufs magic number 1009 1008 */ ··· 1158 1143 uspi->s_cgmask = fs32_to_cpu(sb, usb1->fs_cgmask); 1159 1144 1160 1145 if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) { 1161 - uspi->s_u2_size = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_size); 1162 - uspi->s_u2_dsize = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_dsize); 1146 + uspi->s_size = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_size); 1147 + uspi->s_dsize = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_dsize); 1163 1148 } else { 1164 1149 uspi->s_size = fs32_to_cpu(sb, usb1->fs_size); 1165 1150 uspi->s_dsize = fs32_to_cpu(sb, usb1->fs_dsize); ··· 1207 1192 uspi->s_nrpos = fs32_to_cpu(sb, usb3->fs_nrpos); 1208 1193 uspi->s_postbloff = fs32_to_cpu(sb, usb3->fs_postbloff); 1209 1194 uspi->s_rotbloff = fs32_to_cpu(sb, usb3->fs_rotbloff); 1195 + 1196 + uspi->s_root_blocks = mul_u64_u32_div(uspi->s_dsize, 1197 + uspi->s_minfree, 100); 1198 + if (uspi->s_minfree <= 5) { 1199 + uspi->s_time_to_space = ~0ULL; 1200 + uspi->s_space_to_time = 0; 1201 + 
usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE); 1202 + } else { 1203 + uspi->s_time_to_space = (uspi->s_root_blocks / 2) + 1; 1204 + uspi->s_space_to_time = mul_u64_u32_div(uspi->s_dsize, 1205 + uspi->s_minfree - 2, 100) - 1; 1206 + } 1210 1207 1211 1208 /* 1212 1209 * Compute another frequently used values ··· 1409 1382 mutex_lock(&UFS_SB(sb)->s_lock); 1410 1383 usb3 = ubh_get_usb_third(uspi); 1411 1384 1412 - if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) { 1385 + if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) 1413 1386 buf->f_type = UFS2_MAGIC; 1414 - buf->f_blocks = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_dsize); 1415 - } else { 1387 + else 1416 1388 buf->f_type = UFS_MAGIC; 1417 - buf->f_blocks = uspi->s_dsize; 1418 - } 1419 - buf->f_bfree = ufs_blkstofrags(uspi->cs_total.cs_nbfree) + 1420 - uspi->cs_total.cs_nffree; 1389 + 1390 + buf->f_blocks = uspi->s_dsize; 1391 + buf->f_bfree = ufs_freefrags(uspi); 1421 1392 buf->f_ffree = uspi->cs_total.cs_nifree; 1422 1393 buf->f_bsize = sb->s_blocksize; 1423 - buf->f_bavail = (buf->f_bfree > (((long)buf->f_blocks / 100) * uspi->s_minfree)) 1424 - ? (buf->f_bfree - (((long)buf->f_blocks / 100) * uspi->s_minfree)) : 0; 1394 + buf->f_bavail = (buf->f_bfree > uspi->s_root_blocks) 1395 + ? (buf->f_bfree - uspi->s_root_blocks) : 0; 1425 1396 buf->f_files = uspi->s_ncg * uspi->s_ipg; 1426 1397 buf->f_namelen = UFS_MAXNAMLEN; 1427 1398 buf->f_fsid.val[0] = (u32)id;
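
The superblock changes above precompute three thresholds at mount time (s_root_blocks, s_time_to_space, s_space_to_time) so that ufs_new_fragments() no longer redoes percentage math on every allocation. Re-running the arithmetic standalone for an invented filesystem; mul_u64_u32_div() is reduced to plain 64-bit math here, where the real helper also guards against overflow of the intermediate product:

#include <stdint.h>
#include <stdio.h>

static uint64_t mul_u64_u32_div(uint64_t a, uint32_t mul, uint32_t div)
{
        return a * mul / div;   /* simplified; see the kernel helper */
}

int main(void)
{
        uint64_t s_dsize = 1000000;     /* data fragments, invented */
        uint32_t s_minfree = 8;         /* percent reserved for root */

        uint64_t root_blocks   = mul_u64_u32_div(s_dsize, s_minfree, 100);
        uint64_t time_to_space = root_blocks / 2 + 1;
        uint64_t space_to_time = mul_u64_u32_div(s_dsize, s_minfree - 2, 100) - 1;

        printf("root=%llu t2s=%llu s2t=%llu\n",
               (unsigned long long)root_blocks,
               (unsigned long long)time_to_space,
               (unsigned long long)space_to_time);
        return 0;
}
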
+5 -4
fs/ufs/ufs_fs.h
··· 733 733 __u32 s_dblkno; /* offset of first data after cg */ 734 734 __u32 s_cgoffset; /* cylinder group offset in cylinder */ 735 735 __u32 s_cgmask; /* used to calc mod fs_ntrak */ 736 - __u32 s_size; /* number of blocks (fragments) in fs */ 737 - __u32 s_dsize; /* number of data blocks in fs */ 738 - __u64 s_u2_size; /* ufs2: number of blocks (fragments) in fs */ 739 - __u64 s_u2_dsize; /*ufs2: number of data blocks in fs */ 736 + __u64 s_size; /* number of blocks (fragments) in fs */ 737 + __u64 s_dsize; /* number of data blocks in fs */ 740 738 __u32 s_ncg; /* number of cylinder groups */ 741 739 __u32 s_bsize; /* size of basic blocks */ 742 740 __u32 s_fsize; /* size of fragments */ ··· 791 793 __u32 s_maxsymlinklen;/* upper limit on fast symlinks' size */ 792 794 __s32 fs_magic; /* filesystem magic */ 793 795 unsigned int s_dirblksize; 796 + __u64 s_root_blocks; 797 + __u64 s_time_to_space; 798 + __u64 s_space_to_time; 794 799 }; 795 800 796 801 /*
+8 -9
fs/ufs/util.c
··· 243 243 struct page *ufs_get_locked_page(struct address_space *mapping, 244 244 pgoff_t index) 245 245 { 246 - struct page *page; 247 - 248 - page = find_lock_page(mapping, index); 246 + struct inode *inode = mapping->host; 247 + struct page *page = find_lock_page(mapping, index); 249 248 if (!page) { 250 249 page = read_mapping_page(mapping, index, NULL); 251 250 ··· 252 253 printk(KERN_ERR "ufs_change_blocknr: " 253 254 "read_mapping_page error: ino %lu, index: %lu\n", 254 255 mapping->host->i_ino, index); 255 - goto out; 256 + return page; 256 257 } 257 258 258 259 lock_page(page); ··· 261 262 /* Truncate got there first */ 262 263 unlock_page(page); 263 264 put_page(page); 264 - page = NULL; 265 - goto out; 265 + return NULL; 266 266 } 267 267 268 268 if (!PageUptodate(page) || PageError(page)) { ··· 270 272 271 273 printk(KERN_ERR "ufs_change_blocknr: " 272 274 "can not read page: ino %lu, index: %lu\n", 273 - mapping->host->i_ino, index); 275 + inode->i_ino, index); 274 276 275 - page = ERR_PTR(-EIO); 277 + return ERR_PTR(-EIO); 276 278 } 277 279 } 278 - out: 280 + if (!page_has_buffers(page)) 281 + create_empty_buffers(page, 1 << inode->i_blkbits, 0); 279 282 return page; 280 283 }
+2 -7
fs/ufs/util.h
··· 350 350 #define ubh_blkmap(ubh,begin,bit) \ 351 351 ((*ubh_get_addr(ubh, (begin) + ((bit) >> 3)) >> ((bit) & 7)) & (0xff >> (UFS_MAXFRAG - uspi->s_fpb))) 352 352 353 - /* 354 - * Determine the number of available frags given a 355 - * percentage to hold in reserve. 356 - */ 357 353 static inline u64 358 - ufs_freespace(struct ufs_sb_private_info *uspi, int percentreserved) 354 + ufs_freefrags(struct ufs_sb_private_info *uspi) 359 355 { 360 356 return ufs_blkstofrags(uspi->cs_total.cs_nbfree) + 361 - uspi->cs_total.cs_nffree - 362 - (uspi->s_dsize * (percentreserved) / 100); 357 + uspi->cs_total.cs_nffree; 363 358 } 364 359 365 360 /*
+21 -8
fs/userfaultfd.c
··· 340 340 bool must_wait, return_to_userland; 341 341 long blocking_state; 342 342 343 - BUG_ON(!rwsem_is_locked(&mm->mmap_sem)); 344 - 345 343 ret = VM_FAULT_SIGBUS; 344 + 345 + /* 346 + * We don't do userfault handling for the final child pid update. 347 + * 348 + * We also don't do userfault handling during 349 + * coredumping. hugetlbfs has the special 350 + * follow_hugetlb_page() to skip missing pages in the 351 + * FOLL_DUMP case, anon memory also checks for FOLL_DUMP with 352 + * the no_page_table() helper in follow_page_mask(), but the 353 + * shmem_vm_ops->fault method is invoked even during 354 + * coredumping without mmap_sem and it ends up here. 355 + */ 356 + if (current->flags & (PF_EXITING|PF_DUMPCORE)) 357 + goto out; 358 + 359 + /* 360 + * Coredumping runs without mmap_sem so we can only check that 361 + * the mmap_sem is held, if PF_DUMPCORE was not set. 362 + */ 363 + WARN_ON_ONCE(!rwsem_is_locked(&mm->mmap_sem)); 364 + 346 365 ctx = vmf->vma->vm_userfaultfd_ctx.ctx; 347 366 if (!ctx) 348 367 goto out; ··· 377 358 * caller of handle_userfault to release the mmap_sem. 378 359 */ 379 360 if (unlikely(ACCESS_ONCE(ctx->released))) 380 - goto out; 381 - 382 - /* 383 - * We don't do userfault handling for the final child pid update. 384 - */ 385 - if (current->flags & PF_EXITING) 386 361 goto out; 387 362 388 363 /*
+1 -1
fs/xfs/xfs_buf.c
··· 117 117 __xfs_buf_ioacct_dec( 118 118 struct xfs_buf *bp) 119 119 { 120 - ASSERT(spin_is_locked(&bp->b_lock)); 120 + lockdep_assert_held(&bp->b_lock); 121 121 122 122 if (bp->b_state & XFS_BSTATE_IN_FLIGHT) { 123 123 bp->b_state &= ~XFS_BSTATE_IN_FLIGHT;
+2 -3
fs/xfs/xfs_icache.c
··· 66 66 67 67 XFS_STATS_INC(mp, vn_active); 68 68 ASSERT(atomic_read(&ip->i_pincount) == 0); 69 - ASSERT(!spin_is_locked(&ip->i_flags_lock)); 70 69 ASSERT(!xfs_isiflocked(ip)); 71 70 ASSERT(ip->i_ino == 0); 72 71 ··· 189 190 { 190 191 struct xfs_mount *mp = pag->pag_mount; 191 192 192 - ASSERT(spin_is_locked(&pag->pag_ici_lock)); 193 + lockdep_assert_held(&pag->pag_ici_lock); 193 194 if (pag->pag_ici_reclaimable++) 194 195 return; 195 196 ··· 211 212 { 212 213 struct xfs_mount *mp = pag->pag_mount; 213 214 214 - ASSERT(spin_is_locked(&pag->pag_ici_lock)); 215 + lockdep_assert_held(&pag->pag_ici_lock); 215 216 if (--pag->pag_ici_reclaimable) 216 217 return; 217 218
+2
include/dt-bindings/clock/sun50i-a64-ccu.h
··· 43 43 #ifndef _DT_BINDINGS_CLK_SUN50I_A64_H_ 44 44 #define _DT_BINDINGS_CLK_SUN50I_A64_H_ 45 45 46 + #define CLK_PLL_PERIPH0 11 47 + 46 48 #define CLK_BUS_MIPI_DSI 28 47 49 #define CLK_BUS_CE 29 48 50 #define CLK_BUS_DMA 30
+2
include/dt-bindings/clock/sun8i-h3-ccu.h
··· 43 43 #ifndef _DT_BINDINGS_CLK_SUN8I_H3_H_ 44 44 #define _DT_BINDINGS_CLK_SUN8I_H3_H_ 45 45 46 + #define CLK_PLL_PERIPH0 9 47 + 46 48 #define CLK_CPUX 14 47 49 48 50 #define CLK_BUS_CE 20
+2
include/linux/blkdev.h
··· 586 586 587 587 size_t cmd_size; 588 588 void *rq_alloc_data; 589 + 590 + struct work_struct release_work; 589 591 }; 590 592 591 593 #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
+2 -1
include/linux/configfs.h
··· 74 74 const char *name, 75 75 struct config_item_type *type); 76 76 77 - extern struct config_item * config_item_get(struct config_item *); 77 + extern struct config_item *config_item_get(struct config_item *); 78 + extern struct config_item *config_item_get_unless_zero(struct config_item *); 78 79 extern void config_item_put(struct config_item *); 79 80 80 81 struct config_item_type {
+1 -1
include/linux/dmi.h
··· 136 136 static inline int dmi_name_in_serial(const char *s) { return 0; } 137 137 #define dmi_available 0 138 138 static inline int dmi_walk(void (*decode)(const struct dmi_header *, void *), 139 - void *private_data) { return -1; } 139 + void *private_data) { return -ENXIO; } 140 140 static inline bool dmi_match(enum dmi_field f, const char *str) 141 141 { return false; } 142 142 static inline void dmi_memdev_name(u16 handle, const char **bank,
+25 -28
include/linux/mm.h
··· 1393 1393 1394 1394 int get_cmdline(struct task_struct *task, char *buffer, int buflen); 1395 1395 1396 - /* Is the vma a continuation of the stack vma above it? */ 1397 - static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr) 1398 - { 1399 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN); 1400 - } 1401 - 1402 1396 static inline bool vma_is_anonymous(struct vm_area_struct *vma) 1403 1397 { 1404 1398 return !vma->vm_ops; ··· 1407 1413 #else 1408 1414 static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; } 1409 1415 #endif 1410 - 1411 - static inline int stack_guard_page_start(struct vm_area_struct *vma, 1412 - unsigned long addr) 1413 - { 1414 - return (vma->vm_flags & VM_GROWSDOWN) && 1415 - (vma->vm_start == addr) && 1416 - !vma_growsdown(vma->vm_prev, addr); 1417 - } 1418 - 1419 - /* Is the vma a continuation of the stack vma below it? */ 1420 - static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr) 1421 - { 1422 - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP); 1423 - } 1424 - 1425 - static inline int stack_guard_page_end(struct vm_area_struct *vma, 1426 - unsigned long addr) 1427 - { 1428 - return (vma->vm_flags & VM_GROWSUP) && 1429 - (vma->vm_end == addr) && 1430 - !vma_growsup(vma->vm_next, addr); 1431 - } 1432 1416 1433 1417 int vma_is_stack_for_current(struct vm_area_struct *vma); 1434 1418 ··· 2194 2222 pgoff_t offset, 2195 2223 unsigned long size); 2196 2224 2225 + extern unsigned long stack_guard_gap; 2197 2226 /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */ 2198 2227 extern int expand_stack(struct vm_area_struct *vma, unsigned long address); 2199 2228 ··· 2221 2248 if (vma && end_addr <= vma->vm_start) 2222 2249 vma = NULL; 2223 2250 return vma; 2251 + } 2252 + 2253 + static inline unsigned long vm_start_gap(struct vm_area_struct *vma) 2254 + { 2255 + unsigned long vm_start = vma->vm_start; 2256 + 2257 + if (vma->vm_flags & VM_GROWSDOWN) { 2258 + vm_start -= stack_guard_gap; 2259 + if (vm_start > vma->vm_start) 2260 + vm_start = 0; 2261 + } 2262 + return vm_start; 2263 + } 2264 + 2265 + static inline unsigned long vm_end_gap(struct vm_area_struct *vma) 2266 + { 2267 + unsigned long vm_end = vma->vm_end; 2268 + 2269 + if (vma->vm_flags & VM_GROWSUP) { 2270 + vm_end += stack_guard_gap; 2271 + if (vm_end < vma->vm_end) 2272 + vm_end = -PAGE_SIZE; 2273 + } 2274 + return vm_end; 2224 2275 } 2225 2276 2226 2277 static inline unsigned long vma_pages(struct vm_area_struct *vma)
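
vm_start_gap() and vm_end_gap() are the heart of the stack guard gap rework: a stack VMA is reported as if it already covered its guard gap (256 pages by default, overridable on the kernel command line), with the arithmetic clamped on address wrap. A standalone replay of the grows-down case, with an illustrative 4K page size:

#include <stdio.h>

#define STACK_GUARD_GAP (256UL << 12)   /* 256 pages of 4K, the default gap */

static unsigned long vm_start_gap(unsigned long vm_start, int grows_down)
{
        unsigned long start = vm_start;

        if (grows_down) {
                start -= STACK_GUARD_GAP;
                if (start > vm_start)   /* arithmetic wrapped below zero */
                        start = 0;
        }
        return start;
}

int main(void)
{
        printf("%#lx\n", vm_start_gap(0x7ffff0000000UL, 1));    /* 0x7fffeff00000 */
        printf("%#lx\n", vm_start_gap(0x10000UL, 1));           /* clamped to 0 */
        return 0;
}
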
+2 -2
include/net/wext.h
··· 6 6 struct net; 7 7 8 8 #ifdef CONFIG_WEXT_CORE 9 - int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd, 9 + int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd, 10 10 void __user *arg); 11 11 int compat_wext_handle_ioctl(struct net *net, unsigned int cmd, 12 12 unsigned long arg); ··· 14 14 struct iw_statistics *get_wireless_stats(struct net_device *dev); 15 15 int call_commit_handler(struct net_device *dev); 16 16 #else 17 - static inline int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd, 17 + static inline int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd, 18 18 void __user *arg) 19 19 { 20 20 return -EINVAL;
+3 -1
kernel/irq/manage.c
··· 1312 1312 ret = __irq_set_trigger(desc, 1313 1313 new->flags & IRQF_TRIGGER_MASK); 1314 1314 1315 - if (ret) 1315 + if (ret) { 1316 + irq_release_resources(desc); 1316 1317 goto out_mask; 1318 + } 1317 1319 } 1318 1320 1319 1321 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
+6 -2
kernel/livepatch/patch.c
··· 59 59 60 60 ops = container_of(fops, struct klp_ops, fops); 61 61 62 - rcu_read_lock(); 62 + /* 63 + * A variant of synchronize_sched() is used to allow patching functions 64 + * where RCU is not watching, see klp_synchronize_transition(). 65 + */ 66 + preempt_disable_notrace(); 63 67 64 68 func = list_first_or_null_rcu(&ops->func_stack, struct klp_func, 65 69 stack_node); ··· 119 115 120 116 klp_arch_set_pc(regs, (unsigned long)func->new_func); 121 117 unlock: 122 - rcu_read_unlock(); 118 + preempt_enable_notrace(); 123 119 } 124 120 125 121 /*
+31 -5
kernel/livepatch/transition.c
··· 49 49 static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn); 50 50 51 51 /* 52 + * This function is just a stub to implement a hard force 53 + * of synchronize_sched(). This requires synchronizing 54 + * tasks even in userspace and idle. 55 + */ 56 + static void klp_sync(struct work_struct *work) 57 + { 58 + } 59 + 60 + /* 61 + * We allow to patch also functions where RCU is not watching, 62 + * e.g. before user_exit(). We can not rely on the RCU infrastructure 63 + * to do the synchronization. Instead hard force the sched synchronization. 64 + * 65 + * This approach allows to use RCU functions for manipulating func_stack 66 + * safely. 67 + */ 68 + static void klp_synchronize_transition(void) 69 + { 70 + schedule_on_each_cpu(klp_sync); 71 + } 72 + 73 + /* 52 74 * The transition to the target patch state is complete. Clean up the data 53 75 * structures. 54 76 */ ··· 95 73 * func->transition gets cleared, the handler may choose a 96 74 * removed function. 97 75 */ 98 - synchronize_rcu(); 76 + klp_synchronize_transition(); 99 77 } 100 78 101 79 if (klp_transition_patch->immediate) ··· 114 92 115 93 /* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */ 116 94 if (klp_target_state == KLP_PATCHED) 117 - synchronize_rcu(); 95 + klp_synchronize_transition(); 118 96 119 97 read_lock(&tasklist_lock); 120 98 for_each_process_thread(g, task) { ··· 158 136 */ 159 137 void klp_update_patch_state(struct task_struct *task) 160 138 { 161 - rcu_read_lock(); 139 + /* 140 + * A variant of synchronize_sched() is used to allow patching functions 141 + * where RCU is not watching, see klp_synchronize_transition(). 142 + */ 143 + preempt_disable_notrace(); 162 144 163 145 /* 164 146 * This test_and_clear_tsk_thread_flag() call also serves as a read ··· 179 153 if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING)) 180 154 task->patch_state = READ_ONCE(klp_target_state); 181 155 182 - rcu_read_unlock(); 156 + preempt_enable_notrace(); 183 157 } 184 158 185 159 /* ··· 565 539 clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING); 566 540 567 541 /* Let any remaining calls to klp_update_patch_state() complete */ 568 - synchronize_rcu(); 542 + klp_synchronize_transition(); 569 543 570 544 klp_start_transition(); 571 545 }
+1 -1
kernel/sched/core.c
··· 5605 5605 BUG_ON(cpu_online(smp_processor_id())); 5606 5606 5607 5607 if (mm != &init_mm) { 5608 - switch_mm_irqs_off(mm, &init_mm, current); 5608 + switch_mm(mm, &init_mm, current); 5609 5609 finish_arch_post_lock_switch(); 5610 5610 } 5611 5611 mmdrop(mm);
+1 -1
kernel/sched/fair.c
··· 3563 3563 trace_sched_stat_runtime_enabled()) { 3564 3564 printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, " 3565 3565 "stat_blocked and stat_runtime require the " 3566 - "kernel parameter schedstats=enabled or " 3566 + "kernel parameter schedstats=enable or " 3567 3567 "kernel.sched_schedstats=1\n"); 3568 3568 } 3569 3569 #endif
+11 -3
kernel/time/alarmtimer.c
··· 387 387 { 388 388 struct alarm_base *base = &alarm_bases[alarm->type]; 389 389 390 - start = ktime_add(start, base->gettime()); 390 + start = ktime_add_safe(start, base->gettime()); 391 391 alarm_start(alarm, start); 392 392 } 393 393 EXPORT_SYMBOL_GPL(alarm_start_relative); ··· 475 475 overrun++; 476 476 } 477 477 478 - alarm->node.expires = ktime_add(alarm->node.expires, interval); 478 + alarm->node.expires = ktime_add_safe(alarm->node.expires, interval); 479 479 return overrun; 480 480 } 481 481 EXPORT_SYMBOL_GPL(alarm_forward); ··· 660 660 661 661 /* start the timer */ 662 662 timr->it.alarm.interval = timespec64_to_ktime(new_setting->it_interval); 663 + 664 + /* 665 + * Rate limit to the tick as a hot fix to prevent DOS. Will be 666 + * mopped up later. 667 + */ 668 + if (timr->it.alarm.interval < TICK_NSEC) 669 + timr->it.alarm.interval = TICK_NSEC; 670 + 663 671 exp = timespec64_to_ktime(new_setting->it_value); 664 672 /* Convert (if necessary) to absolute time */ 665 673 if (flags != TIMER_ABSTIME) { 666 674 ktime_t now; 667 675 668 676 now = alarm_bases[timr->it.alarm.alarmtimer.type].gettime(); 669 - exp = ktime_add(now, exp); 677 + exp = ktime_add_safe(now, exp); 670 678 } 671 679 672 680 alarm_start(&timr->it.alarm.alarmtimer, exp);
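
The alarmtimer fixes swap ktime_add() for ktime_add_safe() wherever one operand comes straight from userspace: a huge it_value or interval could otherwise wrap the signed 64-bit nanosecond count negative. ktime_add_safe() clamps to a maximum instead; a minimal re-implementation of a saturating add, noting that the kernel version detects overflow with sign checks and clamps to a KTIME_SEC_MAX-based ceiling rather than using the compiler builtin:

#include <stdint.h>
#include <stdio.h>

#define KTIME_MAX INT64_MAX

static int64_t ktime_add_safe(int64_t lhs, int64_t rhs)
{
        int64_t res;

        /* __builtin_add_overflow (GCC/Clang) reports signed wrap portably. */
        if (__builtin_add_overflow(lhs, rhs, &res))
                return KTIME_MAX;
        return res;
}

int main(void)
{
        printf("%lld\n", (long long)ktime_add_safe(KTIME_MAX - 1, 10));
        /* 9223372036854775807, not a wrapped negative value */
        return 0;
}
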
+3 -1
kernel/time/tick-broadcast.c
··· 37 37 static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(tick_broadcast_lock); 38 38 39 39 #ifdef CONFIG_TICK_ONESHOT 40 + static void tick_broadcast_setup_oneshot(struct clock_event_device *bc); 40 41 static void tick_broadcast_clear_oneshot(int cpu); 41 42 static void tick_resume_broadcast_oneshot(struct clock_event_device *bc); 42 43 #else 44 + static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { BUG(); } 43 45 static inline void tick_broadcast_clear_oneshot(int cpu) { } 44 46 static inline void tick_resume_broadcast_oneshot(struct clock_event_device *bc) { } 45 47 #endif ··· 869 867 /** 870 868 * tick_broadcast_setup_oneshot - setup the broadcast device 871 869 */ 872 - void tick_broadcast_setup_oneshot(struct clock_event_device *bc) 870 + static void tick_broadcast_setup_oneshot(struct clock_event_device *bc) 873 871 { 874 872 int cpu = smp_processor_id(); 875 873
-2
kernel/time/tick-internal.h
··· 126 126 127 127 /* Functions related to oneshot broadcasting */ 128 128 #if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT) 129 - extern void tick_broadcast_setup_oneshot(struct clock_event_device *bc); 130 129 extern void tick_broadcast_switch_to_oneshot(void); 131 130 extern void tick_shutdown_broadcast_oneshot(unsigned int cpu); 132 131 extern int tick_broadcast_oneshot_active(void); ··· 133 134 bool tick_broadcast_oneshot_available(void); 134 135 extern struct cpumask *tick_get_broadcast_oneshot_mask(void); 135 136 #else /* !(BROADCAST && ONESHOT): */ 136 - static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { BUG(); } 137 137 static inline void tick_broadcast_switch_to_oneshot(void) { } 138 138 static inline void tick_shutdown_broadcast_oneshot(unsigned int cpu) { } 139 139 static inline int tick_broadcast_oneshot_active(void) { return 0; }
-5
mm/gup.c
··· 387 387 /* mlock all present pages, but do not fault in new pages */ 388 388 if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK) 389 389 return -ENOENT; 390 - /* For mm_populate(), just skip the stack guard page. */ 391 - if ((*flags & FOLL_POPULATE) && 392 - (stack_guard_page_start(vma, address) || 393 - stack_guard_page_end(vma, address + PAGE_SIZE))) 394 - return -ENOENT; 395 390 if (*flags & FOLL_WRITE) 396 391 fault_flags |= FAULT_FLAG_WRITE; 397 392 if (*flags & FOLL_REMOTE)
+7 -1
mm/huge_memory.c
··· 1426 1426 */ 1427 1427 if (unlikely(pmd_trans_migrating(*vmf->pmd))) { 1428 1428 page = pmd_page(*vmf->pmd); 1429 + if (!get_page_unless_zero(page)) 1430 + goto out_unlock; 1429 1431 spin_unlock(vmf->ptl); 1430 1432 wait_on_page_locked(page); 1433 + put_page(page); 1431 1434 goto out; 1432 1435 } 1433 1436 ··· 1462 1459 1463 1460 /* Migration could have started since the pmd_trans_migrating check */ 1464 1461 if (!page_locked) { 1462 + page_nid = -1; 1463 + if (!get_page_unless_zero(page)) 1464 + goto out_unlock; 1465 1465 spin_unlock(vmf->ptl); 1466 1466 wait_on_page_locked(page); 1467 - page_nid = -1; 1467 + put_page(page); 1468 1468 goto out; 1469 1469 } 1470 1470
+4 -1
mm/memory-failure.c
··· 1184 1184 * page_remove_rmap() in try_to_unmap_one(). So to determine page status 1185 1185 * correctly, we save a copy of the page flags at this time. 1186 1186 */ 1187 - page_flags = p->flags; 1187 + if (PageHuge(p)) 1188 + page_flags = hpage->flags; 1189 + else 1190 + page_flags = p->flags; 1188 1191 1189 1192 /* 1190 1193 * unpoison always clear PG_hwpoison inside page lock
-38
mm/memory.c
··· 2855 2855 } 2856 2856 2857 2857 /* 2858 - * This is like a special single-page "expand_{down|up}wards()", 2859 - * except we must first make sure that 'address{-|+}PAGE_SIZE' 2860 - * doesn't hit another vma. 2861 - */ 2862 - static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address) 2863 - { 2864 - address &= PAGE_MASK; 2865 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) { 2866 - struct vm_area_struct *prev = vma->vm_prev; 2867 - 2868 - /* 2869 - * Is there a mapping abutting this one below? 2870 - * 2871 - * That's only ok if it's the same stack mapping 2872 - * that has gotten split.. 2873 - */ 2874 - if (prev && prev->vm_end == address) 2875 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM; 2876 - 2877 - return expand_downwards(vma, address - PAGE_SIZE); 2878 - } 2879 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) { 2880 - struct vm_area_struct *next = vma->vm_next; 2881 - 2882 - /* As VM_GROWSDOWN but s/below/above/ */ 2883 - if (next && next->vm_start == address + PAGE_SIZE) 2884 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM; 2885 - 2886 - return expand_upwards(vma, address + PAGE_SIZE); 2887 - } 2888 - return 0; 2889 - } 2890 - 2891 - /* 2892 2858 * We enter with non-exclusive mmap_sem (to exclude vma changes, 2893 2859 * but allow concurrent faults), and pte mapped but not yet locked. 2894 2860 * We return with mmap_sem still held, but pte unmapped and unlocked. ··· 2869 2903 /* File mapping without ->vm_ops ? */ 2870 2904 if (vma->vm_flags & VM_SHARED) 2871 2905 return VM_FAULT_SIGBUS; 2872 - 2873 - /* Check if we need to add a guard page to the stack */ 2874 - if (check_stack_guard_page(vma, vmf->address) < 0) 2875 - return VM_FAULT_SIGSEGV; 2876 2906 2877 2907 /* 2878 2908 * Use pte_alloc() instead of pte_alloc_map(). We can't run
+97 -63
mm/mmap.c
··· 183 183 unsigned long retval; 184 184 unsigned long newbrk, oldbrk; 185 185 struct mm_struct *mm = current->mm; 186 + struct vm_area_struct *next; 186 187 unsigned long min_brk; 187 188 bool populate; 188 189 LIST_HEAD(uf); ··· 230 229 } 231 230 232 231 /* Check against existing mmap mappings. */ 233 - if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE)) 232 + next = find_vma(mm, oldbrk); 233 + if (next && newbrk + PAGE_SIZE > vm_start_gap(next)) 234 234 goto out; 235 235 236 236 /* Ok, looks good - let it rip. */ ··· 255 253 256 254 static long vma_compute_subtree_gap(struct vm_area_struct *vma) 257 255 { 258 - unsigned long max, subtree_gap; 259 - max = vma->vm_start; 260 - if (vma->vm_prev) 261 - max -= vma->vm_prev->vm_end; 256 + unsigned long max, prev_end, subtree_gap; 257 + 258 + /* 259 + * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we 260 + * allow two stack_guard_gaps between them here, and when choosing 261 + * an unmapped area; whereas when expanding we only require one. 262 + * That's a little inconsistent, but keeps the code here simpler. 263 + */ 264 + max = vm_start_gap(vma); 265 + if (vma->vm_prev) { 266 + prev_end = vm_end_gap(vma->vm_prev); 267 + if (max > prev_end) 268 + max -= prev_end; 269 + else 270 + max = 0; 271 + } 262 272 if (vma->vm_rb.rb_left) { 263 273 subtree_gap = rb_entry(vma->vm_rb.rb_left, 264 274 struct vm_area_struct, vm_rb)->rb_subtree_gap; ··· 366 352 anon_vma_unlock_read(anon_vma); 367 353 } 368 354 369 - highest_address = vma->vm_end; 355 + highest_address = vm_end_gap(vma); 370 356 vma = vma->vm_next; 371 357 i++; 372 358 } ··· 555 541 if (vma->vm_next) 556 542 vma_gap_update(vma->vm_next); 557 543 else 558 - mm->highest_vm_end = vma->vm_end; 544 + mm->highest_vm_end = vm_end_gap(vma); 559 545 560 546 /* 561 547 * vma->vm_prev wasn't known when we followed the rbtree to find the ··· 870 856 vma_gap_update(vma); 871 857 if (end_changed) { 872 858 if (!next) 873 - mm->highest_vm_end = end; 859 + mm->highest_vm_end = vm_end_gap(vma); 874 860 else if (!adjust_next) 875 861 vma_gap_update(next); 876 862 } ··· 955 941 * mm->highest_vm_end doesn't need any update 956 942 * in remove_next == 1 case. 957 943 */ 958 - VM_WARN_ON(mm->highest_vm_end != end); 944 + VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma)); 959 945 } 960 946 } 961 947 if (insert && file) ··· 1801 1787 1802 1788 while (true) { 1803 1789 /* Visit left subtree if it looks promising */ 1804 - gap_end = vma->vm_start; 1790 + gap_end = vm_start_gap(vma); 1805 1791 if (gap_end >= low_limit && vma->vm_rb.rb_left) { 1806 1792 struct vm_area_struct *left = 1807 1793 rb_entry(vma->vm_rb.rb_left, ··· 1812 1798 } 1813 1799 } 1814 1800 1815 - gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0; 1801 + gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0; 1816 1802 check_current: 1817 1803 /* Check if current node has a suitable gap */ 1818 1804 if (gap_start > high_limit) 1819 1805 return -ENOMEM; 1820 - if (gap_end >= low_limit && gap_end - gap_start >= length) 1806 + if (gap_end >= low_limit && 1807 + gap_end > gap_start && gap_end - gap_start >= length) 1821 1808 goto found; 1822 1809 1823 1810 /* Visit right subtree if it looks promising */ ··· 1840 1825 vma = rb_entry(rb_parent(prev), 1841 1826 struct vm_area_struct, vm_rb); 1842 1827 if (prev == vma->vm_rb.rb_left) { 1843 - gap_start = vma->vm_prev->vm_end; 1844 - gap_end = vma->vm_start; 1828 + gap_start = vm_end_gap(vma->vm_prev); 1829 + gap_end = vm_start_gap(vma); 1845 1830 goto check_current; 1846 1831 } 1847 1832 } ··· 1905 1890 1906 1891 while (true) { 1907 1892 /* Visit right subtree if it looks promising */ 1908 - gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0; 1893 + gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0; 1909 1894 if (gap_start <= high_limit && vma->vm_rb.rb_right) { 1910 1895 struct vm_area_struct *right = 1911 1896 rb_entry(vma->vm_rb.rb_right, ··· 1918 1903 1919 1904 check_current: 1920 1905 /* Check if current node has a suitable gap */ 1921 - gap_end = vma->vm_start; 1906 + gap_end = vm_start_gap(vma); 1922 1907 if (gap_end < low_limit) 1923 1908 return -ENOMEM; 1924 - if (gap_start <= high_limit && gap_end - gap_start >= length) 1909 + if (gap_start <= high_limit && 1910 + gap_end > gap_start && gap_end - gap_start >= length) 1925 1911 goto found; 1926 1912 1927 1913 /* Visit left subtree if it looks promising */ ··· 1945 1929 struct vm_area_struct, vm_rb); 1946 1930 if (prev == vma->vm_rb.rb_right) { 1947 1931 gap_start = vma->vm_prev ? 1948 - vma->vm_prev->vm_end : 0; 1932 + vm_end_gap(vma->vm_prev) : 0; 1949 1933 goto check_current; 1950 1934 } 1951 1935 } ··· 1983 1967 unsigned long len, unsigned long pgoff, unsigned long flags) 1984 1968 { 1985 1969 struct mm_struct *mm = current->mm; 1986 - struct vm_area_struct *vma; 1970 + struct vm_area_struct *vma, *prev; 1987 1971 struct vm_unmapped_area_info info; 1988 1972 1989 1973 if (len > TASK_SIZE - mmap_min_addr) ··· 1994 1978 1995 1979 if (addr) { 1996 1980 addr = PAGE_ALIGN(addr); 1997 - vma = find_vma(mm, addr); 1981 + vma = find_vma_prev(mm, addr, &prev); 1998 1982 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && 1999 - (!vma || addr + len <= vma->vm_start)) 1983 + (!vma || addr + len <= vm_start_gap(vma)) && 1984 + (!prev || addr >= vm_end_gap(prev))) 2000 1985 return addr; 2001 1986 } 2002 1987 ··· 2020 2003 const unsigned long len, const unsigned long pgoff, 2021 2004 const unsigned long flags) 2022 2005 { 2023 - struct vm_area_struct *vma; 2006 + struct vm_area_struct *vma, *prev; 2024 2007 struct mm_struct *mm = current->mm; 2025 2008 unsigned long addr = addr0; 2026 2009 struct vm_unmapped_area_info info; ··· 2035 2018 /* requesting a specific address */ 2036 2019 if (addr) { 2037 2020 addr = PAGE_ALIGN(addr); 2038 - vma = find_vma(mm, addr); 2021 + vma = find_vma_prev(mm, addr, &prev); 2039 2022 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && 2040 - (!vma || addr + len <= vma->vm_start)) 2023 + (!vma || addr + len <= vm_start_gap(vma)) && 2024 + (!prev || addr >= vm_end_gap(prev))) 2041 2025 return addr; 2042 2026 } 2043 2027 ··· 2173 2155 * update accounting. This is shared with both the 2174 2156 * grow-up and grow-down cases. 2175 2157 */ 2176 - static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow) 2158 + static int acct_stack_growth(struct vm_area_struct *vma, 2159 + unsigned long size, unsigned long grow) 2177 2160 { 2178 2161 struct mm_struct *mm = vma->vm_mm; 2179 2162 struct rlimit *rlim = current->signal->rlim; 2180 - unsigned long new_start, actual_size; 2163 + unsigned long new_start; 2181 2164 2182 2165 /* address space limit tests */ 2183 2166 if (!may_expand_vm(mm, vma->vm_flags, grow)) 2184 2167 return -ENOMEM; 2185 2168 2186 2169 /* Stack limit test */ 2187 - actual_size = size; 2188 - if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN))) 2189 - actual_size -= PAGE_SIZE; 2190 - if (actual_size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur)) 2170 + if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur)) 2191 2171 return -ENOMEM; 2192 2172 2193 2173 /* mlock limit tests */ ··· 2223 2207 int expand_upwards(struct vm_area_struct *vma, unsigned long address) 2224 2208 { 2225 2209 struct mm_struct *mm = vma->vm_mm; 2210 + struct vm_area_struct *next; 2211 + unsigned long gap_addr; 2226 2212 int error = 0; 2227 2213 2228 2214 if (!(vma->vm_flags & VM_GROWSUP)) 2229 2215 return -EFAULT; 2230 2216 2231 - /* Guard against wrapping around to address 0. */ 2232 - if (address < PAGE_ALIGN(address+4)) 2233 - address = PAGE_ALIGN(address+4); 2234 - else 2217 + /* Guard against exceeding limits of the address space. */ 2218 + address &= PAGE_MASK; 2219 + if (address >= TASK_SIZE) 2235 2220 return -ENOMEM; 2221 + address += PAGE_SIZE; 2222 + 2223 + /* Enforce stack_guard_gap */ 2224 + gap_addr = address + stack_guard_gap; 2225 + 2226 + /* Guard against overflow */ 2227 + if (gap_addr < address || gap_addr > TASK_SIZE) 2228 + gap_addr = TASK_SIZE; 2229 + 2230 + next = vma->vm_next; 2231 + if (next && next->vm_start < gap_addr) { 2232 + if (!(next->vm_flags & VM_GROWSUP)) 2233 + return -ENOMEM; 2234 + /* Check that both stack segments have the same anon_vma? */ 2235 + } 2236 2236 2237 2237 /* We must make sure the anon_vma is allocated. */ 2238 2238 if (unlikely(anon_vma_prepare(vma))) ··· 2293 2261 if (vma->vm_next) 2294 2262 vma_gap_update(vma->vm_next); 2295 2263 else 2296 - mm->highest_vm_end = address; 2264 + mm->highest_vm_end = vm_end_gap(vma); 2297 2265 spin_unlock(&mm->page_table_lock); 2298 2266 2299 2267 perf_event_mmap(vma); ··· 2314 2282 unsigned long address) 2315 2283 { 2316 2284 struct mm_struct *mm = vma->vm_mm; 2285 + struct vm_area_struct *prev; 2286 + unsigned long gap_addr; 2317 2287 int error; 2318 2288 2319 2289 address &= PAGE_MASK; 2320 2290 error = security_mmap_addr(address); 2321 2291 if (error) 2322 2292 return error; 2293 + 2294 + /* Enforce stack_guard_gap */ 2295 + gap_addr = address - stack_guard_gap; 2296 + if (gap_addr > address) 2297 + return -ENOMEM; 2298 + prev = vma->vm_prev; 2299 + if (prev && prev->vm_end > gap_addr) { 2300 + if (!(prev->vm_flags & VM_GROWSDOWN)) 2301 + return -ENOMEM; 2302 + /* Check that both stack segments have the same anon_vma? */ 2303 + } 2323 2304 2324 2305 /* We must make sure the anon_vma is allocated. */ 2325 2306 if (unlikely(anon_vma_prepare(vma))) ··· 2388 2343 return error; 2389 2344 } 2390 2345 2391 - /* 2392 - * Note how expand_stack() refuses to expand the stack all the way to 2393 - * abut the next virtual mapping, *unless* that mapping itself is also 2394 - * a stack mapping. We want to leave room for a guard page, after all 2395 - * (the guard page itself is not added here, that is done by the 2396 - * actual page faulting logic) 2397 - * 2398 - * This matches the behavior of the guard page logic (see mm/memory.c: 2399 - * check_stack_guard_page()), which only allows the guard page to be 2400 - * removed under these circumstances. 2401 - */ 2346 + /* enforced gap between the expanding stack and other mappings. */ 2347 + unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT; 2348 + 2349 + static int __init cmdline_parse_stack_guard_gap(char *p) 2350 + { 2351 + unsigned long val; 2352 + char *endptr; 2353 + 2354 + val = simple_strtoul(p, &endptr, 10); 2355 + if (!*endptr) 2356 + stack_guard_gap = val << PAGE_SHIFT; 2357 + 2358 + return 0; 2359 + } 2360 + __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap); 2361 + 2402 2362 #ifdef CONFIG_STACK_GROWSUP 2403 2363 int expand_stack(struct vm_area_struct *vma, unsigned long address) 2404 2364 { 2405 - struct vm_area_struct *next; 2406 - 2407 - address &= PAGE_MASK; 2408 - next = vma->vm_next; 2409 - if (next && next->vm_start == address + PAGE_SIZE) { 2410 - if (!(next->vm_flags & VM_GROWSUP)) 2411 - return -ENOMEM; 2412 - } 2413 2365 return expand_upwards(vma, address); 2414 2366 } 2415 2367 ··· 2428 2386 #else 2429 2387 int expand_stack(struct vm_area_struct *vma, unsigned long address) 2430 2388 { 2431 - struct vm_area_struct *prev; 2432 - 2433 - address &= PAGE_MASK; 2434 - prev = vma->vm_prev; 2435 - if (prev && prev->vm_end == address) { 2436 - if (!(prev->vm_flags & VM_GROWSDOWN)) 2437 - return -ENOMEM; 2438 - } 2439 2389 return expand_downwards(vma, address); 2440 2390 } 2441 2391 ··· 2525 2491 vma->vm_prev = prev; 2526 2492 vma_gap_update(vma); 2527 2493 } else 2528 - mm->highest_vm_end = prev ? prev->vm_end : 0; 2494 + mm->highest_vm_end = prev ? vm_end_gap(prev) : 0; 2529 2495 tail_vma->vm_next = NULL; 2530 2496 2531 2497 /* Kill the cache */
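The vm_start_gap()/vm_end_gap() helpers used throughout these hunks come from include/linux/mm.h, which is not part of this excerpt; a sketch consistent with how the callers use them (treat the exact saturation values as an assumption):

    static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
    {
        unsigned long vm_start = vma->vm_start;

        /* a stack growing down claims stack_guard_gap below itself */
        if (vma->vm_flags & VM_GROWSDOWN) {
            vm_start -= stack_guard_gap;
            if (vm_start > vma->vm_start)   /* underflow: clamp at 0 */
                vm_start = 0;
        }
        return vm_start;
    }

    static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
    {
        unsigned long vm_end = vma->vm_end;

        /* a stack growing up claims stack_guard_gap above itself */
        if (vma->vm_flags & VM_GROWSUP) {
            vm_end += stack_guard_gap;
            if (vm_end < vma->vm_end)       /* overflow: saturate */
                vm_end = -PAGE_SIZE;
        }
        return vm_end;
    }

With these in place, every placement and expansion decision sees a stack VMA as stack_guard_gap (256 pages by default, tunable with the stack_guard_gap= boot parameter parsed above) larger than its nominal extent, replacing the old single guard page.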
+3
mm/swap_cgroup.c
··· 48 48 if (!page) 49 49 goto not_enough_page; 50 50 ctrl->map[idx] = page; 51 + 52 + if (!(idx % SWAP_CLUSTER_MAX)) 53 + cond_resched(); 51 54 } 52 55 return 0; 53 56 not_enough_page:
+3 -3
mm/vmpressure.c
··· 115 115 unsigned long pressure = 0; 116 116 117 117 /* 118 - * reclaimed can be greater than scanned in cases 119 - * like THP, where the scanned is 1 and reclaimed 120 - * could be 512 118 + * reclaimed can be greater than scanned for things such as reclaimed 119 + * slab pages. shrink_node() just adds reclaimed pages without a 120 + * related increment to scanned pages. 121 121 */ 122 122 if (reclaimed >= scanned) 123 123 goto out;
+2 -1
net/8021q/vlan.c
··· 277 277 return 0; 278 278 279 279 out_free_newdev: 280 - free_netdev(new_dev); 280 + if (new_dev->reg_state == NETREG_UNINITIALIZED) 281 + free_netdev(new_dev); 281 282 return err; 282 283 } 283 284
-2
net/core/dev.c
··· 5283 5283 if (rc == BUSY_POLL_BUDGET) 5284 5284 __napi_schedule(napi); 5285 5285 local_bh_enable(); 5286 - if (local_softirq_pending()) 5287 - do_softirq(); 5288 5286 } 5289 5287 5290 5288 void napi_busy_loop(unsigned int napi_id,
+16 -3
net/core/dev_ioctl.c
··· 411 411 if (cmd == SIOCGIFNAME) 412 412 return dev_ifname(net, (struct ifreq __user *)arg); 413 413 414 + /* 415 + * Take care of Wireless Extensions. Unfortunately struct iwreq 416 + * isn't a proper subset of struct ifreq (it's 8 byte shorter) 417 + * so we need to treat it specially, otherwise applications may 418 + * fault if the struct they're passing happens to land at the 419 + * end of a mapped page. 420 + */ 421 + if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) { 422 + struct iwreq iwr; 423 + 424 + if (copy_from_user(&iwr, arg, sizeof(iwr))) 425 + return -EFAULT; 426 + 427 + return wext_handle_ioctl(net, &iwr, cmd, arg); 428 + } 429 + 414 430 if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) 415 431 return -EFAULT; 416 432 ··· 576 560 ret = -EFAULT; 577 561 return ret; 578 562 } 579 - /* Take care of Wireless Extensions */ 580 - if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) 581 - return wext_handle_ioctl(net, &ifr, cmd, arg); 582 563 return -ENOTTY; 583 564 } 584 565 }
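The size mismatch the new comment describes is easy to verify from userspace; a minimal, illustrative program (not part of the patch) that prints both structure sizes:

    #include <stdio.h>
    #include <sys/socket.h>
    #include <linux/wireless.h>    /* brings in struct ifreq and struct iwreq */

    int main(void)
    {
        /* on common ABIs struct iwreq is 8 bytes smaller, so copying
         * sizeof(struct ifreq) from a user pointer that really holds
         * an iwreq can run off the end of a mapped page */
        printf("sizeof(struct ifreq) = %zu\n", sizeof(struct ifreq));
        printf("sizeof(struct iwreq) = %zu\n", sizeof(struct iwreq));
        return 0;
    }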
+14 -7
net/core/fib_rules.c
··· 568 568 struct net *net = sock_net(skb->sk); 569 569 struct fib_rule_hdr *frh = nlmsg_data(nlh); 570 570 struct fib_rules_ops *ops = NULL; 571 - struct fib_rule *rule, *tmp; 571 + struct fib_rule *rule, *r; 572 572 struct nlattr *tb[FRA_MAX+1]; 573 573 struct fib_kuid_range range; 574 574 int err = -EINVAL; ··· 668 668 669 669 /* 670 670 * Check if this rule is a target to any of them. If so, 671 + * adjust to the next one with the same preference or 671 672 * disable them. As this operation is eventually very 672 - * expensive, it is only performed if goto rules have 673 - * actually been added. 673 + * expensive, it is only performed if goto rules, except 674 + * current if it is goto rule, have actually been added. 674 675 */ 675 676 if (ops->nr_goto_rules > 0) { 676 - list_for_each_entry(tmp, &ops->rules_list, list) { 677 - if (rtnl_dereference(tmp->ctarget) == rule) { 678 - RCU_INIT_POINTER(tmp->ctarget, NULL); 677 + struct fib_rule *n; 678 + 679 + n = list_next_entry(rule, list); 680 + if (&n->list == &ops->rules_list || n->pref != rule->pref) 681 + n = NULL; 682 + list_for_each_entry(r, &ops->rules_list, list) { 683 + if (rtnl_dereference(r->ctarget) != rule) 684 + continue; 685 + rcu_assign_pointer(r->ctarget, n); 686 + if (!n) 679 687 ops->unresolved_rules++; 680 - } 681 688 } 682 689 } 683 690
+2
net/core/rtnetlink.c
··· 933 933 + nla_total_size(1) /* IFLA_LINKMODE */ 934 934 + nla_total_size(4) /* IFLA_CARRIER_CHANGES */ 935 935 + nla_total_size(4) /* IFLA_LINK_NETNSID */ 936 + + nla_total_size(4) /* IFLA_GROUP */ 936 937 + nla_total_size(ext_filter_mask 937 938 & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */ 938 939 + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */ ··· 1521 1520 [IFLA_PROTO_DOWN] = { .type = NLA_U8 }, 1522 1521 [IFLA_XDP] = { .type = NLA_NESTED }, 1523 1522 [IFLA_EVENT] = { .type = NLA_U32 }, 1523 + [IFLA_GROUP] = { .type = NLA_U32 }, 1524 1524 }; 1525 1525 1526 1526 static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
+1 -1
net/decnet/dn_route.c
··· 1179 1179 if (dev_out->flags & IFF_LOOPBACK) 1180 1180 flags |= RTCF_LOCAL; 1181 1181 1182 - rt = dst_alloc(&dn_dst_ops, dev_out, 1, DST_OBSOLETE_NONE, DST_HOST); 1182 + rt = dst_alloc(&dn_dst_ops, dev_out, 0, DST_OBSOLETE_NONE, DST_HOST); 1183 1183 if (rt == NULL) 1184 1184 goto e_nobufs; 1185 1185
+1
net/ipv4/igmp.c
··· 1112 1112 pmc = kzalloc(sizeof(*pmc), GFP_KERNEL); 1113 1113 if (!pmc) 1114 1114 return; 1115 + spin_lock_init(&pmc->lock); 1115 1116 spin_lock_bh(&im->lock); 1116 1117 pmc->interface = im->interface; 1117 1118 in_dev_hold(in_dev);
+2
net/ipv4/ip_tunnel.c
··· 446 446 return 0; 447 447 448 448 drop: 449 + if (tun_dst) 450 + dst_release((struct dst_entry *)tun_dst); 449 451 kfree_skb(skb); 450 452 return 0; 451 453 }
+3 -3
net/ipv6/addrconf.c
··· 332 332 static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp, 333 333 unsigned long delay) 334 334 { 335 - if (!delayed_work_pending(&ifp->dad_work)) 336 - in6_ifa_hold(ifp); 337 - mod_delayed_work(addrconf_wq, &ifp->dad_work, delay); 335 + in6_ifa_hold(ifp); 336 + if (mod_delayed_work(addrconf_wq, &ifp->dad_work, delay)) 337 + in6_ifa_put(ifp); 338 338 } 339 339 340 340 static int snmp6_alloc_dev(struct inet6_dev *idev)
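The fix leans on mod_delayed_work()'s return value: true means the work was already pending and still owns the reference taken when it was first queued, so the hold taken here is surplus and must be dropped; false means this call queued the work and the new hold is the one its handler will release. The same pattern in generic form (obj_hold, obj_put and my_wq are hypothetical names):

    static void schedule_obj_work(struct my_obj *obj, unsigned long delay)
    {
        obj_hold(obj);  /* reference to be owned by the queued work */
        /* already pending: the earlier reference still covers the
         * work item, so balance the hold we just took */
        if (mod_delayed_work(my_wq, &obj->work, delay))
            obj_put(obj);
    }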
+6 -16
net/ipv6/fib6_rules.c
··· 32 32 struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6, 33 33 int flags, pol_lookup_t lookup) 34 34 { 35 - struct rt6_info *rt; 36 35 struct fib_lookup_arg arg = { 37 36 .lookup_ptr = lookup, 38 37 .flags = FIB_LOOKUP_NOREF, ··· 43 44 fib_rules_lookup(net->ipv6.fib6_rules_ops, 44 45 flowi6_to_flowi(fl6), flags, &arg); 45 46 46 - rt = arg.result; 47 + if (arg.result) 48 + return arg.result; 47 49 48 - if (!rt) { 49 - dst_hold(&net->ipv6.ip6_null_entry->dst); 50 - return &net->ipv6.ip6_null_entry->dst; 51 - } 52 - 53 - if (rt->rt6i_flags & RTF_REJECT && 54 - rt->dst.error == -EAGAIN) { 55 - ip6_rt_put(rt); 56 - rt = net->ipv6.ip6_null_entry; 57 - dst_hold(&rt->dst); 58 - } 59 - 60 - return &rt->dst; 50 + dst_hold(&net->ipv6.ip6_null_entry->dst); 51 + return &net->ipv6.ip6_null_entry->dst; 61 52 } 62 53 63 54 static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, ··· 110 121 flp6->saddr = saddr; 111 122 } 112 123 err = rt->dst.error; 113 - goto out; 124 + if (err != -EAGAIN) 125 + goto out; 114 126 } 115 127 again: 116 128 ip6_rt_put(rt);
+1 -2
net/ipv6/ip6_fib.c
··· 286 286 struct rt6_info *rt; 287 287 288 288 rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, flags); 289 - if (rt->rt6i_flags & RTF_REJECT && 290 - rt->dst.error == -EAGAIN) { 289 + if (rt->dst.error == -EAGAIN) { 291 290 ip6_rt_put(rt); 292 291 rt = net->ipv6.ip6_null_entry; 293 292 dst_hold(&rt->dst);
+4 -2
net/ipv6/ip6_tunnel.c
··· 858 858 return 0; 859 859 860 860 drop: 861 + if (tun_dst) 862 + dst_release((struct dst_entry *)tun_dst); 861 863 kfree_skb(skb); 862 864 return 0; 863 865 } ··· 1248 1246 fl6.flowi6_proto = IPPROTO_IPIP; 1249 1247 fl6.daddr = key->u.ipv6.dst; 1250 1248 fl6.flowlabel = key->label; 1251 - dsfield = ip6_tclass(key->label); 1249 + dsfield = key->tos; 1252 1250 } else { 1253 1251 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) 1254 1252 encap_limit = t->parms.encap_limit; ··· 1319 1317 fl6.flowi6_proto = IPPROTO_IPV6; 1320 1318 fl6.daddr = key->u.ipv6.dst; 1321 1319 fl6.flowlabel = key->label; 1322 - dsfield = ip6_tclass(key->label); 1320 + dsfield = key->tos; 1323 1321 } else { 1324 1322 offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb)); 1325 1323 /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
+34 -30
net/rxrpc/key.c
··· 217 217 unsigned int *_toklen) 218 218 { 219 219 const __be32 *xdr = *_xdr; 220 - unsigned int toklen = *_toklen, n_parts, loop, tmp; 220 + unsigned int toklen = *_toklen, n_parts, loop, tmp, paddedlen; 221 221 222 222 /* there must be at least one name, and at least #names+1 length 223 223 * words */ ··· 247 247 toklen -= 4; 248 248 if (tmp <= 0 || tmp > AFSTOKEN_STRING_MAX) 249 249 return -EINVAL; 250 - if (tmp > toklen) 250 + paddedlen = (tmp + 3) & ~3; 251 + if (paddedlen > toklen) 251 252 return -EINVAL; 252 253 princ->name_parts[loop] = kmalloc(tmp + 1, GFP_KERNEL); 253 254 if (!princ->name_parts[loop]) 254 255 return -ENOMEM; 255 256 memcpy(princ->name_parts[loop], xdr, tmp); 256 257 princ->name_parts[loop][tmp] = 0; 257 - tmp = (tmp + 3) & ~3; 258 - toklen -= tmp; 259 - xdr += tmp >> 2; 258 + toklen -= paddedlen; 259 + xdr += paddedlen >> 2; 260 260 } 261 261 262 262 if (toklen < 4) ··· 265 265 toklen -= 4; 266 266 if (tmp <= 0 || tmp > AFSTOKEN_K5_REALM_MAX) 267 267 return -EINVAL; 268 - if (tmp > toklen) 268 + paddedlen = (tmp + 3) & ~3; 269 + if (paddedlen > toklen) 269 270 return -EINVAL; 270 271 princ->realm = kmalloc(tmp + 1, GFP_KERNEL); 271 272 if (!princ->realm) 272 273 return -ENOMEM; 273 274 memcpy(princ->realm, xdr, tmp); 274 275 princ->realm[tmp] = 0; 275 - tmp = (tmp + 3) & ~3; 276 - toklen -= tmp; 277 - xdr += tmp >> 2; 276 + toklen -= paddedlen; 277 + xdr += paddedlen >> 2; 278 278 279 279 _debug("%s/...@%s", princ->name_parts[0], princ->realm); 280 280 ··· 293 293 unsigned int *_toklen) 294 294 { 295 295 const __be32 *xdr = *_xdr; 296 - unsigned int toklen = *_toklen, len; 296 + unsigned int toklen = *_toklen, len, paddedlen; 297 297 298 298 /* there must be at least one tag and one length word */ 299 299 if (toklen <= 8) ··· 307 307 toklen -= 8; 308 308 if (len > max_data_size) 309 309 return -EINVAL; 310 + paddedlen = (len + 3) & ~3; 311 + if (paddedlen > toklen) 312 + return -EINVAL; 310 313 td->data_len = len; 311 314 312 315 if (len > 0) { 313 316 td->data = kmemdup(xdr, len, GFP_KERNEL); 314 317 if (!td->data) 315 318 return -ENOMEM; 316 - len = (len + 3) & ~3; 317 - toklen -= len; 318 - xdr += len >> 2; 319 + toklen -= paddedlen; 320 + xdr += paddedlen >> 2; 319 321 } 320 322 321 323 _debug("tag %x len %x", td->tag, td->data_len); ··· 389 387 const __be32 **_xdr, unsigned int *_toklen) 390 388 { 391 389 const __be32 *xdr = *_xdr; 392 - unsigned int toklen = *_toklen, len; 390 + unsigned int toklen = *_toklen, len, paddedlen; 393 391 394 392 /* there must be at least one length word */ 395 393 if (toklen <= 4) ··· 401 399 toklen -= 4; 402 400 if (len > AFSTOKEN_K5_TIX_MAX) 403 401 return -EINVAL; 402 + paddedlen = (len + 3) & ~3; 403 + if (paddedlen > toklen) 404 + return -EINVAL; 404 405 *_tktlen = len; 405 406 406 407 _debug("ticket len %u", len); ··· 412 407 *_ticket = kmemdup(xdr, len, GFP_KERNEL); 413 408 if (!*_ticket) 414 409 return -ENOMEM; 415 - len = (len + 3) & ~3; 416 - toklen -= len; 417 - xdr += len >> 2; 410 + toklen -= paddedlen; 411 + xdr += paddedlen >> 2; 418 412 } 419 413 420 414 *_xdr = xdr; ··· 556 552 { 557 553 const __be32 *xdr = prep->data, *token; 558 554 const char *cp; 559 - unsigned int len, tmp, loop, ntoken, toklen, sec_ix; 555 + unsigned int len, paddedlen, loop, ntoken, toklen, sec_ix; 560 556 size_t datalen = prep->datalen; 561 557 int ret; 562 558 ··· 582 578 if (len < 1 || len > AFSTOKEN_CELL_MAX) 583 579 goto not_xdr; 584 580 datalen -= 4; 585 - tmp = (len + 3) & ~3; 586 - if (tmp > datalen) 581 + paddedlen = (len + 3) & ~3; 582 + if (paddedlen > datalen) 587 583 goto not_xdr; 588 584 589 585 cp = (const char *) xdr; 590 586 for (loop = 0; loop < len; loop++) 591 587 if (!isprint(cp[loop])) 592 588 goto not_xdr; 593 - if (len < tmp) 594 - for (; loop < tmp; loop++) 595 - if (cp[loop]) 596 - goto not_xdr; 589 + for (; loop < paddedlen; loop++) 590 + if (cp[loop]) 591 + goto not_xdr; 597 592 _debug("cellname: [%u/%u] '%*.*s'", 598 - len, tmp, len, len, (const char *) xdr); 593 + len, paddedlen, len, len, (const char *) xdr); 599 - datalen -= tmp; 600 - xdr += tmp >> 2; 594 + datalen -= paddedlen; 595 + xdr += paddedlen >> 2; 601 596 602 597 /* get the token count */ 603 598 if (datalen < 12) ··· 617 614 sec_ix = ntohl(*xdr); 618 615 datalen -= 4; 619 616 _debug("token: [%x/%zx] %x", toklen, datalen, sec_ix); 620 - if (toklen < 20 || toklen > datalen) 617 + paddedlen = (toklen + 3) & ~3; 618 + if (toklen < 20 || toklen > datalen || paddedlen > datalen) 621 619 goto not_xdr; 622 - datalen -= (toklen + 3) & ~3; 623 - xdr += (toklen + 3) >> 2; 620 + datalen -= paddedlen; 621 + xdr += paddedlen >> 2; 624 622 625 623 } while (--loop > 0); 626 624
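Every hunk in this file applies the same hardening: compute the 4-byte-padded length once, bounds-check it against the remaining token length before consuming or skipping the data, and only then advance the cursor, so a large 32-bit length can no longer wrap the count past the end of the buffer. Reduced to a hypothetical helper (illustrative only, not from the patch):

    /* advance over an XDR-encoded, 4-byte-padded blob of @len bytes;
     * returns -EINVAL if the padded length overflows or overruns the
     * remaining buffer */
    static int xdr_skip_padded(const __be32 **xdr, unsigned int *toklen,
                               unsigned int len)
    {
        unsigned int paddedlen = (len + 3) & ~3;

        if (paddedlen < len || paddedlen > *toklen)
            return -EINVAL;
        *xdr += paddedlen >> 2;
        *toklen -= paddedlen;
        return 0;
    }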
+1
net/sctp/endpointola.c
··· 275 275 if (sctp_sk(sk)->bind_hash) 276 276 sctp_put_port(sk); 277 277 278 + sctp_sk(sk)->ep = NULL; 278 279 sock_put(sk); 279 280 } 280 281
+3 -2
net/sctp/sctp_diag.c
··· 278 278 279 279 static int sctp_sock_dump(struct sock *sk, void *p) 280 280 { 281 - struct sctp_endpoint *ep = sctp_sk(sk)->ep; 282 281 struct sctp_comm_param *commp = p; 283 282 struct sk_buff *skb = commp->skb; 284 283 struct netlink_callback *cb = commp->cb; ··· 286 287 int err = 0; 287 288 288 289 lock_sock(sk); 289 - list_for_each_entry(assoc, &ep->asocs, asocs) { 290 + if (!sctp_sk(sk)->ep) 291 + goto release; 292 + list_for_each_entry(assoc, &sctp_sk(sk)->ep->asocs, asocs) { 290 293 if (cb->args[4] < cb->args[1]) 291 294 goto next; 292 295
+2 -3
net/sctp/socket.c
··· 4666 4666 if (err) 4667 4667 return err; 4668 4668 4669 - sctp_transport_get_idx(net, &hti, pos); 4670 - obj = sctp_transport_get_next(net, &hti); 4671 - for (; obj && !IS_ERR(obj); obj = sctp_transport_get_next(net, &hti)) { 4669 + obj = sctp_transport_get_idx(net, &hti, pos + 1); 4670 + for (; !IS_ERR_OR_NULL(obj); obj = sctp_transport_get_next(net, &hti)) { 4672 4671 struct sctp_transport *transport = obj; 4673 4672 4674 4673 if (!sctp_transport_hold(transport))
+9 -13
net/wireless/wext-core.c
··· 914 914 * Main IOCTl dispatcher. 915 915 * Check the type of IOCTL and call the appropriate wrapper... 916 916 */ 917 - static int wireless_process_ioctl(struct net *net, struct ifreq *ifr, 917 + static int wireless_process_ioctl(struct net *net, struct iwreq *iwr, 918 918 unsigned int cmd, 919 919 struct iw_request_info *info, 920 920 wext_ioctl_func standard, 921 921 wext_ioctl_func private) 922 922 { 923 - struct iwreq *iwr = (struct iwreq *) ifr; 924 923 struct net_device *dev; 925 924 iw_handler handler; 926 925 ··· 927 928 * The copy_to/from_user() of ifr is also dealt with in there */ 928 929 929 930 /* Make sure the device exist */ 930 - if ((dev = __dev_get_by_name(net, ifr->ifr_name)) == NULL) 931 + if ((dev = __dev_get_by_name(net, iwr->ifr_name)) == NULL) 931 932 return -ENODEV; 932 933 933 934 /* A bunch of special cases, then the generic case... ··· 956 957 else if (private) 957 958 return private(dev, iwr, cmd, info, handler); 958 959 } 959 - /* Old driver API : call driver ioctl handler */ 960 - if (dev->netdev_ops->ndo_do_ioctl) 961 - return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd); 962 960 return -EOPNOTSUPP; 963 961 } 964 962 ··· 973 977 } 974 978 975 979 /* entry point from dev ioctl */ 976 - static int wext_ioctl_dispatch(struct net *net, struct ifreq *ifr, 980 + static int wext_ioctl_dispatch(struct net *net, struct iwreq *iwr, 977 981 unsigned int cmd, struct iw_request_info *info, 978 982 wext_ioctl_func standard, 979 983 wext_ioctl_func private) ··· 983 987 if (ret) 984 988 return ret; 985 989 986 - dev_load(net, ifr->ifr_name); 990 + dev_load(net, iwr->ifr_name); 987 991 rtnl_lock(); 988 - ret = wireless_process_ioctl(net, ifr, cmd, info, standard, private); 992 + ret = wireless_process_ioctl(net, iwr, cmd, info, standard, private); 989 993 rtnl_unlock(); 990 994 991 995 return ret; ··· 1035 1039 } 1036 1040 1037 1041 1038 - int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd, 1042 + int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd, 1039 1043 void __user *arg) 1040 1044 { 1041 1045 struct iw_request_info info = { .cmd = cmd, .flags = 0 }; 1042 1046 int ret; 1043 1047 1044 - ret = wext_ioctl_dispatch(net, ifr, cmd, &info, 1048 + ret = wext_ioctl_dispatch(net, iwr, cmd, &info, 1045 1049 ioctl_standard_call, 1046 1050 ioctl_private_call); 1047 1051 if (ret >= 0 && 1048 1052 IW_IS_GET(cmd) && 1049 - copy_to_user(arg, ifr, sizeof(struct iwreq))) 1053 + copy_to_user(arg, iwr, sizeof(struct iwreq))) 1050 1054 return -EFAULT; 1051 1055 1052 1056 return ret; ··· 1103 1107 info.cmd = cmd; 1104 1108 info.flags = IW_REQUEST_FLAG_COMPAT; 1105 1109 1106 - ret = wext_ioctl_dispatch(net, (struct ifreq *) &iwr, cmd, &info, 1110 + ret = wext_ioctl_dispatch(net, &iwr, cmd, &info, 1107 1111 compat_standard_call, 1108 1112 compat_private_call); 1109 1113
+2 -3
security/selinux/hooks.c
··· 1106 1106 1107 1107 opts->mnt_opts_flags = kcalloc(NUM_SEL_MNT_OPTS, sizeof(int), 1108 1108 GFP_KERNEL); 1109 - if (!opts->mnt_opts_flags) { 1110 - kfree(opts->mnt_opts); 1109 + if (!opts->mnt_opts_flags) 1111 1110 goto out_err; 1112 - } 1113 1111 1114 1112 if (fscontext) { 1115 1113 opts->mnt_opts[num_mnt_opts] = fscontext; ··· 1130 1132 return 0; 1131 1133 1132 1134 out_err: 1135 + security_free_mnt_opts(opts); 1133 1136 kfree(context); 1134 1137 kfree(defcontext); 1135 1138 kfree(fscontext);
+2 -1
tools/objtool/builtin-check.c
··· 192 192 "complete_and_exit", 193 193 "kvm_spurious_fault", 194 194 "__reiserfs_panic", 195 - "lbug_with_loc" 195 + "lbug_with_loc", 196 + "fortify_panic", 196 197 }; 197 198 198 199 if (func->bind == STB_WEAK)
+19 -19
tools/perf/Makefile.config
··· 19 19 20 20 include $(srctree)/tools/scripts/Makefile.arch 21 21 22 - $(call detected_var,ARCH) 22 + $(call detected_var,SRCARCH) 23 23 24 24 NO_PERF_REGS := 1 25 25 26 26 # Additional ARCH settings for ppc 27 - ifeq ($(ARCH),powerpc) 27 + ifeq ($(SRCARCH),powerpc) 28 28 NO_PERF_REGS := 0 29 29 LIBUNWIND_LIBS := -lunwind -lunwind-ppc64 30 30 endif 31 31 32 32 # Additional ARCH settings for x86 33 - ifeq ($(ARCH),x86) 33 + ifeq ($(SRCARCH),x86) 34 34 $(call detected,CONFIG_X86) 35 35 ifeq (${IS_64_BIT}, 1) 36 36 CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT -DHAVE_SYSCALL_TABLE -I$(OUTPUT)arch/x86/include/generated ··· 43 43 NO_PERF_REGS := 0 44 44 endif 45 45 46 - ifeq ($(ARCH),arm) 46 + ifeq ($(SRCARCH),arm) 47 47 NO_PERF_REGS := 0 48 48 LIBUNWIND_LIBS = -lunwind -lunwind-arm 49 49 endif 50 50 51 - ifeq ($(ARCH),arm64) 51 + ifeq ($(SRCARCH),arm64) 52 52 NO_PERF_REGS := 0 53 53 LIBUNWIND_LIBS = -lunwind -lunwind-aarch64 54 54 endif ··· 61 61 # Disable it on all other architectures in case libdw unwind 62 62 # support is detected in system. Add supported architectures 63 63 # to the check. 64 - ifneq ($(ARCH),$(filter $(ARCH),x86 arm)) 64 + ifneq ($(SRCARCH),$(filter $(SRCARCH),x86 arm)) 65 65 NO_LIBDW_DWARF_UNWIND := 1 66 66 endif 67 67 ··· 115 115 FEATURE_CHECK_CFLAGS-libbabeltrace := $(LIBBABELTRACE_CFLAGS) 116 116 FEATURE_CHECK_LDFLAGS-libbabeltrace := $(LIBBABELTRACE_LDFLAGS) -lbabeltrace-ctf 117 117 118 - FEATURE_CHECK_CFLAGS-bpf = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(ARCH)/include/uapi -I$(srctree)/tools/include/uapi 118 + FEATURE_CHECK_CFLAGS-bpf = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(SRCARCH)/include/uapi -I$(srctree)/tools/include/uapi 119 119 # include ARCH specific config 120 - -include $(src-perf)/arch/$(ARCH)/Makefile 120 + -include $(src-perf)/arch/$(SRCARCH)/Makefile 121 121 122 122 ifdef PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET 123 123 CFLAGS += -DHAVE_ARCH_REGS_QUERY_REGISTER_OFFSET ··· 228 228 endif 229 229 230 230 INC_FLAGS += -I$(src-perf)/util/include 231 - INC_FLAGS += -I$(src-perf)/arch/$(ARCH)/include 231 + INC_FLAGS += -I$(src-perf)/arch/$(SRCARCH)/include 232 232 INC_FLAGS += -I$(srctree)/tools/include/uapi 233 233 INC_FLAGS += -I$(srctree)/tools/include/ 234 - INC_FLAGS += -I$(srctree)/tools/arch/$(ARCH)/include/uapi 235 - INC_FLAGS += -I$(srctree)/tools/arch/$(ARCH)/include/ 236 - INC_FLAGS += -I$(srctree)/tools/arch/$(ARCH)/ 234 + INC_FLAGS += -I$(srctree)/tools/arch/$(SRCARCH)/include/uapi 235 + INC_FLAGS += -I$(srctree)/tools/arch/$(SRCARCH)/include/ 236 + INC_FLAGS += -I$(srctree)/tools/arch/$(SRCARCH)/ 237 237 238 238 # $(obj-perf) for generated common-cmds.h 239 239 # $(obj-perf)/util for generated bison/flex headers ··· 355 355 356 356 ifndef NO_DWARF 357 357 ifeq ($(origin PERF_HAVE_DWARF_REGS), undefined) 358 - msg := $(warning DWARF register mappings have not been defined for architecture $(ARCH), DWARF support disabled); 358 + msg := $(warning DWARF register mappings have not been defined for architecture $(SRCARCH), DWARF support disabled); 359 359 NO_DWARF := 1 360 360 else 361 361 CFLAGS += -DHAVE_DWARF_SUPPORT $(LIBDW_CFLAGS) ··· 380 380 CFLAGS += -DHAVE_BPF_PROLOGUE 381 381 $(call detected,CONFIG_BPF_PROLOGUE) 382 382 else 383 - msg := $(warning BPF prologue is not supported by architecture $(ARCH), missing regs_query_register_offset()); 383 + msg := $(warning BPF prologue is not supported by architecture $(SRCARCH), missing regs_query_register_offset()); 384 384 endif 385 385 else 386 386 msg := $(warning DWARF support is off, BPF prologue is disabled); ··· 406 406 endif 407 407 endif 408 408 409 - ifeq ($(ARCH),powerpc) 409 + ifeq ($(SRCARCH),powerpc) 410 410 ifndef NO_DWARF 411 411 CFLAGS += -DHAVE_SKIP_CALLCHAIN_IDX 412 412 endif ··· 487 487 endif 488 488 489 489 ifndef NO_LOCAL_LIBUNWIND 490 - ifeq ($(ARCH),$(filter $(ARCH),arm arm64)) 490 + ifeq ($(SRCARCH),$(filter $(SRCARCH),arm arm64)) 491 491 $(call feature_check,libunwind-debug-frame) 492 492 ifneq ($(feature-libunwind-debug-frame), 1) 493 493 msg := $(warning No debug_frame support found in libunwind); ··· 740 740 NO_PERF_READ_VDSO32 := 1 741 741 endif 742 742 endif 743 - ifneq ($(ARCH), x86) 743 + ifneq ($(SRCARCH), x86) 744 744 NO_PERF_READ_VDSOX32 := 1 745 745 endif 746 746 ifndef NO_PERF_READ_VDSOX32 ··· 769 769 endif 770 770 771 771 ifndef NO_AUXTRACE 772 - ifeq ($(ARCH),x86) 772 + ifeq ($(SRCARCH),x86) 773 773 ifeq ($(feature-get_cpuid), 0) 774 774 msg := $(warning Your gcc lacks the __get_cpuid() builtin, disables support for auxtrace/Intel PT, please install a newer gcc); 775 775 NO_AUXTRACE := 1 ··· 872 872 ETC_PERFCONFIG = etc/perfconfig 873 873 endif 874 874 ifndef lib 875 - ifeq ($(ARCH)$(IS_64_BIT), x861) 875 + ifeq ($(SRCARCH)$(IS_64_BIT), x861) 876 876 lib = lib64 877 877 else 878 878 lib = lib
+1 -1
tools/perf/Makefile.perf
··· 226 226 227 227 ifeq ($(config),0) 228 228 include $(srctree)/tools/scripts/Makefile.arch 229 - -include arch/$(ARCH)/Makefile 229 + -include arch/$(SRCARCH)/Makefile 230 230 endif 231 231 232 232 # The FEATURE_DUMP_EXPORT holds location of the actual
+1 -1
tools/perf/arch/Build
··· 1 1 libperf-y += common.o 2 - libperf-y += $(ARCH)/ 2 + libperf-y += $(SRCARCH)/
+2 -2
tools/perf/pmu-events/Build
··· 2 2 3 3 jevents-y += json.o jsmn.o jevents.o 4 4 pmu-events-y += pmu-events.o 5 - JDIR = pmu-events/arch/$(ARCH) 5 + JDIR = pmu-events/arch/$(SRCARCH) 6 6 JSON = $(shell [ -d $(JDIR) ] && \ 7 7 find $(JDIR) -name '*.json' -o -name 'mapfile.csv') 8 8 # ··· 10 10 # directory and create tables in pmu-events.c. 11 11 # 12 12 $(OUTPUT)pmu-events/pmu-events.c: $(JSON) $(JEVENTS) 13 - $(Q)$(call echo-cmd,gen)$(JEVENTS) $(ARCH) pmu-events/arch $(OUTPUT)pmu-events/pmu-events.c $(V) 13 + $(Q)$(call echo-cmd,gen)$(JEVENTS) $(SRCARCH) pmu-events/arch $(OUTPUT)pmu-events/pmu-events.c $(V)
+1 -1
tools/perf/tests/Build
··· 75 75 $(Q)sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/g' $< >> $@ 76 76 $(Q)echo ';' >> $@ 77 77 78 - ifeq ($(ARCH),$(filter $(ARCH),x86 arm arm64 powerpc)) 78 + ifeq ($(SRCARCH),$(filter $(SRCARCH),x86 arm arm64 powerpc)) 79 79 perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o 80 80 endif 81 81
+1 -1
tools/perf/tests/task-exit.c
··· 83 83 84 84 evsel = perf_evlist__first(evlist); 85 85 evsel->attr.task = 1; 86 - evsel->attr.sample_freq = 0; 86 + evsel->attr.sample_freq = 1; 87 87 evsel->attr.inherit = 0; 88 88 evsel->attr.watermark = 0; 89 89 evsel->attr.wakeup_events = 1;
+12
tools/perf/util/evsel.c
··· 273 273 struct perf_evsel *evsel; 274 274 275 275 event_attr_init(&attr); 276 + /* 277 + * Unnamed union member, not supported as struct member named 278 + * initializer in older compilers such as gcc 4.4.7 279 + * 280 + * Just for probing the precise_ip: 281 + */ 282 + attr.sample_period = 1; 276 283 277 284 perf_event_attr__set_max_precise_ip(&attr); 285 + /* 286 + * Now let the usual logic to set up the perf_event_attr defaults 287 + * to kick in when we return and before perf_evsel__open() is called. 288 + */ 289 + attr.sample_period = 0; 278 290 279 291 evsel = perf_evsel__new(&attr); 280 292 if (evsel == NULL)
+1 -1
tools/perf/util/header.c
··· 841 841 842 842 /* 843 843 * default get_cpuid(): nothing gets recorded 844 - * actual implementation must be in arch/$(ARCH)/util/header.c 844 + * actual implementation must be in arch/$(SRCARCH)/util/header.c 845 845 */ 846 846 int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused) 847 847 {
+8
tools/perf/util/unwind-libdw.c
··· 178 178 Dwarf_Addr pc; 179 179 bool isactivation; 180 180 181 + if (!dwfl_frame_pc(state, &pc, NULL)) { 182 + pr_err("%s", dwfl_errmsg(-1)); 183 + return DWARF_CB_ABORT; 184 + } 185 + 186 + // report the module before we query for isactivation 187 + report_module(pc, ui); 188 + 181 189 if (!dwfl_frame_pc(state, &pc, &isactivation)) { 182 190 pr_err("%s", dwfl_errmsg(-1)); 183 191 return DWARF_CB_ABORT;
+1 -1
tools/testing/selftests/ntb/ntb_test.sh
··· 305 305 echo "Running remote perf test $WITH DMA" 306 306 write_file "" $REMOTE_PERF/run 307 307 echo -n " " 308 - read_file $LOCAL_PERF/run 308 + read_file $REMOTE_PERF/run 309 309 echo " Passed" 310 310 311 311 _modprobe -r ntb_perf