Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

The ungrafting from PRIO bug fixes in net, when merged into net-next,
merge cleanly but create a build failure. The resolution used here is
from Petr Machata.

Signed-off-by: David S. Miller <davem@davemloft.net>

+1564 -882
+5 -5
Documentation/dev-tools/kcov.rst
··· 251 251 .. code-block:: c 252 252 253 253 struct kcov_remote_arg { 254 - unsigned trace_mode; 255 - unsigned area_size; 256 - unsigned num_handles; 257 - uint64_t common_handle; 258 - uint64_t handles[0]; 254 + __u32 trace_mode; 255 + __u32 area_size; 256 + __u32 num_handles; 257 + __aligned_u64 common_handle; 258 + __aligned_u64 handles[0]; 259 259 }; 260 260 261 261 #define KCOV_INIT_TRACE _IOR('c', 1, unsigned long)
+2 -2
Documentation/devicetree/bindings/spi/spi-controller.yaml
··· 111 111 spi-rx-bus-width: 112 112 allOf: 113 113 - $ref: /schemas/types.yaml#/definitions/uint32 114 - - enum: [ 1, 2, 4 ] 114 + - enum: [ 1, 2, 4, 8 ] 115 115 - default: 1 116 116 description: 117 117 Bus width to the SPI bus used for MISO. ··· 123 123 spi-tx-bus-width: 124 124 allOf: 125 125 - $ref: /schemas/types.yaml#/definitions/uint32 126 - - enum: [ 1, 2, 4 ] 126 + - enum: [ 1, 2, 4, 8 ] 127 127 - default: 1 128 128 description: 129 129 Bus width to the SPI bus used for MOSI.
+1 -1
Documentation/features/debug/gcov-profile-all/arch-support.txt
··· 23 23 | openrisc: | TODO | 24 24 | parisc: | TODO | 25 25 | powerpc: | ok | 26 - | riscv: | TODO | 26 + | riscv: | ok | 27 27 | s390: | ok | 28 28 | sh: | ok | 29 29 | sparc: | TODO |
+1 -1
Documentation/networking/ip-sysctl.txt
··· 607 607 with the current initial RTO of 1second. With this the final timeout 608 608 for a passive TCP connection will happen after 63seconds. 609 609 610 - tcp_syncookies - BOOLEAN 610 + tcp_syncookies - INTEGER 611 611 Only valid when the kernel was compiled with CONFIG_SYN_COOKIES 612 612 Send out syncookies when the syn backlog queue of a socket 613 613 overflows. This is to prevent against the common 'SYN flood attack'
+2 -2
Documentation/networking/netdev-FAQ.rst
··· 34 34 mainline tree from Linus, and ``net-next`` is where the new code goes 35 35 for the future release. You can find the trees here: 36 36 37 - - https://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git 38 - - https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git 37 + - https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git 38 + - https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git 39 39 40 40 Q: How often do changes from these trees make it to the mainline Linus tree? 41 41 ----------------------------------------------------------------------------
+1
Documentation/process/index.rst
··· 60 60 volatile-considered-harmful 61 61 botching-up-ioctls 62 62 clang-format 63 + ../riscv/patch-acceptance 63 64 64 65 .. only:: subproject and html 65 66
+1
Documentation/riscv/index.rst
··· 7 7 8 8 boot-image-header 9 9 pmu 10 + patch-acceptance 10 11 11 12 .. only:: subproject and html 12 13
+35
Documentation/riscv/patch-acceptance.rst
··· 1 + .. SPDX-License-Identifier: GPL-2.0 2 + 3 + arch/riscv maintenance guidelines for developers 4 + ================================================ 5 + 6 + Overview 7 + -------- 8 + The RISC-V instruction set architecture is developed in the open: 9 + in-progress drafts are available for all to review and to experiment 10 + with implementations. New module or extension drafts can change 11 + during the development process - sometimes in ways that are 12 + incompatible with previous drafts. This flexibility can present a 13 + challenge for RISC-V Linux maintenance. Linux maintainers disapprove 14 + of churn, and the Linux development process prefers well-reviewed and 15 + tested code over experimental code. We wish to extend these same 16 + principles to the RISC-V-related code that will be accepted for 17 + inclusion in the kernel. 18 + 19 + Submit Checklist Addendum 20 + ------------------------- 21 + We'll only accept patches for new modules or extensions if the 22 + specifications for those modules or extensions are listed as being 23 + "Frozen" or "Ratified" by the RISC-V Foundation. (Developers may, of 24 + course, maintain their own Linux kernel trees that contain code for 25 + any draft extensions that they wish.) 26 + 27 + Additionally, the RISC-V specification allows implementors to create 28 + their own custom extensions. These custom extensions aren't required 29 + to go through any review or ratification process by the RISC-V 30 + Foundation. To avoid the maintenance complexity and potential 31 + performance impact of adding kernel code for implementor-specific 32 + RISC-V extensions, we'll only accept patches for extensions that 33 + have been officially frozen or ratified by the RISC-V Foundation. 34 + (Implementors may, of course, maintain their own Linux kernel trees 35 + containing code for any custom extensions that they wish.)
+6 -8
MAINTAINERS
··· 11460 11460 L: netdev@vger.kernel.org 11461 11461 W: http://www.linuxfoundation.org/en/Net 11462 11462 Q: http://patchwork.ozlabs.org/project/netdev/list/ 11463 - T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git 11464 - T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git 11463 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git 11464 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git 11465 11465 S: Odd Fixes 11466 11466 F: Documentation/devicetree/bindings/net/ 11467 11467 F: drivers/net/ ··· 11502 11502 L: netdev@vger.kernel.org 11503 11503 W: http://www.linuxfoundation.org/en/Net 11504 11504 Q: http://patchwork.ozlabs.org/project/netdev/list/ 11505 - T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git 11506 - T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git 11505 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git 11506 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git 11507 11507 B: mailto:netdev@vger.kernel.org 11508 11508 S: Maintained 11509 11509 F: net/ ··· 11548 11548 M: Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> 11549 11549 M: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org> 11550 11550 L: netdev@vger.kernel.org 11551 - T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git 11551 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git 11552 11552 S: Maintained 11553 11553 F: net/ipv4/ 11554 11554 F: net/ipv6/ ··· 13686 13686 13687 13687 QUALCOMM ETHQOS ETHERNET DRIVER 13688 13688 M: Vinod Koul <vkoul@kernel.org> 13689 - M: Niklas Cassel <niklas.cassel@linaro.org> 13690 13689 L: netdev@vger.kernel.org 13691 13690 S: Maintained 13692 13691 F: drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c ··· 14127 14128 M: Palmer Dabbelt <palmer@dabbelt.com> 14128 14129 M: Albert Ou <aou@eecs.berkeley.edu> 14129 14130 L: linux-riscv@lists.infradead.org 14131 + P: 
Documentation/riscv/patch-acceptance.rst 14130 14132 T: git git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux.git 14131 14133 S: Supported 14132 14134 F: arch/riscv/ ··· 14555 14555 14556 14556 SAMSUNG SXGBE DRIVERS 14557 14557 M: Byungho An <bh74.an@samsung.com> 14558 - M: Girish K S <ks.giri@samsung.com> 14559 - M: Vipul Pandya <vipul.pandya@samsung.com> 14560 14558 S: Supported 14561 14559 L: netdev@vger.kernel.org 14562 14560 F: drivers/net/ethernet/samsung/sxgbe/
+1 -1
Makefile
··· 2 2 VERSION = 5 3 3 PATCHLEVEL = 5 4 4 SUBLEVEL = 0 5 - EXTRAVERSION = -rc4 5 + EXTRAVERSION = -rc5 6 6 NAME = Kleptomaniac Octopus 7 7 8 8 # *DOCUMENTATION*
+4 -4
arch/arc/include/asm/entry-arcv2.h
··· 162 162 #endif 163 163 164 164 #ifdef CONFIG_ARC_HAS_ACCL_REGS 165 - ST2 r58, r59, PT_sp + 12 165 + ST2 r58, r59, PT_r58 166 166 #endif 167 167 168 168 .endm ··· 172 172 173 173 LD2 gp, fp, PT_r26 ; gp (r26), fp (r27) 174 174 175 - ld r12, [sp, PT_sp + 4] 176 - ld r30, [sp, PT_sp + 8] 175 + ld r12, [sp, PT_r12] 176 + ld r30, [sp, PT_r30] 177 177 178 178 ; Restore SP (into AUX_USER_SP) only if returning to U mode 179 179 ; - for K mode, it will be implicitly restored as stack is unwound ··· 190 190 #endif 191 191 192 192 #ifdef CONFIG_ARC_HAS_ACCL_REGS 193 - LD2 r58, r59, PT_sp + 12 193 + LD2 r58, r59, PT_r58 194 194 #endif 195 195 .endm 196 196
-1
arch/arc/include/asm/hugepage.h
··· 8 8 #define _ASM_ARC_HUGEPAGE_H 9 9 10 10 #include <linux/types.h> 11 - #define __ARCH_USE_5LEVEL_HACK 12 11 #include <asm-generic/pgtable-nopmd.h> 13 12 14 13 static inline pte_t pmd_pte(pmd_t pmd)
+9 -1
arch/arc/kernel/asm-offsets.c
··· 66 66 67 67 DEFINE(SZ_CALLEE_REGS, sizeof(struct callee_regs)); 68 68 DEFINE(SZ_PT_REGS, sizeof(struct pt_regs)); 69 - DEFINE(PT_user_r25, offsetof(struct pt_regs, user_r25)); 69 + 70 + #ifdef CONFIG_ISA_ARCV2 71 + OFFSET(PT_r12, pt_regs, r12); 72 + OFFSET(PT_r30, pt_regs, r30); 73 + #endif 74 + #ifdef CONFIG_ARC_HAS_ACCL_REGS 75 + OFFSET(PT_r58, pt_regs, r58); 76 + OFFSET(PT_r59, pt_regs, r59); 77 + #endif 70 78 71 79 return 0; 72 80 }
+1 -1
arch/arc/plat-eznps/Kconfig
··· 7 7 menuconfig ARC_PLAT_EZNPS 8 8 bool "\"EZchip\" ARC dev platform" 9 9 select CPU_BIG_ENDIAN 10 - select CLKSRC_NPS 10 + select CLKSRC_NPS if !PHYS_ADDR_T_64BIT 11 11 select EZNPS_GIC 12 12 select EZCHIP_NPS_MANAGEMENT_ENET if ETHERNET 13 13 help
+2 -3
arch/arm64/include/asm/pgtable-prot.h
··· 85 85 #define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE) 86 86 #define PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN) 87 87 #define PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN) 88 - #define PAGE_EXECONLY __pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN) 89 88 90 89 #define __P000 PAGE_NONE 91 90 #define __P001 PAGE_READONLY 92 91 #define __P010 PAGE_READONLY 93 92 #define __P011 PAGE_READONLY 94 - #define __P100 PAGE_EXECONLY 93 + #define __P100 PAGE_READONLY_EXEC 95 94 #define __P101 PAGE_READONLY_EXEC 96 95 #define __P110 PAGE_READONLY_EXEC 97 96 #define __P111 PAGE_READONLY_EXEC ··· 99 100 #define __S001 PAGE_READONLY 100 101 #define __S010 PAGE_SHARED 101 102 #define __S011 PAGE_SHARED 102 - #define __S100 PAGE_EXECONLY 103 + #define __S100 PAGE_READONLY_EXEC 103 104 #define __S101 PAGE_READONLY_EXEC 104 105 #define __S110 PAGE_SHARED_EXEC 105 106 #define __S111 PAGE_SHARED_EXEC
+3 -7
arch/arm64/include/asm/pgtable.h
··· 96 96 #define pte_dirty(pte) (pte_sw_dirty(pte) || pte_hw_dirty(pte)) 97 97 98 98 #define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID)) 99 - /* 100 - * Execute-only user mappings do not have the PTE_USER bit set. All valid 101 - * kernel mappings have the PTE_UXN bit set. 102 - */ 103 99 #define pte_valid_not_user(pte) \ 104 - ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN)) 100 + ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID) 105 101 #define pte_valid_young(pte) \ 106 102 ((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF)) 107 103 #define pte_valid_user(pte) \ ··· 113 117 114 118 /* 115 119 * p??_access_permitted() is true for valid user mappings (subject to the 116 - * write permission check) other than user execute-only which do not have the 117 - * PTE_USER bit set. PROT_NONE mappings do not have the PTE_VALID bit set. 120 + * write permission check). PROT_NONE mappings do not have the PTE_VALID bit 121 + * set. 118 122 */ 119 123 #define pte_access_permitted(pte, write) \ 120 124 (pte_valid_user(pte) && (!(write) || pte_write(pte)))
+1 -1
arch/arm64/mm/fault.c
··· 445 445 const struct fault_info *inf; 446 446 struct mm_struct *mm = current->mm; 447 447 vm_fault_t fault, major = 0; 448 - unsigned long vm_flags = VM_READ | VM_WRITE; 448 + unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC; 449 449 unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 450 450 451 451 if (kprobe_page_fault(regs, esr))
+1 -3
arch/arm64/mm/mmu.c
··· 1070 1070 { 1071 1071 unsigned long start_pfn = start >> PAGE_SHIFT; 1072 1072 unsigned long nr_pages = size >> PAGE_SHIFT; 1073 - struct zone *zone; 1074 1073 1075 1074 /* 1076 1075 * FIXME: Cleanup page tables (also in arch_add_memory() in case ··· 1078 1079 * unplug. ARCH_ENABLE_MEMORY_HOTREMOVE must not be 1079 1080 * unlocked yet. 1080 1081 */ 1081 - zone = page_zone(pfn_to_page(start_pfn)); 1082 - __remove_pages(zone, start_pfn, nr_pages, altmap); 1082 + __remove_pages(start_pfn, nr_pages, altmap); 1083 1083 } 1084 1084 #endif
+4 -4
arch/hexagon/include/asm/atomic.h
··· 91 91 "1: %0 = memw_locked(%1);\n" \ 92 92 " %0 = "#op "(%0,%2);\n" \ 93 93 " memw_locked(%1,P3)=%0;\n" \ 94 - " if !P3 jump 1b;\n" \ 94 + " if (!P3) jump 1b;\n" \ 95 95 : "=&r" (output) \ 96 96 : "r" (&v->counter), "r" (i) \ 97 97 : "memory", "p3" \ ··· 107 107 "1: %0 = memw_locked(%1);\n" \ 108 108 " %0 = "#op "(%0,%2);\n" \ 109 109 " memw_locked(%1,P3)=%0;\n" \ 110 - " if !P3 jump 1b;\n" \ 110 + " if (!P3) jump 1b;\n" \ 111 111 : "=&r" (output) \ 112 112 : "r" (&v->counter), "r" (i) \ 113 113 : "memory", "p3" \ ··· 124 124 "1: %0 = memw_locked(%2);\n" \ 125 125 " %1 = "#op "(%0,%3);\n" \ 126 126 " memw_locked(%2,P3)=%1;\n" \ 127 - " if !P3 jump 1b;\n" \ 127 + " if (!P3) jump 1b;\n" \ 128 128 : "=&r" (output), "=&r" (val) \ 129 129 : "r" (&v->counter), "r" (i) \ 130 130 : "memory", "p3" \ ··· 173 173 " }" 174 174 " memw_locked(%2, p3) = %1;" 175 175 " {" 176 - " if !p3 jump 1b;" 176 + " if (!p3) jump 1b;" 177 177 " }" 178 178 "2:" 179 179 : "=&r" (__oldval), "=&r" (tmp)
+4 -4
arch/hexagon/include/asm/bitops.h
··· 38 38 "1: R12 = memw_locked(R10);\n" 39 39 " { P0 = tstbit(R12,R11); R12 = clrbit(R12,R11); }\n" 40 40 " memw_locked(R10,P1) = R12;\n" 41 - " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n" 41 + " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n" 42 42 : "=&r" (oldval) 43 43 : "r" (addr), "r" (nr) 44 44 : "r10", "r11", "r12", "p0", "p1", "memory" ··· 62 62 "1: R12 = memw_locked(R10);\n" 63 63 " { P0 = tstbit(R12,R11); R12 = setbit(R12,R11); }\n" 64 64 " memw_locked(R10,P1) = R12;\n" 65 - " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n" 65 + " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n" 66 66 : "=&r" (oldval) 67 67 : "r" (addr), "r" (nr) 68 68 : "r10", "r11", "r12", "p0", "p1", "memory" ··· 88 88 "1: R12 = memw_locked(R10);\n" 89 89 " { P0 = tstbit(R12,R11); R12 = togglebit(R12,R11); }\n" 90 90 " memw_locked(R10,P1) = R12;\n" 91 - " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n" 91 + " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n" 92 92 : "=&r" (oldval) 93 93 : "r" (addr), "r" (nr) 94 94 : "r10", "r11", "r12", "p0", "p1", "memory" ··· 223 223 int r; 224 224 225 225 asm("{ P0 = cmp.eq(%1,#0); %0 = ct0(%1);}\n" 226 - "{ if P0 %0 = #0; if !P0 %0 = add(%0,#1);}\n" 226 + "{ if (P0) %0 = #0; if (!P0) %0 = add(%0,#1);}\n" 227 227 : "=&r" (r) 228 228 : "r" (x) 229 229 : "p0");
+1 -1
arch/hexagon/include/asm/cmpxchg.h
··· 30 30 __asm__ __volatile__ ( 31 31 "1: %0 = memw_locked(%1);\n" /* load into retval */ 32 32 " memw_locked(%1,P0) = %2;\n" /* store into memory */ 33 - " if !P0 jump 1b;\n" 33 + " if (!P0) jump 1b;\n" 34 34 : "=&r" (retval) 35 35 : "r" (ptr), "r" (x) 36 36 : "memory", "p0"
+3 -3
arch/hexagon/include/asm/futex.h
··· 16 16 /* For example: %1 = %4 */ \ 17 17 insn \ 18 18 "2: memw_locked(%3,p2) = %1;\n" \ 19 - " if !p2 jump 1b;\n" \ 19 + " if (!p2) jump 1b;\n" \ 20 20 " %1 = #0;\n" \ 21 21 "3:\n" \ 22 22 ".section .fixup,\"ax\"\n" \ ··· 84 84 "1: %1 = memw_locked(%3)\n" 85 85 " {\n" 86 86 " p2 = cmp.eq(%1,%4)\n" 87 - " if !p2.new jump:NT 3f\n" 87 + " if (!p2.new) jump:NT 3f\n" 88 88 " }\n" 89 89 "2: memw_locked(%3,p2) = %5\n" 90 - " if !p2 jump 1b\n" 90 + " if (!p2) jump 1b\n" 91 91 "3:\n" 92 92 ".section .fixup,\"ax\"\n" 93 93 "4: %0 = #%6\n"
+1
arch/hexagon/include/asm/io.h
··· 173 173 174 174 void __iomem *ioremap(unsigned long phys_addr, unsigned long size); 175 175 #define ioremap_nocache ioremap 176 + #define ioremap_uc(X, Y) ioremap((X), (Y)) 176 177 177 178 178 179 #define __raw_writel writel
+10 -10
arch/hexagon/include/asm/spinlock.h
··· 30 30 __asm__ __volatile__( 31 31 "1: R6 = memw_locked(%0);\n" 32 32 " { P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n" 33 - " { if !P3 jump 1b; }\n" 33 + " { if (!P3) jump 1b; }\n" 34 34 " memw_locked(%0,P3) = R6;\n" 35 - " { if !P3 jump 1b; }\n" 35 + " { if (!P3) jump 1b; }\n" 36 36 : 37 37 : "r" (&lock->lock) 38 38 : "memory", "r6", "p3" ··· 46 46 "1: R6 = memw_locked(%0);\n" 47 47 " R6 = add(R6,#-1);\n" 48 48 " memw_locked(%0,P3) = R6\n" 49 - " if !P3 jump 1b;\n" 49 + " if (!P3) jump 1b;\n" 50 50 : 51 51 : "r" (&lock->lock) 52 52 : "memory", "r6", "p3" ··· 61 61 __asm__ __volatile__( 62 62 " R6 = memw_locked(%1);\n" 63 63 " { %0 = #0; P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n" 64 - " { if !P3 jump 1f; }\n" 64 + " { if (!P3) jump 1f; }\n" 65 65 " memw_locked(%1,P3) = R6;\n" 66 66 " { %0 = P3 }\n" 67 67 "1:\n" ··· 78 78 __asm__ __volatile__( 79 79 "1: R6 = memw_locked(%0)\n" 80 80 " { P3 = cmp.eq(R6,#0); R6 = #-1;}\n" 81 - " { if !P3 jump 1b; }\n" 81 + " { if (!P3) jump 1b; }\n" 82 82 " memw_locked(%0,P3) = R6;\n" 83 - " { if !P3 jump 1b; }\n" 83 + " { if (!P3) jump 1b; }\n" 84 84 : 85 85 : "r" (&lock->lock) 86 86 : "memory", "r6", "p3" ··· 94 94 __asm__ __volatile__( 95 95 " R6 = memw_locked(%1)\n" 96 96 " { %0 = #0; P3 = cmp.eq(R6,#0); R6 = #-1;}\n" 97 - " { if !P3 jump 1f; }\n" 97 + " { if (!P3) jump 1f; }\n" 98 98 " memw_locked(%1,P3) = R6;\n" 99 99 " %0 = P3;\n" 100 100 "1:\n" ··· 117 117 __asm__ __volatile__( 118 118 "1: R6 = memw_locked(%0);\n" 119 119 " P3 = cmp.eq(R6,#0);\n" 120 - " { if !P3 jump 1b; R6 = #1; }\n" 120 + " { if (!P3) jump 1b; R6 = #1; }\n" 121 121 " memw_locked(%0,P3) = R6;\n" 122 - " { if !P3 jump 1b; }\n" 122 + " { if (!P3) jump 1b; }\n" 123 123 : 124 124 : "r" (&lock->lock) 125 125 : "memory", "r6", "p3" ··· 139 139 __asm__ __volatile__( 140 140 " R6 = memw_locked(%1);\n" 141 141 " P3 = cmp.eq(R6,#0);\n" 142 - " { if !P3 jump 1f; R6 = #1; %0 = #0; }\n" 142 + " { if (!P3) jump 1f; R6 = #1; %0 = #0; }\n" 143 143 " memw_locked(%1,P3) = R6;\n" 
144 144 " %0 = P3;\n" 145 145 "1:\n"
+1 -3
arch/hexagon/kernel/stacktrace.c
··· 11 11 #include <linux/thread_info.h> 12 12 #include <linux/module.h> 13 13 14 - register unsigned long current_frame_pointer asm("r30"); 15 - 16 14 struct stackframe { 17 15 unsigned long fp; 18 16 unsigned long rets; ··· 28 30 29 31 low = (unsigned long)task_stack_page(current); 30 32 high = low + THREAD_SIZE; 31 - fp = current_frame_pointer; 33 + fp = (unsigned long)__builtin_frame_address(0); 32 34 33 35 while (fp >= low && fp <= (high - sizeof(*frame))) { 34 36 frame = (struct stackframe *)fp;
+1 -1
arch/hexagon/kernel/vm_entry.S
··· 369 369 R26.L = #LO(do_work_pending); 370 370 R0 = #VM_INT_DISABLE; 371 371 } 372 - if P0 jump check_work_pending 372 + if (P0) jump check_work_pending 373 373 { 374 374 R0 = R25; 375 375 callr R24
+1 -3
arch/ia64/mm/init.c
··· 689 689 { 690 690 unsigned long start_pfn = start >> PAGE_SHIFT; 691 691 unsigned long nr_pages = size >> PAGE_SHIFT; 692 - struct zone *zone; 693 692 694 - zone = page_zone(pfn_to_page(start_pfn)); 695 - __remove_pages(zone, start_pfn, nr_pages, altmap); 693 + __remove_pages(start_pfn, nr_pages, altmap); 696 694 } 697 695 #endif
+1 -1
arch/mips/Kconfig
··· 47 47 select HAVE_ARCH_TRACEHOOK 48 48 select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES 49 49 select HAVE_ASM_MODVERSIONS 50 - select HAVE_EBPF_JIT if (!CPU_MICROMIPS) 50 + select HAVE_EBPF_JIT if 64BIT && !CPU_MICROMIPS && TARGET_ISA_REV >= 2 51 51 select HAVE_CONTEXT_TRACKING 52 52 select HAVE_COPY_THREAD_TLS 53 53 select HAVE_C_RECORDMCOUNT
+3
arch/mips/boot/compressed/Makefile
··· 29 29 -DBOOT_HEAP_SIZE=$(BOOT_HEAP_SIZE) \ 30 30 -DKERNEL_ENTRY=$(VMLINUX_ENTRY_ADDRESS) 31 31 32 + # Prevents link failures: __sanitizer_cov_trace_pc() is not linked in. 33 + KCOV_INSTRUMENT := n 34 + 32 35 # decompressor objects (linked with vmlinuz) 33 36 vmlinuzobjs-y := $(obj)/head.o $(obj)/decompress.o $(obj)/string.o 34 37
+2 -1
arch/mips/include/asm/cpu-type.h
··· 15 15 static inline int __pure __get_cpu_type(const int cpu_type) 16 16 { 17 17 switch (cpu_type) { 18 - #if defined(CONFIG_SYS_HAS_CPU_LOONGSON2EF) 18 + #if defined(CONFIG_SYS_HAS_CPU_LOONGSON2E) || \ 19 + defined(CONFIG_SYS_HAS_CPU_LOONGSON2F) 19 20 case CPU_LOONGSON2EF: 20 21 #endif 21 22
+19 -1
arch/mips/include/asm/thread_info.h
··· 49 49 .addr_limit = KERNEL_DS, \ 50 50 } 51 51 52 - /* How to get the thread information struct from C. */ 52 + /* 53 + * A pointer to the struct thread_info for the currently executing thread is 54 + * held in register $28/$gp. 55 + * 56 + * We declare __current_thread_info as a global register variable rather than a 57 + * local register variable within current_thread_info() because clang doesn't 58 + * support explicit local register variables. 59 + * 60 + * When building the VDSO we take care not to declare the global register 61 + * variable because this causes GCC to not preserve the value of $28/$gp in 62 + * functions that change its value (which is common in the PIC VDSO when 63 + * accessing the GOT). Since the VDSO shouldn't be accessing 64 + * __current_thread_info anyway we declare it extern in order to cause a link 65 + * failure if it's referenced. 66 + */ 67 + #ifdef __VDSO__ 68 + extern struct thread_info *__current_thread_info; 69 + #else 53 70 register struct thread_info *__current_thread_info __asm__("$28"); 71 + #endif 54 72 55 73 static inline struct thread_info *current_thread_info(void) 56 74 {
-13
arch/mips/include/asm/vdso/gettimeofday.h
··· 26 26 27 27 #define __VDSO_USE_SYSCALL ULLONG_MAX 28 28 29 - #ifdef CONFIG_MIPS_CLOCK_VSYSCALL 30 - 31 29 static __always_inline long gettimeofday_fallback( 32 30 struct __kernel_old_timeval *_tv, 33 31 struct timezone *_tz) ··· 45 47 46 48 return error ? -ret : ret; 47 49 } 48 - 49 - #else 50 - 51 - static __always_inline long gettimeofday_fallback( 52 - struct __kernel_old_timeval *_tv, 53 - struct timezone *_tz) 54 - { 55 - return -1; 56 - } 57 - 58 - #endif 59 50 60 51 static __always_inline long clock_gettime_fallback( 61 52 clockid_t _clkid,
+26 -1
arch/mips/kernel/cacheinfo.c
··· 50 50 return 0; 51 51 } 52 52 53 + static void fill_cpumask_siblings(int cpu, cpumask_t *cpu_map) 54 + { 55 + int cpu1; 56 + 57 + for_each_possible_cpu(cpu1) 58 + if (cpus_are_siblings(cpu, cpu1)) 59 + cpumask_set_cpu(cpu1, cpu_map); 60 + } 61 + 62 + static void fill_cpumask_cluster(int cpu, cpumask_t *cpu_map) 63 + { 64 + int cpu1; 65 + int cluster = cpu_cluster(&cpu_data[cpu]); 66 + 67 + for_each_possible_cpu(cpu1) 68 + if (cpu_cluster(&cpu_data[cpu1]) == cluster) 69 + cpumask_set_cpu(cpu1, cpu_map); 70 + } 71 + 53 72 static int __populate_cache_leaves(unsigned int cpu) 54 73 { 55 74 struct cpuinfo_mips *c = &current_cpu_data; ··· 76 57 struct cacheinfo *this_leaf = this_cpu_ci->info_list; 77 58 78 59 if (c->icache.waysize) { 60 + /* L1 caches are per core */ 61 + fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map); 79 62 populate_cache(dcache, this_leaf, 1, CACHE_TYPE_DATA); 63 + fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map); 80 64 populate_cache(icache, this_leaf, 1, CACHE_TYPE_INST); 81 65 } else { 82 66 populate_cache(dcache, this_leaf, 1, CACHE_TYPE_UNIFIED); 83 67 } 84 68 85 - if (c->scache.waysize) 69 + if (c->scache.waysize) { 70 + /* L2 cache is per cluster */ 71 + fill_cpumask_cluster(cpu, &this_leaf->shared_cpu_map); 86 72 populate_cache(scache, this_leaf, 2, CACHE_TYPE_UNIFIED); 73 + } 87 74 88 75 if (c->tcache.waysize) 89 76 populate_cache(tcache, this_leaf, 3, CACHE_TYPE_UNIFIED);
+1 -1
arch/mips/net/ebpf_jit.c
··· 1804 1804 unsigned int image_size; 1805 1805 u8 *image_ptr; 1806 1806 1807 - if (!prog->jit_requested || MIPS_ISA_REV < 2) 1807 + if (!prog->jit_requested) 1808 1808 return prog; 1809 1809 1810 1810 tmp = bpf_jit_blind_constants(prog);
+20
arch/mips/vdso/vgettimeofday.c
··· 17 17 return __cvdso_clock_gettime32(clock, ts); 18 18 } 19 19 20 + #ifdef CONFIG_MIPS_CLOCK_VSYSCALL 21 + 22 + /* 23 + * This is behind the ifdef so that we don't provide the symbol when there's no 24 + * possibility of there being a usable clocksource, because there's nothing we 25 + * can do without it. When libc fails the symbol lookup it should fall back on 26 + * the standard syscall path. 27 + */ 20 28 int __vdso_gettimeofday(struct __kernel_old_timeval *tv, 21 29 struct timezone *tz) 22 30 { 23 31 return __cvdso_gettimeofday(tv, tz); 24 32 } 33 + 34 + #endif /* CONFIG_MIPS_CLOCK_VSYSCALL */ 25 35 26 36 int __vdso_clock_getres(clockid_t clock_id, 27 37 struct old_timespec32 *res) ··· 53 43 return __cvdso_clock_gettime(clock, ts); 54 44 } 55 45 46 + #ifdef CONFIG_MIPS_CLOCK_VSYSCALL 47 + 48 + /* 49 + * This is behind the ifdef so that we don't provide the symbol when there's no 50 + * possibility of there being a usable clocksource, because there's nothing we 51 + * can do without it. When libc fails the symbol lookup it should fall back on 52 + * the standard syscall path. 53 + */ 56 54 int __vdso_gettimeofday(struct __kernel_old_timeval *tv, 57 55 struct timezone *tz) 58 56 { 59 57 return __cvdso_gettimeofday(tv, tz); 60 58 } 59 + 60 + #endif /* CONFIG_MIPS_CLOCK_VSYSCALL */ 61 61 62 62 int __vdso_clock_getres(clockid_t clock_id, 63 63 struct __kernel_timespec *res)
+1
arch/powerpc/include/asm/spinlock.h
··· 15 15 * 16 16 * (the type definitions are in asm/spinlock_types.h) 17 17 */ 18 + #include <linux/jump_label.h> 18 19 #include <linux/irqflags.h> 19 20 #ifdef CONFIG_PPC64 20 21 #include <asm/paca.h>
+1 -2
arch/powerpc/mm/mem.c
··· 151 151 { 152 152 unsigned long start_pfn = start >> PAGE_SHIFT; 153 153 unsigned long nr_pages = size >> PAGE_SHIFT; 154 - struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap); 155 154 int ret; 156 155 157 - __remove_pages(page_zone(page), start_pfn, nr_pages, altmap); 156 + __remove_pages(start_pfn, nr_pages, altmap); 158 157 159 158 /* Remove htab bolted mappings for this section of memory */ 160 159 start = (unsigned long)__va(start);
+2 -2
arch/powerpc/mm/slice.c
··· 50 50 51 51 #endif 52 52 53 - static inline bool slice_addr_is_low(unsigned long addr) 53 + static inline notrace bool slice_addr_is_low(unsigned long addr) 54 54 { 55 55 u64 tmp = (u64)addr; 56 56 ··· 659 659 mm_ctx_user_psize(&current->mm->context), 1); 660 660 } 661 661 662 - unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr) 662 + unsigned int notrace get_slice_psize(struct mm_struct *mm, unsigned long addr) 663 663 { 664 664 unsigned char *psizes; 665 665 int index, mask_index;
+1
arch/riscv/Kconfig
··· 64 64 select SPARSEMEM_STATIC if 32BIT 65 65 select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU 66 66 select HAVE_ARCH_MMAP_RND_BITS if MMU 67 + select ARCH_HAS_GCOV_PROFILE_ALL 67 68 68 69 config ARCH_MMAP_RND_BITS_MIN 69 70 default 18 if 64BIT
+15
arch/riscv/boot/dts/sifive/fu540-c000.dtsi
··· 54 54 reg = <1>; 55 55 riscv,isa = "rv64imafdc"; 56 56 tlb-split; 57 + next-level-cache = <&l2cache>; 57 58 cpu1_intc: interrupt-controller { 58 59 #interrupt-cells = <1>; 59 60 compatible = "riscv,cpu-intc"; ··· 78 77 reg = <2>; 79 78 riscv,isa = "rv64imafdc"; 80 79 tlb-split; 80 + next-level-cache = <&l2cache>; 81 81 cpu2_intc: interrupt-controller { 82 82 #interrupt-cells = <1>; 83 83 compatible = "riscv,cpu-intc"; ··· 102 100 reg = <3>; 103 101 riscv,isa = "rv64imafdc"; 104 102 tlb-split; 103 + next-level-cache = <&l2cache>; 105 104 cpu3_intc: interrupt-controller { 106 105 #interrupt-cells = <1>; 107 106 compatible = "riscv,cpu-intc"; ··· 126 123 reg = <4>; 127 124 riscv,isa = "rv64imafdc"; 128 125 tlb-split; 126 + next-level-cache = <&l2cache>; 129 127 cpu4_intc: interrupt-controller { 130 128 #interrupt-cells = <1>; 131 129 compatible = "riscv,cpu-intc"; ··· 256 252 clocks = <&prci PRCI_CLK_TLCLK>; 257 253 #pwm-cells = <3>; 258 254 status = "disabled"; 255 + }; 256 + l2cache: cache-controller@2010000 { 257 + compatible = "sifive,fu540-c000-ccache", "cache"; 258 + cache-block-size = <64>; 259 + cache-level = <2>; 260 + cache-sets = <1024>; 261 + cache-size = <2097152>; 262 + cache-unified; 263 + interrupt-parent = <&plic0>; 264 + interrupts = <1 2 3>; 265 + reg = <0x0 0x2010000 0x0 0x1000>; 259 266 }; 260 267 261 268 };
+9 -9
arch/riscv/include/asm/csr.h
··· 116 116 # define SR_PIE SR_MPIE 117 117 # define SR_PP SR_MPP 118 118 119 - # define IRQ_SOFT IRQ_M_SOFT 120 - # define IRQ_TIMER IRQ_M_TIMER 121 - # define IRQ_EXT IRQ_M_EXT 119 + # define RV_IRQ_SOFT IRQ_M_SOFT 120 + # define RV_IRQ_TIMER IRQ_M_TIMER 121 + # define RV_IRQ_EXT IRQ_M_EXT 122 122 #else /* CONFIG_RISCV_M_MODE */ 123 123 # define CSR_STATUS CSR_SSTATUS 124 124 # define CSR_IE CSR_SIE ··· 133 133 # define SR_PIE SR_SPIE 134 134 # define SR_PP SR_SPP 135 135 136 - # define IRQ_SOFT IRQ_S_SOFT 137 - # define IRQ_TIMER IRQ_S_TIMER 138 - # define IRQ_EXT IRQ_S_EXT 136 + # define RV_IRQ_SOFT IRQ_S_SOFT 137 + # define RV_IRQ_TIMER IRQ_S_TIMER 138 + # define RV_IRQ_EXT IRQ_S_EXT 139 139 #endif /* CONFIG_RISCV_M_MODE */ 140 140 141 141 /* IE/IP (Supervisor/Machine Interrupt Enable/Pending) flags */ 142 - #define IE_SIE (_AC(0x1, UL) << IRQ_SOFT) 143 - #define IE_TIE (_AC(0x1, UL) << IRQ_TIMER) 144 - #define IE_EIE (_AC(0x1, UL) << IRQ_EXT) 142 + #define IE_SIE (_AC(0x1, UL) << RV_IRQ_SOFT) 143 + #define IE_TIE (_AC(0x1, UL) << RV_IRQ_TIMER) 144 + #define IE_EIE (_AC(0x1, UL) << RV_IRQ_EXT) 145 145 146 146 #ifndef __ASSEMBLY__ 147 147
+1 -1
arch/riscv/kernel/ftrace.c
··· 142 142 */ 143 143 old = *parent; 144 144 145 - if (function_graph_enter(old, self_addr, frame_pointer, parent)) 145 + if (!function_graph_enter(old, self_addr, frame_pointer, parent)) 146 146 *parent = return_hooker; 147 147 } 148 148
+3 -3
arch/riscv/kernel/irq.c
··· 23 23 24 24 irq_enter(); 25 25 switch (regs->cause & ~CAUSE_IRQ_FLAG) { 26 - case IRQ_TIMER: 26 + case RV_IRQ_TIMER: 27 27 riscv_timer_interrupt(); 28 28 break; 29 29 #ifdef CONFIG_SMP 30 - case IRQ_SOFT: 30 + case RV_IRQ_SOFT: 31 31 /* 32 32 * We only use software interrupts to pass IPIs, so if a non-SMP 33 33 * system gets one, then we don't know what to do. ··· 35 35 riscv_software_interrupt(); 36 36 break; 37 37 #endif 38 - case IRQ_EXT: 38 + case RV_IRQ_EXT: 39 39 handle_arch_irq(regs); 40 40 break; 41 41 default:
+6 -6
arch/riscv/mm/init.c
··· 99 99 pr_info("initrd not found or empty"); 100 100 goto disable; 101 101 } 102 - if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) { 102 + if (__pa_symbol(initrd_end) > PFN_PHYS(max_low_pfn)) { 103 103 pr_err("initrd extends beyond end of memory"); 104 104 goto disable; 105 105 } 106 106 107 107 size = initrd_end - initrd_start; 108 - memblock_reserve(__pa(initrd_start), size); 108 + memblock_reserve(__pa_symbol(initrd_start), size); 109 109 initrd_below_start_ok = 1; 110 110 111 111 pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n", ··· 124 124 { 125 125 struct memblock_region *reg; 126 126 phys_addr_t mem_size = 0; 127 - phys_addr_t vmlinux_end = __pa(&_end); 128 - phys_addr_t vmlinux_start = __pa(&_start); 127 + phys_addr_t vmlinux_end = __pa_symbol(&_end); 128 + phys_addr_t vmlinux_start = __pa_symbol(&_start); 129 129 130 130 /* Find the memory region containing the kernel */ 131 131 for_each_memblock(memory, reg) { ··· 445 445 446 446 /* Setup swapper PGD for fixmap */ 447 447 create_pgd_mapping(swapper_pg_dir, FIXADDR_START, 448 - __pa(fixmap_pgd_next), 448 + __pa_symbol(fixmap_pgd_next), 449 449 PGDIR_SIZE, PAGE_TABLE); 450 450 451 451 /* Map all memory banks */ ··· 474 474 clear_fixmap(FIX_PMD); 475 475 476 476 /* Move to swapper page table */ 477 - csr_write(CSR_SATP, PFN_DOWN(__pa(swapper_pg_dir)) | SATP_MODE); 477 + csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | SATP_MODE); 478 478 local_flush_tlb_all(); 479 479 } 480 480 #else
+1 -3
arch/s390/mm/init.c
··· 292 292 { 293 293 unsigned long start_pfn = start >> PAGE_SHIFT; 294 294 unsigned long nr_pages = size >> PAGE_SHIFT; 295 - struct zone *zone; 296 295 297 - zone = page_zone(pfn_to_page(start_pfn)); 298 - __remove_pages(zone, start_pfn, nr_pages, altmap); 296 + __remove_pages(start_pfn, nr_pages, altmap); 299 297 vmem_remove_mapping(start, size); 300 298 } 301 299 #endif /* CONFIG_MEMORY_HOTPLUG */
+1 -3
arch/sh/mm/init.c
··· 434 434 { 435 435 unsigned long start_pfn = PFN_DOWN(start); 436 436 unsigned long nr_pages = size >> PAGE_SHIFT; 437 - struct zone *zone; 438 437 439 - zone = page_zone(pfn_to_page(start_pfn)); 440 - __remove_pages(zone, start_pfn, nr_pages, altmap); 438 + __remove_pages(start_pfn, nr_pages, altmap); 441 439 } 442 440 #endif /* CONFIG_MEMORY_HOTPLUG */
+1 -3
arch/x86/mm/init_32.c
··· 865 865 { 866 866 unsigned long start_pfn = start >> PAGE_SHIFT; 867 867 unsigned long nr_pages = size >> PAGE_SHIFT; 868 - struct zone *zone; 869 868 870 - zone = page_zone(pfn_to_page(start_pfn)); 871 - __remove_pages(zone, start_pfn, nr_pages, altmap); 869 + __remove_pages(start_pfn, nr_pages, altmap); 872 870 } 873 871 #endif 874 872
+1 -3
arch/x86/mm/init_64.c
··· 1212 1212 { 1213 1213 unsigned long start_pfn = start >> PAGE_SHIFT; 1214 1214 unsigned long nr_pages = size >> PAGE_SHIFT; 1215 - struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap); 1216 - struct zone *zone = page_zone(page); 1217 1215 1218 - __remove_pages(zone, start_pfn, nr_pages, altmap); 1216 + __remove_pages(start_pfn, nr_pages, altmap); 1219 1217 kernel_physical_mapping_remove(start, start + size); 1220 1218 } 1221 1219 #endif /* CONFIG_MEMORY_HOTPLUG */
+39
block/bio.c
··· 538 538 } 539 539 EXPORT_SYMBOL(zero_fill_bio_iter); 540 540 541 + void bio_truncate(struct bio *bio, unsigned new_size) 542 + { 543 + struct bio_vec bv; 544 + struct bvec_iter iter; 545 + unsigned int done = 0; 546 + bool truncated = false; 547 + 548 + if (new_size >= bio->bi_iter.bi_size) 549 + return; 550 + 551 + if (bio_data_dir(bio) != READ) 552 + goto exit; 553 + 554 + bio_for_each_segment(bv, bio, iter) { 555 + if (done + bv.bv_len > new_size) { 556 + unsigned offset; 557 + 558 + if (!truncated) 559 + offset = new_size - done; 560 + else 561 + offset = 0; 562 + zero_user(bv.bv_page, offset, bv.bv_len - offset); 563 + truncated = true; 564 + } 565 + done += bv.bv_len; 566 + } 567 + 568 + exit: 569 + /* 570 + * Don't touch bvec table here and make it really immutable, since 571 + * fs bio user has to retrieve all pages via bio_for_each_segment_all 572 + * in its .end_bio() callback. 573 + * 574 + * It is enough to truncate bio by updating .bi_size since we can make 575 + * correct bvec with the updated .bi_size for drivers. 576 + */ 577 + bio->bi_iter.bi_size = new_size; 578 + } 579 + 541 580 /** 542 581 * bio_put - release a reference to a bio 543 582 * @bio: bio to release reference to
+9 -9
block/blk-merge.c
··· 157 157 return sectors & (lbs - 1); 158 158 } 159 159 160 - static unsigned get_max_segment_size(const struct request_queue *q, 161 - unsigned offset) 160 + static inline unsigned get_max_segment_size(const struct request_queue *q, 161 + struct page *start_page, 162 + unsigned long offset) 162 163 { 163 164 unsigned long mask = queue_segment_boundary(q); 164 165 165 - /* default segment boundary mask means no boundary limit */ 166 - if (mask == BLK_SEG_BOUNDARY_MASK) 167 - return queue_max_segment_size(q); 168 - 169 - return min_t(unsigned long, mask - (mask & offset) + 1, 166 + offset = mask & (page_to_phys(start_page) + offset); 167 + return min_t(unsigned long, mask - offset + 1, 170 168 queue_max_segment_size(q)); 171 169 } 172 170 ··· 199 201 unsigned seg_size = 0; 200 202 201 203 while (len && *nsegs < max_segs) { 202 - seg_size = get_max_segment_size(q, bv->bv_offset + total_len); 204 + seg_size = get_max_segment_size(q, bv->bv_page, 205 + bv->bv_offset + total_len); 203 206 seg_size = min(seg_size, len); 204 207 205 208 (*nsegs)++; ··· 418 419 419 420 while (nbytes > 0) { 420 421 unsigned offset = bvec->bv_offset + total; 421 - unsigned len = min(get_max_segment_size(q, offset), nbytes); 422 + unsigned len = min(get_max_segment_size(q, bvec->bv_page, 423 + offset), nbytes); 422 424 struct page *page = bvec->bv_page; 423 425 424 426 /*
+2 -2
drivers/atm/eni.c
··· 374 374 here = (eni_vcc->descr+skip) & (eni_vcc->words-1); 375 375 dma[j++] = (here << MID_DMA_COUNT_SHIFT) | (vcc->vci 376 376 << MID_DMA_VCI_SHIFT) | MID_DT_JK; 377 - j++; 377 + dma[j++] = 0; 378 378 } 379 379 here = (eni_vcc->descr+size+skip) & (eni_vcc->words-1); 380 380 if (!eff) size += skip; ··· 447 447 if (size != eff) { 448 448 dma[j++] = (here << MID_DMA_COUNT_SHIFT) | 449 449 (vcc->vci << MID_DMA_VCI_SHIFT) | MID_DT_JK; 450 - j++; 450 + dma[j++] = 0; 451 451 } 452 452 if (!j || j > 2*RX_DMA_BUF) { 453 453 printk(KERN_CRIT DEV_LABEL "!j or j too big!!!\n");
+4 -1
drivers/block/null_blk_zoned.c
··· 186 186 if (zone->cond == BLK_ZONE_COND_FULL) 187 187 return BLK_STS_IOERR; 188 188 189 - zone->cond = BLK_ZONE_COND_CLOSED; 189 + if (zone->wp == zone->start) 190 + zone->cond = BLK_ZONE_COND_EMPTY; 191 + else 192 + zone->cond = BLK_ZONE_COND_CLOSED; 190 193 break; 191 194 case REQ_OP_ZONE_FINISH: 192 195 if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+1 -8
drivers/char/agp/isoch.c
··· 84 84 unsigned int cdev = 0; 85 85 u32 mnistat, tnistat, tstatus, mcmd; 86 86 u16 tnicmd, mnicmd; 87 - u8 mcapndx; 88 87 u32 tot_bw = 0, tot_n = 0, tot_rq = 0, y_max, rq_isoch, rq_async; 89 88 u32 step, rem, rem_isoch, rem_async; 90 89 int ret = 0; ··· 136 137 list_for_each(pos, head) { 137 138 cur = list_entry(pos, struct agp_3_5_dev, list); 138 139 dev = cur->dev; 139 - 140 - mcapndx = cur->capndx; 141 140 142 141 pci_read_config_dword(dev, cur->capndx+AGPNISTAT, &mnistat); 143 142 ··· 248 251 cur = master[cdev].dev; 249 252 dev = cur->dev; 250 253 251 - mcapndx = cur->capndx; 252 - 253 254 master[cdev].rq += (cdev == ndevs - 1) 254 255 ? (rem_async + rem_isoch) : step; 255 256 ··· 314 319 { 315 320 struct pci_dev *td = bridge->dev, *dev = NULL; 316 321 u8 mcapndx; 317 - u32 isoch, arqsz; 322 + u32 isoch; 318 323 u32 tstatus, mstatus, ncapid; 319 324 u32 mmajor; 320 325 u16 mpstat; ··· 328 333 isoch = (tstatus >> 17) & 0x1; 329 334 if (isoch == 0) /* isoch xfers not available, bail out. */ 330 335 return -ENODEV; 331 - 332 - arqsz = (tstatus >> 13) & 0x7; 333 336 334 337 /* 335 338 * Allocate a head for our AGP 3.5 device list
+1 -1
drivers/char/tpm/tpm-dev-common.c
··· 130 130 priv->response_read = true; 131 131 132 132 ret_size = min_t(ssize_t, size, priv->response_length); 133 - if (!ret_size) { 133 + if (ret_size <= 0) { 134 134 priv->response_length = 0; 135 135 goto out; 136 136 }
+1 -1
drivers/char/tpm/tpm-dev.h
··· 14 14 struct work_struct timeout_work; 15 15 struct work_struct async_work; 16 16 wait_queue_head_t async_wait; 17 - size_t response_length; 17 + ssize_t response_length; 18 18 bool response_read; 19 19 bool command_enqueued; 20 20
+15 -19
drivers/char/tpm/tpm_tis_core.c
··· 978 978 979 979 if (wait_startup(chip, 0) != 0) { 980 980 rc = -ENODEV; 981 - goto err_start; 981 + goto out_err; 982 982 } 983 983 984 984 /* Take control of the TPM's interrupt hardware and shut it off */ 985 985 rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask); 986 986 if (rc < 0) 987 - goto err_start; 987 + goto out_err; 988 988 989 989 intmask |= TPM_INTF_CMD_READY_INT | TPM_INTF_LOCALITY_CHANGE_INT | 990 990 TPM_INTF_DATA_AVAIL_INT | TPM_INTF_STS_VALID_INT; ··· 993 993 994 994 rc = tpm_chip_start(chip); 995 995 if (rc) 996 - goto err_start; 997 - 996 + goto out_err; 998 997 rc = tpm2_probe(chip); 998 + tpm_chip_stop(chip); 999 999 if (rc) 1000 - goto err_probe; 1000 + goto out_err; 1001 1001 1002 1002 rc = tpm_tis_read32(priv, TPM_DID_VID(0), &vendor); 1003 1003 if (rc < 0) 1004 - goto err_probe; 1004 + goto out_err; 1005 1005 1006 1006 priv->manufacturer_id = vendor; 1007 1007 1008 1008 rc = tpm_tis_read8(priv, TPM_RID(0), &rid); 1009 1009 if (rc < 0) 1010 - goto err_probe; 1010 + goto out_err; 1011 1011 1012 1012 dev_info(dev, "%s TPM (device-id 0x%X, rev-id %d)\n", 1013 1013 (chip->flags & TPM_CHIP_FLAG_TPM2) ? 
"2.0" : "1.2", ··· 1016 1016 probe = probe_itpm(chip); 1017 1017 if (probe < 0) { 1018 1018 rc = -ENODEV; 1019 - goto err_probe; 1019 + goto out_err; 1020 1020 } 1021 1021 1022 1022 /* Figure out the capabilities */ 1023 1023 rc = tpm_tis_read32(priv, TPM_INTF_CAPS(priv->locality), &intfcaps); 1024 1024 if (rc < 0) 1025 - goto err_probe; 1025 + goto out_err; 1026 1026 1027 1027 dev_dbg(dev, "TPM interface capabilities (0x%x):\n", 1028 1028 intfcaps); ··· 1056 1056 if (tpm_get_timeouts(chip)) { 1057 1057 dev_err(dev, "Could not get TPM timeouts and durations\n"); 1058 1058 rc = -ENODEV; 1059 - goto err_probe; 1059 + goto out_err; 1060 1060 } 1061 1061 1062 - chip->flags |= TPM_CHIP_FLAG_IRQ; 1063 1062 if (irq) { 1064 1063 tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED, 1065 1064 irq); ··· 1070 1071 } 1071 1072 } 1072 1073 1073 - tpm_chip_stop(chip); 1074 - 1075 1074 rc = tpm_chip_register(chip); 1076 1075 if (rc) 1077 - goto err_start; 1076 + goto out_err; 1077 + 1078 + if (chip->ops->clk_enable != NULL) 1079 + chip->ops->clk_enable(chip, false); 1078 1080 1079 1081 return 0; 1080 - 1081 - err_probe: 1082 - tpm_chip_stop(chip); 1083 - 1084 - err_start: 1082 + out_err: 1085 1083 if ((chip->ops != NULL) && (chip->ops->clk_enable != NULL)) 1086 1084 chip->ops->clk_enable(chip, false); 1087 1085
+1 -1
drivers/clocksource/timer-riscv.c
··· 56 56 return get_cycles64(); 57 57 } 58 58 59 - static u64 riscv_sched_clock(void) 59 + static u64 notrace riscv_sched_clock(void) 60 60 { 61 61 return get_cycles64(); 62 62 }
+2 -1
drivers/dma/dma-jz4780.c
··· 999 999 static const struct jz4780_dma_soc_data jz4725b_dma_soc_data = { 1000 1000 .nb_channels = 6, 1001 1001 .transfer_ord_max = 5, 1002 - .flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC, 1002 + .flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC | 1003 + JZ_SOC_DATA_BREAK_LINKS, 1003 1004 }; 1004 1005 1005 1006 static const struct jz4780_dma_soc_data jz4770_dma_soc_data = {
+2 -1
drivers/dma/ioat/dma.c
··· 377 377 378 378 descs->virt = dma_alloc_coherent(to_dev(ioat_chan), 379 379 SZ_2M, &descs->hw, flags); 380 - if (!descs->virt && (i > 0)) { 380 + if (!descs->virt) { 381 381 int idx; 382 382 383 383 for (idx = 0; idx < i; idx++) { 384 + descs = &ioat_chan->descs[idx]; 384 385 dma_free_coherent(to_dev(ioat_chan), SZ_2M, 385 386 descs->virt, descs->hw); 386 387 descs->virt = NULL;
+9 -3
drivers/dma/k3dma.c
··· 229 229 c = p->vchan; 230 230 if (c && (tc1 & BIT(i))) { 231 231 spin_lock_irqsave(&c->vc.lock, flags); 232 - vchan_cookie_complete(&p->ds_run->vd); 233 - p->ds_done = p->ds_run; 234 - p->ds_run = NULL; 232 + if (p->ds_run != NULL) { 233 + vchan_cookie_complete(&p->ds_run->vd); 234 + p->ds_done = p->ds_run; 235 + p->ds_run = NULL; 236 + } 235 237 spin_unlock_irqrestore(&c->vc.lock, flags); 236 238 } 237 239 if (c && (tc2 & BIT(i))) { ··· 271 269 return -EAGAIN; 272 270 273 271 if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d)) 272 + return -EAGAIN; 273 + 274 + /* Avoid losing track of ds_run if a transaction is in flight */ 275 + if (c->phy->ds_run) 274 276 return -EAGAIN; 275 277 276 278 if (vd) {
+1 -2
drivers/dma/virt-dma.c
··· 104 104 dmaengine_desc_get_callback(&vd->tx, &cb); 105 105 106 106 list_del(&vd->node); 107 - vchan_vdesc_fini(vd); 108 - 109 107 dmaengine_desc_callback_invoke(&cb, &vd->tx_result); 108 + vchan_vdesc_fini(vd); 110 109 } 111 110 } 112 111
-1
drivers/firmware/broadcom/tee_bnxt_fw.c
··· 215 215 fw_shm_pool = tee_shm_alloc(pvt_data.ctx, MAX_SHM_MEM_SZ, 216 216 TEE_SHM_MAPPED | TEE_SHM_DMA_BUF); 217 217 if (IS_ERR(fw_shm_pool)) { 218 - tee_client_close_context(pvt_data.ctx); 219 218 dev_err(pvt_data.dev, "tee_shm_alloc failed\n"); 220 219 err = PTR_ERR(fw_shm_pool); 221 220 goto out_sess;
+11 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
··· 613 613 bool d3_supported = false; 614 614 struct pci_dev *parent_pdev; 615 615 616 - while ((pdev = pci_get_class(PCI_BASE_CLASS_DISPLAY << 16, pdev)) != NULL) { 616 + while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { 617 + vga_count++; 618 + 619 + has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true); 620 + 621 + parent_pdev = pci_upstream_bridge(pdev); 622 + d3_supported |= parent_pdev && parent_pdev->bridge_d3; 623 + amdgpu_atpx_get_quirks(pdev); 624 + } 625 + 626 + while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) { 617 627 vga_count++; 618 628 619 629 has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
··· 1488 1488 1489 1489 /* Start rlc autoload after psp recieved all the gfx firmware */ 1490 1490 if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ? 1491 - AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM)) { 1491 + AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_G)) { 1492 1492 ret = psp_rlc_autoload(psp); 1493 1493 if (ret) { 1494 1494 DRM_ERROR("Failed to start rlc autoload\n");
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
··· 292 292 AMDGPU_UCODE_ID_CP_MEC2_JT, 293 293 AMDGPU_UCODE_ID_CP_MES, 294 294 AMDGPU_UCODE_ID_CP_MES_DATA, 295 - AMDGPU_UCODE_ID_RLC_G, 296 295 AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL, 297 296 AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM, 298 297 AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM, 298 + AMDGPU_UCODE_ID_RLC_G, 299 299 AMDGPU_UCODE_ID_STORAGE, 300 300 AMDGPU_UCODE_ID_SMC, 301 301 AMDGPU_UCODE_ID_UVD,
+4 -11
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
··· 1052 1052 case CHIP_VEGA20: 1053 1053 break; 1054 1054 case CHIP_RAVEN: 1055 - /* Disable GFXOFF on original raven. There are combinations 1056 - * of sbios and platforms that are not stable. 1057 - */ 1058 - if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)) 1059 - adev->pm.pp_feature &= ~PP_GFXOFF_MASK; 1060 - else if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) 1061 - &&((adev->gfx.rlc_fw_version != 106 && 1062 - adev->gfx.rlc_fw_version < 531) || 1063 - (adev->gfx.rlc_fw_version == 53815) || 1064 - (adev->gfx.rlc_feature_version < 1) || 1065 - !adev->gfx.rlc.is_rlc_v2_1)) 1055 + if (!(adev->rev_id >= 0x8 || 1056 + adev->pdev->device == 0x15d8) && 1057 + (adev->pm.fw_version < 0x41e2b || /* not raven1 fresh */ 1058 + !adev->gfx.rlc.is_rlc_v2_1)) /* without rlc save restore ucodes */ 1066 1059 adev->pm.pp_feature &= ~PP_GFXOFF_MASK; 1067 1060 1068 1061 if (adev->pm.pp_feature & PP_GFXOFF_MASK)
+1
drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
··· 866 866 smu->smu_baco.platform_support = false; 867 867 868 868 mutex_init(&smu->sensor_lock); 869 + mutex_init(&smu->metrics_lock); 869 870 870 871 smu->watermarks_bitmap = 0; 871 872 smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
+3
drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
··· 862 862 struct smu_table_context *smu_table= &smu->smu_table; 863 863 int ret = 0; 864 864 865 + mutex_lock(&smu->metrics_lock); 865 866 if (!smu_table->metrics_time || 866 867 time_after(jiffies, smu_table->metrics_time + HZ / 1000)) { 867 868 ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, 868 869 (void *)smu_table->metrics_table, false); 869 870 if (ret) { 870 871 pr_info("Failed to export SMU metrics table!\n"); 872 + mutex_unlock(&smu->metrics_lock); 871 873 return ret; 872 874 } 873 875 smu_table->metrics_time = jiffies; 874 876 } 875 877 876 878 memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t)); 879 + mutex_unlock(&smu->metrics_lock); 877 880 878 881 return ret; 879 882 }
+1
drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
··· 349 349 const struct pptable_funcs *ppt_funcs; 350 350 struct mutex mutex; 351 351 struct mutex sensor_lock; 352 + struct mutex metrics_lock; 352 353 uint64_t pool_size; 353 354 354 355 struct smu_table_context smu_table;
+3
drivers/gpu/drm/amd/powerplay/navi10_ppt.c
··· 562 562 struct smu_table_context *smu_table= &smu->smu_table; 563 563 int ret = 0; 564 564 565 + mutex_lock(&smu->metrics_lock); 565 566 if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(100))) { 566 567 ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, 567 568 (void *)smu_table->metrics_table, false); 568 569 if (ret) { 569 570 pr_info("Failed to export SMU metrics table!\n"); 571 + mutex_unlock(&smu->metrics_lock); 570 572 return ret; 571 573 } 572 574 smu_table->metrics_time = jiffies; 573 575 } 574 576 575 577 memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t)); 578 + mutex_unlock(&smu->metrics_lock); 576 579 577 580 return ret; 578 581 }
+3
drivers/gpu/drm/amd/powerplay/vega20_ppt.c
··· 1678 1678 struct smu_table_context *smu_table= &smu->smu_table; 1679 1679 int ret = 0; 1680 1680 1681 + mutex_lock(&smu->metrics_lock); 1681 1682 if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + HZ / 1000)) { 1682 1683 ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, 1683 1684 (void *)smu_table->metrics_table, false); 1684 1685 if (ret) { 1685 1686 pr_info("Failed to export SMU metrics table!\n"); 1687 + mutex_unlock(&smu->metrics_lock); 1686 1688 return ret; 1687 1689 } 1688 1690 smu_table->metrics_time = jiffies; 1689 1691 } 1690 1692 1691 1693 memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t)); 1694 + mutex_unlock(&smu->metrics_lock); 1692 1695 1693 1696 return ret; 1694 1697 }
+1 -1
drivers/gpu/drm/arm/malidp_mw.c
··· 56 56 return MODE_OK; 57 57 } 58 58 59 - const struct drm_connector_helper_funcs malidp_mw_connector_helper_funcs = { 59 + static const struct drm_connector_helper_funcs malidp_mw_connector_helper_funcs = { 60 60 .get_modes = malidp_mw_connector_get_modes, 61 61 .mode_valid = malidp_mw_connector_mode_valid, 62 62 };
+12 -6
drivers/gpu/drm/mediatek/mtk_drm_crtc.c
··· 215 215 struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); 216 216 struct mtk_ddp_comp *comp; 217 217 int i, count = 0; 218 + unsigned int local_index = plane - mtk_crtc->planes; 218 219 219 220 for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { 220 221 comp = mtk_crtc->ddp_comp[i]; 221 - if (plane->index < (count + mtk_ddp_comp_layer_nr(comp))) { 222 - *local_layer = plane->index - count; 222 + if (local_index < (count + mtk_ddp_comp_layer_nr(comp))) { 223 + *local_layer = local_index - count; 223 224 return comp; 224 225 } 225 226 count += mtk_ddp_comp_layer_nr(comp); ··· 311 310 312 311 plane_state = to_mtk_plane_state(plane->state); 313 312 comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer); 314 - mtk_ddp_comp_layer_config(comp, local_layer, plane_state); 313 + if (comp) 314 + mtk_ddp_comp_layer_config(comp, local_layer, 315 + plane_state); 315 316 } 316 317 317 318 return 0; ··· 389 386 comp = mtk_drm_ddp_comp_for_plane(crtc, plane, 390 387 &local_layer); 391 388 392 - mtk_ddp_comp_layer_config(comp, local_layer, 393 - plane_state); 389 + if (comp) 390 + mtk_ddp_comp_layer_config(comp, local_layer, 391 + plane_state); 394 392 plane_state->pending.config = false; 395 393 } 396 394 mtk_crtc->pending_planes = false; ··· 405 401 struct mtk_ddp_comp *comp; 406 402 407 403 comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer); 408 - return mtk_ddp_comp_layer_check(comp, local_layer, state); 404 + if (comp) 405 + return mtk_ddp_comp_layer_check(comp, local_layer, state); 406 + return 0; 409 407 } 410 408 411 409 static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
+38 -29
drivers/gpu/drm/mediatek/mtk_dsi.c
··· 230 230 static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi) 231 231 { 232 232 u32 timcon0, timcon1, timcon2, timcon3; 233 - u32 ui, cycle_time; 233 + u32 data_rate_mhz = DIV_ROUND_UP(dsi->data_rate, 1000000); 234 234 struct mtk_phy_timing *timing = &dsi->phy_timing; 235 235 236 - ui = DIV_ROUND_UP(1000000000, dsi->data_rate); 237 - cycle_time = div_u64(8000000000ULL, dsi->data_rate); 236 + timing->lpx = (60 * data_rate_mhz / (8 * 1000)) + 1; 237 + timing->da_hs_prepare = (80 * data_rate_mhz + 4 * 1000) / 8000; 238 + timing->da_hs_zero = (170 * data_rate_mhz + 10 * 1000) / 8000 + 1 - 239 + timing->da_hs_prepare; 240 + timing->da_hs_trail = timing->da_hs_prepare + 1; 238 241 239 - timing->lpx = NS_TO_CYCLE(60, cycle_time); 240 - timing->da_hs_prepare = NS_TO_CYCLE(50 + 5 * ui, cycle_time); 241 - timing->da_hs_zero = NS_TO_CYCLE(110 + 6 * ui, cycle_time); 242 - timing->da_hs_trail = NS_TO_CYCLE(77 + 4 * ui, cycle_time); 242 + timing->ta_go = 4 * timing->lpx - 2; 243 + timing->ta_sure = timing->lpx + 2; 244 + timing->ta_get = 4 * timing->lpx; 245 + timing->da_hs_exit = 2 * timing->lpx + 1; 243 246 244 - timing->ta_go = 4 * timing->lpx; 245 - timing->ta_sure = 3 * timing->lpx / 2; 246 - timing->ta_get = 5 * timing->lpx; 247 - timing->da_hs_exit = 2 * timing->lpx; 248 - 249 - timing->clk_hs_zero = NS_TO_CYCLE(336, cycle_time); 250 - timing->clk_hs_trail = NS_TO_CYCLE(100, cycle_time) + 10; 251 - 252 - timing->clk_hs_prepare = NS_TO_CYCLE(64, cycle_time); 253 - timing->clk_hs_post = NS_TO_CYCLE(80 + 52 * ui, cycle_time); 254 - timing->clk_hs_exit = 2 * timing->lpx; 247 + timing->clk_hs_prepare = 70 * data_rate_mhz / (8 * 1000); 248 + timing->clk_hs_post = timing->clk_hs_prepare + 8; 249 + timing->clk_hs_trail = timing->clk_hs_prepare; 250 + timing->clk_hs_zero = timing->clk_hs_trail * 4; 251 + timing->clk_hs_exit = 2 * timing->clk_hs_trail; 255 252 256 253 timcon0 = timing->lpx | timing->da_hs_prepare << 8 | 257 254 timing->da_hs_zero << 16 | timing->da_hs_trail << 
24; ··· 479 482 dsi_tmp_buf_bpp - 10); 480 483 481 484 data_phy_cycles = timing->lpx + timing->da_hs_prepare + 482 - timing->da_hs_zero + timing->da_hs_exit + 2; 485 + timing->da_hs_zero + timing->da_hs_exit + 3; 483 486 484 487 if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) { 485 - if (vm->hfront_porch * dsi_tmp_buf_bpp > 488 + if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp > 486 489 data_phy_cycles * dsi->lanes + 18) { 487 - horizontal_frontporch_byte = vm->hfront_porch * 488 - dsi_tmp_buf_bpp - 489 - data_phy_cycles * 490 - dsi->lanes - 18; 490 + horizontal_frontporch_byte = 491 + vm->hfront_porch * dsi_tmp_buf_bpp - 492 + (data_phy_cycles * dsi->lanes + 18) * 493 + vm->hfront_porch / 494 + (vm->hfront_porch + vm->hback_porch); 495 + 496 + horizontal_backporch_byte = 497 + horizontal_backporch_byte - 498 + (data_phy_cycles * dsi->lanes + 18) * 499 + vm->hback_porch / 500 + (vm->hfront_porch + vm->hback_porch); 491 501 } else { 492 502 DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n"); 493 503 horizontal_frontporch_byte = vm->hfront_porch * 494 504 dsi_tmp_buf_bpp; 495 505 } 496 506 } else { 497 - if (vm->hfront_porch * dsi_tmp_buf_bpp > 507 + if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp > 498 508 data_phy_cycles * dsi->lanes + 12) { 499 - horizontal_frontporch_byte = vm->hfront_porch * 500 - dsi_tmp_buf_bpp - 501 - data_phy_cycles * 502 - dsi->lanes - 12; 509 + horizontal_frontporch_byte = 510 + vm->hfront_porch * dsi_tmp_buf_bpp - 511 + (data_phy_cycles * dsi->lanes + 12) * 512 + vm->hfront_porch / 513 + (vm->hfront_porch + vm->hback_porch); 514 + horizontal_backporch_byte = horizontal_backporch_byte - 515 + (data_phy_cycles * dsi->lanes + 12) * 516 + vm->hback_porch / 517 + (vm->hfront_porch + vm->hback_porch); 503 518 } else { 504 519 DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n"); 505 520 horizontal_frontporch_byte = vm->hfront_porch *
-2
drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
··· 685 685 struct sun4i_hdmi *hdmi = dev_get_drvdata(dev); 686 686 687 687 cec_unregister_adapter(hdmi->cec_adap); 688 - drm_connector_cleanup(&hdmi->connector); 689 - drm_encoder_cleanup(&hdmi->encoder); 690 688 i2c_del_adapter(hdmi->i2c); 691 689 i2c_put_adapter(hdmi->ddc_i2c); 692 690 clk_disable_unprepare(hdmi->mod_clk);
+2 -1
drivers/hid/hid-asus.c
··· 261 261 struct hid_usage *usage, __s32 value) 262 262 { 263 263 if ((usage->hid & HID_USAGE_PAGE) == 0xff310000 && 264 - (usage->hid & HID_USAGE) != 0x00 && !usage->type) { 264 + (usage->hid & HID_USAGE) != 0x00 && 265 + (usage->hid & HID_USAGE) != 0xff && !usage->type) { 265 266 hid_warn(hdev, "Unmapped Asus vendor usagepage code 0x%02x\n", 266 267 usage->hid & HID_USAGE); 267 268 }
+6
drivers/hid/hid-core.c
··· 288 288 offset = report->size; 289 289 report->size += parser->global.report_size * parser->global.report_count; 290 290 291 + /* Total size check: Allow for possible report index byte */ 292 + if (report->size > (HID_MAX_BUFFER_SIZE - 1) << 3) { 293 + hid_err(parser->device, "report is too long\n"); 294 + return -1; 295 + } 296 + 291 297 if (!parser->local.usage_index) /* Ignore padding fields */ 292 298 return 0; 293 299
+3
drivers/hid/hid-ids.h
··· 631 631 #define USB_VENDOR_ID_ITE 0x048d 632 632 #define USB_DEVICE_ID_ITE_LENOVO_YOGA 0x8386 633 633 #define USB_DEVICE_ID_ITE_LENOVO_YOGA2 0x8350 634 + #define I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720 0x837a 634 635 #define USB_DEVICE_ID_ITE_LENOVO_YOGA900 0x8396 635 636 #define USB_DEVICE_ID_ITE8595 0x8595 636 637 ··· 731 730 #define USB_DEVICE_ID_LG_MULTITOUCH 0x0064 732 731 #define USB_DEVICE_ID_LG_MELFAS_MT 0x6007 733 732 #define I2C_DEVICE_ID_LG_8001 0x8001 733 + #define I2C_DEVICE_ID_LG_7010 0x7010 734 734 735 735 #define USB_VENDOR_ID_LOGITECH 0x046d 736 736 #define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e ··· 1104 1102 #define USB_DEVICE_ID_SYNAPTICS_LTS2 0x1d10 1105 1103 #define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3 1106 1104 #define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3 1105 + #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012 0x2968 1107 1106 #define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710 1108 1107 #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5 0x81a7 1109 1108
+12 -4
drivers/hid/hid-input.c
··· 1132 1132 } 1133 1133 1134 1134 mapped: 1135 - if (device->driver->input_mapped && device->driver->input_mapped(device, 1136 - hidinput, field, usage, &bit, &max) < 0) 1137 - goto ignore; 1135 + if (device->driver->input_mapped && 1136 + device->driver->input_mapped(device, hidinput, field, usage, 1137 + &bit, &max) < 0) { 1138 + /* 1139 + * The driver indicated that no further generic handling 1140 + * of the usage is desired. 1141 + */ 1142 + return; 1143 + } 1138 1144 1139 1145 set_bit(usage->type, input->evbit); 1140 1146 ··· 1221 1215 set_bit(MSC_SCAN, input->mscbit); 1222 1216 } 1223 1217 1224 - ignore: 1225 1218 return; 1226 1219 1220 + ignore: 1221 + usage->type = 0; 1222 + usage->code = 0; 1227 1223 } 1228 1224 1229 1225 static void hidinput_handle_scroll(struct hid_usage *usage,
+3
drivers/hid/hid-ite.c
··· 40 40 static const struct hid_device_id ite_devices[] = { 41 41 { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) }, 42 42 { HID_USB_DEVICE(USB_VENDOR_ID_258A, USB_DEVICE_ID_258A_6A88) }, 43 + /* ITE8595 USB kbd ctlr, with Synaptics touchpad connected to it. */ 44 + { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, 45 + USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012) }, 43 46 { } 44 47 }; 45 48 MODULE_DEVICE_TABLE(hid, ite_devices);
+4 -1
drivers/hid/hid-multitouch.c
··· 1019 1019 tool = MT_TOOL_DIAL; 1020 1020 else if (unlikely(!confidence_state)) { 1021 1021 tool = MT_TOOL_PALM; 1022 - if (!active && 1022 + if (!active && mt && 1023 1023 input_mt_is_active(&mt->slots[slotnum])) { 1024 1024 /* 1025 1025 * The non-confidence was reported for ··· 1985 1985 { .driver_data = MT_CLS_LG, 1986 1986 HID_USB_DEVICE(USB_VENDOR_ID_LG, 1987 1987 USB_DEVICE_ID_LG_MELFAS_MT) }, 1988 + { .driver_data = MT_CLS_LG, 1989 + HID_DEVICE(BUS_I2C, HID_GROUP_GENERIC, 1990 + USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_7010) }, 1988 1991 1989 1992 /* MosArt panels */ 1990 1993 { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
+1
drivers/hid/hid-quirks.c
··· 174 174 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET), HID_QUIRK_MULTI_INPUT }, 175 175 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD2, USB_DEVICE_ID_SMARTJOY_DUAL_PLUS), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT }, 176 176 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT }, 177 + { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE), HID_QUIRK_MULTI_INPUT }, 177 178 178 179 { 0 } 179 180 };
+4
drivers/hid/hid-steam.c
··· 768 768 769 769 if (steam->quirks & STEAM_QUIRK_WIRELESS) { 770 770 hid_info(hdev, "Steam wireless receiver connected"); 771 + /* If using a wireless adaptor ask for connection status */ 772 + steam->connected = false; 771 773 steam_request_conn_status(steam); 772 774 } else { 775 + /* A wired connection is always present */ 776 + steam->connected = true; 773 777 ret = steam_register(steam); 774 778 if (ret) { 775 779 hid_err(hdev,
+2 -2
drivers/hid/hidraw.c
··· 252 252 253 253 poll_wait(file, &list->hidraw->wait, wait); 254 254 if (list->head != list->tail) 255 - return EPOLLIN | EPOLLRDNORM | EPOLLOUT; 255 + return EPOLLIN | EPOLLRDNORM; 256 256 if (!list->hidraw->exist) 257 257 return EPOLLERR | EPOLLHUP; 258 - return 0; 258 + return EPOLLOUT | EPOLLWRNORM; 259 259 } 260 260 261 261 static int hidraw_open(struct inode *inode, struct file *file)
+13 -3
drivers/hid/i2c-hid/i2c-hid-core.c
··· 49 49 #define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1) 50 50 #define I2C_HID_QUIRK_BOGUS_IRQ BIT(4) 51 51 #define I2C_HID_QUIRK_RESET_ON_RESUME BIT(5) 52 + #define I2C_HID_QUIRK_BAD_INPUT_SIZE BIT(6) 53 + 52 54 53 55 /* flags */ 54 56 #define I2C_HID_STARTED 0 ··· 177 175 I2C_HID_QUIRK_BOGUS_IRQ }, 178 176 { USB_VENDOR_ID_ALPS_JP, HID_ANY_ID, 179 177 I2C_HID_QUIRK_RESET_ON_RESUME }, 178 + { USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720, 179 + I2C_HID_QUIRK_BAD_INPUT_SIZE }, 180 180 { 0, 0 } 181 181 }; 182 182 ··· 500 496 } 501 497 502 498 if ((ret_size > size) || (ret_size < 2)) { 503 - dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n", 504 - __func__, size, ret_size); 505 - return; 499 + if (ihid->quirks & I2C_HID_QUIRK_BAD_INPUT_SIZE) { 500 + ihid->inbuf[0] = size & 0xff; 501 + ihid->inbuf[1] = size >> 8; 502 + ret_size = size; 503 + } else { 504 + dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n", 505 + __func__, size, ret_size); 506 + return; 507 + } 506 508 } 507 509 508 510 i2c_hid_dbg(ihid, "input: %*ph\n", ret_size, ihid->inbuf);
+2
drivers/hid/intel-ish-hid/ipc/hw-ish.h
··· 24 24 #define ICL_MOBILE_DEVICE_ID 0x34FC 25 25 #define SPT_H_DEVICE_ID 0xA135 26 26 #define CML_LP_DEVICE_ID 0x02FC 27 + #define CMP_H_DEVICE_ID 0x06FC 27 28 #define EHL_Ax_DEVICE_ID 0x4BB3 29 + #define TGL_LP_DEVICE_ID 0xA0FC 28 30 29 31 #define REVISION_ID_CHT_A0 0x6 30 32 #define REVISION_ID_CHT_Ax_SI 0x0
+2
drivers/hid/intel-ish-hid/ipc/pci-ish.c
··· 34 34 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)}, 35 35 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)}, 36 36 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CML_LP_DEVICE_ID)}, 37 + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CMP_H_DEVICE_ID)}, 37 38 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, EHL_Ax_DEVICE_ID)}, 39 + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, TGL_LP_DEVICE_ID)}, 38 40 {0, } 39 41 }; 40 42 MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
+1 -1
drivers/hid/uhid.c
··· 772 772 if (uhid->head != uhid->tail) 773 773 return EPOLLIN | EPOLLRDNORM; 774 774 775 - return 0; 775 + return EPOLLOUT | EPOLLWRNORM; 776 776 } 777 777 778 778 static const struct file_operations uhid_fops = {
+42 -55
drivers/hid/usbhid/hiddev.c
··· 241 241 return 0; 242 242 } 243 243 244 + static int __hiddev_open(struct hiddev *hiddev, struct file *file) 245 + { 246 + struct hiddev_list *list; 247 + int error; 248 + 249 + lockdep_assert_held(&hiddev->existancelock); 250 + 251 + list = vzalloc(sizeof(*list)); 252 + if (!list) 253 + return -ENOMEM; 254 + 255 + mutex_init(&list->thread_lock); 256 + list->hiddev = hiddev; 257 + 258 + if (!hiddev->open++) { 259 + error = hid_hw_power(hiddev->hid, PM_HINT_FULLON); 260 + if (error < 0) 261 + goto err_drop_count; 262 + 263 + error = hid_hw_open(hiddev->hid); 264 + if (error < 0) 265 + goto err_normal_power; 266 + } 267 + 268 + spin_lock_irq(&hiddev->list_lock); 269 + list_add_tail(&list->node, &hiddev->list); 270 + spin_unlock_irq(&hiddev->list_lock); 271 + 272 + file->private_data = list; 273 + 274 + return 0; 275 + 276 + err_normal_power: 277 + hid_hw_power(hiddev->hid, PM_HINT_NORMAL); 278 + err_drop_count: 279 + hiddev->open--; 280 + vfree(list); 281 + return error; 282 + } 283 + 244 284 /* 245 285 * open file op 246 286 */ 247 287 static int hiddev_open(struct inode *inode, struct file *file) 248 288 { 249 - struct hiddev_list *list; 250 289 struct usb_interface *intf; 251 290 struct hid_device *hid; 252 291 struct hiddev *hiddev; ··· 294 255 intf = usbhid_find_interface(iminor(inode)); 295 256 if (!intf) 296 257 return -ENODEV; 258 + 297 259 hid = usb_get_intfdata(intf); 298 260 hiddev = hid->hiddev; 299 261 300 - if (!(list = vzalloc(sizeof(struct hiddev_list)))) 301 - return -ENOMEM; 302 - mutex_init(&list->thread_lock); 303 - list->hiddev = hiddev; 304 - file->private_data = list; 305 - 306 - /* 307 - * no need for locking because the USB major number 308 - * is shared which usbcore guards against disconnect 309 - */ 310 - if (list->hiddev->exist) { 311 - if (!list->hiddev->open++) { 312 - res = hid_hw_open(hiddev->hid); 313 - if (res < 0) 314 - goto bail; 315 - } 316 - } else { 317 - res = -ENODEV; 318 - goto bail; 319 - } 320 - 321 - 
spin_lock_irq(&list->hiddev->list_lock); 322 - list_add_tail(&list->node, &hiddev->list); 323 - spin_unlock_irq(&list->hiddev->list_lock); 324 - 325 262 mutex_lock(&hiddev->existancelock); 326 - /* 327 - * recheck exist with existance lock held to 328 - * avoid opening a disconnected device 329 - */ 330 - if (!list->hiddev->exist) { 331 - res = -ENODEV; 332 - goto bail_unlock; 333 - } 334 - if (!list->hiddev->open++) 335 - if (list->hiddev->exist) { 336 - struct hid_device *hid = hiddev->hid; 337 - res = hid_hw_power(hid, PM_HINT_FULLON); 338 - if (res < 0) 339 - goto bail_unlock; 340 - res = hid_hw_open(hid); 341 - if (res < 0) 342 - goto bail_normal_power; 343 - } 344 - mutex_unlock(&hiddev->existancelock); 345 - return 0; 346 - bail_normal_power: 347 - hid_hw_power(hid, PM_HINT_NORMAL); 348 - bail_unlock: 263 + res = hiddev->exist ? __hiddev_open(hiddev, file) : -ENODEV; 349 264 mutex_unlock(&hiddev->existancelock); 350 265 351 - spin_lock_irq(&list->hiddev->list_lock); 352 - list_del(&list->node); 353 - spin_unlock_irq(&list->hiddev->list_lock); 354 - bail: 355 - file->private_data = NULL; 356 - vfree(list); 357 266 return res; 358 267 } 359 268
+4 -2
drivers/hid/wacom_wac.c
··· 2096 2096 (hdev->product == 0x34d || hdev->product == 0x34e || /* MobileStudio Pro */ 2097 2097 hdev->product == 0x357 || hdev->product == 0x358 || /* Intuos Pro 2 */ 2098 2098 hdev->product == 0x392 || /* Intuos Pro 2 */ 2099 - hdev->product == 0x398 || hdev->product == 0x399)) { /* MobileStudio Pro */ 2099 + hdev->product == 0x398 || hdev->product == 0x399 || /* MobileStudio Pro */ 2100 + hdev->product == 0x3AA)) { /* MobileStudio Pro */ 2100 2101 value = (field->logical_maximum - value); 2101 2102 2102 2103 if (hdev->product == 0x357 || hdev->product == 0x358 || 2103 2104 hdev->product == 0x392) 2104 2105 value = wacom_offset_rotation(input, usage, value, 3, 16); 2105 2106 else if (hdev->product == 0x34d || hdev->product == 0x34e || 2106 - hdev->product == 0x398 || hdev->product == 0x399) 2107 + hdev->product == 0x398 || hdev->product == 0x399 || 2108 + hdev->product == 0x3AA) 2107 2109 value = wacom_offset_rotation(input, usage, value, 1, 2); 2108 2110 } 2109 2111 else {
+1 -1
drivers/irqchip/irq-sifive-plic.c
··· 256 256 * Skip contexts other than external interrupts for our 257 257 * privilege level. 258 258 */ 259 - if (parent.args[0] != IRQ_EXT) 259 + if (parent.args[0] != RV_IRQ_EXT) 260 260 continue; 261 261 262 262 hartid = plic_find_hart_id(parent.np);
+27 -13
drivers/media/cec/cec-adap.c
··· 380 380 } else { 381 381 list_del_init(&data->list); 382 382 if (!(data->msg.tx_status & CEC_TX_STATUS_OK)) 383 - data->adap->transmit_queue_sz--; 383 + if (!WARN_ON(!data->adap->transmit_queue_sz)) 384 + data->adap->transmit_queue_sz--; 384 385 } 385 386 386 387 if (data->msg.tx_status & CEC_TX_STATUS_OK) { ··· 433 432 * need to do anything special in that case. 434 433 */ 435 434 } 435 + /* 436 + * If something went wrong and this counter isn't what it should 437 + * be, then this will reset it back to 0. Warn if it is not 0, 438 + * since it indicates a bug, either in this framework or in a 439 + * CEC driver. 440 + */ 441 + if (WARN_ON(adap->transmit_queue_sz)) 442 + adap->transmit_queue_sz = 0; 436 443 } 437 444 438 445 /* ··· 465 456 bool timeout = false; 466 457 u8 attempts; 467 458 468 - if (adap->transmitting) { 459 + if (adap->transmit_in_progress) { 469 460 int err; 470 461 471 462 /* ··· 500 491 goto unlock; 501 492 } 502 493 503 - if (adap->transmitting && timeout) { 494 + if (adap->transmit_in_progress && timeout) { 504 495 /* 505 496 * If we timeout, then log that. Normally this does 506 497 * not happen and it is an indication of a faulty CEC ··· 509 500 * so much traffic on the bus that the adapter was 510 501 * unable to transmit for CEC_XFER_TIMEOUT_MS (2.1s). 511 502 */ 512 - pr_warn("cec-%s: message %*ph timed out\n", adap->name, 513 - adap->transmitting->msg.len, 514 - adap->transmitting->msg.msg); 503 + if (adap->transmitting) { 504 + pr_warn("cec-%s: message %*ph timed out\n", adap->name, 505 + adap->transmitting->msg.len, 506 + adap->transmitting->msg.msg); 507 + /* Just give up on this. */ 508 + cec_data_cancel(adap->transmitting, 509 + CEC_TX_STATUS_TIMEOUT); 510 + } else { 511 + pr_warn("cec-%s: transmit timed out\n", adap->name); 512 + } 515 513 adap->transmit_in_progress = false; 516 514 adap->tx_timeouts++; 517 - /* Just give up on this. 
*/ 518 - cec_data_cancel(adap->transmitting, 519 - CEC_TX_STATUS_TIMEOUT); 520 515 goto unlock; 521 516 } 522 517 ··· 535 522 data = list_first_entry(&adap->transmit_queue, 536 523 struct cec_data, list); 537 524 list_del_init(&data->list); 538 - adap->transmit_queue_sz--; 525 + if (!WARN_ON(!data->adap->transmit_queue_sz)) 526 + adap->transmit_queue_sz--; 539 527 540 528 /* Make this the current transmitting message */ 541 529 adap->transmitting = data; ··· 1099 1085 valid_la = false; 1100 1086 else if (!cec_msg_is_broadcast(msg) && !(dir_fl & DIRECTED)) 1101 1087 valid_la = false; 1102 - else if (cec_msg_is_broadcast(msg) && !(dir_fl & BCAST1_4)) 1088 + else if (cec_msg_is_broadcast(msg) && !(dir_fl & BCAST)) 1103 1089 valid_la = false; 1104 1090 else if (cec_msg_is_broadcast(msg) && 1105 - adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0 && 1106 - !(dir_fl & BCAST2_0)) 1091 + adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0 && 1092 + !(dir_fl & BCAST1_4)) 1107 1093 valid_la = false; 1108 1094 } 1109 1095 if (valid_la && min_len) {
+13 -4
drivers/media/usb/pulse8-cec/pulse8-cec.c
··· 116 116 unsigned int vers; 117 117 struct completion cmd_done; 118 118 struct work_struct work; 119 + u8 work_result; 119 120 struct delayed_work ping_eeprom_work; 120 121 struct cec_msg rx_msg; 121 122 u8 data[DATA_SIZE]; ··· 138 137 { 139 138 struct pulse8 *pulse8 = 140 139 container_of(work, struct pulse8, work); 140 + u8 result = pulse8->work_result; 141 141 142 - switch (pulse8->data[0] & 0x3f) { 142 + pulse8->work_result = 0; 143 + switch (result & 0x3f) { 143 144 case MSGCODE_FRAME_DATA: 144 145 cec_received_msg(pulse8->adap, &pulse8->rx_msg); 145 146 break; ··· 175 172 pulse8->escape = false; 176 173 } else if (data == MSGEND) { 177 174 struct cec_msg *msg = &pulse8->rx_msg; 175 + u8 msgcode = pulse8->buf[0]; 178 176 179 177 if (debug) 180 178 dev_info(pulse8->dev, "received: %*ph\n", 181 179 pulse8->idx, pulse8->buf); 182 - pulse8->data[0] = pulse8->buf[0]; 183 - switch (pulse8->buf[0] & 0x3f) { 180 + switch (msgcode & 0x3f) { 184 181 case MSGCODE_FRAME_START: 185 182 msg->len = 1; 186 183 msg->msg[0] = pulse8->buf[1]; ··· 189 186 if (msg->len == CEC_MAX_MSG_SIZE) 190 187 break; 191 188 msg->msg[msg->len++] = pulse8->buf[1]; 192 - if (pulse8->buf[0] & MSGCODE_FRAME_EOM) 189 + if (msgcode & MSGCODE_FRAME_EOM) { 190 + WARN_ON(pulse8->work_result); 191 + pulse8->work_result = msgcode; 193 192 schedule_work(&pulse8->work); 193 + break; 194 + } 194 195 break; 195 196 case MSGCODE_TRANSMIT_SUCCEEDED: 196 197 case MSGCODE_TRANSMIT_FAILED_LINE: 197 198 case MSGCODE_TRANSMIT_FAILED_ACK: 198 199 case MSGCODE_TRANSMIT_FAILED_TIMEOUT_DATA: 199 200 case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE: 201 + WARN_ON(pulse8->work_result); 202 + pulse8->work_result = msgcode; 200 203 schedule_work(&pulse8->work); 201 204 break; 202 205 case MSGCODE_HIGH_ERROR:
+53 -10
drivers/net/can/m_can/tcan4x5x.c
··· 102 102 #define TCAN4X5X_MODE_NORMAL BIT(7) 103 103 104 104 #define TCAN4X5X_DISABLE_WAKE_MSK (BIT(31) | BIT(30)) 105 + #define TCAN4X5X_DISABLE_INH_MSK BIT(9) 105 106 106 107 #define TCAN4X5X_SW_RESET BIT(2) 107 108 ··· 165 164 usleep_range(5, 50); 166 165 gpiod_set_value(priv->device_wake_gpio, 1); 167 166 } 167 + } 168 + 169 + static int tcan4x5x_reset(struct tcan4x5x_priv *priv) 170 + { 171 + int ret = 0; 172 + 173 + if (priv->reset_gpio) { 174 + gpiod_set_value(priv->reset_gpio, 1); 175 + 176 + /* tpulse_width minimum 30us */ 177 + usleep_range(30, 100); 178 + gpiod_set_value(priv->reset_gpio, 0); 179 + } else { 180 + ret = regmap_write(priv->regmap, TCAN4X5X_CONFIG, 181 + TCAN4X5X_SW_RESET); 182 + if (ret) 183 + return ret; 184 + } 185 + 186 + usleep_range(700, 1000); 187 + 188 + return ret; 168 189 } 169 190 170 191 static int regmap_spi_gather_write(void *context, const void *reg, ··· 371 348 TCAN4X5X_DISABLE_WAKE_MSK, 0x00); 372 349 } 373 350 351 + static int tcan4x5x_disable_state(struct m_can_classdev *cdev) 352 + { 353 + struct tcan4x5x_priv *tcan4x5x = cdev->device_data; 354 + 355 + return regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG, 356 + TCAN4X5X_DISABLE_INH_MSK, 0x01); 357 + } 358 + 374 359 static int tcan4x5x_parse_config(struct m_can_classdev *cdev) 375 360 { 376 361 struct tcan4x5x_priv *tcan4x5x = cdev->device_data; 362 + int ret; 377 363 378 364 tcan4x5x->device_wake_gpio = devm_gpiod_get(cdev->dev, "device-wake", 379 365 GPIOD_OUT_HIGH); 380 366 if (IS_ERR(tcan4x5x->device_wake_gpio)) { 381 - if (PTR_ERR(tcan4x5x->power) == -EPROBE_DEFER) 367 + if (PTR_ERR(tcan4x5x->device_wake_gpio) == -EPROBE_DEFER) 382 368 return -EPROBE_DEFER; 383 369 384 370 tcan4x5x_disable_wake(cdev); ··· 398 366 if (IS_ERR(tcan4x5x->reset_gpio)) 399 367 tcan4x5x->reset_gpio = NULL; 400 368 401 - usleep_range(700, 1000); 369 + ret = tcan4x5x_reset(tcan4x5x); 370 + if (ret) 371 + return ret; 402 372 403 373 tcan4x5x->device_state_gpio = 
devm_gpiod_get_optional(cdev->dev, 404 374 "device-state", 405 375 GPIOD_IN); 406 - if (IS_ERR(tcan4x5x->device_state_gpio)) 376 + if (IS_ERR(tcan4x5x->device_state_gpio)) { 407 377 tcan4x5x->device_state_gpio = NULL; 408 - 409 - tcan4x5x->power = devm_regulator_get_optional(cdev->dev, 410 - "vsup"); 411 - if (PTR_ERR(tcan4x5x->power) == -EPROBE_DEFER) 412 - return -EPROBE_DEFER; 378 + tcan4x5x_disable_state(cdev); 379 + } 413 380 414 381 return 0; 415 382 } ··· 442 411 priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL); 443 412 if (!priv) 444 413 return -ENOMEM; 414 + 415 + priv->power = devm_regulator_get_optional(&spi->dev, "vsup"); 416 + if (PTR_ERR(priv->power) == -EPROBE_DEFER) 417 + return -EPROBE_DEFER; 418 + else 419 + priv->power = NULL; 445 420 446 421 mcan_class->device_data = priv; 447 422 ··· 488 451 priv->regmap = devm_regmap_init(&spi->dev, &tcan4x5x_bus, 489 452 &spi->dev, &tcan4x5x_regmap); 490 453 491 - ret = tcan4x5x_parse_config(mcan_class); 454 + ret = tcan4x5x_power_enable(priv->power, 1); 492 455 if (ret) 493 456 goto out_clk; 494 457 495 - tcan4x5x_power_enable(priv->power, 1); 458 + ret = tcan4x5x_parse_config(mcan_class); 459 + if (ret) 460 + goto out_power; 461 + 462 + ret = tcan4x5x_init(mcan_class); 463 + if (ret) 464 + goto out_power; 496 465 497 466 ret = m_can_class_register(mcan_class); 498 467 if (ret)
+10 -11
drivers/net/can/mscan/mscan.c
··· 381 381 struct net_device *dev = napi->dev; 382 382 struct mscan_regs __iomem *regs = priv->reg_base; 383 383 struct net_device_stats *stats = &dev->stats; 384 - int npackets = 0; 385 - int ret = 1; 384 + int work_done = 0; 386 385 struct sk_buff *skb; 387 386 struct can_frame *frame; 388 387 u8 canrflg; 389 388 390 - while (npackets < quota) { 389 + while (work_done < quota) { 391 390 canrflg = in_8(&regs->canrflg); 392 391 if (!(canrflg & (MSCAN_RXF | MSCAN_ERR_IF))) 393 392 break; ··· 407 408 408 409 stats->rx_packets++; 409 410 stats->rx_bytes += frame->can_dlc; 410 - npackets++; 411 + work_done++; 411 412 netif_receive_skb(skb); 412 413 } 413 414 414 - if (!(in_8(&regs->canrflg) & (MSCAN_RXF | MSCAN_ERR_IF))) { 415 - napi_complete(&priv->napi); 416 - clear_bit(F_RX_PROGRESS, &priv->flags); 417 - if (priv->can.state < CAN_STATE_BUS_OFF) 418 - out_8(&regs->canrier, priv->shadow_canrier); 419 - ret = 0; 415 + if (work_done < quota) { 416 + if (likely(napi_complete_done(&priv->napi, work_done))) { 417 + clear_bit(F_RX_PROGRESS, &priv->flags); 418 + if (priv->can.state < CAN_STATE_BUS_OFF) 419 + out_8(&regs->canrier, priv->shadow_canrier); 420 + } 420 421 } 421 - return ret; 422 + return work_done; 422 423 } 423 424 424 425 static irqreturn_t mscan_isr(int irq, void *dev_id)
+2 -2
drivers/net/can/usb/gs_usb.c
··· 918 918 GS_USB_BREQ_HOST_FORMAT, 919 919 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, 920 920 1, 921 - intf->altsetting[0].desc.bInterfaceNumber, 921 + intf->cur_altsetting->desc.bInterfaceNumber, 922 922 hconf, 923 923 sizeof(*hconf), 924 924 1000); ··· 941 941 GS_USB_BREQ_DEVICE_CONFIG, 942 942 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, 943 943 1, 944 - intf->altsetting[0].desc.bInterfaceNumber, 944 + intf->cur_altsetting->desc.bInterfaceNumber, 945 945 dconf, 946 946 sizeof(*dconf), 947 947 1000);
+1 -1
drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
··· 1590 1590 struct usb_endpoint_descriptor *ep; 1591 1591 int i; 1592 1592 1593 - iface_desc = &dev->intf->altsetting[0]; 1593 + iface_desc = dev->intf->cur_altsetting; 1594 1594 1595 1595 for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { 1596 1596 ep = &iface_desc->endpoint[i].desc;
+1 -1
drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
··· 1310 1310 struct usb_endpoint_descriptor *endpoint; 1311 1311 int i; 1312 1312 1313 - iface_desc = &dev->intf->altsetting[0]; 1313 + iface_desc = dev->intf->cur_altsetting; 1314 1314 1315 1315 for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { 1316 1316 endpoint = &iface_desc->endpoint[i].desc;
+5
drivers/net/dsa/mv88e6xxx/global1.c
··· 360 360 { 361 361 u16 ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST; 362 362 363 + /* Use the default high priority for management frames sent to 364 + * the CPU. 365 + */ 366 + port |= MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI; 367 + 363 368 return mv88e6390_g1_monitor_write(chip, ptr, port); 364 369 } 365 370
+1
drivers/net/dsa/mv88e6xxx/global1.h
··· 211 211 #define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_INGRESS_DEST 0x2000 212 212 #define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_EGRESS_DEST 0x2100 213 213 #define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST 0x3000 214 + #define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI 0x00e0 214 215 #define MV88E6390_G1_MONITOR_MGMT_CTL_DATA_MASK 0x00ff 215 216 216 217 /* Offset 0x1C: Global Control 2 */
+6 -6
drivers/net/dsa/mv88e6xxx/port.c
··· 393 393 } 394 394 395 395 static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port, 396 - phy_interface_t mode) 396 + phy_interface_t mode, bool force) 397 397 { 398 398 u8 lane; 399 399 u16 cmode; ··· 427 427 cmode = 0; 428 428 } 429 429 430 - /* cmode doesn't change, nothing to do for us */ 431 - if (cmode == chip->ports[port].cmode) 430 + /* cmode doesn't change, nothing to do for us unless forced */ 431 + if (cmode == chip->ports[port].cmode && !force) 432 432 return 0; 433 433 434 434 lane = mv88e6xxx_serdes_get_lane(chip, port); ··· 484 484 if (port != 9 && port != 10) 485 485 return -EOPNOTSUPP; 486 486 487 - return mv88e6xxx_port_set_cmode(chip, port, mode); 487 + return mv88e6xxx_port_set_cmode(chip, port, mode, false); 488 488 } 489 489 490 490 int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port, ··· 504 504 break; 505 505 } 506 506 507 - return mv88e6xxx_port_set_cmode(chip, port, mode); 507 + return mv88e6xxx_port_set_cmode(chip, port, mode, false); 508 508 } 509 509 510 510 static int mv88e6341_port_set_cmode_writable(struct mv88e6xxx_chip *chip, ··· 555 555 if (err) 556 556 return err; 557 557 558 - return mv88e6xxx_port_set_cmode(chip, port, mode); 558 + return mv88e6xxx_port_set_cmode(chip, port, mode, true); 559 559 } 560 560 561 561 int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode)
+2 -2
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
··· 403 403 if (err < 0) 404 404 goto err_exit; 405 405 406 + aq_nic_set_loopback(self); 407 + 406 408 err = self->aq_hw_ops->hw_start(self->aq_hw); 407 409 if (err < 0) 408 410 goto err_exit; ··· 414 412 goto err_exit; 415 413 416 414 INIT_WORK(&self->service_task, aq_nic_service_task); 417 - 418 - aq_nic_set_loopback(self); 419 415 420 416 timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0); 421 417 aq_nic_service_timer_cb(&self->service_timer);
-3
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
··· 1525 1525 .rx_extract_ts = hw_atl_b0_rx_extract_ts, 1526 1526 .extract_hwts = hw_atl_b0_extract_hwts, 1527 1527 .hw_set_offload = hw_atl_b0_hw_offload_set, 1528 - .hw_get_hw_stats = hw_atl_utils_get_hw_stats, 1529 - .hw_get_fw_version = hw_atl_utils_get_fw_version, 1530 - .hw_set_offload = hw_atl_b0_hw_offload_set, 1531 1528 .hw_set_loopback = hw_atl_b0_set_loopback, 1532 1529 .hw_set_fc = hw_atl_b0_set_fc, 1533 1530 };
+1 -3
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
··· 667 667 u32 speed; 668 668 669 669 mpi_state = hw_atl_utils_mpi_get_state(self); 670 - speed = mpi_state & (FW2X_RATE_100M | FW2X_RATE_1G | 671 - FW2X_RATE_2G5 | FW2X_RATE_5G | 672 - FW2X_RATE_10G); 670 + speed = mpi_state >> HW_ATL_MPI_SPEED_SHIFT; 673 671 674 672 if (!speed) { 675 673 link_status->mbps = 0U;
+6 -3
drivers/net/ethernet/broadcom/b44.c
··· 1516 1516 int ethaddr_bytes = ETH_ALEN; 1517 1517 1518 1518 memset(ppattern + offset, 0xff, magicsync); 1519 - for (j = 0; j < magicsync; j++) 1520 - set_bit(len++, (unsigned long *) pmask); 1519 + for (j = 0; j < magicsync; j++) { 1520 + pmask[len >> 3] |= BIT(len & 7); 1521 + len++; 1522 + } 1521 1523 1522 1524 for (j = 0; j < B44_MAX_PATTERNS; j++) { 1523 1525 if ((B44_PATTERN_SIZE - len) >= ETH_ALEN) ··· 1531 1529 for (k = 0; k< ethaddr_bytes; k++) { 1532 1530 ppattern[offset + magicsync + 1533 1531 (j * ETH_ALEN) + k] = macaddr[k]; 1534 - set_bit(len++, (unsigned long *) pmask); 1532 + pmask[len >> 3] |= BIT(len & 7); 1533 + len++; 1535 1534 } 1536 1535 } 1537 1536 return len - 1;
+1 -3
drivers/net/ethernet/cadence/macb_main.c
··· 4127 4127 mgmt->rate = 0; 4128 4128 mgmt->hw.init = &init; 4129 4129 4130 - *tx_clk = clk_register(NULL, &mgmt->hw); 4130 + *tx_clk = devm_clk_register(&pdev->dev, &mgmt->hw); 4131 4131 if (IS_ERR(*tx_clk)) 4132 4132 return PTR_ERR(*tx_clk); 4133 4133 ··· 4455 4455 4456 4456 err_disable_clocks: 4457 4457 clk_disable_unprepare(tx_clk); 4458 - clk_unregister(tx_clk); 4459 4458 clk_disable_unprepare(hclk); 4460 4459 clk_disable_unprepare(pclk); 4461 4460 clk_disable_unprepare(rx_clk); ··· 4484 4485 pm_runtime_dont_use_autosuspend(&pdev->dev); 4485 4486 if (!pm_runtime_suspended(&pdev->dev)) { 4486 4487 clk_disable_unprepare(bp->tx_clk); 4487 - clk_unregister(bp->tx_clk); 4488 4488 clk_disable_unprepare(bp->hclk); 4489 4489 clk_disable_unprepare(bp->pclk); 4490 4490 clk_disable_unprepare(bp->rx_clk);
+9
drivers/net/ethernet/freescale/fec_main.c
··· 2199 2199 { 2200 2200 struct fec_enet_private *fep = netdev_priv(ndev); 2201 2201 u32 __iomem *theregs = (u32 __iomem *)fep->hwp; 2202 + struct device *dev = &fep->pdev->dev; 2202 2203 u32 *buf = (u32 *)regbuf; 2203 2204 u32 i, off; 2205 + int ret; 2206 + 2207 + ret = pm_runtime_get_sync(dev); 2208 + if (ret < 0) 2209 + return; 2204 2210 2205 2211 regs->version = fec_enet_register_version; 2206 2212 ··· 2222 2216 off >>= 2; 2223 2217 buf[off] = readl(&theregs[off]); 2224 2218 } 2219 + 2220 + pm_runtime_mark_last_busy(dev); 2221 + pm_runtime_put_autosuspend(dev); 2225 2222 } 2226 2223 2227 2224 static int fec_enet_get_ts_info(struct net_device *ndev,
-2
drivers/net/ethernet/google/gve/gve_rx.c
··· 418 418 rx->cnt = cnt; 419 419 rx->fill_cnt += work_done; 420 420 421 - /* restock desc ring slots */ 422 - dma_wmb(); /* Ensure descs are visible before ringing doorbell */ 423 421 gve_rx_write_doorbell(priv, rx); 424 422 return gve_rx_work_pending(rx); 425 423 }
-6
drivers/net/ethernet/google/gve/gve_tx.c
··· 487 487 * may have added descriptors without ringing the doorbell. 488 488 */ 489 489 490 - /* Ensure tx descs from a prior gve_tx are visible before 491 - * ringing doorbell. 492 - */ 493 - dma_wmb(); 494 490 gve_tx_put_doorbell(priv, tx->q_resources, tx->req); 495 491 return NETDEV_TX_BUSY; 496 492 } ··· 501 505 if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more()) 502 506 return NETDEV_TX_OK; 503 507 504 - /* Ensure tx descs are visible before ringing doorbell */ 505 - dma_wmb(); 506 508 gve_tx_put_doorbell(priv, tx->q_resources, tx->req); 507 509 return NETDEV_TX_OK; 508 510 }
+16
drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
··· 122 122 #endif 123 123 }; 124 124 125 + #define MLX5E_TTC_NUM_GROUPS 3 126 + #define MLX5E_TTC_GROUP1_SIZE (BIT(3) + MLX5E_NUM_TUNNEL_TT) 127 + #define MLX5E_TTC_GROUP2_SIZE BIT(1) 128 + #define MLX5E_TTC_GROUP3_SIZE BIT(0) 129 + #define MLX5E_TTC_TABLE_SIZE (MLX5E_TTC_GROUP1_SIZE +\ 130 + MLX5E_TTC_GROUP2_SIZE +\ 131 + MLX5E_TTC_GROUP3_SIZE) 132 + 133 + #define MLX5E_INNER_TTC_NUM_GROUPS 3 134 + #define MLX5E_INNER_TTC_GROUP1_SIZE BIT(3) 135 + #define MLX5E_INNER_TTC_GROUP2_SIZE BIT(1) 136 + #define MLX5E_INNER_TTC_GROUP3_SIZE BIT(0) 137 + #define MLX5E_INNER_TTC_TABLE_SIZE (MLX5E_INNER_TTC_GROUP1_SIZE +\ 138 + MLX5E_INNER_TTC_GROUP2_SIZE +\ 139 + MLX5E_INNER_TTC_GROUP3_SIZE) 140 + 125 141 #ifdef CONFIG_MLX5_EN_RXNFC 126 142 127 143 struct mlx5e_ethtool_table {
+4 -3
drivers/net/ethernet/mellanox/mlx5/core/en/health.c
··· 197 197 struct devlink_health_reporter *reporter, char *err_str, 198 198 struct mlx5e_err_ctx *err_ctx) 199 199 { 200 - if (!reporter) { 201 - netdev_err(priv->netdev, err_str); 200 + netdev_err(priv->netdev, err_str); 201 + 202 + if (!reporter) 202 203 return err_ctx->recover(&err_ctx->ctx); 203 - } 204 + 204 205 return devlink_health_report(reporter, err_str, err_ctx); 205 206 }
-16
drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
··· 904 904 return err; 905 905 } 906 906 907 - #define MLX5E_TTC_NUM_GROUPS 3 908 - #define MLX5E_TTC_GROUP1_SIZE (BIT(3) + MLX5E_NUM_TUNNEL_TT) 909 - #define MLX5E_TTC_GROUP2_SIZE BIT(1) 910 - #define MLX5E_TTC_GROUP3_SIZE BIT(0) 911 - #define MLX5E_TTC_TABLE_SIZE (MLX5E_TTC_GROUP1_SIZE +\ 912 - MLX5E_TTC_GROUP2_SIZE +\ 913 - MLX5E_TTC_GROUP3_SIZE) 914 - 915 - #define MLX5E_INNER_TTC_NUM_GROUPS 3 916 - #define MLX5E_INNER_TTC_GROUP1_SIZE BIT(3) 917 - #define MLX5E_INNER_TTC_GROUP2_SIZE BIT(1) 918 - #define MLX5E_INNER_TTC_GROUP3_SIZE BIT(0) 919 - #define MLX5E_INNER_TTC_TABLE_SIZE (MLX5E_INNER_TTC_GROUP1_SIZE +\ 920 - MLX5E_INNER_TTC_GROUP2_SIZE +\ 921 - MLX5E_INNER_TTC_GROUP3_SIZE) 922 - 923 907 static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc, 924 908 bool use_ipv) 925 909 {
+58 -2
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 592 592 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) 593 593 ttc_params->indir_tirn[tt] = hp->indir_tirn[tt]; 594 594 595 - ft_attr->max_fte = MLX5E_NUM_TT; 595 + ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE; 596 596 ft_attr->level = MLX5E_TC_TTC_FT_LEVEL; 597 597 ft_attr->prio = MLX5E_TC_PRIO; 598 598 } ··· 3003 3003 return kmemdup(tun_info, tun_size, GFP_KERNEL); 3004 3004 } 3005 3005 3006 + static bool is_duplicated_encap_entry(struct mlx5e_priv *priv, 3007 + struct mlx5e_tc_flow *flow, 3008 + int out_index, 3009 + struct mlx5e_encap_entry *e, 3010 + struct netlink_ext_ack *extack) 3011 + { 3012 + int i; 3013 + 3014 + for (i = 0; i < out_index; i++) { 3015 + if (flow->encaps[i].e != e) 3016 + continue; 3017 + NL_SET_ERR_MSG_MOD(extack, "can't duplicate encap action"); 3018 + netdev_err(priv->netdev, "can't duplicate encap action\n"); 3019 + return true; 3020 + } 3021 + 3022 + return false; 3023 + } 3024 + 3006 3025 static int mlx5e_attach_encap(struct mlx5e_priv *priv, 3007 3026 struct mlx5e_tc_flow *flow, 3008 3027 struct net_device *mirred_dev, ··· 3057 3038 3058 3039 /* must verify if encap is valid or not */ 3059 3040 if (e) { 3041 + /* Check that entry was not already attached to this flow */ 3042 + if (is_duplicated_encap_entry(priv, flow, out_index, e, extack)) { 3043 + err = -EOPNOTSUPP; 3044 + goto out_err; 3045 + } 3046 + 3060 3047 mutex_unlock(&esw->offloads.encap_tbl_lock); 3061 3048 wait_for_completion(&e->res_ready); 3062 3049 ··· 3249 3224 same_hw_devs(priv, netdev_priv(out_dev)); 3250 3225 } 3251 3226 3227 + static bool is_duplicated_output_device(struct net_device *dev, 3228 + struct net_device *out_dev, 3229 + int *ifindexes, int if_count, 3230 + struct netlink_ext_ack *extack) 3231 + { 3232 + int i; 3233 + 3234 + for (i = 0; i < if_count; i++) { 3235 + if (ifindexes[i] == out_dev->ifindex) { 3236 + NL_SET_ERR_MSG_MOD(extack, 3237 + "can't duplicate output to same device"); 3238 + netdev_err(dev, "can't duplicate output to same device: %s\n", 
3239 + out_dev->name); 3240 + return true; 3241 + } 3242 + } 3243 + 3244 + return false; 3245 + } 3246 + 3252 3247 static int parse_tc_fdb_actions(struct mlx5e_priv *priv, 3253 3248 struct flow_action *flow_action, 3254 3249 struct mlx5e_tc_flow *flow, ··· 3280 3235 struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr; 3281 3236 struct mlx5e_rep_priv *rpriv = priv->ppriv; 3282 3237 const struct ip_tunnel_info *info = NULL; 3238 + int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS]; 3283 3239 bool ft_flow = mlx5e_is_ft_flow(flow); 3284 3240 const struct flow_action_entry *act; 3241 + int err, i, if_count = 0; 3285 3242 bool encap = false; 3286 3243 u32 action = 0; 3287 - int err, i; 3288 3244 3289 3245 if (!flow_action_has_entries(flow_action)) 3290 3246 return -EINVAL; ··· 3361 3315 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; 3362 3316 struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH); 3363 3317 struct net_device *uplink_upper; 3318 + 3319 + if (is_duplicated_output_device(priv->netdev, 3320 + out_dev, 3321 + ifindexes, 3322 + if_count, 3323 + extack)) 3324 + return -EOPNOTSUPP; 3325 + 3326 + ifindexes[if_count] = out_dev->ifindex; 3327 + if_count++; 3364 3328 3365 3329 rcu_read_lock(); 3366 3330 uplink_upper =
+27 -67
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
··· 531 531 } 532 532 } 533 533 534 - static void del_sw_fte_rcu(struct rcu_head *head) 535 - { 536 - struct fs_fte *fte = container_of(head, struct fs_fte, rcu); 537 - struct mlx5_flow_steering *steering = get_steering(&fte->node); 538 - 539 - kmem_cache_free(steering->ftes_cache, fte); 540 - } 541 - 542 534 static void del_sw_fte(struct fs_node *node) 543 535 { 536 + struct mlx5_flow_steering *steering = get_steering(node); 544 537 struct mlx5_flow_group *fg; 545 538 struct fs_fte *fte; 546 539 int err; ··· 546 553 rhash_fte); 547 554 WARN_ON(err); 548 555 ida_simple_remove(&fg->fte_allocator, fte->index - fg->start_index); 549 - 550 - call_rcu(&fte->rcu, del_sw_fte_rcu); 556 + kmem_cache_free(steering->ftes_cache, fte); 551 557 } 552 558 553 559 static void del_hw_flow_group(struct fs_node *node) ··· 1625 1633 } 1626 1634 1627 1635 static struct fs_fte * 1628 - lookup_fte_for_write_locked(struct mlx5_flow_group *g, const u32 *match_value) 1636 + lookup_fte_locked(struct mlx5_flow_group *g, 1637 + const u32 *match_value, 1638 + bool take_write) 1629 1639 { 1630 1640 struct fs_fte *fte_tmp; 1631 1641 1632 - nested_down_write_ref_node(&g->node, FS_LOCK_PARENT); 1633 - 1634 - fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value, rhash_fte); 1635 - if (!fte_tmp || !tree_get_node(&fte_tmp->node)) { 1636 - fte_tmp = NULL; 1637 - goto out; 1638 - } 1639 - 1640 - if (!fte_tmp->node.active) { 1641 - tree_put_node(&fte_tmp->node, false); 1642 - fte_tmp = NULL; 1643 - goto out; 1644 - } 1645 - nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD); 1646 - 1647 - out: 1648 - up_write_ref_node(&g->node, false); 1649 - return fte_tmp; 1650 - } 1651 - 1652 - static struct fs_fte * 1653 - lookup_fte_for_read_locked(struct mlx5_flow_group *g, const u32 *match_value) 1654 - { 1655 - struct fs_fte *fte_tmp; 1656 - 1657 - if (!tree_get_node(&g->node)) 1658 - return NULL; 1659 - 1660 - rcu_read_lock(); 1661 - fte_tmp = rhashtable_lookup(&g->ftes_hash, match_value, 
rhash_fte); 1662 - if (!fte_tmp || !tree_get_node(&fte_tmp->node)) { 1663 - rcu_read_unlock(); 1664 - fte_tmp = NULL; 1665 - goto out; 1666 - } 1667 - rcu_read_unlock(); 1668 - 1669 - if (!fte_tmp->node.active) { 1670 - tree_put_node(&fte_tmp->node, false); 1671 - fte_tmp = NULL; 1672 - goto out; 1673 - } 1674 - 1675 - nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD); 1676 - 1677 - out: 1678 - tree_put_node(&g->node, false); 1679 - return fte_tmp; 1680 - } 1681 - 1682 - static struct fs_fte * 1683 - lookup_fte_locked(struct mlx5_flow_group *g, const u32 *match_value, bool write) 1684 - { 1685 - if (write) 1686 - return lookup_fte_for_write_locked(g, match_value); 1642 + if (take_write) 1643 + nested_down_write_ref_node(&g->node, FS_LOCK_PARENT); 1687 1644 else 1688 - return lookup_fte_for_read_locked(g, match_value); 1645 + nested_down_read_ref_node(&g->node, FS_LOCK_PARENT); 1646 + fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value, 1647 + rhash_fte); 1648 + if (!fte_tmp || !tree_get_node(&fte_tmp->node)) { 1649 + fte_tmp = NULL; 1650 + goto out; 1651 + } 1652 + if (!fte_tmp->node.active) { 1653 + tree_put_node(&fte_tmp->node, false); 1654 + fte_tmp = NULL; 1655 + goto out; 1656 + } 1657 + 1658 + nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD); 1659 + out: 1660 + if (take_write) 1661 + up_write_ref_node(&g->node, false); 1662 + else 1663 + up_read_ref_node(&g->node); 1664 + return fte_tmp; 1689 1665 } 1690 1666 1691 1667 static struct mlx5_flow_handle *
-1
drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
··· 203 203 enum fs_fte_status status; 204 204 struct mlx5_fc *counter; 205 205 struct rhash_head hash; 206 - struct rcu_head rcu; 207 206 int modify_mask; 208 207 }; 209 208
+9 -7
drivers/net/ethernet/mellanox/mlx5/core/main.c
··· 1193 1193 if (err) 1194 1194 goto err_load; 1195 1195 1196 + if (boot) { 1197 + err = mlx5_devlink_register(priv_to_devlink(dev), dev->device); 1198 + if (err) 1199 + goto err_devlink_reg; 1200 + } 1201 + 1196 1202 if (mlx5_device_registered(dev)) { 1197 1203 mlx5_attach_device(dev); 1198 1204 } else { ··· 1216 1210 return err; 1217 1211 1218 1212 err_reg_dev: 1213 + if (boot) 1214 + mlx5_devlink_unregister(priv_to_devlink(dev)); 1215 + err_devlink_reg: 1219 1216 mlx5_unload(dev); 1220 1217 err_load: 1221 1218 if (boot) ··· 1356 1347 1357 1348 request_module_nowait(MLX5_IB_MOD); 1358 1349 1359 - err = mlx5_devlink_register(devlink, &pdev->dev); 1360 - if (err) 1361 - goto clean_load; 1362 - 1363 1350 err = mlx5_crdump_enable(dev); 1364 1351 if (err) 1365 1352 dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err); 1366 1353 1367 1354 pci_save_state(pdev); 1368 1355 return 0; 1369 - 1370 - clean_load: 1371 - mlx5_unload_one(dev, true); 1372 1356 1373 1357 err_load_one: 1374 1358 mlx5_pci_close(dev);
+4 -1
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
··· 209 209 /* We need to copy the refcount since this ste 210 210 * may have been traversed several times 211 211 */ 212 - refcount_set(&new_ste->refcount, refcount_read(&cur_ste->refcount)); 212 + new_ste->refcount = cur_ste->refcount; 213 213 214 214 /* Link old STEs rule_mem list to the new ste */ 215 215 mlx5dr_rule_update_rule_member(cur_ste, new_ste); ··· 637 637 rule_mem = kvzalloc(sizeof(*rule_mem), GFP_KERNEL); 638 638 if (!rule_mem) 639 639 return -ENOMEM; 640 + 641 + INIT_LIST_HEAD(&rule_mem->list); 642 + INIT_LIST_HEAD(&rule_mem->use_ste_list); 640 643 641 644 rule_mem->ste = ste; 642 645 list_add_tail(&rule_mem->list, &nic_rule->rule_members_list);
+5 -5
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
··· 348 348 if (dst->next_htbl) 349 349 dst->next_htbl->pointing_ste = dst; 350 350 351 - refcount_set(&dst->refcount, refcount_read(&src->refcount)); 351 + dst->refcount = src->refcount; 352 352 353 353 INIT_LIST_HEAD(&dst->rule_list); 354 354 list_splice_tail_init(&src->rule_list, &dst->rule_list); ··· 565 565 566 566 bool mlx5dr_ste_not_used_ste(struct mlx5dr_ste *ste) 567 567 { 568 - return !refcount_read(&ste->refcount); 568 + return !ste->refcount; 569 569 } 570 570 571 571 /* Init one ste as a pattern for ste data array */ ··· 689 689 htbl->ste_arr = chunk->ste_arr; 690 690 htbl->hw_ste_arr = chunk->hw_ste_arr; 691 691 htbl->miss_list = chunk->miss_list; 692 - refcount_set(&htbl->refcount, 0); 692 + htbl->refcount = 0; 693 693 694 694 for (i = 0; i < chunk->num_of_entries; i++) { 695 695 struct mlx5dr_ste *ste = &htbl->ste_arr[i]; 696 696 697 697 ste->hw_ste = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED; 698 698 ste->htbl = htbl; 699 - refcount_set(&ste->refcount, 0); 699 + ste->refcount = 0; 700 700 INIT_LIST_HEAD(&ste->miss_list_node); 701 701 INIT_LIST_HEAD(&htbl->miss_list[i]); 702 702 INIT_LIST_HEAD(&ste->rule_list); ··· 713 713 714 714 int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl) 715 715 { 716 - if (refcount_read(&htbl->refcount)) 716 + if (htbl->refcount) 717 717 return -EBUSY; 718 718 719 719 mlx5dr_icm_free_chunk(htbl->chunk);
+8 -6
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
··· 123 123 struct mlx5dr_ste { 124 124 u8 *hw_ste; 125 125 /* refcount: indicates the num of rules that using this ste */ 126 - refcount_t refcount; 126 + u32 refcount; 127 127 128 128 /* attached to the miss_list head at each htbl entry */ 129 129 struct list_head miss_list_node; ··· 155 155 struct mlx5dr_ste_htbl { 156 156 u8 lu_type; 157 157 u16 byte_mask; 158 - refcount_t refcount; 158 + u32 refcount; 159 159 struct mlx5dr_icm_chunk *chunk; 160 160 struct mlx5dr_ste *ste_arr; 161 161 u8 *hw_ste_arr; ··· 206 206 207 207 static inline void mlx5dr_htbl_put(struct mlx5dr_ste_htbl *htbl) 208 208 { 209 - if (refcount_dec_and_test(&htbl->refcount)) 209 + htbl->refcount--; 210 + if (!htbl->refcount) 210 211 mlx5dr_ste_htbl_free(htbl); 211 212 } 212 213 213 214 static inline void mlx5dr_htbl_get(struct mlx5dr_ste_htbl *htbl) 214 215 { 215 - refcount_inc(&htbl->refcount); 216 + htbl->refcount++; 216 217 } 217 218 218 219 /* STE utils */ ··· 255 254 struct mlx5dr_matcher *matcher, 256 255 struct mlx5dr_matcher_rx_tx *nic_matcher) 257 256 { 258 - if (refcount_dec_and_test(&ste->refcount)) 257 + ste->refcount--; 258 + if (!ste->refcount) 259 259 mlx5dr_ste_free(ste, matcher, nic_matcher); 260 260 } 261 261 262 262 /* initial as 0, increased only when ste appears in a new rule */ 263 263 static inline void mlx5dr_ste_get(struct mlx5dr_ste *ste) 264 264 { 265 - refcount_inc(&ste->refcount); 265 + ste->refcount++; 266 266 } 267 267 268 268 void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste,
+7
drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
··· 767 767 mlxsw_sp_port->tclass_qdiscs[tclass_num].handle == child_handle) 768 768 return 0; 769 769 770 + if (!child_handle) { 771 + /* This is an invisible FIFO replacing the original Qdisc. 772 + * Ignore it--the original Qdisc's destroy will follow. 773 + */ 774 + return 0; 775 + } 776 + 770 777 /* See if the grafted qdisc is already offloaded on any tclass. If so, 771 778 * unoffload it. 772 779 */
+3
drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
··· 973 973 /* default */ 974 974 break; 975 975 case PHY_INTERFACE_MODE_RGMII: 976 + case PHY_INTERFACE_MODE_RGMII_ID: 977 + case PHY_INTERFACE_MODE_RGMII_RXID: 978 + case PHY_INTERFACE_MODE_RGMII_TXID: 976 979 reg |= SYSCON_EPIT | SYSCON_ETCS_INT_GMII; 977 980 break; 978 981 case PHY_INTERFACE_MODE_RMII:
+1 -1
drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
··· 44 44 * rate, which then uses the auto-reparenting feature of the 45 45 * clock driver, and enabling/disabling the clock. 46 46 */ 47 - if (gmac->interface == PHY_INTERFACE_MODE_RGMII) { 47 + if (phy_interface_mode_is_rgmii(gmac->interface)) { 48 48 clk_set_rate(gmac->tx_clk, SUN7I_GMAC_GMII_RGMII_RATE); 49 49 clk_prepare_enable(gmac->tx_clk); 50 50 gmac->clk_enabled = 1;
+32
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 106 106 static irqreturn_t stmmac_interrupt(int irq, void *dev_id); 107 107 108 108 #ifdef CONFIG_DEBUG_FS 109 + static const struct net_device_ops stmmac_netdev_ops; 109 110 static void stmmac_init_fs(struct net_device *dev); 110 111 static void stmmac_exit_fs(struct net_device *dev); 111 112 #endif ··· 4298 4297 } 4299 4298 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap); 4300 4299 4300 + /* Use network device events to rename debugfs file entries. 4301 + */ 4302 + static int stmmac_device_event(struct notifier_block *unused, 4303 + unsigned long event, void *ptr) 4304 + { 4305 + struct net_device *dev = netdev_notifier_info_to_dev(ptr); 4306 + struct stmmac_priv *priv = netdev_priv(dev); 4307 + 4308 + if (dev->netdev_ops != &stmmac_netdev_ops) 4309 + goto done; 4310 + 4311 + switch (event) { 4312 + case NETDEV_CHANGENAME: 4313 + if (priv->dbgfs_dir) 4314 + priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir, 4315 + priv->dbgfs_dir, 4316 + stmmac_fs_dir, 4317 + dev->name); 4318 + break; 4319 + } 4320 + done: 4321 + return NOTIFY_DONE; 4322 + } 4323 + 4324 + static struct notifier_block stmmac_notifier = { 4325 + .notifier_call = stmmac_device_event, 4326 + }; 4327 + 4301 4328 static void stmmac_init_fs(struct net_device *dev) 4302 4329 { 4303 4330 struct stmmac_priv *priv = netdev_priv(dev); ··· 4340 4311 /* Entry to report the DMA HW features */ 4341 4312 debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev, 4342 4313 &stmmac_dma_cap_fops); 4314 + 4315 + register_netdevice_notifier(&stmmac_notifier); 4343 4316 } 4344 4317 4345 4318 static void stmmac_exit_fs(struct net_device *dev) 4346 4319 { 4347 4320 struct stmmac_priv *priv = netdev_priv(dev); 4348 4321 4322 + unregister_netdevice_notifier(&stmmac_notifier); 4349 4323 debugfs_remove_recursive(priv->dbgfs_dir); 4350 4324 } 4351 4325 #endif /* CONFIG_DEBUG_FS */
+1 -1
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
··· 320 320 static int stmmac_dt_phy(struct plat_stmmacenet_data *plat, 321 321 struct device_node *np, struct device *dev) 322 322 { 323 - bool mdio = false; 323 + bool mdio = !of_phy_is_fixed_link(np); 324 324 static const struct of_device_id need_mdio_ids[] = { 325 325 { .compatible = "snps,dwc-qos-ethernet-4.10" }, 326 326 {},
+3 -2
drivers/net/gtp.c
··· 813 813 lock_sock(sock->sk); 814 814 if (sock->sk->sk_user_data) { 815 815 sk = ERR_PTR(-EBUSY); 816 - goto out_sock; 816 + goto out_rel_sock; 817 817 } 818 818 819 819 sk = sock->sk; ··· 826 826 827 827 setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg); 828 828 829 - out_sock: 829 + out_rel_sock: 830 830 release_sock(sock->sk); 831 + out_sock: 831 832 sockfd_put(sock); 832 833 return sk; 833 834 }
+1 -1
drivers/net/macvlan.c
··· 259 259 struct net_device *src, 260 260 enum macvlan_mode mode) 261 261 { 262 - const struct ethhdr *eth = eth_hdr(skb); 262 + const struct ethhdr *eth = skb_eth_hdr(skb); 263 263 const struct macvlan_dev *vlan; 264 264 struct sk_buff *nskb; 265 265 unsigned int i;
+3
drivers/net/phy/phylink.c
··· 572 572 struct sfp_bus *bus; 573 573 int ret; 574 574 575 + if (!fwnode) 576 + return 0; 577 + 575 578 bus = sfp_bus_find_fwnode(fwnode); 576 579 if (IS_ERR(bus)) { 577 580 ret = PTR_ERR(bus);
+3 -6
drivers/net/usb/lan78xx.c
··· 2724 2724 return 0; 2725 2725 } 2726 2726 2727 - static int lan78xx_linearize(struct sk_buff *skb) 2728 - { 2729 - return skb_linearize(skb); 2730 - } 2731 - 2732 2727 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev, 2733 2728 struct sk_buff *skb, gfp_t flags) 2734 2729 { ··· 2735 2740 return NULL; 2736 2741 } 2737 2742 2738 - if (lan78xx_linearize(skb) < 0) 2743 + if (skb_linearize(skb)) { 2744 + dev_kfree_skb_any(skb); 2739 2745 return NULL; 2746 + } 2740 2747 2741 2748 tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_; 2742 2749
+2 -2
drivers/net/vxlan.c
··· 2542 2542 ndst = &rt->dst; 2543 2543 skb_tunnel_check_pmtu(skb, ndst, VXLAN_HEADROOM); 2544 2544 2545 - tos = ip_tunnel_ecn_encap(tos, old_iph, skb); 2545 + tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb); 2546 2546 ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); 2547 2547 err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr), 2548 2548 vni, md, flags, udp_sum); ··· 2582 2582 2583 2583 skb_tunnel_check_pmtu(skb, ndst, VXLAN6_HEADROOM); 2584 2584 2585 - tos = ip_tunnel_ecn_encap(tos, old_iph, skb); 2585 + tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb); 2586 2586 ttl = ttl ? : ip6_dst_hoplimit(ndst); 2587 2587 skb_scrub_packet(skb, xnet); 2588 2588 err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr),
+1 -1
drivers/net/wan/sdla.c
··· 708 708 709 709 spin_lock_irqsave(&sdla_lock, flags); 710 710 SDLA_WINDOW(dev, addr); 711 - pbuf = (void *)(((int) dev->mem_start) + (addr & SDLA_ADDR_MASK)); 711 + pbuf = (void *)(dev->mem_start + (addr & SDLA_ADDR_MASK)); 712 712 __sdla_write(dev, pbuf->buf_addr, skb->data, skb->len); 713 713 SDLA_WINDOW(dev, addr); 714 714 pbuf->opp_flag = 1;
+1 -1
drivers/platform/mips/Kconfig
··· 18 18 19 19 config CPU_HWMON 20 20 tristate "Loongson-3 CPU HWMon Driver" 21 - depends on CONFIG_MACH_LOONGSON64 21 + depends on MACH_LOONGSON64 22 22 select HWMON 23 23 default y 24 24 help
+7 -4
drivers/regulator/axp20x-regulator.c
··· 413 413 int i; 414 414 415 415 for (i = 0; i < rate_count; i++) { 416 - if (ramp <= slew_rates[i]) 417 - cfg = AXP20X_DCDC2_LDO3_V_RAMP_LDO3_RATE(i); 418 - else 416 + if (ramp > slew_rates[i]) 419 417 break; 418 + 419 + if (id == AXP20X_DCDC2) 420 + cfg = AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_RATE(i); 421 + else 422 + cfg = AXP20X_DCDC2_LDO3_V_RAMP_LDO3_RATE(i); 420 423 } 421 424 422 425 if (cfg == 0xff) { ··· 608 605 AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO1_MASK), 609 606 AXP_DESC(AXP22X, ELDO2, "eldo2", "eldoin", 700, 3300, 100, 610 607 AXP22X_ELDO2_V_OUT, AXP22X_ELDO2_V_OUT_MASK, 611 - AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO1_MASK), 608 + AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO2_MASK), 612 609 AXP_DESC(AXP22X, ELDO3, "eldo3", "eldoin", 700, 3300, 100, 613 610 AXP22X_ELDO3_V_OUT, AXP22X_ELDO3_V_OUT_MASK, 614 611 AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO3_MASK),
-1
drivers/regulator/bd70528-regulator.c
··· 101 101 .set_voltage_sel = regulator_set_voltage_sel_regmap, 102 102 .get_voltage_sel = regulator_get_voltage_sel_regmap, 103 103 .set_voltage_time_sel = regulator_set_voltage_time_sel, 104 - .set_ramp_delay = bd70528_set_ramp_delay, 105 104 }; 106 105 107 106 static const struct regulator_ops bd70528_led_ops = {
+1 -14
drivers/rtc/rtc-mc146818-lib.c
··· 172 172 save_control = CMOS_READ(RTC_CONTROL); 173 173 CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); 174 174 save_freq_select = CMOS_READ(RTC_FREQ_SELECT); 175 - 176 - #ifdef CONFIG_X86 177 - if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD && 178 - boot_cpu_data.x86 == 0x17) || 179 - boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { 180 - CMOS_WRITE((save_freq_select & (~RTC_DIV_RESET2)), 181 - RTC_FREQ_SELECT); 182 - save_freq_select &= ~RTC_DIV_RESET2; 183 - } else 184 - CMOS_WRITE((save_freq_select | RTC_DIV_RESET2), 185 - RTC_FREQ_SELECT); 186 - #else 187 - CMOS_WRITE((save_freq_select | RTC_DIV_RESET2), RTC_FREQ_SELECT); 188 - #endif 175 + CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); 189 176 190 177 #ifdef CONFIG_MACH_DECSTATION 191 178 CMOS_WRITE(real_yrs, RTC_DEC_YEAR);
+25 -14
drivers/rtc/rtc-mt6397.c
··· 47 47 irqen = irqsta & ~RTC_IRQ_EN_AL; 48 48 mutex_lock(&rtc->lock); 49 49 if (regmap_write(rtc->regmap, rtc->addr_base + RTC_IRQ_EN, 50 - irqen) < 0) 50 + irqen) == 0) 51 51 mtk_rtc_write_trigger(rtc); 52 52 mutex_unlock(&rtc->lock); 53 53 ··· 169 169 alm->pending = !!(pdn2 & RTC_PDN2_PWRON_ALARM); 170 170 mutex_unlock(&rtc->lock); 171 171 172 - tm->tm_sec = data[RTC_OFFSET_SEC]; 173 - tm->tm_min = data[RTC_OFFSET_MIN]; 174 - tm->tm_hour = data[RTC_OFFSET_HOUR]; 175 - tm->tm_mday = data[RTC_OFFSET_DOM]; 176 - tm->tm_mon = data[RTC_OFFSET_MTH]; 177 - tm->tm_year = data[RTC_OFFSET_YEAR]; 172 + tm->tm_sec = data[RTC_OFFSET_SEC] & RTC_AL_SEC_MASK; 173 + tm->tm_min = data[RTC_OFFSET_MIN] & RTC_AL_MIN_MASK; 174 + tm->tm_hour = data[RTC_OFFSET_HOUR] & RTC_AL_HOU_MASK; 175 + tm->tm_mday = data[RTC_OFFSET_DOM] & RTC_AL_DOM_MASK; 176 + tm->tm_mon = data[RTC_OFFSET_MTH] & RTC_AL_MTH_MASK; 177 + tm->tm_year = data[RTC_OFFSET_YEAR] & RTC_AL_YEA_MASK; 178 178 179 179 tm->tm_year += RTC_MIN_YEAR_OFFSET; 180 180 tm->tm_mon--; ··· 195 195 tm->tm_year -= RTC_MIN_YEAR_OFFSET; 196 196 tm->tm_mon++; 197 197 198 - data[RTC_OFFSET_SEC] = tm->tm_sec; 199 - data[RTC_OFFSET_MIN] = tm->tm_min; 200 - data[RTC_OFFSET_HOUR] = tm->tm_hour; 201 - data[RTC_OFFSET_DOM] = tm->tm_mday; 202 - data[RTC_OFFSET_MTH] = tm->tm_mon; 203 - data[RTC_OFFSET_YEAR] = tm->tm_year; 204 - 205 198 mutex_lock(&rtc->lock); 199 + ret = regmap_bulk_read(rtc->regmap, rtc->addr_base + RTC_AL_SEC, 200 + data, RTC_OFFSET_COUNT); 201 + if (ret < 0) 202 + goto exit; 203 + 204 + data[RTC_OFFSET_SEC] = ((data[RTC_OFFSET_SEC] & ~(RTC_AL_SEC_MASK)) | 205 + (tm->tm_sec & RTC_AL_SEC_MASK)); 206 + data[RTC_OFFSET_MIN] = ((data[RTC_OFFSET_MIN] & ~(RTC_AL_MIN_MASK)) | 207 + (tm->tm_min & RTC_AL_MIN_MASK)); 208 + data[RTC_OFFSET_HOUR] = ((data[RTC_OFFSET_HOUR] & ~(RTC_AL_HOU_MASK)) | 209 + (tm->tm_hour & RTC_AL_HOU_MASK)); 210 + data[RTC_OFFSET_DOM] = ((data[RTC_OFFSET_DOM] & ~(RTC_AL_DOM_MASK)) | 211 + (tm->tm_mday & 
RTC_AL_DOM_MASK)); 212 + data[RTC_OFFSET_MTH] = ((data[RTC_OFFSET_MTH] & ~(RTC_AL_MTH_MASK)) | 213 + (tm->tm_mon & RTC_AL_MTH_MASK)); 214 + data[RTC_OFFSET_YEAR] = ((data[RTC_OFFSET_YEAR] & ~(RTC_AL_YEA_MASK)) | 215 + (tm->tm_year & RTC_AL_YEA_MASK)); 216 + 206 217 if (alm->enabled) { 207 218 ret = regmap_bulk_write(rtc->regmap, 208 219 rtc->addr_base + RTC_AL_SEC,
+16
drivers/rtc/rtc-sun6i.c
··· 379 379 CLK_OF_DECLARE_DRIVER(sun50i_h6_rtc_clk, "allwinner,sun50i-h6-rtc", 380 380 sun50i_h6_rtc_clk_init); 381 381 382 + /* 383 + * The R40 user manual is self-conflicting on whether the prescaler is 384 + * fixed or configurable. The clock diagram shows it as fixed, but there 385 + * is also a configurable divider in the RTC block. 386 + */ 387 + static const struct sun6i_rtc_clk_data sun8i_r40_rtc_data = { 388 + .rc_osc_rate = 16000000, 389 + .fixed_prescaler = 512, 390 + }; 391 + static void __init sun8i_r40_rtc_clk_init(struct device_node *node) 392 + { 393 + sun6i_rtc_clk_init(node, &sun8i_r40_rtc_data); 394 + } 395 + CLK_OF_DECLARE_DRIVER(sun8i_r40_rtc_clk, "allwinner,sun8i-r40-rtc", 396 + sun8i_r40_rtc_clk_init); 397 + 382 398 static const struct sun6i_rtc_clk_data sun8i_v3_rtc_data = { 383 399 .rc_osc_rate = 32000, 384 400 .has_out_clk = 1,
+12 -3
drivers/spi/spi-dw.c
··· 172 172 173 173 static void dw_writer(struct dw_spi *dws) 174 174 { 175 - u32 max = tx_max(dws); 175 + u32 max; 176 176 u16 txw = 0; 177 177 178 + spin_lock(&dws->buf_lock); 179 + max = tx_max(dws); 178 180 while (max--) { 179 181 /* Set the tx word if the transfer's original "tx" is not null */ 180 182 if (dws->tx_end - dws->len) { ··· 188 186 dw_write_io_reg(dws, DW_SPI_DR, txw); 189 187 dws->tx += dws->n_bytes; 190 188 } 189 + spin_unlock(&dws->buf_lock); 191 190 } 192 191 193 192 static void dw_reader(struct dw_spi *dws) 194 193 { 195 - u32 max = rx_max(dws); 194 + u32 max; 196 195 u16 rxw; 197 196 197 + spin_lock(&dws->buf_lock); 198 + max = rx_max(dws); 198 199 while (max--) { 199 200 rxw = dw_read_io_reg(dws, DW_SPI_DR); 200 201 /* Care rx only if the transfer's original "rx" is not null */ ··· 209 204 } 210 205 dws->rx += dws->n_bytes; 211 206 } 207 + spin_unlock(&dws->buf_lock); 212 208 } 213 209 214 210 static void int_error_stop(struct dw_spi *dws, const char *msg) ··· 282 276 { 283 277 struct dw_spi *dws = spi_controller_get_devdata(master); 284 278 struct chip_data *chip = spi_get_ctldata(spi); 279 + unsigned long flags; 285 280 u8 imask = 0; 286 281 u16 txlevel = 0; 287 282 u32 cr0; 288 283 int ret; 289 284 290 285 dws->dma_mapped = 0; 291 - 286 + spin_lock_irqsave(&dws->buf_lock, flags); 292 287 dws->tx = (void *)transfer->tx_buf; 293 288 dws->tx_end = dws->tx + transfer->len; 294 289 dws->rx = transfer->rx_buf; 295 290 dws->rx_end = dws->rx + transfer->len; 296 291 dws->len = transfer->len; 292 + spin_unlock_irqrestore(&dws->buf_lock, flags); 297 293 298 294 spi_enable_chip(dws, 0); 299 295 ··· 479 471 dws->type = SSI_MOTO_SPI; 480 472 dws->dma_inited = 0; 481 473 dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR); 474 + spin_lock_init(&dws->buf_lock); 482 475 483 476 spi_controller_set_devdata(master, dws); 484 477
+1
drivers/spi/spi-dw.h
··· 119 119 size_t len; 120 120 void *tx; 121 121 void *tx_end; 122 + spinlock_t buf_lock; 122 123 void *rx; 123 124 void *rx_end; 124 125 int dma_mapped;
+10 -14
drivers/spi/spi-fsl-dspi.c
··· 185 185 struct spi_transfer *cur_transfer; 186 186 struct spi_message *cur_msg; 187 187 struct chip_data *cur_chip; 188 + size_t progress; 188 189 size_t len; 189 190 const void *tx; 190 191 void *rx; ··· 587 586 dspi->tx_cmd |= SPI_PUSHR_CMD_CTCNT; 588 587 589 588 if (dspi->devtype_data->xspi_mode && dspi->bits_per_word > 16) { 590 - /* Write two TX FIFO entries first, and then the corresponding 591 - * CMD FIFO entry. 589 + /* Write the CMD FIFO entry first, and then the two 590 + * corresponding TX FIFO entries. 592 591 */ 593 592 u32 data = dspi_pop_tx(dspi); 594 593 595 - if (dspi->cur_chip->ctar_val & SPI_CTAR_LSBFE) { 596 - /* LSB */ 597 - tx_fifo_write(dspi, data & 0xFFFF); 598 - tx_fifo_write(dspi, data >> 16); 599 - } else { 600 - /* MSB */ 601 - tx_fifo_write(dspi, data >> 16); 602 - tx_fifo_write(dspi, data & 0xFFFF); 603 - } 604 594 cmd_fifo_write(dspi); 595 + tx_fifo_write(dspi, data & 0xFFFF); 596 + tx_fifo_write(dspi, data >> 16); 605 597 } else { 606 598 /* Write one entry to both TX FIFO and CMD FIFO 607 599 * simultaneously. ··· 652 658 u32 spi_tcr; 653 659 654 660 spi_take_timestamp_post(dspi->ctlr, dspi->cur_transfer, 655 - dspi->tx - dspi->bytes_per_word, !dspi->irq); 661 + dspi->progress, !dspi->irq); 656 662 657 663 /* Get transfer counter (in number of SPI transfers). It was 658 664 * reset to 0 when transfer(s) were started. 
··· 661 667 spi_tcnt = SPI_TCR_GET_TCNT(spi_tcr); 662 668 /* Update total number of bytes that were transferred */ 663 669 msg->actual_length += spi_tcnt * dspi->bytes_per_word; 670 + dspi->progress += spi_tcnt; 664 671 665 672 trans_mode = dspi->devtype_data->trans_mode; 666 673 if (trans_mode == DSPI_EOQ_MODE) ··· 674 679 return 0; 675 680 676 681 spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer, 677 - dspi->tx, !dspi->irq); 682 + dspi->progress, !dspi->irq); 678 683 679 684 if (trans_mode == DSPI_EOQ_MODE) 680 685 dspi_eoq_write(dspi); ··· 763 768 dspi->rx = transfer->rx_buf; 764 769 dspi->rx_end = dspi->rx + transfer->len; 765 770 dspi->len = transfer->len; 771 + dspi->progress = 0; 766 772 /* Validated transfer specific frame size (defaults applied) */ 767 773 dspi->bits_per_word = transfer->bits_per_word; 768 774 if (transfer->bits_per_word <= 8) ··· 785 789 SPI_CTARE_DTCP(1)); 786 790 787 791 spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer, 788 - dspi->tx, !dspi->irq); 792 + dspi->progress, !dspi->irq); 789 793 790 794 trans_mode = dspi->devtype_data->trans_mode; 791 795 switch (trans_mode) {
+19 -12
drivers/spi/spi-uniphier.c
··· 290 290 } 291 291 } 292 292 293 - static void uniphier_spi_fill_tx_fifo(struct uniphier_spi_priv *priv) 293 + static void uniphier_spi_set_fifo_threshold(struct uniphier_spi_priv *priv, 294 + unsigned int threshold) 294 295 { 295 - unsigned int fifo_threshold, fill_bytes; 296 296 u32 val; 297 297 298 - fifo_threshold = DIV_ROUND_UP(priv->rx_bytes, 299 - bytes_per_word(priv->bits_per_word)); 300 - fifo_threshold = min(fifo_threshold, SSI_FIFO_DEPTH); 301 - 302 - fill_bytes = fifo_threshold - (priv->rx_bytes - priv->tx_bytes); 303 - 304 - /* set fifo threshold */ 305 298 val = readl(priv->base + SSI_FC); 306 299 val &= ~(SSI_FC_TXFTH_MASK | SSI_FC_RXFTH_MASK); 307 - val |= FIELD_PREP(SSI_FC_TXFTH_MASK, fifo_threshold); 308 - val |= FIELD_PREP(SSI_FC_RXFTH_MASK, fifo_threshold); 300 + val |= FIELD_PREP(SSI_FC_TXFTH_MASK, SSI_FIFO_DEPTH - threshold); 301 + val |= FIELD_PREP(SSI_FC_RXFTH_MASK, threshold); 309 302 writel(val, priv->base + SSI_FC); 303 + } 310 304 311 - while (fill_bytes--) 305 + static void uniphier_spi_fill_tx_fifo(struct uniphier_spi_priv *priv) 306 + { 307 + unsigned int fifo_threshold, fill_words; 308 + unsigned int bpw = bytes_per_word(priv->bits_per_word); 309 + 310 + fifo_threshold = DIV_ROUND_UP(priv->rx_bytes, bpw); 311 + fifo_threshold = min(fifo_threshold, SSI_FIFO_DEPTH); 312 + 313 + uniphier_spi_set_fifo_threshold(priv, fifo_threshold); 314 + 315 + fill_words = fifo_threshold - 316 + DIV_ROUND_UP(priv->rx_bytes - priv->tx_bytes, bpw); 317 + 318 + while (fill_words--) 312 319 uniphier_spi_send(priv); 313 320 } 314 321
+8 -14
drivers/spi/spi.c
··· 1499 1499 * advances its @tx buffer pointer monotonically. 1500 1500 * @ctlr: Pointer to the spi_controller structure of the driver 1501 1501 * @xfer: Pointer to the transfer being timestamped 1502 - * @tx: Pointer to the current word within the xfer->tx_buf that the driver is 1503 - * preparing to transmit right now. 1502 + * @progress: How many words (not bytes) have been transferred so far 1504 1503 * @irqs_off: If true, will disable IRQs and preemption for the duration of the 1505 1504 * transfer, for less jitter in time measurement. Only compatible 1506 1505 * with PIO drivers. If true, must follow up with ··· 1509 1510 */ 1510 1511 void spi_take_timestamp_pre(struct spi_controller *ctlr, 1511 1512 struct spi_transfer *xfer, 1512 - const void *tx, bool irqs_off) 1513 + size_t progress, bool irqs_off) 1513 1514 { 1514 - u8 bytes_per_word = DIV_ROUND_UP(xfer->bits_per_word, 8); 1515 - 1516 1515 if (!xfer->ptp_sts) 1517 1516 return; 1518 1517 1519 1518 if (xfer->timestamped_pre) 1520 1519 return; 1521 1520 1522 - if (tx < (xfer->tx_buf + xfer->ptp_sts_word_pre * bytes_per_word)) 1521 + if (progress < xfer->ptp_sts_word_pre) 1523 1522 return; 1524 1523 1525 1524 /* Capture the resolution of the timestamp */ 1526 - xfer->ptp_sts_word_pre = (tx - xfer->tx_buf) / bytes_per_word; 1525 + xfer->ptp_sts_word_pre = progress; 1527 1526 1528 1527 xfer->timestamped_pre = true; 1529 1528 ··· 1543 1546 * timestamped. 1544 1547 * @ctlr: Pointer to the spi_controller structure of the driver 1545 1548 * @xfer: Pointer to the transfer being timestamped 1546 - * @tx: Pointer to the current word within the xfer->tx_buf that the driver has 1547 - * just transmitted. 1549 + * @progress: How many words (not bytes) have been transferred so far 1548 1550 * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU. 
1549 1551 */ 1550 1552 void spi_take_timestamp_post(struct spi_controller *ctlr, 1551 1553 struct spi_transfer *xfer, 1552 - const void *tx, bool irqs_off) 1554 + size_t progress, bool irqs_off) 1553 1555 { 1554 - u8 bytes_per_word = DIV_ROUND_UP(xfer->bits_per_word, 8); 1555 - 1556 1556 if (!xfer->ptp_sts) 1557 1557 return; 1558 1558 1559 1559 if (xfer->timestamped_post) 1560 1560 return; 1561 1561 1562 - if (tx < (xfer->tx_buf + xfer->ptp_sts_word_post * bytes_per_word)) 1562 + if (progress < xfer->ptp_sts_word_post) 1563 1563 return; 1564 1564 1565 1565 ptp_read_system_postts(xfer->ptp_sts); ··· 1567 1573 } 1568 1574 1569 1575 /* Capture the resolution of the timestamp */ 1570 - xfer->ptp_sts_word_post = (tx - xfer->tx_buf) / bytes_per_word; 1576 + xfer->ptp_sts_word_post = progress; 1571 1577 1572 1578 xfer->timestamped_post = true; 1573 1579 }
+1 -1
drivers/staging/media/ipu3/include/intel-ipu3.h
··· 449 449 __u16 reserved1; 450 450 __u32 bayer_sign; 451 451 __u8 bayer_nf; 452 - __u8 reserved2[3]; 452 + __u8 reserved2[7]; 453 453 } __attribute__((aligned(32))) __packed; 454 454 455 455 /**
+2
drivers/watchdog/Kconfig
··· 687 687 config MAX77620_WATCHDOG 688 688 tristate "Maxim Max77620 Watchdog Timer" 689 689 depends on MFD_MAX77620 || COMPILE_TEST 690 + select WATCHDOG_CORE 690 691 help 691 692 This is the driver for the Max77620 watchdog timer. 692 693 Say 'Y' here to enable the watchdog timer support for ··· 1445 1444 config TQMX86_WDT 1446 1445 tristate "TQ-Systems TQMX86 Watchdog Timer" 1447 1446 depends on X86 1447 + select WATCHDOG_CORE 1448 1448 help 1449 1449 This is the driver for the hardware watchdog timer in the TQMX86 IO 1450 1450 controller found on some of their ComExpress Modules.
+1 -1
drivers/watchdog/imx7ulp_wdt.c
··· 112 112 { 113 113 struct imx7ulp_wdt_device *wdt = watchdog_get_drvdata(wdog); 114 114 115 - imx7ulp_wdt_enable(wdt->base, true); 115 + imx7ulp_wdt_enable(wdog, true); 116 116 imx7ulp_wdt_set_timeout(&wdt->wdd, 1); 117 117 118 118 /* wait for wdog to fire */
+2 -2
drivers/watchdog/orion_wdt.c
··· 602 602 set_bit(WDOG_HW_RUNNING, &dev->wdt.status); 603 603 604 604 /* Request the IRQ only after the watchdog is disabled */ 605 - irq = platform_get_irq(pdev, 0); 605 + irq = platform_get_irq_optional(pdev, 0); 606 606 if (irq > 0) { 607 607 /* 608 608 * Not all supported platforms specify an interrupt for the ··· 617 617 } 618 618 619 619 /* Optional 2nd interrupt for pretimeout */ 620 - irq = platform_get_irq(pdev, 1); 620 + irq = platform_get_irq_optional(pdev, 1); 621 621 if (irq > 0) { 622 622 orion_wdt_info.options |= WDIOF_PRETIMEOUT; 623 623 ret = devm_request_irq(&pdev->dev, irq, orion_wdt_pre_irq,
+1
drivers/watchdog/rn5t618_wdt.c
··· 188 188 189 189 module_platform_driver(rn5t618_wdt_driver); 190 190 191 + MODULE_ALIAS("platform:rn5t618-wdt"); 191 192 MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>"); 192 193 MODULE_DESCRIPTION("RN5T618 watchdog driver"); 193 194 MODULE_LICENSE("GPL v2");
+1 -1
drivers/watchdog/w83627hf_wdt.c
··· 420 420 cr_wdt_csr = NCT6102D_WDT_CSR; 421 421 break; 422 422 case NCT6116_ID: 423 - ret = nct6102; 423 + ret = nct6116; 424 424 cr_wdt_timeout = NCT6102D_WDT_TIMEOUT; 425 425 cr_wdt_control = NCT6102D_WDT_CONTROL; 426 426 cr_wdt_csr = NCT6102D_WDT_CSR;
+6 -1
fs/btrfs/compression.c
··· 447 447 448 448 if (blkcg_css) { 449 449 bio->bi_opf |= REQ_CGROUP_PUNT; 450 - bio_associate_blkg_from_css(bio, blkcg_css); 450 + kthread_associate_blkcg(blkcg_css); 451 451 } 452 452 refcount_set(&cb->pending_bios, 1); 453 453 ··· 491 491 bio->bi_opf = REQ_OP_WRITE | write_flags; 492 492 bio->bi_private = cb; 493 493 bio->bi_end_io = end_compressed_bio_write; 494 + if (blkcg_css) 495 + bio->bi_opf |= REQ_CGROUP_PUNT; 494 496 bio_add_page(bio, page, PAGE_SIZE, 0); 495 497 } 496 498 if (bytes_left < PAGE_SIZE) { ··· 518 516 bio->bi_status = ret; 519 517 bio_endio(bio); 520 518 } 519 + 520 + if (blkcg_css) 521 + kthread_associate_blkcg(NULL); 521 522 522 523 return 0; 523 524 }
+3 -3
fs/btrfs/inode.c
··· 1479 1479 disk_num_bytes = 1480 1480 btrfs_file_extent_disk_num_bytes(leaf, fi); 1481 1481 /* 1482 - * If extent we got ends before our range starts, skip 1483 - * to next extent 1482 + * If the extent we got ends before our current offset, 1483 + * skip to the next extent. 1484 1484 */ 1485 - if (extent_end <= start) { 1485 + if (extent_end <= cur_offset) { 1486 1486 path->slots[0]++; 1487 1487 goto next_slot; 1488 1488 }
+1 -24
fs/buffer.c
··· 3034 3034 void guard_bio_eod(int op, struct bio *bio) 3035 3035 { 3036 3036 sector_t maxsector; 3037 - struct bio_vec *bvec = bio_last_bvec_all(bio); 3038 - unsigned truncated_bytes; 3039 3037 struct hd_struct *part; 3040 3038 3041 3039 rcu_read_lock(); ··· 3059 3061 if (likely((bio->bi_iter.bi_size >> 9) <= maxsector)) 3060 3062 return; 3061 3063 3062 - /* Uhhuh. We've got a bio that straddles the device size! */ 3063 - truncated_bytes = bio->bi_iter.bi_size - (maxsector << 9); 3064 - 3065 - /* 3066 - * The bio contains more than one segment which spans EOD, just return 3067 - * and let IO layer turn it into an EIO 3068 - */ 3069 - if (truncated_bytes > bvec->bv_len) 3070 - return; 3071 - 3072 - /* Truncate the bio.. */ 3073 - bio->bi_iter.bi_size -= truncated_bytes; 3074 - bvec->bv_len -= truncated_bytes; 3075 - 3076 - /* ..and clear the end of the buffer for reads */ 3077 - if (op == REQ_OP_READ) { 3078 - struct bio_vec bv; 3079 - 3080 - mp_bvec_last_segment(bvec, &bv); 3081 - zero_user(bv.bv_page, bv.bv_offset + bv.bv_len, 3082 - truncated_bytes); 3083 - } 3064 + bio_truncate(bio, maxsector << 9); 3084 3065 } 3085 3066 3086 3067 static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
+2
fs/direct-io.c
··· 39 39 #include <linux/atomic.h> 40 40 #include <linux/prefetch.h> 41 41 42 + #include "internal.h" 43 + 42 44 /* 43 45 * How many user pages to map in one call to get_user_pages(). This determines 44 46 * the size of a structure in the slab cache
+6 -1
fs/file.c
··· 960 960 return ksys_dup3(oldfd, newfd, 0); 961 961 } 962 962 963 - SYSCALL_DEFINE1(dup, unsigned int, fildes) 963 + int ksys_dup(unsigned int fildes) 964 964 { 965 965 int ret = -EBADF; 966 966 struct file *file = fget_raw(fildes); ··· 973 973 fput(file); 974 974 } 975 975 return ret; 976 + } 977 + 978 + SYSCALL_DEFINE1(dup, unsigned int, fildes) 979 + { 980 + return ksys_dup(fildes); 976 981 } 977 982 978 983 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
+3 -1
fs/hugetlbfs/inode.c
··· 1498 1498 /* other hstates are optional */ 1499 1499 i = 0; 1500 1500 for_each_hstate(h) { 1501 - if (i == default_hstate_idx) 1501 + if (i == default_hstate_idx) { 1502 + i++; 1502 1503 continue; 1504 + } 1503 1505 1504 1506 mnt = mount_one_hugetlbfs(h); 1505 1507 if (IS_ERR(mnt))
+1 -1
fs/namespace.c
··· 1728 1728 dentry->d_fsdata == &mntns_operations; 1729 1729 } 1730 1730 1731 - struct mnt_namespace *to_mnt_ns(struct ns_common *ns) 1731 + static struct mnt_namespace *to_mnt_ns(struct ns_common *ns) 1732 1732 { 1733 1733 return container_of(ns, struct mnt_namespace, ns); 1734 1734 }
+3
fs/nsfs.c
··· 3 3 #include <linux/pseudo_fs.h> 4 4 #include <linux/file.h> 5 5 #include <linux/fs.h> 6 + #include <linux/proc_fs.h> 6 7 #include <linux/proc_ns.h> 7 8 #include <linux/magic.h> 8 9 #include <linux/ktime.h> ··· 11 10 #include <linux/user_namespace.h> 12 11 #include <linux/nsfs.h> 13 12 #include <linux/uaccess.h> 13 + 14 + #include "internal.h" 14 15 15 16 static struct vfsmount *nsfs_mnt; 16 17
+1
fs/ocfs2/dlmglue.c
··· 3282 3282 3283 3283 debugfs_create_u32("locking_filter", 0600, osb->osb_debug_root, 3284 3284 &dlm_debug->d_filter_secs); 3285 + ocfs2_get_dlm_debug(dlm_debug); 3285 3286 } 3286 3287 3287 3288 static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb)
+8
fs/ocfs2/journal.c
··· 1066 1066 1067 1067 ocfs2_clear_journal_error(osb->sb, journal->j_journal, osb->slot_num); 1068 1068 1069 + if (replayed) { 1070 + jbd2_journal_lock_updates(journal->j_journal); 1071 + status = jbd2_journal_flush(journal->j_journal); 1072 + jbd2_journal_unlock_updates(journal->j_journal); 1073 + if (status < 0) 1074 + mlog_errno(status); 1075 + } 1076 + 1069 1077 status = ocfs2_journal_toggle_dirty(osb, 1, replayed); 1070 1078 if (status < 0) { 1071 1079 mlog_errno(status);
+5 -2
fs/posix_acl.c
··· 631 631 632 632 /** 633 633 * posix_acl_update_mode - update mode in set_acl 634 + * @inode: target inode 635 + * @mode_p: mode (pointer) for update 636 + * @acl: acl pointer 634 637 * 635 638 * Update the file mode when setting an ACL: compute the new file permission 636 639 * bits based on the ACL. In addition, if the ACL is equivalent to the new 637 - * file mode, set *acl to NULL to indicate that no ACL should be set. 640 + * file mode, set *@acl to NULL to indicate that no ACL should be set. 638 641 * 639 - * As with chmod, clear the setgit bit if the caller is not in the owning group 642 + * As with chmod, clear the setgid bit if the caller is not in the owning group 640 643 * or capable of CAP_FSETID (see inode_change_ok). 641 644 * 642 645 * Called from set_acl inode operations.
+13
fs/pstore/ram.c
··· 407 407 408 408 prz = cxt->dprzs[cxt->dump_write_cnt]; 409 409 410 + /* 411 + * Since this is a new crash dump, we need to reset the buffer in 412 + * case it still has an old dump present. Without this, the new dump 413 + * will get appended, which would seriously confuse anything trying 414 + * to check dump file contents. Specifically, ramoops_read_kmsg_hdr() 415 + * expects to find a dump header in the beginning of buffer data, so 416 + * we must to reset the buffer values, in order to ensure that the 417 + * header will be written to the beginning of the buffer. 418 + */ 419 + persistent_ram_zap(prz); 420 + 410 421 /* Build header and append record contents. */ 411 422 hlen = ramoops_write_kmsg_hdr(prz, record); 412 423 if (!hlen) ··· 588 577 dev_err(dev, "failed to request %s mem region (0x%zx@0x%llx): %d\n", 589 578 name, record_size, 590 579 (unsigned long long)*paddr, err); 580 + kfree(label); 591 581 592 582 while (i > 0) { 593 583 i--; ··· 634 622 635 623 dev_err(dev, "failed to request %s mem region (0x%zx@0x%llx): %d\n", 636 624 name, sz, (unsigned long long)*paddr, err); 625 + kfree(label); 637 626 return err; 638 627 } 639 628
+1
include/linux/bio.h
··· 470 470 gfp_t); 471 471 extern int bio_uncopy_user(struct bio *); 472 472 void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter); 473 + void bio_truncate(struct bio *bio, unsigned new_size); 473 474 474 475 static inline void zero_fill_bio(struct bio *bio) 475 476 {
+34
include/linux/can/dev.h
··· 18 18 #include <linux/can/error.h> 19 19 #include <linux/can/led.h> 20 20 #include <linux/can/netlink.h> 21 + #include <linux/can/skb.h> 21 22 #include <linux/netdevice.h> 22 23 23 24 /* ··· 92 91 #define get_can_dlc(i) (min_t(__u8, (i), CAN_MAX_DLC)) 93 92 #define get_canfd_dlc(i) (min_t(__u8, (i), CANFD_MAX_DLC)) 94 93 94 + /* Check for outgoing skbs that have not been created by the CAN subsystem */ 95 + static inline bool can_skb_headroom_valid(struct net_device *dev, 96 + struct sk_buff *skb) 97 + { 98 + /* af_packet creates a headroom of HH_DATA_MOD bytes which is fine */ 99 + if (WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct can_skb_priv))) 100 + return false; 101 + 102 + /* af_packet does not apply CAN skb specific settings */ 103 + if (skb->ip_summed == CHECKSUM_NONE) { 104 + /* init headroom */ 105 + can_skb_prv(skb)->ifindex = dev->ifindex; 106 + can_skb_prv(skb)->skbcnt = 0; 107 + 108 + skb->ip_summed = CHECKSUM_UNNECESSARY; 109 + 110 + /* preform proper loopback on capable devices */ 111 + if (dev->flags & IFF_ECHO) 112 + skb->pkt_type = PACKET_LOOPBACK; 113 + else 114 + skb->pkt_type = PACKET_HOST; 115 + 116 + skb_reset_mac_header(skb); 117 + skb_reset_network_header(skb); 118 + skb_reset_transport_header(skb); 119 + } 120 + 121 + return true; 122 + } 123 + 95 124 /* Drop a given socketbuffer if it does not contain a valid CAN frame. */ 96 125 static inline bool can_dropped_invalid_skb(struct net_device *dev, 97 126 struct sk_buff *skb) ··· 137 106 cfd->len > CANFD_MAX_DLEN)) 138 107 goto inval_skb; 139 108 } else 109 + goto inval_skb; 110 + 111 + if (!can_skb_headroom_valid(dev, skb)) 140 112 goto inval_skb; 141 113 142 114 return false;
+4 -1
include/linux/dmaengine.h
··· 1364 1364 static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx) 1365 1365 { 1366 1366 struct dma_slave_caps caps; 1367 + int ret; 1367 1368 1368 - dma_get_slave_caps(tx->chan, &caps); 1369 + ret = dma_get_slave_caps(tx->chan, &caps); 1370 + if (ret) 1371 + return ret; 1369 1372 1370 1373 if (caps.descriptor_reuse) { 1371 1374 tx->flags |= DMA_CTRL_REUSE;
+8
include/linux/if_ether.h
··· 24 24 return (struct ethhdr *)skb_mac_header(skb); 25 25 } 26 26 27 + /* Prefer this version in TX path, instead of 28 + * skb_reset_mac_header() + eth_hdr() 29 + */ 30 + static inline struct ethhdr *skb_eth_hdr(const struct sk_buff *skb) 31 + { 32 + return (struct ethhdr *)skb->data; 33 + } 34 + 27 35 static inline struct ethhdr *inner_eth_hdr(const struct sk_buff *skb) 28 36 { 29 37 return (struct ethhdr *)skb_inner_mac_header(skb);
-9
include/linux/kernel.h
··· 79 79 */ 80 80 #define round_down(x, y) ((x) & ~__round_mask(x, y)) 81 81 82 - /** 83 - * FIELD_SIZEOF - get the size of a struct's field 84 - * @t: the target struct 85 - * @f: the target struct's field 86 - * Return: the size of @f in the struct definition without having a 87 - * declared instance of @t. 88 - */ 89 - #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) 90 - 91 82 #define typeof_member(T, m) typeof(((T*)0)->m) 92 83 93 84 #define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
+5 -2
include/linux/memory_hotplug.h
··· 122 122 123 123 extern void arch_remove_memory(int nid, u64 start, u64 size, 124 124 struct vmem_altmap *altmap); 125 - extern void __remove_pages(struct zone *zone, unsigned long start_pfn, 126 - unsigned long nr_pages, struct vmem_altmap *altmap); 125 + extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages, 126 + struct vmem_altmap *altmap); 127 127 128 128 /* reasonably generic interface to expand the physical pages */ 129 129 extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, ··· 342 342 extern int add_memory_resource(int nid, struct resource *resource); 343 343 extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn, 344 344 unsigned long nr_pages, struct vmem_altmap *altmap); 345 + extern void remove_pfn_range_from_zone(struct zone *zone, 346 + unsigned long start_pfn, 347 + unsigned long nr_pages); 345 348 extern bool is_memblock_offlined(struct memory_block *mem); 346 349 extern int sparse_add_section(int nid, unsigned long pfn, 347 350 unsigned long nr_pages, struct vmem_altmap *altmap);
+8
include/linux/mfd/mt6397/rtc.h
··· 46 46 47 47 #define RTC_AL_SEC 0x0018 48 48 49 + #define RTC_AL_SEC_MASK 0x003f 50 + #define RTC_AL_MIN_MASK 0x003f 51 + #define RTC_AL_HOU_MASK 0x001f 52 + #define RTC_AL_DOM_MASK 0x001f 53 + #define RTC_AL_DOW_MASK 0x0007 54 + #define RTC_AL_MTH_MASK 0x000f 55 + #define RTC_AL_YEA_MASK 0x007f 56 + 49 57 #define RTC_PDN2 0x002e 50 58 #define RTC_PDN2_PWRON_ALARM BIT(4) 51 59
+2 -2
include/linux/spi/spi.h
··· 689 689 /* Helper calls for driver to timestamp transfer */ 690 690 void spi_take_timestamp_pre(struct spi_controller *ctlr, 691 691 struct spi_transfer *xfer, 692 - const void *tx, bool irqs_off); 692 + size_t progress, bool irqs_off); 693 693 void spi_take_timestamp_post(struct spi_controller *ctlr, 694 694 struct spi_transfer *xfer, 695 - const void *tx, bool irqs_off); 695 + size_t progress, bool irqs_off); 696 696 697 697 /* the spi driver core manages memory for the spi_controller classdev */ 698 698 extern struct spi_controller *__spi_alloc_controller(struct device *host,
+1
include/linux/syscalls.h
··· 1232 1232 */ 1233 1233 1234 1234 int ksys_umount(char __user *name, int flags); 1235 + int ksys_dup(unsigned int fildes); 1235 1236 int ksys_chroot(const char __user *filename); 1236 1237 ssize_t ksys_write(unsigned int fd, const char __user *buf, size_t count); 1237 1238 int ksys_chdir(const char __user *filename);
+6
include/net/netfilter/nf_flow_table.h
··· 106 106 }; 107 107 108 108 #define NF_FLOW_TIMEOUT (30 * HZ) 109 + #define nf_flowtable_time_stamp (u32)jiffies 110 + 111 + static inline __s32 nf_flow_timeout_delta(unsigned int timeout) 112 + { 113 + return (__s32)(timeout - nf_flowtable_time_stamp); 114 + } 109 115 110 116 struct nf_flow_route { 111 117 struct {
+4 -4
include/trace/events/preemptirq.h
··· 18 18 TP_ARGS(ip, parent_ip), 19 19 20 20 TP_STRUCT__entry( 21 - __field(u32, caller_offs) 22 - __field(u32, parent_offs) 21 + __field(s32, caller_offs) 22 + __field(s32, parent_offs) 23 23 ), 24 24 25 25 TP_fast_assign( 26 - __entry->caller_offs = (u32)(ip - (unsigned long)_stext); 27 - __entry->parent_offs = (u32)(parent_ip - (unsigned long)_stext); 26 + __entry->caller_offs = (s32)(ip - (unsigned long)_stext); 27 + __entry->parent_offs = (s32)(parent_ip - (unsigned long)_stext); 28 28 ), 29 29 30 30 TP_printk("caller=%pS parent=%pS",
+5 -5
include/uapi/linux/kcov.h
··· 9 9 * and the comment before kcov_remote_start() for usage details. 10 10 */ 11 11 struct kcov_remote_arg { 12 - unsigned int trace_mode; /* KCOV_TRACE_PC or KCOV_TRACE_CMP */ 13 - unsigned int area_size; /* Length of coverage buffer in words */ 14 - unsigned int num_handles; /* Size of handles array */ 15 - __u64 common_handle; 16 - __u64 handles[0]; 12 + __u32 trace_mode; /* KCOV_TRACE_PC or KCOV_TRACE_CMP */ 13 + __u32 area_size; /* Length of coverage buffer in words */ 14 + __u32 num_handles; /* Size of handles array */ 15 + __aligned_u64 common_handle; 16 + __aligned_u64 handles[0]; 17 17 }; 18 18 19 19 #define KCOV_REMOTE_MAX_HANDLES 0x100
+6 -20
init/main.c
··· 93 93 #include <linux/rodata_test.h> 94 94 #include <linux/jump_label.h> 95 95 #include <linux/mem_encrypt.h> 96 - #include <linux/file.h> 97 96 98 97 #include <asm/io.h> 99 98 #include <asm/bugs.h> ··· 1157 1158 1158 1159 void console_on_rootfs(void) 1159 1160 { 1160 - struct file *file; 1161 - unsigned int i; 1161 + /* Open the /dev/console as stdin, this should never fail */ 1162 + if (ksys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0) 1163 + pr_err("Warning: unable to open an initial console.\n"); 1162 1164 1163 - /* Open /dev/console in kernelspace, this should never fail */ 1164 - file = filp_open("/dev/console", O_RDWR, 0); 1165 - if (IS_ERR(file)) 1166 - goto err_out; 1167 - 1168 - /* create stdin/stdout/stderr, this should never fail */ 1169 - for (i = 0; i < 3; i++) { 1170 - if (f_dupfd(i, file, 0) != i) 1171 - goto err_out; 1172 - } 1173 - 1174 - return; 1175 - 1176 - err_out: 1177 - /* no panic -- this might not be fatal */ 1178 - pr_err("Warning: unable to open an initial console.\n"); 1179 - return; 1165 + /* create stdout/stderr */ 1166 + (void) ksys_dup(0); 1167 + (void) ksys_dup(0); 1180 1168 } 1181 1169 1182 1170 static noinline void __init kernel_init_freeable(void)
+9 -2
kernel/bpf/cgroup.c
··· 35 35 */ 36 36 static void cgroup_bpf_release(struct work_struct *work) 37 37 { 38 - struct cgroup *cgrp = container_of(work, struct cgroup, 39 - bpf.release_work); 38 + struct cgroup *p, *cgrp = container_of(work, struct cgroup, 39 + bpf.release_work); 40 40 enum bpf_cgroup_storage_type stype; 41 41 struct bpf_prog_array *old_array; 42 42 unsigned int type; ··· 64 64 } 65 65 66 66 mutex_unlock(&cgroup_mutex); 67 + 68 + for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p)) 69 + cgroup_bpf_put(p); 67 70 68 71 percpu_ref_exit(&cgrp->bpf.refcnt); 69 72 cgroup_put(cgrp); ··· 201 198 */ 202 199 #define NR ARRAY_SIZE(cgrp->bpf.effective) 203 200 struct bpf_prog_array *arrays[NR] = {}; 201 + struct cgroup *p; 204 202 int ret, i; 205 203 206 204 ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0, 207 205 GFP_KERNEL); 208 206 if (ret) 209 207 return ret; 208 + 209 + for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p)) 210 + cgroup_bpf_get(p); 210 211 211 212 for (i = 0; i < NR; i++) 212 213 INIT_LIST_HEAD(&cgrp->bpf.progs[i]);
+7 -2
kernel/bpf/verifier.c
··· 6264 6264 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) 6265 6265 { 6266 6266 struct bpf_reg_state *regs = cur_regs(env); 6267 + static const int ctx_reg = BPF_REG_6; 6267 6268 u8 mode = BPF_MODE(insn->code); 6268 6269 int i, err; 6269 6270 ··· 6298 6297 } 6299 6298 6300 6299 /* check whether implicit source operand (register R6) is readable */ 6301 - err = check_reg_arg(env, BPF_REG_6, SRC_OP); 6300 + err = check_reg_arg(env, ctx_reg, SRC_OP); 6302 6301 if (err) 6303 6302 return err; 6304 6303 ··· 6317 6316 return -EINVAL; 6318 6317 } 6319 6318 6320 - if (regs[BPF_REG_6].type != PTR_TO_CTX) { 6319 + if (regs[ctx_reg].type != PTR_TO_CTX) { 6321 6320 verbose(env, 6322 6321 "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n"); 6323 6322 return -EINVAL; ··· 6329 6328 if (err) 6330 6329 return err; 6331 6330 } 6331 + 6332 + err = check_ctx_reg(env, &regs[ctx_reg], ctx_reg); 6333 + if (err < 0) 6334 + return err; 6332 6335 6333 6336 /* reset caller saved regs to unreadable */ 6334 6337 for (i = 0; i < CALLER_SAVED_REGS; i++) {
+3 -3
kernel/cred.c
··· 223 223 new->magic = CRED_MAGIC; 224 224 #endif 225 225 226 - if (security_cred_alloc_blank(new, GFP_KERNEL) < 0) 226 + if (security_cred_alloc_blank(new, GFP_KERNEL_ACCOUNT) < 0) 227 227 goto error; 228 228 229 229 return new; ··· 282 282 new->security = NULL; 283 283 #endif 284 284 285 - if (security_prepare_creds(new, old, GFP_KERNEL) < 0) 285 + if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0) 286 286 goto error; 287 287 validate_creds(new); 288 288 return new; ··· 715 715 #ifdef CONFIG_SECURITY 716 716 new->security = NULL; 717 717 #endif 718 - if (security_prepare_creds(new, old, GFP_KERNEL) < 0) 718 + if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0) 719 719 goto error; 720 720 721 721 put_cred(old);
+8 -4
kernel/exit.c
··· 517 517 } 518 518 519 519 write_unlock_irq(&tasklist_lock); 520 - if (unlikely(pid_ns == &init_pid_ns)) { 521 - panic("Attempted to kill init! exitcode=0x%08x\n", 522 - father->signal->group_exit_code ?: father->exit_code); 523 - } 524 520 525 521 list_for_each_entry_safe(p, n, dead, ptrace_entry) { 526 522 list_del_init(&p->ptrace_entry); ··· 762 766 acct_update_integrals(tsk); 763 767 group_dead = atomic_dec_and_test(&tsk->signal->live); 764 768 if (group_dead) { 769 + /* 770 + * If the last thread of global init has exited, panic 771 + * immediately to get a useable coredump. 772 + */ 773 + if (unlikely(is_global_init(tsk))) 774 + panic("Attempted to kill init! exitcode=0x%08x\n", 775 + tsk->signal->group_exit_code ?: (int)code); 776 + 765 777 #ifdef CONFIG_POSIX_TIMERS 766 778 hrtimer_cancel(&tsk->signal->real_timer); 767 779 exit_itimers(tsk->signal);
+7
kernel/seccomp.c
··· 1026 1026 struct seccomp_notif unotif; 1027 1027 ssize_t ret; 1028 1028 1029 + /* Verify that we're not given garbage to keep struct extensible. */ 1030 + ret = check_zeroed_user(buf, sizeof(unotif)); 1031 + if (ret < 0) 1032 + return ret; 1033 + if (!ret) 1034 + return -EINVAL; 1035 + 1029 1036 memset(&unotif, 0, sizeof(unotif)); 1030 1037 1031 1038 ret = down_interruptible(&filter->notif->request);
+19 -11
kernel/taskstats.c
··· 554 554 static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk) 555 555 { 556 556 struct signal_struct *sig = tsk->signal; 557 - struct taskstats *stats; 557 + struct taskstats *stats_new, *stats; 558 558 559 - if (sig->stats || thread_group_empty(tsk)) 560 - goto ret; 559 + /* Pairs with smp_store_release() below. */ 560 + stats = smp_load_acquire(&sig->stats); 561 + if (stats || thread_group_empty(tsk)) 562 + return stats; 561 563 562 564 /* No problem if kmem_cache_zalloc() fails */ 563 - stats = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL); 565 + stats_new = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL); 564 566 565 567 spin_lock_irq(&tsk->sighand->siglock); 566 - if (!sig->stats) { 567 - sig->stats = stats; 568 - stats = NULL; 568 + stats = sig->stats; 569 + if (!stats) { 570 + /* 571 + * Pairs with smp_store_release() above and order the 572 + * kmem_cache_zalloc(). 573 + */ 574 + smp_store_release(&sig->stats, stats_new); 575 + stats = stats_new; 576 + stats_new = NULL; 569 577 } 570 578 spin_unlock_irq(&tsk->sighand->siglock); 571 579 572 - if (stats) 573 - kmem_cache_free(taskstats_cache, stats); 574 - ret: 575 - return sig->stats; 580 + if (stats_new) 581 + kmem_cache_free(taskstats_cache, stats_new); 582 + 583 + return stats; 576 584 } 577 585 578 586 /* Send pid data out on exit */
+14
kernel/trace/fgraph.c
··· 96 96 return 0; 97 97 } 98 98 99 + /* 100 + * Not all archs define MCOUNT_INSN_SIZE which is used to look for direct 101 + * functions. But those archs currently don't support direct functions 102 + * anyway, and ftrace_find_rec_direct() is just a stub for them. 103 + * Define MCOUNT_INSN_SIZE to keep those archs compiling. 104 + */ 105 + #ifndef MCOUNT_INSN_SIZE 106 + /* Make sure this only works without direct calls */ 107 + # ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS 108 + # error MCOUNT_INSN_SIZE not defined with direct calls enabled 109 + # endif 110 + # define MCOUNT_INSN_SIZE 0 111 + #endif 112 + 99 113 int function_graph_enter(unsigned long ret, unsigned long func, 100 114 unsigned long frame_pointer, unsigned long *retp) 101 115 {
+3 -3
kernel/trace/ftrace.c
··· 526 526 } 527 527 528 528 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 529 - avg = rec->time; 530 - do_div(avg, rec->counter); 529 + avg = div64_ul(rec->time, rec->counter); 531 530 if (tracing_thresh && (avg < tracing_thresh)) 532 531 goto out; 533 532 #endif ··· 552 553 * Divide only 1000 for ns^2 -> us^2 conversion. 553 554 * trace_print_graph_duration will divide 1000 again. 554 555 */ 555 - do_div(stddev, rec->counter * (rec->counter - 1) * 1000); 556 + stddev = div64_ul(stddev, 557 + rec->counter * (rec->counter - 1) * 1000); 556 558 } 557 559 558 560 trace_seq_init(&s);
+1 -1
kernel/trace/trace_events_inject.c
··· 195 195 unsigned long irq_flags; 196 196 void *entry = NULL; 197 197 int entry_size; 198 - u64 val; 198 + u64 val = 0; 199 199 int len; 200 200 201 201 entry = trace_alloc_entry(call, &entry_size);
+3 -1
kernel/trace/trace_sched_wakeup.c
··· 630 630 if (ret) { 631 631 pr_info("wakeup trace: Couldn't activate tracepoint" 632 632 " probe to kernel_sched_migrate_task\n"); 633 - return; 633 + goto fail_deprobe_sched_switch; 634 634 } 635 635 636 636 wakeup_reset(tr); ··· 648 648 printk(KERN_ERR "failed to start wakeup tracer\n"); 649 649 650 650 return; 651 + fail_deprobe_sched_switch: 652 + unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL); 651 653 fail_deprobe_wake_new: 652 654 unregister_trace_sched_wakeup_new(probe_wakeup, NULL); 653 655 fail_deprobe:
+1 -1
kernel/trace/trace_seq.c
··· 381 381 int prefix_type, int rowsize, int groupsize, 382 382 const void *buf, size_t len, bool ascii) 383 383 { 384 - unsigned int save_len = s->seq.len; 384 + unsigned int save_len = s->seq.len; 385 385 386 386 if (s->full) 387 387 return 0;
+5
kernel/trace/trace_stack.c
··· 283 283 local_irq_restore(flags); 284 284 } 285 285 286 + /* Some archs may not define MCOUNT_INSN_SIZE */ 287 + #ifndef MCOUNT_INSN_SIZE 288 + # define MCOUNT_INSN_SIZE 0 289 + #endif 290 + 286 291 static void 287 292 stack_trace_call(unsigned long ip, unsigned long parent_ip, 288 293 struct ftrace_ops *op, struct pt_regs *pt_regs)
+6 -2
mm/gup_benchmark.c
··· 26 26 unsigned long i, nr_pages, addr, next; 27 27 int nr; 28 28 struct page **pages; 29 + int ret = 0; 29 30 30 31 if (gup->size > ULONG_MAX) 31 32 return -EINVAL; ··· 64 63 NULL); 65 64 break; 66 65 default: 67 - return -1; 66 + kvfree(pages); 67 + ret = -EINVAL; 68 + goto out; 68 69 } 69 70 70 71 if (nr <= 0) ··· 88 85 gup->put_delta_usec = ktime_us_delta(end_time, start_time); 89 86 90 87 kvfree(pages); 91 - return 0; 88 + out: 89 + return ret; 92 90 } 93 91 94 92 static long gup_benchmark_ioctl(struct file *filep, unsigned int cmd,
+50 -1
mm/hugetlb.c
··· 27 27 #include <linux/swapops.h> 28 28 #include <linux/jhash.h> 29 29 #include <linux/numa.h> 30 + #include <linux/llist.h> 30 31 31 32 #include <asm/page.h> 32 33 #include <asm/pgtable.h> ··· 1137 1136 page[2].mapping = NULL; 1138 1137 } 1139 1138 1140 - void free_huge_page(struct page *page) 1139 + static void __free_huge_page(struct page *page) 1141 1140 { 1142 1141 /* 1143 1142 * Can't pass hstate in here because it is called from the ··· 1198 1197 enqueue_huge_page(h, page); 1199 1198 } 1200 1199 spin_unlock(&hugetlb_lock); 1200 + } 1201 + 1202 + /* 1203 + * As free_huge_page() can be called from a non-task context, we have 1204 + * to defer the actual freeing in a workqueue to prevent potential 1205 + * hugetlb_lock deadlock. 1206 + * 1207 + * free_hpage_workfn() locklessly retrieves the linked list of pages to 1208 + * be freed and frees them one-by-one. As the page->mapping pointer is 1209 + * going to be cleared in __free_huge_page() anyway, it is reused as the 1210 + * llist_node structure of a lockless linked list of huge pages to be freed. 1211 + */ 1212 + static LLIST_HEAD(hpage_freelist); 1213 + 1214 + static void free_hpage_workfn(struct work_struct *work) 1215 + { 1216 + struct llist_node *node; 1217 + struct page *page; 1218 + 1219 + node = llist_del_all(&hpage_freelist); 1220 + 1221 + while (node) { 1222 + page = container_of((struct address_space **)node, 1223 + struct page, mapping); 1224 + node = node->next; 1225 + __free_huge_page(page); 1226 + } 1227 + } 1228 + static DECLARE_WORK(free_hpage_work, free_hpage_workfn); 1229 + 1230 + void free_huge_page(struct page *page) 1231 + { 1232 + /* 1233 + * Defer freeing if in non-task context to avoid hugetlb_lock deadlock. 1234 + */ 1235 + if (!in_task()) { 1236 + /* 1237 + * Only call schedule_work() if hpage_freelist is previously 1238 + * empty. Otherwise, schedule_work() had been called but the 1239 + * workfn hasn't retrieved the list yet. 
1240 + */ 1241 + if (llist_add((struct llist_node *)&page->mapping, 1242 + &hpage_freelist)) 1243 + schedule_work(&free_hpage_work); 1244 + return; 1245 + } 1246 + 1247 + __free_huge_page(page); 1201 1248 } 1202 1249 1203 1250 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
+16 -15
mm/memory_hotplug.c
··· 483 483 pgdat->node_spanned_pages = node_end_pfn - node_start_pfn; 484 484 } 485 485 486 - static void __remove_zone(struct zone *zone, unsigned long start_pfn, 487 - unsigned long nr_pages) 486 + void __ref remove_pfn_range_from_zone(struct zone *zone, 487 + unsigned long start_pfn, 488 + unsigned long nr_pages) 488 489 { 489 490 struct pglist_data *pgdat = zone->zone_pgdat; 490 491 unsigned long flags; ··· 500 499 return; 501 500 #endif 502 501 502 + clear_zone_contiguous(zone); 503 + 503 504 pgdat_resize_lock(zone->zone_pgdat, &flags); 504 505 shrink_zone_span(zone, start_pfn, start_pfn + nr_pages); 505 506 update_pgdat_span(pgdat); 506 507 pgdat_resize_unlock(zone->zone_pgdat, &flags); 508 + 509 + set_zone_contiguous(zone); 507 510 } 508 511 509 - static void __remove_section(struct zone *zone, unsigned long pfn, 510 - unsigned long nr_pages, unsigned long map_offset, 511 - struct vmem_altmap *altmap) 512 + static void __remove_section(unsigned long pfn, unsigned long nr_pages, 513 + unsigned long map_offset, 514 + struct vmem_altmap *altmap) 512 515 { 513 516 struct mem_section *ms = __nr_to_section(pfn_to_section_nr(pfn)); 514 517 515 518 if (WARN_ON_ONCE(!valid_section(ms))) 516 519 return; 517 520 518 - __remove_zone(zone, pfn, nr_pages); 519 521 sparse_remove_section(ms, pfn, nr_pages, map_offset, altmap); 520 522 } 521 523 522 524 /** 523 - * __remove_pages() - remove sections of pages from a zone 524 - * @zone: zone from which pages need to be removed 525 + * __remove_pages() - remove sections of pages 525 526 * @pfn: starting pageframe (must be aligned to start of a section) 526 527 * @nr_pages: number of pages to remove (must be multiple of section size) 527 528 * @altmap: alternative device page map or %NULL if default memmap is used ··· 533 530 * sure that pages are marked reserved and zones are adjust properly by 534 531 * calling offline_pages(). 
535 532 */ 536 - void __remove_pages(struct zone *zone, unsigned long pfn, 537 - unsigned long nr_pages, struct vmem_altmap *altmap) 533 + void __remove_pages(unsigned long pfn, unsigned long nr_pages, 534 + struct vmem_altmap *altmap) 538 535 { 539 536 unsigned long map_offset = 0; 540 537 unsigned long nr, start_sec, end_sec; 541 538 542 539 map_offset = vmem_altmap_offset(altmap); 543 - 544 - clear_zone_contiguous(zone); 545 540 546 541 if (check_pfn_span(pfn, nr_pages, "remove")) 547 542 return; ··· 552 551 cond_resched(); 553 552 pfns = min(nr_pages, PAGES_PER_SECTION 554 553 - (pfn & ~PAGE_SECTION_MASK)); 555 - __remove_section(zone, pfn, pfns, map_offset, altmap); 554 + __remove_section(pfn, pfns, map_offset, altmap); 556 555 pfn += pfns; 557 556 nr_pages -= pfns; 558 557 map_offset = 0; 559 558 } 560 - 561 - set_zone_contiguous(zone); 562 559 } 563 560 564 561 int set_online_page_callback(online_page_callback_t callback) ··· 868 869 (unsigned long long) pfn << PAGE_SHIFT, 869 870 (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1); 870 871 memory_notify(MEM_CANCEL_ONLINE, &arg); 872 + remove_pfn_range_from_zone(zone, pfn, nr_pages); 871 873 mem_hotplug_done(); 872 874 return ret; 873 875 } ··· 1628 1628 writeback_set_ratelimit(); 1629 1629 1630 1630 memory_notify(MEM_OFFLINE, &arg); 1631 + remove_pfn_range_from_zone(zone, start_pfn, nr_pages); 1631 1632 mem_hotplug_done(); 1632 1633 return 0; 1633 1634
+1 -1
mm/memremap.c
··· 120 120 121 121 mem_hotplug_begin(); 122 122 if (pgmap->type == MEMORY_DEVICE_PRIVATE) { 123 - __remove_pages(page_zone(first_page), PHYS_PFN(res->start), 123 + __remove_pages(PHYS_PFN(res->start), 124 124 PHYS_PFN(resource_size(res)), NULL); 125 125 } else { 126 126 arch_remove_memory(nid, res->start, resource_size(res),
+17 -6
mm/migrate.c
··· 1512 1512 /* 1513 1513 * Resolves the given address to a struct page, isolates it from the LRU and 1514 1514 * puts it to the given pagelist. 1515 - * Returns -errno if the page cannot be found/isolated or 0 when it has been 1516 - * queued or the page doesn't need to be migrated because it is already on 1517 - * the target node 1515 + * Returns: 1516 + * errno - if the page cannot be found/isolated 1517 + * 0 - when it doesn't have to be migrated because it is already on the 1518 + * target node 1519 + * 1 - when it has been queued 1518 1520 */ 1519 1521 static int add_page_for_migration(struct mm_struct *mm, unsigned long addr, 1520 1522 int node, struct list_head *pagelist, bool migrate_all) ··· 1555 1553 if (PageHuge(page)) { 1556 1554 if (PageHead(page)) { 1557 1555 isolate_huge_page(page, pagelist); 1558 - err = 0; 1556 + err = 1; 1559 1557 } 1560 1558 } else { 1561 1559 struct page *head; ··· 1565 1563 if (err) 1566 1564 goto out_putpage; 1567 1565 1568 - err = 0; 1566 + err = 1; 1569 1567 list_add_tail(&head->lru, pagelist); 1570 1568 mod_node_page_state(page_pgdat(head), 1571 1569 NR_ISOLATED_ANON + page_is_file_cache(head), ··· 1642 1640 */ 1643 1641 err = add_page_for_migration(mm, addr, current_node, 1644 1642 &pagelist, flags & MPOL_MF_MOVE_ALL); 1645 - if (!err) 1643 + 1644 + if (!err) { 1645 + /* The page is already on the target node */ 1646 + err = store_status(status, i, current_node, 1); 1647 + if (err) 1648 + goto out_flush; 1646 1649 continue; 1650 + } else if (err > 0) { 1651 + /* The page is successfully queued for migration */ 1652 + continue; 1653 + } 1647 1654 1648 1655 err = store_status(status, i, err, 1); 1649 1656 if (err)
-6
mm/mmap.c
··· 90 90 * MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes 91 91 * w: (no) no w: (no) no w: (copy) copy w: (no) no 92 92 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes 93 - * 94 - * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and 95 - * MAP_PRIVATE: 96 - * r: (no) no 97 - * w: (no) no 98 - * x: (yes) yes 99 93 */ 100 94 pgprot_t protection_map[16] __ro_after_init = { 101 95 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
+1 -1
mm/oom_kill.c
··· 890 890 K(get_mm_counter(mm, MM_FILEPAGES)), 891 891 K(get_mm_counter(mm, MM_SHMEMPAGES)), 892 892 from_kuid(&init_user_ns, task_uid(victim)), 893 - mm_pgtables_bytes(mm), victim->signal->oom_score_adj); 893 + mm_pgtables_bytes(mm) >> 10, victim->signal->oom_score_adj); 894 894 task_unlock(victim); 895 895 896 896 /*
+5
mm/zsmalloc.c
··· 2069 2069 zs_pool_dec_isolated(pool); 2070 2070 } 2071 2071 2072 + if (page_zone(newpage) != page_zone(page)) { 2073 + dec_zone_page_state(page, NR_ZSPAGES); 2074 + inc_zone_page_state(newpage, NR_ZSPAGES); 2075 + } 2076 + 2072 2077 reset_page(page); 2073 2078 put_page(page); 2074 2079 page = newpage;
+1
net/8021q/vlan.h
··· 126 126 void vlan_setup(struct net_device *dev); 127 127 int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack); 128 128 void unregister_vlan_dev(struct net_device *dev, struct list_head *head); 129 + void vlan_dev_uninit(struct net_device *dev); 129 130 bool vlan_dev_inherit_address(struct net_device *dev, 130 131 struct net_device *real_dev); 131 132
+2 -1
net/8021q/vlan_dev.c
··· 586 586 return 0; 587 587 } 588 588 589 - static void vlan_dev_uninit(struct net_device *dev) 589 + /* Note: this function might be called multiple times for the same device. */ 590 + void vlan_dev_uninit(struct net_device *dev) 590 591 { 591 592 struct vlan_priority_tci_mapping *pm; 592 593 struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+12 -7
net/8021q/vlan_netlink.c
··· 108 108 struct ifla_vlan_flags *flags; 109 109 struct ifla_vlan_qos_mapping *m; 110 110 struct nlattr *attr; 111 - int rem; 111 + int rem, err; 112 112 113 113 if (data[IFLA_VLAN_FLAGS]) { 114 114 flags = nla_data(data[IFLA_VLAN_FLAGS]); 115 - vlan_dev_change_flags(dev, flags->flags, flags->mask); 115 + err = vlan_dev_change_flags(dev, flags->flags, flags->mask); 116 + if (err) 117 + return err; 116 118 } 117 119 if (data[IFLA_VLAN_INGRESS_QOS]) { 118 120 nla_for_each_nested(attr, data[IFLA_VLAN_INGRESS_QOS], rem) { ··· 125 123 if (data[IFLA_VLAN_EGRESS_QOS]) { 126 124 nla_for_each_nested(attr, data[IFLA_VLAN_EGRESS_QOS], rem) { 127 125 m = nla_data(attr); 128 - vlan_dev_set_egress_priority(dev, m->from, m->to); 126 + err = vlan_dev_set_egress_priority(dev, m->from, m->to); 127 + if (err) 128 + return err; 129 129 } 130 130 } 131 131 return 0; ··· 183 179 return -EINVAL; 184 180 185 181 err = vlan_changelink(dev, tb, data, extack); 186 - if (err < 0) 187 - return err; 188 - 189 - return register_vlan_dev(dev, extack); 182 + if (!err) 183 + err = register_vlan_dev(dev, extack); 184 + if (err) 185 + vlan_dev_uninit(dev); 186 + return err; 190 187 } 191 188 192 189 static inline size_t vlan_qos_map_size(unsigned int n)
+16 -11
net/ipv4/netfilter/arp_tables.c
··· 384 384 return 1; 385 385 } 386 386 387 - static inline int check_target(struct arpt_entry *e, const char *name) 387 + static int check_target(struct arpt_entry *e, struct net *net, const char *name) 388 388 { 389 389 struct xt_entry_target *t = arpt_get_target(e); 390 390 struct xt_tgchk_param par = { 391 + .net = net, 391 392 .table = name, 392 393 .entryinfo = e, 393 394 .target = t->u.kernel.target, ··· 400 399 return xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false); 401 400 } 402 401 403 - static inline int 404 - find_check_entry(struct arpt_entry *e, const char *name, unsigned int size, 402 + static int 403 + find_check_entry(struct arpt_entry *e, struct net *net, const char *name, 404 + unsigned int size, 405 405 struct xt_percpu_counter_alloc_state *alloc_state) 406 406 { 407 407 struct xt_entry_target *t; ··· 421 419 } 422 420 t->u.kernel.target = target; 423 421 424 - ret = check_target(e, name); 422 + ret = check_target(e, net, name); 425 423 if (ret) 426 424 goto err; 427 425 return 0; ··· 514 512 /* Checks and translates the user-supplied table segment (held in 515 513 * newinfo). 
516 514 */ 517 - static int translate_table(struct xt_table_info *newinfo, void *entry0, 515 + static int translate_table(struct net *net, 516 + struct xt_table_info *newinfo, 517 + void *entry0, 518 518 const struct arpt_replace *repl) 519 519 { 520 520 struct xt_percpu_counter_alloc_state alloc_state = { 0 }; ··· 573 569 /* Finally, each sanity check must pass */ 574 570 i = 0; 575 571 xt_entry_foreach(iter, entry0, newinfo->size) { 576 - ret = find_check_entry(iter, repl->name, repl->size, 572 + ret = find_check_entry(iter, net, repl->name, repl->size, 577 573 &alloc_state); 578 574 if (ret != 0) 579 575 break; ··· 978 974 goto free_newinfo; 979 975 } 980 976 981 - ret = translate_table(newinfo, loc_cpu_entry, &tmp); 977 + ret = translate_table(net, newinfo, loc_cpu_entry, &tmp); 982 978 if (ret != 0) 983 979 goto free_newinfo; 984 980 ··· 1153 1149 } 1154 1150 } 1155 1151 1156 - static int translate_compat_table(struct xt_table_info **pinfo, 1152 + static int translate_compat_table(struct net *net, 1153 + struct xt_table_info **pinfo, 1157 1154 void **pentry0, 1158 1155 const struct compat_arpt_replace *compatr) 1159 1156 { ··· 1222 1217 repl.num_counters = 0; 1223 1218 repl.counters = NULL; 1224 1219 repl.size = newinfo->size; 1225 - ret = translate_table(newinfo, entry1, &repl); 1220 + ret = translate_table(net, newinfo, entry1, &repl); 1226 1221 if (ret) 1227 1222 goto free_newinfo; 1228 1223 ··· 1275 1270 goto free_newinfo; 1276 1271 } 1277 1272 1278 - ret = translate_compat_table(&newinfo, &loc_cpu_entry, &tmp); 1273 + ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp); 1279 1274 if (ret != 0) 1280 1275 goto free_newinfo; 1281 1276 ··· 1551 1546 loc_cpu_entry = newinfo->entries; 1552 1547 memcpy(loc_cpu_entry, repl->entries, repl->size); 1553 1548 1554 - ret = translate_table(newinfo, loc_cpu_entry, repl); 1549 + ret = translate_table(net, newinfo, loc_cpu_entry, repl); 1555 1550 if (ret != 0) 1556 1551 goto out_free; 1557 1552
+4 -1
net/ipv4/tcp_input.c
··· 1727 1727 } 1728 1728 1729 1729 /* Ignore very old stuff early */ 1730 - if (!after(sp[used_sacks].end_seq, prior_snd_una)) 1730 + if (!after(sp[used_sacks].end_seq, prior_snd_una)) { 1731 + if (i == 0) 1732 + first_sack_index = -1; 1731 1733 continue; 1734 + } 1732 1735 1733 1736 used_sacks++; 1734 1737 }
+2 -1
net/netfilter/ipset/ip_set_core.c
··· 1848 1848 struct ip_set *set; 1849 1849 struct nlattr *tb[IPSET_ATTR_ADT_MAX + 1] = {}; 1850 1850 int ret = 0; 1851 + u32 lineno; 1851 1852 1852 1853 if (unlikely(protocol_min_failed(attr) || 1853 1854 !attr[IPSET_ATTR_SETNAME] || ··· 1865 1864 return -IPSET_ERR_PROTOCOL; 1866 1865 1867 1866 rcu_read_lock_bh(); 1868 - ret = set->variant->uadt(set, tb, IPSET_TEST, NULL, 0, 0); 1867 + ret = set->variant->uadt(set, tb, IPSET_TEST, &lineno, 0, 0); 1869 1868 rcu_read_unlock_bh(); 1870 1869 /* Userspace can't trigger element to be re-added */ 1871 1870 if (ret == -EAGAIN)
+3
net/netfilter/nf_conntrack_proto_dccp.c
··· 677 677 unsigned int *timeouts = data; 678 678 int i; 679 679 680 + if (!timeouts) 681 + timeouts = dn->dccp_timeout; 682 + 680 683 /* set default DCCP timeouts. */ 681 684 for (i=0; i<CT_DCCP_MAX; i++) 682 685 timeouts[i] = dn->dccp_timeout[i];
+3
net/netfilter/nf_conntrack_proto_sctp.c
··· 594 594 struct nf_sctp_net *sn = nf_sctp_pernet(net); 595 595 int i; 596 596 597 + if (!timeouts) 598 + timeouts = sn->timeouts; 599 + 597 600 /* set default SCTP timeouts. */ 598 601 for (i=0; i<SCTP_CONNTRACK_MAX; i++) 599 602 timeouts[i] = sn->timeouts[i];
+1 -6
net/netfilter/nf_flow_table_core.c
··· 134 134 #define NF_FLOWTABLE_TCP_PICKUP_TIMEOUT (120 * HZ) 135 135 #define NF_FLOWTABLE_UDP_PICKUP_TIMEOUT (30 * HZ) 136 136 137 - static inline __s32 nf_flow_timeout_delta(unsigned int timeout) 138 - { 139 - return (__s32)(timeout - (u32)jiffies); 140 - } 141 - 142 137 static void flow_offload_fixup_ct_timeout(struct nf_conn *ct) 143 138 { 144 139 const struct nf_conntrack_l4proto *l4proto; ··· 227 232 { 228 233 int err; 229 234 230 - flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT; 235 + flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT; 231 236 232 237 err = rhashtable_insert_fast(&flow_table->rhashtable, 233 238 &flow->tuplehash[0].node,
+2 -2
net/netfilter/nf_flow_table_ip.c
··· 280 280 if (nf_flow_nat_ip(flow, skb, thoff, dir) < 0) 281 281 return NF_DROP; 282 282 283 - flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT; 283 + flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT; 284 284 iph = ip_hdr(skb); 285 285 ip_decrease_ttl(iph); 286 286 skb->tstamp = 0; ··· 509 509 if (nf_flow_nat_ipv6(flow, skb, dir) < 0) 510 510 return NF_DROP; 511 511 512 - flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT; 512 + flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT; 513 513 ip6h = ipv6_hdr(skb); 514 514 ip6h->hop_limit--; 515 515 skb->tstamp = 0;
+36 -14
net/netfilter/nf_flow_table_offload.c
··· 166 166 enum flow_offload_tuple_dir dir, 167 167 struct nf_flow_rule *flow_rule) 168 168 { 169 - const struct flow_offload_tuple *tuple = &flow->tuplehash[dir].tuple; 170 169 struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule); 171 170 struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule); 171 + const void *daddr = &flow->tuplehash[!dir].tuple.src_v4; 172 + const struct dst_entry *dst_cache; 173 + unsigned char ha[ETH_ALEN]; 172 174 struct neighbour *n; 173 175 u32 mask, val; 176 + u8 nud_state; 174 177 u16 val16; 175 178 176 - n = dst_neigh_lookup(tuple->dst_cache, &tuple->dst_v4); 179 + dst_cache = flow->tuplehash[dir].tuple.dst_cache; 180 + n = dst_neigh_lookup(dst_cache, daddr); 177 181 if (!n) 178 182 return -ENOENT; 179 183 184 + read_lock_bh(&n->lock); 185 + nud_state = n->nud_state; 186 + ether_addr_copy(ha, n->ha); 187 + read_unlock_bh(&n->lock); 188 + 189 + if (!(nud_state & NUD_VALID)) { 190 + neigh_release(n); 191 + return -ENOENT; 192 + } 193 + 180 194 mask = ~0xffffffff; 181 - memcpy(&val, n->ha, 4); 195 + memcpy(&val, ha, 4); 182 196 flow_offload_mangle(entry0, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 0, 183 197 &val, &mask); 184 198 185 199 mask = ~0x0000ffff; 186 - memcpy(&val16, n->ha + 4, 2); 200 + memcpy(&val16, ha + 4, 2); 187 201 val = val16; 188 202 flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4, 189 203 &val, &mask); ··· 349 335 struct nf_flow_rule *flow_rule) 350 336 { 351 337 struct flow_action_entry *entry = flow_action_entry_next(flow_rule); 352 - u32 mask = ~htonl(0xffff0000), port; 338 + u32 mask, port; 353 339 u32 offset; 354 340 355 341 switch (dir) { 356 342 case FLOW_OFFLOAD_DIR_ORIGINAL: 357 343 port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port); 358 344 offset = 0; /* offsetof(struct tcphdr, source); */ 345 + port = htonl(port << 16); 346 + mask = ~htonl(0xffff0000); 359 347 break; 360 348 case FLOW_OFFLOAD_DIR_REPLY: 361 349 port = 
ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port); 362 350 offset = 0; /* offsetof(struct tcphdr, dest); */ 351 + port = htonl(port); 352 + mask = ~htonl(0xffff); 363 353 break; 364 354 default: 365 355 return; 366 356 } 367 - port = htonl(port << 16); 357 + 368 358 flow_offload_mangle(entry, flow_offload_l4proto(flow), offset, 369 359 &port, &mask); 370 360 } ··· 379 361 struct nf_flow_rule *flow_rule) 380 362 { 381 363 struct flow_action_entry *entry = flow_action_entry_next(flow_rule); 382 - u32 mask = ~htonl(0xffff), port; 364 + u32 mask, port; 383 365 u32 offset; 384 366 385 367 switch (dir) { 386 368 case FLOW_OFFLOAD_DIR_ORIGINAL: 387 - port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port); 388 - offset = 0; /* offsetof(struct tcphdr, source); */ 369 + port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port); 370 + offset = 0; /* offsetof(struct tcphdr, dest); */ 371 + port = htonl(port); 372 + mask = ~htonl(0xffff); 389 373 break; 390 374 case FLOW_OFFLOAD_DIR_REPLY: 391 - port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port); 392 - offset = 0; /* offsetof(struct tcphdr, dest); */ 375 + port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port); 376 + offset = 0; /* offsetof(struct tcphdr, source); */ 377 + port = htonl(port << 16); 378 + mask = ~htonl(0xffff0000); 393 379 break; 394 380 default: 395 381 return; 396 382 } 397 - port = htonl(port); 383 + 398 384 flow_offload_mangle(entry, flow_offload_l4proto(flow), offset, 399 385 &port, &mask); 400 386 } ··· 781 759 struct flow_offload *flow) 782 760 { 783 761 struct flow_offload_work *offload; 784 - s64 delta; 762 + __s32 delta; 785 763 786 - delta = flow->timeout - jiffies; 764 + delta = nf_flow_timeout_delta(flow->timeout); 787 765 if ((delta >= (9 * NF_FLOW_TIMEOUT) / 10) || 788 766 flow->flags & FLOW_OFFLOAD_HW_DYING) 789 767 return;
+6 -2
net/netfilter/nf_tables_api.c
··· 5984 5984 return ERR_PTR(-ENOENT); 5985 5985 } 5986 5986 5987 + /* Only called from error and netdev event paths. */ 5987 5988 static void nft_unregister_flowtable_hook(struct net *net, 5988 5989 struct nft_flowtable *flowtable, 5989 5990 struct nft_hook *hook) ··· 6000 5999 struct nft_hook *hook; 6001 6000 6002 6001 list_for_each_entry(hook, &flowtable->hook_list, list) 6003 - nft_unregister_flowtable_hook(net, flowtable, hook); 6002 + nf_unregister_net_hook(net, &hook->ops); 6004 6003 } 6005 6004 6006 6005 static int nft_register_flowtable_net_hooks(struct net *net, ··· 6449 6448 { 6450 6449 struct nft_hook *hook, *next; 6451 6450 6451 + flowtable->data.type->free(&flowtable->data); 6452 6452 list_for_each_entry_safe(hook, next, &flowtable->hook_list, list) { 6453 + flowtable->data.type->setup(&flowtable->data, hook->ops.dev, 6454 + FLOW_BLOCK_UNBIND); 6453 6455 list_del_rcu(&hook->list); 6454 6456 kfree(hook); 6455 6457 } 6456 6458 kfree(flowtable->name); 6457 - flowtable->data.type->free(&flowtable->data); 6458 6459 module_put(flowtable->data.type->owner); 6459 6460 kfree(flowtable); 6460 6461 } ··· 6500 6497 if (hook->ops.dev != dev) 6501 6498 continue; 6502 6499 6500 + /* flow_offload_netdev_event() cleans up entries for us. */ 6503 6501 nft_unregister_flowtable_hook(dev_net(dev), flowtable, hook); 6504 6502 list_del_rcu(&hook->list); 6505 6503 kfree_rcu(hook, rcu);
-3
net/netfilter/nft_flow_offload.c
··· 200 200 static void nft_flow_offload_destroy(const struct nft_ctx *ctx, 201 201 const struct nft_expr *expr) 202 202 { 203 - struct nft_flow_offload *priv = nft_expr_priv(expr); 204 - 205 - priv->flowtable->use--; 206 203 nf_ct_netns_put(ctx->net, ctx->family); 207 204 } 208 205
+1 -1
net/qrtr/qrtr.c
··· 196 196 hdr->size = cpu_to_le32(len); 197 197 hdr->confirm_rx = 0; 198 198 199 - skb_put_padto(skb, ALIGN(len, 4)); 199 + skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr)); 200 200 201 201 mutex_lock(&node->ep_lock); 202 202 if (node->ep)
+1 -1
net/sched/sch_cake.c
··· 1768 1768 q->avg_window_begin)); 1769 1769 u64 b = q->avg_window_bytes * (u64)NSEC_PER_SEC; 1770 1770 1771 - do_div(b, window_interval); 1771 + b = div64_u64(b, window_interval); 1772 1772 q->avg_peak_bandwidth = 1773 1773 cake_ewma(q->avg_peak_bandwidth, b, 1774 1774 b > q->avg_peak_bandwidth ? 2 : 8);
+4 -2
net/sched/sch_fq.c
··· 786 786 if (tb[TCA_FQ_QUANTUM]) { 787 787 u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]); 788 788 789 - if (quantum > 0) 789 + if (quantum > 0 && quantum <= (1 << 20)) { 790 790 q->quantum = quantum; 791 - else 791 + } else { 792 + NL_SET_ERR_MSG_MOD(extack, "invalid quantum"); 792 793 err = -EINVAL; 794 + } 793 795 } 794 796 795 797 if (tb[TCA_FQ_INITIAL_QUANTUM])
+8 -2
net/sched/sch_prio.c
··· 292 292 struct tc_prio_qopt_offload graft_offload; 293 293 unsigned long band = arg - 1; 294 294 295 - if (new == NULL) 296 - new = &noop_qdisc; 295 + if (!new) { 296 + new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, 297 + TC_H_MAKE(sch->handle, arg), extack); 298 + if (!new) 299 + new = &noop_qdisc; 300 + else 301 + qdisc_hash_add(new, true); 302 + } 297 303 298 304 *old = qdisc_replace(sch, new, &q->queues[band]); 299 305
+18 -10
net/sctp/sm_sideeffect.c
··· 1359 1359 /* Generate an INIT ACK chunk. */ 1360 1360 new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC, 1361 1361 0); 1362 - if (!new_obj) 1363 - goto nomem; 1362 + if (!new_obj) { 1363 + error = -ENOMEM; 1364 + break; 1365 + } 1364 1366 1365 1367 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, 1366 1368 SCTP_CHUNK(new_obj)); ··· 1384 1382 if (!new_obj) { 1385 1383 if (cmd->obj.chunk) 1386 1384 sctp_chunk_free(cmd->obj.chunk); 1387 - goto nomem; 1385 + error = -ENOMEM; 1386 + break; 1388 1387 } 1389 1388 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, 1390 1389 SCTP_CHUNK(new_obj)); ··· 1432 1429 1433 1430 /* Generate a SHUTDOWN chunk. */ 1434 1431 new_obj = sctp_make_shutdown(asoc, chunk); 1435 - if (!new_obj) 1436 - goto nomem; 1432 + if (!new_obj) { 1433 + error = -ENOMEM; 1434 + break; 1435 + } 1437 1436 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, 1438 1437 SCTP_CHUNK(new_obj)); 1439 1438 break; ··· 1771 1766 break; 1772 1767 } 1773 1768 1774 - if (error) 1769 + if (error) { 1770 + cmd = sctp_next_cmd(commands); 1771 + while (cmd) { 1772 + if (cmd->verb == SCTP_CMD_REPLY) 1773 + sctp_chunk_free(cmd->obj.chunk); 1774 + cmd = sctp_next_cmd(commands); 1775 + } 1775 1776 break; 1777 + } 1776 1778 } 1777 1779 1778 - out: 1779 1780 /* If this is in response to a received chunk, wait until 1780 1781 * we are done with the packet to open the queue so that we don't 1781 1782 * send multiple packets in response to a single request. ··· 1796 1785 sp->data_ready_signalled = 0; 1797 1786 1798 1787 return error; 1799 - nomem: 1800 - error = -ENOMEM; 1801 - goto out; 1802 1788 }
+1 -3
net/tipc/Makefile
··· 9 9 core.o link.o discover.o msg.o \ 10 10 name_distr.o subscr.o monitor.o name_table.o net.o \ 11 11 netlink.o netlink_compat.o node.o socket.o eth_media.o \ 12 - topsrv.o socket.o group.o trace.o 12 + topsrv.o group.o trace.o 13 13 14 14 CFLAGS_trace.o += -I$(src) 15 15 ··· 20 20 21 21 22 22 obj-$(CONFIG_TIPC_DIAG) += diag.o 23 - 24 - tipc_diag-y := diag.o
+34 -23
net/tipc/socket.c
··· 287 287 * 288 288 * Caller must hold socket lock 289 289 */ 290 - static void tsk_rej_rx_queue(struct sock *sk) 290 + static void tsk_rej_rx_queue(struct sock *sk, int error) 291 291 { 292 292 struct sk_buff *skb; 293 293 294 294 while ((skb = __skb_dequeue(&sk->sk_receive_queue))) 295 - tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT); 295 + tipc_sk_respond(sk, skb, error); 296 296 } 297 297 298 298 static bool tipc_sk_connected(struct sock *sk) ··· 545 545 /* Remove pending SYN */ 546 546 __skb_queue_purge(&sk->sk_write_queue); 547 547 548 - /* Reject all unreceived messages, except on an active connection 549 - * (which disconnects locally & sends a 'FIN+' to peer). 550 - */ 551 - while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) { 552 - if (TIPC_SKB_CB(skb)->bytes_read) { 553 - kfree_skb(skb); 554 - continue; 555 - } 556 - if (!tipc_sk_type_connectionless(sk) && 557 - sk->sk_state != TIPC_DISCONNECTING) { 558 - tipc_set_sk_state(sk, TIPC_DISCONNECTING); 559 - tipc_node_remove_conn(net, dnode, tsk->portid); 560 - } 561 - tipc_sk_respond(sk, skb, error); 548 + /* Remove partially received buffer if any */ 549 + skb = skb_peek(&sk->sk_receive_queue); 550 + if (skb && TIPC_SKB_CB(skb)->bytes_read) { 551 + __skb_unlink(skb, &sk->sk_receive_queue); 552 + kfree_skb(skb); 562 553 } 563 554 564 - if (tipc_sk_type_connectionless(sk)) 555 + /* Reject all unreceived messages if connectionless */ 556 + if (tipc_sk_type_connectionless(sk)) { 557 + tsk_rej_rx_queue(sk, error); 565 558 return; 559 + } 566 560 567 - if (sk->sk_state != TIPC_DISCONNECTING) { 561 + switch (sk->sk_state) { 562 + case TIPC_CONNECTING: 563 + case TIPC_ESTABLISHED: 564 + tipc_set_sk_state(sk, TIPC_DISCONNECTING); 565 + tipc_node_remove_conn(net, dnode, tsk->portid); 566 + /* Send a FIN+/- to its peer */ 567 + skb = __skb_dequeue(&sk->sk_receive_queue); 568 + if (skb) { 569 + __skb_queue_purge(&sk->sk_receive_queue); 570 + tipc_sk_respond(sk, skb, error); 571 + break; 572 + } 568 573 skb = 
tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, 569 574 TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode, 570 575 tsk_own_node(tsk), tsk_peer_port(tsk), 571 576 tsk->portid, error); 572 577 if (skb) 573 578 tipc_node_xmit_skb(net, skb, dnode, tsk->portid); 574 - tipc_node_remove_conn(net, dnode, tsk->portid); 575 - tipc_set_sk_state(sk, TIPC_DISCONNECTING); 579 + break; 580 + case TIPC_LISTEN: 581 + /* Reject all SYN messages */ 582 + tsk_rej_rx_queue(sk, error); 583 + break; 584 + default: 585 + __skb_queue_purge(&sk->sk_receive_queue); 586 + break; 576 587 } 577 588 } 578 589 ··· 2443 2432 return sock_intr_errno(*timeo_p); 2444 2433 2445 2434 add_wait_queue(sk_sleep(sk), &wait); 2446 - done = sk_wait_event(sk, timeo_p, 2447 - sk->sk_state != TIPC_CONNECTING, &wait); 2435 + done = sk_wait_event(sk, timeo_p, tipc_sk_connected(sk), 2436 + &wait); 2448 2437 remove_wait_queue(sk_sleep(sk), &wait); 2449 2438 } while (!done); 2450 2439 return 0; ··· 2654 2643 * Reject any stray messages received by new socket 2655 2644 * before the socket lock was taken (very, very unlikely) 2656 2645 */ 2657 - tsk_rej_rx_queue(new_sk); 2646 + tsk_rej_rx_queue(new_sk, TIPC_ERR_NO_PORT); 2658 2647 2659 2648 /* Connect new socket to it's peer */ 2660 2649 tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
+2 -2
samples/seccomp/user-trap.c
··· 298 298 req = malloc(sizes.seccomp_notif); 299 299 if (!req) 300 300 goto out_close; 301 - memset(req, 0, sizeof(*req)); 302 301 303 302 resp = malloc(sizes.seccomp_notif_resp); 304 303 if (!resp) 305 304 goto out_req; 306 - memset(resp, 0, sizeof(*resp)); 305 + memset(resp, 0, sizes.seccomp_notif_resp); 307 306 308 307 while (1) { 308 + memset(req, 0, sizes.seccomp_notif); 309 309 if (ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, req)) { 310 310 perror("ioctl recv"); 311 311 goto out_resp;
+4 -5
scripts/gcc-plugins/Kconfig
··· 14 14 An arch should select this symbol if it supports building with 15 15 GCC plugins. 16 16 17 - config GCC_PLUGINS 18 - bool 17 + menuconfig GCC_PLUGINS 18 + bool "GCC plugins" 19 19 depends on HAVE_GCC_PLUGINS 20 20 depends on PLUGIN_HOSTCC != "" 21 21 default y ··· 25 25 26 26 See Documentation/core-api/gcc-plugins.rst for details. 27 27 28 - menu "GCC plugins" 29 - depends on GCC_PLUGINS 28 + if GCC_PLUGINS 30 29 31 30 config GCC_PLUGIN_CYC_COMPLEXITY 32 31 bool "Compute the cyclomatic complexity of a function" if EXPERT ··· 112 113 bool 113 114 depends on GCC_PLUGINS && ARM 114 115 115 - endmenu 116 + endif
+1 -1
scripts/package/mkdebian
··· 136 136 echo "1.0" > debian/source/format 137 137 138 138 echo $debarch > debian/arch 139 - extra_build_depends=", $(if_enabled_echo CONFIG_UNWINDER_ORC libelf-dev)" 139 + extra_build_depends=", $(if_enabled_echo CONFIG_UNWINDER_ORC libelf-dev:native)" 140 140 extra_build_depends="$extra_build_depends, $(if_enabled_echo CONFIG_SYSTEM_TRUSTED_KEYRING libssl-dev:native)" 141 141 142 142 # Generate a simple changelog template
+1 -1
security/apparmor/apparmorfs.c
··· 623 623 624 624 void __aa_bump_ns_revision(struct aa_ns *ns) 625 625 { 626 - ns->revision++; 626 + WRITE_ONCE(ns->revision, ns->revision + 1); 627 627 wake_up_interruptible(&ns->wait); 628 628 } 629 629
+42 -38
security/apparmor/domain.c
··· 317 317 318 318 if (!bprm || !profile->xattr_count) 319 319 return 0; 320 + might_sleep(); 320 321 321 322 /* transition from exec match to xattr set */ 322 323 state = aa_dfa_null_transition(profile->xmatch, state); ··· 362 361 } 363 362 364 363 /** 365 - * __attach_match_ - find an attachment match 364 + * find_attach - do attachment search for unconfined processes 366 365 * @bprm - binprm structure of transitioning task 367 - * @name - to match against (NOT NULL) 366 + * @ns: the current namespace (NOT NULL) 368 367 * @head - profile list to walk (NOT NULL) 368 + * @name - to match against (NOT NULL) 369 369 * @info - info message if there was an error (NOT NULL) 370 370 * 371 371 * Do a linear search on the profiles in the list. There is a matching ··· 376 374 * 377 375 * Requires: @head not be shared or have appropriate locks held 378 376 * 379 - * Returns: profile or NULL if no match found 377 + * Returns: label or NULL if no match found 380 378 */ 381 - static struct aa_profile *__attach_match(const struct linux_binprm *bprm, 382 - const char *name, 383 - struct list_head *head, 384 - const char **info) 379 + static struct aa_label *find_attach(const struct linux_binprm *bprm, 380 + struct aa_ns *ns, struct list_head *head, 381 + const char *name, const char **info) 385 382 { 386 383 int candidate_len = 0, candidate_xattrs = 0; 387 384 bool conflict = false; ··· 389 388 AA_BUG(!name); 390 389 AA_BUG(!head); 391 390 391 + rcu_read_lock(); 392 + restart: 392 393 list_for_each_entry_rcu(profile, head, base.list) { 393 394 if (profile->label.flags & FLAG_NULL && 394 395 &profile->label == ns_unconfined(profile->ns)) ··· 416 413 perm = dfa_user_allow(profile->xmatch, state); 417 414 /* any accepting state means a valid match. 
*/ 418 415 if (perm & MAY_EXEC) { 419 - int ret; 416 + int ret = 0; 420 417 421 418 if (count < candidate_len) 422 419 continue; 423 420 424 - ret = aa_xattrs_match(bprm, profile, state); 425 - /* Fail matching if the xattrs don't match */ 426 - if (ret < 0) 427 - continue; 421 + if (bprm && profile->xattr_count) { 422 + long rev = READ_ONCE(ns->revision); 428 423 424 + if (!aa_get_profile_not0(profile)) 425 + goto restart; 426 + rcu_read_unlock(); 427 + ret = aa_xattrs_match(bprm, profile, 428 + state); 429 + rcu_read_lock(); 430 + aa_put_profile(profile); 431 + if (rev != 432 + READ_ONCE(ns->revision)) 433 + /* policy changed */ 434 + goto restart; 435 + /* 436 + * Fail matching if the xattrs don't 437 + * match 438 + */ 439 + if (ret < 0) 440 + continue; 441 + } 429 442 /* 430 443 * TODO: allow for more flexible best match 431 444 * ··· 464 445 candidate_xattrs = ret; 465 446 conflict = false; 466 447 } 467 - } else if (!strcmp(profile->base.name, name)) 448 + } else if (!strcmp(profile->base.name, name)) { 468 449 /* 469 450 * old exact non-re match, without conditionals such 470 451 * as xattrs. 
no more searching required 471 452 */ 472 - return profile; 453 + candidate = profile; 454 + goto out; 455 + } 473 456 } 474 457 475 - if (conflict) { 476 - *info = "conflicting profile attachments"; 458 + if (!candidate || conflict) { 459 + if (conflict) 460 + *info = "conflicting profile attachments"; 461 + rcu_read_unlock(); 477 462 return NULL; 478 463 } 479 464 480 - return candidate; 481 - } 482 - 483 - /** 484 - * find_attach - do attachment search for unconfined processes 485 - * @bprm - binprm structure of transitioning task 486 - * @ns: the current namespace (NOT NULL) 487 - * @list: list to search (NOT NULL) 488 - * @name: the executable name to match against (NOT NULL) 489 - * @info: info message if there was an error 490 - * 491 - * Returns: label or NULL if no match found 492 - */ 493 - static struct aa_label *find_attach(const struct linux_binprm *bprm, 494 - struct aa_ns *ns, struct list_head *list, 495 - const char *name, const char **info) 496 - { 497 - struct aa_profile *profile; 498 - 499 - rcu_read_lock(); 500 - profile = aa_get_profile(__attach_match(bprm, name, list, info)); 465 + out: 466 + candidate = aa_get_newest_profile(candidate); 501 467 rcu_read_unlock(); 502 468 503 - return profile ? &profile->label : NULL; 469 + return &candidate->label; 504 470 } 505 471 506 472 static const char *next_name(int xtype, const char *name)
+8 -4
security/apparmor/file.c
··· 618 618 fctx = file_ctx(file); 619 619 620 620 rcu_read_lock(); 621 - flabel = aa_get_newest_label(rcu_dereference(fctx->label)); 622 - rcu_read_unlock(); 621 + flabel = rcu_dereference(fctx->label); 623 622 AA_BUG(!flabel); 624 623 625 624 /* revalidate access, if task is unconfined, or the cached cred ··· 630 631 */ 631 632 denied = request & ~fctx->allow; 632 633 if (unconfined(label) || unconfined(flabel) || 633 - (!denied && aa_label_is_subset(flabel, label))) 634 + (!denied && aa_label_is_subset(flabel, label))) { 635 + rcu_read_unlock(); 634 636 goto done; 637 + } 635 638 639 + flabel = aa_get_newest_label(flabel); 640 + rcu_read_unlock(); 636 641 /* TODO: label cross check */ 637 642 638 643 if (file->f_path.mnt && path_mediated_fs(file->f_path.dentry)) ··· 646 643 else if (S_ISSOCK(file_inode(file)->i_mode)) 647 644 error = __file_sock_perm(op, label, flabel, file, request, 648 645 denied); 649 - done: 650 646 aa_put_label(flabel); 647 + 648 + done: 651 649 return error; 652 650 } 653 651
+1 -1
security/apparmor/mount.c
··· 442 442 buffer = aa_get_buffer(false); 443 443 old_buffer = aa_get_buffer(false); 444 444 error = -ENOMEM; 445 - if (!buffer || old_buffer) 445 + if (!buffer || !old_buffer) 446 446 goto out; 447 447 448 448 error = fn_for_each_confined(label, profile,
+2 -2
security/apparmor/policy.c
··· 1125 1125 if (!name) { 1126 1126 /* remove namespace - can only happen if fqname[0] == ':' */ 1127 1127 mutex_lock_nested(&ns->parent->lock, ns->level); 1128 - __aa_remove_ns(ns); 1129 1128 __aa_bump_ns_revision(ns); 1129 + __aa_remove_ns(ns); 1130 1130 mutex_unlock(&ns->parent->lock); 1131 1131 } else { 1132 1132 /* remove profile */ ··· 1138 1138 goto fail_ns_lock; 1139 1139 } 1140 1140 name = profile->base.hname; 1141 + __aa_bump_ns_revision(ns); 1141 1142 __remove_profile(profile); 1142 1143 __aa_labelset_update_subtree(ns); 1143 - __aa_bump_ns_revision(ns); 1144 1144 mutex_unlock(&ns->lock); 1145 1145 } 1146 1146
+16 -5
sound/pci/hda/hda_intel.c
··· 282 282 283 283 /* quirks for old Intel chipsets */ 284 284 #define AZX_DCAPS_INTEL_ICH \ 285 - (AZX_DCAPS_OLD_SSYNC | AZX_DCAPS_NO_ALIGN_BUFSIZE) 285 + (AZX_DCAPS_OLD_SSYNC | AZX_DCAPS_NO_ALIGN_BUFSIZE |\ 286 + AZX_DCAPS_SYNC_WRITE) 286 287 287 288 /* quirks for Intel PCH */ 288 289 #define AZX_DCAPS_INTEL_PCH_BASE \ 289 290 (AZX_DCAPS_NO_ALIGN_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY |\ 290 - AZX_DCAPS_SNOOP_TYPE(SCH)) 291 + AZX_DCAPS_SNOOP_TYPE(SCH) | AZX_DCAPS_SYNC_WRITE) 291 292 292 293 /* PCH up to IVB; no runtime PM; bind with i915 gfx */ 293 294 #define AZX_DCAPS_INTEL_PCH_NOPM \ ··· 303 302 #define AZX_DCAPS_INTEL_HASWELL \ 304 303 (/*AZX_DCAPS_ALIGN_BUFSIZE |*/ AZX_DCAPS_COUNT_LPIB_DELAY |\ 305 304 AZX_DCAPS_PM_RUNTIME | AZX_DCAPS_I915_COMPONENT |\ 306 - AZX_DCAPS_SNOOP_TYPE(SCH)) 305 + AZX_DCAPS_SNOOP_TYPE(SCH) | AZX_DCAPS_SYNC_WRITE) 307 306 308 307 /* Broadwell HDMI can't use position buffer reliably, force to use LPIB */ 309 308 #define AZX_DCAPS_INTEL_BROADWELL \ 310 309 (/*AZX_DCAPS_ALIGN_BUFSIZE |*/ AZX_DCAPS_POSFIX_LPIB |\ 311 310 AZX_DCAPS_PM_RUNTIME | AZX_DCAPS_I915_COMPONENT |\ 312 - AZX_DCAPS_SNOOP_TYPE(SCH)) 311 + AZX_DCAPS_SNOOP_TYPE(SCH) | AZX_DCAPS_SYNC_WRITE) 313 312 314 313 #define AZX_DCAPS_INTEL_BAYTRAIL \ 315 314 (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_I915_COMPONENT) ··· 1411 1410 acpi_handle dhandle, atpx_handle; 1412 1411 acpi_status status; 1413 1412 1414 - while ((pdev = pci_get_class(PCI_BASE_CLASS_DISPLAY << 16, pdev)) != NULL) { 1413 + while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { 1414 + dhandle = ACPI_HANDLE(&pdev->dev); 1415 + if (dhandle) { 1416 + status = acpi_get_handle(dhandle, "ATPX", &atpx_handle); 1417 + if (!ACPI_FAILURE(status)) { 1418 + pci_dev_put(pdev); 1419 + return true; 1420 + } 1421 + } 1422 + } 1423 + while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) { 1415 1424 dhandle = ACPI_HANDLE(&pdev->dev); 1416 1425 if (dhandle) { 1417 1426 status = 
acpi_get_handle(dhandle, "ATPX", &atpx_handle);
+33 -15
sound/pci/hda/patch_realtek.c
··· 501 501 struct alc_spec *spec = codec->spec; 502 502 503 503 switch (codec->core.vendor_id) { 504 + case 0x10ec0283: 504 505 case 0x10ec0286: 505 506 case 0x10ec0288: 506 507 case 0x10ec0298: ··· 5905 5904 ALC256_FIXUP_ASUS_HEADSET_MIC, 5906 5905 ALC256_FIXUP_ASUS_MIC_NO_PRESENCE, 5907 5906 ALC299_FIXUP_PREDATOR_SPK, 5908 - ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC, 5909 5907 ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE, 5910 - ALC294_FIXUP_ASUS_INTSPK_GPIO, 5908 + ALC289_FIXUP_DELL_SPK2, 5909 + ALC289_FIXUP_DUAL_SPK, 5910 + ALC294_FIXUP_SPK2_TO_DAC1, 5911 + ALC294_FIXUP_ASUS_DUAL_SPK, 5912 + 5911 5913 }; 5912 5914 5913 5915 static const struct hda_fixup alc269_fixups[] = { ··· 6985 6981 { } 6986 6982 } 6987 6983 }, 6988 - [ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC] = { 6989 - .type = HDA_FIXUP_PINS, 6990 - .v.pins = (const struct hda_pintbl[]) { 6991 - { 0x14, 0x411111f0 }, /* disable confusing internal speaker */ 6992 - { 0x19, 0x04a11150 }, /* use as headset mic, without its own jack detect */ 6993 - { } 6994 - }, 6995 - .chained = true, 6996 - .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC 6997 - }, 6998 6984 [ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE] = { 6999 6985 .type = HDA_FIXUP_PINS, 7000 6986 .v.pins = (const struct hda_pintbl[]) { ··· 6995 7001 .chained = true, 6996 7002 .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE 6997 7003 }, 6998 - [ALC294_FIXUP_ASUS_INTSPK_GPIO] = { 7004 + [ALC289_FIXUP_DELL_SPK2] = { 7005 + .type = HDA_FIXUP_PINS, 7006 + .v.pins = (const struct hda_pintbl[]) { 7007 + { 0x17, 0x90170130 }, /* bass spk */ 7008 + { } 7009 + }, 7010 + .chained = true, 7011 + .chain_id = ALC269_FIXUP_DELL4_MIC_NO_PRESENCE 7012 + }, 7013 + [ALC289_FIXUP_DUAL_SPK] = { 7014 + .type = HDA_FIXUP_FUNC, 7015 + .v.func = alc285_fixup_speaker2_to_dac1, 7016 + .chained = true, 7017 + .chain_id = ALC289_FIXUP_DELL_SPK2 7018 + }, 7019 + [ALC294_FIXUP_SPK2_TO_DAC1] = { 7020 + .type = HDA_FIXUP_FUNC, 7021 + .v.func = alc285_fixup_speaker2_to_dac1, 7022 + .chained = true, 7023 + 
.chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC 7024 + }, 7025 + [ALC294_FIXUP_ASUS_DUAL_SPK] = { 6999 7026 .type = HDA_FIXUP_FUNC, 7000 7027 /* The GPIO must be pulled to initialize the AMP */ 7001 7028 .v.func = alc_fixup_gpio4, 7002 7029 .chained = true, 7003 - .chain_id = ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC 7030 + .chain_id = ALC294_FIXUP_SPK2_TO_DAC1 7004 7031 }, 7032 + 7005 7033 }; 7006 7034 7007 7035 static const struct snd_pci_quirk alc269_fixup_tbl[] = { ··· 7096 7080 SND_PCI_QUIRK(0x1028, 0x08ad, "Dell WYSE AIO", ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE), 7097 7081 SND_PCI_QUIRK(0x1028, 0x08ae, "Dell WYSE NB", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE), 7098 7082 SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB), 7083 + SND_PCI_QUIRK(0x1028, 0x097e, "Dell Precision", ALC289_FIXUP_DUAL_SPK), 7084 + SND_PCI_QUIRK(0x1028, 0x097d, "Dell Precision", ALC289_FIXUP_DUAL_SPK), 7099 7085 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 7100 7086 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 7101 7087 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), ··· 7185 7167 SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK), 7186 7168 SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A), 7187 7169 SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC), 7188 - SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_INTSPK_GPIO), 7170 + SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK), 7189 7171 SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC), 7190 7172 SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW), 7191 7173 SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
+6 -3
sound/pci/ice1712/ice1724.c
··· 647 647 unsigned long flags; 648 648 unsigned char mclk_change; 649 649 unsigned int i, old_rate; 650 + bool call_set_rate = false; 650 651 651 652 if (rate > ice->hw_rates->list[ice->hw_rates->count - 1]) 652 653 return -EINVAL; ··· 671 670 * setting clock rate for internal clock mode */ 672 671 old_rate = ice->get_rate(ice); 673 672 if (force || (old_rate != rate)) 674 - ice->set_rate(ice, rate); 673 + call_set_rate = true; 675 674 else if (rate == ice->cur_rate) { 676 675 spin_unlock_irqrestore(&ice->reg_lock, flags); 677 676 return 0; ··· 679 678 } 680 679 681 680 ice->cur_rate = rate; 681 + spin_unlock_irqrestore(&ice->reg_lock, flags); 682 + 683 + if (call_set_rate) 684 + ice->set_rate(ice, rate); 682 685 683 686 /* setting master clock */ 684 687 mclk_change = ice->set_mclk(ice, rate); 685 - 686 - spin_unlock_irqrestore(&ice->reg_lock, flags); 687 688 688 689 if (mclk_change && ice->gpio.i2s_mclk_changed) 689 690 ice->gpio.i2s_mclk_changed(ice);
+1
sound/usb/card.h
··· 145 145 struct snd_usb_endpoint *sync_endpoint; 146 146 unsigned long flags; 147 147 bool need_setup_ep; /* (re)configure EP at prepare? */ 148 + bool need_setup_fmt; /* (re)configure fmt after resume? */ 148 149 unsigned int speed; /* USB_SPEED_XXX */ 149 150 150 151 u64 formats; /* format bitmasks (all or'ed) */
+21 -4
sound/usb/pcm.c
··· 506 506 if (WARN_ON(!iface)) 507 507 return -EINVAL; 508 508 alts = usb_altnum_to_altsetting(iface, fmt->altsetting); 509 - altsd = get_iface_desc(alts); 510 - if (WARN_ON(altsd->bAlternateSetting != fmt->altsetting)) 509 + if (WARN_ON(!alts)) 511 510 return -EINVAL; 511 + altsd = get_iface_desc(alts); 512 512 513 - if (fmt == subs->cur_audiofmt) 513 + if (fmt == subs->cur_audiofmt && !subs->need_setup_fmt) 514 514 return 0; 515 515 516 516 /* close the old interface */ 517 - if (subs->interface >= 0 && subs->interface != fmt->iface) { 517 + if (subs->interface >= 0 && (subs->interface != fmt->iface || subs->need_setup_fmt)) { 518 518 if (!subs->stream->chip->keep_iface) { 519 519 err = usb_set_interface(subs->dev, subs->interface, 0); 520 520 if (err < 0) { ··· 527 527 subs->interface = -1; 528 528 subs->altset_idx = 0; 529 529 } 530 + 531 + if (subs->need_setup_fmt) 532 + subs->need_setup_fmt = false; 530 533 531 534 /* set interface */ 532 535 if (iface->cur_altsetting != alts) { ··· 1731 1728 subs->data_endpoint->retire_data_urb = retire_playback_urb; 1732 1729 subs->running = 0; 1733 1730 return 0; 1731 + case SNDRV_PCM_TRIGGER_SUSPEND: 1732 + if (subs->stream->chip->setup_fmt_after_resume_quirk) { 1733 + stop_endpoints(subs, true); 1734 + subs->need_setup_fmt = true; 1735 + return 0; 1736 + } 1737 + break; 1734 1738 } 1735 1739 1736 1740 return -EINVAL; ··· 1770 1760 subs->data_endpoint->retire_data_urb = retire_capture_urb; 1771 1761 subs->running = 1; 1772 1762 return 0; 1763 + case SNDRV_PCM_TRIGGER_SUSPEND: 1764 + if (subs->stream->chip->setup_fmt_after_resume_quirk) { 1765 + stop_endpoints(subs, true); 1766 + subs->need_setup_fmt = true; 1767 + return 0; 1768 + } 1769 + break; 1773 1770 } 1774 1771 1775 1772 return -EINVAL;
+2 -1
sound/usb/quirks-table.h
··· 3466 3466 .vendor_name = "Dell", 3467 3467 .product_name = "WD19 Dock", 3468 3468 .profile_name = "Dell-WD15-Dock", 3469 - .ifnum = QUIRK_NO_INTERFACE 3469 + .ifnum = QUIRK_ANY_INTERFACE, 3470 + .type = QUIRK_SETUP_FMT_AFTER_RESUME 3470 3471 } 3471 3472 }, 3472 3473 /* MOTU Microbook II */
+11
sound/usb/quirks.c
··· 508 508 return snd_usb_create_mixer(chip, quirk->ifnum, 0); 509 509 } 510 510 511 + 512 + static int setup_fmt_after_resume_quirk(struct snd_usb_audio *chip, 513 + struct usb_interface *iface, 514 + struct usb_driver *driver, 515 + const struct snd_usb_audio_quirk *quirk) 516 + { 517 + chip->setup_fmt_after_resume_quirk = 1; 518 + return 1; /* Continue with creating streams and mixer */ 519 + } 520 + 511 521 /* 512 522 * audio-interface quirks 513 523 * ··· 556 546 [QUIRK_AUDIO_EDIROL_UAXX] = create_uaxx_quirk, 557 547 [QUIRK_AUDIO_ALIGN_TRANSFER] = create_align_transfer_quirk, 558 548 [QUIRK_AUDIO_STANDARD_MIXER] = create_standard_mixer_quirk, 549 + [QUIRK_SETUP_FMT_AFTER_RESUME] = setup_fmt_after_resume_quirk, 559 550 }; 560 551 561 552 if (quirk->type < QUIRK_TYPE_COUNT) {
+2 -1
sound/usb/usbaudio.h
··· 33 33 wait_queue_head_t shutdown_wait; 34 34 unsigned int txfr_quirk:1; /* Subframe boundaries on transfers */ 35 35 unsigned int tx_length_quirk:1; /* Put length specifier in transfers */ 36 - 36 + unsigned int setup_fmt_after_resume_quirk:1; /* setup the format to interface after resume */ 37 37 int num_interfaces; 38 38 int num_suspended_intf; 39 39 int sample_rate_read_error; ··· 98 98 QUIRK_AUDIO_EDIROL_UAXX, 99 99 QUIRK_AUDIO_ALIGN_TRANSFER, 100 100 QUIRK_AUDIO_STANDARD_MIXER, 101 + QUIRK_SETUP_FMT_AFTER_RESUME, 101 102 102 103 QUIRK_TYPE_COUNT 103 104 };
+8
tools/testing/selftests/net/forwarding/loopback.sh
··· 1 1 #!/bin/bash 2 2 # SPDX-License-Identifier: GPL-2.0 3 3 4 + # Kselftest framework requirement - SKIP code is 4. 5 + ksft_skip=4 6 + 4 7 ALL_TESTS="loopback_test" 5 8 NUM_NETIFS=2 6 9 source tc_common.sh ··· 75 72 76 73 h1_create 77 74 h2_create 75 + 76 + if ethtool -k $h1 | grep loopback | grep -q fixed; then 77 + log_test "SKIP: dev $h1 does not support loopback feature" 78 + exit $ksft_skip 79 + fi 78 80 } 79 81 80 82 cleanup()
+14 -1
tools/testing/selftests/seccomp/seccomp_bpf.c
··· 3158 3158 EXPECT_GT(poll(&pollfd, 1, -1), 0); 3159 3159 EXPECT_EQ(pollfd.revents, POLLIN); 3160 3160 3161 - EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); 3161 + /* Test that we can't pass garbage to the kernel. */ 3162 + memset(&req, 0, sizeof(req)); 3163 + req.pid = -1; 3164 + errno = 0; 3165 + ret = ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req); 3166 + EXPECT_EQ(-1, ret); 3167 + EXPECT_EQ(EINVAL, errno); 3168 + 3169 + if (ret) { 3170 + req.pid = 0; 3171 + EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); 3172 + } 3162 3173 3163 3174 pollfd.fd = listener; 3164 3175 pollfd.events = POLLIN | POLLOUT; ··· 3289 3278 3290 3279 close(sk_pair[1]); 3291 3280 3281 + memset(&req, 0, sizeof(req)); 3292 3282 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); 3293 3283 3294 3284 EXPECT_EQ(kill(pid, SIGUSR1), 0); ··· 3308 3296 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), -1); 3309 3297 EXPECT_EQ(errno, ENOENT); 3310 3298 3299 + memset(&req, 0, sizeof(req)); 3311 3300 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); 3312 3301 3313 3302 resp.id = req.id;
+1 -1
usr/gen_initramfs_list.sh
··· 128 128 str="${ftype} ${name} ${location} ${str}" 129 129 ;; 130 130 "nod") 131 - local dev=`LC_ALL=C ls -l "${location}"` 131 + local dev="`LC_ALL=C ls -l "${location}"`" 132 132 local maj=`field 5 ${dev}` 133 133 local min=`field 6 ${dev}` 134 134 maj=${maj%,}