Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

spi-cadence: support transmission with 16-bit and 32-bit FIFO data widths

Merge series from Jun Guo <jun.guo@cixtech.com>:

The Cadence SPI IP supports configurable FIFO data widths during
integration. On some SoCs, the FIFO data width is designed to be 16 or
32 bits at the chip design stage. However, the current driver only
supports communication with an 8-bit FIFO data width. Therefore, these
patches are added to enable the driver to support communication with
16-bit and 32-bit FIFO data widths.

+2139 -937
+1 -1
Documentation/devicetree/bindings/gpio/ti,twl4030-gpio.yaml
··· 1 1 # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 2 %YAML 1.2 3 3 --- 4 - $id: http://devicetree.org/schemas/ti,twl4030-gpio.yaml# 4 + $id: http://devicetree.org/schemas/gpio/ti,twl4030-gpio.yaml# 5 5 $schema: http://devicetree.org/meta-schemas/core.yaml# 6 6 7 7 title: TI TWL4030 GPIO controller
+1
Documentation/devicetree/bindings/spi/spi-cadence.yaml
··· 21 21 - enum: 22 22 - xlnx,zynqmp-spi-r1p6 23 23 - xlnx,versal-net-spi-r1p6 24 + - cix,sky1-spi-r1p6 24 25 - const: cdns,spi-r1p6 25 26 26 27 reg:
+4 -4
Documentation/firmware-guide/acpi/i2c-muxes.rst
··· 37 37 Name (_HID, ...) 38 38 Name (_CRS, ResourceTemplate () { 39 39 I2cSerialBus (0x50, ControllerInitiated, I2C_SPEED, 40 - AddressingMode7Bit, "\\_SB.SMB1.CH00", 0x00, 41 - ResourceConsumer,,) 40 + AddressingMode7Bit, "\\_SB.SMB1.MUX0.CH00", 41 + 0x00, ResourceConsumer,,) 42 42 } 43 43 } 44 44 } ··· 52 52 Name (_HID, ...) 53 53 Name (_CRS, ResourceTemplate () { 54 54 I2cSerialBus (0x50, ControllerInitiated, I2C_SPEED, 55 - AddressingMode7Bit, "\\_SB.SMB1.CH01", 0x00, 56 - ResourceConsumer,,) 55 + AddressingMode7Bit, "\\_SB.SMB1.MUX0.CH01", 56 + 0x00, ResourceConsumer,,) 57 57 } 58 58 } 59 59 }
+5 -1
MAINTAINERS
··· 4818 4818 F: drivers/net/dsa/bcm_sf2* 4819 4819 F: include/linux/dsa/brcm.h 4820 4820 F: include/linux/platform_data/b53.h 4821 + F: net/dsa/tag_brcm.c 4821 4822 4822 4823 BROADCOM BCM2711/BCM2835 ARM ARCHITECTURE 4823 4824 M: Florian Fainelli <florian.fainelli@broadcom.com> ··· 12522 12521 F: include/linux/net/intel/*/ 12523 12522 12524 12523 INTEL ETHERNET PROTOCOL DRIVER FOR RDMA 12524 + M: Krzysztof Czurylo <krzysztof.czurylo@intel.com> 12525 12525 M: Tatyana Nikolova <tatyana.e.nikolova@intel.com> 12526 12526 L: linux-rdma@vger.kernel.org 12527 12527 S: Supported ··· 12863 12861 K: \bSGX_ 12864 12862 12865 12863 INTEL SKYLAKE INT3472 ACPI DEVICE DRIVER 12866 - M: Daniel Scally <djrscally@gmail.com> 12864 + M: Daniel Scally <dan.scally@ideasonboard.com> 12865 + M: Sakari Ailus <sakari.ailus@linux.intel.com> 12867 12866 S: Maintained 12868 12867 F: drivers/platform/x86/intel/int3472/ 12869 12868 F: include/linux/platform_data/x86/int3472.h ··· 20163 20160 R: Jiri Olsa <jolsa@kernel.org> 20164 20161 R: Ian Rogers <irogers@google.com> 20165 20162 R: Adrian Hunter <adrian.hunter@intel.com> 20163 + R: James Clark <james.clark@linaro.org> 20166 20164 L: linux-perf-users@vger.kernel.org 20167 20165 L: linux-kernel@vger.kernel.org 20168 20166 S: Supported
+1 -1
Makefile
··· 2 2 VERSION = 6 3 3 PATCHLEVEL = 18 4 4 SUBLEVEL = 0 5 - EXTRAVERSION = -rc4 5 + EXTRAVERSION = -rc5 6 6 NAME = Baby Opossum Posse 7 7 8 8 # *DOCUMENTATION*
+7
arch/Kconfig
··· 917 917 An architecture should select this option if it requires the 918 918 .kcfi_traps section for KCFI trap handling. 919 919 920 + config ARCH_USES_CFI_GENERIC_LLVM_PASS 921 + bool 922 + help 923 + An architecture should select this option if it uses the generic 924 + KCFIPass in LLVM to expand kCFI bundles instead of architecture-specific 925 + lowering. 926 + 920 927 config CFI 921 928 bool "Use Kernel Control Flow Integrity (kCFI)" 922 929 default CFI_CLANG
+2
arch/arm/Kconfig
··· 44 44 select ARCH_USE_BUILTIN_BSWAP 45 45 select ARCH_USE_CMPXCHG_LOCKREF 46 46 select ARCH_USE_MEMTEST 47 + # https://github.com/llvm/llvm-project/commit/d130f402642fba3d065aacb506cb061c899558de 48 + select ARCH_USES_CFI_GENERIC_LLVM_PASS if CLANG_VERSION < 220000 47 49 select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU 48 50 select ARCH_WANT_GENERAL_HUGETLB 49 51 select ARCH_WANT_IPC_PARSE_VERSION
+1 -1
arch/loongarch/Makefile
··· 109 109 ifdef CONFIG_RUSTC_HAS_ANNOTATE_TABLEJUMP 110 110 KBUILD_RUSTFLAGS += -Cllvm-args=--loongarch-annotate-tablejump 111 111 else 112 - KBUILD_RUSTFLAGS += -Zno-jump-tables # keep compatibility with older compilers 112 + KBUILD_RUSTFLAGS += $(if $(call rustc-min-version,109300),-Cjump-tables=n,-Zno-jump-tables) # keep compatibility with older compilers 113 113 endif 114 114 ifdef CONFIG_LTO_CLANG 115 115 # The annotate-tablejump option can not be passed to LLVM backend when LTO is enabled.
+10 -3
arch/parisc/kernel/unwind.c
··· 35 35 36 36 #define KERNEL_START (KERNEL_BINARY_TEXT_START) 37 37 38 + #define ALIGNMENT_OK(ptr, type) (((ptr) & (sizeof(type) - 1)) == 0) 39 + 38 40 extern struct unwind_table_entry __start___unwind[]; 39 41 extern struct unwind_table_entry __stop___unwind[]; 40 42 ··· 259 257 if (pc_is_kernel_fn(pc, _switch_to) || 260 258 pc == (unsigned long)&_switch_to_ret) { 261 259 info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE; 262 - info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET); 260 + if (ALIGNMENT_OK(info->prev_sp, long)) 261 + info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET); 262 + else 263 + info->prev_ip = info->prev_sp = 0; 263 264 return 1; 264 265 } 265 266 266 267 #ifdef CONFIG_IRQSTACKS 267 - if (pc == (unsigned long)&_call_on_stack) { 268 + if (pc == (unsigned long)&_call_on_stack && ALIGNMENT_OK(info->sp, long)) { 268 269 info->prev_sp = *(unsigned long *)(info->sp - FRAME_SIZE - REG_SZ); 269 270 info->prev_ip = *(unsigned long *)(info->sp - FRAME_SIZE - RP_OFFSET); 270 271 return 1; ··· 375 370 info->prev_sp = info->sp - frame_size; 376 371 if (e->Millicode) 377 372 info->rp = info->r31; 378 - else if (rpoffset) 373 + else if (rpoffset && ALIGNMENT_OK(info->prev_sp, long)) 379 374 info->rp = *(unsigned long *)(info->prev_sp - rpoffset); 375 + else 376 + info->rp = 0; 380 377 info->prev_ip = info->rp; 381 378 info->rp = 0; 382 379 }
+6
arch/riscv/include/asm/asm.h
··· 12 12 #define __ASM_STR(x) #x 13 13 #endif 14 14 15 + #ifdef CONFIG_AS_HAS_INSN 16 + #define ASM_INSN_I(__x) ".insn " __x 17 + #else 18 + #define ASM_INSN_I(__x) ".4byte " __x 19 + #endif 20 + 15 21 #if __riscv_xlen == 64 16 22 #define __REG_SEL(a, b) __ASM_STR(a) 17 23 #elif __riscv_xlen == 32
+4 -4
arch/riscv/include/asm/insn-def.h
··· 256 256 INSN_S(OPCODE_OP_IMM, FUNC3(6), __RS2(3), \ 257 257 SIMM12((offset) & 0xfe0), RS1(base)) 258 258 259 - #define RISCV_PAUSE ".4byte 0x100000f" 260 - #define ZAWRS_WRS_NTO ".4byte 0x00d00073" 261 - #define ZAWRS_WRS_STO ".4byte 0x01d00073" 262 - #define RISCV_NOP4 ".4byte 0x00000013" 259 + #define RISCV_PAUSE ASM_INSN_I("0x100000f") 260 + #define ZAWRS_WRS_NTO ASM_INSN_I("0x00d00073") 261 + #define ZAWRS_WRS_STO ASM_INSN_I("0x01d00073") 262 + #define RISCV_NOP4 ASM_INSN_I("0x00000013") 263 263 264 264 #define RISCV_INSN_NOP4 _AC(0x00000013, U) 265 265
+3 -3
arch/riscv/include/asm/vendor_extensions/mips.h
··· 30 30 * allowing any subsequent instructions to fetch. 31 31 */ 32 32 33 - #define MIPS_PAUSE ".4byte 0x00501013\n\t" 34 - #define MIPS_EHB ".4byte 0x00301013\n\t" 35 - #define MIPS_IHB ".4byte 0x00101013\n\t" 33 + #define MIPS_PAUSE ASM_INSN_I("0x00501013\n\t") 34 + #define MIPS_EHB ASM_INSN_I("0x00301013\n\t") 35 + #define MIPS_IHB ASM_INSN_I("0x00101013\n\t") 36 36 37 37 #endif // _ASM_RISCV_VENDOR_EXTENSIONS_MIPS_H
+2 -2
arch/riscv/kernel/kgdb.c
··· 265 265 { 266 266 if (!strncmp(remcom_in_buffer, gdb_xfer_read_target, 267 267 sizeof(gdb_xfer_read_target))) 268 - strcpy(remcom_out_buffer, riscv_gdb_stub_target_desc); 268 + strscpy(remcom_out_buffer, riscv_gdb_stub_target_desc, BUFMAX); 269 269 else if (!strncmp(remcom_in_buffer, gdb_xfer_read_cpuxml, 270 270 sizeof(gdb_xfer_read_cpuxml))) 271 - strcpy(remcom_out_buffer, riscv_gdb_stub_cpuxml); 271 + strscpy(remcom_out_buffer, riscv_gdb_stub_cpuxml, BUFMAX); 272 272 } 273 273 274 274 static inline void kgdb_arch_update_addr(struct pt_regs *regs,
+6 -2
arch/riscv/kernel/module-sections.c
··· 119 119 unsigned int num_plts = 0; 120 120 unsigned int num_gots = 0; 121 121 Elf_Rela *scratch = NULL; 122 + Elf_Rela *new_scratch; 122 123 size_t scratch_size = 0; 123 124 int i; 124 125 ··· 169 168 scratch_size_needed = (num_scratch_relas + num_relas) * sizeof(*scratch); 170 169 if (scratch_size_needed > scratch_size) { 171 170 scratch_size = scratch_size_needed; 172 - scratch = kvrealloc(scratch, scratch_size, GFP_KERNEL); 173 - if (!scratch) 171 + new_scratch = kvrealloc(scratch, scratch_size, GFP_KERNEL); 172 + if (!new_scratch) { 173 + kvfree(scratch); 174 174 return -ENOMEM; 175 + } 176 + scratch = new_scratch; 175 177 } 176 178 177 179 for (size_t j = 0; j < num_relas; j++)
+19 -2
arch/riscv/kernel/stacktrace.c
··· 16 16 17 17 #ifdef CONFIG_FRAME_POINTER 18 18 19 + /* 20 + * This disables KASAN checking when reading a value from another task's stack, 21 + * since the other task could be running on another CPU and could have poisoned 22 + * the stack in the meantime. 23 + */ 24 + #define READ_ONCE_TASK_STACK(task, x) \ 25 + ({ \ 26 + unsigned long val; \ 27 + unsigned long addr = x; \ 28 + if ((task) == current) \ 29 + val = READ_ONCE(addr); \ 30 + else \ 31 + val = READ_ONCE_NOCHECK(addr); \ 32 + val; \ 33 + }) 34 + 19 35 extern asmlinkage void handle_exception(void); 20 36 extern unsigned long ret_from_exception_end; 21 37 ··· 85 69 fp = frame->ra; 86 70 pc = regs->ra; 87 71 } else { 88 - fp = frame->fp; 89 - pc = ftrace_graph_ret_addr(current, &graph_idx, frame->ra, 72 + fp = READ_ONCE_TASK_STACK(task, frame->fp); 73 + pc = READ_ONCE_TASK_STACK(task, frame->ra); 74 + pc = ftrace_graph_ret_addr(current, &graph_idx, pc, 90 75 &frame->ra); 91 76 if (pc >= (unsigned long)handle_exception && 92 77 pc < (unsigned long)&ret_from_exception_end) {
+1 -1
arch/riscv/kernel/tests/Kconfig.debug
··· 31 31 If unsure, say N. 32 32 33 33 config RISCV_KPROBES_KUNIT 34 - bool "KUnit test for riscv kprobes" if !KUNIT_ALL_TESTS 34 + tristate "KUnit test for riscv kprobes" if !KUNIT_ALL_TESTS 35 35 depends on KUNIT 36 36 depends on KPROBES 37 37 default KUNIT_ALL_TESTS
+3 -1
arch/riscv/kernel/tests/kprobes/Makefile
··· 1 - obj-y += test-kprobes.o test-kprobes-asm.o 1 + obj-$(CONFIG_RISCV_KPROBES_KUNIT) += kprobes_riscv_kunit.o 2 + 3 + kprobes_riscv_kunit-objs := test-kprobes.o test-kprobes-asm.o
+4 -1
arch/riscv/kernel/tests/kprobes/test-kprobes.c
··· 49 49 }; 50 50 51 51 static struct kunit_suite kprobes_test_suite = { 52 - .name = "kprobes_test_riscv", 52 + .name = "kprobes_riscv", 53 53 .test_cases = kprobes_testcases, 54 54 }; 55 55 56 56 kunit_test_suites(&kprobes_test_suite); 57 + 58 + MODULE_LICENSE("GPL"); 59 + MODULE_DESCRIPTION("KUnit test for riscv kprobes");
+1 -1
arch/riscv/mm/ptdump.c
··· 21 21 #define pt_dump_seq_puts(m, fmt) \ 22 22 ({ \ 23 23 if (m) \ 24 - seq_printf(m, fmt); \ 24 + seq_puts(m, fmt); \ 25 25 }) 26 26 27 27 /*
+1 -1
arch/x86/Makefile
··· 98 98 # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=104816 99 99 # 100 100 KBUILD_CFLAGS += $(call cc-option,-fcf-protection=branch -fno-jump-tables) 101 - KBUILD_RUSTFLAGS += -Zcf-protection=branch -Zno-jump-tables 101 + KBUILD_RUSTFLAGS += -Zcf-protection=branch $(if $(call rustc-min-version,109300),-Cjump-tables=n,-Zno-jump-tables) 102 102 else 103 103 KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none) 104 104 endif
-1
arch/x86/include/asm/amd/node.h
··· 23 23 #define AMD_NODE0_PCI_SLOT 0x18 24 24 25 25 struct pci_dev *amd_node_get_func(u16 node, u8 func); 26 - struct pci_dev *amd_node_get_root(u16 node); 27 26 28 27 static inline u16 amd_num_nodes(void) 29 28 {
+4
arch/x86/include/asm/runtime-const.h
··· 2 2 #ifndef _ASM_RUNTIME_CONST_H 3 3 #define _ASM_RUNTIME_CONST_H 4 4 5 + #ifdef MODULE 6 + #error "Cannot use runtime-const infrastructure from modules" 7 + #endif 8 + 5 9 #ifdef __ASSEMBLY__ 6 10 7 11 .macro RUNTIME_CONST_PTR sym reg
+5 -5
arch/x86/include/asm/uaccess_64.h
··· 12 12 #include <asm/cpufeatures.h> 13 13 #include <asm/page.h> 14 14 #include <asm/percpu.h> 15 - #include <asm/runtime-const.h> 16 15 17 - /* 18 - * Virtual variable: there's no actual backing store for this, 19 - * it can purely be used as 'runtime_const_ptr(USER_PTR_MAX)' 20 - */ 16 + #ifdef MODULE 17 + #define runtime_const_ptr(sym) (sym) 18 + #else 19 + #include <asm/runtime-const.h> 20 + #endif 21 21 extern unsigned long USER_PTR_MAX; 22 22 23 23 #ifdef CONFIG_ADDRESS_MASKING
+51 -99
arch/x86/kernel/amd_node.c
··· 34 34 return pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(AMD_NODE0_PCI_SLOT + node, func)); 35 35 } 36 36 37 - #define DF_BLK_INST_CNT 0x040 38 - #define DF_CFG_ADDR_CNTL_LEGACY 0x084 39 - #define DF_CFG_ADDR_CNTL_DF4 0xC04 40 - 41 - #define DF_MAJOR_REVISION GENMASK(27, 24) 42 - 43 - static u16 get_cfg_addr_cntl_offset(struct pci_dev *df_f0) 44 - { 45 - u32 reg; 46 - 47 - /* 48 - * Revision fields added for DF4 and later. 49 - * 50 - * Major revision of '0' is found pre-DF4. Field is Read-as-Zero. 51 - */ 52 - if (pci_read_config_dword(df_f0, DF_BLK_INST_CNT, &reg)) 53 - return 0; 54 - 55 - if (reg & DF_MAJOR_REVISION) 56 - return DF_CFG_ADDR_CNTL_DF4; 57 - 58 - return DF_CFG_ADDR_CNTL_LEGACY; 59 - } 60 - 61 - struct pci_dev *amd_node_get_root(u16 node) 62 - { 63 - struct pci_dev *root; 64 - u16 cntl_off; 65 - u8 bus; 66 - 67 - if (!cpu_feature_enabled(X86_FEATURE_ZEN)) 68 - return NULL; 69 - 70 - /* 71 - * D18F0xXXX [Config Address Control] (DF::CfgAddressCntl) 72 - * Bits [7:0] (SecBusNum) holds the bus number of the root device for 73 - * this Data Fabric instance. The segment, device, and function will be 0. 74 - */ 75 - struct pci_dev *df_f0 __free(pci_dev_put) = amd_node_get_func(node, 0); 76 - if (!df_f0) 77 - return NULL; 78 - 79 - cntl_off = get_cfg_addr_cntl_offset(df_f0); 80 - if (!cntl_off) 81 - return NULL; 82 - 83 - if (pci_read_config_byte(df_f0, cntl_off, &bus)) 84 - return NULL; 85 - 86 - /* Grab the pointer for the actual root device instance. */ 87 - root = pci_get_domain_bus_and_slot(0, bus, 0); 88 - 89 - pci_dbg(root, "is root for AMD node %u\n", node); 90 - return root; 91 - } 92 - 93 37 static struct pci_dev **amd_roots; 94 38 95 39 /* Protect the PCI config register pairs used for SMN. 
*/ ··· 218 274 DEFINE_SHOW_STORE_ATTRIBUTE(smn_address); 219 275 DEFINE_SHOW_STORE_ATTRIBUTE(smn_value); 220 276 221 - static int amd_cache_roots(void) 277 + static struct pci_dev *get_next_root(struct pci_dev *root) 222 278 { 223 - u16 node, num_nodes = amd_num_nodes(); 224 - 225 - amd_roots = kcalloc(num_nodes, sizeof(*amd_roots), GFP_KERNEL); 226 - if (!amd_roots) 227 - return -ENOMEM; 228 - 229 - for (node = 0; node < num_nodes; node++) 230 - amd_roots[node] = amd_node_get_root(node); 231 - 232 - return 0; 233 - } 234 - 235 - static int reserve_root_config_spaces(void) 236 - { 237 - struct pci_dev *root = NULL; 238 - struct pci_bus *bus = NULL; 239 - 240 - while ((bus = pci_find_next_bus(bus))) { 241 - /* Root device is Device 0 Function 0 on each Primary Bus. */ 242 - root = pci_get_slot(bus, 0); 243 - if (!root) 279 + while ((root = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, root))) { 280 + /* Root device is Device 0 Function 0. */ 281 + if (root->devfn) 244 282 continue; 245 283 246 284 if (root->vendor != PCI_VENDOR_ID_AMD && 247 285 root->vendor != PCI_VENDOR_ID_HYGON) 248 286 continue; 249 287 250 - pci_dbg(root, "Reserving PCI config space\n"); 251 - 252 - /* 253 - * There are a few SMN index/data pairs and other registers 254 - * that shouldn't be accessed by user space. 255 - * So reserve the entire PCI config space for simplicity rather 256 - * than covering specific registers piecemeal. 
257 - */ 258 - if (!pci_request_config_region_exclusive(root, 0, PCI_CFG_SPACE_SIZE, NULL)) { 259 - pci_err(root, "Failed to reserve config space\n"); 260 - return -EEXIST; 261 - } 288 + break; 262 289 } 263 290 264 - smn_exclusive = true; 265 - return 0; 291 + return root; 266 292 } 267 293 268 294 static bool enable_dfs; ··· 246 332 247 333 static int __init amd_smn_init(void) 248 334 { 249 - int err; 335 + u16 count, num_roots, roots_per_node, node, num_nodes; 336 + struct pci_dev *root; 250 337 251 338 if (!cpu_feature_enabled(X86_FEATURE_ZEN)) 252 339 return 0; ··· 257 342 if (amd_roots) 258 343 return 0; 259 344 260 - err = amd_cache_roots(); 261 - if (err) 262 - return err; 345 + num_roots = 0; 346 + root = NULL; 347 + while ((root = get_next_root(root))) { 348 + pci_dbg(root, "Reserving PCI config space\n"); 263 349 264 - err = reserve_root_config_spaces(); 265 - if (err) 266 - return err; 350 + /* 351 + * There are a few SMN index/data pairs and other registers 352 + * that shouldn't be accessed by user space. So reserve the 353 + * entire PCI config space for simplicity rather than covering 354 + * specific registers piecemeal. 355 + */ 356 + if (!pci_request_config_region_exclusive(root, 0, PCI_CFG_SPACE_SIZE, NULL)) { 357 + pci_err(root, "Failed to reserve config space\n"); 358 + return -EEXIST; 359 + } 360 + 361 + num_roots++; 362 + } 363 + 364 + pr_debug("Found %d AMD root devices\n", num_roots); 365 + 366 + if (!num_roots) 367 + return -ENODEV; 368 + 369 + num_nodes = amd_num_nodes(); 370 + amd_roots = kcalloc(num_nodes, sizeof(*amd_roots), GFP_KERNEL); 371 + if (!amd_roots) 372 + return -ENOMEM; 373 + 374 + roots_per_node = num_roots / num_nodes; 375 + 376 + count = 0; 377 + node = 0; 378 + root = NULL; 379 + while (node < num_nodes && (root = get_next_root(root))) { 380 + /* Use one root for each node and skip the rest. 
*/ 381 + if (count++ % roots_per_node) 382 + continue; 383 + 384 + pci_dbg(root, "is root for AMD node %u\n", node); 385 + amd_roots[node++] = root; 386 + } 267 387 268 388 if (enable_dfs) { 269 389 debugfs_dir = debugfs_create_dir("amd_smn", arch_debugfs_dir); ··· 307 357 debugfs_create_file("address", 0600, debugfs_dir, NULL, &smn_address_fops); 308 358 debugfs_create_file("value", 0600, debugfs_dir, NULL, &smn_value_fops); 309 359 } 360 + 361 + smn_exclusive = true; 310 362 311 363 return 0; 312 364 }
+1
arch/x86/kernel/cpu/amd.c
··· 1038 1038 static const struct x86_cpu_id zen5_rdseed_microcode[] = { 1039 1039 ZEN_MODEL_STEP_UCODE(0x1a, 0x02, 0x1, 0x0b00215a), 1040 1040 ZEN_MODEL_STEP_UCODE(0x1a, 0x11, 0x0, 0x0b101054), 1041 + {}, 1041 1042 }; 1042 1043 1043 1044 static void init_amd_zen5(struct cpuinfo_x86 *c)
+5 -1
arch/x86/kernel/cpu/common.c
··· 78 78 DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info); 79 79 EXPORT_PER_CPU_SYMBOL(cpu_info); 80 80 81 + /* Used for modules: built-in code uses runtime constants */ 82 + unsigned long USER_PTR_MAX; 83 + EXPORT_SYMBOL(USER_PTR_MAX); 84 + 81 85 u32 elf_hwcap2 __read_mostly; 82 86 83 87 /* Number of siblings per CPU package */ ··· 2583 2579 alternative_instructions(); 2584 2580 2585 2581 if (IS_ENABLED(CONFIG_X86_64)) { 2586 - unsigned long USER_PTR_MAX = TASK_SIZE_MAX; 2582 + USER_PTR_MAX = TASK_SIZE_MAX; 2587 2583 2588 2584 /* 2589 2585 * Enable this when LAM is gated on LASS support
+2
arch/x86/kernel/cpu/microcode/amd.c
··· 220 220 case 0xaa001: return cur_rev <= 0xaa00116; break; 221 221 case 0xaa002: return cur_rev <= 0xaa00218; break; 222 222 case 0xb0021: return cur_rev <= 0xb002146; break; 223 + case 0xb0081: return cur_rev <= 0xb008111; break; 223 224 case 0xb1010: return cur_rev <= 0xb101046; break; 224 225 case 0xb2040: return cur_rev <= 0xb204031; break; 225 226 case 0xb4040: return cur_rev <= 0xb404031; break; 226 227 case 0xb6000: return cur_rev <= 0xb600031; break; 228 + case 0xb6080: return cur_rev <= 0xb608031; break; 227 229 case 0xb7000: return cur_rev <= 0xb700031; break; 228 230 default: break; 229 231 }
+1 -1
drivers/acpi/cppc_acpi.c
··· 750 750 } 751 751 752 752 /* 753 - * Disregard _CPC if the number of entries in the return pachage is not 753 + * Disregard _CPC if the number of entries in the return package is not 754 754 * as expected, but support future revisions being proper supersets of 755 755 * the v3 and only causing more entries to be returned by _CPC. 756 756 */
+1 -1
drivers/acpi/sbs.c
··· 487 487 if (result) 488 488 return result; 489 489 490 - battery->present = state & (1 << battery->id); 490 + battery->present = !!(state & (1 << battery->id)); 491 491 if (!battery->present) 492 492 return 0; 493 493
+3 -1
drivers/bluetooth/btrtl.c
··· 625 625 len += entry->len; 626 626 } 627 627 628 - if (!len) 628 + if (!len) { 629 + kvfree(ptr); 629 630 return -EPERM; 631 + } 630 632 631 633 *_buf = ptr; 632 634 return len;
+3 -2
drivers/cpuidle/cpuidle-riscv-sbi.c
··· 18 18 #include <linux/module.h> 19 19 #include <linux/of.h> 20 20 #include <linux/slab.h> 21 + #include <linux/string.h> 21 22 #include <linux/platform_device.h> 22 23 #include <linux/pm_domain.h> 23 24 #include <linux/pm_runtime.h> ··· 304 303 drv->states[0].exit_latency = 1; 305 304 drv->states[0].target_residency = 1; 306 305 drv->states[0].power_usage = UINT_MAX; 307 - strcpy(drv->states[0].name, "WFI"); 308 - strcpy(drv->states[0].desc, "RISC-V WFI"); 306 + strscpy(drv->states[0].name, "WFI"); 307 + strscpy(drv->states[0].desc, "RISC-V WFI"); 309 308 310 309 /* 311 310 * If no DT idle states are detected (ret == 0) let the driver
+1 -1
drivers/edac/versalnet_edac.c
··· 433 433 phys_addr_t pfn; 434 434 int err; 435 435 436 - if (WARN_ON_ONCE(ctl_num > NUM_CONTROLLERS)) 436 + if (WARN_ON_ONCE(ctl_num >= NUM_CONTROLLERS)) 437 437 return; 438 438 439 439 mci = priv->mci[ctl_num];
+1
drivers/gpio/gpio-aggregator.c
··· 723 723 chip->get_multiple = gpio_fwd_get_multiple_locked; 724 724 chip->set = gpio_fwd_set; 725 725 chip->set_multiple = gpio_fwd_set_multiple_locked; 726 + chip->set_config = gpio_fwd_set_config; 726 727 chip->to_irq = gpio_fwd_to_irq; 727 728 chip->base = -1; 728 729 chip->ngpio = ngpios;
-19
drivers/gpio/gpio-tb10x.c
··· 50 50 return ioread32(gpio->base + offs); 51 51 } 52 52 53 - static inline void tb10x_reg_write(struct tb10x_gpio *gpio, unsigned int offs, 54 - u32 val) 55 - { 56 - iowrite32(val, gpio->base + offs); 57 - } 58 - 59 - static inline void tb10x_set_bits(struct tb10x_gpio *gpio, unsigned int offs, 60 - u32 mask, u32 val) 61 - { 62 - u32 r; 63 - 64 - guard(gpio_generic_lock_irqsave)(&gpio->chip); 65 - 66 - r = tb10x_reg_read(gpio, offs); 67 - r = (r & ~mask) | (val & mask); 68 - 69 - tb10x_reg_write(gpio, offs, r); 70 - } 71 - 72 53 static int tb10x_gpio_to_irq(struct gpio_chip *chip, unsigned offset) 73 54 { 74 55 struct tb10x_gpio *tb10x_gpio = gpiochip_get_data(chip);
+1 -1
drivers/gpio/gpiolib-swnode.c
··· 41 41 !strcmp(gdev_node->name, GPIOLIB_SWNODE_UNDEFINED_NAME)) 42 42 return ERR_PTR(-ENOENT); 43 43 44 - gdev = gpio_device_find_by_label(gdev_node->name); 44 + gdev = gpio_device_find_by_fwnode(fwnode); 45 45 return gdev ?: ERR_PTR(-EPROBE_DEFER); 46 46 } 47 47
+7 -1
drivers/gpio/gpiolib.c
··· 5296 5296 struct gpio_device *gdev; 5297 5297 loff_t index = *pos; 5298 5298 5299 + s->private = NULL; 5300 + 5299 5301 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 5300 5302 if (!priv) 5301 5303 return NULL; ··· 5331 5329 5332 5330 static void gpiolib_seq_stop(struct seq_file *s, void *v) 5333 5331 { 5334 - struct gpiolib_seq_priv *priv = s->private; 5332 + struct gpiolib_seq_priv *priv; 5333 + 5334 + priv = s->private; 5335 + if (!priv) 5336 + return; 5335 5337 5336 5338 srcu_read_unlock(&gpio_devices_srcu, priv->idx); 5337 5339 kfree(priv);
+1 -1
drivers/gpu/drm/Makefile
··· 245 245 quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@) 246 246 cmd_hdrtest = \ 247 247 $(CC) $(c_flags) -fsyntax-only -x c /dev/null -include $< -include $<; \ 248 - PYTHONDONTWRITEBYTECODE=1 $(KERNELDOC) -none $(if $(CONFIG_WERROR)$(CONFIG_DRM_WERROR),-Werror) $<; \ 248 + PYTHONDONTWRITEBYTECODE=1 $(PYTHON3) $(KERNELDOC) -none $(if $(CONFIG_WERROR)$(CONFIG_DRM_WERROR),-Werror) $<; \ 249 249 touch $@ 250 250 251 251 $(obj)/%.hdrtest: $(src)/%.h FORCE
+4
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
··· 1267 1267 1268 1268 (void)amdgpu_vm_bo_unmap(adev, bo_va, entry->va); 1269 1269 1270 + /* VM entity stopped if process killed, don't clear freed pt bo */ 1271 + if (!amdgpu_vm_ready(vm)) 1272 + return 0; 1273 + 1270 1274 (void)amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update); 1271 1275 1272 1276 (void)amdgpu_sync_fence(sync, bo_va->last_pt_update, GFP_KERNEL);
-4
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 5243 5243 if (amdgpu_sriov_vf(adev)) 5244 5244 amdgpu_virt_release_full_gpu(adev, false); 5245 5245 5246 - r = amdgpu_dpm_notify_rlc_state(adev, false); 5247 - if (r) 5248 - return r; 5249 - 5250 5246 return 0; 5251 5247 } 5252 5248
+7 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
··· 2632 2632 { 2633 2633 struct drm_device *drm_dev = dev_get_drvdata(dev); 2634 2634 struct amdgpu_device *adev = drm_to_adev(drm_dev); 2635 + int r; 2635 2636 2636 - if (amdgpu_acpi_should_gpu_reset(adev)) 2637 - return amdgpu_asic_reset(adev); 2637 + if (amdgpu_acpi_should_gpu_reset(adev)) { 2638 + amdgpu_device_lock_reset_domain(adev->reset_domain); 2639 + r = amdgpu_asic_reset(adev); 2640 + amdgpu_device_unlock_reset_domain(adev->reset_domain); 2641 + return r; 2642 + } 2638 2643 2639 2644 return 0; 2640 2645 }
+4 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
··· 2355 2355 if (!ret && !psp->securedisplay_context.context.resp_status) { 2356 2356 psp->securedisplay_context.context.initialized = true; 2357 2357 mutex_init(&psp->securedisplay_context.mutex); 2358 - } else 2358 + } else { 2359 + /* don't try again */ 2360 + psp->securedisplay_context.context.bin_desc.size_bytes = 0; 2359 2361 return ret; 2362 + } 2360 2363 2361 2364 mutex_lock(&psp->securedisplay_context.mutex); 2362 2365
+2 -1
drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
··· 407 407 return -EINVAL; 408 408 } 409 409 410 - if (adev->kfd.init_complete && !amdgpu_in_reset(adev)) 410 + if (adev->kfd.init_complete && !amdgpu_in_reset(adev) && 411 + !adev->in_suspend) 411 412 flags |= AMDGPU_XCP_OPS_KFD; 412 413 413 414 if (flags & AMDGPU_XCP_OPS_KFD) {
+5
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
··· 3102 3102 return r; 3103 3103 } 3104 3104 3105 + adev->gfx.gfx_supported_reset = 3106 + amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]); 3107 + adev->gfx.compute_supported_reset = 3108 + amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]); 3109 + 3105 3110 return r; 3106 3111 } 3107 3112
+5
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
··· 4399 4399 4400 4400 gfx_v7_0_gpu_early_init(adev); 4401 4401 4402 + adev->gfx.gfx_supported_reset = 4403 + amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]); 4404 + adev->gfx.compute_supported_reset = 4405 + amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]); 4406 + 4402 4407 return r; 4403 4408 } 4404 4409
+5
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
··· 2023 2023 if (r) 2024 2024 return r; 2025 2025 2026 + adev->gfx.gfx_supported_reset = 2027 + amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]); 2028 + adev->gfx.compute_supported_reset = 2029 + amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]); 2030 + 2026 2031 return 0; 2027 2032 } 2028 2033
+3 -1
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
··· 2292 2292 r = amdgpu_xcp_init(adev->xcp_mgr, num_xcp, mode); 2293 2293 2294 2294 } else { 2295 - if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr, 2295 + if (adev->in_suspend) 2296 + amdgpu_xcp_restore_partition_mode(adev->xcp_mgr); 2297 + else if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr, 2296 2298 AMDGPU_XCP_FL_NONE) == 2297 2299 AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) 2298 2300 r = amdgpu_xcp_switch_partition_mode(
+25 -1
drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
··· 142 142 return err; 143 143 } 144 144 145 + static int psp_v11_wait_for_tos_unload(struct psp_context *psp) 146 + { 147 + struct amdgpu_device *adev = psp->adev; 148 + uint32_t sol_reg1, sol_reg2; 149 + int retry_loop; 150 + 151 + /* Wait for the TOS to be unloaded */ 152 + for (retry_loop = 0; retry_loop < 20; retry_loop++) { 153 + sol_reg1 = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); 154 + usleep_range(1000, 2000); 155 + sol_reg2 = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); 156 + if (sol_reg1 == sol_reg2) 157 + return 0; 158 + } 159 + dev_err(adev->dev, "TOS unload failed, C2PMSG_33: %x C2PMSG_81: %x", 160 + RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_33), 161 + RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81)); 162 + 163 + return -ETIME; 164 + } 165 + 145 166 static int psp_v11_0_wait_for_bootloader(struct psp_context *psp) 146 167 { 147 168 struct amdgpu_device *adev = psp->adev; 148 - 149 169 int ret; 150 170 int retry_loop; 171 + 172 + /* For a reset done at the end of S3, only wait for TOS to be unloaded */ 173 + if (adev->in_s3 && !(adev->flags & AMD_IS_APU) && amdgpu_in_reset(adev)) 174 + return psp_v11_wait_for_tos_unload(psp); 151 175 152 176 for (retry_loop = 0; retry_loop < 20; retry_loop++) { 153 177 /* Wait for bootloader to signify that is
+10 -2
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 3563 3563 /* Do mst topology probing after resuming cached state*/ 3564 3564 drm_connector_list_iter_begin(ddev, &iter); 3565 3565 drm_for_each_connector_iter(connector, &iter) { 3566 + bool init = false; 3566 3567 3567 3568 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 3568 3569 continue; ··· 3573 3572 aconnector->mst_root) 3574 3573 continue; 3575 3574 3576 - drm_dp_mst_topology_queue_probe(&aconnector->mst_mgr); 3575 + scoped_guard(mutex, &aconnector->mst_mgr.lock) { 3576 + init = !aconnector->mst_mgr.mst_primary; 3577 + } 3578 + if (init) 3579 + dm_helpers_dp_mst_start_top_mgr(aconnector->dc_link->ctx, 3580 + aconnector->dc_link, false); 3581 + else 3582 + drm_dp_mst_topology_queue_probe(&aconnector->mst_mgr); 3577 3583 } 3578 3584 drm_connector_list_iter_end(&iter); 3579 3585 ··· 8038 8030 "mode %dx%d@%dHz is not native, enabling scaling\n", 8039 8031 adjusted_mode->hdisplay, adjusted_mode->vdisplay, 8040 8032 drm_mode_vrefresh(adjusted_mode)); 8041 - dm_new_connector_state->scaling = RMX_FULL; 8033 + dm_new_connector_state->scaling = RMX_ASPECT; 8042 8034 } 8043 8035 return 0; 8044 8036 }
+2 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
··· 1302 1302 if (connector->status != connector_status_connected) 1303 1303 return -ENODEV; 1304 1304 1305 - if (pipe_ctx != NULL && pipe_ctx->stream_res.tg->funcs->get_odm_combine_segments) 1305 + if (pipe_ctx && pipe_ctx->stream_res.tg && 1306 + pipe_ctx->stream_res.tg->funcs->get_odm_combine_segments) 1306 1307 pipe_ctx->stream_res.tg->funcs->get_odm_combine_segments(pipe_ctx->stream_res.tg, &segments); 1307 1308 1308 1309 seq_printf(m, "%d\n", segments);
-18
drivers/gpu/drm/amd/pm/amdgpu_dpm.c
··· 195 195 return ret; 196 196 } 197 197 198 - int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en) 199 - { 200 - int ret = 0; 201 - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 202 - 203 - if (pp_funcs && pp_funcs->notify_rlc_state) { 204 - mutex_lock(&adev->pm.mutex); 205 - 206 - ret = pp_funcs->notify_rlc_state( 207 - adev->powerplay.pp_handle, 208 - en); 209 - 210 - mutex_unlock(&adev->pm.mutex); 211 - } 212 - 213 - return ret; 214 - } 215 - 216 198 int amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev) 217 199 { 218 200 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+2 -2
drivers/gpu/drm/amd/pm/amdgpu_pm.c
··· 4724 4724 ret = devm_device_add_group(adev->dev, 4725 4725 &amdgpu_pm_policy_attr_group); 4726 4726 if (ret) 4727 - goto err_out0; 4727 + goto err_out1; 4728 4728 } 4729 4729 4730 4730 if (amdgpu_dpm_is_temp_metrics_supported(adev, SMU_TEMP_METRIC_GPUBOARD)) { 4731 4731 ret = devm_device_add_group(adev->dev, 4732 4732 &amdgpu_board_attr_group); 4733 4733 if (ret) 4734 - goto err_out0; 4734 + goto err_out1; 4735 4735 if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT, 4736 4736 (void *)&tmp) != -EOPNOTSUPP) { 4737 4737 sysfs_add_file_to_group(&adev->dev->kobj,
-2
drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
··· 424 424 int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev, 425 425 enum pp_mp1_state mp1_state); 426 426 427 - int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en); 428 - 429 427 int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev); 430 428 431 429 int amdgpu_dpm_baco_exit(struct amdgpu_device *adev);
+6
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
··· 2040 2040 smu->is_apu && (amdgpu_in_reset(adev) || adev->in_s0ix)) 2041 2041 return 0; 2042 2042 2043 + /* vangogh s0ix */ 2044 + if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 5, 0) || 2045 + amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 5, 2)) && 2046 + adev->in_s0ix) 2047 + return 0; 2048 + 2043 2049 /* 2044 2050 * For gpu reset, runpm and hibernation through BACO, 2045 2051 * BACO feature has to be kept enabled.
+3
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
··· 2217 2217 uint32_t total_cu = adev->gfx.config.max_cu_per_sh * 2218 2218 adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines; 2219 2219 2220 + if (adev->in_s0ix) 2221 + return 0; 2222 + 2220 2223 /* allow message will be sent after enable message on Vangogh*/ 2221 2224 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) && 2222 2225 (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
+1 -1
drivers/gpu/drm/i915/Makefile
··· 413 413 # 414 414 # Enable locally for CONFIG_DRM_I915_WERROR=y. See also scripts/Makefile.build 415 415 ifdef CONFIG_DRM_I915_WERROR 416 - cmd_checkdoc = PYTHONDONTWRITEBYTECODE=1 $(KERNELDOC) -none -Werror $< 416 + cmd_checkdoc = PYTHONDONTWRITEBYTECODE=1 $(PYTHON3) $(KERNELDOC) -none -Werror $< 417 417 endif 418 418 419 419 # header test
+2 -2
drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
··· 205 205 206 206 u64 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u64 count) 207 207 { 208 - return div_u64_roundup(count * NSEC_PER_SEC, gt->clock_frequency); 208 + return mul_u64_u32_div(count, NSEC_PER_SEC, gt->clock_frequency); 209 209 } 210 210 211 211 u64 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u64 count) ··· 215 215 216 216 u64 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u64 ns) 217 217 { 218 - return div_u64_roundup(gt->clock_frequency * ns, NSEC_PER_SEC); 218 + return mul_u64_u32_div(ns, gt->clock_frequency, NSEC_PER_SEC); 219 219 } 220 220 221 221 u64 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u64 ns)
+14 -2
drivers/gpu/drm/i915/i915_vma.c
··· 1595 1595 err_vma_res: 1596 1596 i915_vma_resource_free(vma_res); 1597 1597 err_fence: 1598 - if (work) 1599 - dma_fence_work_commit_imm(&work->base); 1598 + if (work) { 1599 + /* 1600 + * When pinning VMA to GGTT on CHV or BXT with VTD enabled, 1601 + * commit VMA binding asynchronously to avoid risk of lock 1602 + * inversion among reservation_ww locks held here and 1603 + * cpu_hotplug_lock acquired from stop_machine(), which we 1604 + * wrap around GGTT updates when running in those environments. 1605 + */ 1606 + if (i915_vma_is_ggtt(vma) && 1607 + intel_vm_no_concurrent_access_wa(vma->vm->i915)) 1608 + dma_fence_work_commit(&work->base); 1609 + else 1610 + dma_fence_work_commit_imm(&work->base); 1611 + } 1600 1612 err_rpm: 1601 1613 intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref); 1602 1614
+1
drivers/gpu/drm/imagination/Kconfig
··· 7 7 depends on DRM 8 8 depends on MMU 9 9 depends on PM 10 + depends on POWER_SEQUENCING || !POWER_SEQUENCING 10 11 select DRM_EXEC 11 12 select DRM_GEM_SHMEM_HELPER 12 13 select DRM_SCHED
+7
drivers/gpu/drm/mediatek/mtk_crtc.c
··· 283 283 unsigned int i; 284 284 unsigned long flags; 285 285 286 + /* release GCE HW usage and start autosuspend */ 287 + pm_runtime_mark_last_busy(cmdq_cl->chan->mbox->dev); 288 + pm_runtime_put_autosuspend(cmdq_cl->chan->mbox->dev); 289 + 286 290 if (data->sta < 0) 287 291 return; 288 292 ··· 621 617 spin_lock_irqsave(&mtk_crtc->config_lock, flags); 622 618 mtk_crtc->config_updating = false; 623 619 spin_unlock_irqrestore(&mtk_crtc->config_lock, flags); 620 + 621 + if (pm_runtime_resume_and_get(mtk_crtc->cmdq_client.chan->mbox->dev) < 0) 622 + goto update_config_out; 624 623 625 624 mbox_send_message(mtk_crtc->cmdq_client.chan, cmdq_handle); 626 625 mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0);
+1 -23
drivers/gpu/drm/mediatek/mtk_plane.c
··· 21 21 22 22 static const u64 modifiers[] = { 23 23 DRM_FORMAT_MOD_LINEAR, 24 - DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 | 25 - AFBC_FORMAT_MOD_SPLIT | 26 - AFBC_FORMAT_MOD_SPARSE), 27 24 DRM_FORMAT_MOD_INVALID, 28 25 }; 29 26 ··· 68 71 uint32_t format, 69 72 uint64_t modifier) 70 73 { 71 - if (modifier == DRM_FORMAT_MOD_LINEAR) 72 - return true; 73 - 74 - if (modifier != DRM_FORMAT_MOD_ARM_AFBC( 75 - AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 | 76 - AFBC_FORMAT_MOD_SPLIT | 77 - AFBC_FORMAT_MOD_SPARSE)) 78 - return false; 79 - 80 - if (format != DRM_FORMAT_XRGB8888 && 81 - format != DRM_FORMAT_ARGB8888 && 82 - format != DRM_FORMAT_BGRX8888 && 83 - format != DRM_FORMAT_BGRA8888 && 84 - format != DRM_FORMAT_ABGR8888 && 85 - format != DRM_FORMAT_XBGR8888 && 86 - format != DRM_FORMAT_RGB888 && 87 - format != DRM_FORMAT_BGR888) 88 - return false; 89 - 90 - return true; 74 + return modifier == DRM_FORMAT_MOD_LINEAR; 91 75 } 92 76 93 77 static void mtk_plane_destroy_state(struct drm_plane *plane,
+3 -1
drivers/gpu/drm/nouveau/dispnv50/disp.c
··· 2867 2867 } 2868 2868 2869 2869 /* Assign the correct format modifiers */ 2870 - if (disp->disp->object.oclass >= TU102_DISP) 2870 + if (disp->disp->object.oclass >= GB202_DISP) 2871 + nouveau_display(dev)->format_modifiers = wndwca7e_modifiers; 2872 + else if (disp->disp->object.oclass >= TU102_DISP) 2871 2873 nouveau_display(dev)->format_modifiers = wndwc57e_modifiers; 2872 2874 else 2873 2875 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI)
+1
drivers/gpu/drm/nouveau/dispnv50/disp.h
··· 104 104 extern const u64 disp50xx_modifiers[]; 105 105 extern const u64 disp90xx_modifiers[]; 106 106 extern const u64 wndwc57e_modifiers[]; 107 + extern const u64 wndwca7e_modifiers[]; 107 108 #endif
+22 -2
drivers/gpu/drm/nouveau/dispnv50/wndw.c
··· 786 786 } 787 787 788 788 /* This function assumes the format has already been validated against the plane 789 - * and the modifier was validated against the device-wides modifier list at FB 789 + * and the modifier was validated against the device-wide modifier list at FB 790 790 * creation time. 791 791 */ 792 792 static bool nv50_plane_format_mod_supported(struct drm_plane *plane, 793 793 u32 format, u64 modifier) 794 794 { 795 795 struct nouveau_drm *drm = nouveau_drm(plane->dev); 796 + const struct drm_format_info *info = drm_format_info(format); 796 797 uint8_t i; 797 798 798 799 /* All chipsets can display all formats in linear layout */ ··· 801 800 return true; 802 801 803 802 if (drm->client.device.info.chipset < 0xc0) { 804 - const struct drm_format_info *info = drm_format_info(format); 805 803 const uint8_t kind = (modifier >> 12) & 0xff; 806 804 807 805 if (!format) return false; 808 806 809 807 for (i = 0; i < info->num_planes; i++) 810 808 if ((info->cpp[i] != 4) && kind != 0x70) return false; 809 + } else if (drm->client.device.info.chipset >= 0x1b2) { 810 + const uint8_t slayout = ((modifier >> 22) & 0x1) | 811 + ((modifier >> 25) & 0x6); 812 + 813 + if (!format) 814 + return false; 815 + 816 + /* 817 + * Note in practice this implies only formats where cpp is equal 818 + * for each plane, or >= 4 for all planes, are supported. 819 + */ 820 + for (i = 0; i < info->num_planes; i++) { 821 + if (((info->cpp[i] == 2) && slayout != 3) || 822 + ((info->cpp[i] == 1) && slayout != 2) || 823 + ((info->cpp[i] >= 4) && slayout != 1)) 824 + return false; 825 + 826 + /* 24-bit not supported. It has yet another layout */ 827 + WARN_ON(info->cpp[i] == 3); 828 + } 811 829 } 812 830 813 831 return true;
+33
drivers/gpu/drm/nouveau/dispnv50/wndwca7e.c
··· 179 179 return 0; 180 180 } 181 181 182 + /**************************************************************** 183 + * Log2(block height) ----------------------------+ * 184 + * Page Kind ----------------------------------+ | * 185 + * Gob Height/Page Kind Generation ------+ | | * 186 + * Sector layout -------+ | | | * 187 + * Compression ------+ | | | | */ 188 + const u64 wndwca7e_modifiers[] = { /* | | | | | */ 189 + /* 4cpp+ modifiers */ 190 + DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 0), 191 + DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 1), 192 + DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 2), 193 + DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 3), 194 + DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 4), 195 + DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 5), 196 + /* 1cpp/8bpp modifiers */ 197 + DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 2, 2, 0x06, 0), 198 + DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 2, 2, 0x06, 1), 199 + DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 2, 2, 0x06, 2), 200 + DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 2, 2, 0x06, 3), 201 + DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 2, 2, 0x06, 4), 202 + DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 2, 2, 0x06, 5), 203 + /* 2cpp/16bpp modifiers */ 204 + DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 3, 2, 0x06, 0), 205 + DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 3, 2, 0x06, 1), 206 + DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 3, 2, 0x06, 2), 207 + DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 3, 2, 0x06, 3), 208 + DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 3, 2, 0x06, 4), 209 + DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 3, 2, 0x06, 5), 210 + /* All formats support linear */ 211 + DRM_FORMAT_MOD_LINEAR, 212 + DRM_FORMAT_MOD_INVALID 213 + }; 214 + 182 215 static const struct nv50_wndw_func 183 216 wndwca7e = { 184 217 .acquire = wndwc37e_acquire,
+19 -15
drivers/gpu/drm/scheduler/sched_entity.c
··· 173 173 } 174 174 EXPORT_SYMBOL(drm_sched_entity_error); 175 175 176 + static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f, 177 + struct dma_fence_cb *cb); 178 + 176 179 static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk) 177 180 { 178 181 struct drm_sched_job *job = container_of(wrk, typeof(*job), work); 179 - 180 - drm_sched_fence_scheduled(job->s_fence, NULL); 181 - drm_sched_fence_finished(job->s_fence, -ESRCH); 182 - WARN_ON(job->s_fence->parent); 183 - job->sched->ops->free_job(job); 184 - } 185 - 186 - /* Signal the scheduler finished fence when the entity in question is killed. */ 187 - static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f, 188 - struct dma_fence_cb *cb) 189 - { 190 - struct drm_sched_job *job = container_of(cb, struct drm_sched_job, 191 - finish_cb); 182 + struct dma_fence *f; 192 183 unsigned long index; 193 - 194 - dma_fence_put(f); 195 184 196 185 /* Wait for all dependencies to avoid data corruptions */ 197 186 xa_for_each(&job->dependencies, index, f) { ··· 208 219 209 220 dma_fence_put(f); 210 221 } 222 + 223 + drm_sched_fence_scheduled(job->s_fence, NULL); 224 + drm_sched_fence_finished(job->s_fence, -ESRCH); 225 + WARN_ON(job->s_fence->parent); 226 + job->sched->ops->free_job(job); 227 + } 228 + 229 + /* Signal the scheduler finished fence when the entity in question is killed. */ 230 + static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f, 231 + struct dma_fence_cb *cb) 232 + { 233 + struct drm_sched_job *job = container_of(cb, struct drm_sched_job, 234 + finish_cb); 235 + 236 + dma_fence_put(f); 211 237 212 238 INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work); 213 239 schedule_work(&job->work);
+1
drivers/gpu/drm/tiny/Kconfig
··· 85 85 config DRM_PIXPAPER 86 86 tristate "DRM support for PIXPAPER display panels" 87 87 depends on DRM && SPI 88 + depends on MMU 88 89 select DRM_CLIENT_SELECTION 89 90 select DRM_GEM_SHMEM_HELPER 90 91 select DRM_KMS_HELPER
+7 -7
drivers/gpu/drm/xe/xe_device.c
··· 988 988 989 989 drm_dbg(&xe->drm, "Shutting down device\n"); 990 990 991 - if (xe_driver_flr_disabled(xe)) { 992 - xe_display_pm_shutdown(xe); 991 + xe_display_pm_shutdown(xe); 993 992 994 - xe_irq_suspend(xe); 993 + xe_irq_suspend(xe); 995 994 996 - for_each_gt(gt, xe, id) 997 - xe_gt_shutdown(gt); 995 + for_each_gt(gt, xe, id) 996 + xe_gt_shutdown(gt); 998 997 999 - xe_display_pm_shutdown_late(xe); 1000 - } else { 998 + xe_display_pm_shutdown_late(xe); 999 + 1000 + if (!xe_driver_flr_disabled(xe)) { 1001 1001 /* BOOM! */ 1002 1002 __xe_driver_flr(xe); 1003 1003 }
+2 -1
drivers/gpu/drm/xe/xe_exec.c
··· 165 165 166 166 for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) { 167 167 err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs], 168 - &syncs_user[num_syncs], SYNC_PARSE_FLAG_EXEC | 168 + &syncs_user[num_syncs], NULL, 0, 169 + SYNC_PARSE_FLAG_EXEC | 169 170 (xe_vm_in_lr_mode(vm) ? 170 171 SYNC_PARSE_FLAG_LR_MODE : 0)); 171 172 if (err)
+14
drivers/gpu/drm/xe/xe_exec_queue.c
··· 10 10 #include <drm/drm_device.h> 11 11 #include <drm/drm_drv.h> 12 12 #include <drm/drm_file.h> 13 + #include <drm/drm_syncobj.h> 13 14 #include <uapi/drm/xe_drm.h> 14 15 15 16 #include "xe_dep_scheduler.h" ··· 325 324 } 326 325 xe_vm_put(migrate_vm); 327 326 327 + if (!IS_ERR(q)) { 328 + int err = drm_syncobj_create(&q->ufence_syncobj, 329 + DRM_SYNCOBJ_CREATE_SIGNALED, 330 + NULL); 331 + if (err) { 332 + xe_exec_queue_put(q); 333 + return ERR_PTR(err); 334 + } 335 + } 336 + 328 337 return q; 329 338 } 330 339 ALLOW_ERROR_INJECTION(xe_exec_queue_create_bind, ERRNO); ··· 343 332 { 344 333 struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount); 345 334 struct xe_exec_queue *eq, *next; 335 + 336 + if (q->ufence_syncobj) 337 + drm_syncobj_put(q->ufence_syncobj); 346 338 347 339 if (xe_exec_queue_uses_pxp(q)) 348 340 xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q);
+7
drivers/gpu/drm/xe/xe_exec_queue_types.h
··· 15 15 #include "xe_hw_fence_types.h" 16 16 #include "xe_lrc_types.h" 17 17 18 + struct drm_syncobj; 18 19 struct xe_execlist_exec_queue; 19 20 struct xe_gt; 20 21 struct xe_guc_exec_queue; ··· 155 154 /** @pxp.link: link into the list of PXP exec queues */ 156 155 struct list_head link; 157 156 } pxp; 157 + 158 + /** @ufence_syncobj: User fence syncobj */ 159 + struct drm_syncobj *ufence_syncobj; 160 + 161 + /** @ufence_timeline_value: User fence timeline value */ 162 + u64 ufence_timeline_value; 158 163 159 164 /** @ops: submission backend exec queue operations */ 160 165 const struct xe_exec_queue_ops *ops;
+3
drivers/gpu/drm/xe/xe_guc_ct.c
··· 200 200 { 201 201 struct xe_guc_ct *ct = arg; 202 202 203 + #if IS_ENABLED(CONFIG_DRM_XE_DEBUG) 204 + cancel_work_sync(&ct->dead.worker); 205 + #endif 203 206 ct_exit_safe_mode(ct); 204 207 destroy_workqueue(ct->g2h_wq); 205 208 xa_destroy(&ct->fence_lookup);
+30 -15
drivers/gpu/drm/xe/xe_oa.c
··· 10 10 11 11 #include <drm/drm_drv.h> 12 12 #include <drm/drm_managed.h> 13 + #include <drm/drm_syncobj.h> 13 14 #include <uapi/drm/xe_drm.h> 14 15 15 16 #include <generated/xe_wa_oob.h> ··· 1390 1389 return 0; 1391 1390 } 1392 1391 1393 - static int xe_oa_parse_syncs(struct xe_oa *oa, struct xe_oa_open_param *param) 1392 + static int xe_oa_parse_syncs(struct xe_oa *oa, 1393 + struct xe_oa_stream *stream, 1394 + struct xe_oa_open_param *param) 1394 1395 { 1395 1396 int ret, num_syncs, num_ufence = 0; 1396 1397 ··· 1412 1409 1413 1410 for (num_syncs = 0; num_syncs < param->num_syncs; num_syncs++) { 1414 1411 ret = xe_sync_entry_parse(oa->xe, param->xef, &param->syncs[num_syncs], 1415 - &param->syncs_user[num_syncs], 0); 1412 + &param->syncs_user[num_syncs], 1413 + stream->ufence_syncobj, 1414 + ++stream->ufence_timeline_value, 0); 1416 1415 if (ret) 1417 1416 goto err_syncs; 1418 1417 ··· 1544 1539 return -ENODEV; 1545 1540 1546 1541 param.xef = stream->xef; 1547 - err = xe_oa_parse_syncs(stream->oa, &param); 1542 + err = xe_oa_parse_syncs(stream->oa, stream, &param); 1548 1543 if (err) 1549 1544 goto err_config_put; 1550 1545 ··· 1640 1635 if (stream->exec_q) 1641 1636 xe_exec_queue_put(stream->exec_q); 1642 1637 1638 + drm_syncobj_put(stream->ufence_syncobj); 1643 1639 kfree(stream); 1644 1640 } 1645 1641 ··· 1832 1826 struct xe_oa_open_param *param) 1833 1827 { 1834 1828 struct xe_oa_stream *stream; 1829 + struct drm_syncobj *ufence_syncobj; 1835 1830 int stream_fd; 1836 1831 int ret; 1837 1832 ··· 1843 1836 goto exit; 1844 1837 } 1845 1838 1839 + ret = drm_syncobj_create(&ufence_syncobj, DRM_SYNCOBJ_CREATE_SIGNALED, 1840 + NULL); 1841 + if (ret) 1842 + goto exit; 1843 + 1846 1844 stream = kzalloc(sizeof(*stream), GFP_KERNEL); 1847 1845 if (!stream) { 1848 1846 ret = -ENOMEM; 1849 - goto exit; 1847 + goto err_syncobj; 1850 1848 } 1851 - 1849 + stream->ufence_syncobj = ufence_syncobj; 1852 1850 stream->oa = oa; 1853 - ret = xe_oa_stream_init(stream, param); 
1851 + 1852 + ret = xe_oa_parse_syncs(oa, stream, param); 1854 1853 if (ret) 1855 1854 goto err_free; 1855 + 1856 + ret = xe_oa_stream_init(stream, param); 1857 + if (ret) { 1858 + while (param->num_syncs--) 1859 + xe_sync_entry_cleanup(&param->syncs[param->num_syncs]); 1860 + kfree(param->syncs); 1861 + goto err_free; 1862 + } 1856 1863 1857 1864 if (!param->disabled) { 1858 1865 ret = xe_oa_enable_locked(stream); ··· 1891 1870 xe_oa_stream_destroy(stream); 1892 1871 err_free: 1893 1872 kfree(stream); 1873 + err_syncobj: 1874 + drm_syncobj_put(ufence_syncobj); 1894 1875 exit: 1895 1876 return ret; 1896 1877 } ··· 2106 2083 goto err_exec_q; 2107 2084 } 2108 2085 2109 - ret = xe_oa_parse_syncs(oa, &param); 2110 - if (ret) 2111 - goto err_exec_q; 2112 - 2113 2086 mutex_lock(&param.hwe->gt->oa.gt_lock); 2114 2087 ret = xe_oa_stream_open_ioctl_locked(oa, &param); 2115 2088 mutex_unlock(&param.hwe->gt->oa.gt_lock); 2116 2089 if (ret < 0) 2117 - goto err_sync_cleanup; 2090 + goto err_exec_q; 2118 2091 2119 2092 return ret; 2120 2093 2121 - err_sync_cleanup: 2122 - while (param.num_syncs--) 2123 - xe_sync_entry_cleanup(&param.syncs[param.num_syncs]); 2124 - kfree(param.syncs); 2125 2094 err_exec_q: 2126 2095 if (param.exec_q) 2127 2096 xe_exec_queue_put(param.exec_q);
+8
drivers/gpu/drm/xe/xe_oa_types.h
··· 15 15 #include "regs/xe_reg_defs.h" 16 16 #include "xe_hw_engine_types.h" 17 17 18 + struct drm_syncobj; 19 + 18 20 #define DEFAULT_XE_OA_BUFFER_SIZE SZ_16M 19 21 20 22 enum xe_oa_report_header { ··· 249 247 250 248 /** @xef: xe_file with which the stream was opened */ 251 249 struct xe_file *xef; 250 + 251 + /** @ufence_syncobj: User fence syncobj */ 252 + struct drm_syncobj *ufence_syncobj; 253 + 254 + /** @ufence_timeline_value: User fence timeline value */ 255 + u64 ufence_timeline_value; 252 256 253 257 /** @last_fence: fence to use in stream destroy when needed */ 254 258 struct dma_fence *last_fence;
+15 -2
drivers/gpu/drm/xe/xe_sync.c
··· 113 113 int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef, 114 114 struct xe_sync_entry *sync, 115 115 struct drm_xe_sync __user *sync_user, 116 + struct drm_syncobj *ufence_syncobj, 117 + u64 ufence_timeline_value, 116 118 unsigned int flags) 117 119 { 118 120 struct drm_xe_sync sync_in; ··· 194 192 if (exec) { 195 193 sync->addr = sync_in.addr; 196 194 } else { 195 + sync->ufence_timeline_value = ufence_timeline_value; 197 196 sync->ufence = user_fence_create(xe, sync_in.addr, 198 197 sync_in.timeline_value); 199 198 if (XE_IOCTL_DBG(xe, IS_ERR(sync->ufence))) 200 199 return PTR_ERR(sync->ufence); 200 + sync->ufence_chain_fence = dma_fence_chain_alloc(); 201 + if (!sync->ufence_chain_fence) 202 + return -ENOMEM; 203 + sync->ufence_syncobj = ufence_syncobj; 201 204 } 202 205 203 206 break; ··· 246 239 } else if (sync->ufence) { 247 240 int err; 248 241 249 - dma_fence_get(fence); 242 + drm_syncobj_add_point(sync->ufence_syncobj, 243 + sync->ufence_chain_fence, 244 + fence, sync->ufence_timeline_value); 245 + sync->ufence_chain_fence = NULL; 246 + 247 + fence = drm_syncobj_fence_get(sync->ufence_syncobj); 250 248 user_fence_get(sync->ufence); 251 249 err = dma_fence_add_callback(fence, &sync->ufence->cb, 252 250 user_fence_cb); ··· 271 259 drm_syncobj_put(sync->syncobj); 272 260 dma_fence_put(sync->fence); 273 261 dma_fence_chain_free(sync->chain_fence); 274 - if (sync->ufence) 262 + dma_fence_chain_free(sync->ufence_chain_fence); 263 + if (!IS_ERR_OR_NULL(sync->ufence)) 275 264 user_fence_put(sync->ufence); 276 265 } 277 266
+3
drivers/gpu/drm/xe/xe_sync.h
··· 8 8 9 9 #include "xe_sync_types.h" 10 10 11 + struct drm_syncobj; 11 12 struct xe_device; 12 13 struct xe_exec_queue; 13 14 struct xe_file; ··· 22 21 int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef, 23 22 struct xe_sync_entry *sync, 24 23 struct drm_xe_sync __user *sync_user, 24 + struct drm_syncobj *ufence_syncobj, 25 + u64 ufence_timeline_value, 25 26 unsigned int flags); 26 27 int xe_sync_entry_add_deps(struct xe_sync_entry *sync, 27 28 struct xe_sched_job *job);
+3
drivers/gpu/drm/xe/xe_sync_types.h
··· 18 18 struct drm_syncobj *syncobj; 19 19 struct dma_fence *fence; 20 20 struct dma_fence_chain *chain_fence; 21 + struct dma_fence_chain *ufence_chain_fence; 22 + struct drm_syncobj *ufence_syncobj; 21 23 struct xe_user_fence *ufence; 22 24 u64 addr; 23 25 u64 timeline_value; 26 + u64 ufence_timeline_value; 24 27 u32 type; 25 28 u32 flags; 26 29 };
+4
drivers/gpu/drm/xe/xe_vm.c
··· 3606 3606 3607 3607 syncs_user = u64_to_user_ptr(args->syncs); 3608 3608 for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) { 3609 + struct xe_exec_queue *__q = q ?: vm->q[0]; 3610 + 3609 3611 err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs], 3610 3612 &syncs_user[num_syncs], 3613 + __q->ufence_syncobj, 3614 + ++__q->ufence_timeline_value, 3611 3615 (xe_vm_in_lr_mode(vm) ? 3612 3616 SYNC_PARSE_FLAG_LR_MODE : 0) | 3613 3617 (!args->num_binds ?
+23 -27
drivers/i2c/muxes/i2c-mux-pca954x.c
··· 118 118 raw_spinlock_t lock; 119 119 struct regulator *supply; 120 120 121 + struct gpio_desc *reset_gpio; 121 122 struct reset_control *reset_cont; 122 123 }; 123 124 ··· 316 315 return 1 << chan; 317 316 } 318 317 319 - static void pca954x_reset_assert(struct pca954x *data) 320 - { 321 - if (data->reset_cont) 322 - reset_control_assert(data->reset_cont); 323 - } 324 - 325 - static void pca954x_reset_deassert(struct pca954x *data) 326 - { 327 - if (data->reset_cont) 328 - reset_control_deassert(data->reset_cont); 329 - } 330 - 331 - static void pca954x_reset_mux(struct pca954x *data) 332 - { 333 - pca954x_reset_assert(data); 334 - udelay(1); 335 - pca954x_reset_deassert(data); 336 - } 337 - 338 318 static int pca954x_select_chan(struct i2c_mux_core *muxc, u32 chan) 339 319 { 340 320 struct pca954x *data = i2c_mux_priv(muxc); ··· 329 347 ret = pca954x_reg_write(muxc->parent, client, regval); 330 348 data->last_chan = ret < 0 ? 0 : regval; 331 349 } 332 - if (ret == -ETIMEDOUT && data->reset_cont) 333 - pca954x_reset_mux(data); 334 350 335 351 return ret; 336 352 } ··· 338 358 struct pca954x *data = i2c_mux_priv(muxc); 339 359 struct i2c_client *client = data->client; 340 360 s32 idle_state; 341 - int ret = 0; 342 361 343 362 idle_state = READ_ONCE(data->idle_state); 344 363 if (idle_state >= 0) ··· 347 368 if (idle_state == MUX_IDLE_DISCONNECT) { 348 369 /* Deselect active channel */ 349 370 data->last_chan = 0; 350 - ret = pca954x_reg_write(muxc->parent, client, 351 - data->last_chan); 352 - if (ret == -ETIMEDOUT && data->reset_cont) 353 - pca954x_reset_mux(data); 371 + return pca954x_reg_write(muxc->parent, client, 372 + data->last_chan); 354 373 } 355 374 356 375 /* otherwise leave as-is */ ··· 527 550 if (IS_ERR(data->reset_cont)) 528 551 return dev_err_probe(dev, PTR_ERR(data->reset_cont), 529 552 "Failed to get reset\n"); 553 + else if (data->reset_cont) 554 + return 0; 555 + 556 + /* 557 + * fallback to legacy reset-gpios 558 + */ 559 + data->reset_gpio 
= devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); 560 + if (IS_ERR(data->reset_gpio)) { 561 + return dev_err_probe(dev, PTR_ERR(data->reset_gpio), 562 + "Failed to get reset gpio"); 563 + } 530 564 531 565 return 0; 566 + } 567 + 568 + static void pca954x_reset_deassert(struct pca954x *data) 569 + { 570 + if (data->reset_cont) 571 + reset_control_deassert(data->reset_cont); 572 + else 573 + gpiod_set_value_cansleep(data->reset_gpio, 0); 532 574 } 533 575 534 576 /* ··· 589 593 if (ret) 590 594 goto fail_cleanup; 591 595 592 - if (data->reset_cont) { 596 + if (data->reset_cont || data->reset_gpio) { 593 597 udelay(1); 594 598 pca954x_reset_deassert(data); 595 599 /* Give the chip some time to recover. */
+1
drivers/infiniband/core/uverbs_std_types_cq.c
··· 206 206 return ret; 207 207 208 208 err_free: 209 + ib_umem_release(umem); 209 210 rdma_restrack_put(&cq->res); 210 211 kfree(cq); 211 212 err_event_file:
+3 -8
drivers/infiniband/hw/bnxt_re/ib_verbs.c
··· 913 913 spin_unlock_irqrestore(&qp->scq->cq_lock, flags); 914 914 } 915 915 916 - static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp) 916 + static void bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp) 917 917 { 918 918 struct bnxt_re_qp *gsi_sqp; 919 919 struct bnxt_re_ah *gsi_sah; ··· 933 933 934 934 ibdev_dbg(&rdev->ibdev, "Destroy the shadow QP\n"); 935 935 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp); 936 - if (rc) { 936 + if (rc) 937 937 ibdev_err(&rdev->ibdev, "Destroy Shadow QP failed"); 938 - goto fail; 939 - } 938 + 940 939 bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp); 941 940 942 941 /* remove from active qp list */ ··· 950 951 rdev->gsi_ctx.gsi_sqp = NULL; 951 952 rdev->gsi_ctx.gsi_sah = NULL; 952 953 rdev->gsi_ctx.sqp_tbl = NULL; 953 - 954 - return 0; 955 - fail: 956 - return rc; 957 954 } 958 955 959 956 static void bnxt_re_del_unique_gid(struct bnxt_re_dev *rdev)
+7 -9
drivers/infiniband/hw/efa/efa_verbs.c
··· 1216 1216 if (umem->length < cq->size) { 1217 1217 ibdev_dbg(&dev->ibdev, "External memory too small\n"); 1218 1218 err = -EINVAL; 1219 - goto err_free_mem; 1219 + goto err_out; 1220 1220 } 1221 1221 1222 1222 if (!ib_umem_is_contiguous(umem)) { 1223 1223 ibdev_dbg(&dev->ibdev, "Non contiguous CQ unsupported\n"); 1224 1224 err = -EINVAL; 1225 - goto err_free_mem; 1225 + goto err_out; 1226 1226 } 1227 1227 1228 1228 cq->cpu_addr = NULL; ··· 1251 1251 1252 1252 err = efa_com_create_cq(&dev->edev, &params, &result); 1253 1253 if (err) 1254 - goto err_free_mem; 1254 + goto err_free_mapped; 1255 1255 1256 1256 resp.db_off = result.db_off; 1257 1257 resp.cq_idx = result.cq_idx; ··· 1299 1299 efa_cq_user_mmap_entries_remove(cq); 1300 1300 err_destroy_cq: 1301 1301 efa_destroy_cq_idx(dev, cq->cq_idx); 1302 - err_free_mem: 1303 - if (umem) 1304 - ib_umem_release(umem); 1305 - else 1306 - efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size, DMA_FROM_DEVICE); 1307 - 1302 + err_free_mapped: 1303 + if (!umem) 1304 + efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size, 1305 + DMA_FROM_DEVICE); 1308 1306 err_out: 1309 1307 atomic64_inc(&dev->stats.create_cq_err); 1310 1308 return err;
+55 -3
drivers/infiniband/hw/hns/hns_roce_cq.c
··· 30 30 * SOFTWARE. 31 31 */ 32 32 33 + #include <linux/pci.h> 33 34 #include <rdma/ib_umem.h> 34 35 #include <rdma/uverbs_ioctl.h> 35 36 #include "hns_roce_device.h" 36 37 #include "hns_roce_cmd.h" 37 38 #include "hns_roce_hem.h" 38 39 #include "hns_roce_common.h" 40 + 41 + void hns_roce_put_cq_bankid_for_uctx(struct hns_roce_ucontext *uctx) 42 + { 43 + struct hns_roce_dev *hr_dev = to_hr_dev(uctx->ibucontext.device); 44 + struct hns_roce_cq_table *cq_table = &hr_dev->cq_table; 45 + 46 + if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09) 47 + return; 48 + 49 + mutex_lock(&cq_table->bank_mutex); 50 + cq_table->ctx_num[uctx->cq_bank_id]--; 51 + mutex_unlock(&cq_table->bank_mutex); 52 + } 53 + 54 + void hns_roce_get_cq_bankid_for_uctx(struct hns_roce_ucontext *uctx) 55 + { 56 + struct hns_roce_dev *hr_dev = to_hr_dev(uctx->ibucontext.device); 57 + struct hns_roce_cq_table *cq_table = &hr_dev->cq_table; 58 + u32 least_load = cq_table->ctx_num[0]; 59 + u8 bankid = 0; 60 + u8 i; 61 + 62 + if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09) 63 + return; 64 + 65 + mutex_lock(&cq_table->bank_mutex); 66 + for (i = 1; i < HNS_ROCE_CQ_BANK_NUM; i++) { 67 + if (cq_table->ctx_num[i] < least_load) { 68 + least_load = cq_table->ctx_num[i]; 69 + bankid = i; 70 + } 71 + } 72 + cq_table->ctx_num[bankid]++; 73 + mutex_unlock(&cq_table->bank_mutex); 74 + 75 + uctx->cq_bank_id = bankid; 76 + } 39 77 40 78 static u8 get_least_load_bankid_for_cq(struct hns_roce_bank *bank) 41 79 { ··· 93 55 return bankid; 94 56 } 95 57 96 - static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq) 58 + static u8 select_cq_bankid(struct hns_roce_dev *hr_dev, 59 + struct hns_roce_bank *bank, struct ib_udata *udata) 60 + { 61 + struct hns_roce_ucontext *uctx = udata ? 62 + rdma_udata_to_drv_context(udata, struct hns_roce_ucontext, 63 + ibucontext) : NULL; 64 + 65 + if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) 66 + return uctx ? 
uctx->cq_bank_id : 0; 67 + 68 + return get_least_load_bankid_for_cq(bank); 69 + } 70 + 71 + static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq, 72 + struct ib_udata *udata) 97 73 { 98 74 struct hns_roce_cq_table *cq_table = &hr_dev->cq_table; 99 75 struct hns_roce_bank *bank; ··· 115 63 int id; 116 64 117 65 mutex_lock(&cq_table->bank_mutex); 118 - bankid = get_least_load_bankid_for_cq(cq_table->bank); 66 + bankid = select_cq_bankid(hr_dev, cq_table->bank, udata); 119 67 bank = &cq_table->bank[bankid]; 120 68 121 69 id = ida_alloc_range(&bank->ida, bank->min, bank->max, GFP_KERNEL); ··· 448 396 goto err_cq_buf; 449 397 } 450 398 451 - ret = alloc_cqn(hr_dev, hr_cq); 399 + ret = alloc_cqn(hr_dev, hr_cq, udata); 452 400 if (ret) { 453 401 ibdev_err(ibdev, "failed to alloc CQN, ret = %d.\n", ret); 454 402 goto err_cq_db;
+4
drivers/infiniband/hw/hns/hns_roce_device.h
··· 217 217 struct mutex page_mutex; 218 218 struct hns_user_mmap_entry *db_mmap_entry; 219 219 u32 config; 220 + u8 cq_bank_id; 220 221 }; 221 222 222 223 struct hns_roce_pd { ··· 496 495 struct hns_roce_hem_table table; 497 496 struct hns_roce_bank bank[HNS_ROCE_CQ_BANK_NUM]; 498 497 struct mutex bank_mutex; 498 + u32 ctx_num[HNS_ROCE_CQ_BANK_NUM]; 499 499 }; 500 500 501 501 struct hns_roce_srq_table { ··· 1307 1305 size_t length, 1308 1306 enum hns_roce_mmap_type mmap_type); 1309 1307 bool check_sl_valid(struct hns_roce_dev *hr_dev, u8 sl); 1308 + void hns_roce_put_cq_bankid_for_uctx(struct hns_roce_ucontext *uctx); 1309 + void hns_roce_get_cq_bankid_for_uctx(struct hns_roce_ucontext *uctx); 1310 1310 1311 1311 #endif /* _HNS_ROCE_DEVICE_H */
+8 -4
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
··· 165 165 hr_reg_write(fseg, FRMR_PBL_BUF_PG_SZ, 166 166 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift)); 167 167 hr_reg_clear(fseg, FRMR_BLK_MODE); 168 + hr_reg_clear(fseg, FRMR_BLOCK_SIZE); 169 + hr_reg_clear(fseg, FRMR_ZBVA); 168 170 } 169 171 170 172 static void set_atomic_seg(const struct ib_send_wr *wr, ··· 340 338 struct hns_roce_qp *qp = to_hr_qp(ibqp); 341 339 int j = 0; 342 340 int i; 343 - 344 - hr_reg_write(rc_sq_wqe, RC_SEND_WQE_MSG_START_SGE_IDX, 345 - (*sge_ind) & (qp->sge.sge_cnt - 1)); 346 341 347 342 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_INLINE, 348 343 !!(wr->send_flags & IB_SEND_INLINE)); ··· 585 586 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_CQE, 586 587 (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0); 587 588 589 + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_MSG_START_SGE_IDX, 590 + curr_idx & (qp->sge.sge_cnt - 1)); 591 + 588 592 if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP || 589 593 wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) { 590 594 if (msg_len != ATOMIC_WR_LEN) ··· 735 733 qp->sq.wrid[wqe_idx] = wr->wr_id; 736 734 owner_bit = 737 735 ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1); 736 + 737 + /* RC and UD share the same DirectWQE field layout */ 738 + ((struct hns_roce_v2_rc_send_wqe *)wqe)->byte_4 = 0; 738 739 739 740 /* Corresponding to the QP type, wqe process separately */ 740 741 if (ibqp->qp_type == IB_QPT_RC) ··· 7052 7047 dev_err(hr_dev->dev, "RoCE Engine init failed!\n"); 7053 7048 goto error_failed_roce_init; 7054 7049 } 7055 - 7056 7050 7057 7051 handle->priv = hr_dev; 7058 7052
+4
drivers/infiniband/hw/hns/hns_roce_main.c
··· 425 425 if (ret) 426 426 goto error_fail_copy_to_udata; 427 427 428 + hns_roce_get_cq_bankid_for_uctx(context); 429 + 428 430 return 0; 429 431 430 432 error_fail_copy_to_udata: ··· 448 446 { 449 447 struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext); 450 448 struct hns_roce_dev *hr_dev = to_hr_dev(ibcontext->device); 449 + 450 + hns_roce_put_cq_bankid_for_uctx(context); 451 451 452 452 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB || 453 453 hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB)
-2
drivers/infiniband/hw/hns/hns_roce_qp.c
··· 662 662 663 663 hr_qp->sq.wqe_shift = ucmd->log_sq_stride; 664 664 hr_qp->sq.wqe_cnt = cnt; 665 - cap->max_send_sge = hr_qp->sq.max_gs; 666 665 667 666 return 0; 668 667 } ··· 743 744 744 745 /* sync the parameters of kernel QP to user's configuration */ 745 746 cap->max_send_wr = cnt; 746 - cap->max_send_sge = hr_qp->sq.max_gs; 747 747 748 748 return 0; 749 749 }
+1 -1
drivers/infiniband/hw/irdma/pble.c
··· 71 71 static void get_sd_pd_idx(struct irdma_hmc_pble_rsrc *pble_rsrc, 72 72 struct sd_pd_idx *idx) 73 73 { 74 - idx->sd_idx = (u32)pble_rsrc->next_fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE; 74 + idx->sd_idx = pble_rsrc->next_fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE; 75 75 idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr / IRDMA_HMC_PAGED_BP_SIZE); 76 76 idx->rel_pd_idx = (idx->pd_idx % IRDMA_HMC_PD_CNT_IN_SD); 77 77 }
+1 -1
drivers/infiniband/hw/irdma/type.h
··· 706 706 u32 vchnl_ver; 707 707 u16 num_vfs; 708 708 u16 hmc_fn_id; 709 - u8 vf_id; 709 + u16 vf_id; 710 710 bool privileged:1; 711 711 bool vchnl_up:1; 712 712 bool ceq_valid:1;
+1
drivers/infiniband/hw/irdma/verbs.c
··· 2503 2503 spin_lock_init(&iwcq->lock); 2504 2504 INIT_LIST_HEAD(&iwcq->resize_list); 2505 2505 INIT_LIST_HEAD(&iwcq->cmpl_generated); 2506 + iwcq->cq_num = cq_num; 2506 2507 info.dev = dev; 2507 2508 ukinfo->cq_size = max(entries, 4); 2508 2509 ukinfo->cq_id = cq_num;
+1 -1
drivers/infiniband/hw/irdma/verbs.h
··· 140 140 struct irdma_cq { 141 141 struct ib_cq ibcq; 142 142 struct irdma_sc_cq sc_cq; 143 - u16 cq_num; 143 + u32 cq_num; 144 144 bool user_mode; 145 145 atomic_t armed; 146 146 enum irdma_cmpl_notify last_notify;
+3 -9
drivers/iommu/iommufd/io_pagetable.c
··· 707 707 struct iopt_area *area; 708 708 unsigned long unmapped_bytes = 0; 709 709 unsigned int tries = 0; 710 - int rc = -ENOENT; 710 + /* If there are no mapped entries then success */ 711 + int rc = 0; 711 712 712 713 /* 713 714 * The domains_rwsem must be held in read mode any time any area->pages ··· 778 777 779 778 down_write(&iopt->iova_rwsem); 780 779 } 781 - if (unmapped_bytes) 782 - rc = 0; 783 780 784 781 out_unlock_iova: 785 782 up_write(&iopt->iova_rwsem); ··· 814 815 815 816 int iopt_unmap_all(struct io_pagetable *iopt, unsigned long *unmapped) 816 817 { 817 - int rc; 818 - 819 - rc = iopt_unmap_iova_range(iopt, 0, ULONG_MAX, unmapped); 820 818 /* If the IOVAs are empty then unmap all succeeds */ 821 - if (rc == -ENOENT) 822 - return 0; 823 - return rc; 819 + return iopt_unmap_iova_range(iopt, 0, ULONG_MAX, unmapped); 824 820 } 825 821 826 822 /* The caller must always free all the nodes in the allowed_iova rb_root. */
+4
drivers/iommu/iommufd/ioas.c
··· 367 367 &unmapped); 368 368 if (rc) 369 369 goto out_put; 370 + if (!unmapped) { 371 + rc = -ENOENT; 372 + goto out_put; 373 + } 370 374 } 371 375 372 376 cmd->length = unmapped;
+2 -3
drivers/iommu/iommufd/iova_bitmap.c
··· 130 130 static unsigned long iova_bitmap_offset_to_index(struct iova_bitmap *bitmap, 131 131 unsigned long iova) 132 132 { 133 - unsigned long pgsize = 1UL << bitmap->mapped.pgshift; 134 - 135 - return iova / (BITS_PER_TYPE(*bitmap->bitmap) * pgsize); 133 + return (iova >> bitmap->mapped.pgshift) / 134 + BITS_PER_TYPE(*bitmap->bitmap); 136 135 } 137 136 138 137 /*
+13 -5
drivers/isdn/hardware/mISDN/hfcsusb.c
··· 1904 1904 mISDN_freebchannel(&hw->bch[1]); 1905 1905 mISDN_freebchannel(&hw->bch[0]); 1906 1906 mISDN_freedchannel(&hw->dch); 1907 - kfree(hw); 1908 1907 return err; 1909 1908 } 1910 1909 1911 1910 static int 1912 1911 hfcsusb_probe(struct usb_interface *intf, const struct usb_device_id *id) 1913 1912 { 1913 + int err; 1914 1914 struct hfcsusb *hw; 1915 1915 struct usb_device *dev = interface_to_usbdev(intf); 1916 1916 struct usb_host_interface *iface = intf->cur_altsetting; ··· 2101 2101 if (!hw->ctrl_urb) { 2102 2102 pr_warn("%s: No memory for control urb\n", 2103 2103 driver_info->vend_name); 2104 - kfree(hw); 2105 - return -ENOMEM; 2104 + err = -ENOMEM; 2105 + goto err_free_hw; 2106 2106 } 2107 2107 2108 2108 pr_info("%s: %s: detected \"%s\" (%s, if=%d alt=%d)\n", 2109 2109 hw->name, __func__, driver_info->vend_name, 2110 2110 conf_str[small_match], ifnum, alt_used); 2111 2111 2112 - if (setup_instance(hw, dev->dev.parent)) 2113 - return -EIO; 2112 + if (setup_instance(hw, dev->dev.parent)) { 2113 + err = -EIO; 2114 + goto err_free_urb; 2115 + } 2114 2116 2115 2117 hw->intf = intf; 2116 2118 usb_set_intfdata(hw->intf, hw); 2117 2119 return 0; 2120 + 2121 + err_free_urb: 2122 + usb_free_urb(hw->ctrl_urb); 2123 + err_free_hw: 2124 + kfree(hw); 2125 + return err; 2118 2126 } 2119 2127 2120 2128 /* function called when an active device is removed */
+5
drivers/media/common/videobuf2/videobuf2-v4l2.c
··· 1010 1010 if (vb2_queue_is_busy(vdev->queue, file)) 1011 1011 return -EBUSY; 1012 1012 1013 + if (vb2_fileio_is_active(vdev->queue)) { 1014 + dprintk(vdev->queue, 1, "file io in progress\n"); 1015 + return -EBUSY; 1016 + } 1017 + 1013 1018 return vb2_core_remove_bufs(vdev->queue, d->index, d->count); 1014 1019 } 1015 1020 EXPORT_SYMBOL_GPL(vb2_ioctl_remove_bufs);
+3 -6
drivers/media/pci/cx18/cx18-driver.c
··· 1136 1136 int video_input; 1137 1137 int fw_retry_count = 3; 1138 1138 struct v4l2_frequency vf; 1139 - struct cx18_open_id fh; 1140 1139 v4l2_std_id std; 1141 - 1142 - fh.cx = cx; 1143 1140 1144 1141 if (test_bit(CX18_F_I_FAILED, &cx->i_flags)) 1145 1142 return -ENXIO; ··· 1217 1220 1218 1221 video_input = cx->active_input; 1219 1222 cx->active_input++; /* Force update of input */ 1220 - cx18_s_input(NULL, &fh, video_input); 1223 + cx18_do_s_input(cx, video_input); 1221 1224 1222 1225 /* Let the VIDIOC_S_STD ioctl do all the work, keeps the code 1223 1226 in one place. */ 1224 1227 cx->std++; /* Force full standard initialization */ 1225 1228 std = (cx->tuner_std == V4L2_STD_ALL) ? V4L2_STD_NTSC_M : cx->tuner_std; 1226 - cx18_s_std(NULL, &fh, std); 1227 - cx18_s_frequency(NULL, &fh, &vf); 1229 + cx18_do_s_std(cx, std); 1230 + cx18_do_s_frequency(cx, &vf); 1228 1231 return 0; 1229 1232 } 1230 1233
+19 -11
drivers/media/pci/cx18/cx18-ioctl.c
··· 521 521 return 0; 522 522 } 523 523 524 - int cx18_s_input(struct file *file, void *fh, unsigned int inp) 524 + int cx18_do_s_input(struct cx18 *cx, unsigned int inp) 525 525 { 526 - struct cx18_open_id *id = file2id(file); 527 - struct cx18 *cx = id->cx; 528 526 v4l2_std_id std = V4L2_STD_ALL; 529 527 const struct cx18_card_video_input *card_input = 530 528 cx->card->video_inputs + inp; ··· 556 558 return 0; 557 559 } 558 560 561 + static int cx18_s_input(struct file *file, void *fh, unsigned int inp) 562 + { 563 + return cx18_do_s_input(file2id(file)->cx, inp); 564 + } 565 + 559 566 static int cx18_g_frequency(struct file *file, void *fh, 560 567 struct v4l2_frequency *vf) 561 568 { ··· 573 570 return 0; 574 571 } 575 572 576 - int cx18_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf) 573 + int cx18_do_s_frequency(struct cx18 *cx, const struct v4l2_frequency *vf) 577 574 { 578 - struct cx18_open_id *id = file2id(file); 579 - struct cx18 *cx = id->cx; 580 - 581 575 if (vf->tuner != 0) 582 576 return -EINVAL; 583 577 ··· 585 585 return 0; 586 586 } 587 587 588 + static int cx18_s_frequency(struct file *file, void *fh, 589 + const struct v4l2_frequency *vf) 590 + { 591 + return cx18_do_s_frequency(file2id(file)->cx, vf); 592 + } 593 + 588 594 static int cx18_g_std(struct file *file, void *fh, v4l2_std_id *std) 589 595 { 590 596 struct cx18 *cx = file2id(file)->cx; ··· 599 593 return 0; 600 594 } 601 595 602 - int cx18_s_std(struct file *file, void *fh, v4l2_std_id std) 596 + int cx18_do_s_std(struct cx18 *cx, v4l2_std_id std) 603 597 { 604 - struct cx18_open_id *id = file2id(file); 605 - struct cx18 *cx = id->cx; 606 - 607 598 if ((std & V4L2_STD_ALL) == 0) 608 599 return -EINVAL; 609 600 ··· 643 640 /* Tuner */ 644 641 cx18_call_all(cx, video, s_std, cx->std); 645 642 return 0; 643 + } 644 + 645 + static int cx18_s_std(struct file *file, void *fh, v4l2_std_id std) 646 + { 647 + return cx18_do_s_std(file2id(file)->cx, std); 646 648 } 647 
649 648 650 static int cx18_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt)
+5 -3
drivers/media/pci/cx18/cx18-ioctl.h
··· 12 12 void cx18_expand_service_set(struct v4l2_sliced_vbi_format *fmt, int is_pal); 13 13 u16 cx18_get_service_set(struct v4l2_sliced_vbi_format *fmt); 14 14 void cx18_set_funcs(struct video_device *vdev); 15 - int cx18_s_std(struct file *file, void *fh, v4l2_std_id std); 16 - int cx18_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf); 17 - int cx18_s_input(struct file *file, void *fh, unsigned int inp); 15 + 16 + struct cx18; 17 + int cx18_do_s_std(struct cx18 *cx, v4l2_std_id std); 18 + int cx18_do_s_frequency(struct cx18 *cx, const struct v4l2_frequency *vf); 19 + int cx18_do_s_input(struct cx18 *cx, unsigned int inp);
+4 -7
drivers/media/pci/ivtv/ivtv-driver.c
··· 1247 1247 1248 1248 int ivtv_init_on_first_open(struct ivtv *itv) 1249 1249 { 1250 - struct v4l2_frequency vf; 1251 1250 /* Needed to call ioctls later */ 1252 - struct ivtv_open_id fh; 1251 + struct ivtv_stream *s = &itv->streams[IVTV_ENC_STREAM_TYPE_MPG]; 1252 + struct v4l2_frequency vf; 1253 1253 int fw_retry_count = 3; 1254 1254 int video_input; 1255 - 1256 - fh.itv = itv; 1257 - fh.type = IVTV_ENC_STREAM_TYPE_MPG; 1258 1255 1259 1256 if (test_bit(IVTV_F_I_FAILED, &itv->i_flags)) 1260 1257 return -ENXIO; ··· 1294 1297 1295 1298 video_input = itv->active_input; 1296 1299 itv->active_input++; /* Force update of input */ 1297 - ivtv_s_input(NULL, &fh, video_input); 1300 + ivtv_do_s_input(itv, video_input); 1298 1301 1299 1302 /* Let the VIDIOC_S_STD ioctl do all the work, keeps the code 1300 1303 in one place. */ 1301 1304 itv->std++; /* Force full standard initialization */ 1302 1305 itv->std_out = itv->std; 1303 - ivtv_s_frequency(NULL, &fh, &vf); 1306 + ivtv_do_s_frequency(s, &vf); 1304 1307 1305 1308 if (itv->card->v4l2_capabilities & V4L2_CAP_VIDEO_OUTPUT) { 1306 1309 /* Turn on the TV-out: ivtv_init_mpeg_decoder() initializes
+17 -5
drivers/media/pci/ivtv/ivtv-ioctl.c
··· 974 974 return 0; 975 975 } 976 976 977 - int ivtv_s_input(struct file *file, void *fh, unsigned int inp) 977 + int ivtv_do_s_input(struct ivtv *itv, unsigned int inp) 978 978 { 979 - struct ivtv *itv = file2id(file)->itv; 980 979 v4l2_std_id std; 981 980 int i; 982 981 ··· 1014 1015 ivtv_unmute(itv); 1015 1016 1016 1017 return 0; 1018 + } 1019 + 1020 + static int ivtv_s_input(struct file *file, void *fh, unsigned int inp) 1021 + { 1022 + return ivtv_do_s_input(file2id(file)->itv, inp); 1017 1023 } 1018 1024 1019 1025 static int ivtv_g_output(struct file *file, void *fh, unsigned int *i) ··· 1069 1065 return 0; 1070 1066 } 1071 1067 1072 - int ivtv_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf) 1068 + int ivtv_do_s_frequency(struct ivtv_stream *s, const struct v4l2_frequency *vf) 1073 1069 { 1074 - struct ivtv *itv = file2id(file)->itv; 1075 - struct ivtv_stream *s = &itv->streams[file2id(file)->type]; 1070 + struct ivtv *itv = s->itv; 1076 1071 1077 1072 if (s->vdev.vfl_dir) 1078 1073 return -ENOTTY; ··· 1083 1080 ivtv_call_all(itv, tuner, s_frequency, vf); 1084 1081 ivtv_unmute(itv); 1085 1082 return 0; 1083 + } 1084 + 1085 + static int ivtv_s_frequency(struct file *file, void *fh, 1086 + const struct v4l2_frequency *vf) 1087 + { 1088 + struct ivtv_open_id *id = file2id(file); 1089 + struct ivtv *itv = id->itv; 1090 + 1091 + return ivtv_do_s_frequency(&itv->streams[id->type], vf); 1086 1092 } 1087 1093 1088 1094 static int ivtv_g_std(struct file *file, void *fh, v4l2_std_id *std)
+4 -2
drivers/media/pci/ivtv/ivtv-ioctl.h
··· 9 9 #ifndef IVTV_IOCTL_H 10 10 #define IVTV_IOCTL_H 11 11 12 + struct ivtv; 13 + 12 14 u16 ivtv_service2vbi(int type); 13 15 void ivtv_expand_service_set(struct v4l2_sliced_vbi_format *fmt, int is_pal); 14 16 u16 ivtv_get_service_set(struct v4l2_sliced_vbi_format *fmt); ··· 19 17 void ivtv_set_funcs(struct video_device *vdev); 20 18 void ivtv_s_std_enc(struct ivtv *itv, v4l2_std_id std); 21 19 void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id std); 22 - int ivtv_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf); 23 - int ivtv_s_input(struct file *file, void *fh, unsigned int inp); 20 + int ivtv_do_s_frequency(struct ivtv_stream *s, const struct v4l2_frequency *vf); 21 + int ivtv_do_s_input(struct ivtv *itv, unsigned int inp); 24 22 25 23 #endif
+14 -1
drivers/media/usb/uvc/uvc_driver.c
··· 167 167 168 168 static struct uvc_streaming *uvc_stream_by_id(struct uvc_device *dev, int id) 169 169 { 170 - struct uvc_streaming *stream; 170 + struct uvc_streaming *stream, *last_stream; 171 + unsigned int count = 0; 171 172 172 173 list_for_each_entry(stream, &dev->streams, list) { 174 + count += 1; 175 + last_stream = stream; 173 176 if (stream->header.bTerminalLink == id) 174 177 return stream; 178 + } 179 + 180 + /* 181 + * If the streaming entity is referenced by an invalid ID, notify the 182 + * user and use heuristics to guess the correct entity. 183 + */ 184 + if (count == 1 && id == UVC_INVALID_ENTITY_ID) { 185 + dev_warn(&dev->intf->dev, 186 + "UVC non compliance: Invalid USB header. The streaming entity has an invalid ID, guessing the correct one."); 187 + return last_stream; 175 188 } 176 189 177 190 return NULL;
+1 -1
drivers/media/v4l2-core/v4l2-subdev.c
··· 2608 2608 int v4l2_subdev_get_privacy_led(struct v4l2_subdev *sd) 2609 2609 { 2610 2610 #if IS_REACHABLE(CONFIG_LEDS_CLASS) 2611 - sd->privacy_led = led_get(sd->dev, "privacy-led"); 2611 + sd->privacy_led = led_get(sd->dev, "privacy"); 2612 2612 if (IS_ERR(sd->privacy_led) && PTR_ERR(sd->privacy_led) != -ENOENT) 2613 2613 return dev_err_probe(sd->dev, PTR_ERR(sd->privacy_led), 2614 2614 "getting privacy LED\n");
+1 -8
drivers/net/bonding/bond_options.c
··· 225 225 { NULL, -1, 0}, 226 226 }; 227 227 228 - static const struct bond_opt_value bond_actor_port_prio_tbl[] = { 229 - { "minval", 0, BOND_VALFLAG_MIN}, 230 - { "maxval", 65535, BOND_VALFLAG_MAX}, 231 - { "default", 255, BOND_VALFLAG_DEFAULT}, 232 - { NULL, -1, 0}, 233 - }; 234 - 235 228 static const struct bond_opt_value bond_ad_user_port_key_tbl[] = { 236 229 { "minval", 0, BOND_VALFLAG_MIN | BOND_VALFLAG_DEFAULT}, 237 230 { "maxval", 1023, BOND_VALFLAG_MAX}, ··· 490 497 .id = BOND_OPT_ACTOR_PORT_PRIO, 491 498 .name = "actor_port_prio", 492 499 .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)), 493 - .values = bond_actor_port_prio_tbl, 500 + .flags = BOND_OPTFLAG_RAWVAL, 494 501 .set = bond_option_actor_port_prio_set, 495 502 }, 496 503 [BOND_OPT_AD_ACTOR_SYSTEM] = {
+29 -7
drivers/net/dsa/b53/b53_common.c
··· 371 371 * frames should be flooded or not. 372 372 */ 373 373 b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt); 374 - mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IPMC_FWD_EN; 374 + mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IP_MC; 375 375 b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt); 376 376 } else { 377 377 b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt); 378 - mgmt |= B53_IP_MCAST_25; 378 + mgmt |= B53_IP_MC; 379 379 b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt); 380 380 } 381 381 } ··· 1372 1372 else 1373 1373 reg &= ~PORT_OVERRIDE_FULL_DUPLEX; 1374 1374 1375 + reg &= ~(0x3 << GMII_PO_SPEED_S); 1376 + if (is5301x(dev) || is58xx(dev)) 1377 + reg &= ~PORT_OVERRIDE_SPEED_2000M; 1378 + 1375 1379 switch (speed) { 1376 1380 case 2000: 1377 1381 reg |= PORT_OVERRIDE_SPEED_2000M; ··· 1393 1389 dev_err(dev->dev, "unknown speed: %d\n", speed); 1394 1390 return; 1395 1391 } 1392 + 1393 + if (is5325(dev)) 1394 + reg &= ~PORT_OVERRIDE_LP_FLOW_25; 1395 + else 1396 + reg &= ~(PORT_OVERRIDE_RX_FLOW | PORT_OVERRIDE_TX_FLOW); 1396 1397 1397 1398 if (rx_pause) { 1398 1399 if (is5325(dev)) ··· 1602 1593 struct b53_device *dev = dp->ds->priv; 1603 1594 int port = dp->index; 1604 1595 1605 - if (mode == MLO_AN_PHY) 1596 + if (mode == MLO_AN_PHY) { 1597 + if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4)) 1598 + b53_force_link(dev, port, false); 1606 1599 return; 1600 + } 1607 1601 1608 1602 if (mode == MLO_AN_FIXED) { 1609 1603 b53_force_link(dev, port, false); ··· 1634 1622 if (mode == MLO_AN_PHY) { 1635 1623 /* Re-negotiate EEE if it was enabled already */ 1636 1624 p->eee_enabled = b53_eee_init(ds, port, phydev); 1625 + 1626 + if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4)) { 1627 + b53_force_port_config(dev, port, speed, duplex, 1628 + tx_pause, rx_pause); 1629 + b53_force_link(dev, port, true); 1630 + } 1631 + 1637 1632 return; 1638 1633 } 1639 1634 ··· 2037 2018 do { 2038 2019 b53_read8(dev, B53_ARLIO_PAGE, 
offset, &reg); 2039 2020 if (!(reg & ARL_SRCH_STDN)) 2040 - return 0; 2021 + return -ENOENT; 2041 2022 2042 2023 if (reg & ARL_SRCH_VLID) 2043 2024 return 0; ··· 2087 2068 int b53_fdb_dump(struct dsa_switch *ds, int port, 2088 2069 dsa_fdb_dump_cb_t *cb, void *data) 2089 2070 { 2071 + unsigned int count = 0, results_per_hit = 1; 2090 2072 struct b53_device *priv = ds->priv; 2091 2073 struct b53_arl_entry results[2]; 2092 - unsigned int count = 0; 2093 2074 u8 offset; 2094 2075 int ret; 2095 2076 u8 reg; 2077 + 2078 + if (priv->num_arl_bins > 2) 2079 + results_per_hit = 2; 2096 2080 2097 2081 mutex_lock(&priv->arl_mutex); 2098 2082 ··· 2118 2096 if (ret) 2119 2097 break; 2120 2098 2121 - if (priv->num_arl_bins > 2) { 2099 + if (results_per_hit == 2) { 2122 2100 b53_arl_search_rd(priv, 1, &results[1]); 2123 2101 ret = b53_fdb_copy(port, &results[1], cb, data); 2124 2102 if (ret) ··· 2128 2106 break; 2129 2107 } 2130 2108 2131 - } while (count++ < b53_max_arl_entries(priv) / 2); 2109 + } while (count++ < b53_max_arl_entries(priv) / results_per_hit); 2132 2110 2133 2111 mutex_unlock(&priv->arl_mutex); 2134 2112
+1 -2
drivers/net/dsa/b53/b53_regs.h
··· 111 111 112 112 /* IP Multicast control (8 bit) */ 113 113 #define B53_IP_MULTICAST_CTRL 0x21 114 - #define B53_IP_MCAST_25 BIT(0) 115 - #define B53_IPMC_FWD_EN BIT(1) 114 + #define B53_IP_MC BIT(0) 116 115 #define B53_UC_FWD_EN BIT(6) 117 116 #define B53_MC_FWD_EN BIT(7) 118 117
+84 -14
drivers/net/dsa/microchip/ksz9477.c
··· 1355 1355 } 1356 1356 } 1357 1357 1358 + #define RESV_MCAST_CNT 8 1359 + 1360 + static u8 reserved_mcast_map[RESV_MCAST_CNT] = { 0, 1, 3, 16, 32, 33, 2, 17 }; 1361 + 1358 1362 int ksz9477_enable_stp_addr(struct ksz_device *dev) 1359 1363 { 1364 + u8 i, ports, update; 1360 1365 const u32 *masks; 1366 + bool override; 1361 1367 u32 data; 1362 1368 int ret; 1363 1369 ··· 1372 1366 /* Enable Reserved multicast table */ 1373 1367 ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_RESV_MCAST_ENABLE, true); 1374 1368 1375 - /* Set the Override bit for forwarding BPDU packet to CPU */ 1376 - ret = ksz_write32(dev, REG_SW_ALU_VAL_B, 1377 - ALU_V_OVERRIDE | BIT(dev->cpu_port)); 1378 - if (ret < 0) 1379 - return ret; 1369 + /* The reserved multicast address table has 8 entries. Each entry has 1370 + * a default value of which port to forward. It is assumed the host 1371 + * port is the last port in most of the switches, but that is not the 1372 + * case for KSZ9477 or maybe KSZ9897. For LAN937X family the default 1373 + * port is port 5, the first RGMII port. It is okay for LAN9370, a 1374 + * 5-port switch, but may not be correct for the other 8-port 1375 + * versions. It is necessary to update the whole table to forward to 1376 + * the right ports. 1377 + * Furthermore PTP messages can use a reserved multicast address and 1378 + * the host will not receive them if this table is not correct. 
1379 + */ 1380 + for (i = 0; i < RESV_MCAST_CNT; i++) { 1381 + data = reserved_mcast_map[i] << 1382 + dev->info->shifts[ALU_STAT_INDEX]; 1383 + data |= ALU_STAT_START | 1384 + masks[ALU_STAT_DIRECT] | 1385 + masks[ALU_RESV_MCAST_ADDR] | 1386 + masks[ALU_STAT_READ]; 1387 + ret = ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data); 1388 + if (ret < 0) 1389 + return ret; 1380 1390 1381 - data = ALU_STAT_START | ALU_RESV_MCAST_ADDR | masks[ALU_STAT_WRITE]; 1391 + /* wait to be finished */ 1392 + ret = ksz9477_wait_alu_sta_ready(dev); 1393 + if (ret < 0) 1394 + return ret; 1382 1395 1383 - ret = ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data); 1384 - if (ret < 0) 1385 - return ret; 1396 + ret = ksz_read32(dev, REG_SW_ALU_VAL_B, &data); 1397 + if (ret < 0) 1398 + return ret; 1386 1399 1387 - /* wait to be finished */ 1388 - ret = ksz9477_wait_alu_sta_ready(dev); 1389 - if (ret < 0) { 1390 - dev_err(dev->dev, "Failed to update Reserved Multicast table\n"); 1391 - return ret; 1400 + override = false; 1401 + ports = data & dev->port_mask; 1402 + switch (i) { 1403 + case 0: 1404 + case 6: 1405 + /* Change the host port. */ 1406 + update = BIT(dev->cpu_port); 1407 + override = true; 1408 + break; 1409 + case 2: 1410 + /* Change the host port. */ 1411 + update = BIT(dev->cpu_port); 1412 + break; 1413 + case 4: 1414 + case 5: 1415 + case 7: 1416 + /* Skip the host port. */ 1417 + update = dev->port_mask & ~BIT(dev->cpu_port); 1418 + break; 1419 + default: 1420 + update = ports; 1421 + break; 1422 + } 1423 + if (update != ports || override) { 1424 + data &= ~dev->port_mask; 1425 + data |= update; 1426 + /* Set Override bit to receive frame even when port is 1427 + * closed. 
1428 + */ 1429 + if (override) 1430 + data |= ALU_V_OVERRIDE; 1431 + ret = ksz_write32(dev, REG_SW_ALU_VAL_B, data); 1432 + if (ret < 0) 1433 + return ret; 1434 + 1435 + data = reserved_mcast_map[i] << 1436 + dev->info->shifts[ALU_STAT_INDEX]; 1437 + data |= ALU_STAT_START | 1438 + masks[ALU_STAT_DIRECT] | 1439 + masks[ALU_RESV_MCAST_ADDR] | 1440 + masks[ALU_STAT_WRITE]; 1441 + ret = ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data); 1442 + if (ret < 0) 1443 + return ret; 1444 + 1445 + /* wait to be finished */ 1446 + ret = ksz9477_wait_alu_sta_ready(dev); 1447 + if (ret < 0) 1448 + return ret; 1449 + } 1392 1450 } 1393 1451 1394 1452 return 0;
+1 -2
drivers/net/dsa/microchip/ksz9477_reg.h
··· 2 2 /* 3 3 * Microchip KSZ9477 register definitions 4 4 * 5 - * Copyright (C) 2017-2024 Microchip Technology Inc. 5 + * Copyright (C) 2017-2025 Microchip Technology Inc. 6 6 */ 7 7 8 8 #ifndef __KSZ9477_REGS_H ··· 397 397 398 398 #define ALU_RESV_MCAST_INDEX_M (BIT(6) - 1) 399 399 #define ALU_STAT_START BIT(7) 400 - #define ALU_RESV_MCAST_ADDR BIT(1) 401 400 402 401 #define REG_SW_ALU_VAL_A 0x0420 403 402
+4
drivers/net/dsa/microchip/ksz_common.c
··· 808 808 static const u32 ksz9477_masks[] = { 809 809 [ALU_STAT_WRITE] = 0, 810 810 [ALU_STAT_READ] = 1, 811 + [ALU_STAT_DIRECT] = 0, 812 + [ALU_RESV_MCAST_ADDR] = BIT(1), 811 813 [P_MII_TX_FLOW_CTRL] = BIT(5), 812 814 [P_MII_RX_FLOW_CTRL] = BIT(3), 813 815 }; ··· 837 835 static const u32 lan937x_masks[] = { 838 836 [ALU_STAT_WRITE] = 1, 839 837 [ALU_STAT_READ] = 2, 838 + [ALU_STAT_DIRECT] = BIT(3), 839 + [ALU_RESV_MCAST_ADDR] = BIT(2), 840 840 [P_MII_TX_FLOW_CTRL] = BIT(5), 841 841 [P_MII_RX_FLOW_CTRL] = BIT(3), 842 842 };
+2
drivers/net/dsa/microchip/ksz_common.h
··· 294 294 DYNAMIC_MAC_TABLE_TIMESTAMP, 295 295 ALU_STAT_WRITE, 296 296 ALU_STAT_READ, 297 + ALU_STAT_DIRECT, 298 + ALU_RESV_MCAST_ADDR, 297 299 P_MII_TX_FLOW_CTRL, 298 300 P_MII_RX_FLOW_CTRL, 299 301 };
+5 -1
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 12439 12439 return -ENODEV; 12440 12440 } 12441 12441 12442 - static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset) 12442 + void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset) 12443 12443 { 12444 12444 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 12445 12445 ··· 16892 16892 if (netif_running(dev)) 16893 16893 netif_close(dev); 16894 16894 16895 + if (bnxt_hwrm_func_drv_unrgtr(bp)) { 16896 + pcie_flr(pdev); 16897 + goto shutdown_exit; 16898 + } 16895 16899 bnxt_ptp_clear(bp); 16896 16900 bnxt_clear_int_mode(bp); 16897 16901 pci_disable_device(pdev);
+2 -1
drivers/net/ethernet/broadcom/bnxt/bnxt.h
··· 2149 2149 static inline void bnxt_bs_trace_check_wrap(struct bnxt_bs_trace_info *bs_trace, 2150 2150 u32 offset) 2151 2151 { 2152 - if (!bs_trace->wrapped && 2152 + if (!bs_trace->wrapped && bs_trace->magic_byte && 2153 2153 *bs_trace->magic_byte != BNXT_TRACE_BUF_MAGIC_BYTE) 2154 2154 bs_trace->wrapped = 1; 2155 2155 bs_trace->last_offset = offset; ··· 2941 2941 int bnxt_update_link(struct bnxt *bp, bool chng_link_state); 2942 2942 int bnxt_hwrm_set_pause(struct bnxt *); 2943 2943 int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool); 2944 + void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset); 2944 2945 int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset); 2945 2946 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp); 2946 2947 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp);
+3 -2
drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
··· 333 333 u32 offset = 0; 334 334 int rc = 0; 335 335 336 + record->max_entries = cpu_to_le32(ctxm->max_entries); 337 + record->entry_size = cpu_to_le32(ctxm->entry_size); 338 + 336 339 rc = bnxt_dbg_hwrm_log_buffer_flush(bp, type, 0, &offset); 337 340 if (rc) 338 341 return; 339 342 340 343 bnxt_bs_trace_check_wrap(bs_trace, offset); 341 - record->max_entries = cpu_to_le32(ctxm->max_entries); 342 - record->entry_size = cpu_to_le32(ctxm->entry_size); 343 344 record->offset = cpu_to_le32(bs_trace->last_offset); 344 345 record->wrapped = bs_trace->wrapped; 345 346 }
+1 -1
drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
··· 461 461 rtnl_unlock(); 462 462 break; 463 463 } 464 - bnxt_cancel_reservations(bp, false); 464 + bnxt_clear_reservations(bp, false); 465 465 bnxt_free_ctx_mem(bp, false); 466 466 break; 467 467 }
+2 -2
drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
··· 1051 1051 if (ptp->ptp_clock) { 1052 1052 ptp_clock_unregister(ptp->ptp_clock); 1053 1053 ptp->ptp_clock = NULL; 1054 - kfree(ptp->ptp_info.pin_config); 1055 - ptp->ptp_info.pin_config = NULL; 1056 1054 } 1055 + kfree(ptp->ptp_info.pin_config); 1056 + ptp->ptp_info.pin_config = NULL; 1057 1057 } 1058 1058 1059 1059 int bnxt_ptp_init(struct bnxt *bp)
+15
drivers/net/ethernet/google/gve/gve_ptp.c
··· 26 26 return 0; 27 27 } 28 28 29 + static int gve_ptp_gettimex64(struct ptp_clock_info *info, 30 + struct timespec64 *ts, 31 + struct ptp_system_timestamp *sts) 32 + { 33 + return -EOPNOTSUPP; 34 + } 35 + 36 + static int gve_ptp_settime64(struct ptp_clock_info *info, 37 + const struct timespec64 *ts) 38 + { 39 + return -EOPNOTSUPP; 40 + } 41 + 29 42 static long gve_ptp_do_aux_work(struct ptp_clock_info *info) 30 43 { 31 44 const struct gve_ptp *ptp = container_of(info, struct gve_ptp, info); ··· 60 47 static const struct ptp_clock_info gve_ptp_caps = { 61 48 .owner = THIS_MODULE, 62 49 .name = "gve clock", 50 + .gettimex64 = gve_ptp_gettimex64, 51 + .settime64 = gve_ptp_settime64, 63 52 .do_aux_work = gve_ptp_do_aux_work, 64 53 }; 65 54
+2 -2
drivers/net/ethernet/intel/Kconfig
··· 146 146 tristate "Intel(R) 10GbE PCI Express adapters support" 147 147 depends on PCI 148 148 depends on PTP_1588_CLOCK_OPTIONAL 149 - select LIBIE_FWLOG 149 + select LIBIE_FWLOG if DEBUG_FS 150 150 select MDIO 151 151 select NET_DEVLINK 152 152 select PLDMFW ··· 298 298 select DIMLIB 299 299 select LIBIE 300 300 select LIBIE_ADMINQ 301 - select LIBIE_FWLOG 301 + select LIBIE_FWLOG if DEBUG_FS 302 302 select NET_DEVLINK 303 303 select PACKING 304 304 select PLDMFW
-2
drivers/net/ethernet/intel/ixgbe/ixgbe.h
··· 821 821 #ifdef CONFIG_IXGBE_HWMON 822 822 struct hwmon_buff *ixgbe_hwmon_buff; 823 823 #endif /* CONFIG_IXGBE_HWMON */ 824 - #ifdef CONFIG_DEBUG_FS 825 824 struct dentry *ixgbe_dbg_adapter; 826 - #endif /*CONFIG_DEBUG_FS*/ 827 825 828 826 u8 default_up; 829 827 /* Bitmask indicating in use pools */
+2 -4
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
··· 1516 1516 pool->xdp_cnt = numptrs; 1517 1517 pool->xdp = devm_kcalloc(pfvf->dev, 1518 1518 numptrs, sizeof(struct xdp_buff *), GFP_KERNEL); 1519 - if (IS_ERR(pool->xdp)) { 1520 - netdev_err(pfvf->netdev, "Creation of xsk pool failed\n"); 1521 - return PTR_ERR(pool->xdp); 1522 - } 1519 + if (!pool->xdp) 1520 + return -ENOMEM; 1523 1521 } 1524 1522 1525 1523 return 0;
+3
drivers/net/ethernet/mellanox/mlx5/core/en.h
··· 634 634 struct mlx5e_shampo_hd { 635 635 struct mlx5e_frag_page *pages; 636 636 u32 hd_per_wq; 637 + u32 hd_per_page; 637 638 u16 hd_per_wqe; 639 + u8 log_hd_per_page; 640 + u8 log_hd_entry_size; 638 641 unsigned long *bitmap; 639 642 u16 pi; 640 643 u16 ci;
+1 -3
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
··· 2125 2125 if (!size_read) 2126 2126 return i; 2127 2127 2128 - if (size_read == -EINVAL) 2129 - return -EINVAL; 2130 2128 if (size_read < 0) { 2131 2129 NL_SET_ERR_MSG_FMT_MOD( 2132 2130 extack, 2133 2131 "Query module eeprom by page failed, read %u bytes, err %d", 2134 2132 i, size_read); 2135 - return i; 2133 + return size_read; 2136 2134 } 2137 2135 2138 2136 i += size_read;
+19 -5
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 791 791 int node) 792 792 { 793 793 void *wqc = MLX5_ADDR_OF(rqc, rqp->rqc, wq); 794 + u8 log_hd_per_page, log_hd_entry_size; 795 + u16 hd_per_wq, hd_per_wqe; 794 796 u32 hd_pool_size; 795 - u16 hd_per_wq; 796 797 int wq_size; 797 798 int err; 798 799 ··· 816 815 if (err) 817 816 goto err_umr_mkey; 818 817 819 - rq->mpwqe.shampo->hd_per_wqe = 820 - mlx5e_shampo_hd_per_wqe(mdev, params, rqp); 818 + hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rqp); 821 819 wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz)); 822 - hd_pool_size = (rq->mpwqe.shampo->hd_per_wqe * wq_size) / 823 - MLX5E_SHAMPO_WQ_HEADER_PER_PAGE; 820 + 821 + BUILD_BUG_ON(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE > PAGE_SHIFT); 822 + if (hd_per_wqe >= MLX5E_SHAMPO_WQ_HEADER_PER_PAGE) { 823 + log_hd_per_page = MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE; 824 + log_hd_entry_size = MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE; 825 + } else { 826 + log_hd_per_page = order_base_2(hd_per_wqe); 827 + log_hd_entry_size = order_base_2(PAGE_SIZE / hd_per_wqe); 828 + } 829 + 830 + rq->mpwqe.shampo->hd_per_wqe = hd_per_wqe; 831 + rq->mpwqe.shampo->hd_per_page = BIT(log_hd_per_page); 832 + rq->mpwqe.shampo->log_hd_per_page = log_hd_per_page; 833 + rq->mpwqe.shampo->log_hd_entry_size = log_hd_entry_size; 834 + 835 + hd_pool_size = (hd_per_wqe * wq_size) >> log_hd_per_page; 824 836 825 837 if (netif_rxq_has_unreadable_mp(rq->netdev, rq->ix)) { 826 838 /* Separate page pool for shampo headers */
+39 -33
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
··· 648 648 umr_wqe->hdr.uctrl.mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE); 649 649 } 650 650 651 - static struct mlx5e_frag_page *mlx5e_shampo_hd_to_frag_page(struct mlx5e_rq *rq, int header_index) 651 + static struct mlx5e_frag_page *mlx5e_shampo_hd_to_frag_page(struct mlx5e_rq *rq, 652 + int header_index) 652 653 { 653 - BUILD_BUG_ON(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE > PAGE_SHIFT); 654 + struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; 654 655 655 - return &rq->mpwqe.shampo->pages[header_index >> MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE]; 656 + return &shampo->pages[header_index >> shampo->log_hd_per_page]; 656 657 } 657 658 658 - static u64 mlx5e_shampo_hd_offset(int header_index) 659 + static u64 mlx5e_shampo_hd_offset(struct mlx5e_rq *rq, int header_index) 659 660 { 660 - return (header_index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) << 661 - MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE; 661 + struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; 662 + u32 hd_per_page = shampo->hd_per_page; 663 + 664 + return (header_index & (hd_per_page - 1)) << shampo->log_hd_entry_size; 662 665 } 663 666 664 667 static void mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index); ··· 674 671 u16 pi, header_offset, err, wqe_bbs; 675 672 u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey; 676 673 struct mlx5e_umr_wqe *umr_wqe; 677 - int headroom, i = 0; 674 + int headroom, i; 678 675 679 676 headroom = rq->buff.headroom; 680 677 wqe_bbs = MLX5E_KSM_UMR_WQEBBS(ksm_entries); ··· 682 679 umr_wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi); 683 680 build_ksm_umr(sq, umr_wqe, shampo->mkey_be, index, ksm_entries); 684 681 685 - WARN_ON_ONCE(ksm_entries & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)); 686 - while (i < ksm_entries) { 687 - struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, index); 682 + for (i = 0; i < ksm_entries; i++, index++) { 683 + struct mlx5e_frag_page *frag_page; 688 684 u64 addr; 689 685 690 - err = mlx5e_page_alloc_fragmented(rq->hd_page_pool, frag_page); 
691 - if (unlikely(err)) 692 - goto err_unmap; 686 + frag_page = mlx5e_shampo_hd_to_frag_page(rq, index); 687 + header_offset = mlx5e_shampo_hd_offset(rq, index); 688 + if (!header_offset) { 689 + err = mlx5e_page_alloc_fragmented(rq->hd_page_pool, 690 + frag_page); 691 + if (err) 692 + goto err_unmap; 693 + } 693 694 694 695 addr = page_pool_get_dma_addr_netmem(frag_page->netmem); 695 - 696 - for (int j = 0; j < MLX5E_SHAMPO_WQ_HEADER_PER_PAGE; j++) { 697 - header_offset = mlx5e_shampo_hd_offset(index++); 698 - 699 - umr_wqe->inline_ksms[i++] = (struct mlx5_ksm) { 700 - .key = cpu_to_be32(lkey), 701 - .va = cpu_to_be64(addr + header_offset + headroom), 702 - }; 703 - } 696 + umr_wqe->inline_ksms[i] = (struct mlx5_ksm) { 697 + .key = cpu_to_be32(lkey), 698 + .va = cpu_to_be64(addr + header_offset + headroom), 699 + }; 704 700 } 705 701 706 702 sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) { ··· 715 713 return 0; 716 714 717 715 err_unmap: 718 - while (--i) { 716 + while (--i >= 0) { 719 717 --index; 720 - header_offset = mlx5e_shampo_hd_offset(index); 718 + header_offset = mlx5e_shampo_hd_offset(rq, index); 721 719 if (!header_offset) { 722 720 struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, index); 723 721 ··· 737 735 struct mlx5e_icosq *sq = rq->icosq; 738 736 int i, err, max_ksm_entries, len; 739 737 740 - max_ksm_entries = ALIGN_DOWN(MLX5E_MAX_KSM_PER_WQE(rq->mdev), 741 - MLX5E_SHAMPO_WQ_HEADER_PER_PAGE); 738 + max_ksm_entries = MLX5E_MAX_KSM_PER_WQE(rq->mdev); 742 739 ksm_entries = bitmap_find_window(shampo->bitmap, 743 740 shampo->hd_per_wqe, 744 741 shampo->hd_per_wq, shampo->pi); 745 - ksm_entries = ALIGN_DOWN(ksm_entries, MLX5E_SHAMPO_WQ_HEADER_PER_PAGE); 742 + ksm_entries = ALIGN_DOWN(ksm_entries, shampo->hd_per_page); 746 743 if (!ksm_entries) 747 744 return 0; 748 745 ··· 859 858 { 860 859 struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; 861 860 862 - if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) { 
861 + if (((header_index + 1) & (shampo->hd_per_page - 1)) == 0) { 863 862 struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index); 864 863 865 864 mlx5e_page_release_fragmented(rq->hd_page_pool, frag_page); ··· 1226 1225 static void *mlx5e_shampo_get_packet_hd(struct mlx5e_rq *rq, u16 header_index) 1227 1226 { 1228 1227 struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index); 1229 - u16 head_offset = mlx5e_shampo_hd_offset(header_index) + rq->buff.headroom; 1228 + u16 head_offset = mlx5e_shampo_hd_offset(rq, header_index); 1229 + void *addr = netmem_address(frag_page->netmem); 1230 1230 1231 - return netmem_address(frag_page->netmem) + head_offset; 1231 + return addr + head_offset + rq->buff.headroom; 1232 1232 } 1233 1233 1234 1234 static void mlx5e_shampo_update_ipv4_udp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4) ··· 2269 2267 struct mlx5_cqe64 *cqe, u16 header_index) 2270 2268 { 2271 2269 struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index); 2272 - u16 head_offset = mlx5e_shampo_hd_offset(header_index); 2270 + u16 head_offset = mlx5e_shampo_hd_offset(rq, header_index); 2271 + struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; 2273 2272 u16 head_size = cqe->shampo.header_size; 2274 2273 u16 rx_headroom = rq->buff.headroom; 2275 2274 struct sk_buff *skb = NULL; ··· 2286 2283 data = hdr + rx_headroom; 2287 2284 frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + head_size); 2288 2285 2289 - if (likely(frag_size <= BIT(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE))) { 2286 + if (likely(frag_size <= BIT(shampo->log_hd_entry_size))) { 2290 2287 /* build SKB around header */ 2291 2288 dma_sync_single_range_for_cpu(rq->pdev, dma_addr, 0, frag_size, rq->buff.map_dir); 2292 2289 net_prefetchw(hdr); ··· 2359 2356 { 2360 2357 int nr_frags = skb_shinfo(skb)->nr_frags; 2361 2358 2362 - return PAGE_SIZE * nr_frags + data_bcnt <= GRO_LEGACY_MAX_SIZE; 2359 + if (PAGE_SIZE >= GRO_LEGACY_MAX_SIZE) 2360 + 
return skb->len + data_bcnt <= GRO_LEGACY_MAX_SIZE; 2361 + else 2362 + return PAGE_SIZE * nr_frags + data_bcnt <= GRO_LEGACY_MAX_SIZE; 2363 2363 } 2364 2364 2365 2365 static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+9 -9
drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
··· 294 294 { 295 295 int i, j; 296 296 297 - mutex_lock(&lan966x->stats_lock); 297 + spin_lock(&lan966x->stats_lock); 298 298 299 299 for (i = 0; i < lan966x->num_phys_ports; i++) { 300 300 uint idx = i * lan966x->num_stats; ··· 310 310 } 311 311 } 312 312 313 - mutex_unlock(&lan966x->stats_lock); 313 + spin_unlock(&lan966x->stats_lock); 314 314 } 315 315 316 316 static int lan966x_get_sset_count(struct net_device *dev, int sset) ··· 365 365 366 366 idx = port->chip_port * lan966x->num_stats; 367 367 368 - mutex_lock(&lan966x->stats_lock); 368 + spin_lock(&lan966x->stats_lock); 369 369 370 370 mac_stats->FramesTransmittedOK = 371 371 lan966x->stats[idx + SYS_COUNT_TX_UC] + ··· 416 416 lan966x->stats[idx + SYS_COUNT_RX_LONG] + 417 417 lan966x->stats[idx + SYS_COUNT_RX_PMAC_LONG]; 418 418 419 - mutex_unlock(&lan966x->stats_lock); 419 + spin_unlock(&lan966x->stats_lock); 420 420 } 421 421 422 422 static const struct ethtool_rmon_hist_range lan966x_rmon_ranges[] = { ··· 442 442 443 443 idx = port->chip_port * lan966x->num_stats; 444 444 445 - mutex_lock(&lan966x->stats_lock); 445 + spin_lock(&lan966x->stats_lock); 446 446 447 447 rmon_stats->undersize_pkts = 448 448 lan966x->stats[idx + SYS_COUNT_RX_SHORT] + ··· 500 500 lan966x->stats[idx + SYS_COUNT_TX_SZ_1024_1526] + 501 501 lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_1024_1526]; 502 502 503 - mutex_unlock(&lan966x->stats_lock); 503 + spin_unlock(&lan966x->stats_lock); 504 504 505 505 *ranges = lan966x_rmon_ranges; 506 506 } ··· 603 603 604 604 idx = port->chip_port * lan966x->num_stats; 605 605 606 - mutex_lock(&lan966x->stats_lock); 606 + spin_lock(&lan966x->stats_lock); 607 607 608 608 stats->rx_bytes = lan966x->stats[idx + SYS_COUNT_RX_OCT] + 609 609 lan966x->stats[idx + SYS_COUNT_RX_PMAC_OCT]; ··· 685 685 686 686 stats->collisions = lan966x->stats[idx + SYS_COUNT_TX_COL]; 687 687 688 - mutex_unlock(&lan966x->stats_lock); 688 + spin_unlock(&lan966x->stats_lock); 689 689 } 690 690 691 691 int 
lan966x_stats_init(struct lan966x *lan966x) ··· 701 701 return -ENOMEM; 702 702 703 703 /* Init stats worker */ 704 - mutex_init(&lan966x->stats_lock); 704 + spin_lock_init(&lan966x->stats_lock); 705 705 snprintf(queue_name, sizeof(queue_name), "%s-stats", 706 706 dev_name(lan966x->dev)); 707 707 lan966x->stats_queue = create_singlethread_workqueue(queue_name);
-2
drivers/net/ethernet/microchip/lan966x/lan966x_main.c
··· 1261 1261 1262 1262 cancel_delayed_work_sync(&lan966x->stats_work); 1263 1263 destroy_workqueue(lan966x->stats_queue); 1264 - mutex_destroy(&lan966x->stats_lock); 1265 1264 1266 1265 debugfs_remove_recursive(lan966x->debugfs_root); 1267 1266 ··· 1278 1279 1279 1280 cancel_delayed_work_sync(&lan966x->stats_work); 1280 1281 destroy_workqueue(lan966x->stats_queue); 1281 - mutex_destroy(&lan966x->stats_lock); 1282 1282 1283 1283 lan966x_mac_purge_entries(lan966x); 1284 1284 lan966x_mdb_deinit(lan966x);
+2 -2
drivers/net/ethernet/microchip/lan966x/lan966x_main.h
··· 295 295 const struct lan966x_stat_layout *stats_layout; 296 296 u32 num_stats; 297 297 298 - /* workqueue for reading stats */ 299 - struct mutex stats_lock; 298 + /* lock for reading stats */ 299 + spinlock_t stats_lock; 300 300 u64 *stats; 301 301 struct delayed_work stats_work; 302 302 struct workqueue_struct *stats_queue;
+4 -4
drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
··· 403 403 u32 counter; 404 404 405 405 id = id & 0xff; /* counter limit */ 406 - mutex_lock(&lan966x->stats_lock); 406 + spin_lock(&lan966x->stats_lock); 407 407 lan_wr(SYS_STAT_CFG_STAT_VIEW_SET(id), lan966x, SYS_STAT_CFG); 408 408 counter = lan_rd(lan966x, SYS_CNT(LAN966X_STAT_ESDX_GRN_PKTS)) + 409 409 lan_rd(lan966x, SYS_CNT(LAN966X_STAT_ESDX_YEL_PKTS)); 410 - mutex_unlock(&lan966x->stats_lock); 410 + spin_unlock(&lan966x->stats_lock); 411 411 if (counter) 412 412 admin->cache.counter = counter; 413 413 } ··· 417 417 { 418 418 id = id & 0xff; /* counter limit */ 419 419 420 - mutex_lock(&lan966x->stats_lock); 420 + spin_lock(&lan966x->stats_lock); 421 421 lan_wr(SYS_STAT_CFG_STAT_VIEW_SET(id), lan966x, SYS_STAT_CFG); 422 422 lan_wr(0, lan966x, SYS_CNT(LAN966X_STAT_ESDX_GRN_BYTES)); 423 423 lan_wr(admin->cache.counter, lan966x, 424 424 SYS_CNT(LAN966X_STAT_ESDX_GRN_PKTS)); 425 425 lan_wr(0, lan966x, SYS_CNT(LAN966X_STAT_ESDX_YEL_BYTES)); 426 426 lan_wr(0, lan966x, SYS_CNT(LAN966X_STAT_ESDX_YEL_PKTS)); 427 - mutex_unlock(&lan966x->stats_lock); 427 + spin_unlock(&lan966x->stats_lock); 428 428 } 429 429 430 430 static void lan966x_vcap_cache_write(struct net_device *dev,
+17 -17
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
··· 29 29 30 30 static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell) 31 31 { 32 + /* Ensure TX descriptor writes reach memory before NIC reads them. 33 + * Prevents device from fetching stale descriptors. 34 + */ 35 + dma_wmb(); 32 36 ionic_q_post(q, ring_dbell); 33 37 } 34 38 ··· 1448 1444 bool encap; 1449 1445 int err; 1450 1446 1451 - desc_info = &q->tx_info[q->head_idx]; 1452 - 1453 - if (unlikely(ionic_tx_map_skb(q, skb, desc_info))) 1454 - return -EIO; 1455 - 1456 - len = skb->len; 1457 - mss = skb_shinfo(skb)->gso_size; 1458 - outer_csum = (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | 1459 - SKB_GSO_GRE_CSUM | 1460 - SKB_GSO_IPXIP4 | 1461 - SKB_GSO_IPXIP6 | 1462 - SKB_GSO_UDP_TUNNEL | 1463 - SKB_GSO_UDP_TUNNEL_CSUM)); 1464 1447 has_vlan = !!skb_vlan_tag_present(skb); 1465 1448 vlan_tci = skb_vlan_tag_get(skb); 1466 1449 encap = skb->encapsulation; ··· 1461 1470 err = ionic_tx_tcp_inner_pseudo_csum(skb); 1462 1471 else 1463 1472 err = ionic_tx_tcp_pseudo_csum(skb); 1464 - if (unlikely(err)) { 1465 - /* clean up mapping from ionic_tx_map_skb */ 1466 - ionic_tx_desc_unmap_bufs(q, desc_info); 1473 + if (unlikely(err)) 1467 1474 return err; 1468 - } 1469 1475 1476 + desc_info = &q->tx_info[q->head_idx]; 1477 + if (unlikely(ionic_tx_map_skb(q, skb, desc_info))) 1478 + return -EIO; 1479 + 1480 + len = skb->len; 1481 + mss = skb_shinfo(skb)->gso_size; 1482 + outer_csum = (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | 1483 + SKB_GSO_GRE_CSUM | 1484 + SKB_GSO_IPXIP4 | 1485 + SKB_GSO_IPXIP6 | 1486 + SKB_GSO_UDP_TUNNEL | 1487 + SKB_GSO_UDP_TUNNEL_CSUM)); 1470 1488 if (encap) 1471 1489 hdrlen = skb_inner_tcp_all_headers(skb); 1472 1490 else
+3
drivers/net/ethernet/spacemit/k1_emac.c
··· 1441 1441 struct emac_priv *priv = netdev_priv(dev); 1442 1442 u8 fc = 0; 1443 1443 1444 + if (!netif_running(dev)) 1445 + return -ENETDOWN; 1446 + 1444 1447 priv->flow_control_autoneg = pause->autoneg; 1445 1448 1446 1449 if (pause->autoneg) {
+7
drivers/net/ethernet/ti/icssg/icssg_config.c
··· 66 66 #define FDB_GEN_CFG1 0x60 67 67 #define SMEM_VLAN_OFFSET 8 68 68 #define SMEM_VLAN_OFFSET_MASK GENMASK(25, 8) 69 + #define FDB_HASH_SIZE_MASK GENMASK(6, 3) 70 + #define FDB_HASH_SIZE_SHIFT 3 71 + #define FDB_HASH_SIZE 3 69 72 70 73 #define FDB_GEN_CFG2 0x64 71 74 #define FDB_VLAN_EN BIT(6) ··· 466 463 /* Set VLAN TABLE address base */ 467 464 regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK, 468 465 addr << SMEM_VLAN_OFFSET); 466 + regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, FDB_HASH_SIZE_MASK, 467 + FDB_HASH_SIZE << FDB_HASH_SIZE_SHIFT); 469 468 /* Set enable VLAN aware mode, and FDBs for all PRUs */ 470 469 regmap_write(prueth->miig_rt, FDB_GEN_CFG2, (FDB_PRU0_EN | FDB_PRU1_EN | FDB_HOST_EN)); 471 470 prueth->vlan_tbl = (struct prueth_vlan_tbl __force *)(prueth->shram.va + ··· 489 484 /* Set VLAN TABLE address base */ 490 485 regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK, 491 486 addr << SMEM_VLAN_OFFSET); 487 + regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, FDB_HASH_SIZE_MASK, 488 + FDB_HASH_SIZE << FDB_HASH_SIZE_SHIFT); 492 489 /* Set enable VLAN aware mode, and FDBs for all PRUs */ 493 490 regmap_write(prueth->miig_rt, FDB_GEN_CFG2, FDB_EN_ALL); 494 491 prueth->vlan_tbl = (struct prueth_vlan_tbl __force *)(prueth->shram.va +
+5 -5
drivers/net/ethernet/ti/netcp_core.c
··· 1338 1338 1339 1339 tx_pipe->dma_channel = knav_dma_open_channel(dev, 1340 1340 tx_pipe->dma_chan_name, &config); 1341 - if (IS_ERR(tx_pipe->dma_channel)) { 1341 + if (!tx_pipe->dma_channel) { 1342 1342 dev_err(dev, "failed opening tx chan(%s)\n", 1343 1343 tx_pipe->dma_chan_name); 1344 - ret = PTR_ERR(tx_pipe->dma_channel); 1344 + ret = -EINVAL; 1345 1345 goto err; 1346 1346 } 1347 1347 ··· 1359 1359 return 0; 1360 1360 1361 1361 err: 1362 - if (!IS_ERR_OR_NULL(tx_pipe->dma_channel)) 1362 + if (tx_pipe->dma_channel) 1363 1363 knav_dma_close_channel(tx_pipe->dma_channel); 1364 1364 tx_pipe->dma_channel = NULL; 1365 1365 return ret; ··· 1678 1678 1679 1679 netcp->rx_channel = knav_dma_open_channel(netcp->netcp_device->device, 1680 1680 netcp->dma_chan_name, &config); 1681 - if (IS_ERR(netcp->rx_channel)) { 1681 + if (!netcp->rx_channel) { 1682 1682 dev_err(netcp->ndev_dev, "failed opening rx chan(%s\n", 1683 1683 netcp->dma_chan_name); 1684 - ret = PTR_ERR(netcp->rx_channel); 1684 + ret = -EINVAL; 1685 1685 goto fail; 1686 1686 } 1687 1687
+2 -1
drivers/net/ethernet/wangxun/libwx/wx_hw.c
··· 2427 2427 wx->oem_svid = pdev->subsystem_vendor; 2428 2428 wx->oem_ssid = pdev->subsystem_device; 2429 2429 wx->bus.device = PCI_SLOT(pdev->devfn); 2430 - wx->bus.func = PCI_FUNC(pdev->devfn); 2430 + wx->bus.func = FIELD_GET(WX_CFG_PORT_ST_LANID, 2431 + rd32(wx, WX_CFG_PORT_ST)); 2431 2432 2432 2433 if (wx->oem_svid == PCI_VENDOR_ID_WANGXUN || 2433 2434 pdev->is_virtfn) {
+2 -2
drivers/net/ethernet/wangxun/libwx/wx_type.h
··· 97 97 #define WX_CFG_PORT_CTL_DRV_LOAD BIT(3) 98 98 #define WX_CFG_PORT_CTL_QINQ BIT(2) 99 99 #define WX_CFG_PORT_CTL_D_VLAN BIT(0) /* double vlan*/ 100 + #define WX_CFG_PORT_ST 0x14404 101 + #define WX_CFG_PORT_ST_LANID GENMASK(9, 8) 100 102 #define WX_CFG_TAG_TPID(_i) (0x14430 + ((_i) * 4)) 101 103 #define WX_CFG_PORT_CTL_NUM_VT_MASK GENMASK(13, 12) /* number of TVs */ 102 104 ··· 558 556 /* Tx Descriptors needed, worst case */ 559 557 #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), WX_MAX_DATA_PER_TXD) 560 558 #define DESC_NEEDED (MAX_SKB_FRAGS + 4) 561 - 562 - #define WX_CFG_PORT_ST 0x14404 563 559 564 560 /******************* Receive Descriptor bit definitions **********************/ 565 561 #define WX_RXD_STAT_DD BIT(0) /* Done */
+2
drivers/net/mdio/mdio-airoha.c
··· 219 219 priv = bus->priv; 220 220 priv->base_addr = addr; 221 221 priv->regmap = device_node_to_regmap(dev->parent->of_node); 222 + if (IS_ERR(priv->regmap)) 223 + return PTR_ERR(priv->regmap); 222 224 223 225 priv->clk = devm_clk_get_enabled(dev, NULL); 224 226 if (IS_ERR(priv->clk))
+10
drivers/net/netconsole.c
··· 936 936 if (count > MAX_EXTRADATA_VALUE_LEN) 937 937 return -EMSGSIZE; 938 938 939 + mutex_lock(&netconsole_subsys.su_mutex); 939 940 mutex_lock(&dynamic_netconsole_mutex); 940 941 941 942 ret = strscpy(udm->value, buf, sizeof(udm->value)); ··· 950 949 ret = count; 951 950 out_unlock: 952 951 mutex_unlock(&dynamic_netconsole_mutex); 952 + mutex_unlock(&netconsole_subsys.su_mutex); 953 953 return ret; 954 954 } 955 955 ··· 976 974 if (ret) 977 975 return ret; 978 976 977 + mutex_lock(&netconsole_subsys.su_mutex); 979 978 mutex_lock(&dynamic_netconsole_mutex); 980 979 curr = !!(nt->sysdata_fields & SYSDATA_MSGID); 981 980 if (msgid_enabled == curr) ··· 997 994 ret = strnlen(buf, count); 998 995 unlock: 999 996 mutex_unlock(&dynamic_netconsole_mutex); 997 + mutex_unlock(&netconsole_subsys.su_mutex); 1000 998 return ret; 1001 999 } 1002 1000 ··· 1012 1008 if (ret) 1013 1009 return ret; 1014 1010 1011 + mutex_lock(&netconsole_subsys.su_mutex); 1015 1012 mutex_lock(&dynamic_netconsole_mutex); 1016 1013 curr = !!(nt->sysdata_fields & SYSDATA_RELEASE); 1017 1014 if (release_enabled == curr) ··· 1033 1028 ret = strnlen(buf, count); 1034 1029 unlock: 1035 1030 mutex_unlock(&dynamic_netconsole_mutex); 1031 + mutex_unlock(&netconsole_subsys.su_mutex); 1036 1032 return ret; 1037 1033 } 1038 1034 ··· 1048 1042 if (ret) 1049 1043 return ret; 1050 1044 1045 + mutex_lock(&netconsole_subsys.su_mutex); 1051 1046 mutex_lock(&dynamic_netconsole_mutex); 1052 1047 curr = !!(nt->sysdata_fields & SYSDATA_TASKNAME); 1053 1048 if (taskname_enabled == curr) ··· 1069 1062 ret = strnlen(buf, count); 1070 1063 unlock: 1071 1064 mutex_unlock(&dynamic_netconsole_mutex); 1065 + mutex_unlock(&netconsole_subsys.su_mutex); 1072 1066 return ret; 1073 1067 } 1074 1068 ··· 1085 1077 if (ret) 1086 1078 return ret; 1087 1079 1080 + mutex_lock(&netconsole_subsys.su_mutex); 1088 1081 mutex_lock(&dynamic_netconsole_mutex); 1089 1082 curr = !!(nt->sysdata_fields & SYSDATA_CPU_NR); 1090 1083 if 
(cpu_nr_enabled == curr) ··· 1114 1105 ret = strnlen(buf, count); 1115 1106 unlock: 1116 1107 mutex_unlock(&dynamic_netconsole_mutex); 1108 + mutex_unlock(&netconsole_subsys.su_mutex); 1117 1109 return ret; 1118 1110 } 1119 1111
+163
drivers/net/phy/micrel.c
··· 466 466 u16 rev; 467 467 }; 468 468 469 + struct lanphy_reg_data { 470 + int page; 471 + u16 addr; 472 + u16 val; 473 + }; 474 + 469 475 static const struct kszphy_type lan8814_type = { 470 476 .led_mode_reg = ~LAN8814_LED_CTRL_1, 471 477 .cable_diag_reg = LAN8814_CABLE_DIAG, ··· 2842 2836 #define LAN8814_PAGE_PCS_DIGITAL 2 2843 2837 2844 2838 /** 2839 + * LAN8814_PAGE_EEE - Selects Extended Page 3. 2840 + * 2841 + * This page contains EEE registers 2842 + */ 2843 + #define LAN8814_PAGE_EEE 3 2844 + 2845 + /** 2845 2846 * LAN8814_PAGE_COMMON_REGS - Selects Extended Page 4. 2846 2847 * 2847 2848 * This page contains device-common registers that affect the entire chip. ··· 2865 2852 * rate adaptation FIFOs, and the per-port 1588 TSU block. 2866 2853 */ 2867 2854 #define LAN8814_PAGE_PORT_REGS 5 2855 + 2856 + /** 2857 + * LAN8814_PAGE_POWER_REGS - Selects Extended Page 28. 2858 + * 2859 + * This page contains analog control registers and power mode registers. 2860 + */ 2861 + #define LAN8814_PAGE_POWER_REGS 28 2868 2862 2869 2863 /** 2870 2864 * LAN8814_PAGE_SYSTEM_CTRL - Selects Extended Page 31. 
··· 5904 5884 return 0; 5905 5885 } 5906 5886 5887 + #define LAN8814_POWER_MGMT_MODE_3_ANEG_MDI 0x13 5888 + #define LAN8814_POWER_MGMT_MODE_4_ANEG_MDIX 0x14 5889 + #define LAN8814_POWER_MGMT_MODE_5_10BT_MDI 0x15 5890 + #define LAN8814_POWER_MGMT_MODE_6_10BT_MDIX 0x16 5891 + #define LAN8814_POWER_MGMT_MODE_7_100BT_TRAIN 0x17 5892 + #define LAN8814_POWER_MGMT_MODE_8_100BT_MDI 0x18 5893 + #define LAN8814_POWER_MGMT_MODE_9_100BT_EEE_MDI_TX 0x19 5894 + #define LAN8814_POWER_MGMT_MODE_10_100BT_EEE_MDI_RX 0x1a 5895 + #define LAN8814_POWER_MGMT_MODE_11_100BT_MDIX 0x1b 5896 + #define LAN8814_POWER_MGMT_MODE_12_100BT_EEE_MDIX_TX 0x1c 5897 + #define LAN8814_POWER_MGMT_MODE_13_100BT_EEE_MDIX_RX 0x1d 5898 + #define LAN8814_POWER_MGMT_MODE_14_100BTX_EEE_TX_RX 0x1e 5899 + 5900 + #define LAN8814_POWER_MGMT_DLLPD_D BIT(0) 5901 + #define LAN8814_POWER_MGMT_ADCPD_D BIT(1) 5902 + #define LAN8814_POWER_MGMT_PGAPD_D BIT(2) 5903 + #define LAN8814_POWER_MGMT_TXPD_D BIT(3) 5904 + #define LAN8814_POWER_MGMT_DLLPD_C BIT(4) 5905 + #define LAN8814_POWER_MGMT_ADCPD_C BIT(5) 5906 + #define LAN8814_POWER_MGMT_PGAPD_C BIT(6) 5907 + #define LAN8814_POWER_MGMT_TXPD_C BIT(7) 5908 + #define LAN8814_POWER_MGMT_DLLPD_B BIT(8) 5909 + #define LAN8814_POWER_MGMT_ADCPD_B BIT(9) 5910 + #define LAN8814_POWER_MGMT_PGAPD_B BIT(10) 5911 + #define LAN8814_POWER_MGMT_TXPD_B BIT(11) 5912 + #define LAN8814_POWER_MGMT_DLLPD_A BIT(12) 5913 + #define LAN8814_POWER_MGMT_ADCPD_A BIT(13) 5914 + #define LAN8814_POWER_MGMT_PGAPD_A BIT(14) 5915 + #define LAN8814_POWER_MGMT_TXPD_A BIT(15) 5916 + 5917 + #define LAN8814_POWER_MGMT_C_D (LAN8814_POWER_MGMT_DLLPD_D | \ 5918 + LAN8814_POWER_MGMT_ADCPD_D | \ 5919 + LAN8814_POWER_MGMT_PGAPD_D | \ 5920 + LAN8814_POWER_MGMT_DLLPD_C | \ 5921 + LAN8814_POWER_MGMT_ADCPD_C | \ 5922 + LAN8814_POWER_MGMT_PGAPD_C) 5923 + 5924 + #define LAN8814_POWER_MGMT_B_C_D (LAN8814_POWER_MGMT_C_D | \ 5925 + LAN8814_POWER_MGMT_DLLPD_B | \ 5926 + LAN8814_POWER_MGMT_ADCPD_B | \ 5927 + 
LAN8814_POWER_MGMT_PGAPD_B) 5928 + 5929 + #define LAN8814_POWER_MGMT_VAL1 (LAN8814_POWER_MGMT_C_D | \ 5930 + LAN8814_POWER_MGMT_ADCPD_B | \ 5931 + LAN8814_POWER_MGMT_PGAPD_B | \ 5932 + LAN8814_POWER_MGMT_ADCPD_A | \ 5933 + LAN8814_POWER_MGMT_PGAPD_A) 5934 + 5935 + #define LAN8814_POWER_MGMT_VAL2 LAN8814_POWER_MGMT_C_D 5936 + 5937 + #define LAN8814_POWER_MGMT_VAL3 (LAN8814_POWER_MGMT_C_D | \ 5938 + LAN8814_POWER_MGMT_DLLPD_B | \ 5939 + LAN8814_POWER_MGMT_ADCPD_B | \ 5940 + LAN8814_POWER_MGMT_PGAPD_A) 5941 + 5942 + #define LAN8814_POWER_MGMT_VAL4 (LAN8814_POWER_MGMT_B_C_D | \ 5943 + LAN8814_POWER_MGMT_ADCPD_A | \ 5944 + LAN8814_POWER_MGMT_PGAPD_A) 5945 + 5946 + #define LAN8814_POWER_MGMT_VAL5 LAN8814_POWER_MGMT_B_C_D 5947 + 5948 + #define LAN8814_EEE_WAKE_TX_TIMER 0x0e 5949 + #define LAN8814_EEE_WAKE_TX_TIMER_MAX_VAL 0x1f 5950 + 5951 + static const struct lanphy_reg_data short_center_tap_errata[] = { 5952 + { LAN8814_PAGE_POWER_REGS, 5953 + LAN8814_POWER_MGMT_MODE_3_ANEG_MDI, 5954 + LAN8814_POWER_MGMT_VAL1 }, 5955 + { LAN8814_PAGE_POWER_REGS, 5956 + LAN8814_POWER_MGMT_MODE_4_ANEG_MDIX, 5957 + LAN8814_POWER_MGMT_VAL1 }, 5958 + { LAN8814_PAGE_POWER_REGS, 5959 + LAN8814_POWER_MGMT_MODE_5_10BT_MDI, 5960 + LAN8814_POWER_MGMT_VAL1 }, 5961 + { LAN8814_PAGE_POWER_REGS, 5962 + LAN8814_POWER_MGMT_MODE_6_10BT_MDIX, 5963 + LAN8814_POWER_MGMT_VAL1 }, 5964 + { LAN8814_PAGE_POWER_REGS, 5965 + LAN8814_POWER_MGMT_MODE_7_100BT_TRAIN, 5966 + LAN8814_POWER_MGMT_VAL2 }, 5967 + { LAN8814_PAGE_POWER_REGS, 5968 + LAN8814_POWER_MGMT_MODE_8_100BT_MDI, 5969 + LAN8814_POWER_MGMT_VAL3 }, 5970 + { LAN8814_PAGE_POWER_REGS, 5971 + LAN8814_POWER_MGMT_MODE_9_100BT_EEE_MDI_TX, 5972 + LAN8814_POWER_MGMT_VAL3 }, 5973 + { LAN8814_PAGE_POWER_REGS, 5974 + LAN8814_POWER_MGMT_MODE_10_100BT_EEE_MDI_RX, 5975 + LAN8814_POWER_MGMT_VAL4 }, 5976 + { LAN8814_PAGE_POWER_REGS, 5977 + LAN8814_POWER_MGMT_MODE_11_100BT_MDIX, 5978 + LAN8814_POWER_MGMT_VAL5 }, 5979 + { LAN8814_PAGE_POWER_REGS, 5980 + 
LAN8814_POWER_MGMT_MODE_12_100BT_EEE_MDIX_TX, 5981 + LAN8814_POWER_MGMT_VAL5 }, 5982 + { LAN8814_PAGE_POWER_REGS, 5983 + LAN8814_POWER_MGMT_MODE_13_100BT_EEE_MDIX_RX, 5984 + LAN8814_POWER_MGMT_VAL4 }, 5985 + { LAN8814_PAGE_POWER_REGS, 5986 + LAN8814_POWER_MGMT_MODE_14_100BTX_EEE_TX_RX, 5987 + LAN8814_POWER_MGMT_VAL4 }, 5988 + }; 5989 + 5990 + static const struct lanphy_reg_data waketx_timer_errata[] = { 5991 + { LAN8814_PAGE_EEE, 5992 + LAN8814_EEE_WAKE_TX_TIMER, 5993 + LAN8814_EEE_WAKE_TX_TIMER_MAX_VAL }, 5994 + }; 5995 + 5996 + static int lanphy_write_reg_data(struct phy_device *phydev, 5997 + const struct lanphy_reg_data *data, 5998 + size_t num) 5999 + { 6000 + int ret = 0; 6001 + 6002 + while (num--) { 6003 + ret = lanphy_write_page_reg(phydev, data->page, data->addr, 6004 + data->val); 6005 + if (ret) 6006 + break; 6007 + } 6008 + 6009 + return ret; 6010 + } 6011 + 6012 + static int lan8842_erratas(struct phy_device *phydev) 6013 + { 6014 + int ret; 6015 + 6016 + ret = lanphy_write_reg_data(phydev, short_center_tap_errata, 6017 + ARRAY_SIZE(short_center_tap_errata)); 6018 + if (ret) 6019 + return ret; 6020 + 6021 + return lanphy_write_reg_data(phydev, waketx_timer_errata, 6022 + ARRAY_SIZE(waketx_timer_errata)); 6023 + } 6024 + 5907 6025 static int lan8842_config_init(struct phy_device *phydev) 5908 6026 { 5909 6027 int ret; ··· 6051 5893 LAN8814_QSGMII_SOFT_RESET, 6052 5894 LAN8814_QSGMII_SOFT_RESET_BIT, 6053 5895 LAN8814_QSGMII_SOFT_RESET_BIT); 5896 + if (ret < 0) 5897 + return ret; 5898 + 5899 + /* Apply the erratas for this device */ 5900 + ret = lan8842_erratas(phydev); 6054 5901 if (ret < 0) 6055 5902 return ret; 6056 5903
+6
drivers/net/usb/qmi_wwan.c
··· 192 192 if (!skbn) 193 193 return 0; 194 194 195 + /* Raw IP packets don't have a MAC header, but other subsystems 196 + * (like xfrm) may still access MAC header offsets, so they must 197 + * be initialized. 198 + */ 199 + skb_reset_mac_header(skbn); 200 + 195 201 switch (skb->data[offset + qmimux_hdr_sz] & 0xf0) { 196 202 case 0x40: 197 203 skbn->protocol = htons(ETH_P_IP);
+25 -15
drivers/net/virtio_net.c
··· 910 910 goto ok; 911 911 } 912 912 913 - /* 914 - * Verify that we can indeed put this data into a skb. 915 - * This is here to handle cases when the device erroneously 916 - * tries to receive more than is possible. This is usually 917 - * the case of a broken device. 918 - */ 919 - if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) { 920 - net_dbg_ratelimited("%s: too much data\n", skb->dev->name); 921 - dev_kfree_skb(skb); 922 - return NULL; 923 - } 924 913 BUG_ON(offset >= PAGE_SIZE); 925 914 while (len) { 926 915 unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len); ··· 2101 2112 struct virtnet_rq_stats *stats) 2102 2113 { 2103 2114 struct page *page = buf; 2104 - struct sk_buff *skb = 2105 - page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0); 2115 + struct sk_buff *skb; 2106 2116 2117 + /* Make sure that len does not exceed the size allocated in 2118 + * add_recvbuf_big. 2119 + */ 2120 + if (unlikely(len > (vi->big_packets_num_skbfrags + 1) * PAGE_SIZE)) { 2121 + pr_debug("%s: rx error: len %u exceeds allocated size %lu\n", 2122 + dev->name, len, 2123 + (vi->big_packets_num_skbfrags + 1) * PAGE_SIZE); 2124 + goto err; 2125 + } 2126 + 2127 + skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0); 2107 2128 u64_stats_add(&stats->bytes, len - vi->hdr_len); 2108 2129 if (unlikely(!skb)) 2109 2130 goto err; ··· 2538 2539 return NULL; 2539 2540 } 2540 2541 2542 + static inline u32 2543 + virtio_net_hash_value(const struct virtio_net_hdr_v1_hash *hdr_hash) 2544 + { 2545 + return __le16_to_cpu(hdr_hash->hash_value_lo) | 2546 + (__le16_to_cpu(hdr_hash->hash_value_hi) << 16); 2547 + } 2548 + 2541 2549 static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash, 2542 2550 struct sk_buff *skb) 2543 2551 { ··· 2571 2565 default: 2572 2566 rss_hash_type = PKT_HASH_TYPE_NONE; 2573 2567 } 2574 - skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type); 2568 + skb_set_hash(skb, virtio_net_hash_value(hdr_hash), rss_hash_type); 2575 2569 } 
2576 2570 2577 2571 static void virtnet_receive_done(struct virtnet_info *vi, struct receive_queue *rq, ··· 3316 3310 bool can_push; 3317 3311 3318 3312 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); 3313 + 3314 + /* Make sure it's safe to cast between formats */ 3315 + BUILD_BUG_ON(__alignof__(*hdr) != __alignof__(hdr->hash_hdr)); 3316 + BUILD_BUG_ON(__alignof__(*hdr) != __alignof__(hdr->hash_hdr.hdr)); 3319 3317 3320 3318 can_push = vi->any_header_sg && 3321 3319 !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) && ··· 6760 6750 hash_report = VIRTIO_NET_HASH_REPORT_NONE; 6761 6751 6762 6752 *rss_type = virtnet_xdp_rss_type[hash_report]; 6763 - *hash = __le32_to_cpu(hdr_hash->hash_value); 6753 + *hash = virtio_net_hash_value(hdr_hash); 6764 6754 return 0; 6765 6755 } 6766 6756
+4 -3
drivers/net/wan/framer/pef2256/pef2256.c
··· 648 648 audio_devs[i].id = i; 649 649 } 650 650 651 - ret = mfd_add_devices(pef2256->dev, 0, audio_devs, count, NULL, 0, NULL); 651 + ret = devm_mfd_add_devices(pef2256->dev, 0, audio_devs, count, 652 + NULL, 0, NULL); 652 653 kfree(audio_devs); 653 654 return ret; 654 655 } ··· 823 822 824 823 platform_set_drvdata(pdev, pef2256); 825 824 826 - ret = mfd_add_devices(pef2256->dev, 0, pef2256_devs, 827 - ARRAY_SIZE(pef2256_devs), NULL, 0, NULL); 825 + ret = devm_mfd_add_devices(pef2256->dev, 0, pef2256_devs, 826 + ARRAY_SIZE(pef2256_devs), NULL, 0, NULL); 828 827 if (ret) { 829 828 dev_err(pef2256->dev, "add devices failed (%d)\n", ret); 830 829 return ret;
+20 -19
drivers/net/wireless/ath/ath10k/wmi.c
··· 1764 1764 1765 1765 int ath10k_wmi_wait_for_service_ready(struct ath10k *ar) 1766 1766 { 1767 - unsigned long timeout = jiffies + WMI_SERVICE_READY_TIMEOUT_HZ; 1768 1767 unsigned long time_left, i; 1769 1768 1770 - /* Sometimes the PCI HIF doesn't receive interrupt 1771 - * for the service ready message even if the buffer 1772 - * was completed. PCIe sniffer shows that it's 1773 - * because the corresponding CE ring doesn't fires 1774 - * it. Workaround here by polling CE rings. Since 1775 - * the message could arrive at any time, continue 1776 - * polling until timeout. 1777 - */ 1778 - do { 1769 + time_left = wait_for_completion_timeout(&ar->wmi.service_ready, 1770 + WMI_SERVICE_READY_TIMEOUT_HZ); 1771 + if (!time_left) { 1772 + /* Sometimes the PCI HIF doesn't receive interrupt 1773 + * for the service ready message even if the buffer 1774 + * was completed. PCIe sniffer shows that it's 1775 + * because the corresponding CE ring doesn't fires 1776 + * it. Workaround here by polling CE rings once. 1777 + */ 1778 + ath10k_warn(ar, "failed to receive service ready completion, polling..\n"); 1779 + 1779 1780 for (i = 0; i < CE_COUNT; i++) 1780 1781 ath10k_hif_send_complete_check(ar, i, 1); 1781 1782 1782 - /* The 100 ms granularity is a tradeoff considering scheduler 1783 - * overhead and response latency 1784 - */ 1785 1783 time_left = wait_for_completion_timeout(&ar->wmi.service_ready, 1786 - msecs_to_jiffies(100)); 1787 - if (time_left) 1788 - return 0; 1789 - } while (time_before(jiffies, timeout)); 1784 + WMI_SERVICE_READY_TIMEOUT_HZ); 1785 + if (!time_left) { 1786 + ath10k_warn(ar, "polling timed out\n"); 1787 + return -ETIMEDOUT; 1788 + } 1790 1789 1791 - ath10k_warn(ar, "failed to receive service ready completion\n"); 1792 - return -ETIMEDOUT; 1790 + ath10k_warn(ar, "service ready completion received, continuing normally\n"); 1791 + } 1792 + 1793 + return 0; 1793 1794 } 1794 1795 1795 1796 int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
+55 -67
drivers/net/wireless/ath/ath12k/mac.c
··· 4064 4064 return ret; 4065 4065 } 4066 4066 4067 - static void ath12k_mac_vif_setup_ps(struct ath12k_link_vif *arvif) 4068 - { 4069 - struct ath12k *ar = arvif->ar; 4070 - struct ieee80211_vif *vif = arvif->ahvif->vif; 4071 - struct ieee80211_conf *conf = &ath12k_ar_to_hw(ar)->conf; 4072 - enum wmi_sta_powersave_param param; 4073 - struct ieee80211_bss_conf *info; 4074 - enum wmi_sta_ps_mode psmode; 4075 - int ret; 4076 - int timeout; 4077 - bool enable_ps; 4078 - 4079 - lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); 4080 - 4081 - if (vif->type != NL80211_IFTYPE_STATION) 4082 - return; 4083 - 4084 - enable_ps = arvif->ahvif->ps; 4085 - if (enable_ps) { 4086 - psmode = WMI_STA_PS_MODE_ENABLED; 4087 - param = WMI_STA_PS_PARAM_INACTIVITY_TIME; 4088 - 4089 - timeout = conf->dynamic_ps_timeout; 4090 - if (timeout == 0) { 4091 - info = ath12k_mac_get_link_bss_conf(arvif); 4092 - if (!info) { 4093 - ath12k_warn(ar->ab, "unable to access bss link conf in setup ps for vif %pM link %u\n", 4094 - vif->addr, arvif->link_id); 4095 - return; 4096 - } 4097 - 4098 - /* firmware doesn't like 0 */ 4099 - timeout = ieee80211_tu_to_usec(info->beacon_int) / 1000; 4100 - } 4101 - 4102 - ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, 4103 - timeout); 4104 - if (ret) { 4105 - ath12k_warn(ar->ab, "failed to set inactivity time for vdev %d: %i\n", 4106 - arvif->vdev_id, ret); 4107 - return; 4108 - } 4109 - } else { 4110 - psmode = WMI_STA_PS_MODE_DISABLED; 4111 - } 4112 - 4113 - ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %d psmode %s\n", 4114 - arvif->vdev_id, psmode ? 
"enable" : "disable"); 4115 - 4116 - ret = ath12k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, psmode); 4117 - if (ret) 4118 - ath12k_warn(ar->ab, "failed to set sta power save mode %d for vdev %d: %d\n", 4119 - psmode, arvif->vdev_id, ret); 4120 - } 4121 - 4122 4067 static void ath12k_mac_op_vif_cfg_changed(struct ieee80211_hw *hw, 4123 4068 struct ieee80211_vif *vif, 4124 4069 u64 changed) 4125 4070 { 4126 4071 struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); 4127 4072 unsigned long links = ahvif->links_map; 4128 - struct ieee80211_vif_cfg *vif_cfg; 4129 4073 struct ieee80211_bss_conf *info; 4130 4074 struct ath12k_link_vif *arvif; 4131 4075 struct ieee80211_sta *sta; ··· 4133 4189 } 4134 4190 } 4135 4191 } 4192 + } 4136 4193 4137 - if (changed & BSS_CHANGED_PS) { 4138 - links = ahvif->links_map; 4139 - vif_cfg = &vif->cfg; 4194 + static void ath12k_mac_vif_setup_ps(struct ath12k_link_vif *arvif) 4195 + { 4196 + struct ath12k *ar = arvif->ar; 4197 + struct ieee80211_vif *vif = arvif->ahvif->vif; 4198 + struct ieee80211_conf *conf = &ath12k_ar_to_hw(ar)->conf; 4199 + enum wmi_sta_powersave_param param; 4200 + struct ieee80211_bss_conf *info; 4201 + enum wmi_sta_ps_mode psmode; 4202 + int ret; 4203 + int timeout; 4204 + bool enable_ps; 4140 4205 4141 - for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) { 4142 - arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]); 4143 - if (!arvif || !arvif->ar) 4144 - continue; 4206 + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); 4145 4207 4146 - ar = arvif->ar; 4208 + if (vif->type != NL80211_IFTYPE_STATION) 4209 + return; 4147 4210 4148 - if (ar->ab->hw_params->supports_sta_ps) { 4149 - ahvif->ps = vif_cfg->ps; 4150 - ath12k_mac_vif_setup_ps(arvif); 4211 + enable_ps = arvif->ahvif->ps; 4212 + if (enable_ps) { 4213 + psmode = WMI_STA_PS_MODE_ENABLED; 4214 + param = WMI_STA_PS_PARAM_INACTIVITY_TIME; 4215 + 4216 + timeout = conf->dynamic_ps_timeout; 4217 + if (timeout == 0) { 4218 + info = 
ath12k_mac_get_link_bss_conf(arvif); 4219 + if (!info) { 4220 + ath12k_warn(ar->ab, "unable to access bss link conf in setup ps for vif %pM link %u\n", 4221 + vif->addr, arvif->link_id); 4222 + return; 4151 4223 } 4224 + 4225 + /* firmware doesn't like 0 */ 4226 + timeout = ieee80211_tu_to_usec(info->beacon_int) / 1000; 4152 4227 } 4228 + 4229 + ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, 4230 + timeout); 4231 + if (ret) { 4232 + ath12k_warn(ar->ab, "failed to set inactivity time for vdev %d: %i\n", 4233 + arvif->vdev_id, ret); 4234 + return; 4235 + } 4236 + } else { 4237 + psmode = WMI_STA_PS_MODE_DISABLED; 4153 4238 } 4239 + 4240 + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %d psmode %s\n", 4241 + arvif->vdev_id, psmode ? "enable" : "disable"); 4242 + 4243 + ret = ath12k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, psmode); 4244 + if (ret) 4245 + ath12k_warn(ar->ab, "failed to set sta power save mode %d for vdev %d: %d\n", 4246 + psmode, arvif->vdev_id, ret); 4154 4247 } 4155 4248 4156 4249 static bool ath12k_mac_supports_tpc(struct ath12k *ar, struct ath12k_vif *ahvif, ··· 4209 4228 { 4210 4229 struct ath12k_vif *ahvif = arvif->ahvif; 4211 4230 struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif); 4231 + struct ieee80211_vif_cfg *vif_cfg = &vif->cfg; 4212 4232 struct cfg80211_chan_def def; 4213 4233 u32 param_id, param_value; 4214 4234 enum nl80211_band band; ··· 4496 4514 } 4497 4515 4498 4516 ath12k_mac_fils_discovery(arvif, info); 4517 + 4518 + if (changed & BSS_CHANGED_PS && 4519 + ar->ab->hw_params->supports_sta_ps) { 4520 + ahvif->ps = vif_cfg->ps; 4521 + ath12k_mac_vif_setup_ps(arvif); 4522 + } 4499 4523 } 4500 4524 4501 4525 static struct ath12k_vif_cache *ath12k_ahvif_get_link_cache(struct ath12k_vif *ahvif,
+4 -3
drivers/net/wireless/virtual/mac80211_hwsim.c
··· 6698 6698 .n_mcgrps = ARRAY_SIZE(hwsim_mcgrps), 6699 6699 }; 6700 6700 6701 - static void remove_user_radios(u32 portid) 6701 + static void remove_user_radios(u32 portid, int netgroup) 6702 6702 { 6703 6703 struct mac80211_hwsim_data *entry, *tmp; 6704 6704 LIST_HEAD(list); 6705 6705 6706 6706 spin_lock_bh(&hwsim_radio_lock); 6707 6707 list_for_each_entry_safe(entry, tmp, &hwsim_radios, list) { 6708 - if (entry->destroy_on_close && entry->portid == portid) { 6708 + if (entry->destroy_on_close && entry->portid == portid && 6709 + entry->netgroup == netgroup) { 6709 6710 list_move(&entry->list, &list); 6710 6711 rhashtable_remove_fast(&hwsim_radios_rht, &entry->rht, 6711 6712 hwsim_rht_params); ··· 6731 6730 if (state != NETLINK_URELEASE) 6732 6731 return NOTIFY_DONE; 6733 6732 6734 - remove_user_radios(notify->portid); 6733 + remove_user_radios(notify->portid, hwsim_net_get_netgroup(notify->net)); 6735 6734 6736 6735 if (notify->portid == hwsim_net_get_wmediumd(notify->net)) { 6737 6736 printk(KERN_INFO "mac80211_hwsim: wmediumd released netlink"
+1
drivers/net/wireless/zydas/zd1211rw/zd_usb.c
··· 791 791 if (urbs) { 792 792 for (i = 0; i < RX_URBS_COUNT; i++) 793 793 free_rx_urb(urbs[i]); 794 + kfree(urbs); 794 795 } 795 796 return r; 796 797 }
+1 -1
drivers/platform/x86/Kconfig
··· 432 432 depends on INPUT 433 433 help 434 434 This driver provides supports for the wireless buttons found on some AMD, 435 - HP, & Xioami laptops. 435 + HP, & Xiaomi laptops. 436 436 On such systems the driver should load automatically (via ACPI alias). 437 437 438 438 To compile this driver as a module, choose M here: the module will
+12
drivers/platform/x86/dell/dell-wmi-base.c
··· 365 365 /* Backlight brightness change event */ 366 366 { KE_IGNORE, 0x0003, { KEY_RESERVED } }, 367 367 368 + /* 369 + * Electronic privacy screen toggled, extended data gives state, 370 + * separate entries for on/off see handling in dell_wmi_process_key(). 371 + */ 372 + { KE_KEY, 0x000c, { KEY_EPRIVACY_SCREEN_OFF } }, 373 + { KE_KEY, 0x000c, { KEY_EPRIVACY_SCREEN_ON } }, 374 + 368 375 /* Ultra-performance mode switch request */ 369 376 { KE_IGNORE, 0x000d, { KEY_RESERVED } }, 370 377 ··· 442 435 "Dell tablet mode switch", 443 436 SW_TABLET_MODE, !buffer[0]); 444 437 return 1; 438 + } else if (type == 0x0012 && code == 0x000c && remaining > 0) { 439 + /* Eprivacy toggle, switch to "on" key entry for on events */ 440 + if (buffer[0] == 2) 441 + key++; 442 + used = 1; 445 443 } else if (type == 0x0012 && code == 0x000d && remaining > 0) { 446 444 value = (buffer[2] == 2); 447 445 used = 1;
+1 -4
drivers/platform/x86/intel/int3472/clk_and_regulator.c
··· 245 245 if (IS_ERR(regulator->rdev)) 246 246 return PTR_ERR(regulator->rdev); 247 247 248 - int3472->regulators[int3472->n_regulator_gpios].ena_gpio = gpio; 249 248 int3472->n_regulator_gpios++; 250 249 return 0; 251 250 } 252 251 253 252 void skl_int3472_unregister_regulator(struct int3472_discrete_device *int3472) 254 253 { 255 - for (int i = 0; i < int3472->n_regulator_gpios; i++) { 254 + for (int i = 0; i < int3472->n_regulator_gpios; i++) 256 255 regulator_unregister(int3472->regulators[i].rdev); 257 - gpiod_put(int3472->regulators[i].ena_gpio); 258 - } 259 256 }
+1 -1
drivers/platform/x86/intel/int3472/led.c
··· 43 43 44 44 int3472->pled.lookup.provider = int3472->pled.name; 45 45 int3472->pled.lookup.dev_id = int3472->sensor_name; 46 - int3472->pled.lookup.con_id = "privacy-led"; 46 + int3472->pled.lookup.con_id = "privacy"; 47 47 led_add_lookup(&int3472->pled.lookup); 48 48 49 49 return 0;
+4
drivers/ptp/ptp_chardev.c
··· 561 561 return ptp_mask_en_single(pccontext->private_clkdata, argptr); 562 562 563 563 case PTP_SYS_OFFSET_PRECISE_CYCLES: 564 + if (!ptp->has_cycles) 565 + return -EOPNOTSUPP; 564 566 return ptp_sys_offset_precise(ptp, argptr, 565 567 ptp->info->getcrosscycles); 566 568 567 569 case PTP_SYS_OFFSET_EXTENDED_CYCLES: 570 + if (!ptp->has_cycles) 571 + return -EOPNOTSUPP; 568 572 return ptp_sys_offset_extended(ptp, argptr, 569 573 ptp->info->getcyclesx64); 570 574 default:
-1
drivers/rtc/rtc-cpcap.c
··· 268 268 return err; 269 269 270 270 rtc->alarm_irq = platform_get_irq(pdev, 0); 271 - rtc->alarm_enabled = true; 272 271 err = devm_request_threaded_irq(dev, rtc->alarm_irq, NULL, 273 272 cpcap_rtc_alarm_irq, 274 273 IRQF_TRIGGER_NONE | IRQF_ONESHOT,
+1 -1
drivers/rtc/rtc-rx8025.c
··· 316 316 return hour_reg; 317 317 rx8025->is_24 = (hour_reg & RX8035_BIT_HOUR_1224); 318 318 } else { 319 - rx8025->is_24 = (ctrl[1] & RX8025_BIT_CTRL1_1224); 319 + rx8025->is_24 = (ctrl[0] & RX8025_BIT_CTRL1_1224); 320 320 } 321 321 out: 322 322 return err;
-1
drivers/rtc/rtc-tps6586x.c
··· 258 258 259 259 irq_set_status_flags(rtc->irq, IRQ_NOAUTOEN); 260 260 261 - rtc->irq_en = true; 262 261 ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL, 263 262 tps6586x_rtc_irq, 264 263 IRQF_ONESHOT,
+7 -7
drivers/soc/ti/knav_dma.c
··· 402 402 * @name: slave channel name 403 403 * @config: dma configuration parameters 404 404 * 405 - * Returns pointer to appropriate DMA channel on success or error. 405 + * Return: Pointer to appropriate DMA channel on success or NULL on error. 406 406 */ 407 407 void *knav_dma_open_channel(struct device *dev, const char *name, 408 408 struct knav_dma_cfg *config) ··· 414 414 415 415 if (!kdev) { 416 416 pr_err("keystone-navigator-dma driver not registered\n"); 417 - return (void *)-EINVAL; 417 + return NULL; 418 418 } 419 419 420 420 chan_num = of_channel_match_helper(dev->of_node, name, &instance); 421 421 if (chan_num < 0) { 422 422 dev_err(kdev->dev, "No DMA instance with name %s\n", name); 423 - return (void *)-EINVAL; 423 + return NULL; 424 424 } 425 425 426 426 dev_dbg(kdev->dev, "initializing %s channel %d from DMA %s\n", ··· 431 431 if (config->direction != DMA_MEM_TO_DEV && 432 432 config->direction != DMA_DEV_TO_MEM) { 433 433 dev_err(kdev->dev, "bad direction\n"); 434 - return (void *)-EINVAL; 434 + return NULL; 435 435 } 436 436 437 437 /* Look for correct dma instance */ ··· 443 443 } 444 444 if (!dma) { 445 445 dev_err(kdev->dev, "No DMA instance with name %s\n", instance); 446 - return (void *)-EINVAL; 446 + return NULL; 447 447 } 448 448 449 449 /* Look for correct dma channel from dma instance */ ··· 463 463 if (!chan) { 464 464 dev_err(kdev->dev, "channel %d is not in DMA %s\n", 465 465 chan_num, instance); 466 - return (void *)-EINVAL; 466 + return NULL; 467 467 } 468 468 469 469 if (atomic_read(&chan->ref_count) >= 1) { 470 470 if (!check_config(chan, config)) { 471 471 dev_err(kdev->dev, "channel %d config miss-match\n", 472 472 chan_num); 473 - return (void *)-EINVAL; 473 + return NULL; 474 474 } 475 475 } 476 476
+93 -13
drivers/spi/spi-cadence.c
··· 109 109 * @rxbuf: Pointer to the RX buffer 110 110 * @tx_bytes: Number of bytes left to transfer 111 111 * @rx_bytes: Number of bytes requested 112 + * @n_bytes: Number of bytes per word 112 113 * @dev_busy: Device busy flag 113 114 * @is_decoded_cs: Flag for decoder property set or not 114 115 * @tx_fifo_depth: Depth of the TX FIFO ··· 121 120 struct clk *pclk; 122 121 unsigned int clk_rate; 123 122 u32 speed_hz; 124 - const u8 *txbuf; 125 - u8 *rxbuf; 123 + const void *txbuf; 124 + void *rxbuf; 126 125 int tx_bytes; 127 126 int rx_bytes; 127 + u8 n_bytes; 128 128 u8 dev_busy; 129 129 u32 is_decoded_cs; 130 130 unsigned int tx_fifo_depth; 131 131 struct reset_control *rstc; 132 + }; 133 + 134 + enum cdns_spi_frame_n_bytes { 135 + CDNS_SPI_N_BYTES_NULL = 0, 136 + CDNS_SPI_N_BYTES_U8 = 1, 137 + CDNS_SPI_N_BYTES_U16 = 2, 138 + CDNS_SPI_N_BYTES_U32 = 4 132 139 }; 133 140 134 141 /* Macros for the SPI controller read/write */ ··· 314 305 return 0; 315 306 } 316 307 308 + static u8 cdns_spi_n_bytes(struct spi_transfer *transfer) 309 + { 310 + if (transfer->bits_per_word <= 8) 311 + return CDNS_SPI_N_BYTES_U8; 312 + else if (transfer->bits_per_word <= 16) 313 + return CDNS_SPI_N_BYTES_U16; 314 + else 315 + return CDNS_SPI_N_BYTES_U32; 316 + } 317 + 318 + static inline void cdns_spi_reader(struct cdns_spi *xspi) 319 + { 320 + u32 rxw = 0; 321 + 322 + if (xspi->rxbuf && !IS_ALIGNED((uintptr_t)xspi->rxbuf, xspi->n_bytes)) { 323 + pr_err("%s: rxbuf address is not aligned for %d bytes\n", 324 + __func__, xspi->n_bytes); 325 + return; 326 + } 327 + 328 + rxw = cdns_spi_read(xspi, CDNS_SPI_RXD); 329 + if (xspi->rxbuf) { 330 + switch (xspi->n_bytes) { 331 + case CDNS_SPI_N_BYTES_U8: 332 + *(u8 *)xspi->rxbuf = rxw; 333 + break; 334 + case CDNS_SPI_N_BYTES_U16: 335 + *(u16 *)xspi->rxbuf = rxw; 336 + break; 337 + case CDNS_SPI_N_BYTES_U32: 338 + *(u32 *)xspi->rxbuf = rxw; 339 + break; 340 + default: 341 + pr_err("%s invalid n_bytes %d\n", __func__, 342 + xspi->n_bytes); 343 + 
return; 344 + } 345 + xspi->rxbuf = (u8 *)xspi->rxbuf + xspi->n_bytes; 346 + } 347 + } 348 + 349 + static inline void cdns_spi_writer(struct cdns_spi *xspi) 350 + { 351 + u32 txw = 0; 352 + 353 + if (xspi->txbuf && !IS_ALIGNED((uintptr_t)xspi->txbuf, xspi->n_bytes)) { 354 + pr_err("%s: txbuf address is not aligned for %d bytes\n", 355 + __func__, xspi->n_bytes); 356 + return; 357 + } 358 + 359 + if (xspi->txbuf) { 360 + switch (xspi->n_bytes) { 361 + case CDNS_SPI_N_BYTES_U8: 362 + txw = *(u8 *)xspi->txbuf; 363 + break; 364 + case CDNS_SPI_N_BYTES_U16: 365 + txw = *(u16 *)xspi->txbuf; 366 + break; 367 + case CDNS_SPI_N_BYTES_U32: 368 + txw = *(u32 *)xspi->txbuf; 369 + break; 370 + default: 371 + pr_err("%s invalid n_bytes %d\n", __func__, 372 + xspi->n_bytes); 373 + return; 374 + } 375 + cdns_spi_write(xspi, CDNS_SPI_TXD, txw); 376 + xspi->txbuf = (u8 *)xspi->txbuf + xspi->n_bytes; 377 + } 378 + } 379 + 317 380 /** 318 381 * cdns_spi_process_fifo - Fills the TX FIFO, and drain the RX FIFO 319 382 * @xspi: Pointer to the cdns_spi structure ··· 402 321 403 322 while (ntx || nrx) { 404 323 if (nrx) { 405 - u8 data = cdns_spi_read(xspi, CDNS_SPI_RXD); 406 - 407 - if (xspi->rxbuf) 408 - *xspi->rxbuf++ = data; 409 - 324 + cdns_spi_reader(xspi); 410 325 nrx--; 411 326 } 412 327 413 328 if (ntx) { 414 - if (xspi->txbuf) 415 - cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++); 416 - else 417 - cdns_spi_write(xspi, CDNS_SPI_TXD, 0); 418 - 329 + cdns_spi_writer(xspi); 419 330 ntx--; 420 331 } 421 - 422 332 } 423 333 } 424 334 ··· 525 453 */ 526 454 if (cdns_spi_read(xspi, CDNS_SPI_ISR) & CDNS_SPI_IXR_TXFULL) 527 455 udelay(10); 456 + 457 + xspi->n_bytes = cdns_spi_n_bytes(transfer); 458 + xspi->tx_bytes = DIV_ROUND_UP(xspi->tx_bytes, xspi->n_bytes); 459 + xspi->rx_bytes = DIV_ROUND_UP(xspi->rx_bytes, xspi->n_bytes); 528 460 529 461 cdns_spi_process_fifo(xspi, xspi->tx_fifo_depth, 0); 530 462 ··· 730 654 ctlr->mode_bits = SPI_CPOL | SPI_CPHA; 731 655 ctlr->bits_per_word_mask 
= SPI_BPW_MASK(8); 732 656 657 + if (of_device_is_compatible(pdev->dev.of_node, "cix,sky1-spi-r1p6")) 658 + ctlr->bits_per_word_mask |= SPI_BPW_MASK(16) | SPI_BPW_MASK(32); 659 + 733 660 if (!spi_controller_is_target(ctlr)) { 734 661 ctlr->mode_bits |= SPI_CS_HIGH; 735 662 ctlr->set_cs = cdns_spi_chipselect; ··· 876 797 877 798 static const struct of_device_id cdns_spi_of_match[] = { 878 799 { .compatible = "xlnx,zynq-spi-r1p6" }, 800 + { .compatible = "cix,sky1-spi-r1p6" }, 879 801 { .compatible = "cdns,spi-r1p6" }, 880 802 { /* end of table */ } 881 803 };
+1 -1
drivers/ufs/core/ufs-sysfs.c
··· 1949 1949 return hba->dev_info.hid_sup ? attr->mode : 0; 1950 1950 } 1951 1951 1952 - const struct attribute_group ufs_sysfs_hid_group = { 1952 + static const struct attribute_group ufs_sysfs_hid_group = { 1953 1953 .name = "hid", 1954 1954 .attrs = ufs_sysfs_hid, 1955 1955 .is_visible = ufs_sysfs_hid_is_visible,
-1
drivers/ufs/core/ufs-sysfs.h
··· 14 14 15 15 extern const struct attribute_group ufs_sysfs_unit_descriptor_group; 16 16 extern const struct attribute_group ufs_sysfs_lun_attributes_group; 17 - extern const struct attribute_group ufs_sysfs_hid_group; 18 17 19 18 #endif
+6 -11
drivers/ufs/core/ufshcd.c
··· 5066 5066 * If UFS device isn't active then we will have to issue link startup 5067 5067 * 2 times to make sure the device state move to active. 5068 5068 */ 5069 - if (!ufshcd_is_ufs_dev_active(hba)) 5069 + if (!(hba->quirks & UFSHCD_QUIRK_PERFORM_LINK_STARTUP_ONCE) && 5070 + !ufshcd_is_ufs_dev_active(hba)) 5070 5071 link_startup_again = true; 5071 5072 5072 5073 link_startup: ··· 5132 5131 ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER); 5133 5132 ret = ufshcd_make_hba_operational(hba); 5134 5133 out: 5135 - if (ret) { 5134 + if (ret) 5136 5135 dev_err(hba->dev, "link startup failed %d\n", ret); 5137 - ufshcd_print_host_state(hba); 5138 - ufshcd_print_pwr_info(hba); 5139 - ufshcd_print_evt_hist(hba); 5140 - } 5141 5136 return ret; 5142 5137 } 5143 5138 ··· 8500 8503 DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP) & 8501 8504 UFS_DEV_HID_SUPPORT; 8502 8505 8503 - sysfs_update_group(&hba->dev->kobj, &ufs_sysfs_hid_group); 8504 - 8505 8506 model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME]; 8506 8507 8507 8508 err = ufshcd_read_string_desc(hba, model_index, ··· 10656 10661 * @mmio_base: base register address 10657 10662 * @irq: Interrupt line of device 10658 10663 * 10659 - * Return: 0 on success, non-zero value on failure. 10664 + * Return: 0 on success; < 0 on failure. 10660 10665 */ 10661 10666 int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) 10662 10667 { ··· 10886 10891 if (err) 10887 10892 goto out_disable; 10888 10893 10889 - async_schedule(ufshcd_async_scan, hba); 10890 10894 ufs_sysfs_add_nodes(hba->dev); 10895 + async_schedule(ufshcd_async_scan, hba); 10891 10896 10892 10897 device_enable_async_suspend(dev); 10893 10898 ufshcd_pm_qos_init(hba); ··· 10897 10902 hba->is_irq_enabled = false; 10898 10903 ufshcd_hba_exit(hba); 10899 10904 out_error: 10900 - return err; 10905 + return err > 0 ? -EIO : err; 10901 10906 } 10902 10907 EXPORT_SYMBOL_GPL(ufshcd_init); 10903 10908
+14 -1
drivers/ufs/host/ufs-qcom.c
··· 740 740 741 741 742 742 /* reset the connected UFS device during power down */ 743 - if (ufs_qcom_is_link_off(hba) && host->device_reset) 743 + if (ufs_qcom_is_link_off(hba) && host->device_reset) { 744 744 ufs_qcom_device_reset_ctrl(hba, true); 745 + /* 746 + * After sending the SSU command, asserting the rst_n 747 + * line causes the device firmware to wake up and 748 + * execute its reset routine. 749 + * 750 + * During this process, the device may draw current 751 + * beyond the permissible limit for low-power mode (LPM). 752 + * A 10ms delay, based on experimental observations, 753 + * allows the UFS device to complete its hardware reset 754 + * before transitioning the power rail to LPM. 755 + */ 756 + usleep_range(10000, 11000); 757 + } 745 758 746 759 return ufs_qcom_ice_suspend(host); 747 760 }
+67 -3
drivers/ufs/host/ufshcd-pci.c
··· 15 15 #include <linux/pci.h> 16 16 #include <linux/pm_runtime.h> 17 17 #include <linux/pm_qos.h> 18 + #include <linux/suspend.h> 18 19 #include <linux/debugfs.h> 19 20 #include <linux/uuid.h> 20 21 #include <linux/acpi.h> ··· 32 31 u32 dsm_fns; 33 32 u32 active_ltr; 34 33 u32 idle_ltr; 34 + int saved_spm_lvl; 35 35 struct dentry *debugfs_root; 36 36 struct gpio_desc *reset_gpio; 37 37 }; ··· 349 347 host = devm_kzalloc(hba->dev, sizeof(*host), GFP_KERNEL); 350 348 if (!host) 351 349 return -ENOMEM; 350 + host->saved_spm_lvl = -1; 352 351 ufshcd_set_variant(hba, host); 353 352 intel_dsm_init(host, hba->dev); 354 353 if (INTEL_DSM_SUPPORTED(host, RESET)) { ··· 428 425 static int ufs_intel_adl_init(struct ufs_hba *hba) 429 426 { 430 427 hba->nop_out_timeout = 200; 431 - hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8; 428 + hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8 | 429 + UFSHCD_QUIRK_PERFORM_LINK_STARTUP_ONCE; 432 430 hba->caps |= UFSHCD_CAP_WB_EN; 433 431 return ufs_intel_common_init(hba); 434 432 } ··· 542 538 543 539 return ufshcd_system_resume(dev); 544 540 } 541 + 542 + static int ufs_intel_suspend_prepare(struct device *dev) 543 + { 544 + struct ufs_hba *hba = dev_get_drvdata(dev); 545 + struct intel_host *host = ufshcd_get_variant(hba); 546 + int err; 547 + 548 + /* 549 + * Only s2idle (S0ix) retains link state. Force power-off 550 + * (UFS_PM_LVL_5) for any other case. 
551 + */ 552 + if (pm_suspend_target_state != PM_SUSPEND_TO_IDLE && hba->spm_lvl < UFS_PM_LVL_5) { 553 + host->saved_spm_lvl = hba->spm_lvl; 554 + hba->spm_lvl = UFS_PM_LVL_5; 555 + } 556 + 557 + err = ufshcd_suspend_prepare(dev); 558 + 559 + if (err < 0 && host->saved_spm_lvl != -1) { 560 + hba->spm_lvl = host->saved_spm_lvl; 561 + host->saved_spm_lvl = -1; 562 + } 563 + 564 + return err; 565 + } 566 + 567 + static void ufs_intel_resume_complete(struct device *dev) 568 + { 569 + struct ufs_hba *hba = dev_get_drvdata(dev); 570 + struct intel_host *host = ufshcd_get_variant(hba); 571 + 572 + ufshcd_resume_complete(dev); 573 + 574 + if (host->saved_spm_lvl != -1) { 575 + hba->spm_lvl = host->saved_spm_lvl; 576 + host->saved_spm_lvl = -1; 577 + } 578 + } 579 + 580 + static int ufshcd_pci_suspend_prepare(struct device *dev) 581 + { 582 + struct ufs_hba *hba = dev_get_drvdata(dev); 583 + 584 + if (!strcmp(hba->vops->name, "intel-pci")) 585 + return ufs_intel_suspend_prepare(dev); 586 + 587 + return ufshcd_suspend_prepare(dev); 588 + } 589 + 590 + static void ufshcd_pci_resume_complete(struct device *dev) 591 + { 592 + struct ufs_hba *hba = dev_get_drvdata(dev); 593 + 594 + if (!strcmp(hba->vops->name, "intel-pci")) { 595 + ufs_intel_resume_complete(dev); 596 + return; 597 + } 598 + 599 + ufshcd_resume_complete(dev); 600 + } 545 601 #endif 546 602 547 603 /** ··· 675 611 .thaw = ufshcd_system_resume, 676 612 .poweroff = ufshcd_system_suspend, 677 613 .restore = ufshcd_pci_restore, 678 - .prepare = ufshcd_suspend_prepare, 679 - .complete = ufshcd_resume_complete, 614 + .prepare = ufshcd_pci_suspend_prepare, 615 + .complete = ufshcd_pci_resume_complete, 680 616 #endif 681 617 }; 682 618
+8
fs/btrfs/extent_io.c
··· 2228 2228 wbc_account_cgroup_owner(wbc, folio, range_len); 2229 2229 folio_unlock(folio); 2230 2230 } 2231 + /* 2232 + * If the fs is already in error status, do not submit any writeback 2233 + * but immediately finish it. 2234 + */ 2235 + if (unlikely(BTRFS_FS_ERROR(fs_info))) { 2236 + btrfs_bio_end_io(bbio, errno_to_blk_status(BTRFS_FS_ERROR(fs_info))); 2237 + return; 2238 + } 2231 2239 btrfs_submit_bbio(bbio, 0); 2232 2240 } 2233 2241
+10
fs/btrfs/file.c
··· 2854 2854 { 2855 2855 struct btrfs_trans_handle *trans; 2856 2856 struct btrfs_root *root = BTRFS_I(inode)->root; 2857 + u64 range_start; 2858 + u64 range_end; 2857 2859 int ret; 2858 2860 int ret2; 2859 2861 2860 2862 if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode)) 2861 2863 return 0; 2864 + 2865 + range_start = round_down(i_size_read(inode), root->fs_info->sectorsize); 2866 + range_end = round_up(end, root->fs_info->sectorsize); 2867 + 2868 + ret = btrfs_inode_set_file_extent_range(BTRFS_I(inode), range_start, 2869 + range_end - range_start); 2870 + if (ret) 2871 + return ret; 2862 2872 2863 2873 trans = btrfs_start_transaction(root, 1); 2864 2874 if (IS_ERR(trans))
-1
fs/btrfs/inode.c
··· 6873 6873 BTRFS_I(inode)->dir_index = 0ULL; 6874 6874 inode_inc_iversion(inode); 6875 6875 inode_set_ctime_current(inode); 6876 - set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags); 6877 6876 6878 6877 ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), 6879 6878 &fname.disk_name, 1, index);
+3 -1
fs/btrfs/qgroup.c
··· 1539 1539 ASSERT(prealloc); 1540 1540 1541 1541 /* Check the level of src and dst first */ 1542 - if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst)) 1542 + if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst)) { 1543 + kfree(prealloc); 1543 1544 return -EINVAL; 1545 + } 1544 1546 1545 1547 mutex_lock(&fs_info->qgroup_ioctl_lock); 1546 1548 if (!fs_info->quota_root) {
+3
fs/btrfs/tree-log.c
··· 7910 7910 bool log_pinned = false; 7911 7911 int ret; 7912 7912 7913 + /* The inode has a new name (ref/extref), so make sure we log it. */ 7914 + set_bit(BTRFS_INODE_COPY_EVERYTHING, &inode->runtime_flags); 7915 + 7913 7916 btrfs_init_log_ctx(&ctx, inode); 7914 7917 ctx.logging_new_name = true; 7915 7918
+1 -2
fs/crypto/inline_crypt.c
··· 333 333 inode = mapping->host; 334 334 335 335 *inode_ret = inode; 336 - *lblk_num_ret = ((u64)folio->index << (PAGE_SHIFT - inode->i_blkbits)) + 337 - (bh_offset(bh) >> inode->i_blkbits); 336 + *lblk_num_ret = (folio_pos(folio) + bh_offset(bh)) >> inode->i_blkbits; 338 337 return true; 339 338 } 340 339
+9 -7
fs/smb/client/cached_dir.c
··· 388 388 * lease. Release one here, and the second below. 389 389 */ 390 390 cfid->has_lease = false; 391 - kref_put(&cfid->refcount, smb2_close_cached_fid); 391 + close_cached_dir(cfid); 392 392 } 393 393 spin_unlock(&cfids->cfid_list_lock); 394 394 395 - kref_put(&cfid->refcount, smb2_close_cached_fid); 395 + close_cached_dir(cfid); 396 396 } else { 397 397 *ret_cfid = cfid; 398 398 atomic_inc(&tcon->num_remote_opens); ··· 438 438 439 439 static void 440 440 smb2_close_cached_fid(struct kref *ref) 441 + __releases(&cfid->cfids->cfid_list_lock) 441 442 { 442 443 struct cached_fid *cfid = container_of(ref, struct cached_fid, 443 444 refcount); 444 445 int rc; 445 446 446 - spin_lock(&cfid->cfids->cfid_list_lock); 447 + lockdep_assert_held(&cfid->cfids->cfid_list_lock); 448 + 447 449 if (cfid->on_list) { 448 450 list_del(&cfid->entry); 449 451 cfid->on_list = false; ··· 480 478 spin_lock(&cfid->cfids->cfid_list_lock); 481 479 if (cfid->has_lease) { 482 480 cfid->has_lease = false; 483 - kref_put(&cfid->refcount, smb2_close_cached_fid); 481 + close_cached_dir(cfid); 484 482 } 485 483 spin_unlock(&cfid->cfids->cfid_list_lock); 486 484 close_cached_dir(cfid); ··· 489 487 490 488 void close_cached_dir(struct cached_fid *cfid) 491 489 { 492 - kref_put(&cfid->refcount, smb2_close_cached_fid); 490 + kref_put_lock(&cfid->refcount, smb2_close_cached_fid, &cfid->cfids->cfid_list_lock); 493 491 } 494 492 495 493 /* ··· 598 596 599 597 WARN_ON(cfid->on_list); 600 598 601 - kref_put(&cfid->refcount, smb2_close_cached_fid); 599 + close_cached_dir(cfid); 602 600 cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_cached_close); 603 601 } 604 602 ··· 764 762 * Drop the ref-count from above, either the lease-ref (if there 765 763 * was one) or the extra one acquired. 766 764 */ 767 - kref_put(&cfid->refcount, smb2_close_cached_fid); 765 + close_cached_dir(cfid); 768 766 } 769 767 queue_delayed_work(cfid_put_wq, &cfids->laundromat_work, 770 768 dir_cache_timeout * HZ);
+2
fs/smb/client/smb2inode.c
··· 1294 1294 smb2_to_name = cifs_convert_path_to_utf16(to_name, cifs_sb); 1295 1295 if (smb2_to_name == NULL) { 1296 1296 rc = -ENOMEM; 1297 + if (cfile) 1298 + cifsFileInfo_put(cfile); 1297 1299 goto smb2_rename_path; 1298 1300 } 1299 1301 in_iov.iov_base = smb2_to_name;
+5 -2
fs/smb/client/smb2pdu.c
··· 4054 4054 4055 4055 smb_rsp = (struct smb2_change_notify_rsp *)rsp_iov.iov_base; 4056 4056 4057 - smb2_validate_iov(le16_to_cpu(smb_rsp->OutputBufferOffset), 4058 - le32_to_cpu(smb_rsp->OutputBufferLength), &rsp_iov, 4057 + rc = smb2_validate_iov(le16_to_cpu(smb_rsp->OutputBufferOffset), 4058 + le32_to_cpu(smb_rsp->OutputBufferLength), 4059 + &rsp_iov, 4059 4060 sizeof(struct file_notify_information)); 4061 + if (rc) 4062 + goto cnotify_exit; 4060 4063 4061 4064 *out_data = kmemdup((char *)smb_rsp + le16_to_cpu(smb_rsp->OutputBufferOffset), 4062 4065 le32_to_cpu(smb_rsp->OutputBufferLength), GFP_KERNEL);
+23 -1
fs/smb/server/transport_rdma.c
··· 2606 2606 } 2607 2607 } 2608 2608 2609 - bool ksmbd_rdma_capable_netdev(struct net_device *netdev) 2609 + static bool ksmbd_find_rdma_capable_netdev(struct net_device *netdev) 2610 2610 { 2611 2611 struct smb_direct_device *smb_dev; 2612 2612 int i; ··· 2646 2646 netdev->name, str_true_false(rdma_capable)); 2647 2647 2648 2648 return rdma_capable; 2649 + } 2650 + 2651 + bool ksmbd_rdma_capable_netdev(struct net_device *netdev) 2652 + { 2653 + struct net_device *lower_dev; 2654 + struct list_head *iter; 2655 + 2656 + if (ksmbd_find_rdma_capable_netdev(netdev)) 2657 + return true; 2658 + 2659 + /* check if netdev is bridge or VLAN */ 2660 + if (netif_is_bridge_master(netdev) || 2661 + netdev->priv_flags & IFF_802_1Q_VLAN) 2662 + netdev_for_each_lower_dev(netdev, lower_dev, iter) 2663 + if (ksmbd_find_rdma_capable_netdev(lower_dev)) 2664 + return true; 2665 + 2666 + /* check if netdev is IPoIB safely without layer violation */ 2667 + if (netdev->type == ARPHRD_INFINIBAND) 2668 + return true; 2669 + 2670 + return false; 2649 2671 } 2650 2672 2651 2673 static const struct ksmbd_transport_ops ksmbd_smb_direct_transport_ops = {
+3 -1
fs/xfs/xfs_discard.c
··· 726 726 break; 727 727 } 728 728 729 - if (!tr.queued) 729 + if (!tr.queued) { 730 + kfree(tr.extents); 730 731 break; 732 + } 731 733 732 734 /* 733 735 * We hand the extent list to the discard function here so the
+69 -13
fs/xfs/xfs_iomap.c
··· 1091 1091 }; 1092 1092 #endif /* CONFIG_XFS_RT */ 1093 1093 1094 + #ifdef DEBUG 1095 + static void 1096 + xfs_check_atomic_cow_conversion( 1097 + struct xfs_inode *ip, 1098 + xfs_fileoff_t offset_fsb, 1099 + xfs_filblks_t count_fsb, 1100 + const struct xfs_bmbt_irec *cmap) 1101 + { 1102 + struct xfs_iext_cursor icur; 1103 + struct xfs_bmbt_irec cmap2 = { }; 1104 + 1105 + if (xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &cmap2)) 1106 + xfs_trim_extent(&cmap2, offset_fsb, count_fsb); 1107 + 1108 + ASSERT(cmap2.br_startoff == cmap->br_startoff); 1109 + ASSERT(cmap2.br_blockcount == cmap->br_blockcount); 1110 + ASSERT(cmap2.br_startblock == cmap->br_startblock); 1111 + ASSERT(cmap2.br_state == cmap->br_state); 1112 + } 1113 + #else 1114 + # define xfs_check_atomic_cow_conversion(...) ((void)0) 1115 + #endif 1116 + 1094 1117 static int 1095 1118 xfs_atomic_write_cow_iomap_begin( 1096 1119 struct inode *inode, ··· 1125 1102 { 1126 1103 struct xfs_inode *ip = XFS_I(inode); 1127 1104 struct xfs_mount *mp = ip->i_mount; 1128 - const xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset); 1129 - xfs_fileoff_t end_fsb = xfs_iomap_end_fsb(mp, offset, length); 1130 - xfs_filblks_t count_fsb = end_fsb - offset_fsb; 1105 + const xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset); 1106 + const xfs_fileoff_t end_fsb = XFS_B_TO_FSB(mp, offset + length); 1107 + const xfs_filblks_t count_fsb = end_fsb - offset_fsb; 1108 + xfs_filblks_t hole_count_fsb; 1131 1109 int nmaps = 1; 1132 1110 xfs_filblks_t resaligned; 1133 1111 struct xfs_bmbt_irec cmap; ··· 1154 1130 return -EAGAIN; 1155 1131 1156 1132 trace_xfs_iomap_atomic_write_cow(ip, offset, length); 1157 - 1133 + retry: 1158 1134 xfs_ilock(ip, XFS_ILOCK_EXCL); 1159 1135 1160 1136 if (!ip->i_cowfp) { ··· 1165 1141 if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &cmap)) 1166 1142 cmap.br_startoff = end_fsb; 1167 1143 if (cmap.br_startoff <= offset_fsb) { 1144 + if (isnullstartblock(cmap.br_startblock)) 
1145 + goto convert_delay; 1146 + 1147 + /* 1148 + * cmap could extend outside the write range due to previous 1149 + * speculative preallocations. We must trim cmap to the write 1150 + * range because the cow fork treats written mappings to mean 1151 + * "write in progress". 1152 + */ 1168 1153 xfs_trim_extent(&cmap, offset_fsb, count_fsb); 1169 1154 goto found; 1170 1155 } 1171 1156 1172 - end_fsb = cmap.br_startoff; 1173 - count_fsb = end_fsb - offset_fsb; 1157 + hole_count_fsb = cmap.br_startoff - offset_fsb; 1174 1158 1175 - resaligned = xfs_aligned_fsb_count(offset_fsb, count_fsb, 1159 + resaligned = xfs_aligned_fsb_count(offset_fsb, hole_count_fsb, 1176 1160 xfs_get_cowextsz_hint(ip)); 1177 1161 xfs_iunlock(ip, XFS_ILOCK_EXCL); 1178 1162 ··· 1201 1169 if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &cmap)) 1202 1170 cmap.br_startoff = end_fsb; 1203 1171 if (cmap.br_startoff <= offset_fsb) { 1204 - xfs_trim_extent(&cmap, offset_fsb, count_fsb); 1205 1172 xfs_trans_cancel(tp); 1173 + if (isnullstartblock(cmap.br_startblock)) 1174 + goto convert_delay; 1175 + xfs_trim_extent(&cmap, offset_fsb, count_fsb); 1206 1176 goto found; 1207 1177 } 1208 1178 ··· 1216 1182 * atomic writes to that same range will be aligned (and don't require 1217 1183 * this COW-based method). 1218 1184 */ 1219 - error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, 1185 + error = xfs_bmapi_write(tp, ip, offset_fsb, hole_count_fsb, 1220 1186 XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC | 1221 1187 XFS_BMAPI_EXTSZALIGN, 0, &cmap, &nmaps); 1222 1188 if (error) { ··· 1229 1195 if (error) 1230 1196 goto out_unlock; 1231 1197 1198 + /* 1199 + * cmap could map more blocks than the range we passed into bmapi_write 1200 + * because of EXTSZALIGN or adjacent pre-existing unwritten mappings 1201 + * that were merged. Trim cmap to the original write range so that we 1202 + * don't convert more than we were asked to do for this write. 
1203 + */ 1204 + xfs_trim_extent(&cmap, offset_fsb, count_fsb); 1205 + 1232 1206 found: 1233 1207 if (cmap.br_state != XFS_EXT_NORM) { 1234 - error = xfs_reflink_convert_cow_locked(ip, offset_fsb, 1235 - count_fsb); 1208 + error = xfs_reflink_convert_cow_locked(ip, cmap.br_startoff, 1209 + cmap.br_blockcount); 1236 1210 if (error) 1237 1211 goto out_unlock; 1238 1212 cmap.br_state = XFS_EXT_NORM; 1213 + xfs_check_atomic_cow_conversion(ip, offset_fsb, count_fsb, 1214 + &cmap); 1239 1215 } 1240 1216 1241 - length = XFS_FSB_TO_B(mp, cmap.br_startoff + cmap.br_blockcount); 1242 - trace_xfs_iomap_found(ip, offset, length - offset, XFS_COW_FORK, &cmap); 1217 + trace_xfs_iomap_found(ip, offset, length, XFS_COW_FORK, &cmap); 1243 1218 seq = xfs_iomap_inode_sequence(ip, IOMAP_F_SHARED); 1244 1219 xfs_iunlock(ip, XFS_ILOCK_EXCL); 1245 1220 return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, IOMAP_F_SHARED, seq); 1246 1221 1222 + convert_delay: 1223 + xfs_iunlock(ip, XFS_ILOCK_EXCL); 1224 + error = xfs_bmapi_convert_delalloc(ip, XFS_COW_FORK, offset, iomap, 1225 + NULL); 1226 + if (error) 1227 + return error; 1228 + 1229 + /* 1230 + * Try the lookup again, because the delalloc conversion might have 1231 + * turned the COW mapping into unwritten, but we need it to be in 1232 + * written state. 1233 + */ 1234 + goto retry; 1247 1235 out_unlock: 1248 1236 xfs_iunlock(ip, XFS_ILOCK_EXCL); 1249 1237 return error;
+4 -2
fs/xfs/xfs_zone_alloc.c
··· 615 615 lockdep_assert_held(&zi->zi_open_zones_lock); 616 616 617 617 list_for_each_entry_reverse(oz, &zi->zi_open_zones, oz_entry) 618 - if (xfs_try_use_zone(zi, file_hint, oz, false)) 618 + if (xfs_try_use_zone(zi, file_hint, oz, XFS_ZONE_ALLOC_OK)) 619 619 return oz; 620 620 621 621 cond_resched_lock(&zi->zi_open_zones_lock); ··· 1249 1249 1250 1250 while ((rtg = xfs_rtgroup_next(mp, rtg))) { 1251 1251 error = xfs_init_zone(&iz, rtg, NULL); 1252 - if (error) 1252 + if (error) { 1253 + xfs_rtgroup_rele(rtg); 1253 1254 goto out_free_zone_info; 1255 + } 1254 1256 } 1255 1257 } 1256 1258
+1 -1
include/drm/Makefile
··· 11 11 quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@) 12 12 cmd_hdrtest = \ 13 13 $(CC) $(c_flags) -fsyntax-only -x c /dev/null -include $< -include $<; \ 14 - PYTHONDONTWRITEBYTECODE=1 $(KERNELDOC) -none $(if $(CONFIG_WERROR)$(CONFIG_DRM_WERROR),-Werror) $<; \ 14 + PYTHONDONTWRITEBYTECODE=1 $(PYTHON3) $(KERNELDOC) -none $(if $(CONFIG_WERROR)$(CONFIG_DRM_WERROR),-Werror) $<; \ 15 15 touch $@ 16 16 17 17 $(obj)/%.hdrtest: $(src)/%.h FORCE
+8 -3
include/linux/compiler_types.h
··· 250 250 /* 251 251 * GCC does not warn about unused static inline functions for -Wunused-function. 252 252 * Suppress the warning in clang as well by using __maybe_unused, but enable it 253 - * for W=1 build. This will allow clang to find unused functions. Remove the 254 - * __inline_maybe_unused entirely after fixing most of -Wunused-function warnings. 253 + * for W=2 build. This will allow clang to find unused functions. 255 254 */ 256 - #ifdef KBUILD_EXTRA_WARN1 255 + #ifdef KBUILD_EXTRA_WARN2 257 256 #define __inline_maybe_unused 258 257 #else 259 258 #define __inline_maybe_unused __maybe_unused ··· 458 459 # define __nocfi __attribute__((__no_sanitize__("kcfi"))) 459 460 #else 460 461 # define __nocfi 462 + #endif 463 + 464 + #if defined(CONFIG_ARCH_USES_CFI_GENERIC_LLVM_PASS) 465 + # define __nocfi_generic __nocfi 466 + #else 467 + # define __nocfi_generic 461 468 #endif 462 469 463 470 /*
+12
include/linux/net/intel/libie/fwlog.h
··· 78 78 ); 79 79 }; 80 80 81 + #if IS_ENABLED(CONFIG_LIBIE_FWLOG) 81 82 int libie_fwlog_init(struct libie_fwlog *fwlog, struct libie_fwlog_api *api); 82 83 void libie_fwlog_deinit(struct libie_fwlog *fwlog); 83 84 void libie_fwlog_reregister(struct libie_fwlog *fwlog); 84 85 void libie_get_fwlog_data(struct libie_fwlog *fwlog, u8 *buf, u16 len); 86 + #else 87 + static inline int libie_fwlog_init(struct libie_fwlog *fwlog, 88 + struct libie_fwlog_api *api) 89 + { 90 + return -EOPNOTSUPP; 91 + } 92 + static inline void libie_fwlog_deinit(struct libie_fwlog *fwlog) { } 93 + static inline void libie_fwlog_reregister(struct libie_fwlog *fwlog) { } 94 + static inline void libie_get_fwlog_data(struct libie_fwlog *fwlog, u8 *buf, 95 + u16 len) { } 96 + #endif /* CONFIG_LIBIE_FWLOG */ 85 97 #endif /* _LIBIE_FWLOG_H_ */
-1
include/linux/platform_data/x86/int3472.h
··· 100 100 struct regulator_consumer_supply supply_map[GPIO_REGULATOR_SUPPLY_MAP_COUNT * 2]; 101 101 char supply_name_upper[GPIO_SUPPLY_NAME_LENGTH]; 102 102 char regulator_name[GPIO_REGULATOR_NAME_LENGTH]; 103 - struct gpio_desc *ena_gpio; 104 103 struct regulator_dev *rdev; 105 104 struct regulator_desc rdesc; 106 105 };
+2 -1
include/linux/virtio_net.h
··· 401 401 if (!tnl_hdr_negotiated) 402 402 return -EINVAL; 403 403 404 - vhdr->hash_hdr.hash_value = 0; 404 + vhdr->hash_hdr.hash_value_lo = 0; 405 + vhdr->hash_hdr.hash_value_hi = 0; 405 406 vhdr->hash_hdr.hash_report = 0; 406 407 vhdr->hash_hdr.padding = 0; 407 408
+1 -1
include/net/bluetooth/mgmt.h
··· 780 780 __u8 ad_type; 781 781 __u8 offset; 782 782 __u8 length; 783 - __u8 value[31]; 783 + __u8 value[HCI_MAX_AD_LENGTH]; 784 784 } __packed; 785 785 786 786 #define MGMT_OP_ADD_ADV_PATTERNS_MONITOR 0x0052
+78
include/net/cfg80211.h
··· 6435 6435 * after wiphy_lock() was called. Therefore, wiphy_cancel_work() can 6436 6436 * use just cancel_work() instead of cancel_work_sync(), it requires 6437 6437 * being in a section protected by wiphy_lock(). 6438 + * 6439 + * Note that these are scheduled with a timer where the accuracy 6440 + * becomes less the longer in the future the scheduled timer is. Use 6441 + * wiphy_hrtimer_work_queue() if the timer must be not be late by more 6442 + * than approximately 10 percent. 6438 6443 */ 6439 6444 void wiphy_delayed_work_queue(struct wiphy *wiphy, 6440 6445 struct wiphy_delayed_work *dwork, ··· 6510 6505 */ 6511 6506 bool wiphy_delayed_work_pending(struct wiphy *wiphy, 6512 6507 struct wiphy_delayed_work *dwork); 6508 + 6509 + struct wiphy_hrtimer_work { 6510 + struct wiphy_work work; 6511 + struct wiphy *wiphy; 6512 + struct hrtimer timer; 6513 + }; 6514 + 6515 + enum hrtimer_restart wiphy_hrtimer_work_timer(struct hrtimer *t); 6516 + 6517 + static inline void wiphy_hrtimer_work_init(struct wiphy_hrtimer_work *hrwork, 6518 + wiphy_work_func_t func) 6519 + { 6520 + hrtimer_setup(&hrwork->timer, wiphy_hrtimer_work_timer, 6521 + CLOCK_BOOTTIME, HRTIMER_MODE_REL); 6522 + wiphy_work_init(&hrwork->work, func); 6523 + } 6524 + 6525 + /** 6526 + * wiphy_hrtimer_work_queue - queue hrtimer work for the wiphy 6527 + * @wiphy: the wiphy to queue for 6528 + * @hrwork: the high resolution timer worker 6529 + * @delay: the delay given as a ktime_t 6530 + * 6531 + * Please refer to wiphy_delayed_work_queue(). The difference is that 6532 + * the hrtimer work uses a high resolution timer for scheduling. This 6533 + * may be needed if timeouts might be scheduled further in the future 6534 + * and the accuracy of the normal timer is not sufficient. 6535 + * 6536 + * Expect a delay of a few milliseconds as the timer is scheduled 6537 + * with some slack and some more time may pass between queueing the 6538 + * work and its start. 
6539 + */ 6540 + void wiphy_hrtimer_work_queue(struct wiphy *wiphy, 6541 + struct wiphy_hrtimer_work *hrwork, 6542 + ktime_t delay); 6543 + 6544 + /** 6545 + * wiphy_hrtimer_work_cancel - cancel previously queued hrtimer work 6546 + * @wiphy: the wiphy, for debug purposes 6547 + * @hrtimer: the hrtimer work to cancel 6548 + * 6549 + * Cancel the work *without* waiting for it, this assumes being 6550 + * called under the wiphy mutex acquired by wiphy_lock(). 6551 + */ 6552 + void wiphy_hrtimer_work_cancel(struct wiphy *wiphy, 6553 + struct wiphy_hrtimer_work *hrtimer); 6554 + 6555 + /** 6556 + * wiphy_hrtimer_work_flush - flush previously queued hrtimer work 6557 + * @wiphy: the wiphy, for debug purposes 6558 + * @hrwork: the hrtimer work to flush 6559 + * 6560 + * Flush the work (i.e. run it if pending). This must be called 6561 + * under the wiphy mutex acquired by wiphy_lock(). 6562 + */ 6563 + void wiphy_hrtimer_work_flush(struct wiphy *wiphy, 6564 + struct wiphy_hrtimer_work *hrwork); 6565 + 6566 + /** 6567 + * wiphy_hrtimer_work_pending - Find out whether a wiphy hrtimer 6568 + * work item is currently pending. 6569 + * 6570 + * @wiphy: the wiphy, for debug purposes 6571 + * @hrwork: the hrtimer work in question 6572 + * 6573 + * Return: true if timer is pending, false otherwise 6574 + * 6575 + * Please refer to the wiphy_delayed_work_pending() documentation as 6576 + * this is the equivalent function for hrtimer based delayed work 6577 + * items. 6578 + */ 6579 + bool wiphy_hrtimer_work_pending(struct wiphy *wiphy, 6580 + struct wiphy_hrtimer_work *hrwork); 6513 6581 6514 6582 /** 6515 6583 * enum ieee80211_ap_reg_power - regulatory power for an Access Point
+1 -1
include/net/libeth/xdp.h
··· 513 513 * can't fail, but can send less frames if there's no enough free descriptors 514 514 * available. The actual free space is returned by @prep from the driver. 515 515 */ 516 - static __always_inline u32 516 + static __always_inline __nocfi_generic u32 517 517 libeth_xdp_tx_xmit_bulk(const struct libeth_xdp_tx_frame *bulk, void *xdpsq, 518 518 u32 n, bool unroll, u64 priv, 519 519 u32 (*prep)(void *xdpsq, struct libeth_xdpsq *sq),
+15 -8
include/uapi/drm/drm_fourcc.h
··· 979 979 * 2 = Gob Height 8, Turing+ Page Kind mapping 980 980 * 3 = Reserved for future use. 981 981 * 982 - * 22:22 s Sector layout. On Tegra GPUs prior to Xavier, there is a further 983 - * bit remapping step that occurs at an even lower level than the 984 - * page kind and block linear swizzles. This causes the layout of 985 - * surfaces mapped in those SOC's GPUs to be incompatible with the 986 - * equivalent mapping on other GPUs in the same system. 982 + * 22:22 s Sector layout. There is a further bit remapping step that occurs 983 + * 26:27 at an even lower level than the page kind and block linear 984 + * swizzles. This causes the bit arrangement of surfaces in memory 985 + * to differ subtly, and prevents direct sharing of surfaces between 986 + * GPUs with different layouts. 987 987 * 988 - * 0 = Tegra K1 - Tegra Parker/TX2 Layout. 989 - * 1 = Desktop GPU and Tegra Xavier+ Layout 988 + * 0 = Tegra K1 - Tegra Parker/TX2 Layout 989 + * 1 = Pre-GB20x, GB20x 32+ bpp, GB10, Tegra Xavier-Orin Layout 990 + * 2 = GB20x(Blackwell 2)+ 8 bpp surface layout 991 + * 3 = GB20x(Blackwell 2)+ 16 bpp surface layout 992 + * 4 = Reserved for future use. 993 + * 5 = Reserved for future use. 994 + * 6 = Reserved for future use. 995 + * 7 = Reserved for future use. 990 996 * 991 997 * 25:23 c Lossless Framebuffer Compression type. 992 998 * ··· 1007 1001 * 6 = Reserved for future use 1008 1002 * 7 = Reserved for future use 1009 1003 * 1010 - * 55:25 - Reserved for future use. Must be zero. 1004 + * 55:28 - Reserved for future use. Must be zero. 1011 1005 */ 1012 1006 #define DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(c, s, g, k, h) \ 1013 1007 fourcc_mod_code(NVIDIA, (0x10 | \ ··· 1015 1009 (((k) & 0xff) << 12) | \ 1016 1010 (((g) & 0x3) << 20) | \ 1017 1011 (((s) & 0x1) << 22) | \ 1012 + (((s) & 0x6) << 25) | \ 1018 1013 (((c) & 0x7) << 23))) 1019 1014 1020 1015 /* To grandfather in prior block linear format modifiers to the above layout,
+12
include/uapi/linux/input-event-codes.h
··· 631 631 #define KEY_BRIGHTNESS_MIN 0x250 /* Set Brightness to Minimum */ 632 632 #define KEY_BRIGHTNESS_MAX 0x251 /* Set Brightness to Maximum */ 633 633 634 + /* 635 + * Keycodes for hotkeys toggling the electronic privacy screen found on some 636 + * laptops on/off. Note when the embedded-controller turns on/off the eprivacy 637 + * screen itself then the state should be reported through drm connecter props: 638 + * https://www.kernel.org/doc/html/latest/gpu/drm-kms.html#standard-connector-properties 639 + * Except when implementing the drm connecter properties API is not possible 640 + * because e.g. the firmware does not allow querying the presence and/or status 641 + * of the eprivacy screen at boot. 642 + */ 643 + #define KEY_EPRIVACY_SCREEN_ON 0x252 644 + #define KEY_EPRIVACY_SCREEN_OFF 0x253 645 + 634 646 #define KEY_KBDINPUTASSIST_PREV 0x260 635 647 #define KEY_KBDINPUTASSIST_NEXT 0x261 636 648 #define KEY_KBDINPUTASSIST_PREVGROUP 0x262
-12
include/uapi/linux/io_uring.h
··· 689 689 /* query various aspects of io_uring, see linux/io_uring/query.h */ 690 690 IORING_REGISTER_QUERY = 35, 691 691 692 - /* return zcrx buffers back into circulation */ 693 - IORING_REGISTER_ZCRX_REFILL = 36, 694 - 695 692 /* this goes last */ 696 693 IORING_REGISTER_LAST, 697 694 ··· 1068 1071 __u32 zcrx_id; 1069 1072 __u32 __resv2; 1070 1073 __u64 __resv[3]; 1071 - }; 1072 - 1073 - struct io_uring_zcrx_sync_refill { 1074 - __u32 zcrx_id; 1075 - /* the number of entries to return */ 1076 - __u32 nr_entries; 1077 - /* pointer to an array of struct io_uring_zcrx_rqe */ 1078 - __u64 rqes; 1079 - __u64 __resv[2]; 1080 1074 }; 1081 1075 1082 1076 #ifdef __cplusplus
+2 -1
include/uapi/linux/virtio_net.h
··· 193 193 194 194 struct virtio_net_hdr_v1_hash { 195 195 struct virtio_net_hdr_v1 hdr; 196 - __le32 hash_value; 196 + __le16 hash_value_lo; 197 + __le16 hash_value_hi; 197 198 #define VIRTIO_NET_HASH_REPORT_NONE 0 198 199 #define VIRTIO_NET_HASH_REPORT_IPv4 1 199 200 #define VIRTIO_NET_HASH_REPORT_TCPv4 2
+7
include/ufs/ufshcd.h
··· 688 688 * single doorbell mode. 689 689 */ 690 690 UFSHCD_QUIRK_BROKEN_LSDBS_CAP = 1 << 25, 691 + 692 + /* 693 + * This quirk indicates that DME_LINKSTARTUP should not be issued a 2nd 694 + * time (refer link_startup_again) after the 1st time was successful, 695 + * because it causes link startup to become unreliable. 696 + */ 697 + UFSHCD_QUIRK_PERFORM_LINK_STARTUP_ONCE = 1 << 26, 691 698 }; 692 699 693 700 enum ufshcd_caps {
+1 -1
io_uring/memmap.c
··· 135 135 struct io_mapped_region *mr, 136 136 struct io_uring_region_desc *reg) 137 137 { 138 - unsigned long size = mr->nr_pages << PAGE_SHIFT; 138 + unsigned long size = (size_t) mr->nr_pages << PAGE_SHIFT; 139 139 struct page **pages; 140 140 int nr_pages; 141 141
-3
io_uring/register.c
··· 827 827 case IORING_REGISTER_QUERY: 828 828 ret = io_query(ctx, arg, nr_args); 829 829 break; 830 - case IORING_REGISTER_ZCRX_REFILL: 831 - ret = io_zcrx_return_bufs(ctx, arg, nr_args); 832 - break; 833 830 default: 834 831 ret = -EINVAL; 835 832 break;
+9 -2
io_uring/rsrc.c
··· 1403 1403 size_t max_segs = 0; 1404 1404 unsigned i; 1405 1405 1406 - for (i = 0; i < nr_iovs; i++) 1406 + for (i = 0; i < nr_iovs; i++) { 1407 1407 max_segs += (iov[i].iov_len >> shift) + 2; 1408 + if (max_segs > INT_MAX) 1409 + return -EOVERFLOW; 1410 + } 1408 1411 return max_segs; 1409 1412 } 1410 1413 ··· 1513 1510 if (unlikely(ret)) 1514 1511 return ret; 1515 1512 } else { 1516 - nr_segs = io_estimate_bvec_size(iov, nr_iovs, imu); 1513 + int ret = io_estimate_bvec_size(iov, nr_iovs, imu); 1514 + 1515 + if (ret < 0) 1516 + return ret; 1517 + nr_segs = ret; 1517 1518 } 1518 1519 1519 1520 if (sizeof(struct bio_vec) > sizeof(struct iovec)) {
-68
io_uring/zcrx.c
··· 928 928 .uninstall = io_pp_uninstall, 929 929 }; 930 930 931 - #define IO_ZCRX_MAX_SYS_REFILL_BUFS (1 << 16) 932 - #define IO_ZCRX_SYS_REFILL_BATCH 32 933 - 934 - static void io_return_buffers(struct io_zcrx_ifq *ifq, 935 - struct io_uring_zcrx_rqe *rqes, unsigned nr) 936 - { 937 - int i; 938 - 939 - for (i = 0; i < nr; i++) { 940 - struct net_iov *niov; 941 - netmem_ref netmem; 942 - 943 - if (!io_parse_rqe(&rqes[i], ifq, &niov)) 944 - continue; 945 - 946 - scoped_guard(spinlock_bh, &ifq->rq_lock) { 947 - if (!io_zcrx_put_niov_uref(niov)) 948 - continue; 949 - } 950 - 951 - netmem = net_iov_to_netmem(niov); 952 - if (!page_pool_unref_and_test(netmem)) 953 - continue; 954 - io_zcrx_return_niov(niov); 955 - } 956 - } 957 - 958 - int io_zcrx_return_bufs(struct io_ring_ctx *ctx, 959 - void __user *arg, unsigned nr_arg) 960 - { 961 - struct io_uring_zcrx_rqe rqes[IO_ZCRX_SYS_REFILL_BATCH]; 962 - struct io_uring_zcrx_rqe __user *user_rqes; 963 - struct io_uring_zcrx_sync_refill zr; 964 - struct io_zcrx_ifq *ifq; 965 - unsigned nr, i; 966 - 967 - if (nr_arg) 968 - return -EINVAL; 969 - if (copy_from_user(&zr, arg, sizeof(zr))) 970 - return -EFAULT; 971 - if (!zr.nr_entries || zr.nr_entries > IO_ZCRX_MAX_SYS_REFILL_BUFS) 972 - return -EINVAL; 973 - if (!mem_is_zero(&zr.__resv, sizeof(zr.__resv))) 974 - return -EINVAL; 975 - 976 - ifq = xa_load(&ctx->zcrx_ctxs, zr.zcrx_id); 977 - if (!ifq) 978 - return -EINVAL; 979 - nr = zr.nr_entries; 980 - user_rqes = u64_to_user_ptr(zr.rqes); 981 - 982 - for (i = 0; i < nr;) { 983 - unsigned batch = min(nr - i, IO_ZCRX_SYS_REFILL_BATCH); 984 - size_t size = batch * sizeof(rqes[0]); 985 - 986 - if (copy_from_user(rqes, user_rqes + i, size)) 987 - return i ? 
i : -EFAULT; 988 - io_return_buffers(ifq, rqes, batch); 989 - 990 - i += batch; 991 - 992 - if (fatal_signal_pending(current)) 993 - return i; 994 - cond_resched(); 995 - } 996 - return nr; 997 - } 998 - 999 931 static bool io_zcrx_queue_cqe(struct io_kiocb *req, struct net_iov *niov, 1000 932 struct io_zcrx_ifq *ifq, int off, int len) 1001 933 {
-7
io_uring/zcrx.h
··· 63 63 }; 64 64 65 65 #if defined(CONFIG_IO_URING_ZCRX) 66 - int io_zcrx_return_bufs(struct io_ring_ctx *ctx, 67 - void __user *arg, unsigned nr_arg); 68 66 int io_register_zcrx_ifq(struct io_ring_ctx *ctx, 69 67 struct io_uring_zcrx_ifq_reg __user *arg); 70 68 void io_unregister_zcrx_ifqs(struct io_ring_ctx *ctx); ··· 94 96 unsigned int id) 95 97 { 96 98 return NULL; 97 - } 98 - static inline int io_zcrx_return_bufs(struct io_ring_ctx *ctx, 99 - void __user *arg, unsigned nr_arg) 100 - { 101 - return -EOPNOTSUPP; 102 99 } 103 100 #endif 104 101
+15 -5
kernel/events/core.c
··· 11773 11773 11774 11774 event = container_of(hrtimer, struct perf_event, hw.hrtimer); 11775 11775 11776 - if (event->state != PERF_EVENT_STATE_ACTIVE) 11776 + if (event->state != PERF_EVENT_STATE_ACTIVE || 11777 + event->hw.state & PERF_HES_STOPPED) 11777 11778 return HRTIMER_NORESTART; 11778 11779 11779 11780 event->pmu->read(event); ··· 11820 11819 struct hw_perf_event *hwc = &event->hw; 11821 11820 11822 11821 /* 11823 - * The throttle can be triggered in the hrtimer handler. 11824 - * The HRTIMER_NORESTART should be used to stop the timer, 11825 - * rather than hrtimer_cancel(). See perf_swevent_hrtimer() 11822 + * Careful: this function can be triggered in the hrtimer handler, 11823 + * for cpu-clock events, so hrtimer_cancel() would cause a 11824 + * deadlock. 11825 + * 11826 + * So use hrtimer_try_to_cancel() to try to stop the hrtimer, 11827 + * and the cpu-clock handler also sets the PERF_HES_STOPPED flag, 11828 + * which guarantees that perf_swevent_hrtimer() will stop the 11829 + * hrtimer once it sees the PERF_HES_STOPPED flag. 
11826 11830 */ 11827 11831 if (is_sampling_event(event) && (hwc->interrupts != MAX_INTERRUPTS)) { 11828 11832 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer); 11829 11833 local64_set(&hwc->period_left, ktime_to_ns(remaining)); 11830 11834 11831 - hrtimer_cancel(&hwc->hrtimer); 11835 + hrtimer_try_to_cancel(&hwc->hrtimer); 11832 11836 } 11833 11837 } 11834 11838 ··· 11877 11871 11878 11872 static void cpu_clock_event_start(struct perf_event *event, int flags) 11879 11873 { 11874 + event->hw.state = 0; 11880 11875 local64_set(&event->hw.prev_count, local_clock()); 11881 11876 perf_swevent_start_hrtimer(event); 11882 11877 } 11883 11878 11884 11879 static void cpu_clock_event_stop(struct perf_event *event, int flags) 11885 11880 { 11881 + event->hw.state = PERF_HES_STOPPED; 11886 11882 perf_swevent_cancel_hrtimer(event); 11887 11883 if (flags & PERF_EF_UPDATE) 11888 11884 cpu_clock_event_update(event); ··· 11958 11950 11959 11951 static void task_clock_event_start(struct perf_event *event, int flags) 11960 11952 { 11953 + event->hw.state = 0; 11961 11954 local64_set(&event->hw.prev_count, event->ctx->time); 11962 11955 perf_swevent_start_hrtimer(event); 11963 11956 } 11964 11957 11965 11958 static void task_clock_event_stop(struct perf_event *event, int flags) 11966 11959 { 11960 + event->hw.state = PERF_HES_STOPPED; 11967 11961 perf_swevent_cancel_hrtimer(event); 11968 11962 if (flags & PERF_EF_UPDATE) 11969 11963 task_clock_event_update(event, event->ctx->time);
+6 -6
kernel/futex/core.c
··· 1680 1680 { 1681 1681 struct mm_struct *mm = fph->mm; 1682 1682 1683 - guard(rcu)(); 1683 + guard(preempt)(); 1684 1684 1685 - if (smp_load_acquire(&fph->state) == FR_PERCPU) { 1686 - this_cpu_inc(*mm->futex_ref); 1685 + if (READ_ONCE(fph->state) == FR_PERCPU) { 1686 + __this_cpu_inc(*mm->futex_ref); 1687 1687 return true; 1688 1688 } 1689 1689 ··· 1694 1694 { 1695 1695 struct mm_struct *mm = fph->mm; 1696 1696 1697 - guard(rcu)(); 1697 + guard(preempt)(); 1698 1698 1699 - if (smp_load_acquire(&fph->state) == FR_PERCPU) { 1700 - this_cpu_dec(*mm->futex_ref); 1699 + if (READ_ONCE(fph->state) == FR_PERCPU) { 1700 + __this_cpu_dec(*mm->futex_ref); 1701 1701 return false; 1702 1702 } 1703 1703
+1 -1
kernel/sched/core.c
··· 9606 9606 9607 9607 guard(rq_lock_irq)(rq); 9608 9608 cfs_rq->runtime_enabled = runtime_enabled; 9609 - cfs_rq->runtime_remaining = 0; 9609 + cfs_rq->runtime_remaining = 1; 9610 9610 9611 9611 if (cfs_rq->throttled) 9612 9612 unthrottle_cfs_rq(cfs_rq);
+6 -9
kernel/sched/fair.c
··· 6024 6024 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)]; 6025 6025 6026 6026 /* 6027 - * It's possible we are called with !runtime_remaining due to things 6028 - * like user changed quota setting(see tg_set_cfs_bandwidth()) or async 6029 - * unthrottled us with a positive runtime_remaining but other still 6030 - * running entities consumed those runtime before we reached here. 6027 + * It's possible we are called with runtime_remaining < 0 due to things 6028 + * like async unthrottled us with a positive runtime_remaining but other 6029 + * still running entities consumed those runtime before we reached here. 6031 6030 * 6032 - * Anyway, we can't unthrottle this cfs_rq without any runtime remaining 6033 - * because any enqueue in tg_unthrottle_up() will immediately trigger a 6034 - * throttle, which is not supposed to happen on unthrottle path. 6031 + * We can't unthrottle this cfs_rq without any runtime remaining because 6032 + * any enqueue in tg_unthrottle_up() will immediately trigger a throttle, 6033 + * which is not supposed to happen on unthrottle path. 6035 6034 */ 6036 6035 if (cfs_rq->runtime_enabled && cfs_rq->runtime_remaining <= 0) 6037 6036 return; 6038 - 6039 - se = cfs_rq->tg->se[cpu_of(rq)]; 6040 6037 6041 6038 cfs_rq->throttled = 0; 6042 6039
+4
kernel/trace/ring_buffer.c
··· 7344 7344 goto out; 7345 7345 } 7346 7346 7347 + /* Did the reader catch up with the writer? */ 7348 + if (cpu_buffer->reader_page == cpu_buffer->commit_page) 7349 + goto out; 7350 + 7347 7351 reader = rb_get_reader_page(cpu_buffer); 7348 7352 if (WARN_ON(!reader)) 7349 7353 goto out;
+4 -2
kernel/trace/trace_events_hist.c
··· 3272 3272 var = create_var(hist_data, file, field_name, val->size, val->type); 3273 3273 if (IS_ERR(var)) { 3274 3274 hist_err(tr, HIST_ERR_VAR_CREATE_FIND_FAIL, errpos(field_name)); 3275 - kfree(val); 3275 + destroy_hist_field(val, 0); 3276 3276 ret = PTR_ERR(var); 3277 3277 goto err; 3278 3278 } 3279 3279 3280 3280 field_var = kzalloc(sizeof(struct field_var), GFP_KERNEL); 3281 3281 if (!field_var) { 3282 - kfree(val); 3282 + destroy_hist_field(val, 0); 3283 + kfree_const(var->type); 3284 + kfree(var->var.name); 3283 3285 kfree(var); 3284 3286 ret = -ENOMEM; 3285 3287 goto err;
+6 -1
kernel/trace/trace_fprobe.c
··· 106 106 if (!tuser->name) 107 107 return NULL; 108 108 109 + /* Register tracepoint if it is loaded. */ 109 110 if (tpoint) { 111 + tuser->tpoint = tpoint; 110 112 ret = tracepoint_user_register(tuser); 111 113 if (ret) 112 114 return ERR_PTR(ret); 113 115 } 114 116 115 - tuser->tpoint = tpoint; 116 117 tuser->refcount = 1; 117 118 INIT_LIST_HEAD(&tuser->list); 118 119 list_add(&tuser->list, &tracepoint_user_list); ··· 1514 1513 if (!trace_probe_is_enabled(tp)) { 1515 1514 list_for_each_entry(tf, trace_probe_probe_list(tp), tp.list) { 1516 1515 unregister_fprobe(&tf->fp); 1516 + if (tf->tuser) { 1517 + tracepoint_user_put(tf->tuser); 1518 + tf->tuser = NULL; 1519 + } 1517 1520 } 1518 1521 } 1519 1522
+1 -1
lib/crypto/Kconfig
··· 64 64 config CRYPTO_LIB_CURVE25519_ARCH 65 65 bool 66 66 depends on CRYPTO_LIB_CURVE25519 && !UML && !KMSAN 67 - default y if ARM && KERNEL_MODE_NEON 67 + default y if ARM && KERNEL_MODE_NEON && !CPU_BIG_ENDIAN 68 68 default y if PPC64 && CPU_LITTLE_ENDIAN 69 69 default y if X86_64 70 70
+1 -1
lib/crypto/Makefile
··· 90 90 libcurve25519-$(CONFIG_CRYPTO_LIB_CURVE25519_GENERIC) += curve25519-fiat32.o 91 91 endif 92 92 # clang versions prior to 18 may blow out the stack with KASAN 93 - ifeq ($(call clang-min-version, 180000),) 93 + ifeq ($(CONFIG_CC_IS_CLANG)_$(call clang-min-version, 180000),y_) 94 94 KASAN_SANITIZE_curve25519-hacl64.o := n 95 95 endif 96 96
+5 -1
mm/slub.c
··· 4666 4666 if (kmem_cache_debug(s)) { 4667 4667 freelist = alloc_single_from_new_slab(s, slab, orig_size, gfpflags); 4668 4668 4669 - if (unlikely(!freelist)) 4669 + if (unlikely(!freelist)) { 4670 + /* This could cause an endless loop. Fail instead. */ 4671 + if (!allow_spin) 4672 + return NULL; 4670 4673 goto new_objects; 4674 + } 4671 4675 4672 4676 if (s->flags & SLAB_STORE_USER) 4673 4677 set_track(s, freelist, TRACK_ALLOC, addr,
+2
net/8021q/vlan.c
··· 193 193 vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, dev); 194 194 grp->nr_vlan_devs++; 195 195 196 + netdev_update_features(dev); 197 + 196 198 return 0; 197 199 198 200 out_unregister_netdev:
+7
net/bluetooth/hci_event.c
··· 4218 4218 } 4219 4219 4220 4220 if (i == ARRAY_SIZE(hci_cc_table)) { 4221 + if (!skb->len) { 4222 + bt_dev_err(hdev, "Unexpected cc 0x%4.4x with no status", 4223 + *opcode); 4224 + *status = HCI_ERROR_UNSPECIFIED; 4225 + return; 4226 + } 4227 + 4221 4228 /* Unknown opcode, assume byte 0 contains the status, so 4222 4229 * that e.g. __hci_cmd_sync() properly returns errors 4223 4230 * for vendor specific commands send by HCI drivers.
+3 -3
net/bluetooth/mgmt.c
··· 5395 5395 for (i = 0; i < pattern_count; i++) { 5396 5396 offset = patterns[i].offset; 5397 5397 length = patterns[i].length; 5398 - if (offset >= HCI_MAX_EXT_AD_LENGTH || 5399 - length > HCI_MAX_EXT_AD_LENGTH || 5400 - (offset + length) > HCI_MAX_EXT_AD_LENGTH) 5398 + if (offset >= HCI_MAX_AD_LENGTH || 5399 + length > HCI_MAX_AD_LENGTH || 5400 + (offset + length) > HCI_MAX_AD_LENGTH) 5401 5401 return MGMT_STATUS_INVALID_PARAMS; 5402 5402 5403 5403 p = kmalloc(sizeof(*p), GFP_KERNEL);
+1 -1
net/bridge/br_forward.c
··· 25 25 26 26 vg = nbp_vlan_group_rcu(p); 27 27 return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) && 28 - (br_mst_is_enabled(p->br) || p->state == BR_STATE_FORWARDING) && 28 + (br_mst_is_enabled(p) || p->state == BR_STATE_FORWARDING) && 29 29 br_allowed_egress(vg, skb) && nbp_switchdev_allowed_egress(p, skb) && 30 30 !br_skb_isolated(p, skb); 31 31 }
+1
net/bridge/br_if.c
··· 386 386 del_nbp(p); 387 387 } 388 388 389 + br_mst_uninit(br); 389 390 br_recalculate_neigh_suppress_enabled(br); 390 391 391 392 br_fdb_delete_by_port(br, NULL, 0, 1);
+2 -2
net/bridge/br_input.c
··· 94 94 95 95 br = p->br; 96 96 97 - if (br_mst_is_enabled(br)) { 97 + if (br_mst_is_enabled(p)) { 98 98 state = BR_STATE_FORWARDING; 99 99 } else { 100 100 if (p->state == BR_STATE_DISABLED) { ··· 429 429 return RX_HANDLER_PASS; 430 430 431 431 forward: 432 - if (br_mst_is_enabled(p->br)) 432 + if (br_mst_is_enabled(p)) 433 433 goto defer_stp_filtering; 434 434 435 435 switch (p->state) {
+8 -2
net/bridge/br_mst.c
··· 22 22 } 23 23 EXPORT_SYMBOL_GPL(br_mst_enabled); 24 24 25 + void br_mst_uninit(struct net_bridge *br) 26 + { 27 + if (br_opt_get(br, BROPT_MST_ENABLED)) 28 + static_branch_dec(&br_mst_used); 29 + } 30 + 25 31 int br_mst_get_info(const struct net_device *dev, u16 msti, unsigned long *vids) 26 32 { 27 33 const struct net_bridge_vlan_group *vg; ··· 231 225 return err; 232 226 233 227 if (on) 234 - static_branch_enable(&br_mst_used); 228 + static_branch_inc(&br_mst_used); 235 229 else 236 - static_branch_disable(&br_mst_used); 230 + static_branch_dec(&br_mst_used); 237 231 238 232 br_opt_toggle(br, BROPT_MST_ENABLED, on); 239 233 return 0;
+10 -3
net/bridge/br_private.h
··· 1935 1935 /* br_mst.c */ 1936 1936 #ifdef CONFIG_BRIDGE_VLAN_FILTERING 1937 1937 DECLARE_STATIC_KEY_FALSE(br_mst_used); 1938 - static inline bool br_mst_is_enabled(struct net_bridge *br) 1938 + static inline bool br_mst_is_enabled(const struct net_bridge_port *p) 1939 1939 { 1940 + /* check the port's vlan group to avoid racing with port deletion */ 1940 1941 return static_branch_unlikely(&br_mst_used) && 1941 - br_opt_get(br, BROPT_MST_ENABLED); 1942 + br_opt_get(p->br, BROPT_MST_ENABLED) && 1943 + rcu_access_pointer(p->vlgrp); 1942 1944 } 1943 1945 1944 1946 int br_mst_set_state(struct net_bridge_port *p, u16 msti, u8 state, ··· 1954 1952 const struct net_bridge_vlan_group *vg); 1955 1953 int br_mst_process(struct net_bridge_port *p, const struct nlattr *mst_attr, 1956 1954 struct netlink_ext_ack *extack); 1955 + void br_mst_uninit(struct net_bridge *br); 1957 1956 #else 1958 - static inline bool br_mst_is_enabled(struct net_bridge *br) 1957 + static inline bool br_mst_is_enabled(const struct net_bridge_port *p) 1959 1958 { 1960 1959 return false; 1961 1960 } ··· 1989 1986 struct netlink_ext_ack *extack) 1990 1987 { 1991 1988 return -EOPNOTSUPP; 1989 + } 1990 + 1991 + static inline void br_mst_uninit(struct net_bridge *br) 1992 + { 1992 1993 } 1993 1994 #endif 1994 1995
+2 -2
net/core/gro_cells.c
··· 60 60 struct sk_buff *skb; 61 61 int work_done = 0; 62 62 63 - __local_lock_nested_bh(&cell->bh_lock); 64 63 while (work_done < budget) { 64 + __local_lock_nested_bh(&cell->bh_lock); 65 65 skb = __skb_dequeue(&cell->napi_skbs); 66 + __local_unlock_nested_bh(&cell->bh_lock); 66 67 if (!skb) 67 68 break; 68 69 napi_gro_receive(napi, skb); ··· 72 71 73 72 if (work_done < budget) 74 73 napi_complete_done(napi, work_done); 75 - __local_unlock_nested_bh(&cell->bh_lock); 76 74 return work_done; 77 75 } 78 76
+2 -5
net/core/netpoll.c
··· 228 228 { 229 229 struct sk_buff_head *skb_pool; 230 230 struct sk_buff *skb; 231 - unsigned long flags; 232 231 233 232 skb_pool = &np->skb_pool; 234 233 235 - spin_lock_irqsave(&skb_pool->lock, flags); 236 - while (skb_pool->qlen < MAX_SKBS) { 234 + while (READ_ONCE(skb_pool->qlen) < MAX_SKBS) { 237 235 skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC); 238 236 if (!skb) 239 237 break; 240 238 241 - __skb_queue_tail(skb_pool, skb); 239 + skb_queue_tail(skb_pool, skb); 242 240 } 243 - spin_unlock_irqrestore(&skb_pool->lock, flags); 244 241 } 245 242 246 243 static void zap_completion_queue(void)
+8 -2
net/dsa/tag_brcm.c
··· 224 224 { 225 225 int len = BRCM_LEG_TAG_LEN; 226 226 int source_port; 227 + __be16 *proto; 227 228 u8 *brcm_tag; 228 229 229 230 if (unlikely(!pskb_may_pull(skb, BRCM_LEG_TAG_LEN + VLAN_HLEN))) 230 231 return NULL; 231 232 232 233 brcm_tag = dsa_etype_header_pos_rx(skb); 234 + proto = (__be16 *)(brcm_tag + BRCM_LEG_TAG_LEN); 233 235 234 236 source_port = brcm_tag[5] & BRCM_LEG_PORT_ID; 235 237 ··· 239 237 if (!skb->dev) 240 238 return NULL; 241 239 242 - /* VLAN tag is added by BCM63xx internal switch */ 243 - if (netdev_uses_dsa(skb->dev)) 240 + /* The internal switch in BCM63XX SoCs always tags on egress on the CPU 241 + * port. We use VID 0 internally for untagged traffic, so strip the tag 242 + * if the TCI field is all 0, and keep it otherwise to also retain 243 + * e.g. 802.1p tagged packets. 244 + */ 245 + if (proto[0] == htons(ETH_P_8021Q) && proto[1] == 0) 244 246 len += VLAN_HLEN; 245 247 246 248 /* Remove Broadcom tag and update checksum */
+1 -1
net/mac80211/chan.c
··· 1290 1290 &link->csa.finalize_work); 1291 1291 break; 1292 1292 case NL80211_IFTYPE_STATION: 1293 - wiphy_delayed_work_queue(sdata->local->hw.wiphy, 1293 + wiphy_hrtimer_work_queue(sdata->local->hw.wiphy, 1294 1294 &link->u.mgd.csa.switch_work, 0); 1295 1295 break; 1296 1296 case NL80211_IFTYPE_UNSPECIFIED:
+4 -4
net/mac80211/ieee80211_i.h
··· 612 612 u8 *assoc_req_ies; 613 613 size_t assoc_req_ies_len; 614 614 615 - struct wiphy_delayed_work ml_reconf_work; 615 + struct wiphy_hrtimer_work ml_reconf_work; 616 616 u16 removed_links; 617 617 618 618 /* TID-to-link mapping support */ 619 - struct wiphy_delayed_work ttlm_work; 619 + struct wiphy_hrtimer_work ttlm_work; 620 620 struct ieee80211_adv_ttlm_info ttlm_info; 621 621 struct wiphy_work teardown_ttlm_work; 622 622 ··· 1017 1017 bool operating_11g_mode; 1018 1018 1019 1019 struct { 1020 - struct wiphy_delayed_work switch_work; 1020 + struct wiphy_hrtimer_work switch_work; 1021 1021 struct cfg80211_chan_def ap_chandef; 1022 1022 struct ieee80211_parsed_tpe tpe; 1023 - unsigned long time; 1023 + ktime_t time; 1024 1024 bool waiting_bcn; 1025 1025 bool ignored_same_chan; 1026 1026 bool blocked_tx;
+2 -2
net/mac80211/link.c
··· 472 472 * from there. 473 473 */ 474 474 if (link->conf->csa_active) 475 - wiphy_delayed_work_queue(local->hw.wiphy, 475 + wiphy_hrtimer_work_queue(local->hw.wiphy, 476 476 &link->u.mgd.csa.switch_work, 477 477 link->u.mgd.csa.time - 478 - jiffies); 478 + ktime_get_boottime()); 479 479 } 480 480 481 481 for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) {
+26 -26
net/mac80211/mlme.c
··· 45 45 #define IEEE80211_ASSOC_TIMEOUT_SHORT (HZ / 10) 46 46 #define IEEE80211_ASSOC_MAX_TRIES 3 47 47 48 - #define IEEE80211_ADV_TTLM_SAFETY_BUFFER_MS msecs_to_jiffies(100) 48 + #define IEEE80211_ADV_TTLM_SAFETY_BUFFER_MS (100 * USEC_PER_MSEC) 49 49 #define IEEE80211_ADV_TTLM_ST_UNDERFLOW 0xff00 50 50 51 51 #define IEEE80211_NEG_TTLM_REQ_TIMEOUT (HZ / 5) ··· 2594 2594 return; 2595 2595 } 2596 2596 2597 - wiphy_delayed_work_queue(sdata->local->hw.wiphy, 2597 + wiphy_hrtimer_work_queue(sdata->local->hw.wiphy, 2598 2598 &link->u.mgd.csa.switch_work, 0); 2599 2599 } 2600 2600 ··· 2753 2753 .timestamp = timestamp, 2754 2754 .device_timestamp = device_timestamp, 2755 2755 }; 2756 - unsigned long now; 2756 + u32 csa_time_tu; 2757 + ktime_t now; 2757 2758 int res; 2758 2759 2759 2760 lockdep_assert_wiphy(local->hw.wiphy); ··· 2984 2983 csa_ie.mode); 2985 2984 2986 2985 /* we may have to handle timeout for deactivated link in software */ 2987 - now = jiffies; 2988 - link->u.mgd.csa.time = now + 2989 - TU_TO_JIFFIES((max_t(int, csa_ie.count, 1) - 1) * 2990 - link->conf->beacon_int); 2986 + now = ktime_get_boottime(); 2987 + csa_time_tu = (max_t(int, csa_ie.count, 1) - 1) * link->conf->beacon_int; 2988 + link->u.mgd.csa.time = now + us_to_ktime(ieee80211_tu_to_usec(csa_time_tu)); 2991 2989 2992 2990 if (ieee80211_vif_link_active(&sdata->vif, link->link_id) && 2993 2991 local->ops->channel_switch) { ··· 3001 3001 } 3002 3002 3003 3003 /* channel switch handled in software */ 3004 - wiphy_delayed_work_queue(local->hw.wiphy, 3004 + wiphy_hrtimer_work_queue(local->hw.wiphy, 3005 3005 &link->u.mgd.csa.switch_work, 3006 3006 link->u.mgd.csa.time - now); 3007 3007 return; ··· 4242 4242 4243 4243 memset(&sdata->u.mgd.ttlm_info, 0, 4244 4244 sizeof(sdata->u.mgd.ttlm_info)); 4245 - wiphy_delayed_work_cancel(sdata->local->hw.wiphy, &ifmgd->ttlm_work); 4245 + wiphy_hrtimer_work_cancel(sdata->local->hw.wiphy, &ifmgd->ttlm_work); 4246 4246 4247 4247 memset(&sdata->vif.neg_ttlm, 0, 
sizeof(sdata->vif.neg_ttlm)); 4248 4248 wiphy_delayed_work_cancel(sdata->local->hw.wiphy, 4249 4249 &ifmgd->neg_ttlm_timeout_work); 4250 4250 4251 4251 sdata->u.mgd.removed_links = 0; 4252 - wiphy_delayed_work_cancel(sdata->local->hw.wiphy, 4252 + wiphy_hrtimer_work_cancel(sdata->local->hw.wiphy, 4253 4253 &sdata->u.mgd.ml_reconf_work); 4254 4254 4255 4255 wiphy_work_cancel(sdata->local->hw.wiphy, ··· 6876 6876 /* In case the removal was cancelled, abort it */ 6877 6877 if (sdata->u.mgd.removed_links) { 6878 6878 sdata->u.mgd.removed_links = 0; 6879 - wiphy_delayed_work_cancel(sdata->local->hw.wiphy, 6879 + wiphy_hrtimer_work_cancel(sdata->local->hw.wiphy, 6880 6880 &sdata->u.mgd.ml_reconf_work); 6881 6881 } 6882 6882 return; ··· 6906 6906 } 6907 6907 6908 6908 sdata->u.mgd.removed_links = removed_links; 6909 - wiphy_delayed_work_queue(sdata->local->hw.wiphy, 6909 + wiphy_hrtimer_work_queue(sdata->local->hw.wiphy, 6910 6910 &sdata->u.mgd.ml_reconf_work, 6911 - TU_TO_JIFFIES(delay)); 6911 + us_to_ktime(ieee80211_tu_to_usec(delay))); 6912 6912 } 6913 6913 6914 6914 static int ieee80211_ttlm_set_links(struct ieee80211_sub_if_data *sdata, ··· 7095 7095 /* if a planned TID-to-link mapping was cancelled - 7096 7096 * abort it 7097 7097 */ 7098 - wiphy_delayed_work_cancel(sdata->local->hw.wiphy, 7098 + wiphy_hrtimer_work_cancel(sdata->local->hw.wiphy, 7099 7099 &sdata->u.mgd.ttlm_work); 7100 7100 } else if (sdata->u.mgd.ttlm_info.active) { 7101 7101 /* if no TID-to-link element, set to default mapping in ··· 7130 7130 7131 7131 if (ttlm_info.switch_time) { 7132 7132 u16 beacon_ts_tu, st_tu, delay; 7133 - u32 delay_jiffies; 7133 + u64 delay_usec; 7134 7134 u64 mask; 7135 7135 7136 7136 /* The t2l map switch time is indicated with a partial ··· 7152 7152 if (delay > IEEE80211_ADV_TTLM_ST_UNDERFLOW) 7153 7153 return; 7154 7154 7155 - delay_jiffies = TU_TO_JIFFIES(delay); 7155 + delay_usec = ieee80211_tu_to_usec(delay); 7156 7156 7157 7157 /* Link switching can take time, so 
schedule it 7158 7158 * 100ms before to be ready on time 7159 7159 */ 7160 - if (delay_jiffies > IEEE80211_ADV_TTLM_SAFETY_BUFFER_MS) 7161 - delay_jiffies -= 7160 + if (delay_usec > IEEE80211_ADV_TTLM_SAFETY_BUFFER_MS) 7161 + delay_usec -= 7162 7162 IEEE80211_ADV_TTLM_SAFETY_BUFFER_MS; 7163 7163 else 7164 - delay_jiffies = 0; 7164 + delay_usec = 0; 7165 7165 7166 7166 sdata->u.mgd.ttlm_info = ttlm_info; 7167 - wiphy_delayed_work_cancel(sdata->local->hw.wiphy, 7167 + wiphy_hrtimer_work_cancel(sdata->local->hw.wiphy, 7168 7168 &sdata->u.mgd.ttlm_work); 7169 - wiphy_delayed_work_queue(sdata->local->hw.wiphy, 7169 + wiphy_hrtimer_work_queue(sdata->local->hw.wiphy, 7170 7170 &sdata->u.mgd.ttlm_work, 7171 - delay_jiffies); 7171 + us_to_ktime(delay_usec)); 7172 7172 return; 7173 7173 } 7174 7174 } ··· 8793 8793 ieee80211_csa_connection_drop_work); 8794 8794 wiphy_delayed_work_init(&ifmgd->tdls_peer_del_work, 8795 8795 ieee80211_tdls_peer_del_work); 8796 - wiphy_delayed_work_init(&ifmgd->ml_reconf_work, 8796 + wiphy_hrtimer_work_init(&ifmgd->ml_reconf_work, 8797 8797 ieee80211_ml_reconf_work); 8798 8798 wiphy_delayed_work_init(&ifmgd->reconf.wk, 8799 8799 ieee80211_ml_sta_reconf_timeout); ··· 8802 8802 timer_setup(&ifmgd->conn_mon_timer, ieee80211_sta_conn_mon_timer, 0); 8803 8803 wiphy_delayed_work_init(&ifmgd->tx_tspec_wk, 8804 8804 ieee80211_sta_handle_tspec_ac_params_wk); 8805 - wiphy_delayed_work_init(&ifmgd->ttlm_work, 8805 + wiphy_hrtimer_work_init(&ifmgd->ttlm_work, 8806 8806 ieee80211_tid_to_link_map_work); 8807 8807 wiphy_delayed_work_init(&ifmgd->neg_ttlm_timeout_work, 8808 8808 ieee80211_neg_ttlm_timeout_work); ··· 8849 8849 else 8850 8850 link->u.mgd.req_smps = IEEE80211_SMPS_OFF; 8851 8851 8852 - wiphy_delayed_work_init(&link->u.mgd.csa.switch_work, 8852 + wiphy_hrtimer_work_init(&link->u.mgd.csa.switch_work, 8853 8853 ieee80211_csa_switch_work); 8854 8854 8855 8855 ieee80211_clear_tpe(&link->conf->tpe); ··· 10064 10064 &link->u.mgd.request_smps_work); 10065 
10065 wiphy_work_cancel(link->sdata->local->hw.wiphy, 10066 10066 &link->u.mgd.recalc_smps); 10067 - wiphy_delayed_work_cancel(link->sdata->local->hw.wiphy, 10067 + wiphy_hrtimer_work_cancel(link->sdata->local->hw.wiphy, 10068 10068 &link->u.mgd.csa.switch_work); 10069 10069 } 10070 10070
+17 -6
net/sctp/diag.c
··· 73 73 struct nlattr *attr; 74 74 void *info = NULL; 75 75 76 + rcu_read_lock(); 76 77 list_for_each_entry_rcu(laddr, address_list, list) 77 78 addrcnt++; 79 + rcu_read_unlock(); 78 80 79 81 attr = nla_reserve(skb, INET_DIAG_LOCALS, addrlen * addrcnt); 80 82 if (!attr) 81 83 return -EMSGSIZE; 82 84 83 85 info = nla_data(attr); 86 + rcu_read_lock(); 84 87 list_for_each_entry_rcu(laddr, address_list, list) { 85 88 memcpy(info, &laddr->a, sizeof(laddr->a)); 86 89 memset(info + sizeof(laddr->a), 0, addrlen - sizeof(laddr->a)); 87 90 info += addrlen; 91 + 92 + if (!--addrcnt) 93 + break; 88 94 } 95 + rcu_read_unlock(); 89 96 90 97 return 0; 91 98 } ··· 230 223 bool net_admin; 231 224 }; 232 225 233 - static size_t inet_assoc_attr_size(struct sctp_association *asoc) 226 + static size_t inet_assoc_attr_size(struct sock *sk, 227 + struct sctp_association *asoc) 234 228 { 235 229 int addrlen = sizeof(struct sockaddr_storage); 236 230 int addrcnt = 0; 237 231 struct sctp_sockaddr_entry *laddr; 238 232 239 233 list_for_each_entry_rcu(laddr, &asoc->base.bind_addr.address_list, 240 - list) 234 + list, lockdep_sock_is_held(sk)) 241 235 addrcnt++; 242 236 243 237 return nla_total_size(sizeof(struct sctp_info)) ··· 264 256 if (err) 265 257 return err; 266 258 267 - rep = nlmsg_new(inet_assoc_attr_size(assoc), GFP_KERNEL); 268 - if (!rep) 269 - return -ENOMEM; 270 - 271 259 lock_sock(sk); 260 + 261 + rep = nlmsg_new(inet_assoc_attr_size(sk, assoc), GFP_KERNEL); 262 + if (!rep) { 263 + release_sock(sk); 264 + return -ENOMEM; 265 + } 266 + 272 267 if (ep != assoc->ep) { 273 268 err = -EAGAIN; 274 269 goto out;
+6 -15
net/sctp/transport.c
··· 37 37 /* 1st Level Abstractions. */ 38 38 39 39 /* Initialize a new transport from provided memory. */ 40 - static struct sctp_transport *sctp_transport_init(struct net *net, 41 - struct sctp_transport *peer, 42 - const union sctp_addr *addr, 43 - gfp_t gfp) 40 + static void sctp_transport_init(struct net *net, 41 + struct sctp_transport *peer, 42 + const union sctp_addr *addr, 43 + gfp_t gfp) 44 44 { 45 45 /* Copy in the address. */ 46 46 peer->af_specific = sctp_get_af_specific(addr->sa.sa_family); ··· 83 83 get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce)); 84 84 85 85 refcount_set(&peer->refcnt, 1); 86 - 87 - return peer; 88 86 } 89 87 90 88 /* Allocate and initialize a new transport. */ ··· 94 96 95 97 transport = kzalloc(sizeof(*transport), gfp); 96 98 if (!transport) 97 - goto fail; 99 + return NULL; 98 100 99 - if (!sctp_transport_init(net, transport, addr, gfp)) 100 - goto fail_init; 101 + sctp_transport_init(net, transport, addr, gfp); 101 102 102 103 SCTP_DBG_OBJCNT_INC(transport); 103 104 104 105 return transport; 105 - 106 - fail_init: 107 - kfree(transport); 108 - 109 - fail: 110 - return NULL; 111 106 } 112 107 113 108 /* This transport is no longer needed. Free up if possible, or
+56
net/wireless/core.c
··· 1787 1787 } 1788 1788 EXPORT_SYMBOL_GPL(wiphy_delayed_work_pending); 1789 1789 1790 + enum hrtimer_restart wiphy_hrtimer_work_timer(struct hrtimer *t) 1791 + { 1792 + struct wiphy_hrtimer_work *hrwork = 1793 + container_of(t, struct wiphy_hrtimer_work, timer); 1794 + 1795 + wiphy_work_queue(hrwork->wiphy, &hrwork->work); 1796 + 1797 + return HRTIMER_NORESTART; 1798 + } 1799 + EXPORT_SYMBOL_GPL(wiphy_hrtimer_work_timer); 1800 + 1801 + void wiphy_hrtimer_work_queue(struct wiphy *wiphy, 1802 + struct wiphy_hrtimer_work *hrwork, 1803 + ktime_t delay) 1804 + { 1805 + trace_wiphy_hrtimer_work_queue(wiphy, &hrwork->work, delay); 1806 + 1807 + if (!delay) { 1808 + hrtimer_cancel(&hrwork->timer); 1809 + wiphy_work_queue(wiphy, &hrwork->work); 1810 + return; 1811 + } 1812 + 1813 + hrwork->wiphy = wiphy; 1814 + hrtimer_start_range_ns(&hrwork->timer, delay, 1815 + 1000 * NSEC_PER_USEC, HRTIMER_MODE_REL); 1816 + } 1817 + EXPORT_SYMBOL_GPL(wiphy_hrtimer_work_queue); 1818 + 1819 + void wiphy_hrtimer_work_cancel(struct wiphy *wiphy, 1820 + struct wiphy_hrtimer_work *hrwork) 1821 + { 1822 + lockdep_assert_held(&wiphy->mtx); 1823 + 1824 + hrtimer_cancel(&hrwork->timer); 1825 + wiphy_work_cancel(wiphy, &hrwork->work); 1826 + } 1827 + EXPORT_SYMBOL_GPL(wiphy_hrtimer_work_cancel); 1828 + 1829 + void wiphy_hrtimer_work_flush(struct wiphy *wiphy, 1830 + struct wiphy_hrtimer_work *hrwork) 1831 + { 1832 + lockdep_assert_held(&wiphy->mtx); 1833 + 1834 + hrtimer_cancel(&hrwork->timer); 1835 + wiphy_work_flush(wiphy, &hrwork->work); 1836 + } 1837 + EXPORT_SYMBOL_GPL(wiphy_hrtimer_work_flush); 1838 + 1839 + bool wiphy_hrtimer_work_pending(struct wiphy *wiphy, 1840 + struct wiphy_hrtimer_work *hrwork) 1841 + { 1842 + return hrtimer_is_queued(&hrwork->timer); 1843 + } 1844 + EXPORT_SYMBOL_GPL(wiphy_hrtimer_work_pending); 1845 + 1790 1846 static int __init cfg80211_init(void) 1791 1847 { 1792 1848 int err;
+21
net/wireless/trace.h
··· 304 304 __entry->delay) 305 305 ); 306 306 307 + TRACE_EVENT(wiphy_hrtimer_work_queue, 308 + TP_PROTO(struct wiphy *wiphy, struct wiphy_work *work, 309 + ktime_t delay), 310 + TP_ARGS(wiphy, work, delay), 311 + TP_STRUCT__entry( 312 + WIPHY_ENTRY 313 + __field(void *, instance) 314 + __field(void *, func) 315 + __field(ktime_t, delay) 316 + ), 317 + TP_fast_assign( 318 + WIPHY_ASSIGN; 319 + __entry->instance = work; 320 + __entry->func = work->func; 321 + __entry->delay = delay; 322 + ), 323 + TP_printk(WIPHY_PR_FMT " instance=%p func=%pS delay=%llu", 324 + WIPHY_PR_ARG, __entry->instance, __entry->func, 325 + __entry->delay) 326 + ); 327 + 307 328 TRACE_EVENT(wiphy_work_worker_start, 308 329 TP_PROTO(struct wiphy *wiphy), 309 330 TP_ARGS(wiphy),
+14 -1
rust/Makefile
··· 69 69 # the time being (https://github.com/rust-lang/rust/issues/144521). 70 70 rustdoc_modifiers_workaround := $(if $(call rustc-min-version,108800),-Cunsafe-allow-abi-mismatch=fixed-x18) 71 71 72 + # Similarly, for doctests (https://github.com/rust-lang/rust/issues/146465). 73 + doctests_modifiers_workaround := $(rustdoc_modifiers_workaround)$(if $(call rustc-min-version,109100),$(comma)sanitizer) 74 + 72 75 # `rustc` recognizes `--remap-path-prefix` since 1.26.0, but `rustdoc` only 73 76 # since Rust 1.81.0. Moreover, `rustdoc` ICEs on out-of-tree builds since Rust 74 77 # 1.82.0 (https://github.com/rust-lang/rust/issues/138520). Thus workaround both ··· 130 127 rustdoc-core: $(RUST_LIB_SRC)/core/src/lib.rs rustdoc-clean FORCE 131 128 +$(call if_changed,rustdoc) 132 129 130 + # Even if `rustdoc` targets are not kernel objects, they should still be 131 + # treated as such so that we pass the same flags. Otherwise, for instance, 132 + # `rustdoc` will complain about missing sanitizer flags causing an ABI mismatch. 
133 + rustdoc-compiler_builtins: private is-kernel-object := y 133 134 rustdoc-compiler_builtins: $(src)/compiler_builtins.rs rustdoc-core FORCE 134 135 +$(call if_changed,rustdoc) 135 136 137 + rustdoc-ffi: private is-kernel-object := y 136 138 rustdoc-ffi: $(src)/ffi.rs rustdoc-core FORCE 137 139 +$(call if_changed,rustdoc) 138 140 ··· 155 147 rustdoc-macros FORCE 156 148 +$(call if_changed,rustdoc) 157 149 150 + rustdoc-kernel: private is-kernel-object := y 158 151 rustdoc-kernel: private rustc_target_flags = --extern ffi --extern pin_init \ 159 152 --extern build_error --extern macros \ 160 153 --extern bindings --extern uapi ··· 239 230 --extern bindings --extern uapi \ 240 231 --no-run --crate-name kernel -Zunstable-options \ 241 232 --sysroot=/dev/null \ 242 - $(rustdoc_modifiers_workaround) \ 233 + $(doctests_modifiers_workaround) \ 243 234 --test-builder $(objtree)/scripts/rustdoc_test_builder \ 244 235 $< $(rustdoc_test_kernel_quiet); \ 245 236 $(objtree)/scripts/rustdoc_test_gen ··· 531 522 $(obj)/$(libpin_init_internal_name) $(obj)/$(libmacros_name) FORCE 532 523 +$(call if_changed_rule,rustc_library) 533 524 525 + # Even if normally `build_error` is not a kernel object, it should still be 526 + # treated as such so that we pass the same flags. Otherwise, for instance, 527 + # `rustc` will complain about missing sanitizer flags causing an ABI mismatch. 528 + $(obj)/build_error.o: private is-kernel-object := y 534 529 $(obj)/build_error.o: private skip_gendwarfksyms = 1 535 530 $(obj)/build_error.o: $(src)/build_error.rs $(obj)/compiler_builtins.o FORCE 536 531 +$(call if_changed_rule,rustc_library)
+1 -1
rust/kernel/devres.rs
··· 103 103 /// 104 104 /// # Invariants 105 105 /// 106 - /// [`Self::inner`] is guaranteed to be initialized and is always accessed read-only. 106 + /// `Self::inner` is guaranteed to be initialized and is always accessed read-only. 107 107 #[pin_data(PinnedDrop)] 108 108 pub struct Devres<T: Send> { 109 109 dev: ARef<Device>,
+1 -1
rust/kernel/sync/condvar.rs
··· 36 36 /// spuriously. 37 37 /// 38 38 /// Instances of [`CondVar`] need a lock class and to be pinned. The recommended way to create such 39 - /// instances is with the [`pin_init`](crate::pin_init!) and [`new_condvar`] macros. 39 + /// instances is with the [`pin_init`](pin_init::pin_init!) and [`new_condvar`] macros. 40 40 /// 41 41 /// # Examples 42 42 ///
+1 -1
scripts/Makefile.build
··· 167 167 endif 168 168 169 169 ifneq ($(KBUILD_EXTRA_WARN),) 170 - cmd_checkdoc = PYTHONDONTWRITEBYTECODE=1 $(KERNELDOC) -none $(KDOCFLAGS) \ 170 + cmd_checkdoc = PYTHONDONTWRITEBYTECODE=1 $(PYTHON3) $(KERNELDOC) -none $(KDOCFLAGS) \ 171 171 $(if $(findstring 2, $(KBUILD_EXTRA_WARN)), -Wall) \ 172 172 $< 173 173 endif
+14 -1
scripts/Makefile.vmlinux
··· 102 102 # modules.builtin.modinfo 103 103 # --------------------------------------------------------------------------- 104 104 105 + # .modinfo in vmlinux.unstripped is aligned to 8 bytes for compatibility with 106 + # tools that expect vmlinux to have sufficiently aligned sections but the 107 + # additional bytes used for padding .modinfo to satisfy this requirement break 108 + # certain versions of kmod with 109 + # 110 + # depmod: ERROR: kmod_builtin_iter_next: unexpected string without modname prefix 111 + # 112 + # Strip the trailing padding bytes after extracting .modinfo to comply with 113 + # what kmod expects to parse. 114 + quiet_cmd_modules_builtin_modinfo = GEN $@ 115 + cmd_modules_builtin_modinfo = $(cmd_objcopy); \ 116 + sed -i 's/\x00\+$$/\x00/g' $@ 117 + 105 118 OBJCOPYFLAGS_modules.builtin.modinfo := -j .modinfo -O binary 106 119 107 120 targets += modules.builtin.modinfo 108 121 modules.builtin.modinfo: vmlinux.unstripped FORCE 109 - $(call if_changed,objcopy) 122 + $(call if_changed,modules_builtin_modinfo) 110 123 111 124 # modules.builtin 112 125 # ---------------------------------------------------------------------------
+5
tools/arch/x86/include/asm/cpufeatures.h
··· 444 444 #define X86_FEATURE_VM_PAGE_FLUSH (19*32+ 2) /* VM Page Flush MSR is supported */ 445 445 #define X86_FEATURE_SEV_ES (19*32+ 3) /* "sev_es" Secure Encrypted Virtualization - Encrypted State */ 446 446 #define X86_FEATURE_SEV_SNP (19*32+ 4) /* "sev_snp" Secure Encrypted Virtualization - Secure Nested Paging */ 447 + #define X86_FEATURE_SNP_SECURE_TSC (19*32+ 8) /* SEV-SNP Secure TSC */ 447 448 #define X86_FEATURE_V_TSC_AUX (19*32+ 9) /* Virtual TSC_AUX */ 448 449 #define X86_FEATURE_SME_COHERENT (19*32+10) /* hardware-enforced cache coherency */ 449 450 #define X86_FEATURE_DEBUG_SWAP (19*32+14) /* "debug_swap" SEV-ES full debug state swap support */ ··· 496 495 #define X86_FEATURE_TSA_SQ_NO (21*32+11) /* AMD CPU not vulnerable to TSA-SQ */ 497 496 #define X86_FEATURE_TSA_L1_NO (21*32+12) /* AMD CPU not vulnerable to TSA-L1 */ 498 497 #define X86_FEATURE_CLEAR_CPU_BUF_VM (21*32+13) /* Clear CPU buffers using VERW before VMRUN */ 498 + #define X86_FEATURE_IBPB_EXIT_TO_USER (21*32+14) /* Use IBPB on exit-to-userspace, see VMSCAPE bug */ 499 + #define X86_FEATURE_ABMC (21*32+15) /* Assignable Bandwidth Monitoring Counters */ 500 + #define X86_FEATURE_MSR_IMM (21*32+16) /* MSR immediate form instructions */ 499 501 500 502 /* 501 503 * BUG word(s) ··· 555 551 #define X86_BUG_ITS X86_BUG( 1*32+ 7) /* "its" CPU is affected by Indirect Target Selection */ 556 552 #define X86_BUG_ITS_NATIVE_ONLY X86_BUG( 1*32+ 8) /* "its_native_only" CPU is affected by ITS, VMX is not affected */ 557 553 #define X86_BUG_TSA X86_BUG( 1*32+ 9) /* "tsa" CPU is affected by Transient Scheduler Attacks */ 554 + #define X86_BUG_VMSCAPE X86_BUG( 1*32+10) /* "vmscape" CPU is affected by VMSCAPE attacks from guests */ 558 555 #endif /* _ASM_X86_CPUFEATURES_H */
+19 -1
tools/arch/x86/include/asm/msr-index.h
··· 315 315 #define PERF_CAP_PT_IDX 16 316 316 317 317 #define MSR_PEBS_LD_LAT_THRESHOLD 0x000003f6 318 + 319 + #define PERF_CAP_LBR_FMT 0x3f 318 320 #define PERF_CAP_PEBS_TRAP BIT_ULL(6) 319 321 #define PERF_CAP_ARCH_REG BIT_ULL(7) 320 322 #define PERF_CAP_PEBS_FORMAT 0xf00 323 + #define PERF_CAP_FW_WRITES BIT_ULL(13) 321 324 #define PERF_CAP_PEBS_BASELINE BIT_ULL(14) 322 325 #define PERF_CAP_PEBS_TIMING_INFO BIT_ULL(17) 323 326 #define PERF_CAP_PEBS_MASK (PERF_CAP_PEBS_TRAP | PERF_CAP_ARCH_REG | \ ··· 636 633 #define MSR_AMD_PPIN 0xc00102f1 637 634 #define MSR_AMD64_CPUID_FN_7 0xc0011002 638 635 #define MSR_AMD64_CPUID_FN_1 0xc0011004 636 + 637 + #define MSR_AMD64_CPUID_EXT_FEAT 0xc0011005 638 + #define MSR_AMD64_CPUID_EXT_FEAT_TOPOEXT_BIT 54 639 + #define MSR_AMD64_CPUID_EXT_FEAT_TOPOEXT BIT_ULL(MSR_AMD64_CPUID_EXT_FEAT_TOPOEXT_BIT) 640 + 639 641 #define MSR_AMD64_LS_CFG 0xc0011020 640 642 #define MSR_AMD64_DC_CFG 0xc0011022 641 643 #define MSR_AMD64_TW_CFG 0xc0011023 ··· 709 701 #define MSR_AMD64_SNP_VMSA_REG_PROT BIT_ULL(MSR_AMD64_SNP_VMSA_REG_PROT_BIT) 710 702 #define MSR_AMD64_SNP_SMT_PROT_BIT 17 711 703 #define MSR_AMD64_SNP_SMT_PROT BIT_ULL(MSR_AMD64_SNP_SMT_PROT_BIT) 712 - #define MSR_AMD64_SNP_RESV_BIT 18 704 + #define MSR_AMD64_SNP_SECURE_AVIC_BIT 18 705 + #define MSR_AMD64_SNP_SECURE_AVIC BIT_ULL(MSR_AMD64_SNP_SECURE_AVIC_BIT) 706 + #define MSR_AMD64_SNP_RESV_BIT 19 713 707 #define MSR_AMD64_SNP_RESERVED_MASK GENMASK_ULL(63, MSR_AMD64_SNP_RESV_BIT) 708 + #define MSR_AMD64_SAVIC_CONTROL 0xc0010138 709 + #define MSR_AMD64_SAVIC_EN_BIT 0 710 + #define MSR_AMD64_SAVIC_EN BIT_ULL(MSR_AMD64_SAVIC_EN_BIT) 711 + #define MSR_AMD64_SAVIC_ALLOWEDNMI_BIT 1 712 + #define MSR_AMD64_SAVIC_ALLOWEDNMI BIT_ULL(MSR_AMD64_SAVIC_ALLOWEDNMI_BIT) 714 713 #define MSR_AMD64_RMP_BASE 0xc0010132 715 714 #define MSR_AMD64_RMP_END 0xc0010133 716 715 #define MSR_AMD64_RMP_CFG 0xc0010136 ··· 750 735 #define MSR_AMD64_PERF_CNTR_GLOBAL_STATUS 0xc0000300 751 736 #define 
MSR_AMD64_PERF_CNTR_GLOBAL_CTL 0xc0000301 752 737 #define MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR 0xc0000302 738 + #define MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET 0xc0000303 753 739 754 740 /* AMD Hardware Feedback Support MSRs */ 755 741 #define MSR_AMD_WORKLOAD_CLASS_CONFIG 0xc0000500 ··· 1241 1225 /* - AMD: */ 1242 1226 #define MSR_IA32_MBA_BW_BASE 0xc0000200 1243 1227 #define MSR_IA32_SMBA_BW_BASE 0xc0000280 1228 + #define MSR_IA32_L3_QOS_ABMC_CFG 0xc00003fd 1229 + #define MSR_IA32_L3_QOS_EXT_CFG 0xc00003ff 1244 1230 #define MSR_IA32_EVT_CFG_BASE 0xc0000400 1245 1231 1246 1232 /* AMD-V MSRs */
+34
tools/arch/x86/include/uapi/asm/kvm.h
··· 35 35 #define MC_VECTOR 18 36 36 #define XM_VECTOR 19 37 37 #define VE_VECTOR 20 38 + #define CP_VECTOR 21 39 + 40 + #define HV_VECTOR 28 41 + #define VC_VECTOR 29 42 + #define SX_VECTOR 30 38 43 39 44 /* Select x86 specific features in <linux/kvm.h> */ 40 45 #define __KVM_HAVE_PIT ··· 415 410 struct kvm_xcr xcrs[KVM_MAX_XCRS]; 416 411 __u64 padding[16]; 417 412 }; 413 + 414 + #define KVM_X86_REG_TYPE_MSR 2 415 + #define KVM_X86_REG_TYPE_KVM 3 416 + 417 + #define KVM_X86_KVM_REG_SIZE(reg) \ 418 + ({ \ 419 + reg == KVM_REG_GUEST_SSP ? KVM_REG_SIZE_U64 : 0; \ 420 + }) 421 + 422 + #define KVM_X86_REG_TYPE_SIZE(type, reg) \ 423 + ({ \ 424 + __u64 type_size = (__u64)type << 32; \ 425 + \ 426 + type_size |= type == KVM_X86_REG_TYPE_MSR ? KVM_REG_SIZE_U64 : \ 427 + type == KVM_X86_REG_TYPE_KVM ? KVM_X86_KVM_REG_SIZE(reg) : \ 428 + 0; \ 429 + type_size; \ 430 + }) 431 + 432 + #define KVM_X86_REG_ID(type, index) \ 433 + (KVM_REG_X86 | KVM_X86_REG_TYPE_SIZE(type, index) | index) 434 + 435 + #define KVM_X86_REG_MSR(index) \ 436 + KVM_X86_REG_ID(KVM_X86_REG_TYPE_MSR, index) 437 + #define KVM_X86_REG_KVM(index) \ 438 + KVM_X86_REG_ID(KVM_X86_REG_TYPE_KVM, index) 439 + 440 + /* KVM-defined registers starting from 0 */ 441 + #define KVM_REG_GUEST_SSP 0 418 442 419 443 #define KVM_SYNC_X86_REGS (1UL << 0) 420 444 #define KVM_SYNC_X86_SREGS (1UL << 1)
+4
tools/arch/x86/include/uapi/asm/svm.h
··· 118 118 #define SVM_VMGEXIT_AP_CREATE 1 119 119 #define SVM_VMGEXIT_AP_DESTROY 2 120 120 #define SVM_VMGEXIT_SNP_RUN_VMPL 0x80000018 121 + #define SVM_VMGEXIT_SAVIC 0x8000001a 122 + #define SVM_VMGEXIT_SAVIC_REGISTER_GPA 0 123 + #define SVM_VMGEXIT_SAVIC_UNREGISTER_GPA 1 124 + #define SVM_VMGEXIT_SAVIC_SELF_GPA ~0ULL 121 125 #define SVM_VMGEXIT_HV_FEATURES 0x8000fffd 122 126 #define SVM_VMGEXIT_TERM_REQUEST 0x8000fffe 123 127 #define SVM_VMGEXIT_TERM_REASON(reason_set, reason_code) \
+5 -1
tools/arch/x86/include/uapi/asm/vmx.h
··· 94 94 #define EXIT_REASON_BUS_LOCK 74 95 95 #define EXIT_REASON_NOTIFY 75 96 96 #define EXIT_REASON_TDCALL 77 97 + #define EXIT_REASON_MSR_READ_IMM 84 98 + #define EXIT_REASON_MSR_WRITE_IMM 85 97 99 98 100 #define VMX_EXIT_REASONS \ 99 101 { EXIT_REASON_EXCEPTION_NMI, "EXCEPTION_NMI" }, \ ··· 160 158 { EXIT_REASON_TPAUSE, "TPAUSE" }, \ 161 159 { EXIT_REASON_BUS_LOCK, "BUS_LOCK" }, \ 162 160 { EXIT_REASON_NOTIFY, "NOTIFY" }, \ 163 - { EXIT_REASON_TDCALL, "TDCALL" } 161 + { EXIT_REASON_TDCALL, "TDCALL" }, \ 162 + { EXIT_REASON_MSR_READ_IMM, "MSR_READ_IMM" }, \ 163 + { EXIT_REASON_MSR_WRITE_IMM, "MSR_WRITE_IMM" } 164 164 165 165 #define VMX_EXIT_REASON_FLAGS \ 166 166 { VMX_EXIT_REASONS_FAILED_VMENTRY, "FAILED_VMENTRY" }
+1 -1
tools/include/asm-generic/bitops/__fls.h
··· 10 10 * 11 11 * Undefined if no set bit exists, so code should check against 0 first. 12 12 */ 13 - static __always_inline unsigned int generic___fls(unsigned long word) 13 + static __always_inline __attribute_const__ unsigned int generic___fls(unsigned long word) 14 14 { 15 15 unsigned int num = BITS_PER_LONG - 1; 16 16
+1 -1
tools/include/asm-generic/bitops/fls.h
··· 10 10 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. 11 11 */ 12 12 13 - static __always_inline int generic_fls(unsigned int x) 13 + static __always_inline __attribute_const__ int generic_fls(unsigned int x) 14 14 { 15 15 int r = 32; 16 16
+2 -2
tools/include/asm-generic/bitops/fls64.h
··· 16 16 * at position 64. 17 17 */ 18 18 #if BITS_PER_LONG == 32 19 - static __always_inline int fls64(__u64 x) 19 + static __always_inline __attribute_const__ int fls64(__u64 x) 20 20 { 21 21 __u32 h = x >> 32; 22 22 if (h) ··· 24 24 return fls(x); 25 25 } 26 26 #elif BITS_PER_LONG == 64 27 - static __always_inline int fls64(__u64 x) 27 + static __always_inline __attribute_const__ int fls64(__u64 x) 28 28 { 29 29 if (x == 0) 30 30 return 0;
+51 -12
tools/include/uapi/drm/drm.h
··· 597 597 int drm_dd_minor; 598 598 }; 599 599 600 - /* DRM_IOCTL_GEM_CLOSE ioctl argument type */ 600 + /** 601 + * struct drm_gem_close - Argument for &DRM_IOCTL_GEM_CLOSE ioctl. 602 + * @handle: Handle of the object to be closed. 603 + * @pad: Padding. 604 + * 605 + * Releases the handle to an mm object. 606 + */ 601 607 struct drm_gem_close { 602 - /** Handle of the object to be closed. */ 603 608 __u32 handle; 604 609 __u32 pad; 605 610 }; 606 611 607 - /* DRM_IOCTL_GEM_FLINK ioctl argument type */ 612 + /** 613 + * struct drm_gem_flink - Argument for &DRM_IOCTL_GEM_FLINK ioctl. 614 + * @handle: Handle for the object being named. 615 + * @name: Returned global name. 616 + * 617 + * Create a global name for an object, returning the name. 618 + * 619 + * Note that the name does not hold a reference; when the object 620 + * is freed, the name goes away. 621 + */ 608 622 struct drm_gem_flink { 609 - /** Handle for the object being named */ 610 623 __u32 handle; 611 - 612 - /** Returned global name */ 613 624 __u32 name; 614 625 }; 615 626 616 - /* DRM_IOCTL_GEM_OPEN ioctl argument type */ 627 + /** 628 + * struct drm_gem_open - Argument for &DRM_IOCTL_GEM_OPEN ioctl. 629 + * @name: Name of object being opened. 630 + * @handle: Returned handle for the object. 631 + * @size: Returned size of the object 632 + * 633 + * Open an object using the global name, returning a handle and the size. 634 + * 635 + * This handle (of course) holds a reference to the object, so the object 636 + * will not go away until the handle is deleted. 637 + */ 617 638 struct drm_gem_open { 618 - /** Name of object being opened */ 619 639 __u32 name; 620 - 621 - /** Returned handle for the object */ 622 640 __u32 handle; 623 - 624 - /** Returned size of the object */ 625 641 __u64 size; 642 + }; 643 + 644 + /** 645 + * struct drm_gem_change_handle - Argument for &DRM_IOCTL_GEM_CHANGE_HANDLE ioctl. 646 + * @handle: The handle of a gem object. 647 + * @new_handle: An available gem handle. 
648 + * 649 + * This ioctl changes the handle of a GEM object to the specified one. 650 + * The new handle must be unused. On success the old handle is closed 651 + * and all further IOCTL should refer to the new handle only. 652 + * Calls to DRM_IOCTL_PRIME_FD_TO_HANDLE will return the new handle. 653 + */ 654 + struct drm_gem_change_handle { 655 + __u32 handle; 656 + __u32 new_handle; 626 657 }; 627 658 628 659 /** ··· 1339 1308 * The call will fail if the name contains whitespaces or non-printable chars. 1340 1309 */ 1341 1310 #define DRM_IOCTL_SET_CLIENT_NAME DRM_IOWR(0xD1, struct drm_set_client_name) 1311 + 1312 + /** 1313 + * DRM_IOCTL_GEM_CHANGE_HANDLE - Move an object to a different handle 1314 + * 1315 + * Some applications (notably CRIU) need objects to have specific gem handles. 1316 + * This ioctl changes the object at one gem handle to use a new gem handle. 1317 + */ 1318 + #define DRM_IOCTL_GEM_CHANGE_HANDLE DRM_IOWR(0xD2, struct drm_gem_change_handle) 1342 1319 1343 1320 /* 1344 1321 * Device specific ioctls should only be in their respective headers
+3
tools/include/uapi/linux/kvm.h
··· 962 962 #define KVM_CAP_ARM_EL2_E2H0 241 963 963 #define KVM_CAP_RISCV_MP_STATE_RESET 242 964 964 #define KVM_CAP_ARM_CACHEABLE_PFNMAP_SUPPORTED 243 965 + #define KVM_CAP_GUEST_MEMFD_FLAGS 244 965 966 966 967 struct kvm_irq_routing_irqchip { 967 968 __u32 irqchip; ··· 1599 1598 #define KVM_MEMORY_ATTRIBUTE_PRIVATE (1ULL << 3) 1600 1599 1601 1600 #define KVM_CREATE_GUEST_MEMFD _IOWR(KVMIO, 0xd4, struct kvm_create_guest_memfd) 1601 + #define GUEST_MEMFD_FLAG_MMAP (1ULL << 0) 1602 + #define GUEST_MEMFD_FLAG_INIT_SHARED (1ULL << 1) 1602 1603 1603 1604 struct kvm_create_guest_memfd { 1604 1605 __u64 size;
+1
tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
··· 345 345 333 common io_pgetevents sys_io_pgetevents 346 346 334 common rseq sys_rseq 347 347 335 common uretprobe sys_uretprobe 348 + 336 common uprobe sys_uprobe 348 349 # don't use numbers 387 through 423, add new calls after the last 349 350 # 'common' entry 350 351 424 common pidfd_send_signal sys_pidfd_send_signal
+1
tools/perf/trace/beauty/include/uapi/linux/fcntl.h
··· 111 111 #define PIDFD_SELF_THREAD_GROUP -10001 /* Current thread group leader. */ 112 112 113 113 #define FD_PIDFS_ROOT -10002 /* Root of the pidfs filesystem */ 114 + #define FD_NSFS_ROOT -10003 /* Root of the nsfs filesystem */ 114 115 #define FD_INVALID -10009 /* Invalid file descriptor: -10000 - EBADF = -10009 */ 115 116 116 117 /* Generic flags for the *at(2) family of syscalls. */
+4 -1
tools/perf/trace/beauty/include/uapi/linux/fs.h
··· 430 430 /* buffered IO that drops the cache after reading or writing data */ 431 431 #define RWF_DONTCACHE ((__force __kernel_rwf_t)0x00000080) 432 432 433 + /* prevent pipe and socket writes from raising SIGPIPE */ 434 + #define RWF_NOSIGNAL ((__force __kernel_rwf_t)0x00000100) 435 + 433 436 /* mask of flags supported by the kernel */ 434 437 #define RWF_SUPPORTED (RWF_HIPRI | RWF_DSYNC | RWF_SYNC | RWF_NOWAIT |\ 435 438 RWF_APPEND | RWF_NOAPPEND | RWF_ATOMIC |\ 436 - RWF_DONTCACHE) 439 + RWF_DONTCACHE | RWF_NOSIGNAL) 437 440 438 441 #define PROCFS_IOCTL_MAGIC 'f' 439 442
+10
tools/perf/trace/beauty/include/uapi/linux/prctl.h
··· 177 177 178 178 #define PR_GET_TID_ADDRESS 40 179 179 180 + /* 181 + * Flags for PR_SET_THP_DISABLE are only applicable when disabling. Bit 0 182 + * is reserved, so PR_GET_THP_DISABLE can return "1 | flags", to effectively 183 + * return "1" when no flags were specified for PR_SET_THP_DISABLE. 184 + */ 180 185 #define PR_SET_THP_DISABLE 41 186 + /* 187 + * Don't disable THPs when explicitly advised (e.g., MADV_HUGEPAGE / 188 + * VM_HUGEPAGE, MADV_COLLAPSE). 189 + */ 190 + # define PR_THP_DISABLE_EXCEPT_ADVISED (1 << 1) 181 191 #define PR_GET_THP_DISABLE 42 182 192 183 193 /*
+5 -1
tools/perf/util/symbol.c
··· 112 112 // 'N' first seen in: 113 113 // ffffffff9b35d130 N __pfx__RNCINvNtNtNtCsbDUBuN8AbD4_4core4iter8adapters3map12map_try_foldjNtCs6vVzKs5jPr6_12drm_panic_qr7VersionuINtNtNtBa_3ops12control_flow11ControlFlowB10_ENcB10_0NCINvNvNtNtNtB8_6traits8iterator8Iterator4find5checkB10_NCNvMB12_B10_13from_segments0E0E0B12_ 114 114 // a seemingly Rust mangled name 115 + // Ditto for '1': 116 + // root@x1:~# grep ' 1 ' /proc/kallsyms 117 + // ffffffffb098bc00 1 __pfx__RNCINvNtNtNtCsfwaGRd4cjqE_4core4iter8adapters3map12map_try_foldjNtCskFudTml27HW_12drm_panic_qr7VersionuINtNtNtBa_3ops12control_flow11ControlFlowB10_ENcB10_0NCINvNvNtNtNtB8_6traits8iterator8Iterator4find5checkB10_NCNvMB12_B10_13from_segments0E0E0B12_ 118 + // ffffffffb098bc10 1 _RNCINvNtNtNtCsfwaGRd4cjqE_4core4iter8adapters3map12map_try_foldjNtCskFudTml27HW_12drm_panic_qr7VersionuINtNtNtBa_3ops12control_flow11ControlFlowB10_ENcB10_0NCINvNvNtNtNtB8_6traits8iterator8Iterator4find5checkB10_NCNvMB12_B10_13from_segments0E0E0B12_ 115 119 char symbol_type = toupper(__symbol_type); 116 120 return symbol_type == 'T' || symbol_type == 'W' || symbol_type == 'D' || symbol_type == 'B' || 117 - __symbol_type == 'u' || __symbol_type == 'l' || __symbol_type == 'N'; 121 + __symbol_type == 'u' || __symbol_type == 'l' || __symbol_type == 'N' || __symbol_type == '1'; 118 122 } 119 123 120 124 static int prefix_underscores_count(const char *str)
+4
tools/testing/selftests/drivers/net/netdevsim/Makefile
··· 20 20 udp_tunnel_nic.sh \ 21 21 # end of TEST_PROGS 22 22 23 + TEST_FILES := \ 24 + ethtool-common.sh 25 + # end of TEST_FILES 26 + 23 27 include ../../../lib.mk
+2
tools/testing/selftests/iommu/iommufd.c
··· 2638 2638 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd)); 2639 2639 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd)); 2640 2640 ASSERT_EQ(BUFFER_SIZE, unmap_cmd.size); 2641 + /* Unmap of empty is success */ 2642 + ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd)); 2641 2643 2642 2644 /* UNMAP_FLAG_ALL requires 0 iova/size */ 2643 2645 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
+2 -2
tools/testing/selftests/iommu/iommufd_utils.h
··· 1044 1044 }; 1045 1045 1046 1046 while (nvevents--) { 1047 - if (!ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_TRIGGER_VEVENT), 1048 - &trigger_vevent_cmd)) 1047 + if (ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_TRIGGER_VEVENT), 1048 + &trigger_vevent_cmd)) 1049 1049 return -1; 1050 1050 } 1051 1051 return 0;
+10 -2
tools/testing/selftests/net/gro.c
··· 754 754 static char exthdr_pck[sizeof(buf) + MIN_EXTHDR_SIZE]; 755 755 756 756 create_packet(buf, 0, 0, PAYLOAD_LEN, 0); 757 - add_ipv6_exthdr(buf, exthdr_pck, IPPROTO_HOPOPTS, ext_data1); 757 + add_ipv6_exthdr(buf, exthdr_pck, IPPROTO_DSTOPTS, ext_data1); 758 758 write_packet(fd, exthdr_pck, total_hdr_len + PAYLOAD_LEN + MIN_EXTHDR_SIZE, daddr); 759 759 760 760 create_packet(buf, PAYLOAD_LEN * 1, 0, PAYLOAD_LEN, 0); 761 - add_ipv6_exthdr(buf, exthdr_pck, IPPROTO_HOPOPTS, ext_data2); 761 + add_ipv6_exthdr(buf, exthdr_pck, IPPROTO_DSTOPTS, ext_data2); 762 762 write_packet(fd, exthdr_pck, total_hdr_len + PAYLOAD_LEN + MIN_EXTHDR_SIZE, daddr); 763 763 } 764 764 ··· 989 989 990 990 static void gro_sender(void) 991 991 { 992 + const int fin_delay_us = 100 * 1000; 992 993 static char fin_pkt[MAX_HDR_LEN]; 993 994 struct sockaddr_ll daddr = {}; 994 995 int txfd = -1; ··· 1033 1032 write_packet(txfd, fin_pkt, total_hdr_len, &daddr); 1034 1033 } else if (strcmp(testname, "tcp") == 0) { 1035 1034 send_changed_checksum(txfd, &daddr); 1035 + /* Adding sleep before sending FIN so that it is not 1036 + * received prior to other packets. 1037 + */ 1038 + usleep(fin_delay_us); 1036 1039 write_packet(txfd, fin_pkt, total_hdr_len, &daddr); 1037 1040 1038 1041 send_changed_seq(txfd, &daddr); 1042 + usleep(fin_delay_us); 1039 1043 write_packet(txfd, fin_pkt, total_hdr_len, &daddr); 1040 1044 1041 1045 send_changed_ts(txfd, &daddr); 1046 + usleep(fin_delay_us); 1042 1047 write_packet(txfd, fin_pkt, total_hdr_len, &daddr); 1043 1048 1044 1049 send_diff_opt(txfd, &daddr); 1050 + usleep(fin_delay_us); 1045 1051 write_packet(txfd, fin_pkt, total_hdr_len, &daddr); 1046 1052 } else if (strcmp(testname, "ip") == 0) { 1047 1053 send_changed_ECN(txfd, &daddr);
+4 -4
tools/testing/selftests/vsock/vmtest.sh
··· 389 389 local rc 390 390 391 391 host_oops_cnt_before=$(dmesg | grep -c -i 'Oops') 392 - host_warn_cnt_before=$(dmesg --level=warn | wc -l) 392 + host_warn_cnt_before=$(dmesg --level=warn | grep -c -i 'vsock') 393 393 vm_oops_cnt_before=$(vm_ssh -- dmesg | grep -c -i 'Oops') 394 - vm_warn_cnt_before=$(vm_ssh -- dmesg --level=warn | wc -l) 394 + vm_warn_cnt_before=$(vm_ssh -- dmesg --level=warn | grep -c -i 'vsock') 395 395 396 396 name=$(echo "${1}" | awk '{ print $1 }') 397 397 eval test_"${name}" ··· 403 403 rc=$KSFT_FAIL 404 404 fi 405 405 406 - host_warn_cnt_after=$(dmesg --level=warn | wc -l) 406 + host_warn_cnt_after=$(dmesg --level=warn | grep -c -i 'vsock') 407 407 if [[ ${host_warn_cnt_after} -gt ${host_warn_cnt_before} ]]; then 408 408 echo "FAIL: kernel warning detected on host" | log_host "${name}" 409 409 rc=$KSFT_FAIL ··· 415 415 rc=$KSFT_FAIL 416 416 fi 417 417 418 - vm_warn_cnt_after=$(vm_ssh -- dmesg --level=warn | wc -l) 418 + vm_warn_cnt_after=$(vm_ssh -- dmesg --level=warn | grep -c -i 'vsock') 419 419 if [[ ${vm_warn_cnt_after} -gt ${vm_warn_cnt_before} ]]; then 420 420 echo "FAIL: kernel warning detected on vm" | log_host "${name}" 421 421 rc=$KSFT_FAIL
+1 -1
tools/tracing/latency/latency-collector.c
··· 1725 1725 "-n, --notrace\t\tIf latency is detected, do not print out the content of\n" 1726 1726 "\t\t\tthe trace file to standard output\n\n" 1727 1727 1728 - "-t, --threads NRTHR\tRun NRTHR threads for printing. Default is %d.\n\n" 1728 + "-e, --threads NRTHR\tRun NRTHR threads for printing. Default is %d.\n\n" 1729 1729 1730 1730 "-r, --random\t\tArbitrarily sleep a certain amount of time, default\n" 1731 1731 "\t\t\t%ld ms, before reading the trace file. The\n"