Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

tools/testing/selftests/net/mptcp/mptcp_join.sh
  34aa6e3bccd8 ("selftests: mptcp: add ip mptcp wrappers")

  857898eb4b28 ("selftests: mptcp: add missing join check")
  6ef84b1517e0 ("selftests: mptcp: more robust signal race test")
https://lore.kernel.org/all/20220221131842.468893-1-broonie@kernel.org/

drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
  fb7e76ea3f3b6 ("net/mlx5e: TC, Skip redundant ct clear actions")
  c63741b426e11 ("net/mlx5e: Fix MPLSoUDP encap to use MPLS action information")

  09bf97923224f ("net/mlx5e: TC, Move pedit_headers_action to parse_attr")
  84ba8062e383 ("net/mlx5e: Test CT and SAMPLE on flow attr")
  efe6f961cd2e ("net/mlx5e: CT, Don't set flow flag CT for ct clear flow")
  3b49a7edec1d ("net/mlx5e: TC, Reject rules with multiple CT actions")

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+1756 -925
+1
Documentation/ABI/testing/sysfs-class-power
··· 468 468 auto: Charge normally, respect thresholds 469 469 inhibit-charge: Do not charge while AC is attached 470 470 force-discharge: Force discharge while AC is attached 471 + ================ ==================================== 471 472 472 473 What: /sys/class/power_supply/<supply_name>/technology 473 474 Date: May 2007
-1
Documentation/devicetree/bindings/gpio/sifive,gpio.yaml
··· 7 7 title: SiFive GPIO controller 8 8 9 9 maintainers: 10 - - Yash Shah <yash.shah@sifive.com> 11 10 - Paul Walmsley <paul.walmsley@sifive.com> 12 11 13 12 properties:
+1 -1
Documentation/devicetree/bindings/mfd/ti,j721e-system-controller.yaml
··· 20 20 21 21 maintainers: 22 22 - Kishon Vijay Abraham I <kishon@ti.com> 23 - - Roger Quadros <rogerq@ti.com 23 + - Roger Quadros <rogerq@kernel.org> 24 24 25 25 properties: 26 26 compatible:
+1 -1
Documentation/devicetree/bindings/phy/ti,omap-usb2.yaml
··· 8 8 9 9 maintainers: 10 10 - Kishon Vijay Abraham I <kishon@ti.com> 11 - - Roger Quadros <rogerq@ti.com> 11 + - Roger Quadros <rogerq@kernel.org> 12 12 13 13 properties: 14 14 compatible:
-1
Documentation/devicetree/bindings/pwm/pwm-sifive.yaml
··· 8 8 title: SiFive PWM controller 9 9 10 10 maintainers: 11 - - Yash Shah <yash.shah@sifive.com> 12 11 - Sagar Kadam <sagar.kadam@sifive.com> 13 12 - Paul Walmsley <paul.walmsley@sifive.com> 14 13
-1
Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml
··· 9 9 10 10 maintainers: 11 11 - Sagar Kadam <sagar.kadam@sifive.com> 12 - - Yash Shah <yash.shah@sifive.com> 13 12 - Paul Walmsley <paul.walmsley@sifive.com> 14 13 15 14 description:
+1
Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml
··· 8 8 9 9 maintainers: 10 10 - Cheng-Yi Chiang <cychiang@chromium.org> 11 + - Tzung-Bi Shih <tzungbi@google.com> 11 12 12 13 description: | 13 14 Google's ChromeOS EC codec is a digital mic codec provided by the
+1 -1
Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml
··· 7 7 title: Bindings for the TI wrapper module for the Cadence USBSS-DRD controller 8 8 9 9 maintainers: 10 - - Roger Quadros <rogerq@ti.com> 10 + - Roger Quadros <rogerq@kernel.org> 11 11 12 12 properties: 13 13 compatible:
+1 -1
Documentation/devicetree/bindings/usb/ti,keystone-dwc3.yaml
··· 7 7 title: TI Keystone Soc USB Controller 8 8 9 9 maintainers: 10 - - Roger Quadros <rogerq@ti.com> 10 + - Roger Quadros <rogerq@kernel.org> 11 11 12 12 properties: 13 13 compatible:
+13 -16
MAINTAINERS
··· 4549 4549 4550 4550 CHROMEOS EC CODEC DRIVER 4551 4551 M: Cheng-Yi Chiang <cychiang@chromium.org> 4552 + M: Tzung-Bi Shih <tzungbi@google.com> 4552 4553 R: Guenter Roeck <groeck@chromium.org> 4553 4554 S: Maintained 4554 4555 F: Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml ··· 7013 7012 S: Maintained 7014 7013 F: drivers/edac/sb_edac.c 7015 7014 7016 - EDAC-SIFIVE 7017 - M: Yash Shah <yash.shah@sifive.com> 7018 - L: linux-edac@vger.kernel.org 7019 - S: Supported 7020 - F: drivers/edac/sifive_edac.c 7021 - 7022 7015 EDAC-SKYLAKE 7023 7016 M: Tony Luck <tony.luck@intel.com> 7024 7017 L: linux-edac@vger.kernel.org ··· 9258 9263 S: Maintained 9259 9264 W: https://github.com/o2genum/ideapad-slidebar 9260 9265 F: drivers/input/misc/ideapad_slidebar.c 9266 + 9267 + IDMAPPED MOUNTS 9268 + M: Christian Brauner <brauner@kernel.org> 9269 + L: linux-fsdevel@vger.kernel.org 9270 + S: Maintained 9271 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/brauner/linux.git 9272 + F: Documentation/filesystems/idmappings.rst 9273 + F: tools/testing/selftests/mount_setattr/ 9274 + F: include/linux/mnt_idmapping.h 9261 9275 9262 9276 IDT VersaClock 5 CLOCK DRIVER 9263 9277 M: Luca Ceresoli <luca@lucaceresoli.net> ··· 16004 16000 F: drivers/misc/fastrpc.c 16005 16001 F: include/uapi/misc/fastrpc.h 16006 16002 16007 - QUALCOMM GENERIC INTERFACE I2C DRIVER 16008 - M: Akash Asthana <akashast@codeaurora.org> 16009 - M: Mukesh Savaliya <msavaliy@codeaurora.org> 16010 - L: linux-i2c@vger.kernel.org 16011 - L: linux-arm-msm@vger.kernel.org 16012 - S: Supported 16013 - F: drivers/i2c/busses/i2c-qcom-geni.c 16014 - 16015 16003 QUALCOMM HEXAGON ARCHITECTURE 16016 16004 M: Brian Cain <bcain@codeaurora.org> 16017 16005 L: linux-hexagon@vger.kernel.org ··· 16075 16079 F: drivers/mtd/nand/raw/qcom_nandc.c 16076 16080 16077 16081 QUALCOMM RMNET DRIVER 16078 - M: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org> 16079 - M: Sean Tranchetti <stranche@codeaurora.org> 16082 + M: Subash Abhinov Kasiviswanathan <quic_subashab@quicinc.com> 16083 + M: Sean Tranchetti <quic_stranche@quicinc.com> 16080 16084 L: netdev@vger.kernel.org 16081 16085 S: Maintained 16082 16086 F: Documentation/networking/device_drivers/cellular/qualcomm/rmnet.rst ··· 16368 16372 16369 16373 REALTEK RTL83xx SMI DSA ROUTER CHIPS 16370 16374 M: Linus Walleij <linus.walleij@linaro.org> 16375 + M: Alvin Šipraga <alsi@bang-olufsen.dk> 16371 16376 S: Maintained 16372 16377 F: Documentation/devicetree/bindings/net/dsa/realtek-smi.txt 16373 16378 F: drivers/net/dsa/realtek/*
+1 -1
Makefile
··· 2 2 VERSION = 5 3 3 PATCHLEVEL = 17 4 4 SUBLEVEL = 0 5 - EXTRAVERSION = -rc4 5 + EXTRAVERSION = -rc5 6 6 NAME = Superb Owl 7 7 8 8 # *DOCUMENTATION*
+1 -1
arch/arm64/include/asm/el2_setup.h
··· 106 106 msr_s SYS_ICC_SRE_EL2, x0 107 107 isb // Make sure SRE is now set 108 108 mrs_s x0, SYS_ICC_SRE_EL2 // Read SRE back, 109 - tbz x0, #0, 1f // and check that it sticks 109 + tbz x0, #0, .Lskip_gicv3_\@ // and check that it sticks 110 110 msr_s SYS_ICH_HCR_EL2, xzr // Reset ICC_HCR_EL2 to defaults 111 111 .Lskip_gicv3_\@: 112 112 .endm
+7 -7
arch/parisc/kernel/unaligned.c
··· 340 340 : "r" (val), "r" (regs->ior), "r" (regs->isr) 341 341 : "r19", "r20", "r21", "r22", "r1", FIXUP_BRANCH_CLOBBER ); 342 342 343 - return 0; 343 + return ret; 344 344 } 345 345 static int emulate_std(struct pt_regs *regs, int frreg, int flop) 346 346 { ··· 397 397 __asm__ __volatile__ ( 398 398 " mtsp %4, %%sr1\n" 399 399 " zdep %2, 29, 2, %%r19\n" 400 - " dep %%r0, 31, 2, %2\n" 400 + " dep %%r0, 31, 2, %3\n" 401 401 " mtsar %%r19\n" 402 402 " zvdepi -2, 32, %%r19\n" 403 403 "1: ldw 0(%%sr1,%3),%%r20\n" ··· 409 409 " andcm %%r21, %%r19, %%r21\n" 410 410 " or %1, %%r20, %1\n" 411 411 " or %2, %%r21, %2\n" 412 - "3: stw %1,0(%%sr1,%1)\n" 412 + "3: stw %1,0(%%sr1,%3)\n" 413 413 "4: stw %%r1,4(%%sr1,%3)\n" 414 414 "5: stw %2,8(%%sr1,%3)\n" 415 415 " copy %%r0, %0\n" ··· 596 596 ret = ERR_NOTHANDLED; /* "undefined", but lets kill them. */ 597 597 break; 598 598 } 599 - #ifdef CONFIG_PA20 600 599 switch (regs->iir & OPCODE2_MASK) 601 600 { 602 601 case OPCODE_FLDD_L: ··· 606 607 flop=1; 607 608 ret = emulate_std(regs, R2(regs->iir),1); 608 609 break; 610 + #ifdef CONFIG_PA20 609 611 case OPCODE_LDD_L: 610 612 ret = emulate_ldd(regs, R2(regs->iir),0); 611 613 break; 612 614 case OPCODE_STD_L: 613 615 ret = emulate_std(regs, R2(regs->iir),0); 614 616 break; 615 - } 616 617 #endif 618 + } 617 619 switch (regs->iir & OPCODE3_MASK) 618 620 { 619 621 case OPCODE_FLDW_L: 620 622 flop=1; 621 - ret = emulate_ldw(regs, R2(regs->iir),0); 623 + ret = emulate_ldw(regs, R2(regs->iir), 1); 622 624 break; 623 625 case OPCODE_LDW_M: 624 - ret = emulate_ldw(regs, R2(regs->iir),1); 626 + ret = emulate_ldw(regs, R2(regs->iir), 0); 625 627 break; 626 628 627 629 case OPCODE_FSTW_L:
+2 -2
arch/powerpc/kernel/head_book3s_32.S
··· 421 421 */ 422 422 /* Get PTE (linux-style) and check access */ 423 423 mfspr r3,SPRN_IMISS 424 - #ifdef CONFIG_MODULES 424 + #if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE) 425 425 lis r1, TASK_SIZE@h /* check if kernel address */ 426 426 cmplw 0,r1,r3 427 427 #endif 428 428 mfspr r2, SPRN_SDR1 429 429 li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC | _PAGE_USER 430 430 rlwinm r2, r2, 28, 0xfffff000 431 - #ifdef CONFIG_MODULES 431 + #if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE) 432 432 bgt- 112f 433 433 lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */ 434 434 li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
+2
arch/powerpc/lib/sstep.c
··· 3264 3264 case BARRIER_EIEIO: 3265 3265 eieio(); 3266 3266 break; 3267 + #ifdef CONFIG_PPC64 3267 3268 case BARRIER_LWSYNC: 3268 3269 asm volatile("lwsync" : : : "memory"); 3269 3270 break; 3270 3271 case BARRIER_PTESYNC: 3271 3272 asm volatile("ptesync" : : : "memory"); 3272 3273 break; 3274 + #endif 3273 3275 } 3274 3276 break; 3275 3277
+47 -25
arch/riscv/kernel/sbi.c
··· 5 5 * Copyright (c) 2020 Western Digital Corporation or its affiliates. 6 6 */ 7 7 8 + #include <linux/bits.h> 8 9 #include <linux/init.h> 9 10 #include <linux/pm.h> 10 11 #include <linux/reboot.h> ··· 86 85 pr_warn("Unable to send any request to hartid > BITS_PER_LONG for SBI v0.1\n"); 87 86 break; 88 87 } 89 - hmask |= 1 << hartid; 88 + hmask |= BIT(hartid); 90 89 } 91 90 92 91 return hmask; ··· 161 160 { 162 161 unsigned long hart_mask; 163 162 164 - if (!cpu_mask) 163 + if (!cpu_mask || cpumask_empty(cpu_mask)) 165 164 cpu_mask = cpu_online_mask; 166 165 hart_mask = __sbi_v01_cpumask_to_hartmask(cpu_mask); 167 166 ··· 177 176 int result = 0; 178 177 unsigned long hart_mask; 179 178 180 - if (!cpu_mask) 179 + if (!cpu_mask || cpumask_empty(cpu_mask)) 181 180 cpu_mask = cpu_online_mask; 182 181 hart_mask = __sbi_v01_cpumask_to_hartmask(cpu_mask); 183 182 ··· 250 249 251 250 static int __sbi_send_ipi_v02(const struct cpumask *cpu_mask) 252 251 { 253 - unsigned long hartid, cpuid, hmask = 0, hbase = 0; 252 + unsigned long hartid, cpuid, hmask = 0, hbase = 0, htop = 0; 254 253 struct sbiret ret = {0}; 255 254 int result; 256 255 257 - if (!cpu_mask) 256 + if (!cpu_mask || cpumask_empty(cpu_mask)) 258 257 cpu_mask = cpu_online_mask; 259 258 260 259 for_each_cpu(cpuid, cpu_mask) { 261 260 hartid = cpuid_to_hartid_map(cpuid); 262 - if (hmask && ((hbase + BITS_PER_LONG) <= hartid)) { 263 - ret = sbi_ecall(SBI_EXT_IPI, SBI_EXT_IPI_SEND_IPI, 264 - hmask, hbase, 0, 0, 0, 0); 265 - if (ret.error) 266 - goto ecall_failed; 267 - hmask = 0; 268 - hbase = 0; 261 + if (hmask) { 262 + if (hartid + BITS_PER_LONG <= htop || 263 + hbase + BITS_PER_LONG <= hartid) { 264 + ret = sbi_ecall(SBI_EXT_IPI, 265 + SBI_EXT_IPI_SEND_IPI, hmask, 266 + hbase, 0, 0, 0, 0); 267 + if (ret.error) 268 + goto ecall_failed; 269 + hmask = 0; 270 + } else if (hartid < hbase) { 271 + /* shift the mask to fit lower hartid */ 272 + hmask <<= hbase - hartid; 273 + hbase = hartid; 274 + } 269 275 } 270 276 if (!hmask) { 271 277 hbase = hartid; 272 278 htop = hartid; 273 279 } else if (hartid > htop) { 274 280 htop = hartid; 275 281 } 276 282 hmask |= BIT(hartid - hbase); 277 283 } 278 284 279 285 if (hmask) { ··· 356 344 unsigned long start, unsigned long size, 357 345 unsigned long arg4, unsigned long arg5) 358 346 { 359 - unsigned long hartid, cpuid, hmask = 0, hbase = 0; 347 + unsigned long hartid, cpuid, hmask = 0, hbase = 0, htop = 0; 360 348 int result; 361 349 362 - if (!cpu_mask) 350 + if (!cpu_mask || cpumask_empty(cpu_mask)) 363 351 cpu_mask = cpu_online_mask; 364 352 365 353 for_each_cpu(cpuid, cpu_mask) { 366 354 hartid = cpuid_to_hartid_map(cpuid); 367 - if (hmask && ((hbase + BITS_PER_LONG) <= hartid)) { 368 - result = __sbi_rfence_v02_call(fid, hmask, hbase, 369 - start, size, arg4, arg5); 370 - if (result) 371 - return result; 372 - hmask = 0; 373 - hbase = 0; 355 + if (hmask) { 356 + if (hartid + BITS_PER_LONG <= htop || 357 + hbase + BITS_PER_LONG <= hartid) { 358 + result = __sbi_rfence_v02_call(fid, hmask, 359 + hbase, start, size, arg4, arg5); 360 + if (result) 361 + return result; 362 + hmask = 0; 363 + } else if (hartid < hbase) { 364 + /* shift the mask to fit lower hartid */ 365 + hmask <<= hbase - hartid; 366 + hbase = hartid; 367 + } 374 368 } 375 369 if (!hmask) { 376 370 hbase = hartid; 377 371 htop = hartid; 372 + } else if (hartid > htop) { 373 + htop = hartid; 374 + } 375 + hmask |= BIT(hartid - hbase); 378 376 } 379 377 380 378 if (hmask) {
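The sbi.c hunks above replace the old single-window batching, which forced an ecall whenever a hart id fell outside BITS_PER_LONG bits above hbase and computed a bogus mask when a hart id below hbase appeared (hartid - hbase underflows), with an hbase/htop window that can also grow downward by shifting the mask. A minimal user-space sketch of that windowing, assuming a hypothetical flush() callback in place of the real sbi_ecall()/__sbi_rfence_v02_call():

/*
 * Minimal sketch of the hbase/htop hart-mask batching above. flush()
 * stands in for the SBI call; the batching logic mirrors the hunk.
 */
#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

static void flush(unsigned long mask, unsigned long base)
{
    printf("ecall: mask=%#lx base=%lu\n", mask, base);
}

static void send_to_harts(const unsigned long *hartids, int n)
{
    unsigned long hmask = 0, hbase = 0, htop = 0;
    int i;

    for (i = 0; i < n; i++) {
        unsigned long hartid = hartids[i];

        if (hmask) {
            if (hartid + BITS_PER_LONG <= htop ||
                hbase + BITS_PER_LONG <= hartid) {
                flush(hmask, hbase);      /* window exhausted */
                hmask = 0;
            } else if (hartid < hbase) {
                hmask <<= hbase - hartid; /* grow the window downward */
                hbase = hartid;
            }
        }
        if (!hmask) {
            hbase = hartid;
            htop = hartid;
        } else if (hartid > htop) {
            htop = hartid;
        }
        hmask |= 1UL << (hartid - hbase);
    }

    if (hmask)
        flush(hmask, hbase);
}

int main(void)
{
    unsigned long ids[] = { 4, 2, 3, 67 };

    /* harts 2..4 batch into one call (mask 0x7, base 2); 67 gets its own */
    send_to_harts(ids, 4);
    return 0;
}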
+1 -9
arch/x86/kernel/cpu/sgx/main.c
··· 344 344 { 345 345 struct sgx_epc_page *chunk[SGX_NR_TO_SCAN]; 346 346 struct sgx_backing backing[SGX_NR_TO_SCAN]; 347 - struct sgx_epc_section *section; 348 347 struct sgx_encl_page *encl_page; 349 348 struct sgx_epc_page *epc_page; 350 - struct sgx_numa_node *node; 351 349 pgoff_t page_index; 352 350 int cnt = 0; 353 351 int ret; ··· 416 418 kref_put(&encl_page->encl->refcount, sgx_encl_release); 417 419 epc_page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED; 418 420 419 - section = &sgx_epc_sections[epc_page->section]; 420 - node = section->node; 421 - 422 - spin_lock(&node->lock); 423 - list_add_tail(&epc_page->list, &node->free_page_list); 424 - spin_unlock(&node->lock); 425 - atomic_long_inc(&sgx_nr_free_pages); 421 + sgx_free_epc_page(epc_page); 426 422 } 427 423 } 428 424
+4 -5
arch/x86/kernel/fpu/regset.c
··· 91 91 const void *kbuf, const void __user *ubuf) 92 92 { 93 93 struct fpu *fpu = &target->thread.fpu; 94 - struct user32_fxsr_struct newstate; 94 + struct fxregs_state newstate; 95 95 int ret; 96 - 97 - BUILD_BUG_ON(sizeof(newstate) != sizeof(struct fxregs_state)); 98 96 99 97 if (!cpu_feature_enabled(X86_FEATURE_FXSR)) 100 98 return -ENODEV; ··· 114 116 /* Copy the state */ 115 117 memcpy(&fpu->fpstate->regs.fxsave, &newstate, sizeof(newstate)); 116 118 117 - /* Clear xmm8..15 */ 119 + /* Clear xmm8..15 for 32-bit callers */ 118 120 BUILD_BUG_ON(sizeof(fpu->__fpstate.regs.fxsave.xmm_space) != 16 * 16); 119 - memset(&fpu->fpstate->regs.fxsave.xmm_space[8], 0, 8 * 16); 121 + if (in_ia32_syscall()) 122 + memset(&fpu->fpstate->regs.fxsave.xmm_space[8*4], 0, 8 * 16); 120 123 121 124 /* Mark FP and SSE as in use when XSAVE is enabled */ 122 125 if (use_xsave())
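The regset fix restricts the xmm8..15 clearing to 32-bit callers and corrects the start index: fxregs_state declares xmm_space as an array of 32-bit words (u32 xmm_space[64], 16 registers of 16 bytes each), so register n begins at word n * 4 and the old memset at xmm_space[8] started in xmm2 rather than xmm8. A stand-alone sketch of the arithmetic, using a stand-in struct rather than the real fxregs_state:

#include <assert.h>
#include <stdint.h>
#include <string.h>

struct fxregs_sketch {
    uint32_t xmm_space[64];    /* 16 XMM registers x 4 words each */
};

static void clear_high_xmm(struct fxregs_sketch *fx)
{
    /* xmm8 starts at word 8 * 4 = 32; clear xmm8..xmm15 (8 x 16 bytes) */
    memset(&fx->xmm_space[8 * 4], 0, 8 * 16);
}

int main(void)
{
    struct fxregs_sketch fx;

    memset(&fx, 0xff, sizeof(fx));
    clear_high_xmm(&fx);
    assert(fx.xmm_space[8 * 4 - 1] == 0xffffffffu); /* last word of xmm7 kept */
    assert(fx.xmm_space[8 * 4] == 0);               /* first word of xmm8 cleared */
    return 0;
}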
+2 -2
arch/x86/kernel/ptrace.c
··· 1224 1224 }, 1225 1225 [REGSET_FP] = { 1226 1226 .core_note_type = NT_PRFPREG, 1227 - .n = sizeof(struct user_i387_struct) / sizeof(long), 1227 + .n = sizeof(struct fxregs_state) / sizeof(long), 1228 1228 .size = sizeof(long), .align = sizeof(long), 1229 1229 .active = regset_xregset_fpregs_active, .regset_get = xfpregs_get, .set = xfpregs_set 1230 1230 }, ··· 1271 1271 }, 1272 1272 [REGSET_XFP] = { 1273 1273 .core_note_type = NT_PRXFPREG, 1274 - .n = sizeof(struct user32_fxsr_struct) / sizeof(u32), 1274 + .n = sizeof(struct fxregs_state) / sizeof(u32), 1275 1275 .size = sizeof(u32), .align = sizeof(u32), 1276 1276 .active = regset_xregset_fpregs_active, .regset_get = xfpregs_get, .set = xfpregs_set 1277 1277 },
+2
block/bfq-iosched.c
··· 7018 7018 spin_unlock_irq(&bfqd->lock); 7019 7019 #endif 7020 7020 7021 + wbt_enable_default(bfqd->queue); 7022 + 7021 7023 kfree(bfqd); 7022 7024 } 7023 7025
+2 -8
block/blk-core.c
··· 284 284 wake_up_all(&q->mq_freeze_wq); 285 285 } 286 286 287 - void blk_set_queue_dying(struct request_queue *q) 288 - { 289 - blk_queue_flag_set(QUEUE_FLAG_DYING, q); 290 - blk_queue_start_drain(q); 291 - } 292 - EXPORT_SYMBOL_GPL(blk_set_queue_dying); 293 - 294 287 /** 295 288 * blk_cleanup_queue - shutdown a request queue 296 289 * @q: request queue to shutdown ··· 301 308 WARN_ON_ONCE(blk_queue_registered(q)); 302 309 303 310 /* mark @q DYING, no new request or merges will be allowed afterwards */ 304 - blk_set_queue_dying(q); 311 + blk_queue_flag_set(QUEUE_FLAG_DYING, q); 312 + blk_queue_start_drain(q); 305 313 306 314 blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q); 307 315 blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
+1 -1
block/blk-map.c
··· 446 446 if (bytes > len) 447 447 bytes = len; 448 448 449 - page = alloc_page(GFP_NOIO | gfp_mask); 449 + page = alloc_page(GFP_NOIO | __GFP_ZERO | gfp_mask); 450 450 if (!page) 451 451 goto cleanup; 452 452
+4
block/blk-mq.c
··· 736 736 737 737 /* Completion has already been traced */ 738 738 bio_clear_flag(bio, BIO_TRACE_COMPLETION); 739 + 740 + if (req_op(req) == REQ_OP_ZONE_APPEND) 741 + bio->bi_iter.bi_sector = req->__sector; 742 + 739 743 if (!is_flush) 740 744 bio_endio(bio); 741 745 bio = next;
-2
block/elevator.c
··· 525 525 kobject_del(&e->kobj); 526 526 527 527 e->registered = 0; 528 - /* Re-enable throttling in case elevator disabled it */ 529 - wbt_enable_default(q); 530 528 } 531 529 } 532 530
+2
block/fops.c
··· 289 289 struct kiocb *iocb = dio->iocb; 290 290 ssize_t ret; 291 291 292 + WRITE_ONCE(iocb->private, NULL); 293 + 292 294 if (likely(!bio->bi_status)) { 293 295 ret = dio->size; 294 296 iocb->ki_pos += ret;
+14
block/genhd.c
··· 549 549 EXPORT_SYMBOL(device_add_disk); 550 550 551 551 /** 552 + * blk_mark_disk_dead - mark a disk as dead 553 + * @disk: disk to mark as dead 554 + * 555 + * Mark the disk as dead (e.g. surprise removed) and don't accept any new I/O 556 + * to this disk. 557 + */ 558 + void blk_mark_disk_dead(struct gendisk *disk) 559 + { 560 + set_bit(GD_DEAD, &disk->state); 561 + blk_queue_start_drain(disk->queue); 562 + } 563 + EXPORT_SYMBOL_GPL(blk_mark_disk_dead); 564 + 565 + /** 552 566 * del_gendisk - remove the gendisk 553 567 * @disk: the struct gendisk to remove 554 568 *
+5
drivers/acpi/processor_idle.c
··· 96 96 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), 97 97 DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")}, 98 98 (void *)1}, 99 + /* T40 can not handle C3 idle state */ 100 + { set_max_cstate, "IBM ThinkPad T40", { 101 + DMI_MATCH(DMI_SYS_VENDOR, "IBM"), 102 + DMI_MATCH(DMI_PRODUCT_NAME, "23737CU")}, 103 + (void *)2}, 99 104 {}, 100 105 }; 101 106
+1 -1
drivers/acpi/tables.c
··· 400 400 401 401 acpi_get_table(id, instance, &table_header); 402 402 if (!table_header) { 403 - pr_warn("%4.4s not present\n", id); 403 + pr_debug("%4.4s not present\n", id); 404 404 return -ENODEV; 405 405 } 406 406
+7 -1
drivers/block/loop.c
··· 79 79 #include <linux/ioprio.h> 80 80 #include <linux/blk-cgroup.h> 81 81 #include <linux/sched/mm.h> 82 + #include <linux/statfs.h> 82 83 83 84 #include "loop.h" 84 85 ··· 775 774 granularity = 0; 776 775 777 776 } else { 777 + struct kstatfs sbuf; 778 + 778 779 max_discard_sectors = UINT_MAX >> 9; 779 - granularity = inode->i_sb->s_blocksize; 780 + if (!vfs_statfs(&file->f_path, &sbuf)) 781 + granularity = sbuf.f_bsize; 782 + else 783 + max_discard_sectors = 0; 780 784 } 781 785 782 786 if (max_discard_sectors) {
+1 -1
drivers/block/mtip32xx/mtip32xx.c
··· 4112 4112 "Completion workers still active!\n"); 4113 4113 } 4114 4114 4115 - blk_set_queue_dying(dd->queue); 4115 + blk_mark_disk_dead(dd->disk); 4116 4116 set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag); 4117 4117 4118 4118 /* Clean up the block layer. */
+1 -1
drivers/block/rbd.c
··· 7185 7185 * IO to complete/fail. 7186 7186 */ 7187 7187 blk_mq_freeze_queue(rbd_dev->disk->queue); 7188 - blk_set_queue_dying(rbd_dev->disk->queue); 7188 + blk_mark_disk_dead(rbd_dev->disk); 7189 7189 } 7190 7190 7191 7191 del_gendisk(rbd_dev->disk);
+1 -1
drivers/block/xen-blkfront.c
··· 2126 2126 2127 2127 /* No more blkif_request(). */ 2128 2128 blk_mq_stop_hw_queues(info->rq); 2129 - blk_set_queue_dying(info->rq); 2129 + blk_mark_disk_dead(info->gd); 2130 2130 set_capacity(info->gd, 0); 2131 2131 2132 2132 for_each_rinfo(info, rinfo, i) {
+3 -1
drivers/dma/at_xdmac.c
··· 1681 1681 __func__, atchan->irq_status); 1682 1682 1683 1683 if (!(atchan->irq_status & AT_XDMAC_CIS_LIS) && 1684 - !(atchan->irq_status & error_mask)) 1684 + !(atchan->irq_status & error_mask)) { 1685 + spin_unlock_irq(&atchan->lock); 1685 1686 return; 1687 + } 1686 1688 1687 1689 if (atchan->irq_status & error_mask) 1688 1690 at_xdmac_handle_error(atchan);
+9 -8
drivers/dma/ptdma/ptdma-dev.c
··· 207 207 if (!cmd_q->qbase) { 208 208 dev_err(dev, "unable to allocate command queue\n"); 209 209 ret = -ENOMEM; 210 - goto e_dma_alloc; 210 + goto e_destroy_pool; 211 211 } 212 212 213 213 cmd_q->qidx = 0; ··· 229 229 230 230 /* Request an irq */ 231 231 ret = request_irq(pt->pt_irq, pt_core_irq_handler, 0, dev_name(pt->dev), pt); 232 - if (ret) 233 - goto e_pool; 232 + if (ret) { 233 + dev_err(dev, "unable to allocate an IRQ\n"); 234 + goto e_free_dma; 235 + } 234 236 235 237 /* Update the device registers with queue information. */ 236 238 cmd_q->qcontrol &= ~CMD_Q_SIZE; ··· 252 250 /* Register the DMA engine support */ 253 251 ret = pt_dmaengine_register(pt); 254 252 if (ret) 255 - goto e_dmaengine; 253 + goto e_free_irq; 256 254 257 255 /* Set up debugfs entries */ 258 256 ptdma_debugfs_setup(pt); 259 257 260 258 return 0; 261 259 262 - e_dmaengine: 260 + e_free_irq: 263 261 free_irq(pt->pt_irq, pt); 264 262 265 - e_dma_alloc: 263 + e_free_dma: 266 264 dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase, cmd_q->qbase_dma); 267 265 268 - e_pool: 269 - dev_err(dev, "unable to allocate an IRQ\n"); 266 + e_destroy_pool: 270 267 dma_pool_destroy(pt->cmd_q.dma_pool); 271 268 272 269 return ret;
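The ptdma change is purely an error-path fix: the labels are renamed and reordered so each goto target unwinds exactly the steps that succeeded, in reverse order of acquisition (the old code could reach dma_free_coherent() for an allocation that never happened, and printed the IRQ error from an unrelated label). A compilable sketch of that unwind shape, with stand-in resources instead of the driver's DMA pool, queue memory and IRQ:

#include <stdio.h>
#include <stdlib.h>

struct ctx { void *pool; void *queue; };

static int acquire_pool(struct ctx *c)  { c->pool = malloc(16); return c->pool ? 0 : -1; }
static int acquire_queue(struct ctx *c) { c->queue = malloc(16); return c->queue ? 0 : -1; }
static int acquire_irq(struct ctx *c)   { (void)c; return -1; /* simulated failure */ }

static int init_ctx(struct ctx *c)
{
    int ret;

    ret = acquire_pool(c);          /* cf. dma_pool_create() */
    if (ret)
        return ret;

    ret = acquire_queue(c);         /* cf. dma_alloc_coherent() */
    if (ret)
        goto e_destroy_pool;

    ret = acquire_irq(c);           /* cf. request_irq() */
    if (ret) {
        fprintf(stderr, "unable to allocate an IRQ\n");
        goto e_free_queue;
    }

    return 0;

e_free_queue:       /* undoes acquire_queue() only */
    free(c->queue);
e_destroy_pool:     /* undoes acquire_pool() only */
    free(c->pool);
    return ret;
}

int main(void)
{
    struct ctx c = { 0, 0 };

    return init_ctx(&c) ? 1 : 0;
}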
+7 -2
drivers/dma/sh/rcar-dmac.c
··· 1868 1868 1869 1869 dmac->dev = &pdev->dev; 1870 1870 platform_set_drvdata(pdev, dmac); 1871 - dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK); 1872 - dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40)); 1871 + ret = dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK); 1872 + if (ret) 1873 + return ret; 1874 + 1875 + ret = dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40)); 1876 + if (ret) 1877 + return ret; 1873 1878 1874 1879 ret = rcar_dmac_parse_of(&pdev->dev, dmac); 1875 1880 if (ret < 0)
+3 -1
drivers/dma/sh/shdma-base.c
··· 115 115 ret = pm_runtime_get(schan->dev); 116 116 117 117 spin_unlock_irq(&schan->chan_lock); 118 - if (ret < 0) 118 + if (ret < 0) { 119 119 dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret); 120 + pm_runtime_put(schan->dev); 121 + } 120 122 121 123 pm_runtime_barrier(schan->dev); 122 124
+3 -1
drivers/dma/stm32-dmamux.c
··· 292 292 ret = of_dma_router_register(node, stm32_dmamux_route_allocate, 293 293 &stm32_dmamux->dmarouter); 294 294 if (ret) 295 - goto err_clk; 295 + goto pm_disable; 296 296 297 297 return 0; 298 298 299 + pm_disable: 300 + pm_runtime_disable(&pdev->dev); 299 301 err_clk: 300 302 clk_disable_unprepare(stm32_dmamux->clk); 301 303
+1 -1
drivers/edac/edac_mc.c
··· 215 215 else 216 216 return (char *)ptr; 217 217 218 - r = (unsigned long)p % align; 218 + r = (unsigned long)ptr % align; 219 219 220 220 if (r == 0) 221 221 return (char *)ptr;
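The one-character edac fix matters because the remainder must be taken from ptr, the pointer actually being aligned, not from p, which is the address of the pointer variable itself. The rounding idiom on its own, as a stand-alone helper with illustrative names:

#include <assert.h>
#include <stdint.h>

/* round ptr up to the next multiple of align */
static char *align_ptr(void *ptr, uintptr_t align)
{
    uintptr_t r = (uintptr_t)ptr % align;

    if (r == 0)
        return ptr;

    return (char *)ptr + (align - r);
}

int main(void)
{
    char buf[32];

    /* any address inside buf rounds up to an 8-byte boundary */
    assert(((uintptr_t)align_ptr(buf + 1, 8) % 8) == 0);
    assert(align_ptr(buf, 1) == buf);   /* align of 1 is a no-op */
    return 0;
}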
+8
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
··· 2057 2057 { 2058 2058 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2059 2059 2060 + /* SMU saves SDMA state for us */ 2061 + if (adev->in_s0ix) 2062 + return 0; 2063 + 2060 2064 return sdma_v4_0_hw_fini(adev); 2061 2065 } 2062 2066 2063 2067 static int sdma_v4_0_resume(void *handle) 2064 2068 { 2065 2069 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2070 + 2071 + /* SMU restores SDMA state for us */ 2072 + if (adev->in_s0ix) 2073 + return 0; 2066 2074 2067 2075 return sdma_v4_0_hw_init(adev); 2068 2076 }
+21 -5
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
··· 1238 1238 &dpm_context->dpm_tables.soc_table; 1239 1239 struct smu_umd_pstate_table *pstate_table = 1240 1240 &smu->pstate_table; 1241 + struct amdgpu_device *adev = smu->adev; 1241 1242 1242 1243 pstate_table->gfxclk_pstate.min = gfx_table->min; 1243 1244 pstate_table->gfxclk_pstate.peak = gfx_table->max; 1244 - if (gfx_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK) 1245 - pstate_table->gfxclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK; 1246 1245 1247 1246 pstate_table->uclk_pstate.min = mem_table->min; 1248 1247 pstate_table->uclk_pstate.peak = mem_table->max; 1249 - if (mem_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK) 1250 - pstate_table->uclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK; 1251 1248 1252 1249 pstate_table->socclk_pstate.min = soc_table->min; 1253 1250 pstate_table->socclk_pstate.peak = soc_table->max; 1254 - if (soc_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK) 1251 + 1252 + switch (adev->asic_type) { 1253 + case CHIP_SIENNA_CICHLID: 1254 + case CHIP_NAVY_FLOUNDER: 1255 + pstate_table->gfxclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK; 1256 + pstate_table->uclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK; 1255 1257 pstate_table->socclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK; 1258 + break; 1259 + case CHIP_DIMGREY_CAVEFISH: 1260 + pstate_table->gfxclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_GFXCLK; 1261 + pstate_table->uclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_MEMCLK; 1262 + pstate_table->socclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_SOCCLK; 1263 + break; 1264 + case CHIP_BEIGE_GOBY: 1265 + pstate_table->gfxclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_GFXCLK; 1266 + pstate_table->uclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_MEMCLK; 1267 + pstate_table->socclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_SOCCLK; 1268 + break; 1269 + default: 1270 + break; 1271 + } 1256 1272 1257 1273 return 0; 1258 1274 }
+8
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h
··· 33 33 #define SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK 960 34 34 #define SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK 1000 35 35 36 + #define DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_GFXCLK 1950 37 + #define DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_SOCCLK 960 38 + #define DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_MEMCLK 676 39 + 40 + #define BEIGE_GOBY_UMD_PSTATE_PROFILING_GFXCLK 2200 41 + #define BEIGE_GOBY_UMD_PSTATE_PROFILING_SOCCLK 960 42 + #define BEIGE_GOBY_UMD_PSTATE_PROFILING_MEMCLK 1000 43 + 36 44 extern void sienna_cichlid_set_ppt_funcs(struct smu_context *smu); 37 45 38 46 #endif
+2 -7
drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
··· 282 282 283 283 static int yellow_carp_mode_reset(struct smu_context *smu, int type) 284 284 { 285 - int ret = 0, index = 0; 285 + int ret = 0; 286 286 287 - index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, 288 - SMU_MSG_GfxDeviceDriverReset); 289 - if (index < 0) 290 - return index == -EACCES ? 0 : index; 291 - 292 - ret = smu_cmn_send_smc_msg_with_param(smu, (uint16_t)index, type, NULL); 287 + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, type, NULL); 293 288 if (ret) 294 289 dev_err(smu->adev->dev, "Failed to mode reset!\n"); 295 290
+8 -6
drivers/gpu/drm/drm_atomic_uapi.c
··· 76 76 state->mode_blob = NULL; 77 77 78 78 if (mode) { 79 + struct drm_property_blob *blob; 80 + 79 81 drm_mode_convert_to_umode(&umode, mode); 80 - state->mode_blob = 81 - drm_property_create_blob(state->crtc->dev, 82 - sizeof(umode), 83 - &umode); 84 - if (IS_ERR(state->mode_blob)) 85 - return PTR_ERR(state->mode_blob); 82 + blob = drm_property_create_blob(crtc->dev, 83 + sizeof(umode), &umode); 84 + if (IS_ERR(blob)) 85 + return PTR_ERR(blob); 86 86 87 87 drm_mode_copy(&state->mode, mode); 88 + 89 + state->mode_blob = blob; 88 90 state->enable = true; 89 91 drm_dbg_atomic(crtc->dev, 90 92 "Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
+1
drivers/gpu/drm/drm_gem_cma_helper.c
··· 512 512 */ 513 513 vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node); 514 514 vma->vm_flags &= ~VM_PFNMAP; 515 + vma->vm_flags |= VM_DONTEXPAND; 515 516 516 517 if (cma_obj->map_noncoherent) { 517 518 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+1
drivers/gpu/drm/i915/Kconfig
··· 101 101 config DRM_I915_GVT 102 102 bool "Enable Intel GVT-g graphics virtualization host support" 103 103 depends on DRM_I915 104 + depends on X86 104 105 depends on 64BIT 105 106 default n 106 107 help
+2 -1
drivers/gpu/drm/i915/display/intel_fbc.c
··· 1115 1115 1116 1116 /* Wa_22010751166: icl, ehl, tgl, dg1, rkl */ 1117 1117 if (DISPLAY_VER(i915) >= 11 && 1118 - (plane_state->view.color_plane[0].y + drm_rect_height(&plane_state->uapi.src)) & 3) { 1118 + (plane_state->view.color_plane[0].y + 1119 + (drm_rect_height(&plane_state->uapi.src) >> 16)) & 3) { 1119 1120 plane_state->no_fbc_reason = "plane end Y offset misaligned"; 1120 1121 return false; 1121 1122 }
+15
drivers/gpu/drm/i915/display/intel_opregion.c
··· 360 360 port++; 361 361 } 362 362 363 + /* 364 + * The port numbering and mapping here is bizarre. The now-obsolete 365 + * swsci spec supports ports numbered [0..4]. Port E is handled as a 366 + * special case, but port F and beyond are not. The functionality is 367 + * supposed to be obsolete for new platforms. Just bail out if the port 368 + * number is out of bounds after mapping. 369 + */ 370 + if (port > 4) { 371 + drm_dbg_kms(&dev_priv->drm, 372 + "[ENCODER:%d:%s] port %c (index %u) out of bounds for display power state notification\n", 373 + intel_encoder->base.base.id, intel_encoder->base.name, 374 + port_name(intel_encoder->port), port); 375 + return -EINVAL; 376 + } 377 + 363 378 if (!enable) 364 379 parm |= 4 << 8; 365 380
+2 -4
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
··· 842 842 } else if (obj->mm.madv != I915_MADV_WILLNEED) { 843 843 bo->priority = I915_TTM_PRIO_PURGE; 844 844 } else if (!i915_gem_object_has_pages(obj)) { 845 - if (bo->priority < I915_TTM_PRIO_HAS_PAGES) 846 - bo->priority = I915_TTM_PRIO_HAS_PAGES; 845 + bo->priority = I915_TTM_PRIO_NO_PAGES; 847 846 } else { 848 - if (bo->priority > I915_TTM_PRIO_NO_PAGES) 849 - bo->priority = I915_TTM_PRIO_NO_PAGES; 847 + bo->priority = I915_TTM_PRIO_HAS_PAGES; 850 848 } 851 849 852 850 ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
+2 -2
drivers/gpu/drm/i915/gvt/gtt.c
··· 1148 1148 ops->set_pfn(se, s->shadow_page.mfn); 1149 1149 } 1150 1150 1151 - /** 1151 + /* 1152 1152 * Check if can do 2M page 1153 1153 * @vgpu: target vgpu 1154 1154 * @entry: target pfn's gtt entry ··· 2193 2193 } 2194 2194 2195 2195 /** 2196 - * intel_vgpu_emulate_gtt_mmio_read - emulate GTT MMIO register read 2196 + * intel_vgpu_emulate_ggtt_mmio_read - emulate GTT MMIO register read 2197 2197 * @vgpu: a vGPU 2198 2198 * @off: register offset 2199 2199 * @p_data: data will be returned to guest
+2 -2
drivers/gpu/drm/i915/intel_pm.c
··· 4853 4853 { 4854 4854 int i; 4855 4855 4856 - for (i = 0; i < dbuf_slices[i].active_pipes; i++) { 4856 + for (i = 0; dbuf_slices[i].active_pipes != 0; i++) { 4857 4857 if (dbuf_slices[i].active_pipes == active_pipes) 4858 4858 return dbuf_slices[i].join_mbus; 4859 4859 } ··· 4870 4870 { 4871 4871 int i; 4872 4872 4873 - for (i = 0; i < dbuf_slices[i].active_pipes; i++) { 4873 + for (i = 0; dbuf_slices[i].active_pipes != 0; i++) { 4874 4874 if (dbuf_slices[i].active_pipes == active_pipes && 4875 4875 dbuf_slices[i].join_mbus == join_mbus) 4876 4876 return dbuf_slices[i].dbuf_mask[pipe];
+84 -83
drivers/gpu/drm/mediatek/mtk_dsi.c
··· 786 786 mtk_dsi_poweroff(dsi); 787 787 } 788 788 789 + static int mtk_dsi_encoder_init(struct drm_device *drm, struct mtk_dsi *dsi) 790 + { 791 + int ret; 792 + 793 + ret = drm_simple_encoder_init(drm, &dsi->encoder, 794 + DRM_MODE_ENCODER_DSI); 795 + if (ret) { 796 + DRM_ERROR("Failed to encoder init to drm\n"); 797 + return ret; 798 + } 799 + 800 + dsi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm, dsi->host.dev); 801 + 802 + ret = drm_bridge_attach(&dsi->encoder, &dsi->bridge, NULL, 803 + DRM_BRIDGE_ATTACH_NO_CONNECTOR); 804 + if (ret) 805 + goto err_cleanup_encoder; 806 + 807 + dsi->connector = drm_bridge_connector_init(drm, &dsi->encoder); 808 + if (IS_ERR(dsi->connector)) { 809 + DRM_ERROR("Unable to create bridge connector\n"); 810 + ret = PTR_ERR(dsi->connector); 811 + goto err_cleanup_encoder; 812 + } 813 + drm_connector_attach_encoder(dsi->connector, &dsi->encoder); 814 + 815 + return 0; 816 + 817 + err_cleanup_encoder: 818 + drm_encoder_cleanup(&dsi->encoder); 819 + return ret; 820 + } 821 + 822 + static int mtk_dsi_bind(struct device *dev, struct device *master, void *data) 823 + { 824 + int ret; 825 + struct drm_device *drm = data; 826 + struct mtk_dsi *dsi = dev_get_drvdata(dev); 827 + 828 + ret = mtk_dsi_encoder_init(drm, dsi); 829 + if (ret) 830 + return ret; 831 + 832 + return device_reset_optional(dev); 833 + } 834 + 835 + static void mtk_dsi_unbind(struct device *dev, struct device *master, 836 + void *data) 837 + { 838 + struct mtk_dsi *dsi = dev_get_drvdata(dev); 839 + 840 + drm_encoder_cleanup(&dsi->encoder); 841 + } 842 + 843 + static const struct component_ops mtk_dsi_component_ops = { 844 + .bind = mtk_dsi_bind, 845 + .unbind = mtk_dsi_unbind, 846 + }; 847 + 789 848 static int mtk_dsi_host_attach(struct mipi_dsi_host *host, 790 849 struct mipi_dsi_device *device) 791 850 { 792 851 struct mtk_dsi *dsi = host_to_dsi(host); 852 + struct device *dev = host->dev; 853 + int ret; 793 854 794 855 dsi->lanes = device->lanes; 795 856 dsi->format = device->format; 796 857 dsi->mode_flags = device->mode_flags; 858 + dsi->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0); 859 + if (IS_ERR(dsi->next_bridge)) 860 + return PTR_ERR(dsi->next_bridge); 797 861 862 + drm_bridge_add(&dsi->bridge); 863 + 864 + ret = component_add(host->dev, &mtk_dsi_component_ops); 865 + if (ret) { 866 + DRM_ERROR("failed to add dsi_host component: %d\n", ret); 867 + drm_bridge_remove(&dsi->bridge); 868 + return ret; 869 + } 870 + 871 + return 0; 872 + } 873 + 874 + static int mtk_dsi_host_detach(struct mipi_dsi_host *host, 875 + struct mipi_dsi_device *device) 876 + { 877 + struct mtk_dsi *dsi = host_to_dsi(host); 878 + 879 + component_del(host->dev, &mtk_dsi_component_ops); 880 + drm_bridge_remove(&dsi->bridge); 798 881 return 0; 799 882 } 800 883 ··· 1021 938 1022 939 static const struct mipi_dsi_host_ops mtk_dsi_ops = { 1023 940 .attach = mtk_dsi_host_attach, 941 + .detach = mtk_dsi_host_detach, 1024 942 .transfer = mtk_dsi_host_transfer, 1025 - }; 1026 - 1027 - static int mtk_dsi_encoder_init(struct drm_device *drm, struct mtk_dsi *dsi) 1028 - { 1029 - int ret; 1030 - 1031 - ret = drm_simple_encoder_init(drm, &dsi->encoder, 1032 - DRM_MODE_ENCODER_DSI); 1033 - if (ret) { 1034 - DRM_ERROR("Failed to encoder init to drm\n"); 1035 - return ret; 1036 - } 1037 - 1038 - dsi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm, dsi->host.dev); 1039 - 1040 - ret = drm_bridge_attach(&dsi->encoder, &dsi->bridge, NULL, 1041 - DRM_BRIDGE_ATTACH_NO_CONNECTOR); 1042 - if (ret) 1043 - goto err_cleanup_encoder; 1044 - 1045 - dsi->connector = drm_bridge_connector_init(drm, &dsi->encoder); 1046 - if (IS_ERR(dsi->connector)) { 1047 - DRM_ERROR("Unable to create bridge connector\n"); 1048 - ret = PTR_ERR(dsi->connector); 1049 - goto err_cleanup_encoder; 1050 - } 1051 - drm_connector_attach_encoder(dsi->connector, &dsi->encoder); 1052 - 1053 - return 0; 1054 - 1055 - err_cleanup_encoder: 1056 - drm_encoder_cleanup(&dsi->encoder); 1057 - return ret; 1058 - } 1059 - 1060 - static int mtk_dsi_bind(struct device *dev, struct device *master, void *data) 1061 - { 1062 - int ret; 1063 - struct drm_device *drm = data; 1064 - struct mtk_dsi *dsi = dev_get_drvdata(dev); 1065 - 1066 - ret = mtk_dsi_encoder_init(drm, dsi); 1067 - if (ret) 1068 - return ret; 1069 - 1070 - return device_reset_optional(dev); 1071 - } 1072 - 1073 - static void mtk_dsi_unbind(struct device *dev, struct device *master, 1074 - void *data) 1075 - { 1076 - struct mtk_dsi *dsi = dev_get_drvdata(dev); 1077 - 1078 - drm_encoder_cleanup(&dsi->encoder); 1079 - } 1080 - 1081 - static const struct component_ops mtk_dsi_component_ops = { 1082 - .bind = mtk_dsi_bind, 1083 - .unbind = mtk_dsi_unbind, 1084 943 }; 1085 944 1086 945 static int mtk_dsi_probe(struct platform_device *pdev) 1087 946 { 1088 947 struct mtk_dsi *dsi; 1089 948 struct device *dev = &pdev->dev; 1090 - struct drm_panel *panel; 1091 949 struct resource *regs; 1092 950 int irq_num; 1093 951 int ret; ··· 1043 1019 if (ret < 0) { 1044 1020 dev_err(dev, "failed to register DSI host: %d\n", ret); 1045 1021 return ret; 1046 - } 1047 - 1048 - ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0, 1049 - &panel, &dsi->next_bridge); 1050 - if (ret) 1051 - goto err_unregister_host; 1052 - 1053 - if (panel) { 1054 - dsi->next_bridge = devm_drm_panel_bridge_add(dev, panel); 1055 - if (IS_ERR(dsi->next_bridge)) { 1056 - ret = PTR_ERR(dsi->next_bridge); 1057 - goto err_unregister_host; 1058 - } 1059 1022 } 1060 1023 1061 1024 dsi->driver_data = of_device_get_match_data(dev); ··· 1109 1098 dsi->bridge.of_node = dev->of_node; 1110 1099 dsi->bridge.type = DRM_MODE_CONNECTOR_DSI; 1111 1100 1112 - drm_bridge_add(&dsi->bridge); 1113 - 1114 - ret = component_add(&pdev->dev, &mtk_dsi_component_ops); 1115 - if (ret) { 1116 - dev_err(&pdev->dev, "failed to add component: %d\n", ret); 1117 - goto err_unregister_host; 1118 - } 1119 - 1120 1101 return 0; 1121 1102 1122 1103 err_unregister_host: ··· 1121 1118 struct mtk_dsi *dsi = platform_get_drvdata(pdev); 1122 1119 1123 1120 mtk_output_dsi_disable(dsi); 1124 - drm_bridge_remove(&dsi->bridge); 1125 - component_del(&pdev->dev, &mtk_dsi_component_ops); 1126 1121 mipi_dsi_host_unregister(&dsi->host); 1127 1122 1128 1123 return 0;
+2 -1
drivers/gpu/drm/radeon/atombios_encoders.c
··· 198 198 * so don't register a backlight device 199 199 */ 200 200 if ((rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) && 201 - (rdev->pdev->device == 0x6741)) 201 + (rdev->pdev->device == 0x6741) && 202 + !dmi_match(DMI_PRODUCT_NAME, "iMac12,1")) 202 203 return; 203 204 204 205 if (!radeon_encoder->enc_priv)
+8 -6
drivers/hwmon/hwmon.c
··· 214 214 215 215 tzd = devm_thermal_zone_of_sensor_register(dev, index, tdata, 216 216 &hwmon_thermal_ops); 217 - /* 218 - * If CONFIG_THERMAL_OF is disabled, this returns -ENODEV, 219 - * so ignore that error but forward any other error. 220 - */ 221 - if (IS_ERR(tzd) && (PTR_ERR(tzd) != -ENODEV)) 222 - return PTR_ERR(tzd); 217 + if (IS_ERR(tzd)) { 218 + if (PTR_ERR(tzd) != -ENODEV) 219 + return PTR_ERR(tzd); 220 + dev_info(dev, "temp%d_input not attached to any thermal zone\n", 221 + index + 1); 222 + devm_kfree(dev, tdata); 223 + return 0; 224 + } 223 225 224 226 err = devm_add_action(dev, hwmon_thermal_remove_sensor, &tdata->node); 225 227 if (err)
+1 -1
drivers/hwmon/ntc_thermistor.c
··· 59 59 [NTC_NCP15XH103] = { "ncp15xh103", TYPE_NCPXXXH103 }, 60 60 [NTC_NCP18WB473] = { "ncp18wb473", TYPE_NCPXXWB473 }, 61 61 [NTC_NCP21WB473] = { "ncp21wb473", TYPE_NCPXXWB473 }, 62 - [NTC_SSG1404001221] = { "ssg1404-001221", TYPE_NCPXXWB473 }, 62 + [NTC_SSG1404001221] = { "ssg1404_001221", TYPE_NCPXXWB473 }, 63 63 [NTC_LAST] = { }, 64 64 }; 65 65
+5
drivers/hwmon/pmbus/pmbus_core.c
··· 911 911 pmbus_update_sensor_data(client, s2); 912 912 913 913 regval = status & mask; 914 + if (regval) { 915 + ret = pmbus_write_byte_data(client, page, reg, regval); 916 + if (ret) 917 + goto unlock; 918 + } 914 919 if (s1 && s2) { 915 920 s64 v1, v2; 916 921
+3 -3
drivers/i2c/busses/Kconfig
··· 488 488 489 489 config I2C_CADENCE 490 490 tristate "Cadence I2C Controller" 491 - depends on ARCH_ZYNQ || ARM64 || XTENSA 491 + depends on ARCH_ZYNQ || ARM64 || XTENSA || COMPILE_TEST 492 492 help 493 493 Say yes here to select Cadence I2C Host Controller. This controller is 494 494 e.g. used by Xilinx Zynq. ··· 680 680 681 681 config I2C_IMX 682 682 tristate "IMX I2C interface" 683 - depends on ARCH_MXC || ARCH_LAYERSCAPE || COLDFIRE 683 + depends on ARCH_MXC || ARCH_LAYERSCAPE || COLDFIRE || COMPILE_TEST 684 684 select I2C_SLAVE 685 685 help 686 686 Say Y here if you want to use the IIC bus controller on ··· 935 935 936 936 config I2C_QUP 937 937 tristate "Qualcomm QUP based I2C controller" 938 - depends on ARCH_QCOM 938 + depends on ARCH_QCOM || COMPILE_TEST 939 939 help 940 940 If you say yes to this option, support will be included for the 941 941 built-in I2C interface on the Qualcomm SoCs.
+11
drivers/i2c/busses/i2c-bcm2835.c
··· 23 23 #define BCM2835_I2C_FIFO 0x10 24 24 #define BCM2835_I2C_DIV 0x14 25 25 #define BCM2835_I2C_DEL 0x18 26 + /* 27 + * 16-bit field for the number of SCL cycles to wait after rising SCL 28 + * before deciding the slave is not responding. 0 disables the 29 + * timeout detection. 30 + */ 26 31 #define BCM2835_I2C_CLKT 0x1c 27 32 28 33 #define BCM2835_I2C_C_READ BIT(0) ··· 479 474 adap->dev.of_node = pdev->dev.of_node; 480 475 adap->quirks = of_device_get_match_data(&pdev->dev); 481 476 477 + /* 478 + * Disable the hardware clock stretching timeout. SMBUS 479 + * specifies a limit for how long the device can stretch the 480 + * clock, but core I2C doesn't. 481 + */ 482 + bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_CLKT, 0); 482 483 bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, 0); 483 484 484 485 ret = i2c_add_adapter(adap);
+1 -1
drivers/i2c/busses/i2c-brcmstb.c
··· 673 673 674 674 /* set the data in/out register size for compatible SoCs */ 675 675 if (of_device_is_compatible(dev->device->of_node, 676 - "brcmstb,brcmper-i2c")) 676 + "brcm,brcmper-i2c")) 677 677 dev->data_regsz = sizeof(u8); 678 678 else 679 679 dev->data_regsz = sizeof(u32);
+11 -5
drivers/i2c/busses/i2c-qcom-cci.c
··· 558 558 cci->master[idx].adap.quirks = &cci->data->quirks; 559 559 cci->master[idx].adap.algo = &cci_algo; 560 560 cci->master[idx].adap.dev.parent = dev; 561 - cci->master[idx].adap.dev.of_node = child; 561 + cci->master[idx].adap.dev.of_node = of_node_get(child); 562 562 cci->master[idx].master = idx; 563 563 cci->master[idx].cci = cci; 564 564 ··· 643 643 continue; 644 644 645 645 ret = i2c_add_adapter(&cci->master[i].adap); 646 - if (ret < 0) 646 + if (ret < 0) { 647 + of_node_put(cci->master[i].adap.dev.of_node); 647 648 goto error_i2c; 649 + } 648 650 } 649 651 650 652 pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC); ··· 657 655 return 0; 658 656 659 657 error_i2c: 660 - for (; i >= 0; i--) { 661 - if (cci->master[i].cci) 658 + for (--i ; i >= 0; i--) { 659 + if (cci->master[i].cci) { 662 660 i2c_del_adapter(&cci->master[i].adap); 661 + of_node_put(cci->master[i].adap.dev.of_node); 662 + } 663 663 } 664 664 error: 665 665 disable_irq(cci->irq); ··· 677 673 int i; 678 674 679 675 for (i = 0; i < cci->data->num_masters; i++) { 680 - if (cci->master[i].cci) 676 + if (cci->master[i].cci) { 681 677 i2c_del_adapter(&cci->master[i].adap); 678 + of_node_put(cci->master[i].adap.dev.of_node); 679 + } 682 680 cci_halt(cci, i); 683 681 } 684 682
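Besides pairing every stored of_node pointer with of_node_get() and putting it back on each exit path, the cci fix corrects the unwind loop to start at --i, since adapter i itself failed and was already cleaned up before the goto. The partial-unwind pattern in miniature, with counters standing in for the OF refcount and a stubbed registration step that fails on the last slot:

#include <stdio.h>

#define N 3

static int refs[N];

static void get_ref(int i)  { refs[i]++; }     /* cf. of_node_get() */
static void put_ref(int i)  { refs[i]--; }     /* cf. of_node_put() */
static int register_one(int i) { return i == 2 ? -1 : 0; /* last slot fails */ }

static int register_all(void)
{
    int i, ret = 0;

    for (i = 0; i < N; i++) {
        get_ref(i);
        ret = register_one(i);
        if (ret < 0) {
            put_ref(i);     /* drop the ref taken for the failed slot */
            goto unwind;
        }
    }
    return 0;

unwind:
    for (--i; i >= 0; i--)  /* only slots 0..i-1 actually registered */
        put_ref(i);
    return ret;
}

int main(void)
{
    int ret = register_all();

    for (int i = 0; i < N; i++)
        printf("ref[%d] = %d\n", i, refs[i]);   /* all back to 0 */
    return ret ? 1 : 0;
}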
+6
drivers/input/input.c
··· 2285 2285 /* KEY_RESERVED is not supposed to be transmitted to userspace. */ 2286 2286 __clear_bit(KEY_RESERVED, dev->keybit); 2287 2287 2288 + /* Buttonpads should not map BTN_RIGHT and/or BTN_MIDDLE. */ 2289 + if (test_bit(INPUT_PROP_BUTTONPAD, dev->propbit)) { 2290 + __clear_bit(BTN_RIGHT, dev->keybit); 2291 + __clear_bit(BTN_MIDDLE, dev->keybit); 2292 + } 2293 + 2288 2294 /* Make sure that bitmasks not mentioned in dev->evbit are clean. */ 2289 2295 input_cleanse_bitmasks(dev); 2290 2296
+10
drivers/input/mouse/psmouse-smbus.c
··· 75 75 "Marking SMBus companion %s as gone\n", 76 76 dev_name(&smbdev->client->dev)); 77 77 smbdev->dead = true; 78 + device_link_remove(&smbdev->client->dev, 79 + &smbdev->psmouse->ps2dev.serio->dev); 78 80 serio_rescan(smbdev->psmouse->ps2dev.serio); 79 81 } else { 80 82 list_del(&smbdev->node); ··· 176 174 kfree(smbdev); 177 175 } else { 178 176 smbdev->dead = true; 177 + device_link_remove(&smbdev->client->dev, 178 + &psmouse->ps2dev.serio->dev); 179 179 psmouse_dbg(smbdev->psmouse, 180 180 "posting removal request for SMBus companion %s\n", 181 181 dev_name(&smbdev->client->dev)); ··· 274 270 275 271 if (smbdev->client) { 276 272 /* We have our companion device */ 273 + if (!device_link_add(&smbdev->client->dev, 274 + &psmouse->ps2dev.serio->dev, 275 + DL_FLAG_STATELESS)) 276 + psmouse_warn(psmouse, 277 + "failed to set up link with iSMBus companion %s\n", 278 + dev_name(&smbdev->client->dev)); 277 279 return 0; 278 280 } 279 281
+12
drivers/input/touchscreen/zinitix.c
··· 571 571 572 572 #ifdef CONFIG_OF 573 573 static const struct of_device_id zinitix_of_match[] = { 574 + { .compatible = "zinitix,bt402" }, 575 + { .compatible = "zinitix,bt403" }, 576 + { .compatible = "zinitix,bt404" }, 577 + { .compatible = "zinitix,bt412" }, 578 + { .compatible = "zinitix,bt413" }, 579 + { .compatible = "zinitix,bt431" }, 580 + { .compatible = "zinitix,bt432" }, 581 + { .compatible = "zinitix,bt531" }, 574 582 { .compatible = "zinitix,bt532" }, 583 + { .compatible = "zinitix,bt538" }, 575 584 { .compatible = "zinitix,bt541" }, 585 + { .compatible = "zinitix,bt548" }, 586 + { .compatible = "zinitix,bt554" }, 587 + { .compatible = "zinitix,at100" }, 576 588 { } 577 589 }; 578 590 MODULE_DEVICE_TABLE(of, zinitix_of_match);
+1 -1
drivers/md/dm.c
··· 2077 2077 set_bit(DMF_FREEING, &md->flags); 2078 2078 spin_unlock(&_minor_lock); 2079 2079 2080 - blk_set_queue_dying(md->queue); 2080 + blk_mark_disk_dead(md->disk); 2081 2081 2082 2082 /* 2083 2083 * Take suspend_lock so that presuspend and postsuspend methods
+8 -4
drivers/mtd/devices/phram.c
··· 264 264 } 265 265 } 266 266 267 - if (erasesize) 268 - div_u64_rem(len, (uint32_t)erasesize, &rem); 269 - 270 267 if (len == 0 || erasesize == 0 || erasesize > len 271 - || erasesize > UINT_MAX || rem) { 268 + || erasesize > UINT_MAX) { 272 269 parse_err("illegal erasesize or len\n"); 270 + ret = -EINVAL; 271 + goto error; 272 + } 273 + 274 + div_u64_rem(len, (uint32_t)erasesize, &rem); 275 + if (rem) { 276 + parse_err("len is not multiple of erasesize\n"); 273 277 ret = -EINVAL; 274 278 goto error; 275 279 }
+2 -1
drivers/mtd/nand/raw/Kconfig
··· 42 42 tristate "OMAP2, OMAP3, OMAP4 and Keystone NAND controller" 43 43 depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST 44 44 depends on HAS_IOMEM 45 - select OMAP_GPMC if ARCH_K3 45 + select MEMORY 46 + select OMAP_GPMC 46 47 help 47 48 Support for NAND flash on Texas Instruments OMAP2, OMAP3, OMAP4 48 49 and Keystone platforms.
+1 -1
drivers/mtd/nand/raw/brcmnand/brcmnand.c
··· 2106 2106 mtd->oobsize / trans, 2107 2107 host->hwcfg.sector_size_1k); 2108 2108 2109 - if (!ret) { 2109 + if (ret != -EBADMSG) { 2110 2110 *err_addr = brcmnand_get_uncorrecc_addr(ctrl); 2111 2111 2112 2112 if (*err_addr)
+2 -1
drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
··· 2285 2285 this->hw.must_apply_timings = false; 2286 2286 ret = gpmi_nfc_apply_timings(this); 2287 2287 if (ret) 2288 - return ret; 2288 + goto out_pm; 2289 2289 } 2290 2290 2291 2291 dev_dbg(this->dev, "%s: %d instructions\n", __func__, op->ninstrs); ··· 2414 2414 2415 2415 this->bch = false; 2416 2416 2417 + out_pm: 2417 2418 pm_runtime_mark_last_busy(this->dev); 2418 2419 pm_runtime_put_autosuspend(this->dev); 2419 2420
+6 -1
drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
··· 68 68 struct ingenic_ecc *ecc; 69 69 70 70 pdev = of_find_device_by_node(np); 71 - if (!pdev || !platform_get_drvdata(pdev)) 71 + if (!pdev) 72 72 return ERR_PTR(-EPROBE_DEFER); 73 + 74 + if (!platform_get_drvdata(pdev)) { 75 + put_device(&pdev->dev); 76 + return ERR_PTR(-EPROBE_DEFER); 77 + } 73 78 74 79 ecc = platform_get_drvdata(pdev); 75 80 clk_prepare_enable(ecc->clk);
+6 -8
drivers/mtd/nand/raw/qcom_nandc.c
··· 2 2 /* 3 3 * Copyright (c) 2016, The Linux Foundation. All rights reserved. 4 4 */ 5 - 6 5 #include <linux/clk.h> 7 6 #include <linux/slab.h> 8 7 #include <linux/bitops.h> ··· 3072 3073 if (dma_mapping_error(dev, nandc->base_dma)) 3073 3074 return -ENXIO; 3074 3075 3075 - ret = qcom_nandc_alloc(nandc); 3076 - if (ret) 3077 - goto err_nandc_alloc; 3078 - 3079 3076 ret = clk_prepare_enable(nandc->core_clk); 3080 3077 if (ret) 3081 3078 goto err_core_clk; ··· 3079 3084 ret = clk_prepare_enable(nandc->aon_clk); 3080 3085 if (ret) 3081 3086 goto err_aon_clk; 3087 + 3088 + ret = qcom_nandc_alloc(nandc); 3089 + if (ret) 3090 + goto err_nandc_alloc; 3082 3091 3083 3092 ret = qcom_nandc_setup(nandc); 3084 3093 if (ret) ··· 3095 3096 return 0; 3096 3097 3097 3098 err_setup: 3099 + qcom_nandc_unalloc(nandc); 3100 + err_nandc_alloc: 3098 3101 clk_disable_unprepare(nandc->aon_clk); 3099 3102 err_aon_clk: 3100 3103 clk_disable_unprepare(nandc->core_clk); 3101 3104 err_core_clk: 3102 - qcom_nandc_unalloc(nandc); 3103 - err_nandc_alloc: 3104 3105 dma_unmap_resource(dev, res->start, resource_size(res), 3105 3106 DMA_BIDIRECTIONAL, 0); 3106 - 3107 3107 return ret; 3108 3108 } 3109 3109
+23 -13
drivers/mtd/parsers/qcomsmempart.c
··· 58 58 const struct mtd_partition **pparts, 59 59 struct mtd_part_parser_data *data) 60 60 { 61 + size_t len = SMEM_FLASH_PTABLE_HDR_LEN; 62 + int ret, i, j, tmpparts, numparts = 0; 61 63 struct smem_flash_pentry *pentry; 62 64 struct smem_flash_ptable *ptable; 63 - size_t len = SMEM_FLASH_PTABLE_HDR_LEN; 64 65 struct mtd_partition *parts; 65 - int ret, i, numparts; 66 66 char *name, *c; 67 67 68 68 if (IS_ENABLED(CONFIG_MTD_SPI_NOR_USE_4K_SECTORS) ··· 75 75 pr_debug("Parsing partition table info from SMEM\n"); 76 76 ptable = qcom_smem_get(SMEM_APPS, SMEM_AARM_PARTITION_TABLE, &len); 77 77 if (IS_ERR(ptable)) { 78 - pr_err("Error reading partition table header\n"); 78 + if (PTR_ERR(ptable) != -EPROBE_DEFER) 79 + pr_err("Error reading partition table header\n"); 79 80 return PTR_ERR(ptable); 80 81 } 81 82 ··· 88 87 } 89 88 90 89 /* Ensure that # of partitions is less than the max we have allocated */ 91 - numparts = le32_to_cpu(ptable->numparts); 92 - if (numparts > SMEM_FLASH_PTABLE_MAX_PARTS_V4) { 90 + tmpparts = le32_to_cpu(ptable->numparts); 91 + if (tmpparts > SMEM_FLASH_PTABLE_MAX_PARTS_V4) { 93 92 pr_err("Partition numbers exceed the max limit\n"); 94 93 return -EINVAL; 95 94 } ··· 117 116 return PTR_ERR(ptable); 118 117 } 119 118 119 + for (i = 0; i < tmpparts; i++) { 120 + pentry = &ptable->pentry[i]; 121 + if (pentry->name[0] != '\0') 122 + numparts++; 123 + } 124 + 120 125 parts = kcalloc(numparts, sizeof(*parts), GFP_KERNEL); 121 126 if (!parts) 122 127 return -ENOMEM; 123 128 124 - for (i = 0; i < numparts; i++) { 129 + for (i = 0, j = 0; i < tmpparts; i++) { 125 130 pentry = &ptable->pentry[i]; 126 131 if (pentry->name[0] == '\0') 127 132 continue; ··· 142 135 for (c = name; *c != '\0'; c++) 143 136 *c = tolower(*c); 144 137 145 - parts[i].name = name; 146 - parts[i].offset = le32_to_cpu(pentry->offset) * mtd->erasesize; 147 - parts[i].mask_flags = pentry->attr; 148 - parts[i].size = le32_to_cpu(pentry->length) * mtd->erasesize; 138 + parts[j].name = name; 139 + parts[j].offset = le32_to_cpu(pentry->offset) * mtd->erasesize; 140 + parts[j].mask_flags = pentry->attr; 141 + parts[j].size = le32_to_cpu(pentry->length) * mtd->erasesize; 149 142 pr_debug("%d: %s offs=0x%08x size=0x%08x attr:0x%08x\n", 150 143 i, pentry->name, le32_to_cpu(pentry->offset), 151 144 le32_to_cpu(pentry->length), pentry->attr); 145 + j++; 152 146 } 153 147 154 148 pr_debug("SMEM partition table found: ver: %d len: %d\n", 155 - le32_to_cpu(ptable->version), numparts); 149 + le32_to_cpu(ptable->version), tmpparts); 156 150 *pparts = parts; 157 151 158 152 return numparts; 159 153 160 154 out_free_parts: 161 - while (--i >= 0) 162 - kfree(parts[i].name); 155 + while (--j >= 0) 156 + kfree(parts[j].name); 163 157 kfree(parts); 164 158 *pparts = NULL; 165 159 ··· 174 166 175 167 for (i = 0; i < nr_parts; i++) 176 168 kfree(pparts[i].name); 169 + 170 + kfree(pparts); 177 171 } 178 172 179 173 static const struct of_device_id qcomsmem_of_match_table[] = {
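The smem parser now makes two passes over the table: count the non-empty pentry slots (tmpparts entries in the table, numparts actually used), allocate exactly numparts, then fill with a separate output index j so empty slots leave no holes, with the unwind and free paths iterating over j as well. The same two-pass compaction in miniature, with strings standing in for SMEM partition entries:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    const char *table[] = { "boot", "", "rootfs", "", "data" };
    int tmpparts = 5, numparts = 0, i, j;
    const char **parts;

    for (i = 0; i < tmpparts; i++)      /* pass 1: count used slots */
        if (table[i][0] != '\0')
            numparts++;

    parts = calloc(numparts, sizeof(*parts));
    if (!parts)
        return 1;

    for (i = 0, j = 0; i < tmpparts; i++) {  /* pass 2: compact copy */
        if (table[i][0] == '\0')
            continue;
        parts[j++] = table[i];
    }

    for (j = 0; j < numparts; j++)
        printf("parts[%d] = %s\n", j, parts[j]);
    free(parts);
    return 0;
}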
+23 -3
drivers/net/dsa/microchip/ksz_common.c
··· 26 26 struct dsa_switch *ds = dev->ds; 27 27 u8 port_member = 0, cpu_port; 28 28 const struct dsa_port *dp; 29 - int i; 29 + int i, j; 30 30 31 31 if (!dsa_is_user_port(ds, port)) 32 32 return; ··· 45 45 continue; 46 46 if (!dsa_port_bridge_same(dp, other_dp)) 47 47 continue; 48 + if (other_p->stp_state != BR_STATE_FORWARDING) 49 + continue; 48 50 49 - if (other_p->stp_state == BR_STATE_FORWARDING && 50 - p->stp_state == BR_STATE_FORWARDING) { 51 + if (p->stp_state == BR_STATE_FORWARDING) { 51 52 val |= BIT(port); 52 53 port_member |= BIT(i); 54 + } 55 + 56 + /* Retain port [i]'s relationship to other ports than [port] */ 57 + for (j = 0; j < ds->num_ports; j++) { 58 + const struct dsa_port *third_dp; 59 + struct ksz_port *third_p; 60 + 61 + if (j == i) 62 + continue; 63 + if (j == port) 64 + continue; 65 + if (!dsa_is_user_port(ds, j)) 66 + continue; 67 + third_p = &dev->ports[j]; 68 + if (third_p->stp_state != BR_STATE_FORWARDING) 69 + continue; 70 + third_dp = dsa_to_port(ds, j); 71 + if (dsa_port_bridge_same(other_dp, third_dp)) 72 + val |= BIT(j); 53 73 } 54 74 55 75 dev->dev_ops->cfg_port_member(dev, i, val | cpu_port);
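The ksz fix adds a third loop because a port's member register must keep reflecting every other forwarding port of the same bridge, not only the port whose STP state just changed. A toy recomputation of the full membership matrix (bridge ids and STP states here are invented for illustration; the real driver additionally ORs in the CPU port):

#include <stdio.h>

#define NPORTS 5
#define FORWARDING 1

static const int bridge[NPORTS] = { 0, 0, 1, 0, 1 };
static const int state[NPORTS]  = { 1, 1, 1, 0, 1 };    /* port 3 blocked */

int main(void)
{
    for (int i = 0; i < NPORTS; i++) {
        unsigned int member = 0;

        if (state[i] != FORWARDING) {
            printf("port %d: member=0x%02x\n", i, member);
            continue;
        }
        for (int j = 0; j < NPORTS; j++) {
            if (j == i || state[j] != FORWARDING)
                continue;
            if (bridge[j] == bridge[i])     /* same-bridge forwarding peer */
                member |= 1U << j;
        }
        printf("port %d: member=0x%02x\n", i, member);
    }
    return 0;
}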
+3
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
··· 100 100 MODULE_FIRMWARE(FW_FILE_NAME_E1); 101 101 MODULE_FIRMWARE(FW_FILE_NAME_E1H); 102 102 MODULE_FIRMWARE(FW_FILE_NAME_E2); 103 + MODULE_FIRMWARE(FW_FILE_NAME_E1_V15); 104 + MODULE_FIRMWARE(FW_FILE_NAME_E1H_V15); 105 + MODULE_FIRMWARE(FW_FILE_NAME_E2_V15); 103 106 104 107 int bnx2x_num_queues; 105 108 module_param_named(num_queues, bnx2x_num_queues, int, 0444);
+35 -12
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 4776 4776 return rc; 4777 4777 4778 4778 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); 4779 - req->num_mc_entries = cpu_to_le32(vnic->mc_list_count); 4780 - req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); 4779 + if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) { 4780 + req->num_mc_entries = cpu_to_le32(vnic->mc_list_count); 4781 + req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); 4782 + } 4781 4783 req->mask = cpu_to_le32(vnic->rx_mask); 4782 4784 return hwrm_req_send_silent(bp, req); 4783 4785 } ··· 7822 7820 return 0; 7823 7821 } 7824 7822 7823 + static void bnxt_remap_fw_health_regs(struct bnxt *bp) 7824 + { 7825 + if (!bp->fw_health) 7826 + return; 7827 + 7828 + if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) { 7829 + bp->fw_health->status_reliable = true; 7830 + bp->fw_health->resets_reliable = true; 7831 + } else { 7832 + bnxt_try_map_fw_health_reg(bp); 7833 + } 7834 + } 7835 + 7825 7836 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp) 7826 7837 { 7827 7838 struct bnxt_fw_health *fw_health = bp->fw_health; ··· 8687 8672 vnic->uc_filter_count = 1; 8688 8673 8689 8674 vnic->rx_mask = 0; 8675 + if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state)) 8676 + goto skip_rx_mask; 8677 + 8690 8678 if (bp->dev->flags & IFF_BROADCAST) 8691 8679 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; 8692 8680 ··· 8699 8681 if (bp->dev->flags & IFF_ALLMULTI) { 8700 8682 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 8701 8683 vnic->mc_list_count = 0; 8702 - } else { 8684 + } else if (bp->dev->flags & IFF_MULTICAST) { 8703 8685 u32 mask = 0; 8704 8686 8705 8687 bnxt_mc_list_updated(bp, &mask); ··· 8710 8692 if (rc) 8711 8693 goto err_out; 8712 8694 8695 + skip_rx_mask: 8713 8696 rc = bnxt_hwrm_set_coal(bp); 8714 8697 if (rc) 8715 8698 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n", ··· 9902 9883 resc_reinit = true; 9903 9884 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE) 9904 9885 fw_reset = true; 9905 - else if (bp->fw_health && !bp->fw_health->status_reliable) 9906 - bnxt_try_map_fw_health_reg(bp); 9886 + else 9887 + bnxt_remap_fw_health_regs(bp); 9907 9888 9908 9889 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) { 9909 9890 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n"); ··· 10383 10364 goto half_open_err; 10384 10365 } 10385 10366 10386 - rc = bnxt_alloc_mem(bp, false); 10367 + rc = bnxt_alloc_mem(bp, true); 10387 10368 if (rc) { 10388 10369 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); 10389 10370 goto half_open_err; 10390 10371 } 10391 - rc = bnxt_init_nic(bp, false); 10372 + set_bit(BNXT_STATE_HALF_OPEN, &bp->state); 10373 + rc = bnxt_init_nic(bp, true); 10392 10374 if (rc) { 10375 + clear_bit(BNXT_STATE_HALF_OPEN, &bp->state); 10393 10376 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); 10394 10377 goto half_open_err; 10395 10378 } ··· 10399 10378 10400 10379 half_open_err: 10401 10380 bnxt_free_skbs(bp); 10402 - bnxt_free_mem(bp, false); 10381 + bnxt_free_mem(bp, true); 10403 10382 dev_close(bp->dev); 10404 10383 return rc; 10405 10384 } ··· 10409 10388 */ 10410 10389 void bnxt_half_close_nic(struct bnxt *bp) 10411 10390 { 10412 - bnxt_hwrm_resource_free(bp, false, false); 10391 + bnxt_hwrm_resource_free(bp, false, true); 10413 10392 bnxt_free_skbs(bp); 10414 - bnxt_free_mem(bp, false); 10393 + bnxt_free_mem(bp, true); 10394 + clear_bit(BNXT_STATE_HALF_OPEN, &bp->state); 10415 10395 } 10416 10396 10417 10397 void bnxt_reenable_sriov(struct bnxt *bp) ··· 10828 10806 if (dev->flags & IFF_ALLMULTI) { 10829 10807 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 10830 10808 vnic->mc_list_count = 0; 10831 - } else { 10809 + } else if (dev->flags & IFF_MULTICAST) { 10832 10810 mc_update = bnxt_mc_list_updated(bp, &mask); 10833 10811 } ··· 10905 10883 !bnxt_promisc_ok(bp)) 10906 10884 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 10907 10885 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); 10908 - if (rc && vnic->mc_list_count) { 10886 + if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) { 10909 10887 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n", 10910 10888 rc); 10889 + vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; 10911 10890 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 10912 10891 vnic->mc_list_count = 0; 10913 10892 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
+1
drivers/net/ethernet/broadcom/bnxt/bnxt.h
··· 1921 1921 #define BNXT_STATE_RECOVER 12 1922 1922 #define BNXT_STATE_FW_NON_FATAL_COND 13 1923 1923 #define BNXT_STATE_FW_ACTIVATE_RESET 14 1924 + #define BNXT_STATE_HALF_OPEN 15 /* For offline ethtool tests */ 1924 1925 1925 1926 #define BNXT_NO_FW_ACCESS(bp) \ 1926 1927 (test_bit(BNXT_STATE_FW_FATAL_COND, &(bp)->state) || \
+31 -8
drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
··· 367 367 } 368 368 } 369 369 370 + /* Live patch status in NVM */ 371 + #define BNXT_LIVEPATCH_NOT_INSTALLED 0 372 + #define BNXT_LIVEPATCH_INSTALLED FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL 373 + #define BNXT_LIVEPATCH_REMOVED FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE 374 + #define BNXT_LIVEPATCH_MASK (FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL | \ 375 + FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE) 376 + #define BNXT_LIVEPATCH_ACTIVATED BNXT_LIVEPATCH_MASK 377 + 378 + #define BNXT_LIVEPATCH_STATE(flags) ((flags) & BNXT_LIVEPATCH_MASK) 379 + 370 380 static int 371 381 bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack) 372 382 { ··· 384 374 struct hwrm_fw_livepatch_query_input *query_req; 385 375 struct hwrm_fw_livepatch_output *patch_resp; 386 376 struct hwrm_fw_livepatch_input *patch_req; 377 + u16 flags, live_patch_state; 378 + bool activated = false; 387 379 u32 installed = 0; 388 - u16 flags; 389 380 u8 target; 390 381 int rc; 391 382 ··· 405 394 hwrm_req_drop(bp, query_req); 406 395 return rc; 407 396 } 408 - patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_ACTIVATE; 409 397 patch_req->loadtype = FW_LIVEPATCH_REQ_LOADTYPE_NVM_INSTALL; 410 398 patch_resp = hwrm_req_hold(bp, patch_req); 411 399 ··· 417 407 } 418 408 419 409 flags = le16_to_cpu(query_resp->status_flags); 420 - if (~flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL) 410 + live_patch_state = BNXT_LIVEPATCH_STATE(flags); 411 + 412 + if (live_patch_state == BNXT_LIVEPATCH_NOT_INSTALLED) 421 413 continue; 422 - if ((flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE) && 423 - !strncmp(query_resp->active_ver, query_resp->install_ver, 424 - sizeof(query_resp->active_ver))) 414 + 415 + if (live_patch_state == BNXT_LIVEPATCH_ACTIVATED) { 416 + activated = true; 425 417 continue; 418 + } 419 + 420 + if (live_patch_state == BNXT_LIVEPATCH_INSTALLED) 421 + patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_ACTIVATE; 422 + else if (live_patch_state == BNXT_LIVEPATCH_REMOVED) 423 + patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE; 426 424 427 425 patch_req->fw_target = target; 428 426 rc = hwrm_req_send(bp, patch_req); ··· 442 424 } 443 425 444 426 if (!rc && !installed) { 445 - NL_SET_ERR_MSG_MOD(extack, "No live patches found"); 446 - rc = -ENOENT; 427 + if (activated) { 428 + NL_SET_ERR_MSG_MOD(extack, "Live patch already activated"); 429 + rc = -EEXIST; 430 + } else { 431 + NL_SET_ERR_MSG_MOD(extack, "No live patches found"); 432 + rc = -ENOENT; 433 + } 447 434 } 448 435 hwrm_req_drop(bp, query_req); 449 436 hwrm_req_drop(bp, patch_req);
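The devlink hunk above folds the two firmware status bits into four named live-patch states. A minimal standalone sketch of that two-bit decode, with hypothetical FLAG_* constants standing in for the FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_* values:

    #include <stdio.h>

    /* Hypothetical stand-ins for the two firmware status bits. */
    #define FLAG_INSTALL 0x1    /* patch present in NVM */
    #define FLAG_ACTIVE  0x2    /* patch running in firmware */
    #define FLAG_MASK    (FLAG_INSTALL | FLAG_ACTIVE)

    enum patch_state {
            NOT_INSTALLED = 0,            /* neither bit set */
            INSTALLED     = FLAG_INSTALL, /* in NVM, not running */
            REMOVED       = FLAG_ACTIVE,  /* running, gone from NVM */
            ACTIVATED     = FLAG_MASK,    /* both bits set */
    };

    int main(void)
    {
            /* Masking with both bits maps any status word onto 4 states. */
            for (unsigned int flags = 0; flags <= FLAG_MASK; flags++)
                    printf("flags=%u -> state=%u\n", flags, flags & FLAG_MASK);
            return 0;
    }

Each state then selects a fixed next step in the patch: INSTALLED sends the ACTIVATE opcode, REMOVED sends DEACTIVATE to resync with NVM, and ACTIVATED reports -EEXIST when nothing else needed patching.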
+13 -4
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
··· 26 26 #include "bnxt_hsi.h" 27 27 #include "bnxt.h" 28 28 #include "bnxt_hwrm.h" 29 + #include "bnxt_ulp.h" 29 30 #include "bnxt_xdp.h" 30 31 #include "bnxt_ptp.h" 31 32 #include "bnxt_ethtool.h" ··· 1973 1972 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE: 1974 1973 fec->active_fec |= ETHTOOL_FEC_LLRS; 1975 1974 break; 1975 + case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE: 1976 + fec->active_fec |= ETHTOOL_FEC_OFF; 1977 + break; 1976 1978 } 1977 1979 return 0; 1978 1980 } ··· 3461 3457 if (!skb) 3462 3458 return -ENOMEM; 3463 3459 data = skb_put(skb, pkt_size); 3464 - eth_broadcast_addr(data); 3460 + ether_addr_copy(&data[i], bp->dev->dev_addr); 3465 3461 i += ETH_ALEN; 3466 3462 ether_addr_copy(&data[i], bp->dev->dev_addr); 3467 3463 i += ETH_ALEN; ··· 3555 3551 if (!offline) { 3556 3552 bnxt_run_fw_tests(bp, test_mask, &test_results); 3557 3553 } else { 3558 - rc = bnxt_close_nic(bp, false, false); 3559 - if (rc) 3554 + bnxt_ulp_stop(bp); 3555 + rc = bnxt_close_nic(bp, true, false); 3556 + if (rc) { 3557 + bnxt_ulp_start(bp, rc); 3560 3558 return; 3559 + } 3561 3560 bnxt_run_fw_tests(bp, test_mask, &test_results); 3562 3561 3563 3562 buf[BNXT_MACLPBK_TEST_IDX] = 1; ··· 3570 3563 if (rc) { 3571 3564 bnxt_hwrm_mac_loopback(bp, false); 3572 3565 etest->flags |= ETH_TEST_FL_FAILED; 3566 + bnxt_ulp_start(bp, rc); 3573 3567 return; 3574 3568 } 3575 3569 if (bnxt_run_loopback(bp)) ··· 3596 3588 } 3597 3589 bnxt_hwrm_phy_loopback(bp, false, false); 3598 3590 bnxt_half_close_nic(bp); 3599 - rc = bnxt_open_nic(bp, false, true); 3591 + rc = bnxt_open_nic(bp, true, true); 3592 + bnxt_ulp_start(bp, rc); 3600 3593 } 3601 3594 if (rc || bnxt_test_irq(bp)) { 3602 3595 buf[BNXT_IRQ_TEST_IDX] = 1;
+9 -3
drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
··· 644 644 645 645 /* Last byte of resp contains valid bit */ 646 646 valid = ((u8 *)ctx->resp) + len - 1; 647 - for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) { 647 + for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; ) { 648 648 /* make sure we read from updated DMA memory */ 649 649 dma_rmb(); 650 650 if (*valid) 651 651 break; 652 - usleep_range(1, 5); 652 + if (j < 10) { 653 + udelay(1); 654 + j++; 655 + } else { 656 + usleep_range(20, 30); 657 + j += 20; 658 + } 653 659 } 654 660 655 661 if (j >= HWRM_VALID_BIT_DELAY_USEC) { 656 662 hwrm_err(bp, ctx, "Error (timeout: %u) msg {0x%x 0x%x} len:%d v:%d\n", 657 - hwrm_total_timeout(i), req_type, 663 + hwrm_total_timeout(i) + j, req_type, 658 664 le16_to_cpu(ctx->req->seq_id), len, *valid); 659 665 goto exit; 660 666 }
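The hunk above replaces a fixed 1-5 us sleep per iteration with a two-phase poll: 1 us steps for the first ~10 us, then a back-off to ~20-30 us sleeps, with `j` counting elapsed microseconds so the enlarged budget still bounds the loop. A userspace approximation of the same shape (the completion flag and budget here are illustrative, not the driver's):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <unistd.h>

    #define POLL_BUDGET_USEC 50000  /* mirrors the new 50 ms HWRM budget */

    /* Poll a completion flag: fine-grained at first, coarser afterwards. */
    static bool poll_with_backoff(atomic_bool *done)
    {
            unsigned int j;

            for (j = 0; j < POLL_BUDGET_USEC; ) {
                    if (atomic_load(done))
                            return true;
                    if (j < 10) {
                            usleep(1);   /* fast path for quick completions */
                            j += 1;
                    } else {
                            usleep(25);  /* cheaper for long waits */
                            j += 20;     /* account ~20 us per iteration */
                    }
            }
            return false;
    }

    int main(void)
    {
            atomic_bool done = true;  /* pretend firmware already answered */

            return poll_with_backoff(&done) ? 0 : 1;
    }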
+1 -1
drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
··· 90 90 } 91 91 92 92 93 - #define HWRM_VALID_BIT_DELAY_USEC 150 93 + #define HWRM_VALID_BIT_DELAY_USEC 50000 94 94 95 95 static inline bool bnxt_cfa_hwrm_message(u16 req_type) 96 96 {
+129 -114
drivers/net/ethernet/faraday/ftgmac100.c
··· 989 989 return 0; 990 990 } 991 991 992 - static void ftgmac100_adjust_link(struct net_device *netdev) 993 - { 994 - struct ftgmac100 *priv = netdev_priv(netdev); 995 - struct phy_device *phydev = netdev->phydev; 996 - bool tx_pause, rx_pause; 997 - int new_speed; 998 - 999 - /* We store "no link" as speed 0 */ 1000 - if (!phydev->link) 1001 - new_speed = 0; 1002 - else 1003 - new_speed = phydev->speed; 1004 - 1005 - /* Grab pause settings from PHY if configured to do so */ 1006 - if (priv->aneg_pause) { 1007 - rx_pause = tx_pause = phydev->pause; 1008 - if (phydev->asym_pause) 1009 - tx_pause = !rx_pause; 1010 - } else { 1011 - rx_pause = priv->rx_pause; 1012 - tx_pause = priv->tx_pause; 1013 - } 1014 - 1015 - /* Link hasn't changed, do nothing */ 1016 - if (phydev->speed == priv->cur_speed && 1017 - phydev->duplex == priv->cur_duplex && 1018 - rx_pause == priv->rx_pause && 1019 - tx_pause == priv->tx_pause) 1020 - return; 1021 - 1022 - /* Print status if we have a link or we had one and just lost it, 1023 - * don't print otherwise. 1024 - */ 1025 - if (new_speed || priv->cur_speed) 1026 - phy_print_status(phydev); 1027 - 1028 - priv->cur_speed = new_speed; 1029 - priv->cur_duplex = phydev->duplex; 1030 - priv->rx_pause = rx_pause; 1031 - priv->tx_pause = tx_pause; 1032 - 1033 - /* Link is down, do nothing else */ 1034 - if (!new_speed) 1035 - return; 1036 - 1037 - /* Disable all interrupts */ 1038 - iowrite32(0, priv->base + FTGMAC100_OFFSET_IER); 1039 - 1040 - /* Reset the adapter asynchronously */ 1041 - schedule_work(&priv->reset_task); 1042 - } 1043 - 1044 - static int ftgmac100_mii_probe(struct net_device *netdev) 1045 - { 1046 - struct ftgmac100 *priv = netdev_priv(netdev); 1047 - struct platform_device *pdev = to_platform_device(priv->dev); 1048 - struct device_node *np = pdev->dev.of_node; 1049 - struct phy_device *phydev; 1050 - phy_interface_t phy_intf; 1051 - int err; 1052 - 1053 - /* Default to RGMII. It's a gigabit part after all */ 1054 - err = of_get_phy_mode(np, &phy_intf); 1055 - if (err) 1056 - phy_intf = PHY_INTERFACE_MODE_RGMII; 1057 - 1058 - /* Aspeed only supports these. I don't know about other IP 1059 - * block vendors so I'm going to just let them through for 1060 - * now. Note that this is only a warning if for some obscure 1061 - * reason the DT really means to lie about it or it's a newer 1062 - * part we don't know about. 1063 - * 1064 - * On the Aspeed SoC there are additionally straps and SCU 1065 - * control bits that could tell us what the interface is 1066 - * (or allow us to configure it while the IP block is held 1067 - * in reset). For now I chose to keep this driver away from 1068 - * those SoC specific bits and assume the device-tree is 1069 - * right and the SCU has been configured properly by pinmux 1070 - * or the firmware. 
1071 - */ 1072 - if (priv->is_aspeed && !(phy_interface_mode_is_rgmii(phy_intf))) { 1073 - netdev_warn(netdev, 1074 - "Unsupported PHY mode %s !\n", 1075 - phy_modes(phy_intf)); 1076 - } 1077 - 1078 - phydev = phy_find_first(priv->mii_bus); 1079 - if (!phydev) { 1080 - netdev_info(netdev, "%s: no PHY found\n", netdev->name); 1081 - return -ENODEV; 1082 - } 1083 - 1084 - phydev = phy_connect(netdev, phydev_name(phydev), 1085 - &ftgmac100_adjust_link, phy_intf); 1086 - 1087 - if (IS_ERR(phydev)) { 1088 - netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name); 1089 - return PTR_ERR(phydev); 1090 - } 1091 - 1092 - /* Indicate that we support PAUSE frames (see comment in 1093 - * Documentation/networking/phy.rst) 1094 - */ 1095 - phy_support_asym_pause(phydev); 1096 - 1097 - /* Display what we found */ 1098 - phy_attached_info(phydev); 1099 - 1100 - return 0; 1101 - } 1102 - 1103 992 static int ftgmac100_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum) 1104 993 { 1105 994 struct net_device *netdev = bus->priv; ··· 1299 1410 return err; 1300 1411 } 1301 1412 1302 - static void ftgmac100_reset_task(struct work_struct *work) 1413 + static void ftgmac100_reset(struct ftgmac100 *priv) 1303 1414 { 1304 - struct ftgmac100 *priv = container_of(work, struct ftgmac100, 1305 - reset_task); 1306 1415 struct net_device *netdev = priv->netdev; 1307 1416 int err; ··· 1344 1457 if (netdev->phydev) 1345 1458 mutex_unlock(&netdev->phydev->lock); 1346 1459 rtnl_unlock(); 1460 + } 1461 + 1462 + static void ftgmac100_reset_task(struct work_struct *work) 1463 + { 1464 + struct ftgmac100 *priv = container_of(work, struct ftgmac100, 1465 + reset_task); 1466 + 1467 + ftgmac100_reset(priv); 1468 + } 1469 + 1470 + static void ftgmac100_adjust_link(struct net_device *netdev) 1471 + { 1472 + struct ftgmac100 *priv = netdev_priv(netdev); 1473 + struct phy_device *phydev = netdev->phydev; 1474 + bool tx_pause, rx_pause; 1475 + int new_speed; 1476 + 1477 + /* We store "no link" as speed 0 */ 1478 + if (!phydev->link) 1479 + new_speed = 0; 1480 + else 1481 + new_speed = phydev->speed; 1482 + 1483 + /* Grab pause settings from PHY if configured to do so */ 1484 + if (priv->aneg_pause) { 1485 + rx_pause = tx_pause = phydev->pause; 1486 + if (phydev->asym_pause) 1487 + tx_pause = !rx_pause; 1488 + } else { 1489 + rx_pause = priv->rx_pause; 1490 + tx_pause = priv->tx_pause; 1491 + } 1492 + 1493 + /* Link hasn't changed, do nothing */ 1494 + if (phydev->speed == priv->cur_speed && 1495 + phydev->duplex == priv->cur_duplex && 1496 + rx_pause == priv->rx_pause && 1497 + tx_pause == priv->tx_pause) 1498 + return; 1499 + 1500 + /* Print status if we have a link or we had one and just lost it, 1501 + * don't print otherwise. 1502 + */ 1503 + if (new_speed || priv->cur_speed) 1504 + phy_print_status(phydev); 1505 + 1506 + priv->cur_speed = new_speed; 1507 + priv->cur_duplex = phydev->duplex; 1508 + priv->rx_pause = rx_pause; 1509 + priv->tx_pause = tx_pause; 1510 + 1511 + /* Link is down, do nothing else */ 1512 + if (!new_speed) 1513 + return; 1514 + 1515 + /* Disable all interrupts */ 1516 + iowrite32(0, priv->base + FTGMAC100_OFFSET_IER); 1517 + 1518 + /* Release phy lock to allow ftgmac100_reset to acquire it, keeping lock 1519 + * order consistent to prevent deadlock. 
1520 + */ 1521 + if (netdev->phydev) 1522 + mutex_unlock(&netdev->phydev->lock); 1523 + 1524 + ftgmac100_reset(priv); 1525 + 1526 + if (netdev->phydev) 1527 + mutex_lock(&netdev->phydev->lock); 1528 + 1529 + } 1530 + 1531 + static int ftgmac100_mii_probe(struct net_device *netdev) 1532 + { 1533 + struct ftgmac100 *priv = netdev_priv(netdev); 1534 + struct platform_device *pdev = to_platform_device(priv->dev); 1535 + struct device_node *np = pdev->dev.of_node; 1536 + struct phy_device *phydev; 1537 + phy_interface_t phy_intf; 1538 + int err; 1539 + 1540 + /* Default to RGMII. It's a gigabit part after all */ 1541 + err = of_get_phy_mode(np, &phy_intf); 1542 + if (err) 1543 + phy_intf = PHY_INTERFACE_MODE_RGMII; 1544 + 1545 + /* Aspeed only supports these. I don't know about other IP 1546 + * block vendors so I'm going to just let them through for 1547 + * now. Note that this is only a warning if for some obscure 1548 + * reason the DT really means to lie about it or it's a newer 1549 + * part we don't know about. 1550 + * 1551 + * On the Aspeed SoC there are additionally straps and SCU 1552 + * control bits that could tell us what the interface is 1553 + * (or allow us to configure it while the IP block is held 1554 + * in reset). For now I chose to keep this driver away from 1555 + * those SoC specific bits and assume the device-tree is 1556 + * right and the SCU has been configured properly by pinmux 1557 + * or the firmware. 1558 + */ 1559 + if (priv->is_aspeed && !(phy_interface_mode_is_rgmii(phy_intf))) { 1560 + netdev_warn(netdev, 1561 + "Unsupported PHY mode %s !\n", 1562 + phy_modes(phy_intf)); 1563 + } 1564 + 1565 + phydev = phy_find_first(priv->mii_bus); 1566 + if (!phydev) { 1567 + netdev_info(netdev, "%s: no PHY found\n", netdev->name); 1568 + return -ENODEV; 1569 + } 1570 + 1571 + phydev = phy_connect(netdev, phydev_name(phydev), 1572 + &ftgmac100_adjust_link, phy_intf); 1573 + 1574 + if (IS_ERR(phydev)) { 1575 + netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name); 1576 + return PTR_ERR(phydev); 1577 + } 1578 + 1579 + /* Indicate that we support PAUSE frames (see comment in 1580 + * Documentation/networking/phy.rst) 1581 + */ 1582 + phy_support_asym_pause(phydev); 1583 + 1584 + /* Display what we found */ 1585 + phy_attached_info(phydev); 1586 + 1587 + return 0; 1347 1588 } 1348 1589 1349 1590 static int ftgmac100_open(struct net_device *netdev)
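The ftgmac100 reordering exists so that adjust_link, which phylib calls with the PHY mutex held, can invoke the reset path that itself takes rtnl and then the PHY mutex; dropping the PHY lock first keeps one lock order everywhere. A small pthread sketch of that discipline (locks and call chain are hypothetical):

    #include <pthread.h>
    #include <stdio.h>

    /* Global order: big_lock (rtnl in the driver) before phy_lock. */
    static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t phy_lock = PTHREAD_MUTEX_INITIALIZER;

    static void reset_device(void)
    {
            pthread_mutex_lock(&big_lock);  /* first lock in the order */
            pthread_mutex_lock(&phy_lock);  /* then the finer-grained one */
            puts("reset done under big_lock, then phy_lock");
            pthread_mutex_unlock(&phy_lock);
            pthread_mutex_unlock(&big_lock);
    }

    /* Runs with phy_lock already held, like adjust_link under phylib. */
    static void link_changed(void)
    {
            /* Taking big_lock here would invert the order and risk
             * deadlock against reset_device(); drop phy_lock first. */
            pthread_mutex_unlock(&phy_lock);
            reset_device();
            pthread_mutex_lock(&phy_lock);
    }

    int main(void)
    {
            pthread_mutex_lock(&phy_lock);  /* simulate the caller's context */
            link_changed();
            pthread_mutex_unlock(&phy_lock);
            return 0;
    }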
+5 -1
drivers/net/ethernet/ibm/ibmvnic.c
··· 5934 5934 be64_to_cpu(session_token)); 5935 5935 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, 5936 5936 H_SESSION_ERR_DETECTED, session_token, 0, 0); 5937 - if (rc) 5937 + if (rc) { 5938 5938 netdev_err(netdev, 5939 5939 "H_VIOCTL initiated failover failed, rc %ld\n", 5940 5940 rc); 5941 + goto last_resort; 5942 + } 5943 + 5944 + return count; 5941 5945 5942 5946 last_resort: 5943 5947 netdev_dbg(netdev, "Trying to send CRQ_CMD, the last resort\n");
+1 -11
drivers/net/ethernet/intel/i40e/i40e_main.c
··· 5386 5386 /* There is no need to reset BW when mqprio mode is on. */ 5387 5387 if (pf->flags & I40E_FLAG_TC_MQPRIO) 5388 5388 return 0; 5389 - 5390 - if (!vsi->mqprio_qopt.qopt.hw) { 5391 - if (pf->flags & I40E_FLAG_DCB_ENABLED) 5392 - goto skip_reset; 5393 - 5394 - if (IS_ENABLED(CONFIG_I40E_DCB) && 5395 - i40e_dcb_hw_get_num_tc(&pf->hw) == 1) 5396 - goto skip_reset; 5397 - 5389 + if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) { 5398 5390 ret = i40e_set_bw_limit(vsi, vsi->seid, 0); 5399 5391 if (ret) 5400 5392 dev_info(&pf->pdev->dev, ··· 5394 5402 vsi->seid); 5395 5403 return ret; 5396 5404 } 5397 - 5398 - skip_reset: 5399 5405 memset(&bw_data, 0, sizeof(bw_data)); 5400 5406 bw_data.tc_valid_bits = enabled_tc; 5401 5407 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
-1
drivers/net/ethernet/intel/ice/ice.h
··· 281 281 ICE_VFLR_EVENT_PENDING, 282 282 ICE_FLTR_OVERFLOW_PROMISC, 283 283 ICE_VF_DIS, 284 - ICE_VF_DEINIT_IN_PROGRESS, 285 284 ICE_CFG_BUSY, 286 285 ICE_SERVICE_SCHED, 287 286 ICE_SERVICE_DIS,
+1 -1
drivers/net/ethernet/intel/ice/ice_common.c
··· 3379 3379 3380 3380 if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) && 3381 3381 !ice_fw_supports_report_dflt_cfg(hw)) { 3382 - struct ice_link_default_override_tlv tlv; 3382 + struct ice_link_default_override_tlv tlv = { 0 }; 3383 3383 3384 3384 status = ice_get_link_default_override(&tlv, pi); 3385 3385 if (status)
+1
drivers/net/ethernet/intel/ice/ice_eswitch.c
··· 44 44 ctrl_vsi->rxq_map[vf->vf_id]; 45 45 rule_info.flags_info.act |= ICE_SINGLE_ACT_LB_ENABLE; 46 46 rule_info.flags_info.act_valid = true; 47 + rule_info.tun_type = ICE_SW_TUN_AND_NON_TUN; 47 48 48 49 err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, 49 50 vf->repr->mac_rule);
+2
drivers/net/ethernet/intel/ice/ice_main.c
··· 1802 1802 * reset, so print the event prior to reset. 1803 1803 */ 1804 1804 ice_print_vf_rx_mdd_event(vf); 1805 + mutex_lock(&pf->vf[i].cfg_lock); 1805 1806 ice_reset_vf(&pf->vf[i], false); 1807 + mutex_unlock(&pf->vf[i].cfg_lock); 1806 1808 } 1807 1809 } 1808 1810 }
+1
drivers/net/ethernet/intel/ice/ice_protocol_type.h
··· 47 47 48 48 enum ice_sw_tunnel_type { 49 49 ICE_NON_TUN = 0, 50 + ICE_SW_TUN_AND_NON_TUN, 50 51 ICE_SW_TUN_VXLAN, 51 52 ICE_SW_TUN_GENEVE, 52 53 ICE_SW_TUN_NVGRE,
+4 -1
drivers/net/ethernet/intel/ice/ice_ptp.c
··· 1533 1533 static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta) 1534 1534 { 1535 1535 struct timespec64 now, then; 1536 + int ret; 1536 1537 1537 1538 then = ns_to_timespec64(delta); 1538 - ice_ptp_gettimex64(info, &now, NULL); 1539 + ret = ice_ptp_gettimex64(info, &now, NULL); 1540 + if (ret) 1541 + return ret; 1539 1542 now = timespec64_add(now, then); 1540 1543 1541 1544 return ice_ptp_settime64(info, (const struct timespec64 *)&now);
+3 -1
drivers/net/ethernet/intel/ice/ice_switch.c
··· 4617 4617 case ICE_SW_TUN_NVGRE: 4618 4618 prof_type = ICE_PROF_TUN_GRE; 4619 4619 break; 4620 + case ICE_SW_TUN_AND_NON_TUN: 4620 4621 default: 4621 4622 prof_type = ICE_PROF_ALL; 4622 4623 break; ··· 5386 5385 if (status) 5387 5386 goto err_ice_add_adv_rule; 5388 5387 5389 - if (rinfo->tun_type != ICE_NON_TUN) { 5388 + if (rinfo->tun_type != ICE_NON_TUN && 5389 + rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) { 5390 5390 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type, 5391 5391 s_rule->pdata.lkup_tx_rx.hdr, 5392 5392 pkt_offsets);
+2 -2
drivers/net/ethernet/intel/ice/ice_tc_lib.c
··· 709 709 fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT; 710 710 else 711 711 fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT; 712 - fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT; 712 + 713 713 headers->l4_key.dst_port = match.key->dst; 714 714 headers->l4_mask.dst_port = match.mask->dst; 715 715 } ··· 718 718 fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT; 719 719 else 720 720 fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT; 721 - fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT; 721 + 722 722 headers->l4_key.src_port = match.key->src; 723 723 headers->l4_mask.src_port = match.mask->src; 724 724 }
+25 -17
drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
··· 502 502 struct ice_hw *hw = &pf->hw; 503 503 unsigned int tmp, i; 504 504 505 - set_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state); 506 - 507 505 if (!pf->vf) 508 506 return; 509 507 ··· 519 521 else 520 522 dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n"); 521 523 522 - /* Avoid wait time by stopping all VFs at the same time */ 523 - ice_for_each_vf(pf, i) 524 - ice_dis_vf_qs(&pf->vf[i]); 525 - 526 524 tmp = pf->num_alloc_vfs; 527 525 pf->num_qps_per_vf = 0; 528 526 pf->num_alloc_vfs = 0; 529 527 for (i = 0; i < tmp; i++) { 530 - if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) { 528 + struct ice_vf *vf = &pf->vf[i]; 529 + 530 + mutex_lock(&vf->cfg_lock); 531 + 532 + ice_dis_vf_qs(vf); 533 + 534 + if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { 531 535 /* disable VF qp mappings and set VF disable state */ 532 - ice_dis_vf_mappings(&pf->vf[i]); 533 - set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states); 534 - ice_free_vf_res(&pf->vf[i]); 536 + ice_dis_vf_mappings(vf); 537 + set_bit(ICE_VF_STATE_DIS, vf->vf_states); 538 + ice_free_vf_res(vf); 535 539 } 536 540 537 - mutex_destroy(&pf->vf[i].cfg_lock); 541 + mutex_unlock(&vf->cfg_lock); 542 + 543 + mutex_destroy(&vf->cfg_lock); 538 544 } 539 545 540 546 if (ice_sriov_free_msix_res(pf)) ··· 574 572 i); 575 573 576 574 clear_bit(ICE_VF_DIS, pf->state); 577 - clear_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state); 578 575 clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags); 579 576 } 580 577 ··· 1565 1564 ice_for_each_vf(pf, v) { 1566 1565 vf = &pf->vf[v]; 1567 1566 1567 + mutex_lock(&vf->cfg_lock); 1568 + 1568 1569 vf->driver_caps = 0; 1569 1570 ice_vc_set_default_allowlist(vf); 1570 1571 ··· 1581 1578 ice_vf_pre_vsi_rebuild(vf); 1582 1579 ice_vf_rebuild_vsi(vf); 1583 1580 ice_vf_post_vsi_rebuild(vf); 1581 + 1582 + mutex_unlock(&vf->cfg_lock); 1584 1583 } 1585 1584 1586 1585 if (ice_is_eswitch_mode_switchdev(pf)) ··· 1632 1627 u8 promisc_m; 1633 1628 u32 reg; 1634 1629 int i; 1630 + 1631 + lockdep_assert_held(&vf->cfg_lock); 1635 1632 1636 1633 dev = ice_pf_to_dev(pf); 1637 1634 ··· 2150 2143 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; 2151 2144 /* read GLGEN_VFLRSTAT register to find out the flr VFs */ 2152 2145 reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx)); 2153 - if (reg & BIT(bit_idx)) 2146 + if (reg & BIT(bit_idx)) { 2154 2147 /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */ 2148 + mutex_lock(&vf->cfg_lock); 2155 2149 ice_reset_vf(vf, true); 2150 + mutex_unlock(&vf->cfg_lock); 2151 + } 2156 2152 } 2157 2153 } 2158 2154 ··· 2232 2222 if (!vf) 2233 2223 return; 2234 2224 2225 + mutex_lock(&vf->cfg_lock); 2235 2226 ice_vc_reset_vf(vf); 2227 + mutex_unlock(&vf->cfg_lock); 2236 2228 } 2237 2229 2238 2230 /** ··· 5770 5758 struct ice_vf *vf = NULL; 5771 5759 struct device *dev; 5772 5760 int err = 0; 5773 - 5774 - /* if de-init is underway, don't process messages from VF */ 5775 - if (test_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state)) 5776 - return; 5777 5761 5778 5762 dev = ice_pf_to_dev(pf); 5779 5763 if (ice_validate_vf_id(pf, vf_id)) {
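The ice changes above retire the coarse ICE_VF_DEINIT_IN_PROGRESS state bit in favor of the existing per-VF cfg_lock: every path that resets or reconfigures a VF takes that VF's mutex, and lockdep_assert_held() documents the rule at the reset entry point. Sketched with one mutex per object (structure names are illustrative):

    #include <assert.h>
    #include <pthread.h>
    #include <stdio.h>

    struct vf {
            int id;
            pthread_mutex_t cfg_lock;  /* serializes reset vs. messages */
    };

    /* Must be entered with vf->cfg_lock held; the trylock assert is a
     * userspace stand-in for lockdep_assert_held(). */
    static void vf_reset(struct vf *vf)
    {
            assert(pthread_mutex_trylock(&vf->cfg_lock) != 0);
            printf("resetting VF %d\n", vf->id);
    }

    int main(void)
    {
            struct vf vf = { .id = 0, .cfg_lock = PTHREAD_MUTEX_INITIALIZER };

            pthread_mutex_lock(&vf.cfg_lock);  /* per-object, not global */
            vf_reset(&vf);
            pthread_mutex_unlock(&vf.cfg_lock);
            return 0;
    }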
+14 -10
drivers/net/ethernet/marvell/mv643xx_eth.c
··· 2704 2704 2705 2705 static struct platform_device *port_platdev[3]; 2706 2706 2707 + static void mv643xx_eth_shared_of_remove(void) 2708 + { 2709 + int n; 2710 + 2711 + for (n = 0; n < 3; n++) { 2712 + platform_device_del(port_platdev[n]); 2713 + port_platdev[n] = NULL; 2714 + } 2715 + } 2716 + 2707 2717 static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev, 2708 2718 struct device_node *pnp) 2709 2719 { ··· 2750 2740 return -EINVAL; 2751 2741 } 2752 2742 2753 - of_get_mac_address(pnp, ppd.mac_addr); 2743 + ret = of_get_mac_address(pnp, ppd.mac_addr); 2744 + if (ret) 2745 + return ret; 2754 2746 2755 2747 mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size); 2756 2748 mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr); ··· 2816 2804 ret = mv643xx_eth_shared_of_add_port(pdev, pnp); 2817 2805 if (ret) { 2818 2806 of_node_put(pnp); 2807 + mv643xx_eth_shared_of_remove(); 2819 2808 return ret; 2820 2809 } 2821 2810 } 2822 2811 return 0; 2823 2812 } 2824 2813 2825 - static void mv643xx_eth_shared_of_remove(void) 2826 - { 2827 - int n; 2828 - 2829 - for (n = 0; n < 3; n++) { 2830 - platform_device_del(port_platdev[n]); 2831 - port_platdev[n] = NULL; 2832 - } 2833 - } 2834 2814 #else 2835 2815 static inline int mv643xx_eth_shared_of_probe(struct platform_device *pdev) 2836 2816 {
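mv643xx now unwinds already-registered ports when a later port fails to probe instead of leaking them, which is why the remove helper moves above its new caller. The general register-then-rollback shape, with hypothetical names:

    #include <stdio.h>

    #define NPORTS 3

    static int registered[NPORTS];

    static int add_port(int n)
    {
            if (n == 2)
                    return -1;  /* simulate the last port failing */
            registered[n] = 1;
            printf("port %d added\n", n);
            return 0;
    }

    /* Safe to call with partially-registered state, like the driver's
     * remove helper, which tolerates slots that were never added. */
    static void remove_all_ports(void)
    {
            for (int n = 0; n < NPORTS; n++) {
                    if (registered[n]) {
                            registered[n] = 0;
                            printf("port %d removed\n", n);
                    }
            }
    }

    int main(void)
    {
            for (int n = 0; n < NPORTS; n++) {
                    if (add_port(n)) {
                            remove_all_ports();  /* unwind before bailing */
                            return 1;
                    }
            }
            return 0;
    }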
+3 -3
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
··· 6870 6870 dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE; 6871 6871 dev->dev.of_node = port_node; 6872 6872 6873 + port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops; 6874 + port->pcs_xlg.ops = &mvpp2_phylink_xlg_pcs_ops; 6875 + 6873 6876 if (!mvpp2_use_acpi_compat_mode(port_fwnode)) { 6874 6877 port->phylink_config.dev = &dev->dev; 6875 6878 port->phylink_config.type = PHYLINK_NETDEV; ··· 6942 6939 __set_bit(PHY_INTERFACE_MODE_SGMII, 6943 6940 port->phylink_config.supported_interfaces); 6944 6941 } 6945 - 6946 - port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops; 6947 - port->pcs_xlg.ops = &mvpp2_phylink_xlg_pcs_ops; 6948 6942 6949 6943 phylink = phylink_create(&port->phylink_config, port_fwnode, 6950 6944 phy_mode, &mvpp2_phylink_ops);
+2
drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
··· 18 18 struct netlink_ext_ack *extack; 19 19 u32 actions; 20 20 bool ct; 21 + bool ct_clear; 21 22 bool encap; 22 23 bool decap; 23 24 bool mpls_push; 24 25 bool ptype_host; 25 26 const struct ip_tunnel_info *tun_info; 27 + struct mlx5e_mpls_info mpls_info; 26 28 int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS]; 27 29 int if_count; 28 30 struct mlx5_tc_ct_priv *ct_priv;
+5
drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
··· 31 31 bool clear_action = act->ct.action & TCA_CT_ACT_CLEAR; 32 32 int err; 33 33 34 + /* It's redundant to do ct clear more than once. */ 35 + if (clear_action && parse_state->ct_clear) 36 + return 0; 37 + 34 38 err = mlx5_tc_ct_parse_action(parse_state->ct_priv, attr, 35 39 &attr->parse_attr->mod_hdr_acts, 36 40 act, parse_state->extack); ··· 50 46 flow_flag_set(parse_state->flow, CT); 51 47 parse_state->ct = true; 52 48 } 49 + parse_state->ct_clear = clear_action; 53 50 54 51 return 0; 55 52 }
+6
drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
··· 178 178 return -ENOMEM; 179 179 180 180 parse_state->encap = false; 181 + 182 + if (parse_state->mpls_push) { 183 + memcpy(&parse_attr->mpls_info[esw_attr->out_count], 184 + &parse_state->mpls_info, sizeof(parse_state->mpls_info)); 185 + parse_state->mpls_push = false; 186 + } 181 187 esw_attr->dests[esw_attr->out_count].flags |= MLX5_ESW_DEST_ENCAP; 182 188 esw_attr->out_count++; 183 189 /* attr->dests[].rep is resolved when we handle encap */
+11
drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c
··· 23 23 return true; 24 24 } 25 25 26 + static void 27 + copy_mpls_info(struct mlx5e_mpls_info *mpls_info, 28 + const struct flow_action_entry *act) 29 + { 30 + mpls_info->label = act->mpls_push.label; 31 + mpls_info->tc = act->mpls_push.tc; 32 + mpls_info->bos = act->mpls_push.bos; 33 + mpls_info->ttl = act->mpls_push.ttl; 34 + } 35 + 26 36 static int 27 37 tc_act_parse_mpls_push(struct mlx5e_tc_act_parse_state *parse_state, 28 38 const struct flow_action_entry *act, ··· 40 30 struct mlx5_flow_attr *attr) 41 31 { 42 32 parse_state->mpls_push = true; 33 + copy_mpls_info(&parse_state->mpls_info, act); 43 34 44 35 return 0; 45 36 }
+1
drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
··· 35 35 36 36 struct mlx5e_tc_flow_parse_attr { 37 37 const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS]; 38 + struct mlx5e_mpls_info mpls_info[MLX5_MAX_FLOW_FWD_VPORTS]; 38 39 struct net_device *filter_dev; 39 40 struct mlx5_flow_spec spec; 40 41 struct pedit_headers_action hdrs[__PEDIT_CMD_MAX];
+3
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
··· 768 768 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; 769 769 struct mlx5e_tc_flow_parse_attr *parse_attr; 770 770 const struct ip_tunnel_info *tun_info; 771 + const struct mlx5e_mpls_info *mpls_info; 771 772 unsigned long tbl_time_before = 0; 772 773 struct mlx5e_encap_entry *e; 773 774 struct mlx5e_encap_key key; ··· 779 778 780 779 parse_attr = attr->parse_attr; 781 780 tun_info = parse_attr->tun_info[out_index]; 781 + mpls_info = &parse_attr->mpls_info[out_index]; 782 782 family = ip_tunnel_info_af(tun_info); 783 783 key.ip_tun_key = &tun_info->key; 784 784 key.tc_tunnel = mlx5e_get_tc_tun(mirred_dev); ··· 830 828 goto out_err_init; 831 829 } 832 830 e->tun_info = tun_info; 831 + memcpy(&e->mpls_info, mpls_info, sizeof(*mpls_info)); 833 832 err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack); 834 833 if (err) 835 834 goto out_err_init;
+13 -20
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
··· 30 30 struct mlx5e_encap_entry *r) 31 31 { 32 32 const struct ip_tunnel_key *tun_key = &r->tun_info->key; 33 + const struct mlx5e_mpls_info *mpls_info = &r->mpls_info; 33 34 struct udphdr *udp = (struct udphdr *)(buf); 34 35 struct mpls_shim_hdr *mpls; 35 - u32 tun_id; 36 36 37 - tun_id = be32_to_cpu(tunnel_id_to_key32(tun_key->tun_id)); 38 37 mpls = (struct mpls_shim_hdr *)(udp + 1); 39 38 *ip_proto = IPPROTO_UDP; 40 39 41 40 udp->dest = tun_key->tp_dst; 42 - *mpls = mpls_entry_encode(tun_id, tun_key->ttl, tun_key->tos, true); 41 + *mpls = mpls_entry_encode(mpls_info->label, mpls_info->ttl, mpls_info->tc, mpls_info->bos); 43 42 44 43 return 0; 45 44 } ··· 59 60 void *headers_v) 60 61 { 61 62 struct flow_rule *rule = flow_cls_offload_flow_rule(f); 62 - struct flow_match_enc_keyid enc_keyid; 63 63 struct flow_match_mpls match; 64 64 void *misc2_c; 65 65 void *misc2_v; 66 66 67 - misc2_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 68 - misc_parameters_2); 69 - misc2_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, 70 - misc_parameters_2); 71 - 72 - if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) 73 - return 0; 74 - 75 - if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) 76 - return 0; 77 - 78 - flow_rule_match_enc_keyid(rule, &enc_keyid); 79 - 80 - if (!enc_keyid.mask->keyid) 81 - return 0; 82 - 83 67 if (!MLX5_CAP_ETH(priv->mdev, tunnel_stateless_mpls_over_udp) && 84 68 !(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) & MLX5_FLEX_PROTO_CW_MPLS_UDP)) 85 69 return -EOPNOTSUPP; 70 + 71 + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) 72 + return -EOPNOTSUPP; 73 + 74 + if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) 75 + return 0; 86 76 87 77 flow_rule_match_mpls(rule, &match); 88 78 89 79 /* Only support matching the first LSE */ 90 80 if (match.mask->used_lses != 1) 91 81 return -EOPNOTSUPP; 82 + 83 + misc2_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 84 + misc_parameters_2); 85 + misc2_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, 86 + misc_parameters_2); 92 87 93 88 MLX5_SET(fte_match_set_misc2, misc2_c, 94 89 outer_first_mpls_over_udp.mpls_label,
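The MPLSoUDP rewrite stops deriving the MPLS label-stack entry from the tunnel key and instead encodes the label/TC/BoS/TTL captured from the mpls_push action. For reference, the 32-bit MPLS shim layout that mpls_entry_encode() produces is label(20) | TC(3) | S(1) | TTL(8); a standalone encoder of that packing:

    #include <stdint.h>
    #include <stdio.h>

    /* Pack one MPLS label-stack entry: label(20) | tc(3) | bos(1) | ttl(8).
     * Returned in host order for clarity; the wire format is big-endian. */
    static uint32_t mpls_lse(uint32_t label, uint8_t tc, int bos, uint8_t ttl)
    {
            return (label & 0xfffffu) << 12 |
                   (uint32_t)(tc & 0x7) << 9 |
                   (uint32_t)(bos ? 1 : 0) << 8 |
                   ttl;
    }

    int main(void)
    {
            /* label 100, TC 0, bottom of stack, TTL 64 -> 0x00064140 */
            printf("lse = 0x%08x\n", mpls_lse(100, 0, 1, 64));
            return 0;
    }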
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
··· 1792 1792 if (size_read < 0) { 1793 1793 netdev_err(priv->netdev, "%s: mlx5_query_eeprom failed:0x%x\n", 1794 1794 __func__, size_read); 1795 - return 0; 1795 + return size_read; 1796 1796 } 1797 1797 1798 1798 i += size_read;
+8
drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
··· 179 179 struct rcu_head rcu; 180 180 }; 181 181 182 + struct mlx5e_mpls_info { 183 + u32 label; 184 + u8 tc; 185 + u8 bos; 186 + u8 ttl; 187 + }; 188 + 182 189 struct mlx5e_encap_entry { 183 190 /* attached neigh hash entry */ 184 191 struct mlx5e_neigh_hash_entry *nhe; ··· 199 192 struct list_head route_list; 200 193 struct mlx5_pkt_reformat *pkt_reformat; 201 194 const struct ip_tunnel_info *tun_info; 195 + struct mlx5e_mpls_info mpls_info; 202 196 unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ 203 197 204 198 struct net_device *out_dev;
+2 -1
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
··· 1348 1348 } 1349 1349 1350 1350 /* True when explicitly set via priv flag, or XDP prog is loaded */ 1351 - if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state)) 1351 + if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state) || 1352 + get_cqe_tls_offload(cqe)) 1352 1353 goto csum_unnecessary; 1353 1354 1354 1355 /* CQE csum doesn't cover padding octets in short ethernet
+1
drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
··· 334 334 netdev_info(ndev, "\t[%d] %s start..\n", i, st.name); 335 335 buf[count] = st.st_func(priv); 336 336 netdev_info(ndev, "\t[%d] %s end: result(%lld)\n", i, st.name, buf[count]); 337 + count++; 337 338 } 338 339 339 340 mutex_unlock(&priv->state_lock);
+3 -3
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
··· 1254 1254 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {}; 1255 1255 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); 1256 1256 1257 - if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) 1258 - return; 1259 - 1260 1257 MLX5_SET(ppcnt_reg, in, local_port, 1); 1261 1258 MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP); 1262 1259 if (mlx5_core_access_reg(mdev, in, sz, ppcnt_phy_statistical, ··· 1269 1272 void mlx5e_stats_fec_get(struct mlx5e_priv *priv, 1270 1273 struct ethtool_fec_stats *fec_stats) 1271 1274 { 1275 + if (!MLX5_CAP_PCAM_FEATURE(priv->mdev, ppcnt_statistical_group)) 1276 + return; 1277 + 1272 1278 fec_set_corrected_bits_total(priv, fec_stats); 1273 1279 fec_set_block_stats(priv, fec_stats); 1274 1280 }
+12
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 3316 3316 return false; 3317 3317 } 3318 3318 3319 + if (!(~actions & 3320 + (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) { 3321 + NL_SET_ERR_MSG_MOD(extack, "Rule cannot support forward+drop action"); 3322 + return false; 3323 + } 3324 + 3325 + if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR && 3326 + actions & MLX5_FLOW_CONTEXT_ACTION_DROP) { 3327 + NL_SET_ERR_MSG_MOD(extack, "Drop with modify header action is not supported"); 3328 + return false; 3329 + } 3330 + 3319 3331 if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR && 3320 3332 !modify_header_match_supported(priv, &parse_attr->spec, flow_action, 3321 3333 actions, ct_flow, ct_clear, extack))
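The two new mlx5e checks above are bit tricks over the action mask: !(~actions & (FWD | DROP)) is true only when both bits are set (forward and drop together), and the second test rejects modify-header combined with drop. A tiny demonstration of the first idiom, with made-up flag names:

    #include <stdio.h>

    #define ACT_FWD  0x1
    #define ACT_DROP 0x2

    /* True iff every bit of 'need' is set in 'actions': ~actions clears
     * exactly the set bits, so the AND is zero only when all are present. */
    static int has_all(unsigned int actions, unsigned int need)
    {
            return !(~actions & need);
    }

    int main(void)
    {
            printf("fwd only     -> %d\n", has_all(ACT_FWD, ACT_FWD | ACT_DROP));
            printf("fwd and drop -> %d\n",
                   has_all(ACT_FWD | ACT_DROP, ACT_FWD | ACT_DROP));
            return 0;
    }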
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
··· 697 697 } 698 698 699 699 int mlx5_esw_qos_set_vport_rate(struct mlx5_eswitch *esw, struct mlx5_vport *vport, 700 - u32 min_rate, u32 max_rate) 700 + u32 max_rate, u32 min_rate) 701 701 { 702 702 int err; 703 703
-4
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
··· 2839 2839 if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source)) 2840 2840 return false; 2841 2841 2842 - if (mlx5_core_is_ecpf_esw_manager(esw->dev) || 2843 - mlx5_ecpf_vport_exists(esw->dev)) 2844 - return false; 2845 - 2846 2842 return true; 2847 2843 } 2848 2844
+2
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
··· 2074 2074 fte->node.del_hw_func = NULL; 2075 2075 up_write_ref_node(&fte->node, false); 2076 2076 tree_put_node(&fte->node, false); 2077 + } else { 2078 + up_write_ref_node(&fte->node, false); 2077 2079 } 2078 2080 kfree(handle); 2079 2081 }
+3
drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
··· 121 121 122 122 u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains) 123 123 { 124 + if (!mlx5_chains_prios_supported(chains)) 125 + return 1; 126 + 124 127 if (mlx5_chains_ignore_flow_level_supported(chains)) 125 128 return UINT_MAX; 126 129
+3 -1
drivers/net/ethernet/mellanox/mlx5/core/main.c
··· 526 526 527 527 /* Check log_max_qp from HCA caps to set in current profile */ 528 528 if (prof->log_max_qp == LOG_MAX_SUPPORTED_QPS) { 529 - prof->log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp); 529 + prof->log_max_qp = min_t(u8, 17, MLX5_CAP_GEN_MAX(dev, log_max_qp)); 530 530 } else if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) { 531 531 mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n", 532 532 prof->log_max_qp, ··· 1840 1840 { PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF}, /* ConnectX Family mlx5Gen Virtual Function */ 1841 1841 { PCI_VDEVICE(MELLANOX, 0x101f) }, /* ConnectX-6 LX */ 1842 1842 { PCI_VDEVICE(MELLANOX, 0x1021) }, /* ConnectX-7 */ 1843 + { PCI_VDEVICE(MELLANOX, 0x1023) }, /* ConnectX-8 */ 1843 1844 { PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */ 1844 1845 { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */ 1845 1846 { PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */ 1846 1847 { PCI_VDEVICE(MELLANOX, 0xa2dc) }, /* BlueField-3 integrated ConnectX-7 network controller */ 1848 + { PCI_VDEVICE(MELLANOX, 0xa2df) }, /* BlueField-4 integrated ConnectX-8 network controller */ 1847 1849 { 0, } 1848 1850 }; 1849 1851
+81 -39
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
··· 4 4 #include "dr_types.h" 5 5 6 6 #define DR_ICM_MODIFY_HDR_ALIGN_BASE 64 7 - #define DR_ICM_SYNC_THRESHOLD_POOL (64 * 1024 * 1024) 8 7 9 8 struct mlx5dr_icm_pool { 10 9 enum mlx5dr_icm_type icm_type; ··· 135 136 kvfree(icm_mr); 136 137 } 137 138 138 - static int dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk) 139 + static int dr_icm_buddy_get_ste_size(struct mlx5dr_icm_buddy_mem *buddy) 139 140 { 140 - chunk->ste_arr = kvzalloc(chunk->num_of_entries * 141 - sizeof(chunk->ste_arr[0]), GFP_KERNEL); 142 - if (!chunk->ste_arr) 143 - return -ENOMEM; 141 + /* We support only one type of STE size, both for ConnectX-5 and later 142 + * devices. Once the support for match STE which has a larger tag is 143 + * added (32B instead of 16B), the STE size for devices later than 144 + * ConnectX-5 needs to account for that. 145 + */ 146 + return DR_STE_SIZE_REDUCED; 147 + } 144 148 145 - chunk->hw_ste_arr = kvzalloc(chunk->num_of_entries * 146 - DR_STE_SIZE_REDUCED, GFP_KERNEL); 147 - if (!chunk->hw_ste_arr) 148 - goto out_free_ste_arr; 149 + static void dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk, int offset) 150 + { 151 + struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem; 152 + int index = offset / DR_STE_SIZE; 149 153 150 - chunk->miss_list = kvmalloc(chunk->num_of_entries * 151 - sizeof(chunk->miss_list[0]), GFP_KERNEL); 152 - if (!chunk->miss_list) 153 - goto out_free_hw_ste_arr; 154 - 155 - return 0; 156 - 157 - out_free_hw_ste_arr: 158 - kvfree(chunk->hw_ste_arr); 159 - out_free_ste_arr: 160 - kvfree(chunk->ste_arr); 161 - return -ENOMEM; 154 + chunk->ste_arr = &buddy->ste_arr[index]; 155 + chunk->miss_list = &buddy->miss_list[index]; 156 + chunk->hw_ste_arr = buddy->hw_ste_arr + 157 + index * dr_icm_buddy_get_ste_size(buddy); 162 158 } 163 159 164 160 static void dr_icm_chunk_ste_cleanup(struct mlx5dr_icm_chunk *chunk) 165 161 { 166 - kvfree(chunk->miss_list); 167 - kvfree(chunk->hw_ste_arr); 168 - kvfree(chunk->ste_arr); 162 + struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem; 163 + 164 + memset(chunk->hw_ste_arr, 0, 165 + chunk->num_of_entries * dr_icm_buddy_get_ste_size(buddy)); 166 + memset(chunk->ste_arr, 0, 167 + chunk->num_of_entries * sizeof(chunk->ste_arr[0])); 169 168 } 170 169 171 170 static enum mlx5dr_icm_type ··· 186 189 kvfree(chunk); 187 190 } 188 191 192 + static int dr_icm_buddy_init_ste_cache(struct mlx5dr_icm_buddy_mem *buddy) 193 + { 194 + int num_of_entries = 195 + mlx5dr_icm_pool_chunk_size_to_entries(buddy->pool->max_log_chunk_sz); 196 + 197 + buddy->ste_arr = kvcalloc(num_of_entries, 198 + sizeof(struct mlx5dr_ste), GFP_KERNEL); 199 + if (!buddy->ste_arr) 200 + return -ENOMEM; 201 + 202 + /* Preallocate full STE size on non-ConnectX-5 devices since 203 + * we need to support both full and reduced with the same cache. 
204 + */ 205 + buddy->hw_ste_arr = kvcalloc(num_of_entries, 206 + dr_icm_buddy_get_ste_size(buddy), GFP_KERNEL); 207 + if (!buddy->hw_ste_arr) 208 + goto free_ste_arr; 209 + 210 + buddy->miss_list = kvmalloc(num_of_entries * sizeof(struct list_head), GFP_KERNEL); 211 + if (!buddy->miss_list) 212 + goto free_hw_ste_arr; 213 + 214 + return 0; 215 + 216 + free_hw_ste_arr: 217 + kvfree(buddy->hw_ste_arr); 218 + free_ste_arr: 219 + kvfree(buddy->ste_arr); 220 + return -ENOMEM; 221 + } 222 + 223 + static void dr_icm_buddy_cleanup_ste_cache(struct mlx5dr_icm_buddy_mem *buddy) 224 + { 225 + kvfree(buddy->ste_arr); 226 + kvfree(buddy->hw_ste_arr); 227 + kvfree(buddy->miss_list); 228 + } 229 + 189 230 static int dr_icm_buddy_create(struct mlx5dr_icm_pool *pool) 190 231 { 191 232 struct mlx5dr_icm_buddy_mem *buddy; ··· 243 208 buddy->icm_mr = icm_mr; 244 209 buddy->pool = pool; 245 210 211 + if (pool->icm_type == DR_ICM_TYPE_STE) { 212 + /* Reduce allocations by preallocating and reusing the STE structures */ 213 + if (dr_icm_buddy_init_ste_cache(buddy)) 214 + goto err_cleanup_buddy; 215 + } 216 + 246 217 /* add it to the -start- of the list in order to search in it first */ 247 218 list_add(&buddy->list_node, &pool->buddy_mem_list); 248 219 249 220 return 0; 250 221 222 + err_cleanup_buddy: 223 + mlx5dr_buddy_cleanup(buddy); 251 224 err_free_buddy: 252 225 kvfree(buddy); 253 226 free_mr: ··· 276 233 dr_icm_pool_mr_destroy(buddy->icm_mr); 277 234 278 235 mlx5dr_buddy_cleanup(buddy); 236 + 237 + if (buddy->pool->icm_type == DR_ICM_TYPE_STE) 238 + dr_icm_buddy_cleanup_ste_cache(buddy); 279 239 280 240 kvfree(buddy); 281 241 } ··· 307 261 chunk->byte_size = 308 262 mlx5dr_icm_pool_chunk_size_to_byte(chunk_size, pool->icm_type); 309 263 chunk->seg = seg; 264 + chunk->buddy_mem = buddy_mem_pool; 310 265 311 - if (pool->icm_type == DR_ICM_TYPE_STE && dr_icm_chunk_ste_init(chunk)) { 312 - mlx5dr_err(pool->dmn, 313 - "Failed to init ste arrays (order: %d)\n", 314 - chunk_size); 315 - goto out_free_chunk; 316 - } 266 + if (pool->icm_type == DR_ICM_TYPE_STE) 267 + dr_icm_chunk_ste_init(chunk, offset); 317 268 318 269 buddy_mem_pool->used_memory += chunk->byte_size; 319 - chunk->buddy_mem = buddy_mem_pool; 320 270 INIT_LIST_HEAD(&chunk->chunk_list); 321 271 322 272 /* chunk now is part of the used_list */ 323 273 list_add_tail(&chunk->chunk_list, &buddy_mem_pool->used_list); 324 274 325 275 return chunk; 326 - 327 - out_free_chunk: 328 - kvfree(chunk); 329 - return NULL; 330 276 } 331 277 332 278 static bool dr_icm_pool_is_sync_required(struct mlx5dr_icm_pool *pool) 333 279 { 334 - if (pool->hot_memory_size > DR_ICM_SYNC_THRESHOLD_POOL) 335 - return true; 280 + int allow_hot_size; 336 281 337 - return false; 282 + /* sync when hot memory reaches half of the pool size */ 283 + allow_hot_size = 284 + mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz, 285 + pool->icm_type) / 2; 286 + 287 + return pool->hot_memory_size > allow_hot_size; 338 288 } 339 289 340 290 static int dr_icm_pool_sync_all_buddy_pools(struct mlx5dr_icm_pool *pool)
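The dr_icm_pool rework preallocates the STE bookkeeping arrays once per buddy at its maximum chunk size and hands chunks slices of those arrays indexed by offset / DR_STE_SIZE, so chunk alloc/free becomes pointer math plus memset instead of three kvzalloc/kvfree calls. The slicing idea in miniature (sizes here are made up):

    #include <stdio.h>
    #include <string.h>

    #define ENTRY_SIZE   16  /* stand-in for DR_STE_SIZE */
    #define POOL_ENTRIES 64  /* stand-in for the buddy's max chunk size */

    static unsigned char backing[POOL_ENTRIES * ENTRY_SIZE]; /* one allocation */

    struct chunk {
            unsigned char *data;
            int num_entries;
    };

    /* A chunk is a view into the preallocated backing store, located by
     * index just as the patch computes offset / DR_STE_SIZE. */
    static void chunk_init(struct chunk *c, int offset, int num_entries)
    {
            int index = offset / ENTRY_SIZE;

            c->data = backing + (size_t)index * ENTRY_SIZE;
            c->num_entries = num_entries;
    }

    /* "Freeing" only scrubs the slice for its next user; no kvfree(). */
    static void chunk_cleanup(struct chunk *c)
    {
            memset(c->data, 0, (size_t)c->num_entries * ENTRY_SIZE);
    }

    int main(void)
    {
            struct chunk c;

            chunk_init(&c, 32 * ENTRY_SIZE, 8);
            printf("chunk starts at byte %td, %d entries\n",
                   c.data - backing, c.num_entries);
            chunk_cleanup(&c);
            return 0;
    }

The same series also replaces the fixed 64 MB sync threshold with half of the pool size, so the hot list is flushed relative to how much ICM the pool can actually hold.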
+4 -16
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
··· 13 13 return (spec->dmac_47_16 || spec->dmac_15_0); 14 14 } 15 15 16 - static bool dr_mask_is_src_addr_set(struct mlx5dr_match_spec *spec) 17 - { 18 - return (spec->src_ip_127_96 || spec->src_ip_95_64 || 19 - spec->src_ip_63_32 || spec->src_ip_31_0); 20 - } 21 - 22 - static bool dr_mask_is_dst_addr_set(struct mlx5dr_match_spec *spec) 23 - { 24 - return (spec->dst_ip_127_96 || spec->dst_ip_95_64 || 25 - spec->dst_ip_63_32 || spec->dst_ip_31_0); 26 - } 27 - 28 16 static bool dr_mask_is_l3_base_set(struct mlx5dr_match_spec *spec) 29 17 { 30 18 return (spec->ip_protocol || spec->frag || spec->tcp_flags || ··· 491 503 &mask, inner, rx); 492 504 493 505 if (outer_ipv == DR_RULE_IPV6) { 494 - if (dr_mask_is_dst_addr_set(&mask.outer)) 506 + if (DR_MASK_IS_DST_IP_SET(&mask.outer)) 495 507 mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++], 496 508 &mask, inner, rx); 497 509 498 - if (dr_mask_is_src_addr_set(&mask.outer)) 510 + if (DR_MASK_IS_SRC_IP_SET(&mask.outer)) 499 511 mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++], 500 512 &mask, inner, rx); 501 513 ··· 598 610 &mask, inner, rx); 599 611 600 612 if (inner_ipv == DR_RULE_IPV6) { 601 - if (dr_mask_is_dst_addr_set(&mask.inner)) 613 + if (DR_MASK_IS_DST_IP_SET(&mask.inner)) 602 614 mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++], 603 615 &mask, inner, rx); 604 616 605 - if (dr_mask_is_src_addr_set(&mask.inner)) 617 + if (DR_MASK_IS_SRC_IP_SET(&mask.inner)) 606 618 mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++], 607 619 &mask, inner, rx); 608 620
+31 -1
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
··· 602 602 used_hw_action_num); 603 603 } 604 604 605 + static int dr_ste_build_pre_check_spec(struct mlx5dr_domain *dmn, 606 + struct mlx5dr_match_spec *spec) 607 + { 608 + if (spec->ip_version) { 609 + if (spec->ip_version != 0xf) { 610 + mlx5dr_err(dmn, 611 + "Partial ip_version mask with src/dst IP is not supported\n"); 612 + return -EINVAL; 613 + } 614 + } else if (spec->ethertype != 0xffff && 615 + (DR_MASK_IS_SRC_IP_SET(spec) || DR_MASK_IS_DST_IP_SET(spec))) { 616 + mlx5dr_err(dmn, 617 + "Partial/no ethertype mask with src/dst IP is not supported\n"); 618 + return -EINVAL; 619 + } 620 + 621 + return 0; 622 + } 623 + 605 624 int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn, 606 625 u8 match_criteria, 607 626 struct mlx5dr_match_param *mask, 608 627 struct mlx5dr_match_param *value) 609 628 { 610 - if (!value && (match_criteria & DR_MATCHER_CRITERIA_MISC)) { 629 + if (value) 630 + return 0; 631 + 632 + if (match_criteria & DR_MATCHER_CRITERIA_MISC) { 611 633 if (mask->misc.source_port && mask->misc.source_port != 0xffff) { 612 634 mlx5dr_err(dmn, 613 635 "Partial mask source_port is not supported\n"); ··· 642 620 return -EINVAL; 643 621 } 644 622 } 623 + 624 + if ((match_criteria & DR_MATCHER_CRITERIA_OUTER) && 625 + dr_ste_build_pre_check_spec(dmn, &mask->outer)) 626 + return -EINVAL; 627 + 628 + if ((match_criteria & DR_MATCHER_CRITERIA_INNER) && 629 + dr_ste_build_pre_check_spec(dmn, &mask->inner)) 630 + return -EINVAL; 645 631 646 632 return 0; 647 633 }
+10
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
··· 798 798 (_misc3)->icmpv4_code || \ 799 799 (_misc3)->icmpv4_header_data) 800 800 801 + #define DR_MASK_IS_SRC_IP_SET(_spec) ((_spec)->src_ip_127_96 || \ 802 + (_spec)->src_ip_95_64 || \ 803 + (_spec)->src_ip_63_32 || \ 804 + (_spec)->src_ip_31_0) 805 + 806 + #define DR_MASK_IS_DST_IP_SET(_spec) ((_spec)->dst_ip_127_96 || \ 807 + (_spec)->dst_ip_95_64 || \ 808 + (_spec)->dst_ip_63_32 || \ 809 + (_spec)->dst_ip_31_0) 810 + 801 811 struct mlx5dr_esw_caps { 802 812 u64 drop_icm_address_rx; 803 813 u64 drop_icm_address_tx;
+26 -7
drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
··· 233 233 dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID; 234 234 } 235 235 236 - #define MLX5_FLOW_CONTEXT_ACTION_MAX 32 236 + /* We want to support a rule with 32 destinations, which means we need to 237 + * account for 32 destinations plus usually a counter plus one more action 238 + * for a multi-destination flow table. 239 + */ 240 + #define MLX5_FLOW_CONTEXT_ACTION_MAX 34 237 241 static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, 238 242 struct mlx5_flow_table *ft, 239 243 struct mlx5_flow_group *group, ··· 407 403 enum mlx5_flow_destination_type type = dst->dest_attr.type; 408 404 u32 id; 409 405 410 - if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX || 411 - num_term_actions >= MLX5_FLOW_CONTEXT_ACTION_MAX) { 412 - err = -ENOSPC; 406 + if (fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX || 407 + num_term_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) { 408 + err = -EOPNOTSUPP; 413 409 goto free_actions; 414 410 } 415 411 ··· 482 478 MLX5_FLOW_DESTINATION_TYPE_COUNTER) 483 479 continue; 484 480 485 - if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) { 486 - err = -ENOSPC; 481 + if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX || 482 + fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) { 483 + err = -EOPNOTSUPP; 487 484 goto free_actions; 488 485 } 489 486 ··· 504 499 params.match_sz = match_sz; 505 500 params.match_buf = (u64 *)fte->val; 506 501 if (num_term_actions == 1) { 507 - if (term_actions->reformat) 502 + if (term_actions->reformat) { 503 + if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) { 504 + err = -EOPNOTSUPP; 505 + goto free_actions; 506 + } 508 507 actions[num_actions++] = term_actions->reformat; 508 + } 509 509 510 + if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) { 511 + err = -EOPNOTSUPP; 512 + goto free_actions; 513 + } 510 514 actions[num_actions++] = term_actions->dest; 511 515 } else if (num_term_actions > 1) { 512 516 bool ignore_flow_level = 513 517 !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL); 514 518 519 + if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX || 520 + fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) { 521 + err = -EOPNOTSUPP; 522 + goto free_actions; 523 + } 515 524 tmp_action = mlx5dr_action_create_mult_dest_tbl(domain, 516 525 term_actions, 517 526 num_term_actions,
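fs_dr.c raises MLX5_FLOW_CONTEXT_ACTION_MAX from 32 to 34 (32 destinations plus, typically, a counter and one multi-destination table action, per the new comment) and guards every append into the fixed action array, returning -EOPNOTSUPP on overflow instead of writing past the end. The guard-before-append pattern on its own:

    #include <errno.h>
    #include <stdio.h>

    /* 32 destinations + counter + multi-destination table action. */
    #define MAX_ACTIONS 34

    struct builder {
            int actions[MAX_ACTIONS];
            int num;
    };

    /* Check capacity before every single append, as the hunks now do. */
    static int push_action(struct builder *b, int act)
    {
            if (b->num == MAX_ACTIONS)
                    return -EOPNOTSUPP;
            b->actions[b->num++] = act;
            return 0;
    }

    int main(void)
    {
            struct builder b = { .num = 0 };
            int err = 0;

            for (int i = 0; i < 40 && !err; i++)  /* deliberately overflow */
                    err = push_action(&b, i);
            printf("stored %d actions, err=%d\n", b.num, err);
            return 0;
    }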
+5
drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
··· 160 160 * sync_ste command sets them free. 161 161 */ 162 162 struct list_head hot_list; 163 + 164 + /* Memory optimisation */ 165 + struct mlx5dr_ste *ste_arr; 166 + struct list_head *miss_list; 167 + u8 *hw_ste_arr; 163 168 }; 164 169 165 170 int mlx5dr_buddy_init(struct mlx5dr_icm_buddy_mem *buddy,
+2 -2
drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
··· 922 922 int port, bool mod) 923 923 { 924 924 struct nfp_flower_priv *priv = app->priv; 925 - int ida_idx = NFP_MAX_MAC_INDEX, err; 926 925 struct nfp_tun_offloaded_mac *entry; 926 + int ida_idx = -1, err; 927 927 u16 nfp_mac_idx = 0; 928 928 929 929 entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr); ··· 997 997 err_free_entry: 998 998 kfree(entry); 999 999 err_free_ida: 1000 - if (ida_idx != NFP_MAX_MAC_INDEX) 1000 + if (ida_idx != -1) 1001 1001 ida_simple_remove(&priv->tun.mac_off_ids, ida_idx); 1002 1002 1003 1003 return err;
+2
drivers/net/ethernet/xilinx/ll_temac_main.c
··· 1434 1434 lp->indirect_lock = devm_kmalloc(&pdev->dev, 1435 1435 sizeof(*lp->indirect_lock), 1436 1436 GFP_KERNEL); 1437 + if (!lp->indirect_lock) 1438 + return -ENOMEM; 1437 1439 spin_lock_init(lp->indirect_lock); 1438 1440 } 1439 1441
+2 -2
drivers/net/hamradio/6pack.c
··· 668 668 */ 669 669 netif_stop_queue(sp->dev); 670 670 671 + unregister_netdev(sp->dev); 672 + 671 673 del_timer_sync(&sp->tx_t); 672 674 del_timer_sync(&sp->resync_t); 673 - 674 - unregister_netdev(sp->dev); 675 675 676 676 /* Free all 6pack frame buffers after unreg. */ 677 677 kfree(sp->rbuff);
+5 -1
drivers/net/mdio/mdio-ipq4019.c
··· 200 200 if (ret) 201 201 return ret; 202 202 203 - return clk_prepare_enable(priv->mdio_clk); 203 + ret = clk_prepare_enable(priv->mdio_clk); 204 + if (ret == 0) 205 + mdelay(10); 206 + 207 + return ret; 204 208 } 205 209 206 210 static int ipq4019_mdio_probe(struct platform_device *pdev)
+1 -1
drivers/net/usb/sr9700.c
··· 413 413 /* ignore the CRC length */ 414 414 len = (skb->data[1] | (skb->data[2] << 8)) - 4; 415 415 416 - if (len > ETH_FRAME_LEN) 416 + if (len > ETH_FRAME_LEN || len > skb->len) 417 417 return 0; 418 418 419 419 /* the last packet of current skb */
+5 -9
drivers/net/xen-netback/xenbus.c
··· 256 256 unsigned int queue_index; 257 257 258 258 xen_unregister_watchers(vif); 259 + xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status"); 259 260 #ifdef CONFIG_DEBUG_FS 260 261 xenvif_debugfs_delif(vif); 261 262 #endif /* CONFIG_DEBUG_FS */ ··· 676 675 677 676 /* Not interested in this watch anymore. */ 678 677 unregister_hotplug_status_watch(be); 679 - xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status"); 680 678 } 681 679 kfree(str); 682 680 } ··· 824 824 xenvif_carrier_on(be->vif); 825 825 826 826 unregister_hotplug_status_watch(be); 827 - if (xenbus_exists(XBT_NIL, dev->nodename, "hotplug-status")) { 828 - err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, 829 - NULL, hotplug_status_changed, 830 - "%s/%s", dev->nodename, 831 - "hotplug-status"); 832 - if (err) 833 - goto err; 827 + err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, NULL, 828 + hotplug_status_changed, 829 + "%s/%s", dev->nodename, "hotplug-status"); 830 + if (!err) 834 831 be->have_hotplug_status_watch = 1; 835 - } 836 832 837 833 netif_tx_wake_all_queues(be->vif->dev); 838 834
+9 -12
drivers/nvme/host/core.c
··· 1723 1723 return 0; 1724 1724 } 1725 1725 1726 - static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id) 1726 + static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id) 1727 1727 { 1728 1728 struct nvme_ctrl *ctrl = ns->ctrl; 1729 1729 ··· 1739 1739 1740 1740 ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS); 1741 1741 if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)) 1742 - return 0; 1742 + return; 1743 + 1743 1744 if (ctrl->ops->flags & NVME_F_FABRICS) { 1744 1745 /* 1745 1746 * The NVMe over Fabrics specification only supports metadata as ··· 1748 1747 * remap the separate metadata buffer from the block layer. 1749 1748 */ 1750 1749 if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT))) 1751 - return -EINVAL; 1750 + return; 1752 1751 1753 1752 ns->features |= NVME_NS_EXT_LBAS; 1754 1753 ··· 1775 1774 else 1776 1775 ns->features |= NVME_NS_METADATA_SUPPORTED; 1777 1776 } 1778 - 1779 - return 0; 1780 1777 } 1781 1778 1782 1779 static void nvme_set_queue_limits(struct nvme_ctrl *ctrl, ··· 1915 1916 ns->lba_shift = id->lbaf[lbaf].ds; 1916 1917 nvme_set_queue_limits(ns->ctrl, ns->queue); 1917 1918 1918 - ret = nvme_configure_metadata(ns, id); 1919 - if (ret) 1920 - goto out_unfreeze; 1919 + nvme_configure_metadata(ns, id); 1921 1920 nvme_set_chunk_sectors(ns, id); 1922 1921 nvme_update_disk_info(ns->disk, ns, id); 1923 1922 ··· 1931 1934 if (blk_queue_is_zoned(ns->queue)) { 1932 1935 ret = nvme_revalidate_zones(ns); 1933 1936 if (ret && !nvme_first_scan(ns->disk)) 1934 - goto out; 1937 + return ret; 1935 1938 } 1936 1939 1937 1940 if (nvme_ns_head_multipath(ns->head)) { ··· 1946 1949 return 0; 1947 1950 1948 1951 out_unfreeze: 1949 - blk_mq_unfreeze_queue(ns->disk->queue); 1950 - out: 1951 1952 /* 1952 1953 * If probing fails due an unsupported feature, hide the block device, 1953 1954 * but still allow other access. 1954 1955 */ 1955 1956 if (ret == -ENODEV) { 1956 1957 ns->disk->flags |= GENHD_FL_HIDDEN; 1958 + set_bit(NVME_NS_READY, &ns->flags); 1957 1959 ret = 0; 1958 1960 } 1961 + blk_mq_unfreeze_queue(ns->disk->queue); 1959 1962 return ret; 1960 1963 } 1961 1964 ··· 4571 4574 if (test_and_set_bit(NVME_NS_DEAD, &ns->flags)) 4572 4575 return; 4573 4576 4574 - blk_set_queue_dying(ns->queue); 4577 + blk_mark_disk_dead(ns->disk); 4575 4578 nvme_start_ns_queue(ns); 4576 4579 4577 4580 set_capacity_and_notify(ns->disk, 0);
+1 -1
drivers/nvme/host/multipath.c
··· 848 848 { 849 849 if (!head->disk) 850 850 return; 851 - blk_set_queue_dying(head->disk->queue); 851 + blk_mark_disk_dead(head->disk); 852 852 /* make sure all pending bios are cleaned up */ 853 853 kblockd_schedule_work(&head->requeue_work); 854 854 flush_work(&head->requeue_work);
+49 -14
drivers/nvme/host/tcp.c
··· 44 44 u32 data_len; 45 45 u32 pdu_len; 46 46 u32 pdu_sent; 47 + u32 h2cdata_left; 48 + u32 h2cdata_offset; 47 49 u16 ttag; 48 50 __le16 status; 49 51 struct list_head entry; ··· 97 95 struct nvme_tcp_request *request; 98 96 99 97 int queue_size; 98 + u32 maxh2cdata; 100 99 size_t cmnd_capsule_len; 101 100 struct nvme_tcp_ctrl *ctrl; 102 101 unsigned long flags; ··· 575 572 return ret; 576 573 } 577 574 578 - static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req, 579 - struct nvme_tcp_r2t_pdu *pdu) 575 + static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req) 580 576 { 581 577 struct nvme_tcp_data_pdu *data = req->pdu; 582 578 struct nvme_tcp_queue *queue = req->queue; 583 579 struct request *rq = blk_mq_rq_from_pdu(req); 580 + u32 h2cdata_sent = req->pdu_len; 584 581 u8 hdgst = nvme_tcp_hdgst_len(queue); 585 582 u8 ddgst = nvme_tcp_ddgst_len(queue); 586 583 587 584 req->state = NVME_TCP_SEND_H2C_PDU; 588 585 req->offset = 0; 589 - req->pdu_len = le32_to_cpu(pdu->r2t_length); 586 + req->pdu_len = min(req->h2cdata_left, queue->maxh2cdata); 590 587 req->pdu_sent = 0; 588 + req->h2cdata_left -= req->pdu_len; 589 + req->h2cdata_offset += h2cdata_sent; 591 590 592 591 memset(data, 0, sizeof(*data)); 593 592 data->hdr.type = nvme_tcp_h2c_data; 594 - data->hdr.flags = NVME_TCP_F_DATA_LAST; 593 + if (!req->h2cdata_left) 594 + data->hdr.flags = NVME_TCP_F_DATA_LAST; 595 595 if (queue->hdr_digest) 596 596 data->hdr.flags |= NVME_TCP_F_HDGST; 597 597 if (queue->data_digest) ··· 603 597 data->hdr.pdo = data->hdr.hlen + hdgst; 604 598 data->hdr.plen = 605 599 cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst); 606 - data->ttag = pdu->ttag; 600 + data->ttag = req->ttag; 607 601 data->command_id = nvme_cid(rq); 608 - data->data_offset = pdu->r2t_offset; 602 + data->data_offset = cpu_to_le32(req->h2cdata_offset); 609 603 data->data_length = cpu_to_le32(req->pdu_len); 610 604 } 611 605 ··· 615 609 struct nvme_tcp_request *req; 616 610 struct request *rq; 617 611 u32 r2t_length = le32_to_cpu(pdu->r2t_length); 612 + u32 r2t_offset = le32_to_cpu(pdu->r2t_offset); 618 613 619 614 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id); 620 615 if (!rq) { ··· 640 633 return -EPROTO; 641 634 } 642 635 643 - if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) { 636 + if (unlikely(r2t_offset < req->data_sent)) { 644 637 dev_err(queue->ctrl->ctrl.device, 645 638 "req %d unexpected r2t offset %u (expected %zu)\n", 646 - rq->tag, le32_to_cpu(pdu->r2t_offset), req->data_sent); 639 + rq->tag, r2t_offset, req->data_sent); 647 640 return -EPROTO; 648 641 } 649 642 650 - nvme_tcp_setup_h2c_data_pdu(req, pdu); 643 + req->pdu_len = 0; 644 + req->h2cdata_left = r2t_length; 645 + req->h2cdata_offset = r2t_offset; 646 + req->ttag = pdu->ttag; 647 + 648 + nvme_tcp_setup_h2c_data_pdu(req); 651 649 nvme_tcp_queue_request(req, false, true); 652 650 653 651 return 0; ··· 940 928 { 941 929 struct nvme_tcp_queue *queue = req->queue; 942 930 int req_data_len = req->data_len; 931 + u32 h2cdata_left = req->h2cdata_left; 943 932 944 933 while (true) { 945 934 struct page *page = nvme_tcp_req_cur_page(req); ··· 985 972 req->state = NVME_TCP_SEND_DDGST; 986 973 req->offset = 0; 987 974 } else { 988 - nvme_tcp_done_send_req(queue); 975 + if (h2cdata_left) 976 + nvme_tcp_setup_h2c_data_pdu(req); 977 + else 978 + nvme_tcp_done_send_req(queue); 989 979 } 990 980 return 1; 991 981 } ··· 1046 1030 if (queue->hdr_digest && !req->offset) 1047 1031 nvme_tcp_hdgst(queue->snd_hash, pdu, 
sizeof(*pdu)); 1048 1032 1049 - ret = kernel_sendpage(queue->sock, virt_to_page(pdu), 1050 - offset_in_page(pdu) + req->offset, len, 1051 - MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST); 1033 + if (!req->h2cdata_left) 1034 + ret = kernel_sendpage(queue->sock, virt_to_page(pdu), 1035 + offset_in_page(pdu) + req->offset, len, 1036 + MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST); 1037 + else 1038 + ret = sock_no_sendpage(queue->sock, virt_to_page(pdu), 1039 + offset_in_page(pdu) + req->offset, len, 1040 + MSG_DONTWAIT | MSG_MORE); 1052 1041 if (unlikely(ret <= 0)) 1053 1042 return ret; 1054 1043 ··· 1073 1052 { 1074 1053 struct nvme_tcp_queue *queue = req->queue; 1075 1054 size_t offset = req->offset; 1055 + u32 h2cdata_left = req->h2cdata_left; 1076 1056 int ret; 1077 1057 struct msghdr msg = { .msg_flags = MSG_DONTWAIT }; 1078 1058 struct kvec iov = { ··· 1091 1069 return ret; 1092 1070 1093 1071 if (offset + ret == NVME_TCP_DIGEST_LENGTH) { 1094 - nvme_tcp_done_send_req(queue); 1072 + if (h2cdata_left) 1073 + nvme_tcp_setup_h2c_data_pdu(req); 1074 + else 1075 + nvme_tcp_done_send_req(queue); 1095 1076 return 1; 1096 1077 } 1097 1078 ··· 1286 1261 struct msghdr msg = {}; 1287 1262 struct kvec iov; 1288 1263 bool ctrl_hdgst, ctrl_ddgst; 1264 + u32 maxh2cdata; 1289 1265 int ret; 1290 1266 1291 1267 icreq = kzalloc(sizeof(*icreq), GFP_KERNEL); ··· 1369 1343 nvme_tcp_queue_id(queue), icresp->cpda); 1370 1344 goto free_icresp; 1371 1345 } 1346 + 1347 + maxh2cdata = le32_to_cpu(icresp->maxdata); 1348 + if ((maxh2cdata % 4) || (maxh2cdata < NVME_TCP_MIN_MAXH2CDATA)) { 1349 + pr_err("queue %d: invalid maxh2cdata returned %u\n", 1350 + nvme_tcp_queue_id(queue), maxh2cdata); 1351 + goto free_icresp; 1352 + } 1353 + queue->maxh2cdata = maxh2cdata; 1372 1354 1373 1355 ret = 0; 1374 1356 free_icresp: ··· 2363 2329 req->data_sent = 0; 2364 2330 req->pdu_len = 0; 2365 2331 req->pdu_sent = 0; 2332 + req->h2cdata_left = 0; 2366 2333 req->data_len = blk_rq_nr_phys_segments(rq) ? 2367 2334 blk_rq_payload_bytes(rq) : 0; 2368 2335 req->curr_bio = rq->bio;
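The nvme-tcp hunks above stop sending one oversized H2CData PDU per R2T: each PDU is now capped at the MAXH2CDATA value the controller advertised in ICResp (validated to be a multiple of 4 and at least NVME_TCP_MIN_MAXH2CDATA), and nvme_tcp_setup_h2c_data_pdu() is re-armed after each send until h2cdata_left drains. A minimal userspace sketch of that chunking arithmetic, with illustrative values only (not the driver code):

#include <stdio.h>
#include <stdint.h>

/* Illustrative sketch: models the arithmetic of the reworked
 * nvme_tcp_setup_h2c_data_pdu(), not the driver itself. In the driver the
 * next chunk is set up only after the previous PDU (and digest) finished
 * sending. */
static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

int main(void)
{
	uint32_t maxh2cdata = 4096;	/* from ICResp; multiple of 4, >= 4096 */
	uint32_t h2cdata_left = 10000;	/* r2t_length from the R2T PDU */
	uint32_t h2cdata_offset = 0;	/* r2t_offset from the R2T PDU */
	uint32_t pdu_len = 0;

	do {
		uint32_t sent = pdu_len;	/* bytes covered by the previous PDU */

		pdu_len = min_u32(h2cdata_left, maxh2cdata);
		h2cdata_left -= pdu_len;
		h2cdata_offset += sent;
		printf("H2CData offset=%u length=%u%s\n", h2cdata_offset,
		       pdu_len, h2cdata_left ? "" : " [LAST]");
	} while (h2cdata_left);
	return 0;
}

For a 10000-byte R2T against MAXH2CDATA of 4096 this emits three PDUs (4096, 4096, 1808), with NVME_TCP_F_DATA_LAST set only on the final one, matching the data->hdr.flags handling in the hunk.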
+1 -1
drivers/of/fdt.c
··· 648 648 } 649 649 650 650 fdt_scan_reserved_mem(); 651 - fdt_init_reserved_mem(); 652 651 fdt_reserve_elfcorehdr(); 652 + fdt_init_reserved_mem(); 653 653 } 654 654 655 655 /**
+8 -8
drivers/of/unittest.c
··· 513 513 memset(&args, 0, sizeof(args)); 514 514 515 515 EXPECT_BEGIN(KERN_INFO, 516 - "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found -1"); 516 + "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found 1"); 517 517 518 518 rc = of_parse_phandle_with_args(np, "phandle-list-bad-args", 519 519 "#phandle-cells", 1, &args); 520 520 521 521 EXPECT_END(KERN_INFO, 522 - "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found -1"); 522 + "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found 1"); 523 523 524 524 unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc); 525 525 526 526 EXPECT_BEGIN(KERN_INFO, 527 - "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found -1"); 527 + "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found 1"); 528 528 529 529 rc = of_count_phandle_with_args(np, "phandle-list-bad-args", 530 530 "#phandle-cells"); 531 531 532 532 EXPECT_END(KERN_INFO, 533 - "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found -1"); 533 + "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found 1"); 534 534 535 535 unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc); 536 536 } ··· 670 670 memset(&args, 0, sizeof(args)); 671 671 672 672 EXPECT_BEGIN(KERN_INFO, 673 - "OF: /testcase-data/phandle-tests/consumer-b: #phandle-cells = 2 found -1"); 673 + "OF: /testcase-data/phandle-tests/consumer-b: #phandle-cells = 2 found 1"); 674 674 675 675 rc = of_parse_phandle_with_args_map(np, "phandle-list-bad-args", 676 676 "phandle", 1, &args); 677 677 EXPECT_END(KERN_INFO, 678 - "OF: /testcase-data/phandle-tests/consumer-b: #phandle-cells = 2 found -1"); 678 + "OF: /testcase-data/phandle-tests/consumer-b: #phandle-cells = 2 found 1"); 679 679 680 680 unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc); 681 681 } ··· 1257 1257 unittest(pdev, "device 2 creation failed\n"); 1258 1258 1259 1259 EXPECT_BEGIN(KERN_INFO, 1260 - "platform testcase-data:testcase-device2: IRQ index 0 not found"); 1260 + "platform testcase-data:testcase-device2: error -ENXIO: IRQ index 0 not found"); 1261 1261 1262 1262 irq = platform_get_irq(pdev, 0); 1263 1263 1264 1264 EXPECT_END(KERN_INFO, 1265 - "platform testcase-data:testcase-device2: IRQ index 0 not found"); 1265 + "platform testcase-data:testcase-device2: error -ENXIO: IRQ index 0 not found"); 1266 1266 1267 1267 unittest(irq < 0 && irq != -EPROBE_DEFER, 1268 1268 "device parsing error failed - %d\n", irq);
+2 -1
drivers/pci/controller/pci-mvebu.c
··· 1329 1329 * indirectly via kernel emulated PCI bridge driver. 1330 1330 */ 1331 1331 mvebu_pcie_setup_hw(port); 1332 - mvebu_pcie_set_local_dev_nr(port, 0); 1332 + mvebu_pcie_set_local_dev_nr(port, 1); 1333 + mvebu_pcie_set_local_bus_nr(port, 0); 1333 1334 } 1334 1335 1335 1336 pcie->nports = i;
+9 -5
drivers/pci/quirks.c
··· 5344 5344 */ 5345 5345 static void quirk_amd_harvest_no_ats(struct pci_dev *pdev) 5346 5346 { 5347 - if ((pdev->device == 0x7312 && pdev->revision != 0x00) || 5348 - (pdev->device == 0x7340 && pdev->revision != 0xc5) || 5349 - (pdev->device == 0x7341 && pdev->revision != 0x00)) 5350 - return; 5351 - 5352 5347 if (pdev->device == 0x15d8) { 5353 5348 if (pdev->revision == 0xcf && 5354 5349 pdev->subsystem_vendor == 0xea50 && ··· 5365 5370 /* AMD Iceland dGPU */ 5366 5371 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats); 5367 5372 /* AMD Navi10 dGPU */ 5373 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7310, quirk_amd_harvest_no_ats); 5368 5374 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7312, quirk_amd_harvest_no_ats); 5375 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7318, quirk_amd_harvest_no_ats); 5376 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7319, quirk_amd_harvest_no_ats); 5377 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x731a, quirk_amd_harvest_no_ats); 5378 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x731b, quirk_amd_harvest_no_ats); 5379 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x731e, quirk_amd_harvest_no_ats); 5380 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x731f, quirk_amd_harvest_no_ats); 5369 5381 /* AMD Navi14 dGPU */ 5370 5382 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats); 5371 5383 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7341, quirk_amd_harvest_no_ats); 5384 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7347, quirk_amd_harvest_no_ats); 5385 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x734f, quirk_amd_harvest_no_ats); 5372 5386 /* AMD Raven platform iGPU */ 5373 5387 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x15d8, quirk_amd_harvest_no_ats); 5374 5388 #endif /* CONFIG_PCI_ATS */
+10 -3
drivers/platform/surface/surface3_power.c
··· 232 232 } 233 233 bix->last_full_charg_capacity = ret; 234 234 235 - /* get serial number */ 235 + /* 236 + * Get serial number, on some devices (with unofficial replacement 237 + * battery?) reading any of the serial number range addresses gets 238 + * nacked; in this case just leave the serial number empty. 239 + */ 236 240 ret = i2c_smbus_read_i2c_block_data(client, MSHW0011_BAT0_REG_SERIAL_NO, 237 241 sizeof(buf), buf); 238 - if (ret != sizeof(buf)) { 242 + if (ret == -EREMOTEIO) { 243 + /* no serial number available */ 244 + } else if (ret != sizeof(buf)) { 239 245 dev_err(&client->dev, "Error reading serial no: %d\n", ret); 240 246 return ret; 247 + } else { 248 + snprintf(bix->serial, ARRAY_SIZE(bix->serial), "%3pE%6pE", buf + 7, buf); 241 249 } 242 - snprintf(bix->serial, ARRAY_SIZE(bix->serial), "%3pE%6pE", buf + 7, buf); 243 250 244 251 /* get cycle count */ 245 252 ret = i2c_smbus_read_word_data(client, MSHW0011_BAT0_REG_CYCLE_CNT);
+33 -9
drivers/platform/x86/amd-pmc.c
··· 21 21 #include <linux/module.h> 22 22 #include <linux/pci.h> 23 23 #include <linux/platform_device.h> 24 + #include <linux/pm_qos.h> 24 25 #include <linux/rtc.h> 25 26 #include <linux/suspend.h> 26 27 #include <linux/seq_file.h> ··· 86 85 #define PMC_MSG_DELAY_MIN_US 50 87 86 #define RESPONSE_REGISTER_LOOP_MAX 20000 88 87 88 + /* QoS request for letting CPUs in idle states, but not the deepest */ 89 + #define AMD_PMC_MAX_IDLE_STATE_LATENCY 3 90 + 89 91 #define SOC_SUBSYSTEM_IP_MAX 12 90 92 #define DELAY_MIN_US 2000 91 93 #define DELAY_MAX_US 3000 ··· 135 131 struct device *dev; 136 132 struct pci_dev *rdev; 137 133 struct mutex lock; /* generic mutex lock */ 134 + struct pm_qos_request amd_pmc_pm_qos_req; 138 135 #if IS_ENABLED(CONFIG_DEBUG_FS) 139 136 struct dentry *dbgfs_dir; 140 137 #endif /* CONFIG_DEBUG_FS */ ··· 526 521 rc = rtc_alarm_irq_enable(rtc_device, 0); 527 522 dev_dbg(pdev->dev, "wakeup timer programmed for %lld seconds\n", duration); 528 523 524 + /* 525 + * Prevent CPUs from getting into deep idle states while sending OS_HINT 526 + * which is otherwise generally safe to send when at least one of the CPUs 527 + * is not in deep idle states. 528 + */ 529 + cpu_latency_qos_update_request(&pdev->amd_pmc_pm_qos_req, AMD_PMC_MAX_IDLE_STATE_LATENCY); 530 + wake_up_all_idle_cpus(); 531 + 529 532 return rc; 530 533 } 531 534 ··· 551 538 /* Activate CZN specific RTC functionality */ 552 539 if (pdev->cpu_id == AMD_CPU_ID_CZN) { 553 540 rc = amd_pmc_verify_czn_rtc(pdev, &arg); 554 - if (rc < 0) 555 - return rc; 541 + if (rc) 542 + goto fail; 556 543 } 557 544 558 545 /* Dump the IdleMask before we send hint to SMU */ 559 546 amd_pmc_idlemask_read(pdev, dev, NULL); 560 547 msg = amd_pmc_get_os_hint(pdev); 561 548 rc = amd_pmc_send_cmd(pdev, arg, NULL, msg, 0); 562 - if (rc) 549 + if (rc) { 563 550 dev_err(pdev->dev, "suspend failed\n"); 551 + goto fail; 552 + } 564 553 565 554 if (enable_stb) 566 555 rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_PREDEF); 567 - if (rc) { 556 + if (rc) { 568 557 dev_err(pdev->dev, "error writing to STB\n"); 569 - return rc; 558 + goto fail; 570 559 } 571 560 561 + return 0; 562 + fail: 563 + if (pdev->cpu_id == AMD_CPU_ID_CZN) 564 + cpu_latency_qos_update_request(&pdev->amd_pmc_pm_qos_req, 565 + PM_QOS_DEFAULT_VALUE); 572 566 return rc; 573 567 } 574 568 ··· 599 579 /* Write data incremented by 1 to distinguish in stb_read */ 600 580 if (enable_stb) 601 581 rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_PREDEF + 1); 602 - if (rc) { 582 + if (rc) 603 583 dev_err(pdev->dev, "error writing to STB\n"); 604 - return rc; 605 - } 606 584 607 - return 0; 585 + /* Restore the QoS request back to defaults if it was set */ 586 + if (pdev->cpu_id == AMD_CPU_ID_CZN) 587 + cpu_latency_qos_update_request(&pdev->amd_pmc_pm_qos_req, 588 + PM_QOS_DEFAULT_VALUE); 589 + 590 + return rc; 608 591 } 609 592 610 593 static const struct dev_pm_ops amd_pmc_pm_ops = { ··· 745 722 amd_pmc_get_smu_version(dev); 746 723 platform_set_drvdata(pdev, dev); 747 724 amd_pmc_dbgfs_register(dev); 725 + cpu_latency_qos_add_request(&dev->amd_pmc_pm_qos_req, PM_QOS_DEFAULT_VALUE); 748 726 return 0; 749 727 750 728 err_pci_dev_put:
+1 -1
drivers/platform/x86/asus-wmi.c
··· 2223 2223 2224 2224 err = fan_curve_get_factory_default(asus, fan_dev); 2225 2225 if (err) { 2226 - if (err == -ENODEV) 2226 + if (err == -ENODEV || err == -ENODATA) 2227 2227 return 0; 2228 2228 return err; 2229 2229 }
+2 -1
drivers/platform/x86/intel/int3472/tps68470_board_data.c
··· 100 100 .dev_id = "i2c-INT347A:00", 101 101 .table = { 102 102 GPIO_LOOKUP("tps68470-gpio", 9, "reset", GPIO_ACTIVE_LOW), 103 - GPIO_LOOKUP("tps68470-gpio", 7, "powerdown", GPIO_ACTIVE_LOW) 103 + GPIO_LOOKUP("tps68470-gpio", 7, "powerdown", GPIO_ACTIVE_LOW), 104 + { } 104 105 } 105 106 }; 106 107
+1
drivers/platform/x86/thinkpad_acpi.c
··· 8703 8703 TPACPI_Q_LNV3('N', '4', '0', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (4th gen) */ 8704 8704 TPACPI_Q_LNV3('N', '3', '0', TPACPI_FAN_2CTL), /* P15 (1st gen) / P15v (1st gen) */ 8705 8705 TPACPI_Q_LNV3('N', '3', '2', TPACPI_FAN_2CTL), /* X1 Carbon (9th gen) */ 8706 + TPACPI_Q_LNV3('N', '3', '7', TPACPI_FAN_2CTL), /* T15g (2nd gen) */ 8706 8707 TPACPI_Q_LNV3('N', '1', 'O', TPACPI_FAN_NOFAN), /* X1 Tablet (2nd gen) */ 8707 8708 };
+3
drivers/power/supply/bq256xx_charger.c
··· 1523 1523 BQ256XX_WDT_BIT_SHIFT); 1524 1524 1525 1525 ret = power_supply_get_battery_info(bq->charger, &bat_info); 1526 + if (ret == -ENOMEM) 1527 + return ret; 1528 + 1526 1529 if (ret) { 1527 1530 dev_warn(bq->dev, "battery info missing, default values will be applied\n"); 1528 1531
+1 -1
drivers/power/supply/cw2015_battery.c
··· 689 689 if (ret) { 690 690 /* Allocate an empty battery */ 691 691 cw_bat->battery = devm_kzalloc(&client->dev, 692 - sizeof(cw_bat->battery), 692 + sizeof(*cw_bat->battery), 693 693 GFP_KERNEL); 694 694 if (!cw_bat->battery) 695 695 return -ENOMEM;
+1
drivers/scsi/lpfc/lpfc.h
··· 592 592 #define FC_VPORT_LOGO_RCVD 0x200 /* LOGO received on vport */ 593 593 #define FC_RSCN_DISCOVERY 0x400 /* Auth all devices after RSCN */ 594 594 #define FC_LOGO_RCVD_DID_CHNG 0x800 /* FDISC on phys port detect DID chng*/ 595 + #define FC_PT2PT_NO_NVME 0x1000 /* Don't send NVME PRLI */ 595 596 #define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */ 596 597 #define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */ 597 598 #define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */
+3
drivers/scsi/lpfc/lpfc_attr.c
··· 1315 1315 pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK; 1316 1316 pmboxq->u.mb.mbxOwner = OWN_HOST; 1317 1317 1318 + if ((vport->fc_flag & FC_PT2PT) && (vport->fc_flag & FC_PT2PT_NO_NVME)) 1319 + vport->fc_flag &= ~FC_PT2PT_NO_NVME; 1320 + 1318 1321 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2); 1319 1322 1320 1323 if ((mbxstatus == MBX_SUCCESS) &&
+19 -1
drivers/scsi/lpfc/lpfc_els.c
··· 1072 1072 1073 1073 /* FLOGI failed, so there is no fabric */ 1074 1074 spin_lock_irq(shost->host_lock); 1075 - vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 1075 + vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP | 1076 + FC_PT2PT_NO_NVME); 1076 1077 spin_unlock_irq(shost->host_lock); 1077 1078 1078 1079 /* If private loop, then allow max outstanding els to be ··· 4608 4607 /* Added for Vendor specific support 4609 4608 * Just keep retrying for these Rsn / Exp codes 4610 4609 */ 4610 + if ((vport->fc_flag & FC_PT2PT) && 4611 + cmd == ELS_CMD_NVMEPRLI) { 4612 + switch (stat.un.b.lsRjtRsnCode) { 4613 + case LSRJT_UNABLE_TPC: 4614 + case LSRJT_INVALID_CMD: 4615 + case LSRJT_LOGICAL_ERR: 4616 + case LSRJT_CMD_UNSUPPORTED: 4617 + lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 4618 + "0168 NVME PRLI LS_RJT " 4619 + "reason %x port doesn't " 4620 + "support NVME, disabling NVME\n", 4621 + stat.un.b.lsRjtRsnCode); 4622 + retry = 0; 4623 + vport->fc_flag |= FC_PT2PT_NO_NVME; 4624 + goto out_retry; 4625 + } 4626 + } 4611 4627 switch (stat.un.b.lsRjtRsnCode) { 4612 4628 case LSRJT_UNABLE_TPC: 4613 4629 /* The driver has a VALID PLOGI but the rport has
+3 -2
drivers/scsi/lpfc/lpfc_nportdisc.c
··· 1961 1961 * is configured try it. 1962 1962 */ 1963 1963 ndlp->nlp_fc4_type |= NLP_FC4_FCP; 1964 - if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || 1965 - (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { 1964 + if ((!(vport->fc_flag & FC_PT2PT_NO_NVME)) && 1965 + (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH || 1966 + vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { 1966 1967 ndlp->nlp_fc4_type |= NLP_FC4_NVME; 1967 1968 /* We need to update the localport also */ 1968 1969 lpfc_nvme_update_localport(vport);
+2 -4
drivers/scsi/qedi/qedi_fw.c
··· 771 771 qedi_cmd->list_tmf_work = NULL; 772 772 } 773 773 } 774 + spin_unlock_bh(&qedi_conn->tmf_work_lock); 774 775 775 - if (!found) { 776 - spin_unlock_bh(&qedi_conn->tmf_work_lock); 776 + if (!found) 777 777 goto check_cleanup_reqs; 778 - } 779 778 780 779 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, 781 780 "TMF work, cqe->tid=0x%x, tmf flags=0x%x, cid=0x%x\n", ··· 805 806 qedi_cmd->state = CLEANUP_RECV; 806 807 unlock: 807 808 spin_unlock_bh(&conn->session->back_lock); 808 - spin_unlock_bh(&qedi_conn->tmf_work_lock); 809 809 wake_up_interruptible(&qedi_conn->wait_queue); 810 810 return; 811 811
+1 -1
drivers/scsi/ufs/ufshcd.c
··· 2681 2681 break; 2682 2682 case HCTX_TYPE_READ: 2683 2683 map->nr_queues = 0; 2684 - break; 2684 + continue; 2685 2685 default: 2686 2686 WARN_ON_ONCE(true); 2687 2687 }
+4 -11
drivers/soc/mediatek/mtk-scpsys.c
··· 411 411 return ret; 412 412 } 413 413 414 - static int init_clks(struct platform_device *pdev, struct clk **clk) 414 + static void init_clks(struct platform_device *pdev, struct clk **clk) 415 415 { 416 416 int i; 417 417 418 - for (i = CLK_NONE + 1; i < CLK_MAX; i++) { 418 + for (i = CLK_NONE + 1; i < CLK_MAX; i++) 419 419 clk[i] = devm_clk_get(&pdev->dev, clk_names[i]); 420 - if (IS_ERR(clk[i])) 421 - return PTR_ERR(clk[i]); 422 - } 423 - 424 - return 0; 425 420 } 426 421 427 422 static struct scp *init_scp(struct platform_device *pdev, ··· 426 431 { 427 432 struct genpd_onecell_data *pd_data; 428 433 struct resource *res; 429 - int i, j, ret; 434 + int i, j; 430 435 struct scp *scp; 431 436 struct clk *clk[CLK_MAX]; 432 437 ··· 481 486 482 487 pd_data->num_domains = num; 483 488 484 - ret = init_clks(pdev, clk); 485 - if (ret) 486 - return ERR_PTR(ret); 489 + init_clks(pdev, clk); 487 490 488 491 for (i = 0; i < num; i++) { 489 492 struct scp_domain *scpd = &scp->domains[i];
+14 -7
drivers/vhost/vsock.c
··· 629 629 return ret; 630 630 } 631 631 632 - static int vhost_vsock_stop(struct vhost_vsock *vsock) 632 + static int vhost_vsock_stop(struct vhost_vsock *vsock, bool check_owner) 633 633 { 634 634 size_t i; 635 - int ret; 635 + int ret = 0; 636 636 637 637 mutex_lock(&vsock->dev.mutex); 638 638 639 - ret = vhost_dev_check_owner(&vsock->dev); 640 - if (ret) 641 - goto err; 639 + if (check_owner) { 640 + ret = vhost_dev_check_owner(&vsock->dev); 641 + if (ret) 642 + goto err; 643 + } 642 644 643 645 for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { 644 646 struct vhost_virtqueue *vq = &vsock->vqs[i]; ··· 755 753 * inefficient. Room for improvement here. */ 756 754 vsock_for_each_connected_socket(vhost_vsock_reset_orphans); 757 755 758 - vhost_vsock_stop(vsock); 756 + /* Don't check the owner, because we are in the release path, so we 757 + * need to stop the vsock device in any case. 758 + * vhost_vsock_stop() can not fail in this case, so we don't need to 759 + * check the return code. 760 + */ 761 + vhost_vsock_stop(vsock, false); 759 762 vhost_vsock_flush(vsock); 760 763 vhost_dev_stop(&vsock->dev); 761 764 ··· 875 868 if (start) 876 869 return vhost_vsock_start(vsock); 877 870 else 878 - return vhost_vsock_stop(vsock); 871 + return vhost_vsock_stop(vsock, true); 879 872 case VHOST_GET_FEATURES: 880 873 features = VHOST_VSOCK_FEATURES; 881 874 if (copy_to_user(argp, &features, sizeof(features)))
+6 -3
fs/cifs/cifsacl.c
··· 949 949 pnntace = (struct cifs_ace *) (nacl_base + nsize); 950 950 nsize += setup_special_mode_ACE(pnntace, nmode); 951 951 num_aces++; 952 + pnntace = (struct cifs_ace *) (nacl_base + nsize); 953 + nsize += setup_authusers_ACE(pnntace); 954 + num_aces++; 952 955 goto set_size; 953 956 } 954 957 ··· 1300 1297 1301 1298 if (uid_valid(uid)) { /* chown */ 1302 1299 uid_t id; 1303 - nowner_sid_ptr = kmalloc(sizeof(struct cifs_sid), 1300 + nowner_sid_ptr = kzalloc(sizeof(struct cifs_sid), 1304 1301 GFP_KERNEL); 1305 1302 if (!nowner_sid_ptr) { 1306 1303 rc = -ENOMEM; ··· 1329 1326 } 1330 1327 if (gid_valid(gid)) { /* chgrp */ 1331 1328 gid_t id; 1332 - ngroup_sid_ptr = kmalloc(sizeof(struct cifs_sid), 1329 + ngroup_sid_ptr = kzalloc(sizeof(struct cifs_sid), 1333 1330 GFP_KERNEL); 1334 1331 if (!ngroup_sid_ptr) { 1335 1332 rc = -ENOMEM; ··· 1616 1613 nsecdesclen = secdesclen; 1617 1614 if (pnmode && *pnmode != NO_CHANGE_64) { /* chmod */ 1618 1615 if (mode_from_sid) 1619 - nsecdesclen += sizeof(struct cifs_ace); 1616 + nsecdesclen += 2 * sizeof(struct cifs_ace); 1620 1617 else /* cifsacl */ 1621 1618 nsecdesclen += 5 * sizeof(struct cifs_ace); 1622 1619 } else { /* chown */
+1
fs/cifs/cifsfs.c
··· 919 919 920 920 out_super: 921 921 deactivate_locked_super(sb); 922 + return root; 922 923 out: 923 924 if (cifs_sb) { 924 925 kfree(cifs_sb->prepath);
+2 -2
fs/cifs/fs_context.c
··· 149 149 fsparam_u32("echo_interval", Opt_echo_interval), 150 150 fsparam_u32("max_credits", Opt_max_credits), 151 151 fsparam_u32("handletimeout", Opt_handletimeout), 152 - fsparam_u32("snapshot", Opt_snapshot), 152 + fsparam_u64("snapshot", Opt_snapshot), 153 153 fsparam_u32("max_channels", Opt_max_channels), 154 154 155 155 /* Mount options which take string value */ ··· 1078 1078 ctx->echo_interval = result.uint_32; 1079 1079 break; 1080 1080 case Opt_snapshot: 1081 - ctx->snapshot_time = result.uint_32; 1081 + ctx->snapshot_time = result.uint_64; 1082 1082 break; 1083 1083 case Opt_max_credits: 1084 1084 if (result.uint_32 < 20 || result.uint_32 > 60000) {
+6 -5
fs/cifs/sess.c
··· 127 127 struct cifs_server_iface *ifaces = NULL; 128 128 size_t iface_count; 129 129 130 - if (ses->server->dialect < SMB30_PROT_ID) { 131 - cifs_dbg(VFS, "multichannel is not supported on this protocol version, use 3.0 or above\n"); 132 - return 0; 133 - } 134 - 135 130 spin_lock(&ses->chan_lock); 136 131 137 132 new_chan_count = old_chan_count = ses->chan_count; ··· 137 142 cifs_dbg(FYI, 138 143 "ses already at max_channels (%zu), nothing to open\n", 139 144 ses->chan_max); 145 + return 0; 146 + } 147 + 148 + if (ses->server->dialect < SMB30_PROT_ID) { 149 + spin_unlock(&ses->chan_lock); 150 + cifs_dbg(VFS, "multichannel is not supported on this protocol version, use 3.0 or above\n"); 140 151 return 0; 141 152 } 142 153
+2
fs/cifs/xattr.c
··· 175 175 switch (handler->flags) { 176 176 case XATTR_CIFS_NTSD_FULL: 177 177 aclflags = (CIFS_ACL_OWNER | 178 + CIFS_ACL_GROUP | 178 179 CIFS_ACL_DACL | 179 180 CIFS_ACL_SACL); 180 181 break; 181 182 case XATTR_CIFS_NTSD: 182 183 aclflags = (CIFS_ACL_OWNER | 184 + CIFS_ACL_GROUP | 183 185 CIFS_ACL_DACL); 184 186 break; 185 187 case XATTR_CIFS_ACL:
+17 -7
fs/io_uring.c
··· 4567 4567 } else { 4568 4568 list_add_tail(&buf->list, &(*head)->list); 4569 4569 } 4570 + cond_resched(); 4570 4571 } 4571 4572 4572 4573 return i ? i : -ENOMEM; ··· 7694 7693 /* when returns >0, the caller should retry */ 7695 7694 static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx, 7696 7695 struct io_wait_queue *iowq, 7697 - signed long *timeout) 7696 + ktime_t timeout) 7698 7697 { 7699 7698 int ret; 7700 7699 ··· 7706 7705 if (test_bit(0, &ctx->check_cq_overflow)) 7707 7706 return 1; 7708 7707 7709 - *timeout = schedule_timeout(*timeout); 7710 - return !*timeout ? -ETIME : 1; 7708 + if (!schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS)) 7709 + return -ETIME; 7710 + return 1; 7711 7711 } 7712 7712 7713 7713 /* ··· 7721 7719 { 7722 7720 struct io_wait_queue iowq; 7723 7721 struct io_rings *rings = ctx->rings; 7724 - signed long timeout = MAX_SCHEDULE_TIMEOUT; 7722 + ktime_t timeout = KTIME_MAX; 7725 7723 int ret; 7726 7724 7727 7725 do { ··· 7737 7735 7738 7736 if (get_timespec64(&ts, uts)) 7739 7737 return -EFAULT; 7740 - timeout = timespec64_to_jiffies(&ts); 7738 + timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns()); 7741 7739 } 7742 7740 7743 7741 if (sig) { ··· 7769 7767 } 7770 7768 prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq, 7771 7769 TASK_INTERRUPTIBLE); 7772 - ret = io_cqring_wait_schedule(ctx, &iowq, &timeout); 7770 + ret = io_cqring_wait_schedule(ctx, &iowq, timeout); 7773 7771 finish_wait(&ctx->cq_wait, &iowq.wq); 7774 7772 cond_resched(); 7775 7773 } while (ret > 0); ··· 7926 7924 ret = wait_for_completion_interruptible(&data->done); 7927 7925 if (!ret) { 7928 7926 mutex_lock(&ctx->uring_lock); 7929 - break; 7927 + if (atomic_read(&data->refs) > 0) { 7928 + /* 7929 + * it has been revived by another thread while 7930 + * we were unlocked 7931 + */ 7932 + mutex_unlock(&ctx->uring_lock); 7933 + } else { 7934 + break; 7935 + } 7930 7936 } 7931 7937 7932 7938 atomic_inc(&data->refs);
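The io_uring.c hunk is easy to misread: the point is that the user's relative timespec is converted into a single absolute ktime deadline before the wait loop, and schedule_hrtimeout() runs in HRTIMER_MODE_ABS, so a wakeup-and-retry iteration can no longer restart the full timeout the way the old relative jiffies count could. A hedged userspace sketch of the same pattern (clock choice and names are illustrative, not the kernel API):

#include <stdio.h>
#include <stdint.h>
#include <time.h>

/* Illustrative sketch: convert the caller's relative timeout into one
 * absolute deadline up front, so repeated premature wakeups cannot
 * stretch the total wait. */
static uint64_t now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

int main(void)
{
	struct timespec uts = { .tv_sec = 0, .tv_nsec = 300 * 1000 * 1000 };
	/* Done once, before the wait loop (cf. timespec64_to_ktime(ts) +
	 * ktime_get_ns() in the patch). */
	uint64_t deadline = now_ns() +
			    (uint64_t)uts.tv_sec * 1000000000ull + uts.tv_nsec;
	int wakeups = 0;

	while (now_ns() < deadline) {
		/* Each iteration stands in for one premature wakeup; the
		 * deadline never moves, so the total wait stays bounded. */
		struct timespec nap = { 0, 50 * 1000 * 1000 };
		nanosleep(&nap, NULL);
		wakeups++;
	}
	printf("deadline reached after %d wakeups\n", wakeups);
	return 0;
}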
+30
fs/namespace.c
··· 469 469 } 470 470 EXPORT_SYMBOL(mnt_drop_write_file); 471 471 472 + /** 473 + * mnt_hold_writers - prevent write access to the given mount 474 + * @mnt: mnt to prevent write access to 475 + * 476 + * Prevents write access to @mnt if there are no active writers for @mnt. 477 + * This function needs to be called and return successfully before changing 478 + * properties of @mnt that need to remain stable for callers with write access 479 + * to @mnt. 480 + * 481 + * After this function has been called successfully, callers must pair it with 482 + * a call to mnt_unhold_writers() in order to stop preventing write access to 483 + * @mnt. 484 + * 485 + * Context: This function expects lock_mount_hash() to be held serializing 486 + * setting MNT_WRITE_HOLD. 487 + * Return: On success 0 is returned. 488 + * On error, -EBUSY is returned. 489 + */ 472 490 static inline int mnt_hold_writers(struct mount *mnt) 473 491 { 474 492 mnt->mnt.mnt_flags |= MNT_WRITE_HOLD; ··· 518 500 return 0; 519 501 } 520 502 503 + /** 504 + * mnt_unhold_writers - stop preventing write access to the given mount 505 + * @mnt: mnt to stop preventing write access to 506 + * 507 + * Stop preventing write access to @mnt, allowing callers to gain write access 508 + * to @mnt again. 509 + * 510 + * This function can only be called after a successful call to 511 + * mnt_hold_writers(). 512 + * 513 + * Context: This function expects lock_mount_hash() to be held. 514 + */ 521 515 static inline void mnt_unhold_writers(struct mount *mnt) 522 516 { 523 517 /*
+2 -2
fs/nfs/dir.c
··· 2010 2010 if (!res) { 2011 2011 inode = d_inode(dentry); 2012 2012 if ((lookup_flags & LOOKUP_DIRECTORY) && inode && 2013 - !S_ISDIR(inode->i_mode)) 2013 + !(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))) 2014 2014 res = ERR_PTR(-ENOTDIR); 2015 2015 else if (inode && S_ISREG(inode->i_mode)) 2016 2016 res = ERR_PTR(-EOPENSTALE); 2017 2017 } else if (!IS_ERR(res)) { 2018 2018 inode = d_inode(res); 2019 2019 if ((lookup_flags & LOOKUP_DIRECTORY) && inode && 2020 - !S_ISDIR(inode->i_mode)) { 2020 + !(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))) { 2021 2021 dput(res); 2022 2022 res = ERR_PTR(-ENOTDIR); 2023 2023 } else if (inode && S_ISREG(inode->i_mode)) {
+3 -6
fs/nfs/inode.c
··· 853 853 } 854 854 855 855 /* Flush out writes to the server in order to update c/mtime. */ 856 - if ((request_mask & (STATX_CTIME|STATX_MTIME)) && 857 - S_ISREG(inode->i_mode)) { 858 - err = filemap_write_and_wait(inode->i_mapping); 859 - if (err) 860 - goto out; 861 - } 856 + if ((request_mask & (STATX_CTIME | STATX_MTIME)) && 857 + S_ISREG(inode->i_mode)) 858 + filemap_write_and_wait(inode->i_mapping); 862 859 863 860 /* 864 861 * We may force a getattr if the user cares about atime.
+1 -2
fs/nfs/nfs4proc.c
··· 1229 1229 NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL | 1230 1230 NFS_INO_INVALID_SIZE | NFS_INO_INVALID_OTHER | 1231 1231 NFS_INO_INVALID_BLOCKS | NFS_INO_INVALID_NLINK | 1232 - NFS_INO_INVALID_MODE | NFS_INO_INVALID_XATTR | 1233 - NFS_INO_REVAL_PAGECACHE; 1232 + NFS_INO_INVALID_MODE | NFS_INO_INVALID_XATTR; 1234 1233 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode); 1235 1234 } 1236 1235 nfsi->attrtimeo_timestamp = jiffies;
+2 -1
include/linux/blkdev.h
··· 748 748 749 749 bool __must_check blk_get_queue(struct request_queue *); 750 750 extern void blk_put_queue(struct request_queue *); 751 - extern void blk_set_queue_dying(struct request_queue *); 751 + 752 + void blk_mark_disk_dead(struct gendisk *disk); 752 753 753 754 #ifdef CONFIG_BLOCK 754 755 /*
+1
include/linux/nvme-tcp.h
··· 12 12 #define NVME_TCP_DISC_PORT 8009 13 13 #define NVME_TCP_ADMIN_CCSZ SZ_8K 14 14 #define NVME_TCP_DIGEST_LENGTH 4 15 + #define NVME_TCP_MIN_MAXH2CDATA 4096 15 16 16 17 enum nvme_tcp_pfv { 17 18 NVME_TCP_PFV_1_0 = 0x0,
+2 -2
include/linux/sched/task.h
··· 54 54 extern void init_idle(struct task_struct *idle, int cpu); 55 55 56 56 extern int sched_fork(unsigned long clone_flags, struct task_struct *p); 57 - extern void sched_post_fork(struct task_struct *p, 58 - struct kernel_clone_args *kargs); 57 + extern void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs); 58 + extern void sched_post_fork(struct task_struct *p); 59 59 extern void sched_dead(struct task_struct *p); 60 60 61 61 void __noreturn do_task_dead(void);
+1 -2
include/linux/slab.h
··· 660 660 * allocator where we care about the real place the memory allocation 661 661 * request comes from. 662 662 */ 663 - extern void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller) 664 - __alloc_size(1); 663 + extern void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller); 665 664 #define kmalloc_track_caller(size, flags) \ 666 665 __kmalloc_track_caller(size, flags, _RET_IP_) 667 666
+29 -23
include/net/checksum.h
··· 22 22 #include <asm/checksum.h> 23 23 24 24 #ifndef _HAVE_ARCH_COPY_AND_CSUM_FROM_USER 25 - static inline 25 + static __always_inline 26 26 __wsum csum_and_copy_from_user (const void __user *src, void *dst, 27 27 int len) 28 28 { ··· 33 33 #endif 34 34 35 35 #ifndef HAVE_CSUM_COPY_USER 36 - static __inline__ __wsum csum_and_copy_to_user 36 + static __always_inline __wsum csum_and_copy_to_user 37 37 (const void *src, void __user *dst, int len) 38 38 { 39 39 __wsum sum = csum_partial(src, len, ~0U); ··· 45 45 #endif 46 46 47 47 #ifndef _HAVE_ARCH_CSUM_AND_COPY 48 - static inline __wsum 48 + static __always_inline __wsum 49 49 csum_partial_copy_nocheck(const void *src, void *dst, int len) 50 50 { 51 51 memcpy(dst, src, len); ··· 54 54 #endif 55 55 56 56 #ifndef HAVE_ARCH_CSUM_ADD 57 - static inline __wsum csum_add(__wsum csum, __wsum addend) 57 + static __always_inline __wsum csum_add(__wsum csum, __wsum addend) 58 58 { 59 59 u32 res = (__force u32)csum; 60 60 res += (__force u32)addend; ··· 62 62 } 63 63 #endif 64 64 65 - static inline __wsum csum_sub(__wsum csum, __wsum addend) 65 + static __always_inline __wsum csum_sub(__wsum csum, __wsum addend) 66 66 { 67 67 return csum_add(csum, ~addend); 68 68 } 69 69 70 - static inline __sum16 csum16_add(__sum16 csum, __be16 addend) 70 + static __always_inline __sum16 csum16_add(__sum16 csum, __be16 addend) 71 71 { 72 72 u16 res = (__force u16)csum; 73 73 ··· 75 75 return (__force __sum16)(res + (res < (__force u16)addend)); 76 76 } 77 77 78 - static inline __sum16 csum16_sub(__sum16 csum, __be16 addend) 78 + static __always_inline __sum16 csum16_sub(__sum16 csum, __be16 addend) 79 79 { 80 80 return csum16_add(csum, ~addend); 81 81 } 82 82 83 - static inline __wsum csum_shift(__wsum sum, int offset) 83 + static __always_inline __wsum csum_shift(__wsum sum, int offset) 84 84 { 85 85 /* rotate sum to align it with a 16b boundary */ 86 86 if (offset & 1) ··· 88 88 return sum; 89 89 } 90 90 91 - static inline __wsum 91 + static __always_inline __wsum 92 92 csum_block_add(__wsum csum, __wsum csum2, int offset) 93 93 { 94 94 return csum_add(csum, csum_shift(csum2, offset)); 95 95 } 96 96 97 - static inline __wsum 97 + static __always_inline __wsum 98 98 csum_block_add_ext(__wsum csum, __wsum csum2, int offset, int len) 99 99 { 100 100 return csum_block_add(csum, csum2, offset); 101 101 } 102 102 103 - static inline __wsum 103 + static __always_inline __wsum 104 104 csum_block_sub(__wsum csum, __wsum csum2, int offset) 105 105 { 106 106 return csum_block_add(csum, ~csum2, offset); 107 107 } 108 108 109 - static inline __wsum csum_unfold(__sum16 n) 109 + static __always_inline __wsum csum_unfold(__sum16 n) 110 110 { 111 111 return (__force __wsum)n; 112 112 } 113 113 114 - static inline __wsum csum_partial_ext(const void *buff, int len, __wsum sum) 114 + static __always_inline 115 + __wsum csum_partial_ext(const void *buff, int len, __wsum sum) 115 116 { 116 117 return csum_partial(buff, len, sum); 117 118 } 118 119 119 120 #define CSUM_MANGLED_0 ((__force __sum16)0xffff) 120 121 121 - static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff) 122 + static __always_inline void csum_replace_by_diff(__sum16 *sum, __wsum diff) 122 123 { 123 124 *sum = csum_fold(csum_add(diff, ~csum_unfold(*sum))); 124 125 } 125 126 126 - static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to) 127 + static __always_inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to) 127 128 { 128 129 __wsum tmp = csum_sub(~csum_unfold(*sum), (__force 
__wsum)from); 129 130 ··· 137 136 * m : old value of a 16bit field 138 137 * m' : new value of a 16bit field 139 138 */ 140 - static inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new) 139 + static __always_inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new) 141 140 { 142 141 *sum = ~csum16_add(csum16_sub(~(*sum), old), new); 142 + } 143 + 144 + static inline void csum_replace(__wsum *csum, __wsum old, __wsum new) 145 + { 146 + *csum = csum_add(csum_sub(*csum, old), new); 143 147 } 144 148 145 149 struct sk_buff; ··· 156 150 void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb, 157 151 __wsum diff, bool pseudohdr); 158 152 159 - static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb, 160 - __be16 from, __be16 to, 161 - bool pseudohdr) 153 + static __always_inline 154 + void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb, 155 + __be16 from, __be16 to, bool pseudohdr) 162 156 { 163 157 inet_proto_csum_replace4(sum, skb, (__force __be32)from, 164 158 (__force __be32)to, pseudohdr); 165 159 } 166 160 167 - static inline __wsum remcsum_adjust(void *ptr, __wsum csum, 168 - int start, int offset) 161 + static __always_inline __wsum remcsum_adjust(void *ptr, __wsum csum, 162 + int start, int offset) 169 163 { 170 164 __sum16 *psum = (__sum16 *)(ptr + offset); 171 165 __wsum delta; ··· 181 175 return delta; 182 176 } 183 177 184 - static inline void remcsum_unadjust(__sum16 *psum, __wsum delta) 178 + static __always_inline void remcsum_unadjust(__sum16 *psum, __wsum delta) 185 179 { 186 180 *psum = csum_fold(csum_sub(delta, (__force __wsum)*psum)); 187 181 } 188 182 189 - static inline __wsum wsum_negate(__wsum val) 183 + static __always_inline __wsum wsum_negate(__wsum val) 190 184 { 191 185 return (__force __wsum)-((__force u32)val); 192 186 }
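Apart from the __always_inline annotations, the checksum.h hunk adds csum_replace(), which folds a changed 32-bit word into an existing one's-complement sum instead of recomputing it: subtract the old value, add the new one, carrying end-around. A small self-contained demo of that identity, using plain uint32_t stand-ins for the kernel's __wsum (assumption: userspace model, not kernel code):

#include <stdio.h>
#include <stdint.h>

/* One's-complement helpers matching the semantics of csum_add/csum_sub
 * and the new csum_replace() above. */
static uint32_t csum_add(uint32_t csum, uint32_t addend)
{
	uint32_t res = csum + addend;
	return res + (res < addend);	/* end-around carry */
}

static uint32_t csum_sub(uint32_t csum, uint32_t addend)
{
	return csum_add(csum, ~addend);	/* ~a is a's one's-complement inverse */
}

static void csum_replace(uint32_t *csum, uint32_t old, uint32_t new)
{
	*csum = csum_add(csum_sub(*csum, old), new);
}

int main(void)
{
	uint32_t sum = csum_add(0x11111111, 0xdeadbeef);	/* pretend partial sum */
	uint32_t updated = sum;

	csum_replace(&updated, 0xdeadbeef, 0xfeedf00d);	/* swap one 32-bit word */

	/* Recomputing from scratch with the new word must agree. */
	uint32_t recomputed = csum_add(0x11111111, 0xfeedf00d);
	printf("updated=%08x recomputed=%08x %s\n", updated, recomputed,
	       updated == recomputed ? "OK" : "MISMATCH");
	return 0;
}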
+1 -1
include/net/netfilter/nf_tables.h
··· 905 905 int (*offload)(struct nft_offload_ctx *ctx, 906 906 struct nft_flow_rule *flow, 907 907 const struct nft_expr *expr); 908 + bool (*offload_action)(const struct nft_expr *expr); 908 909 void (*offload_stats)(struct nft_expr *expr, 909 910 const struct flow_stats *stats); 910 - u32 offload_flags; 911 911 const struct nft_expr_type *type; 912 912 void *data; 913 913 };
-2
include/net/netfilter/nf_tables_offload.h
··· 67 67 struct flow_rule *rule; 68 68 }; 69 69 70 - #define NFT_OFFLOAD_F_ACTION (1 << 0) 71 - 72 70 void nft_flow_rule_set_addr_type(struct nft_flow_rule *flow, 73 71 enum flow_dissector_key_id addr_type); 74 72
+2 -2
include/net/sock.h
··· 509 509 #endif 510 510 u16 sk_tsflags; 511 511 u8 sk_shutdown; 512 - u32 sk_tskey; 512 + atomic_t sk_tskey; 513 513 atomic_t sk_zckey; 514 514 515 515 u8 sk_clockid; ··· 2681 2681 __sock_tx_timestamp(tsflags, tx_flags); 2682 2682 if (tsflags & SOF_TIMESTAMPING_OPT_ID && tskey && 2683 2683 tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK) 2684 - *tskey = sk->sk_tskey++; 2684 + *tskey = atomic_inc_return(&sk->sk_tskey) - 1; 2685 2685 } 2686 2686 if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS))) 2687 2687 *tx_flags |= SKBTX_WIFI_STATUS;
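sk_tskey becomes an atomic_t because concurrent senders could race on the plain u32 post-increment and hand out duplicate timestamp keys; atomic_inc_return(&sk->sk_tskey) - 1 gives each caller the unique pre-increment value (the matching call-site hunks for net/core/sock.c, ip_output.c, ip6_output.c and j1939 appear below). A C11 sketch of the equivalent fetch-and-add pattern (illustrative, not the kernel API):

#include <stdio.h>
#include <stdatomic.h>
#include <pthread.h>

/* Linux's atomic_inc_return(v) - 1 is the same as C11 atomic_fetch_add(v, 1):
 * each caller gets a unique pre-increment key even when racing. */
static atomic_uint tskey;

static void *sender(void *arg)
{
	for (int i = 0; i < 100000; i++)
		(void)atomic_fetch_add(&tskey, 1);	/* key = old value */
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, sender, NULL);
	pthread_create(&b, NULL, sender, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	/* A plain u32 "tskey++" could lose increments here; the atomic
	 * version always ends at exactly 200000. */
	printf("tskey = %u\n", atomic_load(&tskey));
	return 0;
}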
+4 -2
kernel/cgroup/cgroup-v1.c
··· 546 546 char *buf, size_t nbytes, loff_t off) 547 547 { 548 548 struct cgroup *cgrp; 549 + struct cgroup_file_ctx *ctx; 549 550 550 551 BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX); 551 552 ··· 554 553 * Release agent gets called with all capabilities, 555 554 * require capabilities to set release agent. 556 555 */ 557 - if ((of->file->f_cred->user_ns != &init_user_ns) || 558 - !capable(CAP_SYS_ADMIN)) 556 + ctx = of->priv; 557 + if ((ctx->ns->user_ns != &init_user_ns) || 558 + !file_ns_capable(of->file, &init_user_ns, CAP_SYS_ADMIN)) 559 559 return -EPERM; 560 560 561 561 cgrp = cgroup_kn_lock_live(of->kn, false);
+14
kernel/cgroup/cgroup.c
··· 6166 6166 if (ret) 6167 6167 goto err; 6168 6168 6169 + /* 6170 + * Spawning a task directly into a cgroup works by passing a file 6171 + * descriptor to the target cgroup directory. This can even be an O_PATH 6172 + * file descriptor. But it can never be a cgroup.procs file descriptor. 6173 + * This was done on purpose so spawning into a cgroup could be 6174 + * conceptualized as an atomic 6175 + * 6176 + * fd = openat(dfd_cgroup, "cgroup.procs", ...); 6177 + * write(fd, <child-pid>, ...); 6178 + * 6179 + * sequence, i.e. it's a shorthand for the caller opening and writing 6180 + * cgroup.procs of the cgroup indicated by @dfd_cgroup. This allows us 6181 + * to always use the caller's credentials. 6182 + */ 6169 6183 ret = cgroup_attach_permissions(cset->dfl_cgrp, dst_cgrp, sb, 6170 6184 !(kargs->flags & CLONE_THREAD), 6171 6185 current->nsproxy->cgroup_ns);
+7 -5
kernel/cgroup/cpuset.c
··· 2289 2289 cgroup_taskset_first(tset, &css); 2290 2290 cs = css_cs(css); 2291 2291 2292 + cpus_read_lock(); 2292 2293 percpu_down_write(&cpuset_rwsem); 2293 2294 2294 2295 guarantee_online_mems(cs, &cpuset_attach_nodemask_to); ··· 2343 2342 wake_up(&cpuset_attach_wq); 2344 2343 2345 2344 percpu_up_write(&cpuset_rwsem); 2345 + cpus_read_unlock(); 2346 2346 } 2347 2347 2348 2348 /* The various types of files and directories in a cpuset file system */ ··· 3524 3522 return cs; 3525 3523 } 3526 3524 3527 - /** 3528 - * cpuset_node_allowed - Can we allocate on a memory node? 3525 + /* 3526 + * __cpuset_node_allowed - Can we allocate on a memory node? 3529 3527 * @node: is this an allowed node? 3530 3528 * @gfp_mask: memory allocation flags 3531 3529 * ··· 3696 3694 3697 3695 int cpuset_memory_pressure_enabled __read_mostly; 3698 3696 3699 - /** 3700 - * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims. 3697 + /* 3698 + * __cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims. 3701 3699 * 3702 3700 * Keep a running average of the rate of synchronous (direct) 3703 3701 * page reclaim efforts initiated by tasks in each cpuset. ··· 3712 3710 * "memory_pressure". Value displayed is an integer 3713 3711 * representing the recent rate of entry into the synchronous 3714 3712 * (direct) page reclaim by any task attached to the cpuset. 3715 - **/ 3713 + */ 3716 3714 3717 3715 void __cpuset_memory_pressure_bump(void) 3718 3716 {
+2 -7
kernel/cred.c
··· 665 665 666 666 int set_cred_ucounts(struct cred *new) 667 667 { 668 - struct task_struct *task = current; 669 - const struct cred *old = task->real_cred; 670 668 struct ucounts *new_ucounts, *old_ucounts = new->ucounts; 671 - 672 - if (new->user == old->user && new->user_ns == old->user_ns) 673 - return 0; 674 669 675 670 /* 676 671 * This optimization is needed because alloc_ucounts() uses locks 677 672 * for table lookups. 678 673 */ 679 - if (old_ucounts->ns == new->user_ns && uid_eq(old_ucounts->uid, new->euid)) 674 + if (old_ucounts->ns == new->user_ns && uid_eq(old_ucounts->uid, new->uid)) 680 675 return 0; 681 676 682 - if (!(new_ucounts = alloc_ucounts(new->user_ns, new->euid))) 677 + if (!(new_ucounts = alloc_ucounts(new->user_ns, new->uid))) 683 678 return -EAGAIN; 684 679 685 680 new->ucounts = new_ucounts;
+20 -10
kernel/fork.c
··· 2021 2021 #ifdef CONFIG_PROVE_LOCKING 2022 2022 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); 2023 2023 #endif 2024 + retval = copy_creds(p, clone_flags); 2025 + if (retval < 0) 2026 + goto bad_fork_free; 2027 + 2024 2028 retval = -EAGAIN; 2025 2029 if (is_ucounts_overlimit(task_ucounts(p), UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC))) { 2026 2030 if (p->real_cred->user != INIT_USER && 2027 2031 !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) 2028 - goto bad_fork_free; 2032 + goto bad_fork_cleanup_count; 2029 2033 } 2030 2034 current->flags &= ~PF_NPROC_EXCEEDED; 2031 - 2032 - retval = copy_creds(p, clone_flags); 2033 - if (retval < 0) 2034 - goto bad_fork_free; 2035 2035 2036 2036 /* 2037 2037 * If multiple threads are within copy_process(), then this check ··· 2267 2267 goto bad_fork_put_pidfd; 2268 2268 2269 2269 /* 2270 + * Now that the cgroups are pinned, re-clone the parent cgroup and put 2271 + * the new task on the correct runqueue. All this *before* the task 2272 + * becomes visible. 2273 + * 2274 + * This isn't part of ->can_fork() because while the re-cloning is 2275 + * cgroup specific, it unconditionally needs to place the task on a 2276 + * runqueue. 2277 + */ 2278 + sched_cgroup_fork(p, args); 2279 + 2280 + /* 2270 2281 * From this point on we must avoid any synchronous user-space 2271 2282 * communication until we take the tasklist-lock. In particular, we do 2272 2283 * not want user-space to be able to predict the process start-time by ··· 2334 2323 goto bad_fork_cancel_cgroup; 2335 2324 } 2336 2325 2337 - /* past the last point of failure */ 2338 - if (pidfile) 2339 - fd_install(pidfd, pidfile); 2340 - 2341 2326 init_task_pid_links(p); 2342 2327 if (likely(p->pid)) { 2343 2328 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace); ··· 2382 2375 syscall_tracepoint_update(p); 2383 2376 write_unlock_irq(&tasklist_lock); 2384 2377 2378 + if (pidfile) 2379 + fd_install(pidfd, pidfile); 2380 + 2385 2381 proc_fork_connector(p); 2386 - sched_post_fork(p, args); 2382 + sched_post_fork(p); 2387 2383 cgroup_post_fork(p, args); 2388 2384 perf_event_fork(p); 2389 2385
+2 -2
kernel/locking/lockdep.c
··· 3462 3462 u16 chain_hlock = chain_hlocks[chain->base + i]; 3463 3463 unsigned int class_idx = chain_hlock_class_idx(chain_hlock); 3464 3464 3465 - return lock_classes + class_idx - 1; 3465 + return lock_classes + class_idx; 3466 3466 } 3467 3467 3468 3468 /* ··· 3530 3530 hlock_id = chain_hlocks[chain->base + i]; 3531 3531 chain_key = print_chain_key_iteration(hlock_id, chain_key); 3532 3532 3533 - print_lock_name(lock_classes + chain_hlock_class_idx(hlock_id) - 1); 3533 + print_lock_name(lock_classes + chain_hlock_class_idx(hlock_id)); 3534 3534 printk("\n"); 3535 3535 } 3536 3536 }
+21 -13
kernel/sched/core.c
··· 1214 1214 } 1215 1215 #endif 1216 1216 1217 - static void set_load_weight(struct task_struct *p) 1217 + static void set_load_weight(struct task_struct *p, bool update_load) 1218 1218 { 1219 - bool update_load = !(READ_ONCE(p->__state) & TASK_NEW); 1220 1219 int prio = p->static_prio - MAX_RT_PRIO; 1221 1220 struct load_weight *load = &p->se.load; 1222 1221 ··· 4406 4407 p->static_prio = NICE_TO_PRIO(0); 4407 4408 4408 4409 p->prio = p->normal_prio = p->static_prio; 4409 - set_load_weight(p); 4410 + set_load_weight(p, false); 4410 4411 4411 4412 /* 4412 4413 * We don't need the reset flag anymore after the fork. It has ··· 4424 4425 4425 4426 init_entity_runnable_average(&p->se); 4426 4427 4428 + 4427 4429 #ifdef CONFIG_SCHED_INFO 4428 4430 if (likely(sched_info_on())) 4429 4431 memset(&p->sched_info, 0, sizeof(p->sched_info)); ··· 4440 4440 return 0; 4441 4441 } 4442 4442 4443 - void sched_post_fork(struct task_struct *p, struct kernel_clone_args *kargs) 4443 + void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs) 4444 4444 { 4445 4445 unsigned long flags; 4446 - #ifdef CONFIG_CGROUP_SCHED 4447 - struct task_group *tg; 4448 - #endif 4449 4446 4447 + /* 4448 + * Because we're not yet on the pid-hash, p->pi_lock isn't strictly 4449 + * required yet, but lockdep gets upset if rules are violated. 4450 + */ 4450 4451 raw_spin_lock_irqsave(&p->pi_lock, flags); 4451 4452 #ifdef CONFIG_CGROUP_SCHED 4452 - tg = container_of(kargs->cset->subsys[cpu_cgrp_id], 4453 - struct task_group, css); 4454 - p->sched_task_group = autogroup_task_group(p, tg); 4453 + if (1) { 4454 + struct task_group *tg; 4455 + tg = container_of(kargs->cset->subsys[cpu_cgrp_id], 4456 + struct task_group, css); 4457 + tg = autogroup_task_group(p, tg); 4458 + p->sched_task_group = tg; 4459 + } 4455 4460 #endif 4456 4461 rseq_migrate(p); 4457 4462 /* ··· 4467 4462 if (p->sched_class->task_fork) 4468 4463 p->sched_class->task_fork(p); 4469 4464 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 4465 + } 4470 4466 4467 + void sched_post_fork(struct task_struct *p) 4468 + { 4471 4469 uclamp_post_fork(p); 4472 4470 } 4473 4471 ··· 6930 6922 put_prev_task(rq, p); 6931 6923 6932 6924 p->static_prio = NICE_TO_PRIO(nice); 6933 - set_load_weight(p); 6925 + set_load_weight(p, true); 6934 6926 old_prio = p->prio; 6935 6927 p->prio = effective_prio(p); 6936 6928 ··· 7221 7213 */ 7222 7214 p->rt_priority = attr->sched_priority; 7223 7215 p->normal_prio = normal_prio(p); 7224 - set_load_weight(p); 7216 + set_load_weight(p, true); 7225 7217 } 7226 7218 7227 7219 /* ··· 9454 9446 #endif 9455 9447 } 9456 9448 9457 - set_load_weight(&init_task); 9449 + set_load_weight(&init_task, false); 9458 9450 9459 9451 /* 9460 9452 * The boot idle thread does lazy MMU switching as well:
+14 -6
kernel/sys.c
··· 472 472 if (!new_user) 473 473 return -EAGAIN; 474 474 475 + free_uid(new->user); 476 + new->user = new_user; 477 + return 0; 478 + } 479 + 480 + static void flag_nproc_exceeded(struct cred *new) 481 + { 482 + if (new->ucounts == current_ucounts()) 483 + return; 484 + 475 485 /* 476 486 * We don't fail in case of NPROC limit excess here because too many 477 487 * poorly written programs don't check set*uid() return code, assuming ··· 490 480 * failure to the execve() stage. 491 481 */ 492 482 if (is_ucounts_overlimit(new->ucounts, UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC)) && 493 - new_user != INIT_USER && 494 - !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) 483 + new->user != INIT_USER) 495 484 current->flags |= PF_NPROC_EXCEEDED; 496 485 else 497 486 current->flags &= ~PF_NPROC_EXCEEDED; 498 - 499 - free_uid(new->user); 500 - new->user = new_user; 501 - return 0; 502 487 } 503 488 504 489 /* ··· 568 563 if (retval < 0) 569 564 goto error; 570 565 566 + flag_nproc_exceeded(new); 571 567 return commit_creds(new); 572 568 573 569 error: ··· 631 625 if (retval < 0) 632 626 goto error; 633 627 628 + flag_nproc_exceeded(new); 634 629 return commit_creds(new); 635 630 636 631 error: ··· 711 704 if (retval < 0) 712 705 goto error; 713 706 707 + flag_nproc_exceeded(new); 714 708 return commit_creds(new); 715 709 716 710 error:
+2 -1
kernel/ucount.c
··· 350 350 if (rlimit > LONG_MAX) 351 351 max = LONG_MAX; 352 352 for (iter = ucounts; iter; iter = iter->ns->ucounts) { 353 - if (get_ucounts_value(iter, type) > max) 353 + long val = get_ucounts_value(iter, type); 354 + if (val < 0 || val > max) 354 355 return true; 355 356 max = READ_ONCE(iter->ns->ucount_max[type]); 356 357 }
+2
lib/iov_iter.c
··· 414 414 return 0; 415 415 416 416 buf->ops = &page_cache_pipe_buf_ops; 417 + buf->flags = 0; 417 418 get_page(page); 418 419 buf->page = page; 419 420 buf->offset = offset; ··· 578 577 break; 579 578 580 579 buf->ops = &default_pipe_buf_ops; 580 + buf->flags = 0; 581 581 buf->page = page; 582 582 buf->offset = 0; 583 583 buf->len = min_t(ssize_t, left, PAGE_SIZE);
+1 -1
net/can/j1939/transport.c
··· 2006 2006 /* set the end-packet for broadcast */ 2007 2007 session->pkt.last = session->pkt.total; 2008 2008 2009 - skcb->tskey = session->sk->sk_tskey++; 2009 + skcb->tskey = atomic_inc_return(&session->sk->sk_tskey) - 1; 2010 2010 session->tskey = skcb->tskey; 2011 2011 2012 2012 return session;
+1 -1
net/core/net-sysfs.c
··· 213 213 if (!rtnl_trylock()) 214 214 return restart_syscall(); 215 215 216 - if (netif_running(netdev)) { 216 + if (netif_running(netdev) && netif_device_present(netdev)) { 217 217 struct ethtool_link_ksettings cmd; 218 218 219 219 if (!__ethtool_get_link_ksettings(netdev, &cmd))
+3 -3
net/core/skbuff.c
··· 2307 2307 /* Free pulled out fragments. */ 2308 2308 while ((list = skb_shinfo(skb)->frag_list) != insp) { 2309 2309 skb_shinfo(skb)->frag_list = list->next; 2310 - kfree_skb(list); 2310 + consume_skb(list); 2311 2311 } 2312 2312 /* And insert new clone at head. */ 2313 2313 if (clone) { ··· 4761 4761 if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) { 4762 4762 serr->ee.ee_data = skb_shinfo(skb)->tskey; 4763 4763 if (sk_is_tcp(sk)) 4764 - serr->ee.ee_data -= sk->sk_tskey; 4764 + serr->ee.ee_data -= atomic_read(&sk->sk_tskey); 4765 4765 } 4766 4766 4767 4767 err = sock_queue_err_skb(sk, skb); ··· 6132 6132 /* Free pulled out fragments. */ 6133 6133 while ((list = shinfo->frag_list) != insp) { 6134 6134 shinfo->frag_list = list->next; 6135 - kfree_skb(list); 6135 + consume_skb(list); 6136 6136 } 6137 6137 /* And insert new clone at head. */ 6138 6138 if (clone) {
+2 -2
net/core/sock.c
··· 879 879 if ((1 << sk->sk_state) & 880 880 (TCPF_CLOSE | TCPF_LISTEN)) 881 881 return -EINVAL; 882 - sk->sk_tskey = tcp_sk(sk)->snd_una; 882 + atomic_set(&sk->sk_tskey, tcp_sk(sk)->snd_una); 883 883 } else { 884 - sk->sk_tskey = 0; 884 + atomic_set(&sk->sk_tskey, 0); 885 885 } 886 886 } 887 887
+6 -1
net/dsa/master.c
··· 260 260 dev->dsa_ptr->netdev_ops = ops; 261 261 } 262 262 263 + /* Keep the master always promiscuous if the tagging protocol requires that 264 + * (garbles MAC DA) or if it doesn't support unicast filtering, in which case 265 + * it would revert to promiscuous mode as soon as we call dev_uc_add() on it 266 + * anyway. 267 + */ 263 268 static void dsa_master_set_promiscuity(struct net_device *dev, int inc) 264 269 { 265 270 const struct dsa_device_ops *ops = dev->dsa_ptr->tag_ops; 266 271 267 - if (!ops->promisc_on_master) 272 + if ((dev->priv_flags & IFF_UNICAST_FLT) && !ops->promisc_on_master) 268 273 return; 269 274 270 275 ASSERT_RTNL();
+22 -7
net/dsa/port.c
··· 395 395 .tree_index = dp->ds->dst->index, 396 396 .sw_index = dp->ds->index, 397 397 .port = dp->index, 398 - .bridge = *dp->bridge, 399 398 }; 400 399 int err; 400 + 401 + /* If the port could not be offloaded to begin with, then 402 + * there is nothing to do. 403 + */ 404 + if (!dp->bridge) 405 + return; 406 + 407 + info.bridge = *dp->bridge; 401 408 402 409 /* Here the port is already unbridged. Reflect the current configuration 403 410 * so that drivers can program their chips accordingly. ··· 788 781 struct dsa_port *cpu_dp = dp->cpu_dp; 789 782 int err; 790 783 791 - err = dev_uc_add(cpu_dp->master, addr); 792 - if (err) 793 - return err; 784 + /* Avoid a call to __dev_set_promiscuity() on the master, which 785 + * requires rtnl_lock(), since we can't guarantee that is held here, 786 + * and we can't take it either. 787 + */ 788 + if (cpu_dp->master->priv_flags & IFF_UNICAST_FLT) { 789 + err = dev_uc_add(cpu_dp->master, addr); 790 + if (err) 791 + return err; 792 + } 794 793 795 794 return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_ADD, &info); 796 795 } ··· 813 800 struct dsa_port *cpu_dp = dp->cpu_dp; 814 801 int err; 815 802 816 - err = dev_uc_del(cpu_dp->master, addr); 817 - if (err) 818 - return err; 803 + if (cpu_dp->master->priv_flags & IFF_UNICAST_FLT) { 804 + err = dev_uc_del(cpu_dp->master, addr); 805 + if (err) 806 + return err; 807 + } 819 808 820 809 return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_DEL, &info); 821 810 }
+4 -1
net/ipv4/af_inet.c
··· 1376 1376 } 1377 1377 1378 1378 ops = rcu_dereference(inet_offloads[proto]); 1379 - if (likely(ops && ops->callbacks.gso_segment)) 1379 + if (likely(ops && ops->callbacks.gso_segment)) { 1380 1380 segs = ops->callbacks.gso_segment(skb, features); 1381 + if (!segs) 1382 + skb->network_header = skb_mac_header(skb) + nhoff - skb->head; 1383 + } 1381 1384 1382 1385 if (IS_ERR_OR_NULL(segs)) 1383 1386 goto out;
+1 -1
net/ipv4/ip_output.c
··· 991 991 992 992 if (cork->tx_flags & SKBTX_ANY_SW_TSTAMP && 993 993 sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) 994 - tskey = sk->sk_tskey++; 994 + tskey = atomic_inc_return(&sk->sk_tskey) - 1; 995 995 996 996 hh_len = LL_RESERVED_SPACE(rt->dst.dev); 997 997
-1
net/ipv4/ping.c
··· 187 187 (int)ident, &ipv6_hdr(skb)->daddr, dif); 188 188 #endif 189 189 } else { 190 - pr_err("ping: protocol(%x) is not supported\n", ntohs(skb->protocol)); 191 190 return NULL; 192 191 } 193 192
+1 -1
net/ipv4/udp_tunnel_nic.c
··· 846 846 list_for_each_entry(node, &info->shared->devices, list) 847 847 if (node->dev == dev) 848 848 break; 849 - if (node->dev != dev) 849 + if (list_entry_is_head(node, &info->shared->devices, list)) 850 850 return; 851 851 852 852 list_del(&node->list);
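The udp_tunnel_nic.c one-liner fixes a classic iterator bug: when list_for_each_entry() runs to completion without a break, the cursor does not point at a real element; it is the list head recast as an entry, so the old "node->dev != dev" test dereferenced out-of-bounds memory. list_entry_is_head() is the correct exhaustion test. A miniature illustration with simplified macros (assumption: these are pared-down stand-ins for the kernel's list helpers, which use typeof and a doubly linked head):

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each_entry(pos, head, type, member)                \
	for (pos = container_of((head)->next, type, member);        \
	     &pos->member != (head);                                \
	     pos = container_of(pos->member.next, type, member))

#define list_entry_is_head(pos, head, member) (&(pos)->member == (head))

struct node { int dev; struct list_head list; };

int main(void)
{
	struct list_head head;
	struct node n1 = { .dev = 1 }, n2 = { .dev = 2 };
	struct node *pos;

	/* circular list: head -> n1 -> n2 -> head */
	head.next = &n1.list;
	n1.list.next = &n2.list;
	n2.list.next = &head;

	list_for_each_entry(pos, &head, struct node, list)
		if (pos->dev == 3)	/* not present */
			break;

	/* Wrong: testing pos->dev here would read memory around the bare
	 * head object. Right: */
	if (list_entry_is_head(pos, &head, list))
		printf("dev 3 not found (cursor aliases the head)\n");
	return 0;
}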
+2
net/ipv6/addrconf.c
··· 4996 4996 nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) 4997 4997 goto error; 4998 4998 4999 + spin_lock_bh(&ifa->lock); 4999 5000 if (!((ifa->flags&IFA_F_PERMANENT) && 5000 5001 (ifa->prefered_lft == INFINITY_LIFE_TIME))) { 5001 5002 preferred = ifa->prefered_lft; ··· 5018 5017 preferred = INFINITY_LIFE_TIME; 5019 5018 valid = INFINITY_LIFE_TIME; 5020 5019 } 5020 + spin_unlock_bh(&ifa->lock); 5021 5021 5022 5022 if (!ipv6_addr_any(&ifa->peer_addr)) { 5023 5023 if (nla_put_in6_addr(skb, IFA_LOCAL, &ifa->addr) < 0 ||
+2
net/ipv6/ip6_offload.c
··· 114 114 if (likely(ops && ops->callbacks.gso_segment)) { 115 115 skb_reset_transport_header(skb); 116 116 segs = ops->callbacks.gso_segment(skb, features); 117 + if (!segs) 118 + skb->network_header = skb_mac_header(skb) + nhoff - skb->head; 117 119 } 118 120 119 121 if (IS_ERR_OR_NULL(segs))
+1 -1
net/ipv6/ip6_output.c
··· 1464 1464 1465 1465 if (cork->tx_flags & SKBTX_ANY_SW_TSTAMP && 1466 1466 sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) 1467 - tskey = sk->sk_tskey++; 1467 + tskey = atomic_inc_return(&sk->sk_tskey) - 1; 1468 1468 1469 1469 hh_len = LL_RESERVED_SPACE(rt->dst.dev); 1470 1470
+2
net/mptcp/mib.c
··· 35 35 SNMP_MIB_ITEM("AddAddr", MPTCP_MIB_ADDADDR), 36 36 SNMP_MIB_ITEM("EchoAdd", MPTCP_MIB_ECHOADD), 37 37 SNMP_MIB_ITEM("PortAdd", MPTCP_MIB_PORTADD), 38 + SNMP_MIB_ITEM("AddAddrDrop", MPTCP_MIB_ADDADDRDROP), 38 39 SNMP_MIB_ITEM("MPJoinPortSynRx", MPTCP_MIB_JOINPORTSYNRX), 39 40 SNMP_MIB_ITEM("MPJoinPortSynAckRx", MPTCP_MIB_JOINPORTSYNACKRX), 40 41 SNMP_MIB_ITEM("MPJoinPortAckRx", MPTCP_MIB_JOINPORTACKRX), 41 42 SNMP_MIB_ITEM("MismatchPortSynRx", MPTCP_MIB_MISMATCHPORTSYNRX), 42 43 SNMP_MIB_ITEM("MismatchPortAckRx", MPTCP_MIB_MISMATCHPORTACKRX), 43 44 SNMP_MIB_ITEM("RmAddr", MPTCP_MIB_RMADDR), 45 + SNMP_MIB_ITEM("RmAddrDrop", MPTCP_MIB_RMADDRDROP), 44 46 SNMP_MIB_ITEM("RmSubflow", MPTCP_MIB_RMSUBFLOW), 45 47 SNMP_MIB_ITEM("MPPrioTx", MPTCP_MIB_MPPRIOTX), 46 48 SNMP_MIB_ITEM("MPPrioRx", MPTCP_MIB_MPPRIORX),
+2
net/mptcp/mib.h
··· 28 28 MPTCP_MIB_ADDADDR, /* Received ADD_ADDR with echo-flag=0 */ 29 29 MPTCP_MIB_ECHOADD, /* Received ADD_ADDR with echo-flag=1 */ 30 30 MPTCP_MIB_PORTADD, /* Received ADD_ADDR with a port-number */ 31 + MPTCP_MIB_ADDADDRDROP, /* Dropped incoming ADD_ADDR */ 31 32 MPTCP_MIB_JOINPORTSYNRX, /* Received a SYN MP_JOIN with a different port-number */ 32 33 MPTCP_MIB_JOINPORTSYNACKRX, /* Received a SYNACK MP_JOIN with a different port-number */ 33 34 MPTCP_MIB_JOINPORTACKRX, /* Received an ACK MP_JOIN with a different port-number */ 34 35 MPTCP_MIB_MISMATCHPORTSYNRX, /* Received a SYN MP_JOIN with a mismatched port-number */ 35 36 MPTCP_MIB_MISMATCHPORTACKRX, /* Received an ACK MP_JOIN with a mismatched port-number */ 36 37 MPTCP_MIB_RMADDR, /* Received RM_ADDR */ 38 + MPTCP_MIB_RMADDRDROP, /* Dropped incoming RM_ADDR */ 37 39 MPTCP_MIB_RMSUBFLOW, /* Remove a subflow */ 38 40 MPTCP_MIB_MPPRIOTX, /* Transmit a MP_PRIO */ 39 41 MPTCP_MIB_MPPRIORX, /* Received a MP_PRIO */
+6 -2
net/mptcp/pm.c
··· 213 213 mptcp_pm_add_addr_send_ack(msk); 214 214 } else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) { 215 215 pm->remote = *addr; 216 + } else { 217 + __MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP); 216 218 } 217 219 218 220 spin_unlock_bh(&pm->lock); ··· 255 253 mptcp_event_addr_removed(msk, rm_list->ids[i]); 256 254 257 255 spin_lock_bh(&pm->lock); 258 - mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED); 259 - pm->rm_list_rx = *rm_list; 256 + if (mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED)) 257 + pm->rm_list_rx = *rm_list; 258 + else 259 + __MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_RMADDRDROP); 260 260 spin_unlock_bh(&pm->lock); 261 261 } 262 262
+24 -5
net/mptcp/pm_netlink.c
··· 546 546 if (msk->pm.add_addr_signaled < add_addr_signal_max) { 547 547 local = select_signal_address(pernet, msk); 548 548 549 + /* due to racing events on both ends we can reach here while 550 + * previous add address is still running: if we invoke now 551 + * mptcp_pm_announce_addr(), that will fail and the 552 + * corresponding id will be marked as used. 553 + * Instead let the PM machinery reschedule us when the 554 + * current address announce will be completed. 555 + */ 556 + if (msk->pm.addr_signal & BIT(MPTCP_ADD_ADDR_SIGNAL)) 557 + return; 558 + 549 559 if (local) { 550 560 if (mptcp_pm_alloc_anno_list(msk, local)) { 551 561 __clear_bit(local->addr.id, msk->pm.id_avail_bitmap); ··· 660 650 unsigned int add_addr_accept_max; 661 651 struct mptcp_addr_info remote; 662 652 unsigned int subflows_max; 653 + bool reset_port = false; 663 654 int i, nr; 664 655 665 656 add_addr_accept_max = mptcp_pm_get_add_addr_accept_max(msk); ··· 670 659 msk->pm.add_addr_accepted, add_addr_accept_max, 671 660 msk->pm.remote.family); 672 661 673 - if (lookup_subflow_by_daddr(&msk->conn_list, &msk->pm.remote)) 662 + remote = msk->pm.remote; 663 + if (lookup_subflow_by_daddr(&msk->conn_list, &remote)) 674 664 goto add_addr_echo; 665 + 666 + /* pick id 0 port, if none is provided the remote address */ 667 + if (!remote.port) { 668 + reset_port = true; 669 + remote.port = sk->sk_dport; 670 + } 675 671 676 672 /* connect to the specified remote address, using whatever 677 673 * local address the routing configuration will pick. 678 674 */ 679 - remote = msk->pm.remote; 680 - if (!remote.port) 681 - remote.port = sk->sk_dport; 682 675 nr = fill_local_addresses_vec(msk, addrs); 683 676 684 677 msk->pm.add_addr_accepted++; ··· 695 680 __mptcp_subflow_connect(sk, &addrs[i], &remote); 696 681 spin_lock_bh(&msk->pm.lock); 697 682 683 + /* be sure to echo exactly the received address */ 684 + if (reset_port) 685 + remote.port = 0; 686 + 698 687 add_addr_echo: 699 - mptcp_pm_announce_addr(msk, &msk->pm.remote, true); 688 + mptcp_pm_announce_addr(msk, &remote, true); 700 689 mptcp_pm_nl_addr_send_ack(msk); 701 690 } 702 691
+12 -4
net/netfilter/nf_tables_api.c
··· 6551 6551 { 6552 6552 struct nft_object *newobj; 6553 6553 struct nft_trans *trans; 6554 - int err; 6554 + int err = -ENOMEM; 6555 + 6556 + if (!try_module_get(type->owner)) 6557 + return -ENOENT; 6555 6558 6556 6559 trans = nft_trans_alloc(ctx, NFT_MSG_NEWOBJ, 6557 6560 sizeof(struct nft_trans_obj)); 6558 6561 if (!trans) 6559 - return -ENOMEM; 6562 + goto err_trans; 6560 6563 6561 6564 newobj = nft_obj_init(ctx, type, attr); 6562 6565 if (IS_ERR(newobj)) { ··· 6576 6573 6577 6574 err_free_trans: 6578 6575 kfree(trans); 6576 + err_trans: 6577 + module_put(type->owner); 6579 6578 return err; 6580 6579 } 6581 6580 ··· 8190 8185 if (obj->ops->update) 8191 8186 obj->ops->update(obj, newobj); 8192 8187 8193 - kfree(newobj); 8188 + nft_obj_destroy(&trans->ctx, newobj); 8194 8189 } 8195 8190 8196 8191 static void nft_commit_release(struct nft_trans *trans) ··· 8981 8976 break; 8982 8977 case NFT_MSG_NEWOBJ: 8983 8978 if (nft_trans_obj_update(trans)) { 8984 - kfree(nft_trans_obj_newobj(trans)); 8979 + nft_obj_destroy(&trans->ctx, nft_trans_obj_newobj(trans)); 8985 8980 nft_trans_destroy(trans); 8986 8981 } else { 8987 8982 trans->ctx.table->use--; ··· 9641 9636 9642 9637 static void __nft_release_hook(struct net *net, struct nft_table *table) 9643 9638 { 9639 + struct nft_flowtable *flowtable; 9644 9640 struct nft_chain *chain; 9645 9641 9646 9642 list_for_each_entry(chain, &table->chains, list) 9647 9643 nf_tables_unregister_hook(net, table, chain); 9644 + list_for_each_entry(flowtable, &table->flowtables, list) 9645 + nft_unregister_flowtable_net_hooks(net, &flowtable->hook_list); 9648 9646 } 9649 9647 9650 9648 static void __nft_release_hooks(struct net *net)
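The nf_tables fix holds a reference on the object type's module for the lifetime of the update transaction and drops it on every failure path. A minimal sketch of the take-then-unwind pattern, with userspace stand-ins for `try_module_get()` / `module_put()`:

    #include <stdio.h>
    #include <stdlib.h>

    static int refcount;

    static int try_module_get(void) { refcount++; return 1; }	/* stand-in */
    static void module_put(void) { refcount--; }			/* stand-in */

    static int update_object(int fail_alloc)
    {
    	int err = -12;				/* -ENOMEM */
    	void *trans;

    	if (!try_module_get())
    		return -2;			/* -ENOENT */

    	trans = fail_alloc ? NULL : malloc(16);
    	if (!trans)
    		goto err_trans;			/* unwind in reverse order */

    	/* ...build and queue the transaction... */
    	free(trans);
    	return 0;

    err_trans:
    	module_put();
    	return err;
    }

    int main(void)
    {
    	update_object(1);			/* simulate allocation failure */
    	printf("refcount after failed update: %d\n", refcount);	/* 0 */
    	return 0;
    }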
+2 -1
net/netfilter/nf_tables_offload.c
··· 94 94 95 95 expr = nft_expr_first(rule); 96 96 while (nft_expr_more(rule, expr)) { 97 - if (expr->ops->offload_flags & NFT_OFFLOAD_F_ACTION) 97 + if (expr->ops->offload_action && 98 + expr->ops->offload_action(expr)) 98 99 num_actions++; 99 100 100 101 expr = nft_expr_next(expr);
+6
net/netfilter/nft_dup_netdev.c
··· 67 67 return nft_fwd_dup_netdev_offload(ctx, flow, FLOW_ACTION_MIRRED, oif); 68 68 } 69 69 70 + static bool nft_dup_netdev_offload_action(const struct nft_expr *expr) 71 + { 72 + return true; 73 + } 74 + 70 75 static struct nft_expr_type nft_dup_netdev_type; 71 76 static const struct nft_expr_ops nft_dup_netdev_ops = { 72 77 .type = &nft_dup_netdev_type, ··· 80 75 .init = nft_dup_netdev_init, 81 76 .dump = nft_dup_netdev_dump, 82 77 .offload = nft_dup_netdev_offload, 78 + .offload_action = nft_dup_netdev_offload_action, 83 79 }; 84 80 85 81 static struct nft_expr_type nft_dup_netdev_type __read_mostly = {
+6
net/netfilter/nft_fwd_netdev.c
··· 79 79 return nft_fwd_dup_netdev_offload(ctx, flow, FLOW_ACTION_REDIRECT, oif); 80 80 } 81 81 82 + static bool nft_fwd_netdev_offload_action(const struct nft_expr *expr) 83 + { 84 + return true; 85 + } 86 + 82 87 struct nft_fwd_neigh { 83 88 u8 sreg_dev; 84 89 u8 sreg_addr; ··· 227 222 .dump = nft_fwd_netdev_dump, 228 223 .validate = nft_fwd_validate, 229 224 .offload = nft_fwd_netdev_offload, 225 + .offload_action = nft_fwd_netdev_offload_action, 230 226 }; 231 227 232 228 static const struct nft_expr_ops *
+11 -1
net/netfilter/nft_immediate.c
··· 213 213 return 0; 214 214 } 215 215 216 + static bool nft_immediate_offload_action(const struct nft_expr *expr) 217 + { 218 + const struct nft_immediate_expr *priv = nft_expr_priv(expr); 219 + 220 + if (priv->dreg == NFT_REG_VERDICT) 221 + return true; 222 + 223 + return false; 224 + } 225 + 216 226 static const struct nft_expr_ops nft_imm_ops = { 217 227 .type = &nft_imm_type, 218 228 .size = NFT_EXPR_SIZE(sizeof(struct nft_immediate_expr)), ··· 234 224 .dump = nft_immediate_dump, 235 225 .validate = nft_immediate_validate, 236 226 .offload = nft_immediate_offload, 237 - .offload_flags = NFT_OFFLOAD_F_ACTION, 227 + .offload_action = nft_immediate_offload_action, 238 228 }; 239 229 240 230 struct nft_expr_type nft_imm_type __read_mostly = {
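Replacing the static `NFT_OFFLOAD_F_ACTION` flag with an `offload_action()` callback lets each expression decide per instance whether it counts as an action; an immediate only does when it writes the verdict register. A condensed sketch of the callback-in-ops idea, with stand-in types:

    #include <stdbool.h>
    #include <stdio.h>

    struct expr;

    struct expr_ops {
    	bool (*offload_action)(const struct expr *expr);
    };

    struct expr {
    	const struct expr_ops *ops;
    	int dreg;
    };

    #define REG_VERDICT 0

    /* an immediate is an action only when it writes the verdict register */
    static bool imm_offload_action(const struct expr *e)
    {
    	return e->dreg == REG_VERDICT;
    }

    static const struct expr_ops imm_ops = { .offload_action = imm_offload_action };

    int main(void)
    {
    	struct expr verdict = { &imm_ops, REG_VERDICT };
    	struct expr data    = { &imm_ops, 1 };

    	printf("verdict:%d data:%d\n",
    	       verdict.ops->offload_action(&verdict),
    	       data.ops->offload_action(&data));	/* verdict:1 data:0 */
    	return 0;
    }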
+18
net/netfilter/nft_limit.c
··· 340 340 return nft_limit_dump(skb, &priv->limit, NFT_LIMIT_PKTS); 341 341 } 342 342 343 + static void nft_limit_obj_pkts_destroy(const struct nft_ctx *ctx, 344 + struct nft_object *obj) 345 + { 346 + struct nft_limit_priv_pkts *priv = nft_obj_data(obj); 347 + 348 + nft_limit_destroy(ctx, &priv->limit); 349 + } 350 + 343 351 static struct nft_object_type nft_limit_obj_type; 344 352 static const struct nft_object_ops nft_limit_obj_pkts_ops = { 345 353 .type = &nft_limit_obj_type, 346 354 .size = NFT_EXPR_SIZE(sizeof(struct nft_limit_priv_pkts)), 347 355 .init = nft_limit_obj_pkts_init, 356 + .destroy = nft_limit_obj_pkts_destroy, 348 357 .eval = nft_limit_obj_pkts_eval, 349 358 .dump = nft_limit_obj_pkts_dump, 350 359 }; ··· 387 378 return nft_limit_dump(skb, priv, NFT_LIMIT_PKT_BYTES); 388 379 } 389 380 381 + static void nft_limit_obj_bytes_destroy(const struct nft_ctx *ctx, 382 + struct nft_object *obj) 383 + { 384 + struct nft_limit_priv *priv = nft_obj_data(obj); 385 + 386 + nft_limit_destroy(ctx, priv); 387 + } 388 + 390 389 static struct nft_object_type nft_limit_obj_type; 391 390 static const struct nft_object_ops nft_limit_obj_bytes_ops = { 392 391 .type = &nft_limit_obj_type, 393 392 .size = sizeof(struct nft_limit_priv), 394 393 .init = nft_limit_obj_bytes_init, 394 + .destroy = nft_limit_obj_bytes_destroy, 395 395 .eval = nft_limit_obj_bytes_eval, 396 396 .dump = nft_limit_obj_bytes_dump, 397 397 };
+2
net/netfilter/xt_socket.c
··· 220 220 { 221 221 if (par->family == NFPROTO_IPV4) 222 222 nf_defrag_ipv4_disable(par->net); 223 + #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) 223 224 else if (par->family == NFPROTO_IPV6) 224 225 nf_defrag_ipv6_disable(par->net); 226 + #endif 225 227 } 226 228 227 229 static struct xt_match socket_mt_reg[] __read_mostly = {
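The xt_socket change compiles the IPv6 defrag teardown only when the corresponding support is built in. The shape of that guard, reduced to a userspace sketch where the hypothetical `HAVE_IPV6` stands in for `CONFIG_IP6_NF_IPTABLES`:

    #include <stdio.h>

    static void defrag_ipv4_disable(void) { puts("v4 defrag off"); }
    #ifdef HAVE_IPV6
    static void defrag_ipv6_disable(void) { puts("v6 defrag off"); }
    #endif

    static void mt_destroy(int family)
    {
    	if (family == 4)
    		defrag_ipv4_disable();
    #ifdef HAVE_IPV6
    	else if (family == 6)
    		defrag_ipv6_disable();	/* only compiled when IPv6 support is */
    #endif
    }

    int main(void)
    {
    	mt_destroy(4);
    	return 0;
    }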
+38 -8
net/openvswitch/actions.c
··· 423 423 memcpy(addr, new_addr, sizeof(__be32[4])); 424 424 } 425 425 426 - static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask) 426 + static void set_ipv6_dsfield(struct sk_buff *skb, struct ipv6hdr *nh, u8 ipv6_tclass, u8 mask) 427 427 { 428 + u8 old_ipv6_tclass = ipv6_get_dsfield(nh); 429 + 430 + ipv6_tclass = OVS_MASKED(old_ipv6_tclass, ipv6_tclass, mask); 431 + 432 + if (skb->ip_summed == CHECKSUM_COMPLETE) 433 + csum_replace(&skb->csum, (__force __wsum)(old_ipv6_tclass << 12), 434 + (__force __wsum)(ipv6_tclass << 12)); 435 + 436 + ipv6_change_dsfield(nh, ~mask, ipv6_tclass); 437 + } 438 + 439 + static void set_ipv6_fl(struct sk_buff *skb, struct ipv6hdr *nh, u32 fl, u32 mask) 440 + { 441 + u32 ofl; 442 + 443 + ofl = nh->flow_lbl[0] << 16 | nh->flow_lbl[1] << 8 | nh->flow_lbl[2]; 444 + fl = OVS_MASKED(ofl, fl, mask); 445 + 428 446 /* Bits 21-24 are always unmasked, so this retains their values. */ 429 - OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16)); 430 - OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8)); 431 - OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask); 447 + nh->flow_lbl[0] = (u8)(fl >> 16); 448 + nh->flow_lbl[1] = (u8)(fl >> 8); 449 + nh->flow_lbl[2] = (u8)fl; 450 + 451 + if (skb->ip_summed == CHECKSUM_COMPLETE) 452 + csum_replace(&skb->csum, (__force __wsum)htonl(ofl), (__force __wsum)htonl(fl)); 453 + } 454 + 455 + static void set_ipv6_ttl(struct sk_buff *skb, struct ipv6hdr *nh, u8 new_ttl, u8 mask) 456 + { 457 + new_ttl = OVS_MASKED(nh->hop_limit, new_ttl, mask); 458 + 459 + if (skb->ip_summed == CHECKSUM_COMPLETE) 460 + csum_replace(&skb->csum, (__force __wsum)(nh->hop_limit << 8), 461 + (__force __wsum)(new_ttl << 8)); 462 + nh->hop_limit = new_ttl; 432 463 } 433 464 434 465 static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl, ··· 577 546 } 578 547 } 579 548 if (mask->ipv6_tclass) { 580 - ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass); 549 + set_ipv6_dsfield(skb, nh, key->ipv6_tclass, mask->ipv6_tclass); 581 550 flow_key->ip.tos = ipv6_get_dsfield(nh); 582 551 } 583 552 if (mask->ipv6_label) { 584 - set_ipv6_fl(nh, ntohl(key->ipv6_label), 553 + set_ipv6_fl(skb, nh, ntohl(key->ipv6_label), 585 554 ntohl(mask->ipv6_label)); 586 555 flow_key->ipv6.label = 587 556 *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL); 588 557 } 589 558 if (mask->ipv6_hlimit) { 590 - OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit, 591 - mask->ipv6_hlimit); 559 + set_ipv6_ttl(skb, nh, key->ipv6_hlimit, mask->ipv6_hlimit); 592 560 flow_key->ip.ttl = nh->hop_limit; 593 561 } 594 562 return 0;
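With `skb->ip_summed == CHECKSUM_COMPLETE`, `skb->csum` is a one's-complement sum over the packet bytes, so rewriting the IPv6 traffic class, flow label, or hop limit must also patch the stored sum, which is what the added `csum_replace()` calls do. A self-contained sketch of the underlying 16-bit update:

    #include <stdint.h>
    #include <stdio.h>

    /* fold a 32-bit accumulator into a 16-bit one's-complement sum */
    static uint16_t fold(uint32_t sum)
    {
    	while (sum >> 16)
    		sum = (sum & 0xffff) + (sum >> 16);
    	return (uint16_t)sum;
    }

    /* patch a checksum: fold out the old word, fold in the new one */
    static uint16_t csum_replace16(uint16_t csum, uint16_t old_w, uint16_t new_w)
    {
    	/* one's-complement subtraction == adding the complement */
    	return fold((uint32_t)csum + (uint16_t)~old_w + new_w);
    }

    int main(void)
    {
    	uint16_t sum = fold(0x1234 + 0xabcd);		/* sum over two words */
    	uint16_t patched = csum_replace16(sum, 0xabcd, 0xdcba);

    	printf("%04x == %04x\n", patched, fold(0x1234 + 0xdcba));
    	return 0;
    }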
+1 -1
net/sched/act_api.c
··· 274 274 err = tc_setup_action(&fl_action->action, actions); 275 275 if (err) { 276 276 NL_SET_ERR_MSG_MOD(extack, 277 - "Failed to setup tc actions for offload\n"); 277 + "Failed to setup tc actions for offload"); 278 278 goto fl_err; 279 279 } 280 280
-5
net/sched/act_ct.c
··· 527 527 struct nf_conn *ct; 528 528 u8 dir; 529 529 530 - /* Previously seen or loopback */ 531 - ct = nf_ct_get(skb, &ctinfo); 532 - if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED) 533 - return false; 534 - 535 530 switch (family) { 536 531 case NFPROTO_IPV4: 537 532 if (!tcf_ct_flow_table_fill_tuple_ipv4(skb, &tuple, &tcph))
+21 -21
net/smc/smc_pnet.c
··· 113 113 pnettable = &sn->pnettable;
 114 114 
 115 115 /* remove table entry */
 116 - write_lock(&pnettable->lock);
 116 + mutex_lock(&pnettable->lock);
 117 117 list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist,
 118 118 list) {
 119 119 if (!pnet_name ||
··· 131 131 rc = 0;
 132 132 }
 133 133 }
 134 - write_unlock(&pnettable->lock);
 134 + mutex_unlock(&pnettable->lock);
 135 135 
 136 136 /* if this is not the initial namespace, stop here */
 137 137 if (net != &init_net)
··· 192 192 sn = net_generic(net, smc_net_id);
 193 193 pnettable = &sn->pnettable;
 194 194 
 195 - write_lock(&pnettable->lock);
 195 + mutex_lock(&pnettable->lock);
 196 196 list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist, list) {
 197 197 if (pnetelem->type == SMC_PNET_ETH && !pnetelem->ndev &&
 198 198 !strncmp(pnetelem->eth_name, ndev->name, IFNAMSIZ)) {
··· 206 206 break;
 207 207 }
 208 208 }
 209 - write_unlock(&pnettable->lock);
 209 + mutex_unlock(&pnettable->lock);
 210 210 return rc;
 211 211 }
 212 212 
··· 224 224 sn = net_generic(net, smc_net_id);
 225 225 pnettable = &sn->pnettable;
 226 226 
 227 - write_lock(&pnettable->lock);
 227 + mutex_lock(&pnettable->lock);
 228 228 list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist, list) {
 229 229 if (pnetelem->type == SMC_PNET_ETH && pnetelem->ndev == ndev) {
 230 230 dev_put_track(pnetelem->ndev, &pnetelem->dev_tracker);
··· 237 237 break;
 238 238 }
 239 239 }
 240 - write_unlock(&pnettable->lock);
 240 + mutex_unlock(&pnettable->lock);
 241 241 return rc;
 242 242 }
 243 243 
··· 370 370 strncpy(new_pe->eth_name, eth_name, IFNAMSIZ);
 371 371 rc = -EEXIST;
 372 372 new_netdev = true;
 373 - write_lock(&pnettable->lock);
 373 + mutex_lock(&pnettable->lock);
 374 374 list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) {
 375 375 if (tmp_pe->type == SMC_PNET_ETH &&
 376 376 !strncmp(tmp_pe->eth_name, eth_name, IFNAMSIZ)) {
··· 385 385 GFP_ATOMIC);
 386 386 }
 387 387 list_add_tail(&new_pe->list, &pnettable->pnetlist);
 388 - write_unlock(&pnettable->lock);
 388 + mutex_unlock(&pnettable->lock);
 389 389 } else {
 390 - write_unlock(&pnettable->lock);
 390 + mutex_unlock(&pnettable->lock);
 391 391 kfree(new_pe);
 392 392 goto out_put;
 393 393 }
··· 448 448 new_pe->ib_port = ib_port;
 449 449 
 450 450 new_ibdev = true;
 451 - write_lock(&pnettable->lock);
 451 + mutex_lock(&pnettable->lock);
 452 452 list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) {
 453 453 if (tmp_pe->type == SMC_PNET_IB &&
 454 454 !strncmp(tmp_pe->ib_name, ib_name, IB_DEVICE_NAME_MAX)) {
··· 458 458 }
 459 459 if (new_ibdev) {
 460 460 list_add_tail(&new_pe->list, &pnettable->pnetlist);
 461 - write_unlock(&pnettable->lock);
 461 + mutex_unlock(&pnettable->lock);
 462 462 } else {
 463 - write_unlock(&pnettable->lock);
 463 + mutex_unlock(&pnettable->lock);
 464 464 kfree(new_pe);
 465 465 }
 466 466 return (new_ibdev) ? 0 : -EEXIST;
··· 605 605 pnettable = &sn->pnettable;
 606 606 
 607 607 /* dump pnettable entries */
 608 - read_lock(&pnettable->lock);
 608 + mutex_lock(&pnettable->lock);
 609 609 list_for_each_entry(pnetelem, &pnettable->pnetlist, list) {
 610 610 if (pnetid && !smc_pnet_match(pnetelem->pnet_name, pnetid))
 611 611 continue;
··· 620 620 break;
 621 621 }
 622 622 }
 623 - read_unlock(&pnettable->lock);
 623 + mutex_unlock(&pnettable->lock);
 624 624 return idx;
 625 625 }
 626 626 
··· 864 864 struct smc_pnetids_ndev *pnetids_ndev = &sn->pnetids_ndev;
 865 865 
 866 866 INIT_LIST_HEAD(&pnettable->pnetlist);
 867 - rwlock_init(&pnettable->lock);
 867 + mutex_init(&pnettable->lock);
 868 868 INIT_LIST_HEAD(&pnetids_ndev->list);
 869 869 rwlock_init(&pnetids_ndev->lock);
 870 870 
··· 947 947 sn = net_generic(net, smc_net_id);
 948 948 pnettable = &sn->pnettable;
 949 949 
 950 - read_lock(&pnettable->lock);
 950 + mutex_lock(&pnettable->lock);
 951 951 list_for_each_entry(pnetelem, &pnettable->pnetlist, list) {
 952 952 if (pnetelem->type == SMC_PNET_ETH && ndev == pnetelem->ndev) {
 953 953 /* get pnetid of netdev device */
··· 956 956 break;
 957 957 }
 958 958 }
 959 - read_unlock(&pnettable->lock);
 959 + mutex_unlock(&pnettable->lock);
 960 960 return rc;
 961 961 }
 962 962 
··· 1159 1159 sn = net_generic(&init_net, smc_net_id);
 1160 1160 pnettable = &sn->pnettable;
 1161 1161 
 1162 - read_lock(&pnettable->lock);
 1162 + mutex_lock(&pnettable->lock);
 1163 1163 list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) {
 1164 1164 if (tmp_pe->type == SMC_PNET_IB &&
 1165 1165 !strncmp(tmp_pe->ib_name, ib_name, IB_DEVICE_NAME_MAX) &&
··· 1169 1169 break;
 1170 1170 }
 1171 1171 }
 1172 - read_unlock(&pnettable->lock);
 1172 + mutex_unlock(&pnettable->lock);
 1173 1173 
 1174 1174 return rc;
 1175 1175 }
··· 1188 1188 sn = net_generic(&init_net, smc_net_id);
 1189 1189 pnettable = &sn->pnettable;
 1190 1190 
 1191 - read_lock(&pnettable->lock);
 1191 + mutex_lock(&pnettable->lock);
 1192 1192 list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) {
 1193 1193 if (tmp_pe->type == SMC_PNET_IB &&
 1194 1194 !strncmp(tmp_pe->ib_name, ib_name, IB_DEVICE_NAME_MAX)) {
··· 1197 1197 break;
 1198 1198 }
 1199 1199 }
 1200 - read_unlock(&pnettable->lock);
 1200 + mutex_unlock(&pnettable->lock);
 1201 1201 
 1202 1202 return rc;
 1203 1203 }
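The smc_pnet conversion swaps the pnettable rwlock for a mutex, presumably because code paths holding the lock may sleep; it also removes the reader/writer split, so every walk takes the same lock, as in this pthread-based sketch:

    #include <pthread.h>
    #include <stdio.h>

    struct entry { int id; struct entry *next; };

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct entry *pnetlist;

    static int lookup(int id)
    {
    	struct entry *e;
    	int found = 0;

    	/* one sleeping lock for readers and writers alike */
    	pthread_mutex_lock(&table_lock);
    	for (e = pnetlist; e; e = e->next)
    		if (e->id == id) {
    			found = 1;
    			break;
    		}
    	pthread_mutex_unlock(&table_lock);
    	return found;
    }

    int main(void)
    {
    	struct entry a = { 42, NULL };

    	pnetlist = &a;
    	printf("found: %d\n", lookup(42));
    	return 0;
    }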
+1 -1
net/smc/smc_pnet.h
··· 29 29 * @pnetlist: List of PNETIDs 30 30 */ 31 31 struct smc_pnettable { 32 - rwlock_t lock; 32 + struct mutex lock; 33 33 struct list_head pnetlist; 34 34 }; 35 35
+1 -1
net/tipc/name_table.c
··· 967 967 list_for_each_entry(p, &sr->all_publ, all_publ) 968 968 if (p->key == *last_key) 969 969 break; 970 - if (p->key != *last_key) 970 + if (list_entry_is_head(p, &sr->all_publ, all_publ)) 971 971 return -EPIPE; 972 972 } else { 973 973 p = list_first_entry(&sr->all_publ,
+1 -1
net/tipc/socket.c
··· 3749 3749 if (p->key == *last_publ) 3750 3750 break; 3751 3751 } 3752 - if (p->key != *last_publ) { 3752 + if (list_entry_is_head(p, &tsk->publications, binding_sock)) { 3753 3753 /* We never set seq or call nl_dump_check_consistent() 3754 3754 * this means that setting prev_seq here will cause the 3755 3755 * consistence check to fail in the netlink callback
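Both TIPC hunks replace a `p->key != *last` check with `list_entry_is_head()`: when the resume key is not found, the iterator finishes pointing at the list head, which is not a real entry, so dereferencing `p->key` reads outside any valid object. A compilable illustration of testing the head without dereferencing:

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };
    struct publ { int key; struct list_head list; };

    #define container_of(ptr, type, member) \
    	((type *)((char *)(ptr) - offsetof(type, member)))
    #define list_entry_is_head(pos, head, member) \
    	(&(pos)->member == (head))

    int main(void)
    {
    	struct list_head head;
    	struct publ a = { .key = 1 };
    	struct publ *p;

    	head.next = head.prev = &a.list;	/* one-entry circular list */
    	a.list.next = a.list.prev = &head;

    	/* resume key 99 is absent, so the walk runs off the end */
    	for (p = container_of(head.next, struct publ, list);
    	     !list_entry_is_head(p, &head, list);
    	     p = container_of(p->list.next, struct publ, list))
    		if (p->key == 99)
    			break;

    	/* here p aliases the head, not a struct publ: never read p->key */
    	printf("found: %d\n", !list_entry_is_head(p, &head, list));
    	return 0;
    }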
+2 -2
security/selinux/ima.c
··· 77 77 size_t policy_len; 78 78 int rc = 0; 79 79 80 - WARN_ON(!mutex_is_locked(&state->policy_mutex)); 80 + lockdep_assert_held(&state->policy_mutex); 81 81 82 82 state_str = selinux_ima_collect_state(state); 83 83 if (!state_str) { ··· 117 117 */ 118 118 void selinux_ima_measure_state(struct selinux_state *state) 119 119 { 120 - WARN_ON(mutex_is_locked(&state->policy_mutex)); 120 + lockdep_assert_not_held(&state->policy_mutex); 121 121 122 122 mutex_lock(&state->policy_mutex); 123 123 selinux_ima_measure_state_locked(state);
+10 -5
sound/core/memalloc.c
··· 511 511 DEFAULT_GFP, 0); 512 512 if (!sgt) 513 513 return NULL; 514 - dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->dev.dir); 514 + dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, 515 + sg_dma_address(sgt->sgl)); 515 516 p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt); 516 517 if (p) 517 518 dmab->private_data = sgt; ··· 541 540 if (mode == SNDRV_DMA_SYNC_CPU) { 542 541 if (dmab->dev.dir == DMA_TO_DEVICE) 543 542 return; 543 + invalidate_kernel_vmap_range(dmab->area, dmab->bytes); 544 544 dma_sync_sgtable_for_cpu(dmab->dev.dev, dmab->private_data, 545 545 dmab->dev.dir); 546 - invalidate_kernel_vmap_range(dmab->area, dmab->bytes); 547 546 } else { 548 547 if (dmab->dev.dir == DMA_FROM_DEVICE) 549 548 return; ··· 672 671 */ 673 672 static void *snd_dma_noncoherent_alloc(struct snd_dma_buffer *dmab, size_t size) 674 673 { 675 - dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->dev.dir); 676 - return dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr, 677 - dmab->dev.dir, DEFAULT_GFP); 674 + void *p; 675 + 676 + p = dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr, 677 + dmab->dev.dir, DEFAULT_GFP); 678 + if (p) 679 + dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->addr); 680 + return p; 678 681 } 679 682 680 683 static void snd_dma_noncoherent_free(struct snd_dma_buffer *dmab)
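`dma_need_sync()` takes a DMA address rather than a direction, so the fix queries it with `sg_dma_address(sgt->sgl)` or `dmab->addr`, and only after the allocation has succeeded. A small sketch of that succeed-then-query ordering, with a hypothetical per-address predicate:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct dma_buf { unsigned long addr; bool need_sync; };

    /* hypothetical per-address predicate, standing in for dma_need_sync() */
    static bool need_sync_at(unsigned long addr)
    {
    	return (addr & 63) != 0;	/* pretend only unaligned buffers need it */
    }

    static void *buf_alloc(struct dma_buf *b, size_t size)
    {
    	void *p = malloc(size);

    	if (p) {
    		b->addr = (unsigned long)p;
    		/* query only after success: before, there is no address to ask about */
    		b->need_sync = need_sync_at(b->addr);
    	}
    	return p;
    }

    int main(void)
    {
    	struct dma_buf b = { 0 };
    	void *p = buf_alloc(&b, 64);

    	printf("allocated %p, need_sync=%d\n", p, (int)b.need_sync);
    	free(p);
    	return 0;
    }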
+4 -2
sound/pci/hda/hda_intel.c
··· 1615 1615 /* forced codec slots */ 1616 1616 SND_PCI_QUIRK(0x1043, 0x1262, "ASUS W5Fm", 0x103), 1617 1617 SND_PCI_QUIRK(0x1046, 0x1262, "ASUS W5F", 0x103), 1618 + SND_PCI_QUIRK(0x1558, 0x0351, "Schenker Dock 15", 0x105), 1618 1619 /* WinFast VP200 H (Teradici) user reported broken communication */ 1619 1620 SND_PCI_QUIRK(0x3a21, 0x040d, "WinFast VP200 H", 0x101), 1620 1621 {} ··· 1799 1798 1800 1799 assign_position_fix(chip, check_position_fix(chip, position_fix[dev])); 1801 1800 1802 - check_probe_mask(chip, dev); 1803 - 1804 1801 if (single_cmd < 0) /* allow fallback to single_cmd at errors */ 1805 1802 chip->fallback_to_single_cmd = 1; 1806 1803 else /* explicitly set to single_cmd or not */ ··· 1823 1824 dev_dbg(chip->card->dev, "Enable delay in RIRB handling\n"); 1824 1825 chip->bus.core.needs_damn_long_delay = 1; 1825 1826 } 1827 + 1828 + check_probe_mask(chip, dev); 1826 1829 1827 1830 err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops); 1828 1831 if (err < 0) { ··· 1941 1940 dma_bits = 32; 1942 1941 if (dma_set_mask_and_coherent(&pci->dev, DMA_BIT_MASK(dma_bits))) 1943 1942 dma_set_mask_and_coherent(&pci->dev, DMA_BIT_MASK(32)); 1943 + dma_set_max_seg_size(&pci->dev, UINT_MAX); 1944 1944 1945 1945 /* read number of streams from GCAP register instead of using 1946 1946 * hardcoded value
+25 -15
sound/pci/hda/patch_realtek.c
··· 138 138 * COEF access helper functions 139 139 */ 140 140 141 + static void coef_mutex_lock(struct hda_codec *codec) 142 + { 143 + struct alc_spec *spec = codec->spec; 144 + 145 + snd_hda_power_up_pm(codec); 146 + mutex_lock(&spec->coef_mutex); 147 + } 148 + 149 + static void coef_mutex_unlock(struct hda_codec *codec) 150 + { 151 + struct alc_spec *spec = codec->spec; 152 + 153 + mutex_unlock(&spec->coef_mutex); 154 + snd_hda_power_down_pm(codec); 155 + } 156 + 141 157 static int __alc_read_coefex_idx(struct hda_codec *codec, hda_nid_t nid, 142 158 unsigned int coef_idx) 143 159 { ··· 167 151 static int alc_read_coefex_idx(struct hda_codec *codec, hda_nid_t nid, 168 152 unsigned int coef_idx) 169 153 { 170 - struct alc_spec *spec = codec->spec; 171 154 unsigned int val; 172 155 173 - mutex_lock(&spec->coef_mutex); 156 + coef_mutex_lock(codec); 174 157 val = __alc_read_coefex_idx(codec, nid, coef_idx); 175 - mutex_unlock(&spec->coef_mutex); 158 + coef_mutex_unlock(codec); 176 159 return val; 177 160 } 178 161 ··· 188 173 static void alc_write_coefex_idx(struct hda_codec *codec, hda_nid_t nid, 189 174 unsigned int coef_idx, unsigned int coef_val) 190 175 { 191 - struct alc_spec *spec = codec->spec; 192 - 193 - mutex_lock(&spec->coef_mutex); 176 + coef_mutex_lock(codec); 194 177 __alc_write_coefex_idx(codec, nid, coef_idx, coef_val); 195 - mutex_unlock(&spec->coef_mutex); 178 + coef_mutex_unlock(codec); 196 179 } 197 180 198 181 #define alc_write_coef_idx(codec, coef_idx, coef_val) \ ··· 211 198 unsigned int coef_idx, unsigned int mask, 212 199 unsigned int bits_set) 213 200 { 214 - struct alc_spec *spec = codec->spec; 215 - 216 - mutex_lock(&spec->coef_mutex); 201 + coef_mutex_lock(codec); 217 202 __alc_update_coefex_idx(codec, nid, coef_idx, mask, bits_set); 218 - mutex_unlock(&spec->coef_mutex); 203 + coef_mutex_unlock(codec); 219 204 } 220 205 221 206 #define alc_update_coef_idx(codec, coef_idx, mask, bits_set) \ ··· 246 235 static void alc_process_coef_fw(struct hda_codec *codec, 247 236 const struct coef_fw *fw) 248 237 { 249 - struct alc_spec *spec = codec->spec; 250 - 251 - mutex_lock(&spec->coef_mutex); 238 + coef_mutex_lock(codec); 252 239 for (; fw->nid; fw++) { 253 240 if (fw->mask == (unsigned short)-1) 254 241 __alc_write_coefex_idx(codec, fw->nid, fw->idx, fw->val); ··· 254 245 __alc_update_coefex_idx(codec, fw->nid, fw->idx, 255 246 fw->mask, fw->val); 256 247 } 257 - mutex_unlock(&spec->coef_mutex); 248 + coef_mutex_unlock(codec); 258 249 } 259 250 260 251 /* ··· 9179 9170 SND_PCI_QUIRK(0x17aa, 0x3824, "Legion Y9000X 2020", ALC285_FIXUP_LEGION_Y9000X_SPEAKERS), 9180 9171 SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF), 9181 9172 SND_PCI_QUIRK(0x17aa, 0x3834, "Lenovo IdeaPad Slim 9i 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS), 9173 + SND_PCI_QUIRK(0x17aa, 0x383d, "Legion Y9000X 2019", ALC285_FIXUP_LEGION_Y9000X_SPEAKERS), 9182 9174 SND_PCI_QUIRK(0x17aa, 0x3843, "Yoga 9i", ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP), 9183 9175 SND_PCI_QUIRK(0x17aa, 0x3847, "Legion 7 16ACHG6", ALC287_FIXUP_LEGION_16ACHG6), 9184 9176 SND_PCI_QUIRK(0x17aa, 0x384a, "Lenovo Yoga 7 15ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
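The new Realtek helpers pair `snd_hda_power_up_pm()` with taking `coef_mutex`, so a COEF read-modify-write can no longer race with the codec powering down mid-sequence. The wrapper-pairing idea, reduced to a runnable sketch:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t coef_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int power_refs;

    static void power_up(void)   { power_refs++; }	/* stand-in for snd_hda_power_up_pm() */
    static void power_down(void) { power_refs--; }

    static void coef_lock(void)
    {
    	power_up();			 /* keep the codec awake... */
    	pthread_mutex_lock(&coef_mutex); /* ...for the whole COEF sequence */
    }

    static void coef_unlock(void)
    {
    	pthread_mutex_unlock(&coef_mutex);
    	power_down();			/* release in reverse order */
    }

    int main(void)
    {
    	coef_lock();
    	/* read-modify-write of coefficient registers would go here */
    	coef_unlock();
    	printf("power refs balanced: %d\n", power_refs);
    	return 0;
    }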
-1
sound/soc/amd/acp/acp-mach.h
··· 21 21 #include <linux/gpio/consumer.h> 22 22 23 23 #define EN_SPKR_GPIO_GB 0x11F 24 - #define EN_SPKR_GPIO_NK 0x146 25 24 #define EN_SPKR_GPIO_NONE -EINVAL 26 25 27 26 enum be_id {
+2 -2
sound/soc/amd/acp/acp-sof-mach.c
··· 37 37 .hs_codec_id = RT5682, 38 38 .amp_codec_id = MAX98360A, 39 39 .dmic_codec_id = DMIC, 40 - .gpio_spkr_en = EN_SPKR_GPIO_NK, 40 + .gpio_spkr_en = EN_SPKR_GPIO_NONE, 41 41 }; 42 42 43 43 static struct acp_card_drvdata sof_rt5682s_max_data = { ··· 47 47 .hs_codec_id = RT5682S, 48 48 .amp_codec_id = MAX98360A, 49 49 .dmic_codec_id = DMIC, 50 - .gpio_spkr_en = EN_SPKR_GPIO_NK, 50 + .gpio_spkr_en = EN_SPKR_GPIO_NONE, 51 51 }; 52 52 53 53 static const struct snd_kcontrol_new acp_controls[] = {
+7 -5
sound/soc/codecs/rt5668.c
··· 1022 1022 container_of(work, struct rt5668_priv, jack_detect_work.work); 1023 1023 int val, btn_type; 1024 1024 1025 - while (!rt5668->component) 1026 - usleep_range(10000, 15000); 1027 - 1028 - while (!rt5668->component->card->instantiated) 1029 - usleep_range(10000, 15000); 1025 + if (!rt5668->component || !rt5668->component->card || 1026 + !rt5668->component->card->instantiated) { 1027 + /* card not yet ready, try later */ 1028 + mod_delayed_work(system_power_efficient_wq, 1029 + &rt5668->jack_detect_work, msecs_to_jiffies(15)); 1030 + return; 1031 + } 1030 1032 1031 1033 mutex_lock(&rt5668->calibrate_mutex); 1032 1034
+7 -5
sound/soc/codecs/rt5682.c
··· 1092 1092 struct snd_soc_dapm_context *dapm; 1093 1093 int val, btn_type; 1094 1094 1095 - while (!rt5682->component) 1096 - usleep_range(10000, 15000); 1097 - 1098 - while (!rt5682->component->card->instantiated) 1099 - usleep_range(10000, 15000); 1095 + if (!rt5682->component || !rt5682->component->card || 1096 + !rt5682->component->card->instantiated) { 1097 + /* card not yet ready, try later */ 1098 + mod_delayed_work(system_power_efficient_wq, 1099 + &rt5682->jack_detect_work, msecs_to_jiffies(15)); 1100 + return; 1101 + } 1100 1102 1101 1103 dapm = snd_soc_component_get_dapm(rt5682->component); 1102 1104
+7 -5
sound/soc/codecs/rt5682s.c
··· 824 824 container_of(work, struct rt5682s_priv, jack_detect_work.work); 825 825 int val, btn_type; 826 826 827 - while (!rt5682s->component) 828 - usleep_range(10000, 15000); 829 - 830 - while (!rt5682s->component->card->instantiated) 831 - usleep_range(10000, 15000); 827 + if (!rt5682s->component || !rt5682s->component->card || 828 + !rt5682s->component->card->instantiated) { 829 + /* card not yet ready, try later */ 830 + mod_delayed_work(system_power_efficient_wq, 831 + &rt5682s->jack_detect_work, msecs_to_jiffies(15)); 832 + return; 833 + } 832 834 833 835 mutex_lock(&rt5682s->jdet_mutex); 834 836 mutex_lock(&rt5682s->calibrate_mutex);
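All three rt56xx codecs drop the unbounded `usleep_range()` polling loops: a workqueue handler should not spin waiting for the card, both because it ties up the worker and because the condition may never become true during teardown. Instead the handler bails out and re-arms itself, as in this sketch where a hypothetical `requeue_ms()` models `mod_delayed_work()`:

    #include <stdbool.h>
    #include <stdio.h>

    static bool card_ready;
    static int requeues;

    /* stand-in for mod_delayed_work(): re-arm ourselves instead of sleeping */
    static void requeue_ms(int ms)
    {
    	(void)ms;
    	requeues++;
    }

    static void jack_detect_work(void)
    {
    	if (!card_ready) {
    		requeue_ms(15);		/* bail out; don't block the worker */
    		return;
    	}
    	puts("running jack detection");
    }

    int main(void)
    {
    	jack_detect_work();		/* not ready yet: work re-queued */
    	card_ready = true;
    	jack_detect_work();		/* ready: detection runs */
    	printf("re-queued %d time(s)\n", requeues);
    	return 0;
    }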
+6 -1
sound/soc/codecs/tas2770.c
··· 38 38 gpiod_set_value_cansleep(tas2770->reset_gpio, 0); 39 39 msleep(20); 40 40 gpiod_set_value_cansleep(tas2770->reset_gpio, 1); 41 + usleep_range(1000, 2000); 41 42 } 42 43 43 44 snd_soc_component_write(tas2770->component, TAS2770_SW_RST, 44 45 TAS2770_RST); 46 + usleep_range(1000, 2000); 45 47 } 46 48 47 49 static int tas2770_set_bias_level(struct snd_soc_component *component, ··· 112 110 113 111 if (tas2770->sdz_gpio) { 114 112 gpiod_set_value_cansleep(tas2770->sdz_gpio, 1); 113 + usleep_range(1000, 2000); 115 114 } else { 116 115 ret = snd_soc_component_update_bits(component, TAS2770_PWR_CTRL, 117 116 TAS2770_PWR_CTRL_MASK, ··· 513 510 514 511 tas2770->component = component; 515 512 516 - if (tas2770->sdz_gpio) 513 + if (tas2770->sdz_gpio) { 517 514 gpiod_set_value_cansleep(tas2770->sdz_gpio, 1); 515 + usleep_range(1000, 2000); 516 + } 518 517 519 518 tas2770_reset(tas2770); 520 519
+2 -1
sound/soc/codecs/wm_adsp.c
··· 1441 1441 int ret, i; 1442 1442 1443 1443 for (i = 0; i < 5; ++i) { 1444 - ret = cs_dsp_coeff_read_ctrl(cs_ctl, 0, &coeff_v1, sizeof(coeff_v1)); 1444 + ret = cs_dsp_coeff_read_ctrl(cs_ctl, 0, &coeff_v1, 1445 + min(cs_ctl->len, sizeof(coeff_v1))); 1445 1446 if (ret < 0) 1446 1447 return ret; 1447 1448
+1
sound/soc/intel/skylake/skl.c
··· 952 952 /* allow 64bit DMA address if supported by H/W */ 953 953 if (dma_set_mask_and_coherent(bus->dev, DMA_BIT_MASK(64))) 954 954 dma_set_mask_and_coherent(bus->dev, DMA_BIT_MASK(32)); 955 + dma_set_max_seg_size(bus->dev, UINT_MAX); 955 956 956 957 /* initialize streams */ 957 958 snd_hdac_ext_stream_init_all
+4 -4
sound/soc/qcom/lpass-platform.c
··· 524 524 return -EINVAL; 525 525 } 526 526 527 - ret = regmap_update_bits(map, reg_irqclr, val_irqclr, val_irqclr); 527 + ret = regmap_write_bits(map, reg_irqclr, val_irqclr, val_irqclr); 528 528 if (ret) { 529 529 dev_err(soc_runtime->dev, "error writing to irqclear reg: %d\n", ret); 530 530 return ret; ··· 665 665 return -EINVAL; 666 666 } 667 667 if (interrupts & LPAIF_IRQ_PER(chan)) { 668 - rv = regmap_update_bits(map, reg, mask, (LPAIF_IRQ_PER(chan) | val)); 668 + rv = regmap_write_bits(map, reg, mask, (LPAIF_IRQ_PER(chan) | val)); 669 669 if (rv) { 670 670 dev_err(soc_runtime->dev, 671 671 "error writing to irqclear reg: %d\n", rv); ··· 676 676 } 677 677 678 678 if (interrupts & LPAIF_IRQ_XRUN(chan)) { 679 - rv = regmap_update_bits(map, reg, mask, (LPAIF_IRQ_XRUN(chan) | val)); 679 + rv = regmap_write_bits(map, reg, mask, (LPAIF_IRQ_XRUN(chan) | val)); 680 680 if (rv) { 681 681 dev_err(soc_runtime->dev, 682 682 "error writing to irqclear reg: %d\n", rv); ··· 688 688 } 689 689 690 690 if (interrupts & LPAIF_IRQ_ERR(chan)) { 691 - rv = regmap_update_bits(map, reg, mask, (LPAIF_IRQ_ERR(chan) | val)); 691 + rv = regmap_write_bits(map, reg, mask, (LPAIF_IRQ_ERR(chan) | val)); 692 692 if (rv) { 693 693 dev_err(soc_runtime->dev, 694 694 "error writing to irqclear reg: %d\n", rv);
+31 -10
sound/soc/soc-ops.c
··· 308 308 unsigned int sign_bit = mc->sign_bit; 309 309 unsigned int mask = (1 << fls(max)) - 1; 310 310 unsigned int invert = mc->invert; 311 - int err; 311 + int err, ret; 312 312 bool type_2r = false; 313 313 unsigned int val2 = 0; 314 314 unsigned int val, val_mask; ··· 350 350 err = snd_soc_component_update_bits(component, reg, val_mask, val); 351 351 if (err < 0) 352 352 return err; 353 + ret = err; 353 354 354 - if (type_2r) 355 + if (type_2r) { 355 356 err = snd_soc_component_update_bits(component, reg2, val_mask, 356 - val2); 357 + val2); 358 + /* Don't discard any error code or drop change flag */ 359 + if (ret == 0 || err < 0) { 360 + ret = err; 361 + } 362 + } 357 363 358 - return err; 364 + return ret; 359 365 } 360 366 EXPORT_SYMBOL_GPL(snd_soc_put_volsw); 361 367 ··· 427 421 int min = mc->min; 428 422 unsigned int mask = (1U << (fls(min + max) - 1)) - 1; 429 423 int err = 0; 424 + int ret; 430 425 unsigned int val, val_mask; 431 426 432 427 if (ucontrol->value.integer.value[0] < 0) ··· 444 437 err = snd_soc_component_update_bits(component, reg, val_mask, val); 445 438 if (err < 0) 446 439 return err; 440 + ret = err; 447 441 448 442 if (snd_soc_volsw_is_stereo(mc)) { 449 443 unsigned int val2; ··· 455 447 456 448 err = snd_soc_component_update_bits(component, reg2, val_mask, 457 449 val2); 450 + 451 + /* Don't discard any error code or drop change flag */ 452 + if (ret == 0 || err < 0) { 453 + ret = err; 454 + } 458 455 } 459 456 return err; 460 457 } ··· 519 506 unsigned int mask = (1 << fls(max)) - 1; 520 507 unsigned int invert = mc->invert; 521 508 unsigned int val, val_mask; 522 - int ret; 509 + int err, ret; 523 510 524 511 if (invert) 525 512 val = (max - ucontrol->value.integer.value[0]) & mask; ··· 528 515 val_mask = mask << shift; 529 516 val = val << shift; 530 517 531 - ret = snd_soc_component_update_bits(component, reg, val_mask, val); 532 - if (ret < 0) 533 - return ret; 518 + err = snd_soc_component_update_bits(component, reg, val_mask, val); 519 + if (err < 0) 520 + return err; 521 + ret = err; 534 522 535 523 if (snd_soc_volsw_is_stereo(mc)) { 536 524 if (invert) ··· 541 527 val_mask = mask << shift; 542 528 val = val << shift; 543 529 544 - ret = snd_soc_component_update_bits(component, rreg, val_mask, 530 + err = snd_soc_component_update_bits(component, rreg, val_mask, 545 531 val); 532 + /* Don't discard any error code or drop change flag */ 533 + if (ret == 0 || err < 0) { 534 + ret = err; 535 + } 546 536 } 547 537 548 538 return ret; ··· 895 877 unsigned long mask = (1UL<<mc->nbits)-1; 896 878 long max = mc->max; 897 879 long val = ucontrol->value.integer.value[0]; 880 + int ret = 0; 898 881 unsigned int i; 899 882 900 883 if (val < mc->min || val > mc->max) ··· 910 891 regmask, regval); 911 892 if (err < 0) 912 893 return err; 894 + if (err > 0) 895 + ret = err; 913 896 } 914 897 915 - return 0; 898 + return ret; 916 899 } 917 900 EXPORT_SYMBOL_GPL(snd_soc_put_xr_sx); 918 901
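`snd_soc_component_update_bits()` returns 1 when the register changed, 0 when it did not, and a negative errno on failure; the soc-ops hunks fold the per-register results together so an error always wins and a change on either channel is still reported. A compact model of that combination (note the combined value is what the sketch returns):

    #include <stdio.h>

    /* model of snd_soc_component_update_bits(): <0 error, 0 no change, 1 changed */
    static int update_bits(int result) { return result; }

    static int put_stereo(int left_result, int right_result)
    {
    	int err, ret;

    	err = update_bits(left_result);
    	if (err < 0)
    		return err;
    	ret = err;

    	err = update_bits(right_result);
    	/* don't discard an error code, don't drop the change flag */
    	if (ret == 0 || err < 0)
    		ret = err;

    	return ret;	/* the combined result is what gets reported */
    }

    int main(void)
    {
    	printf("%d %d %d\n",
    	       put_stereo(1, 0),	/* 1: left channel changed */
    	       put_stereo(0, 0),	/* 0: nothing changed */
    	       put_stereo(0, -5));	/* -5: error propagated */
    	return 0;
    }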
+1
sound/soc/sof/intel/hda.c
··· 956 956 dev_dbg(sdev->dev, "DMA mask is 32 bit\n"); 957 957 dma_set_mask_and_coherent(&pci->dev, DMA_BIT_MASK(32)); 958 958 } 959 + dma_set_max_seg_size(&pci->dev, UINT_MAX); 959 960 960 961 /* init streams */ 961 962 ret = hda_dsp_stream_init(sdev);
+2 -2
sound/usb/implicit.c
··· 47 47 static const struct snd_usb_implicit_fb_match playback_implicit_fb_quirks[] = { 48 48 /* Generic matching */ 49 49 IMPLICIT_FB_GENERIC_DEV(0x0499, 0x1509), /* Steinberg UR22 */ 50 - IMPLICIT_FB_GENERIC_DEV(0x0763, 0x2080), /* M-Audio FastTrack Ultra */ 51 - IMPLICIT_FB_GENERIC_DEV(0x0763, 0x2081), /* M-Audio FastTrack Ultra */ 52 50 IMPLICIT_FB_GENERIC_DEV(0x0763, 0x2030), /* M-Audio Fast Track C400 */ 53 51 IMPLICIT_FB_GENERIC_DEV(0x0763, 0x2031), /* M-Audio Fast Track C600 */ 54 52 55 53 /* Fixed EP */ 56 54 /* FIXME: check the availability of generic matching */ 55 + IMPLICIT_FB_FIXED_DEV(0x0763, 0x2080, 0x81, 2), /* M-Audio FastTrack Ultra */ 56 + IMPLICIT_FB_FIXED_DEV(0x0763, 0x2081, 0x81, 2), /* M-Audio FastTrack Ultra */ 57 57 IMPLICIT_FB_FIXED_DEV(0x2466, 0x8010, 0x81, 2), /* Fractal Audio Axe-Fx III */ 58 58 IMPLICIT_FB_FIXED_DEV(0x31e9, 0x0001, 0x81, 2), /* Solid State Logic SSL2 */ 59 59 IMPLICIT_FB_FIXED_DEV(0x31e9, 0x0002, 0x81, 2), /* Solid State Logic SSL2+ */
+3 -6
sound/usb/mixer.c
··· 3678 3678 err = snd_usb_set_cur_mix_value(cval, c + 1, idx, 3679 3679 cval->cache_val[idx]); 3680 3680 if (err < 0) 3681 - return err; 3681 + break; 3682 3682 } 3683 3683 idx++; 3684 3684 } 3685 3685 } else { 3686 3686 /* master */ 3687 - if (cval->cached) { 3688 - err = snd_usb_set_cur_mix_value(cval, 0, 0, *cval->cache_val); 3689 - if (err < 0) 3690 - return err; 3691 - } 3687 + if (cval->cached) 3688 + snd_usb_set_cur_mix_value(cval, 0, 0, *cval->cache_val); 3692 3689 } 3693 3690 3694 3691 return 0;
+15 -15
tools/cgroup/memcg_slabinfo.py
··· 11 11 from drgn.helpers.linux import for_each_page 12 12 from drgn.helpers.linux.cpumask import for_each_online_cpu 13 13 from drgn.helpers.linux.percpu import per_cpu_ptr 14 - from drgn import container_of, FaultError, Object 14 + from drgn import container_of, FaultError, Object, cast 15 15 16 16 17 17 DESC = """ ··· 69 69 70 70 71 71 def count_partial(n, fn): 72 - nr_pages = 0 73 - for page in list_for_each_entry('struct page', n.partial.address_of_(), 74 - 'lru'): 75 - nr_pages += fn(page) 76 - return nr_pages 72 + nr_objs = 0 73 + for slab in list_for_each_entry('struct slab', n.partial.address_of_(), 74 + 'slab_list'): 75 + nr_objs += fn(slab) 76 + return nr_objs 77 77 78 78 79 - def count_free(page): 80 - return page.objects - page.inuse 79 + def count_free(slab): 80 + return slab.objects - slab.inuse 81 81 82 82 83 83 def slub_get_slabinfo(s, cfg): ··· 145 145 return cfg 146 146 147 147 148 - def for_each_slab_page(prog): 148 + def for_each_slab(prog): 149 149 PGSlab = 1 << prog.constant('PG_slab') 150 150 PGHead = 1 << prog.constant('PG_head') 151 151 152 152 for page in for_each_page(prog): 153 153 try: 154 154 if page.flags.value_() & PGSlab: 155 - yield page 155 + yield cast('struct slab *', page) 156 156 except FaultError: 157 157 pass 158 158 ··· 190 190 'list'): 191 191 obj_cgroups.add(ptr.value_()) 192 192 193 - # look over all slab pages, belonging to non-root memcgs 194 - # and look for objects belonging to the given memory cgroup 195 - for page in for_each_slab_page(prog): 196 - objcg_vec_raw = page.memcg_data.value_() 193 + # look over all slab folios and look for objects belonging 194 + # to the given memory cgroup 195 + for slab in for_each_slab(prog): 196 + objcg_vec_raw = slab.memcg_data.value_() 197 197 if objcg_vec_raw == 0: 198 198 continue 199 - cache = page.slab_cache 199 + cache = slab.slab_cache 200 200 if not cache: 201 201 continue 202 202 addr = cache.value_()
+2 -2
tools/testing/selftests/exec/Makefile
··· 3 3 CFLAGS += -Wno-nonnull 4 4 CFLAGS += -D_GNU_SOURCE 5 5 6 - TEST_PROGS := binfmt_script non-regular 7 - TEST_GEN_PROGS := execveat load_address_4096 load_address_2097152 load_address_16777216 6 + TEST_PROGS := binfmt_script 7 + TEST_GEN_PROGS := execveat load_address_4096 load_address_2097152 load_address_16777216 non-regular 8 8 TEST_GEN_FILES := execveat.symlink execveat.denatured script subdir 9 9 # Makefile is a run-time dependency, since it's accessed by the execveat test 10 10 TEST_FILES := Makefile
+1 -1
tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc
··· 19 19 20 20 FILTER=set_ftrace_filter 21 21 FUNC1="schedule" 22 - FUNC2="do_softirq" 22 + FUNC2="scheduler_tick" 23 23 24 24 ALL_FUNCS="#### all functions enabled ####" 25 25
+2 -2
tools/testing/selftests/mount_setattr/mount_setattr_test.c
··· 1236 1236 } 1237 1237 1238 1238 /** 1239 - * Validate that an attached mount in our mount namespace can be idmapped. 1239 + * Validate that an attached mount in our mount namespace cannot be idmapped. 1240 1240 * (The kernel enforces that the mount's mount namespace and the caller's mount 1241 1241 * namespace match.) 1242 1242 */ ··· 1259 1259 1260 1260 attr.userns_fd = get_userns_fd(0, 10000, 10000); 1261 1261 ASSERT_GE(attr.userns_fd, 0); 1262 - ASSERT_EQ(sys_mount_setattr(open_tree_fd, "", AT_EMPTY_PATH, &attr, sizeof(attr)), 0); 1262 + ASSERT_NE(sys_mount_setattr(open_tree_fd, "", AT_EMPTY_PATH, &attr, sizeof(attr)), 0); 1263 1263 ASSERT_EQ(close(attr.userns_fd), 0); 1264 1264 ASSERT_EQ(close(open_tree_fd), 0); 1265 1265 }
+37 -7
tools/testing/selftests/net/mptcp/diag.sh
··· 71 71 __chk_nr "grep -c remote_key" $* 72 72 } 73 73 74 + # $1: ns, $2: port 75 + wait_local_port_listen() 76 + { 77 + local listener_ns="${1}" 78 + local port="${2}" 79 + 80 + local port_hex i 81 + 82 + port_hex="$(printf "%04X" "${port}")" 83 + for i in $(seq 10); do 84 + ip netns exec "${listener_ns}" cat /proc/net/tcp | \ 85 + awk "BEGIN {rc=1} {if (\$2 ~ /:${port_hex}\$/ && \$4 ~ /0A/) {rc=0; exit}} END {exit rc}" && 86 + break 87 + sleep 0.1 88 + done 89 + } 90 + 91 + wait_connected() 92 + { 93 + local listener_ns="${1}" 94 + local port="${2}" 95 + 96 + local port_hex i 97 + 98 + port_hex="$(printf "%04X" "${port}")" 99 + for i in $(seq 10); do 100 + ip netns exec ${listener_ns} grep -q " 0100007F:${port_hex} " /proc/net/tcp && break 101 + sleep 0.1 102 + done 103 + } 74 104 75 105 trap cleanup EXIT 76 106 ip netns add $ns ··· 111 81 ip netns exec $ns \ 112 82 ./mptcp_connect -p 10000 -l -t ${timeout_poll} \ 113 83 0.0.0.0 >/dev/null & 114 - sleep 0.1 84 + wait_local_port_listen $ns 10000 115 85 chk_msk_nr 0 "no msk on netns creation" 116 86 117 87 echo "b" | \ 118 88 timeout ${timeout_test} \ 119 89 ip netns exec $ns \ 120 - ./mptcp_connect -p 10000 -j -t ${timeout_poll} \ 90 + ./mptcp_connect -p 10000 -r 0 -t ${timeout_poll} \ 121 91 127.0.0.1 >/dev/null & 122 - sleep 0.1 92 + wait_connected $ns 10000 123 93 chk_msk_nr 2 "after MPC handshake " 124 94 chk_msk_remote_key_nr 2 "....chk remote_key" 125 95 chk_msk_fallback_nr 0 "....chk no fallback" ··· 131 101 ip netns exec $ns \ 132 102 ./mptcp_connect -p 10001 -l -s TCP -t ${timeout_poll} \ 133 103 0.0.0.0 >/dev/null & 134 - sleep 0.1 104 + wait_local_port_listen $ns 10001 135 105 echo "b" | \ 136 106 timeout ${timeout_test} \ 137 107 ip netns exec $ns \ 138 - ./mptcp_connect -p 10001 -j -t ${timeout_poll} \ 108 + ./mptcp_connect -p 10001 -r 0 -t ${timeout_poll} \ 139 109 127.0.0.1 >/dev/null & 140 - sleep 0.1 110 + wait_connected $ns 10001 141 111 chk_msk_fallback_nr 1 "check fallback" 142 112 flush_pids 143 113 ··· 149 119 ./mptcp_connect -p $((I+10001)) -l -w 10 \ 150 120 -t ${timeout_poll} 0.0.0.0 >/dev/null & 151 121 done 152 - sleep 0.1 122 + wait_local_port_listen $ns $((NR_CLIENTS + 10001)) 153 123 154 124 for I in `seq 1 $NR_CLIENTS`; do 155 125 echo "b" | \
+25 -7
tools/testing/selftests/net/mptcp/mptcp_join.sh
··· 799 799 local ack_nr=$4 800 800 local count 801 801 local dump_stats 802 + local with_cookie 802 803 803 804 printf "%02u %-36s %s" "$TEST_COUNT" "$msg" "syn" 804 805 count=`ip netns exec $ns1 nstat -as | grep MPTcpExtMPJoinSynRx | awk '{print $2}'` ··· 813 812 fi 814 813 815 814 echo -n " - synack" 815 + with_cookie=`ip netns exec $ns2 sysctl -n net.ipv4.tcp_syncookies` 816 816 count=`ip netns exec $ns2 nstat -as | grep MPTcpExtMPJoinSynAckRx | awk '{print $2}'` 817 817 [ -z "$count" ] && count=0 818 818 if [ "$count" != "$syn_ack_nr" ]; then 819 - echo "[fail] got $count JOIN[s] synack expected $syn_ack_nr" 820 - ret=1 821 - dump_stats=1 819 + # simult connections exceeding the limit with cookie enabled could go up to 820 + # synack validation as the conn limit can be enforced reliably only after 821 + # the subflow creation 822 + if [ "$with_cookie" = 2 ] && [ "$count" -gt "$syn_ack_nr" ] && [ "$count" -le "$syn_nr" ]; then 823 + echo -n "[ ok ]" 824 + else 825 + echo "[fail] got $count JOIN[s] synack expected $syn_ack_nr" 826 + ret=1 827 + dump_stats=1 828 + fi 822 829 else 823 830 echo -n "[ ok ]" 824 831 fi ··· 900 891 local mis_ack_nr=${8:-0} 901 892 local count 902 893 local dump_stats 894 + local timeout 895 + 896 + timeout=`ip netns exec $ns1 sysctl -n net.mptcp.add_addr_timeout` 903 897 904 898 printf "%-39s %s" " " "add" 905 - count=`ip netns exec $ns2 nstat -as | grep MPTcpExtAddAddr | awk '{print $2}'` 899 + count=`ip netns exec $ns2 nstat -as MPTcpExtAddAddr | grep MPTcpExtAddAddr | awk '{print $2}'` 906 900 [ -z "$count" ] && count=0 907 - if [ "$count" != "$add_nr" ]; then 901 + 902 + # if the test configured a short timeout tolerate greater then expected 903 + # add addrs options, due to retransmissions 904 + if [ "$count" != "$add_nr" ] && [ "$timeout" -gt 1 -o "$count" -lt "$add_nr" ]; then 908 905 echo "[fail] got $count ADD_ADDR[s] expected $add_nr" 909 906 ret=1 910 907 dump_stats=1 ··· 1115 1100 local ns=$1 1116 1101 1117 1102 while [ $time -lt $timeout_ms ]; do 1118 - local cnt=$(ip netns exec $ns ss -t state time-wait |wc -l) 1103 + local cnt=$(ip netns exec $ns nstat -as TcpAttemptFails | grep TcpAttemptFails | awk '{print $2}') 1119 1104 1120 1105 [ "$cnt" = 1 ] && return 1 1121 1106 time=$((time + 100)) ··· 1312 1297 pm_nl_add_endpoint $ns2 10.0.2.2 flags signal 1313 1298 pm_nl_add_endpoint $ns2 10.0.3.2 flags signal 1314 1299 pm_nl_add_endpoint $ns2 10.0.4.2 flags signal 1315 - run_tests $ns1 $ns2 10.0.1.1 1300 + 1301 + # the peer could possibly miss some addr notification, allow retransmission 1302 + ip netns exec $ns1 sysctl -q net.mptcp.add_addr_timeout=1 1303 + run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow 1316 1304 chk_join_nr "signal addresses race test" 3 3 3 1317 1305 1318 1306 # the server will not signal the address terminating
+1 -1
tools/testing/selftests/seccomp/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 - CFLAGS += -Wl,-no-as-needed -Wall 2 + CFLAGS += -Wl,-no-as-needed -Wall -isystem ../../../../usr/include/ 3 3 LDFLAGS += -lpthread 4 4 5 5 TEST_GEN_PROGS := seccomp_bpf seccomp_benchmark