Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR.

No conflicts.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+2795 -3769
-9
Documentation/ABI/testing/sysfs-class-led
··· 59 59 brightness. Reading this file when no hw brightness change 60 60 event has happened will return an ENODATA error. 61 61 62 - What: /sys/class/leds/<led>/color 63 - Date: June 2023 64 - KernelVersion: 6.5 65 - Description: 66 - Color of the LED. 67 - 68 - This is a read-only file. Reading this file returns the color 69 - of the LED as a string (e.g: "red", "green", "multicolor"). 70 - 71 62 What: /sys/class/leds/<led>/trigger 72 63 Date: March 2006 73 64 KernelVersion: 2.6.17
+1 -1
Documentation/devicetree/bindings/pinctrl/nxp,s32g2-siul2-pinctrl.yaml
··· 9 9 10 10 maintainers: 11 11 - Ghennadi Procopciuc <Ghennadi.Procopciuc@oss.nxp.com> 12 - - Chester Lin <clin@suse.com> 12 + - Chester Lin <chester62515@gmail.com> 13 13 14 14 description: | 15 15 S32G2 pinmux is implemented in SIUL2 (System Integration Unit Lite2),
+6 -1
Documentation/devicetree/bindings/usb/microchip,usb5744.yaml
··· 36 36 37 37 vdd-supply: 38 38 description: 39 - VDD power supply to the hub 39 + 3V3 power supply to the hub 40 + 41 + vdd2-supply: 42 + description: 43 + 1V2 power supply to the hub 40 44 41 45 peer-hub: 42 46 $ref: /schemas/types.yaml#/definitions/phandle ··· 66 62 properties: 67 63 reset-gpios: false 68 64 vdd-supply: false 65 + vdd2-supply: false 69 66 peer-hub: false 70 67 i2c-bus: false 71 68 else:
+2 -2
Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
··· 521 521 522 522 interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>, 523 523 <GIC_SPI 486 IRQ_TYPE_LEVEL_HIGH>, 524 - <GIC_SPI 488 IRQ_TYPE_LEVEL_HIGH>, 525 - <GIC_SPI 489 IRQ_TYPE_LEVEL_HIGH>; 524 + <GIC_SPI 488 IRQ_TYPE_EDGE_BOTH>, 525 + <GIC_SPI 489 IRQ_TYPE_EDGE_BOTH>; 526 526 interrupt-names = "hs_phy_irq", "ss_phy_irq", 527 527 "dm_hs_phy_irq", "dp_hs_phy_irq"; 528 528
+1 -1
Documentation/devicetree/bindings/usb/usb-hcd.yaml
··· 41 41 - | 42 42 usb { 43 43 phys = <&usb2_phy1>, <&usb3_phy1>; 44 - phy-names = "usb"; 44 + phy-names = "usb2", "usb3"; 45 45 #address-cells = <1>; 46 46 #size-cells = <0>; 47 47
+1
MAINTAINERS
··· 22056 22056 TRACING 22057 22057 M: Steven Rostedt <rostedt@goodmis.org> 22058 22058 M: Masami Hiramatsu <mhiramat@kernel.org> 22059 + R: Mathieu Desnoyers <mathieu.desnoyers@efficios.com> 22059 22060 L: linux-kernel@vger.kernel.org 22060 22061 L: linux-trace-kernel@vger.kernel.org 22061 22062 S: Maintained
+1 -1
Makefile
··· 2 2 VERSION = 6 3 3 PATCHLEVEL = 7 4 4 SUBLEVEL = 0 5 - EXTRAVERSION = -rc2 5 + EXTRAVERSION = -rc3 6 6 NAME = Hurr durr I'ma ninja sloth 7 7 8 8 # *DOCUMENTATION*
+2 -1
arch/arm/xen/enlighten.c
··· 484 484 * for secondary CPUs as they are brought up. 485 485 * For uniformity we use VCPUOP_register_vcpu_info even on cpu0. 486 486 */ 487 - xen_vcpu_info = alloc_percpu(struct vcpu_info); 487 + xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info), 488 + 1 << fls(sizeof(struct vcpu_info) - 1)); 488 489 if (xen_vcpu_info == NULL) 489 490 return -ENOMEM; 490 491
+1 -1
arch/arm64/Makefile
··· 158 158 159 159 all: $(notdir $(KBUILD_IMAGE)) 160 160 161 - 161 + vmlinuz.efi: Image 162 162 Image vmlinuz.efi: vmlinux 163 163 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ 164 164
+15 -2
arch/arm64/include/asm/setup.h
··· 21 21 extern bool rodata_enabled; 22 22 extern bool rodata_full; 23 23 24 - if (arg && !strcmp(arg, "full")) { 24 + if (!arg) 25 + return false; 26 + 27 + if (!strcmp(arg, "full")) { 28 + rodata_enabled = rodata_full = true; 29 + return true; 30 + } 31 + 32 + if (!strcmp(arg, "off")) { 33 + rodata_enabled = rodata_full = false; 34 + return true; 35 + } 36 + 37 + if (!strcmp(arg, "on")) { 25 38 rodata_enabled = true; 26 - rodata_full = true; 39 + rodata_full = false; 27 40 return true; 28 41 } 29 42
+3 -4
arch/arm64/mm/pageattr.c
··· 29 29 * 30 30 * KFENCE pool requires page-granular mapping if initialized late. 31 31 */ 32 - return (rodata_enabled && rodata_full) || debug_pagealloc_enabled() || 33 - arm64_kfence_can_set_direct_map(); 32 + return rodata_full || debug_pagealloc_enabled() || 33 + arm64_kfence_can_set_direct_map(); 34 34 } 35 35 36 36 static int change_page_range(pte_t *ptep, unsigned long addr, void *data) ··· 105 105 * If we are manipulating read-only permissions, apply the same 106 106 * change to the linear mapping of the pages that back this VM area. 107 107 */ 108 - if (rodata_enabled && 109 - rodata_full && (pgprot_val(set_mask) == PTE_RDONLY || 108 + if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY || 110 109 pgprot_val(clear_mask) == PTE_RDONLY)) { 111 110 for (i = 0; i < area->nr_pages; i++) { 112 111 __change_memory_common((u64)page_address(area->pages[i]),
+5 -2
arch/parisc/Kconfig
··· 115 115 default n 116 116 117 117 config GENERIC_BUG 118 - bool 119 - default y 118 + def_bool y 120 119 depends on BUG 120 + select GENERIC_BUG_RELATIVE_POINTERS if 64BIT 121 + 122 + config GENERIC_BUG_RELATIVE_POINTERS 123 + bool 121 124 122 125 config GENERIC_HWEIGHT 123 126 bool
+6 -3
arch/parisc/include/asm/alternative.h
··· 34 34 35 35 /* Alternative SMP implementation. */ 36 36 #define ALTERNATIVE(cond, replacement) "!0:" \ 37 - ".section .altinstructions, \"aw\" !" \ 37 + ".section .altinstructions, \"a\" !" \ 38 + ".align 4 !" \ 38 39 ".word (0b-4-.) !" \ 39 40 ".hword 1, " __stringify(cond) " !" \ 40 41 ".word " __stringify(replacement) " !" \ ··· 45 44 46 45 /* to replace one single instructions by a new instruction */ 47 46 #define ALTERNATIVE(from, to, cond, replacement)\ 48 - .section .altinstructions, "aw" ! \ 47 + .section .altinstructions, "a" ! \ 48 + .align 4 ! \ 49 49 .word (from - .) ! \ 50 50 .hword (to - from)/4, cond ! \ 51 51 .word replacement ! \ ··· 54 52 55 53 /* to replace multiple instructions by new code */ 56 54 #define ALTERNATIVE_CODE(from, num_instructions, cond, new_instr_ptr)\ 57 - .section .altinstructions, "aw" ! \ 55 + .section .altinstructions, "a" ! \ 56 + .align 4 ! \ 58 57 .word (from - .) ! \ 59 58 .hword -num_instructions, cond ! \ 60 59 .word (new_instr_ptr - .) ! \
+1
arch/parisc/include/asm/assembly.h
··· 574 574 */ 575 575 #define ASM_EXCEPTIONTABLE_ENTRY(fault_addr, except_addr) \ 576 576 .section __ex_table,"aw" ! \ 577 + .align 4 ! \ 577 578 .word (fault_addr - .), (except_addr - .) ! \ 578 579 .previous 579 580
+22 -16
arch/parisc/include/asm/bug.h
··· 17 17 #define PARISC_BUG_BREAK_ASM "break 0x1f, 0x1fff" 18 18 #define PARISC_BUG_BREAK_INSN 0x03ffe01f /* PARISC_BUG_BREAK_ASM */ 19 19 20 - #if defined(CONFIG_64BIT) 21 - #define ASM_WORD_INSN ".dword\t" 20 + #ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS 21 + # define __BUG_REL(val) ".word " __stringify(val) " - ." 22 22 #else 23 - #define ASM_WORD_INSN ".word\t" 23 + # define __BUG_REL(val) ".word " __stringify(val) 24 24 #endif 25 + 25 26 26 27 #ifdef CONFIG_DEBUG_BUGVERBOSE 27 28 #define BUG() \ 28 29 do { \ 29 30 asm volatile("\n" \ 30 31 "1:\t" PARISC_BUG_BREAK_ASM "\n" \ 31 - "\t.pushsection __bug_table,\"aw\"\n" \ 32 - "2:\t" ASM_WORD_INSN "1b, %c0\n" \ 33 - "\t.short %c1, %c2\n" \ 34 - "\t.org 2b+%c3\n" \ 32 + "\t.pushsection __bug_table,\"a\"\n" \ 33 + "\t.align 4\n" \ 34 + "2:\t" __BUG_REL(1b) "\n" \ 35 + "\t" __BUG_REL(%c0) "\n" \ 36 + "\t.short %1, %2\n" \ 37 + "\t.blockz %3-2*4-2*2\n" \ 35 38 "\t.popsection" \ 36 39 : : "i" (__FILE__), "i" (__LINE__), \ 37 - "i" (0), "i" (sizeof(struct bug_entry)) ); \ 40 + "i" (0), "i" (sizeof(struct bug_entry)) ); \ 38 41 unreachable(); \ 39 42 } while(0) 40 43 ··· 54 51 do { \ 55 52 asm volatile("\n" \ 56 53 "1:\t" PARISC_BUG_BREAK_ASM "\n" \ 57 - "\t.pushsection __bug_table,\"aw\"\n" \ 58 - "2:\t" ASM_WORD_INSN "1b, %c0\n" \ 59 - "\t.short %c1, %c2\n" \ 60 - "\t.org 2b+%c3\n" \ 54 + "\t.pushsection __bug_table,\"a\"\n" \ 55 + "\t.align 4\n" \ 56 + "2:\t" __BUG_REL(1b) "\n" \ 57 + "\t" __BUG_REL(%c0) "\n" \ 58 + "\t.short %1, %2\n" \ 59 + "\t.blockz %3-2*4-2*2\n" \ 61 60 "\t.popsection" \ 62 61 : : "i" (__FILE__), "i" (__LINE__), \ 63 62 "i" (BUGFLAG_WARNING|(flags)), \ ··· 70 65 do { \ 71 66 asm volatile("\n" \ 72 67 "1:\t" PARISC_BUG_BREAK_ASM "\n" \ 73 - "\t.pushsection __bug_table,\"aw\"\n" \ 74 - "2:\t" ASM_WORD_INSN "1b\n" \ 75 - "\t.short %c0\n" \ 76 - "\t.org 2b+%c1\n" \ 68 + "\t.pushsection __bug_table,\"a\"\n" \ 69 + "\t.align %2\n" \ 70 + "2:\t" __BUG_REL(1b) "\n" \ 71 + "\t.short %0\n" \ 72 + "\t.blockz %1-4-2\n" \ 77 73 "\t.popsection" \ 78 74 : : "i" (BUGFLAG_WARNING|(flags)), \ 79 75 "i" (sizeof(struct bug_entry)) ); \
+6 -2
arch/parisc/include/asm/jump_label.h
··· 15 15 asm_volatile_goto("1:\n\t" 16 16 "nop\n\t" 17 17 ".pushsection __jump_table, \"aw\"\n\t" 18 + ".align %1\n\t" 18 19 ".word 1b - ., %l[l_yes] - .\n\t" 19 20 __stringify(ASM_ULONG_INSN) " %c0 - .\n\t" 20 21 ".popsection\n\t" 21 - : : "i" (&((char *)key)[branch]) : : l_yes); 22 + : : "i" (&((char *)key)[branch]), "i" (sizeof(long)) 23 + : : l_yes); 22 24 23 25 return false; 24 26 l_yes: ··· 32 30 asm_volatile_goto("1:\n\t" 33 31 "b,n %l[l_yes]\n\t" 34 32 ".pushsection __jump_table, \"aw\"\n\t" 33 + ".align %1\n\t" 35 34 ".word 1b - ., %l[l_yes] - .\n\t" 36 35 __stringify(ASM_ULONG_INSN) " %c0 - .\n\t" 37 36 ".popsection\n\t" 38 - : : "i" (&((char *)key)[branch]) : : l_yes); 37 + : : "i" (&((char *)key)[branch]), "i" (sizeof(long)) 38 + : : l_yes); 39 39 40 40 return false; 41 41 l_yes:
+1 -1
arch/parisc/include/asm/ldcw.h
··· 55 55 }) 56 56 57 57 #ifdef CONFIG_SMP 58 - # define __lock_aligned __section(".data..lock_aligned") 58 + # define __lock_aligned __section(".data..lock_aligned") __aligned(16) 59 59 #endif 60 60 61 61 #endif /* __PARISC_LDCW_H */
+1
arch/parisc/include/asm/uaccess.h
··· 41 41 42 42 #define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\ 43 43 ".section __ex_table,\"aw\"\n" \ 44 + ".align 4\n" \ 44 45 ".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \ 45 46 ".previous\n" 46 47
-2
arch/parisc/include/uapi/asm/errno.h
··· 75 75 76 76 /* We now return you to your regularly scheduled HPUX. */ 77 77 78 - #define ENOSYM 215 /* symbol does not exist in executable */ 79 78 #define ENOTSOCK 216 /* Socket operation on non-socket */ 80 79 #define EDESTADDRREQ 217 /* Destination address required */ 81 80 #define EMSGSIZE 218 /* Message too long */ ··· 100 101 #define ETIMEDOUT 238 /* Connection timed out */ 101 102 #define ECONNREFUSED 239 /* Connection refused */ 102 103 #define EREFUSED ECONNREFUSED /* for HP's NFS apparently */ 103 - #define EREMOTERELEASE 240 /* Remote peer released connection */ 104 104 #define EHOSTDOWN 241 /* Host is down */ 105 105 #define EHOSTUNREACH 242 /* No route to host */ 106 106
+1
arch/parisc/kernel/vmlinux.lds.S
··· 130 130 RO_DATA(8) 131 131 132 132 /* unwind info */ 133 + . = ALIGN(4); 133 134 .PARISC.unwind : { 134 135 __start___unwind = .; 135 136 *(.PARISC.unwind)
-1
arch/s390/include/asm/processor.h
··· 228 228 execve_tail(); \ 229 229 } while (0) 230 230 231 - /* Forward declaration, a strange C thing */ 232 231 struct task_struct; 233 232 struct mm_struct; 234 233 struct seq_file;
+1
arch/s390/kernel/ipl.c
··· 666 666 &ipl_ccw_attr_group_lpar); 667 667 break; 668 668 case IPL_TYPE_ECKD: 669 + case IPL_TYPE_ECKD_DUMP: 669 670 rc = sysfs_create_group(&ipl_kset->kobj, &ipl_eckd_attr_group); 670 671 break; 671 672 case IPL_TYPE_FCP:
+5 -6
arch/s390/kernel/perf_pai_crypto.c
··· 279 279 if (IS_ERR(cpump)) 280 280 return PTR_ERR(cpump); 281 281 282 - /* Event initialization sets last_tag to 0. When later on the events 283 - * are deleted and re-added, do not reset the event count value to zero. 284 - * Events are added, deleted and re-added when 2 or more events 285 - * are active at the same time. 286 - */ 287 - event->hw.last_tag = 0; 288 282 event->destroy = paicrypt_event_destroy; 289 283 290 284 if (a->sample_period) { ··· 312 318 { 313 319 u64 sum; 314 320 321 + /* Event initialization sets last_tag to 0. When later on the events 322 + * are deleted and re-added, do not reset the event count value to zero. 323 + * Events are added, deleted and re-added when 2 or more events 324 + * are active at the same time. 325 + */ 315 326 if (!event->hw.last_tag) { 316 327 event->hw.last_tag = 1; 317 328 sum = paicrypt_getall(event); /* Get current value */
-1
arch/s390/kernel/perf_pai_ext.c
··· 260 260 rc = paiext_alloc(a, event); 261 261 if (rc) 262 262 return rc; 263 - event->hw.last_tag = 0; 264 263 event->destroy = paiext_event_destroy; 265 264 266 265 if (a->sample_period) {
+1 -1
arch/x86/events/intel/core.c
··· 4660 4660 if (pmu->intel_cap.pebs_output_pt_available) 4661 4661 pmu->pmu.capabilities |= PERF_PMU_CAP_AUX_OUTPUT; 4662 4662 else 4663 - pmu->pmu.capabilities |= ~PERF_PMU_CAP_AUX_OUTPUT; 4663 + pmu->pmu.capabilities &= ~PERF_PMU_CAP_AUX_OUTPUT; 4664 4664 4665 4665 intel_pmu_check_event_constraints(pmu->event_constraints, 4666 4666 pmu->num_counters,
+11 -28
arch/x86/kernel/cpu/microcode/amd.c
··· 104 104 size_t size; 105 105 }; 106 106 107 - static u32 ucode_new_rev; 108 - 109 107 /* 110 108 * Microcode patch container file is prepended to the initrd in cpio 111 109 * format. See Documentation/arch/x86/microcode.rst ··· 440 442 * 441 443 * Returns true if container found (sets @desc), false otherwise. 442 444 */ 443 - static bool early_apply_microcode(u32 cpuid_1_eax, void *ucode, size_t size) 445 + static bool early_apply_microcode(u32 cpuid_1_eax, u32 old_rev, void *ucode, size_t size) 444 446 { 445 447 struct cont_desc desc = { 0 }; 446 448 struct microcode_amd *mc; 447 449 bool ret = false; 448 - u32 rev, dummy; 449 450 450 451 desc.cpuid_1_eax = cpuid_1_eax; 451 452 ··· 454 457 if (!mc) 455 458 return ret; 456 459 457 - native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); 458 - 459 460 /* 460 461 * Allow application of the same revision to pick up SMT-specific 461 462 * changes even if the revision of the other SMT thread is already 462 463 * up-to-date. 463 464 */ 464 - if (rev > mc->hdr.patch_id) 465 + if (old_rev > mc->hdr.patch_id) 465 466 return ret; 466 467 467 - if (!__apply_microcode_amd(mc)) { 468 - ucode_new_rev = mc->hdr.patch_id; 469 - ret = true; 470 - } 471 - 472 - return ret; 468 + return !__apply_microcode_amd(mc); 473 469 } 474 470 475 471 static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family) ··· 496 506 *ret = cp; 497 507 } 498 508 499 - void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax) 509 + void __init load_ucode_amd_bsp(struct early_load_data *ed, unsigned int cpuid_1_eax) 500 510 { 501 511 struct cpio_data cp = { }; 512 + u32 dummy; 513 + 514 + native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->old_rev, dummy); 502 515 503 516 /* Needed in load_microcode_amd() */ 504 517 ucode_cpu_info[0].cpu_sig.sig = cpuid_1_eax; ··· 510 517 if (!(cp.data && cp.size)) 511 518 return; 512 519 513 - early_apply_microcode(cpuid_1_eax, cp.data, cp.size); 520 + if (early_apply_microcode(cpuid_1_eax, ed->old_rev, cp.data, cp.size)) 521 + native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->new_rev, dummy); 514 522 } 515 523 516 524 static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size); ··· 619 625 rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); 620 626 621 627 if (rev < mc->hdr.patch_id) { 622 - if (!__apply_microcode_amd(mc)) { 623 - ucode_new_rev = mc->hdr.patch_id; 624 - pr_info("reload patch_level=0x%08x\n", ucode_new_rev); 625 - } 628 + if (!__apply_microcode_amd(mc)) 629 + pr_info_once("reload revision: 0x%08x\n", mc->hdr.patch_id); 626 630 } 627 631 } ··· 640 648 p = find_patch(cpu); 641 649 if (p && (p->patch_id == csig->rev)) 642 650 uci->mc = p->data; 643 - 644 - pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev); 645 651 646 652 return 0; 647 653 } ··· 680 690 681 691 rev = mc_amd->hdr.patch_id; 682 692 ret = UCODE_UPDATED; 683 - 684 - pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev); 685 693 686 694 out: 687 695 uci->cpu_sig.rev = rev; ··· 923 935 pr_warn("AMD CPU family 0x%x not supported\n", c->x86); 924 936 return NULL; 925 937 } 926 - 927 - if (ucode_new_rev) 928 - pr_info_once("microcode updated early to new patch_level=0x%08x\n", 929 - ucode_new_rev); 930 - 931 938 return &microcode_amd_ops; 932 939 } 933 940
+9 -6
arch/x86/kernel/cpu/microcode/core.c
··· 41 41 42 42 #include "internal.h" 43 43 44 - #define DRIVER_VERSION "2.2" 45 - 46 44 static struct microcode_ops *microcode_ops; 47 45 bool dis_ucode_ldr = true; 48 46 ··· 74 76 0x010000af, 75 77 0, /* T-101 terminator */ 76 78 }; 79 + 80 + struct early_load_data early_data; 77 81 78 82 /* 79 83 * Check the current patch level on this CPU. ··· 155 155 return; 156 156 157 157 if (intel) 158 - load_ucode_intel_bsp(); 158 + load_ucode_intel_bsp(&early_data); 159 159 else 160 - load_ucode_amd_bsp(cpuid_1_eax); 160 + load_ucode_amd_bsp(&early_data, cpuid_1_eax); 161 161 } 162 162 163 163 void load_ucode_ap(void) ··· 828 828 if (!microcode_ops) 829 829 return -ENODEV; 830 830 831 + pr_info_once("Current revision: 0x%08x\n", (early_data.new_rev ?: early_data.old_rev)); 832 + 833 + if (early_data.new_rev) 834 + pr_info_once("Updated early from: 0x%08x\n", early_data.old_rev); 835 + 831 836 microcode_pdev = platform_device_register_simple("microcode", -1, NULL, 0); 832 837 if (IS_ERR(microcode_pdev)) 833 838 return PTR_ERR(microcode_pdev); ··· 850 845 register_syscore_ops(&mc_syscore_ops); 851 846 cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/microcode:online", 852 847 mc_cpu_online, mc_cpu_down_prep); 853 - 854 - pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION); 855 848 856 849 return 0; 857 850
+7 -10
arch/x86/kernel/cpu/microcode/intel.c
··· 339 339 static enum ucode_state apply_microcode_early(struct ucode_cpu_info *uci) 340 340 { 341 341 struct microcode_intel *mc = uci->mc; 342 - enum ucode_state ret; 343 - u32 cur_rev, date; 342 + u32 cur_rev; 344 343 345 - ret = __apply_microcode(uci, mc, &cur_rev); 346 - if (ret == UCODE_UPDATED) { 347 - date = mc->hdr.date; 348 - pr_info_once("updated early: 0x%x -> 0x%x, date = %04x-%02x-%02x\n", 349 - cur_rev, mc->hdr.rev, date & 0xffff, date >> 24, (date >> 16) & 0xff); 350 - } 351 - return ret; 344 + return __apply_microcode(uci, mc, &cur_rev); 352 345 } 353 346 354 347 static __init bool load_builtin_intel_microcode(struct cpio_data *cp) ··· 406 413 early_initcall(save_builtin_microcode); 407 414 408 415 /* Load microcode on BSP from initrd or builtin blobs */ 409 - void __init load_ucode_intel_bsp(void) 416 + void __init load_ucode_intel_bsp(struct early_load_data *ed) 410 417 { 411 418 struct ucode_cpu_info uci; 419 + 420 + ed->old_rev = intel_get_microcode_revision(); 412 421 413 422 uci.mc = get_microcode_blob(&uci, false); 414 423 if (uci.mc && apply_microcode_early(&uci) == UCODE_UPDATED) 415 424 ucode_patch_va = UCODE_BSP_LOADED; 425 + 426 + ed->new_rev = uci.cpu_sig.rev; 416 427 } 417 428 418 429 void load_ucode_intel_ap(void)
+10 -4
arch/x86/kernel/cpu/microcode/internal.h
··· 37 37 use_nmi : 1; 38 38 }; 39 39 40 + struct early_load_data { 41 + u32 old_rev; 42 + u32 new_rev; 43 + }; 44 + 45 + extern struct early_load_data early_data; 40 46 extern struct ucode_cpu_info ucode_cpu_info[]; 41 47 struct cpio_data find_microcode_in_initrd(const char *path); 42 48 ··· 98 92 extern bool force_minrev; 99 93 100 94 #ifdef CONFIG_CPU_SUP_AMD 101 - void load_ucode_amd_bsp(unsigned int family); 95 + void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family); 102 96 void load_ucode_amd_ap(unsigned int family); 103 97 int save_microcode_in_initrd_amd(unsigned int family); 104 98 void reload_ucode_amd(unsigned int cpu); 105 99 struct microcode_ops *init_amd_microcode(void); 106 100 void exit_amd_microcode(void); 107 101 #else /* CONFIG_CPU_SUP_AMD */ 108 - static inline void load_ucode_amd_bsp(unsigned int family) { } 102 + static inline void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family) { } 109 103 static inline void load_ucode_amd_ap(unsigned int family) { } 110 104 static inline int save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; } 111 105 static inline void reload_ucode_amd(unsigned int cpu) { } ··· 114 108 #endif /* !CONFIG_CPU_SUP_AMD */ 115 109 116 110 #ifdef CONFIG_CPU_SUP_INTEL 117 - void load_ucode_intel_bsp(void); 111 + void load_ucode_intel_bsp(struct early_load_data *ed); 118 112 void load_ucode_intel_ap(void); 119 113 void reload_ucode_intel(void); 120 114 struct microcode_ops *init_intel_microcode(void); 121 115 #else /* CONFIG_CPU_SUP_INTEL */ 122 - static inline void load_ucode_intel_bsp(void) { } 116 + static inline void load_ucode_intel_bsp(struct early_load_data *ed) { } 123 117 static inline void load_ucode_intel_ap(void) { } 124 118 static inline void reload_ucode_intel(void) { } 125 119 static inline struct microcode_ops *init_intel_microcode(void) { return NULL; }
+2
block/bdev.c
··· 425 425 426 426 void bdev_add(struct block_device *bdev, dev_t dev) 427 427 { 428 + if (bdev_stable_writes(bdev)) 429 + mapping_set_stable_writes(bdev->bd_inode->i_mapping); 428 430 bdev->bd_dev = dev; 429 431 bdev->bd_inode->i_rdev = dev; 430 432 bdev->bd_inode->i_ino = dev;
+13
block/blk-cgroup.c
··· 577 577 struct request_queue *q = disk->queue; 578 578 struct blkcg_gq *blkg, *n; 579 579 int count = BLKG_DESTROY_BATCH_SIZE; 580 + int i; 580 581 581 582 restart: 582 583 spin_lock_irq(&q->queue_lock); ··· 601 600 cond_resched(); 602 601 goto restart; 603 602 } 603 + } 604 + 605 + /* 606 + * Mark policy deactivated since policy offline has been done, and 607 + * the free is scheduled, so future blkcg_deactivate_policy() can 608 + * be bypassed 609 + */ 610 + for (i = 0; i < BLKCG_MAX_POLS; i++) { 611 + struct blkcg_policy *pol = blkcg_policy[i]; 612 + 613 + if (pol) 614 + __clear_bit(pol->plid, q->blkcg_pols); 604 615 } 605 616 606 617 q->root_blkg = NULL;
-2
block/blk-cgroup.h
··· 249 249 { 250 250 struct blkcg_gq *blkg; 251 251 252 - WARN_ON_ONCE(!rcu_read_lock_held()); 253 - 254 252 if (blkcg == &blkcg_root) 255 253 return q->root_blkg; 256 254
+5 -28
block/blk-pm.c
··· 163 163 * @q: the queue of the device 164 164 * 165 165 * Description: 166 - * For historical reasons, this routine merely calls blk_set_runtime_active() 167 - * to do the real work of restarting the queue. It does this regardless of 168 - * whether the device's runtime-resume succeeded; even if it failed the 166 + * Restart the queue of a runtime suspended device. It does this regardless 167 + * of whether the device's runtime-resume succeeded; even if it failed the 169 168 * driver or error handler will need to communicate with the device. 170 169 * 171 170 * This function should be called near the end of the device's 172 - * runtime_resume callback. 171 + * runtime_resume callback to correct queue runtime PM status and re-enable 172 + * peeking requests from the queue. 173 173 */ 174 174 void blk_post_runtime_resume(struct request_queue *q) 175 - { 176 - blk_set_runtime_active(q); 177 - } 178 - EXPORT_SYMBOL(blk_post_runtime_resume); 179 - 180 - /** 181 - * blk_set_runtime_active - Force runtime status of the queue to be active 182 - * @q: the queue of the device 183 - * 184 - * If the device is left runtime suspended during system suspend the resume 185 - * hook typically resumes the device and corrects runtime status 186 - * accordingly. However, that does not affect the queue runtime PM status 187 - * which is still "suspended". This prevents processing requests from the 188 - * queue. 189 - * 190 - * This function can be used in driver's resume hook to correct queue 191 - * runtime PM status and re-enable peeking requests from the queue. It 192 - * should be called before first request is added to the queue. 193 - * 194 - * This function is also called by blk_post_runtime_resume() for 195 - * runtime resumes. It does everything necessary to restart the queue. 196 - */ 197 - void blk_set_runtime_active(struct request_queue *q) 198 175 { 199 176 int old_status; 200 177 ··· 188 211 if (old_status != RPM_ACTIVE) 189 212 blk_clear_pm_only(q); 190 213 } 191 - EXPORT_SYMBOL(blk_set_runtime_active); 214 + EXPORT_SYMBOL(blk_post_runtime_resume);
+2
block/blk-throttle.c
··· 1320 1320 tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE), 1321 1321 tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE)); 1322 1322 1323 + rcu_read_lock(); 1323 1324 /* 1324 1325 * Update has_rules[] flags for the updated tg's subtree. A tg is 1325 1326 * considered to have rules if either the tg itself or any of its ··· 1348 1347 this_tg->latency_target = max(this_tg->latency_target, 1349 1348 parent_tg->latency_target); 1350 1349 } 1350 + rcu_read_unlock(); 1351 1351 1352 1352 /* 1353 1353 * We're already holding queue_lock and know @tg is valid. Let's
+22 -24
drivers/accel/ivpu/ivpu_hw_37xx.c
··· 502 502 return ret; 503 503 } 504 504 505 + static int ivpu_boot_pwr_domain_disable(struct ivpu_device *vdev) 506 + { 507 + ivpu_boot_dpu_active_drive(vdev, false); 508 + ivpu_boot_pwr_island_isolation_drive(vdev, true); 509 + ivpu_boot_pwr_island_trickle_drive(vdev, false); 510 + ivpu_boot_pwr_island_drive(vdev, false); 511 + 512 + return ivpu_boot_wait_for_pwr_island_status(vdev, 0x0); 513 + } 514 + 505 515 static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev) 506 516 { 507 517 u32 val = REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES); ··· 610 600 611 601 static int ivpu_hw_37xx_reset(struct ivpu_device *vdev) 612 602 { 613 - int ret; 614 - u32 val; 603 + int ret = 0; 615 604 616 - if (IVPU_WA(punit_disabled)) 617 - return 0; 618 - 619 - ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US); 620 - if (ret) { 621 - ivpu_err(vdev, "Timed out waiting for TRIGGER bit\n"); 622 - return ret; 605 + if (ivpu_boot_pwr_domain_disable(vdev)) { 606 + ivpu_err(vdev, "Failed to disable power domain\n"); 607 + ret = -EIO; 623 608 } 624 609 625 - val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_IP_RESET); 626 - val = REG_SET_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, val); 627 - REGB_WR32(VPU_37XX_BUTTRESS_VPU_IP_RESET, val); 628 - 629 - ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US); 630 - if (ret) 631 - ivpu_err(vdev, "Timed out waiting for RESET completion\n"); 610 + if (ivpu_pll_disable(vdev)) { 611 + ivpu_err(vdev, "Failed to disable PLL\n"); 612 + ret = -EIO; 613 + } 632 614 633 615 return ret; 634 616 } ··· 652 650 static int ivpu_hw_37xx_power_up(struct ivpu_device *vdev) 653 651 { 654 652 int ret; 655 - 656 - ret = ivpu_hw_37xx_reset(vdev); 657 - if (ret) 658 - ivpu_warn(vdev, "Failed to reset HW: %d\n", ret); 659 653 660 654 ret = ivpu_hw_37xx_d0i3_disable(vdev); 661 655 if (ret) ··· 720 722 { 721 723 int ret = 0; 722 724 723 - if (!ivpu_hw_37xx_is_idle(vdev) && ivpu_hw_37xx_reset(vdev)) 724 - ivpu_err(vdev, "Failed to reset the VPU\n"); 725 + if (!ivpu_hw_37xx_is_idle(vdev)) 726 + ivpu_warn(vdev, "VPU not idle during power down\n"); 725 727 726 - if (ivpu_pll_disable(vdev)) { 727 - ivpu_err(vdev, "Failed to disable PLL\n"); 728 + if (ivpu_hw_37xx_reset(vdev)) { 729 + ivpu_err(vdev, "Failed to reset VPU\n"); 728 730 ret = -EIO; 729 731 } 730 732
+1 -1
drivers/acpi/acpi_video.c
··· 2031 2031 * HP ZBook Fury 16 G10 requires ACPI video's child devices have _PS0 2032 2032 * evaluated to have functional panel brightness control. 2033 2033 */ 2034 - acpi_device_fix_up_power_extended(device); 2034 + acpi_device_fix_up_power_children(device); 2035 2035 2036 2036 pr_info("%s [%s] (multi-head: %s rom: %s post: %s)\n", 2037 2037 ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device),
+13
drivers/acpi/device_pm.c
··· 397 397 } 398 398 EXPORT_SYMBOL_GPL(acpi_device_fix_up_power_extended); 399 399 400 + /** 401 + * acpi_device_fix_up_power_children - Force a device's children into D0. 402 + * @adev: Parent device object whose children's power state is to be fixed up. 403 + * 404 + * Call acpi_device_fix_up_power() for @adev's children so long as they 405 + * are reported as present and enabled. 406 + */ 407 + void acpi_device_fix_up_power_children(struct acpi_device *adev) 408 + { 409 + acpi_dev_for_each_child(adev, fix_up_power_if_applicable, NULL); 410 + } 411 + EXPORT_SYMBOL_GPL(acpi_device_fix_up_power_children); 412 + 400 413 int acpi_device_update_power(struct acpi_device *device, int *state_p) 401 414 { 402 415 int state;
+1 -1
drivers/acpi/processor_idle.c
··· 592 592 while (1) { 593 593 594 594 if (cx->entry_method == ACPI_CSTATE_HALT) 595 - safe_halt(); 595 + raw_safe_halt(); 596 596 else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) { 597 597 io_idle(cx->address); 598 598 } else
+7
drivers/acpi/resource.c
··· 448 448 }, 449 449 }, 450 450 { 451 + /* Asus ExpertBook B1402CVA */ 452 + .matches = { 453 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), 454 + DMI_MATCH(DMI_BOARD_NAME, "B1402CVA"), 455 + }, 456 + }, 457 + { 451 458 /* Asus ExpertBook B1502CBA */ 452 459 .matches = { 453 460 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+3
drivers/ata/pata_isapnp.c
··· 82 82 if (pnp_port_valid(idev, 1)) { 83 83 ctl_addr = devm_ioport_map(&idev->dev, 84 84 pnp_port_start(idev, 1), 1); 85 + if (!ctl_addr) 86 + return -ENOMEM; 87 + 85 88 ap->ioaddr.altstatus_addr = ctl_addr; 86 89 ap->ioaddr.ctl_addr = ctl_addr; 87 90 ap->ops = &isapnp_port_ops;
+75 -42
drivers/block/nbd.c
··· 67 67 struct recv_thread_args { 68 68 struct work_struct work; 69 69 struct nbd_device *nbd; 70 + struct nbd_sock *nsock; 70 71 int index; 71 72 }; 72 73 ··· 396 395 } 397 396 } 398 397 398 + static struct nbd_config *nbd_get_config_unlocked(struct nbd_device *nbd) 399 + { 400 + if (refcount_inc_not_zero(&nbd->config_refs)) { 401 + /* 402 + * Add smp_mb__after_atomic to ensure that reading nbd->config_refs 403 + * and reading nbd->config is ordered. The pair is the barrier in 404 + * nbd_alloc_and_init_config(), avoid nbd->config_refs is set 405 + * before nbd->config. 406 + */ 407 + smp_mb__after_atomic(); 408 + return nbd->config; 409 + } 410 + 411 + return NULL; 412 + } 413 + 399 414 static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req) 400 415 { 401 416 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req); ··· 426 409 return BLK_EH_DONE; 427 410 } 428 411 429 - if (!refcount_inc_not_zero(&nbd->config_refs)) { 412 + config = nbd_get_config_unlocked(nbd); 413 + if (!config) { 430 414 cmd->status = BLK_STS_TIMEOUT; 431 415 __clear_bit(NBD_CMD_INFLIGHT, &cmd->flags); 432 416 mutex_unlock(&cmd->lock); 433 417 goto done; 434 418 } 435 - config = nbd->config; 436 419 437 420 if (config->num_connections > 1 || 438 421 (config->num_connections == 1 && nbd->tag_set.timeout)) { ··· 506 489 return BLK_EH_DONE; 507 490 } 508 491 509 - /* 510 - * Send or receive packet. Return a positive value on success and 511 - * negtive value on failue, and never return 0. 512 - */ 513 - static int sock_xmit(struct nbd_device *nbd, int index, int send, 514 - struct iov_iter *iter, int msg_flags, int *sent) 492 + static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send, 493 + struct iov_iter *iter, int msg_flags, int *sent) 515 494 { 516 - struct nbd_config *config = nbd->config; 517 - struct socket *sock = config->socks[index]->sock; 518 495 int result; 519 496 struct msghdr msg; 520 497 unsigned int noreclaim_flag; ··· 549 538 memalloc_noreclaim_restore(noreclaim_flag); 550 539 551 540 return result; 541 + } 542 + 543 + /* 544 + * Send or receive packet. Return a positive value on success and 545 + * negtive value on failure, and never return 0. 546 + */ 547 + static int sock_xmit(struct nbd_device *nbd, int index, int send, 548 + struct iov_iter *iter, int msg_flags, int *sent) 549 + { 550 + struct nbd_config *config = nbd->config; 551 + struct socket *sock = config->socks[index]->sock; 552 + 553 + return __sock_xmit(nbd, sock, send, iter, msg_flags, sent); 552 554 } 553 555 554 556 /* ··· 720 696 return 0; 721 697 } 722 698 723 - static int nbd_read_reply(struct nbd_device *nbd, int index, 699 + static int nbd_read_reply(struct nbd_device *nbd, struct socket *sock, 724 700 struct nbd_reply *reply) 725 701 { 726 702 struct kvec iov = {.iov_base = reply, .iov_len = sizeof(*reply)}; ··· 729 705 730 706 reply->magic = 0; 731 707 iov_iter_kvec(&to, ITER_DEST, &iov, 1, sizeof(*reply)); 732 - result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL); 708 + result = __sock_xmit(nbd, sock, 0, &to, MSG_WAITALL, NULL); 733 709 if (result < 0) { 734 710 if (!nbd_disconnected(nbd->config)) 735 711 dev_err(disk_to_dev(nbd->disk), ··· 853 829 struct nbd_device *nbd = args->nbd; 854 830 struct nbd_config *config = nbd->config; 855 831 struct request_queue *q = nbd->disk->queue; 856 - struct nbd_sock *nsock; 832 + struct nbd_sock *nsock = args->nsock; 857 833 struct nbd_cmd *cmd; 858 834 struct request *rq; 859 835 860 836 while (1) { 861 837 struct nbd_reply reply; 862 838 863 - if (nbd_read_reply(nbd, args->index, &reply)) 839 + if (nbd_read_reply(nbd, nsock->sock, &reply)) 864 840 break; 865 841 866 842 /* ··· 895 871 percpu_ref_put(&q->q_usage_counter); 896 872 } 897 873 898 - nsock = config->socks[args->index]; 899 874 mutex_lock(&nsock->tx_lock); 900 875 nbd_mark_nsock_dead(nbd, nsock, 1); 901 876 mutex_unlock(&nsock->tx_lock); ··· 1000 977 struct nbd_sock *nsock; 1001 978 int ret; 1002 979 1003 - if (!refcount_inc_not_zero(&nbd->config_refs)) { 980 + config = nbd_get_config_unlocked(nbd); 981 + if (!config) { 1004 982 dev_err_ratelimited(disk_to_dev(nbd->disk), 1005 983 "Socks array is empty\n"); 1006 984 return -EINVAL; 1007 985 } 1008 - config = nbd->config; 1009 986 1010 987 if (index >= config->num_connections) { 1011 988 dev_err_ratelimited(disk_to_dev(nbd->disk), ··· 1238 1215 INIT_WORK(&args->work, recv_work); 1239 1216 args->index = i; 1240 1217 args->nbd = nbd; 1218 + args->nsock = nsock; 1241 1219 nsock->cookie++; 1242 1220 mutex_unlock(&nsock->tx_lock); 1243 1221 sockfd_put(old); ··· 1421 1397 refcount_inc(&nbd->config_refs); 1422 1398 INIT_WORK(&args->work, recv_work); 1423 1399 args->nbd = nbd; 1400 + args->nsock = config->socks[i]; 1424 1401 args->index = i; 1425 1402 queue_work(nbd->recv_workq, &args->work); 1426 1403 } ··· 1555 1530 return error; 1556 1531 } 1557 1532 1558 - static struct nbd_config *nbd_alloc_config(void) 1533 + static int nbd_alloc_and_init_config(struct nbd_device *nbd) 1559 1534 { 1560 1535 struct nbd_config *config; 1561 1536 1537 + if (WARN_ON(nbd->config)) 1538 + return -EINVAL; 1539 + 1562 1540 if (!try_module_get(THIS_MODULE)) 1563 - return ERR_PTR(-ENODEV); 1541 + return -ENODEV; 1564 1542 1565 1543 config = kzalloc(sizeof(struct nbd_config), GFP_NOFS); 1566 1544 if (!config) { 1567 1545 module_put(THIS_MODULE); 1568 - return ERR_PTR(-ENOMEM); 1546 + return -ENOMEM; 1569 1547 } 1570 1548 1571 1549 atomic_set(&config->recv_threads, 0)
··· 1576 1548 init_waitqueue_head(&config->conn_wait); 1577 1549 config->blksize_bits = NBD_DEF_BLKSIZE_BITS; 1578 1550 atomic_set(&config->live_connections, 0); 1579 - return config; 1551 + 1552 + nbd->config = config; 1553 + /* 1554 + * Order refcount_set(&nbd->config_refs, 1) and nbd->config assignment, 1555 + * its pair is the barrier in nbd_get_config_unlocked(). 1556 + * So nbd_get_config_unlocked() won't see nbd->config as null after 1557 + * refcount_inc_not_zero() succeed. 1558 + */ 1559 + smp_mb__before_atomic(); 1560 + refcount_set(&nbd->config_refs, 1); 1561 + 1562 + return 0; 1580 1563 } 1581 1564 1582 1565 static int nbd_open(struct gendisk *disk, blk_mode_t mode) 1583 1566 { 1584 1567 struct nbd_device *nbd; 1568 + struct nbd_config *config; 1585 1569 int ret = 0; 1586 1570 1587 1571 mutex_lock(&nbd_index_mutex); ··· 1606 1566 ret = -ENXIO; 1607 1567 goto out; 1608 1568 } 1609 - if (!refcount_inc_not_zero(&nbd->config_refs)) { 1610 - struct nbd_config *config; 1611 1569 1570 + config = nbd_get_config_unlocked(nbd); 1571 + if (!config) { 1612 1572 mutex_lock(&nbd->config_lock); 1613 1573 if (refcount_inc_not_zero(&nbd->config_refs)) { 1614 1574 mutex_unlock(&nbd->config_lock); 1615 1575 goto out; 1616 1576 } 1617 - config = nbd_alloc_config(); 1618 - if (IS_ERR(config)) { 1619 - ret = PTR_ERR(config); 1577 + ret = nbd_alloc_and_init_config(nbd); 1578 + if (ret) { 1620 1579 mutex_unlock(&nbd->config_lock); 1621 1580 goto out; 1622 1581 } 1623 - nbd->config = config; 1624 - refcount_set(&nbd->config_refs, 1); 1582 + 1625 1583 refcount_inc(&nbd->refs); 1626 1584 mutex_unlock(&nbd->config_lock); 1627 1585 if (max_part) 1628 1586 set_bit(GD_NEED_PART_SCAN, &disk->state); 1629 - } else if (nbd_disconnected(nbd->config)) { 1587 + } else if (nbd_disconnected(config)) { 1630 1588 if (max_part) 1631 1589 set_bit(GD_NEED_PART_SCAN, &disk->state); 1632 1590 } ··· 2028 1990 pr_err("nbd%d already in use\n", index); 2029 1991 return -EBUSY; 2030 1992 } 2031 - if 
(WARN_ON(nbd->config)) { 2032 - mutex_unlock(&nbd->config_lock); 2033 - nbd_put(nbd); 2034 - return -EINVAL; 2035 - } 2036 - config = nbd_alloc_config(); 2037 - if (IS_ERR(config)) { 1993 + 1994 + ret = nbd_alloc_and_init_config(nbd); 1995 + if (ret) { 2038 1996 mutex_unlock(&nbd->config_lock); 2039 1997 nbd_put(nbd); 2040 1998 pr_err("couldn't allocate config\n"); 2041 - return PTR_ERR(config); 1999 + return ret; 2042 2000 } 2043 - nbd->config = config; 2044 - refcount_set(&nbd->config_refs, 1); 2045 - set_bit(NBD_RT_BOUND, &config->runtime_flags); 2046 2001 2002 + config = nbd->config; 2003 + set_bit(NBD_RT_BOUND, &config->runtime_flags); 2047 2004 ret = nbd_genl_size_set(info, nbd); 2048 2005 if (ret) 2049 2006 goto out; ··· 2241 2208 } 2242 2209 mutex_unlock(&nbd_index_mutex); 2243 2210 2244 - if (!refcount_inc_not_zero(&nbd->config_refs)) { 2211 + config = nbd_get_config_unlocked(nbd); 2212 + if (!config) { 2245 2213 dev_err(nbd_to_dev(nbd), 2246 2214 "not configured, cannot reconfigure\n"); 2247 2215 nbd_put(nbd); ··· 2250 2216 } 2251 2217 2252 2218 mutex_lock(&nbd->config_lock); 2253 - config = nbd->config; 2254 2219 if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) || 2255 2220 !nbd->pid) { 2256 2221 dev_err(nbd_to_dev(nbd),
+13 -12
drivers/block/null_blk/main.c
··· 1464 1464 return BLK_STS_OK; 1465 1465 } 1466 1466 1467 - static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector, 1468 - sector_t nr_sectors, enum req_op op) 1467 + static void null_handle_cmd(struct nullb_cmd *cmd, sector_t sector, 1468 + sector_t nr_sectors, enum req_op op) 1469 1469 { 1470 1470 struct nullb_device *dev = cmd->nq->dev; 1471 1471 struct nullb *nullb = dev->nullb; 1472 1472 blk_status_t sts; 1473 - 1474 - if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) { 1475 - sts = null_handle_throttled(cmd); 1476 - if (sts != BLK_STS_OK) 1477 - return sts; 1478 - } 1479 1473 1480 1474 if (op == REQ_OP_FLUSH) { 1481 1475 cmd->error = errno_to_blk_status(null_handle_flush(nullb)); ··· 1487 1493 1488 1494 out: 1489 1495 nullb_complete_cmd(cmd); 1490 - return BLK_STS_OK; 1491 1496 } 1492 1497 1493 1498 static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer) ··· 1717 1724 cmd->fake_timeout = should_timeout_request(rq) || 1718 1725 blk_should_fake_timeout(rq->q); 1719 1726 1720 - blk_mq_start_request(rq); 1721 - 1722 1727 if (should_requeue_request(rq)) { 1723 1728 /* 1724 1729 * Alternate between hitting the core BUSY path, and the ··· 1729 1738 return BLK_STS_OK; 1730 1739 } 1731 1740 1741 + if (test_bit(NULLB_DEV_FL_THROTTLED, &nq->dev->flags)) { 1742 + blk_status_t sts = null_handle_throttled(cmd); 1743 + 1744 + if (sts != BLK_STS_OK) 1745 + return sts; 1746 + } 1747 + 1748 + blk_mq_start_request(rq); 1749 + 1732 1750 if (is_poll) { 1733 1751 spin_lock(&nq->poll_lock); 1734 1752 list_add_tail(&rq->queuelist, &nq->poll_list); ··· 1747 1747 if (cmd->fake_timeout) 1748 1748 return BLK_STS_OK; 1749 1749 1750 - return null_handle_cmd(cmd, sector, nr_sectors, req_op(rq)); 1750 + null_handle_cmd(cmd, sector, nr_sectors, req_op(rq)); 1751 + return BLK_STS_OK; 1751 1752 } 1752 1753 1753 1754 static void null_queue_rqs(struct request **rqlist)
+1 -1
drivers/firmware/efi/unaccepted_memory.c
··· 101 101 * overlap on physical address level. 102 102 */ 103 103 list_for_each_entry(entry, &accepting_list, list) { 104 - if (entry->end < range.start) 104 + if (entry->end <= range.start) 105 105 continue; 106 106 if (entry->start >= range.end) 107 107 continue;
+12 -1
drivers/gpu/drm/ast/ast_drv.h
··· 174 174 return container_of(connector, struct ast_sil164_connector, base); 175 175 } 176 176 177 + struct ast_bmc_connector { 178 + struct drm_connector base; 179 + struct drm_connector *physical_connector; 180 + }; 181 + 182 + static inline struct ast_bmc_connector * 183 + to_ast_bmc_connector(struct drm_connector *connector) 184 + { 185 + return container_of(connector, struct ast_bmc_connector, base); 186 + } 187 + 177 188 /* 178 189 * Device 179 190 */ ··· 229 218 } astdp; 230 219 struct { 231 220 struct drm_encoder encoder; 232 - struct drm_connector connector; 221 + struct ast_bmc_connector bmc_connector; 233 222 } bmc; 234 223 } output; 235 224
+55 -7
drivers/gpu/drm/ast/ast_mode.c
··· 1767 1767 .destroy = drm_encoder_cleanup, 1768 1768 }; 1769 1769 1770 + static int ast_bmc_connector_helper_detect_ctx(struct drm_connector *connector, 1771 + struct drm_modeset_acquire_ctx *ctx, 1772 + bool force) 1773 + { 1774 + struct ast_bmc_connector *bmc_connector = to_ast_bmc_connector(connector); 1775 + struct drm_connector *physical_connector = bmc_connector->physical_connector; 1776 + 1777 + /* 1778 + * Most user-space compositors cannot handle more than one connected 1779 + * connector per CRTC. Hence, we only mark the BMC as connected if the 1780 + * physical connector is disconnected. If the physical connector's status 1781 + * is connected or unknown, the BMC remains disconnected. This has no 1782 + * effect on the output of the BMC. 1783 + * 1784 + * FIXME: Remove this logic once user-space compositors can handle more 1785 + * than one connector per CRTC. The BMC should always be connected. 1786 + */ 1787 + 1788 + if (physical_connector && physical_connector->status == connector_status_disconnected) 1789 + return connector_status_connected; 1790 + 1791 + return connector_status_disconnected; 1792 + } 1793 + 1770 1794 static int ast_bmc_connector_helper_get_modes(struct drm_connector *connector) 1771 1795 { 1772 1796 return drm_add_modes_noedid(connector, 4096, 4096); ··· 1798 1774 1799 1775 static const struct drm_connector_helper_funcs ast_bmc_connector_helper_funcs = { 1800 1776 .get_modes = ast_bmc_connector_helper_get_modes, 1777 + .detect_ctx = ast_bmc_connector_helper_detect_ctx, 1801 1778 }; 1802 1779 1803 1780 static const struct drm_connector_funcs ast_bmc_connector_funcs = { ··· 1809 1784 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 1810 1785 }; 1811 1786 1812 - static int ast_bmc_output_init(struct ast_device *ast) 1787 + static int ast_bmc_connector_init(struct drm_device *dev, 1788 + struct ast_bmc_connector *bmc_connector, 1789 + struct drm_connector *physical_connector) 1790 + { 1791 + struct drm_connector 
*connector = &bmc_connector->base; 1792 + int ret; 1793 + 1794 + ret = drm_connector_init(dev, connector, &ast_bmc_connector_funcs, 1795 + DRM_MODE_CONNECTOR_VIRTUAL); 1796 + if (ret) 1797 + return ret; 1798 + 1799 + drm_connector_helper_add(connector, &ast_bmc_connector_helper_funcs); 1800 + 1801 + bmc_connector->physical_connector = physical_connector; 1802 + 1803 + return 0; 1804 + } 1805 + 1806 + static int ast_bmc_output_init(struct ast_device *ast, 1807 + struct drm_connector *physical_connector) 1813 1808 { 1814 1809 struct drm_device *dev = &ast->base; 1815 1810 struct drm_crtc *crtc = &ast->crtc; 1816 1811 struct drm_encoder *encoder = &ast->output.bmc.encoder; 1817 - struct drm_connector *connector = &ast->output.bmc.connector; 1812 + struct ast_bmc_connector *bmc_connector = &ast->output.bmc.bmc_connector; 1813 + struct drm_connector *connector = &bmc_connector->base; 1818 1814 int ret; 1819 1815 1820 1816 ret = drm_encoder_init(dev, encoder, ··· 1845 1799 return ret; 1846 1800 encoder->possible_crtcs = drm_crtc_mask(crtc); 1847 1801 1848 - ret = drm_connector_init(dev, connector, &ast_bmc_connector_funcs, 1849 - DRM_MODE_CONNECTOR_VIRTUAL); 1802 + ret = ast_bmc_connector_init(dev, bmc_connector, physical_connector); 1850 1803 if (ret) 1851 1804 return ret; 1852 - 1853 - drm_connector_helper_add(connector, &ast_bmc_connector_helper_funcs); 1854 1805 1855 1806 ret = drm_connector_attach_encoder(connector, encoder); 1856 1807 if (ret) ··· 1907 1864 int ast_mode_config_init(struct ast_device *ast) 1908 1865 { 1909 1866 struct drm_device *dev = &ast->base; 1867 + struct drm_connector *physical_connector = NULL; 1910 1868 int ret; 1911 1869 1912 1870 ret = drmm_mode_config_init(dev); ··· 1948 1904 ret = ast_vga_output_init(ast); 1949 1905 if (ret) 1950 1906 return ret; 1907 + physical_connector = &ast->output.vga.vga_connector.base; 1951 1908 } 1952 1909 if (ast->tx_chip_types & AST_TX_SIL164_BIT) { 1953 1910 ret = ast_sil164_output_init(ast); 1954 1911 if 
(ret) 1955 1912 return ret; 1913 + physical_connector = &ast->output.sil164.sil164_connector.base; 1956 1914 } 1957 1915 if (ast->tx_chip_types & AST_TX_DP501_BIT) { 1958 1916 ret = ast_dp501_output_init(ast); 1959 1917 if (ret) 1960 1918 return ret; 1919 + physical_connector = &ast->output.dp501.connector; 1961 1920 } 1962 1921 if (ast->tx_chip_types & AST_TX_ASTDP_BIT) { 1963 1922 ret = ast_astdp_output_init(ast); 1964 1923 if (ret) 1965 1924 return ret; 1925 + physical_connector = &ast->output.astdp.connector; 1966 1926 } 1967 - ret = ast_bmc_output_init(ast); 1927 + ret = ast_bmc_output_init(ast, physical_connector); 1968 1928 if (ret) 1969 1929 return ret; 1970 1930
+8 -8
drivers/gpu/drm/i915/display/intel_dp_mst.c
··· 1161 1161 intel_connector->port = port; 1162 1162 drm_dp_mst_get_port_malloc(port); 1163 1163 1164 + /* 1165 + * TODO: set the AUX for the actual MST port decompressing the stream. 1166 + * At the moment the driver only supports enabling this globally in the 1167 + * first downstream MST branch, via intel_dp's (root port) AUX. 1168 + */ 1169 + intel_connector->dp.dsc_decompression_aux = &intel_dp->aux; 1170 + intel_dp_mst_read_decompression_port_dsc_caps(intel_dp, intel_connector); 1171 + 1164 1172 connector = &intel_connector->base; 1165 1173 ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, 1166 1174 DRM_MODE_CONNECTOR_DisplayPort); ··· 1179 1171 } 1180 1172 1181 1173 drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs); 1182 - 1183 - /* 1184 - * TODO: set the AUX for the actual MST port decompressing the stream. 1185 - * At the moment the driver only supports enabling this globally in the 1186 - * first downstream MST branch, via intel_dp's (root port) AUX. 1187 - */ 1188 - intel_connector->dp.dsc_decompression_aux = &intel_dp->aux; 1189 - intel_dp_mst_read_decompression_port_dsc_caps(intel_dp, intel_connector); 1190 1174 1191 1175 for_each_pipe(dev_priv, pipe) { 1192 1176 struct drm_encoder *enc =
-11
drivers/gpu/drm/i915/gt/intel_gt.c
··· 982 982 983 983 err: 984 984 i915_probe_error(i915, "Failed to initialize %s! (%d)\n", gtdef->name, ret); 985 - intel_gt_release_all(i915); 986 - 987 985 return ret; 988 986 } 989 987 ··· 998 1000 } 999 1001 1000 1002 return 0; 1001 - } 1002 - 1003 - void intel_gt_release_all(struct drm_i915_private *i915) 1004 - { 1005 - struct intel_gt *gt; 1006 - unsigned int id; 1007 - 1008 - for_each_gt(gt, i915, id) 1009 - i915->gt[id] = NULL; 1010 1003 } 1011 1004 1012 1005 void intel_gt_info_print(const struct intel_gt_info *info,
+1 -3
drivers/gpu/drm/i915/i915_driver.c
··· 782 782 783 783 ret = i915_driver_mmio_probe(i915); 784 784 if (ret < 0) 785 - goto out_tiles_cleanup; 785 + goto out_runtime_pm_put; 786 786 787 787 ret = i915_driver_hw_probe(i915); 788 788 if (ret < 0) ··· 842 842 i915_ggtt_driver_late_release(i915); 843 843 out_cleanup_mmio: 844 844 i915_driver_mmio_release(i915); 845 - out_tiles_cleanup: 846 - intel_gt_release_all(i915); 847 845 out_runtime_pm_put: 848 846 enable_rpm_wakeref_asserts(&i915->runtime_pm); 849 847 i915_driver_late_release(i915);
+1
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
··· 406 406 .min_llcc_ib = 0, 407 407 .min_dram_ib = 800000, 408 408 .danger_lut_tbl = {0xf, 0xffff, 0x0}, 409 + .safe_lut_tbl = {0xfe00, 0xfe00, 0xffff}, 409 410 .qos_lut_tbl = { 410 411 {.nentry = ARRAY_SIZE(sc8180x_qos_linear), 411 412 .entries = sc8180x_qos_linear
+1 -2
drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
··· 844 844 845 845 return 0; 846 846 fail: 847 - if (mdp5_kms) 848 - mdp5_destroy(mdp5_kms); 847 + mdp5_destroy(mdp5_kms); 849 848 return ret; 850 849 } 851 850
+10 -5
drivers/gpu/drm/msm/dp/dp_display.c
··· 365 365 /* reset video pattern flag on disconnect */ 366 366 if (!hpd) { 367 367 dp->panel->video_test = false; 368 - drm_dp_set_subconnector_property(dp->dp_display.connector, 369 - connector_status_disconnected, 370 - dp->panel->dpcd, dp->panel->downstream_ports); 368 + if (!dp->dp_display.is_edp) 369 + drm_dp_set_subconnector_property(dp->dp_display.connector, 370 + connector_status_disconnected, 371 + dp->panel->dpcd, 372 + dp->panel->downstream_ports); 371 373 } 372 374 373 375 dp->dp_display.is_connected = hpd; ··· 398 396 399 397 dp_link_process_request(dp->link); 400 398 401 - drm_dp_set_subconnector_property(dp->dp_display.connector, connector_status_connected, 402 - dp->panel->dpcd, dp->panel->downstream_ports); 399 + if (!dp->dp_display.is_edp) 400 + drm_dp_set_subconnector_property(dp->dp_display.connector, 401 + connector_status_connected, 402 + dp->panel->dpcd, 403 + dp->panel->downstream_ports); 403 404 404 405 edid = dp->panel->edid; 405 406
+3
drivers/gpu/drm/msm/dp/dp_drm.c
··· 345 345 if (IS_ERR(connector)) 346 346 return connector; 347 347 348 + if (!dp_display->is_edp) 349 + drm_connector_attach_dp_subconnector_property(connector); 350 + 348 351 drm_connector_attach_encoder(connector, encoder); 349 352 350 353 return connector;
+1 -1
drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
··· 918 918 if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2)) { 919 919 if (phy->cphy_mode) { 920 920 vreg_ctrl_0 = 0x45; 921 - vreg_ctrl_1 = 0x45; 921 + vreg_ctrl_1 = 0x41; 922 922 glbl_rescode_top_ctrl = 0x00; 923 923 glbl_rescode_bot_ctrl = 0x00; 924 924 } else {
-2
drivers/gpu/drm/msm/msm_drv.c
··· 288 288 if (ret) 289 289 goto err_msm_uninit; 290 290 291 - drm_kms_helper_poll_init(ddev); 292 - 293 291 if (priv->kms_init) { 294 292 drm_kms_helper_poll_init(ddev); 295 293 msm_fbdev_setup(ddev);
+1 -1
drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c
··· 539 539 struct nvkm_runl *runl; 540 540 struct nvkm_engn *engn; 541 541 u32 cgids = 2048; 542 - u32 chids = 2048 / CHID_PER_USERD; 542 + u32 chids = 2048; 543 543 int ret; 544 544 NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS *ctrl; 545 545
+5 -4
drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
··· 1709 1709 .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE | 1710 1710 MIPI_DSI_MODE_LPM, 1711 1711 .init_cmds = auo_b101uan08_3_init_cmd, 1712 + .lp11_before_reset = true, 1712 1713 }; 1713 1714 1714 1715 static const struct drm_display_mode boe_tv105wum_nw0_default_mode = { ··· 1767 1766 }; 1768 1767 1769 1768 static const struct drm_display_mode starry_himax83102_j02_default_mode = { 1770 - .clock = 161600, 1769 + .clock = 162850, 1771 1770 .hdisplay = 1200, 1772 - .hsync_start = 1200 + 40, 1773 - .hsync_end = 1200 + 40 + 20, 1774 - .htotal = 1200 + 40 + 20 + 40, 1771 + .hsync_start = 1200 + 50, 1772 + .hsync_end = 1200 + 50 + 20, 1773 + .htotal = 1200 + 50 + 20 + 50, 1775 1774 .vdisplay = 1920, 1776 1775 .vsync_start = 1920 + 116, 1777 1776 .vsync_end = 1920 + 116 + 8,
+7 -6
drivers/gpu/drm/panel/panel-simple.c
··· 2379 2379 static const struct display_timing innolux_g101ice_l01_timing = { 2380 2380 .pixelclock = { 60400000, 71100000, 74700000 }, 2381 2381 .hactive = { 1280, 1280, 1280 }, 2382 - .hfront_porch = { 41, 80, 100 }, 2383 - .hback_porch = { 40, 79, 99 }, 2384 - .hsync_len = { 1, 1, 1 }, 2382 + .hfront_porch = { 30, 60, 70 }, 2383 + .hback_porch = { 30, 60, 70 }, 2384 + .hsync_len = { 22, 40, 60 }, 2385 2385 .vactive = { 800, 800, 800 }, 2386 - .vfront_porch = { 5, 11, 14 }, 2387 - .vback_porch = { 4, 11, 14 }, 2388 - .vsync_len = { 1, 1, 1 }, 2386 + .vfront_porch = { 3, 8, 14 }, 2387 + .vback_porch = { 3, 8, 14 }, 2388 + .vsync_len = { 4, 7, 12 }, 2389 2389 .flags = DISPLAY_FLAGS_DE_HIGH, 2390 2390 }; 2391 2391 ··· 2402 2402 .disable = 200, 2403 2403 }, 2404 2404 .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, 2405 + .bus_flags = DRM_BUS_FLAG_DE_HIGH, 2405 2406 .connector_type = DRM_MODE_CONNECTOR_LVDS, 2406 2407 }; 2407 2408
+11 -3
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
··· 247 247 VOP_REG_SET(vop, common, cfg_done, 1); 248 248 } 249 249 250 - static bool has_rb_swapped(uint32_t format) 250 + static bool has_rb_swapped(uint32_t version, uint32_t format) 251 251 { 252 252 switch (format) { 253 253 case DRM_FORMAT_XBGR8888: 254 254 case DRM_FORMAT_ABGR8888: 255 - case DRM_FORMAT_BGR888: 256 255 case DRM_FORMAT_BGR565: 257 256 return true; 257 + /* 258 + * full framework (IP version 3.x) only need rb swapped for RGB888 and 259 + * little framework (IP version 2.x) only need rb swapped for BGR888, 260 + * check for 3.x to also only rb swap BGR888 for unknown vop version 261 + */ 262 + case DRM_FORMAT_RGB888: 263 + return VOP_MAJOR(version) == 3; 264 + case DRM_FORMAT_BGR888: 265 + return VOP_MAJOR(version) != 3; 258 266 default: 259 267 return false; 260 268 } ··· 1038 1030 VOP_WIN_SET(vop, win, dsp_info, dsp_info); 1039 1031 VOP_WIN_SET(vop, win, dsp_st, dsp_st); 1040 1032 1041 - rb_swap = has_rb_swapped(fb->format->format); 1033 + rb_swap = has_rb_swapped(vop->data->version, fb->format->format); 1042 1034 VOP_WIN_SET(vop, win, rb_swap, rb_swap); 1043 1035 1044 1036 /*
+2
drivers/hid/hid-apple.c
··· 345 345 { "AONE" }, 346 346 { "GANSS" }, 347 347 { "Hailuck" }, 348 + { "Jamesdonkey" }, 349 + { "A3R" }, 348 350 }; 349 351 350 352 static bool apple_is_non_apple_keyboard(struct hid_device *hdev)
+23 -4
drivers/hid/hid-asus.c
··· 381 381 return 0; 382 382 } 383 383 384 - static int asus_kbd_set_report(struct hid_device *hdev, u8 *buf, size_t buf_size) 384 + static int asus_kbd_set_report(struct hid_device *hdev, const u8 *buf, size_t buf_size) 385 385 { 386 386 unsigned char *dmabuf; 387 387 int ret; ··· 404 404 405 405 static int asus_kbd_init(struct hid_device *hdev) 406 406 { 407 - u8 buf[] = { FEATURE_KBD_REPORT_ID, 0x41, 0x53, 0x55, 0x53, 0x20, 0x54, 407 + const u8 buf[] = { FEATURE_KBD_REPORT_ID, 0x41, 0x53, 0x55, 0x53, 0x20, 0x54, 408 408 0x65, 0x63, 0x68, 0x2e, 0x49, 0x6e, 0x63, 0x2e, 0x00 }; 409 409 int ret; 410 410 ··· 418 418 static int asus_kbd_get_functions(struct hid_device *hdev, 419 419 unsigned char *kbd_func) 420 420 { 421 - u8 buf[] = { FEATURE_KBD_REPORT_ID, 0x05, 0x20, 0x31, 0x00, 0x08 }; 421 + const u8 buf[] = { FEATURE_KBD_REPORT_ID, 0x05, 0x20, 0x31, 0x00, 0x08 }; 422 422 u8 *readbuf; 423 423 int ret; 424 424 ··· 449 449 450 450 static int rog_nkey_led_init(struct hid_device *hdev) 451 451 { 452 - u8 buf_init_start[] = { FEATURE_KBD_LED_REPORT_ID1, 0xB9 }; 452 + const u8 buf_init_start[] = { FEATURE_KBD_LED_REPORT_ID1, 0xB9 }; 453 453 u8 buf_init2[] = { FEATURE_KBD_LED_REPORT_ID1, 0x41, 0x53, 0x55, 0x53, 0x20, 454 454 0x54, 0x65, 0x63, 0x68, 0x2e, 0x49, 0x6e, 0x63, 0x2e, 0x00 }; 455 455 u8 buf_init3[] = { FEATURE_KBD_LED_REPORT_ID1, ··· 1000 1000 return 0; 1001 1001 } 1002 1002 1003 + static int __maybe_unused asus_resume(struct hid_device *hdev) { 1004 + struct asus_drvdata *drvdata = hid_get_drvdata(hdev); 1005 + int ret = 0; 1006 + 1007 + if (drvdata->kbd_backlight) { 1008 + const u8 buf[] = { FEATURE_KBD_REPORT_ID, 0xba, 0xc5, 0xc4, 1009 + drvdata->kbd_backlight->cdev.brightness }; 1010 + ret = asus_kbd_set_report(hdev, buf, sizeof(buf)); 1011 + if (ret < 0) { 1012 + hid_err(hdev, "Asus failed to set keyboard backlight: %d\n", ret); 1013 + goto asus_resume_err; 1014 + } 1015 + } 1016 + 1017 + asus_resume_err: 1018 + return ret; 1019 + } 1020 + 1003 1021 
static int __maybe_unused asus_reset_resume(struct hid_device *hdev) 1004 1022 { 1005 1023 struct asus_drvdata *drvdata = hid_get_drvdata(hdev); ··· 1312 1294 .input_configured = asus_input_configured, 1313 1295 #ifdef CONFIG_PM 1314 1296 .reset_resume = asus_reset_resume, 1297 + .resume = asus_resume, 1315 1298 #endif 1316 1299 .event = asus_event, 1317 1300 .raw_event = asus_raw_event
+10 -2
drivers/hid/hid-core.c
··· 702 702 * Free a device structure, all reports, and all fields. 703 703 */ 704 704 705 - static void hid_device_release(struct device *dev) 705 + void hiddev_free(struct kref *ref) 706 706 { 707 - struct hid_device *hid = to_hid_device(dev); 707 + struct hid_device *hid = container_of(ref, struct hid_device, ref); 708 708 709 709 hid_close_report(hid); 710 710 kfree(hid->dev_rdesc); 711 711 kfree(hid); 712 + } 713 + 714 + static void hid_device_release(struct device *dev) 715 + { 716 + struct hid_device *hid = to_hid_device(dev); 717 + 718 + kref_put(&hid->ref, hiddev_free); 712 719 } 713 720 714 721 /* ··· 2853 2846 spin_lock_init(&hdev->debug_list_lock); 2854 2847 sema_init(&hdev->driver_input_lock, 1); 2855 2848 mutex_init(&hdev->ll_open_lock); 2849 + kref_init(&hdev->ref); 2856 2850 2857 2851 hid_bpf_device_init(hdev); 2858 2852
+3
drivers/hid/hid-debug.c
··· 1135 1135 goto out; 1136 1136 } 1137 1137 list->hdev = (struct hid_device *) inode->i_private; 1138 + kref_get(&list->hdev->ref); 1138 1139 file->private_data = list; 1139 1140 mutex_init(&list->read_mutex); 1140 1141 ··· 1228 1227 list_del(&list->node); 1229 1228 spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags); 1230 1229 kfifo_free(&list->hid_debug_fifo); 1230 + 1231 + kref_put(&list->hdev->ref, hiddev_free); 1231 1232 kfree(list); 1232 1233 1233 1234 return 0;
+14 -2
drivers/hid/hid-glorious.c
··· 21 21 * Glorious Model O and O- specify the const flag in the consumer input 22 22 * report descriptor, which leads to inputs being ignored. Fix this 23 23 * by patching the descriptor. 24 + * 25 + * Glorious Model I incorrectly specifes the Usage Minimum for its 26 + * keyboard HID report, causing keycodes to be misinterpreted. 27 + * Fix this by setting Usage Minimum to 0 in that report. 24 28 */ 25 29 static __u8 *glorious_report_fixup(struct hid_device *hdev, __u8 *rdesc, 26 30 unsigned int *rsize) ··· 35 31 hid_info(hdev, "patching Glorious Model O consumer control report descriptor\n"); 36 32 rdesc[85] = rdesc[113] = rdesc[141] = \ 37 33 HID_MAIN_ITEM_VARIABLE | HID_MAIN_ITEM_RELATIVE; 34 + } 35 + if (*rsize == 156 && rdesc[41] == 1) { 36 + hid_info(hdev, "patching Glorious Model I keyboard report descriptor\n"); 37 + rdesc[41] = 0; 38 38 } 39 39 return rdesc; 40 40 } ··· 52 44 model = "Model O"; break; 53 45 case USB_DEVICE_ID_GLORIOUS_MODEL_D: 54 46 model = "Model D"; break; 47 + case USB_DEVICE_ID_GLORIOUS_MODEL_I: 48 + model = "Model I"; break; 55 49 } 56 50 57 51 snprintf(hdev->name, sizeof(hdev->name), "%s %s", "Glorious", model); ··· 76 66 } 77 67 78 68 static const struct hid_device_id glorious_devices[] = { 79 - { HID_USB_DEVICE(USB_VENDOR_ID_GLORIOUS, 69 + { HID_USB_DEVICE(USB_VENDOR_ID_SINOWEALTH, 80 70 USB_DEVICE_ID_GLORIOUS_MODEL_O) }, 81 - { HID_USB_DEVICE(USB_VENDOR_ID_GLORIOUS, 71 + { HID_USB_DEVICE(USB_VENDOR_ID_SINOWEALTH, 82 72 USB_DEVICE_ID_GLORIOUS_MODEL_D) }, 73 + { HID_USB_DEVICE(USB_VENDOR_ID_LAVIEW, 74 + USB_DEVICE_ID_GLORIOUS_MODEL_I) }, 83 75 { } 84 76 }; 85 77 MODULE_DEVICE_TABLE(hid, glorious_devices);
+7 -5
drivers/hid/hid-ids.h
··· 511 511 #define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_010A 0x010a 512 512 #define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100 0xe100 513 513 514 - #define USB_VENDOR_ID_GLORIOUS 0x258a 515 - #define USB_DEVICE_ID_GLORIOUS_MODEL_D 0x0033 516 - #define USB_DEVICE_ID_GLORIOUS_MODEL_O 0x0036 517 - 518 514 #define I2C_VENDOR_ID_GOODIX 0x27c6 519 515 #define I2C_DEVICE_ID_GOODIX_01F0 0x01f0 520 516 ··· 741 745 #define USB_VENDOR_ID_LABTEC 0x1020 742 746 #define USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD 0x0006 743 747 748 + #define USB_VENDOR_ID_LAVIEW 0x22D4 749 + #define USB_DEVICE_ID_GLORIOUS_MODEL_I 0x1503 750 + 744 751 #define USB_VENDOR_ID_LCPOWER 0x1241 745 752 #define USB_DEVICE_ID_LCPOWER_LC1000 0xf767 746 753 ··· 868 869 #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2 0xc534 869 870 #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1 0xc539 870 871 #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_1 0xc53f 871 - #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_2 0xc547 872 872 #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_POWERPLAY 0xc53a 873 873 #define USB_DEVICE_ID_SPACETRAVELLER 0xc623 874 874 #define USB_DEVICE_ID_SPACENAVIGATOR 0xc626 ··· 1157 1159 1158 1160 #define USB_VENDOR_ID_SIGMATEL 0x066F 1159 1161 #define USB_DEVICE_ID_SIGMATEL_STMP3780 0x3780 1162 + 1163 + #define USB_VENDOR_ID_SINOWEALTH 0x258a 1164 + #define USB_DEVICE_ID_GLORIOUS_MODEL_D 0x0033 1165 + #define USB_DEVICE_ID_GLORIOUS_MODEL_O 0x0036 1160 1166 1161 1167 #define USB_VENDOR_ID_SIS_TOUCH 0x0457 1162 1168 #define USB_DEVICE_ID_SIS9200_TOUCH 0x9200
+3 -8
drivers/hid/hid-logitech-dj.c
··· 1695 1695 } 1696 1696 /* 1697 1697 * Mouse-only receivers send unnumbered mouse data. The 27 MHz 1698 - * receiver uses 6 byte packets, the nano receiver 8 bytes, 1699 - * the lightspeed receiver (Pro X Superlight) 13 bytes. 1698 + * receiver uses 6 byte packets, the nano receiver 8 bytes. 1700 1699 */ 1701 1700 if (djrcv_dev->unnumbered_application == HID_GD_MOUSE && 1702 - size <= 13){ 1703 - u8 mouse_report[14]; 1701 + size <= 8) { 1702 + u8 mouse_report[9]; 1704 1703 1705 1704 /* Prepend report id */ 1706 1705 mouse_report[0] = REPORT_TYPE_MOUSE; ··· 1982 1983 { /* Logitech lightspeed receiver (0xc53f) */ 1983 1984 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 1984 1985 USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_1), 1985 - .driver_data = recvr_type_gaming_hidpp}, 1986 - { /* Logitech lightspeed receiver (0xc547) */ 1987 - HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 1988 - USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_2), 1989 1986 .driver_data = recvr_type_gaming_hidpp}, 1990 1987 1991 1988 { /* Logitech 27 MHz HID++ 1.0 receiver (0xc513) */
+3 -1
drivers/hid/hid-mcp2221.c
··· 1142 1142 if (ret) 1143 1143 return ret; 1144 1144 1145 + hid_device_io_start(hdev); 1146 + 1145 1147 /* Set I2C bus clock diviser */ 1146 1148 if (i2c_clk_freq > 400) 1147 1149 i2c_clk_freq = 400; ··· 1159 1157 snprintf(mcp->adapter.name, sizeof(mcp->adapter.name), 1160 1158 "MCP2221 usb-i2c bridge"); 1161 1159 1160 + i2c_set_adapdata(&mcp->adapter, mcp); 1162 1161 ret = devm_i2c_add_adapter(&hdev->dev, &mcp->adapter); 1163 1162 if (ret) { 1164 1163 hid_err(hdev, "can't add usb-i2c adapter: %d\n", ret); 1165 1164 return ret; 1166 1165 } 1167 - i2c_set_adapdata(&mcp->adapter, mcp); 1168 1166 1169 1167 #if IS_REACHABLE(CONFIG_GPIOLIB) 1170 1168 /* Setup GPIO chip */
+5
drivers/hid/hid-multitouch.c
··· 2046 2046 MT_USB_DEVICE(USB_VENDOR_ID_HANVON_ALT, 2047 2047 USB_DEVICE_ID_HANVON_ALT_MULTITOUCH) }, 2048 2048 2049 + /* HONOR GLO-GXXX panel */ 2050 + { .driver_data = MT_CLS_VTL, 2051 + HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, 2052 + 0x347d, 0x7853) }, 2053 + 2049 2054 /* Ilitek dual touch panel */ 2050 2055 { .driver_data = MT_CLS_NSMU, 2051 2056 MT_USB_DEVICE(USB_VENDOR_ID_ILITEK,
+1
drivers/hid/hid-quirks.c
··· 33 33 { HID_USB_DEVICE(USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2), HID_QUIRK_NO_INIT_REPORTS }, 34 34 { HID_USB_DEVICE(USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD), HID_QUIRK_BADPAD }, 35 35 { HID_USB_DEVICE(USB_VENDOR_ID_AMI, USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE), HID_QUIRK_ALWAYS_POLL }, 36 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI), HID_QUIRK_ALWAYS_POLL }, 36 37 { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM), HID_QUIRK_NOGET }, 37 38 { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC), HID_QUIRK_NOGET }, 38 39 { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM), HID_QUIRK_NOGET },
-14
drivers/leds/led-class.c
··· 75 75 } 76 76 static DEVICE_ATTR_RO(max_brightness); 77 77 78 - static ssize_t color_show(struct device *dev, 79 - struct device_attribute *attr, char *buf) 80 - { 81 - const char *color_text = "invalid"; 82 - struct led_classdev *led_cdev = dev_get_drvdata(dev); 83 - 84 - if (led_cdev->color < LED_COLOR_ID_MAX) 85 - color_text = led_colors[led_cdev->color]; 86 - 87 - return sysfs_emit(buf, "%s\n", color_text); 88 - } 89 - static DEVICE_ATTR_RO(color); 90 - 91 78 #ifdef CONFIG_LEDS_TRIGGERS 92 79 static BIN_ATTR(trigger, 0644, led_trigger_read, led_trigger_write, 0); 93 80 static struct bin_attribute *led_trigger_bin_attrs[] = { ··· 89 102 static struct attribute *led_class_attrs[] = { 90 103 &dev_attr_brightness.attr, 91 104 &dev_attr_max_brightness.attr, 92 - &dev_attr_color.attr, 93 105 NULL, 94 106 }; 95 107
+1
drivers/md/bcache/bcache.h
··· 265 265 #define BCACHE_DEV_WB_RUNNING 3 266 266 #define BCACHE_DEV_RATE_DW_RUNNING 4 267 267 int nr_stripes; 268 + #define BCH_MIN_STRIPE_SZ ((4 << 20) >> SECTOR_SHIFT) 268 269 unsigned int stripe_size; 269 270 atomic_t *stripe_sectors_dirty; 270 271 unsigned long *full_dirty_stripes;
+10 -1
drivers/md/bcache/btree.c
··· 1000 1000 * 1001 1001 * The btree node will have either a read or a write lock held, depending on 1002 1002 * level and op->lock. 1003 + * 1004 + * Note: Only error code or btree pointer will be returned, it is unncessary 1005 + * for callers to check NULL pointer. 1003 1006 */ 1004 1007 struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op, 1005 1008 struct bkey *k, int level, bool write, ··· 1114 1111 mutex_unlock(&b->c->bucket_lock); 1115 1112 } 1116 1113 1114 + /* 1115 + * Only error code or btree pointer will be returned, it is unncessary for 1116 + * callers to check NULL pointer. 1117 + */ 1117 1118 struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op, 1118 1119 int level, bool wait, 1119 1120 struct btree *parent) ··· 1375 1368 memset(new_nodes, 0, sizeof(new_nodes)); 1376 1369 closure_init_stack(&cl); 1377 1370 1378 - while (nodes < GC_MERGE_NODES && !IS_ERR(r[nodes].b)) 1371 + while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b)) 1379 1372 keys += r[nodes++].keys; 1380 1373 1381 1374 blocks = btree_default_blocks(b->c) * 2 / 3; ··· 1539 1532 return 0; 1540 1533 1541 1534 n = btree_node_alloc_replacement(replace, NULL); 1535 + if (IS_ERR(n)) 1536 + return 0; 1542 1537 1543 1538 /* recheck reserve after allocating replacement node */ 1544 1539 if (btree_check_reserve(b, NULL)) {
+3 -1
drivers/md/bcache/super.c
··· 905 905 906 906 if (!d->stripe_size) 907 907 d->stripe_size = 1 << 31; 908 + else if (d->stripe_size < BCH_MIN_STRIPE_SZ) 909 + d->stripe_size = roundup(BCH_MIN_STRIPE_SZ, d->stripe_size); 908 910 909 911 n = DIV_ROUND_UP_ULL(sectors, d->stripe_size); 910 912 if (!n || n > max_stripes) { ··· 2018 2016 c->root = bch_btree_node_get(c, NULL, k, 2019 2017 j->btree_level, 2020 2018 true, NULL); 2021 - if (IS_ERR_OR_NULL(c->root)) 2019 + if (IS_ERR(c->root)) 2022 2020 goto err; 2023 2021 2024 2022 list_del_init(&c->root->list);
+1 -1
drivers/md/bcache/sysfs.c
··· 1104 1104 sum += INITIAL_PRIO - cached[i]; 1105 1105 1106 1106 if (n) 1107 - do_div(sum, n); 1107 + sum = div64_u64(sum, n); 1108 1108 1109 1109 for (i = 0; i < ARRAY_SIZE(q); i++) 1110 1110 q[i] = INITIAL_PRIO - cached[n * (i + 1) /
+18 -6
drivers/md/bcache/writeback.c
··· 913 913 int cur_idx, prev_idx, skip_nr; 914 914 915 915 k = p = NULL; 916 - cur_idx = prev_idx = 0; 916 + prev_idx = 0; 917 917 918 918 bch_btree_iter_init(&c->root->keys, &iter, NULL); 919 919 k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad); ··· 977 977 void bch_sectors_dirty_init(struct bcache_device *d) 978 978 { 979 979 int i; 980 + struct btree *b = NULL; 980 981 struct bkey *k = NULL; 981 982 struct btree_iter iter; 982 983 struct sectors_dirty_init op; 983 984 struct cache_set *c = d->c; 984 985 struct bch_dirty_init_state state; 985 986 987 + retry_lock: 988 + b = c->root; 989 + rw_lock(0, b, b->level); 990 + if (b != c->root) { 991 + rw_unlock(0, b); 992 + goto retry_lock; 993 + } 994 + 986 995 /* Just count root keys if no leaf node */ 987 - rw_lock(0, c->root, c->root->level); 988 996 if (c->root->level == 0) { 989 997 bch_btree_op_init(&op.op, -1); 990 998 op.inode = d->id; 991 999 op.count = 0; 992 1000 993 1001 for_each_key_filter(&c->root->keys, 994 - k, &iter, bch_ptr_invalid) 1002 + k, &iter, bch_ptr_invalid) { 1003 + if (KEY_INODE(k) != op.inode) 1004 + continue; 995 1005 sectors_dirty_init_fn(&op.op, c->root, k); 1006 + } 996 1007 997 - rw_unlock(0, c->root); 1008 + rw_unlock(0, b); 998 1009 return; 999 1010 } 1000 1011 ··· 1025 1014 if (atomic_read(&state.enough)) 1026 1015 break; 1027 1016 1017 + atomic_inc(&state.started); 1028 1018 state.infos[i].state = &state; 1029 1019 state.infos[i].thread = 1030 1020 kthread_run(bch_dirty_init_thread, &state.infos[i], 1031 1021 "bch_dirtcnt[%d]", i); 1032 1022 if (IS_ERR(state.infos[i].thread)) { 1033 1023 pr_err("fails to run thread bch_dirty_init[%d]\n", i); 1024 + atomic_dec(&state.started); 1034 1025 for (--i; i >= 0; i--) 1035 1026 kthread_stop(state.infos[i].thread); 1036 1027 goto out; 1037 1028 } 1038 - atomic_inc(&state.started); 1039 1029 } 1040 1030 1041 1031 out: 1042 1032 /* Must wait for all threads to stop. 
*/ 1043 1033 wait_event(state.wait, atomic_read(&state.started) == 0); 1044 - rw_unlock(0, c->root); 1034 + rw_unlock(0, b); 1045 1035 } 1046 1036 1047 1037 void bch_cached_dev_writeback_init(struct cached_dev *dc)
+2 -1
drivers/md/md.c
··· 8666 8666 struct bio *orig_bio = md_io_clone->orig_bio; 8667 8667 struct mddev *mddev = md_io_clone->mddev; 8668 8668 8669 - orig_bio->bi_status = bio->bi_status; 8669 + if (bio->bi_status && !orig_bio->bi_status) 8670 + orig_bio->bi_status = bio->bi_status; 8670 8671 8671 8672 if (md_io_clone->start_time) 8672 8673 bio_end_io_acct(orig_bio, md_io_clone->start_time);
+1
drivers/media/pci/mgb4/Kconfig
··· 2 2 config VIDEO_MGB4 3 3 tristate "Digiteq Automotive MGB4 support" 4 4 depends on VIDEO_DEV && PCI && I2C && DMADEVICES && SPI && MTD && IIO 5 + depends on COMMON_CLK 5 6 select VIDEOBUF2_DMA_SG 6 7 select IIO_BUFFER 7 8 select IIO_TRIGGERED_BUFFER
+15 -5
drivers/media/pci/mgb4/mgb4_core.c
··· 42 42 43 43 #define MGB4_USER_IRQS 16 44 44 45 + #define DIGITEQ_VID 0x1ed8 46 + #define T100_DID 0x0101 47 + #define T200_DID 0x0201 48 + 45 49 ATTRIBUTE_GROUPS(mgb4_pci); 46 50 47 51 static int flashid; ··· 155 151 return dev ? container_of(dev, struct spi_master, dev) : NULL; 156 152 } 157 153 158 - static int init_spi(struct mgb4_dev *mgbdev) 154 + static int init_spi(struct mgb4_dev *mgbdev, u32 devid) 159 155 { 160 156 struct resource spi_resources[] = { 161 157 { ··· 217 213 snprintf(mgbdev->fw_part_name, sizeof(mgbdev->fw_part_name), 218 214 "mgb4-fw.%d", flashid); 219 215 mgbdev->partitions[0].name = mgbdev->fw_part_name; 220 - mgbdev->partitions[0].size = 0x400000; 221 - mgbdev->partitions[0].offset = 0x400000; 216 + if (devid == T200_DID) { 217 + mgbdev->partitions[0].size = 0x950000; 218 + mgbdev->partitions[0].offset = 0x1000000; 219 + } else { 220 + mgbdev->partitions[0].size = 0x400000; 221 + mgbdev->partitions[0].offset = 0x400000; 222 + } 222 223 mgbdev->partitions[0].mask_flags = 0; 223 224 224 225 snprintf(mgbdev->data_part_name, sizeof(mgbdev->data_part_name), ··· 560 551 goto err_video_regs; 561 552 562 553 /* SPI FLASH */ 563 - rv = init_spi(mgbdev); 554 + rv = init_spi(mgbdev, id->device); 564 555 if (rv < 0) 565 556 goto err_cmt_regs; 566 557 ··· 675 666 } 676 667 677 668 static const struct pci_device_id mgb4_pci_ids[] = { 678 - { PCI_DEVICE(0x1ed8, 0x0101), }, 669 + { PCI_DEVICE(DIGITEQ_VID, T100_DID), }, 670 + { PCI_DEVICE(DIGITEQ_VID, T200_DID), }, 679 671 { 0, } 680 672 }; 681 673 MODULE_DEVICE_TABLE(pci, mgb4_pci_ids);
+1 -1
drivers/media/platform/renesas/vsp1/vsp1_pipe.c
··· 373 373 (7 << VI6_DPR_SMPPT_TGW_SHIFT) | 374 374 (VI6_DPR_NODE_UNUSED << VI6_DPR_SMPPT_PT_SHIFT)); 375 375 376 - v4l2_subdev_call(&pipe->output->entity.subdev, video, s_stream, 0); 376 + vsp1_wpf_stop(pipe->output); 377 377 378 378 return ret; 379 379 }
+1 -9
drivers/media/platform/renesas/vsp1/vsp1_rpf.c
··· 44 44 } 45 45 46 46 /* ----------------------------------------------------------------------------- 47 - * V4L2 Subdevice Operations 48 - */ 49 - 50 - static const struct v4l2_subdev_ops rpf_ops = { 51 - .pad = &vsp1_rwpf_pad_ops, 52 - }; 53 - 54 - /* ----------------------------------------------------------------------------- 55 47 * VSP1 Entity Operations 56 48 */ 57 49 ··· 403 411 rpf->entity.index = index; 404 412 405 413 sprintf(name, "rpf.%u", index); 406 - ret = vsp1_entity_init(vsp1, &rpf->entity, name, 2, &rpf_ops, 414 + ret = vsp1_entity_init(vsp1, &rpf->entity, name, 2, &vsp1_rwpf_subdev_ops, 407 415 MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER); 408 416 if (ret < 0) 409 417 return ERR_PTR(ret);
+6 -2
drivers/media/platform/renesas/vsp1/vsp1_rwpf.c
··· 24 24 } 25 25 26 26 /* ----------------------------------------------------------------------------- 27 - * V4L2 Subdevice Pad Operations 27 + * V4L2 Subdevice Operations 28 28 */ 29 29 30 30 static int vsp1_rwpf_enum_mbus_code(struct v4l2_subdev *subdev, ··· 243 243 return ret; 244 244 } 245 245 246 - const struct v4l2_subdev_pad_ops vsp1_rwpf_pad_ops = { 246 + static const struct v4l2_subdev_pad_ops vsp1_rwpf_pad_ops = { 247 247 .init_cfg = vsp1_entity_init_cfg, 248 248 .enum_mbus_code = vsp1_rwpf_enum_mbus_code, 249 249 .enum_frame_size = vsp1_rwpf_enum_frame_size, ··· 251 251 .set_fmt = vsp1_rwpf_set_format, 252 252 .get_selection = vsp1_rwpf_get_selection, 253 253 .set_selection = vsp1_rwpf_set_selection, 254 + }; 255 + 256 + const struct v4l2_subdev_ops vsp1_rwpf_subdev_ops = { 257 + .pad = &vsp1_rwpf_pad_ops, 254 258 }; 255 259 256 260 /* -----------------------------------------------------------------------------
+3 -1
drivers/media/platform/renesas/vsp1/vsp1_rwpf.h
··· 79 79 struct vsp1_rwpf *vsp1_rpf_create(struct vsp1_device *vsp1, unsigned int index); 80 80 struct vsp1_rwpf *vsp1_wpf_create(struct vsp1_device *vsp1, unsigned int index); 81 81 82 + void vsp1_wpf_stop(struct vsp1_rwpf *wpf); 83 + 82 84 int vsp1_rwpf_init_ctrls(struct vsp1_rwpf *rwpf, unsigned int ncontrols); 83 85 84 - extern const struct v4l2_subdev_pad_ops vsp1_rwpf_pad_ops; 86 + extern const struct v4l2_subdev_ops vsp1_rwpf_subdev_ops; 85 87 86 88 struct v4l2_rect *vsp1_rwpf_get_crop(struct vsp1_rwpf *rwpf, 87 89 struct v4l2_subdev_state *sd_state);
+3 -26
drivers/media/platform/renesas/vsp1/vsp1_wpf.c
··· 186 186 } 187 187 188 188 /* ----------------------------------------------------------------------------- 189 - * V4L2 Subdevice Core Operations 189 + * VSP1 Entity Operations 190 190 */ 191 191 192 - static int wpf_s_stream(struct v4l2_subdev *subdev, int enable) 192 + void vsp1_wpf_stop(struct vsp1_rwpf *wpf) 193 193 { 194 - struct vsp1_rwpf *wpf = to_rwpf(subdev); 195 194 struct vsp1_device *vsp1 = wpf->entity.vsp1; 196 - 197 - if (enable) 198 - return 0; 199 195 200 196 /* 201 197 * Write to registers directly when stopping the stream as there will be ··· 200 204 vsp1_write(vsp1, VI6_WPF_IRQ_ENB(wpf->entity.index), 0); 201 205 vsp1_write(vsp1, wpf->entity.index * VI6_WPF_OFFSET + 202 206 VI6_WPF_SRCRPF, 0); 203 - 204 - return 0; 205 207 } 206 - 207 - /* ----------------------------------------------------------------------------- 208 - * V4L2 Subdevice Operations 209 - */ 210 - 211 - static const struct v4l2_subdev_video_ops wpf_video_ops = { 212 - .s_stream = wpf_s_stream, 213 - }; 214 - 215 - static const struct v4l2_subdev_ops wpf_ops = { 216 - .video = &wpf_video_ops, 217 - .pad = &vsp1_rwpf_pad_ops, 218 - }; 219 - 220 - /* ----------------------------------------------------------------------------- 221 - * VSP1 Entity Operations 222 - */ 223 208 224 209 static void vsp1_wpf_destroy(struct vsp1_entity *entity) 225 210 { ··· 560 583 wpf->entity.index = index; 561 584 562 585 sprintf(name, "wpf.%u", index); 563 - ret = vsp1_entity_init(vsp1, &wpf->entity, name, 2, &wpf_ops, 586 + ret = vsp1_entity_init(vsp1, &wpf->entity, name, 2, &vsp1_rwpf_subdev_ops, 564 587 MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER); 565 588 if (ret < 0) 566 589 return ERR_PTR(ret);
+2
drivers/mmc/core/block.c
··· 1482 1482 blk_mq_requeue_request(req, true); 1483 1483 else 1484 1484 __blk_mq_end_request(req, BLK_STS_OK); 1485 + } else if (mq->in_recovery) { 1486 + blk_mq_requeue_request(req, true); 1485 1487 } else { 1486 1488 blk_mq_end_request(req, BLK_STS_OK); 1487 1489 }
+7 -2
drivers/mmc/core/core.c
··· 551 551 cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; 552 552 cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */ 553 553 cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT; 554 - mmc_wait_for_cmd(host, &cmd, 0); 554 + mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES); 555 + 556 + mmc_poll_for_busy(host->card, MMC_CQE_RECOVERY_TIMEOUT, true, MMC_BUSY_IO); 555 557 556 558 memset(&cmd, 0, sizeof(cmd)); 557 559 cmd.opcode = MMC_CMDQ_TASK_MGMT; ··· 561 559 cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; 562 560 cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */ 563 561 cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT; 564 - err = mmc_wait_for_cmd(host, &cmd, 0); 562 + err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES); 565 563 566 564 host->cqe_ops->cqe_recovery_finish(host); 565 + 566 + if (err) 567 + err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES); 567 568 568 569 mmc_retune_release(host); 569 570
+22 -22
drivers/mmc/host/cqhci-core.c
··· 942 942 ret = cqhci_tasks_cleared(cq_host); 943 943 944 944 if (!ret) 945 - pr_debug("%s: cqhci: Failed to clear tasks\n", 946 - mmc_hostname(mmc)); 945 + pr_warn("%s: cqhci: Failed to clear tasks\n", 946 + mmc_hostname(mmc)); 947 947 948 948 return ret; 949 949 } ··· 976 976 ret = cqhci_halted(cq_host); 977 977 978 978 if (!ret) 979 - pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc)); 979 + pr_warn("%s: cqhci: Failed to halt\n", mmc_hostname(mmc)); 980 980 981 981 return ret; 982 982 } ··· 984 984 /* 985 985 * After halting we expect to be able to use the command line. We interpret the 986 986 * failure to halt to mean the data lines might still be in use (and the upper 987 - * layers will need to send a STOP command), so we set the timeout based on a 988 - * generous command timeout. 987 + * layers will need to send a STOP command), however failing to halt complicates 988 + * the recovery, so set a timeout that would reasonably allow I/O to complete. 989 989 */ 990 - #define CQHCI_START_HALT_TIMEOUT 5 990 + #define CQHCI_START_HALT_TIMEOUT 500 991 991 992 992 static void cqhci_recovery_start(struct mmc_host *mmc) 993 993 { ··· 1075 1075 1076 1076 ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT); 1077 1077 1078 - if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT)) 1079 - ok = false; 1080 - 1081 1078 /* 1082 1079 * The specification contradicts itself, by saying that tasks cannot be 1083 1080 * cleared if CQHCI does not halt, but if CQHCI does not halt, it should 1084 1081 * be disabled/re-enabled, but not to disable before clearing tasks. 1085 1082 * Have a go anyway. 
1086 1083 */ 1087 - if (!ok) { 1088 - pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc)); 1089 - cqcfg = cqhci_readl(cq_host, CQHCI_CFG); 1090 - cqcfg &= ~CQHCI_ENABLE; 1091 - cqhci_writel(cq_host, cqcfg, CQHCI_CFG); 1092 - cqcfg |= CQHCI_ENABLE; 1093 - cqhci_writel(cq_host, cqcfg, CQHCI_CFG); 1094 - /* Be sure that there are no tasks */ 1095 - ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT); 1096 - if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT)) 1097 - ok = false; 1098 - WARN_ON(!ok); 1099 - } 1084 + if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT)) 1085 + ok = false; 1086 + 1087 + /* Disable to make sure tasks really are cleared */ 1088 + cqcfg = cqhci_readl(cq_host, CQHCI_CFG); 1089 + cqcfg &= ~CQHCI_ENABLE; 1090 + cqhci_writel(cq_host, cqcfg, CQHCI_CFG); 1091 + 1092 + cqcfg = cqhci_readl(cq_host, CQHCI_CFG); 1093 + cqcfg |= CQHCI_ENABLE; 1094 + cqhci_writel(cq_host, cqcfg, CQHCI_CFG); 1095 + 1096 + cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT); 1097 + 1098 + if (!ok) 1099 + cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT); 1100 1100 1101 1101 cqhci_recover_mrqs(cq_host); 1102 1102
+29 -25
drivers/mmc/host/sdhci-pci-gli.c
··· 1189 1189 sdhci_writel(host, val, SDHCI_GLI_9763E_HS400_ES_REG); 1190 1190 } 1191 1191 1192 + static void gl9763e_set_low_power_negotiation(struct sdhci_pci_slot *slot, 1193 + bool enable) 1194 + { 1195 + struct pci_dev *pdev = slot->chip->pdev; 1196 + u32 value; 1197 + 1198 + pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value); 1199 + value &= ~GLI_9763E_VHS_REV; 1200 + value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_W); 1201 + pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value); 1202 + 1203 + pci_read_config_dword(pdev, PCIE_GLI_9763E_CFG, &value); 1204 + 1205 + if (enable) 1206 + value &= ~GLI_9763E_CFG_LPSN_DIS; 1207 + else 1208 + value |= GLI_9763E_CFG_LPSN_DIS; 1209 + 1210 + pci_write_config_dword(pdev, PCIE_GLI_9763E_CFG, value); 1211 + 1212 + pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value); 1213 + value &= ~GLI_9763E_VHS_REV; 1214 + value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_R); 1215 + pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value); 1216 + } 1217 + 1192 1218 static void sdhci_set_gl9763e_signaling(struct sdhci_host *host, 1193 1219 unsigned int timing) 1194 1220 { ··· 1323 1297 if (ret) 1324 1298 goto cleanup; 1325 1299 1300 + /* Disable LPM negotiation to avoid entering L1 state. 
*/ 1301 + gl9763e_set_low_power_negotiation(slot, false); 1302 + 1326 1303 return 0; 1327 1304 1328 1305 cleanup: ··· 1369 1340 } 1370 1341 1371 1342 #ifdef CONFIG_PM 1372 - static void gl9763e_set_low_power_negotiation(struct sdhci_pci_slot *slot, bool enable) 1373 - { 1374 - struct pci_dev *pdev = slot->chip->pdev; 1375 - u32 value; 1376 - 1377 - pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value); 1378 - value &= ~GLI_9763E_VHS_REV; 1379 - value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_W); 1380 - pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value); 1381 - 1382 - pci_read_config_dword(pdev, PCIE_GLI_9763E_CFG, &value); 1383 - 1384 - if (enable) 1385 - value &= ~GLI_9763E_CFG_LPSN_DIS; 1386 - else 1387 - value |= GLI_9763E_CFG_LPSN_DIS; 1388 - 1389 - pci_write_config_dword(pdev, PCIE_GLI_9763E_CFG, value); 1390 - 1391 - pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value); 1392 - value &= ~GLI_9763E_VHS_REV; 1393 - value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_R); 1394 - pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value); 1395 - } 1396 - 1397 1343 static int gl9763e_runtime_suspend(struct sdhci_pci_chip *chip) 1398 1344 { 1399 1345 struct sdhci_pci_slot *slot = chip->slots[0];
+25
drivers/mmc/host/sdhci-sprd.c
··· 416 416 mmc_request_done(host->mmc, mrq); 417 417 } 418 418 419 + static void sdhci_sprd_set_power(struct sdhci_host *host, unsigned char mode, 420 + unsigned short vdd) 421 + { 422 + struct mmc_host *mmc = host->mmc; 423 + 424 + switch (mode) { 425 + case MMC_POWER_OFF: 426 + mmc_regulator_set_ocr(host->mmc, mmc->supply.vmmc, 0); 427 + 428 + mmc_regulator_disable_vqmmc(mmc); 429 + break; 430 + case MMC_POWER_ON: 431 + mmc_regulator_enable_vqmmc(mmc); 432 + break; 433 + case MMC_POWER_UP: 434 + mmc_regulator_set_ocr(host->mmc, mmc->supply.vmmc, vdd); 435 + break; 436 + } 437 + } 438 + 419 439 static struct sdhci_ops sdhci_sprd_ops = { 420 440 .read_l = sdhci_sprd_readl, 421 441 .write_l = sdhci_sprd_writel, 422 442 .write_w = sdhci_sprd_writew, 423 443 .write_b = sdhci_sprd_writeb, 424 444 .set_clock = sdhci_sprd_set_clock, 445 + .set_power = sdhci_sprd_set_power, 425 446 .get_max_clock = sdhci_sprd_get_max_clock, 426 447 .get_min_clock = sdhci_sprd_get_min_clock, 427 448 .set_bus_width = sdhci_set_bus_width, ··· 843 822 sdhci_read_caps(host); 844 823 host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 | 845 824 SDHCI_SUPPORT_DDR50); 825 + 826 + ret = mmc_regulator_get_supply(host->mmc); 827 + if (ret) 828 + goto pm_runtime_disable; 846 829 847 830 ret = sdhci_setup_host(host); 848 831 if (ret)
+20 -6
drivers/net/dsa/mv88e6xxx/chip.c
··· 577 577 config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100; 578 578 } 579 579 580 + static void mv88e6351_phylink_get_caps(struct mv88e6xxx_chip *chip, int port, 581 + struct phylink_config *config) 582 + { 583 + unsigned long *supported = config->supported_interfaces; 584 + 585 + /* Translate the default cmode */ 586 + mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported); 587 + 588 + config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 | 589 + MAC_1000FD; 590 + } 591 + 580 592 static int mv88e6352_get_port4_serdes_cmode(struct mv88e6xxx_chip *chip) 581 593 { 582 594 u16 reg, val; ··· 3892 3880 struct mv88e6xxx_chip *chip = ds->priv; 3893 3881 int err; 3894 3882 3895 - if (chip->info->ops->pcs_ops->pcs_init) { 3883 + if (chip->info->ops->pcs_ops && 3884 + chip->info->ops->pcs_ops->pcs_init) { 3896 3885 err = chip->info->ops->pcs_ops->pcs_init(chip, port); 3897 3886 if (err) 3898 3887 return err; ··· 3908 3895 3909 3896 mv88e6xxx_teardown_devlink_regions_port(ds, port); 3910 3897 3911 - if (chip->info->ops->pcs_ops->pcs_teardown) 3898 + if (chip->info->ops->pcs_ops && 3899 + chip->info->ops->pcs_ops->pcs_teardown) 3912 3900 chip->info->ops->pcs_ops->pcs_teardown(chip, port); 3913 3901 } 3914 3902 ··· 4354 4340 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, 4355 4341 .stu_getnext = mv88e6352_g1_stu_getnext, 4356 4342 .stu_loadpurge = mv88e6352_g1_stu_loadpurge, 4357 - .phylink_get_caps = mv88e6185_phylink_get_caps, 4343 + .phylink_get_caps = mv88e6351_phylink_get_caps, 4358 4344 }; 4359 4345 4360 4346 static const struct mv88e6xxx_ops mv88e6172_ops = { ··· 4454 4440 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, 4455 4441 .stu_getnext = mv88e6352_g1_stu_getnext, 4456 4442 .stu_loadpurge = mv88e6352_g1_stu_loadpurge, 4457 - .phylink_get_caps = mv88e6185_phylink_get_caps, 4443 + .phylink_get_caps = mv88e6351_phylink_get_caps, 4458 4444 }; 4459 4445 4460 4446 static const struct mv88e6xxx_ops mv88e6176_ops = { ··· 5083 5069 .vtu_loadpurge = 
mv88e6352_g1_vtu_loadpurge, 5084 5070 .stu_getnext = mv88e6352_g1_stu_getnext, 5085 5071 .stu_loadpurge = mv88e6352_g1_stu_loadpurge, 5086 - .phylink_get_caps = mv88e6185_phylink_get_caps, 5072 + .phylink_get_caps = mv88e6351_phylink_get_caps, 5087 5073 }; 5088 5074 5089 5075 static const struct mv88e6xxx_ops mv88e6351_ops = { ··· 5131 5117 .stu_loadpurge = mv88e6352_g1_stu_loadpurge, 5132 5118 .avb_ops = &mv88e6352_avb_ops, 5133 5119 .ptp_ops = &mv88e6352_ptp_ops, 5134 - .phylink_get_caps = mv88e6185_phylink_get_caps, 5120 + .phylink_get_caps = mv88e6351_phylink_get_caps, 5135 5121 }; 5136 5122 5137 5123 static const struct mv88e6xxx_ops mv88e6352_ops = {
+10 -6
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
··· 516 516 517 517 memcpy(skb->data, fd_vaddr + fd_offset, fd_length); 518 518 519 - dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd)); 520 - 521 519 return skb; 522 520 } 523 521 ··· 587 589 struct rtnl_link_stats64 *percpu_stats; 588 590 struct dpaa2_eth_drv_stats *percpu_extras; 589 591 struct device *dev = priv->net_dev->dev.parent; 592 + bool recycle_rx_buf = false; 590 593 void *buf_data; 591 594 u32 xdp_act; 592 595 ··· 617 618 dma_unmap_page(dev, addr, priv->rx_buf_size, 618 619 DMA_BIDIRECTIONAL); 619 620 skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr); 621 + } else { 622 + recycle_rx_buf = true; 620 623 } 621 624 } else if (fd_format == dpaa2_fd_sg) { 622 625 WARN_ON(priv->xdp_prog); ··· 638 637 goto err_build_skb; 639 638 640 639 dpaa2_eth_receive_skb(priv, ch, fd, vaddr, fq, percpu_stats, skb); 640 + 641 + if (recycle_rx_buf) 642 + dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd)); 641 643 return; 642 644 643 645 err_build_skb: ··· 1077 1073 dma_addr_t addr; 1078 1074 1079 1075 buffer_start = skb->data - dpaa2_eth_needed_headroom(skb); 1080 - 1081 - /* If there's enough room to align the FD address, do it. 1082 - * It will help hardware optimize accesses. 1083 - */ 1084 1076 aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN, 1085 1077 DPAA2_ETH_TX_BUF_ALIGN); 1086 1078 if (aligned_start >= skb->head) 1087 1079 buffer_start = aligned_start; 1080 + else 1081 + return -ENOMEM; 1088 1082 1089 1083 /* Store a backpointer to the skb at the beginning of the buffer 1090 1084 * (in the private data area) such that we can release it ··· 4968 4966 err = dpaa2_eth_dl_port_add(priv); 4969 4967 if (err) 4970 4968 goto err_dl_port_add; 4969 + 4970 + net_dev->needed_headroom = DPAA2_ETH_SWA_SIZE + DPAA2_ETH_TX_BUF_ALIGN; 4971 4971 4972 4972 err = register_netdev(net_dev); 4973 4973 if (err < 0) {
+1 -1
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
··· 740 740 741 741 static inline unsigned int dpaa2_eth_needed_headroom(struct sk_buff *skb) 742 742 { 743 - unsigned int headroom = DPAA2_ETH_SWA_SIZE; 743 + unsigned int headroom = DPAA2_ETH_SWA_SIZE + DPAA2_ETH_TX_BUF_ALIGN; 744 744 745 745 /* If we don't have an skb (e.g. XDP buffer), we only need space for 746 746 * the software annotation area
+72 -50
drivers/net/ethernet/intel/ice/ice_lag.c
··· 570 570 } 571 571 572 572 /** 573 + * ice_lag_build_netdev_list - populate the lag struct's netdev list 574 + * @lag: local lag struct 575 + * @ndlist: pointer to netdev list to populate 576 + */ 577 + static void ice_lag_build_netdev_list(struct ice_lag *lag, 578 + struct ice_lag_netdev_list *ndlist) 579 + { 580 + struct ice_lag_netdev_list *nl; 581 + struct net_device *tmp_nd; 582 + 583 + INIT_LIST_HEAD(&ndlist->node); 584 + rcu_read_lock(); 585 + for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) { 586 + nl = kzalloc(sizeof(*nl), GFP_ATOMIC); 587 + if (!nl) 588 + break; 589 + 590 + nl->netdev = tmp_nd; 591 + list_add(&nl->node, &ndlist->node); 592 + } 593 + rcu_read_unlock(); 594 + lag->netdev_head = &ndlist->node; 595 + } 596 + 597 + /** 598 + * ice_lag_destroy_netdev_list - free lag struct's netdev list 599 + * @lag: pointer to local lag struct 600 + * @ndlist: pointer to lag struct netdev list 601 + */ 602 + static void ice_lag_destroy_netdev_list(struct ice_lag *lag, 603 + struct ice_lag_netdev_list *ndlist) 604 + { 605 + struct ice_lag_netdev_list *entry, *n; 606 + 607 + rcu_read_lock(); 608 + list_for_each_entry_safe(entry, n, &ndlist->node, node) { 609 + list_del(&entry->node); 610 + kfree(entry); 611 + } 612 + rcu_read_unlock(); 613 + lag->netdev_head = NULL; 614 + } 615 + 616 + /** 573 617 * ice_lag_move_single_vf_nodes - Move Tx scheduling nodes for single VF 574 618 * @lag: primary interface LAG struct 575 619 * @oldport: lport of previous interface ··· 641 597 void ice_lag_move_new_vf_nodes(struct ice_vf *vf) 642 598 { 643 599 struct ice_lag_netdev_list ndlist; 644 - struct list_head *tmp, *n; 645 600 u8 pri_port, act_port; 646 601 struct ice_lag *lag; 647 602 struct ice_vsi *vsi; ··· 664 621 pri_port = pf->hw.port_info->lport; 665 622 act_port = lag->active_port; 666 623 667 - if (lag->upper_netdev) { 668 - struct ice_lag_netdev_list *nl; 669 - struct net_device *tmp_nd; 670 - 671 - INIT_LIST_HEAD(&ndlist.node); 672 - rcu_read_lock(); 673 - 
for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) { 674 - nl = kzalloc(sizeof(*nl), GFP_ATOMIC); 675 - if (!nl) 676 - break; 677 - 678 - nl->netdev = tmp_nd; 679 - list_add(&nl->node, &ndlist.node); 680 - } 681 - rcu_read_unlock(); 682 - } 683 - 684 - lag->netdev_head = &ndlist.node; 624 + if (lag->upper_netdev) 625 + ice_lag_build_netdev_list(lag, &ndlist); 685 626 686 627 if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) && 687 628 lag->bonded && lag->primary && pri_port != act_port && 688 629 !list_empty(lag->netdev_head)) 689 630 ice_lag_move_single_vf_nodes(lag, pri_port, act_port, vsi->idx); 690 631 691 - list_for_each_safe(tmp, n, &ndlist.node) { 692 - struct ice_lag_netdev_list *entry; 693 - 694 - entry = list_entry(tmp, struct ice_lag_netdev_list, node); 695 - list_del(&entry->node); 696 - kfree(entry); 697 - } 698 - lag->netdev_head = NULL; 632 + ice_lag_destroy_netdev_list(lag, &ndlist); 699 633 700 634 new_vf_unlock: 701 635 mutex_unlock(&pf->lag_mutex); ··· 697 677 if (pf->vsi[i] && (pf->vsi[i]->type == ICE_VSI_VF || 698 678 pf->vsi[i]->type == ICE_VSI_SWITCHDEV_CTRL)) 699 679 ice_lag_move_single_vf_nodes(lag, oldport, newport, i); 680 + } 681 + 682 + /** 683 + * ice_lag_move_vf_nodes_cfg - move vf nodes outside LAG netdev event context 684 + * @lag: local lag struct 685 + * @src_prt: lport value for source port 686 + * @dst_prt: lport value for destination port 687 + * 688 + * This function is used to move nodes during an out-of-netdev-event situation, 689 + * primarily when the driver needs to reconfigure or recreate resources. 690 + * 691 + * Must be called while holding the lag_mutex to avoid lag events from 692 + * processing while out-of-sync moves are happening. Also, paired moves, 693 + * such as used in a reset flow, should both be called under the same mutex 694 + * lock to avoid changes between start of reset and end of reset. 
695 + */ 696 + void ice_lag_move_vf_nodes_cfg(struct ice_lag *lag, u8 src_prt, u8 dst_prt) 697 + { 698 + struct ice_lag_netdev_list ndlist; 699 + 700 + ice_lag_build_netdev_list(lag, &ndlist); 701 + ice_lag_move_vf_nodes(lag, src_prt, dst_prt); 702 + ice_lag_destroy_netdev_list(lag, &ndlist); 700 703 } 701 704 702 705 #define ICE_LAG_SRIOV_CP_RECIPE 10 ··· 2094 2051 { 2095 2052 struct ice_lag_netdev_list ndlist; 2096 2053 struct ice_lag *lag, *prim_lag; 2097 - struct list_head *tmp, *n; 2098 2054 u8 act_port, loc_port; 2099 2055 2100 2056 if (!pf->lag || !pf->lag->bonded) ··· 2105 2063 if (lag->primary) { 2106 2064 prim_lag = lag; 2107 2065 } else { 2108 - struct ice_lag_netdev_list *nl; 2109 - struct net_device *tmp_nd; 2110 - 2111 - INIT_LIST_HEAD(&ndlist.node); 2112 - rcu_read_lock(); 2113 - for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) { 2114 - nl = kzalloc(sizeof(*nl), GFP_ATOMIC); 2115 - if (!nl) 2116 - break; 2117 - 2118 - nl->netdev = tmp_nd; 2119 - list_add(&nl->node, &ndlist.node); 2120 - } 2121 - rcu_read_unlock(); 2122 - lag->netdev_head = &ndlist.node; 2066 + ice_lag_build_netdev_list(lag, &ndlist); 2123 2067 prim_lag = ice_lag_find_primary(lag); 2124 2068 } 2125 2069 ··· 2135 2107 2136 2108 ice_clear_rdma_cap(pf); 2137 2109 lag_rebuild_out: 2138 - list_for_each_safe(tmp, n, &ndlist.node) { 2139 - struct ice_lag_netdev_list *entry; 2140 - 2141 - entry = list_entry(tmp, struct ice_lag_netdev_list, node); 2142 - list_del(&entry->node); 2143 - kfree(entry); 2144 - } 2110 + ice_lag_destroy_netdev_list(lag, &ndlist); 2145 2111 mutex_unlock(&pf->lag_mutex); 2146 2112 } 2147 2113
+1
drivers/net/ethernet/intel/ice/ice_lag.h
··· 65 65 void ice_deinit_lag(struct ice_pf *pf); 66 66 void ice_lag_rebuild(struct ice_pf *pf); 67 67 bool ice_lag_is_switchdev_running(struct ice_pf *pf); 68 + void ice_lag_move_vf_nodes_cfg(struct ice_lag *lag, u8 src_prt, u8 dst_prt); 68 69 #endif /* _ICE_LAG_H_ */
+20
drivers/net/ethernet/intel/ice/ice_vf_lib.c
··· 828 828 int ice_reset_vf(struct ice_vf *vf, u32 flags) 829 829 { 830 830 struct ice_pf *pf = vf->pf; 831 + struct ice_lag *lag; 831 832 struct ice_vsi *vsi; 833 + u8 act_prt, pri_prt; 832 834 struct device *dev; 833 835 int err = 0; 834 836 bool rsd; 835 837 836 838 dev = ice_pf_to_dev(pf); 839 + act_prt = ICE_LAG_INVALID_PORT; 840 + pri_prt = pf->hw.port_info->lport; 837 841 838 842 if (flags & ICE_VF_RESET_NOTIFY) 839 843 ice_notify_vf_reset(vf); ··· 846 842 dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n", 847 843 vf->vf_id); 848 844 return 0; 845 + } 846 + 847 + lag = pf->lag; 848 + mutex_lock(&pf->lag_mutex); 849 + if (lag && lag->bonded && lag->primary) { 850 + act_prt = lag->active_port; 851 + if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT && 852 + lag->upper_netdev) 853 + ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt); 854 + else 855 + act_prt = ICE_LAG_INVALID_PORT; 849 856 } 850 857 851 858 if (flags & ICE_VF_RESET_LOCK) ··· 950 935 out_unlock: 951 936 if (flags & ICE_VF_RESET_LOCK) 952 937 mutex_unlock(&vf->cfg_lock); 938 + 939 + if (lag && lag->bonded && lag->primary && 940 + act_prt != ICE_LAG_INVALID_PORT) 941 + ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt); 942 + mutex_unlock(&pf->lag_mutex); 953 943 954 944 return err; 955 945 }
+25
drivers/net/ethernet/intel/ice/ice_virtchnl.c
··· 1603 1603 (struct virtchnl_vsi_queue_config_info *)msg; 1604 1604 struct virtchnl_queue_pair_info *qpi; 1605 1605 struct ice_pf *pf = vf->pf; 1606 + struct ice_lag *lag; 1606 1607 struct ice_vsi *vsi; 1608 + u8 act_prt, pri_prt; 1607 1609 int i = -1, q_idx; 1610 + 1611 + lag = pf->lag; 1612 + mutex_lock(&pf->lag_mutex); 1613 + act_prt = ICE_LAG_INVALID_PORT; 1614 + pri_prt = pf->hw.port_info->lport; 1615 + if (lag && lag->bonded && lag->primary) { 1616 + act_prt = lag->active_port; 1617 + if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT && 1618 + lag->upper_netdev) 1619 + ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt); 1620 + else 1621 + act_prt = ICE_LAG_INVALID_PORT; 1622 + } 1608 1623 1609 1624 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) 1610 1625 goto error_param; ··· 1744 1729 } 1745 1730 } 1746 1731 1732 + if (lag && lag->bonded && lag->primary && 1733 + act_prt != ICE_LAG_INVALID_PORT) 1734 + ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt); 1735 + mutex_unlock(&pf->lag_mutex); 1736 + 1747 1737 /* send the response to the VF */ 1748 1738 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, 1749 1739 VIRTCHNL_STATUS_SUCCESS, NULL, 0); ··· 1762 1742 dev_err(ice_pf_to_dev(pf), "VF-%d could not disable TX queue %d\n", 1763 1743 vf->vf_id, i); 1764 1744 } 1745 + 1746 + if (lag && lag->bonded && lag->primary && 1747 + act_prt != ICE_LAG_INVALID_PORT) 1748 + ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt); 1749 + mutex_unlock(&pf->lag_mutex); 1765 1750 1766 1751 ice_lag_move_new_vf_nodes(vf); 1767 1752
+2 -2
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
··· 5505 5505 5506 5506 ipolicer = &nix_hw->ipolicer[layer]; 5507 5507 for (idx = 0; idx < req->prof_count[layer]; idx++) { 5508 + if (idx == MAX_BANDPROF_PER_PFFUNC) 5509 + break; 5508 5510 prof_idx = req->prof_idx[layer][idx]; 5509 5511 if (prof_idx >= ipolicer->band_prof.max || 5510 5512 ipolicer->pfvf_map[prof_idx] != pcifunc) ··· 5520 5518 ipolicer->pfvf_map[prof_idx] = 0x00; 5521 5519 ipolicer->match_id[prof_idx] = 0; 5522 5520 rvu_free_rsrc(&ipolicer->band_prof, prof_idx); 5523 - if (idx == MAX_BANDPROF_PER_PFFUNC) 5524 - break; 5525 5521 } 5526 5522 } 5527 5523 mutex_unlock(&rvu->rsrc_lock);
+3
drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
··· 450 450 aq->prof.pebs_mantissa = 0; 451 451 aq->prof_mask.pebs_mantissa = 0xFF; 452 452 453 + aq->prof.hl_en = 0; 454 + aq->prof_mask.hl_en = 1; 455 + 453 456 /* Fill AQ info */ 454 457 aq->qidx = profile; 455 458 aq->ctype = NIX_AQ_CTYPE_BANDPROF;
+2
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
··· 1070 1070 void otx2_shutdown_tc(struct otx2_nic *nic); 1071 1071 int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type, 1072 1072 void *type_data); 1073 + void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic); 1074 + 1073 1075 /* CGX/RPM DMAC filters support */ 1074 1076 int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf); 1075 1077 int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);
+7 -2
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
··· 566 566 otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr); 567 567 otx2_queue_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr, 568 568 TYPE_PFVF); 569 - vfs -= 64; 569 + if (intr) 570 + trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr); 571 + vfs = 64; 570 572 } 571 573 572 574 intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0)); ··· 576 574 577 575 otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF); 578 576 579 - trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr); 577 + if (intr) 578 + trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr); 580 579 581 580 return IRQ_HANDLED; 582 581 } ··· 1872 1869 /* Install DMAC Filters */ 1873 1870 if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT) 1874 1871 otx2_dmacflt_reinstall_flows(pf); 1872 + 1873 + otx2_tc_apply_ingress_police_rules(pf); 1875 1874 1876 1875 err = otx2_rxtx_enable(pf, true); 1877 1876 /* If a mbox communication error happens at this point then interface
+98 -28
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
··· 47 47 bool is_act_police; 48 48 u32 prio; 49 49 struct npc_install_flow_req req; 50 + u64 rate; 51 + u32 burst; 52 + bool is_pps; 50 53 }; 51 54 52 55 static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst, ··· 287 284 return err; 288 285 } 289 286 287 + static int otx2_tc_act_set_hw_police(struct otx2_nic *nic, 288 + struct otx2_tc_flow *node) 289 + { 290 + int rc; 291 + 292 + mutex_lock(&nic->mbox.lock); 293 + 294 + rc = cn10k_alloc_leaf_profile(nic, &node->leaf_profile); 295 + if (rc) { 296 + mutex_unlock(&nic->mbox.lock); 297 + return rc; 298 + } 299 + 300 + rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile, 301 + node->burst, node->rate, node->is_pps); 302 + if (rc) 303 + goto free_leaf; 304 + 305 + rc = cn10k_map_unmap_rq_policer(nic, node->rq, node->leaf_profile, true); 306 + if (rc) 307 + goto free_leaf; 308 + 309 + mutex_unlock(&nic->mbox.lock); 310 + 311 + return 0; 312 + 313 + free_leaf: 314 + if (cn10k_free_leaf_profile(nic, node->leaf_profile)) 315 + netdev_err(nic->netdev, 316 + "Unable to free leaf bandwidth profile(%d)\n", 317 + node->leaf_profile); 318 + mutex_unlock(&nic->mbox.lock); 319 + return rc; 320 + } 321 + 290 322 static int otx2_tc_act_set_police(struct otx2_nic *nic, 291 323 struct otx2_tc_flow *node, 292 324 struct flow_cls_offload *f, ··· 338 300 return -EINVAL; 339 301 } 340 302 341 - mutex_lock(&nic->mbox.lock); 342 - 343 - rc = cn10k_alloc_leaf_profile(nic, &node->leaf_profile); 344 - if (rc) { 345 - mutex_unlock(&nic->mbox.lock); 346 - return rc; 347 - } 348 - 349 - rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile, burst, rate, pps); 350 - if (rc) 351 - goto free_leaf; 352 - 353 - rc = cn10k_map_unmap_rq_policer(nic, rq_idx, node->leaf_profile, true); 354 - if (rc) 355 - goto free_leaf; 356 - 357 - mutex_unlock(&nic->mbox.lock); 358 - 359 303 req->match_id = mark & 0xFFFFULL; 360 304 req->index = rq_idx; 361 305 req->op = NIX_RX_ACTIONOP_UCAST; 362 - set_bit(rq_idx, &nic->rq_bmap); 306 + 363 307 
node->is_act_police = true; 364 308 node->rq = rq_idx; 309 + node->burst = burst; 310 + node->rate = rate; 311 + node->is_pps = pps; 365 312 366 - return 0; 313 + rc = otx2_tc_act_set_hw_police(nic, node); 314 + if (!rc) 315 + set_bit(rq_idx, &nic->rq_bmap); 367 316 368 - free_leaf: 369 - if (cn10k_free_leaf_profile(nic, node->leaf_profile)) 370 - netdev_err(nic->netdev, 371 - "Unable to free leaf bandwidth profile(%d)\n", 372 - node->leaf_profile); 373 - mutex_unlock(&nic->mbox.lock); 374 317 return rc; 375 318 } 376 319 ··· 1077 1058 } 1078 1059 1079 1060 if (flow_node->is_act_police) { 1061 + __clear_bit(flow_node->rq, &nic->rq_bmap); 1062 + 1063 + if (nic->flags & OTX2_FLAG_INTF_DOWN) 1064 + goto free_mcam_flow; 1065 + 1080 1066 mutex_lock(&nic->mbox.lock); 1081 1067 1082 1068 err = cn10k_map_unmap_rq_policer(nic, flow_node->rq, ··· 1097 1073 "Unable to free leaf bandwidth profile(%d)\n", 1098 1074 flow_node->leaf_profile); 1099 1075 1100 - __clear_bit(flow_node->rq, &nic->rq_bmap); 1101 - 1102 1076 mutex_unlock(&nic->mbox.lock); 1103 1077 } 1104 1078 1079 + free_mcam_flow: 1105 1080 otx2_del_mcam_flow_entry(nic, flow_node->entry, NULL); 1106 1081 otx2_tc_update_mcam_table(nic, flow_cfg, flow_node, false); 1107 1082 kfree_rcu(flow_node, rcu); ··· 1119 1096 1120 1097 if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT)) 1121 1098 return -ENOMEM; 1099 + 1100 + if (nic->flags & OTX2_FLAG_INTF_DOWN) { 1101 + NL_SET_ERR_MSG_MOD(extack, "Interface not initialized"); 1102 + return -EINVAL; 1103 + } 1122 1104 1123 1105 if (flow_cfg->nr_flows == flow_cfg->max_flows) { 1124 1106 NL_SET_ERR_MSG_MOD(extack, ··· 1484 1456 otx2_destroy_tc_flow_list(nic); 1485 1457 } 1486 1458 EXPORT_SYMBOL(otx2_shutdown_tc); 1459 + 1460 + static void otx2_tc_config_ingress_rule(struct otx2_nic *nic, 1461 + struct otx2_tc_flow *node) 1462 + { 1463 + struct npc_install_flow_req *req; 1464 + 1465 + if (otx2_tc_act_set_hw_police(nic, node)) 1466 + return; 1467 + 1468 + mutex_lock(&nic->mbox.lock); 
1469 + 1470 + req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox); 1471 + if (!req) 1472 + goto err; 1473 + 1474 + memcpy(req, &node->req, sizeof(struct npc_install_flow_req)); 1475 + 1476 + if (otx2_sync_mbox_msg(&nic->mbox)) 1477 + netdev_err(nic->netdev, 1478 + "Failed to install MCAM flow entry for ingress rule"); 1479 + err: 1480 + mutex_unlock(&nic->mbox.lock); 1481 + } 1482 + 1483 + void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic) 1484 + { 1485 + struct otx2_flow_config *flow_cfg = nic->flow_cfg; 1486 + struct otx2_tc_flow *node; 1487 + 1488 + /* If any ingress policer rules exist for the interface then 1489 + * apply those rules. Ingress policer rules depend on bandwidth 1490 + * profiles linked to the receive queues. Since no receive queues 1491 + * exist when interface is down, ingress policer rules are stored 1492 + * and configured in hardware after all receive queues are allocated 1493 + * in otx2_open. 1494 + */ 1495 + list_for_each_entry(node, &flow_cfg->flow_list_tc, list) { 1496 + if (node->is_act_police) 1497 + otx2_tc_config_ingress_rule(nic, node); 1498 + } 1499 + } 1500 + EXPORT_SYMBOL(otx2_tc_apply_ingress_police_rules);
+7 -2
drivers/net/ethernet/realtek/r8169_main.c
··· 575 575 enum rtl_flag { 576 576 RTL_FLAG_TASK_ENABLED = 0, 577 577 RTL_FLAG_TASK_RESET_PENDING, 578 + RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE, 578 579 RTL_FLAG_TASK_TX_TIMEOUT, 579 580 RTL_FLAG_MAX 580 581 }; ··· 4497 4496 reset: 4498 4497 rtl_reset_work(tp); 4499 4498 netif_wake_queue(tp->dev); 4499 + } else if (test_and_clear_bit(RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE, tp->wk.flags)) { 4500 + rtl_reset_work(tp); 4500 4501 } 4501 4502 out_unlock: 4502 4503 rtnl_unlock(); ··· 4532 4529 } else { 4533 4530 /* In few cases rx is broken after link-down otherwise */ 4534 4531 if (rtl_is_8125(tp)) 4535 - rtl_reset_work(tp); 4532 + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE); 4536 4533 pm_runtime_idle(d); 4537 4534 } 4538 4535 ··· 4608 4605 rtl8169_down(tp); 4609 4606 rtl8169_rx_clear(tp); 4610 4607 4611 - cancel_work_sync(&tp->wk.work); 4608 + cancel_work(&tp->wk.work); 4612 4609 4613 4610 free_irq(tp->irq, tp); 4614 4611 ··· 4841 4838 4842 4839 if (pci_dev_run_wake(pdev)) 4843 4840 pm_runtime_get_noresume(&pdev->dev); 4841 + 4842 + cancel_work_sync(&tp->wk.work); 4844 4843 4845 4844 unregister_netdev(tp->dev); 4846 4845
+45 -24
drivers/net/ethernet/renesas/ravb_main.c
··· 515 515 { 516 516 struct ravb_private *priv = netdev_priv(ndev); 517 517 518 + if (priv->phy_interface == PHY_INTERFACE_MODE_MII) { 519 + ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_MII, CXR35); 520 + ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, 0); 521 + } else { 522 + ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_RGMII, CXR35); 523 + ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, 524 + CXR31_SEL_LINK0); 525 + } 526 + 518 527 /* Receive frame limit set register */ 519 528 ravb_write(ndev, GBETH_RX_BUFF_MAX + ETH_FCS_LEN, RFLR); 520 529 ··· 546 537 547 538 /* E-MAC interrupt enable register */ 548 539 ravb_write(ndev, ECSIPR_ICDIP, ECSIPR); 549 - 550 - if (priv->phy_interface == PHY_INTERFACE_MODE_MII) { 551 - ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, 0); 552 - ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_MII, CXR35); 553 - } else { 554 - ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, 555 - CXR31_SEL_LINK0); 556 - } 557 540 } 558 541 559 542 static void ravb_emac_init_rcar(struct net_device *ndev) ··· 1812 1811 if (info->gptp) 1813 1812 ravb_ptp_init(ndev, priv->pdev); 1814 1813 1815 - netif_tx_start_all_queues(ndev); 1816 - 1817 1814 /* PHY control start */ 1818 1815 error = ravb_phy_start(ndev); 1819 1816 if (error) 1820 1817 goto out_ptp_stop; 1818 + 1819 + netif_tx_start_all_queues(ndev); 1821 1820 1822 1821 return 0; 1823 1822 ··· 1825 1824 /* Stop PTP Clock driver */ 1826 1825 if (info->gptp) 1827 1826 ravb_ptp_stop(ndev); 1827 + ravb_stop_dma(ndev); 1828 1828 out_free_irq_mgmta: 1829 1829 if (!info->multi_irqs) 1830 1830 goto out_free_irq; ··· 1876 1874 struct net_device *ndev = priv->ndev; 1877 1875 int error; 1878 1876 1877 + if (!rtnl_trylock()) { 1878 + usleep_range(1000, 2000); 1879 + schedule_work(&priv->work); 1880 + return; 1881 + } 1882 + 1879 1883 netif_tx_stop_all_queues(ndev); 1880 1884 1881 1885 /* Stop PTP Clock driver */ ··· 1915 1907 */ 1916 1908 netdev_err(ndev, "%s: 
ravb_dmac_init() failed, error %d\n", 1917 1909 __func__, error); 1918 - return; 1910 + goto out_unlock; 1919 1911 } 1920 1912 ravb_emac_init(ndev); 1921 1913 ··· 1925 1917 ravb_ptp_init(ndev, priv->pdev); 1926 1918 1927 1919 netif_tx_start_all_queues(ndev); 1920 + 1921 + out_unlock: 1922 + rtnl_unlock(); 1928 1923 } 1929 1924 1930 1925 /* Packet transmit function for Ethernet AVB */ ··· 2656 2645 ndev->features = info->net_features; 2657 2646 ndev->hw_features = info->net_hw_features; 2658 2647 2659 - reset_control_deassert(rstc); 2648 + error = reset_control_deassert(rstc); 2649 + if (error) 2650 + goto out_free_netdev; 2651 + 2660 2652 pm_runtime_enable(&pdev->dev); 2661 - pm_runtime_get_sync(&pdev->dev); 2653 + error = pm_runtime_resume_and_get(&pdev->dev); 2654 + if (error < 0) 2655 + goto out_rpm_disable; 2662 2656 2663 2657 if (info->multi_irqs) { 2664 2658 if (info->err_mgmt_irqs) ··· 2888 2872 out_disable_refclk: 2889 2873 clk_disable_unprepare(priv->refclk); 2890 2874 out_release: 2891 - free_netdev(ndev); 2892 - 2893 2875 pm_runtime_put(&pdev->dev); 2876 + out_rpm_disable: 2894 2877 pm_runtime_disable(&pdev->dev); 2895 2878 reset_control_assert(rstc); 2879 + out_free_netdev: 2880 + free_netdev(ndev); 2896 2881 return error; 2897 2882 } 2898 2883 ··· 2903 2886 struct ravb_private *priv = netdev_priv(ndev); 2904 2887 const struct ravb_hw_info *info = priv->info; 2905 2888 2906 - /* Stop PTP Clock driver */ 2907 - if (info->ccc_gac) 2908 - ravb_ptp_stop(ndev); 2909 - 2910 - clk_disable_unprepare(priv->gptp_clk); 2911 - clk_disable_unprepare(priv->refclk); 2912 - 2913 - /* Set reset mode */ 2914 - ravb_write(ndev, CCC_OPC_RESET, CCC); 2915 2889 unregister_netdev(ndev); 2916 2890 if (info->nc_queues) 2917 2891 netif_napi_del(&priv->napi[RAVB_NC]); 2918 2892 netif_napi_del(&priv->napi[RAVB_BE]); 2893 + 2919 2894 ravb_mdio_release(priv); 2895 + 2896 + /* Stop PTP Clock driver */ 2897 + if (info->ccc_gac) 2898 + ravb_ptp_stop(ndev); 2899 + 2920 2900 
dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, 2921 2901 priv->desc_bat_dma); 2902 + 2903 + /* Set reset mode */ 2904 + ravb_write(ndev, CCC_OPC_RESET, CCC); 2905 + 2906 + clk_disable_unprepare(priv->gptp_clk); 2907 + clk_disable_unprepare(priv->refclk); 2908 + 2922 2909 pm_runtime_put_sync(&pdev->dev); 2923 2910 pm_runtime_disable(&pdev->dev); 2924 2911 reset_control_assert(priv->rstc);
+13 -9
drivers/net/ethernet/renesas/rswitch.c
··· 1504 1504 { 1505 1505 struct rswitch_device *rdev = netdev_priv(ndev); 1506 1506 struct rswitch_gwca_queue *gq = rdev->tx_queue; 1507 + netdev_tx_t ret = NETDEV_TX_OK; 1507 1508 struct rswitch_ext_desc *desc; 1508 - int ret = NETDEV_TX_OK; 1509 1509 dma_addr_t dma_addr; 1510 1510 1511 1511 if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - 1) { ··· 1517 1517 return ret; 1518 1518 1519 1519 dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE); 1520 - if (dma_mapping_error(ndev->dev.parent, dma_addr)) { 1521 - dev_kfree_skb_any(skb); 1522 - return ret; 1523 - } 1520 + if (dma_mapping_error(ndev->dev.parent, dma_addr)) 1521 + goto err_kfree; 1524 1522 1525 1523 gq->skbs[gq->cur] = skb; 1526 1524 desc = &gq->tx_ring[gq->cur]; ··· 1531 1533 struct rswitch_gwca_ts_info *ts_info; 1532 1534 1533 1535 ts_info = kzalloc(sizeof(*ts_info), GFP_ATOMIC); 1534 - if (!ts_info) { 1535 - dma_unmap_single(ndev->dev.parent, dma_addr, skb->len, DMA_TO_DEVICE); 1536 - return -ENOMEM; 1537 - } 1536 + if (!ts_info) 1537 + goto err_unmap; 1538 1538 1539 1539 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 1540 1540 rdev->ts_tag++; ··· 1553 1557 1554 1558 gq->cur = rswitch_next_queue_index(gq, true, 1); 1555 1559 rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32)); 1560 + 1561 + return ret; 1562 + 1563 + err_unmap: 1564 + dma_unmap_single(ndev->dev.parent, dma_addr, skb->len, DMA_TO_DEVICE); 1565 + 1566 + err_kfree: 1567 + dev_kfree_skb_any(skb); 1556 1568 1557 1569 return ret; 1558 1570 }
+4
drivers/net/ethernet/stmicro/stmmac/mmc_core.c
··· 177 177 #define MMC_XGMAC_RX_DISCARD_OCT_GB 0x1b4 178 178 #define MMC_XGMAC_RX_ALIGN_ERR_PKT 0x1bc 179 179 180 + #define MMC_XGMAC_TX_FPE_INTR_MASK 0x204 180 181 #define MMC_XGMAC_TX_FPE_FRAG 0x208 181 182 #define MMC_XGMAC_TX_HOLD_REQ 0x20c 183 + #define MMC_XGMAC_RX_FPE_INTR_MASK 0x224 182 184 #define MMC_XGMAC_RX_PKT_ASSEMBLY_ERR 0x228 183 185 #define MMC_XGMAC_RX_PKT_SMD_ERR 0x22c 184 186 #define MMC_XGMAC_RX_PKT_ASSEMBLY_OK 0x230 ··· 354 352 { 355 353 writel(0x0, mmcaddr + MMC_RX_INTR_MASK); 356 354 writel(0x0, mmcaddr + MMC_TX_INTR_MASK); 355 + writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_TX_FPE_INTR_MASK); 356 + writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_RX_FPE_INTR_MASK); 357 357 writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_RX_IPC_INTR_MASK); 358 358 } 359 359
+1 -1
drivers/net/ethernet/wangxun/libwx/wx_lib.c
··· 1972 1972 if (!pdev->msi_enabled && !pdev->msix_enabled) 1973 1973 return; 1974 1974 1975 - pci_free_irq_vectors(wx->pdev); 1976 1975 if (pdev->msix_enabled) { 1977 1976 kfree(wx->msix_entries); 1978 1977 wx->msix_entries = NULL; 1979 1978 } 1979 + pci_free_irq_vectors(wx->pdev); 1980 1980 } 1981 1981 EXPORT_SYMBOL(wx_reset_interrupt_capability); 1982 1982
+2 -2
drivers/net/netdevsim/bpf.c
··· 93 93 { 94 94 struct nsim_bpf_bound_prog *state; 95 95 96 - if (!prog || !prog->aux->offload) 96 + if (!prog || !bpf_prog_is_offloaded(prog->aux)) 97 97 return; 98 98 99 99 state = prog->aux->offload->dev_priv; ··· 311 311 if (!bpf->prog) 312 312 return 0; 313 313 314 - if (!bpf->prog->aux->offload) { 314 + if (!bpf_prog_is_offloaded(bpf->prog->aux)) { 315 315 NSIM_EA(bpf->extack, "xdpoffload of non-bound program"); 316 316 return -EINVAL; 317 317 }
+6
drivers/net/netkit.c
··· 851 851 return -EACCES; 852 852 } 853 853 854 + if (data[IFLA_NETKIT_PEER_INFO]) { 855 + NL_SET_ERR_MSG_ATTR(extack, data[IFLA_NETKIT_PEER_INFO], 856 + "netkit peer info cannot be changed after device creation"); 857 + return -EINVAL; 858 + } 859 + 854 860 if (data[IFLA_NETKIT_POLICY]) { 855 861 attr = data[IFLA_NETKIT_POLICY]; 856 862 policy = nla_get_u32(attr);
+1 -3
drivers/net/wireless/ath/ath9k/Kconfig
··· 57 57 58 58 config ATH9K_DEBUGFS 59 59 bool "Atheros ath9k debugging" 60 - depends on ATH9K && DEBUG_FS 61 - select MAC80211_DEBUGFS 60 + depends on ATH9K && DEBUG_FS && MAC80211_DEBUGFS 62 61 select ATH9K_COMMON_DEBUG 63 62 help 64 63 Say Y, if you need access to ath9k's statistics for ··· 69 70 config ATH9K_STATION_STATISTICS 70 71 bool "Detailed station statistics" 71 72 depends on ATH9K && ATH9K_DEBUGFS && DEBUG_FS 72 - select MAC80211_DEBUGFS 73 73 default n 74 74 help 75 75 This option enables detailed statistics for association stations.
+3 -1
drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
··· 707 707 rcu_dereference_protected(mvm_sta->link[link_id], 708 708 lockdep_is_held(&mvm->mutex)); 709 709 710 - if (WARN_ON(!link_conf || !mvm_link_sta)) 710 + if (WARN_ON(!link_conf || !mvm_link_sta)) { 711 + ret = -EINVAL; 711 712 goto err; 713 + } 712 714 713 715 ret = iwl_mvm_mld_cfg_sta(mvm, sta, vif, link_sta, link_conf, 714 716 mvm_link_sta);
+1
drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
··· 375 375 int ret, i, len, offset = 0; 376 376 u8 *clc_base = NULL, hw_encap = 0; 377 377 378 + dev->phy.clc_chan_conf = 0xff; 378 379 if (mt7921_disable_clc || 379 380 mt76_is_usb(&dev->mt76)) 380 381 return 0;
+2 -2
drivers/net/wireless/mediatek/mt76/mt7925/main.c
··· 14 14 static void 15 15 mt7925_init_he_caps(struct mt792x_phy *phy, enum nl80211_band band, 16 16 struct ieee80211_sband_iftype_data *data, 17 - enum nl80211_iftype iftype) 17 + enum nl80211_iftype iftype) 18 18 { 19 19 struct ieee80211_sta_he_cap *he_cap = &data->he_cap; 20 20 struct ieee80211_he_cap_elem *he_cap_elem = &he_cap->he_cap_elem; ··· 53 53 IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO | 54 54 IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO; 55 55 56 - switch (i) { 56 + switch (iftype) { 57 57 case NL80211_IFTYPE_AP: 58 58 he_cap_elem->mac_cap_info[2] |= 59 59 IEEE80211_HE_MAC_CAP2_BSR;
+3 -2
drivers/nvme/host/auth.c
··· 757 757 __func__, chap->qid); 758 758 mutex_lock(&ctrl->dhchap_auth_mutex); 759 759 ret = nvme_auth_dhchap_setup_host_response(ctrl, chap); 760 + mutex_unlock(&ctrl->dhchap_auth_mutex); 760 761 if (ret) { 761 - mutex_unlock(&ctrl->dhchap_auth_mutex); 762 762 chap->error = ret; 763 763 goto fail2; 764 764 } 765 - mutex_unlock(&ctrl->dhchap_auth_mutex); 766 765 767 766 /* DH-HMAC-CHAP Step 3: send reply */ 768 767 dev_dbg(ctrl->device, "%s: qid %d send reply\n", ··· 838 839 } 839 840 840 841 fail2: 842 + if (chap->status == 0) 843 + chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED; 841 844 dev_dbg(ctrl->device, "%s: qid %d send failure2, status %x\n", 842 845 __func__, chap->qid, chap->status); 843 846 tl = nvme_auth_set_dhchap_failure2_data(ctrl, chap);
+14 -7
drivers/nvme/host/core.c
··· 482 482 483 483 void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl) 484 484 { 485 - nvme_stop_keep_alive(ctrl); 486 485 if (ctrl->admin_tagset) { 487 486 blk_mq_tagset_busy_iter(ctrl->admin_tagset, 488 487 nvme_cancel_request, ctrl); ··· 1813 1814 return ret; 1814 1815 } 1815 1816 1816 - static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id) 1817 + static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id) 1817 1818 { 1818 1819 struct nvme_ctrl *ctrl = ns->ctrl; 1820 + int ret; 1819 1821 1820 - if (nvme_init_ms(ns, id)) 1821 - return; 1822 + ret = nvme_init_ms(ns, id); 1823 + if (ret) 1824 + return ret; 1822 1825 1823 1826 ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS); 1824 1827 if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)) 1825 - return; 1828 + return 0; 1826 1829 1827 1830 if (ctrl->ops->flags & NVME_F_FABRICS) { 1828 1831 /* ··· 1833 1832 * remap the separate metadata buffer from the block layer. 1834 1833 */ 1835 1834 if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT))) 1836 - return; 1835 + return 0; 1837 1836 1838 1837 ns->features |= NVME_NS_EXT_LBAS; 1839 1838 ··· 1860 1859 else 1861 1860 ns->features |= NVME_NS_METADATA_SUPPORTED; 1862 1861 } 1862 + return 0; 1863 1863 } 1864 1864 1865 1865 static void nvme_set_queue_limits(struct nvme_ctrl *ctrl, ··· 2034 2032 ns->lba_shift = id->lbaf[lbaf].ds; 2035 2033 nvme_set_queue_limits(ns->ctrl, ns->queue); 2036 2034 2037 - nvme_configure_metadata(ns, id); 2035 + ret = nvme_configure_metadata(ns, id); 2036 + if (ret < 0) { 2037 + blk_mq_unfreeze_queue(ns->disk->queue); 2038 + goto out; 2039 + } 2038 2040 nvme_set_chunk_sectors(ns, id); 2039 2041 nvme_update_disk_info(ns->disk, ns, id); 2040 2042 ··· 4354 4348 { 4355 4349 nvme_mpath_stop(ctrl); 4356 4350 nvme_auth_stop(ctrl); 4351 + nvme_stop_keep_alive(ctrl); 4357 4352 nvme_stop_failfast_work(ctrl); 4358 4353 flush_work(&ctrl->async_event_work); 4359 4354 
cancel_work_sync(&ctrl->fw_act_work);
+2
drivers/nvme/host/fabrics.c
··· 667 667 #endif 668 668 { NVMF_OPT_FAIL_FAST_TMO, "fast_io_fail_tmo=%d" }, 669 669 { NVMF_OPT_DISCOVERY, "discovery" }, 670 + #ifdef CONFIG_NVME_HOST_AUTH 670 671 { NVMF_OPT_DHCHAP_SECRET, "dhchap_secret=%s" }, 671 672 { NVMF_OPT_DHCHAP_CTRL_SECRET, "dhchap_ctrl_secret=%s" }, 673 + #endif 672 674 #ifdef CONFIG_NVME_TCP_TLS 673 675 { NVMF_OPT_TLS, "tls" }, 674 676 #endif
+8 -11
drivers/nvme/host/fc.c
··· 2530 2530 * clean up the admin queue. Same thing as above. 2531 2531 */ 2532 2532 nvme_quiesce_admin_queue(&ctrl->ctrl); 2533 - 2534 - /* 2535 - * Open-coding nvme_cancel_admin_tagset() as fc 2536 - * is not using nvme_cancel_request(). 2537 - */ 2538 - nvme_stop_keep_alive(&ctrl->ctrl); 2539 2533 blk_sync_queue(ctrl->ctrl.admin_q); 2540 2534 blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, 2541 2535 nvme_fc_terminate_exchange, &ctrl->ctrl); ··· 3132 3138 nvme_unquiesce_admin_queue(&ctrl->ctrl); 3133 3139 3134 3140 ret = nvme_init_ctrl_finish(&ctrl->ctrl, false); 3135 - if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags)) 3136 - ret = -EIO; 3137 3141 if (ret) 3138 3142 goto out_disconnect_admin_queue; 3139 - 3143 + if (test_bit(ASSOC_FAILED, &ctrl->flags)) { 3144 + ret = -EIO; 3145 + goto out_stop_keep_alive; 3146 + } 3140 3147 /* sanity checks */ 3141 3148 3142 3149 /* FC-NVME does not have other data in the capsule */ ··· 3145 3150 dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n", 3146 3151 ctrl->ctrl.icdoff); 3147 3152 ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR; 3148 - goto out_disconnect_admin_queue; 3153 + goto out_stop_keep_alive; 3149 3154 } 3150 3155 3151 3156 /* FC-NVME supports normal SGL Data Block Descriptors */ ··· 3153 3158 dev_err(ctrl->ctrl.device, 3154 3159 "Mandatory sgls are not supported!\n"); 3155 3160 ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR; 3156 - goto out_disconnect_admin_queue; 3161 + goto out_stop_keep_alive; 3157 3162 } 3158 3163 3159 3164 if (opts->queue_size > ctrl->ctrl.maxcmd) { ··· 3200 3205 3201 3206 out_term_aen_ops: 3202 3207 nvme_fc_term_aen_ops(ctrl); 3208 + out_stop_keep_alive: 3209 + nvme_stop_keep_alive(&ctrl->ctrl); 3203 3210 out_disconnect_admin_queue: 3204 3211 dev_warn(ctrl->ctrl.device, 3205 3212 "NVME-FC{%d}: create_assoc failed, assoc_id %llx ret %d\n",
+1
drivers/nvme/host/rdma.c
··· 1080 1080 nvme_rdma_free_io_queues(ctrl); 1081 1081 } 1082 1082 destroy_admin: 1083 + nvme_stop_keep_alive(&ctrl->ctrl); 1083 1084 nvme_quiesce_admin_queue(&ctrl->ctrl); 1084 1085 blk_sync_queue(ctrl->ctrl.admin_q); 1085 1086 nvme_rdma_stop_queue(&ctrl->queues[0]);
+15 -17
drivers/nvme/host/tcp.c
··· 36 36 module_param(so_priority, int, 0644); 37 37 MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority"); 38 38 39 - #ifdef CONFIG_NVME_TCP_TLS 40 39 /* 41 40 * TLS handshake timeout 42 41 */ 43 42 static int tls_handshake_timeout = 10; 43 + #ifdef CONFIG_NVME_TCP_TLS 44 44 module_param(tls_handshake_timeout, int, 0644); 45 45 MODULE_PARM_DESC(tls_handshake_timeout, 46 46 "nvme TLS handshake timeout in seconds (default 10)"); ··· 161 161 struct ahash_request *snd_hash; 162 162 __le32 exp_ddgst; 163 163 __le32 recv_ddgst; 164 - #ifdef CONFIG_NVME_TCP_TLS 165 164 struct completion tls_complete; 166 165 int tls_err; 167 - #endif 168 166 struct page_frag_cache pf_cache; 169 167 170 168 void (*state_change)(struct sock *); ··· 203 205 static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue) 204 206 { 205 207 return queue - queue->ctrl->queues; 208 + } 209 + 210 + static inline bool nvme_tcp_tls(struct nvme_ctrl *ctrl) 211 + { 212 + if (!IS_ENABLED(CONFIG_NVME_TCP_TLS)) 213 + return 0; 214 + 215 + return ctrl->opts->tls; 206 216 } 207 217 208 218 static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue) ··· 1418 1412 memset(&msg, 0, sizeof(msg)); 1419 1413 iov.iov_base = icresp; 1420 1414 iov.iov_len = sizeof(*icresp); 1421 - if (queue->ctrl->ctrl.opts->tls) { 1415 + if (nvme_tcp_tls(&queue->ctrl->ctrl)) { 1422 1416 msg.msg_control = cbuf; 1423 1417 msg.msg_controllen = sizeof(cbuf); 1424 1418 } ··· 1430 1424 goto free_icresp; 1431 1425 } 1432 1426 ret = -ENOTCONN; 1433 - if (queue->ctrl->ctrl.opts->tls) { 1427 + if (nvme_tcp_tls(&queue->ctrl->ctrl)) { 1434 1428 ctype = tls_get_record_type(queue->sock->sk, 1435 1429 (struct cmsghdr *)cbuf); 1436 1430 if (ctype != TLS_RECORD_TYPE_DATA) { ··· 1554 1548 queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false); 1555 1549 } 1556 1550 1557 - #ifdef CONFIG_NVME_TCP_TLS 1558 1551 static void nvme_tcp_tls_done(void *data, int status, key_serial_t pskid) 1559 1552 { 1560 
1553 struct nvme_tcp_queue *queue = data; ··· 1630 1625 } 1631 1626 return ret; 1632 1627 } 1633 - #else 1634 - static int nvme_tcp_start_tls(struct nvme_ctrl *nctrl, 1635 - struct nvme_tcp_queue *queue, 1636 - key_serial_t pskid) 1637 - { 1638 - return -EPROTONOSUPPORT; 1639 - } 1640 - #endif 1641 1628 1642 1629 static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid, 1643 1630 key_serial_t pskid) ··· 1756 1759 } 1757 1760 1758 1761 /* If PSKs are configured try to start TLS */ 1759 - if (pskid) { 1762 + if (IS_ENABLED(CONFIG_NVME_TCP_TLS) && pskid) { 1760 1763 ret = nvme_tcp_start_tls(nctrl, queue, pskid); 1761 1764 if (ret) 1762 1765 goto err_init_connect; ··· 1913 1916 int ret; 1914 1917 key_serial_t pskid = 0; 1915 1918 1916 - if (ctrl->opts->tls) { 1919 + if (nvme_tcp_tls(ctrl)) { 1917 1920 if (ctrl->opts->tls_key) 1918 1921 pskid = key_serial(ctrl->opts->tls_key); 1919 1922 else ··· 1946 1949 { 1947 1950 int i, ret; 1948 1951 1949 - if (ctrl->opts->tls && !ctrl->tls_key) { 1952 + if (nvme_tcp_tls(ctrl) && !ctrl->tls_key) { 1950 1953 dev_err(ctrl->device, "no PSK negotiated\n"); 1951 1954 return -ENOKEY; 1952 1955 } ··· 2234 2237 nvme_tcp_destroy_io_queues(ctrl, new); 2235 2238 } 2236 2239 destroy_admin: 2240 + nvme_stop_keep_alive(ctrl); 2237 2241 nvme_tcp_teardown_admin_queue(ctrl, false); 2238 2242 return ret; 2239 2243 }
+2 -2
drivers/nvme/target/Kconfig
··· 4 4 tristate "NVMe Target support" 5 5 depends on BLOCK 6 6 depends on CONFIGFS_FS 7 + select NVME_KEYRING if NVME_TARGET_TCP_TLS 8 + select KEYS if NVME_TARGET_TCP_TLS 7 9 select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY 8 10 select SGL_ALLOC 9 11 help ··· 89 87 config NVME_TARGET_TCP_TLS 90 88 bool "NVMe over Fabrics TCP target TLS encryption support" 91 89 depends on NVME_TARGET_TCP 92 - select NVME_KEYRING 93 90 select NET_HANDSHAKE 94 - select KEYS 95 91 help 96 92 Enables TLS encryption for the NVMe TCP target using the netlink handshake API. 97 93
+1 -1
drivers/nvme/target/configfs.c
··· 1893 1893 return ERR_PTR(-ENOMEM); 1894 1894 } 1895 1895 1896 - if (nvme_keyring_id()) { 1896 + if (IS_ENABLED(CONFIG_NVME_TARGET_TCP_TLS) && nvme_keyring_id()) { 1897 1897 port->keyring = key_lookup(nvme_keyring_id()); 1898 1898 if (IS_ERR(port->keyring)) { 1899 1899 pr_warn("NVMe keyring not available, disabling TLS\n");
+4
drivers/nvme/target/fabrics-cmd.c
··· 244 244 goto out; 245 245 } 246 246 247 + d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0'; 248 + d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0'; 247 249 status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req, 248 250 le32_to_cpu(c->kato), &ctrl); 249 251 if (status) ··· 315 313 goto out; 316 314 } 317 315 316 + d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0'; 317 + d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0'; 318 318 ctrl = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn, 319 319 le16_to_cpu(d->cntlid), req); 320 320 if (!ctrl) {
+3 -1
drivers/nvme/target/tcp.c
··· 1854 1854 } 1855 1855 return ret; 1856 1856 } 1857 + #else 1858 + static void nvmet_tcp_tls_handshake_timeout(struct work_struct *w) {} 1857 1859 #endif 1858 1860 1859 1861 static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port, ··· 1913 1911 list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list); 1914 1912 mutex_unlock(&nvmet_tcp_queue_mutex); 1915 1913 1916 - #ifdef CONFIG_NVME_TARGET_TCP_TLS 1917 1914 INIT_DELAYED_WORK(&queue->tls_handshake_tmo_work, 1918 1915 nvmet_tcp_tls_handshake_timeout); 1916 + #ifdef CONFIG_NVME_TARGET_TCP_TLS 1919 1917 if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) { 1920 1918 struct sock *sk = queue->sock->sk; 1921 1919
-1
drivers/phy/Kconfig
··· 87 87 source "drivers/phy/mscc/Kconfig" 88 88 source "drivers/phy/qualcomm/Kconfig" 89 89 source "drivers/phy/ralink/Kconfig" 90 - source "drivers/phy/realtek/Kconfig" 91 90 source "drivers/phy/renesas/Kconfig" 92 91 source "drivers/phy/rockchip/Kconfig" 93 92 source "drivers/phy/samsung/Kconfig"
-1
drivers/phy/Makefile
··· 26 26 mscc/ \ 27 27 qualcomm/ \ 28 28 ralink/ \ 29 - realtek/ \ 30 29 renesas/ \ 31 30 rockchip/ \ 32 31 samsung/ \
-32
drivers/phy/realtek/Kconfig
··· 1 - # SPDX-License-Identifier: GPL-2.0 2 - # 3 - # Phy drivers for Realtek platforms 4 - # 5 - 6 - if ARCH_REALTEK || COMPILE_TEST 7 - 8 - config PHY_RTK_RTD_USB2PHY 9 - tristate "Realtek RTD USB2 PHY Transceiver Driver" 10 - depends on USB_SUPPORT 11 - select GENERIC_PHY 12 - select USB_PHY 13 - select USB_COMMON 14 - help 15 - Enable this to support Realtek SoC USB2 phy transceiver. 16 - The DHC (digital home center) RTD series SoCs used the Synopsys 17 - DWC3 USB IP. This driver will do the PHY initialization 18 - of the parameters. 19 - 20 - config PHY_RTK_RTD_USB3PHY 21 - tristate "Realtek RTD USB3 PHY Transceiver Driver" 22 - depends on USB_SUPPORT 23 - select GENERIC_PHY 24 - select USB_PHY 25 - select USB_COMMON 26 - help 27 - Enable this to support Realtek SoC USB3 phy transceiver. 28 - The DHC (digital home center) RTD series SoCs used the Synopsys 29 - DWC3 USB IP. This driver will do the PHY initialization 30 - of the parameters. 31 - 32 - endif # ARCH_REALTEK || COMPILE_TEST
-3
drivers/phy/realtek/Makefile
··· 1 - # SPDX-License-Identifier: GPL-2.0 2 - obj-$(CONFIG_PHY_RTK_RTD_USB2PHY) += phy-rtk-usb2.o 3 - obj-$(CONFIG_PHY_RTK_RTD_USB3PHY) += phy-rtk-usb3.o
-1325
drivers/phy/realtek/phy-rtk-usb2.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 - /* 3 - * phy-rtk-usb2.c RTK usb2.0 PHY driver 4 - * 5 - * Copyright (C) 2023 Realtek Semiconductor Corporation 6 - * 7 - */ 8 - 9 - #include <linux/module.h> 10 - #include <linux/of.h> 11 - #include <linux/of_address.h> 12 - #include <linux/platform_device.h> 13 - #include <linux/uaccess.h> 14 - #include <linux/debugfs.h> 15 - #include <linux/nvmem-consumer.h> 16 - #include <linux/regmap.h> 17 - #include <linux/sys_soc.h> 18 - #include <linux/mfd/syscon.h> 19 - #include <linux/phy/phy.h> 20 - #include <linux/usb.h> 21 - #include <linux/usb/phy.h> 22 - #include <linux/usb/hcd.h> 23 - 24 - /* GUSB2PHYACCn register */ 25 - #define PHY_NEW_REG_REQ BIT(25) 26 - #define PHY_VSTS_BUSY BIT(23) 27 - #define PHY_VCTRL_SHIFT 8 28 - #define PHY_REG_DATA_MASK 0xff 29 - 30 - #define GET_LOW_NIBBLE(addr) ((addr) & 0x0f) 31 - #define GET_HIGH_NIBBLE(addr) (((addr) & 0xf0) >> 4) 32 - 33 - #define EFUS_USB_DC_CAL_RATE 2 34 - #define EFUS_USB_DC_CAL_MAX 7 35 - 36 - #define EFUS_USB_DC_DIS_RATE 1 37 - #define EFUS_USB_DC_DIS_MAX 7 38 - 39 - #define MAX_PHY_DATA_SIZE 20 40 - #define OFFEST_PHY_READ 0x20 41 - 42 - #define MAX_USB_PHY_NUM 4 43 - #define MAX_USB_PHY_PAGE0_DATA_SIZE 16 44 - #define MAX_USB_PHY_PAGE1_DATA_SIZE 16 45 - #define MAX_USB_PHY_PAGE2_DATA_SIZE 8 46 - 47 - #define SET_PAGE_OFFSET 0xf4 48 - #define SET_PAGE_0 0x9b 49 - #define SET_PAGE_1 0xbb 50 - #define SET_PAGE_2 0xdb 51 - 52 - #define PAGE_START 0xe0 53 - #define PAGE0_0XE4 0xe4 54 - #define PAGE0_0XE6 0xe6 55 - #define PAGE0_0XE7 0xe7 56 - #define PAGE1_0XE0 0xe0 57 - #define PAGE1_0XE2 0xe2 58 - 59 - #define SENSITIVITY_CTRL (BIT(4) | BIT(5) | BIT(6)) 60 - #define ENABLE_AUTO_SENSITIVITY_CALIBRATION BIT(2) 61 - #define DEFAULT_DC_DRIVING_VALUE (0x8) 62 - #define DEFAULT_DC_DISCONNECTION_VALUE (0x6) 63 - #define HS_CLK_SELECT BIT(6) 64 - 65 - struct phy_reg { 66 - void __iomem *reg_wrap_vstatus; 67 - void __iomem *reg_gusb2phyacc0; 68 - int vstatus_index; 69 - }; 
70 - 71 - struct phy_data { 72 - u8 addr; 73 - u8 data; 74 - }; 75 - 76 - struct phy_cfg { 77 - int page0_size; 78 - struct phy_data page0[MAX_USB_PHY_PAGE0_DATA_SIZE]; 79 - int page1_size; 80 - struct phy_data page1[MAX_USB_PHY_PAGE1_DATA_SIZE]; 81 - int page2_size; 82 - struct phy_data page2[MAX_USB_PHY_PAGE2_DATA_SIZE]; 83 - 84 - int num_phy; 85 - 86 - bool check_efuse; 87 - int check_efuse_version; 88 - #define CHECK_EFUSE_V1 1 89 - #define CHECK_EFUSE_V2 2 90 - int efuse_dc_driving_rate; 91 - int efuse_dc_disconnect_rate; 92 - int dc_driving_mask; 93 - int dc_disconnect_mask; 94 - bool usb_dc_disconnect_at_page0; 95 - int driving_updated_for_dev_dis; 96 - 97 - bool do_toggle; 98 - bool do_toggle_driving; 99 - bool use_default_parameter; 100 - bool is_double_sensitivity_mode; 101 - }; 102 - 103 - struct phy_parameter { 104 - struct phy_reg phy_reg; 105 - 106 - /* Get from efuse */ 107 - s8 efuse_usb_dc_cal; 108 - s8 efuse_usb_dc_dis; 109 - 110 - /* Get from dts */ 111 - bool inverse_hstx_sync_clock; 112 - u32 driving_level; 113 - s32 driving_level_compensate; 114 - s32 disconnection_compensate; 115 - }; 116 - 117 - struct rtk_phy { 118 - struct usb_phy phy; 119 - struct device *dev; 120 - 121 - struct phy_cfg *phy_cfg; 122 - int num_phy; 123 - struct phy_parameter *phy_parameter; 124 - 125 - struct dentry *debug_dir; 126 - }; 127 - 128 - /* mapping 0xE0 to 0 ... 
0xE7 to 7, 0xF0 to 8 ,,, 0xF7 to 15 */ 129 - static inline int page_addr_to_array_index(u8 addr) 130 - { 131 - return (int)((((addr) - PAGE_START) & 0x7) + 132 - ((((addr) - PAGE_START) & 0x10) >> 1)); 133 - } 134 - 135 - static inline u8 array_index_to_page_addr(int index) 136 - { 137 - return ((((index) + PAGE_START) & 0x7) + 138 - ((((index) & 0x8) << 1) + PAGE_START)); 139 - } 140 - 141 - #define PHY_IO_TIMEOUT_USEC (50000) 142 - #define PHY_IO_DELAY_US (100) 143 - 144 - static inline int utmi_wait_register(void __iomem *reg, u32 mask, u32 result) 145 - { 146 - int ret; 147 - unsigned int val; 148 - 149 - ret = read_poll_timeout(readl, val, ((val & mask) == result), 150 - PHY_IO_DELAY_US, PHY_IO_TIMEOUT_USEC, false, reg); 151 - if (ret) { 152 - pr_err("%s can't program USB phy\n", __func__); 153 - return -ETIMEDOUT; 154 - } 155 - 156 - return 0; 157 - } 158 - 159 - static char rtk_phy_read(struct phy_reg *phy_reg, char addr) 160 - { 161 - void __iomem *reg_gusb2phyacc0 = phy_reg->reg_gusb2phyacc0; 162 - unsigned int val; 163 - int ret = 0; 164 - 165 - addr -= OFFEST_PHY_READ; 166 - 167 - /* polling until VBusy == 0 */ 168 - ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0); 169 - if (ret) 170 - return (char)ret; 171 - 172 - /* VCtrl = low nibble of addr, and set PHY_NEW_REG_REQ */ 173 - val = PHY_NEW_REG_REQ | (GET_LOW_NIBBLE(addr) << PHY_VCTRL_SHIFT); 174 - writel(val, reg_gusb2phyacc0); 175 - ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0); 176 - if (ret) 177 - return (char)ret; 178 - 179 - /* VCtrl = high nibble of addr, and set PHY_NEW_REG_REQ */ 180 - val = PHY_NEW_REG_REQ | (GET_HIGH_NIBBLE(addr) << PHY_VCTRL_SHIFT); 181 - writel(val, reg_gusb2phyacc0); 182 - ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0); 183 - if (ret) 184 - return (char)ret; 185 - 186 - val = readl(reg_gusb2phyacc0); 187 - 188 - return (char)(val & PHY_REG_DATA_MASK); 189 - } 190 - 191 - static int rtk_phy_write(struct phy_reg *phy_reg, char addr, 
char data) 192 - { 193 - unsigned int val; 194 - void __iomem *reg_wrap_vstatus = phy_reg->reg_wrap_vstatus; 195 - void __iomem *reg_gusb2phyacc0 = phy_reg->reg_gusb2phyacc0; 196 - int shift_bits = phy_reg->vstatus_index * 8; 197 - int ret = 0; 198 - 199 - /* write data to VStatusOut2 (data output to phy) */ 200 - writel((u32)data << shift_bits, reg_wrap_vstatus); 201 - 202 - ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0); 203 - if (ret) 204 - return ret; 205 - 206 - /* VCtrl = low nibble of addr, set PHY_NEW_REG_REQ */ 207 - val = PHY_NEW_REG_REQ | (GET_LOW_NIBBLE(addr) << PHY_VCTRL_SHIFT); 208 - 209 - writel(val, reg_gusb2phyacc0); 210 - ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0); 211 - if (ret) 212 - return ret; 213 - 214 - /* VCtrl = high nibble of addr, set PHY_NEW_REG_REQ */ 215 - val = PHY_NEW_REG_REQ | (GET_HIGH_NIBBLE(addr) << PHY_VCTRL_SHIFT); 216 - 217 - writel(val, reg_gusb2phyacc0); 218 - ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0); 219 - if (ret) 220 - return ret; 221 - 222 - return 0; 223 - } 224 - 225 - static int rtk_phy_set_page(struct phy_reg *phy_reg, int page) 226 - { 227 - switch (page) { 228 - case 0: 229 - return rtk_phy_write(phy_reg, SET_PAGE_OFFSET, SET_PAGE_0); 230 - case 1: 231 - return rtk_phy_write(phy_reg, SET_PAGE_OFFSET, SET_PAGE_1); 232 - case 2: 233 - return rtk_phy_write(phy_reg, SET_PAGE_OFFSET, SET_PAGE_2); 234 - default: 235 - pr_err("%s error page=%d\n", __func__, page); 236 - } 237 - 238 - return -EINVAL; 239 - } 240 - 241 - static u8 __updated_dc_disconnect_level_page0_0xe4(struct phy_cfg *phy_cfg, 242 - struct phy_parameter *phy_parameter, u8 data) 243 - { 244 - u8 ret; 245 - s32 val; 246 - s32 dc_disconnect_mask = phy_cfg->dc_disconnect_mask; 247 - int offset = 4; 248 - 249 - val = (s32)((data >> offset) & dc_disconnect_mask) 250 - + phy_parameter->efuse_usb_dc_dis 251 - + phy_parameter->disconnection_compensate; 252 - 253 - if (val > dc_disconnect_mask) 254 - val = 
dc_disconnect_mask; 255 - else if (val < 0) 256 - val = 0; 257 - 258 - ret = (data & (~(dc_disconnect_mask << offset))) | 259 - (val & dc_disconnect_mask) << offset; 260 - 261 - return ret; 262 - } 263 - 264 - /* updated disconnect level at page0 */ 265 - static void update_dc_disconnect_level_at_page0(struct rtk_phy *rtk_phy, 266 - struct phy_parameter *phy_parameter, bool update) 267 - { 268 - struct phy_cfg *phy_cfg; 269 - struct phy_reg *phy_reg; 270 - struct phy_data *phy_data_page; 271 - struct phy_data *phy_data; 272 - u8 addr, data; 273 - int offset = 4; 274 - s32 dc_disconnect_mask; 275 - int i; 276 - 277 - phy_cfg = rtk_phy->phy_cfg; 278 - phy_reg = &phy_parameter->phy_reg; 279 - 280 - /* Set page 0 */ 281 - phy_data_page = phy_cfg->page0; 282 - rtk_phy_set_page(phy_reg, 0); 283 - 284 - i = page_addr_to_array_index(PAGE0_0XE4); 285 - phy_data = phy_data_page + i; 286 - if (!phy_data->addr) { 287 - phy_data->addr = PAGE0_0XE4; 288 - phy_data->data = rtk_phy_read(phy_reg, PAGE0_0XE4); 289 - } 290 - 291 - addr = phy_data->addr; 292 - data = phy_data->data; 293 - dc_disconnect_mask = phy_cfg->dc_disconnect_mask; 294 - 295 - if (update) 296 - data = __updated_dc_disconnect_level_page0_0xe4(phy_cfg, phy_parameter, data); 297 - else 298 - data = (data & ~(dc_disconnect_mask << offset)) | 299 - (DEFAULT_DC_DISCONNECTION_VALUE << offset); 300 - 301 - if (rtk_phy_write(phy_reg, addr, data)) 302 - dev_err(rtk_phy->dev, 303 - "%s: Error to set page1 parameter addr=0x%x value=0x%x\n", 304 - __func__, addr, data); 305 - } 306 - 307 - static u8 __updated_dc_disconnect_level_page1_0xe2(struct phy_cfg *phy_cfg, 308 - struct phy_parameter *phy_parameter, u8 data) 309 - { 310 - u8 ret; 311 - s32 val; 312 - s32 dc_disconnect_mask = phy_cfg->dc_disconnect_mask; 313 - 314 - if (phy_cfg->check_efuse_version == CHECK_EFUSE_V1) { 315 - val = (s32)(data & dc_disconnect_mask) 316 - + phy_parameter->efuse_usb_dc_dis 317 - + phy_parameter->disconnection_compensate; 318 - } else { /* 
for CHECK_EFUSE_V2 or no efuse */ 319 - if (phy_parameter->efuse_usb_dc_dis) 320 - val = (s32)(phy_parameter->efuse_usb_dc_dis + 321 - phy_parameter->disconnection_compensate); 322 - else 323 - val = (s32)((data & dc_disconnect_mask) + 324 - phy_parameter->disconnection_compensate); 325 - } 326 - 327 - if (val > dc_disconnect_mask) 328 - val = dc_disconnect_mask; 329 - else if (val < 0) 330 - val = 0; 331 - 332 - ret = (data & (~dc_disconnect_mask)) | (val & dc_disconnect_mask); 333 - 334 - return ret; 335 - } 336 - 337 - /* updated disconnect level at page1 */ 338 - static void update_dc_disconnect_level_at_page1(struct rtk_phy *rtk_phy, 339 - struct phy_parameter *phy_parameter, bool update) 340 - { 341 - struct phy_cfg *phy_cfg; 342 - struct phy_data *phy_data_page; 343 - struct phy_data *phy_data; 344 - struct phy_reg *phy_reg; 345 - u8 addr, data; 346 - s32 dc_disconnect_mask; 347 - int i; 348 - 349 - phy_cfg = rtk_phy->phy_cfg; 350 - phy_reg = &phy_parameter->phy_reg; 351 - 352 - /* Set page 1 */ 353 - phy_data_page = phy_cfg->page1; 354 - rtk_phy_set_page(phy_reg, 1); 355 - 356 - i = page_addr_to_array_index(PAGE1_0XE2); 357 - phy_data = phy_data_page + i; 358 - if (!phy_data->addr) { 359 - phy_data->addr = PAGE1_0XE2; 360 - phy_data->data = rtk_phy_read(phy_reg, PAGE1_0XE2); 361 - } 362 - 363 - addr = phy_data->addr; 364 - data = phy_data->data; 365 - dc_disconnect_mask = phy_cfg->dc_disconnect_mask; 366 - 367 - if (update) 368 - data = __updated_dc_disconnect_level_page1_0xe2(phy_cfg, phy_parameter, data); 369 - else 370 - data = (data & ~dc_disconnect_mask) | DEFAULT_DC_DISCONNECTION_VALUE; 371 - 372 - if (rtk_phy_write(phy_reg, addr, data)) 373 - dev_err(rtk_phy->dev, 374 - "%s: Error to set page1 parameter addr=0x%x value=0x%x\n", 375 - __func__, addr, data); 376 - } 377 - 378 - static void update_dc_disconnect_level(struct rtk_phy *rtk_phy, 379 - struct phy_parameter *phy_parameter, bool update) 380 - { 381 - struct phy_cfg *phy_cfg = rtk_phy->phy_cfg; 
382 - 383 - if (phy_cfg->usb_dc_disconnect_at_page0) 384 - update_dc_disconnect_level_at_page0(rtk_phy, phy_parameter, update); 385 - else 386 - update_dc_disconnect_level_at_page1(rtk_phy, phy_parameter, update); 387 - } 388 - 389 - static u8 __update_dc_driving_page0_0xe4(struct phy_cfg *phy_cfg, 390 - struct phy_parameter *phy_parameter, u8 data) 391 - { 392 - s32 driving_level_compensate = phy_parameter->driving_level_compensate; 393 - s32 dc_driving_mask = phy_cfg->dc_driving_mask; 394 - s32 val; 395 - u8 ret; 396 - 397 - if (phy_cfg->check_efuse_version == CHECK_EFUSE_V1) { 398 - val = (s32)(data & dc_driving_mask) + driving_level_compensate 399 - + phy_parameter->efuse_usb_dc_cal; 400 - } else { /* for CHECK_EFUSE_V2 or no efuse */ 401 - if (phy_parameter->efuse_usb_dc_cal) 402 - val = (s32)((phy_parameter->efuse_usb_dc_cal & dc_driving_mask) 403 - + driving_level_compensate); 404 - else 405 - val = (s32)(data & dc_driving_mask); 406 - } 407 - 408 - if (val > dc_driving_mask) 409 - val = dc_driving_mask; 410 - else if (val < 0) 411 - val = 0; 412 - 413 - ret = (data & (~dc_driving_mask)) | (val & dc_driving_mask); 414 - 415 - return ret; 416 - } 417 - 418 - static void update_dc_driving_level(struct rtk_phy *rtk_phy, 419 - struct phy_parameter *phy_parameter) 420 - { 421 - struct phy_cfg *phy_cfg; 422 - struct phy_reg *phy_reg; 423 - 424 - phy_reg = &phy_parameter->phy_reg; 425 - phy_cfg = rtk_phy->phy_cfg; 426 - if (!phy_cfg->page0[4].addr) { 427 - rtk_phy_set_page(phy_reg, 0); 428 - phy_cfg->page0[4].addr = PAGE0_0XE4; 429 - phy_cfg->page0[4].data = rtk_phy_read(phy_reg, PAGE0_0XE4); 430 - } 431 - 432 - if (phy_parameter->driving_level != DEFAULT_DC_DRIVING_VALUE) { 433 - u32 dc_driving_mask; 434 - u8 driving_level; 435 - u8 data; 436 - 437 - data = phy_cfg->page0[4].data; 438 - dc_driving_mask = phy_cfg->dc_driving_mask; 439 - driving_level = data & dc_driving_mask; 440 - 441 - dev_dbg(rtk_phy->dev, "%s driving_level=%d => dts driving_level=%d\n", 442 - 
__func__, driving_level, phy_parameter->driving_level); 443 - 444 - phy_cfg->page0[4].data = (data & (~dc_driving_mask)) | 445 - (phy_parameter->driving_level & dc_driving_mask); 446 - } 447 - 448 - phy_cfg->page0[4].data = __update_dc_driving_page0_0xe4(phy_cfg, 449 - phy_parameter, 450 - phy_cfg->page0[4].data); 451 - } 452 - 453 - static void update_hs_clk_select(struct rtk_phy *rtk_phy, 454 - struct phy_parameter *phy_parameter) 455 - { 456 - struct phy_cfg *phy_cfg; 457 - struct phy_reg *phy_reg; 458 - 459 - phy_cfg = rtk_phy->phy_cfg; 460 - phy_reg = &phy_parameter->phy_reg; 461 - 462 - if (phy_parameter->inverse_hstx_sync_clock) { 463 - if (!phy_cfg->page0[6].addr) { 464 - rtk_phy_set_page(phy_reg, 0); 465 - phy_cfg->page0[6].addr = PAGE0_0XE6; 466 - phy_cfg->page0[6].data = rtk_phy_read(phy_reg, PAGE0_0XE6); 467 - } 468 - 469 - phy_cfg->page0[6].data = phy_cfg->page0[6].data | HS_CLK_SELECT; 470 - } 471 - } 472 - 473 - static void do_rtk_phy_toggle(struct rtk_phy *rtk_phy, 474 - int index, bool connect) 475 - { 476 - struct phy_parameter *phy_parameter; 477 - struct phy_cfg *phy_cfg; 478 - struct phy_reg *phy_reg; 479 - struct phy_data *phy_data_page; 480 - u8 addr, data; 481 - int i; 482 - 483 - phy_cfg = rtk_phy->phy_cfg; 484 - phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index]; 485 - phy_reg = &phy_parameter->phy_reg; 486 - 487 - if (!phy_cfg->do_toggle) 488 - goto out; 489 - 490 - if (phy_cfg->is_double_sensitivity_mode) 491 - goto do_toggle_driving; 492 - 493 - /* Set page 0 */ 494 - rtk_phy_set_page(phy_reg, 0); 495 - 496 - addr = PAGE0_0XE7; 497 - data = rtk_phy_read(phy_reg, addr); 498 - 499 - if (connect) 500 - rtk_phy_write(phy_reg, addr, data & (~SENSITIVITY_CTRL)); 501 - else 502 - rtk_phy_write(phy_reg, addr, data | (SENSITIVITY_CTRL)); 503 - 504 - do_toggle_driving: 505 - 506 - if (!phy_cfg->do_toggle_driving) 507 - goto do_toggle; 508 - 509 - /* Page 0 addr 0xE4 driving capability */ 510 - 511 - /* Set page 0 */ 512 - 
phy_data_page = phy_cfg->page0; 513 - rtk_phy_set_page(phy_reg, 0); 514 - 515 - i = page_addr_to_array_index(PAGE0_0XE4); 516 - addr = phy_data_page[i].addr; 517 - data = phy_data_page[i].data; 518 - 519 - if (connect) { 520 - rtk_phy_write(phy_reg, addr, data); 521 - } else { 522 - u8 value; 523 - s32 tmp; 524 - s32 driving_updated = 525 - phy_cfg->driving_updated_for_dev_dis; 526 - s32 dc_driving_mask = phy_cfg->dc_driving_mask; 527 - 528 - tmp = (s32)(data & dc_driving_mask) + driving_updated; 529 - 530 - if (tmp > dc_driving_mask) 531 - tmp = dc_driving_mask; 532 - else if (tmp < 0) 533 - tmp = 0; 534 - 535 - value = (data & (~dc_driving_mask)) | (tmp & dc_driving_mask); 536 - 537 - rtk_phy_write(phy_reg, addr, value); 538 - } 539 - 540 - do_toggle: 541 - /* restore dc disconnect level before toggle */ 542 - update_dc_disconnect_level(rtk_phy, phy_parameter, false); 543 - 544 - /* Set page 1 */ 545 - rtk_phy_set_page(phy_reg, 1); 546 - 547 - addr = PAGE1_0XE0; 548 - data = rtk_phy_read(phy_reg, addr); 549 - 550 - rtk_phy_write(phy_reg, addr, data & 551 - (~ENABLE_AUTO_SENSITIVITY_CALIBRATION)); 552 - mdelay(1); 553 - rtk_phy_write(phy_reg, addr, data | 554 - (ENABLE_AUTO_SENSITIVITY_CALIBRATION)); 555 - 556 - /* update dc disconnect level after toggle */ 557 - update_dc_disconnect_level(rtk_phy, phy_parameter, true); 558 - 559 - out: 560 - return; 561 - } 562 - 563 - static int do_rtk_phy_init(struct rtk_phy *rtk_phy, int index) 564 - { 565 - struct phy_parameter *phy_parameter; 566 - struct phy_cfg *phy_cfg; 567 - struct phy_data *phy_data_page; 568 - struct phy_reg *phy_reg; 569 - int i; 570 - 571 - phy_cfg = rtk_phy->phy_cfg; 572 - phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index]; 573 - phy_reg = &phy_parameter->phy_reg; 574 - 575 - if (phy_cfg->use_default_parameter) { 576 - dev_dbg(rtk_phy->dev, "%s phy#%d use default parameter\n", 577 - __func__, index); 578 - goto do_toggle; 579 - } 580 - 581 - /* Set page 0 */ 582 - phy_data_page 
= phy_cfg->page0; 583 - rtk_phy_set_page(phy_reg, 0); 584 - 585 - for (i = 0; i < phy_cfg->page0_size; i++) { 586 - struct phy_data *phy_data = phy_data_page + i; 587 - u8 addr = phy_data->addr; 588 - u8 data = phy_data->data; 589 - 590 - if (!addr) 591 - continue; 592 - 593 - if (rtk_phy_write(phy_reg, addr, data)) { 594 - dev_err(rtk_phy->dev, 595 - "%s: Error to set page0 parameter addr=0x%x value=0x%x\n", 596 - __func__, addr, data); 597 - return -EINVAL; 598 - } 599 - } 600 - 601 - /* Set page 1 */ 602 - phy_data_page = phy_cfg->page1; 603 - rtk_phy_set_page(phy_reg, 1); 604 - 605 - for (i = 0; i < phy_cfg->page1_size; i++) { 606 - struct phy_data *phy_data = phy_data_page + i; 607 - u8 addr = phy_data->addr; 608 - u8 data = phy_data->data; 609 - 610 - if (!addr) 611 - continue; 612 - 613 - if (rtk_phy_write(phy_reg, addr, data)) { 614 - dev_err(rtk_phy->dev, 615 - "%s: Error to set page1 parameter addr=0x%x value=0x%x\n", 616 - __func__, addr, data); 617 - return -EINVAL; 618 - } 619 - } 620 - 621 - if (phy_cfg->page2_size == 0) 622 - goto do_toggle; 623 - 624 - /* Set page 2 */ 625 - phy_data_page = phy_cfg->page2; 626 - rtk_phy_set_page(phy_reg, 2); 627 - 628 - for (i = 0; i < phy_cfg->page2_size; i++) { 629 - struct phy_data *phy_data = phy_data_page + i; 630 - u8 addr = phy_data->addr; 631 - u8 data = phy_data->data; 632 - 633 - if (!addr) 634 - continue; 635 - 636 - if (rtk_phy_write(phy_reg, addr, data)) { 637 - dev_err(rtk_phy->dev, 638 - "%s: Error to set page2 parameter addr=0x%x value=0x%x\n", 639 - __func__, addr, data); 640 - return -EINVAL; 641 - } 642 - } 643 - 644 - do_toggle: 645 - do_rtk_phy_toggle(rtk_phy, index, false); 646 - 647 - return 0; 648 - } 649 - 650 - static int rtk_phy_init(struct phy *phy) 651 - { 652 - struct rtk_phy *rtk_phy = phy_get_drvdata(phy); 653 - unsigned long phy_init_time = jiffies; 654 - int i, ret = 0; 655 - 656 - if (!rtk_phy) 657 - return -EINVAL; 658 - 659 - for (i = 0; i < rtk_phy->num_phy; i++) 660 - ret = 
do_rtk_phy_init(rtk_phy, i); 661 - 662 - dev_dbg(rtk_phy->dev, "Initialized RTK USB 2.0 PHY (take %dms)\n", 663 - jiffies_to_msecs(jiffies - phy_init_time)); 664 - return ret; 665 - } 666 - 667 - static int rtk_phy_exit(struct phy *phy) 668 - { 669 - return 0; 670 - } 671 - 672 - static const struct phy_ops ops = { 673 - .init = rtk_phy_init, 674 - .exit = rtk_phy_exit, 675 - .owner = THIS_MODULE, 676 - }; 677 - 678 - static void rtk_phy_toggle(struct usb_phy *usb2_phy, bool connect, int port) 679 - { 680 - int index = port; 681 - struct rtk_phy *rtk_phy = NULL; 682 - 683 - rtk_phy = dev_get_drvdata(usb2_phy->dev); 684 - 685 - if (index > rtk_phy->num_phy) { 686 - dev_err(rtk_phy->dev, "%s: The port=%d is not in usb phy (num_phy=%d)\n", 687 - __func__, index, rtk_phy->num_phy); 688 - return; 689 - } 690 - 691 - do_rtk_phy_toggle(rtk_phy, index, connect); 692 - } 693 - 694 - static int rtk_phy_notify_port_status(struct usb_phy *x, int port, 695 - u16 portstatus, u16 portchange) 696 - { 697 - bool connect = false; 698 - 699 - pr_debug("%s port=%d portstatus=0x%x portchange=0x%x\n", 700 - __func__, port, (int)portstatus, (int)portchange); 701 - if (portstatus & USB_PORT_STAT_CONNECTION) 702 - connect = true; 703 - 704 - if (portchange & USB_PORT_STAT_C_CONNECTION) 705 - rtk_phy_toggle(x, connect, port); 706 - 707 - return 0; 708 - } 709 - 710 - #ifdef CONFIG_DEBUG_FS 711 - static struct dentry *create_phy_debug_root(void) 712 - { 713 - struct dentry *phy_debug_root; 714 - 715 - phy_debug_root = debugfs_lookup("phy", usb_debug_root); 716 - if (!phy_debug_root) 717 - phy_debug_root = debugfs_create_dir("phy", usb_debug_root); 718 - 719 - return phy_debug_root; 720 - } 721 - 722 - static int rtk_usb2_parameter_show(struct seq_file *s, void *unused) 723 - { 724 - struct rtk_phy *rtk_phy = s->private; 725 - struct phy_cfg *phy_cfg; 726 - int i, index; 727 - 728 - phy_cfg = rtk_phy->phy_cfg; 729 - 730 - seq_puts(s, "Property:\n"); 731 - seq_printf(s, " check_efuse: %s\n", 
732 - phy_cfg->check_efuse ? "Enable" : "Disable"); 733 - seq_printf(s, " check_efuse_version: %d\n", 734 - phy_cfg->check_efuse_version); 735 - seq_printf(s, " efuse_dc_driving_rate: %d\n", 736 - phy_cfg->efuse_dc_driving_rate); 737 - seq_printf(s, " dc_driving_mask: 0x%x\n", 738 - phy_cfg->dc_driving_mask); 739 - seq_printf(s, " efuse_dc_disconnect_rate: %d\n", 740 - phy_cfg->efuse_dc_disconnect_rate); 741 - seq_printf(s, " dc_disconnect_mask: 0x%x\n", 742 - phy_cfg->dc_disconnect_mask); 743 - seq_printf(s, " usb_dc_disconnect_at_page0: %s\n", 744 - phy_cfg->usb_dc_disconnect_at_page0 ? "true" : "false"); 745 - seq_printf(s, " do_toggle: %s\n", 746 - phy_cfg->do_toggle ? "Enable" : "Disable"); 747 - seq_printf(s, " do_toggle_driving: %s\n", 748 - phy_cfg->do_toggle_driving ? "Enable" : "Disable"); 749 - seq_printf(s, " driving_updated_for_dev_dis: 0x%x\n", 750 - phy_cfg->driving_updated_for_dev_dis); 751 - seq_printf(s, " use_default_parameter: %s\n", 752 - phy_cfg->use_default_parameter ? "Enable" : "Disable"); 753 - seq_printf(s, " is_double_sensitivity_mode: %s\n", 754 - phy_cfg->is_double_sensitivity_mode ? 
"Enable" : "Disable"); 755 - 756 - for (index = 0; index < rtk_phy->num_phy; index++) { 757 - struct phy_parameter *phy_parameter; 758 - struct phy_reg *phy_reg; 759 - struct phy_data *phy_data_page; 760 - 761 - phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index]; 762 - phy_reg = &phy_parameter->phy_reg; 763 - 764 - seq_printf(s, "PHY %d:\n", index); 765 - 766 - seq_puts(s, "Page 0:\n"); 767 - /* Set page 0 */ 768 - phy_data_page = phy_cfg->page0; 769 - rtk_phy_set_page(phy_reg, 0); 770 - 771 - for (i = 0; i < phy_cfg->page0_size; i++) { 772 - struct phy_data *phy_data = phy_data_page + i; 773 - u8 addr = array_index_to_page_addr(i); 774 - u8 data = phy_data->data; 775 - u8 value = rtk_phy_read(phy_reg, addr); 776 - 777 - if (phy_data->addr) 778 - seq_printf(s, " Page 0: addr=0x%x data=0x%02x ==> read value=0x%02x\n", 779 - addr, data, value); 780 - else 781 - seq_printf(s, " Page 0: addr=0x%x data=none ==> read value=0x%02x\n", 782 - addr, value); 783 - } 784 - 785 - seq_puts(s, "Page 1:\n"); 786 - /* Set page 1 */ 787 - phy_data_page = phy_cfg->page1; 788 - rtk_phy_set_page(phy_reg, 1); 789 - 790 - for (i = 0; i < phy_cfg->page1_size; i++) { 791 - struct phy_data *phy_data = phy_data_page + i; 792 - u8 addr = array_index_to_page_addr(i); 793 - u8 data = phy_data->data; 794 - u8 value = rtk_phy_read(phy_reg, addr); 795 - 796 - if (phy_data->addr) 797 - seq_printf(s, " Page 1: addr=0x%x data=0x%02x ==> read value=0x%02x\n", 798 - addr, data, value); 799 - else 800 - seq_printf(s, " Page 1: addr=0x%x data=none ==> read value=0x%02x\n", 801 - addr, value); 802 - } 803 - 804 - if (phy_cfg->page2_size == 0) 805 - goto out; 806 - 807 - seq_puts(s, "Page 2:\n"); 808 - /* Set page 2 */ 809 - phy_data_page = phy_cfg->page2; 810 - rtk_phy_set_page(phy_reg, 2); 811 - 812 - for (i = 0; i < phy_cfg->page2_size; i++) { 813 - struct phy_data *phy_data = phy_data_page + i; 814 - u8 addr = array_index_to_page_addr(i); 815 - u8 data = phy_data->data; 816 - u8 
value = rtk_phy_read(phy_reg, addr); 817 - 818 - if (phy_data->addr) 819 - seq_printf(s, " Page 2: addr=0x%x data=0x%02x ==> read value=0x%02x\n", 820 - addr, data, value); 821 - else 822 - seq_printf(s, " Page 2: addr=0x%x data=none ==> read value=0x%02x\n", 823 - addr, value); 824 - } 825 - 826 - out: 827 - seq_puts(s, "PHY Property:\n"); 828 - seq_printf(s, " efuse_usb_dc_cal: %d\n", 829 - (int)phy_parameter->efuse_usb_dc_cal); 830 - seq_printf(s, " efuse_usb_dc_dis: %d\n", 831 - (int)phy_parameter->efuse_usb_dc_dis); 832 - seq_printf(s, " inverse_hstx_sync_clock: %s\n", 833 - phy_parameter->inverse_hstx_sync_clock ? "Enable" : "Disable"); 834 - seq_printf(s, " driving_level: %d\n", 835 - phy_parameter->driving_level); 836 - seq_printf(s, " driving_level_compensate: %d\n", 837 - phy_parameter->driving_level_compensate); 838 - seq_printf(s, " disconnection_compensate: %d\n", 839 - phy_parameter->disconnection_compensate); 840 - } 841 - 842 - return 0; 843 - } 844 - DEFINE_SHOW_ATTRIBUTE(rtk_usb2_parameter); 845 - 846 - static inline void create_debug_files(struct rtk_phy *rtk_phy) 847 - { 848 - struct dentry *phy_debug_root = NULL; 849 - 850 - phy_debug_root = create_phy_debug_root(); 851 - if (!phy_debug_root) 852 - return; 853 - 854 - rtk_phy->debug_dir = debugfs_create_dir(dev_name(rtk_phy->dev), 855 - phy_debug_root); 856 - 857 - debugfs_create_file("parameter", 0444, rtk_phy->debug_dir, rtk_phy, 858 - &rtk_usb2_parameter_fops); 859 - 860 - return; 861 - } 862 - 863 - static inline void remove_debug_files(struct rtk_phy *rtk_phy) 864 - { 865 - debugfs_remove_recursive(rtk_phy->debug_dir); 866 - } 867 - #else 868 - static inline void create_debug_files(struct rtk_phy *rtk_phy) { } 869 - static inline void remove_debug_files(struct rtk_phy *rtk_phy) { } 870 - #endif /* CONFIG_DEBUG_FS */ 871 - 872 - static int get_phy_data_by_efuse(struct rtk_phy *rtk_phy, 873 - struct phy_parameter *phy_parameter, int index) 874 - { 875 - struct phy_cfg *phy_cfg = 
rtk_phy->phy_cfg; 876 - u8 value = 0; 877 - struct nvmem_cell *cell; 878 - struct soc_device_attribute rtk_soc_groot[] = { 879 - { .family = "Realtek Groot",}, 880 - { /* empty */ } }; 881 - 882 - if (!phy_cfg->check_efuse) 883 - goto out; 884 - 885 - /* Read efuse for usb dc cal */ 886 - cell = nvmem_cell_get(rtk_phy->dev, "usb-dc-cal"); 887 - if (IS_ERR(cell)) { 888 - dev_dbg(rtk_phy->dev, "%s no usb-dc-cal: %ld\n", 889 - __func__, PTR_ERR(cell)); 890 - } else { 891 - unsigned char *buf; 892 - size_t buf_size; 893 - 894 - buf = nvmem_cell_read(cell, &buf_size); 895 - if (!IS_ERR(buf)) { 896 - value = buf[0] & phy_cfg->dc_driving_mask; 897 - kfree(buf); 898 - } 899 - nvmem_cell_put(cell); 900 - } 901 - 902 - if (phy_cfg->check_efuse_version == CHECK_EFUSE_V1) { 903 - int rate = phy_cfg->efuse_dc_driving_rate; 904 - 905 - if (value <= EFUS_USB_DC_CAL_MAX) 906 - phy_parameter->efuse_usb_dc_cal = (int8_t)(value * rate); 907 - else 908 - phy_parameter->efuse_usb_dc_cal = -(int8_t) 909 - ((EFUS_USB_DC_CAL_MAX & value) * rate); 910 - 911 - if (soc_device_match(rtk_soc_groot)) { 912 - dev_dbg(rtk_phy->dev, "For groot IC we need a workaround to adjust efuse_usb_dc_cal\n"); 913 - 914 - /* We don't multiple dc_cal_rate=2 for positive dc cal compensate */ 915 - if (value <= EFUS_USB_DC_CAL_MAX) 916 - phy_parameter->efuse_usb_dc_cal = (int8_t)(value); 917 - 918 - /* We set max dc cal compensate is 0x8 if otp is 0x7 */ 919 - if (value == 0x7) 920 - phy_parameter->efuse_usb_dc_cal = (int8_t)(value + 1); 921 - } 922 - } else { /* for CHECK_EFUSE_V2 */ 923 - phy_parameter->efuse_usb_dc_cal = value & phy_cfg->dc_driving_mask; 924 - } 925 - 926 - /* Read efuse for usb dc disconnect level */ 927 - value = 0; 928 - cell = nvmem_cell_get(rtk_phy->dev, "usb-dc-dis"); 929 - if (IS_ERR(cell)) { 930 - dev_dbg(rtk_phy->dev, "%s no usb-dc-dis: %ld\n", 931 - __func__, PTR_ERR(cell)); 932 - } else { 933 - unsigned char *buf; 934 - size_t buf_size; 935 - 936 - buf = nvmem_cell_read(cell, 
&buf_size); 937 - if (!IS_ERR(buf)) { 938 - value = buf[0] & phy_cfg->dc_disconnect_mask; 939 - kfree(buf); 940 - } 941 - nvmem_cell_put(cell); 942 - } 943 - 944 - if (phy_cfg->check_efuse_version == CHECK_EFUSE_V1) { 945 - int rate = phy_cfg->efuse_dc_disconnect_rate; 946 - 947 - if (value <= EFUS_USB_DC_DIS_MAX) 948 - phy_parameter->efuse_usb_dc_dis = (int8_t)(value * rate); 949 - else 950 - phy_parameter->efuse_usb_dc_dis = -(int8_t) 951 - ((EFUS_USB_DC_DIS_MAX & value) * rate); 952 - } else { /* for CHECK_EFUSE_V2 */ 953 - phy_parameter->efuse_usb_dc_dis = value & phy_cfg->dc_disconnect_mask; 954 - } 955 - 956 - out: 957 - return 0; 958 - } 959 - 960 - static int parse_phy_data(struct rtk_phy *rtk_phy) 961 - { 962 - struct device *dev = rtk_phy->dev; 963 - struct device_node *np = dev->of_node; 964 - struct phy_parameter *phy_parameter; 965 - int ret = 0; 966 - int index; 967 - 968 - rtk_phy->phy_parameter = devm_kzalloc(dev, sizeof(struct phy_parameter) * 969 - rtk_phy->num_phy, GFP_KERNEL); 970 - if (!rtk_phy->phy_parameter) 971 - return -ENOMEM; 972 - 973 - for (index = 0; index < rtk_phy->num_phy; index++) { 974 - phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index]; 975 - 976 - phy_parameter->phy_reg.reg_wrap_vstatus = of_iomap(np, 0); 977 - phy_parameter->phy_reg.reg_gusb2phyacc0 = of_iomap(np, 1) + index; 978 - phy_parameter->phy_reg.vstatus_index = index; 979 - 980 - if (of_property_read_bool(np, "realtek,inverse-hstx-sync-clock")) 981 - phy_parameter->inverse_hstx_sync_clock = true; 982 - else 983 - phy_parameter->inverse_hstx_sync_clock = false; 984 - 985 - if (of_property_read_u32_index(np, "realtek,driving-level", 986 - index, &phy_parameter->driving_level)) 987 - phy_parameter->driving_level = DEFAULT_DC_DRIVING_VALUE; 988 - 989 - if (of_property_read_u32_index(np, "realtek,driving-level-compensate", 990 - index, &phy_parameter->driving_level_compensate)) 991 - phy_parameter->driving_level_compensate = 0; 992 - 993 - if 
(of_property_read_u32_index(np, "realtek,disconnection-compensate", 994 - index, &phy_parameter->disconnection_compensate)) 995 - phy_parameter->disconnection_compensate = 0; 996 - 997 - get_phy_data_by_efuse(rtk_phy, phy_parameter, index); 998 - 999 - update_dc_driving_level(rtk_phy, phy_parameter); 1000 - 1001 - update_hs_clk_select(rtk_phy, phy_parameter); 1002 - } 1003 - 1004 - return ret; 1005 - } 1006 - 1007 - static int rtk_usb2phy_probe(struct platform_device *pdev) 1008 - { 1009 - struct rtk_phy *rtk_phy; 1010 - struct device *dev = &pdev->dev; 1011 - struct phy *generic_phy; 1012 - struct phy_provider *phy_provider; 1013 - const struct phy_cfg *phy_cfg; 1014 - int ret = 0; 1015 - 1016 - phy_cfg = of_device_get_match_data(dev); 1017 - if (!phy_cfg) { 1018 - dev_err(dev, "phy config are not assigned!\n"); 1019 - return -EINVAL; 1020 - } 1021 - 1022 - rtk_phy = devm_kzalloc(dev, sizeof(*rtk_phy), GFP_KERNEL); 1023 - if (!rtk_phy) 1024 - return -ENOMEM; 1025 - 1026 - rtk_phy->dev = &pdev->dev; 1027 - rtk_phy->phy.dev = rtk_phy->dev; 1028 - rtk_phy->phy.label = "rtk-usb2phy"; 1029 - rtk_phy->phy.notify_port_status = rtk_phy_notify_port_status; 1030 - 1031 - rtk_phy->phy_cfg = devm_kzalloc(dev, sizeof(*phy_cfg), GFP_KERNEL); 1032 - 1033 - memcpy(rtk_phy->phy_cfg, phy_cfg, sizeof(*phy_cfg)); 1034 - 1035 - rtk_phy->num_phy = phy_cfg->num_phy; 1036 - 1037 - ret = parse_phy_data(rtk_phy); 1038 - if (ret) 1039 - goto err; 1040 - 1041 - platform_set_drvdata(pdev, rtk_phy); 1042 - 1043 - generic_phy = devm_phy_create(rtk_phy->dev, NULL, &ops); 1044 - if (IS_ERR(generic_phy)) 1045 - return PTR_ERR(generic_phy); 1046 - 1047 - phy_set_drvdata(generic_phy, rtk_phy); 1048 - 1049 - phy_provider = devm_of_phy_provider_register(rtk_phy->dev, 1050 - of_phy_simple_xlate); 1051 - if (IS_ERR(phy_provider)) 1052 - return PTR_ERR(phy_provider); 1053 - 1054 - ret = usb_add_phy_dev(&rtk_phy->phy); 1055 - if (ret) 1056 - goto err; 1057 - 1058 - create_debug_files(rtk_phy); 1059 - 1060 
- err: 1061 - return ret; 1062 - } 1063 - 1064 - static void rtk_usb2phy_remove(struct platform_device *pdev) 1065 - { 1066 - struct rtk_phy *rtk_phy = platform_get_drvdata(pdev); 1067 - 1068 - remove_debug_files(rtk_phy); 1069 - 1070 - usb_remove_phy(&rtk_phy->phy); 1071 - } 1072 - 1073 - static const struct phy_cfg rtd1295_phy_cfg = { 1074 - .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE, 1075 - .page0 = { [0] = {0xe0, 0x90}, 1076 - [3] = {0xe3, 0x3a}, 1077 - [4] = {0xe4, 0x68}, 1078 - [6] = {0xe6, 0x91}, 1079 - [13] = {0xf5, 0x81}, 1080 - [15] = {0xf7, 0x02}, }, 1081 - .page1_size = 8, 1082 - .page1 = { /* default parameter */ }, 1083 - .page2_size = 0, 1084 - .page2 = { /* no parameter */ }, 1085 - .num_phy = 1, 1086 - .check_efuse = false, 1087 - .check_efuse_version = CHECK_EFUSE_V1, 1088 - .efuse_dc_driving_rate = 1, 1089 - .dc_driving_mask = 0xf, 1090 - .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE, 1091 - .dc_disconnect_mask = 0xf, 1092 - .usb_dc_disconnect_at_page0 = true, 1093 - .do_toggle = true, 1094 - .do_toggle_driving = false, 1095 - .driving_updated_for_dev_dis = 0xf, 1096 - .use_default_parameter = false, 1097 - .is_double_sensitivity_mode = false, 1098 - }; 1099 - 1100 - static const struct phy_cfg rtd1395_phy_cfg = { 1101 - .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE, 1102 - .page0 = { [4] = {0xe4, 0xac}, 1103 - [13] = {0xf5, 0x00}, 1104 - [15] = {0xf7, 0x02}, }, 1105 - .page1_size = 8, 1106 - .page1 = { /* default parameter */ }, 1107 - .page2_size = 0, 1108 - .page2 = { /* no parameter */ }, 1109 - .num_phy = 1, 1110 - .check_efuse = false, 1111 - .check_efuse_version = CHECK_EFUSE_V1, 1112 - .efuse_dc_driving_rate = 1, 1113 - .dc_driving_mask = 0xf, 1114 - .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE, 1115 - .dc_disconnect_mask = 0xf, 1116 - .usb_dc_disconnect_at_page0 = true, 1117 - .do_toggle = true, 1118 - .do_toggle_driving = false, 1119 - .driving_updated_for_dev_dis = 0xf, 1120 - .use_default_parameter = false, 1121 - 
.is_double_sensitivity_mode = false, 1122 - }; 1123 - 1124 - static const struct phy_cfg rtd1395_phy_cfg_2port = { 1125 - .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE, 1126 - .page0 = { [4] = {0xe4, 0xac}, 1127 - [13] = {0xf5, 0x00}, 1128 - [15] = {0xf7, 0x02}, }, 1129 - .page1_size = 8, 1130 - .page1 = { /* default parameter */ }, 1131 - .page2_size = 0, 1132 - .page2 = { /* no parameter */ }, 1133 - .num_phy = 2, 1134 - .check_efuse = false, 1135 - .check_efuse_version = CHECK_EFUSE_V1, 1136 - .efuse_dc_driving_rate = 1, 1137 - .dc_driving_mask = 0xf, 1138 - .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE, 1139 - .dc_disconnect_mask = 0xf, 1140 - .usb_dc_disconnect_at_page0 = true, 1141 - .do_toggle = true, 1142 - .do_toggle_driving = false, 1143 - .driving_updated_for_dev_dis = 0xf, 1144 - .use_default_parameter = false, 1145 - .is_double_sensitivity_mode = false, 1146 - }; 1147 - 1148 - static const struct phy_cfg rtd1619_phy_cfg = { 1149 - .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE, 1150 - .page0 = { [4] = {0xe4, 0x68}, }, 1151 - .page1_size = 8, 1152 - .page1 = { /* default parameter */ }, 1153 - .page2_size = 0, 1154 - .page2 = { /* no parameter */ }, 1155 - .num_phy = 1, 1156 - .check_efuse = true, 1157 - .check_efuse_version = CHECK_EFUSE_V1, 1158 - .efuse_dc_driving_rate = 1, 1159 - .dc_driving_mask = 0xf, 1160 - .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE, 1161 - .dc_disconnect_mask = 0xf, 1162 - .usb_dc_disconnect_at_page0 = true, 1163 - .do_toggle = true, 1164 - .do_toggle_driving = false, 1165 - .driving_updated_for_dev_dis = 0xf, 1166 - .use_default_parameter = false, 1167 - .is_double_sensitivity_mode = false, 1168 - }; 1169 - 1170 - static const struct phy_cfg rtd1319_phy_cfg = { 1171 - .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE, 1172 - .page0 = { [0] = {0xe0, 0x18}, 1173 - [4] = {0xe4, 0x6a}, 1174 - [7] = {0xe7, 0x71}, 1175 - [13] = {0xf5, 0x15}, 1176 - [15] = {0xf7, 0x32}, }, 1177 - .page1_size = 8, 1178 - .page1 = { [3] = {0xe3, 0x44}, }, 1179 
- .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE, 1180 - .page2 = { [0] = {0xe0, 0x01}, }, 1181 - .num_phy = 1, 1182 - .check_efuse = true, 1183 - .check_efuse_version = CHECK_EFUSE_V1, 1184 - .efuse_dc_driving_rate = 1, 1185 - .dc_driving_mask = 0xf, 1186 - .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE, 1187 - .dc_disconnect_mask = 0xf, 1188 - .usb_dc_disconnect_at_page0 = true, 1189 - .do_toggle = true, 1190 - .do_toggle_driving = true, 1191 - .driving_updated_for_dev_dis = 0xf, 1192 - .use_default_parameter = false, 1193 - .is_double_sensitivity_mode = true, 1194 - }; 1195 - 1196 - static const struct phy_cfg rtd1312c_phy_cfg = { 1197 - .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE, 1198 - .page0 = { [0] = {0xe0, 0x14}, 1199 - [4] = {0xe4, 0x67}, 1200 - [5] = {0xe5, 0x55}, }, 1201 - .page1_size = 8, 1202 - .page1 = { [3] = {0xe3, 0x23}, 1203 - [6] = {0xe6, 0x58}, }, 1204 - .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE, 1205 - .page2 = { /* default parameter */ }, 1206 - .num_phy = 1, 1207 - .check_efuse = true, 1208 - .check_efuse_version = CHECK_EFUSE_V1, 1209 - .efuse_dc_driving_rate = 1, 1210 - .dc_driving_mask = 0xf, 1211 - .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE, 1212 - .dc_disconnect_mask = 0xf, 1213 - .usb_dc_disconnect_at_page0 = true, 1214 - .do_toggle = true, 1215 - .do_toggle_driving = true, 1216 - .driving_updated_for_dev_dis = 0xf, 1217 - .use_default_parameter = false, 1218 - .is_double_sensitivity_mode = true, 1219 - }; 1220 - 1221 - static const struct phy_cfg rtd1619b_phy_cfg = { 1222 - .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE, 1223 - .page0 = { [0] = {0xe0, 0xa3}, 1224 - [4] = {0xe4, 0x88}, 1225 - [5] = {0xe5, 0x4f}, 1226 - [6] = {0xe6, 0x02}, }, 1227 - .page1_size = 8, 1228 - .page1 = { [3] = {0xe3, 0x64}, }, 1229 - .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE, 1230 - .page2 = { [7] = {0xe7, 0x45}, }, 1231 - .num_phy = 1, 1232 - .check_efuse = true, 1233 - .check_efuse_version = CHECK_EFUSE_V1, 1234 - .efuse_dc_driving_rate = 
EFUS_USB_DC_CAL_RATE, 1235 - .dc_driving_mask = 0x1f, 1236 - .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE, 1237 - .dc_disconnect_mask = 0xf, 1238 - .usb_dc_disconnect_at_page0 = false, 1239 - .do_toggle = true, 1240 - .do_toggle_driving = true, 1241 - .driving_updated_for_dev_dis = 0x8, 1242 - .use_default_parameter = false, 1243 - .is_double_sensitivity_mode = true, 1244 - }; 1245 - 1246 - static const struct phy_cfg rtd1319d_phy_cfg = { 1247 - .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE, 1248 - .page0 = { [0] = {0xe0, 0xa3}, 1249 - [4] = {0xe4, 0x8e}, 1250 - [5] = {0xe5, 0x4f}, 1251 - [6] = {0xe6, 0x02}, }, 1252 - .page1_size = MAX_USB_PHY_PAGE1_DATA_SIZE, 1253 - .page1 = { [14] = {0xf5, 0x1}, }, 1254 - .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE, 1255 - .page2 = { [7] = {0xe7, 0x44}, }, 1256 - .check_efuse = true, 1257 - .num_phy = 1, 1258 - .check_efuse_version = CHECK_EFUSE_V1, 1259 - .efuse_dc_driving_rate = EFUS_USB_DC_CAL_RATE, 1260 - .dc_driving_mask = 0x1f, 1261 - .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE, 1262 - .dc_disconnect_mask = 0xf, 1263 - .usb_dc_disconnect_at_page0 = false, 1264 - .do_toggle = true, 1265 - .do_toggle_driving = false, 1266 - .driving_updated_for_dev_dis = 0x8, 1267 - .use_default_parameter = false, 1268 - .is_double_sensitivity_mode = true, 1269 - }; 1270 - 1271 - static const struct phy_cfg rtd1315e_phy_cfg = { 1272 - .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE, 1273 - .page0 = { [0] = {0xe0, 0xa3}, 1274 - [4] = {0xe4, 0x8c}, 1275 - [5] = {0xe5, 0x4f}, 1276 - [6] = {0xe6, 0x02}, }, 1277 - .page1_size = MAX_USB_PHY_PAGE1_DATA_SIZE, 1278 - .page1 = { [3] = {0xe3, 0x7f}, 1279 - [14] = {0xf5, 0x01}, }, 1280 - .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE, 1281 - .page2 = { [7] = {0xe7, 0x44}, }, 1282 - .num_phy = 1, 1283 - .check_efuse = true, 1284 - .check_efuse_version = CHECK_EFUSE_V2, 1285 - .efuse_dc_driving_rate = EFUS_USB_DC_CAL_RATE, 1286 - .dc_driving_mask = 0x1f, 1287 - .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE, 
1288 - .dc_disconnect_mask = 0xf, 1289 - .usb_dc_disconnect_at_page0 = false, 1290 - .do_toggle = true, 1291 - .do_toggle_driving = false, 1292 - .driving_updated_for_dev_dis = 0x8, 1293 - .use_default_parameter = false, 1294 - .is_double_sensitivity_mode = true, 1295 - }; 1296 - 1297 - static const struct of_device_id usbphy_rtk_dt_match[] = { 1298 - { .compatible = "realtek,rtd1295-usb2phy", .data = &rtd1295_phy_cfg }, 1299 - { .compatible = "realtek,rtd1312c-usb2phy", .data = &rtd1312c_phy_cfg }, 1300 - { .compatible = "realtek,rtd1315e-usb2phy", .data = &rtd1315e_phy_cfg }, 1301 - { .compatible = "realtek,rtd1319-usb2phy", .data = &rtd1319_phy_cfg }, 1302 - { .compatible = "realtek,rtd1319d-usb2phy", .data = &rtd1319d_phy_cfg }, 1303 - { .compatible = "realtek,rtd1395-usb2phy", .data = &rtd1395_phy_cfg }, 1304 - { .compatible = "realtek,rtd1395-usb2phy-2port", .data = &rtd1395_phy_cfg_2port }, 1305 - { .compatible = "realtek,rtd1619-usb2phy", .data = &rtd1619_phy_cfg }, 1306 - { .compatible = "realtek,rtd1619b-usb2phy", .data = &rtd1619b_phy_cfg }, 1307 - {}, 1308 - }; 1309 - MODULE_DEVICE_TABLE(of, usbphy_rtk_dt_match); 1310 - 1311 - static struct platform_driver rtk_usb2phy_driver = { 1312 - .probe = rtk_usb2phy_probe, 1313 - .remove_new = rtk_usb2phy_remove, 1314 - .driver = { 1315 - .name = "rtk-usb2phy", 1316 - .of_match_table = usbphy_rtk_dt_match, 1317 - }, 1318 - }; 1319 - 1320 - module_platform_driver(rtk_usb2phy_driver); 1321 - 1322 - MODULE_LICENSE("GPL"); 1323 - MODULE_ALIAS("platform: rtk-usb2phy"); 1324 - MODULE_AUTHOR("Stanley Chang <stanley_chang@realtek.com>"); 1325 - MODULE_DESCRIPTION("Realtek usb 2.0 phy driver");
-761
drivers/phy/realtek/phy-rtk-usb3.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 - /* 3 - * phy-rtk-usb3.c RTK usb3.0 phy driver 4 - * 5 - * copyright (c) 2023 realtek semiconductor corporation 6 - * 7 - */ 8 - 9 - #include <linux/module.h> 10 - #include <linux/of.h> 11 - #include <linux/of_address.h> 12 - #include <linux/platform_device.h> 13 - #include <linux/uaccess.h> 14 - #include <linux/debugfs.h> 15 - #include <linux/nvmem-consumer.h> 16 - #include <linux/regmap.h> 17 - #include <linux/sys_soc.h> 18 - #include <linux/mfd/syscon.h> 19 - #include <linux/phy/phy.h> 20 - #include <linux/usb.h> 21 - #include <linux/usb/hcd.h> 22 - #include <linux/usb/phy.h> 23 - 24 - #define USB_MDIO_CTRL_PHY_BUSY BIT(7) 25 - #define USB_MDIO_CTRL_PHY_WRITE BIT(0) 26 - #define USB_MDIO_CTRL_PHY_ADDR_SHIFT 8 27 - #define USB_MDIO_CTRL_PHY_DATA_SHIFT 16 28 - 29 - #define MAX_USB_PHY_DATA_SIZE 0x30 30 - #define PHY_ADDR_0X09 0x09 31 - #define PHY_ADDR_0X0B 0x0b 32 - #define PHY_ADDR_0X0D 0x0d 33 - #define PHY_ADDR_0X10 0x10 34 - #define PHY_ADDR_0X1F 0x1f 35 - #define PHY_ADDR_0X20 0x20 36 - #define PHY_ADDR_0X21 0x21 37 - #define PHY_ADDR_0X30 0x30 38 - 39 - #define REG_0X09_FORCE_CALIBRATION BIT(9) 40 - #define REG_0X0B_RX_OFFSET_RANGE_MASK 0xc 41 - #define REG_0X0D_RX_DEBUG_TEST_EN BIT(6) 42 - #define REG_0X10_DEBUG_MODE_SETTING 0x3c0 43 - #define REG_0X10_DEBUG_MODE_SETTING_MASK 0x3f8 44 - #define REG_0X1F_RX_OFFSET_CODE_MASK 0x1e 45 - 46 - #define USB_U3_TX_LFPS_SWING_TRIM_SHIFT 4 47 - #define USB_U3_TX_LFPS_SWING_TRIM_MASK 0xf 48 - #define AMPLITUDE_CONTROL_COARSE_MASK 0xff 49 - #define AMPLITUDE_CONTROL_FINE_MASK 0xffff 50 - #define AMPLITUDE_CONTROL_COARSE_DEFAULT 0xff 51 - #define AMPLITUDE_CONTROL_FINE_DEFAULT 0xffff 52 - 53 - #define PHY_ADDR_MAP_ARRAY_INDEX(addr) (addr) 54 - #define ARRAY_INDEX_MAP_PHY_ADDR(index) (index) 55 - 56 - struct phy_reg { 57 - void __iomem *reg_mdio_ctl; 58 - }; 59 - 60 - struct phy_data { 61 - u8 addr; 62 - u16 data; 63 - }; 64 - 65 - struct phy_cfg { 66 - int param_size; 67 - 
struct phy_data param[MAX_USB_PHY_DATA_SIZE]; 68 - 69 - bool check_efuse; 70 - bool do_toggle; 71 - bool do_toggle_once; 72 - bool use_default_parameter; 73 - bool check_rx_front_end_offset; 74 - }; 75 - 76 - struct phy_parameter { 77 - struct phy_reg phy_reg; 78 - 79 - /* Get from efuse */ 80 - u8 efuse_usb_u3_tx_lfps_swing_trim; 81 - 82 - /* Get from dts */ 83 - u32 amplitude_control_coarse; 84 - u32 amplitude_control_fine; 85 - }; 86 - 87 - struct rtk_phy { 88 - struct usb_phy phy; 89 - struct device *dev; 90 - 91 - struct phy_cfg *phy_cfg; 92 - int num_phy; 93 - struct phy_parameter *phy_parameter; 94 - 95 - struct dentry *debug_dir; 96 - }; 97 - 98 - #define PHY_IO_TIMEOUT_USEC (50000) 99 - #define PHY_IO_DELAY_US (100) 100 - 101 - static inline int utmi_wait_register(void __iomem *reg, u32 mask, u32 result) 102 - { 103 - int ret; 104 - unsigned int val; 105 - 106 - ret = read_poll_timeout(readl, val, ((val & mask) == result), 107 - PHY_IO_DELAY_US, PHY_IO_TIMEOUT_USEC, false, reg); 108 - if (ret) { 109 - pr_err("%s can't program USB phy\n", __func__); 110 - return -ETIMEDOUT; 111 - } 112 - 113 - return 0; 114 - } 115 - 116 - static int rtk_phy3_wait_vbusy(struct phy_reg *phy_reg) 117 - { 118 - return utmi_wait_register(phy_reg->reg_mdio_ctl, USB_MDIO_CTRL_PHY_BUSY, 0); 119 - } 120 - 121 - static u16 rtk_phy_read(struct phy_reg *phy_reg, char addr) 122 - { 123 - unsigned int tmp; 124 - u32 value; 125 - 126 - tmp = (addr << USB_MDIO_CTRL_PHY_ADDR_SHIFT); 127 - 128 - writel(tmp, phy_reg->reg_mdio_ctl); 129 - 130 - rtk_phy3_wait_vbusy(phy_reg); 131 - 132 - value = readl(phy_reg->reg_mdio_ctl); 133 - value = value >> USB_MDIO_CTRL_PHY_DATA_SHIFT; 134 - 135 - return (u16)value; 136 - } 137 - 138 - static int rtk_phy_write(struct phy_reg *phy_reg, char addr, u16 data) 139 - { 140 - unsigned int val; 141 - 142 - val = USB_MDIO_CTRL_PHY_WRITE | 143 - (addr << USB_MDIO_CTRL_PHY_ADDR_SHIFT) | 144 - (data << USB_MDIO_CTRL_PHY_DATA_SHIFT); 145 - 146 - writel(val, 
phy_reg->reg_mdio_ctl); 147 - 148 - rtk_phy3_wait_vbusy(phy_reg); 149 - 150 - return 0; 151 - } 152 - 153 - static void do_rtk_usb3_phy_toggle(struct rtk_phy *rtk_phy, int index, bool connect) 154 - { 155 - struct phy_cfg *phy_cfg = rtk_phy->phy_cfg; 156 - struct phy_reg *phy_reg; 157 - struct phy_parameter *phy_parameter; 158 - struct phy_data *phy_data; 159 - u8 addr; 160 - u16 data; 161 - int i; 162 - 163 - phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index]; 164 - phy_reg = &phy_parameter->phy_reg; 165 - 166 - if (!phy_cfg->do_toggle) 167 - return; 168 - 169 - i = PHY_ADDR_MAP_ARRAY_INDEX(PHY_ADDR_0X09); 170 - phy_data = phy_cfg->param + i; 171 - addr = phy_data->addr; 172 - data = phy_data->data; 173 - 174 - if (!addr && !data) { 175 - addr = PHY_ADDR_0X09; 176 - data = rtk_phy_read(phy_reg, addr); 177 - phy_data->addr = addr; 178 - phy_data->data = data; 179 - } 180 - 181 - rtk_phy_write(phy_reg, addr, data & (~REG_0X09_FORCE_CALIBRATION)); 182 - mdelay(1); 183 - rtk_phy_write(phy_reg, addr, data | REG_0X09_FORCE_CALIBRATION); 184 - } 185 - 186 - static int do_rtk_phy_init(struct rtk_phy *rtk_phy, int index) 187 - { 188 - struct phy_cfg *phy_cfg; 189 - struct phy_reg *phy_reg; 190 - struct phy_parameter *phy_parameter; 191 - int i = 0; 192 - 193 - phy_cfg = rtk_phy->phy_cfg; 194 - phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index]; 195 - phy_reg = &phy_parameter->phy_reg; 196 - 197 - if (phy_cfg->use_default_parameter) 198 - goto do_toggle; 199 - 200 - for (i = 0; i < phy_cfg->param_size; i++) { 201 - struct phy_data *phy_data = phy_cfg->param + i; 202 - u8 addr = phy_data->addr; 203 - u16 data = phy_data->data; 204 - 205 - if (!addr && !data) 206 - continue; 207 - 208 - rtk_phy_write(phy_reg, addr, data); 209 - } 210 - 211 - do_toggle: 212 - if (phy_cfg->do_toggle_once) 213 - phy_cfg->do_toggle = true; 214 - 215 - do_rtk_usb3_phy_toggle(rtk_phy, index, false); 216 - 217 - if (phy_cfg->do_toggle_once) { 218 - u16 
check_value = 0; 219 - int count = 10; 220 - u16 value_0x0d, value_0x10; 221 - 222 - /* Enable Debug mode by set 0x0D and 0x10 */ 223 - value_0x0d = rtk_phy_read(phy_reg, PHY_ADDR_0X0D); 224 - value_0x10 = rtk_phy_read(phy_reg, PHY_ADDR_0X10); 225 - 226 - rtk_phy_write(phy_reg, PHY_ADDR_0X0D, 227 - value_0x0d | REG_0X0D_RX_DEBUG_TEST_EN); 228 - rtk_phy_write(phy_reg, PHY_ADDR_0X10, 229 - (value_0x10 & ~REG_0X10_DEBUG_MODE_SETTING_MASK) | 230 - REG_0X10_DEBUG_MODE_SETTING); 231 - 232 - check_value = rtk_phy_read(phy_reg, PHY_ADDR_0X30); 233 - 234 - while (!(check_value & BIT(15))) { 235 - check_value = rtk_phy_read(phy_reg, PHY_ADDR_0X30); 236 - mdelay(1); 237 - if (count-- < 0) 238 - break; 239 - } 240 - 241 - if (!(check_value & BIT(15))) 242 - dev_info(rtk_phy->dev, "toggle fail addr=0x%02x, data=0x%04x\n", 243 - PHY_ADDR_0X30, check_value); 244 - 245 - /* Disable Debug mode by set 0x0D and 0x10 to default*/ 246 - rtk_phy_write(phy_reg, PHY_ADDR_0X0D, value_0x0d); 247 - rtk_phy_write(phy_reg, PHY_ADDR_0X10, value_0x10); 248 - 249 - phy_cfg->do_toggle = false; 250 - } 251 - 252 - if (phy_cfg->check_rx_front_end_offset) { 253 - u16 rx_offset_code, rx_offset_range; 254 - u16 code_mask = REG_0X1F_RX_OFFSET_CODE_MASK; 255 - u16 range_mask = REG_0X0B_RX_OFFSET_RANGE_MASK; 256 - bool do_update = false; 257 - 258 - rx_offset_code = rtk_phy_read(phy_reg, PHY_ADDR_0X1F); 259 - if (((rx_offset_code & code_mask) == 0x0) || 260 - ((rx_offset_code & code_mask) == code_mask)) 261 - do_update = true; 262 - 263 - rx_offset_range = rtk_phy_read(phy_reg, PHY_ADDR_0X0B); 264 - if (((rx_offset_range & range_mask) == range_mask) && do_update) { 265 - dev_warn(rtk_phy->dev, "Don't update rx_offset_range (rx_offset_code=0x%x, rx_offset_range=0x%x)\n", 266 - rx_offset_code, rx_offset_range); 267 - do_update = false; 268 - } 269 - 270 - if (do_update) { 271 - u16 tmp1, tmp2; 272 - 273 - tmp1 = rx_offset_range & (~range_mask); 274 - tmp2 = rx_offset_range & range_mask; 275 - tmp2 += (1 << 
2); 276 - rx_offset_range = tmp1 | (tmp2 & range_mask); 277 - rtk_phy_write(phy_reg, PHY_ADDR_0X0B, rx_offset_range); 278 - goto do_toggle; 279 - } 280 - } 281 - 282 - return 0; 283 - } 284 - 285 - static int rtk_phy_init(struct phy *phy) 286 - { 287 - struct rtk_phy *rtk_phy = phy_get_drvdata(phy); 288 - int ret = 0; 289 - int i; 290 - unsigned long phy_init_time = jiffies; 291 - 292 - for (i = 0; i < rtk_phy->num_phy; i++) 293 - ret = do_rtk_phy_init(rtk_phy, i); 294 - 295 - dev_dbg(rtk_phy->dev, "Initialized RTK USB 3.0 PHY (take %dms)\n", 296 - jiffies_to_msecs(jiffies - phy_init_time)); 297 - 298 - return ret; 299 - } 300 - 301 - static int rtk_phy_exit(struct phy *phy) 302 - { 303 - return 0; 304 - } 305 - 306 - static const struct phy_ops ops = { 307 - .init = rtk_phy_init, 308 - .exit = rtk_phy_exit, 309 - .owner = THIS_MODULE, 310 - }; 311 - 312 - static void rtk_phy_toggle(struct usb_phy *usb3_phy, bool connect, int port) 313 - { 314 - int index = port; 315 - struct rtk_phy *rtk_phy = NULL; 316 - 317 - rtk_phy = dev_get_drvdata(usb3_phy->dev); 318 - 319 - if (index > rtk_phy->num_phy) { 320 - dev_err(rtk_phy->dev, "%s: The port=%d is not in usb phy (num_phy=%d)\n", 321 - __func__, index, rtk_phy->num_phy); 322 - return; 323 - } 324 - 325 - do_rtk_usb3_phy_toggle(rtk_phy, index, connect); 326 - } 327 - 328 - static int rtk_phy_notify_port_status(struct usb_phy *x, int port, 329 - u16 portstatus, u16 portchange) 330 - { 331 - bool connect = false; 332 - 333 - pr_debug("%s port=%d portstatus=0x%x portchange=0x%x\n", 334 - __func__, port, (int)portstatus, (int)portchange); 335 - if (portstatus & USB_PORT_STAT_CONNECTION) 336 - connect = true; 337 - 338 - if (portchange & USB_PORT_STAT_C_CONNECTION) 339 - rtk_phy_toggle(x, connect, port); 340 - 341 - return 0; 342 - } 343 - 344 - #ifdef CONFIG_DEBUG_FS 345 - static struct dentry *create_phy_debug_root(void) 346 - { 347 - struct dentry *phy_debug_root; 348 - 349 - phy_debug_root = debugfs_lookup("phy", 
usb_debug_root); 350 - if (!phy_debug_root) 351 - phy_debug_root = debugfs_create_dir("phy", usb_debug_root); 352 - 353 - return phy_debug_root; 354 - } 355 - 356 - static int rtk_usb3_parameter_show(struct seq_file *s, void *unused) 357 - { 358 - struct rtk_phy *rtk_phy = s->private; 359 - struct phy_cfg *phy_cfg; 360 - int i, index; 361 - 362 - phy_cfg = rtk_phy->phy_cfg; 363 - 364 - seq_puts(s, "Property:\n"); 365 - seq_printf(s, " check_efuse: %s\n", 366 - phy_cfg->check_efuse ? "Enable" : "Disable"); 367 - seq_printf(s, " do_toggle: %s\n", 368 - phy_cfg->do_toggle ? "Enable" : "Disable"); 369 - seq_printf(s, " do_toggle_once: %s\n", 370 - phy_cfg->do_toggle_once ? "Enable" : "Disable"); 371 - seq_printf(s, " use_default_parameter: %s\n", 372 - phy_cfg->use_default_parameter ? "Enable" : "Disable"); 373 - 374 - for (index = 0; index < rtk_phy->num_phy; index++) { 375 - struct phy_reg *phy_reg; 376 - struct phy_parameter *phy_parameter; 377 - 378 - phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index]; 379 - phy_reg = &phy_parameter->phy_reg; 380 - 381 - seq_printf(s, "PHY %d:\n", index); 382 - 383 - for (i = 0; i < phy_cfg->param_size; i++) { 384 - struct phy_data *phy_data = phy_cfg->param + i; 385 - u8 addr = ARRAY_INDEX_MAP_PHY_ADDR(i); 386 - u16 data = phy_data->data; 387 - 388 - if (!phy_data->addr && !data) 389 - seq_printf(s, " addr = 0x%02x, data = none ==> read value = 0x%04x\n", 390 - addr, rtk_phy_read(phy_reg, addr)); 391 - else 392 - seq_printf(s, " addr = 0x%02x, data = 0x%04x ==> read value = 0x%04x\n", 393 - addr, data, rtk_phy_read(phy_reg, addr)); 394 - } 395 - 396 - seq_puts(s, "PHY Property:\n"); 397 - seq_printf(s, " efuse_usb_u3_tx_lfps_swing_trim: 0x%x\n", 398 - (int)phy_parameter->efuse_usb_u3_tx_lfps_swing_trim); 399 - seq_printf(s, " amplitude_control_coarse: 0x%x\n", 400 - (int)phy_parameter->amplitude_control_coarse); 401 - seq_printf(s, " amplitude_control_fine: 0x%x\n", 402 - 
(int)phy_parameter->amplitude_control_fine); 403 - } 404 - 405 - return 0; 406 - } 407 - DEFINE_SHOW_ATTRIBUTE(rtk_usb3_parameter); 408 - 409 - static inline void create_debug_files(struct rtk_phy *rtk_phy) 410 - { 411 - struct dentry *phy_debug_root = NULL; 412 - 413 - phy_debug_root = create_phy_debug_root(); 414 - 415 - if (!phy_debug_root) 416 - return; 417 - 418 - rtk_phy->debug_dir = debugfs_create_dir(dev_name(rtk_phy->dev), phy_debug_root); 419 - 420 - debugfs_create_file("parameter", 0444, rtk_phy->debug_dir, rtk_phy, 421 - &rtk_usb3_parameter_fops); 422 - 423 - return; 424 - } 425 - 426 - static inline void remove_debug_files(struct rtk_phy *rtk_phy) 427 - { 428 - debugfs_remove_recursive(rtk_phy->debug_dir); 429 - } 430 - #else 431 - static inline void create_debug_files(struct rtk_phy *rtk_phy) { } 432 - static inline void remove_debug_files(struct rtk_phy *rtk_phy) { } 433 - #endif /* CONFIG_DEBUG_FS */ 434 - 435 - static int get_phy_data_by_efuse(struct rtk_phy *rtk_phy, 436 - struct phy_parameter *phy_parameter, int index) 437 - { 438 - struct phy_cfg *phy_cfg = rtk_phy->phy_cfg; 439 - u8 value = 0; 440 - struct nvmem_cell *cell; 441 - 442 - if (!phy_cfg->check_efuse) 443 - goto out; 444 - 445 - cell = nvmem_cell_get(rtk_phy->dev, "usb_u3_tx_lfps_swing_trim"); 446 - if (IS_ERR(cell)) { 447 - dev_dbg(rtk_phy->dev, "%s no usb_u3_tx_lfps_swing_trim: %ld\n", 448 - __func__, PTR_ERR(cell)); 449 - } else { 450 - unsigned char *buf; 451 - size_t buf_size; 452 - 453 - buf = nvmem_cell_read(cell, &buf_size); 454 - if (!IS_ERR(buf)) { 455 - value = buf[0] & USB_U3_TX_LFPS_SWING_TRIM_MASK; 456 - kfree(buf); 457 - } 458 - nvmem_cell_put(cell); 459 - } 460 - 461 - if (value > 0 && value < 0x8) 462 - phy_parameter->efuse_usb_u3_tx_lfps_swing_trim = 0x8; 463 - else 464 - phy_parameter->efuse_usb_u3_tx_lfps_swing_trim = (u8)value; 465 - 466 - out: 467 - return 0; 468 - } 469 - 470 - static void update_amplitude_control_value(struct rtk_phy *rtk_phy, 471 - struct 
phy_parameter *phy_parameter) 472 - { 473 - struct phy_cfg *phy_cfg; 474 - struct phy_reg *phy_reg; 475 - 476 - phy_reg = &phy_parameter->phy_reg; 477 - phy_cfg = rtk_phy->phy_cfg; 478 - 479 - if (phy_parameter->amplitude_control_coarse != AMPLITUDE_CONTROL_COARSE_DEFAULT) { 480 - u16 val_mask = AMPLITUDE_CONTROL_COARSE_MASK; 481 - u16 data; 482 - 483 - if (!phy_cfg->param[PHY_ADDR_0X20].addr && !phy_cfg->param[PHY_ADDR_0X20].data) { 484 - phy_cfg->param[PHY_ADDR_0X20].addr = PHY_ADDR_0X20; 485 - data = rtk_phy_read(phy_reg, PHY_ADDR_0X20); 486 - } else { 487 - data = phy_cfg->param[PHY_ADDR_0X20].data; 488 - } 489 - 490 - data &= (~val_mask); 491 - data |= (phy_parameter->amplitude_control_coarse & val_mask); 492 - 493 - phy_cfg->param[PHY_ADDR_0X20].data = data; 494 - } 495 - 496 - if (phy_parameter->efuse_usb_u3_tx_lfps_swing_trim) { 497 - u8 efuse_val = phy_parameter->efuse_usb_u3_tx_lfps_swing_trim; 498 - u16 val_mask = USB_U3_TX_LFPS_SWING_TRIM_MASK; 499 - int val_shift = USB_U3_TX_LFPS_SWING_TRIM_SHIFT; 500 - u16 data; 501 - 502 - if (!phy_cfg->param[PHY_ADDR_0X20].addr && !phy_cfg->param[PHY_ADDR_0X20].data) { 503 - phy_cfg->param[PHY_ADDR_0X20].addr = PHY_ADDR_0X20; 504 - data = rtk_phy_read(phy_reg, PHY_ADDR_0X20); 505 - } else { 506 - data = phy_cfg->param[PHY_ADDR_0X20].data; 507 - } 508 - 509 - data &= ~(val_mask << val_shift); 510 - data |= ((efuse_val & val_mask) << val_shift); 511 - 512 - phy_cfg->param[PHY_ADDR_0X20].data = data; 513 - } 514 - 515 - if (phy_parameter->amplitude_control_fine != AMPLITUDE_CONTROL_FINE_DEFAULT) { 516 - u16 val_mask = AMPLITUDE_CONTROL_FINE_MASK; 517 - 518 - if (!phy_cfg->param[PHY_ADDR_0X21].addr && !phy_cfg->param[PHY_ADDR_0X21].data) 519 - phy_cfg->param[PHY_ADDR_0X21].addr = PHY_ADDR_0X21; 520 - 521 - phy_cfg->param[PHY_ADDR_0X21].data = 522 - phy_parameter->amplitude_control_fine & val_mask; 523 - } 524 - } 525 - 526 - static int parse_phy_data(struct rtk_phy *rtk_phy) 527 - { 528 - struct device *dev = 
rtk_phy->dev; 529 - struct phy_parameter *phy_parameter; 530 - int ret = 0; 531 - int index; 532 - 533 - rtk_phy->phy_parameter = devm_kzalloc(dev, sizeof(struct phy_parameter) * 534 - rtk_phy->num_phy, GFP_KERNEL); 535 - if (!rtk_phy->phy_parameter) 536 - return -ENOMEM; 537 - 538 - for (index = 0; index < rtk_phy->num_phy; index++) { 539 - phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index]; 540 - 541 - phy_parameter->phy_reg.reg_mdio_ctl = of_iomap(dev->of_node, 0) + index; 542 - 543 - /* Amplitude control address 0x20 bit 0 to bit 7 */ 544 - if (of_property_read_u32(dev->of_node, "realtek,amplitude-control-coarse-tuning", 545 - &phy_parameter->amplitude_control_coarse)) 546 - phy_parameter->amplitude_control_coarse = AMPLITUDE_CONTROL_COARSE_DEFAULT; 547 - 548 - /* Amplitude control address 0x21 bit 0 to bit 16 */ 549 - if (of_property_read_u32(dev->of_node, "realtek,amplitude-control-fine-tuning", 550 - &phy_parameter->amplitude_control_fine)) 551 - phy_parameter->amplitude_control_fine = AMPLITUDE_CONTROL_FINE_DEFAULT; 552 - 553 - get_phy_data_by_efuse(rtk_phy, phy_parameter, index); 554 - 555 - update_amplitude_control_value(rtk_phy, phy_parameter); 556 - } 557 - 558 - return ret; 559 - } 560 - 561 - static int rtk_usb3phy_probe(struct platform_device *pdev) 562 - { 563 - struct rtk_phy *rtk_phy; 564 - struct device *dev = &pdev->dev; 565 - struct phy *generic_phy; 566 - struct phy_provider *phy_provider; 567 - const struct phy_cfg *phy_cfg; 568 - int ret; 569 - 570 - phy_cfg = of_device_get_match_data(dev); 571 - if (!phy_cfg) { 572 - dev_err(dev, "phy config are not assigned!\n"); 573 - return -EINVAL; 574 - } 575 - 576 - rtk_phy = devm_kzalloc(dev, sizeof(*rtk_phy), GFP_KERNEL); 577 - if (!rtk_phy) 578 - return -ENOMEM; 579 - 580 - rtk_phy->dev = &pdev->dev; 581 - rtk_phy->phy.dev = rtk_phy->dev; 582 - rtk_phy->phy.label = "rtk-usb3phy"; 583 - rtk_phy->phy.notify_port_status = rtk_phy_notify_port_status; 584 - 585 - rtk_phy->phy_cfg = 
devm_kzalloc(dev, sizeof(*phy_cfg), GFP_KERNEL); 586 - 587 - memcpy(rtk_phy->phy_cfg, phy_cfg, sizeof(*phy_cfg)); 588 - 589 - rtk_phy->num_phy = 1; 590 - 591 - ret = parse_phy_data(rtk_phy); 592 - if (ret) 593 - goto err; 594 - 595 - platform_set_drvdata(pdev, rtk_phy); 596 - 597 - generic_phy = devm_phy_create(rtk_phy->dev, NULL, &ops); 598 - if (IS_ERR(generic_phy)) 599 - return PTR_ERR(generic_phy); 600 - 601 - phy_set_drvdata(generic_phy, rtk_phy); 602 - 603 - phy_provider = devm_of_phy_provider_register(rtk_phy->dev, of_phy_simple_xlate); 604 - if (IS_ERR(phy_provider)) 605 - return PTR_ERR(phy_provider); 606 - 607 - ret = usb_add_phy_dev(&rtk_phy->phy); 608 - if (ret) 609 - goto err; 610 - 611 - create_debug_files(rtk_phy); 612 - 613 - err: 614 - return ret; 615 - } 616 - 617 - static void rtk_usb3phy_remove(struct platform_device *pdev) 618 - { 619 - struct rtk_phy *rtk_phy = platform_get_drvdata(pdev); 620 - 621 - remove_debug_files(rtk_phy); 622 - 623 - usb_remove_phy(&rtk_phy->phy); 624 - } 625 - 626 - static const struct phy_cfg rtd1295_phy_cfg = { 627 - .param_size = MAX_USB_PHY_DATA_SIZE, 628 - .param = { [0] = {0x01, 0x4008}, [1] = {0x01, 0xe046}, 629 - [2] = {0x02, 0x6046}, [3] = {0x03, 0x2779}, 630 - [4] = {0x04, 0x72f5}, [5] = {0x05, 0x2ad3}, 631 - [6] = {0x06, 0x000e}, [7] = {0x07, 0x2e00}, 632 - [8] = {0x08, 0x3591}, [9] = {0x09, 0x525c}, 633 - [10] = {0x0a, 0xa600}, [11] = {0x0b, 0xa904}, 634 - [12] = {0x0c, 0xc000}, [13] = {0x0d, 0xef1c}, 635 - [14] = {0x0e, 0x2000}, [15] = {0x0f, 0x0000}, 636 - [16] = {0x10, 0x000c}, [17] = {0x11, 0x4c00}, 637 - [18] = {0x12, 0xfc00}, [19] = {0x13, 0x0c81}, 638 - [20] = {0x14, 0xde01}, [21] = {0x15, 0x0000}, 639 - [22] = {0x16, 0x0000}, [23] = {0x17, 0x0000}, 640 - [24] = {0x18, 0x0000}, [25] = {0x19, 0x4004}, 641 - [26] = {0x1a, 0x1260}, [27] = {0x1b, 0xff00}, 642 - [28] = {0x1c, 0xcb00}, [29] = {0x1d, 0xa03f}, 643 - [30] = {0x1e, 0xc2e0}, [31] = {0x1f, 0x2807}, 644 - [32] = {0x20, 0x947a}, [33] = {0x21, 
0x88aa}, 645 - [34] = {0x22, 0x0057}, [35] = {0x23, 0xab66}, 646 - [36] = {0x24, 0x0800}, [37] = {0x25, 0x0000}, 647 - [38] = {0x26, 0x040a}, [39] = {0x27, 0x01d6}, 648 - [40] = {0x28, 0xf8c2}, [41] = {0x29, 0x3080}, 649 - [42] = {0x2a, 0x3082}, [43] = {0x2b, 0x2078}, 650 - [44] = {0x2c, 0xffff}, [45] = {0x2d, 0xffff}, 651 - [46] = {0x2e, 0x0000}, [47] = {0x2f, 0x0040}, }, 652 - .check_efuse = false, 653 - .do_toggle = true, 654 - .do_toggle_once = false, 655 - .use_default_parameter = false, 656 - .check_rx_front_end_offset = false, 657 - }; 658 - 659 - static const struct phy_cfg rtd1619_phy_cfg = { 660 - .param_size = MAX_USB_PHY_DATA_SIZE, 661 - .param = { [8] = {0x08, 0x3591}, 662 - [38] = {0x26, 0x840b}, 663 - [40] = {0x28, 0xf842}, }, 664 - .check_efuse = false, 665 - .do_toggle = true, 666 - .do_toggle_once = false, 667 - .use_default_parameter = false, 668 - .check_rx_front_end_offset = false, 669 - }; 670 - 671 - static const struct phy_cfg rtd1319_phy_cfg = { 672 - .param_size = MAX_USB_PHY_DATA_SIZE, 673 - .param = { [1] = {0x01, 0xac86}, 674 - [6] = {0x06, 0x0003}, 675 - [9] = {0x09, 0x924c}, 676 - [10] = {0x0a, 0xa608}, 677 - [11] = {0x0b, 0xb905}, 678 - [14] = {0x0e, 0x2010}, 679 - [32] = {0x20, 0x705a}, 680 - [33] = {0x21, 0xf645}, 681 - [34] = {0x22, 0x0013}, 682 - [35] = {0x23, 0xcb66}, 683 - [41] = {0x29, 0xff00}, }, 684 - .check_efuse = true, 685 - .do_toggle = true, 686 - .do_toggle_once = false, 687 - .use_default_parameter = false, 688 - .check_rx_front_end_offset = false, 689 - }; 690 - 691 - static const struct phy_cfg rtd1619b_phy_cfg = { 692 - .param_size = MAX_USB_PHY_DATA_SIZE, 693 - .param = { [1] = {0x01, 0xac8c}, 694 - [6] = {0x06, 0x0017}, 695 - [9] = {0x09, 0x724c}, 696 - [10] = {0x0a, 0xb610}, 697 - [11] = {0x0b, 0xb90d}, 698 - [13] = {0x0d, 0xef2a}, 699 - [15] = {0x0f, 0x9050}, 700 - [16] = {0x10, 0x000c}, 701 - [32] = {0x20, 0x70ff}, 702 - [34] = {0x22, 0x0013}, 703 - [35] = {0x23, 0xdb66}, 704 - [38] = {0x26, 0x8609}, 705 - 
[41] = {0x29, 0xff13}, 706 - [42] = {0x2a, 0x3070}, }, 707 - .check_efuse = true, 708 - .do_toggle = false, 709 - .do_toggle_once = true, 710 - .use_default_parameter = false, 711 - .check_rx_front_end_offset = false, 712 - }; 713 - 714 - static const struct phy_cfg rtd1319d_phy_cfg = { 715 - .param_size = MAX_USB_PHY_DATA_SIZE, 716 - .param = { [1] = {0x01, 0xac89}, 717 - [4] = {0x04, 0xf2f5}, 718 - [6] = {0x06, 0x0017}, 719 - [9] = {0x09, 0x424c}, 720 - [10] = {0x0a, 0x9610}, 721 - [11] = {0x0b, 0x9901}, 722 - [12] = {0x0c, 0xf000}, 723 - [13] = {0x0d, 0xef2a}, 724 - [14] = {0x0e, 0x1000}, 725 - [15] = {0x0f, 0x9050}, 726 - [32] = {0x20, 0x7077}, 727 - [35] = {0x23, 0x0b62}, 728 - [37] = {0x25, 0x10ec}, 729 - [42] = {0x2a, 0x3070}, }, 730 - .check_efuse = true, 731 - .do_toggle = false, 732 - .do_toggle_once = true, 733 - .use_default_parameter = false, 734 - .check_rx_front_end_offset = true, 735 - }; 736 - 737 - static const struct of_device_id usbphy_rtk_dt_match[] = { 738 - { .compatible = "realtek,rtd1295-usb3phy", .data = &rtd1295_phy_cfg }, 739 - { .compatible = "realtek,rtd1319-usb3phy", .data = &rtd1319_phy_cfg }, 740 - { .compatible = "realtek,rtd1319d-usb3phy", .data = &rtd1319d_phy_cfg }, 741 - { .compatible = "realtek,rtd1619-usb3phy", .data = &rtd1619_phy_cfg }, 742 - { .compatible = "realtek,rtd1619b-usb3phy", .data = &rtd1619b_phy_cfg }, 743 - {}, 744 - }; 745 - MODULE_DEVICE_TABLE(of, usbphy_rtk_dt_match); 746 - 747 - static struct platform_driver rtk_usb3phy_driver = { 748 - .probe = rtk_usb3phy_probe, 749 - .remove_new = rtk_usb3phy_remove, 750 - .driver = { 751 - .name = "rtk-usb3phy", 752 - .of_match_table = usbphy_rtk_dt_match, 753 - }, 754 - }; 755 - 756 - module_platform_driver(rtk_usb3phy_driver); 757 - 758 - MODULE_LICENSE("GPL"); 759 - MODULE_ALIAS("platform: rtk-usb3phy"); 760 - MODULE_AUTHOR("Stanley Chang <stanley_chang@realtek.com>"); 761 - MODULE_DESCRIPTION("Realtek usb 3.0 phy driver");
+2 -1
drivers/pinctrl/cirrus/Kconfig
··· 12 12 13 13 config PINCTRL_LOCHNAGAR 14 14 tristate "Cirrus Logic Lochnagar pinctrl driver" 15 - depends on MFD_LOCHNAGAR 15 + # Avoid clash caused by MIPS defining RST, which is used in the driver 16 + depends on MFD_LOCHNAGAR && !MIPS 16 17 select GPIOLIB 17 18 select PINMUX 18 19 select PINCONF
+3 -3
drivers/pinctrl/core.c
··· 1262 1262 static int pinctrl_commit_state(struct pinctrl *p, struct pinctrl_state *state) 1263 1263 { 1264 1264 struct pinctrl_setting *setting, *setting2; 1265 - struct pinctrl_state *old_state = p->state; 1265 + struct pinctrl_state *old_state = READ_ONCE(p->state); 1266 1266 int ret; 1267 1267 1268 - if (p->state) { 1268 + if (old_state) { 1269 1269 /* 1270 1270 * For each pinmux setting in the old state, forget SW's record 1271 1271 * of mux owner for that pingroup. Any pingroups which are 1272 1272 * still owned by the new state will be re-acquired by the call 1273 1273 * to pinmux_enable_setting() in the loop below. 1274 1274 */ 1275 - list_for_each_entry(setting, &p->state->settings, node) { 1275 + list_for_each_entry(setting, &old_state->settings, node) { 1276 1276 if (setting->type != PIN_MAP_TYPE_MUX_GROUP) 1277 1277 continue; 1278 1278 pinmux_disable_setting(setting);
+2 -2
drivers/pinctrl/nxp/pinctrl-s32cc.c
··· 843 843 if (!np) 844 844 return -ENODEV; 845 845 846 - if (mem_regions == 0) { 847 - dev_err(&pdev->dev, "mem_regions is 0\n"); 846 + if (mem_regions == 0 || mem_regions >= 10000) { 847 + dev_err(&pdev->dev, "mem_regions is invalid: %u\n", mem_regions); 848 848 return -EINVAL; 849 849 } 850 850
+1
drivers/pinctrl/pinctrl-cy8c95x0.c
··· 143 143 * @pinctrl_desc: pin controller description 144 144 * @name: Chip controller name 145 145 * @tpin: Total number of pins 146 + * @gpio_reset: GPIO line handler that can reset the IC 146 147 */ 147 148 struct cy8c95x0_pinctrl { 148 149 struct regmap *regmap;
+2 -2
drivers/pinctrl/realtek/pinctrl-rtd.c
··· 146 146 147 147 static const struct rtd_pin_desc *rtd_pinctrl_find_mux(struct rtd_pinctrl *data, unsigned int pin) 148 148 { 149 - if (!data->info->muxes[pin].name) 149 + if (data->info->muxes[pin].name) 150 150 return &data->info->muxes[pin]; 151 151 152 152 return NULL; ··· 249 249 static const struct rtd_pin_config_desc 250 250 *rtd_pinctrl_find_config(struct rtd_pinctrl *data, unsigned int pin) 251 251 { 252 - if (!data->info->configs[pin].name) 252 + if (data->info->configs[pin].name) 253 253 return &data->info->configs[pin]; 254 254 255 255 return NULL;
+10 -3
drivers/pinctrl/stm32/pinctrl-stm32.c
··· 1273 1273 int i; 1274 1274 1275 1275 /* With few exceptions (e.g. bank 'Z'), pin number matches with pin index in array */ 1276 - pin_desc = pctl->pins + stm32_pin_nb; 1277 - if (pin_desc->pin.number == stm32_pin_nb) 1278 - return pin_desc; 1276 + if (stm32_pin_nb < pctl->npins) { 1277 + pin_desc = pctl->pins + stm32_pin_nb; 1278 + if (pin_desc->pin.number == stm32_pin_nb) 1279 + return pin_desc; 1280 + } 1279 1281 1280 1282 /* Otherwise, loop all array to find the pin with the right number */ 1281 1283 for (i = 0; i < pctl->npins; i++) { ··· 1370 1368 } 1371 1369 1372 1370 names = devm_kcalloc(dev, npins, sizeof(char *), GFP_KERNEL); 1371 + if (!names) { 1372 + err = -ENOMEM; 1373 + goto err_clk; 1374 + } 1375 + 1373 1376 for (i = 0; i < npins; i++) { 1374 1377 stm32_pin = stm32_pctrl_get_desc_pin_from_gpio(pctl, bank, i); 1375 1378 if (stm32_pin && stm32_pin->pin.name)
+1 -1
drivers/pmdomain/arm/scmi_perf_domain.c
··· 35 35 if (!state) 36 36 return -EINVAL; 37 37 38 - ret = pd->perf_ops->level_set(pd->ph, pd->domain_id, state, true); 38 + ret = pd->perf_ops->level_set(pd->ph, pd->domain_id, state, false); 39 39 if (ret) 40 40 dev_warn(&genpd->dev, "Failed with %d when trying to set %d perf level", 41 41 ret, state);
+13 -11
drivers/s390/block/dasd.c
··· 676 676 * we count each request only once. 677 677 */ 678 678 device = cqr->startdev; 679 - if (device->profile.data) { 680 - counter = 1; /* request is not yet queued on the start device */ 681 - list_for_each(l, &device->ccw_queue) 682 - if (++counter >= 31) 683 - break; 684 - } 679 + if (!device->profile.data) 680 + return; 681 + 682 + spin_lock(get_ccwdev_lock(device->cdev)); 683 + counter = 1; /* request is not yet queued on the start device */ 684 + list_for_each(l, &device->ccw_queue) 685 + if (++counter >= 31) 686 + break; 687 + spin_unlock(get_ccwdev_lock(device->cdev)); 688 + 685 689 spin_lock(&device->profile.lock); 686 - if (device->profile.data) { 687 - device->profile.data->dasd_io_nr_req[counter]++; 688 - if (rq_data_dir(req) == READ) 689 - device->profile.data->dasd_read_nr_req[counter]++; 690 - } 690 + device->profile.data->dasd_io_nr_req[counter]++; 691 + if (rq_data_dir(req) == READ) 692 + device->profile.data->dasd_read_nr_req[counter]++; 691 693 spin_unlock(&device->profile.lock); 692 694 } 693 695
+1 -1
drivers/s390/block/dasd_int.h
··· 283 283 __u8 secondary; /* 7 Secondary device address */ 284 284 __u16 pprc_id; /* 8-9 Peer-to-Peer Remote Copy ID */ 285 285 __u8 reserved2[12]; /* 10-21 reserved */ 286 - __u16 prim_cu_ssid; /* 22-23 Pimary Control Unit SSID */ 286 + __u16 prim_cu_ssid; /* 22-23 Primary Control Unit SSID */ 287 287 __u8 reserved3[12]; /* 24-35 reserved */ 288 288 __u16 sec_cu_ssid; /* 36-37 Secondary Control Unit SSID */ 289 289 __u8 reserved4[90]; /* 38-127 reserved */
+3 -3
drivers/thunderbolt/switch.c
··· 1143 1143 * Only set bonding if the link was not already bonded. This 1144 1144 * avoids the lane adapter to re-enter bonding state. 1145 1145 */ 1146 - if (width == TB_LINK_WIDTH_SINGLE) { 1146 + if (width == TB_LINK_WIDTH_SINGLE && !tb_is_upstream_port(port)) { 1147 1147 ret = tb_port_set_lane_bonding(port, true); 1148 1148 if (ret) 1149 1149 goto err_lane1; ··· 2880 2880 return tb_port_wait_for_link_width(down, TB_LINK_WIDTH_SINGLE, 100); 2881 2881 } 2882 2882 2883 + /* Note updating sw->link_width done in tb_switch_update_link_attributes() */ 2883 2884 static int tb_switch_asym_enable(struct tb_switch *sw, enum tb_link_width width) 2884 2885 { 2885 2886 struct tb_port *up, *down, *port; ··· 2920 2919 return ret; 2921 2920 } 2922 2921 2923 - sw->link_width = width; 2924 2922 return 0; 2925 2923 } 2926 2924 2925 + /* Note updating sw->link_width done in tb_switch_update_link_attributes() */ 2927 2926 static int tb_switch_asym_disable(struct tb_switch *sw) 2928 2927 { 2929 2928 struct tb_port *up, *down; ··· 2958 2957 return ret; 2959 2958 } 2960 2959 2961 - sw->link_width = TB_LINK_WIDTH_DUAL; 2962 2960 return 0; 2963 2961 } 2964 2962
+11 -1
drivers/thunderbolt/tb.c
··· 213 213 if (!tb_switch_query_dp_resource(sw, port)) 214 214 continue; 215 215 216 - list_add(&port->list, &tcm->dp_resources); 216 + /* 217 + * If DP IN on device router exist, position it at the 218 + * beginning of the DP resources list, so that it is used 219 + * before DP IN of the host router. This way external GPU(s) 220 + * will be prioritized when pairing DP IN to a DP OUT. 221 + */ 222 + if (tb_route(sw)) 223 + list_add(&port->list, &tcm->dp_resources); 224 + else 225 + list_add_tail(&port->list, &tcm->dp_resources); 226 + 217 227 tb_port_dbg(port, "DP IN resource available\n"); 218 228 } 219 229 }
+3
drivers/usb/cdns3/cdnsp-ring.c
··· 1529 1529 unsigned long flags; 1530 1530 int counter = 0; 1531 1531 1532 + local_bh_disable(); 1532 1533 spin_lock_irqsave(&pdev->lock, flags); 1533 1534 1534 1535 if (pdev->cdnsp_state & (CDNSP_STATE_HALTED | CDNSP_STATE_DYING)) { ··· 1542 1541 cdnsp_died(pdev); 1543 1542 1544 1543 spin_unlock_irqrestore(&pdev->lock, flags); 1544 + local_bh_enable(); 1545 1545 return IRQ_HANDLED; 1546 1546 } 1547 1547 ··· 1559 1557 cdnsp_update_erst_dequeue(pdev, event_ring_deq, 1); 1560 1558 1561 1559 spin_unlock_irqrestore(&pdev->lock, flags); 1560 + local_bh_enable(); 1562 1561 1563 1562 return IRQ_HANDLED; 1564 1563 }
+2 -1
drivers/usb/core/config.c
··· 1047 1047 1048 1048 if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) { 1049 1049 dev_notice(ddev, "descriptor type invalid, skip\n"); 1050 - continue; 1050 + goto skip_to_next_descriptor; 1051 1051 } 1052 1052 1053 1053 switch (cap_type) { ··· 1078 1078 break; 1079 1079 } 1080 1080 1081 + skip_to_next_descriptor: 1081 1082 total_len -= length; 1082 1083 buffer += length; 1083 1084 }
-23
drivers/usb/core/hub.c
··· 622 622 ret = 0; 623 623 } 624 624 mutex_unlock(&hub->status_mutex); 625 - 626 - /* 627 - * There is no need to lock status_mutex here, because status_mutex 628 - * protects hub->status, and the phy driver only checks the port 629 - * status without changing the status. 630 - */ 631 - if (!ret) { 632 - struct usb_device *hdev = hub->hdev; 633 - 634 - /* 635 - * Only roothub will be notified of port state changes, 636 - * since the USB PHY only cares about changes at the next 637 - * level. 638 - */ 639 - if (is_root_hub(hdev)) { 640 - struct usb_hcd *hcd = bus_to_hcd(hdev->bus); 641 - 642 - if (hcd->usb_phy) 643 - usb_phy_notify_port_status(hcd->usb_phy, 644 - port1 - 1, *status, *change); 645 - } 646 - } 647 - 648 625 return ret; 649 626 } 650 627
+7 -8
drivers/usb/dwc2/hcd_intr.c
··· 2015 2015 { 2016 2016 struct dwc2_qtd *qtd; 2017 2017 struct dwc2_host_chan *chan; 2018 - u32 hcint, hcintmsk; 2018 + u32 hcint, hcintraw, hcintmsk; 2019 2019 2020 2020 chan = hsotg->hc_ptr_array[chnum]; 2021 2021 2022 - hcint = dwc2_readl(hsotg, HCINT(chnum)); 2022 + hcintraw = dwc2_readl(hsotg, HCINT(chnum)); 2023 2023 hcintmsk = dwc2_readl(hsotg, HCINTMSK(chnum)); 2024 + hcint = hcintraw & hcintmsk; 2025 + dwc2_writel(hsotg, hcint, HCINT(chnum)); 2026 + 2024 2027 if (!chan) { 2025 2028 dev_err(hsotg->dev, "## hc_ptr_array for channel is NULL ##\n"); 2026 - dwc2_writel(hsotg, hcint, HCINT(chnum)); 2027 2029 return; 2028 2030 } 2029 2031 ··· 2034 2032 chnum); 2035 2033 dev_vdbg(hsotg->dev, 2036 2034 " hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n", 2037 - hcint, hcintmsk, hcint & hcintmsk); 2035 + hcintraw, hcintmsk, hcint); 2038 2036 } 2039 - 2040 - dwc2_writel(hsotg, hcint, HCINT(chnum)); 2041 2037 2042 2038 /* 2043 2039 * If we got an interrupt after someone called ··· 2046 2046 return; 2047 2047 } 2048 2048 2049 - chan->hcint = hcint; 2050 - hcint &= hcintmsk; 2049 + chan->hcint = hcintraw; 2051 2050 2052 2051 /* 2053 2052 * If the channel was halted due to a dequeue, the qtd list might
+2
drivers/usb/dwc3/core.c
··· 2034 2034 2035 2035 pm_runtime_put(dev); 2036 2036 2037 + dma_set_max_seg_size(dev, UINT_MAX); 2038 + 2037 2039 return 0; 2038 2040 2039 2041 err_exit_debugfs:
+1 -1
drivers/usb/dwc3/drd.c
··· 505 505 dwc->role_switch_default_mode = USB_DR_MODE_PERIPHERAL; 506 506 mode = DWC3_GCTL_PRTCAP_DEVICE; 507 507 } 508 + dwc3_set_mode(dwc, mode); 508 509 509 510 dwc3_role_switch.fwnode = dev_fwnode(dwc->dev); 510 511 dwc3_role_switch.set = dwc3_usb_role_switch_set; ··· 527 526 } 528 527 } 529 528 530 - dwc3_set_mode(dwc, mode); 531 529 return 0; 532 530 } 533 531 #else
+47 -22
drivers/usb/dwc3/dwc3-qcom.c
··· 546 546 pdata ? pdata->hs_phy_irq_index : -1); 547 547 if (irq > 0) { 548 548 /* Keep wakeup interrupts disabled until suspend */ 549 - irq_set_status_flags(irq, IRQ_NOAUTOEN); 550 549 ret = devm_request_threaded_irq(qcom->dev, irq, NULL, 551 550 qcom_dwc3_resume_irq, 552 - IRQF_TRIGGER_HIGH | IRQF_ONESHOT, 551 + IRQF_ONESHOT | IRQF_NO_AUTOEN, 553 552 "qcom_dwc3 HS", qcom); 554 553 if (ret) { 555 554 dev_err(qcom->dev, "hs_phy_irq failed: %d\n", ret); ··· 560 561 irq = dwc3_qcom_get_irq(pdev, "dp_hs_phy_irq", 561 562 pdata ? pdata->dp_hs_phy_irq_index : -1); 562 563 if (irq > 0) { 563 - irq_set_status_flags(irq, IRQ_NOAUTOEN); 564 564 ret = devm_request_threaded_irq(qcom->dev, irq, NULL, 565 565 qcom_dwc3_resume_irq, 566 - IRQF_TRIGGER_HIGH | IRQF_ONESHOT, 566 + IRQF_ONESHOT | IRQF_NO_AUTOEN, 567 567 "qcom_dwc3 DP_HS", qcom); 568 568 if (ret) { 569 569 dev_err(qcom->dev, "dp_hs_phy_irq failed: %d\n", ret); ··· 574 576 irq = dwc3_qcom_get_irq(pdev, "dm_hs_phy_irq", 575 577 pdata ? pdata->dm_hs_phy_irq_index : -1); 576 578 if (irq > 0) { 577 - irq_set_status_flags(irq, IRQ_NOAUTOEN); 578 579 ret = devm_request_threaded_irq(qcom->dev, irq, NULL, 579 580 qcom_dwc3_resume_irq, 580 - IRQF_TRIGGER_HIGH | IRQF_ONESHOT, 581 + IRQF_ONESHOT | IRQF_NO_AUTOEN, 581 582 "qcom_dwc3 DM_HS", qcom); 582 583 if (ret) { 583 584 dev_err(qcom->dev, "dm_hs_phy_irq failed: %d\n", ret); ··· 588 591 irq = dwc3_qcom_get_irq(pdev, "ss_phy_irq", 589 592 pdata ? 
pdata->ss_phy_irq_index : -1); 590 593 if (irq > 0) { 591 - irq_set_status_flags(irq, IRQ_NOAUTOEN); 592 594 ret = devm_request_threaded_irq(qcom->dev, irq, NULL, 593 595 qcom_dwc3_resume_irq, 594 - IRQF_TRIGGER_HIGH | IRQF_ONESHOT, 596 + IRQF_ONESHOT | IRQF_NO_AUTOEN, 595 597 "qcom_dwc3 SS", qcom); 596 598 if (ret) { 597 599 dev_err(qcom->dev, "ss_phy_irq failed: %d\n", ret); ··· 754 758 if (!qcom->dwc3) { 755 759 ret = -ENODEV; 756 760 dev_err(dev, "failed to get dwc3 platform device\n"); 761 + of_platform_depopulate(dev); 757 762 } 758 763 759 764 node_put: ··· 763 766 return ret; 764 767 } 765 768 766 - static struct platform_device * 767 - dwc3_qcom_create_urs_usb_platdev(struct device *dev) 769 + static struct platform_device *dwc3_qcom_create_urs_usb_platdev(struct device *dev) 768 770 { 771 + struct platform_device *urs_usb = NULL; 769 772 struct fwnode_handle *fwh; 770 773 struct acpi_device *adev; 771 774 char name[8]; ··· 785 788 786 789 adev = to_acpi_device_node(fwh); 787 790 if (!adev) 788 - return NULL; 791 + goto err_put_handle; 789 792 790 - return acpi_create_platform_device(adev, NULL); 793 + urs_usb = acpi_create_platform_device(adev, NULL); 794 + if (IS_ERR_OR_NULL(urs_usb)) 795 + goto err_put_handle; 796 + 797 + return urs_usb; 798 + 799 + err_put_handle: 800 + fwnode_handle_put(fwh); 801 + 802 + return urs_usb; 803 + } 804 + 805 + static void dwc3_qcom_destroy_urs_usb_platdev(struct platform_device *urs_usb) 806 + { 807 + struct fwnode_handle *fwh = urs_usb->dev.fwnode; 808 + 809 + platform_device_unregister(urs_usb); 810 + fwnode_handle_put(fwh); 791 811 } 792 812 793 813 static int dwc3_qcom_probe(struct platform_device *pdev) ··· 888 874 qcom->qscratch_base = devm_ioremap_resource(dev, parent_res); 889 875 if (IS_ERR(qcom->qscratch_base)) { 890 876 ret = PTR_ERR(qcom->qscratch_base); 891 - goto clk_disable; 877 + goto free_urs; 892 878 } 893 879 894 880 ret = dwc3_qcom_setup_irq(pdev); 895 881 if (ret) { 896 882 dev_err(dev, "failed to 
setup IRQs, err=%d\n", ret); 897 - goto clk_disable; 883 + goto free_urs; 898 884 } 899 885 900 886 /* ··· 913 899 914 900 if (ret) { 915 901 dev_err(dev, "failed to register DWC3 Core, err=%d\n", ret); 916 - goto depopulate; 902 + goto free_urs; 917 903 } 918 904 919 905 ret = dwc3_qcom_interconnect_init(qcom); ··· 945 931 interconnect_exit: 946 932 dwc3_qcom_interconnect_exit(qcom); 947 933 depopulate: 948 - if (np) 934 + if (np) { 949 935 of_platform_depopulate(&pdev->dev); 950 - else 951 - platform_device_put(pdev); 936 + } else { 937 + device_remove_software_node(&qcom->dwc3->dev); 938 + platform_device_del(qcom->dwc3); 939 + } 940 + platform_device_put(qcom->dwc3); 941 + free_urs: 942 + if (qcom->urs_usb) 943 + dwc3_qcom_destroy_urs_usb_platdev(qcom->urs_usb); 952 944 clk_disable: 953 945 for (i = qcom->num_clocks - 1; i >= 0; i--) { 954 946 clk_disable_unprepare(qcom->clks[i]); ··· 973 953 struct device *dev = &pdev->dev; 974 954 int i; 975 955 976 - device_remove_software_node(&qcom->dwc3->dev); 977 - if (np) 956 + if (np) { 978 957 of_platform_depopulate(&pdev->dev); 979 - else 980 - platform_device_put(pdev); 958 + } else { 959 + device_remove_software_node(&qcom->dwc3->dev); 960 + platform_device_del(qcom->dwc3); 961 + } 962 + platform_device_put(qcom->dwc3); 963 + 964 + if (qcom->urs_usb) 965 + dwc3_qcom_destroy_urs_usb_platdev(qcom->urs_usb); 981 966 982 967 for (i = qcom->num_clocks - 1; i >= 0; i--) { 983 968 clk_disable_unprepare(qcom->clks[i]);
+7 -1
drivers/usb/dwc3/dwc3-rtk.c
··· 183 183 184 184 ret = of_property_read_string(dwc3_np, "maximum-speed", &maximum_speed); 185 185 if (ret < 0) 186 - return USB_SPEED_UNKNOWN; 186 + goto out; 187 187 188 188 ret = match_string(speed_names, ARRAY_SIZE(speed_names), maximum_speed); 189 + 190 + out: 191 + of_node_put(dwc3_np); 189 192 190 193 return (ret < 0) ? USB_SPEED_UNKNOWN : ret; 191 194 } ··· 341 338 } 342 339 343 340 switch_usb2_role(rtk, rtk->cur_role); 341 + 342 + platform_device_put(dwc3_pdev); 343 + of_node_put(dwc3_node); 344 344 345 345 return 0; 346 346
+10 -3
drivers/usb/host/xhci-mtk-sch.c
··· 650 650 651 651 if (sch_ep->ep_type == ISOC_OUT_EP) { 652 652 for (j = 0; j < sch_ep->num_budget_microframes; j++) { 653 - k = XHCI_MTK_BW_INDEX(base + j + CS_OFFSET); 654 - /* use cs to indicate existence of in-ss @(base+j) */ 655 - if (tt->fs_bus_bw_in[k]) 653 + k = XHCI_MTK_BW_INDEX(base + j); 654 + if (tt->in_ss_cnt[k]) 656 655 return -ESCH_SS_OVERLAP; 657 656 } 658 657 } else if (sch_ep->ep_type == ISOC_IN_EP || sch_ep->ep_type == INT_IN_EP) { ··· 767 768 fs_bus_bw[k] -= (u16)sch_ep->bw_budget_table[j]; 768 769 tt->fs_frame_bw[f] -= (u16)sch_ep->bw_budget_table[j]; 769 770 } 771 + } 772 + 773 + if (sch_ep->ep_type == ISOC_IN_EP || sch_ep->ep_type == INT_IN_EP) { 774 + k = XHCI_MTK_BW_INDEX(base); 775 + if (used) 776 + tt->in_ss_cnt[k]++; 777 + else 778 + tt->in_ss_cnt[k]--; 770 779 } 771 780 } 772 781
+2
drivers/usb/host/xhci-mtk.h
··· 38 38 * @fs_bus_bw_in: save bandwidth used by FS/LS IN eps in each uframes 39 39 * @ls_bus_bw: save bandwidth used by LS eps in each uframes 40 40 * @fs_frame_bw: save bandwidth used by FS/LS eps in each FS frames 41 + * @in_ss_cnt: the count of Start-Split for IN eps 41 42 * @ep_list: Endpoints using this TT 42 43 */ 43 44 struct mu3h_sch_tt { ··· 46 45 u16 fs_bus_bw_in[XHCI_MTK_MAX_ESIT]; 47 46 u8 ls_bus_bw[XHCI_MTK_MAX_ESIT]; 48 47 u16 fs_frame_bw[XHCI_MTK_FRAMES_CNT]; 48 + u8 in_ss_cnt[XHCI_MTK_MAX_ESIT]; 49 49 struct list_head ep_list; 50 50 }; 51 51
+30 -20
drivers/usb/host/xhci-plat.c
··· 13 13 #include <linux/module.h> 14 14 #include <linux/pci.h> 15 15 #include <linux/of.h> 16 + #include <linux/of_device.h> 16 17 #include <linux/platform_device.h> 17 18 #include <linux/usb/phy.h> 18 19 #include <linux/slab.h> ··· 149 148 int ret; 150 149 int irq; 151 150 struct xhci_plat_priv *priv = NULL; 152 - 151 + bool of_match; 153 152 154 153 if (usb_disabled()) 155 154 return -ENODEV; ··· 254 253 &xhci->imod_interval); 255 254 } 256 255 257 - hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0); 258 - if (IS_ERR(hcd->usb_phy)) { 259 - ret = PTR_ERR(hcd->usb_phy); 260 - if (ret == -EPROBE_DEFER) 261 - goto disable_clk; 262 - hcd->usb_phy = NULL; 263 - } else { 264 - ret = usb_phy_init(hcd->usb_phy); 265 - if (ret) 266 - goto disable_clk; 256 + /* 257 + * Drivers such as dwc3 manages PHYs themself (and rely on driver name 258 + * matching for the xhci platform device). 259 + */ 260 + of_match = of_match_device(pdev->dev.driver->of_match_table, &pdev->dev); 261 + if (of_match) { 262 + hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0); 263 + if (IS_ERR(hcd->usb_phy)) { 264 + ret = PTR_ERR(hcd->usb_phy); 265 + if (ret == -EPROBE_DEFER) 266 + goto disable_clk; 267 + hcd->usb_phy = NULL; 268 + } else { 269 + ret = usb_phy_init(hcd->usb_phy); 270 + if (ret) 271 + goto disable_clk; 272 + } 267 273 } 268 274 269 275 hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node); ··· 293 285 goto dealloc_usb2_hcd; 294 286 } 295 287 296 - xhci->shared_hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, 297 - "usb-phy", 1); 298 - if (IS_ERR(xhci->shared_hcd->usb_phy)) { 299 - xhci->shared_hcd->usb_phy = NULL; 300 - } else { 301 - ret = usb_phy_init(xhci->shared_hcd->usb_phy); 302 - if (ret) 303 - dev_err(sysdev, "%s init usb3phy fail (ret=%d)\n", 304 - __func__, ret); 288 + if (of_match) { 289 + xhci->shared_hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, 290 + "usb-phy", 1); 291 + if (IS_ERR(xhci->shared_hcd->usb_phy)) { 292 + 
xhci->shared_hcd->usb_phy = NULL; 293 + } else { 294 + ret = usb_phy_init(xhci->shared_hcd->usb_phy); 295 + if (ret) 296 + dev_err(sysdev, "%s init usb3phy fail (ret=%d)\n", 297 + __func__, ret); 298 + } 305 299 } 306 300 307 301 xhci->shared_hcd->tpl_support = hcd->tpl_support;
+2
drivers/usb/misc/onboard_usb_hub.c
··· 432 432 { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2412) }, /* USB2412 USB 2.0 */ 433 433 { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2514) }, /* USB2514B USB 2.0 */ 434 434 { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2517) }, /* USB2517 USB 2.0 */ 435 + { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2744) }, /* USB5744 USB 2.0 */ 436 + { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x5744) }, /* USB5744 USB 3.0 */ 435 437 { USB_DEVICE(VENDOR_ID_REALTEK, 0x0411) }, /* RTS5411 USB 3.1 */ 436 438 { USB_DEVICE(VENDOR_ID_REALTEK, 0x5411) }, /* RTS5411 USB 2.1 */ 437 439 { USB_DEVICE(VENDOR_ID_REALTEK, 0x0414) }, /* RTS5414 USB 3.2 */
+7
drivers/usb/misc/onboard_usb_hub.h
··· 16 16 .num_supplies = 1, 17 17 }; 18 18 19 + static const struct onboard_hub_pdata microchip_usb5744_data = { 20 + .reset_us = 0, 21 + .num_supplies = 2, 22 + }; 23 + 19 24 static const struct onboard_hub_pdata realtek_rts5411_data = { 20 25 .reset_us = 0, 21 26 .num_supplies = 1, ··· 55 50 { .compatible = "usb424,2412", .data = &microchip_usb424_data, }, 56 51 { .compatible = "usb424,2514", .data = &microchip_usb424_data, }, 57 52 { .compatible = "usb424,2517", .data = &microchip_usb424_data, }, 53 + { .compatible = "usb424,2744", .data = &microchip_usb5744_data, }, 54 + { .compatible = "usb424,5744", .data = &microchip_usb5744_data, }, 58 55 { .compatible = "usb451,8140", .data = &ti_tusb8041_data, }, 59 56 { .compatible = "usb451,8142", .data = &ti_tusb8041_data, }, 60 57 { .compatible = "usb4b4,6504", .data = &cypress_hx3_data, },
+4 -13
drivers/usb/misc/usb-ljca.c
··· 457 457 u64 adr, u8 id) 458 458 { 459 459 struct ljca_match_ids_walk_data wd = { 0 }; 460 - struct acpi_device *parent, *adev; 461 460 struct device *dev = adap->dev; 461 + struct acpi_device *parent; 462 462 char uid[4]; 463 463 464 464 parent = ACPI_COMPANION(dev); ··· 466 466 return; 467 467 468 468 /* 469 - * get auxdev ACPI handle from the ACPI device directly 470 - * under the parent that matches _ADR. 471 - */ 472 - adev = acpi_find_child_device(parent, adr, false); 473 - if (adev) { 474 - ACPI_COMPANION_SET(&auxdev->dev, adev); 475 - return; 476 - } 477 - 478 - /* 479 - * _ADR is a grey area in the ACPI specification, some 469 + * Currently LJCA hw doesn't use _ADR instead the shipped 480 470 * platforms use _HID to distinguish children devices. 481 471 */ 482 472 switch (adr) { ··· 646 656 unsigned int i; 647 657 int ret; 648 658 659 + /* Not all LJCA chips implement SPI, a timeout reading the descriptors is normal */ 649 660 ret = ljca_send(adap, LJCA_CLIENT_MNG, LJCA_MNG_ENUM_SPI, NULL, 0, buf, 650 661 sizeof(buf), true, LJCA_ENUM_CLIENT_TIMEOUT_MS); 651 662 if (ret < 0) 652 - return ret; 663 + return (ret == -ETIMEDOUT) ? 0 : ret; 653 664 654 665 /* check firmware response */ 655 666 desc = (struct ljca_spi_descriptor *)buf;
+8 -3
drivers/usb/serial/option.c
··· 203 203 #define DELL_PRODUCT_5829E_ESIM 0x81e4 204 204 #define DELL_PRODUCT_5829E 0x81e6 205 205 206 - #define DELL_PRODUCT_FM101R 0x8213 207 - #define DELL_PRODUCT_FM101R_ESIM 0x8215 206 + #define DELL_PRODUCT_FM101R_ESIM 0x8213 207 + #define DELL_PRODUCT_FM101R 0x8215 208 208 209 209 #define KYOCERA_VENDOR_ID 0x0c88 210 210 #define KYOCERA_PRODUCT_KPC650 0x17da ··· 609 609 #define UNISOC_VENDOR_ID 0x1782 610 610 /* TOZED LT70-C based on UNISOC SL8563 uses UNISOC's vendor ID */ 611 611 #define TOZED_PRODUCT_LT70C 0x4055 612 + /* Luat Air72*U series based on UNISOC UIS8910 uses UNISOC's vendor ID */ 613 + #define LUAT_PRODUCT_AIR720U 0x4e00 612 614 613 615 /* Device flags */ 614 616 ··· 1548 1546 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) }, 1549 1547 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff), 1550 1548 .driver_info = RSVD(4) }, 1551 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0189, 0xff, 0xff, 0xff) }, 1549 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0189, 0xff, 0xff, 0xff), 1550 + .driver_info = RSVD(4) }, 1552 1551 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0191, 0xff, 0xff, 0xff), /* ZTE EuFi890 */ 1553 1552 .driver_info = RSVD(4) }, 1554 1553 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0196, 0xff, 0xff, 0xff) }, ··· 2252 2249 .driver_info = RSVD(4) | RSVD(5) | RSVD(6) }, 2253 2250 { USB_DEVICE(0x1782, 0x4d10) }, /* Fibocom L610 (AT mode) */ 2254 2251 { USB_DEVICE_INTERFACE_CLASS(0x1782, 0x4d11, 0xff) }, /* Fibocom L610 (ECM/RNDIS mode) */ 2252 + { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x0001, 0xff, 0xff, 0xff) }, /* Fibocom L716-EU (ECM/RNDIS mode) */ 2255 2253 { USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */ 2256 2254 .driver_info = RSVD(4) | RSVD(5) }, 2257 2255 { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */ ··· 2275 2271 { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) }, 2276 2272 { 
USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) }, 2277 2273 { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) }, 2274 + { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) }, 2278 2275 { } /* Terminating entry */ 2279 2276 }; 2280 2277 MODULE_DEVICE_TABLE(usb, option_ids);
+11 -1
drivers/usb/typec/tcpm/tcpm.c
··· 4273 4273 current_lim = PD_P_SNK_STDBY_MW / 5; 4274 4274 tcpm_set_current_limit(port, current_lim, 5000); 4275 4275 /* Not sink vbus if operational current is 0mA */ 4276 - tcpm_set_charge(port, !!pdo_max_current(port->snk_pdo[0])); 4276 + tcpm_set_charge(port, !port->pd_supported || 4277 + pdo_max_current(port->snk_pdo[0])); 4277 4278 4278 4279 if (!port->pd_supported) 4279 4280 tcpm_set_state(port, SNK_READY, 0); ··· 5391 5390 tcpm_log_force(port, "Received hard reset"); 5392 5391 if (port->bist_request == BDO_MODE_TESTDATA && port->tcpc->set_bist_data) 5393 5392 port->tcpc->set_bist_data(port->tcpc, false); 5393 + 5394 + switch (port->state) { 5395 + case ERROR_RECOVERY: 5396 + case PORT_RESET: 5397 + case PORT_RESET_WAIT_OFF: 5398 + return; 5399 + default: 5400 + break; 5401 + } 5394 5402 5395 5403 if (port->ams != NONE_AMS) 5396 5404 port->ams = NONE_AMS;
+9 -5
drivers/usb/typec/tipd/core.c
··· 968 968 ret = of_property_match_string(np, "reg-names", "patch-address"); 969 969 if (ret < 0) { 970 970 dev_err(tps->dev, "failed to get patch-address %d\n", ret); 971 - return ret; 971 + goto release_fw; 972 972 } 973 973 974 974 ret = of_property_read_u32_index(np, "reg", ret, &addr); 975 975 if (ret) 976 - return ret; 976 + goto release_fw; 977 977 978 978 if (addr == 0 || (addr >= 0x20 && addr <= 0x23)) { 979 979 dev_err(tps->dev, "wrong patch address %u\n", addr); 980 - return -EINVAL; 980 + ret = -EINVAL; 981 + goto release_fw; 981 982 } 982 983 983 984 bpms_data.addr = (u8)addr; ··· 1227 1226 TPS_REG_INT_PLUG_EVENT; 1228 1227 } 1229 1228 1230 - tps->data = device_get_match_data(tps->dev); 1229 + if (dev_fwnode(tps->dev)) 1230 + tps->data = device_get_match_data(tps->dev); 1231 + else 1232 + tps->data = i2c_get_match_data(client); 1231 1233 if (!tps->data) 1232 1234 return -EINVAL; 1233 1235 ··· 1429 1425 MODULE_DEVICE_TABLE(of, tps6598x_of_match); 1430 1426 1431 1427 static const struct i2c_device_id tps6598x_id[] = { 1432 - { "tps6598x" }, 1428 + { "tps6598x", (kernel_ulong_t)&tps6598x_data }, 1433 1429 { } 1434 1430 }; 1435 1431 MODULE_DEVICE_TABLE(i2c, tps6598x_id);
+1 -1
drivers/xen/privcmd.c
··· 1115 1115 spinlock_t lock; /* Protects ioeventfds list */ 1116 1116 struct list_head ioeventfds; 1117 1117 struct list_head list; 1118 - struct ioreq_port ports[0]; 1118 + struct ioreq_port ports[] __counted_by(vcpus); 1119 1119 }; 1120 1120 1121 1121 static irqreturn_t ioeventfd_interrupt(int irq, void *dev_id)
+1
drivers/xen/swiotlb-xen.c
··· 405 405 .get_sgtable = dma_common_get_sgtable, 406 406 .alloc_pages = dma_common_alloc_pages, 407 407 .free_pages = dma_common_free_pages, 408 + .max_mapping_size = swiotlb_max_mapping_size, 408 409 };
+2 -2
fs/afs/dynroot.c
··· 132 132 133 133 ret = dns_query(net->net, "afsdb", name, len, "srv=1", 134 134 NULL, NULL, false); 135 - if (ret == -ENODATA) 136 - ret = -EDESTADDRREQ; 135 + if (ret == -ENODATA || ret == -ENOKEY) 136 + ret = -ENOENT; 137 137 return ret; 138 138 } 139 139
+1
fs/afs/internal.h
··· 553 553 }; 554 554 555 555 struct afs_server_list { 556 + struct rcu_head rcu; 556 557 afs_volid_t vids[AFS_MAXTYPES]; /* Volume IDs */ 557 558 refcount_t usage; 558 559 unsigned char nr_servers;
+1 -1
fs/afs/server_list.c
··· 17 17 for (i = 0; i < slist->nr_servers; i++) 18 18 afs_unuse_server(net, slist->servers[i].server, 19 19 afs_server_trace_put_slist); 20 - kfree(slist); 20 + kfree_rcu(slist, rcu); 21 21 } 22 22 } 23 23
+4
fs/afs/super.c
··· 407 407 return PTR_ERR(volume); 408 408 409 409 ctx->volume = volume; 410 + if (volume->type != AFSVL_RWVOL) { 411 + ctx->flock_mode = afs_flock_mode_local; 412 + fc->sb_flags |= SB_RDONLY; 413 + } 410 414 } 411 415 412 416 return 0;
+10
fs/afs/vl_rotate.c
··· 58 58 } 59 59 60 60 /* Status load is ordered after lookup counter load */ 61 + if (cell->dns_status == DNS_LOOKUP_GOT_NOT_FOUND) { 62 + pr_warn("No record of cell %s\n", cell->name); 63 + vc->error = -ENOENT; 64 + return false; 65 + } 66 + 61 67 if (cell->dns_source == DNS_RECORD_UNAVAILABLE) { 62 68 vc->error = -EDESTADDRREQ; 63 69 return false; ··· 291 285 */ 292 286 static void afs_vl_dump_edestaddrreq(const struct afs_vl_cursor *vc) 293 287 { 288 + struct afs_cell *cell = vc->cell; 294 289 static int count; 295 290 int i; 296 291 ··· 301 294 302 295 rcu_read_lock(); 303 296 pr_notice("EDESTADDR occurred\n"); 297 + pr_notice("CELL: %s err=%d\n", cell->name, cell->error); 298 + pr_notice("DNS: src=%u st=%u lc=%x\n", 299 + cell->dns_source, cell->dns_status, cell->dns_lookup_count); 304 300 pr_notice("VC: ut=%lx ix=%u ni=%hu fl=%hx err=%hd\n", 305 301 vc->untried, vc->index, vc->nr_iterations, vc->flags, vc->error); 306 302
+21 -35
fs/autofs/inode.c
··· 309 309 struct autofs_fs_context *ctx = fc->fs_private; 310 310 struct autofs_sb_info *sbi = s->s_fs_info; 311 311 struct inode *root_inode; 312 - struct dentry *root; 313 312 struct autofs_info *ino; 314 - int ret = -ENOMEM; 315 313 316 314 pr_debug("starting up, sbi = %p\n", sbi); 317 315 ··· 326 328 */ 327 329 ino = autofs_new_ino(sbi); 328 330 if (!ino) 329 - goto fail; 331 + return -ENOMEM; 330 332 331 333 root_inode = autofs_get_inode(s, S_IFDIR | 0755); 334 + if (!root_inode) 335 + return -ENOMEM; 336 + 332 337 root_inode->i_uid = ctx->uid; 333 338 root_inode->i_gid = ctx->gid; 339 + root_inode->i_fop = &autofs_root_operations; 340 + root_inode->i_op = &autofs_dir_inode_operations; 334 341 335 - root = d_make_root(root_inode); 336 - if (!root) 337 - goto fail_ino; 338 - 339 - root->d_fsdata = ino; 342 + s->s_root = d_make_root(root_inode); 343 + if (unlikely(!s->s_root)) { 344 + autofs_free_ino(ino); 345 + return -ENOMEM; 346 + } 347 + s->s_root->d_fsdata = ino; 340 348 341 349 if (ctx->pgrp_set) { 342 350 sbi->oz_pgrp = find_get_pid(ctx->pgrp); 343 - if (!sbi->oz_pgrp) { 344 - ret = invalf(fc, "Could not find process group %d", 345 - ctx->pgrp); 346 - goto fail_dput; 347 - } 348 - } else { 351 + if (!sbi->oz_pgrp) 352 + return invalf(fc, "Could not find process group %d", 353 + ctx->pgrp); 354 + } else 349 355 sbi->oz_pgrp = get_task_pid(current, PIDTYPE_PGID); 350 - } 351 356 352 357 if (autofs_type_trigger(sbi->type)) 353 - __managed_dentry_set_managed(root); 354 - 355 - root_inode->i_fop = &autofs_root_operations; 356 - root_inode->i_op = &autofs_dir_inode_operations; 358 + /* s->s_root won't be contended so there's little to 359 + * be gained by not taking the d_lock when setting 360 + * d_flags, even when a lot mounts are being done. 
361 + */ 362 + managed_dentry_set_managed(s->s_root); 357 363 358 364 pr_debug("pipe fd = %d, pgrp = %u\n", 359 365 sbi->pipefd, pid_nr(sbi->oz_pgrp)); 360 366 361 367 sbi->flags &= ~AUTOFS_SBI_CATATONIC; 362 - 363 - /* 364 - * Success! Install the root dentry now to indicate completion. 365 - */ 366 - s->s_root = root; 367 368 return 0; 368 - 369 - /* 370 - * Failure ... clean up. 371 - */ 372 - fail_dput: 373 - dput(root); 374 - goto fail; 375 - fail_ino: 376 - autofs_free_ino(ino); 377 - fail: 378 - return ret; 379 369 } 380 370 381 371 /*
+1
fs/btrfs/disk-io.c
··· 3213 3213 goto fail_alloc; 3214 3214 } 3215 3215 3216 + btrfs_info(fs_info, "first mount of filesystem %pU", disk_super->fsid); 3216 3217 /* 3217 3218 * Verify the type first, if that or the checksum value are 3218 3219 * corrupted, we'll find out
+8 -3
fs/btrfs/extent_io.c
··· 674 674 * the array will be skipped 675 675 * 676 676 * Return: 0 if all pages were able to be allocated; 677 - * -ENOMEM otherwise, and the caller is responsible for freeing all 678 - * non-null page pointers in the array. 677 + * -ENOMEM otherwise, the partially allocated pages would be freed and 678 + * the array slots zeroed 679 679 */ 680 680 int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array) 681 681 { ··· 694 694 * though alloc_pages_bulk_array() falls back to alloc_page() 695 695 * if it could not bulk-allocate. So we must be out of memory. 696 696 */ 697 - if (allocated == last) 697 + if (allocated == last) { 698 + for (int i = 0; i < allocated; i++) { 699 + __free_page(page_array[i]); 700 + page_array[i] = NULL; 701 + } 698 702 return -ENOMEM; 703 + } 699 704 700 705 memalloc_retry_wait(GFP_NOFS); 701 706 }
+1
fs/btrfs/ioctl.c
··· 4356 4356 arg->clone_sources = compat_ptr(args32.clone_sources); 4357 4357 arg->parent_root = args32.parent_root; 4358 4358 arg->flags = args32.flags; 4359 + arg->version = args32.version; 4359 4360 memcpy(arg->reserved, args32.reserved, 4360 4361 sizeof(args32.reserved)); 4361 4362 #else
+2
fs/btrfs/ref-verify.c
··· 794 794 dump_ref_action(fs_info, ra); 795 795 kfree(ref); 796 796 kfree(ra); 797 + kfree(re); 797 798 goto out_unlock; 798 799 } else if (be->num_refs == 0) { 799 800 btrfs_err(fs_info, ··· 804 803 dump_ref_action(fs_info, ra); 805 804 kfree(ref); 806 805 kfree(ra); 806 + kfree(re); 807 807 goto out_unlock; 808 808 } 809 809
+1 -1
fs/btrfs/send.c
··· 8158 8158 } 8159 8159 8160 8160 sctx->send_filp = fget(arg->send_fd); 8161 - if (!sctx->send_filp) { 8161 + if (!sctx->send_filp || !(sctx->send_filp->f_mode & FMODE_WRITE)) { 8162 8162 ret = -EBADF; 8163 8163 goto out; 8164 8164 }
+4 -1
fs/btrfs/super.c
··· 80 80 81 81 static void btrfs_put_super(struct super_block *sb) 82 82 { 83 - close_ctree(btrfs_sb(sb)); 83 + struct btrfs_fs_info *fs_info = btrfs_sb(sb); 84 + 85 + btrfs_info(fs_info, "last unmount of filesystem %pU", fs_info->fs_devices->fsid); 86 + close_ctree(fs_info); 84 87 } 85 88 86 89 enum {
+1 -1
fs/btrfs/transaction.c
··· 1774 1774 btrfs_release_path(path); 1775 1775 1776 1776 ret = btrfs_create_qgroup(trans, objectid); 1777 - if (ret) { 1777 + if (ret && ret != -EEXIST) { 1778 1778 btrfs_abort_transaction(trans, ret); 1779 1779 goto fail; 1780 1780 }
+39
fs/btrfs/tree-checker.c
··· 31 31 #include "inode-item.h" 32 32 #include "dir-item.h" 33 33 #include "raid-stripe-tree.h" 34 + #include "extent-tree.h" 34 35 35 36 /* 36 37 * Error message should follow the following format: ··· 1277 1276 unsigned long ptr; /* Current pointer inside inline refs */ 1278 1277 unsigned long end; /* Extent item end */ 1279 1278 const u32 item_size = btrfs_item_size(leaf, slot); 1279 + u8 last_type = 0; 1280 + u64 last_seq = U64_MAX; 1280 1281 u64 flags; 1281 1282 u64 generation; 1282 1283 u64 total_refs; /* Total refs in btrfs_extent_item */ ··· 1325 1322 * 2.2) Ref type specific data 1326 1323 * Either using btrfs_extent_inline_ref::offset, or specific 1327 1324 * data structure. 1325 + * 1326 + * All above inline items should follow the order: 1327 + * 1328 + * - All btrfs_extent_inline_ref::type should be in an ascending 1329 + * order 1330 + * 1331 + * - Within the same type, the items should follow a descending 1332 + * order by their sequence number. The sequence number is 1333 + * determined by: 1334 + * * btrfs_extent_inline_ref::offset for all types other than 1335 + * EXTENT_DATA_REF 1336 + * * hash_extent_data_ref() for EXTENT_DATA_REF 1328 1337 */ 1329 1338 if (unlikely(item_size < sizeof(*ei))) { 1330 1339 extent_err(leaf, slot, ··· 1418 1403 struct btrfs_extent_inline_ref *iref; 1419 1404 struct btrfs_extent_data_ref *dref; 1420 1405 struct btrfs_shared_data_ref *sref; 1406 + u64 seq; 1421 1407 u64 dref_offset; 1422 1408 u64 inline_offset; 1423 1409 u8 inline_type; ··· 1432 1416 iref = (struct btrfs_extent_inline_ref *)ptr; 1433 1417 inline_type = btrfs_extent_inline_ref_type(leaf, iref); 1434 1418 inline_offset = btrfs_extent_inline_ref_offset(leaf, iref); 1419 + seq = inline_offset; 1435 1420 if (unlikely(ptr + btrfs_extent_inline_ref_size(inline_type) > end)) { 1436 1421 extent_err(leaf, slot, 1437 1422 "inline ref item overflows extent item, ptr %lu iref size %u end %lu", ··· 1463 1446 case BTRFS_EXTENT_DATA_REF_KEY: 1464 1447 dref = (struct 
btrfs_extent_data_ref *)(&iref->offset); 1465 1448 dref_offset = btrfs_extent_data_ref_offset(leaf, dref); 1449 + seq = hash_extent_data_ref( 1450 + btrfs_extent_data_ref_root(leaf, dref), 1451 + btrfs_extent_data_ref_objectid(leaf, dref), 1452 + btrfs_extent_data_ref_offset(leaf, dref)); 1466 1453 if (unlikely(!IS_ALIGNED(dref_offset, 1467 1454 fs_info->sectorsize))) { 1468 1455 extent_err(leaf, slot, ··· 1496 1475 inline_type); 1497 1476 return -EUCLEAN; 1498 1477 } 1478 + if (inline_type < last_type) { 1479 + extent_err(leaf, slot, 1480 + "inline ref out-of-order: has type %u, prev type %u", 1481 + inline_type, last_type); 1482 + return -EUCLEAN; 1483 + } 1484 + /* Type changed, allow the sequence starts from U64_MAX again. */ 1485 + if (inline_type > last_type) 1486 + last_seq = U64_MAX; 1487 + if (seq > last_seq) { 1488 + extent_err(leaf, slot, 1489 + "inline ref out-of-order: has type %u offset %llu seq 0x%llx, prev type %u seq 0x%llx", 1490 + inline_type, inline_offset, seq, 1491 + last_type, last_seq); 1492 + return -EUCLEAN; 1493 + } 1494 + last_type = inline_type; 1495 + last_seq = seq; 1499 1496 ptr += btrfs_extent_inline_ref_size(inline_type); 1500 1497 } 1501 1498 /* No padding is allowed */
+5 -4
fs/btrfs/volumes.c
··· 3006 3006 read_unlock(&em_tree->lock); 3007 3007 3008 3008 if (!em) { 3009 - btrfs_crit(fs_info, "unable to find logical %llu length %llu", 3009 + btrfs_crit(fs_info, 3010 + "unable to find chunk map for logical %llu length %llu", 3010 3011 logical, length); 3011 3012 return ERR_PTR(-EINVAL); 3012 3013 } 3013 3014 3014 - if (em->start > logical || em->start + em->len < logical) { 3015 + if (em->start > logical || em->start + em->len <= logical) { 3015 3016 btrfs_crit(fs_info, 3016 - "found a bad mapping, wanted %llu-%llu, found %llu-%llu", 3017 - logical, length, em->start, em->start + em->len); 3017 + "found a bad chunk map, wanted %llu-%llu, found %llu-%llu", 3018 + logical, logical + length, em->start, em->start + em->len); 3018 3019 free_extent_map(em); 3019 3020 return ERR_PTR(-EINVAL); 3020 3021 }
+100
fs/debugfs/file.c
··· 84 84 struct debugfs_fsdata *fsd; 85 85 void *d_fsd; 86 86 87 + /* 88 + * This could only happen if some debugfs user erroneously calls 89 + * debugfs_file_get() on a dentry that isn't even a file, let 90 + * them know about it. 91 + */ 92 + if (WARN_ON(!d_is_reg(dentry))) 93 + return -EINVAL; 94 + 87 95 d_fsd = READ_ONCE(dentry->d_fsdata); 88 96 if (!((unsigned long)d_fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)) { 89 97 fsd = d_fsd; ··· 108 100 kfree(fsd); 109 101 fsd = READ_ONCE(dentry->d_fsdata); 110 102 } 103 + #ifdef CONFIG_LOCKDEP 104 + fsd->lock_name = kasprintf(GFP_KERNEL, "debugfs:%pd", dentry); 105 + lockdep_register_key(&fsd->key); 106 + lockdep_init_map(&fsd->lockdep_map, fsd->lock_name ?: "debugfs", 107 + &fsd->key, 0); 108 + #endif 109 + INIT_LIST_HEAD(&fsd->cancellations); 110 + mutex_init(&fsd->cancellations_mtx); 111 111 } 112 112 113 113 /* ··· 131 115 132 116 if (!refcount_inc_not_zero(&fsd->active_users)) 133 117 return -EIO; 118 + 119 + lock_map_acquire_read(&fsd->lockdep_map); 134 120 135 121 return 0; 136 122 } ··· 151 133 { 152 134 struct debugfs_fsdata *fsd = READ_ONCE(dentry->d_fsdata); 153 135 136 + lock_map_release(&fsd->lockdep_map); 137 + 154 138 if (refcount_dec_and_test(&fsd->active_users)) 155 139 complete(&fsd->active_users_drained); 156 140 } 157 141 EXPORT_SYMBOL_GPL(debugfs_file_put); 142 + 143 + /** 144 + * debugfs_enter_cancellation - enter a debugfs cancellation 145 + * @file: the file being accessed 146 + * @cancellation: the cancellation object, the cancel callback 147 + * inside of it must be initialized 148 + * 149 + * When a debugfs file is removed it needs to wait for all active 150 + * operations to complete. However, the operation itself may need 151 + * to wait for hardware or completion of some asynchronous process 152 + * or similar. As such, it may need to be cancelled to avoid long 153 + * waits or even deadlocks. 
154 + * 155 + * This function can be used inside a debugfs handler that may 156 + * need to be cancelled. As soon as this function is called, the 157 + * cancellation's 'cancel' callback may be called, at which point 158 + * the caller should proceed to call debugfs_leave_cancellation() 159 + * and leave the debugfs handler function as soon as possible. 160 + * Note that the 'cancel' callback is only ever called in the 161 + * context of some kind of debugfs_remove(). 162 + * 163 + * This function must be paired with debugfs_leave_cancellation(). 164 + */ 165 + void debugfs_enter_cancellation(struct file *file, 166 + struct debugfs_cancellation *cancellation) 167 + { 168 + struct debugfs_fsdata *fsd; 169 + struct dentry *dentry = F_DENTRY(file); 170 + 171 + INIT_LIST_HEAD(&cancellation->list); 172 + 173 + if (WARN_ON(!d_is_reg(dentry))) 174 + return; 175 + 176 + if (WARN_ON(!cancellation->cancel)) 177 + return; 178 + 179 + fsd = READ_ONCE(dentry->d_fsdata); 180 + if (WARN_ON(!fsd || 181 + ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT))) 182 + return; 183 + 184 + mutex_lock(&fsd->cancellations_mtx); 185 + list_add(&cancellation->list, &fsd->cancellations); 186 + mutex_unlock(&fsd->cancellations_mtx); 187 + 188 + /* if we're already removing wake it up to cancel */ 189 + if (d_unlinked(dentry)) 190 + complete(&fsd->active_users_drained); 191 + } 192 + EXPORT_SYMBOL_GPL(debugfs_enter_cancellation); 193 + 194 + /** 195 + * debugfs_leave_cancellation - leave cancellation section 196 + * @file: the file being accessed 197 + * @cancellation: the cancellation previously registered with 198 + * debugfs_enter_cancellation() 199 + * 200 + * See the documentation of debugfs_enter_cancellation(). 
201 + */ 202 + void debugfs_leave_cancellation(struct file *file, 203 + struct debugfs_cancellation *cancellation) 204 + { 205 + struct debugfs_fsdata *fsd; 206 + struct dentry *dentry = F_DENTRY(file); 207 + 208 + if (WARN_ON(!d_is_reg(dentry))) 209 + return; 210 + 211 + fsd = READ_ONCE(dentry->d_fsdata); 212 + if (WARN_ON(!fsd || 213 + ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT))) 214 + return; 215 + 216 + mutex_lock(&fsd->cancellations_mtx); 217 + if (!list_empty(&cancellation->list)) 218 + list_del(&cancellation->list); 219 + mutex_unlock(&fsd->cancellations_mtx); 220 + } 221 + EXPORT_SYMBOL_GPL(debugfs_leave_cancellation); 158 222 159 223 /* 160 224 * Only permit access to world-readable files when the kernel is locked down.
+63 -8
fs/debugfs/inode.c
··· 236 236 237 237 static void debugfs_release_dentry(struct dentry *dentry) 238 238 { 239 - void *fsd = dentry->d_fsdata; 239 + struct debugfs_fsdata *fsd = dentry->d_fsdata; 240 240 241 - if (!((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)) 242 - kfree(dentry->d_fsdata); 241 + if ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT) 242 + return; 243 + 244 + /* check it wasn't a dir (no fsdata) or automount (no real_fops) */ 245 + if (fsd && fsd->real_fops) { 246 + #ifdef CONFIG_LOCKDEP 247 + lockdep_unregister_key(&fsd->key); 248 + kfree(fsd->lock_name); 249 + #endif 250 + WARN_ON(!list_empty(&fsd->cancellations)); 251 + mutex_destroy(&fsd->cancellations_mtx); 252 + } 253 + 254 + kfree(fsd); 243 255 } 244 256 245 257 static struct vfsmount *debugfs_automount(struct path *path) 246 258 { 247 - debugfs_automount_t f; 248 - f = (debugfs_automount_t)path->dentry->d_fsdata; 249 - return f(path->dentry, d_inode(path->dentry)->i_private); 259 + struct debugfs_fsdata *fsd = path->dentry->d_fsdata; 260 + 261 + return fsd->automount(path->dentry, d_inode(path->dentry)->i_private); 250 262 } 251 263 252 264 static const struct dentry_operations debugfs_dops = { ··· 646 634 void *data) 647 635 { 648 636 struct dentry *dentry = start_creating(name, parent); 637 + struct debugfs_fsdata *fsd; 649 638 struct inode *inode; 650 639 651 640 if (IS_ERR(dentry)) 652 641 return dentry; 653 642 643 + fsd = kzalloc(sizeof(*fsd), GFP_KERNEL); 644 + if (!fsd) { 645 + failed_creating(dentry); 646 + return ERR_PTR(-ENOMEM); 647 + } 648 + 649 + fsd->automount = f; 650 + 654 651 if (!(debugfs_allow & DEBUGFS_ALLOW_API)) { 655 652 failed_creating(dentry); 653 + kfree(fsd); 656 654 return ERR_PTR(-EPERM); 657 655 } 658 656 ··· 670 648 if (unlikely(!inode)) { 671 649 pr_err("out of free dentries, can not create automount '%s'\n", 672 650 name); 651 + kfree(fsd); 673 652 return failed_creating(dentry); 674 653 } 675 654 676 655 make_empty_dir_inode(inode); 677 656 inode->i_flags |= 
S_AUTOMOUNT; 678 657 inode->i_private = data; 679 - dentry->d_fsdata = (void *)f; 658 + dentry->d_fsdata = fsd; 680 659 /* directory inodes start off with i_nlink == 2 (for "." entry) */ 681 660 inc_nlink(inode); 682 661 d_instantiate(dentry, inode); ··· 754 731 fsd = READ_ONCE(dentry->d_fsdata); 755 732 if ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT) 756 733 return; 757 - if (!refcount_dec_and_test(&fsd->active_users)) 734 + 735 + lock_map_acquire(&fsd->lockdep_map); 736 + lock_map_release(&fsd->lockdep_map); 737 + 738 + /* if we hit zero, just wait for all to finish */ 739 + if (!refcount_dec_and_test(&fsd->active_users)) { 758 740 wait_for_completion(&fsd->active_users_drained); 741 + return; 742 + } 743 + 744 + /* if we didn't hit zero, try to cancel any we can */ 745 + while (refcount_read(&fsd->active_users)) { 746 + struct debugfs_cancellation *c; 747 + 748 + /* 749 + * Lock the cancellations. Note that the cancellations 750 + * structs are meant to be on the stack, so we need to 751 + * ensure we either use them here or don't touch them, 752 + * and debugfs_leave_cancellation() will wait for this 753 + * to be finished processing before exiting one. It may 754 + * of course win and remove the cancellation, but then 755 + * chances are we never even got into this bit, we only 756 + * do if the refcount isn't zero already. 757 + */ 758 + mutex_lock(&fsd->cancellations_mtx); 759 + while ((c = list_first_entry_or_null(&fsd->cancellations, 760 + typeof(*c), list))) { 761 + list_del_init(&c->list); 762 + c->cancel(dentry, c->cancel_data); 763 + } 764 + mutex_unlock(&fsd->cancellations_mtx); 765 + 766 + wait_for_completion(&fsd->active_users_drained); 767 + } 759 768 } 760 769 761 770 static void remove_one(struct dentry *victim)
+19 -2
fs/debugfs/internal.h
··· 7 7 8 8 #ifndef _DEBUGFS_INTERNAL_H_ 9 9 #define _DEBUGFS_INTERNAL_H_ 10 + #include <linux/lockdep.h> 11 + #include <linux/list.h> 10 12 11 13 struct file_operations; 12 14 ··· 19 17 20 18 struct debugfs_fsdata { 21 19 const struct file_operations *real_fops; 22 - refcount_t active_users; 23 - struct completion active_users_drained; 20 + union { 21 + /* automount_fn is used when real_fops is NULL */ 22 + debugfs_automount_t automount; 23 + struct { 24 + refcount_t active_users; 25 + struct completion active_users_drained; 26 + #ifdef CONFIG_LOCKDEP 27 + struct lockdep_map lockdep_map; 28 + struct lock_class_key key; 29 + char *lock_name; 30 + #endif 31 + 32 + /* protect cancellations */ 33 + struct mutex cancellations_mtx; 34 + struct list_head cancellations; 35 + }; 36 + }; 24 37 }; 25 38 26 39 /*
+10 -2
fs/ecryptfs/inode.c
··· 998 998 return rc; 999 999 } 1000 1000 1001 + static int ecryptfs_do_getattr(const struct path *path, struct kstat *stat, 1002 + u32 request_mask, unsigned int flags) 1003 + { 1004 + if (flags & AT_GETATTR_NOSEC) 1005 + return vfs_getattr_nosec(path, stat, request_mask, flags); 1006 + return vfs_getattr(path, stat, request_mask, flags); 1007 + } 1008 + 1001 1009 static int ecryptfs_getattr(struct mnt_idmap *idmap, 1002 1010 const struct path *path, struct kstat *stat, 1003 1011 u32 request_mask, unsigned int flags) ··· 1014 1006 struct kstat lower_stat; 1015 1007 int rc; 1016 1008 1017 - rc = vfs_getattr(ecryptfs_dentry_to_lower_path(dentry), &lower_stat, 1018 - request_mask, flags); 1009 + rc = ecryptfs_do_getattr(ecryptfs_dentry_to_lower_path(dentry), 1010 + &lower_stat, request_mask, flags); 1019 1011 if (!rc) { 1020 1012 fsstack_copy_attr_all(d_inode(dentry), 1021 1013 ecryptfs_inode_to_lower(d_inode(dentry)));
+2
fs/inode.c
··· 215 215 lockdep_set_class_and_name(&mapping->invalidate_lock, 216 216 &sb->s_type->invalidate_lock_key, 217 217 "mapping.invalidate_lock"); 218 + if (sb->s_iflags & SB_I_STABLE_WRITES) 219 + mapping_set_stable_writes(mapping); 218 220 inode->i_private = NULL; 219 221 inode->i_mapping = mapping; 220 222 INIT_HLIST_HEAD(&inode->i_dentry); /* buggered by rcu freeing */
+11 -3
fs/libfs.c
··· 399 399 return -EINVAL; 400 400 } 401 401 402 + /* In this case, ->private_data is protected by f_pos_lock */ 403 + file->private_data = NULL; 402 404 return vfs_setpos(file, offset, U32_MAX); 403 405 } 404 406 ··· 430 428 inode->i_ino, fs_umode_to_dtype(inode->i_mode)); 431 429 } 432 430 433 - static void offset_iterate_dir(struct inode *inode, struct dir_context *ctx) 431 + static void *offset_iterate_dir(struct inode *inode, struct dir_context *ctx) 434 432 { 435 433 struct offset_ctx *so_ctx = inode->i_op->get_offset_ctx(inode); 436 434 XA_STATE(xas, &so_ctx->xa, ctx->pos); ··· 439 437 while (true) { 440 438 dentry = offset_find_next(&xas); 441 439 if (!dentry) 442 - break; 440 + return ERR_PTR(-ENOENT); 443 441 444 442 if (!offset_dir_emit(ctx, dentry)) { 445 443 dput(dentry); ··· 449 447 dput(dentry); 450 448 ctx->pos = xas.xa_index + 1; 451 449 } 450 + return NULL; 452 451 } 453 452 454 453 /** ··· 482 479 if (!dir_emit_dots(file, ctx)) 483 480 return 0; 484 481 485 - offset_iterate_dir(d_inode(dir), ctx); 482 + /* In this case, ->private_data is protected by f_pos_lock */ 483 + if (ctx->pos == 2) 484 + file->private_data = NULL; 485 + else if (file->private_data == ERR_PTR(-ENOENT)) 486 + return 0; 487 + file->private_data = offset_iterate_dir(d_inode(dir), ctx); 486 488 return 0; 487 489 } 488 490
+5 -5
fs/overlayfs/inode.c
··· 171 171 172 172 type = ovl_path_real(dentry, &realpath); 173 173 old_cred = ovl_override_creds(dentry->d_sb); 174 - err = vfs_getattr(&realpath, stat, request_mask, flags); 174 + err = ovl_do_getattr(&realpath, stat, request_mask, flags); 175 175 if (err) 176 176 goto out; 177 177 ··· 196 196 (!is_dir ? STATX_NLINK : 0); 197 197 198 198 ovl_path_lower(dentry, &realpath); 199 - err = vfs_getattr(&realpath, &lowerstat, 200 - lowermask, flags); 199 + err = ovl_do_getattr(&realpath, &lowerstat, lowermask, 200 + flags); 201 201 if (err) 202 202 goto out; 203 203 ··· 249 249 250 250 ovl_path_lowerdata(dentry, &realpath); 251 251 if (realpath.dentry) { 252 - err = vfs_getattr(&realpath, &lowerdatastat, 253 - lowermask, flags); 252 + err = ovl_do_getattr(&realpath, &lowerdatastat, 253 + lowermask, flags); 254 254 if (err) 255 255 goto out; 256 256 } else {
+8
fs/overlayfs/overlayfs.h
··· 408 408 return ((OPEN_FMODE(flags) & FMODE_WRITE) || (flags & O_TRUNC)); 409 409 } 410 410 411 + static inline int ovl_do_getattr(const struct path *path, struct kstat *stat, 412 + u32 request_mask, unsigned int flags) 413 + { 414 + if (flags & AT_GETATTR_NOSEC) 415 + return vfs_getattr_nosec(path, stat, request_mask, flags); 416 + return vfs_getattr(path, stat, request_mask, flags); 417 + } 418 + 411 419 /* util.c */ 412 420 int ovl_get_write_access(struct dentry *dentry); 413 421 void ovl_put_write_access(struct dentry *dentry);
+11 -3
fs/smb/client/cifsglob.h
··· 191 191 bool reparse_point; 192 192 bool symlink; 193 193 }; 194 - __u32 reparse_tag; 194 + struct { 195 + __u32 tag; 196 + union { 197 + struct reparse_data_buffer *buf; 198 + struct reparse_posix_data *posix; 199 + }; 200 + } reparse; 195 201 char *symlink_target; 196 202 union { 197 203 struct smb2_file_all_info fi; ··· 401 395 struct cifs_tcon *tcon, 402 396 struct cifs_sb_info *cifs_sb, 403 397 const char *full_path, 404 - char **target_path, 405 - struct kvec *rsp_iov); 398 + char **target_path); 406 399 /* open a file for non-posix mounts */ 407 400 int (*open)(const unsigned int xid, struct cifs_open_parms *oparms, __u32 *oplock, 408 401 void *buf); ··· 556 551 bool (*is_status_io_timeout)(char *buf); 557 552 /* Check for STATUS_NETWORK_NAME_DELETED */ 558 553 bool (*is_network_name_deleted)(char *buf, struct TCP_Server_Info *srv); 554 + int (*parse_reparse_point)(struct cifs_sb_info *cifs_sb, 555 + struct kvec *rsp_iov, 556 + struct cifs_open_info_data *data); 559 557 }; 560 558 561 559 struct smb_version_values {
+2 -2
fs/smb/client/cifspdu.h
··· 1356 1356 __le32 DataDisplacement; 1357 1357 __u8 SetupCount; /* 1 */ 1358 1358 __le16 ReturnedDataLen; 1359 - __u16 ByteCount; 1359 + __le16 ByteCount; 1360 1360 } __attribute__((packed)) TRANSACT_IOCTL_RSP; 1361 1361 1362 1362 #define CIFS_ACL_OWNER 1 ··· 1509 1509 __le16 ReparseDataLength; 1510 1510 __u16 Reserved; 1511 1511 __le64 InodeType; /* LNK, FIFO, CHR etc. */ 1512 - char PathBuffer[]; 1512 + __u8 DataBuffer[]; 1513 1513 } __attribute__((packed)); 1514 1514 1515 1515 struct cifs_quota_data {
+13 -1
fs/smb/client/cifsproto.h
··· 210 210 const struct cifs_fid *fid); 211 211 bool cifs_reparse_point_to_fattr(struct cifs_sb_info *cifs_sb, 212 212 struct cifs_fattr *fattr, 213 - u32 tag); 213 + struct cifs_open_info_data *data); 214 214 extern int smb311_posix_get_inode_info(struct inode **pinode, const char *search_path, 215 215 struct super_block *sb, unsigned int xid); 216 216 extern int cifs_get_inode_info_unix(struct inode **pinode, ··· 458 458 struct cifs_tcon *tcon, 459 459 const unsigned char *searchName, char **syminfo, 460 460 const struct nls_table *nls_codepage, int remap); 461 + extern int cifs_query_reparse_point(const unsigned int xid, 462 + struct cifs_tcon *tcon, 463 + struct cifs_sb_info *cifs_sb, 464 + const char *full_path, 465 + u32 *tag, struct kvec *rsp, 466 + int *rsp_buftype); 461 467 extern int CIFSSMBQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon, 462 468 __u16 fid, char **symlinkinfo, 463 469 const struct nls_table *nls_codepage); ··· 665 659 int cifs_update_super_prepath(struct cifs_sb_info *cifs_sb, char *prefix); 666 660 char *extract_hostname(const char *unc); 667 661 char *extract_sharename(const char *unc); 662 + int parse_reparse_point(struct reparse_data_buffer *buf, 663 + u32 plen, struct cifs_sb_info *cifs_sb, 664 + bool unicode, struct cifs_open_info_data *data); 665 + int cifs_sfu_make_node(unsigned int xid, struct inode *inode, 666 + struct dentry *dentry, struct cifs_tcon *tcon, 667 + const char *full_path, umode_t mode, dev_t dev); 668 668 669 669 #ifdef CONFIG_CIFS_DFS_UPCALL 670 670 static inline int get_dfs_path(const unsigned int xid, struct cifs_ses *ses,
+76 -115
fs/smb/client/cifssmb.c
··· 2690 2690 return rc; 2691 2691 } 2692 2692 2693 - /* 2694 - * Recent Windows versions now create symlinks more frequently 2695 - * and they use the "reparse point" mechanism below. We can of course 2696 - * do symlinks nicely to Samba and other servers which support the 2697 - * CIFS Unix Extensions and we can also do SFU symlinks and "client only" 2698 - * "MF" symlinks optionally, but for recent Windows we really need to 2699 - * reenable the code below and fix the cifs_symlink callers to handle this. 2700 - * In the interim this code has been moved to its own config option so 2701 - * it is not compiled in by default until callers fixed up and more tested. 2702 - */ 2703 - int 2704 - CIFSSMBQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon, 2705 - __u16 fid, char **symlinkinfo, 2706 - const struct nls_table *nls_codepage) 2693 + int cifs_query_reparse_point(const unsigned int xid, 2694 + struct cifs_tcon *tcon, 2695 + struct cifs_sb_info *cifs_sb, 2696 + const char *full_path, 2697 + u32 *tag, struct kvec *rsp, 2698 + int *rsp_buftype) 2707 2699 { 2708 - int rc = 0; 2709 - int bytes_returned; 2710 - struct smb_com_transaction_ioctl_req *pSMB; 2711 - struct smb_com_transaction_ioctl_rsp *pSMBr; 2712 - bool is_unicode; 2713 - unsigned int sub_len; 2714 - char *sub_start; 2715 - struct reparse_symlink_data *reparse_buf; 2716 - struct reparse_posix_data *posix_buf; 2700 + struct cifs_open_parms oparms; 2701 + TRANSACT_IOCTL_REQ *io_req = NULL; 2702 + TRANSACT_IOCTL_RSP *io_rsp = NULL; 2703 + struct cifs_fid fid; 2717 2704 __u32 data_offset, data_count; 2718 - char *end_of_smb; 2705 + __u8 *start, *end; 2706 + int io_rsp_len; 2707 + int oplock = 0; 2708 + int rc; 2719 2709 2720 - cifs_dbg(FYI, "In Windows reparse style QueryLink for fid %u\n", fid); 2721 - rc = smb_init(SMB_COM_NT_TRANSACT, 23, tcon, (void **) &pSMB, 2722 - (void **) &pSMBr); 2710 + cifs_tcon_dbg(FYI, "%s: path=%s\n", __func__, full_path); 2711 + 2712 + if (cap_unix(tcon->ses)) 2713 + 
return -EOPNOTSUPP; 2714 + 2715 + oparms = (struct cifs_open_parms) { 2716 + .tcon = tcon, 2717 + .cifs_sb = cifs_sb, 2718 + .desired_access = FILE_READ_ATTRIBUTES, 2719 + .create_options = cifs_create_options(cifs_sb, 2720 + OPEN_REPARSE_POINT), 2721 + .disposition = FILE_OPEN, 2722 + .path = full_path, 2723 + .fid = &fid, 2724 + }; 2725 + 2726 + rc = CIFS_open(xid, &oparms, &oplock, NULL); 2723 2727 if (rc) 2724 2728 return rc; 2725 2729 2726 - pSMB->TotalParameterCount = 0 ; 2727 - pSMB->TotalDataCount = 0; 2728 - pSMB->MaxParameterCount = cpu_to_le32(2); 2730 + rc = smb_init(SMB_COM_NT_TRANSACT, 23, tcon, 2731 + (void **)&io_req, (void **)&io_rsp); 2732 + if (rc) 2733 + goto error; 2734 + 2735 + io_req->TotalParameterCount = 0; 2736 + io_req->TotalDataCount = 0; 2737 + io_req->MaxParameterCount = cpu_to_le32(2); 2729 2738 /* BB find exact data count max from sess structure BB */ 2730 - pSMB->MaxDataCount = cpu_to_le32(CIFSMaxBufSize & 0xFFFFFF00); 2731 - pSMB->MaxSetupCount = 4; 2732 - pSMB->Reserved = 0; 2733 - pSMB->ParameterOffset = 0; 2734 - pSMB->DataCount = 0; 2735 - pSMB->DataOffset = 0; 2736 - pSMB->SetupCount = 4; 2737 - pSMB->SubCommand = cpu_to_le16(NT_TRANSACT_IOCTL); 2738 - pSMB->ParameterCount = pSMB->TotalParameterCount; 2739 - pSMB->FunctionCode = cpu_to_le32(FSCTL_GET_REPARSE_POINT); 2740 - pSMB->IsFsctl = 1; /* FSCTL */ 2741 - pSMB->IsRootFlag = 0; 2742 - pSMB->Fid = fid; /* file handle always le */ 2743 - pSMB->ByteCount = 0; 2739 + io_req->MaxDataCount = cpu_to_le32(CIFSMaxBufSize & 0xFFFFFF00); 2740 + io_req->MaxSetupCount = 4; 2741 + io_req->Reserved = 0; 2742 + io_req->ParameterOffset = 0; 2743 + io_req->DataCount = 0; 2744 + io_req->DataOffset = 0; 2745 + io_req->SetupCount = 4; 2746 + io_req->SubCommand = cpu_to_le16(NT_TRANSACT_IOCTL); 2747 + io_req->ParameterCount = io_req->TotalParameterCount; 2748 + io_req->FunctionCode = cpu_to_le32(FSCTL_GET_REPARSE_POINT); 2749 + io_req->IsFsctl = 1; 2750 + io_req->IsRootFlag = 0; 2751 + 
io_req->Fid = fid.netfid; 2752 + io_req->ByteCount = 0; 2744 2753 2745 - rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, 2746 - (struct smb_hdr *) pSMBr, &bytes_returned, 0); 2747 - if (rc) { 2748 - cifs_dbg(FYI, "Send error in QueryReparseLinkInfo = %d\n", rc); 2749 - goto qreparse_out; 2750 - } 2754 + rc = SendReceive(xid, tcon->ses, (struct smb_hdr *)io_req, 2755 + (struct smb_hdr *)io_rsp, &io_rsp_len, 0); 2756 + if (rc) 2757 + goto error; 2751 2758 2752 - data_offset = le32_to_cpu(pSMBr->DataOffset); 2753 - data_count = le32_to_cpu(pSMBr->DataCount); 2754 - if (get_bcc(&pSMBr->hdr) < 2 || data_offset > 512) { 2755 - /* BB also check enough total bytes returned */ 2756 - rc = -EIO; /* bad smb */ 2757 - goto qreparse_out; 2758 - } 2759 - if (!data_count || (data_count > 2048)) { 2759 + data_offset = le32_to_cpu(io_rsp->DataOffset); 2760 + data_count = le32_to_cpu(io_rsp->DataCount); 2761 + if (get_bcc(&io_rsp->hdr) < 2 || data_offset > 512 || 2762 + !data_count || data_count > 2048) { 2760 2763 rc = -EIO; 2761 - cifs_dbg(FYI, "Invalid return data count on get reparse info ioctl\n"); 2762 - goto qreparse_out; 2764 + goto error; 2763 2765 } 2764 - end_of_smb = 2 + get_bcc(&pSMBr->hdr) + (char *)&pSMBr->ByteCount; 2765 - reparse_buf = (struct reparse_symlink_data *) 2766 - ((char *)&pSMBr->hdr.Protocol + data_offset); 2767 - if ((char *)reparse_buf >= end_of_smb) { 2766 + 2767 + end = 2 + get_bcc(&io_rsp->hdr) + (__u8 *)&io_rsp->ByteCount; 2768 + start = (__u8 *)&io_rsp->hdr.Protocol + data_offset; 2769 + if (start >= end) { 2768 2770 rc = -EIO; 2769 - goto qreparse_out; 2770 - } 2771 - if (reparse_buf->ReparseTag == cpu_to_le32(IO_REPARSE_TAG_NFS)) { 2772 - cifs_dbg(FYI, "NFS style reparse tag\n"); 2773 - posix_buf = (struct reparse_posix_data *)reparse_buf; 2774 - 2775 - if (posix_buf->InodeType != cpu_to_le64(NFS_SPECFILE_LNK)) { 2776 - cifs_dbg(FYI, "unsupported file type 0x%llx\n", 2777 - le64_to_cpu(posix_buf->InodeType)); 2778 - rc = -EOPNOTSUPP; 
2779 - goto qreparse_out; 2780 - } 2781 - is_unicode = true; 2782 - sub_len = le16_to_cpu(reparse_buf->ReparseDataLength); 2783 - if (posix_buf->PathBuffer + sub_len > end_of_smb) { 2784 - cifs_dbg(FYI, "reparse buf beyond SMB\n"); 2785 - rc = -EIO; 2786 - goto qreparse_out; 2787 - } 2788 - *symlinkinfo = cifs_strndup_from_utf16(posix_buf->PathBuffer, 2789 - sub_len, is_unicode, nls_codepage); 2790 - goto qreparse_out; 2791 - } else if (reparse_buf->ReparseTag != 2792 - cpu_to_le32(IO_REPARSE_TAG_SYMLINK)) { 2793 - rc = -EOPNOTSUPP; 2794 - goto qreparse_out; 2771 + goto error; 2795 2772 } 2796 2773 2797 - /* Reparse tag is NTFS symlink */ 2798 - sub_start = le16_to_cpu(reparse_buf->SubstituteNameOffset) + 2799 - reparse_buf->PathBuffer; 2800 - sub_len = le16_to_cpu(reparse_buf->SubstituteNameLength); 2801 - if (sub_start + sub_len > end_of_smb) { 2802 - cifs_dbg(FYI, "reparse buf beyond SMB\n"); 2803 - rc = -EIO; 2804 - goto qreparse_out; 2805 - } 2806 - if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE) 2807 - is_unicode = true; 2808 - else 2809 - is_unicode = false; 2774 + *tag = le32_to_cpu(((struct reparse_data_buffer *)start)->ReparseTag); 2775 + rsp->iov_base = io_rsp; 2776 + rsp->iov_len = io_rsp_len; 2777 + *rsp_buftype = CIFS_LARGE_BUFFER; 2778 + CIFSSMBClose(xid, tcon, fid.netfid); 2779 + return 0; 2810 2780 2811 - /* BB FIXME investigate remapping reserved chars here */ 2812 - *symlinkinfo = cifs_strndup_from_utf16(sub_start, sub_len, is_unicode, 2813 - nls_codepage); 2814 - if (!*symlinkinfo) 2815 - rc = -ENOMEM; 2816 - qreparse_out: 2817 - cifs_buf_release(pSMB); 2818 - 2819 - /* 2820 - * Note: On -EAGAIN error only caller can retry on handle based calls 2821 - * since file handle passed in no longer valid. 2822 - */ 2781 + error: 2782 + cifs_buf_release(io_req); 2783 + CIFSSMBClose(xid, tcon, fid.netfid); 2823 2784 return rc; 2824 2785 } 2825 2786
+60 -14
fs/smb/client/inode.c
··· 459 459 return -EOPNOTSUPP; 460 460 rc = server->ops->query_symlink(xid, tcon, 461 461 cifs_sb, full_path, 462 - &fattr->cf_symlink_target, 463 - NULL); 462 + &fattr->cf_symlink_target); 464 463 cifs_dbg(FYI, "%s: query_symlink: %d\n", __func__, rc); 465 464 } 466 465 return rc; ··· 721 722 fattr->cf_mode, fattr->cf_uniqueid, fattr->cf_nlink); 722 723 } 723 724 725 + static inline dev_t nfs_mkdev(struct reparse_posix_data *buf) 726 + { 727 + u64 v = le64_to_cpu(*(__le64 *)buf->DataBuffer); 728 + 729 + return MKDEV(v >> 32, v & 0xffffffff); 730 + } 731 + 724 732 bool cifs_reparse_point_to_fattr(struct cifs_sb_info *cifs_sb, 725 733 struct cifs_fattr *fattr, 726 - u32 tag) 734 + struct cifs_open_info_data *data) 727 735 { 736 + struct reparse_posix_data *buf = data->reparse.posix; 737 + u32 tag = data->reparse.tag; 738 + 739 + if (tag == IO_REPARSE_TAG_NFS && buf) { 740 + switch (le64_to_cpu(buf->InodeType)) { 741 + case NFS_SPECFILE_CHR: 742 + fattr->cf_mode |= S_IFCHR | cifs_sb->ctx->file_mode; 743 + fattr->cf_dtype = DT_CHR; 744 + fattr->cf_rdev = nfs_mkdev(buf); 745 + break; 746 + case NFS_SPECFILE_BLK: 747 + fattr->cf_mode |= S_IFBLK | cifs_sb->ctx->file_mode; 748 + fattr->cf_dtype = DT_BLK; 749 + fattr->cf_rdev = nfs_mkdev(buf); 750 + break; 751 + case NFS_SPECFILE_FIFO: 752 + fattr->cf_mode |= S_IFIFO | cifs_sb->ctx->file_mode; 753 + fattr->cf_dtype = DT_FIFO; 754 + break; 755 + case NFS_SPECFILE_SOCK: 756 + fattr->cf_mode |= S_IFSOCK | cifs_sb->ctx->file_mode; 757 + fattr->cf_dtype = DT_SOCK; 758 + break; 759 + case NFS_SPECFILE_LNK: 760 + fattr->cf_mode = S_IFLNK | cifs_sb->ctx->file_mode; 761 + fattr->cf_dtype = DT_LNK; 762 + break; 763 + default: 764 + WARN_ON_ONCE(1); 765 + return false; 766 + } 767 + return true; 768 + } 769 + 728 770 switch (tag) { 729 771 case IO_REPARSE_TAG_LX_SYMLINK: 730 772 fattr->cf_mode |= S_IFLNK | cifs_sb->ctx->file_mode; ··· 831 791 fattr->cf_nlink = le32_to_cpu(info->NumberOfLinks); 832 792 833 793 if 
(cifs_open_data_reparse(data) && 834 - cifs_reparse_point_to_fattr(cifs_sb, fattr, data->reparse_tag)) 794 + cifs_reparse_point_to_fattr(cifs_sb, fattr, data)) 835 795 goto out_reparse; 836 796 837 797 if (fattr->cf_cifsattrs & ATTR_DIRECTORY) { ··· 896 856 data.adjust_tz = false; 897 857 if (data.symlink_target) { 898 858 data.symlink = true; 899 - data.reparse_tag = IO_REPARSE_TAG_SYMLINK; 859 + data.reparse.tag = IO_REPARSE_TAG_SYMLINK; 900 860 } 901 861 cifs_open_info_to_fattr(&fattr, &data, inode->i_sb); 902 862 break; ··· 1065 1025 struct cifs_sb_info *cifs_sb = CIFS_SB(sb); 1066 1026 struct kvec rsp_iov, *iov = NULL; 1067 1027 int rsp_buftype = CIFS_NO_BUFFER; 1068 - u32 tag = data->reparse_tag; 1028 + u32 tag = data->reparse.tag; 1069 1029 int rc = 0; 1070 1030 1071 1031 if (!tag && server->ops->query_reparse_point) { ··· 1075 1035 if (!rc) 1076 1036 iov = &rsp_iov; 1077 1037 } 1078 - switch ((data->reparse_tag = tag)) { 1038 + 1039 + rc = -EOPNOTSUPP; 1040 + switch ((data->reparse.tag = tag)) { 1079 1041 case 0: /* SMB1 symlink */ 1080 - iov = NULL; 1081 - fallthrough; 1082 - case IO_REPARSE_TAG_NFS: 1083 - case IO_REPARSE_TAG_SYMLINK: 1084 - if (!data->symlink_target && server->ops->query_symlink) { 1042 + if (server->ops->query_symlink) { 1085 1043 rc = server->ops->query_symlink(xid, tcon, 1086 1044 cifs_sb, full_path, 1087 - &data->symlink_target, 1088 - iov); 1045 + &data->symlink_target); 1089 1046 } 1090 1047 break; 1091 1048 case IO_REPARSE_TAG_MOUNT_POINT: 1092 1049 cifs_create_junction_fattr(fattr, sb); 1050 + rc = 0; 1093 1051 goto out; 1052 + default: 1053 + if (data->symlink_target) { 1054 + rc = 0; 1055 + } else if (server->ops->parse_reparse_point) { 1056 + rc = server->ops->parse_reparse_point(cifs_sb, 1057 + iov, data); 1058 + } 1059 + break; 1094 1060 } 1095 1061 1096 1062 cifs_open_info_to_fattr(fattr, data, sb);
+5 -1
fs/smb/client/readdir.c
··· 153 153 static void 154 154 cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb) 155 155 { 156 + struct cifs_open_info_data data = { 157 + .reparse = { .tag = fattr->cf_cifstag, }, 158 + }; 159 + 156 160 fattr->cf_uid = cifs_sb->ctx->linux_uid; 157 161 fattr->cf_gid = cifs_sb->ctx->linux_gid; 158 162 ··· 169 165 * reasonably map some of them to directories vs. files vs. symlinks 170 166 */ 171 167 if ((fattr->cf_cifsattrs & ATTR_REPARSE) && 172 - cifs_reparse_point_to_fattr(cifs_sb, fattr, fattr->cf_cifstag)) 168 + cifs_reparse_point_to_fattr(cifs_sb, fattr, &data)) 173 169 goto out_reparse; 174 170 175 171 if (fattr->cf_cifsattrs & ATTR_DIRECTORY) {
+1 -1
fs/smb/client/sess.c
··· 332 332 333 333 if (iface) { 334 334 spin_lock(&ses->iface_lock); 335 - kref_put(&iface->refcount, release_iface); 336 335 iface->num_channels--; 337 336 if (iface->weight_fulfilled) 338 337 iface->weight_fulfilled--; 338 + kref_put(&iface->refcount, release_iface); 339 339 spin_unlock(&ses->iface_lock); 340 340 } 341 341
+32 -121
fs/smb/client/smb1ops.c
··· 976 976 struct cifs_tcon *tcon, 977 977 struct cifs_sb_info *cifs_sb, 978 978 const char *full_path, 979 - char **target_path, 980 - struct kvec *rsp_iov) 979 + char **target_path) 981 980 { 982 981 int rc; 983 - int oplock = 0; 984 - bool is_reparse_point = !!rsp_iov; 985 - struct cifs_fid fid; 986 - struct cifs_open_parms oparms; 987 982 988 - cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path); 983 + cifs_tcon_dbg(FYI, "%s: path=%s\n", __func__, full_path); 989 984 990 - if (is_reparse_point) { 991 - cifs_dbg(VFS, "reparse points not handled for SMB1 symlinks\n"); 985 + if (!cap_unix(tcon->ses)) 992 986 return -EOPNOTSUPP; 993 - } 994 987 995 - /* Check for unix extensions */ 996 - if (cap_unix(tcon->ses)) { 997 - rc = CIFSSMBUnixQuerySymLink(xid, tcon, full_path, target_path, 998 - cifs_sb->local_nls, 999 - cifs_remap(cifs_sb)); 1000 - if (rc == -EREMOTE) 1001 - rc = cifs_unix_dfs_readlink(xid, tcon, full_path, 1002 - target_path, 1003 - cifs_sb->local_nls); 1004 - 1005 - goto out; 1006 - } 1007 - 1008 - oparms = (struct cifs_open_parms) { 1009 - .tcon = tcon, 1010 - .cifs_sb = cifs_sb, 1011 - .desired_access = FILE_READ_ATTRIBUTES, 1012 - .create_options = cifs_create_options(cifs_sb, 1013 - OPEN_REPARSE_POINT), 1014 - .disposition = FILE_OPEN, 1015 - .path = full_path, 1016 - .fid = &fid, 1017 - }; 1018 - 1019 - rc = CIFS_open(xid, &oparms, &oplock, NULL); 1020 - if (rc) 1021 - goto out; 1022 - 1023 - rc = CIFSSMBQuerySymLink(xid, tcon, fid.netfid, target_path, 1024 - cifs_sb->local_nls); 1025 - if (rc) 1026 - goto out_close; 1027 - 1028 - convert_delimiter(*target_path, '/'); 1029 - out_close: 1030 - CIFSSMBClose(xid, tcon, fid.netfid); 1031 - out: 1032 - if (!rc) 1033 - cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path); 988 + rc = CIFSSMBUnixQuerySymLink(xid, tcon, full_path, target_path, 989 + cifs_sb->local_nls, cifs_remap(cifs_sb)); 990 + if (rc == -EREMOTE) 991 + rc = cifs_unix_dfs_readlink(xid, tcon, full_path, 992 + target_path, 
cifs_sb->local_nls); 1034 993 return rc; 994 + } 995 + 996 + static int cifs_parse_reparse_point(struct cifs_sb_info *cifs_sb, 997 + struct kvec *rsp_iov, 998 + struct cifs_open_info_data *data) 999 + { 1000 + struct reparse_data_buffer *buf; 1001 + TRANSACT_IOCTL_RSP *io = rsp_iov->iov_base; 1002 + bool unicode = !!(io->hdr.Flags2 & SMBFLG2_UNICODE); 1003 + u32 plen = le16_to_cpu(io->ByteCount); 1004 + 1005 + buf = (struct reparse_data_buffer *)((__u8 *)&io->hdr.Protocol + 1006 + le32_to_cpu(io->DataOffset)); 1007 + return parse_reparse_point(buf, plen, cifs_sb, unicode, data); 1035 1008 } 1036 1009 1037 1010 static bool ··· 1041 1068 { 1042 1069 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 1043 1070 struct inode *newinode = NULL; 1044 - int rc = -EPERM; 1045 - struct cifs_open_info_data buf = {}; 1046 - struct cifs_io_parms io_parms; 1047 - __u32 oplock = 0; 1048 - struct cifs_fid fid; 1049 - struct cifs_open_parms oparms; 1050 - unsigned int bytes_written; 1051 - struct win_dev *pdev; 1052 - struct kvec iov[2]; 1071 + int rc; 1053 1072 1054 1073 if (tcon->unix_ext) { 1055 1074 /* ··· 1075 1110 d_instantiate(dentry, newinode); 1076 1111 return rc; 1077 1112 } 1078 - 1079 1113 /* 1080 - * SMB1 SFU emulation: should work with all servers, but only 1081 - * support block and char device (no socket & fifo) 1114 + * Check if mounted with mount parm 'sfu' mount parm. 
1115 + * SFU emulation should work with all servers, but only 1116 + * supports block and char device (no socket & fifo), 1117 + * and was used by default in earlier versions of Windows 1082 1118 */ 1083 1119 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)) 1084 - return rc; 1085 - 1086 - if (!S_ISCHR(mode) && !S_ISBLK(mode)) 1087 - return rc; 1088 - 1089 - cifs_dbg(FYI, "sfu compat create special file\n"); 1090 - 1091 - oparms = (struct cifs_open_parms) { 1092 - .tcon = tcon, 1093 - .cifs_sb = cifs_sb, 1094 - .desired_access = GENERIC_WRITE, 1095 - .create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR | 1096 - CREATE_OPTION_SPECIAL), 1097 - .disposition = FILE_CREATE, 1098 - .path = full_path, 1099 - .fid = &fid, 1100 - }; 1101 - 1102 - if (tcon->ses->server->oplocks) 1103 - oplock = REQ_OPLOCK; 1104 - else 1105 - oplock = 0; 1106 - rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, &buf); 1107 - if (rc) 1108 - return rc; 1109 - 1110 - /* 1111 - * BB Do not bother to decode buf since no local inode yet to put 1112 - * timestamps in, but we can reuse it safely. 
1113 - */ 1114 - 1115 - pdev = (struct win_dev *)&buf.fi; 1116 - io_parms.pid = current->tgid; 1117 - io_parms.tcon = tcon; 1118 - io_parms.offset = 0; 1119 - io_parms.length = sizeof(struct win_dev); 1120 - iov[1].iov_base = &buf.fi; 1121 - iov[1].iov_len = sizeof(struct win_dev); 1122 - if (S_ISCHR(mode)) { 1123 - memcpy(pdev->type, "IntxCHR", 8); 1124 - pdev->major = cpu_to_le64(MAJOR(dev)); 1125 - pdev->minor = cpu_to_le64(MINOR(dev)); 1126 - rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms, 1127 - &bytes_written, iov, 1); 1128 - } else if (S_ISBLK(mode)) { 1129 - memcpy(pdev->type, "IntxBLK", 8); 1130 - pdev->major = cpu_to_le64(MAJOR(dev)); 1131 - pdev->minor = cpu_to_le64(MINOR(dev)); 1132 - rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms, 1133 - &bytes_written, iov, 1); 1134 - } 1135 - tcon->ses->server->ops->close(xid, tcon, &fid); 1136 - d_drop(dentry); 1137 - 1138 - /* FIXME: add code here to set EAs */ 1139 - 1140 - cifs_free_open_info(&buf); 1141 - return rc; 1120 + return -EPERM; 1121 + return cifs_sfu_make_node(xid, inode, dentry, tcon, 1122 + full_path, mode, dev); 1142 1123 } 1143 - 1144 - 1145 1124 1146 1125 struct smb_version_operations smb1_operations = { 1147 1126 .send_cancel = send_nt_cancel, ··· 1123 1214 .is_path_accessible = cifs_is_path_accessible, 1124 1215 .can_echo = cifs_can_echo, 1125 1216 .query_path_info = cifs_query_path_info, 1217 + .query_reparse_point = cifs_query_reparse_point, 1126 1218 .query_file_info = cifs_query_file_info, 1127 1219 .get_srv_inum = cifs_get_srv_inum, 1128 1220 .set_path_size = CIFSSMBSetEOF, ··· 1139 1229 .rename = CIFSSMBRename, 1140 1230 .create_hardlink = CIFSCreateHardLink, 1141 1231 .query_symlink = cifs_query_symlink, 1232 + .parse_reparse_point = cifs_parse_reparse_point, 1142 1233 .open = cifs_open_file, 1143 1234 .set_fid = cifs_set_fid, 1144 1235 .close = cifs_close_file,
+1 -1
fs/smb/client/smb2inode.c
··· 555 555 break; 556 556 } 557 557 data->reparse_point = reparse_point; 558 - data->reparse_tag = tag; 558 + data->reparse.tag = tag; 559 559 return rc; 560 560 } 561 561
+111 -114
fs/smb/client/smb2ops.c
··· 2866 2866 return rc; 2867 2867 } 2868 2868 2869 - static int 2870 - parse_reparse_posix(struct reparse_posix_data *symlink_buf, 2871 - u32 plen, char **target_path, 2872 - struct cifs_sb_info *cifs_sb) 2869 + /* See MS-FSCC 2.1.2.6 for the 'NFS' style reparse tags */ 2870 + static int parse_reparse_posix(struct reparse_posix_data *buf, 2871 + struct cifs_sb_info *cifs_sb, 2872 + struct cifs_open_info_data *data) 2873 2873 { 2874 2874 unsigned int len; 2875 + u64 type; 2875 2876 2876 - /* See MS-FSCC 2.1.2.6 for the 'NFS' style reparse tags */ 2877 - len = le16_to_cpu(symlink_buf->ReparseDataLength); 2878 - 2879 - if (le64_to_cpu(symlink_buf->InodeType) != NFS_SPECFILE_LNK) { 2880 - cifs_dbg(VFS, "%lld not a supported symlink type\n", 2881 - le64_to_cpu(symlink_buf->InodeType)); 2877 + switch ((type = le64_to_cpu(buf->InodeType))) { 2878 + case NFS_SPECFILE_LNK: 2879 + len = le16_to_cpu(buf->ReparseDataLength); 2880 + data->symlink_target = cifs_strndup_from_utf16(buf->DataBuffer, 2881 + len, true, 2882 + cifs_sb->local_nls); 2883 + if (!data->symlink_target) 2884 + return -ENOMEM; 2885 + convert_delimiter(data->symlink_target, '/'); 2886 + cifs_dbg(FYI, "%s: target path: %s\n", 2887 + __func__, data->symlink_target); 2888 + break; 2889 + case NFS_SPECFILE_CHR: 2890 + case NFS_SPECFILE_BLK: 2891 + case NFS_SPECFILE_FIFO: 2892 + case NFS_SPECFILE_SOCK: 2893 + break; 2894 + default: 2895 + cifs_dbg(VFS, "%s: unhandled inode type: 0x%llx\n", 2896 + __func__, type); 2882 2897 return -EOPNOTSUPP; 2883 2898 } 2884 - 2885 - *target_path = cifs_strndup_from_utf16( 2886 - symlink_buf->PathBuffer, 2887 - len, true, cifs_sb->local_nls); 2888 - if (!(*target_path)) 2889 - return -ENOMEM; 2890 - 2891 - convert_delimiter(*target_path, '/'); 2892 - cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path); 2893 - 2894 2899 return 0; 2895 2900 } 2896 2901 2897 - static int 2898 - parse_reparse_symlink(struct reparse_symlink_data_buffer *symlink_buf, 2899 - u32 plen, char 
**target_path, 2900 - struct cifs_sb_info *cifs_sb) 2902 + static int parse_reparse_symlink(struct reparse_symlink_data_buffer *sym, 2903 + u32 plen, bool unicode, 2904 + struct cifs_sb_info *cifs_sb, 2905 + struct cifs_open_info_data *data) 2901 2906 { 2902 - unsigned int sub_len; 2903 - unsigned int sub_offset; 2907 + unsigned int len; 2908 + unsigned int offs; 2904 2909 2905 2910 /* We handle Symbolic Link reparse tag here. See: MS-FSCC 2.1.2.4 */ 2906 2911 2907 - sub_offset = le16_to_cpu(symlink_buf->SubstituteNameOffset); 2908 - sub_len = le16_to_cpu(symlink_buf->SubstituteNameLength); 2909 - if (sub_offset + 20 > plen || 2910 - sub_offset + sub_len + 20 > plen) { 2912 + offs = le16_to_cpu(sym->SubstituteNameOffset); 2913 + len = le16_to_cpu(sym->SubstituteNameLength); 2914 + if (offs + 20 > plen || offs + len + 20 > plen) { 2911 2915 cifs_dbg(VFS, "srv returned malformed symlink buffer\n"); 2912 2916 return -EIO; 2913 2917 } 2914 2918 2915 - *target_path = cifs_strndup_from_utf16( 2916 - symlink_buf->PathBuffer + sub_offset, 2917 - sub_len, true, cifs_sb->local_nls); 2918 - if (!(*target_path)) 2919 + data->symlink_target = cifs_strndup_from_utf16(sym->PathBuffer + offs, 2920 + len, unicode, 2921 + cifs_sb->local_nls); 2922 + if (!data->symlink_target) 2919 2923 return -ENOMEM; 2920 2924 2921 - convert_delimiter(*target_path, '/'); 2922 - cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path); 2925 + convert_delimiter(data->symlink_target, '/'); 2926 + cifs_dbg(FYI, "%s: target path: %s\n", __func__, data->symlink_target); 2923 2927 2924 2928 return 0; 2925 2929 } 2926 2930 2927 - static int 2928 - parse_reparse_point(struct reparse_data_buffer *buf, 2929 - u32 plen, char **target_path, 2930 - struct cifs_sb_info *cifs_sb) 2931 + int parse_reparse_point(struct reparse_data_buffer *buf, 2932 + u32 plen, struct cifs_sb_info *cifs_sb, 2933 + bool unicode, struct cifs_open_info_data *data) 2931 2934 { 2932 - if (plen < sizeof(struct reparse_data_buffer)) 
{ 2933 - cifs_dbg(VFS, "reparse buffer is too small. Must be at least 8 bytes but was %d\n", 2934 - plen); 2935 + if (plen < sizeof(*buf)) { 2936 + cifs_dbg(VFS, "%s: reparse buffer is too small. Must be at least 8 bytes but was %d\n", 2937 + __func__, plen); 2935 2938 return -EIO; 2936 2939 } 2937 2940 2938 - if (plen < le16_to_cpu(buf->ReparseDataLength) + 2939 - sizeof(struct reparse_data_buffer)) { 2940 - cifs_dbg(VFS, "srv returned invalid reparse buf length: %d\n", 2941 - plen); 2941 + if (plen < le16_to_cpu(buf->ReparseDataLength) + sizeof(*buf)) { 2942 + cifs_dbg(VFS, "%s: invalid reparse buf length: %d\n", 2943 + __func__, plen); 2942 2944 return -EIO; 2943 2945 } 2946 + 2947 + data->reparse.buf = buf; 2944 2948 2945 2949 /* See MS-FSCC 2.1.2 */ 2946 2950 switch (le32_to_cpu(buf->ReparseTag)) { 2947 2951 case IO_REPARSE_TAG_NFS: 2948 - return parse_reparse_posix( 2949 - (struct reparse_posix_data *)buf, 2950 - plen, target_path, cifs_sb); 2952 + return parse_reparse_posix((struct reparse_posix_data *)buf, 2953 + cifs_sb, data); 2951 2954 case IO_REPARSE_TAG_SYMLINK: 2952 2955 return parse_reparse_symlink( 2953 2956 (struct reparse_symlink_data_buffer *)buf, 2954 - plen, target_path, cifs_sb); 2957 + plen, unicode, cifs_sb, data); 2958 + case IO_REPARSE_TAG_LX_SYMLINK: 2959 + case IO_REPARSE_TAG_AF_UNIX: 2960 + case IO_REPARSE_TAG_LX_FIFO: 2961 + case IO_REPARSE_TAG_LX_CHR: 2962 + case IO_REPARSE_TAG_LX_BLK: 2963 + return 0; 2955 2964 default: 2956 - cifs_dbg(VFS, "srv returned unknown symlink buffer tag:0x%08x\n", 2957 - le32_to_cpu(buf->ReparseTag)); 2965 + cifs_dbg(VFS, "%s: unhandled reparse tag: 0x%08x\n", 2966 + __func__, le32_to_cpu(buf->ReparseTag)); 2958 2967 return -EOPNOTSUPP; 2959 2968 } 2960 2969 } 2961 2970 2962 - static int smb2_query_symlink(const unsigned int xid, 2963 - struct cifs_tcon *tcon, 2964 - struct cifs_sb_info *cifs_sb, 2965 - const char *full_path, 2966 - char **target_path, 2967 - struct kvec *rsp_iov) 2971 + static int 
smb2_parse_reparse_point(struct cifs_sb_info *cifs_sb, 2972 + struct kvec *rsp_iov, 2973 + struct cifs_open_info_data *data) 2968 2974 { 2969 2975 struct reparse_data_buffer *buf; 2970 2976 struct smb2_ioctl_rsp *io = rsp_iov->iov_base; 2971 2977 u32 plen = le32_to_cpu(io->OutputCount); 2972 2978 2973 - cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path); 2974 - 2975 2979 buf = (struct reparse_data_buffer *)((u8 *)io + 2976 2980 le32_to_cpu(io->OutputOffset)); 2977 - return parse_reparse_point(buf, plen, target_path, cifs_sb); 2981 + return parse_reparse_point(buf, plen, cifs_sb, true, data); 2978 2982 } 2979 2983 2980 2984 static int smb2_query_reparse_point(const unsigned int xid, ··· 5068 5064 return le32_to_cpu(hdr->NextCommand); 5069 5065 } 5070 5066 5071 - static int 5072 - smb2_make_node(unsigned int xid, struct inode *inode, 5073 - struct dentry *dentry, struct cifs_tcon *tcon, 5074 - const char *full_path, umode_t mode, dev_t dev) 5067 + int cifs_sfu_make_node(unsigned int xid, struct inode *inode, 5068 + struct dentry *dentry, struct cifs_tcon *tcon, 5069 + const char *full_path, umode_t mode, dev_t dev) 5075 5070 { 5076 - struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 5077 - int rc = -EPERM; 5078 5071 struct cifs_open_info_data buf = {}; 5079 - struct cifs_io_parms io_parms = {0}; 5080 - __u32 oplock = 0; 5081 - struct cifs_fid fid; 5072 + struct TCP_Server_Info *server = tcon->ses->server; 5082 5073 struct cifs_open_parms oparms; 5074 + struct cifs_io_parms io_parms = {}; 5075 + struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 5076 + struct cifs_fid fid; 5083 5077 unsigned int bytes_written; 5084 5078 struct win_dev *pdev; 5085 5079 struct kvec iov[2]; 5086 - 5087 - /* 5088 - * Check if mounted with mount parm 'sfu' mount parm. 
5089 - * SFU emulation should work with all servers, but only 5090 - * supports block and char device (no socket & fifo), 5091 - * and was used by default in earlier versions of Windows 5092 - */ 5093 - if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)) 5094 - return rc; 5095 - 5096 - /* 5097 - * TODO: Add ability to create instead via reparse point. Windows (e.g. 5098 - * their current NFS server) uses this approach to expose special files 5099 - * over SMB2/SMB3 and Samba will do this with SMB3.1.1 POSIX Extensions 5100 - */ 5080 + __u32 oplock = server->oplocks ? REQ_OPLOCK : 0; 5081 + int rc; 5101 5082 5102 5083 if (!S_ISCHR(mode) && !S_ISBLK(mode) && !S_ISFIFO(mode)) 5103 - return rc; 5104 - 5105 - cifs_dbg(FYI, "sfu compat create special file\n"); 5084 + return -EPERM; 5106 5085 5107 5086 oparms = (struct cifs_open_parms) { 5108 5087 .tcon = tcon, ··· 5098 5111 .fid = &fid, 5099 5112 }; 5100 5113 5101 - if (tcon->ses->server->oplocks) 5102 - oplock = REQ_OPLOCK; 5103 - else 5104 - oplock = 0; 5105 - rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, &buf); 5114 + rc = server->ops->open(xid, &oparms, &oplock, &buf); 5106 5115 if (rc) 5107 5116 return rc; 5108 5117 ··· 5106 5123 * BB Do not bother to decode buf since no local inode yet to put 5107 5124 * timestamps in, but we can reuse it safely. 
5108 5125 */ 5109 - 5110 5126 pdev = (struct win_dev *)&buf.fi; 5111 5127 io_parms.pid = current->tgid; 5112 5128 io_parms.tcon = tcon; 5113 - io_parms.offset = 0; 5114 - io_parms.length = sizeof(struct win_dev); 5115 - iov[1].iov_base = &buf.fi; 5116 - iov[1].iov_len = sizeof(struct win_dev); 5129 + io_parms.length = sizeof(*pdev); 5130 + iov[1].iov_base = pdev; 5131 + iov[1].iov_len = sizeof(*pdev); 5117 5132 if (S_ISCHR(mode)) { 5118 5133 memcpy(pdev->type, "IntxCHR", 8); 5119 5134 pdev->major = cpu_to_le64(MAJOR(dev)); 5120 5135 pdev->minor = cpu_to_le64(MINOR(dev)); 5121 - rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms, 5122 - &bytes_written, iov, 1); 5123 5136 } else if (S_ISBLK(mode)) { 5124 5137 memcpy(pdev->type, "IntxBLK", 8); 5125 5138 pdev->major = cpu_to_le64(MAJOR(dev)); 5126 5139 pdev->minor = cpu_to_le64(MINOR(dev)); 5127 - rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms, 5128 - &bytes_written, iov, 1); 5129 5140 } else if (S_ISFIFO(mode)) { 5130 5141 memcpy(pdev->type, "LnxFIFO", 8); 5131 - pdev->major = 0; 5132 - pdev->minor = 0; 5133 - rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms, 5134 - &bytes_written, iov, 1); 5135 5142 } 5136 - tcon->ses->server->ops->close(xid, tcon, &fid); 5143 + 5144 + rc = server->ops->sync_write(xid, &fid, &io_parms, 5145 + &bytes_written, iov, 1); 5146 + server->ops->close(xid, tcon, &fid); 5137 5147 d_drop(dentry); 5138 - 5139 5148 /* FIXME: add code here to set EAs */ 5140 - 5141 5149 cifs_free_open_info(&buf); 5142 5150 return rc; 5151 + } 5152 + 5153 + static int smb2_make_node(unsigned int xid, struct inode *inode, 5154 + struct dentry *dentry, struct cifs_tcon *tcon, 5155 + const char *full_path, umode_t mode, dev_t dev) 5156 + { 5157 + struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 5158 + 5159 + /* 5160 + * Check if mounted with mount parm 'sfu' mount parm. 
5161 + * SFU emulation should work with all servers, but only 5162 + * supports block and char device (no socket & fifo), 5163 + * and was used by default in earlier versions of Windows 5164 + */ 5165 + if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)) 5166 + return -EPERM; 5167 + /* 5168 + * TODO: Add ability to create instead via reparse point. Windows (e.g. 5169 + * their current NFS server) uses this approach to expose special files 5170 + * over SMB2/SMB3 and Samba will do this with SMB3.1.1 POSIX Extensions 5171 + */ 5172 + return cifs_sfu_make_node(xid, inode, dentry, tcon, 5173 + full_path, mode, dev); 5143 5174 } 5144 5175 5145 5176 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY ··· 5206 5209 .unlink = smb2_unlink, 5207 5210 .rename = smb2_rename_path, 5208 5211 .create_hardlink = smb2_create_hardlink, 5209 - .query_symlink = smb2_query_symlink, 5212 + .parse_reparse_point = smb2_parse_reparse_point, 5210 5213 .query_mf_symlink = smb3_query_mf_symlink, 5211 5214 .create_mf_symlink = smb3_create_mf_symlink, 5212 5215 .open = smb2_open_file, ··· 5308 5311 .unlink = smb2_unlink, 5309 5312 .rename = smb2_rename_path, 5310 5313 .create_hardlink = smb2_create_hardlink, 5311 - .query_symlink = smb2_query_symlink, 5314 + .parse_reparse_point = smb2_parse_reparse_point, 5312 5315 .query_mf_symlink = smb3_query_mf_symlink, 5313 5316 .create_mf_symlink = smb3_create_mf_symlink, 5314 5317 .open = smb2_open_file, ··· 5413 5416 .unlink = smb2_unlink, 5414 5417 .rename = smb2_rename_path, 5415 5418 .create_hardlink = smb2_create_hardlink, 5416 - .query_symlink = smb2_query_symlink, 5419 + .parse_reparse_point = smb2_parse_reparse_point, 5417 5420 .query_mf_symlink = smb3_query_mf_symlink, 5418 5421 .create_mf_symlink = smb3_create_mf_symlink, 5419 5422 .open = smb2_open_file, ··· 5527 5530 .unlink = smb2_unlink, 5528 5531 .rename = smb2_rename_path, 5529 5532 .create_hardlink = smb2_create_hardlink, 5530 - .query_symlink = smb2_query_symlink, 5533 + .parse_reparse_point = 
smb2_parse_reparse_point, 5531 5534 .query_mf_symlink = smb3_query_mf_symlink, 5532 5535 .create_mf_symlink = smb3_create_mf_symlink, 5533 5536 .open = smb2_open_file,
+8 -2
fs/smb/server/ksmbd_work.c
··· 56 56 kfree(work->tr_buf); 57 57 kvfree(work->request_buf); 58 58 kfree(work->iov); 59 + if (!list_empty(&work->interim_entry)) 60 + list_del(&work->interim_entry); 61 + 59 62 if (work->async_id) 60 63 ksmbd_release_id(&work->conn->async_ida, work->async_id); 61 64 kmem_cache_free(work_cache, work); ··· 109 106 static int __ksmbd_iov_pin_rsp(struct ksmbd_work *work, void *ib, int len, 110 107 void *aux_buf, unsigned int aux_size) 111 108 { 112 - struct aux_read *ar; 109 + struct aux_read *ar = NULL; 113 110 int need_iov_cnt = 1; 114 111 115 112 if (aux_size) { ··· 126 123 new = krealloc(work->iov, 127 124 sizeof(struct kvec) * work->iov_alloc_cnt, 128 125 GFP_KERNEL | __GFP_ZERO); 129 - if (!new) 126 + if (!new) { 127 + kfree(ar); 128 + work->iov_alloc_cnt -= 4; 130 129 return -ENOMEM; 130 + } 131 131 work->iov = new; 132 132 } 133 133
+2 -1
fs/smb/server/oplock.c
··· 833 833 interim_entry); 834 834 setup_async_work(in_work, NULL, NULL); 835 835 smb2_send_interim_resp(in_work, STATUS_PENDING); 836 - list_del(&in_work->interim_entry); 836 + list_del_init(&in_work->interim_entry); 837 + release_async_work(in_work); 837 838 } 838 839 INIT_WORK(&work->work, __smb2_lease_break_noti); 839 840 ksmbd_queue_work(work);
+137 -135
fs/smb/server/smb2pdu.c
··· 657 657 658 658 int setup_async_work(struct ksmbd_work *work, void (*fn)(void **), void **arg) 659 659 { 660 - struct smb2_hdr *rsp_hdr; 661 660 struct ksmbd_conn *conn = work->conn; 662 661 int id; 663 - 664 - rsp_hdr = ksmbd_resp_buf_next(work); 665 - rsp_hdr->Flags |= SMB2_FLAGS_ASYNC_COMMAND; 666 662 667 663 id = ksmbd_acquire_async_msg_id(&conn->async_ida); 668 664 if (id < 0) { ··· 667 671 } 668 672 work->asynchronous = true; 669 673 work->async_id = id; 670 - rsp_hdr->Id.AsyncId = cpu_to_le64(id); 671 674 672 675 ksmbd_debug(SMB, 673 676 "Send interim Response to inform async request id : %d\n", ··· 718 723 __SMB2_HEADER_STRUCTURE_SIZE); 719 724 720 725 rsp_hdr = smb2_get_msg(in_work->response_buf); 726 + rsp_hdr->Flags |= SMB2_FLAGS_ASYNC_COMMAND; 727 + rsp_hdr->Id.AsyncId = cpu_to_le64(work->async_id); 721 728 smb2_set_err_rsp(in_work); 722 729 rsp_hdr->Status = status; 723 730 ··· 2377 2380 rc = 0; 2378 2381 } else { 2379 2382 rc = ksmbd_vfs_setxattr(idmap, path, attr_name, value, 2380 - le16_to_cpu(eabuf->EaValueLength), 0); 2383 + le16_to_cpu(eabuf->EaValueLength), 2384 + 0, true); 2381 2385 if (rc < 0) { 2382 2386 ksmbd_debug(SMB, 2383 2387 "ksmbd_vfs_setxattr is failed(%d)\n", ··· 2441 2443 return -EBADF; 2442 2444 } 2443 2445 2444 - rc = ksmbd_vfs_setxattr(idmap, path, xattr_stream_name, NULL, 0, 0); 2446 + rc = ksmbd_vfs_setxattr(idmap, path, xattr_stream_name, NULL, 0, 0, false); 2445 2447 if (rc < 0) 2446 2448 pr_err("Failed to store XATTR stream name :%d\n", rc); 2447 2449 return 0; ··· 2516 2518 da.flags = XATTR_DOSINFO_ATTRIB | XATTR_DOSINFO_CREATE_TIME | 2517 2519 XATTR_DOSINFO_ITIME; 2518 2520 2519 - rc = ksmbd_vfs_set_dos_attrib_xattr(mnt_idmap(path->mnt), path, &da); 2521 + rc = ksmbd_vfs_set_dos_attrib_xattr(mnt_idmap(path->mnt), path, &da, false); 2520 2522 if (rc) 2521 2523 ksmbd_debug(SMB, "failed to store file attribute into xattr\n"); 2522 2524 } ··· 2606 2608 sizeof(struct create_sd_buf_req)) 2607 2609 return -EINVAL; 2608 2610 
return set_info_sec(work->conn, work->tcon, path, &sd_buf->ntsd, 2609 - le32_to_cpu(sd_buf->ccontext.DataLength), true); 2611 + le32_to_cpu(sd_buf->ccontext.DataLength), true, false); 2610 2612 } 2611 2613 2612 2614 static void ksmbd_acls_fattr(struct smb_fattr *fattr, ··· 2688 2690 *(char *)req->Buffer == '\\') { 2689 2691 pr_err("not allow directory name included leading slash\n"); 2690 2692 rc = -EINVAL; 2691 - goto err_out1; 2693 + goto err_out2; 2692 2694 } 2693 2695 2694 2696 name = smb2_get_name(req->Buffer, ··· 2699 2701 if (rc != -ENOMEM) 2700 2702 rc = -ENOENT; 2701 2703 name = NULL; 2702 - goto err_out1; 2704 + goto err_out2; 2703 2705 } 2704 2706 2705 2707 ksmbd_debug(SMB, "converted name = %s\n", name); ··· 2707 2709 if (!test_share_config_flag(work->tcon->share_conf, 2708 2710 KSMBD_SHARE_FLAG_STREAMS)) { 2709 2711 rc = -EBADF; 2710 - goto err_out1; 2712 + goto err_out2; 2711 2713 } 2712 2714 rc = parse_stream_name(name, &stream_name, &s_type); 2713 2715 if (rc < 0) 2714 - goto err_out1; 2716 + goto err_out2; 2715 2717 } 2716 2718 2717 2719 rc = ksmbd_validate_filename(name); 2718 2720 if (rc < 0) 2719 - goto err_out1; 2721 + goto err_out2; 2720 2722 2721 2723 if (ksmbd_share_veto_filename(share, name)) { 2722 2724 rc = -ENOENT; 2723 2725 ksmbd_debug(SMB, "Reject open(), vetoed file: %s\n", 2724 2726 name); 2725 - goto err_out1; 2727 + goto err_out2; 2726 2728 } 2727 2729 } else { 2728 2730 name = kstrdup("", GFP_KERNEL); 2729 2731 if (!name) { 2730 2732 rc = -ENOMEM; 2731 - goto err_out1; 2733 + goto err_out2; 2732 2734 } 2733 2735 } 2734 2736 ··· 2741 2743 le32_to_cpu(req->ImpersonationLevel)); 2742 2744 rc = -EIO; 2743 2745 rsp->hdr.Status = STATUS_BAD_IMPERSONATION_LEVEL; 2744 - goto err_out1; 2746 + goto err_out2; 2745 2747 } 2746 2748 2747 2749 if (req->CreateOptions && !(req->CreateOptions & CREATE_OPTIONS_MASK_LE)) { 2748 2750 pr_err("Invalid create options : 0x%x\n", 2749 2751 le32_to_cpu(req->CreateOptions)); 2750 2752 rc = -EINVAL; 2751 - 
goto err_out1; 2753 + goto err_out2; 2752 2754 } else { 2753 2755 if (req->CreateOptions & FILE_SEQUENTIAL_ONLY_LE && 2754 2756 req->CreateOptions & FILE_RANDOM_ACCESS_LE) ··· 2758 2760 (FILE_OPEN_BY_FILE_ID_LE | CREATE_TREE_CONNECTION | 2759 2761 FILE_RESERVE_OPFILTER_LE)) { 2760 2762 rc = -EOPNOTSUPP; 2761 - goto err_out1; 2763 + goto err_out2; 2762 2764 } 2763 2765 2764 2766 if (req->CreateOptions & FILE_DIRECTORY_FILE_LE) { 2765 2767 if (req->CreateOptions & FILE_NON_DIRECTORY_FILE_LE) { 2766 2768 rc = -EINVAL; 2767 - goto err_out1; 2769 + goto err_out2; 2768 2770 } else if (req->CreateOptions & FILE_NO_COMPRESSION_LE) { 2769 2771 req->CreateOptions = ~(FILE_NO_COMPRESSION_LE); 2770 2772 } ··· 2776 2778 pr_err("Invalid create disposition : 0x%x\n", 2777 2779 le32_to_cpu(req->CreateDisposition)); 2778 2780 rc = -EINVAL; 2779 - goto err_out1; 2781 + goto err_out2; 2780 2782 } 2781 2783 2782 2784 if (!(req->DesiredAccess & DESIRED_ACCESS_MASK)) { 2783 2785 pr_err("Invalid desired access : 0x%x\n", 2784 2786 le32_to_cpu(req->DesiredAccess)); 2785 2787 rc = -EACCES; 2786 - goto err_out1; 2788 + goto err_out2; 2787 2789 } 2788 2790 2789 2791 if (req->FileAttributes && !(req->FileAttributes & FILE_ATTRIBUTE_MASK_LE)) { 2790 2792 pr_err("Invalid file attribute : 0x%x\n", 2791 2793 le32_to_cpu(req->FileAttributes)); 2792 2794 rc = -EINVAL; 2793 - goto err_out1; 2795 + goto err_out2; 2794 2796 } 2795 2797 2796 2798 if (req->CreateContextsOffset) { ··· 2798 2800 context = smb2_find_context_vals(req, SMB2_CREATE_EA_BUFFER, 4); 2799 2801 if (IS_ERR(context)) { 2800 2802 rc = PTR_ERR(context); 2801 - goto err_out1; 2803 + goto err_out2; 2802 2804 } else if (context) { 2803 2805 ea_buf = (struct create_ea_buf_req *)context; 2804 2806 if (le16_to_cpu(context->DataOffset) + 2805 2807 le32_to_cpu(context->DataLength) < 2806 2808 sizeof(struct create_ea_buf_req)) { 2807 2809 rc = -EINVAL; 2808 - goto err_out1; 2810 + goto err_out2; 2809 2811 } 2810 2812 if (req->CreateOptions & 
FILE_NO_EA_KNOWLEDGE_LE) { 2811 2813 rsp->hdr.Status = STATUS_ACCESS_DENIED; 2812 2814 rc = -EACCES; 2813 - goto err_out1; 2815 + goto err_out2; 2814 2816 } 2815 2817 } 2816 2818 ··· 2818 2820 SMB2_CREATE_QUERY_MAXIMAL_ACCESS_REQUEST, 4); 2819 2821 if (IS_ERR(context)) { 2820 2822 rc = PTR_ERR(context); 2821 - goto err_out1; 2823 + goto err_out2; 2822 2824 } else if (context) { 2823 2825 ksmbd_debug(SMB, 2824 2826 "get query maximal access context\n"); ··· 2829 2831 SMB2_CREATE_TIMEWARP_REQUEST, 4); 2830 2832 if (IS_ERR(context)) { 2831 2833 rc = PTR_ERR(context); 2832 - goto err_out1; 2834 + goto err_out2; 2833 2835 } else if (context) { 2834 2836 ksmbd_debug(SMB, "get timewarp context\n"); 2835 2837 rc = -EBADF; 2836 - goto err_out1; 2838 + goto err_out2; 2837 2839 } 2838 2840 2839 2841 if (tcon->posix_extensions) { ··· 2841 2843 SMB2_CREATE_TAG_POSIX, 16); 2842 2844 if (IS_ERR(context)) { 2843 2845 rc = PTR_ERR(context); 2844 - goto err_out1; 2846 + goto err_out2; 2845 2847 } else if (context) { 2846 2848 struct create_posix *posix = 2847 2849 (struct create_posix *)context; ··· 2849 2851 le32_to_cpu(context->DataLength) < 2850 2852 sizeof(struct create_posix) - 4) { 2851 2853 rc = -EINVAL; 2852 - goto err_out1; 2854 + goto err_out2; 2853 2855 } 2854 2856 ksmbd_debug(SMB, "get posix context\n"); 2855 2857 ··· 2861 2863 2862 2864 if (ksmbd_override_fsids(work)) { 2863 2865 rc = -ENOMEM; 2864 - goto err_out1; 2866 + goto err_out2; 2865 2867 } 2866 2868 2867 2869 rc = ksmbd_vfs_kern_path_locked(work, name, LOOKUP_NO_SYMLINKS, ··· 3036 3038 } 3037 3039 } 3038 3040 3039 - rc = ksmbd_query_inode_status(d_inode(path.dentry->d_parent)); 3041 + rc = ksmbd_query_inode_status(path.dentry->d_parent); 3040 3042 if (rc == KSMBD_INODE_STATUS_PENDING_DELETE) { 3041 3043 rc = -EBUSY; 3042 3044 goto err_out; ··· 3150 3152 idmap, 3151 3153 &path, 3152 3154 pntsd, 3153 - pntsd_size); 3155 + pntsd_size, 3156 + false); 3154 3157 kfree(pntsd); 3155 3158 if (rc) 3156 3159 
pr_err("failed to store ntacl in xattr : %d\n", ··· 3174 3175 3175 3176 fp->attrib_only = !(req->DesiredAccess & ~(FILE_READ_ATTRIBUTES_LE | 3176 3177 FILE_WRITE_ATTRIBUTES_LE | FILE_SYNCHRONIZE_LE)); 3177 - if (!S_ISDIR(file_inode(filp)->i_mode) && open_flags & O_TRUNC && 3178 - !fp->attrib_only && !stream_name) { 3179 - smb_break_all_oplock(work, fp); 3180 - need_truncate = 1; 3181 - } 3182 3178 3183 3179 /* fp should be searchable through ksmbd_inode.m_fp_list 3184 3180 * after daccess, saccess, attrib_only, and stream are ··· 3187 3193 if (ksmbd_inode_pending_delete(fp)) { 3188 3194 rc = -EBUSY; 3189 3195 goto err_out; 3190 - } 3191 - 3192 - share_ret = ksmbd_smb_check_shared_mode(fp->filp, fp); 3193 - if (!test_share_config_flag(work->tcon->share_conf, KSMBD_SHARE_FLAG_OPLOCKS) || 3194 - (req_op_level == SMB2_OPLOCK_LEVEL_LEASE && 3195 - !(conn->vals->capabilities & SMB2_GLOBAL_CAP_LEASING))) { 3196 - if (share_ret < 0 && !S_ISDIR(file_inode(fp->filp)->i_mode)) { 3197 - rc = share_ret; 3198 - goto err_out; 3199 - } 3200 - } else { 3201 - if (req_op_level == SMB2_OPLOCK_LEVEL_LEASE) { 3202 - req_op_level = smb2_map_lease_to_oplock(lc->req_state); 3203 - ksmbd_debug(SMB, 3204 - "lease req for(%s) req oplock state 0x%x, lease state 0x%x\n", 3205 - name, req_op_level, lc->req_state); 3206 - rc = find_same_lease_key(sess, fp->f_ci, lc); 3207 - if (rc) 3208 - goto err_out; 3209 - } else if (open_flags == O_RDONLY && 3210 - (req_op_level == SMB2_OPLOCK_LEVEL_BATCH || 3211 - req_op_level == SMB2_OPLOCK_LEVEL_EXCLUSIVE)) 3212 - req_op_level = SMB2_OPLOCK_LEVEL_II; 3213 - 3214 - rc = smb_grant_oplock(work, req_op_level, 3215 - fp->persistent_id, fp, 3216 - le32_to_cpu(req->hdr.Id.SyncId.TreeId), 3217 - lc, share_ret); 3218 - if (rc < 0) 3219 - goto err_out; 3220 - } 3221 - 3222 - if (req->CreateOptions & FILE_DELETE_ON_CLOSE_LE) 3223 - ksmbd_fd_set_delete_on_close(fp, file_info); 3224 - 3225 - if (need_truncate) { 3226 - rc = smb2_create_truncate(&path); 3227 - if (rc) 
3228 - goto err_out; 3229 - } 3230 - 3231 - if (req->CreateContextsOffset) { 3232 - struct create_alloc_size_req *az_req; 3233 - 3234 - az_req = (struct create_alloc_size_req *)smb2_find_context_vals(req, 3235 - SMB2_CREATE_ALLOCATION_SIZE, 4); 3236 - if (IS_ERR(az_req)) { 3237 - rc = PTR_ERR(az_req); 3238 - goto err_out; 3239 - } else if (az_req) { 3240 - loff_t alloc_size; 3241 - int err; 3242 - 3243 - if (le16_to_cpu(az_req->ccontext.DataOffset) + 3244 - le32_to_cpu(az_req->ccontext.DataLength) < 3245 - sizeof(struct create_alloc_size_req)) { 3246 - rc = -EINVAL; 3247 - goto err_out; 3248 - } 3249 - alloc_size = le64_to_cpu(az_req->AllocationSize); 3250 - ksmbd_debug(SMB, 3251 - "request smb2 create allocate size : %llu\n", 3252 - alloc_size); 3253 - smb_break_all_levII_oplock(work, fp, 1); 3254 - err = vfs_fallocate(fp->filp, FALLOC_FL_KEEP_SIZE, 0, 3255 - alloc_size); 3256 - if (err < 0) 3257 - ksmbd_debug(SMB, 3258 - "vfs_fallocate is failed : %d\n", 3259 - err); 3260 - } 3261 - 3262 - context = smb2_find_context_vals(req, SMB2_CREATE_QUERY_ON_DISK_ID, 4); 3263 - if (IS_ERR(context)) { 3264 - rc = PTR_ERR(context); 3265 - goto err_out; 3266 - } else if (context) { 3267 - ksmbd_debug(SMB, "get query on disk id context\n"); 3268 - query_disk_id = 1; 3269 - } 3270 3196 } 3271 3197 3272 3198 rc = ksmbd_vfs_getattr(&path, &stat); ··· 3205 3291 smb2_update_xattrs(tcon, &path, fp); 3206 3292 else 3207 3293 smb2_new_xattrs(tcon, &path, fp); 3294 + 3295 + if (file_present || created) 3296 + ksmbd_vfs_kern_path_unlock(&parent_path, &path); 3297 + 3298 + if (!S_ISDIR(file_inode(filp)->i_mode) && open_flags & O_TRUNC && 3299 + !fp->attrib_only && !stream_name) { 3300 + smb_break_all_oplock(work, fp); 3301 + need_truncate = 1; 3302 + } 3303 + 3304 + share_ret = ksmbd_smb_check_shared_mode(fp->filp, fp); 3305 + if (!test_share_config_flag(work->tcon->share_conf, KSMBD_SHARE_FLAG_OPLOCKS) || 3306 + (req_op_level == SMB2_OPLOCK_LEVEL_LEASE && 3307 + 
!(conn->vals->capabilities & SMB2_GLOBAL_CAP_LEASING))) { 3308 + if (share_ret < 0 && !S_ISDIR(file_inode(fp->filp)->i_mode)) { 3309 + rc = share_ret; 3310 + goto err_out1; 3311 + } 3312 + } else { 3313 + if (req_op_level == SMB2_OPLOCK_LEVEL_LEASE) { 3314 + req_op_level = smb2_map_lease_to_oplock(lc->req_state); 3315 + ksmbd_debug(SMB, 3316 + "lease req for(%s) req oplock state 0x%x, lease state 0x%x\n", 3317 + name, req_op_level, lc->req_state); 3318 + rc = find_same_lease_key(sess, fp->f_ci, lc); 3319 + if (rc) 3320 + goto err_out1; 3321 + } else if (open_flags == O_RDONLY && 3322 + (req_op_level == SMB2_OPLOCK_LEVEL_BATCH || 3323 + req_op_level == SMB2_OPLOCK_LEVEL_EXCLUSIVE)) 3324 + req_op_level = SMB2_OPLOCK_LEVEL_II; 3325 + 3326 + rc = smb_grant_oplock(work, req_op_level, 3327 + fp->persistent_id, fp, 3328 + le32_to_cpu(req->hdr.Id.SyncId.TreeId), 3329 + lc, share_ret); 3330 + if (rc < 0) 3331 + goto err_out1; 3332 + } 3333 + 3334 + if (req->CreateOptions & FILE_DELETE_ON_CLOSE_LE) 3335 + ksmbd_fd_set_delete_on_close(fp, file_info); 3336 + 3337 + if (need_truncate) { 3338 + rc = smb2_create_truncate(&fp->filp->f_path); 3339 + if (rc) 3340 + goto err_out1; 3341 + } 3342 + 3343 + if (req->CreateContextsOffset) { 3344 + struct create_alloc_size_req *az_req; 3345 + 3346 + az_req = (struct create_alloc_size_req *)smb2_find_context_vals(req, 3347 + SMB2_CREATE_ALLOCATION_SIZE, 4); 3348 + if (IS_ERR(az_req)) { 3349 + rc = PTR_ERR(az_req); 3350 + goto err_out1; 3351 + } else if (az_req) { 3352 + loff_t alloc_size; 3353 + int err; 3354 + 3355 + if (le16_to_cpu(az_req->ccontext.DataOffset) + 3356 + le32_to_cpu(az_req->ccontext.DataLength) < 3357 + sizeof(struct create_alloc_size_req)) { 3358 + rc = -EINVAL; 3359 + goto err_out1; 3360 + } 3361 + alloc_size = le64_to_cpu(az_req->AllocationSize); 3362 + ksmbd_debug(SMB, 3363 + "request smb2 create allocate size : %llu\n", 3364 + alloc_size); 3365 + smb_break_all_levII_oplock(work, fp, 1); 3366 + err = 
vfs_fallocate(fp->filp, FALLOC_FL_KEEP_SIZE, 0, 3367 + alloc_size); 3368 + if (err < 0) 3369 + ksmbd_debug(SMB, 3370 + "vfs_fallocate is failed : %d\n", 3371 + err); 3372 + } 3373 + 3374 + context = smb2_find_context_vals(req, SMB2_CREATE_QUERY_ON_DISK_ID, 4); 3375 + if (IS_ERR(context)) { 3376 + rc = PTR_ERR(context); 3377 + goto err_out1; 3378 + } else if (context) { 3379 + ksmbd_debug(SMB, "get query on disk id context\n"); 3380 + query_disk_id = 1; 3381 + } 3382 + } 3208 3383 3209 3384 memcpy(fp->client_guid, conn->ClientGUID, SMB2_CLIENT_GUID_SIZE); 3210 3385 ··· 3401 3398 } 3402 3399 3403 3400 err_out: 3404 - if (file_present || created) { 3405 - inode_unlock(d_inode(parent_path.dentry)); 3406 - path_put(&path); 3407 - path_put(&parent_path); 3408 - } 3409 - ksmbd_revert_fsids(work); 3401 + if (rc && (file_present || created)) 3402 + ksmbd_vfs_kern_path_unlock(&parent_path, &path); 3403 + 3410 3404 err_out1: 3405 + ksmbd_revert_fsids(work); 3406 + 3407 + err_out2: 3411 3408 if (!rc) { 3412 3409 ksmbd_update_fstate(&work->sess->file_table, fp, FP_INITED); 3413 3410 rc = ksmbd_iov_pin_rsp(work, (void *)rsp, iov_len); ··· 5540 5537 rc = ksmbd_vfs_setxattr(file_mnt_idmap(fp->filp), 5541 5538 &fp->filp->f_path, 5542 5539 xattr_stream_name, 5543 - NULL, 0, 0); 5540 + NULL, 0, 0, true); 5544 5541 if (rc < 0) { 5545 5542 pr_err("failed to store stream name in xattr: %d\n", 5546 5543 rc); ··· 5633 5630 if (rc) 5634 5631 rc = -EINVAL; 5635 5632 out: 5636 - if (file_present) { 5637 - inode_unlock(d_inode(parent_path.dentry)); 5638 - path_put(&path); 5639 - path_put(&parent_path); 5640 - } 5633 + if (file_present) 5634 + ksmbd_vfs_kern_path_unlock(&parent_path, &path); 5635 + 5641 5636 if (!IS_ERR(link_name)) 5642 5637 kfree(link_name); 5643 5638 kfree(pathname); ··· 5702 5701 da.flags = XATTR_DOSINFO_ATTRIB | XATTR_DOSINFO_CREATE_TIME | 5703 5702 XATTR_DOSINFO_ITIME; 5704 5703 5705 - rc = ksmbd_vfs_set_dos_attrib_xattr(idmap, &filp->f_path, &da); 5704 + rc = 
ksmbd_vfs_set_dos_attrib_xattr(idmap, &filp->f_path, &da, 5705 + true); 5706 5706 if (rc) 5707 5707 ksmbd_debug(SMB, 5708 5708 "failed to restore file attribute in EA\n"); ··· 6015 6013 fp->saccess |= FILE_SHARE_DELETE_LE; 6016 6014 6017 6015 return set_info_sec(fp->conn, fp->tcon, &fp->filp->f_path, pntsd, 6018 - buf_len, false); 6016 + buf_len, false, true); 6019 6017 } 6020 6018 6021 6019 /** ··· 7584 7582 7585 7583 da.attr = le32_to_cpu(fp->f_ci->m_fattr); 7586 7584 ret = ksmbd_vfs_set_dos_attrib_xattr(idmap, 7587 - &fp->filp->f_path, &da); 7585 + &fp->filp->f_path, 7586 + &da, true); 7588 7587 if (ret) 7589 7588 fp->f_ci->m_fattr = old_fattr; 7590 7589 } ··· 8234 8231 return; 8235 8232 8236 8233 err_out: 8237 - opinfo->op_state = OPLOCK_STATE_NONE; 8238 8234 wake_up_interruptible_all(&opinfo->oplock_q); 8239 8235 atomic_dec(&opinfo->breaking_cnt); 8240 8236 wake_up_interruptible_all(&opinfo->oplock_brk);
+4 -3
fs/smb/server/smbacl.c
··· 1185 1185 pntsd_size += sizeof(struct smb_acl) + nt_size; 1186 1186 } 1187 1187 1188 - ksmbd_vfs_set_sd_xattr(conn, idmap, path, pntsd, pntsd_size); 1188 + ksmbd_vfs_set_sd_xattr(conn, idmap, path, pntsd, pntsd_size, false); 1189 1189 kfree(pntsd); 1190 1190 } 1191 1191 ··· 1377 1377 1378 1378 int set_info_sec(struct ksmbd_conn *conn, struct ksmbd_tree_connect *tcon, 1379 1379 const struct path *path, struct smb_ntsd *pntsd, int ntsd_len, 1380 - bool type_check) 1380 + bool type_check, bool get_write) 1381 1381 { 1382 1382 int rc; 1383 1383 struct smb_fattr fattr = {{0}}; ··· 1437 1437 if (test_share_config_flag(tcon->share_conf, KSMBD_SHARE_FLAG_ACL_XATTR)) { 1438 1438 /* Update WinACL in xattr */ 1439 1439 ksmbd_vfs_remove_sd_xattrs(idmap, path); 1440 - ksmbd_vfs_set_sd_xattr(conn, idmap, path, pntsd, ntsd_len); 1440 + ksmbd_vfs_set_sd_xattr(conn, idmap, path, pntsd, ntsd_len, 1441 + get_write); 1441 1442 } 1442 1443 1443 1444 out:
+1 -1
fs/smb/server/smbacl.h
··· 207 207 __le32 *pdaccess, int uid); 208 208 int set_info_sec(struct ksmbd_conn *conn, struct ksmbd_tree_connect *tcon, 209 209 const struct path *path, struct smb_ntsd *pntsd, int ntsd_len, 210 - bool type_check); 210 + bool type_check, bool get_write); 211 211 void id_to_sid(unsigned int cid, uint sidtype, struct smb_sid *ssid); 212 212 void ksmbd_init_domain(u32 *sub_auth); 213 213
+42 -28
fs/smb/server/vfs.c
··· 97 97 return -ENOENT; 98 98 } 99 99 100 + err = mnt_want_write(parent_path->mnt); 101 + if (err) { 102 + path_put(parent_path); 103 + putname(filename); 104 + return -ENOENT; 105 + } 106 + 100 107 inode_lock_nested(parent_path->dentry->d_inode, I_MUTEX_PARENT); 101 108 d = lookup_one_qstr_excl(&last, parent_path->dentry, 0); 102 109 if (IS_ERR(d)) ··· 130 123 131 124 err_out: 132 125 inode_unlock(d_inode(parent_path->dentry)); 126 + mnt_drop_write(parent_path->mnt); 133 127 path_put(parent_path); 134 128 putname(filename); 135 129 return -ENOENT; ··· 459 451 fp->stream.name, 460 452 (void *)stream_buf, 461 453 size, 462 - 0); 454 + 0, 455 + true); 463 456 if (err < 0) 464 457 goto out; 465 458 ··· 602 593 goto out_err; 603 594 } 604 595 605 - err = mnt_want_write(path->mnt); 606 - if (err) 607 - goto out_err; 608 - 609 596 idmap = mnt_idmap(path->mnt); 610 597 if (S_ISDIR(d_inode(path->dentry)->i_mode)) { 611 598 err = vfs_rmdir(idmap, d_inode(parent), path->dentry); ··· 612 607 if (err) 613 608 ksmbd_debug(VFS, "unlink failed, err %d\n", err); 614 609 } 615 - mnt_drop_write(path->mnt); 616 610 617 611 out_err: 618 612 ksmbd_revert_fsids(work); ··· 719 715 goto out3; 720 716 } 721 717 722 - parent_fp = ksmbd_lookup_fd_inode(d_inode(old_child->d_parent)); 718 + parent_fp = ksmbd_lookup_fd_inode(old_child->d_parent); 723 719 if (parent_fp) { 724 720 if (parent_fp->daccess & FILE_DELETE_LE) { 725 721 pr_err("parent dir is opened with delete access\n"); ··· 911 907 * @attr_value: xattr value to set 912 908 * @attr_size: size of xattr value 913 909 * @flags: destination buffer length 910 + * @get_write: get write access to a mount 914 911 * 915 912 * Return: 0 on success, otherwise error 916 913 */ 917 914 int ksmbd_vfs_setxattr(struct mnt_idmap *idmap, 918 915 const struct path *path, const char *attr_name, 919 - void *attr_value, size_t attr_size, int flags) 916 + void *attr_value, size_t attr_size, int flags, 917 + bool get_write) 920 918 { 921 919 int err; 922 
920 923 - err = mnt_want_write(path->mnt); 924 - if (err) 925 - return err; 921 + if (get_write == true) { 922 + err = mnt_want_write(path->mnt); 923 + if (err) 924 + return err; 925 + } 926 926 927 927 err = vfs_setxattr(idmap, 928 928 path->dentry, ··· 936 928 flags); 937 929 if (err) 938 930 ksmbd_debug(VFS, "setxattr failed, err %d\n", err); 939 - mnt_drop_write(path->mnt); 931 + if (get_write == true) 932 + mnt_drop_write(path->mnt); 940 933 return err; 941 934 } 942 935 ··· 1261 1252 } 1262 1253 1263 1254 if (!err) { 1255 + err = mnt_want_write(parent_path->mnt); 1256 + if (err) { 1257 + path_put(path); 1258 + path_put(parent_path); 1259 + return err; 1260 + } 1261 + 1264 1262 err = ksmbd_vfs_lock_parent(parent_path->dentry, path->dentry); 1265 1263 if (err) { 1266 1264 path_put(path); ··· 1275 1259 } 1276 1260 } 1277 1261 return err; 1262 + } 1263 + 1264 + void ksmbd_vfs_kern_path_unlock(struct path *parent_path, struct path *path) 1265 + { 1266 + inode_unlock(d_inode(parent_path->dentry)); 1267 + mnt_drop_write(parent_path->mnt); 1268 + path_put(path); 1269 + path_put(parent_path); 1278 1270 } 1279 1271 1280 1272 struct dentry *ksmbd_vfs_kern_path_create(struct ksmbd_work *work, ··· 1439 1415 int ksmbd_vfs_set_sd_xattr(struct ksmbd_conn *conn, 1440 1416 struct mnt_idmap *idmap, 1441 1417 const struct path *path, 1442 - struct smb_ntsd *pntsd, int len) 1418 + struct smb_ntsd *pntsd, int len, 1419 + bool get_write) 1443 1420 { 1444 1421 int rc; 1445 1422 struct ndr sd_ndr = {0}, acl_ndr = {0}; ··· 1500 1475 1501 1476 rc = ksmbd_vfs_setxattr(idmap, path, 1502 1477 XATTR_NAME_SD, sd_ndr.data, 1503 - sd_ndr.offset, 0); 1478 + sd_ndr.offset, 0, get_write); 1504 1479 if (rc < 0) 1505 1480 pr_err("Failed to store XATTR ntacl :%d\n", rc); 1506 1481 ··· 1589 1564 1590 1565 int ksmbd_vfs_set_dos_attrib_xattr(struct mnt_idmap *idmap, 1591 1566 const struct path *path, 1592 - struct xattr_dos_attrib *da) 1567 + struct xattr_dos_attrib *da, 1568 + bool get_write) 1593 
1569 { 1594 1570 struct ndr n; 1595 1571 int err; ··· 1600 1574 return err; 1601 1575 1602 1576 err = ksmbd_vfs_setxattr(idmap, path, XATTR_NAME_DOS_ATTRIBUTE, 1603 - (void *)n.data, n.offset, 0); 1577 + (void *)n.data, n.offset, 0, get_write); 1604 1578 if (err) 1605 1579 ksmbd_debug(SMB, "failed to store dos attribute in xattr\n"); 1606 1580 kfree(n.data); ··· 1872 1846 } 1873 1847 posix_state_to_acl(&acl_state, acls->a_entries); 1874 1848 1875 - rc = mnt_want_write(path->mnt); 1876 - if (rc) 1877 - goto out_err; 1878 - 1879 1849 rc = set_posix_acl(idmap, dentry, ACL_TYPE_ACCESS, acls); 1880 1850 if (rc < 0) 1881 1851 ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_ACCESS) failed, rc : %d\n", ··· 1883 1861 ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_DEFAULT) failed, rc : %d\n", 1884 1862 rc); 1885 1863 } 1886 - mnt_drop_write(path->mnt); 1887 1864 1888 - out_err: 1889 1865 free_acl_state(&acl_state); 1890 1866 posix_acl_release(acls); 1891 1867 return rc; ··· 1913 1893 } 1914 1894 } 1915 1895 1916 - rc = mnt_want_write(path->mnt); 1917 - if (rc) 1918 - goto out_err; 1919 - 1920 1896 rc = set_posix_acl(idmap, dentry, ACL_TYPE_ACCESS, acls); 1921 1897 if (rc < 0) 1922 1898 ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_ACCESS) failed, rc : %d\n", ··· 1924 1908 ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_DEFAULT) failed, rc : %d\n", 1925 1909 rc); 1926 1910 } 1927 - mnt_drop_write(path->mnt); 1928 1911 1929 - out_err: 1930 1912 posix_acl_release(acls); 1931 1913 return rc; 1932 1914 }
+7 -3
fs/smb/server/vfs.h
··· 109 109 int attr_name_len); 110 110 int ksmbd_vfs_setxattr(struct mnt_idmap *idmap, 111 111 const struct path *path, const char *attr_name, 112 - void *attr_value, size_t attr_size, int flags); 112 + void *attr_value, size_t attr_size, int flags, 113 + bool get_write); 113 114 int ksmbd_vfs_xattr_stream_name(char *stream_name, char **xattr_stream_name, 114 115 size_t *xattr_stream_name_size, int s_type); 115 116 int ksmbd_vfs_remove_xattr(struct mnt_idmap *idmap, ··· 118 117 int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name, 119 118 unsigned int flags, struct path *parent_path, 120 119 struct path *path, bool caseless); 120 + void ksmbd_vfs_kern_path_unlock(struct path *parent_path, struct path *path); 121 121 struct dentry *ksmbd_vfs_kern_path_create(struct ksmbd_work *work, 122 122 const char *name, 123 123 unsigned int flags, ··· 146 144 int ksmbd_vfs_set_sd_xattr(struct ksmbd_conn *conn, 147 145 struct mnt_idmap *idmap, 148 146 const struct path *path, 149 - struct smb_ntsd *pntsd, int len); 147 + struct smb_ntsd *pntsd, int len, 148 + bool get_write); 150 149 int ksmbd_vfs_get_sd_xattr(struct ksmbd_conn *conn, 151 150 struct mnt_idmap *idmap, 152 151 struct dentry *dentry, 153 152 struct smb_ntsd **pntsd); 154 153 int ksmbd_vfs_set_dos_attrib_xattr(struct mnt_idmap *idmap, 155 154 const struct path *path, 156 - struct xattr_dos_attrib *da); 155 + struct xattr_dos_attrib *da, 156 + bool get_write); 157 157 int ksmbd_vfs_get_dos_attrib_xattr(struct mnt_idmap *idmap, 158 158 struct dentry *dentry, 159 159 struct xattr_dos_attrib *da);
+13 -20
fs/smb/server/vfs_cache.c
··· 66 66 return tmp & inode_hash_mask; 67 67 } 68 68 69 - static struct ksmbd_inode *__ksmbd_inode_lookup(struct inode *inode) 69 + static struct ksmbd_inode *__ksmbd_inode_lookup(struct dentry *de) 70 70 { 71 71 struct hlist_head *head = inode_hashtable + 72 - inode_hash(inode->i_sb, inode->i_ino); 72 + inode_hash(d_inode(de)->i_sb, (unsigned long)de); 73 73 struct ksmbd_inode *ci = NULL, *ret_ci = NULL; 74 74 75 75 hlist_for_each_entry(ci, head, m_hash) { 76 - if (ci->m_inode == inode) { 76 + if (ci->m_de == de) { 77 77 if (atomic_inc_not_zero(&ci->m_count)) 78 78 ret_ci = ci; 79 79 break; ··· 84 84 85 85 static struct ksmbd_inode *ksmbd_inode_lookup(struct ksmbd_file *fp) 86 86 { 87 - return __ksmbd_inode_lookup(file_inode(fp->filp)); 87 + return __ksmbd_inode_lookup(fp->filp->f_path.dentry); 88 88 } 89 89 90 - static struct ksmbd_inode *ksmbd_inode_lookup_by_vfsinode(struct inode *inode) 91 - { 92 - struct ksmbd_inode *ci; 93 - 94 - read_lock(&inode_hash_lock); 95 - ci = __ksmbd_inode_lookup(inode); 96 - read_unlock(&inode_hash_lock); 97 - return ci; 98 - } 99 - 100 - int ksmbd_query_inode_status(struct inode *inode) 90 + int ksmbd_query_inode_status(struct dentry *dentry) 101 91 { 102 92 struct ksmbd_inode *ci; 103 93 int ret = KSMBD_INODE_STATUS_UNKNOWN; 104 94 105 95 read_lock(&inode_hash_lock); 106 - ci = __ksmbd_inode_lookup(inode); 96 + ci = __ksmbd_inode_lookup(dentry); 107 97 if (ci) { 108 98 ret = KSMBD_INODE_STATUS_OK; 109 99 if (ci->m_flags & (S_DEL_PENDING | S_DEL_ON_CLS)) ··· 133 143 static void ksmbd_inode_hash(struct ksmbd_inode *ci) 134 144 { 135 145 struct hlist_head *b = inode_hashtable + 136 - inode_hash(ci->m_inode->i_sb, ci->m_inode->i_ino); 146 + inode_hash(d_inode(ci->m_de)->i_sb, (unsigned long)ci->m_de); 137 147 138 148 hlist_add_head(&ci->m_hash, b); 139 149 } ··· 147 157 148 158 static int ksmbd_inode_init(struct ksmbd_inode *ci, struct ksmbd_file *fp) 149 159 { 150 - ci->m_inode = file_inode(fp->filp); 151 160 
atomic_set(&ci->m_count, 1); 152 161 atomic_set(&ci->op_count, 0); 153 162 atomic_set(&ci->sop_count, 0); ··· 155 166 INIT_LIST_HEAD(&ci->m_fp_list); 156 167 INIT_LIST_HEAD(&ci->m_op_list); 157 168 rwlock_init(&ci->m_lock); 169 + ci->m_de = fp->filp->f_path.dentry; 158 170 return 0; 159 171 } 160 172 ··· 478 488 return fp; 479 489 } 480 490 481 - struct ksmbd_file *ksmbd_lookup_fd_inode(struct inode *inode) 491 + struct ksmbd_file *ksmbd_lookup_fd_inode(struct dentry *dentry) 482 492 { 483 493 struct ksmbd_file *lfp; 484 494 struct ksmbd_inode *ci; 495 + struct inode *inode = d_inode(dentry); 485 496 486 - ci = ksmbd_inode_lookup_by_vfsinode(inode); 497 + read_lock(&inode_hash_lock); 498 + ci = __ksmbd_inode_lookup(dentry); 499 + read_unlock(&inode_hash_lock); 487 500 if (!ci) 488 501 return NULL; 489 502
+3 -3
fs/smb/server/vfs_cache.h
··· 51 51 atomic_t op_count; 52 52 /* opinfo count for streams */ 53 53 atomic_t sop_count; 54 - struct inode *m_inode; 54 + struct dentry *m_de; 55 55 unsigned int m_flags; 56 56 struct hlist_node m_hash; 57 57 struct list_head m_fp_list; ··· 140 140 void ksmbd_fd_put(struct ksmbd_work *work, struct ksmbd_file *fp); 141 141 struct ksmbd_file *ksmbd_lookup_durable_fd(unsigned long long id); 142 142 struct ksmbd_file *ksmbd_lookup_fd_cguid(char *cguid); 143 - struct ksmbd_file *ksmbd_lookup_fd_inode(struct inode *inode); 143 + struct ksmbd_file *ksmbd_lookup_fd_inode(struct dentry *dentry); 144 144 unsigned int ksmbd_open_durable_fd(struct ksmbd_file *fp); 145 145 struct ksmbd_file *ksmbd_open_fd(struct ksmbd_work *work, struct file *filp); 146 146 void ksmbd_close_tree_conn_fds(struct ksmbd_work *work); ··· 164 164 KSMBD_INODE_STATUS_PENDING_DELETE, 165 165 }; 166 166 167 - int ksmbd_query_inode_status(struct inode *inode); 167 + int ksmbd_query_inode_status(struct dentry *dentry); 168 168 bool ksmbd_inode_pending_delete(struct ksmbd_file *fp); 169 169 void ksmbd_set_inode_pending_delete(struct ksmbd_file *fp); 170 170 void ksmbd_clear_inode_pending_delete(struct ksmbd_file *fp);
+5 -1
fs/stat.c
··· 133 133 idmap = mnt_idmap(path->mnt); 134 134 if (inode->i_op->getattr) 135 135 return inode->i_op->getattr(idmap, path, stat, 136 - request_mask, query_flags); 136 + request_mask, 137 + query_flags | AT_GETATTR_NOSEC); 137 138 138 139 generic_fillattr(idmap, request_mask, inode, stat); 139 140 return 0; ··· 166 165 u32 request_mask, unsigned int query_flags) 167 166 { 168 167 int retval; 168 + 169 + if (WARN_ON_ONCE(query_flags & AT_GETATTR_NOSEC)) 170 + return -EPERM; 169 171 170 172 retval = security_inode_getattr(path); 171 173 if (retval)
+26 -39
fs/tracefs/event_inode.c
··· 27 27 /* 28 28 * eventfs_mutex protects the eventfs_inode (ei) dentry. Any access 29 29 * to the ei->dentry must be done under this mutex and after checking 30 - * if ei->is_freed is not set. The ei->dentry is released under the 31 - * mutex at the same time ei->is_freed is set. If ei->is_freed is set 32 - * then the ei->dentry is invalid. 30 + * if ei->is_freed is not set. When ei->is_freed is set, the dentry 31 + * is on its way to being freed after the last dput() is made on it. 33 32 */ 34 33 static DEFINE_MUTEX(eventfs_mutex); 35 34 36 35 /* 37 36 * The eventfs_inode (ei) itself is protected by SRCU. It is released from 38 37 * its parent's list and will have is_freed set (under eventfs_mutex). 39 - * After the SRCU grace period is over, the ei may be freed. 38 + * After the SRCU grace period is over and the last dput() is called 39 + * the ei is freed. 40 40 */ 41 41 DEFINE_STATIC_SRCU(eventfs_srcu); 42 42 ··· 95 95 if (!(dentry->d_inode->i_mode & S_IFDIR)) { 96 96 if (!ei->entry_attrs) { 97 97 ei->entry_attrs = kzalloc(sizeof(*ei->entry_attrs) * ei->nr_entries, 98 - GFP_KERNEL); 98 + GFP_NOFS); 99 99 if (!ei->entry_attrs) { 100 100 ret = -ENOMEM; 101 101 goto out; ··· 326 326 struct eventfs_attr *attr = NULL; 327 327 struct dentry **e_dentry = &ei->d_children[idx]; 328 328 struct dentry *dentry; 329 - bool invalidate = false; 329 + 330 + WARN_ON_ONCE(!inode_is_locked(parent->d_inode)); 330 331 331 332 mutex_lock(&eventfs_mutex); 332 333 if (ei->is_freed) { ··· 349 348 350 349 mutex_unlock(&eventfs_mutex); 351 350 352 - /* The lookup already has the parent->d_inode locked */ 353 - if (!lookup) 354 - inode_lock(parent->d_inode); 355 - 356 351 dentry = create_file(name, mode, attr, parent, data, fops); 357 - 358 - if (!lookup) 359 - inode_unlock(parent->d_inode); 360 352 361 353 mutex_lock(&eventfs_mutex); 362 354 ··· 359 365 * created the dentry for this e_dentry. In which case 360 366 * use that one. 
361 367 * 362 - * Note, with the mutex held, the e_dentry cannot have content 363 - * and the ei->is_freed be true at the same time. 368 + * If ei->is_freed is set, the e_dentry is currently on its 369 + * way to being freed, don't return it. If e_dentry is NULL 370 + * it means it was already freed. 364 371 */ 365 - dentry = *e_dentry; 366 - if (WARN_ON_ONCE(dentry && ei->is_freed)) 372 + if (ei->is_freed) 367 373 dentry = NULL; 374 + else 375 + dentry = *e_dentry; 368 376 /* The lookup does not need to up the dentry refcount */ 369 377 if (dentry && !lookup) 370 378 dget(dentry); ··· 383 387 * Otherwise it means two dentries exist with the same name. 384 388 */ 385 389 WARN_ON_ONCE(!ei->is_freed); 386 - invalidate = true; 390 + dentry = NULL; 387 391 } 388 392 mutex_unlock(&eventfs_mutex); 389 393 390 - if (invalidate) 391 - d_invalidate(dentry); 392 - 393 - if (lookup || invalidate) 394 + if (lookup) 394 395 dput(dentry); 395 396 396 - return invalidate ? NULL : dentry; 397 + return dentry; 397 398 } 398 399 399 400 /** ··· 430 437 create_dir_dentry(struct eventfs_inode *pei, struct eventfs_inode *ei, 431 438 struct dentry *parent, bool lookup) 432 439 { 433 - bool invalidate = false; 434 440 struct dentry *dentry = NULL; 441 + 442 + WARN_ON_ONCE(!inode_is_locked(parent->d_inode)); 435 443 436 444 mutex_lock(&eventfs_mutex); 437 445 if (pei->is_freed || ei->is_freed) { ··· 450 456 } 451 457 mutex_unlock(&eventfs_mutex); 452 458 453 - /* The lookup already has the parent->d_inode locked */ 454 - if (!lookup) 455 - inode_lock(parent->d_inode); 456 - 457 459 dentry = create_dir(ei, parent); 458 - 459 - if (!lookup) 460 - inode_unlock(parent->d_inode); 461 460 462 461 mutex_lock(&eventfs_mutex); 463 462 ··· 460 473 * created the dentry for this e_dentry. In which case 461 474 * use that one. 462 475 * 463 - * Note, with the mutex held, the e_dentry cannot have content 464 - * and the ei->is_freed be true at the same time. 
476 + * If ei->is_freed is set, the e_dentry is currently on its 477 + * way to being freed. 465 478 */ 466 479 dentry = ei->dentry; 467 480 if (dentry && !lookup) ··· 480 493 * Otherwise it means two dentries exist with the same name. 481 494 */ 482 495 WARN_ON_ONCE(!ei->is_freed); 483 - invalidate = true; 496 + dentry = NULL; 484 497 } 485 498 mutex_unlock(&eventfs_mutex); 486 - if (invalidate) 487 - d_invalidate(dentry); 488 499 489 - if (lookup || invalidate) 500 + if (lookup) 490 501 dput(dentry); 491 502 492 - return invalidate ? NULL : dentry; 503 + return dentry; 493 504 } 494 505 495 506 /** ··· 617 632 { 618 633 struct dentry **tmp; 619 634 620 - tmp = krealloc(*dentries, sizeof(d) * (cnt + 2), GFP_KERNEL); 635 + tmp = krealloc(*dentries, sizeof(d) * (cnt + 2), GFP_NOFS); 621 636 if (!tmp) 622 637 return -1; 623 638 tmp[cnt] = d; ··· 683 698 return -ENOMEM; 684 699 } 685 700 701 + inode_lock(parent->d_inode); 686 702 list_for_each_entry_srcu(ei_child, &ei->children, list, 687 703 srcu_read_lock_held(&eventfs_srcu)) { 688 704 d = create_dir_dentry(ei, ei_child, parent, false); ··· 716 730 cnt++; 717 731 } 718 732 } 733 + inode_unlock(parent->d_inode); 719 734 srcu_read_unlock(&eventfs_srcu, idx); 720 735 ret = dcache_dir_open(inode, file); 721 736
+4 -9
fs/tracefs/inode.c
··· 509 509 struct dentry *dentry; 510 510 int error; 511 511 512 + /* Must always have a parent. */ 513 + if (WARN_ON_ONCE(!parent)) 514 + return ERR_PTR(-EINVAL); 515 + 512 516 error = simple_pin_fs(&trace_fs_type, &tracefs_mount, 513 517 &tracefs_mount_count); 514 518 if (error) 515 519 return ERR_PTR(error); 516 - 517 - /* 518 - * If the parent is not specified, we create it in the root. 519 - * We need the root dentry to do this, which is in the super 520 - * block. A pointer to that is in the struct vfsmount that we 521 - * have around. 522 - */ 523 - if (!parent) 524 - parent = tracefs_mount->mnt_root; 525 520 526 521 if (unlikely(IS_DEADDIR(parent->d_inode))) 527 522 dentry = ERR_PTR(-ENOENT);
+3 -2
fs/xfs/xfs_dquot.c
··· 562 562 struct xfs_dquot *dqp, 563 563 struct xfs_buf *bp) 564 564 { 565 - struct xfs_disk_dquot *ddqp = bp->b_addr + dqp->q_bufoffset; 565 + struct xfs_dqblk *dqb = xfs_buf_offset(bp, dqp->q_bufoffset); 566 + struct xfs_disk_dquot *ddqp = &dqb->dd_diskdq; 566 567 567 568 /* 568 569 * Ensure that we got the type and ID we were looking for. ··· 1251 1250 } 1252 1251 1253 1252 /* Flush the incore dquot to the ondisk buffer. */ 1254 - dqblk = bp->b_addr + dqp->q_bufoffset; 1253 + dqblk = xfs_buf_offset(bp, dqp->q_bufoffset); 1255 1254 xfs_dquot_to_disk(&dqblk->dd_diskdq, dqp); 1256 1255 1257 1256 /*
+18 -3
fs/xfs/xfs_dquot_item_recover.c
··· 19 19 #include "xfs_log.h" 20 20 #include "xfs_log_priv.h" 21 21 #include "xfs_log_recover.h" 22 + #include "xfs_error.h" 22 23 23 24 STATIC void 24 25 xlog_recover_dquot_ra_pass2( ··· 66 65 { 67 66 struct xfs_mount *mp = log->l_mp; 68 67 struct xfs_buf *bp; 68 + struct xfs_dqblk *dqb; 69 69 struct xfs_disk_dquot *ddq, *recddq; 70 70 struct xfs_dq_logformat *dq_f; 71 71 xfs_failaddr_t fa; ··· 132 130 return error; 133 131 134 132 ASSERT(bp); 135 - ddq = xfs_buf_offset(bp, dq_f->qlf_boffset); 133 + dqb = xfs_buf_offset(bp, dq_f->qlf_boffset); 134 + ddq = &dqb->dd_diskdq; 136 135 137 136 /* 138 137 * If the dquot has an LSN in it, recover the dquot only if it's less 139 138 * than the lsn of the transaction we are replaying. 140 139 */ 141 140 if (xfs_has_crc(mp)) { 142 - struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddq; 143 141 xfs_lsn_t lsn = be64_to_cpu(dqb->dd_lsn); 144 142 145 143 if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) { ··· 149 147 150 148 memcpy(ddq, recddq, item->ri_buf[1].i_len); 151 149 if (xfs_has_crc(mp)) { 152 - xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk), 150 + xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk), 153 151 XFS_DQUOT_CRC_OFF); 152 + } 153 + 154 + /* Validate the recovered dquot. */ 155 + fa = xfs_dqblk_verify(log->l_mp, dqb, dq_f->qlf_id); 156 + if (fa) { 157 + XFS_CORRUPTION_ERROR("Bad dquot after recovery", 158 + XFS_ERRLEVEL_LOW, mp, dqb, 159 + sizeof(struct xfs_dqblk)); 160 + xfs_alert(mp, 161 + "Metadata corruption detected at %pS, dquot 0x%x", 162 + fa, dq_f->qlf_id); 163 + error = -EFSCORRUPTED; 164 + goto out_release; 154 165 } 155 166 156 167 ASSERT(dq_f->qlf_size == 2);
+8
fs/xfs/xfs_inode.h
··· 569 569 extern void xfs_setup_iops(struct xfs_inode *ip); 570 570 extern void xfs_diflags_to_iflags(struct xfs_inode *ip, bool init); 571 571 572 + static inline void xfs_update_stable_writes(struct xfs_inode *ip) 573 + { 574 + if (bdev_stable_writes(xfs_inode_buftarg(ip)->bt_bdev)) 575 + mapping_set_stable_writes(VFS_I(ip)->i_mapping); 576 + else 577 + mapping_clear_stable_writes(VFS_I(ip)->i_mapping); 578 + } 579 + 572 580 /* 573 581 * When setting up a newly allocated inode, we need to call 574 582 * xfs_finish_inode_setup() once the inode is fully instantiated at
+22 -12
fs/xfs/xfs_ioctl.c
··· 1121 1121 struct fileattr *fa) 1122 1122 { 1123 1123 struct xfs_mount *mp = ip->i_mount; 1124 + bool rtflag = (fa->fsx_xflags & FS_XFLAG_REALTIME); 1124 1125 uint64_t i_flags2; 1125 1126 1126 - /* Can't change realtime flag if any extents are allocated. */ 1127 - if ((ip->i_df.if_nextents || ip->i_delayed_blks) && 1128 - XFS_IS_REALTIME_INODE(ip) != (fa->fsx_xflags & FS_XFLAG_REALTIME)) 1129 - return -EINVAL; 1130 - 1131 - /* If realtime flag is set then must have realtime device */ 1132 - if (fa->fsx_xflags & FS_XFLAG_REALTIME) { 1133 - if (mp->m_sb.sb_rblocks == 0 || mp->m_sb.sb_rextsize == 0 || 1134 - xfs_extlen_to_rtxmod(mp, ip->i_extsize)) 1127 + if (rtflag != XFS_IS_REALTIME_INODE(ip)) { 1128 + /* Can't change realtime flag if any extents are allocated. */ 1129 + if (ip->i_df.if_nextents || ip->i_delayed_blks) 1135 1130 return -EINVAL; 1136 1131 } 1137 1132 1138 - /* Clear reflink if we are actually able to set the rt flag. */ 1139 - if ((fa->fsx_xflags & FS_XFLAG_REALTIME) && xfs_is_reflink_inode(ip)) 1140 - ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK; 1133 + if (rtflag) { 1134 + /* If realtime flag is set then must have realtime device */ 1135 + if (mp->m_sb.sb_rblocks == 0 || mp->m_sb.sb_rextsize == 0 || 1136 + xfs_extlen_to_rtxmod(mp, ip->i_extsize)) 1137 + return -EINVAL; 1138 + 1139 + /* Clear reflink if we are actually able to set the rt flag. */ 1140 + if (xfs_is_reflink_inode(ip)) 1141 + ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK; 1142 + } 1141 1143 1142 1144 /* diflags2 only valid for v3 inodes. */ 1143 1145 i_flags2 = xfs_flags2diflags2(ip, fa->fsx_xflags); ··· 1150 1148 ip->i_diflags2 = i_flags2; 1151 1149 1152 1150 xfs_diflags_to_iflags(ip, false); 1151 + 1152 + /* 1153 + * Make the stable writes flag match that of the device the inode 1154 + * resides on when flipping the RT flag. 
1155 + */ 1156 + if (rtflag != XFS_IS_REALTIME_INODE(ip) && S_ISREG(VFS_I(ip)->i_mode)) 1157 + xfs_update_stable_writes(ip); 1158 + 1153 1159 xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG); 1154 1160 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 1155 1161 XFS_STATS_INC(mp, xs_ig_attrchg);
+7
fs/xfs/xfs_iops.c
··· 1299 1299 mapping_set_gfp_mask(inode->i_mapping, (gfp_mask & ~(__GFP_FS))); 1300 1300 1301 1301 /* 1302 + * For real-time inodes update the stable write flags to that of the RT 1303 + * device instead of the data device. 1304 + */ 1305 + if (S_ISREG(inode->i_mode) && XFS_IS_REALTIME_INODE(ip)) 1306 + xfs_update_stable_writes(ip); 1307 + 1308 + /* 1302 1309 * If there is no attribute fork no ACL can exist on this inode, 1303 1310 * and it can't have any file capabilities attached to it either. 1304 1311 */
+1
include/acpi/acpi_bus.h
··· 542 542 int acpi_bus_init_power(struct acpi_device *device); 543 543 int acpi_device_fix_up_power(struct acpi_device *device); 544 544 void acpi_device_fix_up_power_extended(struct acpi_device *adev); 545 + void acpi_device_fix_up_power_children(struct acpi_device *adev); 545 546 int acpi_bus_update_power(acpi_handle handle, int *state_p); 546 547 int acpi_device_update_power(struct acpi_device *device, int *state_p); 547 548 bool acpi_bus_power_manageable(acpi_handle handle);
-1
include/linux/blk-pm.h
··· 15 15 extern void blk_post_runtime_suspend(struct request_queue *q, int err); 16 16 extern void blk_pre_runtime_resume(struct request_queue *q); 17 17 extern void blk_post_runtime_resume(struct request_queue *q); 18 - extern void blk_set_runtime_active(struct request_queue *q); 19 18 #else 20 19 static inline void blk_pm_runtime_init(struct request_queue *q, 21 20 struct device *dev) {}
+19
include/linux/debugfs.h
··· 171 171 ssize_t debugfs_read_file_str(struct file *file, char __user *user_buf, 172 172 size_t count, loff_t *ppos); 173 173 174 + /** 175 + * struct debugfs_cancellation - cancellation data 176 + * @list: internal, for keeping track 177 + * @cancel: callback to call 178 + * @cancel_data: extra data for the callback to call 179 + */ 180 + struct debugfs_cancellation { 181 + struct list_head list; 182 + void (*cancel)(struct dentry *, void *); 183 + void *cancel_data; 184 + }; 185 + 186 + void __acquires(cancellation) 187 + debugfs_enter_cancellation(struct file *file, 188 + struct debugfs_cancellation *cancellation); 189 + void __releases(cancellation) 190 + debugfs_leave_cancellation(struct file *file, 191 + struct debugfs_cancellation *cancellation); 192 + 174 193 #else 175 194 176 195 #include <linux/err.h>
+3
include/linux/hid.h
··· 679 679 struct list_head debug_list; 680 680 spinlock_t debug_list_lock; 681 681 wait_queue_head_t debug_wait; 682 + struct kref ref; 682 683 683 684 unsigned int id; /* system unique id */ 684 685 ··· 687 686 struct hid_bpf bpf; /* hid-bpf data */ 688 687 #endif /* CONFIG_BPF */ 689 688 }; 689 + 690 + void hiddev_free(struct kref *ref); 690 691 691 692 #define to_hid_device(pdev) \ 692 693 container_of(pdev, struct hid_device, dev)
+3 -1
include/linux/ieee80211.h
··· 2830 2830 static inline const struct ieee80211_he_6ghz_oper * 2831 2831 ieee80211_he_6ghz_oper(const struct ieee80211_he_operation *he_oper) 2832 2832 { 2833 - const u8 *ret = (const void *)&he_oper->optional; 2833 + const u8 *ret; 2834 2834 u32 he_oper_params; 2835 2835 2836 2836 if (!he_oper) 2837 2837 return NULL; 2838 + 2839 + ret = (const void *)&he_oper->optional; 2838 2840 2839 2841 he_oper_params = le32_to_cpu(he_oper->he_oper_params); 2840 2842
+17
include/linux/pagemap.h
··· 204 204 AS_NO_WRITEBACK_TAGS = 5, 205 205 AS_LARGE_FOLIO_SUPPORT = 6, 206 206 AS_RELEASE_ALWAYS, /* Call ->release_folio(), even if no private data */ 207 + AS_STABLE_WRITES, /* must wait for writeback before modifying 208 + folio contents */ 207 209 }; 208 210 209 211 /** ··· 289 287 static inline void mapping_clear_release_always(struct address_space *mapping) 290 288 { 291 289 clear_bit(AS_RELEASE_ALWAYS, &mapping->flags); 290 + } 291 + 292 + static inline bool mapping_stable_writes(const struct address_space *mapping) 293 + { 294 + return test_bit(AS_STABLE_WRITES, &mapping->flags); 295 + } 296 + 297 + static inline void mapping_set_stable_writes(struct address_space *mapping) 298 + { 299 + set_bit(AS_STABLE_WRITES, &mapping->flags); 300 + } 301 + 302 + static inline void mapping_clear_stable_writes(struct address_space *mapping) 303 + { 304 + clear_bit(AS_STABLE_WRITES, &mapping->flags); 292 305 } 293 306 294 307 static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
+1
include/linux/skmsg.h
··· 106 106 struct mutex work_mutex; 107 107 struct sk_psock_work_state work_state; 108 108 struct delayed_work work; 109 + struct sock *sk_pair; 109 110 struct rcu_work rwork; 110 111 }; 111 112
-13
include/linux/usb/phy.h
··· 144 144 */ 145 145 int (*set_wakeup)(struct usb_phy *x, bool enabled); 146 146 147 - /* notify phy port status change */ 148 - int (*notify_port_status)(struct usb_phy *x, int port, 149 - u16 portstatus, u16 portchange); 150 - 151 147 /* notify phy connect status change */ 152 148 int (*notify_connect)(struct usb_phy *x, 153 149 enum usb_device_speed speed); ··· 312 316 { 313 317 if (x && x->set_wakeup) 314 318 return x->set_wakeup(x, enabled); 315 - else 316 - return 0; 317 - } 318 - 319 - static inline int 320 - usb_phy_notify_port_status(struct usb_phy *x, int port, u16 portstatus, u16 portchange) 321 - { 322 - if (x && x->notify_port_status) 323 - return x->notify_port_status(x, port, portstatus, portchange); 324 319 else 325 320 return 0; 326 321 }
+1
include/net/af_unix.h
··· 75 75 }; 76 76 77 77 #define unix_sk(ptr) container_of_const(ptr, struct unix_sock, sk) 78 + #define unix_peer(sk) (unix_sk(sk)->peer) 78 79 79 80 #define peer_wait peer_wq.wait 80 81
+46
include/net/cfg80211.h
··· 9302 9302 */ 9303 9303 void cfg80211_links_removed(struct net_device *dev, u16 link_mask); 9304 9304 9305 + #ifdef CONFIG_CFG80211_DEBUGFS 9306 + /** 9307 + * wiphy_locked_debugfs_read - do a locked read in debugfs 9308 + * @wiphy: the wiphy to use 9309 + * @file: the file being read 9310 + * @buf: the buffer to fill and then read from 9311 + * @bufsize: size of the buffer 9312 + * @userbuf: the user buffer to copy to 9313 + * @count: read count 9314 + * @ppos: read position 9315 + * @handler: the read handler to call (under wiphy lock) 9316 + * @data: additional data to pass to the read handler 9317 + */ 9318 + ssize_t wiphy_locked_debugfs_read(struct wiphy *wiphy, struct file *file, 9319 + char *buf, size_t bufsize, 9320 + char __user *userbuf, size_t count, 9321 + loff_t *ppos, 9322 + ssize_t (*handler)(struct wiphy *wiphy, 9323 + struct file *file, 9324 + char *buf, 9325 + size_t bufsize, 9326 + void *data), 9327 + void *data); 9328 + 9329 + /** 9330 + * wiphy_locked_debugfs_write - do a locked write in debugfs 9331 + * @wiphy: the wiphy to use 9332 + * @file: the file being written to 9333 + * @buf: the buffer to copy the user data to 9334 + * @bufsize: size of the buffer 9335 + * @userbuf: the user buffer to copy from 9336 + * @count: read count 9337 + * @handler: the write handler to call (under wiphy lock) 9338 + * @data: additional data to pass to the write handler 9339 + */ 9340 + ssize_t wiphy_locked_debugfs_write(struct wiphy *wiphy, struct file *file, 9341 + char *buf, size_t bufsize, 9342 + const char __user *userbuf, size_t count, 9343 + ssize_t (*handler)(struct wiphy *wiphy, 9344 + struct file *file, 9345 + char *buf, 9346 + size_t count, 9347 + void *data), 9348 + void *data); 9349 + #endif 9350 + 9305 9351 #endif /* __NET_CFG80211_H */
+1 -1
include/net/neighbour.h
··· 162 162 struct rcu_head rcu; 163 163 struct net_device *dev; 164 164 netdevice_tracker dev_tracker; 165 - u8 primary_key[0]; 165 + u8 primary_key[]; 166 166 } __randomize_layout; 167 167 168 168 struct neigh_ops {
+3
include/uapi/linux/fcntl.h
··· 116 116 #define AT_HANDLE_FID AT_REMOVEDIR /* file handle is needed to 117 117 compare object identity and may not 118 118 be usable to open_by_handle_at(2) */ 119 + #if defined(__KERNEL__) 120 + #define AT_GETATTR_NOSEC 0x80000000 121 + #endif 119 122 120 123 #endif /* _UAPI_LINUX_FCNTL_H */
+1 -1
include/uapi/linux/v4l2-subdev.h
··· 239 239 * set (which is the default), the 'stream' fields will be forced to 0 by the 240 240 * kernel. 241 241 */ 242 - #define V4L2_SUBDEV_CLIENT_CAP_STREAMS (1U << 0) 242 + #define V4L2_SUBDEV_CLIENT_CAP_STREAMS (1ULL << 0) 243 243 244 244 /** 245 245 * struct v4l2_subdev_client_capability - Capabilities of the client accessing
+1 -1
io_uring/fs.c
··· 254 254 newf = u64_to_user_ptr(READ_ONCE(sqe->addr2)); 255 255 lnk->flags = READ_ONCE(sqe->hardlink_flags); 256 256 257 - lnk->oldpath = getname(oldf); 257 + lnk->oldpath = getname_uflags(oldf, lnk->flags); 258 258 if (IS_ERR(lnk->oldpath)) 259 259 return PTR_ERR(lnk->oldpath); 260 260
+1 -1
io_uring/rsrc.c
··· 1258 1258 */ 1259 1259 const struct bio_vec *bvec = imu->bvec; 1260 1260 1261 - if (offset <= bvec->bv_len) { 1261 + if (offset < bvec->bv_len) { 1262 1262 /* 1263 1263 * Note, huge pages buffers consists of one large 1264 1264 * bvec entry and should always go this way. The other
+2
kernel/bpf/memalloc.c
··· 978 978 memcg = get_memcg(c); 979 979 old_memcg = set_active_memcg(memcg); 980 980 ret = __alloc(c, NUMA_NO_NODE, GFP_KERNEL | __GFP_NOWARN | __GFP_ACCOUNT); 981 + if (ret) 982 + *(struct bpf_mem_cache **)ret = c; 981 983 set_active_memcg(old_memcg); 982 984 mem_cgroup_put(memcg); 983 985 }
+2 -1
kernel/locking/lockdep.c
··· 3497 3497 size = chain_block_size(curr); 3498 3498 if (likely(size >= req)) { 3499 3499 del_chain_block(0, size, chain_block_next(curr)); 3500 - add_chain_block(curr + req, size - req); 3500 + if (size > req) 3501 + add_chain_block(curr + req, size - req); 3501 3502 return curr; 3502 3503 } 3503 3504 }
-6
lib/errname.c
··· 111 111 E(ENOSPC), 112 112 E(ENOSR), 113 113 E(ENOSTR), 114 - #ifdef ENOSYM 115 - E(ENOSYM), 116 - #endif 117 114 E(ENOSYS), 118 115 E(ENOTBLK), 119 116 E(ENOTCONN), ··· 141 144 #endif 142 145 E(EREMOTE), 143 146 E(EREMOTEIO), 144 - #ifdef EREMOTERELEASE 145 - E(EREMOTERELEASE), 146 - #endif 147 147 E(ERESTART), 148 148 E(ERFKILL), 149 149 E(EROFS),
+1 -1
lib/iov_iter.c
··· 409 409 void *kaddr = kmap_local_page(page); 410 410 size_t n = min(bytes, (size_t)PAGE_SIZE - offset); 411 411 412 - n = iterate_and_advance(i, bytes, kaddr, 412 + n = iterate_and_advance(i, n, kaddr + offset, 413 413 copy_to_user_iter_nofault, 414 414 memcpy_to_iter); 415 415 kunmap_local(kaddr);
+1 -1
mm/page-writeback.c
··· 3107 3107 */ 3108 3108 void folio_wait_stable(struct folio *folio) 3109 3109 { 3110 - if (folio_inode(folio)->i_sb->s_iflags & SB_I_STABLE_WRITES) 3110 + if (mapping_stable_writes(folio_mapping(folio))) 3111 3111 folio_wait_writeback(folio); 3112 3112 } 3113 3113 EXPORT_SYMBOL_GPL(folio_wait_stable);
+2
net/core/skmsg.c
··· 826 826 827 827 if (psock->sk_redir) 828 828 sock_put(psock->sk_redir); 829 + if (psock->sk_pair) 830 + sock_put(psock->sk_pair); 829 831 sock_put(psock->sk); 830 832 kfree(psock); 831 833 }
+1
net/ethtool/netlink.c
··· 505 505 ret = skb->len; 506 506 break; 507 507 } 508 + ret = 0; 508 509 } 509 510 rtnl_unlock(); 510 511
+4 -2
net/ipv4/igmp.c
··· 216 216 int tv = get_random_u32_below(max_delay); 217 217 218 218 im->tm_running = 1; 219 - if (!mod_timer(&im->timer, jiffies+tv+2)) 220 - refcount_inc(&im->refcnt); 219 + if (refcount_inc_not_zero(&im->refcnt)) { 220 + if (mod_timer(&im->timer, jiffies + tv + 2)) 221 + ip_ma_put(im); 222 + } 221 223 } 222 224 223 225 static void igmp_gq_start_timer(struct in_device *in_dev)
+1 -1
net/mac80211/Kconfig
··· 88 88 89 89 config MAC80211_DEBUGFS 90 90 bool "Export mac80211 internals in DebugFS" 91 - depends on MAC80211 && DEBUG_FS 91 + depends on MAC80211 && CFG80211_DEBUGFS 92 92 help 93 93 Select this to see extensive information about 94 94 the internal state of mac80211 in debugfs.
+105 -45
net/mac80211/debugfs_netdev.c
··· 22 22 #include "debugfs_netdev.h" 23 23 #include "driver-ops.h" 24 24 25 + struct ieee80211_if_read_sdata_data { 26 + ssize_t (*format)(const struct ieee80211_sub_if_data *, char *, int); 27 + struct ieee80211_sub_if_data *sdata; 28 + }; 29 + 30 + static ssize_t ieee80211_if_read_sdata_handler(struct wiphy *wiphy, 31 + struct file *file, 32 + char *buf, 33 + size_t bufsize, 34 + void *data) 35 + { 36 + struct ieee80211_if_read_sdata_data *d = data; 37 + 38 + return d->format(d->sdata, buf, bufsize); 39 + } 40 + 25 41 static ssize_t ieee80211_if_read_sdata( 26 - struct ieee80211_sub_if_data *sdata, 42 + struct file *file, 27 43 char __user *userbuf, 28 44 size_t count, loff_t *ppos, 29 45 ssize_t (*format)(const struct ieee80211_sub_if_data *sdata, char *, int)) 30 46 { 47 + struct ieee80211_sub_if_data *sdata = file->private_data; 48 + struct ieee80211_if_read_sdata_data data = { 49 + .format = format, 50 + .sdata = sdata, 51 + }; 31 52 char buf[200]; 32 - ssize_t ret = -EINVAL; 33 53 34 - wiphy_lock(sdata->local->hw.wiphy); 35 - ret = (*format)(sdata, buf, sizeof(buf)); 36 - wiphy_unlock(sdata->local->hw.wiphy); 54 + return wiphy_locked_debugfs_read(sdata->local->hw.wiphy, 55 + file, buf, sizeof(buf), 56 + userbuf, count, ppos, 57 + ieee80211_if_read_sdata_handler, 58 + &data); 59 + } 37 60 38 - if (ret >= 0) 39 - ret = simple_read_from_buffer(userbuf, count, ppos, buf, ret); 61 + struct ieee80211_if_write_sdata_data { 62 + ssize_t (*write)(struct ieee80211_sub_if_data *, const char *, int); 63 + struct ieee80211_sub_if_data *sdata; 64 + }; 40 65 41 - return ret; 66 + static ssize_t ieee80211_if_write_sdata_handler(struct wiphy *wiphy, 67 + struct file *file, 68 + char *buf, 69 + size_t count, 70 + void *data) 71 + { 72 + struct ieee80211_if_write_sdata_data *d = data; 73 + 74 + return d->write(d->sdata, buf, count); 42 75 } 43 76 44 77 static ssize_t ieee80211_if_write_sdata( 45 - struct ieee80211_sub_if_data *sdata, 78 + struct file *file, 46 79 const char 
__user *userbuf, 47 80 size_t count, loff_t *ppos, 48 81 ssize_t (*write)(struct ieee80211_sub_if_data *sdata, const char *, int)) 49 82 { 83 + struct ieee80211_sub_if_data *sdata = file->private_data; 84 + struct ieee80211_if_write_sdata_data data = { 85 + .write = write, 86 + .sdata = sdata, 87 + }; 50 88 char buf[64]; 51 - ssize_t ret; 52 89 53 - if (count >= sizeof(buf)) 54 - return -E2BIG; 90 + return wiphy_locked_debugfs_write(sdata->local->hw.wiphy, 91 + file, buf, sizeof(buf), 92 + userbuf, count, 93 + ieee80211_if_write_sdata_handler, 94 + &data); 95 + } 55 96 56 - if (copy_from_user(buf, userbuf, count)) 57 - return -EFAULT; 58 - buf[count] = '\0'; 97 + struct ieee80211_if_read_link_data { 98 + ssize_t (*format)(const struct ieee80211_link_data *, char *, int); 99 + struct ieee80211_link_data *link; 100 + }; 59 101 60 - wiphy_lock(sdata->local->hw.wiphy); 61 - ret = (*write)(sdata, buf, count); 62 - wiphy_unlock(sdata->local->hw.wiphy); 102 + static ssize_t ieee80211_if_read_link_handler(struct wiphy *wiphy, 103 + struct file *file, 104 + char *buf, 105 + size_t bufsize, 106 + void *data) 107 + { 108 + struct ieee80211_if_read_link_data *d = data; 63 109 64 - return ret; 110 + return d->format(d->link, buf, bufsize); 65 111 } 66 112 67 113 static ssize_t ieee80211_if_read_link( 68 - struct ieee80211_link_data *link, 114 + struct file *file, 69 115 char __user *userbuf, 70 116 size_t count, loff_t *ppos, 71 117 ssize_t (*format)(const struct ieee80211_link_data *link, char *, int)) 72 118 { 119 + struct ieee80211_link_data *link = file->private_data; 120 + struct ieee80211_if_read_link_data data = { 121 + .format = format, 122 + .link = link, 123 + }; 73 124 char buf[200]; 74 - ssize_t ret = -EINVAL; 75 125 76 - wiphy_lock(link->sdata->local->hw.wiphy); 77 - ret = (*format)(link, buf, sizeof(buf)); 78 - wiphy_unlock(link->sdata->local->hw.wiphy); 126 + return wiphy_locked_debugfs_read(link->sdata->local->hw.wiphy, 127 + file, buf, sizeof(buf), 128 + 
userbuf, count, ppos, 129 + ieee80211_if_read_link_handler, 130 + &data); 131 + } 79 132 80 - if (ret >= 0) 81 - ret = simple_read_from_buffer(userbuf, count, ppos, buf, ret); 133 + struct ieee80211_if_write_link_data { 134 + ssize_t (*write)(struct ieee80211_link_data *, const char *, int); 135 + struct ieee80211_link_data *link; 136 + }; 82 137 83 - return ret; 138 + static ssize_t ieee80211_if_write_link_handler(struct wiphy *wiphy, 139 + struct file *file, 140 + char *buf, 141 + size_t count, 142 + void *data) 143 + { 144 + struct ieee80211_if_write_sdata_data *d = data; 145 + 146 + return d->write(d->sdata, buf, count); 84 147 } 85 148 86 149 static ssize_t ieee80211_if_write_link( 87 - struct ieee80211_link_data *link, 150 + struct file *file, 88 151 const char __user *userbuf, 89 152 size_t count, loff_t *ppos, 90 153 ssize_t (*write)(struct ieee80211_link_data *link, const char *, int)) 91 154 { 155 + struct ieee80211_link_data *link = file->private_data; 156 + struct ieee80211_if_write_link_data data = { 157 + .write = write, 158 + .link = link, 159 + }; 92 160 char buf[64]; 93 - ssize_t ret; 94 161 95 - if (count >= sizeof(buf)) 96 - return -E2BIG; 97 - 98 - if (copy_from_user(buf, userbuf, count)) 99 - return -EFAULT; 100 - buf[count] = '\0'; 101 - 102 - wiphy_lock(link->sdata->local->hw.wiphy); 103 - ret = (*write)(link, buf, count); 104 - wiphy_unlock(link->sdata->local->hw.wiphy); 105 - 106 - return ret; 162 + return wiphy_locked_debugfs_write(link->sdata->local->hw.wiphy, 163 + file, buf, sizeof(buf), 164 + userbuf, count, 165 + ieee80211_if_write_link_handler, 166 + &data); 107 167 } 108 168 109 169 #define IEEE80211_IF_FMT(name, type, field, format_string) \ ··· 233 173 char __user *userbuf, \ 234 174 size_t count, loff_t *ppos) \ 235 175 { \ 236 - return ieee80211_if_read_sdata(file->private_data, \ 176 + return ieee80211_if_read_sdata(file, \ 237 177 userbuf, count, ppos, \ 238 178 ieee80211_if_fmt_##name); \ 239 179 } ··· 243 183 const char 
__user *userbuf, \ 244 184 size_t count, loff_t *ppos) \ 245 185 { \ 246 - return ieee80211_if_write_sdata(file->private_data, userbuf, \ 186 + return ieee80211_if_write_sdata(file, userbuf, \ 247 187 count, ppos, \ 248 188 ieee80211_if_parse_##name); \ 249 189 } ··· 271 211 char __user *userbuf, \ 272 212 size_t count, loff_t *ppos) \ 273 213 { \ 274 - return ieee80211_if_read_link(file->private_data, \ 214 + return ieee80211_if_read_link(file, \ 275 215 userbuf, count, ppos, \ 276 216 ieee80211_if_fmt_##name); \ 277 217 } ··· 281 221 const char __user *userbuf, \ 282 222 size_t count, loff_t *ppos) \ 283 223 { \ 284 - return ieee80211_if_write_link(file->private_data, userbuf, \ 224 + return ieee80211_if_write_link(file, userbuf, \ 285 225 count, ppos, \ 286 226 ieee80211_if_parse_##name); \ 287 227 }
+42 -32
net/mac80211/debugfs_sta.c
··· 312 312 STA_OPS_RW(aql); 313 313 314 314 315 - static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf, 316 - size_t count, loff_t *ppos) 315 + static ssize_t sta_agg_status_do_read(struct wiphy *wiphy, struct file *file, 316 + char *buf, size_t bufsz, void *data) 317 317 { 318 - char *buf, *p; 319 - ssize_t bufsz = 71 + IEEE80211_NUM_TIDS * 40; 318 + struct sta_info *sta = data; 319 + char *p = buf; 320 320 int i; 321 - struct sta_info *sta = file->private_data; 322 321 struct tid_ampdu_rx *tid_rx; 323 322 struct tid_ampdu_tx *tid_tx; 324 - ssize_t ret; 325 - 326 - buf = kzalloc(bufsz, GFP_KERNEL); 327 - if (!buf) 328 - return -ENOMEM; 329 - p = buf; 330 - 331 - rcu_read_lock(); 332 323 333 324 p += scnprintf(p, bufsz + buf - p, "next dialog_token: %#02x\n", 334 325 sta->ampdu_mlme.dialog_token_allocator + 1); ··· 329 338 for (i = 0; i < IEEE80211_NUM_TIDS; i++) { 330 339 bool tid_rx_valid; 331 340 332 - tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[i]); 333 - tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[i]); 341 + tid_rx = wiphy_dereference(wiphy, sta->ampdu_mlme.tid_rx[i]); 342 + tid_tx = wiphy_dereference(wiphy, sta->ampdu_mlme.tid_tx[i]); 334 343 tid_rx_valid = test_bit(i, sta->ampdu_mlme.agg_session_valid); 335 344 336 345 p += scnprintf(p, bufsz + buf - p, "%02d", i); ··· 349 358 tid_tx ? 
skb_queue_len(&tid_tx->pending) : 0); 350 359 p += scnprintf(p, bufsz + buf - p, "\n"); 351 360 } 352 - rcu_read_unlock(); 353 361 354 - ret = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); 362 + return p - buf; 363 + } 364 + 365 + static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf, 366 + size_t count, loff_t *ppos) 367 + { 368 + struct sta_info *sta = file->private_data; 369 + struct wiphy *wiphy = sta->local->hw.wiphy; 370 + size_t bufsz = 71 + IEEE80211_NUM_TIDS * 40; 371 + char *buf = kmalloc(bufsz, GFP_KERNEL); 372 + ssize_t ret; 373 + 374 + if (!buf) 375 + return -ENOMEM; 376 + 377 + ret = wiphy_locked_debugfs_read(wiphy, file, buf, bufsz, 378 + userbuf, count, ppos, 379 + sta_agg_status_do_read, sta); 355 380 kfree(buf); 381 + 356 382 return ret; 357 383 } 358 384 359 - static ssize_t sta_agg_status_write(struct file *file, const char __user *userbuf, 360 - size_t count, loff_t *ppos) 385 + static ssize_t sta_agg_status_do_write(struct wiphy *wiphy, struct file *file, 386 + char *buf, size_t count, void *data) 361 387 { 362 - char _buf[25] = {}, *buf = _buf; 363 - struct sta_info *sta = file->private_data; 388 + struct sta_info *sta = data; 364 389 bool start, tx; 365 390 unsigned long tid; 366 - char *pos; 391 + char *pos = buf; 367 392 int ret, timeout = 5000; 368 393 369 - if (count > sizeof(_buf)) 370 - return -EINVAL; 371 - 372 - if (copy_from_user(buf, userbuf, count)) 373 - return -EFAULT; 374 - 375 - buf[sizeof(_buf) - 1] = '\0'; 376 - pos = buf; 377 394 buf = strsep(&pos, " "); 378 395 if (!buf) 379 396 return -EINVAL; ··· 419 420 if (ret || tid >= IEEE80211_NUM_TIDS) 420 421 return -EINVAL; 421 422 422 - wiphy_lock(sta->local->hw.wiphy); 423 423 if (tx) { 424 424 if (start) 425 425 ret = ieee80211_start_tx_ba_session(&sta->sta, tid, ··· 430 432 3, true); 431 433 ret = 0; 432 434 } 433 - wiphy_unlock(sta->local->hw.wiphy); 434 435 435 436 return ret ?: count; 437 + } 438 + 439 + static ssize_t 
sta_agg_status_write(struct file *file, 440 + const char __user *userbuf, 441 + size_t count, loff_t *ppos) 442 + { 443 + struct sta_info *sta = file->private_data; 444 + struct wiphy *wiphy = sta->local->hw.wiphy; 445 + char _buf[26]; 446 + 447 + return wiphy_locked_debugfs_write(wiphy, file, _buf, sizeof(_buf), 448 + userbuf, count, 449 + sta_agg_status_do_write, sta); 436 450 } 437 451 STA_OPS_RW(agg_status); 438 452
+7 -2
net/mac80211/driver-ops.h
··· 23 23 static inline struct ieee80211_sub_if_data * 24 24 get_bss_sdata(struct ieee80211_sub_if_data *sdata) 25 25 { 26 - if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 26 + if (sdata && sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 27 27 sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, 28 28 u.ap); 29 29 ··· 695 695 struct ieee80211_sub_if_data *sdata, 696 696 u32 queues, bool drop) 697 697 { 698 - struct ieee80211_vif *vif = sdata ? &sdata->vif : NULL; 698 + struct ieee80211_vif *vif; 699 699 700 700 might_sleep(); 701 701 lockdep_assert_wiphy(local->hw.wiphy); 702 + 703 + sdata = get_bss_sdata(sdata); 704 + vif = sdata ? &sdata->vif : NULL; 702 705 703 706 if (sdata && !check_sdata_in_driver(sdata)) 704 707 return; ··· 718 715 { 719 716 might_sleep(); 720 717 lockdep_assert_wiphy(local->hw.wiphy); 718 + 719 + sdata = get_bss_sdata(sdata); 721 720 722 721 if (sdata && !check_sdata_in_driver(sdata)) 723 722 return;
+1
net/mac80211/ht.c
··· 271 271 case NL80211_CHAN_WIDTH_80: 272 272 case NL80211_CHAN_WIDTH_80P80: 273 273 case NL80211_CHAN_WIDTH_160: 274 + case NL80211_CHAN_WIDTH_320: 274 275 bw = ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ? 275 276 IEEE80211_STA_RX_BW_40 : IEEE80211_STA_RX_BW_20; 276 277 break;
+1
net/mptcp/options.c
··· 108 108 mp_opt->suboptions |= OPTION_MPTCP_DSS; 109 109 mp_opt->use_map = 1; 110 110 mp_opt->mpc_map = 1; 111 + mp_opt->use_ack = 0; 111 112 mp_opt->data_len = get_unaligned_be16(ptr); 112 113 ptr += 2; 113 114 }
-2
net/unix/af_unix.c
··· 213 213 } 214 214 #endif /* CONFIG_SECURITY_NETWORK */ 215 215 216 - #define unix_peer(sk) (unix_sk(sk)->peer) 217 - 218 216 static inline int unix_our_peer(struct sock *sk, struct sock *osk) 219 217 { 220 218 return unix_peer(osk) == sk;
+5
net/unix/unix_bpf.c
··· 159 159 160 160 int unix_stream_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore) 161 161 { 162 + struct sock *sk_pair; 163 + 162 164 if (restore) { 163 165 sk->sk_write_space = psock->saved_write_space; 164 166 sock_replace_proto(sk, psock->sk_proto); 165 167 return 0; 166 168 } 167 169 170 + sk_pair = unix_peer(sk); 171 + sock_hold(sk_pair); 172 + psock->sk_pair = sk_pair; 168 173 unix_stream_bpf_check_needs_rebuild(psock->sk_proto); 169 174 sock_replace_proto(sk, &unix_stream_bpf_prot); 170 175 return 0;
+4 -2
net/wireless/core.c
··· 191 191 return err; 192 192 } 193 193 194 + wiphy_lock(&rdev->wiphy); 194 195 list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { 195 196 if (!wdev->netdev) 196 197 continue; 197 198 nl80211_notify_iface(rdev, wdev, NL80211_CMD_DEL_INTERFACE); 198 199 } 199 200 200 - wiphy_lock(&rdev->wiphy); 201 201 nl80211_notify_wiphy(rdev, NL80211_CMD_DEL_WIPHY); 202 202 203 203 wiphy_net_set(&rdev->wiphy, net); ··· 206 206 WARN_ON(err); 207 207 208 208 nl80211_notify_wiphy(rdev, NL80211_CMD_NEW_WIPHY); 209 - wiphy_unlock(&rdev->wiphy); 210 209 211 210 list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { 212 211 if (!wdev->netdev) 213 212 continue; 214 213 nl80211_notify_iface(rdev, wdev, NL80211_CMD_NEW_INTERFACE); 215 214 } 215 + wiphy_unlock(&rdev->wiphy); 216 216 217 217 return 0; 218 218 } ··· 221 221 { 222 222 struct cfg80211_registered_device *rdev = data; 223 223 224 + wiphy_lock(&rdev->wiphy); 224 225 rdev_rfkill_poll(rdev); 226 + wiphy_unlock(&rdev->wiphy); 225 227 } 226 228 227 229 void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev,
+1
net/wireless/core.h
··· 293 293 u32 rssi_hyst; 294 294 s32 last_rssi_event_value; 295 295 enum nl80211_cqm_rssi_threshold_event last_rssi_event_type; 296 + bool use_range_api; 296 297 int n_rssi_thresholds; 297 298 s32 rssi_thresholds[] __counted_by(n_rssi_thresholds); 298 299 };
+160
net/wireless/debugfs.c
··· 4 4 * 5 5 * Copyright 2009 Luis R. Rodriguez <lrodriguez@atheros.com> 6 6 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> 7 + * Copyright (C) 2023 Intel Corporation 7 8 */ 8 9 9 10 #include <linux/slab.h> ··· 110 109 DEBUGFS_ADD(long_retry_limit); 111 110 DEBUGFS_ADD(ht40allow_map); 112 111 } 112 + 113 + struct debugfs_read_work { 114 + struct wiphy_work work; 115 + ssize_t (*handler)(struct wiphy *wiphy, 116 + struct file *file, 117 + char *buf, 118 + size_t count, 119 + void *data); 120 + struct wiphy *wiphy; 121 + struct file *file; 122 + char *buf; 123 + size_t bufsize; 124 + void *data; 125 + ssize_t ret; 126 + struct completion completion; 127 + }; 128 + 129 + static void wiphy_locked_debugfs_read_work(struct wiphy *wiphy, 130 + struct wiphy_work *work) 131 + { 132 + struct debugfs_read_work *w = container_of(work, typeof(*w), work); 133 + 134 + w->ret = w->handler(w->wiphy, w->file, w->buf, w->bufsize, w->data); 135 + complete(&w->completion); 136 + } 137 + 138 + static void wiphy_locked_debugfs_read_cancel(struct dentry *dentry, 139 + void *data) 140 + { 141 + struct debugfs_read_work *w = data; 142 + 143 + wiphy_work_cancel(w->wiphy, &w->work); 144 + complete(&w->completion); 145 + } 146 + 147 + ssize_t wiphy_locked_debugfs_read(struct wiphy *wiphy, struct file *file, 148 + char *buf, size_t bufsize, 149 + char __user *userbuf, size_t count, 150 + loff_t *ppos, 151 + ssize_t (*handler)(struct wiphy *wiphy, 152 + struct file *file, 153 + char *buf, 154 + size_t bufsize, 155 + void *data), 156 + void *data) 157 + { 158 + struct debugfs_read_work work = { 159 + .handler = handler, 160 + .wiphy = wiphy, 161 + .file = file, 162 + .buf = buf, 163 + .bufsize = bufsize, 164 + .data = data, 165 + .ret = -ENODEV, 166 + .completion = COMPLETION_INITIALIZER_ONSTACK(work.completion), 167 + }; 168 + struct debugfs_cancellation cancellation = { 169 + .cancel = wiphy_locked_debugfs_read_cancel, 170 + .cancel_data = &work, 171 + }; 172 + 173 + /* don't leak 
stack data or whatever */ 174 + memset(buf, 0, bufsize); 175 + 176 + wiphy_work_init(&work.work, wiphy_locked_debugfs_read_work); 177 + wiphy_work_queue(wiphy, &work.work); 178 + 179 + debugfs_enter_cancellation(file, &cancellation); 180 + wait_for_completion(&work.completion); 181 + debugfs_leave_cancellation(file, &cancellation); 182 + 183 + if (work.ret < 0) 184 + return work.ret; 185 + 186 + if (WARN_ON(work.ret > bufsize)) 187 + return -EINVAL; 188 + 189 + return simple_read_from_buffer(userbuf, count, ppos, buf, work.ret); 190 + } 191 + EXPORT_SYMBOL_GPL(wiphy_locked_debugfs_read); 192 + 193 + struct debugfs_write_work { 194 + struct wiphy_work work; 195 + ssize_t (*handler)(struct wiphy *wiphy, 196 + struct file *file, 197 + char *buf, 198 + size_t count, 199 + void *data); 200 + struct wiphy *wiphy; 201 + struct file *file; 202 + char *buf; 203 + size_t count; 204 + void *data; 205 + ssize_t ret; 206 + struct completion completion; 207 + }; 208 + 209 + static void wiphy_locked_debugfs_write_work(struct wiphy *wiphy, 210 + struct wiphy_work *work) 211 + { 212 + struct debugfs_write_work *w = container_of(work, typeof(*w), work); 213 + 214 + w->ret = w->handler(w->wiphy, w->file, w->buf, w->count, w->data); 215 + complete(&w->completion); 216 + } 217 + 218 + static void wiphy_locked_debugfs_write_cancel(struct dentry *dentry, 219 + void *data) 220 + { 221 + struct debugfs_write_work *w = data; 222 + 223 + wiphy_work_cancel(w->wiphy, &w->work); 224 + complete(&w->completion); 225 + } 226 + 227 + ssize_t wiphy_locked_debugfs_write(struct wiphy *wiphy, 228 + struct file *file, char *buf, size_t bufsize, 229 + const char __user *userbuf, size_t count, 230 + ssize_t (*handler)(struct wiphy *wiphy, 231 + struct file *file, 232 + char *buf, 233 + size_t count, 234 + void *data), 235 + void *data) 236 + { 237 + struct debugfs_write_work work = { 238 + .handler = handler, 239 + .wiphy = wiphy, 240 + .file = file, 241 + .buf = buf, 242 + .count = count, 243 + .data = 
data, 244 + .ret = -ENODEV, 245 + .completion = COMPLETION_INITIALIZER_ONSTACK(work.completion), 246 + }; 247 + struct debugfs_cancellation cancellation = { 248 + .cancel = wiphy_locked_debugfs_write_cancel, 249 + .cancel_data = &work, 250 + }; 251 + 252 + /* mostly used for strings so enforce NUL-termination for safety */ 253 + if (count >= bufsize) 254 + return -EINVAL; 255 + 256 + memset(buf, 0, bufsize); 257 + 258 + if (copy_from_user(buf, userbuf, count)) 259 + return -EFAULT; 260 + 261 + wiphy_work_init(&work.work, wiphy_locked_debugfs_write_work); 262 + wiphy_work_queue(wiphy, &work.work); 263 + 264 + debugfs_enter_cancellation(file, &cancellation); 265 + wait_for_completion(&work.completion); 266 + debugfs_leave_cancellation(file, &cancellation); 267 + 268 + return work.ret; 269 + } 270 + EXPORT_SYMBOL_GPL(wiphy_locked_debugfs_write);
+36 -19
net/wireless/nl80211.c
··· 3822 3822 struct net_device *dev = wdev->netdev; 3823 3823 void *hdr; 3824 3824 3825 + lockdep_assert_wiphy(&rdev->wiphy); 3826 + 3825 3827 WARN_ON(cmd != NL80211_CMD_NEW_INTERFACE && 3826 3828 cmd != NL80211_CMD_DEL_INTERFACE && 3827 3829 cmd != NL80211_CMD_SET_INTERFACE); ··· 3991 3989 3992 3990 if_idx = 0; 3993 3991 3992 + wiphy_lock(&rdev->wiphy); 3994 3993 list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { 3995 3994 if (if_idx < if_start) { 3996 3995 if_idx++; ··· 4001 3998 cb->nlh->nlmsg_seq, NLM_F_MULTI, 4002 3999 rdev, wdev, 4003 4000 NL80211_CMD_NEW_INTERFACE) < 0) { 4001 + wiphy_unlock(&rdev->wiphy); 4004 4002 goto out; 4005 4003 } 4006 4004 if_idx++; 4007 4005 } 4006 + wiphy_unlock(&rdev->wiphy); 4008 4007 4009 4008 wp_idx++; 4010 4009 } ··· 12793 12788 int i, n, low_index; 12794 12789 int err; 12795 12790 12796 - /* RSSI reporting disabled? */ 12797 - if (!cqm_config) 12798 - return rdev_set_cqm_rssi_range_config(rdev, dev, 0, 0); 12799 - 12800 12791 /* 12801 12792 * Obtain current RSSI value if possible, if not and no RSSI threshold 12802 12793 * event has been received yet, we should receive an event after a ··· 12867 12866 wdev->iftype != NL80211_IFTYPE_P2P_CLIENT) 12868 12867 return -EOPNOTSUPP; 12869 12868 12870 - if (n_thresholds <= 1 && rdev->ops->set_cqm_rssi_config) { 12871 - if (n_thresholds == 0 || thresholds[0] == 0) /* Disabling */ 12872 - return rdev_set_cqm_rssi_config(rdev, dev, 0, 0); 12873 - 12874 - return rdev_set_cqm_rssi_config(rdev, dev, 12875 - thresholds[0], hysteresis); 12876 - } 12877 - 12878 - if (!wiphy_ext_feature_isset(&rdev->wiphy, 12879 - NL80211_EXT_FEATURE_CQM_RSSI_LIST)) 12880 - return -EOPNOTSUPP; 12881 - 12882 12869 if (n_thresholds == 1 && thresholds[0] == 0) /* Disabling */ 12883 12870 n_thresholds = 0; 12884 12871 12885 12872 old = wiphy_dereference(wdev->wiphy, wdev->cqm_config); 12873 + 12874 + /* if already disabled just succeed */ 12875 + if (!n_thresholds && !old) 12876 + return 0; 12877 + 12878 + 
if (n_thresholds > 1) { 12879 + if (!wiphy_ext_feature_isset(&rdev->wiphy, 12880 + NL80211_EXT_FEATURE_CQM_RSSI_LIST) || 12881 + !rdev->ops->set_cqm_rssi_range_config) 12882 + return -EOPNOTSUPP; 12883 + } else { 12884 + if (!rdev->ops->set_cqm_rssi_config) 12885 + return -EOPNOTSUPP; 12886 + } 12886 12887 12887 12888 if (n_thresholds) { 12888 12889 cqm_config = kzalloc(struct_size(cqm_config, rssi_thresholds, ··· 12898 12895 memcpy(cqm_config->rssi_thresholds, thresholds, 12899 12896 flex_array_size(cqm_config, rssi_thresholds, 12900 12897 n_thresholds)); 12898 + cqm_config->use_range_api = n_thresholds > 1 || 12899 + !rdev->ops->set_cqm_rssi_config; 12901 12900 12902 12901 rcu_assign_pointer(wdev->cqm_config, cqm_config); 12902 + 12903 + if (cqm_config->use_range_api) 12904 + err = cfg80211_cqm_rssi_update(rdev, dev, cqm_config); 12905 + else 12906 + err = rdev_set_cqm_rssi_config(rdev, dev, 12907 + thresholds[0], 12908 + hysteresis); 12903 12909 } else { 12904 12910 RCU_INIT_POINTER(wdev->cqm_config, NULL); 12911 + /* if enabled as range also disable via range */ 12912 + if (old->use_range_api) 12913 + err = rdev_set_cqm_rssi_range_config(rdev, dev, 0, 0); 12914 + else 12915 + err = rdev_set_cqm_rssi_config(rdev, dev, 0, 0); 12905 12916 } 12906 12917 12907 - err = cfg80211_cqm_rssi_update(rdev, dev, cqm_config); 12908 12918 if (err) { 12909 12919 rcu_assign_pointer(wdev->cqm_config, old); 12910 12920 kfree_rcu(cqm_config, rcu_head); ··· 19026 19010 s32 rssi_level; 19027 19011 19028 19012 cqm_config = wiphy_dereference(wdev->wiphy, wdev->cqm_config); 19029 - if (!wdev->cqm_config) 19013 + if (!cqm_config) 19030 19014 return; 19031 19015 19032 - cfg80211_cqm_rssi_update(rdev, wdev->netdev, cqm_config); 19016 + if (cqm_config->use_range_api) 19017 + cfg80211_cqm_rssi_update(rdev, wdev->netdev, cqm_config); 19033 19018 19034 19019 rssi_level = cqm_config->last_rssi_event_value; 19035 19020 rssi_event = cqm_config->last_rssi_event_type;
+1 -2
scripts/checkstack.pl
··· 97 97 # 11160: a7 fb ff 60 aghi %r15,-160 98 98 # or 99 99 # 100092: e3 f0 ff c8 ff 71 lay %r15,-56(%r15) 100 - $re = qr/.*(?:lay|ag?hi).*\%r15,-(([0-9]{2}|[3-9])[0-9]{2}) 101 - (?:\(\%r15\))?$/ox; 100 + $re = qr/.*(?:lay|ag?hi).*\%r15,-([0-9]+)(?:\(\%r15\))?$/o; 102 101 } elsif ($arch eq 'sparc' || $arch eq 'sparc64') { 103 102 # f0019d10: 9d e3 bf 90 save %sp, -112, %sp 104 103 $re = qr/.*save.*%sp, -(([0-9]{2}|[3-9])[0-9]{2}), %sp/o;
-2
tools/arch/parisc/include/uapi/asm/errno.h
··· 75 75 76 76 /* We now return you to your regularly scheduled HPUX. */ 77 77 78 - #define ENOSYM 215 /* symbol does not exist in executable */ 79 78 #define ENOTSOCK 216 /* Socket operation on non-socket */ 80 79 #define EDESTADDRREQ 217 /* Destination address required */ 81 80 #define EMSGSIZE 218 /* Message too long */ ··· 100 101 #define ETIMEDOUT 238 /* Connection timed out */ 101 102 #define ECONNREFUSED 239 /* Connection refused */ 102 103 #define EREFUSED ECONNREFUSED /* for HP's NFS apparently */ 103 - #define EREMOTERELEASE 240 /* Remote peer released connection */ 104 104 #define EHOSTDOWN 241 /* Host is down */ 105 105 #define EHOSTUNREACH 242 /* No route to host */ 106 106
+34 -17
tools/net/ynl/generated/ethtool-user.c
··· 1843 1843 int ethtool_linkinfo_set(struct ynl_sock *ys, 1844 1844 struct ethtool_linkinfo_set_req *req) 1845 1845 { 1846 + struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; 1846 1847 struct nlmsghdr *nlh; 1847 1848 int err; 1848 1849 ··· 1863 1862 if (req->_present.transceiver) 1864 1863 mnl_attr_put_u8(nlh, ETHTOOL_A_LINKINFO_TRANSCEIVER, req->transceiver); 1865 1864 1866 - err = ynl_exec(ys, nlh, NULL); 1865 + err = ynl_exec(ys, nlh, &yrs); 1867 1866 if (err < 0) 1868 1867 return -1; 1869 1868 ··· 2068 2067 int ethtool_linkmodes_set(struct ynl_sock *ys, 2069 2068 struct ethtool_linkmodes_set_req *req) 2070 2069 { 2070 + struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; 2071 2071 struct nlmsghdr *nlh; 2072 2072 int err; 2073 2073 ··· 2096 2094 if (req->_present.rate_matching) 2097 2095 mnl_attr_put_u8(nlh, ETHTOOL_A_LINKMODES_RATE_MATCHING, req->rate_matching); 2098 2096 2099 - err = ynl_exec(ys, nlh, NULL); 2097 + err = ynl_exec(ys, nlh, &yrs); 2100 2098 if (err < 0) 2101 2099 return -1; 2102 2100 ··· 2400 2398 2401 2399 int ethtool_debug_set(struct ynl_sock *ys, struct ethtool_debug_set_req *req) 2402 2400 { 2401 + struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; 2403 2402 struct nlmsghdr *nlh; 2404 2403 int err; 2405 2404 ··· 2412 2409 if (req->_present.msgmask) 2413 2410 ethtool_bitset_put(nlh, ETHTOOL_A_DEBUG_MSGMASK, &req->msgmask); 2414 2411 2415 - err = ynl_exec(ys, nlh, NULL); 2412 + err = ynl_exec(ys, nlh, &yrs); 2416 2413 if (err < 0) 2417 2414 return -1; 2418 2415 ··· 2580 2577 2581 2578 int ethtool_wol_set(struct ynl_sock *ys, struct ethtool_wol_set_req *req) 2582 2579 { 2580 + struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; 2583 2581 struct nlmsghdr *nlh; 2584 2582 int err; 2585 2583 ··· 2594 2590 if (req->_present.sopass_len) 2595 2591 mnl_attr_put(nlh, ETHTOOL_A_WOL_SOPASS, req->_present.sopass_len, req->sopass); 2596 2592 2597 - err = ynl_exec(ys, nlh, NULL); 2593 + err = ynl_exec(ys, nlh, &yrs); 2598 2594 if (err < 0) 
2599 2595 return -1; 2600 2596 ··· 3049 3045 int ethtool_privflags_set(struct ynl_sock *ys, 3050 3046 struct ethtool_privflags_set_req *req) 3051 3047 { 3048 + struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; 3052 3049 struct nlmsghdr *nlh; 3053 3050 int err; 3054 3051 ··· 3061 3056 if (req->_present.flags) 3062 3057 ethtool_bitset_put(nlh, ETHTOOL_A_PRIVFLAGS_FLAGS, &req->flags); 3063 3058 3064 - err = ynl_exec(ys, nlh, NULL); 3059 + err = ynl_exec(ys, nlh, &yrs); 3065 3060 if (err < 0) 3066 3061 return -1; 3067 3062 ··· 3278 3273 3279 3274 int ethtool_rings_set(struct ynl_sock *ys, struct ethtool_rings_set_req *req) 3280 3275 { 3276 + struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; 3281 3277 struct nlmsghdr *nlh; 3282 3278 int err; 3283 3279 ··· 3318 3312 if (req->_present.tx_push_buf_len_max) 3319 3313 mnl_attr_put_u32(nlh, ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX, req->tx_push_buf_len_max); 3320 3314 3321 - err = ynl_exec(ys, nlh, NULL); 3315 + err = ynl_exec(ys, nlh, &yrs); 3322 3316 if (err < 0) 3323 3317 return -1; 3324 3318 ··· 3501 3495 int ethtool_channels_set(struct ynl_sock *ys, 3502 3496 struct ethtool_channels_set_req *req) 3503 3497 { 3498 + struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; 3504 3499 struct nlmsghdr *nlh; 3505 3500 int err; 3506 3501 ··· 3527 3520 if (req->_present.combined_count) 3528 3521 mnl_attr_put_u32(nlh, ETHTOOL_A_CHANNELS_COMBINED_COUNT, req->combined_count); 3529 3522 3530 - err = ynl_exec(ys, nlh, NULL); 3523 + err = ynl_exec(ys, nlh, &yrs); 3531 3524 if (err < 0) 3532 3525 return -1; 3533 3526 ··· 3805 3798 int ethtool_coalesce_set(struct ynl_sock *ys, 3806 3799 struct ethtool_coalesce_set_req *req) 3807 3800 { 3801 + struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; 3808 3802 struct nlmsghdr *nlh; 3809 3803 int err; 3810 3804 ··· 3869 3861 if (req->_present.tx_aggr_time_usecs) 3870 3862 mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_TX_AGGR_TIME_USECS, req->tx_aggr_time_usecs); 3871 3863 3872 - err = 
ynl_exec(ys, nlh, NULL); 3864 + err = ynl_exec(ys, nlh, &yrs); 3873 3865 if (err < 0) 3874 3866 return -1; 3875 3867 ··· 4044 4036 4045 4037 int ethtool_pause_set(struct ynl_sock *ys, struct ethtool_pause_set_req *req) 4046 4038 { 4039 + struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; 4047 4040 struct nlmsghdr *nlh; 4048 4041 int err; 4049 4042 ··· 4064 4055 if (req->_present.stats_src) 4065 4056 mnl_attr_put_u32(nlh, ETHTOOL_A_PAUSE_STATS_SRC, req->stats_src); 4066 4057 4067 - err = ynl_exec(ys, nlh, NULL); 4058 + err = ynl_exec(ys, nlh, &yrs); 4068 4059 if (err < 0) 4069 4060 return -1; 4070 4061 ··· 4251 4242 4252 4243 int ethtool_eee_set(struct ynl_sock *ys, struct ethtool_eee_set_req *req) 4253 4244 { 4245 + struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; 4254 4246 struct nlmsghdr *nlh; 4255 4247 int err; 4256 4248 ··· 4273 4263 if (req->_present.tx_lpi_timer) 4274 4264 mnl_attr_put_u32(nlh, ETHTOOL_A_EEE_TX_LPI_TIMER, req->tx_lpi_timer); 4275 4265 4276 - err = ynl_exec(ys, nlh, NULL); 4266 + err = ynl_exec(ys, nlh, &yrs); 4277 4267 if (err < 0) 4278 4268 return -1; 4279 4269 ··· 4447 4437 int ethtool_cable_test_act(struct ynl_sock *ys, 4448 4438 struct ethtool_cable_test_act_req *req) 4449 4439 { 4440 + struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; 4450 4441 struct nlmsghdr *nlh; 4451 4442 int err; 4452 4443 ··· 4457 4446 if (req->_present.header) 4458 4447 ethtool_header_put(nlh, ETHTOOL_A_CABLE_TEST_HEADER, &req->header); 4459 4448 4460 - err = ynl_exec(ys, nlh, NULL); 4449 + err = ynl_exec(ys, nlh, &yrs); 4461 4450 if (err < 0) 4462 4451 return -1; 4463 4452 ··· 4476 4465 int ethtool_cable_test_tdr_act(struct ynl_sock *ys, 4477 4466 struct ethtool_cable_test_tdr_act_req *req) 4478 4467 { 4468 + struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; 4479 4469 struct nlmsghdr *nlh; 4480 4470 int err; 4481 4471 ··· 4486 4474 if (req->_present.header) 4487 4475 ethtool_header_put(nlh, ETHTOOL_A_CABLE_TEST_TDR_HEADER, &req->header); 4488 
4476 4489 - err = ynl_exec(ys, nlh, NULL); 4477 + err = ynl_exec(ys, nlh, &yrs); 4490 4478 if (err < 0) 4491 4479 return -1; 4492 4480 ··· 4794 4782 4795 4783 int ethtool_fec_set(struct ynl_sock *ys, struct ethtool_fec_set_req *req) 4796 4784 { 4785 + struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; 4797 4786 struct nlmsghdr *nlh; 4798 4787 int err; 4799 4788 ··· 4812 4799 if (req->_present.stats) 4813 4800 ethtool_fec_stat_put(nlh, ETHTOOL_A_FEC_STATS, &req->stats); 4814 4801 4815 - err = ynl_exec(ys, nlh, NULL); 4802 + err = ynl_exec(ys, nlh, &yrs); 4816 4803 if (err < 0) 4817 4804 return -1; 4818 4805 ··· 5248 5235 5249 5236 int ethtool_module_set(struct ynl_sock *ys, struct ethtool_module_set_req *req) 5250 5237 { 5238 + struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; 5251 5239 struct nlmsghdr *nlh; 5252 5240 int err; 5253 5241 ··· 5262 5248 if (req->_present.power_mode) 5263 5249 mnl_attr_put_u8(nlh, ETHTOOL_A_MODULE_POWER_MODE, req->power_mode); 5264 5250 5265 - err = ynl_exec(ys, nlh, NULL); 5251 + err = ynl_exec(ys, nlh, &yrs); 5266 5252 if (err < 0) 5267 5253 return -1; 5268 5254 ··· 5411 5397 5412 5398 int ethtool_pse_set(struct ynl_sock *ys, struct ethtool_pse_set_req *req) 5413 5399 { 5400 + struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; 5414 5401 struct nlmsghdr *nlh; 5415 5402 int err; 5416 5403 ··· 5427 5412 if (req->_present.pw_d_status) 5428 5413 mnl_attr_put_u32(nlh, ETHTOOL_A_PODL_PSE_PW_D_STATUS, req->pw_d_status); 5429 5414 5430 - err = ynl_exec(ys, nlh, NULL); 5415 + err = ynl_exec(ys, nlh, &yrs); 5431 5416 if (err < 0) 5432 5417 return -1; 5433 5418 ··· 5761 5746 int ethtool_plca_set_cfg(struct ynl_sock *ys, 5762 5747 struct ethtool_plca_set_cfg_req *req) 5763 5748 { 5749 + struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; 5764 5750 struct nlmsghdr *nlh; 5765 5751 int err; 5766 5752 ··· 5787 5771 if (req->_present.burst_tmr) 5788 5772 mnl_attr_put_u32(nlh, ETHTOOL_A_PLCA_BURST_TMR, req->burst_tmr); 5789 5773 5790 - 
err = ynl_exec(ys, nlh, NULL); 5774 + err = ynl_exec(ys, nlh, &yrs); 5791 5775 if (err < 0) 5792 5776 return -1; 5793 5777 ··· 6140 6124 6141 6125 int ethtool_mm_set(struct ynl_sock *ys, struct ethtool_mm_set_req *req) 6142 6126 { 6127 + struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; 6143 6128 struct nlmsghdr *nlh; 6144 6129 int err; 6145 6130 ··· 6160 6143 if (req->_present.tx_min_frag_size) 6161 6144 mnl_attr_put_u32(nlh, ETHTOOL_A_MM_TX_MIN_FRAG_SIZE, req->tx_min_frag_size); 6162 6145 6163 - err = ynl_exec(ys, nlh, NULL); 6146 + err = ynl_exec(ys, nlh, &yrs); 6164 6147 if (err < 0) 6165 6148 return -1; 6166 6149
+4 -2
tools/net/ynl/generated/fou-user.c
··· 72 72 73 73 int fou_add(struct ynl_sock *ys, struct fou_add_req *req) 74 74 { 75 + struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; 75 76 struct nlmsghdr *nlh; 76 77 int err; 77 78 ··· 100 99 if (req->_present.ifindex) 101 100 mnl_attr_put_u32(nlh, FOU_ATTR_IFINDEX, req->ifindex); 102 101 103 - err = ynl_exec(ys, nlh, NULL); 102 + err = ynl_exec(ys, nlh, &yrs); 104 103 if (err < 0) 105 104 return -1; 106 105 ··· 118 117 119 118 int fou_del(struct ynl_sock *ys, struct fou_del_req *req) 120 119 { 120 + struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; 121 121 struct nlmsghdr *nlh; 122 122 int err; 123 123 ··· 142 140 if (req->_present.peer_v6_len) 143 141 mnl_attr_put(nlh, FOU_ATTR_PEER_V6, req->_present.peer_v6_len, req->peer_v6); 144 142 145 - err = ynl_exec(ys, nlh, NULL); 143 + err = ynl_exec(ys, nlh, &yrs); 146 144 if (err < 0) 147 145 return -1; 148 146
+2 -1
tools/net/ynl/generated/handshake-user.c
··· 295 295 296 296 int handshake_done(struct ynl_sock *ys, struct handshake_done_req *req) 297 297 { 298 + struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; 298 299 struct nlmsghdr *nlh; 299 300 int err; 300 301 ··· 309 308 for (unsigned int i = 0; i < req->n_remote_auth; i++) 310 309 mnl_attr_put_u32(nlh, HANDSHAKE_A_DONE_REMOTE_AUTH, req->remote_auth[i]); 311 310 312 - err = ynl_exec(ys, nlh, NULL); 311 + err = ynl_exec(ys, nlh, &yrs); 313 312 if (err < 0) 314 313 return -1; 315 314
+4 -6
tools/net/ynl/ynl-gen-c.py
··· 1715 1715 ret_ok = '0' 1716 1716 ret_err = '-1' 1717 1717 direction = "request" 1718 - local_vars = ['struct nlmsghdr *nlh;', 1718 + local_vars = ['struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };', 1719 + 'struct nlmsghdr *nlh;', 1719 1720 'int err;'] 1720 1721 1721 1722 if 'reply' in ri.op[ri.op_mode]: 1722 1723 ret_ok = 'rsp' 1723 1724 ret_err = 'NULL' 1724 - local_vars += [f'{type_name(ri, rdir(direction))} *rsp;', 1725 - 'struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };'] 1725 + local_vars += [f'{type_name(ri, rdir(direction))} *rsp;'] 1726 1726 1727 1727 print_prototype(ri, direction, terminate=False) 1728 1728 ri.cw.block_start() ··· 1738 1738 attr.attr_put(ri, "req") 1739 1739 ri.cw.nl() 1740 1740 1741 - parse_arg = "NULL" 1742 1741 if 'reply' in ri.op[ri.op_mode]: 1743 1742 ri.cw.p('rsp = calloc(1, sizeof(*rsp));') 1744 1743 ri.cw.p('yrs.yarg.data = rsp;') ··· 1747 1748 else: 1748 1749 ri.cw.p(f'yrs.rsp_cmd = {ri.op.rsp_value};') 1749 1750 ri.cw.nl() 1750 - parse_arg = '&yrs' 1751 - ri.cw.p(f"err = ynl_exec(ys, nlh, {parse_arg});") 1751 + ri.cw.p("err = ynl_exec(ys, nlh, &yrs);") 1752 1752 ri.cw.p('if (err < 0)') 1753 1753 if 'reply' in ri.op[ri.op_mode]: 1754 1754 ri.cw.p('goto err_free;')
+1 -1
tools/power/pm-graph/sleepgraph.py
··· 4151 4151 elif(re.match('Enabling non-boot CPUs .*', msg)): 4152 4152 # start of first cpu resume 4153 4153 cpu_start = ktime 4154 - elif(re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg)) \ 4154 + elif(re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg) \ 4155 4155 or re.match('psci: CPU(?P<cpu>[0-9]*) killed.*', msg)): 4156 4156 # end of a cpu suspend, start of the next 4157 4157 m = re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg)
+1 -1
tools/testing/selftests/arm64/fp/za-fork.c
··· 85 85 */ 86 86 ret = open("/proc/sys/abi/sme_default_vector_length", O_RDONLY, 0); 87 87 if (ret >= 0) { 88 - ksft_test_result(fork_test(), "fork_test"); 88 + ksft_test_result(fork_test(), "fork_test\n"); 89 89 90 90 } else { 91 91 ksft_print_msg("SME not supported\n");
+40 -11
tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
··· 1337 1337 } 1338 1338 1339 1339 static void pairs_redir_to_connected(int cli0, int peer0, int cli1, int peer1, 1340 - int sock_mapfd, int verd_mapfd, enum redir_mode mode) 1340 + int sock_mapfd, int nop_mapfd, 1341 + int verd_mapfd, enum redir_mode mode) 1341 1342 { 1342 1343 const char *log_prefix = redir_mode_str(mode); 1343 1344 unsigned int pass; ··· 1351 1350 err = add_to_sockmap(sock_mapfd, peer0, peer1); 1352 1351 if (err) 1353 1352 return; 1353 + 1354 + if (nop_mapfd >= 0) { 1355 + err = add_to_sockmap(nop_mapfd, cli0, cli1); 1356 + if (err) 1357 + return; 1358 + } 1354 1359 1355 1360 n = write(cli1, "a", 1); 1356 1361 if (n < 0) ··· 1394 1387 goto close0; 1395 1388 c1 = sfd[0], p1 = sfd[1]; 1396 1389 1397 - pairs_redir_to_connected(c0, p0, c1, p1, sock_mapfd, verd_mapfd, mode); 1390 + pairs_redir_to_connected(c0, p0, c1, p1, sock_mapfd, -1, verd_mapfd, mode); 1398 1391 1399 1392 xclose(c1); 1400 1393 xclose(p1); ··· 1684 1677 if (err) 1685 1678 goto close_cli0; 1686 1679 1687 - pairs_redir_to_connected(c0, p0, c1, p1, sock_mapfd, verd_mapfd, mode); 1680 + pairs_redir_to_connected(c0, p0, c1, p1, sock_mapfd, -1, verd_mapfd, mode); 1688 1681 1689 1682 xclose(c1); 1690 1683 xclose(p1); ··· 1742 1735 if (err) 1743 1736 goto close; 1744 1737 1745 - pairs_redir_to_connected(c0, p0, c1, p1, sock_mapfd, verd_mapfd, mode); 1738 + pairs_redir_to_connected(c0, p0, c1, p1, sock_mapfd, -1, verd_mapfd, mode); 1746 1739 1747 1740 xclose(c1); 1748 1741 xclose(p1); ··· 1777 1770 xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_VERDICT); 1778 1771 } 1779 1772 1780 - static void unix_inet_redir_to_connected(int family, int type, int sock_mapfd, 1781 - int verd_mapfd, enum redir_mode mode) 1773 + static void unix_inet_redir_to_connected(int family, int type, 1774 + int sock_mapfd, int nop_mapfd, 1775 + int verd_mapfd, 1776 + enum redir_mode mode) 1782 1777 { 1783 1778 int c0, c1, p0, p1; 1784 1779 int sfd[2]; ··· 1794 1785 goto close_cli0; 1795 1786 c1 = sfd[0], p1 = 
sfd[1]; 1796 1787 1797 - pairs_redir_to_connected(c0, p0, c1, p1, sock_mapfd, verd_mapfd, mode); 1788 + pairs_redir_to_connected(c0, p0, c1, p1, 1789 + sock_mapfd, nop_mapfd, verd_mapfd, mode); 1798 1790 1799 1791 xclose(c1); 1800 1792 xclose(p1); ··· 1809 1799 struct bpf_map *inner_map, int family) 1810 1800 { 1811 1801 int verdict = bpf_program__fd(skel->progs.prog_skb_verdict); 1802 + int nop_map = bpf_map__fd(skel->maps.nop_map); 1812 1803 int verdict_map = bpf_map__fd(skel->maps.verdict_map); 1813 1804 int sock_map = bpf_map__fd(inner_map); 1814 1805 int err; ··· 1819 1808 return; 1820 1809 1821 1810 skel->bss->test_ingress = false; 1822 - unix_inet_redir_to_connected(family, SOCK_DGRAM, sock_map, verdict_map, 1811 + unix_inet_redir_to_connected(family, SOCK_DGRAM, 1812 + sock_map, -1, verdict_map, 1823 1813 REDIR_EGRESS); 1824 - unix_inet_redir_to_connected(family, SOCK_STREAM, sock_map, verdict_map, 1814 + unix_inet_redir_to_connected(family, SOCK_DGRAM, 1815 + sock_map, -1, verdict_map, 1816 + REDIR_EGRESS); 1817 + 1818 + unix_inet_redir_to_connected(family, SOCK_DGRAM, 1819 + sock_map, nop_map, verdict_map, 1820 + REDIR_EGRESS); 1821 + unix_inet_redir_to_connected(family, SOCK_STREAM, 1822 + sock_map, nop_map, verdict_map, 1825 1823 REDIR_EGRESS); 1826 1824 skel->bss->test_ingress = true; 1827 - unix_inet_redir_to_connected(family, SOCK_DGRAM, sock_map, verdict_map, 1825 + unix_inet_redir_to_connected(family, SOCK_DGRAM, 1826 + sock_map, -1, verdict_map, 1828 1827 REDIR_INGRESS); 1829 - unix_inet_redir_to_connected(family, SOCK_STREAM, sock_map, verdict_map, 1828 + unix_inet_redir_to_connected(family, SOCK_STREAM, 1829 + sock_map, -1, verdict_map, 1830 + REDIR_INGRESS); 1831 + 1832 + unix_inet_redir_to_connected(family, SOCK_DGRAM, 1833 + sock_map, nop_map, verdict_map, 1834 + REDIR_INGRESS); 1835 + unix_inet_redir_to_connected(family, SOCK_STREAM, 1836 + sock_map, nop_map, verdict_map, 1830 1837 REDIR_INGRESS); 1831 1838 1832 1839 
xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_VERDICT);
+7
tools/testing/selftests/bpf/progs/test_sockmap_listen.c
··· 15 15 } sock_map SEC(".maps"); 16 16 17 17 struct { 18 + __uint(type, BPF_MAP_TYPE_SOCKMAP); 19 + __uint(max_entries, 2); 20 + __type(key, __u32); 21 + __type(value, __u64); 22 + } nop_map SEC(".maps"); 23 + 24 + struct { 18 25 __uint(type, BPF_MAP_TYPE_SOCKHASH); 19 26 __uint(max_entries, 2); 20 27 __type(key, __u32);
-1
tools/testing/selftests/net/af_unix/diag_uid.c
··· 148 148 .msg_iov = &iov, 149 149 .msg_iovlen = 1 150 150 }; 151 - struct unix_diag_req *udr; 152 151 struct nlmsghdr *nlh; 153 152 int ret; 154 153
+1 -1
tools/testing/selftests/net/cmsg_sender.c
··· 428 428 { 429 429 struct addrinfo hints, *ai; 430 430 struct iovec iov[1]; 431 + unsigned char *buf; 431 432 struct msghdr msg; 432 433 char cbuf[1024]; 433 - char *buf; 434 434 int err; 435 435 int fd; 436 436 int i;
+2 -2
tools/testing/selftests/net/ipsec.c
··· 2263 2263 2264 2264 int main(int argc, char **argv) 2265 2265 { 2266 - unsigned int nr_process = 1; 2266 + long nr_process = 1; 2267 2267 int route_sock = -1, ret = KSFT_SKIP; 2268 2268 int test_desc_fd[2]; 2269 2269 uint32_t route_seq; ··· 2284 2284 exit_usage(argv); 2285 2285 } 2286 2286 2287 - if (nr_process > MAX_PROCESSES || !nr_process) { 2287 + if (nr_process > MAX_PROCESSES || nr_process < 1) { 2288 2288 printk("nr_process should be between [1; %u]", 2289 2289 MAX_PROCESSES); 2290 2290 exit_usage(argv);
+4 -7
tools/testing/selftests/net/mptcp/mptcp_connect.c
··· 18 18 19 19 #include <sys/ioctl.h> 20 20 #include <sys/poll.h> 21 + #include <sys/random.h> 21 22 #include <sys/sendfile.h> 22 23 #include <sys/stat.h> 23 24 #include <sys/socket.h> ··· 1126 1125 1127 1126 static void init_rng(void) 1128 1127 { 1129 - int fd = open("/dev/urandom", O_RDONLY); 1130 1128 unsigned int foo; 1131 1129 1132 - if (fd > 0) { 1133 - int ret = read(fd, &foo, sizeof(foo)); 1134 - 1135 - if (ret < 0) 1136 - srand(fd + foo); 1137 - close(fd); 1130 + if (getrandom(&foo, sizeof(foo), 0) == -1) { 1131 + perror("getrandom"); 1132 + exit(1); 1138 1133 } 1139 1134 1140 1135 srand(foo);
+4 -7
tools/testing/selftests/net/mptcp/mptcp_inq.c
··· 18 18 #include <time.h> 19 19 20 20 #include <sys/ioctl.h> 21 + #include <sys/random.h> 21 22 #include <sys/socket.h> 22 23 #include <sys/types.h> 23 24 #include <sys/wait.h> ··· 520 519 521 520 static void init_rng(void) 522 521 { 523 - int fd = open("/dev/urandom", O_RDONLY); 524 522 unsigned int foo; 525 523 526 - if (fd > 0) { 527 - int ret = read(fd, &foo, sizeof(foo)); 528 - 529 - if (ret < 0) 530 - srand(fd + foo); 531 - close(fd); 524 + if (getrandom(&foo, sizeof(foo), 0) == -1) { 525 + perror("getrandom"); 526 + exit(1); 532 527 } 533 528 534 529 srand(foo);