Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ASoC: Phase out hybrid PCI devres

Merge series from Philipp Stanner <phasta@kernel.org>:

A year ago we spent quite some work trying to get PCI into better shape.
Some pci_ functions can sometimes be managed with devres, which is
obviously bad. We want to provide an obvious API, where pci_ functions
are never, and pcim_ functions are always managed.

Thus, everyone enabling their device with pcim_enable_device() must be
ported to pcim_ functions. Porting all users will later enable us to
significantly simplify parts of the PCI subsystem. See here [1] for
details.

This patch series does that for sound.

Feel free to squash the commits as you see fit.

P.

[1] https://elixir.bootlin.com/linux/v6.14-rc4/source/drivers/pci/devres.c#L18

Mark Brown 02ca7898 fe65ce84

+3650 -1510
+2 -2
Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
··· 111 111 112 112 What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/temp2_input 113 113 Date: March 2025 114 - KernelVersion: 6.14 114 + KernelVersion: 6.15 115 115 Contact: intel-xe@lists.freedesktop.org 116 116 Description: RO. Package temperature in millidegree Celsius. 117 117 ··· 119 119 120 120 What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/temp3_input 121 121 Date: March 2025 122 - KernelVersion: 6.14 122 + KernelVersion: 6.15 123 123 Contact: intel-xe@lists.freedesktop.org 124 124 Description: RO. VRAM temperature in millidegree Celsius. 125 125
+7 -12
Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml
··· 56 56 enum: 57 57 - snps,dw-apb-ssi 58 58 - snps,dwc-ssi-1.01a 59 - - description: Microsemi Ocelot/Jaguar2 SoC SPI Controller 60 - items: 61 - - enum: 62 - - mscc,ocelot-spi 63 - - mscc,jaguar2-spi 64 - - const: snps,dw-apb-ssi 65 59 - description: Microchip Sparx5 SoC SPI Controller 66 60 const: microchip,sparx5-spi 67 61 - description: Amazon Alpine SPI Controller 68 62 const: amazon,alpine-dw-apb-ssi 69 - - description: Renesas RZ/N1 SPI Controller 63 + - description: Vendor controllers which use snps,dw-apb-ssi as fallback 70 64 items: 71 - - const: renesas,rzn1-spi 65 + - enum: 66 + - mscc,ocelot-spi 67 + - mscc,jaguar2-spi 68 + - renesas,rzn1-spi 69 + - sophgo,sg2042-spi 70 + - thead,th1520-spi 72 71 - const: snps,dw-apb-ssi 73 72 - description: Intel Keem Bay SPI Controller 74 73 const: intel,keembay-ssi ··· 87 88 - renesas,r9a06g032-spi # RZ/N1D 88 89 - renesas,r9a06g033-spi # RZ/N1S 89 90 - const: renesas,rzn1-spi # RZ/N1 90 - - description: T-HEAD TH1520 SoC SPI Controller 91 - items: 92 - - const: thead,th1520-spi 93 - - const: snps,dw-apb-ssi 94 91 95 92 reg: 96 93 minItems: 1
+3 -1
Documentation/netlink/specs/ethtool.yaml
··· 89 89 doc: Group of short_detected states 90 90 - 91 91 name: phy-upstream-type 92 - enum-name: 92 + enum-name: phy-upstream 93 + header: linux/ethtool.h 93 94 type: enum 95 + name-prefix: phy-upstream 94 96 entries: [ mac, phy ] 95 97 - 96 98 name: tcp-data-split
+1 -8
Makefile
··· 2 2 VERSION = 6 3 3 PATCHLEVEL = 15 4 4 SUBLEVEL = 0 5 - EXTRAVERSION = -rc4 5 + EXTRAVERSION = -rc5 6 6 NAME = Baby Opossum Posse 7 7 8 8 # *DOCUMENTATION* ··· 1051 1051 # arrays. Enforce this for everything that may examine structure sizes and 1052 1052 # perform bounds checking. 1053 1053 KBUILD_CFLAGS += $(call cc-option, -fstrict-flex-arrays=3) 1054 - 1055 - #Currently, disable -Wstringop-overflow for GCC 11, globally. 1056 - KBUILD_CFLAGS-$(CONFIG_CC_NO_STRINGOP_OVERFLOW) += $(call cc-disable-warning, stringop-overflow) 1057 - KBUILD_CFLAGS-$(CONFIG_CC_STRINGOP_OVERFLOW) += $(call cc-option, -Wstringop-overflow) 1058 - 1059 - #Currently, disable -Wunterminated-string-initialization as broken 1060 - KBUILD_CFLAGS += $(call cc-disable-warning, unterminated-string-initialization) 1061 1054 1062 1055 # disable invalid "can't wrap" optimizations for signed / pointers 1063 1056 KBUILD_CFLAGS += -fno-strict-overflow
+2
arch/arm64/kernel/proton-pack.c
··· 879 879 static const struct midr_range spectre_bhb_k132_list[] = { 880 880 MIDR_ALL_VERSIONS(MIDR_CORTEX_X3), 881 881 MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2), 882 + {}, 882 883 }; 883 884 static const struct midr_range spectre_bhb_k38_list[] = { 884 885 MIDR_ALL_VERSIONS(MIDR_CORTEX_A715), 885 886 MIDR_ALL_VERSIONS(MIDR_CORTEX_A720), 887 + {}, 886 888 }; 887 889 static const struct midr_range spectre_bhb_k32_list[] = { 888 890 MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
+13 -3
arch/parisc/math-emu/driver.c
··· 97 97 98 98 memcpy(regs->fr, frcopy, sizeof regs->fr); 99 99 if (signalcode != 0) { 100 - force_sig_fault(signalcode >> 24, signalcode & 0xffffff, 101 - (void __user *) regs->iaoq[0]); 102 - return -1; 100 + int sig = signalcode >> 24; 101 + 102 + if (sig == SIGFPE) { 103 + /* 104 + * Clear floating point trap bit to avoid trapping 105 + * again on the first floating-point instruction in 106 + * the userspace signal handler. 107 + */ 108 + regs->fr[0] &= ~(1ULL << 38); 109 + } 110 + force_sig_fault(sig, signalcode & 0xffffff, 111 + (void __user *) regs->iaoq[0]); 112 + return -1; 103 113 } 104 114 105 115 return signalcode ? -1 : 0;
+2 -4
arch/powerpc/boot/wrapper
··· 234 234 235 235 # suppress some warnings in recent ld versions 236 236 nowarn="-z noexecstack" 237 - if ! ld_is_lld; then 238 - if [ "$LD_VERSION" -ge "$(echo 2.39 | ld_version)" ]; then 239 - nowarn="$nowarn --no-warn-rwx-segments" 240 - fi 237 + if "${CROSS}ld" -v --no-warn-rwx-segments >/dev/null 2>&1; then 238 + nowarn="$nowarn --no-warn-rwx-segments" 241 239 fi 242 240 243 241 platformo=$object/"$platform".o
-4
arch/powerpc/kernel/module_64.c
··· 258 258 break; 259 259 } 260 260 } 261 - if (i == hdr->e_shnum) { 262 - pr_err("%s: doesn't contain __patchable_function_entries.\n", me->name); 263 - return -ENOEXEC; 264 - } 265 261 #endif 266 262 267 263 pr_debug("Looks like a total of %lu stubs, max\n", relocs);
+17 -3
arch/powerpc/mm/book3s64/radix_pgtable.c
··· 976 976 return 0; 977 977 } 978 978 979 - 979 + #ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP 980 980 bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap) 981 981 { 982 982 if (radix_enabled()) ··· 984 984 985 985 return false; 986 986 } 987 + #endif 987 988 988 989 int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node, 989 990 unsigned long addr, unsigned long next) ··· 1121 1120 pmd_t *pmd; 1122 1121 pte_t *pte; 1123 1122 1123 + /* 1124 + * Make sure we align the start vmemmap addr so that we calculate 1125 + * the correct start_pfn in altmap boundary check to decided whether 1126 + * we should use altmap or RAM based backing memory allocation. Also 1127 + * the address need to be aligned for set_pte operation. 1128 + 1129 + * If the start addr is already PMD_SIZE aligned we will try to use 1130 + * a pmd mapping. We don't want to be too aggressive here beacause 1131 + * that will cause more allocations in RAM. So only if the namespace 1132 + * vmemmap start addr is PMD_SIZE aligned we will use PMD mapping. 1133 + */ 1134 + 1135 + start = ALIGN_DOWN(start, PAGE_SIZE); 1124 1136 for (addr = start; addr < end; addr = next) { 1125 1137 next = pmd_addr_end(addr, end); 1126 1138 ··· 1159 1145 * in altmap block allocation failures, in which case 1160 1146 * we fallback to RAM for vmemmap allocation. 1161 1147 */ 1162 - if (altmap && (!IS_ALIGNED(addr, PMD_SIZE) || 1163 - altmap_cross_boundary(altmap, addr, PMD_SIZE))) { 1148 + if (!IS_ALIGNED(addr, PMD_SIZE) || (altmap && 1149 + altmap_cross_boundary(altmap, addr, PMD_SIZE))) { 1164 1150 /* 1165 1151 * make sure we don't create altmap mappings 1166 1152 * covering things outside the device.
+1 -1
arch/powerpc/platforms/powernv/Kconfig
··· 17 17 select MMU_NOTIFIER 18 18 select FORCE_SMP 19 19 select ARCH_SUPPORTS_PER_VMA_LOCK 20 - select PPC_RADIX_BROADCAST_TLBIE 20 + select PPC_RADIX_BROADCAST_TLBIE if PPC_RADIX_MMU 21 21 default y 22 22 23 23 config OPAL_PRD
+1 -1
arch/powerpc/platforms/pseries/Kconfig
··· 23 23 select FORCE_SMP 24 24 select SWIOTLB 25 25 select ARCH_SUPPORTS_PER_VMA_LOCK 26 - select PPC_RADIX_BROADCAST_TLBIE 26 + select PPC_RADIX_BROADCAST_TLBIE if PPC_RADIX_MMU 27 27 default y 28 28 29 29 config PARAVIRT
+1 -4
arch/x86/boot/compressed/mem.c
··· 34 34 35 35 void arch_accept_memory(phys_addr_t start, phys_addr_t end) 36 36 { 37 - static bool sevsnp; 38 - 39 37 /* Platform-specific memory-acceptance call goes here */ 40 38 if (early_is_tdx_guest()) { 41 39 if (!tdx_accept_memory(start, end)) 42 40 panic("TDX: Failed to accept memory\n"); 43 - } else if (sevsnp || (sev_get_status() & MSR_AMD64_SEV_SNP_ENABLED)) { 44 - sevsnp = true; 41 + } else if (early_is_sevsnp_guest()) { 45 42 snp_accept_memory(start, end); 46 43 } else { 47 44 error("Cannot accept memory: unknown platform\n");
+40
arch/x86/boot/compressed/sev.c
··· 645 645 646 646 sev_verify_cbit(top_level_pgt); 647 647 } 648 + 649 + bool early_is_sevsnp_guest(void) 650 + { 651 + static bool sevsnp; 652 + 653 + if (sevsnp) 654 + return true; 655 + 656 + if (!(sev_get_status() & MSR_AMD64_SEV_SNP_ENABLED)) 657 + return false; 658 + 659 + sevsnp = true; 660 + 661 + if (!snp_vmpl) { 662 + unsigned int eax, ebx, ecx, edx; 663 + 664 + /* 665 + * CPUID Fn8000_001F_EAX[28] - SVSM support 666 + */ 667 + eax = 0x8000001f; 668 + ecx = 0; 669 + native_cpuid(&eax, &ebx, &ecx, &edx); 670 + if (eax & BIT(28)) { 671 + struct msr m; 672 + 673 + /* Obtain the address of the calling area to use */ 674 + boot_rdmsr(MSR_SVSM_CAA, &m); 675 + boot_svsm_caa = (void *)m.q; 676 + boot_svsm_caa_pa = m.q; 677 + 678 + /* 679 + * The real VMPL level cannot be discovered, but the 680 + * memory acceptance routines make no use of that so 681 + * any non-zero value suffices here. 682 + */ 683 + snp_vmpl = U8_MAX; 684 + } 685 + } 686 + return true; 687 + }
+2
arch/x86/boot/compressed/sev.h
··· 13 13 bool sev_snp_enabled(void); 14 14 void snp_accept_memory(phys_addr_t start, phys_addr_t end); 15 15 u64 sev_get_status(void); 16 + bool early_is_sevsnp_guest(void); 16 17 17 18 #else 18 19 19 20 static inline bool sev_snp_enabled(void) { return false; } 20 21 static inline void snp_accept_memory(phys_addr_t start, phys_addr_t end) { } 21 22 static inline u64 sev_get_status(void) { return 0; } 23 + static inline bool early_is_sevsnp_guest(void) { return false; } 22 24 23 25 #endif 24 26
+1 -1
arch/x86/events/core.c
··· 754 754 } 755 755 } 756 756 757 - static inline int is_x86_event(struct perf_event *event) 757 + int is_x86_event(struct perf_event *event) 758 758 { 759 759 int i; 760 760
+1 -1
arch/x86/events/intel/core.c
··· 4395 4395 arr[pebs_enable] = (struct perf_guest_switch_msr){ 4396 4396 .msr = MSR_IA32_PEBS_ENABLE, 4397 4397 .host = cpuc->pebs_enabled & ~cpuc->intel_ctrl_guest_mask, 4398 - .guest = pebs_mask & ~cpuc->intel_ctrl_host_mask, 4398 + .guest = pebs_mask & ~cpuc->intel_ctrl_host_mask & kvm_pmu->pebs_enable, 4399 4399 }; 4400 4400 4401 4401 if (arr[pebs_enable].host) {
+19 -2
arch/x86/events/intel/ds.c
··· 2379 2379 */ 2380 2380 intel_pmu_save_and_restart_reload(event, count); 2381 2381 } 2382 - } else 2383 - intel_pmu_save_and_restart(event); 2382 + } else { 2383 + /* 2384 + * For a non-precise event, it's possible the 2385 + * counters-snapshotting records a positive value for the 2386 + * overflowed event. Then the HW auto-reload mechanism 2387 + * reset the counter to 0 immediately, because the 2388 + * pebs_event_reset is cleared if the PERF_X86_EVENT_AUTO_RELOAD 2389 + * is not set. The counter backwards may be observed in a 2390 + * PMI handler. 2391 + * 2392 + * Since the event value has been updated when processing the 2393 + * counters-snapshotting record, only needs to set the new 2394 + * period for the counter. 2395 + */ 2396 + if (is_pebs_counter_event_group(event)) 2397 + static_call(x86_pmu_set_period)(event); 2398 + else 2399 + intel_pmu_save_and_restart(event); 2400 + } 2384 2401 } 2385 2402 2386 2403 static __always_inline void
+9 -2
arch/x86/events/perf_event.h
··· 110 110 return is_metric_event(event) || is_slots_event(event); 111 111 } 112 112 113 + int is_x86_event(struct perf_event *event); 114 + 115 + static inline bool check_leader_group(struct perf_event *leader, int flags) 116 + { 117 + return is_x86_event(leader) ? !!(leader->hw.flags & flags) : false; 118 + } 119 + 113 120 static inline bool is_branch_counters_group(struct perf_event *event) 114 121 { 115 - return event->group_leader->hw.flags & PERF_X86_EVENT_BRANCH_COUNTERS; 122 + return check_leader_group(event->group_leader, PERF_X86_EVENT_BRANCH_COUNTERS); 116 123 } 117 124 118 125 static inline bool is_pebs_counter_event_group(struct perf_event *event) 119 126 { 120 - return event->group_leader->hw.flags & PERF_X86_EVENT_PEBS_CNTR; 127 + return check_leader_group(event->group_leader, PERF_X86_EVENT_PEBS_CNTR); 121 128 } 122 129 123 130 struct amd_nb {
+2 -3
crypto/scompress.c
··· 163 163 if (ret) 164 164 goto unlock; 165 165 } 166 - if (!scomp_scratch_users) { 166 + if (!scomp_scratch_users++) { 167 167 ret = crypto_scomp_alloc_scratches(); 168 168 if (ret) 169 - goto unlock; 170 - scomp_scratch_users++; 169 + scomp_scratch_users--; 171 170 } 172 171 unlock: 173 172 mutex_unlock(&scomp_lock);
+2 -2
drivers/accel/ivpu/ivpu_fw.c
··· 544 544 boot_params->d0i3_entry_vpu_ts); 545 545 ivpu_dbg(vdev, FW_BOOT, "boot_params.system_time_us = %llu\n", 546 546 boot_params->system_time_us); 547 - ivpu_dbg(vdev, FW_BOOT, "boot_params.power_profile = %u\n", 547 + ivpu_dbg(vdev, FW_BOOT, "boot_params.power_profile = 0x%x\n", 548 548 boot_params->power_profile); 549 549 } 550 550 ··· 646 646 boot_params->d0i3_residency_time_us = 0; 647 647 boot_params->d0i3_entry_vpu_ts = 0; 648 648 if (IVPU_WA(disable_d0i2)) 649 - boot_params->power_profile = 1; 649 + boot_params->power_profile |= BIT(1); 650 650 651 651 boot_params->system_time_us = ktime_to_us(ktime_get_real()); 652 652 wmb(); /* Flush WC buffers after writing bootparams */
+1 -1
drivers/accel/ivpu/ivpu_hw_btrs.h
··· 14 14 #define PLL_PROFILING_FREQ_DEFAULT 38400000 15 15 #define PLL_PROFILING_FREQ_HIGH 400000000 16 16 17 - #define DCT_DEFAULT_ACTIVE_PERCENT 15u 17 + #define DCT_DEFAULT_ACTIVE_PERCENT 30u 18 18 #define DCT_PERIOD_US 35300u 19 19 20 20 int ivpu_hw_btrs_info_init(struct ivpu_device *vdev);
+10 -8
drivers/accel/ivpu/ivpu_pm.c
··· 428 428 active_us = (DCT_PERIOD_US * active_percent) / 100; 429 429 inactive_us = DCT_PERIOD_US - active_us; 430 430 431 + vdev->pm->dct_active_percent = active_percent; 432 + 433 + ivpu_dbg(vdev, PM, "DCT requested %u%% (D0: %uus, D0i2: %uus)\n", 434 + active_percent, active_us, inactive_us); 435 + 431 436 ret = ivpu_jsm_dct_enable(vdev, active_us, inactive_us); 432 437 if (ret) { 433 438 ivpu_err_ratelimited(vdev, "Failed to enable DCT: %d\n", ret); 434 439 return ret; 435 440 } 436 441 437 - vdev->pm->dct_active_percent = active_percent; 438 - 439 - ivpu_dbg(vdev, PM, "DCT set to %u%% (D0: %uus, D0i2: %uus)\n", 440 - active_percent, active_us, inactive_us); 441 442 return 0; 442 443 } 443 444 ··· 446 445 { 447 446 int ret; 448 447 448 + vdev->pm->dct_active_percent = 0; 449 + 450 + ivpu_dbg(vdev, PM, "DCT requested to be disabled\n"); 451 + 449 452 ret = ivpu_jsm_dct_disable(vdev); 450 453 if (ret) { 451 454 ivpu_err_ratelimited(vdev, "Failed to disable DCT: %d\n", ret); 452 455 return ret; 453 456 } 454 457 455 - vdev->pm->dct_active_percent = 0; 456 - 457 - ivpu_dbg(vdev, PM, "DCT disabled\n"); 458 458 return 0; 459 459 } 460 460 ··· 468 466 if (ivpu_hw_btrs_dct_get_request(vdev, &enable)) 469 467 return; 470 468 471 - if (vdev->pm->dct_active_percent) 469 + if (enable) 472 470 ret = ivpu_pm_dct_enable(vdev, DCT_DEFAULT_ACTIVE_PERCENT); 473 471 else 474 472 ret = ivpu_pm_dct_disable(vdev);
+5 -8
drivers/base/module.c
··· 42 42 if (mod) 43 43 mk = &mod->mkobj; 44 44 else if (drv->mod_name) { 45 - struct kobject *mkobj; 46 - 47 - /* Lookup built-in module entry in /sys/modules */ 48 - mkobj = kset_find_obj(module_kset, drv->mod_name); 49 - if (mkobj) { 50 - mk = container_of(mkobj, struct module_kobject, kobj); 45 + /* Lookup or create built-in module entry in /sys/modules */ 46 + mk = lookup_or_create_module_kobject(drv->mod_name); 47 + if (mk) { 51 48 /* remember our module structure */ 52 49 drv->p->mkobj = mk; 53 - /* kset_find_obj took a reference */ 54 - kobject_put(mkobj); 50 + /* lookup_or_create_module_kobject took a reference */ 51 + kobject_put(&mk->kobj); 55 52 } 56 53 } 57 54
+43 -19
drivers/block/ublk_drv.c
··· 201 201 static void ublk_stop_dev_unlocked(struct ublk_device *ub); 202 202 static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq); 203 203 static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub, 204 - struct ublk_queue *ubq, int tag, size_t offset); 204 + const struct ublk_queue *ubq, int tag, size_t offset); 205 205 static inline unsigned int ublk_req_build_flags(struct request *req); 206 206 static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq, 207 207 int tag); 208 - static inline bool ublk_dev_is_user_copy(const struct ublk_device *ub) 209 - { 210 - return ub->dev_info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY); 211 - } 212 - 213 208 static inline bool ublk_dev_is_zoned(const struct ublk_device *ub) 214 209 { 215 210 return ub->dev_info.flags & UBLK_F_ZONED; ··· 604 609 ublk_dev_param_zoned_apply(ub); 605 610 } 606 611 612 + static inline bool ublk_support_zero_copy(const struct ublk_queue *ubq) 613 + { 614 + return ubq->flags & UBLK_F_SUPPORT_ZERO_COPY; 615 + } 616 + 607 617 static inline bool ublk_support_user_copy(const struct ublk_queue *ubq) 608 618 { 609 - return ubq->flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY); 619 + return ubq->flags & UBLK_F_USER_COPY; 610 620 } 611 621 612 622 static inline bool ublk_need_map_io(const struct ublk_queue *ubq) 613 623 { 614 - return !ublk_support_user_copy(ubq); 624 + return !ublk_support_user_copy(ubq) && !ublk_support_zero_copy(ubq); 615 625 } 616 626 617 627 static inline bool ublk_need_req_ref(const struct ublk_queue *ubq) ··· 624 624 /* 625 625 * read()/write() is involved in user copy, so request reference 626 626 * has to be grabbed 627 + * 628 + * for zero copy, request buffer need to be registered to io_uring 629 + * buffer table, so reference is needed 627 630 */ 628 - return ublk_support_user_copy(ubq); 631 + return ublk_support_user_copy(ubq) || ublk_support_zero_copy(ubq); 629 632 } 630 633 631 634 static inline void 
ublk_init_req_ref(const struct ublk_queue *ubq, ··· 1949 1946 } 1950 1947 1951 1948 static int ublk_register_io_buf(struct io_uring_cmd *cmd, 1952 - struct ublk_queue *ubq, unsigned int tag, 1949 + const struct ublk_queue *ubq, unsigned int tag, 1953 1950 unsigned int index, unsigned int issue_flags) 1954 1951 { 1955 1952 struct ublk_device *ub = cmd->file->private_data; 1953 + const struct ublk_io *io = &ubq->ios[tag]; 1956 1954 struct request *req; 1957 1955 int ret; 1956 + 1957 + if (!ublk_support_zero_copy(ubq)) 1958 + return -EINVAL; 1959 + 1960 + if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)) 1961 + return -EINVAL; 1958 1962 1959 1963 req = __ublk_check_and_get_req(ub, ubq, tag, 0); 1960 1964 if (!req) ··· 1978 1968 } 1979 1969 1980 1970 static int ublk_unregister_io_buf(struct io_uring_cmd *cmd, 1971 + const struct ublk_queue *ubq, unsigned int tag, 1981 1972 unsigned int index, unsigned int issue_flags) 1982 1973 { 1974 + const struct ublk_io *io = &ubq->ios[tag]; 1975 + 1976 + if (!ublk_support_zero_copy(ubq)) 1977 + return -EINVAL; 1978 + 1979 + if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)) 1980 + return -EINVAL; 1981 + 1983 1982 return io_buffer_unregister_bvec(cmd, index, issue_flags); 1984 1983 } 1985 1984 ··· 2092 2073 case UBLK_IO_REGISTER_IO_BUF: 2093 2074 return ublk_register_io_buf(cmd, ubq, tag, ub_cmd->addr, issue_flags); 2094 2075 case UBLK_IO_UNREGISTER_IO_BUF: 2095 - return ublk_unregister_io_buf(cmd, ub_cmd->addr, issue_flags); 2076 + return ublk_unregister_io_buf(cmd, ubq, tag, ub_cmd->addr, issue_flags); 2096 2077 case UBLK_IO_FETCH_REQ: 2097 2078 ret = ublk_fetch(cmd, ubq, io, ub_cmd->addr); 2098 2079 if (ret) ··· 2144 2125 } 2145 2126 2146 2127 static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub, 2147 - struct ublk_queue *ubq, int tag, size_t offset) 2128 + const struct ublk_queue *ubq, int tag, size_t offset) 2148 2129 { 2149 2130 struct request *req; 2150 - 2151 - if (!ublk_need_req_ref(ubq)) 2152 - return 
NULL; 2153 2131 2154 2132 req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag); 2155 2133 if (!req) ··· 2260 2244 ubq = ublk_get_queue(ub, q_id); 2261 2245 if (!ubq) 2262 2246 return ERR_PTR(-EINVAL); 2247 + 2248 + if (!ublk_support_user_copy(ubq)) 2249 + return ERR_PTR(-EACCES); 2263 2250 2264 2251 if (tag >= ubq->q_depth) 2265 2252 return ERR_PTR(-EINVAL); ··· 2802 2783 ub->dev_info.flags |= UBLK_F_CMD_IOCTL_ENCODE | 2803 2784 UBLK_F_URING_CMD_COMP_IN_TASK; 2804 2785 2805 - /* GET_DATA isn't needed any more with USER_COPY */ 2806 - if (ublk_dev_is_user_copy(ub)) 2786 + /* GET_DATA isn't needed any more with USER_COPY or ZERO COPY */ 2787 + if (ub->dev_info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY)) 2807 2788 ub->dev_info.flags &= ~UBLK_F_NEED_GET_DATA; 2808 2789 2809 - /* Zoned storage support requires user copy feature */ 2790 + /* 2791 + * Zoned storage support requires reuse `ublksrv_io_cmd->addr` for 2792 + * returning write_append_lba, which is only allowed in case of 2793 + * user copy or zero copy 2794 + */ 2810 2795 if (ublk_dev_is_zoned(ub) && 2811 - (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) || !ublk_dev_is_user_copy(ub))) { 2796 + (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) || !(ub->dev_info.flags & 2797 + (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY)))) { 2812 2798 ret = -EINVAL; 2813 2799 goto out_free_dev_number; 2814 2800 }
+31 -26
drivers/bluetooth/btintel_pcie.c
··· 957 957 /* This is a debug event that comes from IML and OP image when it 958 958 * starts execution. There is no need pass this event to stack. 959 959 */ 960 - if (skb->data[2] == 0x97) 960 + if (skb->data[2] == 0x97) { 961 + hci_recv_diag(hdev, skb); 961 962 return 0; 963 + } 962 964 } 963 965 964 966 return hci_recv_frame(hdev, skb); ··· 976 974 u8 pkt_type; 977 975 u16 plen; 978 976 u32 pcie_pkt_type; 979 - struct sk_buff *new_skb; 980 977 void *pdata; 981 978 struct hci_dev *hdev = data->hdev; 982 979 ··· 1052 1051 1053 1052 bt_dev_dbg(hdev, "pkt_type: 0x%2.2x len: %u", pkt_type, plen); 1054 1053 1055 - new_skb = bt_skb_alloc(plen, GFP_ATOMIC); 1056 - if (!new_skb) { 1057 - bt_dev_err(hdev, "Failed to allocate memory for skb of len: %u", 1058 - skb->len); 1059 - ret = -ENOMEM; 1060 - goto exit_error; 1061 - } 1062 - 1063 - hci_skb_pkt_type(new_skb) = pkt_type; 1064 - skb_put_data(new_skb, skb->data, plen); 1054 + hci_skb_pkt_type(skb) = pkt_type; 1065 1055 hdev->stat.byte_rx += plen; 1056 + skb_trim(skb, plen); 1066 1057 1067 1058 if (pcie_pkt_type == BTINTEL_PCIE_HCI_EVT_PKT) 1068 - ret = btintel_pcie_recv_event(hdev, new_skb); 1059 + ret = btintel_pcie_recv_event(hdev, skb); 1069 1060 else 1070 - ret = hci_recv_frame(hdev, new_skb); 1061 + ret = hci_recv_frame(hdev, skb); 1062 + skb = NULL; /* skb is freed in the callee */ 1071 1063 1072 1064 exit_error: 1065 + if (skb) 1066 + kfree_skb(skb); 1067 + 1073 1068 if (ret) 1074 1069 hdev->stat.err_rx++; 1075 1070 ··· 1199 1202 struct btintel_pcie_data *data = container_of(work, 1200 1203 struct btintel_pcie_data, rx_work); 1201 1204 struct sk_buff *skb; 1202 - int err; 1203 - struct hci_dev *hdev = data->hdev; 1204 1205 1205 1206 if (test_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags)) { 1206 1207 /* Unlike usb products, controller will not send hardware ··· 1219 1224 1220 1225 /* Process the sk_buf in queue and send to the HCI layer */ 1221 1226 while ((skb = skb_dequeue(&data->rx_skb_q))) { 1222 - err = 
btintel_pcie_recv_frame(data, skb); 1223 - if (err) 1224 - bt_dev_err(hdev, "Failed to send received frame: %d", 1225 - err); 1226 - kfree_skb(skb); 1227 + btintel_pcie_recv_frame(data, skb); 1227 1228 } 1228 1229 } 1229 1230 ··· 1272 1281 bt_dev_dbg(hdev, "RXQ: cr_hia: %u cr_tia: %u", cr_hia, cr_tia); 1273 1282 1274 1283 /* Check CR_TIA and CR_HIA for change */ 1275 - if (cr_tia == cr_hia) { 1276 - bt_dev_warn(hdev, "RXQ: no new CD found"); 1284 + if (cr_tia == cr_hia) 1277 1285 return; 1278 - } 1279 1286 1280 1287 rxq = &data->rxq; 1281 1288 ··· 1309 1320 return IRQ_WAKE_THREAD; 1310 1321 } 1311 1322 1323 + static inline bool btintel_pcie_is_rxq_empty(struct btintel_pcie_data *data) 1324 + { 1325 + return data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM] == data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM]; 1326 + } 1327 + 1328 + static inline bool btintel_pcie_is_txackq_empty(struct btintel_pcie_data *data) 1329 + { 1330 + return data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM] == data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM]; 1331 + } 1332 + 1312 1333 static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id) 1313 1334 { 1314 1335 struct msix_entry *entry = dev_id; ··· 1350 1351 btintel_pcie_msix_gp0_handler(data); 1351 1352 1352 1353 /* For TX */ 1353 - if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0) 1354 + if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0) { 1354 1355 btintel_pcie_msix_tx_handle(data); 1356 + if (!btintel_pcie_is_rxq_empty(data)) 1357 + btintel_pcie_msix_rx_handle(data); 1358 + } 1355 1359 1356 1360 /* For RX */ 1357 - if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1) 1361 + if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1) { 1358 1362 btintel_pcie_msix_rx_handle(data); 1363 + if (!btintel_pcie_is_txackq_empty(data)) 1364 + btintel_pcie_msix_tx_handle(data); 1365 + } 1359 1366 1360 1367 /* 1361 1368 * Before sending the interrupt the HW disables it to prevent a nested
+10 -2
drivers/bluetooth/btmtksdio.c
··· 723 723 { 724 724 struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); 725 725 726 + /* Skip btmtksdio_close if BTMTKSDIO_FUNC_ENABLED isn't set */ 727 + if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state)) 728 + return 0; 729 + 726 730 sdio_claim_host(bdev->func); 727 731 728 732 /* Disable interrupt */ ··· 1447 1443 if (!bdev) 1448 1444 return; 1449 1445 1446 + hdev = bdev->hdev; 1447 + 1448 + /* Make sure to call btmtksdio_close before removing sdio card */ 1449 + if (test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state)) 1450 + btmtksdio_close(hdev); 1451 + 1450 1452 /* Be consistent the state in btmtksdio_probe */ 1451 1453 pm_runtime_get_noresume(bdev->dev); 1452 - 1453 - hdev = bdev->hdev; 1454 1454 1455 1455 sdio_set_drvdata(func, NULL); 1456 1456 hci_unregister_dev(hdev);
+73 -28
drivers/bluetooth/btusb.c
··· 3010 3010 bt_dev_err(hdev, "%s: triggle crash failed (%d)", __func__, err); 3011 3011 } 3012 3012 3013 - /* 3014 - * ==0: not a dump pkt. 3015 - * < 0: fails to handle a dump pkt 3016 - * > 0: otherwise. 3017 - */ 3013 + /* Return: 0 on success, negative errno on failure. */ 3018 3014 static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb) 3019 3015 { 3020 - int ret = 1; 3016 + int ret = 0; 3021 3017 u8 pkt_type; 3022 3018 u8 *sk_ptr; 3023 3019 unsigned int sk_len; 3024 3020 u16 seqno; 3025 3021 u32 dump_size; 3026 3022 3027 - struct hci_event_hdr *event_hdr; 3028 - struct hci_acl_hdr *acl_hdr; 3029 3023 struct qca_dump_hdr *dump_hdr; 3030 3024 struct btusb_data *btdata = hci_get_drvdata(hdev); 3031 3025 struct usb_device *udev = btdata->udev; ··· 3029 3035 sk_len = skb->len; 3030 3036 3031 3037 if (pkt_type == HCI_ACLDATA_PKT) { 3032 - acl_hdr = hci_acl_hdr(skb); 3033 - if (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE) 3034 - return 0; 3035 3038 sk_ptr += HCI_ACL_HDR_SIZE; 3036 3039 sk_len -= HCI_ACL_HDR_SIZE; 3037 - event_hdr = (struct hci_event_hdr *)sk_ptr; 3038 - } else { 3039 - event_hdr = hci_event_hdr(skb); 3040 3040 } 3041 - 3042 - if ((event_hdr->evt != HCI_VENDOR_PKT) 3043 - || (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE))) 3044 - return 0; 3045 3041 3046 3042 sk_ptr += HCI_EVENT_HDR_SIZE; 3047 3043 sk_len -= HCI_EVENT_HDR_SIZE; 3048 3044 3049 3045 dump_hdr = (struct qca_dump_hdr *)sk_ptr; 3050 - if ((sk_len < offsetof(struct qca_dump_hdr, data)) 3051 - || (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) 3052 - || (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE)) 3053 - return 0; 3054 - 3055 - /*it is dump pkt now*/ 3056 3046 seqno = le16_to_cpu(dump_hdr->seqno); 3057 3047 if (seqno == 0) { 3058 3048 set_bit(BTUSB_HW_SSR_ACTIVE, &btdata->flags); ··· 3110 3132 return ret; 3111 3133 } 3112 3134 3135 + /* Return: true if the ACL packet is a dump packet, false otherwise. 
*/ 3136 + static bool acl_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb) 3137 + { 3138 + u8 *sk_ptr; 3139 + unsigned int sk_len; 3140 + 3141 + struct hci_event_hdr *event_hdr; 3142 + struct hci_acl_hdr *acl_hdr; 3143 + struct qca_dump_hdr *dump_hdr; 3144 + 3145 + sk_ptr = skb->data; 3146 + sk_len = skb->len; 3147 + 3148 + acl_hdr = hci_acl_hdr(skb); 3149 + if (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE) 3150 + return false; 3151 + 3152 + sk_ptr += HCI_ACL_HDR_SIZE; 3153 + sk_len -= HCI_ACL_HDR_SIZE; 3154 + event_hdr = (struct hci_event_hdr *)sk_ptr; 3155 + 3156 + if ((event_hdr->evt != HCI_VENDOR_PKT) || 3157 + (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE))) 3158 + return false; 3159 + 3160 + sk_ptr += HCI_EVENT_HDR_SIZE; 3161 + sk_len -= HCI_EVENT_HDR_SIZE; 3162 + 3163 + dump_hdr = (struct qca_dump_hdr *)sk_ptr; 3164 + if ((sk_len < offsetof(struct qca_dump_hdr, data)) || 3165 + (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) || 3166 + (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE)) 3167 + return false; 3168 + 3169 + return true; 3170 + } 3171 + 3172 + /* Return: true if the event packet is a dump packet, false otherwise. 
*/ 3173 + static bool evt_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb) 3174 + { 3175 + u8 *sk_ptr; 3176 + unsigned int sk_len; 3177 + 3178 + struct hci_event_hdr *event_hdr; 3179 + struct qca_dump_hdr *dump_hdr; 3180 + 3181 + sk_ptr = skb->data; 3182 + sk_len = skb->len; 3183 + 3184 + event_hdr = hci_event_hdr(skb); 3185 + 3186 + if ((event_hdr->evt != HCI_VENDOR_PKT) 3187 + || (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE))) 3188 + return false; 3189 + 3190 + sk_ptr += HCI_EVENT_HDR_SIZE; 3191 + sk_len -= HCI_EVENT_HDR_SIZE; 3192 + 3193 + dump_hdr = (struct qca_dump_hdr *)sk_ptr; 3194 + if ((sk_len < offsetof(struct qca_dump_hdr, data)) || 3195 + (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) || 3196 + (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE)) 3197 + return false; 3198 + 3199 + return true; 3200 + } 3201 + 3113 3202 static int btusb_recv_acl_qca(struct hci_dev *hdev, struct sk_buff *skb) 3114 3203 { 3115 - if (handle_dump_pkt_qca(hdev, skb)) 3116 - return 0; 3204 + if (acl_pkt_is_dump_qca(hdev, skb)) 3205 + return handle_dump_pkt_qca(hdev, skb); 3117 3206 return hci_recv_frame(hdev, skb); 3118 3207 } 3119 3208 3120 3209 static int btusb_recv_evt_qca(struct hci_dev *hdev, struct sk_buff *skb) 3121 3210 { 3122 - if (handle_dump_pkt_qca(hdev, skb)) 3123 - return 0; 3211 + if (evt_pkt_is_dump_qca(hdev, skb)) 3212 + return handle_dump_pkt_qca(hdev, skb); 3124 3213 return hci_recv_frame(hdev, skb); 3125 3214 } 3126 3215
+13 -2
drivers/cpufreq/acpi-cpufreq.c
··· 909 909 if (perf->states[0].core_frequency * 1000 != freq_table[0].frequency) 910 910 pr_warn(FW_WARN "P-state 0 is not max freq\n"); 911 911 912 - if (acpi_cpufreq_driver.set_boost) 913 - policy->boost_supported = true; 912 + if (acpi_cpufreq_driver.set_boost) { 913 + if (policy->boost_supported) { 914 + /* 915 + * The firmware may have altered boost state while the 916 + * CPU was offline (for example during a suspend-resume 917 + * cycle). 918 + */ 919 + if (policy->boost_enabled != boost_state(cpu)) 920 + set_boost(policy, policy->boost_enabled); 921 + } else { 922 + policy->boost_supported = true; 923 + } 924 + } 914 925 915 926 return result; 916 927
+14 -8
drivers/cpufreq/cpufreq.c
··· 536 536 EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch); 537 537 538 538 static unsigned int __resolve_freq(struct cpufreq_policy *policy, 539 - unsigned int target_freq, unsigned int relation) 539 + unsigned int target_freq, 540 + unsigned int min, unsigned int max, 541 + unsigned int relation) 540 542 { 541 543 unsigned int idx; 544 + 545 + target_freq = clamp_val(target_freq, min, max); 542 546 543 547 if (!policy->freq_table) 544 548 return target_freq; 545 549 546 - idx = cpufreq_frequency_table_target(policy, target_freq, relation); 550 + idx = cpufreq_frequency_table_target(policy, target_freq, min, max, relation); 547 551 policy->cached_resolved_idx = idx; 548 552 policy->cached_target_freq = target_freq; 549 553 return policy->freq_table[idx].frequency; ··· 581 577 if (unlikely(min > max)) 582 578 min = max; 583 579 584 - return __resolve_freq(policy, clamp_val(target_freq, min, max), 585 - CPUFREQ_RELATION_LE); 580 + return __resolve_freq(policy, target_freq, min, max, CPUFREQ_RELATION_LE); 586 581 } 587 582 EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq); 588 583 ··· 2400 2397 if (cpufreq_disabled()) 2401 2398 return -ENODEV; 2402 2399 2403 - target_freq = clamp_val(target_freq, policy->min, policy->max); 2404 - target_freq = __resolve_freq(policy, target_freq, relation); 2400 + target_freq = __resolve_freq(policy, target_freq, policy->min, 2401 + policy->max, relation); 2405 2402 2406 2403 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n", 2407 2404 policy->cpu, target_freq, relation, old_target_freq); ··· 2730 2727 * compiler optimizations around them because they may be accessed 2731 2728 * concurrently by cpufreq_driver_resolve_freq() during the update. 
2732 2729 */ 2733 - WRITE_ONCE(policy->max, __resolve_freq(policy, new_data.max, CPUFREQ_RELATION_H)); 2734 - new_data.min = __resolve_freq(policy, new_data.min, CPUFREQ_RELATION_L); 2730 + WRITE_ONCE(policy->max, __resolve_freq(policy, new_data.max, 2731 + new_data.min, new_data.max, 2732 + CPUFREQ_RELATION_H)); 2733 + new_data.min = __resolve_freq(policy, new_data.min, new_data.min, 2734 + new_data.max, CPUFREQ_RELATION_L); 2735 2735 WRITE_ONCE(policy->min, new_data.min > policy->max ? policy->max : new_data.min); 2736 2736 2737 2737 trace_cpu_frequency_limits(policy);
+2 -1
drivers/cpufreq/cpufreq_ondemand.c
··· 76 76 return freq_next; 77 77 } 78 78 79 - index = cpufreq_frequency_table_target(policy, freq_next, relation); 79 + index = cpufreq_frequency_table_target(policy, freq_next, policy->min, 80 + policy->max, relation); 80 81 freq_req = freq_table[index].frequency; 81 82 freq_reduc = freq_req * od_tuners->powersave_bias / 1000; 82 83 freq_avg = freq_req - freq_reduc;
+3 -3
drivers/cpufreq/freq_table.c
··· 115 115 EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify); 116 116 117 117 int cpufreq_table_index_unsorted(struct cpufreq_policy *policy, 118 - unsigned int target_freq, 119 - unsigned int relation) 118 + unsigned int target_freq, unsigned int min, 119 + unsigned int max, unsigned int relation) 120 120 { 121 121 struct cpufreq_frequency_table optimal = { 122 122 .driver_data = ~0, ··· 147 147 cpufreq_for_each_valid_entry_idx(pos, table, i) { 148 148 freq = pos->frequency; 149 149 150 - if ((freq < policy->min) || (freq > policy->max)) 150 + if (freq < min || freq > max) 151 151 continue; 152 152 if (freq == target_freq) { 153 153 optimal.driver_data = i;
+3
drivers/cpufreq/intel_pstate.c
··· 598 598 { 599 599 u64 misc_en; 600 600 601 + if (!cpu_feature_enabled(X86_FEATURE_IDA)) 602 + return true; 603 + 601 604 rdmsrl(MSR_IA32_MISC_ENABLE, misc_en); 602 605 603 606 return !!(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
+5 -4
drivers/edac/altera_edac.c
··· 99 99 if (status & priv->ecc_stat_ce_mask) { 100 100 regmap_read(drvdata->mc_vbase, priv->ecc_saddr_offset, 101 101 &err_addr); 102 - if (priv->ecc_uecnt_offset) 102 + if (priv->ecc_cecnt_offset) 103 103 regmap_read(drvdata->mc_vbase, priv->ecc_cecnt_offset, 104 104 &err_count); 105 105 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, err_count, ··· 1005 1005 } 1006 1006 } 1007 1007 1008 - /* Interrupt mode set to every SBERR */ 1009 - regmap_write(ecc_mgr_map, ALTR_A10_ECC_INTMODE_OFST, 1010 - ALTR_A10_ECC_INTMODE); 1011 1008 /* Enable ECC */ 1012 1009 ecc_set_bits(ecc_ctrl_en_mask, (ecc_block_base + 1013 1010 ALTR_A10_ECC_CTRL_OFST)); ··· 2123 2126 "Unable to get syscon altr,sysmgr-syscon\n"); 2124 2127 return PTR_ERR(edac->ecc_mgr_map); 2125 2128 } 2129 + 2130 + /* Set irq mask for DDR SBE to avoid any pending irq before registration */ 2131 + regmap_write(edac->ecc_mgr_map, A10_SYSMGR_ECC_INTMASK_SET_OFST, 2132 + (A10_SYSMGR_ECC_INTMASK_SDMMCB | A10_SYSMGR_ECC_INTMASK_DDR0)); 2126 2133 2127 2134 edac->irq_chip.name = pdev->dev.of_node->name; 2128 2135 edac->irq_chip.irq_mask = a10_eccmgr_irq_mask;
+2
drivers/edac/altera_edac.h
··· 249 249 #define A10_SYSMGR_ECC_INTMASK_SET_OFST 0x94 250 250 #define A10_SYSMGR_ECC_INTMASK_CLR_OFST 0x98 251 251 #define A10_SYSMGR_ECC_INTMASK_OCRAM BIT(1) 252 + #define A10_SYSMGR_ECC_INTMASK_SDMMCB BIT(16) 253 + #define A10_SYSMGR_ECC_INTMASK_DDR0 BIT(17) 252 254 253 255 #define A10_SYSMGR_ECC_INTSTAT_SERR_OFST 0x9C 254 256 #define A10_SYSMGR_ECC_INTSTAT_DERR_OFST 0xA0
+1 -1
drivers/gpu/drm/Kconfig
··· 188 188 bool "Enable refcount backtrace history in the DP MST helpers" 189 189 depends on STACKTRACE_SUPPORT 190 190 select STACKDEPOT 191 - depends on DRM_KMS_HELPER 191 + select DRM_KMS_HELPER 192 192 depends on DEBUG_KERNEL 193 193 depends on EXPERT 194 194 help
+14 -13
drivers/gpu/drm/adp/adp_drv.c
··· 121 121 dma_addr_t mask_iova; 122 122 int be_irq; 123 123 int fe_irq; 124 - spinlock_t irq_lock; 125 124 struct drm_pending_vblank_event *event; 126 125 }; 127 126 ··· 287 288 writel(BIT(0), adp->be + ADBE_BLEND_EN3); 288 289 writel(BIT(0), adp->be + ADBE_BLEND_BYPASS); 289 290 writel(BIT(0), adp->be + ADBE_BLEND_EN4); 291 + drm_crtc_vblank_on(crtc); 290 292 } 291 293 292 294 static void adp_crtc_atomic_disable(struct drm_crtc *crtc, ··· 310 310 struct drm_atomic_state *state) 311 311 { 312 312 u32 frame_num = 1; 313 + unsigned long flags; 313 314 struct adp_drv_private *adp = crtc_to_adp(crtc); 314 315 struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state, crtc); 315 316 u64 new_size = ALIGN(new_state->mode.hdisplay * ··· 331 330 } 332 331 writel(ADBE_FIFO_SYNC | frame_num, adp->be + ADBE_FIFO); 333 332 //FIXME: use adbe flush interrupt 334 - spin_lock_irq(&crtc->dev->event_lock); 335 333 if (crtc->state->event) { 336 - drm_crtc_vblank_get(crtc); 337 - adp->event = crtc->state->event; 334 + struct drm_pending_vblank_event *event = crtc->state->event; 335 + 336 + crtc->state->event = NULL; 337 + spin_lock_irqsave(&crtc->dev->event_lock, flags); 338 + 339 + if (drm_crtc_vblank_get(crtc) != 0) 340 + drm_crtc_send_vblank_event(crtc, event); 341 + else 342 + adp->event = event; 343 + 344 + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); 338 345 } 339 - crtc->state->event = NULL; 340 - spin_unlock_irq(&crtc->dev->event_lock); 341 346 } 342 347 343 348 static const struct drm_crtc_funcs adp_crtc_funcs = { ··· 489 482 u32 int_status; 490 483 u32 int_ctl; 491 484 492 - spin_lock(&adp->irq_lock); 493 - 494 485 int_status = readl(adp->fe + ADP_INT_STATUS); 495 486 if (int_status & ADP_INT_STATUS_VBLANK) { 496 487 drm_crtc_handle_vblank(&adp->crtc); ··· 506 501 507 502 writel(int_status, adp->fe + ADP_INT_STATUS); 508 503 509 - spin_unlock(&adp->irq_lock); 510 504 511 505 return IRQ_HANDLED; 512 506 } ··· 516 512 struct adp_drv_private *adp = 
to_adp(drm); 517 513 int err; 518 514 519 - adp_disable_vblank(adp); 520 - writel(ADP_CTRL_FIFO_ON | ADP_CTRL_VBLANK_ON, adp->fe + ADP_CTRL); 515 + writel(ADP_CTRL_FIFO_ON, adp->fe + ADP_CTRL); 521 516 522 517 adp->next_bridge = drmm_of_get_bridge(&adp->drm, dev->of_node, 0, 0); 523 518 if (IS_ERR(adp->next_bridge)) { ··· 569 566 adp = devm_drm_dev_alloc(&pdev->dev, &adp_driver, struct adp_drv_private, drm); 570 567 if (IS_ERR(adp)) 571 568 return PTR_ERR(adp); 572 - 573 - spin_lock_init(&adp->irq_lock); 574 569 575 570 dev_set_drvdata(&pdev->dev, &adp->drm); 576 571
+5
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
··· 199 199 break; 200 200 201 201 case TTM_PL_VRAM: 202 + /* XGMI-accessible memory should never be DMA-mapped */ 203 + if (WARN_ON(amdgpu_dmabuf_is_xgmi_accessible( 204 + dma_buf_attach_adev(attach), bo))) 205 + return ERR_PTR(-EINVAL); 206 + 202 207 r = amdgpu_vram_mgr_alloc_sgt(adev, bo->tbo.resource, 0, 203 208 bo->tbo.base.size, attach->dev, 204 209 dir, &sgt);
+1 -1
drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c
··· 360 360 *flags |= AMD_CG_SUPPORT_BIF_LS; 361 361 } 362 362 363 - #define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE) 363 + #define MMIO_REG_HOLE_OFFSET 0x44000 364 364 365 365 static void nbio_v7_11_set_reg_remap(struct amdgpu_device *adev) 366 366 {
+54
drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
··· 503 503 } 504 504 505 505 /** 506 + * vcn_v5_0_1_pause_dpg_mode - VCN pause with dpg mode 507 + * 508 + * @vinst: VCN instance 509 + * @new_state: pause state 510 + * 511 + * Pause dpg mode for VCN block 512 + */ 513 + static int vcn_v5_0_1_pause_dpg_mode(struct amdgpu_vcn_inst *vinst, 514 + struct dpg_pause_state *new_state) 515 + { 516 + struct amdgpu_device *adev = vinst->adev; 517 + uint32_t reg_data = 0; 518 + int vcn_inst; 519 + 520 + vcn_inst = GET_INST(VCN, vinst->inst); 521 + 522 + /* pause/unpause if state is changed */ 523 + if (vinst->pause_state.fw_based != new_state->fw_based) { 524 + DRM_DEV_DEBUG(adev->dev, "dpg pause state changed %d -> %d %s\n", 525 + vinst->pause_state.fw_based, new_state->fw_based, 526 + new_state->fw_based ? "VCN_DPG_STATE__PAUSE" : "VCN_DPG_STATE__UNPAUSE"); 527 + reg_data = RREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE) & 528 + (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK); 529 + 530 + if (new_state->fw_based == VCN_DPG_STATE__PAUSE) { 531 + /* pause DPG */ 532 + reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK; 533 + WREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE, reg_data); 534 + 535 + /* wait for ACK */ 536 + SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_DPG_PAUSE, 537 + UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, 538 + UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK); 539 + } else { 540 + /* unpause DPG, no need to wait */ 541 + reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK; 542 + WREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE, reg_data); 543 + } 544 + vinst->pause_state.fw_based = new_state->fw_based; 545 + } 546 + 547 + return 0; 548 + } 549 + 550 + 551 + /** 506 552 * vcn_v5_0_1_start_dpg_mode - VCN start with dpg mode 507 553 * 508 554 * @vinst: VCN instance ··· 564 518 volatile struct amdgpu_vcn5_fw_shared *fw_shared = 565 519 adev->vcn.inst[inst_idx].fw_shared.cpu_addr; 566 520 struct amdgpu_ring *ring; 521 + struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__PAUSE}; 567 522 int vcn_inst; 568 523 uint32_t tmp; 569 524 ··· 628 581 
629 582 if (indirect) 630 583 amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM); 584 + 585 + /* Pause dpg */ 586 + vcn_v5_0_1_pause_dpg_mode(vinst, &state); 631 587 632 588 ring = &adev->vcn.inst[inst_idx].ring_enc[0]; 633 589 ··· 825 775 int inst_idx = vinst->inst; 826 776 uint32_t tmp; 827 777 int vcn_inst; 778 + struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE}; 828 779 829 780 vcn_inst = GET_INST(VCN, inst_idx); 781 + 782 + /* Unpause dpg */ 783 + vcn_v5_0_1_pause_dpg_mode(vinst, &state); 830 784 831 785 /* Wait for power status to be 1 */ 832 786 SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_POWER_STATUS, 1,
+16 -3
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
··· 173 173 unsigned int conn_index = aconnector->base.index; 174 174 175 175 guard(mutex)(&hdcp_w->mutex); 176 + drm_connector_get(&aconnector->base); 177 + if (hdcp_w->aconnector[conn_index]) 178 + drm_connector_put(&hdcp_w->aconnector[conn_index]->base); 176 179 hdcp_w->aconnector[conn_index] = aconnector; 177 180 178 181 memset(&link_adjust, 0, sizeof(link_adjust)); ··· 223 220 unsigned int conn_index = aconnector->base.index; 224 221 225 222 guard(mutex)(&hdcp_w->mutex); 226 - hdcp_w->aconnector[conn_index] = aconnector; 227 223 228 224 /* the removal of display will invoke auth reset -> hdcp destroy and 229 225 * we'd expect the Content Protection (CP) property changed back to ··· 238 236 } 239 237 240 238 mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output); 241 - 239 + if (hdcp_w->aconnector[conn_index]) { 240 + drm_connector_put(&hdcp_w->aconnector[conn_index]->base); 241 + hdcp_w->aconnector[conn_index] = NULL; 242 + } 242 243 process_output(hdcp_w); 243 244 } 244 245 ··· 259 254 for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; conn_index++) { 260 255 hdcp_w->encryption_status[conn_index] = 261 256 MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; 257 + if (hdcp_w->aconnector[conn_index]) { 258 + drm_connector_put(&hdcp_w->aconnector[conn_index]->base); 259 + hdcp_w->aconnector[conn_index] = NULL; 260 + } 262 261 } 263 262 264 263 process_output(hdcp_w); ··· 497 488 struct hdcp_workqueue *hdcp_work = handle; 498 489 struct amdgpu_dm_connector *aconnector = config->dm_stream_ctx; 499 490 int link_index = aconnector->dc_link->link_index; 491 + unsigned int conn_index = aconnector->base.index; 500 492 struct mod_hdcp_display *display = &hdcp_work[link_index].display; 501 493 struct mod_hdcp_link *link = &hdcp_work[link_index].link; 502 494 struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; ··· 554 544 guard(mutex)(&hdcp_w->mutex); 555 545 556 546 mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output); 557 - 547 + 
drm_connector_get(&aconnector->base); 548 + if (hdcp_w->aconnector[conn_index]) 549 + drm_connector_put(&hdcp_w->aconnector[conn_index]->base); 550 + hdcp_w->aconnector[conn_index] = aconnector; 558 551 process_output(hdcp_w); 559 552 } 560 553
+6
drivers/gpu/drm/drm_file.c
··· 964 964 struct drm_file *file = f->private_data; 965 965 struct drm_device *dev = file->minor->dev; 966 966 struct drm_printer p = drm_seq_file_printer(m); 967 + int idx; 968 + 969 + if (!drm_dev_enter(dev, &idx)) 970 + return; 967 971 968 972 drm_printf(&p, "drm-driver:\t%s\n", dev->driver->name); 969 973 drm_printf(&p, "drm-client-id:\t%llu\n", file->client_id); ··· 987 983 988 984 if (dev->driver->show_fdinfo) 989 985 dev->driver->show_fdinfo(&p, file); 986 + 987 + drm_dev_exit(idx); 990 988 } 991 989 EXPORT_SYMBOL(drm_show_fdinfo); 992 990
+1 -1
drivers/gpu/drm/drm_gpusvm.c
··· 1469 1469 } 1470 1470 i += 1 << order; 1471 1471 num_dma_mapped = i; 1472 + range->flags.has_dma_mapping = true; 1472 1473 } 1473 1474 1474 - range->flags.has_dma_mapping = true; 1475 1475 if (zdd) { 1476 1476 range->flags.has_devmem_pages = true; 1477 1477 range->dpagemap = dpagemap;
+5 -1
drivers/gpu/drm/drm_mipi_dbi.c
··· 404 404 u16 height = drm->mode_config.min_height; 405 405 u16 width = drm->mode_config.min_width; 406 406 struct mipi_dbi *dbi = &dbidev->dbi; 407 - size_t len = width * height * 2; 407 + const struct drm_format_info *dst_format; 408 + size_t len; 408 409 int idx; 409 410 410 411 if (!drm_dev_enter(drm, &idx)) 411 412 return; 413 + 414 + dst_format = drm_format_info(dbidev->pixel_format); 415 + len = drm_format_info_min_pitch(dst_format, 0, width) * height; 412 416 413 417 memset(dbidev->tx_buf, 0, len); 414 418
+6 -2
drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h
··· 23 23 24 24 int intel_pxp_gsccs_create_session(struct intel_pxp *pxp, int arb_session_id); 25 25 void intel_pxp_gsccs_end_arb_fw_session(struct intel_pxp *pxp, u32 arb_session_id); 26 + bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp); 26 27 27 28 #else 28 29 static inline void intel_pxp_gsccs_fini(struct intel_pxp *pxp) ··· 35 34 return 0; 36 35 } 37 36 38 - #endif 37 + static inline bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp) 38 + { 39 + return false; 40 + } 39 41 40 - bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp); 42 + #endif 41 43 42 44 #endif /*__INTEL_PXP_GSCCS_H__ */
+1 -1
drivers/gpu/drm/nouveau/nouveau_fence.c
··· 90 90 while (!list_empty(&fctx->pending)) { 91 91 fence = list_entry(fctx->pending.next, typeof(*fence), head); 92 92 93 - if (error) 93 + if (error && !dma_fence_is_signaled_locked(&fence->base)) 94 94 dma_fence_set_error(&fence->base, error); 95 95 96 96 if (nouveau_fence_signal(fence))
+3
drivers/gpu/drm/tests/drm_gem_shmem_test.c
··· 216 216 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt); 217 217 KUNIT_EXPECT_NULL(test, shmem->sgt); 218 218 219 + ret = kunit_add_action_or_reset(test, kfree_wrapper, sgt); 220 + KUNIT_ASSERT_EQ(test, ret, 0); 221 + 219 222 ret = kunit_add_action_or_reset(test, sg_free_table_wrapper, sgt); 220 223 KUNIT_ASSERT_EQ(test, ret, 0); 221 224
+2 -1
drivers/gpu/drm/ttm/ttm_bo.c
··· 1093 1093 struct ttm_lru_walk walk; 1094 1094 /** @gfp_flags: The gfp flags to use for ttm_tt_swapout() */ 1095 1095 gfp_t gfp_flags; 1096 - 1096 + /** @hit_low: Whether we should attempt to swap BOs with low watermark threshold */ 1097 + /** @evict_low: If we cannot swap a bo when @hit_low is false (first pass) */ 1097 1098 bool hit_low, evict_low; 1098 1099 };
+12 -2
drivers/gpu/drm/xe/xe_eu_stall.c
··· 52 52 53 53 struct xe_gt *gt; 54 54 struct xe_bo *bo; 55 + /* Lock to protect data buffer pointers */ 56 + struct mutex xecore_buf_lock; 55 57 struct per_xecore_buf *xecore_buf; 56 58 struct { 57 59 bool reported_to_user; ··· 209 207 { 210 208 struct xe_device *xe = gt_to_xe(gt); 211 209 int ret; 210 + 211 + if (!xe_eu_stall_supported_on_platform(xe)) 212 + return 0; 212 213 213 214 gt->eu_stall = kzalloc(sizeof(*gt->eu_stall), GFP_KERNEL); 214 215 if (!gt->eu_stall) { ··· 383 378 u16 group, instance; 384 379 unsigned int xecore; 385 380 386 - mutex_lock(&gt->eu_stall->stream_lock); 381 + mutex_lock(&stream->xecore_buf_lock); 387 382 for_each_dss_steering(xecore, gt, group, instance) { 388 383 xecore_buf = &stream->xecore_buf[xecore]; 389 384 read_ptr = xecore_buf->read; ··· 401 396 set_bit(xecore, stream->data_drop.mask); 402 397 xecore_buf->write = write_ptr; 403 398 } 404 - mutex_unlock(&gt->eu_stall->stream_lock); 399 + mutex_unlock(&stream->xecore_buf_lock); 405 400 406 401 return min_data_present; 407 402 } ··· 516 511 unsigned int xecore; 517 512 int ret = 0; 518 513 514 + mutex_lock(&stream->xecore_buf_lock); 519 515 if (bitmap_weight(stream->data_drop.mask, XE_MAX_DSS_FUSE_BITS)) { 520 516 if (!stream->data_drop.reported_to_user) { 521 517 stream->data_drop.reported_to_user = true; 522 518 xe_gt_dbg(gt, "EU stall data dropped in XeCores: %*pb\n", 523 519 XE_MAX_DSS_FUSE_BITS, stream->data_drop.mask); 520 + mutex_unlock(&stream->xecore_buf_lock); 524 521 return -EIO; 525 522 } 526 523 stream->data_drop.reported_to_user = false; ··· 534 527 if (ret || count == total_size) 535 528 break; 536 529 } 530 + mutex_unlock(&stream->xecore_buf_lock); 537 531 return total_size ?: (ret ?: -EAGAIN); 538 532 } 539 533 ··· 591 583 { 592 584 struct xe_gt *gt = stream->gt; 593 585 586 + mutex_destroy(&stream->xecore_buf_lock); 594 587 gt->eu_stall->stream = NULL; 595 588 kfree(stream); 596 589 } ··· 727 718 } 728 719 729 720 init_waitqueue_head(&stream->poll_wq); 721 + 
mutex_init(&stream->xecore_buf_lock); 730 722 INIT_DELAYED_WORK(&stream->buf_poll_work, eu_stall_data_buf_poll_work_fn); 731 723 stream->per_xecore_buf_size = per_xecore_buf_size; 732 724 stream->sampling_rate_mult = props->sampling_rate_mult;
+2 -1
drivers/gpu/drm/xe/xe_eu_stall.h
··· 7 7 #define __XE_EU_STALL_H__ 8 8 9 9 #include "xe_gt_types.h" 10 + #include "xe_sriov.h" 10 11 11 12 size_t xe_eu_stall_get_per_xecore_buf_size(void); 12 13 size_t xe_eu_stall_data_record_size(struct xe_device *xe); ··· 20 19 21 20 static inline bool xe_eu_stall_supported_on_platform(struct xe_device *xe) 22 21 { 23 - return xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20; 22 + return !IS_SRIOV_VF(xe) && (xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20); 24 23 } 25 24 #endif
+1 -1
drivers/gpu/drm/xe/xe_guc_capture.c
··· 359 359 360 360 ext->reg = XE_REG(extlist->reg.__reg.addr); 361 361 ext->flags = FIELD_PREP(GUC_REGSET_STEERING_NEEDED, 1); 362 - ext->flags = FIELD_PREP(GUC_REGSET_STEERING_GROUP, slice_id); 362 + ext->flags |= FIELD_PREP(GUC_REGSET_STEERING_GROUP, slice_id); 363 363 ext->flags |= FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, subslice_id); 364 364 ext->regname = extlist->name; 365 365 }
+1 -1
drivers/gpu/drm/xe/xe_svm.c
··· 79 79 80 80 range = kzalloc(sizeof(*range), GFP_KERNEL); 81 81 if (!range) 82 - return ERR_PTR(-ENOMEM); 82 + return NULL; 83 83 84 84 INIT_LIST_HEAD(&range->garbage_collector_link); 85 85 xe_vm_get(gpusvm_to_vm(gpusvm));
+6 -4
drivers/hv/hv_common.c
··· 307 307 308 308 local_irq_save(flags); 309 309 output = *this_cpu_ptr(hyperv_pcpu_input_arg); 310 - status = hv_do_hypercall(HVCALL_GET_PARTITION_ID, NULL, &output); 310 + status = hv_do_hypercall(HVCALL_GET_PARTITION_ID, NULL, output); 311 311 pt_id = output->partition_id; 312 312 local_irq_restore(flags); 313 313 ··· 566 566 * originally allocated memory is reused in hv_common_cpu_init(). 567 567 */ 568 568 569 - synic_eventring_tail = this_cpu_ptr(hv_synic_eventring_tail); 570 - kfree(*synic_eventring_tail); 571 - *synic_eventring_tail = NULL; 569 + if (hv_root_partition()) { 570 + synic_eventring_tail = this_cpu_ptr(hv_synic_eventring_tail); 571 + kfree(*synic_eventring_tail); 572 + *synic_eventring_tail = NULL; 573 + } 572 574 573 575 return 0; 574 576 }
+2 -2
drivers/i2c/busses/i2c-imx-lpi2c.c
··· 1380 1380 return 0; 1381 1381 1382 1382 rpm_disable: 1383 - pm_runtime_put(&pdev->dev); 1384 - pm_runtime_disable(&pdev->dev); 1385 1383 pm_runtime_dont_use_autosuspend(&pdev->dev); 1384 + pm_runtime_put_sync(&pdev->dev); 1385 + pm_runtime_disable(&pdev->dev); 1386 1386 1387 1387 return ret; 1388 1388 }
+8
drivers/iommu/amd/init.c
··· 3664 3664 while (*uid == '0' && *(uid + 1)) 3665 3665 uid++; 3666 3666 3667 + if (strlen(hid) >= ACPIHID_HID_LEN) { 3668 + pr_err("Invalid command line: hid is too long\n"); 3669 + return 1; 3670 + } else if (strlen(uid) >= ACPIHID_UID_LEN) { 3671 + pr_err("Invalid command line: uid is too long\n"); 3672 + return 1; 3673 + } 3674 + 3667 3675 i = early_acpihid_map_size++; 3668 3676 memcpy(early_acpihid_map[i].hid, hid, strlen(hid)); 3669 3677 memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
+6
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
··· 411 411 return ERR_CAST(smmu_domain); 412 412 smmu_domain->domain.type = IOMMU_DOMAIN_SVA; 413 413 smmu_domain->domain.ops = &arm_smmu_sva_domain_ops; 414 + 415 + /* 416 + * Choose page_size as the leaf page size for invalidation when 417 + * ARM_SMMU_FEAT_RANGE_INV is present 418 + */ 419 + smmu_domain->domain.pgsize_bitmap = PAGE_SIZE; 414 420 smmu_domain->smmu = smmu; 415 421 416 422 ret = xa_alloc(&arm_smmu_asid_xa, &asid, smmu_domain,
+18 -5
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
··· 3388 3388 mutex_lock(&smmu->streams_mutex); 3389 3389 for (i = 0; i < fwspec->num_ids; i++) { 3390 3390 struct arm_smmu_stream *new_stream = &master->streams[i]; 3391 + struct rb_node *existing; 3391 3392 u32 sid = fwspec->ids[i]; 3392 3393 3393 3394 new_stream->id = sid; ··· 3399 3398 break; 3400 3399 3401 3400 /* Insert into SID tree */ 3402 - if (rb_find_add(&new_stream->node, &smmu->streams, 3403 - arm_smmu_streams_cmp_node)) { 3404 - dev_warn(master->dev, "stream %u already in tree\n", 3405 - sid); 3406 - ret = -EINVAL; 3401 + existing = rb_find_add(&new_stream->node, &smmu->streams, 3402 + arm_smmu_streams_cmp_node); 3403 + if (existing) { 3404 + struct arm_smmu_master *existing_master = 3405 + rb_entry(existing, struct arm_smmu_stream, node) 3406 + ->master; 3407 + 3408 + /* Bridged PCI devices may end up with duplicated IDs */ 3409 + if (existing_master == master) 3410 + continue; 3411 + 3412 + dev_warn(master->dev, 3413 + "Aliasing StreamID 0x%x (from %s) unsupported, expect DMA to be broken\n", 3414 + sid, dev_name(existing_master->dev)); 3415 + ret = -ENODEV; 3407 3416 break; 3408 3417 } 3409 3418 } ··· 4440 4429 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR3); 4441 4430 if (FIELD_GET(IDR3_RIL, reg)) 4442 4431 smmu->features |= ARM_SMMU_FEAT_RANGE_INV; 4432 + if (FIELD_GET(IDR3_FWB, reg)) 4433 + smmu->features |= ARM_SMMU_FEAT_S2FWB; 4443 4434 4444 4435 /* IDR5 */ 4445 4436 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);
+24 -15
drivers/iommu/intel/iommu.c
··· 3785 3785 3786 3786 intel_iommu_debugfs_create_dev(info); 3787 3787 3788 - /* 3789 - * The PCIe spec, in its wisdom, declares that the behaviour of the 3790 - * device is undefined if you enable PASID support after ATS support. 3791 - * So always enable PASID support on devices which have it, even if 3792 - * we can't yet know if we're ever going to use it. 3793 - */ 3794 - if (info->pasid_supported && 3795 - !pci_enable_pasid(pdev, info->pasid_supported & ~1)) 3796 - info->pasid_enabled = 1; 3797 - 3798 - if (sm_supported(iommu)) 3799 - iommu_enable_pci_ats(info); 3800 - iommu_enable_pci_pri(info); 3801 - 3802 3788 return &iommu->iommu; 3803 3789 free_table: 3804 3790 intel_pasid_free_table(dev); ··· 3794 3808 kfree(info); 3795 3809 3796 3810 return ERR_PTR(ret); 3811 + } 3812 + 3813 + static void intel_iommu_probe_finalize(struct device *dev) 3814 + { 3815 + struct device_domain_info *info = dev_iommu_priv_get(dev); 3816 + struct intel_iommu *iommu = info->iommu; 3817 + 3818 + /* 3819 + * The PCIe spec, in its wisdom, declares that the behaviour of the 3820 + * device is undefined if you enable PASID support after ATS support. 3821 + * So always enable PASID support on devices which have it, even if 3822 + * we can't yet know if we're ever going to use it. 
3823 + */ 3824 + if (info->pasid_supported && 3825 + !pci_enable_pasid(to_pci_dev(dev), info->pasid_supported & ~1)) 3826 + info->pasid_enabled = 1; 3827 + 3828 + if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) 3829 + iommu_enable_pci_ats(info); 3830 + iommu_enable_pci_pri(info); 3797 3831 } 3798 3832 3799 3833 static void intel_iommu_release_device(struct device *dev) ··· 4397 4391 .domain_alloc_sva = intel_svm_domain_alloc, 4398 4392 .domain_alloc_nested = intel_iommu_domain_alloc_nested, 4399 4393 .probe_device = intel_iommu_probe_device, 4394 + .probe_finalize = intel_iommu_probe_finalize, 4400 4395 .release_device = intel_iommu_release_device, 4401 4396 .get_resv_regions = intel_iommu_get_resv_regions, 4402 4397 .device_group = intel_iommu_device_group, ··· 4438 4431 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_igfx); 4439 4432 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_igfx); 4440 4433 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_igfx); 4434 + 4435 + /* QM57/QS57 integrated gfx malfunctions with dmar */ 4436 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_iommu_igfx); 4441 4437 4442 4438 /* Broadwell igfx malfunctions with dmar */ 4443 4439 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1606, quirk_iommu_igfx); ··· 4519 4509 } 4520 4510 } 4521 4511 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt); 4522 - DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt); 4523 4512 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt); 4524 4513 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt); 4525 4514
+3
drivers/irqchip/irq-qcom-mpm.c
··· 227 227 if (ret) 228 228 return ret; 229 229 230 + if (pin == GPIO_NO_WAKE_IRQ) 231 + return irq_domain_disconnect_hierarchy(domain, virq); 232 + 230 233 ret = irq_domain_set_hwirq_and_chip(domain, virq, pin, 231 234 &qcom_mpm_chip, priv); 232 235 if (ret)
+8 -1
drivers/md/dm-bufio.c
··· 68 68 #define LIST_DIRTY 1 69 69 #define LIST_SIZE 2 70 70 71 + #define SCAN_RESCHED_CYCLE 16 72 + 71 73 /*--------------------------------------------------------------*/ 72 74 73 75 /* ··· 2426 2424 2427 2425 atomic_long_dec(&c->need_shrink); 2428 2426 freed++; 2429 - cond_resched(); 2427 + 2428 + if (unlikely(freed % SCAN_RESCHED_CYCLE == 0)) { 2429 + dm_bufio_unlock(c); 2430 + cond_resched(); 2431 + dm_bufio_lock(c); 2432 + } 2430 2433 } 2431 2434 } 2432 2435 }
+1 -1
drivers/md/dm-integrity.c
··· 5164 5164 BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress)); 5165 5165 BUG_ON(!list_empty(&ic->wait_list)); 5166 5166 5167 - if (ic->mode == 'B') 5167 + if (ic->mode == 'B' && ic->bitmap_flush_work.work.func) 5168 5168 cancel_delayed_work_sync(&ic->bitmap_flush_work); 5169 5169 if (ic->metadata_wq) 5170 5170 destroy_workqueue(ic->metadata_wq);
+3 -5
drivers/md/dm-table.c
··· 523 523 gfp = GFP_NOIO; 524 524 } 525 525 argv = kmalloc_array(new_size, sizeof(*argv), gfp); 526 - if (argv && old_argv) { 527 - memcpy(argv, old_argv, *size * sizeof(*argv)); 526 + if (argv) { 528 527 *size = new_size; 528 + if (old_argv) 529 + memcpy(argv, old_argv, *size * sizeof(*argv)); 529 530 } 530 531 531 532 kfree(old_argv); ··· 1050 1049 unsigned int min_pool_size = 0, pool_size; 1051 1050 struct dm_md_mempools *pools; 1052 1051 unsigned int bioset_flags = 0; 1053 - bool mempool_needs_integrity = t->integrity_supported; 1054 1052 1055 1053 if (unlikely(type == DM_TYPE_NONE)) { 1056 1054 DMERR("no table type is set, can't allocate mempools"); ··· 1074 1074 1075 1075 per_io_data_size = max(per_io_data_size, ti->per_io_data_size); 1076 1076 min_pool_size = max(min_pool_size, ti->num_flush_bios); 1077 - 1078 - mempool_needs_integrity |= ti->mempool_needs_integrity; 1079 1077 } 1080 1078 pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size); 1081 1079 front_pad = roundup(per_io_data_size,
+1 -1
drivers/mmc/host/Kconfig
··· 691 691 config MMC_SDHI 692 692 tristate "Renesas SDHI SD/SDIO controller support" 693 693 depends on SUPERH || ARCH_RENESAS || COMPILE_TEST 694 + depends on (RESET_CONTROLLER && REGULATOR) || !OF 694 695 select MMC_TMIO_CORE 695 - select RESET_CONTROLLER if ARCH_RENESAS 696 696 help 697 697 This provides support for the SDHI SD/SDIO controller found in 698 698 Renesas SuperH, ARM and ARM64 based SoCs
+5 -7
drivers/mmc/host/renesas_sdhi_core.c
··· 1179 1179 if (IS_ERR(rdev)) { 1180 1180 dev_err(dev, "regulator register failed err=%ld", PTR_ERR(rdev)); 1181 1181 ret = PTR_ERR(rdev); 1182 - goto efree; 1182 + goto edisclk; 1183 1183 } 1184 1184 priv->rdev = rdev; 1185 1185 } ··· 1243 1243 num_irqs = platform_irq_count(pdev); 1244 1244 if (num_irqs < 0) { 1245 1245 ret = num_irqs; 1246 - goto eirq; 1246 + goto edisclk; 1247 1247 } 1248 1248 1249 1249 /* There must be at least one IRQ source */ 1250 1250 if (!num_irqs) { 1251 1251 ret = -ENXIO; 1252 - goto eirq; 1252 + goto edisclk; 1253 1253 } 1254 1254 1255 1255 for (i = 0; i < num_irqs; i++) { 1256 1256 irq = platform_get_irq(pdev, i); 1257 1257 if (irq < 0) { 1258 1258 ret = irq; 1259 - goto eirq; 1259 + goto edisclk; 1260 1260 } 1261 1261 1262 1262 ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq, 0, 1263 1263 dev_name(&pdev->dev), host); 1264 1264 if (ret) 1265 - goto eirq; 1265 + goto edisclk; 1266 1266 } 1267 1267 1268 1268 ret = tmio_mmc_host_probe(host); ··· 1274 1274 1275 1275 return ret; 1276 1276 1277 - eirq: 1278 - tmio_mmc_host_remove(host); 1279 1277 edisclk: 1280 1278 renesas_sdhi_clk_disable(host); 1281 1279 efree:
+4 -1
drivers/net/dsa/ocelot/felix_vsc9959.c
··· 1543 1543 struct tc_taprio_qopt_offload *taprio; 1544 1544 struct ocelot_port *ocelot_port; 1545 1545 struct timespec64 base_ts; 1546 - int port; 1546 + int i, port; 1547 1547 u32 val; 1548 1548 1549 1549 mutex_lock(&ocelot->fwd_domain_lock); ··· 1574 1574 QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB(val), 1575 1575 QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB_M, 1576 1576 QSYS_PARAM_CFG_REG_3); 1577 + 1578 + for (i = 0; i < taprio->num_entries; i++) 1579 + vsc9959_tas_gcl_set(ocelot, i, &taprio->entries[i]); 1577 1580 1578 1581 ocelot_rmw(ocelot, QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE, 1579 1582 QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
-1
drivers/net/ethernet/amd/pds_core/auxbus.c
··· 186 186 pds_client_unregister(pf, padev->client_id); 187 187 auxiliary_device_delete(&padev->aux_dev); 188 188 auxiliary_device_uninit(&padev->aux_dev); 189 - padev->client_id = 0; 190 189 *pd_ptr = NULL; 191 190 192 191 mutex_unlock(&pf->config_lock);
+7 -2
drivers/net/ethernet/amd/xgbe/xgbe-desc.c
··· 373 373 } 374 374 375 375 /* Set up the header page info */ 376 - xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa, 377 - XGBE_SKB_ALLOC_SIZE); 376 + if (pdata->netdev->features & NETIF_F_RXCSUM) { 377 + xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa, 378 + XGBE_SKB_ALLOC_SIZE); 379 + } else { 380 + xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa, 381 + pdata->rx_buf_size); 382 + } 378 383 379 384 /* Set up the buffer page info */ 380 385 xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa,
+22 -2
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
··· 320 320 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE); 321 321 } 322 322 323 + static void xgbe_disable_sph_mode(struct xgbe_prv_data *pdata) 324 + { 325 + unsigned int i; 326 + 327 + for (i = 0; i < pdata->channel_count; i++) { 328 + if (!pdata->channel[i]->rx_ring) 329 + break; 330 + 331 + XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 0); 332 + } 333 + } 334 + 323 335 static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type, 324 336 unsigned int index, unsigned int val) 325 337 { ··· 3557 3545 xgbe_config_tx_coalesce(pdata); 3558 3546 xgbe_config_rx_buffer_size(pdata); 3559 3547 xgbe_config_tso_mode(pdata); 3560 - xgbe_config_sph_mode(pdata); 3561 - xgbe_config_rss(pdata); 3548 + 3549 + if (pdata->netdev->features & NETIF_F_RXCSUM) { 3550 + xgbe_config_sph_mode(pdata); 3551 + xgbe_config_rss(pdata); 3552 + } 3553 + 3562 3554 desc_if->wrapper_tx_desc_init(pdata); 3563 3555 desc_if->wrapper_rx_desc_init(pdata); 3564 3556 xgbe_enable_dma_interrupts(pdata); ··· 3717 3701 hw_if->enable_vxlan = xgbe_enable_vxlan; 3718 3702 hw_if->disable_vxlan = xgbe_disable_vxlan; 3719 3703 hw_if->set_vxlan_id = xgbe_set_vxlan_id; 3704 + 3705 + /* For Split Header*/ 3706 + hw_if->enable_sph = xgbe_config_sph_mode; 3707 + hw_if->disable_sph = xgbe_disable_sph_mode; 3720 3708 3721 3709 DBGPR("<--xgbe_init_function_ptrs\n"); 3722 3710 }
+9 -2
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
··· 2257 2257 if (ret) 2258 2258 return ret; 2259 2259 2260 - if ((features & NETIF_F_RXCSUM) && !rxcsum) 2260 + if ((features & NETIF_F_RXCSUM) && !rxcsum) { 2261 + hw_if->enable_sph(pdata); 2262 + hw_if->enable_vxlan(pdata); 2261 2263 hw_if->enable_rx_csum(pdata); 2262 - else if (!(features & NETIF_F_RXCSUM) && rxcsum) 2264 + schedule_work(&pdata->restart_work); 2265 + } else if (!(features & NETIF_F_RXCSUM) && rxcsum) { 2266 + hw_if->disable_sph(pdata); 2267 + hw_if->disable_vxlan(pdata); 2263 2268 hw_if->disable_rx_csum(pdata); 2269 + schedule_work(&pdata->restart_work); 2270 + } 2264 2271 2265 2272 if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan) 2266 2273 hw_if->enable_rx_vlan_stripping(pdata);
+4
drivers/net/ethernet/amd/xgbe/xgbe.h
··· 865 865 void (*enable_vxlan)(struct xgbe_prv_data *); 866 866 void (*disable_vxlan)(struct xgbe_prv_data *); 867 867 void (*set_vxlan_id)(struct xgbe_prv_data *); 868 + 869 + /* For Split Header */ 870 + void (*enable_sph)(struct xgbe_prv_data *pdata); 871 + void (*disable_sph)(struct xgbe_prv_data *pdata); 868 872 }; 869 873 870 874 /* This structure represents implementation specific routines for an
+20 -15
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 2015 2015 } 2016 2016 return skb; 2017 2017 vlan_err: 2018 + skb_mark_for_recycle(skb); 2018 2019 dev_kfree_skb(skb); 2019 2020 return NULL; 2020 2021 } ··· 3415 3414 3416 3415 bnxt_free_one_tx_ring_skbs(bp, txr, i); 3417 3416 } 3417 + 3418 + if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) 3419 + bnxt_ptp_free_txts_skbs(bp->ptp_cfg); 3418 3420 } 3419 3421 3420 3422 static void bnxt_free_one_rx_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) ··· 11603 11599 poll_fn = bnxt_poll_p5; 11604 11600 else if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 11605 11601 cp_nr_rings--; 11602 + 11603 + set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state); 11604 + 11606 11605 for (i = 0; i < cp_nr_rings; i++) { 11607 11606 bnapi = bp->bnapi[i]; 11608 11607 netif_napi_add_config_locked(bp->dev, &bnapi->napi, poll_fn, ··· 12325 12318 { 12326 12319 struct hwrm_func_drv_if_change_output *resp; 12327 12320 struct hwrm_func_drv_if_change_input *req; 12328 - bool fw_reset = !bp->irq_tbl; 12329 12321 bool resc_reinit = false; 12330 12322 bool caps_change = false; 12331 12323 int rc, retry = 0; 12324 + bool fw_reset; 12332 12325 u32 flags = 0; 12326 + 12327 + fw_reset = (bp->fw_reset_state == BNXT_FW_RESET_STATE_ABORT); 12328 + bp->fw_reset_state = 0; 12333 12329 12334 12330 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE)) 12335 12331 return 0; ··· 12402 12392 set_bit(BNXT_STATE_ABORT_ERR, &bp->state); 12403 12393 return rc; 12404 12394 } 12395 + /* IRQ will be initialized later in bnxt_request_irq()*/ 12405 12396 bnxt_clear_int_mode(bp); 12406 - rc = bnxt_init_int_mode(bp); 12407 - if (rc) { 12408 - clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state); 12409 - netdev_err(bp->dev, "init int mode failed\n"); 12410 - return rc; 12411 - } 12412 12397 } 12413 12398 rc = bnxt_cancel_reservations(bp, fw_reset); 12414 12399 } ··· 12802 12797 /* VF-reps may need to be re-opened after the PF is re-opened */ 12803 12798 if (BNXT_PF(bp)) 12804 12799 bnxt_vf_reps_open(bp); 12805 - if (bp->ptp_cfg && !(bp->fw_cap & 
BNXT_FW_CAP_TX_TS_CMP)) 12806 - WRITE_ONCE(bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS); 12807 12800 bnxt_ptp_init_rtc(bp, true); 12808 12801 bnxt_ptp_cfg_tstamp_filters(bp); 12809 12802 if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) ··· 14836 14833 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 14837 14834 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) 14838 14835 bnxt_dl_health_fw_status_update(bp, false); 14839 - bp->fw_reset_state = 0; 14836 + bp->fw_reset_state = BNXT_FW_RESET_STATE_ABORT; 14840 14837 netif_close(bp->dev); 14841 14838 } 14842 14839 ··· 16006 16003 16007 16004 bnxt_rdma_aux_device_del(bp); 16008 16005 16009 - bnxt_ptp_clear(bp); 16010 16006 unregister_netdev(dev); 16007 + bnxt_ptp_clear(bp); 16011 16008 16012 16009 bnxt_rdma_aux_device_uninit(bp); 16013 16010 ··· 16934 16931 if (!err) 16935 16932 result = PCI_ERS_RESULT_RECOVERED; 16936 16933 16934 + /* IRQ will be initialized later in bnxt_io_resume */ 16937 16935 bnxt_ulp_irq_stop(bp); 16938 16936 bnxt_clear_int_mode(bp); 16939 - err = bnxt_init_int_mode(bp); 16940 - bnxt_ulp_irq_restart(bp, err); 16941 16937 } 16942 16938 16943 16939 reset_exit: ··· 16965 16963 16966 16964 err = bnxt_hwrm_func_qcaps(bp); 16967 16965 if (!err) { 16968 - if (netif_running(netdev)) 16966 + if (netif_running(netdev)) { 16969 16967 err = bnxt_open(netdev); 16970 - else 16968 + } else { 16971 16969 err = bnxt_reserve_rings(bp, true); 16970 + if (!err) 16971 + err = bnxt_init_int_mode(bp); 16972 + } 16972 16973 } 16973 16974 16974 16975 if (!err)
+1
drivers/net/ethernet/broadcom/bnxt/bnxt.h
··· 2614 2614 #define BNXT_FW_RESET_STATE_POLL_FW 4 2615 2615 #define BNXT_FW_RESET_STATE_OPENING 5 2616 2616 #define BNXT_FW_RESET_STATE_POLL_FW_DOWN 6 2617 + #define BNXT_FW_RESET_STATE_ABORT 7 2617 2618 2618 2619 u16 fw_reset_min_dsecs; 2619 2620 #define BNXT_DFLT_FW_RST_MIN_DSECS 20
+20 -10
drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
··· 110 110 } 111 111 } 112 112 113 - if (info->dest_buf) { 114 - if ((info->seg_start + off + len) <= 115 - BNXT_COREDUMP_BUF_LEN(info->buf_len)) { 116 - memcpy(info->dest_buf + off, dma_buf, len); 117 - } else { 118 - rc = -ENOBUFS; 119 - break; 120 - } 121 - } 122 - 123 113 if (cmn_req->req_type == 124 114 cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE)) 125 115 info->dest_buf_size += len; 116 + 117 + if (info->dest_buf) { 118 + if ((info->seg_start + off + len) <= 119 + BNXT_COREDUMP_BUF_LEN(info->buf_len)) { 120 + u16 copylen = min_t(u16, len, 121 + info->dest_buf_size - off); 122 + 123 + memcpy(info->dest_buf + off, dma_buf, copylen); 124 + if (copylen < len) 125 + break; 126 + } else { 127 + rc = -ENOBUFS; 128 + if (cmn_req->req_type == 129 + cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) { 130 + kfree(info->dest_buf); 131 + info->dest_buf = NULL; 132 + } 133 + break; 134 + } 135 + } 126 136 127 137 if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE)) 128 138 break;
+32 -6
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
··· 2062 2062 return reg_len; 2063 2063 } 2064 2064 2065 + #define BNXT_PCIE_32B_ENTRY(start, end) \ 2066 + { offsetof(struct pcie_ctx_hw_stats, start), \ 2067 + offsetof(struct pcie_ctx_hw_stats, end) } 2068 + 2069 + static const struct { 2070 + u16 start; 2071 + u16 end; 2072 + } bnxt_pcie_32b_entries[] = { 2073 + BNXT_PCIE_32B_ENTRY(pcie_ltssm_histogram[0], pcie_ltssm_histogram[3]), 2074 + }; 2075 + 2065 2076 static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs, 2066 2077 void *_p) 2067 2078 { ··· 2105 2094 req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr); 2106 2095 rc = hwrm_req_send(bp, req); 2107 2096 if (!rc) { 2108 - __le64 *src = (__le64 *)hw_pcie_stats; 2109 - u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN); 2110 - int i; 2097 + u8 *dst = (u8 *)(_p + BNXT_PXP_REG_LEN); 2098 + u8 *src = (u8 *)hw_pcie_stats; 2099 + int i, j; 2111 2100 2112 - for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++) 2113 - dst[i] = le64_to_cpu(src[i]); 2101 + for (i = 0, j = 0; i < sizeof(*hw_pcie_stats); ) { 2102 + if (i >= bnxt_pcie_32b_entries[j].start && 2103 + i <= bnxt_pcie_32b_entries[j].end) { 2104 + u32 *dst32 = (u32 *)(dst + i); 2105 + 2106 + *dst32 = le32_to_cpu(*(__le32 *)(src + i)); 2107 + i += 4; 2108 + if (i > bnxt_pcie_32b_entries[j].end && 2109 + j < ARRAY_SIZE(bnxt_pcie_32b_entries) - 1) 2110 + j++; 2111 + } else { 2112 + u64 *dst64 = (u64 *)(dst + i); 2113 + 2114 + *dst64 = le64_to_cpu(*(__le64 *)(src + i)); 2115 + i += 8; 2116 + } 2117 + } 2114 2118 } 2115 2119 hwrm_req_drop(bp, req); 2116 2120 } ··· 5017 4991 if (!bp->num_tests || !BNXT_PF(bp)) 5018 4992 return; 5019 4993 4994 + memset(buf, 0, sizeof(u64) * bp->num_tests); 5020 4995 if (etest->flags & ETH_TEST_FL_OFFLINE && 5021 4996 bnxt_ulp_registered(bp->edev)) { 5022 4997 etest->flags |= ETH_TEST_FL_FAILED; ··· 5025 4998 return; 5026 4999 } 5027 5000 5028 - memset(buf, 0, sizeof(u64) * bp->num_tests); 5029 5001 if (!netif_running(dev)) { 5030 5002 etest->flags |= 
ETH_TEST_FL_FAILED; 5031 5003 return;
+21 -8
drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
··· 794 794 return HZ; 795 795 } 796 796 797 + void bnxt_ptp_free_txts_skbs(struct bnxt_ptp_cfg *ptp) 798 + { 799 + struct bnxt_ptp_tx_req *txts_req; 800 + u16 cons = ptp->txts_cons; 801 + 802 + /* make sure ptp aux worker finished with 803 + * possible BNXT_STATE_OPEN set 804 + */ 805 + ptp_cancel_worker_sync(ptp->ptp_clock); 806 + 807 + ptp->tx_avail = BNXT_MAX_TX_TS; 808 + while (cons != ptp->txts_prod) { 809 + txts_req = &ptp->txts_req[cons]; 810 + if (!IS_ERR_OR_NULL(txts_req->tx_skb)) 811 + dev_kfree_skb_any(txts_req->tx_skb); 812 + cons = NEXT_TXTS(cons); 813 + } 814 + ptp->txts_cons = cons; 815 + ptp_schedule_worker(ptp->ptp_clock, 0); 816 + } 817 + 797 818 int bnxt_ptp_get_txts_prod(struct bnxt_ptp_cfg *ptp, u16 *prod) 798 819 { 799 820 spin_lock_bh(&ptp->ptp_tx_lock); ··· 1126 1105 void bnxt_ptp_clear(struct bnxt *bp) 1127 1106 { 1128 1107 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 1129 - int i; 1130 1108 1131 1109 if (!ptp) 1132 1110 return; ··· 1136 1116 ptp->ptp_clock = NULL; 1137 1117 kfree(ptp->ptp_info.pin_config); 1138 1118 ptp->ptp_info.pin_config = NULL; 1139 - 1140 - for (i = 0; i < BNXT_MAX_TX_TS; i++) { 1141 - if (ptp->txts_req[i].tx_skb) { 1142 - dev_kfree_skb_any(ptp->txts_req[i].tx_skb); 1143 - ptp->txts_req[i].tx_skb = NULL; 1144 - } 1145 - } 1146 1119 1147 1120 bnxt_unmap_ptp_regs(bp); 1148 1121 }
+1
drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
··· 162 162 void bnxt_ptp_reapply_pps(struct bnxt *bp); 163 163 int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr); 164 164 int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr); 165 + void bnxt_ptp_free_txts_skbs(struct bnxt_ptp_cfg *ptp); 165 166 int bnxt_ptp_get_txts_prod(struct bnxt_ptp_cfg *ptp, u16 *prod); 166 167 void bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb, u16 prod); 167 168 int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts);
+1 -1
drivers/net/ethernet/dlink/dl2k.c
··· 352 352 eth_hw_addr_set(dev, psrom->mac_addr); 353 353 354 354 if (np->chip_id == CHIP_IP1000A) { 355 - np->led_mode = psrom->led_mode; 355 + np->led_mode = le16_to_cpu(psrom->led_mode); 356 356 return 0; 357 357 } 358 358
+1 -1
drivers/net/ethernet/dlink/dl2k.h
··· 335 335 u16 sub_system_id; /* 0x06 */ 336 336 u16 pci_base_1; /* 0x08 (IP1000A only) */ 337 337 u16 pci_base_2; /* 0x0a (IP1000A only) */ 338 - u16 led_mode; /* 0x0c (IP1000A only) */ 338 + __le16 led_mode; /* 0x0c (IP1000A only) */ 339 339 u16 reserved1[9]; /* 0x0e-0x1f */ 340 340 u8 mac_addr[6]; /* 0x20-0x25 */ 341 341 u8 reserved2[10]; /* 0x26-0x2f */
+6 -1
drivers/net/ethernet/freescale/fec_main.c
··· 714 714 txq->bd.cur = bdp; 715 715 716 716 /* Trigger transmission start */ 717 - writel(0, txq->bd.reg_desc_active); 717 + if (!(fep->quirks & FEC_QUIRK_ERR007885) || 718 + !readl(txq->bd.reg_desc_active) || 719 + !readl(txq->bd.reg_desc_active) || 720 + !readl(txq->bd.reg_desc_active) || 721 + !readl(txq->bd.reg_desc_active)) 722 + writel(0, txq->bd.reg_desc_active); 718 723 719 724 return 0; 720 725 }
+1 -1
drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
··· 61 61 .name = "tm_qset", 62 62 .cmd = HNAE3_DBG_CMD_TM_QSET, 63 63 .dentry = HNS3_DBG_DENTRY_TM, 64 - .buf_len = HNS3_DBG_READ_LEN, 64 + .buf_len = HNS3_DBG_READ_LEN_1MB, 65 65 .init = hns3_dbg_common_file_init, 66 66 }, 67 67 {
+39 -43
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
··· 473 473 writel(mask_en, tqp_vector->mask_addr); 474 474 } 475 475 476 - static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector) 476 + static void hns3_irq_enable(struct hns3_enet_tqp_vector *tqp_vector) 477 477 { 478 478 napi_enable(&tqp_vector->napi); 479 479 enable_irq(tqp_vector->vector_irq); 480 - 481 - /* enable vector */ 482 - hns3_mask_vector_irq(tqp_vector, 1); 483 480 } 484 481 485 - static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector) 482 + static void hns3_irq_disable(struct hns3_enet_tqp_vector *tqp_vector) 486 483 { 487 - /* disable vector */ 488 - hns3_mask_vector_irq(tqp_vector, 0); 489 - 490 484 disable_irq(tqp_vector->vector_irq); 491 485 napi_disable(&tqp_vector->napi); 492 486 cancel_work_sync(&tqp_vector->rx_group.dim.work); ··· 701 707 return 0; 702 708 } 703 709 710 + static void hns3_enable_irqs_and_tqps(struct net_device *netdev) 711 + { 712 + struct hns3_nic_priv *priv = netdev_priv(netdev); 713 + struct hnae3_handle *h = priv->ae_handle; 714 + u16 i; 715 + 716 + for (i = 0; i < priv->vector_num; i++) 717 + hns3_irq_enable(&priv->tqp_vector[i]); 718 + 719 + for (i = 0; i < priv->vector_num; i++) 720 + hns3_mask_vector_irq(&priv->tqp_vector[i], 1); 721 + 722 + for (i = 0; i < h->kinfo.num_tqps; i++) 723 + hns3_tqp_enable(h->kinfo.tqp[i]); 724 + } 725 + 726 + static void hns3_disable_irqs_and_tqps(struct net_device *netdev) 727 + { 728 + struct hns3_nic_priv *priv = netdev_priv(netdev); 729 + struct hnae3_handle *h = priv->ae_handle; 730 + u16 i; 731 + 732 + for (i = 0; i < h->kinfo.num_tqps; i++) 733 + hns3_tqp_disable(h->kinfo.tqp[i]); 734 + 735 + for (i = 0; i < priv->vector_num; i++) 736 + hns3_mask_vector_irq(&priv->tqp_vector[i], 0); 737 + 738 + for (i = 0; i < priv->vector_num; i++) 739 + hns3_irq_disable(&priv->tqp_vector[i]); 740 + } 741 + 704 742 static int hns3_nic_net_up(struct net_device *netdev) 705 743 { 706 744 struct hns3_nic_priv *priv = netdev_priv(netdev); 707 745 struct hnae3_handle 
*h = priv->ae_handle; 708 - int i, j; 709 746 int ret; 710 747 711 748 ret = hns3_nic_reset_all_ring(h); ··· 745 720 746 721 clear_bit(HNS3_NIC_STATE_DOWN, &priv->state); 747 722 748 - /* enable the vectors */ 749 - for (i = 0; i < priv->vector_num; i++) 750 - hns3_vector_enable(&priv->tqp_vector[i]); 751 - 752 - /* enable rcb */ 753 - for (j = 0; j < h->kinfo.num_tqps; j++) 754 - hns3_tqp_enable(h->kinfo.tqp[j]); 723 + hns3_enable_irqs_and_tqps(netdev); 755 724 756 725 /* start the ae_dev */ 757 726 ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0; 758 727 if (ret) { 759 728 set_bit(HNS3_NIC_STATE_DOWN, &priv->state); 760 - while (j--) 761 - hns3_tqp_disable(h->kinfo.tqp[j]); 762 - 763 - for (j = i - 1; j >= 0; j--) 764 - hns3_vector_disable(&priv->tqp_vector[j]); 729 + hns3_disable_irqs_and_tqps(netdev); 765 730 } 766 731 767 732 return ret; ··· 838 823 static void hns3_nic_net_down(struct net_device *netdev) 839 824 { 840 825 struct hns3_nic_priv *priv = netdev_priv(netdev); 841 - struct hnae3_handle *h = hns3_get_handle(netdev); 842 826 const struct hnae3_ae_ops *ops; 843 - int i; 844 827 845 - /* disable vectors */ 846 - for (i = 0; i < priv->vector_num; i++) 847 - hns3_vector_disable(&priv->tqp_vector[i]); 848 - 849 - /* disable rcb */ 850 - for (i = 0; i < h->kinfo.num_tqps; i++) 851 - hns3_tqp_disable(h->kinfo.tqp[i]); 828 + hns3_disable_irqs_and_tqps(netdev); 852 829 853 830 /* stop ae_dev */ 854 831 ops = priv->ae_handle->ae_algo->ops; ··· 5871 5864 void hns3_external_lb_prepare(struct net_device *ndev, bool if_running) 5872 5865 { 5873 5866 struct hns3_nic_priv *priv = netdev_priv(ndev); 5874 - struct hnae3_handle *h = priv->ae_handle; 5875 - int i; 5876 5867 5877 5868 if (!if_running) 5878 5869 return; ··· 5881 5876 netif_carrier_off(ndev); 5882 5877 netif_tx_disable(ndev); 5883 5878 5884 - for (i = 0; i < priv->vector_num; i++) 5885 - hns3_vector_disable(&priv->tqp_vector[i]); 5886 - 5887 - for (i = 0; i < h->kinfo.num_tqps; i++) 5888 - 
hns3_tqp_disable(h->kinfo.tqp[i]); 5879 + hns3_disable_irqs_and_tqps(ndev); 5889 5880 5890 5881 /* delay ring buffer clearing to hns3_reset_notify_uninit_enet 5891 5882 * during reset process, because driver may not be able ··· 5897 5896 { 5898 5897 struct hns3_nic_priv *priv = netdev_priv(ndev); 5899 5898 struct hnae3_handle *h = priv->ae_handle; 5900 - int i; 5901 5899 5902 5900 if (!if_running) 5903 5901 return; ··· 5912 5912 5913 5913 clear_bit(HNS3_NIC_STATE_DOWN, &priv->state); 5914 5914 5915 - for (i = 0; i < priv->vector_num; i++) 5916 - hns3_vector_enable(&priv->tqp_vector[i]); 5917 - 5918 - for (i = 0; i < h->kinfo.num_tqps; i++) 5919 - hns3_tqp_enable(h->kinfo.tqp[i]); 5915 + hns3_enable_irqs_and_tqps(ndev); 5920 5916 5921 5917 netif_tx_wake_all_queues(ndev); 5922 5918
+7 -6
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
··· 440 440 ptp->info.settime64 = hclge_ptp_settime; 441 441 442 442 ptp->info.n_alarm = 0; 443 + 444 + spin_lock_init(&ptp->lock); 445 + ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET; 446 + ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE; 447 + ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF; 448 + hdev->ptp = ptp; 449 + 443 450 ptp->clock = ptp_clock_register(&ptp->info, &hdev->pdev->dev); 444 451 if (IS_ERR(ptp->clock)) { 445 452 dev_err(&hdev->pdev->dev, ··· 457 450 dev_err(&hdev->pdev->dev, "failed to register ptp clock\n"); 458 451 return -ENODEV; 459 452 } 460 - 461 - spin_lock_init(&ptp->lock); 462 - ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET; 463 - ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE; 464 - ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF; 465 - hdev->ptp = ptp; 466 453 467 454 return 0; 468 455 }
+19 -6
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
··· 1292 1292 rtnl_unlock(); 1293 1293 } 1294 1294 1295 - static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) 1295 + static int hclgevf_en_hw_strip_rxvtag_cmd(struct hclgevf_dev *hdev, bool enable) 1296 1296 { 1297 - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1298 1297 struct hclge_vf_to_pf_msg send_msg; 1299 1298 1300 1299 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 1301 1300 HCLGE_MBX_VLAN_RX_OFF_CFG); 1302 1301 send_msg.data[0] = enable ? 1 : 0; 1303 1302 return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 1303 + } 1304 + 1305 + static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) 1306 + { 1307 + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1308 + int ret; 1309 + 1310 + ret = hclgevf_en_hw_strip_rxvtag_cmd(hdev, enable); 1311 + if (ret) 1312 + return ret; 1313 + 1314 + hdev->rxvtag_strip_en = enable; 1315 + return 0; 1304 1316 } 1305 1317 1306 1318 static int hclgevf_reset_tqp(struct hnae3_handle *handle) ··· 2216 2204 tc_valid, tc_size); 2217 2205 } 2218 2206 2219 - static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) 2207 + static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev, 2208 + bool rxvtag_strip_en) 2220 2209 { 2221 2210 struct hnae3_handle *nic = &hdev->nic; 2222 2211 int ret; 2223 2212 2224 - ret = hclgevf_en_hw_strip_rxvtag(nic, true); 2213 + ret = hclgevf_en_hw_strip_rxvtag(nic, rxvtag_strip_en); 2225 2214 if (ret) { 2226 2215 dev_err(&hdev->pdev->dev, 2227 2216 "failed to enable rx vlan offload, ret = %d\n", ret); ··· 2892 2879 if (ret) 2893 2880 return ret; 2894 2881 2895 - ret = hclgevf_init_vlan_config(hdev); 2882 + ret = hclgevf_init_vlan_config(hdev, hdev->rxvtag_strip_en); 2896 2883 if (ret) { 2897 2884 dev_err(&hdev->pdev->dev, 2898 2885 "failed(%d) to initialize VLAN config\n", ret); ··· 3007 2994 goto err_config; 3008 2995 } 3009 2996 3010 - ret = hclgevf_init_vlan_config(hdev); 2997 + ret = hclgevf_init_vlan_config(hdev, 
true); 3011 2998 if (ret) { 3012 2999 dev_err(&hdev->pdev->dev, 3013 3000 "failed(%d) to initialize VLAN config\n", ret);
+1
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
··· 253 253 int *vector_irq; 254 254 255 255 bool gro_en; 256 + bool rxvtag_strip_en; 256 257 257 258 unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)]; 258 259
+5 -5
drivers/net/ethernet/intel/ice/ice_ddp.c
··· 2345 2345 cmd->set_flags |= ICE_AQC_TX_TOPO_FLAGS_SRC_RAM | 2346 2346 ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW; 2347 2347 2348 - if (hw->mac_type == ICE_MAC_GENERIC_3K_E825) 2349 - desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 2348 + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 2350 2349 } else { 2351 2350 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_tx_topo); 2352 2351 cmd->get_flags = ICE_AQC_TX_TOPO_GET_RAM; 2353 - } 2354 2352 2355 - if (hw->mac_type != ICE_MAC_GENERIC_3K_E825) 2356 - desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 2353 + if (hw->mac_type == ICE_MAC_E810 || 2354 + hw->mac_type == ICE_MAC_GENERIC) 2355 + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 2356 + } 2357 2357 2358 2358 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 2359 2359 if (status)
+5
drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
··· 2097 2097 pf = vf->pf; 2098 2098 dev = ice_pf_to_dev(pf); 2099 2099 vf_vsi = ice_get_vf_vsi(vf); 2100 + if (!vf_vsi) { 2101 + dev_err(dev, "Can not get FDIR vf_vsi for VF %u\n", vf->vf_id); 2102 + v_ret = VIRTCHNL_STATUS_ERR_PARAM; 2103 + goto err_exit; 2104 + } 2100 2105 2101 2106 #define ICE_VF_MAX_FDIR_FILTERS 128 2102 2107 if (!ice_fdir_num_avail_fltr(&pf->hw, vf_vsi) ||
+8 -10
drivers/net/ethernet/intel/idpf/idpf.h
··· 629 629 VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4 |\ 630 630 VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6) 631 631 632 - #define IDPF_CAP_RX_CSUM_L4V4 (\ 633 - VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP |\ 634 - VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP) 632 + #define IDPF_CAP_TX_CSUM_L4V4 (\ 633 + VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP |\ 634 + VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP) 635 635 636 - #define IDPF_CAP_RX_CSUM_L4V6 (\ 637 - VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP |\ 638 - VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP) 636 + #define IDPF_CAP_TX_CSUM_L4V6 (\ 637 + VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP |\ 638 + VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP) 639 639 640 640 #define IDPF_CAP_RX_CSUM (\ 641 641 VIRTCHNL2_CAP_RX_CSUM_L3_IPV4 |\ ··· 644 644 VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP |\ 645 645 VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP) 646 646 647 - #define IDPF_CAP_SCTP_CSUM (\ 647 + #define IDPF_CAP_TX_SCTP_CSUM (\ 648 648 VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP |\ 649 - VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP |\ 650 - VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP |\ 651 - VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP) 649 + VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP) 652 650 653 651 #define IDPF_CAP_TUNNEL_TX_CSUM (\ 654 652 VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL |\
+30 -46
drivers/net/ethernet/intel/idpf/idpf_lib.c
··· 703 703 { 704 704 struct idpf_adapter *adapter = vport->adapter; 705 705 struct idpf_vport_config *vport_config; 706 + netdev_features_t other_offloads = 0; 707 + netdev_features_t csum_offloads = 0; 708 + netdev_features_t tso_offloads = 0; 706 709 netdev_features_t dflt_features; 707 - netdev_features_t offloads = 0; 708 710 struct idpf_netdev_priv *np; 709 711 struct net_device *netdev; 710 712 u16 idx = vport->idx; ··· 768 766 769 767 if (idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) 770 768 dflt_features |= NETIF_F_RXHASH; 771 - if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V4)) 772 - dflt_features |= NETIF_F_IP_CSUM; 773 - if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V6)) 774 - dflt_features |= NETIF_F_IPV6_CSUM; 769 + if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V4)) 770 + csum_offloads |= NETIF_F_IP_CSUM; 771 + if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V6)) 772 + csum_offloads |= NETIF_F_IPV6_CSUM; 775 773 if (idpf_is_cap_ena(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM)) 776 - dflt_features |= NETIF_F_RXCSUM; 777 - if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_SCTP_CSUM)) 778 - dflt_features |= NETIF_F_SCTP_CRC; 774 + csum_offloads |= NETIF_F_RXCSUM; 775 + if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_SCTP_CSUM)) 776 + csum_offloads |= NETIF_F_SCTP_CRC; 779 777 780 778 if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV4_TCP)) 781 - dflt_features |= NETIF_F_TSO; 779 + tso_offloads |= NETIF_F_TSO; 782 780 if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV6_TCP)) 783 - dflt_features |= NETIF_F_TSO6; 781 + tso_offloads |= NETIF_F_TSO6; 784 782 if (idpf_is_cap_ena_all(adapter, IDPF_SEG_CAPS, 785 783 VIRTCHNL2_CAP_SEG_IPV4_UDP | 786 784 VIRTCHNL2_CAP_SEG_IPV6_UDP)) 787 - dflt_features |= NETIF_F_GSO_UDP_L4; 785 + tso_offloads |= NETIF_F_GSO_UDP_L4; 788 786 if (idpf_is_cap_ena_all(adapter, 
IDPF_RSC_CAPS, IDPF_CAP_RSC)) 789 - offloads |= NETIF_F_GRO_HW; 790 - /* advertise to stack only if offloads for encapsulated packets is 791 - * supported 792 - */ 793 - if (idpf_is_cap_ena(vport->adapter, IDPF_SEG_CAPS, 794 - VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL)) { 795 - offloads |= NETIF_F_GSO_UDP_TUNNEL | 796 - NETIF_F_GSO_GRE | 797 - NETIF_F_GSO_GRE_CSUM | 798 - NETIF_F_GSO_PARTIAL | 799 - NETIF_F_GSO_UDP_TUNNEL_CSUM | 800 - NETIF_F_GSO_IPXIP4 | 801 - NETIF_F_GSO_IPXIP6 | 802 - 0; 803 - 804 - if (!idpf_is_cap_ena_all(vport->adapter, IDPF_CSUM_CAPS, 805 - IDPF_CAP_TUNNEL_TX_CSUM)) 806 - netdev->gso_partial_features |= 807 - NETIF_F_GSO_UDP_TUNNEL_CSUM; 808 - 809 - netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; 810 - offloads |= NETIF_F_TSO_MANGLEID; 811 - } 787 + other_offloads |= NETIF_F_GRO_HW; 812 788 if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_LOOPBACK)) 813 - offloads |= NETIF_F_LOOPBACK; 789 + other_offloads |= NETIF_F_LOOPBACK; 814 790 815 - netdev->features |= dflt_features; 816 - netdev->hw_features |= dflt_features | offloads; 817 - netdev->hw_enc_features |= dflt_features | offloads; 791 + netdev->features |= dflt_features | csum_offloads | tso_offloads; 792 + netdev->hw_features |= netdev->features | other_offloads; 793 + netdev->vlan_features |= netdev->features | other_offloads; 794 + netdev->hw_enc_features |= dflt_features | other_offloads; 818 795 idpf_set_ethtool_ops(netdev); 819 796 netif_set_affinity_auto(netdev); 820 797 SET_NETDEV_DEV(netdev, &adapter->pdev->dev); ··· 1113 1132 1114 1133 num_max_q = max(max_q->max_txq, max_q->max_rxq); 1115 1134 vport->q_vector_idxs = kcalloc(num_max_q, sizeof(u16), GFP_KERNEL); 1116 - if (!vport->q_vector_idxs) { 1117 - kfree(vport); 1135 + if (!vport->q_vector_idxs) 1136 + goto free_vport; 1118 1137 1119 - return NULL; 1120 - } 1121 1138 idpf_vport_init(vport, max_q); 1122 1139 1123 1140 /* This alloc is done separate from the LUT because it's not strictly ··· 1125 1146 */ 1126 1147 
rss_data = &adapter->vport_config[idx]->user_config.rss_data; 1127 1148 rss_data->rss_key = kzalloc(rss_data->rss_key_size, GFP_KERNEL); 1128 - if (!rss_data->rss_key) { 1129 - kfree(vport); 1149 + if (!rss_data->rss_key) 1150 + goto free_vector_idxs; 1130 1151 1131 - return NULL; 1132 - } 1133 1152 /* Initialize default rss key */ 1134 1153 netdev_rss_key_fill((void *)rss_data->rss_key, rss_data->rss_key_size); 1135 1154 ··· 1140 1163 adapter->next_vport = idpf_get_free_slot(adapter); 1141 1164 1142 1165 return vport; 1166 + 1167 + free_vector_idxs: 1168 + kfree(vport->q_vector_idxs); 1169 + free_vport: 1170 + kfree(vport); 1171 + 1172 + return NULL; 1143 1173 } 1144 1174 1145 1175 /**
+1
drivers/net/ethernet/intel/idpf/idpf_main.c
··· 89 89 { 90 90 struct idpf_adapter *adapter = pci_get_drvdata(pdev); 91 91 92 + cancel_delayed_work_sync(&adapter->serv_task); 92 93 cancel_delayed_work_sync(&adapter->vc_event_task); 93 94 idpf_vc_core_deinit(adapter); 94 95 idpf_deinit_dflt_mbx(adapter);
+4 -2
drivers/net/ethernet/intel/igc/igc_ptp.c
··· 1290 1290 /* reset the tstamp_config */ 1291 1291 igc_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config); 1292 1292 1293 + mutex_lock(&adapter->ptm_lock); 1294 + 1293 1295 spin_lock_irqsave(&adapter->tmreg_lock, flags); 1294 1296 1295 1297 switch (adapter->hw.mac.type) { ··· 1310 1308 if (!igc_is_crosststamp_supported(adapter)) 1311 1309 break; 1312 1310 1313 - mutex_lock(&adapter->ptm_lock); 1314 1311 wr32(IGC_PCIE_DIG_DELAY, IGC_PCIE_DIG_DELAY_DEFAULT); 1315 1312 wr32(IGC_PCIE_PHY_DELAY, IGC_PCIE_PHY_DELAY_DEFAULT); 1316 1313 ··· 1333 1332 netdev_err(adapter->netdev, "Timeout reading IGC_PTM_STAT register\n"); 1334 1333 1335 1334 igc_ptm_reset(hw); 1336 - mutex_unlock(&adapter->ptm_lock); 1337 1335 break; 1338 1336 default: 1339 1337 /* No work to do. */ ··· 1348 1348 } 1349 1349 out: 1350 1350 spin_unlock_irqrestore(&adapter->tmreg_lock, flags); 1351 + 1352 + mutex_unlock(&adapter->ptm_lock); 1351 1353 1352 1354 wrfl(); 1353 1355 }
+1 -1
drivers/net/ethernet/marvell/octeon_ep/octep_main.c
··· 1223 1223 miss_cnt); 1224 1224 rtnl_lock(); 1225 1225 if (netif_running(oct->netdev)) 1226 - octep_stop(oct->netdev); 1226 + dev_close(oct->netdev); 1227 1227 rtnl_unlock(); 1228 1228 } 1229 1229
+3 -1
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
··· 835 835 struct octep_vf_device *oct = netdev_priv(netdev); 836 836 837 837 netdev_hold(netdev, NULL, GFP_ATOMIC); 838 - schedule_work(&oct->tx_timeout_task); 838 + if (!schedule_work(&oct->tx_timeout_task)) 839 + netdev_put(netdev, NULL); 840 + 839 841 } 840 842 841 843 static int octep_vf_set_mac(struct net_device *netdev, void *p)
+9 -9
drivers/net/ethernet/mediatek/mtk_eth_soc.c
··· 269 269 "ethwarp_wocpu2", 270 270 "ethwarp_wocpu1", 271 271 "ethwarp_wocpu0", 272 - "top_usxgmii0_sel", 273 - "top_usxgmii1_sel", 274 272 "top_sgm0_sel", 275 273 "top_sgm1_sel", 276 - "top_xfi_phy0_xtal_sel", 277 - "top_xfi_phy1_xtal_sel", 278 274 "top_eth_gmii_sel", 279 275 "top_eth_refck_50m_sel", 280 276 "top_eth_sys_200m_sel", ··· 2248 2252 ring->data[idx] = new_data; 2249 2253 rxd->rxd1 = (unsigned int)dma_addr; 2250 2254 release_desc: 2255 + if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) { 2256 + if (unlikely(dma_addr == DMA_MAPPING_ERROR)) 2257 + addr64 = FIELD_GET(RX_DMA_ADDR64_MASK, 2258 + rxd->rxd2); 2259 + else 2260 + addr64 = RX_DMA_PREP_ADDR64(dma_addr); 2261 + } 2262 + 2251 2263 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) 2252 2264 rxd->rxd2 = RX_DMA_LSO; 2253 2265 else 2254 - rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size); 2255 - 2256 - if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA) && 2257 - likely(dma_addr != DMA_MAPPING_ERROR)) 2258 - rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr); 2266 + rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size) | addr64; 2259 2267 2260 2268 ring->calc_idx = idx; 2261 2269 done++;
+7 -6
drivers/net/ethernet/mediatek/mtk_star_emac.c
··· 1163 1163 struct net_device *ndev = priv->ndev; 1164 1164 unsigned int head = ring->head; 1165 1165 unsigned int entry = ring->tail; 1166 + unsigned long flags; 1166 1167 1167 1168 while (entry != head && count < (MTK_STAR_RING_NUM_DESCS - 1)) { 1168 1169 ret = mtk_star_tx_complete_one(priv); ··· 1183 1182 netif_wake_queue(ndev); 1184 1183 1185 1184 if (napi_complete(napi)) { 1186 - spin_lock(&priv->lock); 1185 + spin_lock_irqsave(&priv->lock, flags); 1187 1186 mtk_star_enable_dma_irq(priv, false, true); 1188 - spin_unlock(&priv->lock); 1187 + spin_unlock_irqrestore(&priv->lock, flags); 1189 1188 } 1190 1189 1191 1190 return 0; ··· 1342 1341 static int mtk_star_rx_poll(struct napi_struct *napi, int budget) 1343 1342 { 1344 1343 struct mtk_star_priv *priv; 1344 + unsigned long flags; 1345 1345 int work_done = 0; 1346 1346 1347 1347 priv = container_of(napi, struct mtk_star_priv, rx_napi); 1348 1348 1349 1349 work_done = mtk_star_rx(priv, budget); 1350 - if (work_done < budget) { 1351 - napi_complete_done(napi, work_done); 1352 - spin_lock(&priv->lock); 1350 + if (work_done < budget && napi_complete_done(napi, work_done)) { 1351 + spin_lock_irqsave(&priv->lock, flags); 1353 1352 mtk_star_enable_dma_irq(priv, true, false); 1354 - spin_unlock(&priv->lock); 1353 + spin_unlock_irqrestore(&priv->lock, flags); 1355 1354 } 1356 1355 1357 1356 return work_done;
+2 -4
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
··· 176 176 177 177 priv = ptpsq->txqsq.priv; 178 178 179 + rtnl_lock(); 179 180 mutex_lock(&priv->state_lock); 180 181 chs = &priv->channels; 181 182 netdev = priv->netdev; ··· 184 183 carrier_ok = netif_carrier_ok(netdev); 185 184 netif_carrier_off(netdev); 186 185 187 - rtnl_lock(); 188 186 mlx5e_deactivate_priv_channels(priv); 189 - rtnl_unlock(); 190 187 191 188 mlx5e_ptp_close(chs->ptp); 192 189 err = mlx5e_ptp_open(priv, &chs->params, chs->c[0]->lag_port, &chs->ptp); 193 190 194 - rtnl_lock(); 195 191 mlx5e_activate_priv_channels(priv); 196 - rtnl_unlock(); 197 192 198 193 /* return carrier back if needed */ 199 194 if (carrier_ok) 200 195 netif_carrier_on(netdev); 201 196 202 197 mutex_unlock(&priv->state_lock); 198 + rtnl_unlock(); 203 199 204 200 return err; 205 201 }
+29 -3
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
··· 165 165 struct flow_match_enc_keyid enc_keyid; 166 166 void *misc_c, *misc_v; 167 167 168 - misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); 169 - misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); 170 - 171 168 if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) 172 169 return 0; 173 170 ··· 179 182 err = mlx5e_tc_tun_parse_vxlan_gbp_option(priv, spec, f); 180 183 if (err) 181 184 return err; 185 + 186 + /* We can't mix custom tunnel headers with symbolic ones and we 187 + * don't have a symbolic field name for GBP, so we use custom 188 + * tunnel headers in this case. We need hardware support to 189 + * match on custom tunnel headers, but we already know it's 190 + * supported because the previous call successfully checked for 191 + * that. 192 + */ 193 + misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 194 + misc_parameters_5); 195 + misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, 196 + misc_parameters_5); 197 + 198 + /* Shift by 8 to account for the reserved bits in the vxlan 199 + * header after the VNI. 200 + */ 201 + MLX5_SET(fte_match_set_misc5, misc_c, tunnel_header_1, 202 + be32_to_cpu(enc_keyid.mask->keyid) << 8); 203 + MLX5_SET(fte_match_set_misc5, misc_v, tunnel_header_1, 204 + be32_to_cpu(enc_keyid.key->keyid) << 8); 205 + 206 + spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_5; 207 + 208 + return 0; 182 209 } 183 210 184 211 /* match on VNI is required */ ··· 215 194 "Matching on VXLAN VNI is not supported\n"); 216 195 return -EOPNOTSUPP; 217 196 } 197 + 198 + misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 199 + misc_parameters); 200 + misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, 201 + misc_parameters); 218 202 219 203 MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni, 220 204 be32_to_cpu(enc_keyid.mask->keyid));
+1 -4
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 1750 1750 !list_is_first(&attr->list, &flow->attrs)) 1751 1751 return 0; 1752 1752 1753 - if (flow_flag_test(flow, SLOW)) 1754 - return 0; 1755 - 1756 1753 esw_attr = attr->esw_attr; 1757 1754 if (!esw_attr->split_count || 1758 1755 esw_attr->split_count == esw_attr->out_count - 1) ··· 1763 1766 for (i = esw_attr->split_count; i < esw_attr->out_count; i++) { 1764 1767 /* external dest with encap is considered as internal by firmware */ 1765 1768 if (esw_attr->dests[i].vport == MLX5_VPORT_UPLINK && 1766 - !(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID)) 1769 + !(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP)) 1767 1770 ext_dest = true; 1768 1771 else 1769 1772 int_dest = true;
+4 -1
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
··· 3533 3533 int err; 3534 3534 3535 3535 mutex_init(&esw->offloads.termtbl_mutex); 3536 - mlx5_rdma_enable_roce(esw->dev); 3536 + err = mlx5_rdma_enable_roce(esw->dev); 3537 + if (err) 3538 + goto err_roce; 3537 3539 3538 3540 err = mlx5_esw_host_number_init(esw); 3539 3541 if (err) ··· 3596 3594 esw_offloads_metadata_uninit(esw); 3597 3595 err_metadata: 3598 3596 mlx5_rdma_disable_roce(esw->dev); 3597 + err_roce: 3599 3598 mutex_destroy(&esw->offloads.termtbl_mutex); 3600 3599 return err; 3601 3600 }
+6 -5
drivers/net/ethernet/mellanox/mlx5/core/rdma.c
··· 118 118 119 119 static int mlx5_rdma_add_roce_addr(struct mlx5_core_dev *dev) 120 120 { 121 + u8 mac[ETH_ALEN] = {}; 121 122 union ib_gid gid; 122 - u8 mac[ETH_ALEN]; 123 123 124 124 mlx5_rdma_make_default_gid(dev, &gid); 125 125 return mlx5_core_roce_gid_set(dev, 0, ··· 140 140 mlx5_nic_vport_disable_roce(dev); 141 141 } 142 142 143 - void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) 143 + int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) 144 144 { 145 145 int err; 146 146 147 147 if (!MLX5_CAP_GEN(dev, roce)) 148 - return; 148 + return 0; 149 149 150 150 err = mlx5_nic_vport_enable_roce(dev); 151 151 if (err) { 152 152 mlx5_core_err(dev, "Failed to enable RoCE: %d\n", err); 153 - return; 153 + return err; 154 154 } 155 155 156 156 err = mlx5_rdma_add_roce_addr(dev); ··· 165 165 goto del_roce_addr; 166 166 } 167 167 168 - return; 168 + return err; 169 169 170 170 del_roce_addr: 171 171 mlx5_rdma_del_roce_addr(dev); 172 172 disable_roce: 173 173 mlx5_nic_vport_disable_roce(dev); 174 + return err; 174 175 }
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/rdma.h
··· 8 8 9 9 #ifdef CONFIG_MLX5_ESWITCH 10 10 11 - void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev); 11 + int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev); 12 12 void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev); 13 13 14 14 #else /* CONFIG_MLX5_ESWITCH */ 15 15 16 - static inline void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) {} 16 + static inline int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) { return 0; } 17 17 static inline void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev) {} 18 18 19 19 #endif /* CONFIG_MLX5_ESWITCH */
+6 -2
drivers/net/ethernet/microchip/lan743x_main.c
··· 1815 1815 if (nr_frags <= 0) { 1816 1816 tx->frame_data0 |= TX_DESC_DATA0_LS_; 1817 1817 tx->frame_data0 |= TX_DESC_DATA0_IOC_; 1818 + tx->frame_last = tx->frame_first; 1818 1819 } 1819 1820 tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; 1820 1821 tx_descriptor->data0 = cpu_to_le32(tx->frame_data0); ··· 1885 1884 tx->frame_first = 0; 1886 1885 tx->frame_data0 = 0; 1887 1886 tx->frame_tail = 0; 1887 + tx->frame_last = 0; 1888 1888 return -ENOMEM; 1889 1889 } 1890 1890 ··· 1926 1924 TX_DESC_DATA0_DTYPE_DATA_) { 1927 1925 tx->frame_data0 |= TX_DESC_DATA0_LS_; 1928 1926 tx->frame_data0 |= TX_DESC_DATA0_IOC_; 1927 + tx->frame_last = tx->frame_tail; 1929 1928 } 1930 1929 1931 - tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; 1932 - buffer_info = &tx->buffer_info[tx->frame_tail]; 1930 + tx_descriptor = &tx->ring_cpu_ptr[tx->frame_last]; 1931 + buffer_info = &tx->buffer_info[tx->frame_last]; 1933 1932 buffer_info->skb = skb; 1934 1933 if (time_stamp) 1935 1934 buffer_info->flags |= TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED; 1936 1935 if (ignore_sync) 1937 1936 buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC; 1938 1937 1938 + tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; 1939 1939 tx_descriptor->data0 = cpu_to_le32(tx->frame_data0); 1940 1940 tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail); 1941 1941 tx->last_tail = tx->frame_tail;
+1
drivers/net/ethernet/microchip/lan743x_main.h
··· 980 980 u32 frame_first; 981 981 u32 frame_data0; 982 982 u32 frame_tail; 983 + u32 frame_last; 983 984 984 985 struct lan743x_tx_buffer_info *buffer_info; 985 986
+6
drivers/net/ethernet/mscc/ocelot.c
··· 830 830 int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid, 831 831 bool untagged) 832 832 { 833 + struct ocelot_port *ocelot_port = ocelot->ports[port]; 833 834 int err; 834 835 835 836 /* Ignore VID 0 added to our RX filter by the 8021q module, since ··· 848 847 if (pvid) { 849 848 err = ocelot_port_set_pvid(ocelot, port, 850 849 ocelot_bridge_vlan_find(ocelot, vid)); 850 + if (err) 851 + return err; 852 + } else if (ocelot_port->pvid_vlan && 853 + ocelot_bridge_vlan_find(ocelot, vid) == ocelot_port->pvid_vlan) { 854 + err = ocelot_port_set_pvid(ocelot, port, NULL); 851 855 if (err) 852 856 return err; 853 857 }
+2 -2
drivers/net/ethernet/realtek/rtase/rtase_main.c
··· 1925 1925 1926 1926 time_us = min_t(int, time_us, RTASE_MITI_MAX_TIME); 1927 1927 1928 - msb = fls(time_us); 1929 - if (msb >= RTASE_MITI_COUNT_BIT_NUM) { 1928 + if (time_us > RTASE_MITI_TIME_COUNT_MASK) { 1929 + msb = fls(time_us); 1930 1930 time_unit = msb - RTASE_MITI_COUNT_BIT_NUM; 1931 1931 time_count = time_us >> (msb - RTASE_MITI_COUNT_BIT_NUM); 1932 1932 } else {
+29 -7
drivers/net/ethernet/vertexcom/mse102x.c
··· 6 6 7 7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 8 8 9 + #include <linux/if_vlan.h> 9 10 #include <linux/interrupt.h> 10 11 #include <linux/module.h> 11 12 #include <linux/kernel.h> ··· 34 33 #define CMD_CTR (0x2 << CMD_SHIFT) 35 34 36 35 #define CMD_MASK GENMASK(15, CMD_SHIFT) 37 - #define LEN_MASK GENMASK(CMD_SHIFT - 1, 0) 36 + #define LEN_MASK GENMASK(CMD_SHIFT - 2, 0) 38 37 39 38 #define DET_CMD_LEN 4 40 39 #define DET_SOF_LEN 2 ··· 263 262 } 264 263 265 264 static int mse102x_rx_frame_spi(struct mse102x_net *mse, u8 *buff, 266 - unsigned int frame_len) 265 + unsigned int frame_len, bool drop) 267 266 { 268 267 struct mse102x_net_spi *mses = to_mse102x_spi(mse); 269 268 struct spi_transfer *xfer = &mses->spi_xfer; ··· 281 280 netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n", 282 281 __func__, ret); 283 282 mse->stats.xfer_err++; 283 + } else if (drop) { 284 + netdev_dbg(mse->ndev, "%s: Drop frame\n", __func__); 285 + ret = -EINVAL; 284 286 } else if (*sof != cpu_to_be16(DET_SOF)) { 285 287 netdev_dbg(mse->ndev, "%s: SPI start of frame is invalid (0x%04x)\n", 286 288 __func__, *sof); ··· 311 307 struct sk_buff *skb; 312 308 unsigned int rxalign; 313 309 unsigned int rxlen; 310 + bool drop = false; 314 311 __be16 rx = 0; 315 312 u16 cmd_resp; 316 313 u8 *rxpkt; ··· 334 329 net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n", 335 330 __func__, cmd_resp); 336 331 mse->stats.invalid_rts++; 337 - return; 332 + drop = true; 333 + goto drop; 338 334 } 339 335 340 336 net_dbg_ratelimited("%s: Unexpected response to first CMD\n", ··· 343 337 } 344 338 345 339 rxlen = cmd_resp & LEN_MASK; 346 - if (!rxlen) { 347 - net_dbg_ratelimited("%s: No frame length defined\n", __func__); 340 + if (rxlen < ETH_ZLEN || rxlen > VLAN_ETH_FRAME_LEN) { 341 + net_dbg_ratelimited("%s: Invalid frame length: %d\n", __func__, 342 + rxlen); 348 343 mse->stats.invalid_len++; 349 - return; 344 + drop = true; 350 345 } 346 + 347 + /* In case of a invalid CMD_RTS, the frame must be 
consumed anyway. 348 + * So assume the maximum possible frame length. 349 + */ 350 + drop: 351 + if (drop) 352 + rxlen = VLAN_ETH_FRAME_LEN; 351 353 352 354 rxalign = ALIGN(rxlen + DET_SOF_LEN + DET_DFT_LEN, 4); 353 355 skb = netdev_alloc_skb_ip_align(mse->ndev, rxalign); ··· 367 353 * They are copied, but ignored. 368 354 */ 369 355 rxpkt = skb_put(skb, rxlen) - DET_SOF_LEN; 370 - if (mse102x_rx_frame_spi(mse, rxpkt, rxlen)) { 356 + if (mse102x_rx_frame_spi(mse, rxpkt, rxlen, drop)) { 371 357 mse->ndev->stats.rx_errors++; 372 358 dev_kfree_skb(skb); 373 359 return; ··· 523 509 static int mse102x_net_open(struct net_device *ndev) 524 510 { 525 511 struct mse102x_net *mse = netdev_priv(ndev); 512 + struct mse102x_net_spi *mses = to_mse102x_spi(mse); 526 513 int ret; 527 514 528 515 ret = request_threaded_irq(ndev->irq, NULL, mse102x_irq, IRQF_ONESHOT, ··· 538 523 netif_start_queue(ndev); 539 524 540 525 netif_carrier_on(ndev); 526 + 527 + /* The SPI interrupt can stuck in case of pending packet(s). 528 + * So poll for possible packet(s) to re-arm the interrupt. 529 + */ 530 + mutex_lock(&mses->lock); 531 + mse102x_rx_pkt_spi(mse); 532 + mutex_unlock(&mses->lock); 541 533 542 534 netif_dbg(mse, ifup, ndev, "network device up\n"); 543 535
+2 -1
drivers/net/mdio/mdio-mux-meson-gxl.c
··· 17 17 #define REG2_LEDACT GENMASK(23, 22) 18 18 #define REG2_LEDLINK GENMASK(25, 24) 19 19 #define REG2_DIV4SEL BIT(27) 20 + #define REG2_REVERSED BIT(28) 20 21 #define REG2_ADCBYPASS BIT(30) 21 22 #define REG2_CLKINSEL BIT(31) 22 23 #define ETH_REG3 0x4 ··· 66 65 * The only constraint is that it must match the one in 67 66 * drivers/net/phy/meson-gxl.c to properly match the PHY. 68 67 */ 69 - writel(FIELD_PREP(REG2_PHYID, EPHY_GXL_ID), 68 + writel(REG2_REVERSED | FIELD_PREP(REG2_PHYID, EPHY_GXL_ID), 70 69 priv->regs + ETH_REG2); 71 70 72 71 /* Enable the internal phy */
+2 -14
drivers/net/usb/rndis_host.c
··· 630 630 .tx_fixup = rndis_tx_fixup, 631 631 }; 632 632 633 - static const struct driver_info wwan_rndis_info = { 634 - .description = "Mobile Broadband RNDIS device", 635 - .flags = FLAG_WWAN | FLAG_POINTTOPOINT | FLAG_FRAMING_RN | FLAG_NO_SETINT, 636 - .bind = rndis_bind, 637 - .unbind = rndis_unbind, 638 - .status = rndis_status, 639 - .rx_fixup = rndis_rx_fixup, 640 - .tx_fixup = rndis_tx_fixup, 641 - }; 642 - 643 633 /*-------------------------------------------------------------------------*/ 644 634 645 635 static const struct usb_device_id products [] = { ··· 666 676 USB_INTERFACE_INFO(USB_CLASS_WIRELESS_CONTROLLER, 1, 3), 667 677 .driver_info = (unsigned long) &rndis_info, 668 678 }, { 669 - /* Mobile Broadband Modem, seen in Novatel Verizon USB730L and 670 - * Telit FN990A (RNDIS) 671 - */ 679 + /* Novatel Verizon USB730L */ 672 680 USB_INTERFACE_INFO(USB_CLASS_MISC, 4, 1), 673 - .driver_info = (unsigned long)&wwan_rndis_info, 681 + .driver_info = (unsigned long) &rndis_info, 674 682 }, 675 683 { }, // END 676 684 };
+1 -1
drivers/net/vmxnet3/vmxnet3_xdp.c
··· 397 397 398 398 xdp_init_buff(&xdp, PAGE_SIZE, &rq->xdp_rxq); 399 399 xdp_prepare_buff(&xdp, page_address(page), rq->page_pool->p.offset, 400 - rbi->len, false); 400 + rcd->len, false); 401 401 xdp_buff_clear_frags_flag(&xdp); 402 402 403 403 xdp_prog = rcu_dereference(rq->adapter->xdp_bpf_prog);
+7 -1
drivers/net/vxlan/vxlan_vnifilter.c
··· 627 627 * default dst remote_ip previously added for this vni 628 628 */ 629 629 if (!vxlan_addr_any(&vninode->remote_ip) || 630 - !vxlan_addr_any(&dst->remote_ip)) 630 + !vxlan_addr_any(&dst->remote_ip)) { 631 + u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, 632 + vninode->vni); 633 + 634 + spin_lock_bh(&vxlan->hash_lock[hash_index]); 631 635 __vxlan_fdb_delete(vxlan, all_zeros_mac, 632 636 (vxlan_addr_any(&vninode->remote_ip) ? 633 637 dst->remote_ip : vninode->remote_ip), ··· 639 635 vninode->vni, vninode->vni, 640 636 dst->remote_ifindex, 641 637 true); 638 + spin_unlock_bh(&vxlan->hash_lock[hash_index]); 639 + } 642 640 643 641 if (vxlan->dev->flags & IFF_UP) { 644 642 if (vxlan_addr_multicast(&vninode->remote_ip) &&
+4 -2
drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
··· 896 896 } 897 897 898 898 /* 1) Prepare USB boot loader for runtime image */ 899 - brcmf_usb_dl_cmd(devinfo, DL_START, &state, sizeof(state)); 899 + err = brcmf_usb_dl_cmd(devinfo, DL_START, &state, sizeof(state)); 900 + if (err) 901 + goto fail; 900 902 901 903 rdlstate = le32_to_cpu(state.state); 902 904 rdlbytes = le32_to_cpu(state.bytes); 903 905 904 906 /* 2) Check we are in the Waiting state */ 905 907 if (rdlstate != DL_WAITING) { 906 - brcmf_err("Failed to DL_START\n"); 908 + brcmf_err("Invalid DL state: %u\n", rdlstate); 907 909 err = -EINVAL; 908 910 goto fail; 909 911 }
-2
drivers/net/wireless/intel/iwlwifi/cfg/sc.c
··· 142 142 .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US, 143 143 }; 144 144 145 - const char iwl_sp_name[] = "Intel(R) Wi-Fi 7 BE213 160MHz"; 146 - 147 145 const struct iwl_cfg iwl_cfg_sc = { 148 146 .fw_name_mac = "sc", 149 147 IWL_DEVICE_SC,
+9 -7
drivers/net/wireless/intel/iwlwifi/iwl-config.h
··· 2 2 /* 3 3 * Copyright (C) 2005-2014, 2018-2021 Intel Corporation 4 4 * Copyright (C) 2016-2017 Intel Deutschland GmbH 5 - * Copyright (C) 2018-2025 Intel Corporation 5 + * Copyright (C) 2018-2024 Intel Corporation 6 6 */ 7 7 #ifndef __IWL_CONFIG_H__ 8 8 #define __IWL_CONFIG_H__ ··· 451 451 #define IWL_CFG_RF_ID_HR 0x7 452 452 #define IWL_CFG_RF_ID_HR1 0x4 453 453 454 - #define IWL_CFG_BW_NO_LIM (U16_MAX - 1) 455 - #define IWL_CFG_BW_ANY U16_MAX 454 + #define IWL_CFG_NO_160 0x1 455 + #define IWL_CFG_160 0x0 456 + 457 + #define IWL_CFG_NO_320 0x1 458 + #define IWL_CFG_320 0x0 456 459 457 460 #define IWL_CFG_CORES_BT 0x0 458 461 #define IWL_CFG_CORES_BT_GNSS 0x5 ··· 467 464 #define IWL_CFG_IS_JACKET 0x1 468 465 469 466 #define IWL_SUBDEVICE_RF_ID(subdevice) ((u16)((subdevice) & 0x00F0) >> 4) 470 - #define IWL_SUBDEVICE_BW_LIM(subdevice) ((u16)((subdevice) & 0x0200) >> 9) 467 + #define IWL_SUBDEVICE_NO_160(subdevice) ((u16)((subdevice) & 0x0200) >> 9) 471 468 #define IWL_SUBDEVICE_CORES(subdevice) ((u16)((subdevice) & 0x1C00) >> 10) 472 469 473 470 struct iwl_dev_info { ··· 475 472 u16 subdevice; 476 473 u16 mac_type; 477 474 u16 rf_type; 478 - u16 bw_limit; 479 475 u8 mac_step; 480 476 u8 rf_step; 481 477 u8 rf_id; 478 + u8 no_160; 482 479 u8 cores; 483 480 u8 cdb; 484 481 u8 jacket; ··· 492 489 const struct iwl_dev_info * 493 490 iwl_pci_find_dev_info(u16 device, u16 subsystem_device, 494 491 u16 mac_type, u8 mac_step, u16 rf_type, u8 cdb, 495 - u8 jacket, u8 rf_id, u8 bw_limit, u8 cores, u8 rf_step); 492 + u8 jacket, u8 rf_id, u8 no_160, u8 cores, u8 rf_step); 496 493 extern const struct pci_device_id iwl_hw_card_ids[]; 497 494 #endif 498 495 ··· 553 550 extern const char iwl_ax411_name[]; 554 551 extern const char iwl_fm_name[]; 555 552 extern const char iwl_wh_name[]; 556 - extern const char iwl_sp_name[]; 557 553 extern const char iwl_gl_name[]; 558 554 extern const char iwl_mtp_name[]; 559 555 extern const char iwl_dr_name[];
+1
drivers/net/wireless/intel/iwlwifi/iwl-csr.h
··· 148 148 * during a error FW error. 149 149 */ 150 150 #define CSR_FUNC_SCRATCH_INIT_VALUE (0x01010101) 151 + #define CSR_FUNC_SCRATCH_POWER_OFF_MASK 0xFFFF 151 152 152 153 /* Bits for CSR_HW_IF_CONFIG_REG */ 153 154 #define CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP_DASH (0x0000000F)
+6 -10
drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 2 /* 3 - * Copyright (C) 2005-2014, 2018-2023, 2025 Intel Corporation 3 + * Copyright (C) 2005-2014, 2018-2023 Intel Corporation 4 4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH 5 5 * Copyright (C) 2016-2017 Intel Deutschland GmbH 6 6 */ ··· 944 944 IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK); 945 945 break; 946 946 case NL80211_BAND_6GHZ: 947 - if (!trans->reduced_cap_sku && 948 - trans->bw_limit >= 320) { 947 + if (!trans->reduced_cap_sku) { 949 948 iftype_data->eht_cap.eht_cap_elem.phy_cap_info[0] |= 950 949 IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ; 951 950 iftype_data->eht_cap.eht_cap_elem.phy_cap_info[1] |= ··· 1094 1095 iftype_data->eht_cap.eht_mcs_nss_supp.bw._320.rx_tx_mcs13_max_nss = 0; 1095 1096 } 1096 1097 1097 - if (trans->bw_limit < 160) 1098 + if (trans->no_160) 1098 1099 iftype_data->he_cap.he_cap_elem.phy_cap_info[0] &= 1099 1100 ~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G; 1100 1101 1101 - if (trans->bw_limit < 320 || trans->reduced_cap_sku) { 1102 + if (trans->reduced_cap_sku) { 1102 1103 memset(&iftype_data->eht_cap.eht_mcs_nss_supp.bw._320, 0, 1103 1104 sizeof(iftype_data->eht_cap.eht_mcs_nss_supp.bw._320)); 1104 - iftype_data->eht_cap.eht_cap_elem.phy_cap_info[2] &= 1105 - ~IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK; 1106 - } 1107 - 1108 - if (trans->reduced_cap_sku) { 1109 1105 iftype_data->eht_cap.eht_mcs_nss_supp.bw._80.rx_tx_mcs13_max_nss = 0; 1110 1106 iftype_data->eht_cap.eht_mcs_nss_supp.bw._160.rx_tx_mcs13_max_nss = 0; 1111 1107 iftype_data->eht_cap.eht_cap_elem.phy_cap_info[8] &= 1112 1108 ~IEEE80211_EHT_PHY_CAP8_RX_4096QAM_WIDER_BW_DL_OFDMA; 1109 + iftype_data->eht_cap.eht_cap_elem.phy_cap_info[2] &= 1110 + ~IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK; 1113 1111 } 1114 1112 } 1115 1113
+21 -7
drivers/net/wireless/intel/iwlwifi/iwl-trans.c
··· 21 21 struct list_head list; 22 22 unsigned int restart_count; 23 23 time64_t last_error; 24 + bool backoff; 24 25 char name[]; 25 26 }; 26 27 ··· 126 125 if (!data) 127 126 return at_least; 128 127 129 - if (ktime_get_boottime_seconds() - data->last_error >= 128 + if (!data->backoff && 129 + ktime_get_boottime_seconds() - data->last_error >= 130 130 IWL_TRANS_RESET_OK_TIME) 131 131 data->restart_count = 0; 132 132 133 133 index = data->restart_count; 134 - if (index >= ARRAY_SIZE(escalation_list)) 134 + if (index >= ARRAY_SIZE(escalation_list)) { 135 135 index = ARRAY_SIZE(escalation_list) - 1; 136 + if (!data->backoff) { 137 + data->backoff = true; 138 + return IWL_RESET_MODE_BACKOFF; 139 + } 140 + data->backoff = false; 141 + } 136 142 137 143 return max(at_least, escalation_list[index]); 138 144 } ··· 148 140 149 141 static void iwl_trans_restart_wk(struct work_struct *wk) 150 142 { 151 - struct iwl_trans *trans = container_of(wk, typeof(*trans), restart.wk); 143 + struct iwl_trans *trans = container_of(wk, typeof(*trans), 144 + restart.wk.work); 152 145 struct iwl_trans_reprobe *reprobe; 153 146 enum iwl_reset_mode mode; 154 147 ··· 177 168 return; 178 169 179 170 mode = iwl_trans_determine_restart_mode(trans); 171 + if (mode == IWL_RESET_MODE_BACKOFF) { 172 + IWL_ERR(trans, "Too many device errors - delay next reset\n"); 173 + queue_delayed_work(system_unbound_wq, &trans->restart.wk, 174 + IWL_TRANS_RESET_DELAY); 175 + return; 176 + } 180 177 181 178 iwl_trans_inc_restart_count(trans->dev); 182 179 ··· 242 227 trans->dev = dev; 243 228 trans->num_rx_queues = 1; 244 229 245 - INIT_WORK(&trans->restart.wk, iwl_trans_restart_wk); 230 + INIT_DELAYED_WORK(&trans->restart.wk, iwl_trans_restart_wk); 246 231 247 232 return trans; 248 233 } ··· 286 271 287 272 void iwl_trans_free(struct iwl_trans *trans) 288 273 { 289 - cancel_work_sync(&trans->restart.wk); 274 + cancel_delayed_work_sync(&trans->restart.wk); 290 275 kmem_cache_destroy(trans->dev_cmd_pool); 291 276 
} 292 277 ··· 418 403 419 404 iwl_trans_pcie_op_mode_leave(trans); 420 405 421 - cancel_work_sync(&trans->restart.wk); 406 + cancel_delayed_work_sync(&trans->restart.wk); 422 407 423 408 trans->op_mode = NULL; 424 409 ··· 555 540 iwl_trans_release_nic_access(struct iwl_trans *trans) 556 541 { 557 542 iwl_trans_pcie_release_nic_access(trans); 558 - __release(nic_access); 559 543 } 560 544 IWL_EXPORT_SYMBOL(iwl_trans_release_nic_access); 561 545
+8 -6
drivers/net/wireless/intel/iwlwifi/iwl-trans.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ 2 2 /* 3 - * Copyright (C) 2005-2014, 2018-2023, 2025 Intel Corporation 3 + * Copyright (C) 2005-2014, 2018-2023 Intel Corporation 4 4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH 5 5 * Copyright (C) 2016-2017 Intel Deutschland GmbH 6 6 */ ··· 876 876 * only valid for discrete (not integrated) NICs 877 877 * @invalid_tx_cmd: invalid TX command buffer 878 878 * @reduced_cap_sku: reduced capability supported SKU 879 - * @bw_limit: the max bandwidth 879 + * @no_160: device not supporting 160 MHz 880 880 * @step_urm: STEP is in URM, no support for MCS>9 in 320 MHz 881 881 * @restart: restart worker data 882 882 * @restart.wk: restart worker ··· 911 911 char hw_id_str[52]; 912 912 u32 sku_id[3]; 913 913 bool reduced_cap_sku; 914 - u16 bw_limit; 915 - bool step_urm; 914 + u8 no_160:1, step_urm:1; 916 915 917 916 u8 dsbr_urm_fw_dependent:1, 918 917 dsbr_urm_permanent:1; ··· 961 962 struct iwl_dma_ptr invalid_tx_cmd; 962 963 963 964 struct { 964 - struct work_struct wk; 965 + struct delayed_work wk; 965 966 struct iwl_fw_error_dump_mode mode; 966 967 bool during_reset; 967 968 } restart; ··· 1162 1163 */ 1163 1164 trans->restart.during_reset = test_bit(STATUS_IN_SW_RESET, 1164 1165 &trans->status); 1165 - queue_work(system_unbound_wq, &trans->restart.wk); 1166 + queue_delayed_work(system_unbound_wq, &trans->restart.wk, 0); 1166 1167 } 1167 1168 1168 1169 static inline void iwl_trans_fw_error(struct iwl_trans *trans, ··· 1261 1262 IWL_RESET_MODE_RESCAN, 1262 1263 IWL_RESET_MODE_FUNC_RESET, 1263 1264 IWL_RESET_MODE_PROD_RESET, 1265 + 1266 + /* keep last - special backoff value */ 1267 + IWL_RESET_MODE_BACKOFF, 1264 1268 }; 1265 1269 1266 1270 void iwl_trans_pcie_reset(struct iwl_trans *trans, enum iwl_reset_mode mode);
+3 -3
drivers/net/wireless/intel/iwlwifi/mld/agg.c
··· 124 124 125 125 rcu_read_lock(); 126 126 baid_data = rcu_dereference(mld->fw_id_to_ba[baid]); 127 - if (!IWL_FW_CHECK(mld, !baid_data, 128 - "Got valid BAID %d but not allocated, invalid BAR release!\n", 129 - baid)) 127 + if (IWL_FW_CHECK(mld, !baid_data, 128 + "Got valid BAID %d but not allocated, invalid BAR release!\n", 129 + baid)) 130 130 goto out_unlock; 131 131 132 132 if (IWL_FW_CHECK(mld, tid != baid_data->tid ||
+3 -2
drivers/net/wireless/intel/iwlwifi/mld/debugfs.c
··· 949 949 snprintf(name, sizeof(name), "%pd", vif->debugfs_dir); 950 950 snprintf(target, sizeof(target), "../../../%pd3/iwlmld", 951 951 vif->debugfs_dir); 952 - mld_vif->dbgfs_slink = 953 - debugfs_create_symlink(name, mld->debugfs_dir, target); 952 + if (!mld_vif->dbgfs_slink) 953 + mld_vif->dbgfs_slink = 954 + debugfs_create_symlink(name, mld->debugfs_dir, target); 954 955 955 956 if (iwlmld_mod_params.power_scheme != IWL_POWER_SCHEME_CAM && 956 957 vif->type == NL80211_IFTYPE_STATION) {
+10 -3
drivers/net/wireless/intel/iwlwifi/mld/fw.c
··· 333 333 334 334 ret = iwl_trans_start_hw(mld->trans); 335 335 if (ret) 336 - return ret; 336 + goto err; 337 337 338 338 ret = iwl_mld_run_fw_init_sequence(mld); 339 339 if (ret) 340 - return ret; 340 + goto err; 341 341 342 342 ret = iwl_mld_init_mcc(mld); 343 343 if (ret) 344 - return ret; 344 + goto err; 345 345 346 346 mld->fw_status.running = true; 347 347 348 348 return 0; 349 + err: 350 + iwl_mld_stop_fw(mld); 351 + return ret; 349 352 } 350 353 351 354 void iwl_mld_stop_fw(struct iwl_mld *mld) ··· 360 357 iwl_fw_dbg_stop_sync(&mld->fwrt); 361 358 362 359 iwl_trans_stop_device(mld->trans); 360 + 361 + wiphy_work_cancel(mld->wiphy, &mld->async_handlers_wk); 362 + 363 + iwl_mld_purge_async_handlers_list(mld); 363 364 364 365 mld->fw_status.running = false; 365 366 }
+1
drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
··· 651 651 652 652 #ifdef CONFIG_IWLWIFI_DEBUGFS 653 653 debugfs_remove(iwl_mld_vif_from_mac80211(vif)->dbgfs_slink); 654 + iwl_mld_vif_from_mac80211(vif)->dbgfs_slink = NULL; 654 655 #endif 655 656 656 657 iwl_mld_rm_vif(mld, vif);
+9 -2
drivers/net/wireless/intel/iwlwifi/mld/mld.c
··· 75 75 76 76 /* Setup async RX handling */ 77 77 spin_lock_init(&mld->async_handlers_lock); 78 + INIT_LIST_HEAD(&mld->async_handlers_list); 78 79 wiphy_work_init(&mld->async_handlers_wk, 79 80 iwl_mld_async_handlers_wk); 80 81 ··· 415 414 wiphy_unlock(mld->wiphy); 416 415 rtnl_unlock(); 417 416 iwl_fw_flush_dumps(&mld->fwrt); 418 - goto free_hw; 417 + goto err; 419 418 } 419 + 420 + /* We are about to stop the FW. Notifications may require an 421 + * operational FW, so handle them all here before we stop. 422 + */ 423 + wiphy_work_flush(mld->wiphy, &mld->async_handlers_wk); 420 424 421 425 iwl_mld_stop_fw(mld); 422 426 ··· 461 455 iwl_mld_leds_exit(mld); 462 456 free_nvm: 463 457 kfree(mld->nvm_data); 464 - free_hw: 458 + err: 459 + iwl_trans_op_mode_leave(mld->trans); 465 460 ieee80211_free_hw(mld->hw); 466 461 return ERR_PTR(ret); 467 462 }
-5
drivers/net/wireless/intel/iwlwifi/mld/mld.h
··· 298 298 #endif 299 299 300 300 iwl_mld_low_latency_restart_cleanup(mld); 301 - 302 - /* Empty the list of async notification handlers so we won't process 303 - * notifications from the dead fw after the reconfig flow. 304 - */ 305 - iwl_mld_purge_async_handlers_list(mld); 306 301 } 307 302 308 303 enum iwl_power_scheme {
+119 -126
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 2 /* 3 - * Copyright (C) 2005-2014, 2018-2025 Intel Corporation 3 + * Copyright (C) 2005-2014, 2018-2024 Intel Corporation 4 4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH 5 5 * Copyright (C) 2016-2017 Intel Deutschland GmbH 6 6 */ ··· 552 552 EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_hw_card_ids); 553 553 554 554 #define _IWL_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \ 555 - _rf_id, _rf_step, _bw_limit, _cores, _cdb, _cfg, _name) \ 555 + _rf_id, _rf_step, _no_160, _cores, _cdb, _cfg, _name) \ 556 556 { .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg), \ 557 557 .name = _name, .mac_type = _mac_type, .rf_type = _rf_type, .rf_step = _rf_step, \ 558 - .bw_limit = _bw_limit, .cores = _cores, .rf_id = _rf_id, \ 558 + .no_160 = _no_160, .cores = _cores, .rf_id = _rf_id, \ 559 559 .mac_step = _mac_step, .cdb = _cdb, .jacket = IWL_CFG_ANY } 560 560 561 561 #define IWL_DEV_INFO(_device, _subdevice, _cfg, _name) \ 562 562 _IWL_DEV_INFO(_device, _subdevice, IWL_CFG_ANY, IWL_CFG_ANY, \ 563 - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, \ 564 - IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_ANY, \ 565 - _cfg, _name) 563 + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, \ 564 + IWL_CFG_ANY, _cfg, _name) 566 565 567 566 VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = { 568 567 #if IS_ENABLED(CONFIG_IWLMVM) ··· 724 725 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 725 726 IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, 726 727 IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, 727 - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 728 + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 728 729 iwl9560_2ac_cfg_soc, iwl9461_160_name), 729 730 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 730 731 IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, 731 732 IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, 732 - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 733 + IWL_CFG_NO_160, IWL_CFG_CORES_BT, 
IWL_CFG_NO_CDB, 733 734 iwl9560_2ac_cfg_soc, iwl9461_name), 734 735 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 735 736 IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, 736 737 IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, 737 - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 738 + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 738 739 iwl9560_2ac_cfg_soc, iwl9462_160_name), 739 740 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 740 741 IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, 741 742 IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, 742 - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 743 + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 743 744 iwl9560_2ac_cfg_soc, iwl9462_name), 744 745 745 746 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 746 747 IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, 747 748 IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, 748 - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 749 + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 749 750 iwl9560_2ac_cfg_soc, iwl9560_160_name), 750 751 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 751 752 IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, 752 753 IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, 753 - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 754 + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 754 755 iwl9560_2ac_cfg_soc, iwl9560_name), 755 756 756 757 _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, 757 758 IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, 758 759 IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY, 759 - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB, 760 + IWL_CFG_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB, 760 761 iwl9260_2ac_cfg, iwl9270_160_name), 761 762 _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, 762 763 IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, 763 764 IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY, 764 - 80, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB, 765 + IWL_CFG_NO_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB, 765 766 iwl9260_2ac_cfg, iwl9270_name), 766 767 767 768 _IWL_DEV_INFO(0x271B, IWL_CFG_ANY, 768 769 IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, 769 770 
IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY, IWL_CFG_ANY, 770 - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 771 + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 771 772 iwl9260_2ac_cfg, iwl9162_160_name), 772 773 _IWL_DEV_INFO(0x271B, IWL_CFG_ANY, 773 774 IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, 774 775 IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY, IWL_CFG_ANY, 775 - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 776 + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 776 777 iwl9260_2ac_cfg, iwl9162_name), 777 778 778 779 _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, 779 780 IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, 780 781 IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY, 781 - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 782 + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 782 783 iwl9260_2ac_cfg, iwl9260_160_name), 783 784 _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, 784 785 IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, 785 786 IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY, 786 - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 787 + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 787 788 iwl9260_2ac_cfg, iwl9260_name), 788 789 789 790 /* Qu with Jf */ ··· 791 792 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 792 793 IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, 793 794 IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, 794 - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 795 + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 795 796 iwl9560_qu_b0_jf_b0_cfg, iwl9461_160_name), 796 797 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 797 798 IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, 798 799 IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, 799 - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 800 + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 800 801 iwl9560_qu_b0_jf_b0_cfg, iwl9461_name), 801 802 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 802 803 IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, 803 804 IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, 804 - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 805 + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 805 806 
iwl9560_qu_b0_jf_b0_cfg, iwl9462_160_name), 806 807 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 807 808 IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, 808 809 IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, 809 - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 810 + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 810 811 iwl9560_qu_b0_jf_b0_cfg, iwl9462_name), 811 812 812 813 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 813 814 IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, 814 815 IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, 815 - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 816 + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 816 817 iwl9560_qu_b0_jf_b0_cfg, iwl9560_160_name), 817 818 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 818 819 IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, 819 820 IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, 820 - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 821 + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 821 822 iwl9560_qu_b0_jf_b0_cfg, iwl9560_name), 822 823 823 824 _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551, 824 825 IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, 825 826 IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, 826 - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 827 + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 827 828 iwl9560_qu_b0_jf_b0_cfg, iwl9560_killer_1550s_name), 828 829 _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552, 829 830 IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, 830 831 IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, 831 - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 832 + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 832 833 iwl9560_qu_b0_jf_b0_cfg, iwl9560_killer_1550i_name), 833 834 834 835 /* Qu C step */ 835 836 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 836 837 IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, 837 838 IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, 838 - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 839 + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 839 840 iwl9560_qu_c0_jf_b0_cfg, iwl9461_160_name), 840 841 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 
841 842 IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, 842 843 IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, 843 - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 844 + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 844 845 iwl9560_qu_c0_jf_b0_cfg, iwl9461_name), 845 846 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 846 847 IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, 847 848 IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, 848 - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 849 + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 849 850 iwl9560_qu_c0_jf_b0_cfg, iwl9462_160_name), 850 851 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 851 852 IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, 852 853 IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, 853 - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 854 + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 854 855 iwl9560_qu_c0_jf_b0_cfg, iwl9462_name), 855 856 856 857 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 857 858 IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, 858 859 IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, 859 - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 860 + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 860 861 iwl9560_qu_c0_jf_b0_cfg, iwl9560_160_name), 861 862 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 862 863 IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, 863 864 IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, 864 - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 865 + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 865 866 iwl9560_qu_c0_jf_b0_cfg, iwl9560_name), 866 867 867 868 _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551, 868 869 IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, 869 870 IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, 870 - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 871 + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 871 872 iwl9560_qu_c0_jf_b0_cfg, iwl9560_killer_1550s_name), 872 873 _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552, 873 874 IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, 874 875 IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, 875 - 80, 
IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 876 + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 876 877 iwl9560_qu_c0_jf_b0_cfg, iwl9560_killer_1550i_name), 877 878 878 879 /* QuZ */ 879 880 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 880 881 IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, 881 882 IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, 882 - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 883 + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 883 884 iwl9560_quz_a0_jf_b0_cfg, iwl9461_160_name), 884 885 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 885 886 IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, 886 887 IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, 887 - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 888 + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 888 889 iwl9560_quz_a0_jf_b0_cfg, iwl9461_name), 889 890 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 890 891 IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, 891 892 IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, 892 - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 893 + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 893 894 iwl9560_quz_a0_jf_b0_cfg, iwl9462_160_name), 894 895 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 895 896 IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, 896 897 IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, 897 - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 898 + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 898 899 iwl9560_quz_a0_jf_b0_cfg, iwl9462_name), 899 900 900 901 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 901 902 IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, 902 903 IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, 903 - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 904 + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 904 905 iwl9560_quz_a0_jf_b0_cfg, iwl9560_160_name), 905 906 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 906 907 IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, 907 908 IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, 908 - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 909 + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 909 
910 iwl9560_quz_a0_jf_b0_cfg, iwl9560_name), 910 911 911 912 _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551, 912 913 IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, 913 914 IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, 914 - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 915 + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 915 916 iwl9560_quz_a0_jf_b0_cfg, iwl9560_killer_1550s_name), 916 917 _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552, 917 918 IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, 918 919 IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, 919 - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 920 + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 920 921 iwl9560_quz_a0_jf_b0_cfg, iwl9560_killer_1550i_name), 921 922 922 923 /* Qu with Hr */ ··· 924 925 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 925 926 IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, 926 927 IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY, 927 - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, 928 + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, 928 929 iwl_qu_b0_hr1_b0, iwl_ax101_name), 929 930 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 930 931 IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, 931 932 IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, 932 - 80, IWL_CFG_ANY, IWL_CFG_NO_CDB, 933 + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, 933 934 iwl_qu_b0_hr_b0, iwl_ax203_name), 934 935 935 936 /* Qu C step */ 936 937 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 937 938 IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, 938 939 IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY, 939 - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, 940 + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, 940 941 iwl_qu_c0_hr1_b0, iwl_ax101_name), 941 942 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 942 943 IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, 943 944 IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, 944 - 80, IWL_CFG_ANY, IWL_CFG_NO_CDB, 945 + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, 945 946 iwl_qu_c0_hr_b0, iwl_ax203_name), 946 947 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 947 948 IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, 948 949 
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, 949 - IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB, 950 + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, 950 951 iwl_qu_c0_hr_b0, iwl_ax201_name), 951 952 952 953 /* QuZ */ 953 954 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 954 955 IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, 955 956 IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY, 956 - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, 957 + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, 957 958 iwl_quz_a0_hr1_b0, iwl_ax101_name), 958 959 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 959 960 IWL_CFG_MAC_TYPE_QUZ, SILICON_B_STEP, 960 961 IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, 961 - 80, IWL_CFG_ANY, IWL_CFG_NO_CDB, 962 + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, 962 963 iwl_cfg_quz_a0_hr_b0, iwl_ax203_name), 963 964 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 964 965 IWL_CFG_MAC_TYPE_QUZ, SILICON_B_STEP, 965 966 IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, 966 - IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB, 967 + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, 967 968 iwl_cfg_quz_a0_hr_b0, iwl_ax201_name), 968 969 969 970 /* Ma */ 970 971 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 971 972 IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, 972 973 IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, 973 - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, 974 + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, 974 975 iwl_cfg_ma, iwl_ax201_name), 975 976 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 976 977 IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, 977 978 IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, 978 - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, 979 + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, 979 980 iwl_cfg_ma, iwl_ax211_name), 980 981 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 981 982 IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, 982 983 IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, 983 - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, 984 + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, 984 985 iwl_cfg_ma, iwl_ax231_name), 985 986 986 987 /* So with Hr */ 987 988 
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 988 989 IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, 989 990 IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, 990 - 80, IWL_CFG_ANY, IWL_CFG_NO_CDB, 991 + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, 991 992 iwl_cfg_so_a0_hr_a0, iwl_ax203_name), 992 993 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 993 994 IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, 994 995 IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY, 995 - 80, IWL_CFG_ANY, IWL_CFG_NO_CDB, 996 + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, 996 997 iwl_cfg_so_a0_hr_a0, iwl_ax101_name), 997 998 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 998 999 IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, 999 1000 IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, 1000 - IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB, 1001 + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, 1001 1002 iwl_cfg_so_a0_hr_a0, iwl_ax201_name), 1002 1003 1003 1004 /* So-F with Hr */ 1004 1005 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1005 1006 IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, 1006 1007 IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, 1007 - 80, IWL_CFG_ANY, IWL_CFG_NO_CDB, 1008 + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, 1008 1009 iwl_cfg_so_a0_hr_a0, iwl_ax203_name), 1009 1010 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1010 1011 IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, 1011 1012 IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY, 1012 - 80, IWL_CFG_ANY, IWL_CFG_NO_CDB, 1013 + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, 1013 1014 iwl_cfg_so_a0_hr_a0, iwl_ax101_name), 1014 1015 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1015 1016 IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, 1016 1017 IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, 1017 - IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB, 1018 + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, 1018 1019 iwl_cfg_so_a0_hr_a0, iwl_ax201_name), 1019 1020 1020 1021 /* So-F with Gf */ 1021 1022 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1022 1023 IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, 1023 1024 IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, 1024 - IWL_CFG_BW_NO_LIM, 
IWL_CFG_ANY, IWL_CFG_NO_CDB, 1025 + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, 1025 1026 iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name), 1026 1027 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1027 1028 IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, 1028 1029 IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, 1029 - IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_CDB, 1030 + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB, 1030 1031 iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name), 1031 1032 1032 1033 /* SoF with JF2 */ 1033 1034 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1034 1035 IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, 1035 1036 IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, 1036 - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 1037 + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 1037 1038 iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name), 1038 1039 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1039 1040 IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, 1040 1041 IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, 1041 - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 1042 + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 1042 1043 iwlax210_2ax_cfg_so_jf_b0, iwl9560_name), 1043 1044 1044 1045 /* SoF with JF */ 1045 1046 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1046 1047 IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, 1047 1048 IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, 1048 - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 1049 + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 1049 1050 iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name), 1050 1051 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1051 1052 IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, 1052 1053 IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, 1053 - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 1054 + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 1054 1055 iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name), 1055 1056 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1056 1057 IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, 1057 1058 IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, 1058 - 80, IWL_CFG_CORES_BT, 
IWL_CFG_NO_CDB, 1059 + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 1059 1060 iwlax210_2ax_cfg_so_jf_b0, iwl9461_name), 1060 1061 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1061 1062 IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, 1062 1063 IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, 1063 - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 1064 + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 1064 1065 iwlax210_2ax_cfg_so_jf_b0, iwl9462_name), 1065 1066 1066 1067 /* So with GF */ 1067 1068 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1068 1069 IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, 1069 1070 IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, 1070 - IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB, 1071 + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, 1071 1072 iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name), 1072 1073 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1073 1074 IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, 1074 1075 IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, 1075 - IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_CDB, 1076 + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB, 1076 1077 iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name), 1077 1078 1078 1079 /* So with JF2 */ 1079 1080 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1080 1081 IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, 1081 1082 IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, 1082 - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 1083 + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 1083 1084 iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name), 1084 1085 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1085 1086 IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, 1086 1087 IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, 1087 - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 1088 + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 1088 1089 iwlax210_2ax_cfg_so_jf_b0, iwl9560_name), 1089 1090 1090 1091 /* So with JF */ 1091 1092 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1092 1093 IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, 1093 1094 IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, 1094 - IWL_CFG_BW_NO_LIM, 
IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 1095 + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 1095 1096 iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name), 1096 1097 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1097 1098 IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, 1098 1099 IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, 1099 - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 1100 + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 1100 1101 iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name), 1101 1102 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1102 1103 IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, 1103 1104 IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, 1104 - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 1105 + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 1105 1106 iwlax210_2ax_cfg_so_jf_b0, iwl9461_name), 1106 1107 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1107 1108 IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, 1108 1109 IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, 1109 - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 1110 + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 1110 1111 iwlax210_2ax_cfg_so_jf_b0, iwl9462_name), 1111 1112 1112 1113 #endif /* CONFIG_IWLMVM */ ··· 1115 1116 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1116 1117 IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, 1117 1118 IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, 1118 - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, 1119 + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, 1119 1120 iwl_cfg_bz, iwl_ax201_name), 1120 1121 1121 1122 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1122 1123 IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, 1123 1124 IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, 1124 - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, 1125 + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, 1125 1126 iwl_cfg_bz, iwl_ax211_name), 1126 1127 1127 1128 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, ··· 1133 1134 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1134 1135 IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, 1135 1136 IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY, 1136 - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, 1137 
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, 1137 1138 iwl_cfg_bz, iwl_wh_name), 1138 1139 1139 1140 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1140 1141 IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY, 1141 1142 IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, 1142 - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, 1143 + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, 1143 1144 iwl_cfg_bz, iwl_ax201_name), 1144 1145 1145 1146 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1146 1147 IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY, 1147 1148 IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, 1148 - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, 1149 + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, 1149 1150 iwl_cfg_bz, iwl_ax211_name), 1150 1151 1151 1152 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1152 1153 IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY, 1153 1154 IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, 1154 - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, 1155 + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, 1155 1156 iwl_cfg_bz, iwl_fm_name), 1156 1157 1157 1158 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1158 1159 IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY, 1159 1160 IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY, 1160 - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, 1161 + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, 1161 1162 iwl_cfg_bz, iwl_wh_name), 1162 1163 1163 1164 /* Ga (Gl) */ 1164 1165 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1165 1166 IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, 1166 1167 IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, 1167 - IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB, 1168 + IWL_CFG_320, IWL_CFG_ANY, IWL_CFG_NO_CDB, 1168 1169 iwl_cfg_gl, iwl_gl_name), 1169 1170 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1170 1171 IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, 1171 1172 IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, 1172 - 160, IWL_CFG_ANY, IWL_CFG_NO_CDB, 1173 + IWL_CFG_NO_320, IWL_CFG_ANY, IWL_CFG_NO_CDB, 1173 1174 iwl_cfg_gl, iwl_mtp_name), 1174 1175 1175 1176 /* Sc */ 1176 1177 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1177 1178 IWL_CFG_MAC_TYPE_SC, IWL_CFG_ANY, 1178 1179 
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, 1179 - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, 1180 + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, 1180 1181 iwl_cfg_sc, iwl_ax211_name), 1181 1182 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1182 1183 IWL_CFG_MAC_TYPE_SC, IWL_CFG_ANY, 1183 1184 IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, 1184 - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, 1185 + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, 1185 1186 iwl_cfg_sc, iwl_fm_name), 1186 1187 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1187 1188 IWL_CFG_MAC_TYPE_SC, IWL_CFG_ANY, 1188 1189 IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY, 1189 - IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_ANY, 1190 + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, 1190 1191 iwl_cfg_sc, iwl_wh_name), 1191 - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1192 - IWL_CFG_MAC_TYPE_SC, IWL_CFG_ANY, 1193 - IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY, 1194 - 160, IWL_CFG_ANY, IWL_CFG_ANY, 1195 - iwl_cfg_sc, iwl_sp_name), 1196 1192 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1197 1193 IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY, 1198 1194 IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, 1199 - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, 1195 + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, 1200 1196 iwl_cfg_sc2, iwl_ax211_name), 1201 1197 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1202 1198 IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY, 1203 1199 IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, 1204 - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, 1200 + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, 1205 1201 iwl_cfg_sc2, iwl_fm_name), 1206 1202 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1207 1203 IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY, 1208 1204 IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY, 1209 - IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_ANY, 1205 + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, 1210 1206 iwl_cfg_sc2, iwl_wh_name), 1211 - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1212 - IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY, 1213 - IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY, 1214 - 160, IWL_CFG_ANY, IWL_CFG_ANY, 1215 - 
iwl_cfg_sc2, iwl_sp_name), 1216 1207 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1217 1208 IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY, 1218 1209 IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, 1219 - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, 1210 + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, 1220 1211 iwl_cfg_sc2f, iwl_ax211_name), 1221 1212 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1222 1213 IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY, 1223 1214 IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, 1224 - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, 1215 + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, 1225 1216 iwl_cfg_sc2f, iwl_fm_name), 1226 1217 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1227 1218 IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY, 1228 1219 IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY, 1229 - IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_ANY, 1220 + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, 1230 1221 iwl_cfg_sc2f, iwl_wh_name), 1231 - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1232 - IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY, 1233 - IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY, 1234 - 160, IWL_CFG_ANY, IWL_CFG_ANY, 1235 - iwl_cfg_sc2f, iwl_sp_name), 1236 1222 1237 1223 /* Dr */ 1238 1224 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1239 1225 IWL_CFG_MAC_TYPE_DR, IWL_CFG_ANY, 1240 1226 IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, 1241 - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, 1227 + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, 1242 1228 iwl_cfg_dr, iwl_dr_name), 1243 1229 1244 1230 /* Br */ 1245 1231 _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 1246 1232 IWL_CFG_MAC_TYPE_BR, IWL_CFG_ANY, 1247 1233 IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, 1248 - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, 1234 + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, 1249 1235 iwl_cfg_br, iwl_br_name), 1250 1236 #endif /* CONFIG_IWLMLD */ 1251 1237 }; ··· 1382 1398 VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info * 1383 1399 iwl_pci_find_dev_info(u16 device, u16 subsystem_device, 1384 1400 u16 mac_type, u8 mac_step, u16 rf_type, u8 cdb, 1385 - u8 jacket, u8 rf_id, u8 bw_limit, u8 
cores, u8 rf_step) 1401 + u8 jacket, u8 rf_id, u8 no_160, u8 cores, u8 rf_step) 1386 1402 { 1387 1403 int num_devices = ARRAY_SIZE(iwl_dev_info_table); 1388 1404 int i; ··· 1425 1441 dev_info->rf_id != rf_id) 1426 1442 continue; 1427 1443 1428 - /* 1429 - * Check that bw_limit have the same "boolean" value since 1430 - * IWL_SUBDEVICE_BW_LIM can only return a boolean value and 1431 - * dev_info->bw_limit encodes a non-boolean value. 1432 - * dev_info->bw_limit == IWL_CFG_BW_NO_LIM must be equal to 1433 - * !bw_limit to have a match. 1434 - */ 1435 - if (dev_info->bw_limit != IWL_CFG_BW_ANY && 1436 - (dev_info->bw_limit == IWL_CFG_BW_NO_LIM) == !!bw_limit) 1444 + if (dev_info->no_160 != (u8)IWL_CFG_ANY && 1445 + dev_info->no_160 != no_160) 1437 1446 continue; 1438 1447 1439 1448 if (dev_info->cores != (u8)IWL_CFG_ANY && ··· 1564 1587 CSR_HW_RFID_IS_CDB(iwl_trans->hw_rf_id), 1565 1588 CSR_HW_RFID_IS_JACKET(iwl_trans->hw_rf_id), 1566 1589 IWL_SUBDEVICE_RF_ID(pdev->subsystem_device), 1567 - IWL_SUBDEVICE_BW_LIM(pdev->subsystem_device), 1590 + IWL_SUBDEVICE_NO_160(pdev->subsystem_device), 1568 1591 IWL_SUBDEVICE_CORES(pdev->subsystem_device), 1569 1592 CSR_HW_RFID_STEP(iwl_trans->hw_rf_id)); 1570 1593 if (dev_info) { 1571 1594 iwl_trans->cfg = dev_info->cfg; 1572 1595 iwl_trans->name = dev_info->name; 1573 - iwl_trans->bw_limit = dev_info->bw_limit; 1596 + iwl_trans->no_160 = dev_info->no_160 == IWL_CFG_NO_160; 1574 1597 } 1575 1598 1576 1599 #if IS_ENABLED(CONFIG_IWLMVM) ··· 1736 1759 * Scratch value was altered, this means the device was powered off, we 1737 1760 * need to reset it completely. 1738 1761 * Note: MAC (bits 0:7) will be cleared upon suspend even with wowlan, 1739 - * so assume that any bits there mean that the device is usable. 1762 + * but not bits [15:8]. So if we have bits set in lower word, assume 1763 + * the device is alive. 1764 + * For older devices, just try silently to grab the NIC. 
1740 1765 */ 1741 - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ && 1742 - !iwl_read32(trans, CSR_FUNC_SCRATCH)) 1743 - device_was_powered_off = true; 1766 + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { 1767 + if (!(iwl_read32(trans, CSR_FUNC_SCRATCH) & 1768 + CSR_FUNC_SCRATCH_POWER_OFF_MASK)) 1769 + device_was_powered_off = true; 1770 + } else { 1771 + /* 1772 + * bh are re-enabled by iwl_trans_pcie_release_nic_access, 1773 + * so re-enable them if _iwl_trans_pcie_grab_nic_access fails. 1774 + */ 1775 + local_bh_disable(); 1776 + if (_iwl_trans_pcie_grab_nic_access(trans, true)) { 1777 + iwl_trans_pcie_release_nic_access(trans); 1778 + } else { 1779 + device_was_powered_off = true; 1780 + local_bh_enable(); 1781 + } 1782 + } 1744 1783 1745 1784 if (restore || device_was_powered_off) { 1746 1785 trans->state = IWL_TRANS_NO_FW;
+5 -4
drivers/net/wireless/intel/iwlwifi/pcie/internal.h
··· 558 558 void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions, 559 559 struct device *dev); 560 560 561 - bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans); 562 - #define _iwl_trans_pcie_grab_nic_access(trans) \ 561 + bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent); 562 + #define _iwl_trans_pcie_grab_nic_access(trans, silent) \ 563 563 __cond_lock(nic_access_nobh, \ 564 - likely(__iwl_trans_pcie_grab_nic_access(trans))) 564 + likely(__iwl_trans_pcie_grab_nic_access(trans, silent))) 565 565 566 566 void iwl_trans_pcie_check_product_reset_status(struct pci_dev *pdev); 567 567 void iwl_trans_pcie_check_product_reset_mode(struct pci_dev *pdev); ··· 1105 1105 int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs, 1106 1106 u32 *val); 1107 1107 bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans); 1108 - void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans); 1108 + void __releases(nic_access_nobh) 1109 + iwl_trans_pcie_release_nic_access(struct iwl_trans *trans); 1109 1110 1110 1111 /* transport gen 1 exported functions */ 1111 1112 void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr);
+12 -4
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
··· 2351 2351 struct iwl_trans_pcie_removal *removal; 2352 2352 char _msg = 0, *msg = &_msg; 2353 2353 2354 - if (WARN_ON(mode < IWL_RESET_MODE_REMOVE_ONLY)) 2354 + if (WARN_ON(mode < IWL_RESET_MODE_REMOVE_ONLY || 2355 + mode == IWL_RESET_MODE_BACKOFF)) 2355 2356 return; 2356 2357 2357 2358 if (test_bit(STATUS_TRANS_DEAD, &trans->status)) ··· 2406 2405 * This version doesn't disable BHs but rather assumes they're 2407 2406 * already disabled. 2408 2407 */ 2409 - bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) 2408 + bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent) 2410 2409 { 2411 2410 int ret; 2412 2411 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); ··· 2458 2457 if (unlikely(ret < 0)) { 2459 2458 u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL); 2460 2459 2460 + if (silent) { 2461 + spin_unlock(&trans_pcie->reg_lock); 2462 + return false; 2463 + } 2464 + 2461 2465 WARN_ONCE(1, 2462 2466 "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n", 2463 2467 cntrl); ··· 2494 2488 bool ret; 2495 2489 2496 2490 local_bh_disable(); 2497 - ret = __iwl_trans_pcie_grab_nic_access(trans); 2491 + ret = __iwl_trans_pcie_grab_nic_access(trans, false); 2498 2492 if (ret) { 2499 2493 /* keep BHs disabled until iwl_trans_pcie_release_nic_access */ 2500 2494 return ret; ··· 2503 2497 return false; 2504 2498 } 2505 2499 2506 - void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans) 2500 + void __releases(nic_access_nobh) 2501 + iwl_trans_pcie_release_nic_access(struct iwl_trans *trans) 2507 2502 { 2508 2503 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2509 2504 ··· 2531 2524 * scheduled on different CPUs (after we drop reg_lock). 2532 2525 */ 2533 2526 out: 2527 + __release(nic_access_nobh); 2534 2528 spin_unlock_bh(&trans_pcie->reg_lock); 2535 2529 } 2536 2530
+1 -1
drivers/net/wireless/intel/iwlwifi/pcie/tx.c
··· 1021 1021 * returned. This needs to be done only on NICs that have 1022 1022 * apmg_wake_up_wa set (see above.) 1023 1023 */ 1024 - if (!_iwl_trans_pcie_grab_nic_access(trans)) 1024 + if (!_iwl_trans_pcie_grab_nic_access(trans, false)) 1025 1025 return -EIO; 1026 1026 1027 1027 /*
+5 -10
drivers/net/wireless/intel/iwlwifi/tests/devinfo.c
··· 2 2 /* 3 3 * KUnit tests for the iwlwifi device info table 4 4 * 5 - * Copyright (C) 2023-2025 Intel Corporation 5 + * Copyright (C) 2023-2024 Intel Corporation 6 6 */ 7 7 #include <kunit/test.h> 8 8 #include <linux/pci.h> ··· 13 13 14 14 static void iwl_pci_print_dev_info(const char *pfx, const struct iwl_dev_info *di) 15 15 { 16 - printk(KERN_DEBUG "%sdev=%.4x,subdev=%.4x,mac_type=%.4x,mac_step=%.4x,rf_type=%.4x,cdb=%d,jacket=%d,rf_id=%.2x,bw_limit=%d,cores=%.2x\n", 16 + printk(KERN_DEBUG "%sdev=%.4x,subdev=%.4x,mac_type=%.4x,mac_step=%.4x,rf_type=%.4x,cdb=%d,jacket=%d,rf_id=%.2x,no_160=%d,cores=%.2x\n", 17 17 pfx, di->device, di->subdevice, di->mac_type, di->mac_step, 18 - di->rf_type, di->cdb, di->jacket, di->rf_id, di->bw_limit, 18 + di->rf_type, di->cdb, di->jacket, di->rf_id, di->no_160, 19 19 di->cores); 20 20 } 21 21 ··· 31 31 di->mac_type, di->mac_step, 32 32 di->rf_type, di->cdb, 33 33 di->jacket, di->rf_id, 34 - di->bw_limit != IWL_CFG_BW_NO_LIM, 35 - di->cores, di->rf_step); 36 - if (!ret) { 37 - iwl_pci_print_dev_info("No entry found for: ", di); 38 - KUNIT_FAIL(test, 39 - "No entry found for entry at index %d\n", idx); 40 - } else if (ret != di) { 34 + di->no_160, di->cores, di->rf_step); 35 + if (ret != di) { 41 36 iwl_pci_print_dev_info("searched: ", di); 42 37 iwl_pci_print_dev_info("found: ", ret); 43 38 KUNIT_FAIL(test,
-1
drivers/net/wireless/purelifi/plfxlc/mac.c
··· 102 102 void plfxlc_mac_release(struct plfxlc_mac *mac) 103 103 { 104 104 plfxlc_chip_release(&mac->chip); 105 - lockdep_assert_held(&mac->lock); 106 105 } 107 106 108 107 int plfxlc_op_start(struct ieee80211_hw *hw)
+1
drivers/nvme/host/Kconfig
··· 102 102 depends on NVME_TCP 103 103 select NET_HANDSHAKE 104 104 select KEYS 105 + select TLS 105 106 help 106 107 Enables TLS encryption for NVMe TCP using the netlink handshake API. 107 108
+7 -1
drivers/nvme/host/pci.c
··· 3575 3575 3576 3576 dev_info(dev->ctrl.device, "restart after slot reset\n"); 3577 3577 pci_restore_state(pdev); 3578 - if (!nvme_try_sched_reset(&dev->ctrl)) 3578 + if (nvme_try_sched_reset(&dev->ctrl)) 3579 3579 nvme_unquiesce_io_queues(&dev->ctrl); 3580 3580 return PCI_ERS_RESULT_RECOVERED; 3581 3581 } ··· 3623 3623 .driver_data = NVME_QUIRK_BOGUS_NID, }, 3624 3624 { PCI_DEVICE(0x1217, 0x8760), /* O2 Micro 64GB Steam Deck */ 3625 3625 .driver_data = NVME_QUIRK_DMAPOOL_ALIGN_512, }, 3626 + { PCI_DEVICE(0x126f, 0x1001), /* Silicon Motion generic */ 3627 + .driver_data = NVME_QUIRK_NO_DEEPEST_PS | 3628 + NVME_QUIRK_IGNORE_DEV_SUBNQN, }, 3626 3629 { PCI_DEVICE(0x126f, 0x2262), /* Silicon Motion generic */ 3627 3630 .driver_data = NVME_QUIRK_NO_DEEPEST_PS | 3628 3631 NVME_QUIRK_BOGUS_NID, }, ··· 3649 3646 NVME_QUIRK_IGNORE_DEV_SUBNQN, }, 3650 3647 { PCI_DEVICE(0x15b7, 0x5008), /* Sandisk SN530 */ 3651 3648 .driver_data = NVME_QUIRK_BROKEN_MSI }, 3649 + { PCI_DEVICE(0x15b7, 0x5009), /* Sandisk SN550 */ 3650 + .driver_data = NVME_QUIRK_BROKEN_MSI | 3651 + NVME_QUIRK_NO_DEEPEST_PS }, 3652 3652 { PCI_DEVICE(0x1987, 0x5012), /* Phison E12 */ 3653 3653 .driver_data = NVME_QUIRK_BOGUS_NID, }, 3654 3654 { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */
+29 -2
drivers/nvme/host/tcp.c
··· 1946 1946 cancel_work_sync(&queue->io_work); 1947 1947 } 1948 1948 1949 - static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid) 1949 + static void nvme_tcp_stop_queue_nowait(struct nvme_ctrl *nctrl, int qid) 1950 1950 { 1951 1951 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); 1952 1952 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; ··· 1964 1964 queue->tls_enabled = false; 1965 1965 mutex_unlock(&queue->queue_lock); 1966 1966 } 1967 + 1968 + static void nvme_tcp_wait_queue(struct nvme_ctrl *nctrl, int qid) 1969 + { 1970 + struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); 1971 + struct nvme_tcp_queue *queue = &ctrl->queues[qid]; 1972 + int timeout = 100; 1973 + 1974 + while (timeout > 0) { 1975 + if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags) || 1976 + !sk_wmem_alloc_get(queue->sock->sk)) 1977 + return; 1978 + msleep(2); 1979 + timeout -= 2; 1980 + } 1981 + dev_warn(nctrl->device, 1982 + "qid %d: timeout draining sock wmem allocation expired\n", 1983 + qid); 1984 + } 1985 + 1986 + static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid) 1987 + { 1988 + nvme_tcp_stop_queue_nowait(nctrl, qid); 1989 + nvme_tcp_wait_queue(nctrl, qid); 1990 + } 1991 + 1967 1992 1968 1993 static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue) 1969 1994 { ··· 2057 2032 int i; 2058 2033 2059 2034 for (i = 1; i < ctrl->queue_count; i++) 2060 - nvme_tcp_stop_queue(ctrl, i); 2035 + nvme_tcp_stop_queue_nowait(ctrl, i); 2036 + for (i = 1; i < ctrl->queue_count; i++) 2037 + nvme_tcp_wait_queue(ctrl, i); 2061 2038 } 2062 2039 2063 2040 static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl,
+1
drivers/nvme/target/Kconfig
··· 98 98 bool "NVMe over Fabrics TCP target TLS encryption support" 99 99 depends on NVME_TARGET_TCP 100 100 select NET_HANDSHAKE 101 + select TLS 101 102 help 102 103 Enables TLS encryption for the NVMe TCP target using the netlink handshake API. 103 104
+1 -2
drivers/nvme/target/auth.c
··· 600 600 pr_warn("%s: ctrl %d qid %d failed to refresh key, error %ld\n", 601 601 __func__, sq->ctrl->cntlid, sq->qid, PTR_ERR(tls_key)); 602 602 tls_key = NULL; 603 - kfree_sensitive(tls_psk); 604 603 } 605 604 if (sq->ctrl->tls_key) 606 605 key_put(sq->ctrl->tls_key); 607 606 sq->ctrl->tls_key = tls_key; 608 607 #endif 609 - 608 + kfree_sensitive(tls_psk); 610 609 out_free_digest: 611 610 kfree_sensitive(digest); 612 611 out_free_psk:
+3
drivers/nvme/target/tcp.c
··· 1560 1560 { 1561 1561 struct socket *sock = queue->sock; 1562 1562 1563 + if (!queue->state_change) 1564 + return; 1565 + 1563 1566 write_lock_bh(&sock->sk->sk_callback_lock); 1564 1567 sock->sk->sk_data_ready = queue->data_ready; 1565 1568 sock->sk->sk_state_change = queue->state_change;
+3 -3
drivers/pinctrl/freescale/pinctrl-imx.c
··· 37 37 struct pinctrl_dev *pctldev, 38 38 const char *name) 39 39 { 40 - const struct group_desc *grp = NULL; 40 + const struct group_desc *grp; 41 41 int i; 42 42 43 43 for (i = 0; i < pctldev->num_groups; i++) { 44 44 grp = pinctrl_generic_get_group(pctldev, i); 45 45 if (grp && !strcmp(grp->grp.name, name)) 46 - break; 46 + return grp; 47 47 } 48 48 49 - return grp; 49 + return NULL; 50 50 } 51 51 52 52 static void imx_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
+1 -1
drivers/pinctrl/mediatek/mtk-eint.c
··· 449 449 return -EOPNOTSUPP; 450 450 451 451 virq = irq_find_mapping(eint->domain, eint_num); 452 - eint_offset = (eint_num % 4) * 8; 452 + eint_offset = (idx % 4) * 8; 453 453 d = irq_get_irq_data(virq); 454 454 455 455 set_offset = (idx / 4) * 4 + eint->regs->dbnc_set;
+70 -89
drivers/pinctrl/mediatek/pinctrl-airoha.c
··· 6 6 */ 7 7 8 8 #include <dt-bindings/pinctrl/mt65xx.h> 9 + #include <linux/bitfield.h> 9 10 #include <linux/bits.h> 10 11 #include <linux/cleanup.h> 11 12 #include <linux/gpio/driver.h> ··· 113 112 #define REG_LAN_LED1_MAPPING 0x0280 114 113 115 114 #define LAN4_LED_MAPPING_MASK GENMASK(18, 16) 116 - #define LAN4_PHY4_LED_MAP BIT(18) 117 - #define LAN4_PHY2_LED_MAP BIT(17) 118 - #define LAN4_PHY1_LED_MAP BIT(16) 119 - #define LAN4_PHY0_LED_MAP 0 120 - #define LAN4_PHY3_LED_MAP GENMASK(17, 16) 115 + #define LAN4_PHY_LED_MAP(_n) FIELD_PREP_CONST(LAN4_LED_MAPPING_MASK, (_n)) 121 116 122 117 #define LAN3_LED_MAPPING_MASK GENMASK(14, 12) 123 - #define LAN3_PHY4_LED_MAP BIT(14) 124 - #define LAN3_PHY2_LED_MAP BIT(13) 125 - #define LAN3_PHY1_LED_MAP BIT(12) 126 - #define LAN3_PHY0_LED_MAP 0 127 - #define LAN3_PHY3_LED_MAP GENMASK(13, 12) 118 + #define LAN3_PHY_LED_MAP(_n) FIELD_PREP_CONST(LAN3_LED_MAPPING_MASK, (_n)) 128 119 129 120 #define LAN2_LED_MAPPING_MASK GENMASK(10, 8) 130 - #define LAN2_PHY4_LED_MAP BIT(12) 131 - #define LAN2_PHY2_LED_MAP BIT(11) 132 - #define LAN2_PHY1_LED_MAP BIT(10) 133 - #define LAN2_PHY0_LED_MAP 0 134 - #define LAN2_PHY3_LED_MAP GENMASK(11, 10) 121 + #define LAN2_PHY_LED_MAP(_n) FIELD_PREP_CONST(LAN2_LED_MAPPING_MASK, (_n)) 135 122 136 123 #define LAN1_LED_MAPPING_MASK GENMASK(6, 4) 137 - #define LAN1_PHY4_LED_MAP BIT(6) 138 - #define LAN1_PHY2_LED_MAP BIT(5) 139 - #define LAN1_PHY1_LED_MAP BIT(4) 140 - #define LAN1_PHY0_LED_MAP 0 141 - #define LAN1_PHY3_LED_MAP GENMASK(5, 4) 124 + #define LAN1_PHY_LED_MAP(_n) FIELD_PREP_CONST(LAN1_LED_MAPPING_MASK, (_n)) 142 125 143 126 #define LAN0_LED_MAPPING_MASK GENMASK(2, 0) 144 - #define LAN0_PHY4_LED_MAP BIT(3) 145 - #define LAN0_PHY2_LED_MAP BIT(2) 146 - #define LAN0_PHY1_LED_MAP BIT(1) 147 - #define LAN0_PHY0_LED_MAP 0 148 - #define LAN0_PHY3_LED_MAP GENMASK(2, 1) 127 + #define LAN0_PHY_LED_MAP(_n) FIELD_PREP_CONST(LAN0_LED_MAPPING_MASK, (_n)) 149 128 150 129 /* CONF */ 151 130 #define 
REG_I2C_SDA_E2 0x001c ··· 1457 1476 .regmap[1] = { 1458 1477 AIROHA_FUNC_MUX, 1459 1478 REG_LAN_LED0_MAPPING, 1460 - LAN1_LED_MAPPING_MASK, 1461 - LAN1_PHY1_LED_MAP 1479 + LAN0_LED_MAPPING_MASK, 1480 + LAN0_PHY_LED_MAP(0) 1462 1481 }, 1463 1482 .regmap_size = 2, 1464 1483 }, { ··· 1472 1491 .regmap[1] = { 1473 1492 AIROHA_FUNC_MUX, 1474 1493 REG_LAN_LED0_MAPPING, 1475 - LAN2_LED_MAPPING_MASK, 1476 - LAN2_PHY1_LED_MAP 1494 + LAN1_LED_MAPPING_MASK, 1495 + LAN1_PHY_LED_MAP(0) 1477 1496 }, 1478 1497 .regmap_size = 2, 1479 1498 }, { ··· 1487 1506 .regmap[1] = { 1488 1507 AIROHA_FUNC_MUX, 1489 1508 REG_LAN_LED0_MAPPING, 1490 - LAN3_LED_MAPPING_MASK, 1491 - LAN3_PHY1_LED_MAP 1509 + LAN2_LED_MAPPING_MASK, 1510 + LAN2_PHY_LED_MAP(0) 1492 1511 }, 1493 1512 .regmap_size = 2, 1494 1513 }, { ··· 1502 1521 .regmap[1] = { 1503 1522 AIROHA_FUNC_MUX, 1504 1523 REG_LAN_LED0_MAPPING, 1505 - LAN4_LED_MAPPING_MASK, 1506 - LAN4_PHY1_LED_MAP 1524 + LAN3_LED_MAPPING_MASK, 1525 + LAN3_PHY_LED_MAP(0) 1507 1526 }, 1508 1527 .regmap_size = 2, 1509 1528 }, ··· 1521 1540 .regmap[1] = { 1522 1541 AIROHA_FUNC_MUX, 1523 1542 REG_LAN_LED0_MAPPING, 1524 - LAN1_LED_MAPPING_MASK, 1525 - LAN1_PHY2_LED_MAP 1543 + LAN0_LED_MAPPING_MASK, 1544 + LAN0_PHY_LED_MAP(1) 1526 1545 }, 1527 1546 .regmap_size = 2, 1528 1547 }, { ··· 1536 1555 .regmap[1] = { 1537 1556 AIROHA_FUNC_MUX, 1538 1557 REG_LAN_LED0_MAPPING, 1539 - LAN2_LED_MAPPING_MASK, 1540 - LAN2_PHY2_LED_MAP 1558 + LAN1_LED_MAPPING_MASK, 1559 + LAN1_PHY_LED_MAP(1) 1541 1560 }, 1542 1561 .regmap_size = 2, 1543 1562 }, { ··· 1551 1570 .regmap[1] = { 1552 1571 AIROHA_FUNC_MUX, 1553 1572 REG_LAN_LED0_MAPPING, 1554 - LAN3_LED_MAPPING_MASK, 1555 - LAN3_PHY2_LED_MAP 1573 + LAN2_LED_MAPPING_MASK, 1574 + LAN2_PHY_LED_MAP(1) 1556 1575 }, 1557 1576 .regmap_size = 2, 1558 1577 }, { ··· 1566 1585 .regmap[1] = { 1567 1586 AIROHA_FUNC_MUX, 1568 1587 REG_LAN_LED0_MAPPING, 1569 - LAN4_LED_MAPPING_MASK, 1570 - LAN4_PHY2_LED_MAP 1588 + LAN3_LED_MAPPING_MASK, 1589 + 
LAN3_PHY_LED_MAP(1) 1571 1590 }, 1572 1591 .regmap_size = 2, 1573 1592 }, ··· 1585 1604 .regmap[1] = { 1586 1605 AIROHA_FUNC_MUX, 1587 1606 REG_LAN_LED0_MAPPING, 1588 - LAN1_LED_MAPPING_MASK, 1589 - LAN1_PHY3_LED_MAP 1607 + LAN0_LED_MAPPING_MASK, 1608 + LAN0_PHY_LED_MAP(2) 1590 1609 }, 1591 1610 .regmap_size = 2, 1592 1611 }, { ··· 1600 1619 .regmap[1] = { 1601 1620 AIROHA_FUNC_MUX, 1602 1621 REG_LAN_LED0_MAPPING, 1603 - LAN2_LED_MAPPING_MASK, 1604 - LAN2_PHY3_LED_MAP 1622 + LAN1_LED_MAPPING_MASK, 1623 + LAN1_PHY_LED_MAP(2) 1605 1624 }, 1606 1625 .regmap_size = 2, 1607 1626 }, { ··· 1615 1634 .regmap[1] = { 1616 1635 AIROHA_FUNC_MUX, 1617 1636 REG_LAN_LED0_MAPPING, 1618 - LAN3_LED_MAPPING_MASK, 1619 - LAN3_PHY3_LED_MAP 1637 + LAN2_LED_MAPPING_MASK, 1638 + LAN2_PHY_LED_MAP(2) 1620 1639 }, 1621 1640 .regmap_size = 2, 1622 1641 }, { ··· 1630 1649 .regmap[1] = { 1631 1650 AIROHA_FUNC_MUX, 1632 1651 REG_LAN_LED0_MAPPING, 1633 - LAN4_LED_MAPPING_MASK, 1634 - LAN4_PHY3_LED_MAP 1652 + LAN3_LED_MAPPING_MASK, 1653 + LAN3_PHY_LED_MAP(2) 1635 1654 }, 1636 1655 .regmap_size = 2, 1637 1656 }, ··· 1649 1668 .regmap[1] = { 1650 1669 AIROHA_FUNC_MUX, 1651 1670 REG_LAN_LED0_MAPPING, 1652 - LAN1_LED_MAPPING_MASK, 1653 - LAN1_PHY4_LED_MAP 1671 + LAN0_LED_MAPPING_MASK, 1672 + LAN0_PHY_LED_MAP(3) 1654 1673 }, 1655 1674 .regmap_size = 2, 1656 1675 }, { ··· 1664 1683 .regmap[1] = { 1665 1684 AIROHA_FUNC_MUX, 1666 1685 REG_LAN_LED0_MAPPING, 1667 - LAN2_LED_MAPPING_MASK, 1668 - LAN2_PHY4_LED_MAP 1686 + LAN1_LED_MAPPING_MASK, 1687 + LAN1_PHY_LED_MAP(3) 1669 1688 }, 1670 1689 .regmap_size = 2, 1671 1690 }, { ··· 1679 1698 .regmap[1] = { 1680 1699 AIROHA_FUNC_MUX, 1681 1700 REG_LAN_LED0_MAPPING, 1682 - LAN3_LED_MAPPING_MASK, 1683 - LAN3_PHY4_LED_MAP 1701 + LAN2_LED_MAPPING_MASK, 1702 + LAN2_PHY_LED_MAP(3) 1684 1703 }, 1685 1704 .regmap_size = 2, 1686 1705 }, { ··· 1694 1713 .regmap[1] = { 1695 1714 AIROHA_FUNC_MUX, 1696 1715 REG_LAN_LED0_MAPPING, 1697 - LAN4_LED_MAPPING_MASK, 1698 - 
LAN4_PHY4_LED_MAP 1716 + LAN3_LED_MAPPING_MASK, 1717 + LAN3_PHY_LED_MAP(3) 1699 1718 }, 1700 1719 .regmap_size = 2, 1701 1720 }, ··· 1713 1732 .regmap[1] = { 1714 1733 AIROHA_FUNC_MUX, 1715 1734 REG_LAN_LED1_MAPPING, 1716 - LAN1_LED_MAPPING_MASK, 1717 - LAN1_PHY1_LED_MAP 1735 + LAN0_LED_MAPPING_MASK, 1736 + LAN0_PHY_LED_MAP(0) 1718 1737 }, 1719 1738 .regmap_size = 2, 1720 1739 }, { ··· 1728 1747 .regmap[1] = { 1729 1748 AIROHA_FUNC_MUX, 1730 1749 REG_LAN_LED1_MAPPING, 1731 - LAN2_LED_MAPPING_MASK, 1732 - LAN2_PHY1_LED_MAP 1750 + LAN1_LED_MAPPING_MASK, 1751 + LAN1_PHY_LED_MAP(0) 1733 1752 }, 1734 1753 .regmap_size = 2, 1735 1754 }, { ··· 1743 1762 .regmap[1] = { 1744 1763 AIROHA_FUNC_MUX, 1745 1764 REG_LAN_LED1_MAPPING, 1746 - LAN3_LED_MAPPING_MASK, 1747 - LAN3_PHY1_LED_MAP 1765 + LAN2_LED_MAPPING_MASK, 1766 + LAN2_PHY_LED_MAP(0) 1748 1767 }, 1749 1768 .regmap_size = 2, 1750 1769 }, { ··· 1758 1777 .regmap[1] = { 1759 1778 AIROHA_FUNC_MUX, 1760 1779 REG_LAN_LED1_MAPPING, 1761 - LAN4_LED_MAPPING_MASK, 1762 - LAN4_PHY1_LED_MAP 1780 + LAN3_LED_MAPPING_MASK, 1781 + LAN3_PHY_LED_MAP(0) 1763 1782 }, 1764 1783 .regmap_size = 2, 1765 1784 }, ··· 1777 1796 .regmap[1] = { 1778 1797 AIROHA_FUNC_MUX, 1779 1798 REG_LAN_LED1_MAPPING, 1780 - LAN1_LED_MAPPING_MASK, 1781 - LAN1_PHY2_LED_MAP 1799 + LAN0_LED_MAPPING_MASK, 1800 + LAN0_PHY_LED_MAP(1) 1782 1801 }, 1783 1802 .regmap_size = 2, 1784 1803 }, { ··· 1792 1811 .regmap[1] = { 1793 1812 AIROHA_FUNC_MUX, 1794 1813 REG_LAN_LED1_MAPPING, 1795 - LAN2_LED_MAPPING_MASK, 1796 - LAN2_PHY2_LED_MAP 1814 + LAN1_LED_MAPPING_MASK, 1815 + LAN1_PHY_LED_MAP(1) 1797 1816 }, 1798 1817 .regmap_size = 2, 1799 1818 }, { ··· 1807 1826 .regmap[1] = { 1808 1827 AIROHA_FUNC_MUX, 1809 1828 REG_LAN_LED1_MAPPING, 1810 - LAN3_LED_MAPPING_MASK, 1811 - LAN3_PHY2_LED_MAP 1829 + LAN2_LED_MAPPING_MASK, 1830 + LAN2_PHY_LED_MAP(1) 1812 1831 }, 1813 1832 .regmap_size = 2, 1814 1833 }, { ··· 1822 1841 .regmap[1] = { 1823 1842 AIROHA_FUNC_MUX, 1824 1843 
REG_LAN_LED1_MAPPING, 1825 - LAN4_LED_MAPPING_MASK, 1826 - LAN4_PHY2_LED_MAP 1844 + LAN3_LED_MAPPING_MASK, 1845 + LAN3_PHY_LED_MAP(1) 1827 1846 }, 1828 1847 .regmap_size = 2, 1829 1848 }, ··· 1841 1860 .regmap[1] = { 1842 1861 AIROHA_FUNC_MUX, 1843 1862 REG_LAN_LED1_MAPPING, 1844 - LAN1_LED_MAPPING_MASK, 1845 - LAN1_PHY3_LED_MAP 1863 + LAN0_LED_MAPPING_MASK, 1864 + LAN0_PHY_LED_MAP(2) 1846 1865 }, 1847 1866 .regmap_size = 2, 1848 1867 }, { ··· 1856 1875 .regmap[1] = { 1857 1876 AIROHA_FUNC_MUX, 1858 1877 REG_LAN_LED1_MAPPING, 1859 - LAN2_LED_MAPPING_MASK, 1860 - LAN2_PHY3_LED_MAP 1878 + LAN1_LED_MAPPING_MASK, 1879 + LAN1_PHY_LED_MAP(2) 1861 1880 }, 1862 1881 .regmap_size = 2, 1863 1882 }, { ··· 1871 1890 .regmap[1] = { 1872 1891 AIROHA_FUNC_MUX, 1873 1892 REG_LAN_LED1_MAPPING, 1874 - LAN3_LED_MAPPING_MASK, 1875 - LAN3_PHY3_LED_MAP 1893 + LAN2_LED_MAPPING_MASK, 1894 + LAN2_PHY_LED_MAP(2) 1876 1895 }, 1877 1896 .regmap_size = 2, 1878 1897 }, { ··· 1886 1905 .regmap[1] = { 1887 1906 AIROHA_FUNC_MUX, 1888 1907 REG_LAN_LED1_MAPPING, 1889 - LAN4_LED_MAPPING_MASK, 1890 - LAN4_PHY3_LED_MAP 1908 + LAN3_LED_MAPPING_MASK, 1909 + LAN3_PHY_LED_MAP(2) 1891 1910 }, 1892 1911 .regmap_size = 2, 1893 1912 }, ··· 1905 1924 .regmap[1] = { 1906 1925 AIROHA_FUNC_MUX, 1907 1926 REG_LAN_LED1_MAPPING, 1908 - LAN1_LED_MAPPING_MASK, 1909 - LAN1_PHY4_LED_MAP 1927 + LAN0_LED_MAPPING_MASK, 1928 + LAN0_PHY_LED_MAP(3) 1910 1929 }, 1911 1930 .regmap_size = 2, 1912 1931 }, { ··· 1920 1939 .regmap[1] = { 1921 1940 AIROHA_FUNC_MUX, 1922 1941 REG_LAN_LED1_MAPPING, 1923 - LAN2_LED_MAPPING_MASK, 1924 - LAN2_PHY4_LED_MAP 1942 + LAN1_LED_MAPPING_MASK, 1943 + LAN1_PHY_LED_MAP(3) 1925 1944 }, 1926 1945 .regmap_size = 2, 1927 1946 }, { ··· 1935 1954 .regmap[1] = { 1936 1955 AIROHA_FUNC_MUX, 1937 1956 REG_LAN_LED1_MAPPING, 1938 - LAN3_LED_MAPPING_MASK, 1939 - LAN3_PHY4_LED_MAP 1957 + LAN2_LED_MAPPING_MASK, 1958 + LAN2_PHY_LED_MAP(3) 1940 1959 }, 1941 1960 .regmap_size = 2, 1942 1961 }, { ··· 1950 1969 
.regmap[1] = { 1951 1970 AIROHA_FUNC_MUX, 1952 1971 REG_LAN_LED1_MAPPING, 1953 - LAN4_LED_MAPPING_MASK, 1954 - LAN4_PHY4_LED_MAP 1972 + LAN3_LED_MAPPING_MASK, 1973 + LAN3_PHY_LED_MAP(3) 1955 1974 }, 1956 1975 .regmap_size = 2, 1957 1976 },
+9 -3
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
··· 1015 1015 if (!pctl->eint) 1016 1016 return -ENOMEM; 1017 1017 1018 - pctl->eint->base = devm_platform_ioremap_resource(pdev, 0); 1019 - if (IS_ERR(pctl->eint->base)) 1020 - return PTR_ERR(pctl->eint->base); 1018 + pctl->eint->nbase = 1; 1019 + /* mtk-eint expects an array */ 1020 + pctl->eint->base = devm_kzalloc(pctl->dev, sizeof(pctl->eint->base), GFP_KERNEL); 1021 + if (!pctl->eint->base) 1022 + return -ENOMEM; 1023 + 1024 + pctl->eint->base[0] = devm_platform_ioremap_resource(pdev, 0); 1025 + if (IS_ERR(pctl->eint->base[0])) 1026 + return PTR_ERR(pctl->eint->base[0]); 1021 1027 1022 1028 pctl->eint->irq = irq_of_parse_and_map(np, 0); 1023 1029 if (!pctl->eint->irq)
+1 -1
drivers/pinctrl/meson/pinctrl-meson.c
··· 487 487 case PIN_CONFIG_BIAS_PULL_DOWN: 488 488 case PIN_CONFIG_BIAS_PULL_UP: 489 489 if (meson_pinconf_get_pull(pc, pin) == param) 490 - arg = 1; 490 + arg = 60000; 491 491 else 492 492 return -EINVAL; 493 493 break;
+3 -1
drivers/pinctrl/qcom/pinctrl-sm8750.c
··· 46 46 .out_bit = 1, \ 47 47 .intr_enable_bit = 0, \ 48 48 .intr_status_bit = 0, \ 49 - .intr_target_bit = 5, \ 49 + .intr_wakeup_present_bit = 6, \ 50 + .intr_wakeup_enable_bit = 7, \ 51 + .intr_target_bit = 8, \ 50 52 .intr_target_kpss_val = 3, \ 51 53 .intr_raw_status_bit = 4, \ 52 54 .intr_polarity_bit = 1, \
+3 -4
drivers/platform/x86/amd/pmc/pmc.c
··· 644 644 struct smu_metrics table; 645 645 int rc; 646 646 647 - /* CZN: Ensure that future s0i3 entry attempts at least 10ms passed */ 648 - if (pdev->cpu_id == AMD_CPU_ID_CZN && !get_metrics_table(pdev, &table) && 649 - table.s0i3_last_entry_status) 650 - usleep_range(10000, 20000); 647 + /* Avoid triggering OVP */ 648 + if (!get_metrics_table(pdev, &table) && table.s0i3_last_entry_status) 649 + msleep(2500); 651 650 652 651 /* Dump the IdleMask before we add to the STB */ 653 652 amd_pmc_idlemask_read(pdev, pdev->dev, NULL);
+10 -1
drivers/platform/x86/asus-wmi.c
··· 304 304 305 305 u32 kbd_rgb_dev; 306 306 bool kbd_rgb_state_available; 307 + bool oobe_state_available; 307 308 308 309 u8 throttle_thermal_policy_mode; 309 310 u32 throttle_thermal_policy_dev; ··· 1827 1826 goto error; 1828 1827 } 1829 1828 1830 - if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_OOBE)) { 1829 + if (asus->oobe_state_available) { 1831 1830 /* 1832 1831 * Disable OOBE state, so that e.g. the keyboard backlight 1833 1832 * works. ··· 4724 4723 asus->egpu_enable_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_EGPU); 4725 4724 asus->dgpu_disable_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_DGPU); 4726 4725 asus->kbd_rgb_state_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_STATE); 4726 + asus->oobe_state_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_OOBE); 4727 4727 asus->ally_mcu_usb_switch = acpi_has_method(NULL, ASUS_USB0_PWR_EC0_CSEE) 4728 4728 && dmi_check_system(asus_ally_mcu_quirk); 4729 4729 ··· 4972 4970 } 4973 4971 if (!IS_ERR_OR_NULL(asus->kbd_led.dev)) 4974 4972 kbd_led_update(asus); 4973 + if (asus->oobe_state_available) { 4974 + /* 4975 + * Disable OOBE state, so that e.g. the keyboard backlight 4976 + * works. 4977 + */ 4978 + asus_wmi_set_devstate(ASUS_WMI_DEVID_OOBE, 1, NULL); 4979 + } 4975 4980 4976 4981 if (asus_wmi_has_fnlock_key(asus)) 4977 4982 asus_wmi_fnlock_update(asus);
+10 -4
drivers/platform/x86/dell/alienware-wmi-wmax.c
··· 70 70 .driver_data = &generic_quirks, 71 71 }, 72 72 { 73 + .ident = "Alienware m15 R7", 74 + .matches = { 75 + DMI_MATCH(DMI_SYS_VENDOR, "Alienware"), 76 + DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m15 R7"), 77 + }, 78 + .driver_data = &generic_quirks, 79 + }, 80 + { 73 81 .ident = "Alienware m16 R1", 74 82 .matches = { 75 83 DMI_MATCH(DMI_SYS_VENDOR, "Alienware"), ··· 663 655 for (u32 i = 0; i < sys_desc[3]; i++) { 664 656 ret = wmax_thermal_information(priv->wdev, WMAX_OPERATION_LIST_IDS, 665 657 i + first_mode, &out_data); 666 - 667 - if (ret == -EIO) 668 - return ret; 669 - 670 658 if (ret == -EBADRQC) 671 659 break; 660 + if (ret) 661 + return ret; 672 662 673 663 if (!is_wmax_thermal_code(out_data)) 674 664 continue;
+16
drivers/platform/x86/ideapad-laptop.c
··· 1294 1294 /* Specific to some newer models */ 1295 1295 { KE_KEY, 0x3e | IDEAPAD_WMI_KEY, { KEY_MICMUTE } }, 1296 1296 { KE_KEY, 0x3f | IDEAPAD_WMI_KEY, { KEY_RFKILL } }, 1297 + /* Star- (User Assignable Key) */ 1298 + { KE_KEY, 0x44 | IDEAPAD_WMI_KEY, { KEY_PROG1 } }, 1299 + /* Eye */ 1300 + { KE_KEY, 0x45 | IDEAPAD_WMI_KEY, { KEY_PROG3 } }, 1301 + /* Performance toggle also Fn+Q, handled inside ideapad_wmi_notify() */ 1302 + { KE_KEY, 0x3d | IDEAPAD_WMI_KEY, { KEY_PROG4 } }, 1303 + /* shift + prtsc */ 1304 + { KE_KEY, 0x2d | IDEAPAD_WMI_KEY, { KEY_CUT } }, 1305 + { KE_KEY, 0x29 | IDEAPAD_WMI_KEY, { KEY_TOUCHPAD_TOGGLE } }, 1306 + { KE_KEY, 0x2a | IDEAPAD_WMI_KEY, { KEY_ROOT_MENU } }, 1297 1307 1298 1308 { KE_END }, 1299 1309 }; ··· 2089 2079 2090 2080 dev_dbg(&wdev->dev, "WMI fn-key event: 0x%llx\n", 2091 2081 data->integer.value); 2082 + 2083 + /* performance button triggered by 0x3d */ 2084 + if (data->integer.value == 0x3d && priv->dytc) { 2085 + platform_profile_cycle(); 2086 + break; 2087 + } 2092 2088 2093 2089 /* 0x02 FnLock, 0x03 Esc */ 2094 2090 if (data->integer.value == 0x02 || data->integer.value == 0x03)
+11 -10
drivers/platform/x86/intel/hid.c
··· 44 44 MODULE_AUTHOR("Alex Hung"); 45 45 46 46 static const struct acpi_device_id intel_hid_ids[] = { 47 - {"INT33D5", 0}, 48 - {"INTC1051", 0}, 49 - {"INTC1054", 0}, 50 - {"INTC1070", 0}, 51 - {"INTC1076", 0}, 52 - {"INTC1077", 0}, 53 - {"INTC1078", 0}, 54 - {"INTC107B", 0}, 55 - {"INTC10CB", 0}, 56 - {"", 0}, 47 + { "INT33D5" }, 48 + { "INTC1051" }, 49 + { "INTC1054" }, 50 + { "INTC1070" }, 51 + { "INTC1076" }, 52 + { "INTC1077" }, 53 + { "INTC1078" }, 54 + { "INTC107B" }, 55 + { "INTC10CB" }, 56 + { "INTC10CC" }, 57 + { } 57 58 }; 58 59 MODULE_DEVICE_TABLE(acpi, intel_hid_ids); 59 60
+9 -4
drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
··· 146 146 { 147 147 struct uncore_data *data; 148 148 int target; 149 + int ret; 149 150 150 151 /* Check if there is an online cpu in the package for uncore MSR */ 151 152 target = cpumask_any_and(&uncore_cpu_mask, topology_die_cpumask(cpu)); 152 153 if (target < nr_cpu_ids) 153 154 return 0; 154 - 155 - /* Use this CPU on this die as a control CPU */ 156 - cpumask_set_cpu(cpu, &uncore_cpu_mask); 157 155 158 156 data = uncore_get_instance(cpu); 159 157 if (!data) ··· 161 163 data->die_id = topology_die_id(cpu); 162 164 data->domain_id = UNCORE_DOMAIN_ID_INVALID; 163 165 164 - return uncore_freq_add_entry(data, cpu); 166 + ret = uncore_freq_add_entry(data, cpu); 167 + if (ret) 168 + return ret; 169 + 170 + /* Use this CPU on this die as a control CPU */ 171 + cpumask_set_cpu(cpu, &uncore_cpu_mask); 172 + 173 + return 0; 165 174 } 166 175 167 176 static int uncore_event_cpu_offline(unsigned int cpu)
+50 -2
drivers/ptp/ptp_ocp.c
··· 2578 2578 .set_output = ptp_ocp_sma_fb_set_output, 2579 2579 }; 2580 2580 2581 + static int 2582 + ptp_ocp_sma_adva_set_output(struct ptp_ocp *bp, int sma_nr, u32 val) 2583 + { 2584 + u32 reg, mask, shift; 2585 + unsigned long flags; 2586 + u32 __iomem *gpio; 2587 + 2588 + gpio = sma_nr > 2 ? &bp->sma_map1->gpio2 : &bp->sma_map2->gpio2; 2589 + shift = sma_nr & 1 ? 0 : 16; 2590 + 2591 + mask = 0xffff << (16 - shift); 2592 + 2593 + spin_lock_irqsave(&bp->lock, flags); 2594 + 2595 + reg = ioread32(gpio); 2596 + reg = (reg & mask) | (val << shift); 2597 + 2598 + iowrite32(reg, gpio); 2599 + 2600 + spin_unlock_irqrestore(&bp->lock, flags); 2601 + 2602 + return 0; 2603 + } 2604 + 2605 + static int 2606 + ptp_ocp_sma_adva_set_inputs(struct ptp_ocp *bp, int sma_nr, u32 val) 2607 + { 2608 + u32 reg, mask, shift; 2609 + unsigned long flags; 2610 + u32 __iomem *gpio; 2611 + 2612 + gpio = sma_nr > 2 ? &bp->sma_map2->gpio1 : &bp->sma_map1->gpio1; 2613 + shift = sma_nr & 1 ? 0 : 16; 2614 + 2615 + mask = 0xffff << (16 - shift); 2616 + 2617 + spin_lock_irqsave(&bp->lock, flags); 2618 + 2619 + reg = ioread32(gpio); 2620 + reg = (reg & mask) | (val << shift); 2621 + 2622 + iowrite32(reg, gpio); 2623 + 2624 + spin_unlock_irqrestore(&bp->lock, flags); 2625 + 2626 + return 0; 2627 + } 2628 + 2581 2629 static const struct ocp_sma_op ocp_adva_sma_op = { 2582 2630 .tbl = { ptp_ocp_adva_sma_in, ptp_ocp_adva_sma_out }, 2583 2631 .init = ptp_ocp_sma_fb_init, 2584 2632 .get = ptp_ocp_sma_fb_get, 2585 - .set_inputs = ptp_ocp_sma_fb_set_inputs, 2586 - .set_output = ptp_ocp_sma_fb_set_output, 2633 + .set_inputs = ptp_ocp_sma_adva_set_inputs, 2634 + .set_output = ptp_ocp_sma_adva_set_output, 2587 2635 }; 2588 2636 2589 2637 static int
+1 -1
drivers/scsi/myrb.c
··· 891 891 status = mmio_init_fn(pdev, base, &mbox); 892 892 if (status != MYRB_STATUS_SUCCESS) { 893 893 dev_err(&pdev->dev, 894 - "Failed to enable mailbox, statux %02X\n", 894 + "Failed to enable mailbox, status %02X\n", 895 895 status); 896 896 return false; 897 897 }
+13 -23
drivers/soundwire/intel_auxdevice.c
··· 353 353 /* use generic bandwidth allocation algorithm */ 354 354 sdw->cdns.bus.compute_params = sdw_compute_params; 355 355 356 - /* avoid resuming from pm_runtime suspend if it's not required */ 357 - dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND); 358 - 359 356 ret = sdw_bus_master_add(bus, dev, dev->fwnode); 360 357 if (ret) { 361 358 dev_err(dev, "sdw_bus_master_add fail: %d\n", ret); ··· 637 640 return 0; 638 641 } 639 642 640 - if (pm_runtime_suspended(dev)) { 643 + /* Prevent runtime PM from racing with the code below. */ 644 + pm_runtime_disable(dev); 645 + 646 + if (pm_runtime_status_suspended(dev)) { 641 647 dev_dbg(dev, "pm_runtime status: suspended\n"); 642 648 643 649 clock_stop_quirks = sdw->link_res->clock_stop_quirks; ··· 648 648 if ((clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET) || 649 649 !clock_stop_quirks) { 650 650 651 - if (pm_runtime_suspended(dev->parent)) { 651 + if (pm_runtime_status_suspended(dev->parent)) { 652 652 /* 653 653 * paranoia check: this should not happen with the .prepare 654 654 * resume to full power ··· 715 715 struct sdw_cdns *cdns = dev_get_drvdata(dev); 716 716 struct sdw_intel *sdw = cdns_to_intel(cdns); 717 717 struct sdw_bus *bus = &cdns->bus; 718 - int link_flags; 719 718 int ret; 720 719 721 720 if (bus->prop.hw_disabled || !sdw->startup_done) { 722 721 dev_dbg(dev, "SoundWire master %d is disabled or not-started, ignoring\n", 723 722 bus->link_id); 724 723 return 0; 725 - } 726 - 727 - if (pm_runtime_suspended(dev)) { 728 - dev_dbg(dev, "pm_runtime status was suspended, forcing active\n"); 729 - 730 - /* follow required sequence from runtime_pm.rst */ 731 - pm_runtime_disable(dev); 732 - pm_runtime_set_active(dev); 733 - pm_runtime_mark_last_busy(dev); 734 - pm_runtime_enable(dev); 735 - 736 - pm_runtime_resume(bus->dev); 737 - 738 - link_flags = md_flags >> (bus->link_id * 8); 739 - 740 - if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME_IDLE)) 741 - pm_runtime_idle(dev); 742 724 } 743 725 744 
726 ret = sdw_intel_link_power_up(sdw); ··· 741 759 sdw_intel_link_power_down(sdw); 742 760 return ret; 743 761 } 762 + 763 + /* 764 + * Runtime PM has been disabled in intel_suspend(), so set the status 765 + * to active because the device has just been resumed and re-enable 766 + * runtime PM. 767 + */ 768 + pm_runtime_set_active(dev); 769 + pm_runtime_enable(dev); 744 770 745 771 /* 746 772 * after system resume, the pm_runtime suspend() may kick in
+5 -1
drivers/spi/spi-mem.c
··· 596 596 ns_per_cycles = 1000000000 / op->max_freq; 597 597 ncycles += ((op->cmd.nbytes * 8) / op->cmd.buswidth) / (op->cmd.dtr ? 2 : 1); 598 598 ncycles += ((op->addr.nbytes * 8) / op->addr.buswidth) / (op->addr.dtr ? 2 : 1); 599 - ncycles += ((op->dummy.nbytes * 8) / op->dummy.buswidth) / (op->dummy.dtr ? 2 : 1); 599 + 600 + /* Dummy bytes are optional for some SPI flash memory operations */ 601 + if (op->dummy.nbytes) 602 + ncycles += ((op->dummy.nbytes * 8) / op->dummy.buswidth) / (op->dummy.dtr ? 2 : 1); 603 + 600 604 ncycles += ((op->data.nbytes * 8) / op->data.buswidth) / (op->data.dtr ? 2 : 1); 601 605 602 606 return ncycles * ns_per_cycles;
+2 -3
drivers/spi/spi-qpic-snand.c
··· 142 142 else if (reg == NAND_READ_LOCATION_1) 143 143 snandc->regs->read_location1 = locreg_val; 144 144 else if (reg == NAND_READ_LOCATION_2) 145 - snandc->regs->read_location1 = locreg_val; 145 + snandc->regs->read_location2 = locreg_val; 146 146 else if (reg == NAND_READ_LOCATION_3) 147 147 snandc->regs->read_location3 = locreg_val; 148 148 } ··· 1307 1307 snandc->qspi->addr1 = cpu_to_le32(s_op.addr1_reg << 16); 1308 1308 snandc->qspi->addr2 = cpu_to_le32(s_op.addr2_reg); 1309 1309 snandc->qspi->cmd = cpu_to_le32(cmd); 1310 - qcom_spi_block_erase(snandc); 1311 - return 0; 1310 + return qcom_spi_block_erase(snandc); 1312 1311 default: 1313 1312 break; 1314 1313 }
+4
drivers/spi/spi-stm32-ospi.c
··· 960 960 err_pm_enable: 961 961 pm_runtime_force_suspend(ospi->dev); 962 962 mutex_destroy(&ospi->lock); 963 + if (ospi->dma_chtx) 964 + dma_release_channel(ospi->dma_chtx); 965 + if (ospi->dma_chrx) 966 + dma_release_channel(ospi->dma_chrx); 963 967 964 968 return ret; 965 969 }
+3 -3
drivers/spi/spi-tegra114.c
··· 728 728 u32 inactive_cycles; 729 729 u8 cs_state; 730 730 731 - if (setup->unit != SPI_DELAY_UNIT_SCK || 732 - hold->unit != SPI_DELAY_UNIT_SCK || 733 - inactive->unit != SPI_DELAY_UNIT_SCK) { 731 + if ((setup->unit && setup->unit != SPI_DELAY_UNIT_SCK) || 732 + (hold->unit && hold->unit != SPI_DELAY_UNIT_SCK) || 733 + (inactive->unit && inactive->unit != SPI_DELAY_UNIT_SCK)) { 734 734 dev_err(&spi->dev, 735 735 "Invalid delay unit %d, should be SPI_DELAY_UNIT_SCK\n", 736 736 SPI_DELAY_UNIT_SCK);
-2
drivers/ufs/core/ufshcd.c
··· 7265 7265 err = -EINVAL; 7266 7266 } 7267 7267 } 7268 - ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP, 7269 - (struct utp_upiu_req *)lrbp->ucd_rsp_ptr); 7270 7268 7271 7269 return err; 7272 7270 }
+25 -2
fs/bcachefs/btree_gc.c
··· 47 47 #define DROP_PREV_NODE 11 48 48 #define DID_FILL_FROM_SCAN 12 49 49 50 + /* 51 + * Returns true if it's a btree we can easily reconstruct, or otherwise won't 52 + * cause data loss if it's missing: 53 + */ 54 + static bool btree_id_important(enum btree_id btree) 55 + { 56 + if (btree_id_is_alloc(btree)) 57 + return false; 58 + 59 + switch (btree) { 60 + case BTREE_ID_quotas: 61 + case BTREE_ID_snapshot_trees: 62 + case BTREE_ID_logged_ops: 63 + case BTREE_ID_rebalance_work: 64 + case BTREE_ID_subvolume_children: 65 + return false; 66 + default: 67 + return true; 68 + } 69 + } 70 + 50 71 static const char * const bch2_gc_phase_strs[] = { 51 72 #define x(n) #n, 52 73 GC_PHASES() ··· 555 534 r->error = 0; 556 535 557 536 if (!bch2_btree_has_scanned_nodes(c, i)) { 558 - mustfix_fsck_err(trans, btree_root_unreadable_and_scan_found_nothing, 559 - "no nodes found for btree %s, continue?", buf.buf); 537 + __fsck_err(trans, 538 + FSCK_CAN_FIX|(!btree_id_important(i) ? FSCK_AUTOFIX : 0), 539 + btree_root_unreadable_and_scan_found_nothing, 540 + "no nodes found for btree %s, continue?", buf.buf); 560 541 bch2_btree_root_alloc_fake_trans(trans, i, 0); 561 542 } else { 562 543 bch2_btree_root_alloc_fake_trans(trans, i, 1);
+1 -1
fs/bcachefs/btree_journal_iter.c
··· 288 288 .size = max_t(size_t, keys->size, 8) * 2, 289 289 }; 290 290 291 - new_keys.data = kvmalloc_array(new_keys.size, sizeof(new_keys.data[0]), GFP_KERNEL); 291 + new_keys.data = bch2_kvmalloc(new_keys.size * sizeof(new_keys.data[0]), GFP_KERNEL); 292 292 if (!new_keys.data) { 293 293 bch_err(c, "%s: error allocating new key array (size %zu)", 294 294 __func__, new_keys.size);
+32 -17
fs/bcachefs/btree_update_interior.c
··· 1389 1389 printbuf_exit(&buf); 1390 1390 } 1391 1391 1392 - static void 1392 + static int 1393 1393 bch2_btree_insert_keys_interior(struct btree_update *as, 1394 1394 struct btree_trans *trans, 1395 1395 struct btree_path *path, ··· 1411 1411 insert = bkey_next(insert)) 1412 1412 bch2_insert_fixup_btree_ptr(as, trans, path, b, &node_iter, insert); 1413 1413 1414 - if (bch2_btree_node_check_topology(trans, b)) { 1414 + int ret = bch2_btree_node_check_topology(trans, b); 1415 + if (ret) { 1415 1416 struct printbuf buf = PRINTBUF; 1416 1417 1417 1418 for (struct bkey_i *k = keys->keys; ··· 1422 1421 prt_newline(&buf); 1423 1422 } 1424 1423 1425 - panic("%s(): check_topology error: inserted keys\n%s", __func__, buf.buf); 1424 + bch2_fs_fatal_error(as->c, "%ps -> %s(): check_topology error %s: inserted keys\n%s", 1425 + (void *) _RET_IP_, __func__, bch2_err_str(ret), buf.buf); 1426 + dump_stack(); 1427 + return ret; 1426 1428 } 1427 1429 1428 1430 memmove_u64s_down(keys->keys, insert, keys->top_p - insert->_data); 1429 1431 keys->top_p -= insert->_data - keys->keys_p; 1432 + return 0; 1430 1433 } 1431 1434 1432 1435 static bool key_deleted_in_insert(struct keylist *insert_keys, struct bpos pos) ··· 1564 1559 * nodes that were coalesced, and thus in the middle of a child node post 1565 1560 * coalescing: 1566 1561 */ 1567 - static void btree_split_insert_keys(struct btree_update *as, 1568 - struct btree_trans *trans, 1569 - btree_path_idx_t path_idx, 1570 - struct btree *b, 1571 - struct keylist *keys) 1562 + static int btree_split_insert_keys(struct btree_update *as, 1563 + struct btree_trans *trans, 1564 + btree_path_idx_t path_idx, 1565 + struct btree *b, 1566 + struct keylist *keys) 1572 1567 { 1573 1568 struct btree_path *path = trans->paths + path_idx; 1574 1569 ··· 1578 1573 1579 1574 bch2_btree_node_iter_init(&node_iter, b, &bch2_keylist_front(keys)->k.p); 1580 1575 1581 - bch2_btree_insert_keys_interior(as, trans, path, b, node_iter, keys); 1576 + int ret = 
bch2_btree_insert_keys_interior(as, trans, path, b, node_iter, keys); 1577 + if (ret) 1578 + return ret; 1582 1579 } 1580 + 1581 + return 0; 1583 1582 } 1584 1583 1585 1584 static int btree_split(struct btree_update *as, struct btree_trans *trans, ··· 1616 1607 __btree_split_node(as, trans, b, n, keys); 1617 1608 1618 1609 if (keys) { 1619 - btree_split_insert_keys(as, trans, path, n1, keys); 1620 - btree_split_insert_keys(as, trans, path, n2, keys); 1610 + ret = btree_split_insert_keys(as, trans, path, n1, keys) ?: 1611 + btree_split_insert_keys(as, trans, path, n2, keys); 1612 + if (ret) 1613 + goto err; 1621 1614 BUG_ON(!bch2_keylist_empty(keys)); 1622 1615 } 1623 1616 ··· 1665 1654 n3->sib_u64s[0] = U16_MAX; 1666 1655 n3->sib_u64s[1] = U16_MAX; 1667 1656 1668 - btree_split_insert_keys(as, trans, path, n3, &as->parent_keys); 1657 + ret = btree_split_insert_keys(as, trans, path, n3, &as->parent_keys); 1658 + if (ret) 1659 + goto err; 1669 1660 } 1670 1661 } else { 1671 1662 trace_and_count(c, btree_node_compact, trans, b); ··· 1675 1662 n1 = bch2_btree_node_alloc_replacement(as, trans, b); 1676 1663 1677 1664 if (keys) { 1678 - btree_split_insert_keys(as, trans, path, n1, keys); 1665 + ret = btree_split_insert_keys(as, trans, path, n1, keys); 1666 + if (ret) 1667 + goto err; 1679 1668 BUG_ON(!bch2_keylist_empty(keys)); 1680 1669 } 1681 1670 ··· 1824 1809 goto split; 1825 1810 } 1826 1811 1827 - ret = bch2_btree_node_check_topology(trans, b); 1812 + 1813 + ret = bch2_btree_node_check_topology(trans, b) ?: 1814 + bch2_btree_insert_keys_interior(as, trans, path, b, 1815 + path->l[b->c.level].iter, keys); 1828 1816 if (ret) { 1829 1817 bch2_btree_node_unlock_write(trans, path, b); 1830 1818 return ret; 1831 1819 } 1832 - 1833 - bch2_btree_insert_keys_interior(as, trans, path, b, 1834 - path->l[b->c.level].iter, keys); 1835 1820 1836 1821 trans_for_each_path_with_node(trans, b, linked, i) 1837 1822 bch2_btree_node_iter_peek(&linked->l[b->c.level].iter, b);
+10 -5
fs/bcachefs/buckets.c
··· 604 604 } 605 605 606 606 struct bpos bucket = PTR_BUCKET_POS(ca, &p.ptr); 607 + if (!bucket_valid(ca, bucket.offset)) { 608 + if (insert) { 609 + bch2_dev_bucket_missing(ca, bucket.offset); 610 + ret = -BCH_ERR_trigger_pointer; 611 + } 612 + goto err; 613 + } 607 614 608 615 if (flags & BTREE_TRIGGER_transactional) { 609 616 struct bkey_i_alloc_v4 *a = bch2_trans_start_alloc_update(trans, bucket, 0); ··· 1314 1307 old_bucket_gens = rcu_dereference_protected(ca->bucket_gens, 1); 1315 1308 1316 1309 if (resize) { 1317 - bucket_gens->nbuckets = min(bucket_gens->nbuckets, 1318 - old_bucket_gens->nbuckets); 1319 - bucket_gens->nbuckets_minus_first = 1320 - bucket_gens->nbuckets - bucket_gens->first_bucket; 1310 + u64 copy = min(bucket_gens->nbuckets, 1311 + old_bucket_gens->nbuckets); 1321 1312 memcpy(bucket_gens->b, 1322 1313 old_bucket_gens->b, 1323 - bucket_gens->nbuckets); 1314 + sizeof(bucket_gens->b[0]) * copy); 1324 1315 } 1325 1316 1326 1317 rcu_assign_pointer(ca->bucket_gens, bucket_gens);
+2 -2
fs/bcachefs/dirent.c
··· 685 685 vfs_d_type(d.v->d_type)); 686 686 if (ret) 687 687 ctx->pos = d.k->p.offset + 1; 688 - return ret; 688 + return !ret; 689 689 } 690 690 691 691 int bch2_readdir(struct bch_fs *c, subvol_inum inum, struct dir_context *ctx) ··· 710 710 if (ret2 > 0) 711 711 continue; 712 712 713 - ret2 ?: drop_locks_do(trans, bch2_dir_emit(ctx, dirent, target)); 713 + ret2 ?: (bch2_trans_unlock(trans), bch2_dir_emit(ctx, dirent, target)); 714 714 }))); 715 715 716 716 bch2_bkey_buf_exit(&sk, c);
+12 -13
fs/bcachefs/disk_groups.c
··· 470 470 471 471 int __bch2_dev_group_set(struct bch_fs *c, struct bch_dev *ca, const char *name) 472 472 { 473 - struct bch_member *mi; 474 - int ret, v = -1; 473 + lockdep_assert_held(&c->sb_lock); 475 474 476 - if (!strlen(name) || !strcmp(name, "none")) 477 - return 0; 478 475 479 - v = bch2_disk_path_find_or_create(&c->disk_sb, name); 480 - if (v < 0) 481 - return v; 476 + if (!strlen(name) || !strcmp(name, "none")) { 477 + struct bch_member *mi = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx); 478 + SET_BCH_MEMBER_GROUP(mi, 0); 479 + } else { 480 + int v = bch2_disk_path_find_or_create(&c->disk_sb, name); 481 + if (v < 0) 482 + return v; 482 483 483 - ret = bch2_sb_disk_groups_to_cpu(c); 484 - if (ret) 485 - return ret; 484 + struct bch_member *mi = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx); 485 + SET_BCH_MEMBER_GROUP(mi, v + 1); 486 + } 486 487 487 - mi = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx); 488 - SET_BCH_MEMBER_GROUP(mi, v + 1); 489 - return 0; 488 + return bch2_sb_disk_groups_to_cpu(c); 490 489 } 491 490 492 491 int bch2_dev_group_set(struct bch_fs *c, struct bch_dev *ca, const char *name)
+2 -2
fs/bcachefs/ec.c
··· 2204 2204 2205 2205 static bool bch2_fs_ec_flush_done(struct bch_fs *c) 2206 2206 { 2207 - bool ret; 2207 + sched_annotate_sleep(); 2208 2208 2209 2209 mutex_lock(&c->ec_stripe_new_lock); 2210 - ret = list_empty(&c->ec_stripe_new_list); 2210 + bool ret = list_empty(&c->ec_stripe_new_list); 2211 2211 mutex_unlock(&c->ec_stripe_new_lock); 2212 2212 2213 2213 return ret;
+3 -1
fs/bcachefs/error.c
··· 478 478 } else if (!test_bit(BCH_FS_fsck_running, &c->flags)) { 479 479 if (c->opts.errors != BCH_ON_ERROR_continue || 480 480 !(flags & (FSCK_CAN_FIX|FSCK_CAN_IGNORE))) { 481 - prt_str(out, ", shutting down"); 481 + prt_str_indented(out, ", shutting down\n" 482 + "error not marked as autofix and not in fsck\n" 483 + "run fsck, and forward to devs so error can be marked for self-healing"); 482 484 inconsistent = true; 483 485 print = true; 484 486 ret = -BCH_ERR_fsck_errors_not_fixed;
+35 -9
fs/bcachefs/fs-io.c
··· 144 144 void __bch2_i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode, 145 145 struct quota_res *quota_res, s64 sectors) 146 146 { 147 - bch2_fs_inconsistent_on((s64) inode->v.i_blocks + sectors < 0, c, 148 - "inode %lu i_blocks underflow: %llu + %lli < 0 (ondisk %lli)", 149 - inode->v.i_ino, (u64) inode->v.i_blocks, sectors, 150 - inode->ei_inode.bi_sectors); 147 + if (unlikely((s64) inode->v.i_blocks + sectors < 0)) { 148 + struct printbuf buf = PRINTBUF; 149 + bch2_log_msg_start(c, &buf); 150 + prt_printf(&buf, "inode %lu i_blocks underflow: %llu + %lli < 0 (ondisk %lli)", 151 + inode->v.i_ino, (u64) inode->v.i_blocks, sectors, 152 + inode->ei_inode.bi_sectors); 153 + 154 + bool repeat = false, print = false, suppress = false; 155 + bch2_count_fsck_err(c, vfs_inode_i_blocks_underflow, buf.buf, &repeat, &print, &suppress); 156 + if (print) 157 + bch2_print_str(c, buf.buf); 158 + printbuf_exit(&buf); 159 + 160 + if (sectors < 0) 161 + sectors = -inode->v.i_blocks; 162 + else 163 + sectors = 0; 164 + } 165 + 151 166 inode->v.i_blocks += sectors; 152 167 153 168 #ifdef CONFIG_BCACHEFS_QUOTA ··· 517 502 goto err; 518 503 } 519 504 520 - bch2_fs_inconsistent_on(!inode->v.i_size && inode->v.i_blocks && 521 - !bch2_journal_error(&c->journal), c, 522 - "inode %lu truncated to 0 but i_blocks %llu (ondisk %lli)", 523 - inode->v.i_ino, (u64) inode->v.i_blocks, 524 - inode->ei_inode.bi_sectors); 505 + if (unlikely(!inode->v.i_size && inode->v.i_blocks && 506 + !bch2_journal_error(&c->journal))) { 507 + struct printbuf buf = PRINTBUF; 508 + bch2_log_msg_start(c, &buf); 509 + prt_printf(&buf, 510 + "inode %lu truncated to 0 but i_blocks %llu (ondisk %lli)", 511 + inode->v.i_ino, (u64) inode->v.i_blocks, 512 + inode->ei_inode.bi_sectors); 513 + 514 + bool repeat = false, print = false, suppress = false; 515 + bch2_count_fsck_err(c, vfs_inode_i_blocks_not_zero_at_truncate, buf.buf, 516 + &repeat, &print, &suppress); 517 + if (print) 518 + bch2_print_str(c, 
buf.buf); 519 + printbuf_exit(&buf); 520 + } 525 521 526 522 ret = bch2_setattr_nonsize(idmap, inode, iattr); 527 523 err:
+10 -5
fs/bcachefs/fs.c
··· 66 66 67 67 if (bch2_inode_casefold(c, &inode->ei_inode)) 68 68 inode->v.i_flags |= S_CASEFOLD; 69 + else 70 + inode->v.i_flags &= ~S_CASEFOLD; 69 71 } 70 72 71 73 void bch2_inode_update_after_write(struct btree_trans *trans, ··· 850 848 set_nlink(&inode->v, 0); 851 849 } 852 850 853 - if (IS_CASEFOLDED(vdir)) { 851 + if (IS_CASEFOLDED(vdir)) 854 852 d_invalidate(dentry); 855 - d_prune_aliases(&inode->v); 856 - } 857 853 err: 858 854 bch2_trans_put(trans); 859 855 bch2_unlock_inodes(INODE_UPDATE_LOCK, dir, inode); ··· 1464 1464 unsigned sectors = cur->kbuf.k->k.size; 1465 1465 s64 offset_into_extent = 0; 1466 1466 enum btree_id data_btree = BTREE_ID_extents; 1467 - int ret = bch2_read_indirect_extent(trans, &data_btree, &offset_into_extent, 1468 - &cur->kbuf); 1467 + ret = bch2_read_indirect_extent(trans, &data_btree, &offset_into_extent, 1468 + &cur->kbuf); 1469 1469 if (ret) 1470 1470 goto err; 1471 1471 ··· 2570 2570 ret = bch2_fs_start(c); 2571 2571 if (ret) 2572 2572 goto err_put_super; 2573 + 2574 + #ifdef CONFIG_UNICODE 2575 + sb->s_encoding = c->cf_encoding; 2576 + #endif 2577 + generic_set_sb_d_ops(sb); 2573 2578 2574 2579 vinode = bch2_vfs_inode_get(c, BCACHEFS_ROOT_SUBVOL_INUM); 2575 2580 ret = PTR_ERR_OR_ZERO(vinode);
+21
fs/bcachefs/io_write.c
··· 255 255 } 256 256 257 257 if (i_sectors_delta) { 258 + s64 bi_sectors = le64_to_cpu(inode->v.bi_sectors); 259 + if (unlikely(bi_sectors + i_sectors_delta < 0)) { 260 + struct bch_fs *c = trans->c; 261 + struct printbuf buf = PRINTBUF; 262 + bch2_log_msg_start(c, &buf); 263 + prt_printf(&buf, "inode %llu i_sectors underflow: %lli + %lli < 0", 264 + extent_iter->pos.inode, bi_sectors, i_sectors_delta); 265 + 266 + bool repeat = false, print = false, suppress = false; 267 + bch2_count_fsck_err(c, inode_i_sectors_underflow, buf.buf, 268 + &repeat, &print, &suppress); 269 + if (print) 270 + bch2_print_str(c, buf.buf); 271 + printbuf_exit(&buf); 272 + 273 + if (i_sectors_delta < 0) 274 + i_sectors_delta = -bi_sectors; 275 + else 276 + i_sectors_delta = 0; 277 + } 278 + 258 279 le64_add_cpu(&inode->v.bi_sectors, i_sectors_delta); 259 280 inode_update_flags = 0; 260 281 }
+1 -1
fs/bcachefs/journal_io.c
··· 1782 1782 struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, WRITE); 1783 1783 if (!ca) { 1784 1784 /* XXX: fix this */ 1785 - bch_err(c, "missing device for journal write\n"); 1785 + bch_err(c, "missing device %u for journal write", ptr->dev); 1786 1786 continue; 1787 1787 } 1788 1788
+3
fs/bcachefs/namei.c
··· 343 343 bool ret = false; 344 344 345 345 for (id = 0; id < Inode_opt_nr; id++) { 346 + if (!S_ISDIR(dst_u->bi_mode) && id == Inode_opt_casefold) 347 + continue; 348 + 346 349 /* Skip attributes that were explicitly set on this inode */ 347 350 if (dst_u->bi_fields_set & (1 << id)) 348 351 continue;
+4
fs/bcachefs/sb-downgrade.c
··· 20 20 * x(version, recovery_passes, errors...) 21 21 */ 22 22 #define UPGRADE_TABLE() \ 23 + x(snapshot_2, \ 24 + RECOVERY_PASS_ALL_FSCK, \ 25 + BCH_FSCK_ERR_subvol_root_wrong_bi_subvol, \ 26 + BCH_FSCK_ERR_subvol_not_master_and_not_snapshot) \ 23 27 x(backpointers, \ 24 28 RECOVERY_PASS_ALL_FSCK) \ 25 29 x(inode_v3, \
+9 -4
fs/bcachefs/sb-errors_format.h
··· 46 46 x(btree_node_unsupported_version, 34, 0) \ 47 47 x(btree_node_bset_older_than_sb_min, 35, 0) \ 48 48 x(btree_node_bset_newer_than_sb, 36, 0) \ 49 - x(btree_node_data_missing, 37, 0) \ 49 + x(btree_node_data_missing, 37, FSCK_AUTOFIX) \ 50 50 x(btree_node_bset_after_end, 38, 0) \ 51 51 x(btree_node_replicas_sectors_written_mismatch, 39, 0) \ 52 52 x(btree_node_replicas_data_mismatch, 40, 0) \ ··· 205 205 x(snapshot_bad_depth, 184, 0) \ 206 206 x(snapshot_bad_skiplist, 185, 0) \ 207 207 x(subvol_pos_bad, 186, 0) \ 208 - x(subvol_not_master_and_not_snapshot, 187, 0) \ 208 + x(subvol_not_master_and_not_snapshot, 187, FSCK_AUTOFIX) \ 209 209 x(subvol_to_missing_root, 188, 0) \ 210 - x(subvol_root_wrong_bi_subvol, 189, 0) \ 210 + x(subvol_root_wrong_bi_subvol, 189, FSCK_AUTOFIX) \ 211 211 x(bkey_in_missing_snapshot, 190, 0) \ 212 212 x(inode_pos_inode_nonzero, 191, 0) \ 213 213 x(inode_pos_blockdev_range, 192, 0) \ ··· 236 236 x(inode_has_child_snapshots_wrong, 287, 0) \ 237 237 x(inode_unreachable, 210, FSCK_AUTOFIX) \ 238 238 x(inode_journal_seq_in_future, 299, FSCK_AUTOFIX) \ 239 + x(inode_i_sectors_underflow, 312, FSCK_AUTOFIX) \ 240 + x(vfs_inode_i_blocks_underflow, 311, FSCK_AUTOFIX) \ 241 + x(vfs_inode_i_blocks_not_zero_at_truncate, 313, FSCK_AUTOFIX) \ 239 242 x(deleted_inode_but_clean, 211, FSCK_AUTOFIX) \ 240 243 x(deleted_inode_missing, 212, FSCK_AUTOFIX) \ 241 244 x(deleted_inode_is_dir, 213, FSCK_AUTOFIX) \ ··· 320 317 x(directory_size_mismatch, 303, FSCK_AUTOFIX) \ 321 318 x(dirent_cf_name_too_big, 304, 0) \ 322 319 x(dirent_stray_data_after_cf_name, 305, 0) \ 323 - x(MAX, 308, 0) 320 + x(rebalance_work_incorrectly_set, 309, FSCK_AUTOFIX) \ 321 + x(rebalance_work_incorrectly_unset, 310, FSCK_AUTOFIX) \ 322 + x(MAX, 314, 0) 324 323 325 324 enum bch_sb_error_id { 326 325 #define x(t, n, ...) BCH_FSCK_ERR_##t = n,
+4 -2
fs/bcachefs/sb-members.c
··· 15 15 bch2_fs_inconsistent(c, "pointer to nonexistent device %u", dev); 16 16 } 17 17 18 - void bch2_dev_bucket_missing(struct bch_fs *c, struct bpos bucket) 18 + void bch2_dev_bucket_missing(struct bch_dev *ca, u64 bucket) 19 19 { 20 - bch2_fs_inconsistent(c, "pointer to nonexistent bucket %llu:%llu", bucket.inode, bucket.offset); 20 + bch2_fs_inconsistent(ca->fs, 21 + "pointer to nonexistent bucket %llu on device %s (valid range %u-%llu)", 22 + bucket, ca->name, ca->mi.first_bucket, ca->mi.nbuckets); 21 23 } 22 24 23 25 #define x(t, n, ...) [n] = #t,
+8 -5
fs/bcachefs/sb-members.h
··· 249 249 static inline struct bch_dev *bch2_dev_bucket_tryget_noerror(struct bch_fs *c, struct bpos bucket) 250 250 { 251 251 struct bch_dev *ca = bch2_dev_tryget_noerror(c, bucket.inode); 252 - if (ca && !bucket_valid(ca, bucket.offset)) { 252 + if (ca && unlikely(!bucket_valid(ca, bucket.offset))) { 253 253 bch2_dev_put(ca); 254 254 ca = NULL; 255 255 } 256 256 return ca; 257 257 } 258 258 259 - void bch2_dev_bucket_missing(struct bch_fs *, struct bpos); 259 + void bch2_dev_bucket_missing(struct bch_dev *, u64); 260 260 261 261 static inline struct bch_dev *bch2_dev_bucket_tryget(struct bch_fs *c, struct bpos bucket) 262 262 { 263 - struct bch_dev *ca = bch2_dev_bucket_tryget_noerror(c, bucket); 264 - if (!ca) 265 - bch2_dev_bucket_missing(c, bucket); 263 + struct bch_dev *ca = bch2_dev_tryget(c, bucket.inode); 264 + if (ca && unlikely(!bucket_valid(ca, bucket.offset))) { 265 + bch2_dev_bucket_missing(ca, bucket.offset); 266 + bch2_dev_put(ca); 267 + ca = NULL; 268 + } 266 269 return ca; 267 270 } 268 271
+3 -2
fs/bcachefs/subvolume.c
··· 6 6 #include "errcode.h" 7 7 #include "error.h" 8 8 #include "fs.h" 9 + #include "recovery_passes.h" 9 10 #include "snapshot.h" 10 11 #include "subvolume.h" 11 12 ··· 45 44 ret = bch2_snapshot_lookup(trans, snapid, &snapshot); 46 45 47 46 if (bch2_err_matches(ret, ENOENT)) 48 - bch_err(c, "subvolume %llu points to nonexistent snapshot %u", 49 - k.k->p.offset, snapid); 47 + return bch2_run_explicit_recovery_pass(c, 48 + BCH_RECOVERY_PASS_reconstruct_snapshots) ?: ret; 50 49 if (ret) 51 50 return ret; 52 51
+27 -19
fs/bcachefs/super.c
··· 531 531 for (unsigned i = 0; i < BCH_TIME_STAT_NR; i++) 532 532 bch2_time_stats_exit(&c->times[i]); 533 533 534 + #ifdef CONFIG_UNICODE 535 + utf8_unload(c->cf_encoding); 536 + #endif 537 + 534 538 bch2_find_btree_nodes_exit(&c->found_btree_nodes); 535 539 bch2_free_pending_node_rewrites(c); 536 540 bch2_free_fsck_errs(c); ··· 827 823 if (ret) 828 824 goto err; 829 825 830 - #ifdef CONFIG_UNICODE 831 - /* Default encoding until we can potentially have more as an option. */ 832 - c->cf_encoding = utf8_load(BCH_FS_DEFAULT_UTF8_ENCODING); 833 - if (IS_ERR(c->cf_encoding)) { 834 - printk(KERN_ERR "Cannot load UTF-8 encoding for filesystem. Version: %u.%u.%u", 835 - unicode_major(BCH_FS_DEFAULT_UTF8_ENCODING), 836 - unicode_minor(BCH_FS_DEFAULT_UTF8_ENCODING), 837 - unicode_rev(BCH_FS_DEFAULT_UTF8_ENCODING)); 838 - ret = -EINVAL; 839 - goto err; 840 - } 841 - #else 842 - if (c->sb.features & BIT_ULL(BCH_FEATURE_casefolding)) { 843 - printk(KERN_ERR "Cannot mount a filesystem with casefolding on a kernel without CONFIG_UNICODE\n"); 844 - ret = -EINVAL; 845 - goto err; 846 - } 847 - #endif 848 - 849 826 pr_uuid(&name, c->sb.user_uuid.b); 850 827 ret = name.allocation_failure ? -BCH_ERR_ENOMEM_fs_name_alloc : 0; 851 828 if (ret) ··· 925 940 bch2_fs_fs_io_direct_init(c); 926 941 if (ret) 927 942 goto err; 943 + 944 + #ifdef CONFIG_UNICODE 945 + /* Default encoding until we can potentially have more as an option. */ 946 + c->cf_encoding = utf8_load(BCH_FS_DEFAULT_UTF8_ENCODING); 947 + if (IS_ERR(c->cf_encoding)) { 948 + printk(KERN_ERR "Cannot load UTF-8 encoding for filesystem. 
Version: %u.%u.%u", 949 + unicode_major(BCH_FS_DEFAULT_UTF8_ENCODING), 950 + unicode_minor(BCH_FS_DEFAULT_UTF8_ENCODING), 951 + unicode_rev(BCH_FS_DEFAULT_UTF8_ENCODING)); 952 + ret = -EINVAL; 953 + goto err; 954 + } 955 + bch_info(c, "Using encoding defined by superblock: utf8-%u.%u.%u", 956 + unicode_major(BCH_FS_DEFAULT_UTF8_ENCODING), 957 + unicode_minor(BCH_FS_DEFAULT_UTF8_ENCODING), 958 + unicode_rev(BCH_FS_DEFAULT_UTF8_ENCODING)); 959 + #else 960 + if (c->sb.features & BIT_ULL(BCH_FEATURE_casefolding)) { 961 + printk(KERN_ERR "Cannot mount a filesystem with casefolding on a kernel without CONFIG_UNICODE\n"); 962 + ret = -EINVAL; 963 + goto err; 964 + } 965 + #endif 928 966 929 967 for (i = 0; i < c->sb.nr_devices; i++) { 930 968 if (!bch2_member_exists(c->disk_sb.sb, i))
+7 -1
fs/bcachefs/xattr_format.h
··· 13 13 __u8 x_type; 14 14 __u8 x_name_len; 15 15 __le16 x_val_len; 16 - __u8 x_name[] __counted_by(x_name_len); 16 + /* 17 + * x_name contains the name and value counted by 18 + * x_name_len + x_val_len. The introduction of 19 + * __counted_by(x_name_len) caused a false positive 20 + * detection of an out of bounds write. 21 + */ 22 + __u8 x_name[]; 17 23 } __packed __aligned(8); 18 24 19 25 #endif /* _BCACHEFS_XATTR_FORMAT_H */
+1 -1
fs/btrfs/extent_io.c
··· 2047 2047 subpage->bitmaps)) { 2048 2048 spin_unlock_irqrestore(&subpage->lock, flags); 2049 2049 spin_unlock(&folio->mapping->i_private_lock); 2050 - bit_start++; 2050 + bit_start += sectors_per_node; 2051 2051 continue; 2052 2052 } 2053 2053
+8 -5
fs/btrfs/inode.c
··· 2129 2129 2130 2130 /* 2131 2131 * If the found extent starts after requested offset, then 2132 - * adjust extent_end to be right before this extent begins 2132 + * adjust cur_offset to be right before this extent begins. 2133 2133 */ 2134 2134 if (found_key.offset > cur_offset) { 2135 - extent_end = found_key.offset; 2136 - extent_type = 0; 2137 - goto must_cow; 2135 + if (cow_start == (u64)-1) 2136 + cow_start = cur_offset; 2137 + cur_offset = found_key.offset; 2138 + goto next_slot; 2138 2139 } 2139 2140 2140 2141 /* ··· 5682 5681 return inode; 5683 5682 5684 5683 path = btrfs_alloc_path(); 5685 - if (!path) 5684 + if (!path) { 5685 + iget_failed(&inode->vfs_inode); 5686 5686 return ERR_PTR(-ENOMEM); 5687 + } 5687 5688 5688 5689 ret = btrfs_read_locked_inode(inode, path); 5689 5690 btrfs_free_path(path);
+1 -6
fs/notify/fanotify/fanotify_user.c
··· 1961 1961 return -EINVAL; 1962 1962 1963 1963 if (mark_cmd == FAN_MARK_FLUSH) { 1964 - if (mark_type == FAN_MARK_MOUNT) 1965 - fsnotify_clear_vfsmount_marks_by_group(group); 1966 - else if (mark_type == FAN_MARK_FILESYSTEM) 1967 - fsnotify_clear_sb_marks_by_group(group); 1968 - else 1969 - fsnotify_clear_inode_marks_by_group(group); 1964 + fsnotify_clear_marks_by_group(group, obj_type); 1970 1965 return 0; 1971 1966 } 1972 1967
+2 -3
fs/smb/client/cifspdu.h
··· 1266 1266 typedef struct smb_com_setattr_req { 1267 1267 struct smb_hdr hdr; /* wct = 8 */ 1268 1268 __le16 attr; 1269 - __le16 time_low; 1270 - __le16 time_high; 1269 + __le32 last_write_time; 1271 1270 __le16 reserved[5]; /* must be zero */ 1272 - __u16 ByteCount; 1271 + __le16 ByteCount; 1273 1272 __u8 BufferFormat; /* 4 = ASCII */ 1274 1273 unsigned char fileName[]; 1275 1274 } __attribute__((packed)) SETATTR_REQ;
+4
fs/smb/client/cifsproto.h
··· 395 395 extern int CIFSSMBQFSPosixInfo(const unsigned int xid, struct cifs_tcon *tcon, 396 396 struct kstatfs *FSData); 397 397 398 + extern int SMBSetInformation(const unsigned int xid, struct cifs_tcon *tcon, 399 + const char *fileName, __le32 attributes, __le64 write_time, 400 + const struct nls_table *nls_codepage, 401 + struct cifs_sb_info *cifs_sb); 398 402 extern int CIFSSMBSetPathInfo(const unsigned int xid, struct cifs_tcon *tcon, 399 403 const char *fileName, const FILE_BASIC_INFO *data, 400 404 const struct nls_table *nls_codepage,
+57
fs/smb/client/cifssmb.c
··· 5171 5171 return rc; 5172 5172 } 5173 5173 5174 + int 5175 + SMBSetInformation(const unsigned int xid, struct cifs_tcon *tcon, 5176 + const char *fileName, __le32 attributes, __le64 write_time, 5177 + const struct nls_table *nls_codepage, 5178 + struct cifs_sb_info *cifs_sb) 5179 + { 5180 + SETATTR_REQ *pSMB; 5181 + SETATTR_RSP *pSMBr; 5182 + struct timespec64 ts; 5183 + int bytes_returned; 5184 + int name_len; 5185 + int rc; 5186 + 5187 + cifs_dbg(FYI, "In %s path %s\n", __func__, fileName); 5188 + 5189 + retry: 5190 + rc = smb_init(SMB_COM_SETATTR, 8, tcon, (void **) &pSMB, 5191 + (void **) &pSMBr); 5192 + if (rc) 5193 + return rc; 5194 + 5195 + if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { 5196 + name_len = 5197 + cifsConvertToUTF16((__le16 *) pSMB->fileName, 5198 + fileName, PATH_MAX, nls_codepage, 5199 + cifs_remap(cifs_sb)); 5200 + name_len++; /* trailing null */ 5201 + name_len *= 2; 5202 + } else { 5203 + name_len = copy_path_name(pSMB->fileName, fileName); 5204 + } 5205 + /* Only few attributes can be set by this command, others are not accepted by Win9x. */ 5206 + pSMB->attr = cpu_to_le16(le32_to_cpu(attributes) & 5207 + (ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM | ATTR_ARCHIVE)); 5208 + /* Zero write time value (in both NT and SETATTR formats) means to not change it. 
*/ 5209 + if (le64_to_cpu(write_time) != 0) { 5210 + ts = cifs_NTtimeToUnix(write_time); 5211 + pSMB->last_write_time = cpu_to_le32(ts.tv_sec); 5212 + } 5213 + pSMB->BufferFormat = 0x04; 5214 + name_len++; /* account for buffer type byte */ 5215 + inc_rfc1001_len(pSMB, (__u16)name_len); 5216 + pSMB->ByteCount = cpu_to_le16(name_len); 5217 + 5218 + rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, 5219 + (struct smb_hdr *) pSMBr, &bytes_returned, 0); 5220 + if (rc) 5221 + cifs_dbg(FYI, "Send error in %s = %d\n", __func__, rc); 5222 + 5223 + cifs_buf_release(pSMB); 5224 + 5225 + if (rc == -EAGAIN) 5226 + goto retry; 5227 + 5228 + return rc; 5229 + } 5230 + 5174 5231 /* Some legacy servers such as NT4 require that the file times be set on 5175 5232 an open handle, rather than by pathname - this is awkward due to 5176 5233 potential access conflicts on the open, but it is unavoidable for these
+1 -22
fs/smb/client/connect.c
··· 3753 3753 } 3754 3754 } 3755 3755 3756 - /* 3757 - * Clamp the rsize/wsize mount arguments if they are too big for the server 3758 - * and set the rsize/wsize to the negotiated values if not passed in by 3759 - * the user on mount 3760 - */ 3761 - if ((cifs_sb->ctx->wsize == 0) || 3762 - (cifs_sb->ctx->wsize > server->ops->negotiate_wsize(tcon, ctx))) { 3763 - cifs_sb->ctx->wsize = 3764 - round_down(server->ops->negotiate_wsize(tcon, ctx), PAGE_SIZE); 3765 - /* 3766 - * in the very unlikely event that the server sent a max write size under PAGE_SIZE, 3767 - * (which would get rounded down to 0) then reset wsize to absolute minimum eg 4096 3768 - */ 3769 - if (cifs_sb->ctx->wsize == 0) { 3770 - cifs_sb->ctx->wsize = PAGE_SIZE; 3771 - cifs_dbg(VFS, "wsize too small, reset to minimum ie PAGE_SIZE, usually 4096\n"); 3772 - } 3773 - } 3774 - if ((cifs_sb->ctx->rsize == 0) || 3775 - (cifs_sb->ctx->rsize > server->ops->negotiate_rsize(tcon, ctx))) 3776 - cifs_sb->ctx->rsize = server->ops->negotiate_rsize(tcon, ctx); 3777 - 3756 + cifs_negotiate_iosize(server, cifs_sb->ctx, tcon); 3778 3757 /* 3779 3758 * The cookie is initialized from volume info returned above. 3780 3759 * Inside cifs_fscache_get_super_cookie it checks
+2 -4
fs/smb/client/file.c
··· 160 160 server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses); 161 161 rdata->server = server; 162 162 163 - if (cifs_sb->ctx->rsize == 0) 164 - cifs_sb->ctx->rsize = 165 - server->ops->negotiate_rsize(tlink_tcon(req->cfile->tlink), 166 - cifs_sb->ctx); 163 + cifs_negotiate_rsize(server, cifs_sb->ctx, 164 + tlink_tcon(req->cfile->tlink)); 167 165 168 166 rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize, 169 167 &size, &rdata->credits);
+6 -19
fs/smb/client/fs_context.c
··· 1021 1021 struct dentry *root = fc->root; 1022 1022 struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb); 1023 1023 struct cifs_ses *ses = cifs_sb_master_tcon(cifs_sb)->ses; 1024 + unsigned int rsize = ctx->rsize, wsize = ctx->wsize; 1024 1025 char *new_password = NULL, *new_password2 = NULL; 1025 1026 bool need_recon = false; 1026 1027 int rc; ··· 1104 1103 STEAL_STRING(cifs_sb, ctx, iocharset); 1105 1104 1106 1105 /* if rsize or wsize not passed in on remount, use previous values */ 1107 - if (ctx->rsize == 0) 1108 - ctx->rsize = cifs_sb->ctx->rsize; 1109 - if (ctx->wsize == 0) 1110 - ctx->wsize = cifs_sb->ctx->wsize; 1111 - 1106 + ctx->rsize = rsize ? CIFS_ALIGN_RSIZE(fc, rsize) : cifs_sb->ctx->rsize; 1107 + ctx->wsize = wsize ? CIFS_ALIGN_WSIZE(fc, wsize) : cifs_sb->ctx->wsize; 1112 1108 1113 1109 smb3_cleanup_fs_context_contents(cifs_sb->ctx); 1114 1110 rc = smb3_fs_context_dup(cifs_sb->ctx, ctx); ··· 1310 1312 __func__); 1311 1313 goto cifs_parse_mount_err; 1312 1314 } 1313 - ctx->bsize = result.uint_32; 1315 + ctx->bsize = CIFS_ALIGN_BSIZE(fc, result.uint_32); 1314 1316 ctx->got_bsize = true; 1315 1317 break; 1316 1318 case Opt_rasize: ··· 1334 1336 ctx->rasize = result.uint_32; 1335 1337 break; 1336 1338 case Opt_rsize: 1337 - ctx->rsize = result.uint_32; 1339 + ctx->rsize = CIFS_ALIGN_RSIZE(fc, result.uint_32); 1338 1340 ctx->got_rsize = true; 1339 1341 ctx->vol_rsize = ctx->rsize; 1340 1342 break; 1341 1343 case Opt_wsize: 1342 - ctx->wsize = result.uint_32; 1344 + ctx->wsize = CIFS_ALIGN_WSIZE(fc, result.uint_32); 1343 1345 ctx->got_wsize = true; 1344 - if (ctx->wsize % PAGE_SIZE != 0) { 1345 - ctx->wsize = round_down(ctx->wsize, PAGE_SIZE); 1346 - if (ctx->wsize == 0) { 1347 - ctx->wsize = PAGE_SIZE; 1348 - cifs_dbg(VFS, "wsize too small, reset to minimum %ld\n", PAGE_SIZE); 1349 - } else { 1350 - cifs_dbg(VFS, 1351 - "wsize rounded down to %d to multiple of PAGE_SIZE %ld\n", 1352 - ctx->wsize, PAGE_SIZE); 1353 - } 1354 - } 1355 1346 ctx->vol_wsize = 
ctx->wsize; 1356 1347 break; 1357 1348 case Opt_acregmax:
+47
fs/smb/client/fs_context.h
··· 20 20 cifs_dbg(VFS, fmt, ## __VA_ARGS__); \ 21 21 } while (0) 22 22 23 + static inline size_t cifs_io_align(struct fs_context *fc, 24 + const char *name, size_t size) 25 + { 26 + if (!size || !IS_ALIGNED(size, PAGE_SIZE)) { 27 + cifs_errorf(fc, "unaligned %s, making it a multiple of %lu bytes\n", 28 + name, PAGE_SIZE); 29 + size = umax(round_down(size, PAGE_SIZE), PAGE_SIZE); 30 + } 31 + return size; 32 + } 33 + 34 + #define CIFS_ALIGN_WSIZE(_fc, _size) cifs_io_align(_fc, "wsize", _size) 35 + #define CIFS_ALIGN_RSIZE(_fc, _size) cifs_io_align(_fc, "rsize", _size) 36 + #define CIFS_ALIGN_BSIZE(_fc, _size) cifs_io_align(_fc, "bsize", _size) 37 + 23 38 enum smb_version { 24 39 Smb_1 = 1, 25 40 Smb_20, ··· 374 359 static inline void cifs_mount_unlock(void) 375 360 { 376 361 mutex_unlock(&cifs_mount_mutex); 362 + } 363 + 364 + static inline void cifs_negotiate_rsize(struct TCP_Server_Info *server, 365 + struct smb3_fs_context *ctx, 366 + struct cifs_tcon *tcon) 367 + { 368 + unsigned int size; 369 + 370 + size = umax(server->ops->negotiate_rsize(tcon, ctx), PAGE_SIZE); 371 + if (ctx->rsize) 372 + size = umax(umin(ctx->rsize, size), PAGE_SIZE); 373 + ctx->rsize = round_down(size, PAGE_SIZE); 374 + } 375 + 376 + static inline void cifs_negotiate_wsize(struct TCP_Server_Info *server, 377 + struct smb3_fs_context *ctx, 378 + struct cifs_tcon *tcon) 379 + { 380 + unsigned int size; 381 + 382 + size = umax(server->ops->negotiate_wsize(tcon, ctx), PAGE_SIZE); 383 + if (ctx->wsize) 384 + size = umax(umin(ctx->wsize, size), PAGE_SIZE); 385 + ctx->wsize = round_down(size, PAGE_SIZE); 386 + } 387 + 388 + static inline void cifs_negotiate_iosize(struct TCP_Server_Info *server, 389 + struct smb3_fs_context *ctx, 390 + struct cifs_tcon *tcon) 391 + { 392 + cifs_negotiate_rsize(server, ctx, tcon); 393 + cifs_negotiate_wsize(server, ctx, tcon); 377 394 } 378 395 379 396 #endif
+202 -21
fs/smb/client/smb1ops.c
··· 432 432 } 433 433 434 434 static unsigned int 435 - cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) 435 + smb1_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) 436 436 { 437 437 __u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability); 438 438 struct TCP_Server_Info *server = tcon->ses->server; ··· 467 467 } 468 468 469 469 static unsigned int 470 - cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) 470 + smb1_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) 471 471 { 472 472 __u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability); 473 473 struct TCP_Server_Info *server = tcon->ses->server; ··· 543 543 const char *full_path, 544 544 struct cifs_open_info_data *data) 545 545 { 546 - int rc; 546 + int rc = -EOPNOTSUPP; 547 547 FILE_ALL_INFO fi = {}; 548 + struct cifs_search_info search_info = {}; 549 + bool non_unicode_wildcard = false; 548 550 549 551 data->reparse_point = false; 550 552 data->adjust_tz = false; 551 553 552 - /* could do find first instead but this returns more info */ 553 - rc = CIFSSMBQPathInfo(xid, tcon, full_path, &fi, 0 /* not legacy */, cifs_sb->local_nls, 554 - cifs_remap(cifs_sb)); 555 554 /* 556 - * BB optimize code so we do not make the above call when server claims 557 - * no NT SMB support and the above call failed at least once - set flag 558 - * in tcon or mount. 555 + * First try CIFSSMBQPathInfo() function which returns more info 556 + * (NumberOfLinks) than CIFSFindFirst() fallback function. 557 + * Some servers like Win9x do not support SMB_QUERY_FILE_ALL_INFO over 558 + * TRANS2_QUERY_PATH_INFORMATION, but supports it with filehandle over 559 + * TRANS2_QUERY_FILE_INFORMATION (function CIFSSMBQFileInfo(). But SMB 560 + * Open command on non-NT servers works only for files, does not work 561 + * for directories. And moreover Win9x SMB server returns bogus data in 562 + * SMB_QUERY_FILE_ALL_INFO Attributes field. 
So for non-NT servers, 563 + * do not even use CIFSSMBQPathInfo() or CIFSSMBQFileInfo() function. 559 564 */ 560 - if ((rc == -EOPNOTSUPP) || (rc == -EINVAL)) { 565 + if (tcon->ses->capabilities & CAP_NT_SMBS) 566 + rc = CIFSSMBQPathInfo(xid, tcon, full_path, &fi, 0 /* not legacy */, 567 + cifs_sb->local_nls, cifs_remap(cifs_sb)); 568 + 569 + /* 570 + * Non-UNICODE variant of fallback functions below expands wildcards, 571 + * so they cannot be used for querying paths with wildcard characters. 572 + */ 573 + if (rc && !(tcon->ses->capabilities & CAP_UNICODE) && strpbrk(full_path, "*?\"><")) 574 + non_unicode_wildcard = true; 575 + 576 + /* 577 + * Then fallback to CIFSFindFirst() which works also with non-NT servers 578 + * but does not does not provide NumberOfLinks. 579 + */ 580 + if ((rc == -EOPNOTSUPP || rc == -EINVAL) && 581 + !non_unicode_wildcard) { 582 + if (!(tcon->ses->capabilities & tcon->ses->server->vals->cap_nt_find)) 583 + search_info.info_level = SMB_FIND_FILE_INFO_STANDARD; 584 + else 585 + search_info.info_level = SMB_FIND_FILE_FULL_DIRECTORY_INFO; 586 + rc = CIFSFindFirst(xid, tcon, full_path, cifs_sb, NULL, 587 + CIFS_SEARCH_CLOSE_ALWAYS | CIFS_SEARCH_CLOSE_AT_END, 588 + &search_info, false); 589 + if (rc == 0) { 590 + if (!(tcon->ses->capabilities & tcon->ses->server->vals->cap_nt_find)) { 591 + FIND_FILE_STANDARD_INFO *di; 592 + int offset = tcon->ses->server->timeAdj; 593 + 594 + di = (FIND_FILE_STANDARD_INFO *)search_info.srch_entries_start; 595 + fi.CreationTime = cpu_to_le64(cifs_UnixTimeToNT(cnvrtDosUnixTm( 596 + di->CreationDate, di->CreationTime, offset))); 597 + fi.LastAccessTime = cpu_to_le64(cifs_UnixTimeToNT(cnvrtDosUnixTm( 598 + di->LastAccessDate, di->LastAccessTime, offset))); 599 + fi.LastWriteTime = cpu_to_le64(cifs_UnixTimeToNT(cnvrtDosUnixTm( 600 + di->LastWriteDate, di->LastWriteTime, offset))); 601 + fi.ChangeTime = fi.LastWriteTime; 602 + fi.Attributes = cpu_to_le32(le16_to_cpu(di->Attributes)); 603 + fi.AllocationSize = 
cpu_to_le64(le32_to_cpu(di->AllocationSize)); 604 + fi.EndOfFile = cpu_to_le64(le32_to_cpu(di->DataSize)); 605 + } else { 606 + FILE_FULL_DIRECTORY_INFO *di; 607 + 608 + di = (FILE_FULL_DIRECTORY_INFO *)search_info.srch_entries_start; 609 + fi.CreationTime = di->CreationTime; 610 + fi.LastAccessTime = di->LastAccessTime; 611 + fi.LastWriteTime = di->LastWriteTime; 612 + fi.ChangeTime = di->ChangeTime; 613 + fi.Attributes = di->ExtFileAttributes; 614 + fi.AllocationSize = di->AllocationSize; 615 + fi.EndOfFile = di->EndOfFile; 616 + fi.EASize = di->EaSize; 617 + } 618 + fi.NumberOfLinks = cpu_to_le32(1); 619 + fi.DeletePending = 0; 620 + fi.Directory = !!(le32_to_cpu(fi.Attributes) & ATTR_DIRECTORY); 621 + cifs_buf_release(search_info.ntwrk_buf_start); 622 + } else if (!full_path[0]) { 623 + /* 624 + * CIFSFindFirst() does not work on root path if the 625 + * root path was exported on the server from the top 626 + * level path (drive letter). 627 + */ 628 + rc = -EOPNOTSUPP; 629 + } 630 + } 631 + 632 + /* 633 + * If everything failed then fallback to the legacy SMB command 634 + * SMB_COM_QUERY_INFORMATION which works with all servers, but 635 + * provide just few information. 636 + */ 637 + if ((rc == -EOPNOTSUPP || rc == -EINVAL) && !non_unicode_wildcard) { 561 638 rc = SMBQueryInformation(xid, tcon, full_path, &fi, cifs_sb->local_nls, 562 639 cifs_remap(cifs_sb)); 563 640 data->adjust_tz = true; 641 + } else if ((rc == -EOPNOTSUPP || rc == -EINVAL) && non_unicode_wildcard) { 642 + /* Path with non-UNICODE wildcard character cannot exist. */ 643 + rc = -ENOENT; 564 644 } 565 645 566 646 if (!rc) { ··· 718 638 { 719 639 int rc; 720 640 FILE_ALL_INFO fi = {}; 641 + 642 + /* 643 + * CIFSSMBQFileInfo() for non-NT servers returns bogus data in 644 + * Attributes fields. So do not use this command for non-NT servers. 
645 + */ 646 + if (!(tcon->ses->capabilities & CAP_NT_SMBS)) 647 + return -EOPNOTSUPP; 721 648 722 649 if (cfile->symlink_target) { 723 650 data->symlink_target = kstrdup(cfile->symlink_target, GFP_KERNEL); ··· 896 809 struct cifs_fid fid; 897 810 struct cifs_open_parms oparms; 898 811 struct cifsFileInfo *open_file; 812 + FILE_BASIC_INFO new_buf; 813 + struct cifs_open_info_data query_data; 814 + __le64 write_time = buf->LastWriteTime; 899 815 struct cifsInodeInfo *cinode = CIFS_I(inode); 900 816 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 901 817 struct tcon_link *tlink = NULL; ··· 906 816 907 817 /* if the file is already open for write, just use that fileid */ 908 818 open_file = find_writable_file(cinode, FIND_WR_FSUID_ONLY); 819 + 909 820 if (open_file) { 910 821 fid.netfid = open_file->fid.netfid; 911 822 netpid = open_file->pid; 912 823 tcon = tlink_tcon(open_file->tlink); 913 - goto set_via_filehandle; 824 + } else { 825 + tlink = cifs_sb_tlink(cifs_sb); 826 + if (IS_ERR(tlink)) { 827 + rc = PTR_ERR(tlink); 828 + tlink = NULL; 829 + goto out; 830 + } 831 + tcon = tlink_tcon(tlink); 914 832 } 915 833 916 - tlink = cifs_sb_tlink(cifs_sb); 917 - if (IS_ERR(tlink)) { 918 - rc = PTR_ERR(tlink); 919 - tlink = NULL; 920 - goto out; 834 + /* 835 + * Non-NT servers interprets zero time value in SMB_SET_FILE_BASIC_INFO 836 + * over TRANS2_SET_FILE_INFORMATION as a valid time value. NT servers 837 + * interprets zero time value as do not change existing value on server. 838 + * API of ->set_file_info() callback expects that zero time value has 839 + * the NT meaning - do not change. Therefore if server is non-NT and 840 + * some time values in "buf" are zero, then fetch missing time values. 
841 + */ 842 + if (!(tcon->ses->capabilities & CAP_NT_SMBS) && 843 + (!buf->CreationTime || !buf->LastAccessTime || 844 + !buf->LastWriteTime || !buf->ChangeTime)) { 845 + rc = cifs_query_path_info(xid, tcon, cifs_sb, full_path, &query_data); 846 + if (rc) { 847 + if (open_file) { 848 + cifsFileInfo_put(open_file); 849 + open_file = NULL; 850 + } 851 + goto out; 852 + } 853 + /* 854 + * Original write_time from buf->LastWriteTime is preserved 855 + * as SMBSetInformation() interprets zero as do not change. 856 + */ 857 + new_buf = *buf; 858 + buf = &new_buf; 859 + if (!buf->CreationTime) 860 + buf->CreationTime = query_data.fi.CreationTime; 861 + if (!buf->LastAccessTime) 862 + buf->LastAccessTime = query_data.fi.LastAccessTime; 863 + if (!buf->LastWriteTime) 864 + buf->LastWriteTime = query_data.fi.LastWriteTime; 865 + if (!buf->ChangeTime) 866 + buf->ChangeTime = query_data.fi.ChangeTime; 921 867 } 922 - tcon = tlink_tcon(tlink); 868 + 869 + if (open_file) 870 + goto set_via_filehandle; 923 871 924 872 rc = CIFSSMBSetPathInfo(xid, tcon, full_path, buf, cifs_sb->local_nls, 925 873 cifs_sb); ··· 978 850 .fid = &fid, 979 851 }; 980 852 981 - cifs_dbg(FYI, "calling SetFileInfo since SetPathInfo for times not supported by this server\n"); 982 - rc = CIFS_open(xid, &oparms, &oplock, NULL); 853 + if (S_ISDIR(inode->i_mode) && !(tcon->ses->capabilities & CAP_NT_SMBS)) { 854 + /* Opening directory path is not possible on non-NT servers. */ 855 + rc = -EOPNOTSUPP; 856 + } else { 857 + /* 858 + * Use cifs_open_file() instead of CIFS_open() as the 859 + * cifs_open_file() selects the correct function which 860 + * works also on non-NT servers. 861 + */ 862 + rc = cifs_open_file(xid, &oparms, &oplock, NULL); 863 + /* 864 + * Opening path for writing on non-NT servers is not 865 + * possible when the read-only attribute is already set. 866 + * Non-NT server in this case returns -EACCES. 
For those 867 + * servers the only possible way how to clear the read-only 868 + * bit is via SMB_COM_SETATTR command. 869 + */ 870 + if (rc == -EACCES && 871 + (cinode->cifsAttrs & ATTR_READONLY) && 872 + le32_to_cpu(buf->Attributes) != 0 && /* 0 = do not change attrs */ 873 + !(le32_to_cpu(buf->Attributes) & ATTR_READONLY) && 874 + !(tcon->ses->capabilities & CAP_NT_SMBS)) 875 + rc = -EOPNOTSUPP; 876 + } 877 + 878 + /* Fallback to SMB_COM_SETATTR command when absolutelty needed. */ 879 + if (rc == -EOPNOTSUPP) { 880 + cifs_dbg(FYI, "calling SetInformation since SetPathInfo for attrs/times not supported by this server\n"); 881 + rc = SMBSetInformation(xid, tcon, full_path, 882 + buf->Attributes != 0 ? buf->Attributes : cpu_to_le32(cinode->cifsAttrs), 883 + write_time, 884 + cifs_sb->local_nls, cifs_sb); 885 + if (rc == 0) 886 + cinode->cifsAttrs = le32_to_cpu(buf->Attributes); 887 + else 888 + rc = -EACCES; 889 + goto out; 890 + } 891 + 983 892 if (rc != 0) { 984 893 if (rc == -EIO) 985 894 rc = -EINVAL; ··· 1024 859 } 1025 860 1026 861 netpid = current->tgid; 862 + cifs_dbg(FYI, "calling SetFileInfo since SetPathInfo for attrs/times not supported by this server\n"); 1027 863 1028 864 set_via_filehandle: 1029 865 rc = CIFSSMBSetFileInfo(xid, tcon, buf, fid.netfid, netpid); ··· 1035 869 CIFSSMBClose(xid, tcon, fid.netfid); 1036 870 else 1037 871 cifsFileInfo_put(open_file); 872 + 873 + /* 874 + * Setting the read-only bit is not honered on non-NT servers when done 875 + * via open-semantics. So for setting it, use SMB_COM_SETATTR command. 876 + * This command works only after the file is closed, so use it only when 877 + * operation was called without the filehandle. 
878 + */ 879 + if (open_file == NULL && 880 + !(tcon->ses->capabilities & CAP_NT_SMBS) && 881 + le32_to_cpu(buf->Attributes) & ATTR_READONLY) { 882 + SMBSetInformation(xid, tcon, full_path, 883 + buf->Attributes, 884 + 0 /* do not change write time */, 885 + cifs_sb->local_nls, cifs_sb); 886 + } 1038 887 out: 1039 888 if (tlink != NULL) 1040 889 cifs_put_tlink(tlink); ··· 1342 1161 .check_trans2 = cifs_check_trans2, 1343 1162 .need_neg = cifs_need_neg, 1344 1163 .negotiate = cifs_negotiate, 1345 - .negotiate_wsize = cifs_negotiate_wsize, 1346 - .negotiate_rsize = cifs_negotiate_rsize, 1164 + .negotiate_wsize = smb1_negotiate_wsize, 1165 + .negotiate_rsize = smb1_negotiate_rsize, 1347 1166 .sess_setup = CIFS_SessSetup, 1348 1167 .logoff = CIFSSMBLogoff, 1349 1168 .tree_connect = CIFSTCon,
+3 -6
fs/smb/client/smb2pdu.c
··· 2921 2921 req->CreateContextsOffset = cpu_to_le32( 2922 2922 sizeof(struct smb2_create_req) + 2923 2923 iov[1].iov_len); 2924 + le32_add_cpu(&req->CreateContextsLength, iov[n_iov-1].iov_len); 2924 2925 pc_buf = iov[n_iov-1].iov_base; 2925 2926 } 2926 2927 ··· 4093 4092 return; 4094 4093 4095 4094 spin_lock(&tcon->sb_list_lock); 4096 - list_for_each_entry(cifs_sb, &tcon->cifs_sb_list, tcon_sb_link) { 4097 - cifs_sb->ctx->rsize = 4098 - server->ops->negotiate_rsize(tcon, cifs_sb->ctx); 4099 - cifs_sb->ctx->wsize = 4100 - server->ops->negotiate_wsize(tcon, cifs_sb->ctx); 4101 - } 4095 + list_for_each_entry(cifs_sb, &tcon->cifs_sb_list, tcon_sb_link) 4096 + cifs_negotiate_iosize(server, cifs_sb->ctx, tcon); 4102 4097 spin_unlock(&tcon->sb_list_lock); 4103 4098 } 4104 4099
+13 -1
fs/smb/server/auth.c
··· 550 550 retval = -ENOMEM; 551 551 goto out; 552 552 } 553 - sess->user = user; 553 + 554 + if (!sess->user) { 555 + /* First successful authentication */ 556 + sess->user = user; 557 + } else { 558 + if (!ksmbd_compare_user(sess->user, user)) { 559 + ksmbd_debug(AUTH, "different user tried to reuse session\n"); 560 + retval = -EPERM; 561 + ksmbd_free_user(user); 562 + goto out; 563 + } 564 + ksmbd_free_user(user); 565 + } 554 566 555 567 memcpy(sess->sess_key, resp->payload, resp->session_key_len); 556 568 memcpy(out_blob, resp->payload + resp->session_key_len,
+14 -6
fs/smb/server/mgmt/user_session.c
··· 59 59 struct ksmbd_session_rpc *entry; 60 60 long index; 61 61 62 + down_write(&sess->rpc_lock); 62 63 xa_for_each(&sess->rpc_handle_list, index, entry) { 63 64 xa_erase(&sess->rpc_handle_list, index); 64 65 __session_rpc_close(sess, entry); 65 66 } 67 + up_write(&sess->rpc_lock); 66 68 67 69 xa_destroy(&sess->rpc_handle_list); 68 70 } ··· 94 92 { 95 93 struct ksmbd_session_rpc *entry, *old; 96 94 struct ksmbd_rpc_command *resp; 97 - int method; 95 + int method, id; 98 96 99 97 method = __rpc_method(rpc_name); 100 98 if (!method) ··· 104 102 if (!entry) 105 103 return -ENOMEM; 106 104 105 + down_read(&sess->rpc_lock); 107 106 entry->method = method; 108 - entry->id = ksmbd_ipc_id_alloc(); 109 - if (entry->id < 0) 107 + entry->id = id = ksmbd_ipc_id_alloc(); 108 + if (id < 0) 110 109 goto free_entry; 111 - old = xa_store(&sess->rpc_handle_list, entry->id, entry, KSMBD_DEFAULT_GFP); 110 + old = xa_store(&sess->rpc_handle_list, id, entry, KSMBD_DEFAULT_GFP); 112 111 if (xa_is_err(old)) 113 112 goto free_id; 114 113 115 - resp = ksmbd_rpc_open(sess, entry->id); 114 + resp = ksmbd_rpc_open(sess, id); 116 115 if (!resp) 117 116 goto erase_xa; 118 117 118 + up_read(&sess->rpc_lock); 119 119 kvfree(resp); 120 - return entry->id; 120 + return id; 121 121 erase_xa: 122 122 xa_erase(&sess->rpc_handle_list, entry->id); 123 123 free_id: 124 124 ksmbd_rpc_id_free(entry->id); 125 125 free_entry: 126 126 kfree(entry); 127 + up_read(&sess->rpc_lock); 127 128 return -EINVAL; 128 129 } 129 130 ··· 134 129 { 135 130 struct ksmbd_session_rpc *entry; 136 131 132 + down_write(&sess->rpc_lock); 137 133 entry = xa_erase(&sess->rpc_handle_list, id); 138 134 if (entry) 139 135 __session_rpc_close(sess, entry); 136 + up_write(&sess->rpc_lock); 140 137 } 141 138 142 139 int ksmbd_session_rpc_method(struct ksmbd_session *sess, int id) ··· 446 439 sess->sequence_number = 1; 447 440 rwlock_init(&sess->tree_conns_lock); 448 441 atomic_set(&sess->refcnt, 2); 442 + init_rwsem(&sess->rpc_lock); 
449 443 450 444 ret = __init_smb2_session(sess); 451 445 if (ret)
+1
fs/smb/server/mgmt/user_session.h
··· 63 63 rwlock_t tree_conns_lock; 64 64 65 65 atomic_t refcnt; 66 + struct rw_semaphore rpc_lock; 66 67 }; 67 68 68 69 static inline int test_session_flag(struct ksmbd_session *sess, int bit)
+7 -11
fs/smb/server/smb2pdu.c
··· 1445 1445 { 1446 1446 struct ksmbd_conn *conn = work->conn; 1447 1447 struct ksmbd_session *sess = work->sess; 1448 - struct channel *chann = NULL; 1448 + struct channel *chann = NULL, *old; 1449 1449 struct ksmbd_user *user; 1450 1450 u64 prev_id; 1451 1451 int sz, rc; ··· 1557 1557 return -ENOMEM; 1558 1558 1559 1559 chann->conn = conn; 1560 - xa_store(&sess->ksmbd_chann_list, (long)conn, chann, KSMBD_DEFAULT_GFP); 1560 + old = xa_store(&sess->ksmbd_chann_list, (long)conn, chann, 1561 + KSMBD_DEFAULT_GFP); 1562 + if (xa_is_err(old)) { 1563 + kfree(chann); 1564 + return xa_err(old); 1565 + } 1561 1566 } 1562 1567 } 1563 1568 ··· 1606 1601 prev_sess_id = le64_to_cpu(req->PreviousSessionId); 1607 1602 if (prev_sess_id && prev_sess_id != sess->id) 1608 1603 destroy_previous_session(conn, sess->user, prev_sess_id); 1609 - 1610 - if (sess->state == SMB2_SESSION_VALID) { 1611 - ksmbd_free_user(sess->user); 1612 - sess->user = NULL; 1613 - } 1614 1604 1615 1605 retval = ksmbd_krb5_authenticate(sess, in_blob, in_len, 1616 1606 out_blob, &out_len); ··· 2249 2249 sess->state = SMB2_SESSION_EXPIRED; 2250 2250 up_write(&conn->session_lock); 2251 2251 2252 - if (sess->user) { 2253 - ksmbd_free_user(sess->user); 2254 - sess->user = NULL; 2255 - } 2256 2252 ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_SETUP); 2257 2253 2258 2254 rsp->StructureSize = cpu_to_le16(4);
+54 -29
include/linux/cpufreq.h
··· 776 776 int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy); 777 777 778 778 int cpufreq_table_index_unsorted(struct cpufreq_policy *policy, 779 - unsigned int target_freq, 780 - unsigned int relation); 779 + unsigned int target_freq, unsigned int min, 780 + unsigned int max, unsigned int relation); 781 781 int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy, 782 782 unsigned int freq); 783 783 ··· 840 840 return best; 841 841 } 842 842 843 - /* Works only on sorted freq-tables */ 844 - static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy, 845 - unsigned int target_freq, 846 - bool efficiencies) 843 + static inline int find_index_l(struct cpufreq_policy *policy, 844 + unsigned int target_freq, 845 + unsigned int min, unsigned int max, 846 + bool efficiencies) 847 847 { 848 - target_freq = clamp_val(target_freq, policy->min, policy->max); 848 + target_freq = clamp_val(target_freq, min, max); 849 849 850 850 if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING) 851 851 return cpufreq_table_find_index_al(policy, target_freq, ··· 853 853 else 854 854 return cpufreq_table_find_index_dl(policy, target_freq, 855 855 efficiencies); 856 + } 857 + 858 + /* Works only on sorted freq-tables */ 859 + static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy, 860 + unsigned int target_freq, 861 + bool efficiencies) 862 + { 863 + return find_index_l(policy, target_freq, policy->min, policy->max, efficiencies); 856 864 } 857 865 858 866 /* Find highest freq at or below target in a table in ascending order */ ··· 916 908 return best; 917 909 } 918 910 919 - /* Works only on sorted freq-tables */ 920 - static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy, 921 - unsigned int target_freq, 922 - bool efficiencies) 911 + static inline int find_index_h(struct cpufreq_policy *policy, 912 + unsigned int target_freq, 913 + unsigned int min, unsigned int max, 914 + bool 
efficiencies) 923 915 { 924 - target_freq = clamp_val(target_freq, policy->min, policy->max); 916 + target_freq = clamp_val(target_freq, min, max); 925 917 926 918 if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING) 927 919 return cpufreq_table_find_index_ah(policy, target_freq, ··· 929 921 else 930 922 return cpufreq_table_find_index_dh(policy, target_freq, 931 923 efficiencies); 924 + } 925 + 926 + /* Works only on sorted freq-tables */ 927 + static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy, 928 + unsigned int target_freq, 929 + bool efficiencies) 930 + { 931 + return find_index_h(policy, target_freq, policy->min, policy->max, efficiencies); 932 932 } 933 933 934 934 /* Find closest freq to target in a table in ascending order */ ··· 1009 993 return best; 1010 994 } 1011 995 1012 - /* Works only on sorted freq-tables */ 1013 - static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy, 1014 - unsigned int target_freq, 1015 - bool efficiencies) 996 + static inline int find_index_c(struct cpufreq_policy *policy, 997 + unsigned int target_freq, 998 + unsigned int min, unsigned int max, 999 + bool efficiencies) 1016 1000 { 1017 - target_freq = clamp_val(target_freq, policy->min, policy->max); 1001 + target_freq = clamp_val(target_freq, min, max); 1018 1002 1019 1003 if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING) 1020 1004 return cpufreq_table_find_index_ac(policy, target_freq, ··· 1024 1008 efficiencies); 1025 1009 } 1026 1010 1027 - static inline bool cpufreq_is_in_limits(struct cpufreq_policy *policy, int idx) 1011 + /* Works only on sorted freq-tables */ 1012 + static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy, 1013 + unsigned int target_freq, 1014 + bool efficiencies) 1015 + { 1016 + return find_index_c(policy, target_freq, policy->min, policy->max, efficiencies); 1017 + } 1018 + 1019 + static inline bool cpufreq_is_in_limits(struct cpufreq_policy *policy, 1020 + 
unsigned int min, unsigned int max, 1021 + int idx) 1028 1022 { 1029 1023 unsigned int freq; 1030 1024 ··· 1043 1017 1044 1018 freq = policy->freq_table[idx].frequency; 1045 1019 1046 - return freq == clamp_val(freq, policy->min, policy->max); 1020 + return freq == clamp_val(freq, min, max); 1047 1021 } 1048 1022 1049 1023 static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy, 1050 1024 unsigned int target_freq, 1025 + unsigned int min, 1026 + unsigned int max, 1051 1027 unsigned int relation) 1052 1028 { 1053 1029 bool efficiencies = policy->efficiencies_available && ··· 1060 1032 relation &= ~CPUFREQ_RELATION_E; 1061 1033 1062 1034 if (unlikely(policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED)) 1063 - return cpufreq_table_index_unsorted(policy, target_freq, 1064 - relation); 1035 + return cpufreq_table_index_unsorted(policy, target_freq, min, 1036 + max, relation); 1065 1037 retry: 1066 1038 switch (relation) { 1067 1039 case CPUFREQ_RELATION_L: 1068 - idx = cpufreq_table_find_index_l(policy, target_freq, 1069 - efficiencies); 1040 + idx = find_index_l(policy, target_freq, min, max, efficiencies); 1070 1041 break; 1071 1042 case CPUFREQ_RELATION_H: 1072 - idx = cpufreq_table_find_index_h(policy, target_freq, 1073 - efficiencies); 1043 + idx = find_index_h(policy, target_freq, min, max, efficiencies); 1074 1044 break; 1075 1045 case CPUFREQ_RELATION_C: 1076 - idx = cpufreq_table_find_index_c(policy, target_freq, 1077 - efficiencies); 1046 + idx = find_index_c(policy, target_freq, min, max, efficiencies); 1078 1047 break; 1079 1048 default: 1080 1049 WARN_ON_ONCE(1); 1081 1050 return 0; 1082 1051 } 1083 1052 1084 - /* Limit frequency index to honor policy->min/max */ 1085 - if (!cpufreq_is_in_limits(policy, idx) && efficiencies) { 1053 + /* Limit frequency index to honor min and max */ 1054 + if (!cpufreq_is_in_limits(policy, min, max, idx) && efficiencies) { 1086 1055 efficiencies = false; 1087 1056 goto retry; 1088 1057 }
-15
include/linux/fsnotify_backend.h
··· 907 907 /* Clear all of the marks of a group attached to a given object type */ 908 908 extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group, 909 909 unsigned int obj_type); 910 - /* run all the marks in a group, and clear all of the vfsmount marks */ 911 - static inline void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group) 912 - { 913 - fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_VFSMOUNT); 914 - } 915 - /* run all the marks in a group, and clear all of the inode marks */ 916 - static inline void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group) 917 - { 918 - fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_INODE); 919 - } 920 - /* run all the marks in a group, and clear all of the sn marks */ 921 - static inline void fsnotify_clear_sb_marks_by_group(struct fsnotify_group *group) 922 - { 923 - fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_SB); 924 - } 925 910 extern void fsnotify_get_mark(struct fsnotify_mark *mark); 926 911 extern void fsnotify_put_mark(struct fsnotify_mark *mark); 927 912 extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info);
+4 -4
include/linux/iommu.h
··· 440 440 void *dst_data, const struct iommu_user_data *src_data, 441 441 unsigned int data_type, size_t data_len, size_t min_len) 442 442 { 443 - if (src_data->type != data_type) 444 - return -EINVAL; 445 443 if (WARN_ON(!dst_data || !src_data)) 444 + return -EINVAL; 445 + if (src_data->type != data_type) 446 446 return -EINVAL; 447 447 if (src_data->len < min_len || data_len < src_data->len) 448 448 return -EINVAL; ··· 456 456 * include/uapi/linux/iommufd.h 457 457 * @user_data: Pointer to a struct iommu_user_data for user space data info 458 458 * @data_type: The data type of the @kdst. Must match with @user_data->type 459 - * @min_last: The last memember of the data structure @kdst points in the 460 - * initial version. 459 + * @min_last: The last member of the data structure @kdst points in the initial 460 + * version. 461 461 * Return 0 for success, otherwise -error. 462 462 */ 463 463 #define iommu_copy_struct_from_user(kdst, user_data, data_type, min_last) \
+2
include/linux/module.h
··· 162 162 #define __INITRODATA_OR_MODULE __INITRODATA 163 163 #endif /*CONFIG_MODULES*/ 164 164 165 + struct module_kobject *lookup_or_create_module_kobject(const char *name); 166 + 165 167 /* Generic info of form tag = "info" */ 166 168 #define MODULE_INFO(tag, info) __MODULE_INFO(tag, tag, info) 167 169
+3 -1
include/net/bluetooth/hci.h
··· 1931 1931 __u8 sync_cte_type; 1932 1932 } __packed; 1933 1933 1934 + #define HCI_OP_LE_PA_CREATE_SYNC_CANCEL 0x2045 1935 + 1934 1936 #define HCI_OP_LE_PA_TERM_SYNC 0x2046 1935 1937 struct hci_cp_le_pa_term_sync { 1936 1938 __le16 handle; ··· 2832 2830 __le16 bis_handle[]; 2833 2831 } __packed; 2834 2832 2835 - #define HCI_EVT_LE_BIG_SYNC_ESTABILISHED 0x1d 2833 + #define HCI_EVT_LE_BIG_SYNC_ESTABLISHED 0x1d 2836 2834 struct hci_evt_le_big_sync_estabilished { 2837 2835 __u8 status; 2838 2836 __u8 handle;
+9 -11
include/net/bluetooth/hci_core.h
··· 1113 1113 return NULL; 1114 1114 } 1115 1115 1116 - static inline struct hci_conn *hci_conn_hash_lookup_sid(struct hci_dev *hdev, 1117 - __u8 sid, 1118 - bdaddr_t *dst, 1119 - __u8 dst_type) 1116 + static inline struct hci_conn * 1117 + hci_conn_hash_lookup_create_pa_sync(struct hci_dev *hdev) 1120 1118 { 1121 1119 struct hci_conn_hash *h = &hdev->conn_hash; 1122 1120 struct hci_conn *c; ··· 1122 1124 rcu_read_lock(); 1123 1125 1124 1126 list_for_each_entry_rcu(c, &h->list, list) { 1125 - if (c->type != ISO_LINK || bacmp(&c->dst, dst) || 1126 - c->dst_type != dst_type || c->sid != sid) 1127 + if (c->type != ISO_LINK) 1128 + continue; 1129 + 1130 + if (!test_bit(HCI_CONN_CREATE_PA_SYNC, &c->flags)) 1127 1131 continue; 1128 1132 1129 1133 rcu_read_unlock(); ··· 1524 1524 void hci_sco_setup(struct hci_conn *conn, __u8 status); 1525 1525 bool hci_iso_setup_path(struct hci_conn *conn); 1526 1526 int hci_le_create_cis_pending(struct hci_dev *hdev); 1527 - int hci_pa_create_sync_pending(struct hci_dev *hdev); 1528 - int hci_le_big_create_sync_pending(struct hci_dev *hdev); 1529 1527 int hci_conn_check_create_cis(struct hci_conn *conn); 1530 1528 1531 1529 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst, ··· 1564 1566 __u8 data_len, __u8 *data); 1565 1567 struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, 1566 1568 __u8 dst_type, __u8 sid, struct bt_iso_qos *qos); 1567 - int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon, 1568 - struct bt_iso_qos *qos, 1569 - __u16 sync_handle, __u8 num_bis, __u8 bis[]); 1569 + int hci_conn_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon, 1570 + struct bt_iso_qos *qos, __u16 sync_handle, 1571 + __u8 num_bis, __u8 bis[]); 1570 1572 int hci_conn_check_link_mode(struct hci_conn *conn); 1571 1573 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level); 1572 1574 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
+3
include/net/bluetooth/hci_sync.h
··· 185 185 int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn); 186 186 int hci_le_conn_update_sync(struct hci_dev *hdev, struct hci_conn *conn, 187 187 struct hci_conn_params *params); 188 + 189 + int hci_connect_pa_sync(struct hci_dev *hdev, struct hci_conn *conn); 190 + int hci_connect_big_sync(struct hci_dev *hdev, struct hci_conn *conn);
-3
include/net/xdp_sock.h
··· 71 71 */ 72 72 u32 tx_budget_spent; 73 73 74 - /* Protects generic receive. */ 75 - spinlock_t rx_lock; 76 - 77 74 /* Statistics */ 78 75 u64 rx_dropped; 79 76 u64 rx_queue_full;
+3 -1
include/net/xsk_buff_pool.h
··· 53 53 refcount_t users; 54 54 struct xdp_umem *umem; 55 55 struct work_struct work; 56 + /* Protects generic receive in shared and non-shared umem mode. */ 57 + spinlock_t rx_lock; 56 58 struct list_head free_list; 57 59 struct list_head xskb_list; 58 60 u32 heads_cnt; ··· 240 238 return orig_addr; 241 239 242 240 offset = xskb->xdp.data - xskb->xdp.data_hard_start; 243 - orig_addr -= offset; 244 241 offset += pool->headroom; 242 + orig_addr -= offset; 245 243 return orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT); 246 244 } 247 245
+1
include/sound/soc_sdw_utils.h
··· 226 226 bool playback); 227 227 int asoc_sdw_cs_spk_feedback_rtd_init(struct snd_soc_pcm_runtime *rtd, 228 228 struct snd_soc_dai *dai); 229 + int asoc_sdw_cs35l56_volume_limit(struct snd_soc_card *card, const char *name_prefix); 229 230 230 231 /* MAXIM codec support */ 231 232 int asoc_sdw_maxim_init(struct snd_soc_card *card,
+1 -1
include/sound/ump_convert.h
··· 19 19 /* context for converting from MIDI1 byte stream to UMP packet */ 20 20 struct ump_cvt_to_ump { 21 21 /* MIDI1 intermediate buffer */ 22 - unsigned char buf[4]; 22 + unsigned char buf[6]; /* up to 6 bytes for SysEx */ 23 23 int len; 24 24 int cmd_bytes; 25 25
+2 -2
io_uring/fdinfo.c
··· 123 123 seq_printf(m, "SqMask:\t0x%x\n", sq_mask); 124 124 seq_printf(m, "SqHead:\t%u\n", sq_head); 125 125 seq_printf(m, "SqTail:\t%u\n", sq_tail); 126 - seq_printf(m, "CachedSqHead:\t%u\n", ctx->cached_sq_head); 126 + seq_printf(m, "CachedSqHead:\t%u\n", data_race(ctx->cached_sq_head)); 127 127 seq_printf(m, "CqMask:\t0x%x\n", cq_mask); 128 128 seq_printf(m, "CqHead:\t%u\n", cq_head); 129 129 seq_printf(m, "CqTail:\t%u\n", cq_tail); 130 - seq_printf(m, "CachedCqTail:\t%u\n", ctx->cached_cq_tail); 130 + seq_printf(m, "CachedCqTail:\t%u\n", data_race(ctx->cached_cq_tail)); 131 131 seq_printf(m, "SQEs:\t%u\n", sq_tail - sq_head); 132 132 sq_entries = min(sq_tail - sq_head, ctx->sq_entries); 133 133 for (i = 0; i < sq_entries; i++) {
+1 -1
kernel/irq/msi.c
··· 761 761 static void msi_domain_debug_show(struct seq_file *m, struct irq_domain *d, 762 762 struct irq_data *irqd, int ind) 763 763 { 764 - struct msi_desc *desc = irq_data_get_msi_desc(irqd); 764 + struct msi_desc *desc = irqd ? irq_data_get_msi_desc(irqd) : NULL; 765 765 766 766 if (!desc) 767 767 return;
+21 -24
kernel/params.c
··· 760 760 params[i].ops->free(params[i].arg); 761 761 } 762 762 763 - static struct module_kobject * __init locate_module_kobject(const char *name) 763 + struct module_kobject __modinit * lookup_or_create_module_kobject(const char *name) 764 764 { 765 765 struct module_kobject *mk; 766 766 struct kobject *kobj; 767 767 int err; 768 768 769 769 kobj = kset_find_obj(module_kset, name); 770 - if (kobj) { 771 - mk = to_module_kobject(kobj); 772 - } else { 773 - mk = kzalloc(sizeof(struct module_kobject), GFP_KERNEL); 774 - BUG_ON(!mk); 770 + if (kobj) 771 + return to_module_kobject(kobj); 775 772 776 - mk->mod = THIS_MODULE; 777 - mk->kobj.kset = module_kset; 778 - err = kobject_init_and_add(&mk->kobj, &module_ktype, NULL, 779 - "%s", name); 780 - #ifdef CONFIG_MODULES 781 - if (!err) 782 - err = sysfs_create_file(&mk->kobj, &module_uevent.attr); 783 - #endif 784 - if (err) { 785 - kobject_put(&mk->kobj); 786 - pr_crit("Adding module '%s' to sysfs failed (%d), the system may be unstable.\n", 787 - name, err); 788 - return NULL; 789 - } 773 + mk = kzalloc(sizeof(struct module_kobject), GFP_KERNEL); 774 + if (!mk) 775 + return NULL; 790 776 791 - /* So that we hold reference in both cases. */ 792 - kobject_get(&mk->kobj); 777 + mk->mod = THIS_MODULE; 778 + mk->kobj.kset = module_kset; 779 + err = kobject_init_and_add(&mk->kobj, &module_ktype, NULL, "%s", name); 780 + if (IS_ENABLED(CONFIG_MODULES) && !err) 781 + err = sysfs_create_file(&mk->kobj, &module_uevent.attr); 782 + if (err) { 783 + kobject_put(&mk->kobj); 784 + pr_crit("Adding module '%s' to sysfs failed (%d), the system may be unstable.\n", 785 + name, err); 786 + return NULL; 793 787 } 788 + 789 + /* So that we hold reference in both cases. 
*/ 790 + kobject_get(&mk->kobj); 794 791 795 792 return mk; 796 793 } ··· 799 802 struct module_kobject *mk; 800 803 int err; 801 804 802 - mk = locate_module_kobject(name); 805 + mk = lookup_or_create_module_kobject(name); 803 806 if (!mk) 804 807 return; 805 808 ··· 870 873 int err; 871 874 872 875 for (vattr = __start___modver; vattr < __stop___modver; vattr++) { 873 - mk = locate_module_kobject(vattr->module_name); 876 + mk = lookup_or_create_module_kobject(vattr->module_name); 874 877 if (mk) { 875 878 err = sysfs_create_file(&mk->kobj, &vattr->mattr.attr); 876 879 WARN_ON_ONCE(err);
+1 -1
kernel/trace/ftrace.c
··· 3436 3436 3437 3437 /* Copy the subops hash */ 3438 3438 *filter_hash = alloc_and_copy_ftrace_hash(size_bits, subops_hash->filter_hash); 3439 - if (!filter_hash) 3439 + if (!*filter_hash) 3440 3440 return -ENOMEM; 3441 3441 /* Remove any notrace functions from the copy */ 3442 3442 remove_hash(*filter_hash, subops_hash->notrace_hash);
+6 -3
kernel/trace/trace.c
··· 6043 6043 tscratch = tr->scratch; 6044 6044 /* if there is no tscrach, module_delta must be NULL. */ 6045 6045 module_delta = READ_ONCE(tr->module_delta); 6046 - if (!module_delta || tscratch->entries[0].mod_addr > addr) 6046 + if (!module_delta || !tscratch->nr_entries || 6047 + tscratch->entries[0].mod_addr > addr) { 6047 6048 return addr + tr->text_delta; 6049 + } 6048 6050 6049 6051 /* Note that entries must be sorted. */ 6050 6052 nr_entries = tscratch->nr_entries; ··· 6823 6821 /* Copy the data into the page, so we can start over. */ 6824 6822 ret = trace_seq_to_buffer(&iter->seq, 6825 6823 page_address(spd.pages[i]), 6826 - trace_seq_used(&iter->seq)); 6824 + min((size_t)trace_seq_used(&iter->seq), 6825 + PAGE_SIZE)); 6827 6826 if (ret < 0) { 6828 6827 __free_page(spd.pages[i]); 6829 6828 break; 6830 6829 } 6831 6830 spd.partial[i].offset = 0; 6832 - spd.partial[i].len = trace_seq_used(&iter->seq); 6831 + spd.partial[i].len = ret; 6833 6832 6834 6833 trace_seq_init(&iter->seq); 6835 6834 }
+2 -2
kernel/trace/trace_output.c
··· 1042 1042 struct trace_event_call *call; 1043 1043 struct list_head *head; 1044 1044 1045 + lockdep_assert_held_read(&trace_event_sem); 1046 + 1045 1047 /* ftrace defined events have separate call structures */ 1046 1048 if (event->type <= __TRACE_LAST_TYPE) { 1047 1049 bool found = false; 1048 1050 1049 - down_read(&trace_event_sem); 1050 1051 list_for_each_entry(call, &ftrace_events, list) { 1051 1052 if (call->event.type == event->type) { 1052 1053 found = true; ··· 1057 1056 if (call->event.type > __TRACE_LAST_TYPE) 1058 1057 break; 1059 1058 } 1060 - up_read(&trace_event_sem); 1061 1059 if (!found) { 1062 1060 trace_seq_printf(&iter->seq, "UNKNOWN TYPE %d\n", event->type); 1063 1061 goto out;
+11 -1
mm/memblock.c
··· 2183 2183 struct memblock_region *region; 2184 2184 phys_addr_t start, end; 2185 2185 int nid; 2186 + unsigned long max_reserved; 2186 2187 2187 2188 /* 2188 2189 * set nid on all reserved pages and also treat struct 2189 2190 * pages for the NOMAP regions as PageReserved 2190 2191 */ 2192 + repeat: 2193 + max_reserved = memblock.reserved.max; 2191 2194 for_each_mem_region(region) { 2192 2195 nid = memblock_get_region_node(region); 2193 2196 start = region->base; ··· 2199 2196 if (memblock_is_nomap(region)) 2200 2197 reserve_bootmem_region(start, end, nid); 2201 2198 2202 - memblock_set_node(start, end, &memblock.reserved, nid); 2199 + memblock_set_node(start, region->size, &memblock.reserved, nid); 2203 2200 } 2201 + /* 2202 + * 'max' is changed means memblock.reserved has been doubled its 2203 + * array, which may result a new reserved region before current 2204 + * 'start'. Now we should repeat the procedure to set its node id. 2205 + */ 2206 + if (max_reserved != memblock.reserved.max) 2207 + goto repeat; 2204 2208 2205 2209 /* 2206 2210 * initialize struct pages for reserved regions that don't have
+8 -22
mm/slub.c
··· 2028 2028 return 0; 2029 2029 } 2030 2030 2031 - /* Should be called only if mem_alloc_profiling_enabled() */ 2032 - static noinline void free_slab_obj_exts(struct slab *slab) 2031 + static inline void free_slab_obj_exts(struct slab *slab) 2033 2032 { 2034 2033 struct slabobj_ext *obj_exts; 2035 2034 ··· 2048 2049 slab->obj_exts = 0; 2049 2050 } 2050 2051 2051 - static inline bool need_slab_obj_ext(void) 2052 - { 2053 - if (mem_alloc_profiling_enabled()) 2054 - return true; 2055 - 2056 - /* 2057 - * CONFIG_MEMCG creates vector of obj_cgroup objects conditionally 2058 - * inside memcg_slab_post_alloc_hook. No other users for now. 2059 - */ 2060 - return false; 2061 - } 2062 - 2063 2052 #else /* CONFIG_SLAB_OBJ_EXT */ 2064 2053 2065 2054 static inline void init_slab_obj_exts(struct slab *slab) ··· 2062 2075 2063 2076 static inline void free_slab_obj_exts(struct slab *slab) 2064 2077 { 2065 - } 2066 - 2067 - static inline bool need_slab_obj_ext(void) 2068 - { 2069 - return false; 2070 2078 } 2071 2079 2072 2080 #endif /* CONFIG_SLAB_OBJ_EXT */ ··· 2111 2129 static inline void 2112 2130 alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags) 2113 2131 { 2114 - if (need_slab_obj_ext()) 2132 + if (mem_alloc_profiling_enabled()) 2115 2133 __alloc_tagging_slab_alloc_hook(s, object, flags); 2116 2134 } 2117 2135 ··· 2583 2601 static __always_inline void unaccount_slab(struct slab *slab, int order, 2584 2602 struct kmem_cache *s) 2585 2603 { 2586 - if (memcg_kmem_online() || need_slab_obj_ext()) 2587 - free_slab_obj_exts(slab); 2604 + /* 2605 + * The slab object extensions should now be freed regardless of 2606 + * whether mem_alloc_profiling_enabled() or not because profiling 2607 + * might have been disabled after slab->obj_exts got allocated. 2608 + */ 2609 + free_slab_obj_exts(slab); 2588 2610 2589 2611 mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s), 2590 2612 -(PAGE_SIZE << order));
+7 -174
net/bluetooth/hci_conn.c
··· 2064 2064 return hci_le_create_big(conn, &conn->iso_qos); 2065 2065 } 2066 2066 2067 - static void create_pa_complete(struct hci_dev *hdev, void *data, int err) 2068 - { 2069 - bt_dev_dbg(hdev, ""); 2070 - 2071 - if (err) 2072 - bt_dev_err(hdev, "Unable to create PA: %d", err); 2073 - } 2074 - 2075 - static bool hci_conn_check_create_pa_sync(struct hci_conn *conn) 2076 - { 2077 - if (conn->type != ISO_LINK || conn->sid == HCI_SID_INVALID) 2078 - return false; 2079 - 2080 - return true; 2081 - } 2082 - 2083 - static int create_pa_sync(struct hci_dev *hdev, void *data) 2084 - { 2085 - struct hci_cp_le_pa_create_sync cp = {0}; 2086 - struct hci_conn *conn; 2087 - int err = 0; 2088 - 2089 - hci_dev_lock(hdev); 2090 - 2091 - rcu_read_lock(); 2092 - 2093 - /* The spec allows only one pending LE Periodic Advertising Create 2094 - * Sync command at a time. If the command is pending now, don't do 2095 - * anything. We check for pending connections after each PA Sync 2096 - * Established event. 2097 - * 2098 - * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E 2099 - * page 2493: 2100 - * 2101 - * If the Host issues this command when another HCI_LE_Periodic_ 2102 - * Advertising_Create_Sync command is pending, the Controller shall 2103 - * return the error code Command Disallowed (0x0C). 
2104 - */ 2105 - list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { 2106 - if (test_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags)) 2107 - goto unlock; 2108 - } 2109 - 2110 - list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { 2111 - if (hci_conn_check_create_pa_sync(conn)) { 2112 - struct bt_iso_qos *qos = &conn->iso_qos; 2113 - 2114 - cp.options = qos->bcast.options; 2115 - cp.sid = conn->sid; 2116 - cp.addr_type = conn->dst_type; 2117 - bacpy(&cp.addr, &conn->dst); 2118 - cp.skip = cpu_to_le16(qos->bcast.skip); 2119 - cp.sync_timeout = cpu_to_le16(qos->bcast.sync_timeout); 2120 - cp.sync_cte_type = qos->bcast.sync_cte_type; 2121 - 2122 - break; 2123 - } 2124 - } 2125 - 2126 - unlock: 2127 - rcu_read_unlock(); 2128 - 2129 - hci_dev_unlock(hdev); 2130 - 2131 - if (bacmp(&cp.addr, BDADDR_ANY)) { 2132 - hci_dev_set_flag(hdev, HCI_PA_SYNC); 2133 - set_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags); 2134 - 2135 - err = __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC, 2136 - sizeof(cp), &cp, HCI_CMD_TIMEOUT); 2137 - if (!err) 2138 - err = hci_update_passive_scan_sync(hdev); 2139 - 2140 - if (err) { 2141 - hci_dev_clear_flag(hdev, HCI_PA_SYNC); 2142 - clear_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags); 2143 - } 2144 - } 2145 - 2146 - return err; 2147 - } 2148 - 2149 - int hci_pa_create_sync_pending(struct hci_dev *hdev) 2150 - { 2151 - /* Queue start pa_create_sync and scan */ 2152 - return hci_cmd_sync_queue(hdev, create_pa_sync, 2153 - NULL, create_pa_complete); 2154 - } 2155 - 2156 2067 struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, 2157 2068 __u8 dst_type, __u8 sid, 2158 2069 struct bt_iso_qos *qos) ··· 2078 2167 conn->dst_type = dst_type; 2079 2168 conn->sid = sid; 2080 2169 conn->state = BT_LISTEN; 2170 + conn->conn_timeout = msecs_to_jiffies(qos->bcast.sync_timeout * 10); 2081 2171 2082 2172 hci_conn_hold(conn); 2083 2173 2084 - hci_pa_create_sync_pending(hdev); 2174 + hci_connect_pa_sync(hdev, conn); 2085 2175 2086 2176 
return conn; 2087 2177 } 2088 2178 2089 - static bool hci_conn_check_create_big_sync(struct hci_conn *conn) 2090 - { 2091 - if (!conn->num_bis) 2092 - return false; 2093 - 2094 - return true; 2095 - } 2096 - 2097 - static void big_create_sync_complete(struct hci_dev *hdev, void *data, int err) 2098 - { 2099 - bt_dev_dbg(hdev, ""); 2100 - 2101 - if (err) 2102 - bt_dev_err(hdev, "Unable to create BIG sync: %d", err); 2103 - } 2104 - 2105 - static int big_create_sync(struct hci_dev *hdev, void *data) 2106 - { 2107 - DEFINE_FLEX(struct hci_cp_le_big_create_sync, pdu, bis, num_bis, 0x11); 2108 - struct hci_conn *conn; 2109 - 2110 - rcu_read_lock(); 2111 - 2112 - pdu->num_bis = 0; 2113 - 2114 - /* The spec allows only one pending LE BIG Create Sync command at 2115 - * a time. If the command is pending now, don't do anything. We 2116 - * check for pending connections after each BIG Sync Established 2117 - * event. 2118 - * 2119 - * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E 2120 - * page 2586: 2121 - * 2122 - * If the Host sends this command when the Controller is in the 2123 - * process of synchronizing to any BIG, i.e. the HCI_LE_BIG_Sync_ 2124 - * Established event has not been generated, the Controller shall 2125 - * return the error code Command Disallowed (0x0C). 
2126 - */ 2127 - list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { 2128 - if (test_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags)) 2129 - goto unlock; 2130 - } 2131 - 2132 - list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { 2133 - if (hci_conn_check_create_big_sync(conn)) { 2134 - struct bt_iso_qos *qos = &conn->iso_qos; 2135 - 2136 - set_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags); 2137 - 2138 - pdu->handle = qos->bcast.big; 2139 - pdu->sync_handle = cpu_to_le16(conn->sync_handle); 2140 - pdu->encryption = qos->bcast.encryption; 2141 - memcpy(pdu->bcode, qos->bcast.bcode, 2142 - sizeof(pdu->bcode)); 2143 - pdu->mse = qos->bcast.mse; 2144 - pdu->timeout = cpu_to_le16(qos->bcast.timeout); 2145 - pdu->num_bis = conn->num_bis; 2146 - memcpy(pdu->bis, conn->bis, conn->num_bis); 2147 - 2148 - break; 2149 - } 2150 - } 2151 - 2152 - unlock: 2153 - rcu_read_unlock(); 2154 - 2155 - if (!pdu->num_bis) 2156 - return 0; 2157 - 2158 - return hci_send_cmd(hdev, HCI_OP_LE_BIG_CREATE_SYNC, 2159 - struct_size(pdu, bis, pdu->num_bis), pdu); 2160 - } 2161 - 2162 - int hci_le_big_create_sync_pending(struct hci_dev *hdev) 2163 - { 2164 - /* Queue big_create_sync */ 2165 - return hci_cmd_sync_queue_once(hdev, big_create_sync, 2166 - NULL, big_create_sync_complete); 2167 - } 2168 - 2169 - int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon, 2170 - struct bt_iso_qos *qos, 2171 - __u16 sync_handle, __u8 num_bis, __u8 bis[]) 2179 + int hci_conn_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon, 2180 + struct bt_iso_qos *qos, __u16 sync_handle, 2181 + __u8 num_bis, __u8 bis[]) 2172 2182 { 2173 2183 int err; 2174 2184 ··· 2106 2274 2107 2275 hcon->num_bis = num_bis; 2108 2276 memcpy(hcon->bis, bis, num_bis); 2277 + hcon->conn_timeout = msecs_to_jiffies(qos->bcast.timeout * 10); 2109 2278 } 2110 2279 2111 - return hci_le_big_create_sync_pending(hdev); 2280 + return hci_connect_big_sync(hdev, hcon); 2112 2281 } 2113 2282 2114 2283 static void 
create_big_complete(struct hci_dev *hdev, void *data, int err)
+4 -11
net/bluetooth/hci_event.c
··· 6378 6378 6379 6379 hci_dev_clear_flag(hdev, HCI_PA_SYNC); 6380 6380 6381 - conn = hci_conn_hash_lookup_sid(hdev, ev->sid, &ev->bdaddr, 6382 - ev->bdaddr_type); 6381 + conn = hci_conn_hash_lookup_create_pa_sync(hdev); 6383 6382 if (!conn) { 6384 6383 bt_dev_err(hdev, 6385 6384 "Unable to find connection for dst %pMR sid 0x%2.2x", ··· 6417 6418 } 6418 6419 6419 6420 unlock: 6420 - /* Handle any other pending PA sync command */ 6421 - hci_pa_create_sync_pending(hdev); 6422 - 6423 6421 hci_dev_unlock(hdev); 6424 6422 } 6425 6423 ··· 6928 6932 6929 6933 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 6930 6934 6931 - if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABILISHED, 6935 + if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABLISHED, 6932 6936 flex_array_size(ev, bis, ev->num_bis))) 6933 6937 return; 6934 6938 ··· 6999 7003 } 7000 7004 7001 7005 unlock: 7002 - /* Handle any other pending BIG sync command */ 7003 - hci_le_big_create_sync_pending(hdev); 7004 - 7005 7006 hci_dev_unlock(hdev); 7006 7007 } 7007 7008 ··· 7120 7127 hci_le_create_big_complete_evt, 7121 7128 sizeof(struct hci_evt_le_create_big_complete), 7122 7129 HCI_MAX_EVENT_SIZE), 7123 - /* [0x1d = HCI_EV_LE_BIG_SYNC_ESTABILISHED] */ 7124 - HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABILISHED, 7130 + /* [0x1d = HCI_EV_LE_BIG_SYNC_ESTABLISHED] */ 7131 + HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABLISHED, 7125 7132 hci_le_big_sync_established_evt, 7126 7133 sizeof(struct hci_evt_le_big_sync_estabilished), 7127 7134 HCI_MAX_EVENT_SIZE),
+145 -5
net/bluetooth/hci_sync.c
··· 2693 2693 2694 2694 /* Force address filtering if PA Sync is in progress */ 2695 2695 if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) { 2696 - struct hci_cp_le_pa_create_sync *sent; 2696 + struct hci_conn *conn; 2697 2697 2698 - sent = hci_sent_cmd_data(hdev, HCI_OP_LE_PA_CREATE_SYNC); 2699 - if (sent) { 2698 + conn = hci_conn_hash_lookup_create_pa_sync(hdev); 2699 + if (conn) { 2700 2700 struct conn_params pa; 2701 2701 2702 2702 memset(&pa, 0, sizeof(pa)); 2703 2703 2704 - bacpy(&pa.addr, &sent->addr); 2705 - pa.addr_type = sent->addr_type; 2704 + bacpy(&pa.addr, &conn->dst); 2705 + pa.addr_type = conn->dst_type; 2706 2706 2707 2707 /* Clear first since there could be addresses left 2708 2708 * behind. ··· 6894 6894 6895 6895 return __hci_cmd_sync_status(hdev, HCI_OP_LE_CONN_UPDATE, 6896 6896 sizeof(cp), &cp, HCI_CMD_TIMEOUT); 6897 + } 6898 + 6899 + static void create_pa_complete(struct hci_dev *hdev, void *data, int err) 6900 + { 6901 + bt_dev_dbg(hdev, "err %d", err); 6902 + 6903 + if (!err) 6904 + return; 6905 + 6906 + hci_dev_clear_flag(hdev, HCI_PA_SYNC); 6907 + 6908 + if (err == -ECANCELED) 6909 + return; 6910 + 6911 + hci_dev_lock(hdev); 6912 + 6913 + hci_update_passive_scan_sync(hdev); 6914 + 6915 + hci_dev_unlock(hdev); 6916 + } 6917 + 6918 + static int hci_le_pa_create_sync(struct hci_dev *hdev, void *data) 6919 + { 6920 + struct hci_cp_le_pa_create_sync cp; 6921 + struct hci_conn *conn = data; 6922 + struct bt_iso_qos *qos = &conn->iso_qos; 6923 + int err; 6924 + 6925 + if (!hci_conn_valid(hdev, conn)) 6926 + return -ECANCELED; 6927 + 6928 + if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC)) 6929 + return -EBUSY; 6930 + 6931 + /* Mark HCI_CONN_CREATE_PA_SYNC so hci_update_passive_scan_sync can 6932 + * program the address in the allow list so PA advertisements can be 6933 + * received. 
6934 + */ 6935 + set_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags); 6936 + 6937 + hci_update_passive_scan_sync(hdev); 6938 + 6939 + memset(&cp, 0, sizeof(cp)); 6940 + cp.options = qos->bcast.options; 6941 + cp.sid = conn->sid; 6942 + cp.addr_type = conn->dst_type; 6943 + bacpy(&cp.addr, &conn->dst); 6944 + cp.skip = cpu_to_le16(qos->bcast.skip); 6945 + cp.sync_timeout = cpu_to_le16(qos->bcast.sync_timeout); 6946 + cp.sync_cte_type = qos->bcast.sync_cte_type; 6947 + 6948 + /* The spec allows only one pending LE Periodic Advertising Create 6949 + * Sync command at a time so we forcefully wait for PA Sync Established 6950 + * event since cmd_work can only schedule one command at a time. 6951 + * 6952 + * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E 6953 + * page 2493: 6954 + * 6955 + * If the Host issues this command when another HCI_LE_Periodic_ 6956 + * Advertising_Create_Sync command is pending, the Controller shall 6957 + * return the error code Command Disallowed (0x0C). 6958 + */ 6959 + err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_PA_CREATE_SYNC, 6960 + sizeof(cp), &cp, 6961 + HCI_EV_LE_PA_SYNC_ESTABLISHED, 6962 + conn->conn_timeout, NULL); 6963 + if (err == -ETIMEDOUT) 6964 + __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC_CANCEL, 6965 + 0, NULL, HCI_CMD_TIMEOUT); 6966 + 6967 + return err; 6968 + } 6969 + 6970 + int hci_connect_pa_sync(struct hci_dev *hdev, struct hci_conn *conn) 6971 + { 6972 + return hci_cmd_sync_queue_once(hdev, hci_le_pa_create_sync, conn, 6973 + create_pa_complete); 6974 + } 6975 + 6976 + static void create_big_complete(struct hci_dev *hdev, void *data, int err) 6977 + { 6978 + struct hci_conn *conn = data; 6979 + 6980 + bt_dev_dbg(hdev, "err %d", err); 6981 + 6982 + if (err == -ECANCELED) 6983 + return; 6984 + 6985 + if (hci_conn_valid(hdev, conn)) 6986 + clear_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags); 6987 + } 6988 + 6989 + static int hci_le_big_create_sync(struct hci_dev *hdev, void *data) 6990 + { 6991 + 
DEFINE_FLEX(struct hci_cp_le_big_create_sync, cp, bis, num_bis, 0x11); 6992 + struct hci_conn *conn = data; 6993 + struct bt_iso_qos *qos = &conn->iso_qos; 6994 + int err; 6995 + 6996 + if (!hci_conn_valid(hdev, conn)) 6997 + return -ECANCELED; 6998 + 6999 + set_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags); 7000 + 7001 + memset(cp, 0, sizeof(*cp)); 7002 + cp->handle = qos->bcast.big; 7003 + cp->sync_handle = cpu_to_le16(conn->sync_handle); 7004 + cp->encryption = qos->bcast.encryption; 7005 + memcpy(cp->bcode, qos->bcast.bcode, sizeof(cp->bcode)); 7006 + cp->mse = qos->bcast.mse; 7007 + cp->timeout = cpu_to_le16(qos->bcast.timeout); 7008 + cp->num_bis = conn->num_bis; 7009 + memcpy(cp->bis, conn->bis, conn->num_bis); 7010 + 7011 + /* The spec allows only one pending LE BIG Create Sync command at 7012 + * a time, so we forcefully wait for BIG Sync Established event since 7013 + * cmd_work can only schedule one command at a time. 7014 + * 7015 + * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E 7016 + * page 2586: 7017 + * 7018 + * If the Host sends this command when the Controller is in the 7019 + * process of synchronizing to any BIG, i.e. the HCI_LE_BIG_Sync_ 7020 + * Established event has not been generated, the Controller shall 7021 + * return the error code Command Disallowed (0x0C). 7022 + */ 7023 + err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_BIG_CREATE_SYNC, 7024 + struct_size(cp, bis, cp->num_bis), cp, 7025 + HCI_EVT_LE_BIG_SYNC_ESTABLISHED, 7026 + conn->conn_timeout, NULL); 7027 + if (err == -ETIMEDOUT) 7028 + hci_le_big_terminate_sync(hdev, cp->handle); 7029 + 7030 + return err; 7031 + } 7032 + 7033 + int hci_connect_big_sync(struct hci_dev *hdev, struct hci_conn *conn) 7034 + { 7035 + return hci_cmd_sync_queue_once(hdev, hci_le_big_create_sync, conn, 7036 + create_big_complete); 6897 7037 }
+12 -14
net/bluetooth/iso.c
··· 1462 1462 lock_sock(sk); 1463 1463 1464 1464 if (!test_and_set_bit(BT_SK_BIG_SYNC, &iso_pi(sk)->flags)) { 1465 - err = hci_le_big_create_sync(hdev, iso_pi(sk)->conn->hcon, 1466 - &iso_pi(sk)->qos, 1467 - iso_pi(sk)->sync_handle, 1468 - iso_pi(sk)->bc_num_bis, 1469 - iso_pi(sk)->bc_bis); 1465 + err = hci_conn_big_create_sync(hdev, iso_pi(sk)->conn->hcon, 1466 + &iso_pi(sk)->qos, 1467 + iso_pi(sk)->sync_handle, 1468 + iso_pi(sk)->bc_num_bis, 1469 + iso_pi(sk)->bc_bis); 1470 1470 if (err) 1471 - bt_dev_err(hdev, "hci_le_big_create_sync: %d", 1472 - err); 1471 + bt_dev_err(hdev, "hci_big_create_sync: %d", err); 1473 1472 } 1474 1473 1475 1474 release_sock(sk); ··· 1921 1922 hcon); 1922 1923 } else if (test_bit(HCI_CONN_BIG_SYNC_FAILED, &hcon->flags)) { 1923 1924 ev = hci_recv_event_data(hcon->hdev, 1924 - HCI_EVT_LE_BIG_SYNC_ESTABILISHED); 1925 + HCI_EVT_LE_BIG_SYNC_ESTABLISHED); 1925 1926 1926 1927 /* Get reference to PA sync parent socket, if it exists */ 1927 1928 parent = iso_get_sock(&hcon->src, &hcon->dst, ··· 2112 2113 2113 2114 if (!test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags) && 2114 2115 !test_and_set_bit(BT_SK_BIG_SYNC, &iso_pi(sk)->flags)) { 2115 - err = hci_le_big_create_sync(hdev, 2116 - hcon, 2117 - &iso_pi(sk)->qos, 2118 - iso_pi(sk)->sync_handle, 2119 - iso_pi(sk)->bc_num_bis, 2120 - iso_pi(sk)->bc_bis); 2116 + err = hci_conn_big_create_sync(hdev, hcon, 2117 + &iso_pi(sk)->qos, 2118 + iso_pi(sk)->sync_handle, 2119 + iso_pi(sk)->bc_num_bis, 2120 + iso_pi(sk)->bc_bis); 2121 2121 if (err) { 2122 2122 bt_dev_err(hdev, "hci_le_big_create_sync: %d", 2123 2123 err);
+3
net/bluetooth/l2cap_core.c
··· 7415 7415 return -ENOMEM; 7416 7416 /* Init rx_len */ 7417 7417 conn->rx_len = len; 7418 + 7419 + skb_set_delivery_time(conn->rx_skb, skb->tstamp, 7420 + skb->tstamp_type); 7418 7421 } 7419 7422 7420 7423 /* Copy as much as the rx_skb can hold */
+1 -1
net/ipv4/tcp_offload.c
··· 439 439 iif, sdif); 440 440 NAPI_GRO_CB(skb)->is_flist = !sk; 441 441 if (sk) 442 - sock_put(sk); 442 + sock_gen_put(sk); 443 443 } 444 444 445 445 INDIRECT_CALLABLE_SCOPE
+60 -1
net/ipv4/udp_offload.c
··· 247 247 return segs; 248 248 } 249 249 250 + static void __udpv6_gso_segment_csum(struct sk_buff *seg, 251 + struct in6_addr *oldip, 252 + const struct in6_addr *newip, 253 + __be16 *oldport, __be16 newport) 254 + { 255 + struct udphdr *uh = udp_hdr(seg); 256 + 257 + if (ipv6_addr_equal(oldip, newip) && *oldport == newport) 258 + return; 259 + 260 + if (uh->check) { 261 + inet_proto_csum_replace16(&uh->check, seg, oldip->s6_addr32, 262 + newip->s6_addr32, true); 263 + 264 + inet_proto_csum_replace2(&uh->check, seg, *oldport, newport, 265 + false); 266 + if (!uh->check) 267 + uh->check = CSUM_MANGLED_0; 268 + } 269 + 270 + *oldip = *newip; 271 + *oldport = newport; 272 + } 273 + 274 + static struct sk_buff *__udpv6_gso_segment_list_csum(struct sk_buff *segs) 275 + { 276 + const struct ipv6hdr *iph; 277 + const struct udphdr *uh; 278 + struct ipv6hdr *iph2; 279 + struct sk_buff *seg; 280 + struct udphdr *uh2; 281 + 282 + seg = segs; 283 + uh = udp_hdr(seg); 284 + iph = ipv6_hdr(seg); 285 + uh2 = udp_hdr(seg->next); 286 + iph2 = ipv6_hdr(seg->next); 287 + 288 + if (!(*(const u32 *)&uh->source ^ *(const u32 *)&uh2->source) && 289 + ipv6_addr_equal(&iph->saddr, &iph2->saddr) && 290 + ipv6_addr_equal(&iph->daddr, &iph2->daddr)) 291 + return segs; 292 + 293 + while ((seg = seg->next)) { 294 + uh2 = udp_hdr(seg); 295 + iph2 = ipv6_hdr(seg); 296 + 297 + __udpv6_gso_segment_csum(seg, &iph2->saddr, &iph->saddr, 298 + &uh2->source, uh->source); 299 + __udpv6_gso_segment_csum(seg, &iph2->daddr, &iph->daddr, 300 + &uh2->dest, uh->dest); 301 + } 302 + 303 + return segs; 304 + } 305 + 250 306 static struct sk_buff *__udp_gso_segment_list(struct sk_buff *skb, 251 307 netdev_features_t features, 252 308 bool is_ipv6) ··· 315 259 316 260 udp_hdr(skb)->len = htons(sizeof(struct udphdr) + mss); 317 261 318 - return is_ipv6 ? 
skb : __udpv4_gso_segment_list_csum(skb); 262 + if (is_ipv6) 263 + return __udpv6_gso_segment_list_csum(skb); 264 + else 265 + return __udpv4_gso_segment_list_csum(skb); 319 266 } 320 267 321 268 struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
+1 -1
net/ipv6/tcpv6_offload.c
··· 42 42 iif, sdif); 43 43 NAPI_GRO_CB(skb)->is_flist = !sk; 44 44 if (sk) 45 - sock_put(sk); 45 + sock_gen_put(sk); 46 46 #endif /* IS_ENABLED(CONFIG_IPV6) */ 47 47 } 48 48
+7 -1
net/mac80211/status.c
··· 1085 1085 1086 1086 ieee80211_report_used_skb(local, skb, false, status->ack_hwtstamp); 1087 1087 1088 - if (status->free_list) 1088 + /* 1089 + * This is a bit racy but we can avoid a lot of work 1090 + * with this test... 1091 + */ 1092 + if (local->tx_mntrs) 1093 + ieee80211_tx_monitor(local, skb, retry_count, status); 1094 + else if (status->free_list) 1089 1095 list_add_tail(&skb->list, status->free_list); 1090 1096 else 1091 1097 dev_kfree_skb(skb);
+6 -3
net/sched/sch_drr.c
··· 35 35 struct Qdisc_class_hash clhash; 36 36 }; 37 37 38 + static bool cl_is_active(struct drr_class *cl) 39 + { 40 + return !list_empty(&cl->alist); 41 + } 42 + 38 43 static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid) 39 44 { 40 45 struct drr_sched *q = qdisc_priv(sch); ··· 342 337 struct drr_sched *q = qdisc_priv(sch); 343 338 struct drr_class *cl; 344 339 int err = 0; 345 - bool first; 346 340 347 341 cl = drr_classify(skb, sch, &err); 348 342 if (cl == NULL) { ··· 351 347 return err; 352 348 } 353 349 354 - first = !cl->qdisc->q.qlen; 355 350 err = qdisc_enqueue(skb, cl->qdisc, to_free); 356 351 if (unlikely(err != NET_XMIT_SUCCESS)) { 357 352 if (net_xmit_drop_count(err)) { ··· 360 357 return err; 361 358 } 362 359 363 - if (first) { 360 + if (!cl_is_active(cl)) { 364 361 list_add_tail(&cl->alist, &q->active); 365 362 cl->deficit = cl->quantum; 366 363 }
+6 -3
net/sched/sch_ets.c
··· 74 74 [TCA_ETS_QUANTA_BAND] = { .type = NLA_U32 }, 75 75 }; 76 76 77 + static bool cl_is_active(struct ets_class *cl) 78 + { 79 + return !list_empty(&cl->alist); 80 + } 81 + 77 82 static int ets_quantum_parse(struct Qdisc *sch, const struct nlattr *attr, 78 83 unsigned int *quantum, 79 84 struct netlink_ext_ack *extack) ··· 421 416 struct ets_sched *q = qdisc_priv(sch); 422 417 struct ets_class *cl; 423 418 int err = 0; 424 - bool first; 425 419 426 420 cl = ets_classify(skb, sch, &err); 427 421 if (!cl) { ··· 430 426 return err; 431 427 } 432 428 433 - first = !cl->qdisc->q.qlen; 434 429 err = qdisc_enqueue(skb, cl->qdisc, to_free); 435 430 if (unlikely(err != NET_XMIT_SUCCESS)) { 436 431 if (net_xmit_drop_count(err)) { ··· 439 436 return err; 440 437 } 441 438 442 - if (first && !ets_class_is_strict(q, cl)) { 439 + if (!cl_is_active(cl) && !ets_class_is_strict(q, cl)) { 443 440 list_add_tail(&cl->alist, &q->active); 444 441 cl->deficit = cl->quantum; 445 442 }
+1 -1
net/sched/sch_hfsc.c
··· 1569 1569 return err; 1570 1570 } 1571 1571 1572 - if (first) { 1572 + if (first && !cl->cl_nactive) { 1573 1573 if (cl->cl_flags & HFSC_RSC) 1574 1574 init_ed(cl, len); 1575 1575 if (cl->cl_flags & HFSC_FSC)
+7 -4
net/sched/sch_qfq.c
··· 202 202 */ 203 203 enum update_reason {enqueue, requeue}; 204 204 205 + static bool cl_is_active(struct qfq_class *cl) 206 + { 207 + return !list_empty(&cl->alist); 208 + } 209 + 205 210 static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid) 206 211 { 207 212 struct qfq_sched *q = qdisc_priv(sch); ··· 1220 1215 struct qfq_class *cl; 1221 1216 struct qfq_aggregate *agg; 1222 1217 int err = 0; 1223 - bool first; 1224 1218 1225 1219 cl = qfq_classify(skb, sch, &err); 1226 1220 if (cl == NULL) { ··· 1241 1237 } 1242 1238 1243 1239 gso_segs = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1; 1244 - first = !cl->qdisc->q.qlen; 1245 1240 err = qdisc_enqueue(skb, cl->qdisc, to_free); 1246 1241 if (unlikely(err != NET_XMIT_SUCCESS)) { 1247 1242 pr_debug("qfq_enqueue: enqueue failed %d\n", err); ··· 1256 1253 ++sch->q.qlen; 1257 1254 1258 1255 agg = cl->agg; 1259 - /* if the queue was not empty, then done here */ 1260 - if (!first) { 1256 + /* if the class is active, then done here */ 1257 + if (cl_is_active(cl)) { 1261 1258 if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) && 1262 1259 list_first_entry(&agg->active, struct qfq_class, alist) 1263 1260 == cl && cl->deficit < len)
+3 -3
net/xdp/xsk.c
··· 338 338 u32 len = xdp_get_buff_len(xdp); 339 339 int err; 340 340 341 - spin_lock_bh(&xs->rx_lock); 342 341 err = xsk_rcv_check(xs, xdp, len); 343 342 if (!err) { 343 + spin_lock_bh(&xs->pool->rx_lock); 344 344 err = __xsk_rcv(xs, xdp, len); 345 345 xsk_flush(xs); 346 + spin_unlock_bh(&xs->pool->rx_lock); 346 347 } 347 - spin_unlock_bh(&xs->rx_lock); 348 + 348 349 return err; 349 350 } 350 351 ··· 1735 1734 xs = xdp_sk(sk); 1736 1735 xs->state = XSK_READY; 1737 1736 mutex_init(&xs->mutex); 1738 - spin_lock_init(&xs->rx_lock); 1739 1737 1740 1738 INIT_LIST_HEAD(&xs->map_list); 1741 1739 spin_lock_init(&xs->map_list_lock);
+1
net/xdp/xsk_buff_pool.c
··· 89 89 pool->addrs = umem->addrs; 90 90 pool->tx_metadata_len = umem->tx_metadata_len; 91 91 pool->tx_sw_csum = umem->flags & XDP_UMEM_TX_SW_CSUM; 92 + spin_lock_init(&pool->rx_lock); 92 93 INIT_LIST_HEAD(&pool->free_list); 93 94 INIT_LIST_HEAD(&pool->xskb_list); 94 95 INIT_LIST_HEAD(&pool->xsk_tx_list);
+8 -1
scripts/Makefile.extrawarn
··· 8 8 9 9 # Default set of warnings, always enabled 10 10 KBUILD_CFLAGS += -Wall 11 + KBUILD_CFLAGS += -Wextra 11 12 KBUILD_CFLAGS += -Wundef 12 13 KBUILD_CFLAGS += -Werror=implicit-function-declaration 13 14 KBUILD_CFLAGS += -Werror=implicit-int ··· 57 56 # globally built with -Wcast-function-type. 58 57 KBUILD_CFLAGS += $(call cc-option, -Wcast-function-type) 59 58 59 + # Currently, disable -Wstringop-overflow for GCC 11, globally. 60 + KBUILD_CFLAGS-$(CONFIG_CC_NO_STRINGOP_OVERFLOW) += $(call cc-disable-warning, stringop-overflow) 61 + KBUILD_CFLAGS-$(CONFIG_CC_STRINGOP_OVERFLOW) += $(call cc-option, -Wstringop-overflow) 62 + 63 + # Currently, disable -Wunterminated-string-initialization as broken 64 + KBUILD_CFLAGS += $(call cc-disable-warning, unterminated-string-initialization) 65 + 60 66 # The allocators already balk at large sizes, so silence the compiler 61 67 # warnings for bounds checks involving those possible values. While 62 68 # -Wno-alloc-size-larger-than would normally be used here, earlier versions ··· 90 82 # Warn if there is an enum types mismatch 91 83 KBUILD_CFLAGS += $(call cc-option,-Wenum-conversion) 92 84 93 - KBUILD_CFLAGS += -Wextra 94 85 KBUILD_CFLAGS += -Wunused 95 86 96 87 #
+81 -10
sound/pci/hda/patch_realtek.c
··· 441 441 alc_update_coef_idx(codec, 0x67, 0xf000, 0x3000); 442 442 fallthrough; 443 443 case 0x10ec0215: 444 + case 0x10ec0236: 445 + case 0x10ec0245: 446 + case 0x10ec0256: 447 + case 0x10ec0257: 444 448 case 0x10ec0285: 445 449 case 0x10ec0289: 446 450 alc_update_coef_idx(codec, 0x36, 1<<13, 0); ··· 452 448 case 0x10ec0230: 453 449 case 0x10ec0233: 454 450 case 0x10ec0235: 455 - case 0x10ec0236: 456 - case 0x10ec0245: 457 451 case 0x10ec0255: 458 - case 0x10ec0256: 459 452 case 0x19e58326: 460 - case 0x10ec0257: 461 453 case 0x10ec0282: 462 454 case 0x10ec0283: 463 455 case 0x10ec0286: ··· 6742 6742 codec->power_save_node = 0; 6743 6743 } 6744 6744 6745 + /* avoid DAC 0x06 for speaker switch 0x17; it has no volume control */ 6746 + static void alc274_fixup_hp_aio_bind_dacs(struct hda_codec *codec, 6747 + const struct hda_fixup *fix, int action) 6748 + { 6749 + static const hda_nid_t conn[] = { 0x02, 0x03 }; /* exclude 0x06 */ 6750 + /* The speaker is routed to the Node 0x06 by a mistake, thus the 6751 + * speaker's volume can't be adjusted since the node doesn't have 6752 + * Amp-out capability. Assure the speaker and lineout pin to be 6753 + * coupled with DAC NID 0x02. 
6754 + */ 6755 + static const hda_nid_t preferred_pairs[] = { 6756 + 0x16, 0x02, 0x17, 0x02, 0x21, 0x03, 0 6757 + }; 6758 + struct alc_spec *spec = codec->spec; 6759 + 6760 + snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn); 6761 + spec->gen.preferred_dacs = preferred_pairs; 6762 + } 6763 + 6745 6764 /* avoid DAC 0x06 for bass speaker 0x17; it has no volume control */ 6746 6765 static void alc289_fixup_asus_ga401(struct hda_codec *codec, 6747 6766 const struct hda_fixup *fix, int action) ··· 6965 6946 switch (action) { 6966 6947 case HDA_FIXUP_ACT_PRE_PROBE: 6967 6948 spec->micmute_led_polarity = 1; 6949 + /* needed for amp of back speakers */ 6950 + spec->gpio_mask |= 0x01; 6951 + spec->gpio_dir |= 0x01; 6952 + snd_hda_apply_pincfgs(codec, pincfgs); 6953 + /* share DAC to have unified volume control */ 6954 + snd_hda_override_conn_list(codec, 0x14, ARRAY_SIZE(conn), conn); 6955 + snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn); 6956 + break; 6957 + case HDA_FIXUP_ACT_INIT: 6958 + /* need to toggle GPIO to enable the amp of back speakers */ 6959 + alc_update_gpio_data(codec, 0x01, true); 6960 + msleep(100); 6961 + alc_update_gpio_data(codec, 0x01, false); 6962 + break; 6963 + } 6964 + } 6965 + 6966 + /* GPIO1 = amplifier on/off */ 6967 + static void alc285_fixup_hp_spectre_x360_df1(struct hda_codec *codec, 6968 + const struct hda_fixup *fix, 6969 + int action) 6970 + { 6971 + struct alc_spec *spec = codec->spec; 6972 + static const hda_nid_t conn[] = { 0x02 }; 6973 + static const struct hda_pintbl pincfgs[] = { 6974 + { 0x14, 0x90170110 }, /* front/high speakers */ 6975 + { 0x17, 0x90170130 }, /* back/bass speakers */ 6976 + { } 6977 + }; 6978 + 6979 + // enable mute led 6980 + alc285_fixup_hp_mute_led_coefbit(codec, fix, action); 6981 + 6982 + switch (action) { 6983 + case HDA_FIXUP_ACT_PRE_PROBE: 6968 6984 /* needed for amp of back speakers */ 6969 6985 spec->gpio_mask |= 0x01; 6970 6986 spec->gpio_dir |= 0x01; ··· 7815 7761 
ALC280_FIXUP_HP_9480M, 7816 7762 ALC245_FIXUP_HP_X360_AMP, 7817 7763 ALC285_FIXUP_HP_SPECTRE_X360_EB1, 7764 + ALC285_FIXUP_HP_SPECTRE_X360_DF1, 7818 7765 ALC285_FIXUP_HP_ENVY_X360, 7819 7766 ALC288_FIXUP_DELL_HEADSET_MODE, 7820 7767 ALC288_FIXUP_DELL1_MIC_NO_PRESENCE, ··· 8025 7970 ALC294_FIXUP_BASS_SPEAKER_15, 8026 7971 ALC283_FIXUP_DELL_HP_RESUME, 8027 7972 ALC294_FIXUP_ASUS_CS35L41_SPI_2, 7973 + ALC274_FIXUP_HP_AIO_BIND_DACS, 8028 7974 }; 8029 7975 8030 7976 /* A special fixup for Lenovo C940 and Yoga Duet 7; ··· 9893 9837 .type = HDA_FIXUP_FUNC, 9894 9838 .v.func = alc285_fixup_hp_spectre_x360_eb1 9895 9839 }, 9840 + [ALC285_FIXUP_HP_SPECTRE_X360_DF1] = { 9841 + .type = HDA_FIXUP_FUNC, 9842 + .v.func = alc285_fixup_hp_spectre_x360_df1 9843 + }, 9896 9844 [ALC285_FIXUP_HP_ENVY_X360] = { 9897 9845 .type = HDA_FIXUP_FUNC, 9898 9846 .v.func = alc285_fixup_hp_envy_x360, ··· 10400 10340 .chained = true, 10401 10341 .chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC, 10402 10342 }, 10343 + [ALC274_FIXUP_HP_AIO_BIND_DACS] = { 10344 + .type = HDA_FIXUP_FUNC, 10345 + .v.func = alc274_fixup_hp_aio_bind_dacs, 10346 + }, 10403 10347 }; 10404 10348 10405 10349 static const struct hda_quirk alc269_fixup_tbl[] = { ··· 10628 10564 SND_PCI_QUIRK(0x103c, 0x86c1, "HP Laptop 15-da3001TU", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2), 10629 10565 SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO), 10630 10566 SND_PCI_QUIRK(0x103c, 0x86e7, "HP Spectre x360 15-eb0xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1), 10567 + SND_PCI_QUIRK(0x103c, 0x863e, "HP Spectre x360 15-df1xxx", ALC285_FIXUP_HP_SPECTRE_X360_DF1), 10631 10568 SND_PCI_QUIRK(0x103c, 0x86e8, "HP Spectre x360 15-eb0xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1), 10632 10569 SND_PCI_QUIRK(0x103c, 0x86f9, "HP Spectre x360 13-aw0xxx", ALC285_FIXUP_HP_SPECTRE_X360_MUTE_LED), 10633 10570 SND_PCI_QUIRK(0x103c, 0x8716, "HP Elite Dragonfly G2 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT), ··· 10833 10768 SND_PCI_QUIRK(0x103c, 0x8ca7, "HP ZBook 
Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), 10834 10769 SND_PCI_QUIRK(0x103c, 0x8caf, "HP Elite mt645 G8 Mobile Thin Client", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), 10835 10770 SND_PCI_QUIRK(0x103c, 0x8cbd, "HP Pavilion Aero Laptop 13-bg0xxx", ALC245_FIXUP_HP_X360_MUTE_LEDS), 10836 - SND_PCI_QUIRK(0x103c, 0x8cdd, "HP Spectre", ALC287_FIXUP_CS35L41_I2C_2), 10837 - SND_PCI_QUIRK(0x103c, 0x8cde, "HP Spectre", ALC287_FIXUP_CS35L41_I2C_2), 10771 + SND_PCI_QUIRK(0x103c, 0x8cdd, "HP Spectre", ALC245_FIXUP_HP_SPECTRE_X360_EU0XXX), 10772 + SND_PCI_QUIRK(0x103c, 0x8cde, "HP OmniBook Ultra Flip Laptop 14t", ALC245_FIXUP_HP_SPECTRE_X360_EU0XXX), 10838 10773 SND_PCI_QUIRK(0x103c, 0x8cdf, "HP SnowWhite", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED), 10839 10774 SND_PCI_QUIRK(0x103c, 0x8ce0, "HP SnowWhite", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED), 10840 10775 SND_PCI_QUIRK(0x103c, 0x8cf5, "HP ZBook Studio 16", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED), 10841 10776 SND_PCI_QUIRK(0x103c, 0x8d01, "HP ZBook Power 14 G12", ALC285_FIXUP_HP_GPIO_LED), 10777 + SND_PCI_QUIRK(0x103c, 0x8d18, "HP EliteStudio 8 AIO", ALC274_FIXUP_HP_AIO_BIND_DACS), 10842 10778 SND_PCI_QUIRK(0x103c, 0x8d84, "HP EliteBook X G1i", ALC285_FIXUP_HP_GPIO_LED), 10843 10779 SND_PCI_QUIRK(0x103c, 0x8d85, "HP EliteBook 14 G12", ALC285_FIXUP_HP_GPIO_LED), 10844 10780 SND_PCI_QUIRK(0x103c, 0x8d86, "HP Elite X360 14 G12", ALC285_FIXUP_HP_GPIO_LED), ··· 10859 10793 SND_PCI_QUIRK(0x103c, 0x8da1, "HP 16 Clipper OmniBook X", ALC287_FIXUP_CS35L41_I2C_2), 10860 10794 SND_PCI_QUIRK(0x103c, 0x8da7, "HP 14 Enstrom OmniBook X", ALC287_FIXUP_CS35L41_I2C_2), 10861 10795 SND_PCI_QUIRK(0x103c, 0x8da8, "HP 16 Piston OmniBook X", ALC287_FIXUP_CS35L41_I2C_2), 10796 + SND_PCI_QUIRK(0x103c, 0x8dd4, "HP EliteStudio 8 AIO", ALC274_FIXUP_HP_AIO_BIND_DACS), 10862 10797 SND_PCI_QUIRK(0x103c, 0x8de8, "HP Gemtree", ALC245_FIXUP_TAS2781_SPI_2), 10863 10798 SND_PCI_QUIRK(0x103c, 0x8de9, "HP Gemtree", ALC245_FIXUP_TAS2781_SPI_2), 10864 10799 
SND_PCI_QUIRK(0x103c, 0x8dec, "HP EliteBook 640 G12", ALC236_FIXUP_HP_GPIO_LED), 10800 + SND_PCI_QUIRK(0x103c, 0x8ded, "HP EliteBook 640 G12", ALC236_FIXUP_HP_GPIO_LED), 10865 10801 SND_PCI_QUIRK(0x103c, 0x8dee, "HP EliteBook 660 G12", ALC236_FIXUP_HP_GPIO_LED), 10802 + SND_PCI_QUIRK(0x103c, 0x8def, "HP EliteBook 660 G12", ALC236_FIXUP_HP_GPIO_LED), 10866 10803 SND_PCI_QUIRK(0x103c, 0x8df0, "HP EliteBook 630 G12", ALC236_FIXUP_HP_GPIO_LED), 10804 + SND_PCI_QUIRK(0x103c, 0x8df1, "HP EliteBook 630 G12", ALC236_FIXUP_HP_GPIO_LED), 10867 10805 SND_PCI_QUIRK(0x103c, 0x8dfc, "HP EliteBook 645 G12", ALC236_FIXUP_HP_GPIO_LED), 10868 10806 SND_PCI_QUIRK(0x103c, 0x8dfe, "HP EliteBook 665 G12", ALC236_FIXUP_HP_GPIO_LED), 10869 10807 SND_PCI_QUIRK(0x103c, 0x8e11, "HP Trekker", ALC287_FIXUP_CS35L41_I2C_2), ··· 10913 10843 SND_PCI_QUIRK(0x1043, 0x12a3, "Asus N7691ZM", ALC269_FIXUP_ASUS_N7601ZM), 10914 10844 SND_PCI_QUIRK(0x1043, 0x12af, "ASUS UX582ZS", ALC245_FIXUP_CS35L41_SPI_2), 10915 10845 SND_PCI_QUIRK(0x1043, 0x12b4, "ASUS B3405CCA / P3405CCA", ALC294_FIXUP_ASUS_CS35L41_SPI_2), 10916 - SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC), 10917 - SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC), 10846 + SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE), 10847 + SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE), 10918 10848 SND_PCI_QUIRK(0x1043, 0x1313, "Asus K42JZ", ALC269VB_FIXUP_ASUS_MIC_NO_PRESENCE), 10919 - SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC), 10849 + SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE), 10920 10850 SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK), 10921 10851 SND_PCI_QUIRK(0x1043, 0x1433, "ASUS GX650PY/PZ/PV/PU/PYV/PZV/PIV/PVV", ALC285_FIXUP_ASUS_I2C_HEADSET_MIC), 10922 10852 SND_PCI_QUIRK(0x1043, 0x1460, "Asus VivoBook 15", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE), ··· 10970 
10900 SND_PCI_QUIRK(0x1043, 0x1c92, "ASUS ROG Strix G15", ALC285_FIXUP_ASUS_G533Z_PINS), 10971 10901 SND_PCI_QUIRK(0x1043, 0x1c9f, "ASUS G614JU/JV/JI", ALC285_FIXUP_ASUS_HEADSET_MIC), 10972 10902 SND_PCI_QUIRK(0x1043, 0x1caf, "ASUS G634JY/JZ/JI/JG", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS), 10973 - SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC), 10903 + SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE), 10974 10904 SND_PCI_QUIRK(0x1043, 0x1ccf, "ASUS G814JU/JV/JI", ALC245_FIXUP_CS35L41_SPI_2), 10975 10905 SND_PCI_QUIRK(0x1043, 0x1cdf, "ASUS G814JY/JZ/JG", ALC245_FIXUP_CS35L41_SPI_2), 10976 10906 SND_PCI_QUIRK(0x1043, 0x1cef, "ASUS G834JY/JZ/JI/JG", ALC285_FIXUP_ASUS_HEADSET_MIC), ··· 11564 11494 {.id = ALC295_FIXUP_HP_OMEN, .name = "alc295-hp-omen"}, 11565 11495 {.id = ALC285_FIXUP_HP_SPECTRE_X360, .name = "alc285-hp-spectre-x360"}, 11566 11496 {.id = ALC285_FIXUP_HP_SPECTRE_X360_EB1, .name = "alc285-hp-spectre-x360-eb1"}, 11497 + {.id = ALC285_FIXUP_HP_SPECTRE_X360_DF1, .name = "alc285-hp-spectre-x360-df1"}, 11567 11498 {.id = ALC285_FIXUP_HP_ENVY_X360, .name = "alc285-hp-envy-x360"}, 11568 11499 {.id = ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP, .name = "alc287-ideapad-bass-spk-amp"}, 11569 11500 {.id = ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN, .name = "alc287-yoga9-bass-spk-pin"},
+1 -1
sound/soc/amd/acp/acp-i2s.c
··· 97 97 struct acp_stream *stream; 98 98 int slot_len, no_of_slots; 99 99 100 - chip = dev_get_platdata(dev); 100 + chip = dev_get_drvdata(dev->parent); 101 101 switch (slot_width) { 102 102 case SLOT_WIDTH_8: 103 103 slot_len = 8;
+1 -1
sound/soc/amd/acp/acp-legacy-common.c
··· 450 450 struct snd_soc_acpi_mach *mach; 451 451 int size, platform; 452 452 453 - if (chip->flag == FLAG_AMD_LEGACY_ONLY_DMIC) { 453 + if (chip->flag == FLAG_AMD_LEGACY_ONLY_DMIC && chip->is_pdm_dev) { 454 454 platform = chip->acp_rev; 455 455 chip->mach_dev = platform_device_register_data(chip->dev, "acp-pdm-mach", 456 456 PLATFORM_DEVID_NONE, &platform,
+1 -1
sound/soc/amd/acp/acp-rembrandt.c
··· 199 199 200 200 static int rmb_pcm_resume(struct device *dev) 201 201 { 202 - struct acp_chip_info *chip = dev_get_platdata(dev); 202 + struct acp_chip_info *chip = dev_get_drvdata(dev->parent); 203 203 struct acp_stream *stream; 204 204 struct snd_pcm_substream *substream; 205 205 snd_pcm_uframes_t buf_in_frames;
+1 -1
sound/soc/amd/acp/acp-renoir.c
··· 146 146 147 147 static int rn_pcm_resume(struct device *dev) 148 148 { 149 - struct acp_chip_info *chip = dev_get_platdata(dev); 149 + struct acp_chip_info *chip = dev_get_drvdata(dev->parent); 150 150 struct acp_stream *stream; 151 151 struct snd_pcm_substream *substream; 152 152 snd_pcm_uframes_t buf_in_frames;
+1 -1
sound/soc/amd/acp/acp63.c
··· 250 250 251 251 static int acp63_pcm_resume(struct device *dev) 252 252 { 253 - struct acp_chip_info *chip = dev_get_platdata(dev); 253 + struct acp_chip_info *chip = dev_get_drvdata(dev->parent); 254 254 struct acp_stream *stream; 255 255 struct snd_pcm_substream *substream; 256 256 snd_pcm_uframes_t buf_in_frames;
+1 -1
sound/soc/amd/acp/acp70.c
··· 182 182 183 183 static int acp70_pcm_resume(struct device *dev) 184 184 { 185 - struct acp_chip_info *chip = dev_get_platdata(dev); 185 + struct acp_chip_info *chip = dev_get_drvdata(dev->parent); 186 186 struct acp_stream *stream; 187 187 struct snd_pcm_substream *substream; 188 188 snd_pcm_uframes_t buf_in_frames;
+3 -2
sound/soc/amd/ps/pci-ps.c
··· 193 193 struct amd_sdw_manager *amd_manager; 194 194 u32 ext_intr_stat, ext_intr_stat1; 195 195 u16 irq_flag = 0; 196 + u16 wake_irq_flag = 0; 196 197 u16 sdw_dma_irq_flag = 0; 197 198 198 199 adata = dev_id; ··· 232 231 } 233 232 234 233 if (adata->acp_rev >= ACP70_PCI_REV) 235 - irq_flag = check_and_handle_acp70_sdw_wake_irq(adata); 234 + wake_irq_flag = check_and_handle_acp70_sdw_wake_irq(adata); 236 235 237 236 if (ext_intr_stat & BIT(PDM_DMA_STAT)) { 238 237 ps_pdm_data = dev_get_drvdata(&adata->pdm_dev->dev); ··· 246 245 if (sdw_dma_irq_flag) 247 246 return IRQ_WAKE_THREAD; 248 247 249 - if (irq_flag) 248 + if (irq_flag | wake_irq_flag) 250 249 return IRQ_HANDLED; 251 250 else 252 251 return IRQ_NONE;
+2 -2
sound/soc/generic/simple-card-utils.c
··· 1179 1179 bool is_playback_only = of_property_read_bool(np, "playback-only"); 1180 1180 bool is_capture_only = of_property_read_bool(np, "capture-only"); 1181 1181 1182 - if (is_playback_only) 1182 + if (playback_only) 1183 1183 *playback_only = is_playback_only; 1184 - if (is_capture_only) 1184 + if (capture_only) 1185 1185 *capture_only = is_capture_only; 1186 1186 } 1187 1187 EXPORT_SYMBOL_GPL(graph_util_parse_link_direction);
+24 -35
sound/soc/intel/atom/sst/sst_pci.c
··· 26 26 int ddr_base, ret = 0; 27 27 struct pci_dev *pci = ctx->pci; 28 28 29 - ret = pci_request_regions(pci, SST_DRV_NAME); 29 + ret = pcim_request_all_regions(pci, SST_DRV_NAME); 30 30 if (ret) 31 31 return ret; 32 32 ··· 38 38 ddr_base = relocate_imr_addr_mrfld(ctx->ddr_base); 39 39 if (!ctx->pdata->lib_info) { 40 40 dev_err(ctx->dev, "lib_info pointer NULL\n"); 41 - ret = -EINVAL; 42 - goto do_release_regions; 41 + return -EINVAL; 43 42 } 44 43 if (ddr_base != ctx->pdata->lib_info->mod_base) { 45 44 dev_err(ctx->dev, 46 45 "FW LSP DDR BASE does not match with IFWI\n"); 47 - ret = -EINVAL; 48 - goto do_release_regions; 46 + return -EINVAL; 49 47 } 50 48 ctx->ddr_end = pci_resource_end(pci, 0); 51 49 52 - ctx->ddr = pcim_iomap(pci, 0, 53 - pci_resource_len(pci, 0)); 54 - if (!ctx->ddr) { 55 - ret = -EINVAL; 56 - goto do_release_regions; 57 - } 50 + ctx->ddr = pcim_iomap(pci, 0, 0); 51 + if (!ctx->ddr) 52 + return -ENOMEM; 53 + 58 54 dev_dbg(ctx->dev, "sst: DDR Ptr %p\n", ctx->ddr); 59 55 } else { 60 56 ctx->ddr = NULL; 61 57 } 62 58 /* SHIM */ 63 59 ctx->shim_phy_add = pci_resource_start(pci, 1); 64 - ctx->shim = pcim_iomap(pci, 1, pci_resource_len(pci, 1)); 65 - if (!ctx->shim) { 66 - ret = -EINVAL; 67 - goto do_release_regions; 68 - } 60 + ctx->shim = pcim_iomap(pci, 1, 0); 61 + if (!ctx->shim) 62 + return -ENOMEM; 63 + 69 64 dev_dbg(ctx->dev, "SST Shim Ptr %p\n", ctx->shim); 70 65 71 66 /* Shared SRAM */ 72 67 ctx->mailbox_add = pci_resource_start(pci, 2); 73 - ctx->mailbox = pcim_iomap(pci, 2, pci_resource_len(pci, 2)); 74 - if (!ctx->mailbox) { 75 - ret = -EINVAL; 76 - goto do_release_regions; 77 - } 68 + ctx->mailbox = pcim_iomap(pci, 2, 0); 69 + if (!ctx->mailbox) 70 + return -ENOMEM; 71 + 78 72 dev_dbg(ctx->dev, "SRAM Ptr %p\n", ctx->mailbox); 79 73 80 74 /* IRAM */ 81 75 ctx->iram_end = pci_resource_end(pci, 3); 82 76 ctx->iram_base = pci_resource_start(pci, 3); 83 - ctx->iram = pcim_iomap(pci, 3, pci_resource_len(pci, 3)); 84 - if (!ctx->iram) { 85 - 
ret = -EINVAL; 86 - goto do_release_regions; 87 - } 77 + ctx->iram = pcim_iomap(pci, 3, 0); 78 + if (!ctx->iram) 79 + return -ENOMEM; 80 + 88 81 dev_dbg(ctx->dev, "IRAM Ptr %p\n", ctx->iram); 89 82 90 83 /* DRAM */ 91 84 ctx->dram_end = pci_resource_end(pci, 4); 92 85 ctx->dram_base = pci_resource_start(pci, 4); 93 - ctx->dram = pcim_iomap(pci, 4, pci_resource_len(pci, 4)); 94 - if (!ctx->dram) { 95 - ret = -EINVAL; 96 - goto do_release_regions; 97 - } 86 + ctx->dram = pcim_iomap(pci, 4, 0); 87 + if (!ctx->dram) 88 + return -ENOMEM; 89 + 98 90 dev_dbg(ctx->dev, "DRAM Ptr %p\n", ctx->dram); 99 - do_release_regions: 100 - pci_release_regions(pci); 101 - return ret; 91 + return 0; 102 92 } 103 93 104 94 /* ··· 157 167 158 168 sst_context_cleanup(sst_drv_ctx); 159 169 pci_dev_put(sst_drv_ctx->pci); 160 - pci_release_regions(pci); 161 170 pci_set_drvdata(pci, NULL); 162 171 } 163 172
+2 -6
sound/soc/intel/avs/core.c
··· 452 452 return ret; 453 453 } 454 454 455 - ret = pci_request_regions(pci, "AVS HDAudio"); 455 + ret = pcim_request_all_regions(pci, "AVS HDAudio"); 456 456 if (ret < 0) 457 457 return ret; 458 458 ··· 461 461 bus->remap_addr = pci_ioremap_bar(pci, 0); 462 462 if (!bus->remap_addr) { 463 463 dev_err(bus->dev, "ioremap error\n"); 464 - ret = -ENXIO; 465 - goto err_remap_bar0; 464 + return -ENXIO; 466 465 } 467 466 468 467 adev->dsp_ba = pci_ioremap_bar(pci, 4); ··· 518 519 iounmap(adev->dsp_ba); 519 520 err_remap_bar4: 520 521 iounmap(bus->remap_addr); 521 - err_remap_bar0: 522 - pci_release_regions(pci); 523 522 return ret; 524 523 } 525 524 ··· 588 591 pci_free_irq_vectors(pci); 589 592 iounmap(bus->remap_addr); 590 593 iounmap(adev->dsp_ba); 591 - pci_release_regions(pci); 592 594 593 595 /* Firmware is not needed anymore */ 594 596 avs_release_firmwares(adev);
+13
sound/soc/intel/boards/bytcr_rt5640.c
··· 576 576 BYT_RT5640_SSP0_AIF2 | 577 577 BYT_RT5640_MCLK_EN), 578 578 }, 579 + { /* Acer Aspire SW3-013 */ 580 + .matches = { 581 + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), 582 + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW3-013"), 583 + }, 584 + .driver_data = (void *)(BYT_RT5640_DMIC1_MAP | 585 + BYT_RT5640_JD_SRC_JD2_IN4N | 586 + BYT_RT5640_OVCD_TH_2000UA | 587 + BYT_RT5640_OVCD_SF_0P75 | 588 + BYT_RT5640_DIFF_MIC | 589 + BYT_RT5640_SSP0_AIF1 | 590 + BYT_RT5640_MCLK_EN), 591 + }, 579 592 { 580 593 .matches = { 581 594 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+1 -1
sound/soc/intel/catpt/dsp.c
··· 156 156 { 157 157 unsigned long old; 158 158 u32 off = sram->start; 159 - u32 b = __ffs(mask); 159 + unsigned long b = __ffs(mask); 160 160 161 161 old = catpt_readl_pci(cdev, VDRTCTL0) & mask; 162 162 dev_dbg(cdev->dev, "SRAMPGE [0x%08lx] 0x%08lx -> 0x%08lx",
+1 -1
sound/soc/renesas/rz-ssi.c
··· 1244 1244 1245 1245 static const struct dev_pm_ops rz_ssi_pm_ops = { 1246 1246 RUNTIME_PM_OPS(rz_ssi_runtime_suspend, rz_ssi_runtime_resume, NULL) 1247 - SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) 1247 + NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) 1248 1248 }; 1249 1249 1250 1250 static struct platform_driver rz_ssi_driver = {
+4
sound/soc/sdw_utils/soc_sdw_bridge_cs35l56.c
··· 60 60 61 61 /* 4 x 16-bit sample slots and FSYNC=48000, BCLK=3.072 MHz */ 62 62 for_each_rtd_codec_dais(rtd, i, codec_dai) { 63 + ret = asoc_sdw_cs35l56_volume_limit(card, codec_dai->component->name_prefix); 64 + if (ret) 65 + return ret; 66 + 63 67 ret = snd_soc_dai_set_tdm_slot(codec_dai, tx_mask, rx_mask, 4, 16); 64 68 if (ret < 0) 65 69 return ret;
+10
sound/soc/sdw_utils/soc_sdw_cs42l43.c
··· 20 20 #include <sound/soc-dapm.h> 21 21 #include <sound/soc_sdw_utils.h> 22 22 23 + #define CS42L43_SPK_VOLUME_0DB 128 /* 0dB Max */ 24 + 23 25 static const struct snd_soc_dapm_route cs42l43_hs_map[] = { 24 26 { "Headphone", NULL, "cs42l43 AMP3_OUT" }, 25 27 { "Headphone", NULL, "cs42l43 AMP4_OUT" }, ··· 118 116 if (!card->components) 119 117 return -ENOMEM; 120 118 } 119 + 120 + ret = snd_soc_limit_volume(card, "cs42l43 Speaker Digital Volume", 121 + CS42L43_SPK_VOLUME_0DB); 122 + if (ret) 123 + dev_err(card->dev, "cs42l43 speaker volume limit failed: %d\n", ret); 124 + else 125 + dev_info(card->dev, "Setting CS42L43 Speaker volume limit to %d\n", 126 + CS42L43_SPK_VOLUME_0DB); 121 127 122 128 ret = snd_soc_dapm_add_routes(&card->dapm, cs42l43_spk_map, 123 129 ARRAY_SIZE(cs42l43_spk_map));
+24
sound/soc/sdw_utils/soc_sdw_cs_amp.c
··· 16 16 17 17 #define CODEC_NAME_SIZE 8 18 18 #define CS_AMP_CHANNELS_PER_AMP 4 19 + #define CS35L56_SPK_VOLUME_0DB 400 /* 0dB Max */ 20 + 21 + int asoc_sdw_cs35l56_volume_limit(struct snd_soc_card *card, const char *name_prefix) 22 + { 23 + char *volume_ctl_name; 24 + int ret; 25 + 26 + volume_ctl_name = kasprintf(GFP_KERNEL, "%s Speaker Volume", name_prefix); 27 + if (!volume_ctl_name) 28 + return -ENOMEM; 29 + 30 + ret = snd_soc_limit_volume(card, volume_ctl_name, CS35L56_SPK_VOLUME_0DB); 31 + if (ret) 32 + dev_err(card->dev, "%s limit set failed: %d\n", volume_ctl_name, ret); 33 + 34 + kfree(volume_ctl_name); 35 + return ret; 36 + } 37 + EXPORT_SYMBOL_NS(asoc_sdw_cs35l56_volume_limit, "SND_SOC_SDW_UTILS"); 19 38 20 39 int asoc_sdw_cs_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai) 21 40 { ··· 59 40 60 41 snprintf(widget_name, sizeof(widget_name), "%s SPK", 61 42 codec_dai->component->name_prefix); 43 + 44 + ret = asoc_sdw_cs35l56_volume_limit(card, codec_dai->component->name_prefix); 45 + if (ret) 46 + return ret; 47 + 62 48 ret = snd_soc_dapm_add_routes(&card->dapm, &route, 1); 63 49 if (ret) 64 50 return ret;
+3 -13
sound/soc/sof/sof-pci-dev.c
··· 216 216 if (ret < 0) 217 217 return ret; 218 218 219 - ret = pci_request_regions(pci, "Audio DSP"); 219 + ret = pcim_request_all_regions(pci, "Audio DSP"); 220 220 if (ret < 0) 221 221 return ret; 222 222 ··· 240 240 path_override->ipc_type = sof_pci_ipc_type; 241 241 } else { 242 242 dev_err(dev, "Invalid IPC type requested: %d\n", sof_pci_ipc_type); 243 - ret = -EINVAL; 244 - goto out; 243 + return -EINVAL; 245 244 } 246 245 247 246 path_override->fw_path = fw_path; ··· 270 271 sof_pdata->sof_probe_complete = sof_pci_probe_complete; 271 272 272 273 /* call sof helper for DSP hardware probe */ 273 - ret = snd_sof_device_probe(dev, sof_pdata); 274 - 275 - out: 276 - if (ret) 277 - pci_release_regions(pci); 278 - 279 - return ret; 274 + return snd_sof_device_probe(dev, sof_pdata); 280 275 } 281 276 EXPORT_SYMBOL_NS(sof_pci_probe, "SND_SOC_SOF_PCI_DEV"); 282 277 ··· 283 290 if (snd_sof_device_probe_completed(&pci->dev) && 284 291 !(sof_pci_debug & SOF_PCI_DISABLE_PM_RUNTIME)) 285 292 pm_runtime_get_noresume(&pci->dev); 286 - 287 - /* release pci regions and disable device */ 288 - pci_release_regions(pci); 289 293 } 290 294 EXPORT_SYMBOL_NS(sof_pci_remove, "SND_SOC_SOF_PCI_DEV"); 291 295
+11 -5
sound/soc/stm/stm32_sai_sub.c
··· 409 409 unsigned int rate) 410 410 { 411 411 struct platform_device *pdev = sai->pdev; 412 - unsigned int sai_ck_rate, sai_ck_max_rate, sai_curr_rate, sai_new_rate; 412 + unsigned int sai_ck_rate, sai_ck_max_rate, sai_ck_min_rate, sai_curr_rate, sai_new_rate; 413 413 int div, ret; 414 414 415 415 /* 416 - * Set maximum expected kernel clock frequency 416 + * Set minimum and maximum expected kernel clock frequency 417 417 * - mclk on or spdif: 418 418 * f_sai_ck = MCKDIV * mclk-fs * fs 419 419 * Here typical 256 ratio is assumed for mclk-fs ··· 423 423 * Set constraint MCKDIV * FRL <= 256, to ensure MCKDIV is in available range 424 424 * f_sai_ck = sai_ck_max_rate * pow_of_two(FRL) / 256 425 425 */ 426 + sai_ck_min_rate = rate * 256; 426 427 if (!(rate % SAI_RATE_11K)) 427 428 sai_ck_max_rate = SAI_MAX_SAMPLE_RATE_11K * 256; 428 429 else 429 430 sai_ck_max_rate = SAI_MAX_SAMPLE_RATE_8K * 256; 430 431 431 - if (!sai->sai_mclk && !STM_SAI_PROTOCOL_IS_SPDIF(sai)) 432 + if (!sai->sai_mclk && !STM_SAI_PROTOCOL_IS_SPDIF(sai)) { 433 + sai_ck_min_rate = rate * sai->fs_length; 432 434 sai_ck_max_rate /= DIV_ROUND_CLOSEST(256, roundup_pow_of_two(sai->fs_length)); 435 + } 433 436 434 437 /* 435 438 * Request exclusivity, as the clock is shared by SAI sub-blocks and by ··· 447 444 * return immediately. 
448 445 */ 449 446 sai_curr_rate = clk_get_rate(sai->sai_ck); 450 - if (stm32_sai_rate_accurate(sai_ck_max_rate, sai_curr_rate)) 447 + dev_dbg(&pdev->dev, "kernel clock rate: min [%u], max [%u], current [%u]", 448 + sai_ck_min_rate, sai_ck_max_rate, sai_curr_rate); 449 + if (stm32_sai_rate_accurate(sai_ck_max_rate, sai_curr_rate) && 450 + sai_curr_rate >= sai_ck_min_rate) 451 451 return 0; 452 452 453 453 /* ··· 478 472 /* Try a lower frequency */ 479 473 div++; 480 474 sai_ck_rate = sai_ck_max_rate / div; 481 - } while (sai_ck_rate > rate); 475 + } while (sai_ck_rate >= sai_ck_min_rate); 482 476 483 477 /* No accurate rate found */ 484 478 dev_err(&pdev->dev, "Failed to find an accurate rate");
+7
sound/usb/endpoint.c
··· 926 926 { 927 927 int altset = set ? ep->altsetting : 0; 928 928 int err; 929 + int retries = 0; 930 + const int max_retries = 5; 929 931 930 932 if (ep->iface_ref->altset == altset) 931 933 return 0; ··· 937 935 938 936 usb_audio_dbg(chip, "Setting usb interface %d:%d for EP 0x%x\n", 939 937 ep->iface, altset, ep->ep_num); 938 + retry: 940 939 err = usb_set_interface(chip->dev, ep->iface, altset); 941 940 if (err < 0) { 941 + if (err == -EPROTO && ++retries <= max_retries) { 942 + msleep(5 * (1 << (retries - 1))); 943 + goto retry; 944 + } 942 945 usb_audio_err_ratelimited( 943 946 chip, "%d:%d: usb_set_interface failed (%d)\n", 944 947 ep->iface, altset, err);
+2 -1
sound/usb/format.c
··· 260 260 } 261 261 262 262 /* Jabra Evolve 65 headset */ 263 - if (chip->usb_id == USB_ID(0x0b0e, 0x030b)) { 263 + if (chip->usb_id == USB_ID(0x0b0e, 0x030b) || 264 + chip->usb_id == USB_ID(0x0b0e, 0x030c)) { 264 265 /* only 48kHz for playback while keeping 16kHz for capture */ 265 266 if (fp->nr_rates != 1) 266 267 return set_fixed_rate(fp, 48000, SNDRV_PCM_RATE_48000);
+12 -4
sound/usb/midi.c
··· 1885 1885 } 1886 1886 1887 1887 port_info = find_port_info(umidi, number); 1888 - name_format = port_info ? port_info->name : 1889 - (jack_name != default_jack_name ? "%s %s" : "%s %s %d"); 1890 - snprintf(substream->name, sizeof(substream->name), 1891 - name_format, umidi->card->shortname, jack_name, number + 1); 1888 + if (port_info || jack_name == default_jack_name || 1889 + strncmp(umidi->card->shortname, jack_name, strlen(umidi->card->shortname)) != 0) { 1890 + name_format = port_info ? port_info->name : 1891 + (jack_name != default_jack_name ? "%s %s" : "%s %s %d"); 1892 + snprintf(substream->name, sizeof(substream->name), 1893 + name_format, umidi->card->shortname, jack_name, number + 1); 1894 + } else { 1895 + /* The manufacturer included the iProduct name in the jack 1896 + * name, do not use both 1897 + */ 1898 + strscpy(substream->name, jack_name); 1899 + } 1892 1900 1893 1901 *rsubstream = substream; 1894 1902 }
+84 -24
tools/hv/hv_kvp_daemon.c
··· 24 24 25 25 #include <sys/poll.h> 26 26 #include <sys/utsname.h> 27 + #include <stdbool.h> 27 28 #include <stdio.h> 28 29 #include <stdlib.h> 29 30 #include <unistd.h> ··· 678 677 pclose(file); 679 678 } 680 679 680 + static bool kvp_verify_ip_address(const void *address_string) 681 + { 682 + char verify_buf[sizeof(struct in6_addr)]; 683 + 684 + if (inet_pton(AF_INET, address_string, verify_buf) == 1) 685 + return true; 686 + if (inet_pton(AF_INET6, address_string, verify_buf) == 1) 687 + return true; 688 + return false; 689 + } 690 + 691 + static void kvp_extract_routes(const char *line, void **output, size_t *remaining) 692 + { 693 + static const char needle[] = "via "; 694 + const char *match, *haystack = line; 695 + 696 + while ((match = strstr(haystack, needle))) { 697 + const char *address, *next_char; 698 + 699 + /* Address starts after needle. */ 700 + address = match + strlen(needle); 701 + 702 + /* The char following address is a space or end of line. */ 703 + next_char = strpbrk(address, " \t\\"); 704 + if (!next_char) 705 + next_char = address + strlen(address) + 1; 706 + 707 + /* Enough room for address and semicolon. */ 708 + if (*remaining >= (next_char - address) + 1) { 709 + memcpy(*output, address, next_char - address); 710 + /* Terminate string for verification. */ 711 + memcpy(*output + (next_char - address), "", 1); 712 + if (kvp_verify_ip_address(*output)) { 713 + /* Advance output buffer. */ 714 + *output += next_char - address; 715 + *remaining -= next_char - address; 716 + 717 + /* Each address needs a trailing semicolon. 
*/ 718 + memcpy(*output, ";", 1); 719 + *output += 1; 720 + *remaining -= 1; 721 + } 722 + } 723 + haystack = next_char; 724 + } 725 + } 726 + 727 + static void kvp_get_gateway(void *buffer, size_t buffer_len) 728 + { 729 + static const char needle[] = "default "; 730 + FILE *f; 731 + void *output = buffer; 732 + char *line = NULL; 733 + size_t alloc_size = 0, remaining = buffer_len - 1; 734 + ssize_t num_chars; 735 + 736 + /* Show route information in a single line, for each address family */ 737 + f = popen("ip --oneline -4 route show;ip --oneline -6 route show", "r"); 738 + if (!f) { 739 + /* Convert buffer into C-String. */ 740 + memcpy(output, "", 1); 741 + return; 742 + } 743 + while ((num_chars = getline(&line, &alloc_size, f)) > 0) { 744 + /* Skip short lines. */ 745 + if (num_chars <= strlen(needle)) 746 + continue; 747 + /* Skip lines without default route. */ 748 + if (memcmp(line, needle, strlen(needle))) 749 + continue; 750 + /* Remove trailing newline to simplify further parsing. */ 751 + if (line[num_chars - 1] == '\n') 752 + line[num_chars - 1] = '\0'; 753 + /* Search routes after match. */ 754 + kvp_extract_routes(line + strlen(needle), &output, &remaining); 755 + } 756 + /* Convert buffer into C-String. */ 757 + memcpy(output, "", 1); 758 + free(line); 759 + pclose(f); 760 + } 761 + 681 762 static void kvp_get_ipconfig_info(char *if_name, 682 763 struct hv_kvp_ipaddr_value *buffer) 683 764 { ··· 768 685 char *p; 769 686 FILE *file; 770 687 771 - /* 772 - * Get the address of default gateway (ipv4). 773 - */ 774 - sprintf(cmd, "%s %s", "ip route show dev", if_name); 775 - strcat(cmd, " | awk '/default/ {print $3 }'"); 776 - 777 - /* 778 - * Execute the command to gather gateway info. 779 - */ 780 - kvp_process_ipconfig_file(cmd, (char *)buffer->gate_way, 781 - (MAX_GATEWAY_SIZE * 2), INET_ADDRSTRLEN, 0); 782 - 783 - /* 784 - * Get the address of default gateway (ipv6). 
785 - */ 786 - sprintf(cmd, "%s %s", "ip -f inet6 route show dev", if_name); 787 - strcat(cmd, " | awk '/default/ {print $3 }'"); 788 - 789 - /* 790 - * Execute the command to gather gateway info (ipv6). 791 - */ 792 - kvp_process_ipconfig_file(cmd, (char *)buffer->gate_way, 793 - (MAX_GATEWAY_SIZE * 2), INET6_ADDRSTRLEN, 1); 794 - 688 + kvp_get_gateway(buffer->gate_way, sizeof(buffer->gate_way)); 795 689 796 690 /* 797 691 * Gather the DNS state.
+12 -2
tools/lib/perf/Makefile
··· 42 42 TEST_ARGS := $(if $(V),-v) 43 43 44 44 INCLUDES = \ 45 + -I$(OUTPUT)arch/$(SRCARCH)/include/generated/uapi \ 45 46 -I$(srctree)/tools/lib/perf/include \ 46 47 -I$(srctree)/tools/lib/ \ 47 48 -I$(srctree)/tools/include \ ··· 100 99 $(call QUIET_CLEAN, libapi) 101 100 $(Q)$(MAKE) -C $(LIB_DIR) O=$(OUTPUT) clean >/dev/null 102 101 103 - $(LIBPERF_IN): FORCE 102 + uapi-asm := $(OUTPUT)arch/$(SRCARCH)/include/generated/uapi/asm 103 + ifeq ($(SRCARCH),arm64) 104 + syscall-y := $(uapi-asm)/unistd_64.h 105 + endif 106 + uapi-asm-generic: 107 + $(if $(syscall-y),\ 108 + $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.asm-headers obj=$(uapi-asm) \ 109 + generic=include/uapi/asm-generic $(syscall-y),) 110 + 111 + $(LIBPERF_IN): uapi-asm-generic FORCE 104 112 $(Q)$(MAKE) $(build)=libperf 105 113 106 114 $(LIBPERF_A): $(LIBPERF_IN) ··· 130 120 clean: $(LIBAPI)-clean 131 121 $(call QUIET_CLEAN, libperf) $(RM) $(LIBPERF_A) \ 132 122 *.o *~ *.a *.so *.so.$(VERSION) *.so.$(LIBPERF_VERSION) .*.d .*.cmd tests/*.o LIBPERF-CFLAGS $(LIBPERF_PC) \ 133 - $(TESTS_STATIC) $(TESTS_SHARED) 123 + $(TESTS_STATIC) $(TESTS_SHARED) $(syscall-y) 134 124 135 125 TESTS_IN = tests-in.o 136 126
+1
tools/perf/Makefile.config
··· 29 29 $(call detected_var,SRCARCH) 30 30 31 31 CFLAGS += -I$(OUTPUT)arch/$(SRCARCH)/include/generated 32 + CFLAGS += -I$(OUTPUT)libperf/arch/$(SRCARCH)/include/generated/uapi 32 33 33 34 # Additional ARCH settings for ppc 34 35 ifeq ($(SRCARCH),powerpc)
+102
tools/testing/memblock/tests/basic_api.c
··· 2434 2434 return 0; 2435 2435 } 2436 2436 2437 + #ifdef CONFIG_NUMA 2438 + static int memblock_set_node_check(void) 2439 + { 2440 + unsigned long i, max_reserved; 2441 + struct memblock_region *rgn; 2442 + void *orig_region; 2443 + 2444 + PREFIX_PUSH(); 2445 + 2446 + reset_memblock_regions(); 2447 + memblock_allow_resize(); 2448 + 2449 + dummy_physical_memory_init(); 2450 + memblock_add(dummy_physical_memory_base(), MEM_SIZE); 2451 + orig_region = memblock.reserved.regions; 2452 + 2453 + /* Equally Split range to node 0 and 1*/ 2454 + memblock_set_node(memblock_start_of_DRAM(), 2455 + memblock_phys_mem_size() / 2, &memblock.memory, 0); 2456 + memblock_set_node(memblock_start_of_DRAM() + memblock_phys_mem_size() / 2, 2457 + memblock_phys_mem_size() / 2, &memblock.memory, 1); 2458 + 2459 + ASSERT_EQ(memblock.memory.cnt, 2); 2460 + rgn = &memblock.memory.regions[0]; 2461 + ASSERT_EQ(rgn->base, memblock_start_of_DRAM()); 2462 + ASSERT_EQ(rgn->size, memblock_phys_mem_size() / 2); 2463 + ASSERT_EQ(memblock_get_region_node(rgn), 0); 2464 + rgn = &memblock.memory.regions[1]; 2465 + ASSERT_EQ(rgn->base, memblock_start_of_DRAM() + memblock_phys_mem_size() / 2); 2466 + ASSERT_EQ(rgn->size, memblock_phys_mem_size() / 2); 2467 + ASSERT_EQ(memblock_get_region_node(rgn), 1); 2468 + 2469 + /* Reserve 126 regions with the last one across node boundary */ 2470 + for (i = 0; i < 125; i++) 2471 + memblock_reserve(memblock_start_of_DRAM() + SZ_16 * i, SZ_8); 2472 + 2473 + memblock_reserve(memblock_start_of_DRAM() + memblock_phys_mem_size() / 2 - SZ_8, 2474 + SZ_16); 2475 + 2476 + /* 2477 + * Commit 61167ad5fecd ("mm: pass nid to reserve_bootmem_region()") 2478 + * do following process to set nid to each memblock.reserved region. 2479 + * But it may miss some region if memblock_set_node() double the 2480 + * array. 2481 + * 2482 + * By checking 'max', we make sure all region nid is set properly. 
2483 + */ 2484 + repeat: 2485 + max_reserved = memblock.reserved.max; 2486 + for_each_mem_region(rgn) { 2487 + int nid = memblock_get_region_node(rgn); 2488 + 2489 + memblock_set_node(rgn->base, rgn->size, &memblock.reserved, nid); 2490 + } 2491 + if (max_reserved != memblock.reserved.max) 2492 + goto repeat; 2493 + 2494 + /* Confirm each region has valid node set */ 2495 + for_each_reserved_mem_region(rgn) { 2496 + ASSERT_TRUE(numa_valid_node(memblock_get_region_node(rgn))); 2497 + if (rgn == (memblock.reserved.regions + memblock.reserved.cnt - 1)) 2498 + ASSERT_EQ(1, memblock_get_region_node(rgn)); 2499 + else 2500 + ASSERT_EQ(0, memblock_get_region_node(rgn)); 2501 + } 2502 + 2503 + dummy_physical_memory_cleanup(); 2504 + 2505 + /* 2506 + * The current reserved.regions is occupying a range of memory that 2507 + * allocated from dummy_physical_memory_init(). After free the memory, 2508 + * we must not use it. So restore the origin memory region to make sure 2509 + * the tests can run as normal and not affected by the double array. 2510 + */ 2511 + memblock.reserved.regions = orig_region; 2512 + memblock.reserved.cnt = INIT_MEMBLOCK_RESERVED_REGIONS; 2513 + 2514 + test_pass_pop(); 2515 + 2516 + return 0; 2517 + } 2518 + 2519 + static int memblock_set_node_checks(void) 2520 + { 2521 + prefix_reset(); 2522 + prefix_push("memblock_set_node"); 2523 + test_print("Running memblock_set_node tests...\n"); 2524 + 2525 + memblock_set_node_check(); 2526 + 2527 + prefix_pop(); 2528 + 2529 + return 0; 2530 + } 2531 + #else 2532 + static int memblock_set_node_checks(void) 2533 + { 2534 + return 0; 2535 + } 2536 + #endif 2537 + 2437 2538 int memblock_basic_checks(void) 2438 2539 { 2439 2540 memblock_initialization_check(); ··· 2545 2444 memblock_bottom_up_checks(); 2546 2445 memblock_trim_memory_checks(); 2547 2446 memblock_overlaps_region_checks(); 2447 + memblock_set_node_checks(); 2548 2448 2549 2449 return 0; 2550 2450 }
+2 -6
tools/testing/selftests/drivers/net/ocelot/psfp.sh
··· 266 266 "${base_time}" \ 267 267 "${CYCLE_TIME_NS}" \ 268 268 "${SHIFT_TIME_NS}" \ 269 + "${GATE_DURATION_NS}" \ 269 270 "${NUM_PKTS}" \ 270 271 "${STREAM_VID}" \ 271 272 "${STREAM_PRIO}" \ 272 273 "" \ 273 274 "${isochron_dat}" 274 275 275 - # Count all received packets by looking at the non-zero RX timestamps 276 - received=$(isochron report \ 277 - --input-file "${isochron_dat}" \ 278 - --printf-format "%u\n" --printf-args "R" | \ 279 - grep -w -v '0' | wc -l) 280 - 276 + received=$(isochron_report_num_received "${isochron_dat}") 281 277 if [ "${received}" = "${expected}" ]; then 282 278 RET=0 283 279 else
+46 -11
tools/testing/selftests/filesystems/mount-notify/mount-notify_test.c
··· 48 48 49 49 static const char root_mntpoint_templ[] = "/tmp/mount-notify_test_root.XXXXXX"; 50 50 51 + static const int mark_cmds[] = { 52 + FAN_MARK_ADD, 53 + FAN_MARK_REMOVE, 54 + FAN_MARK_FLUSH 55 + }; 56 + 57 + #define NUM_FAN_FDS ARRAY_SIZE(mark_cmds) 58 + 51 59 FIXTURE(fanotify) { 52 - int fan_fd; 60 + int fan_fd[NUM_FAN_FDS]; 53 61 char buf[256]; 54 62 unsigned int rem; 55 63 void *next; ··· 69 61 70 62 FIXTURE_SETUP(fanotify) 71 63 { 72 - int ret; 64 + int i, ret; 73 65 74 66 ASSERT_EQ(unshare(CLONE_NEWNS), 0); 75 67 ··· 97 89 self->root_id = get_mnt_id(_metadata, "/"); 98 90 ASSERT_NE(self->root_id, 0); 99 91 100 - self->fan_fd = fanotify_init(FAN_REPORT_MNT, 0); 101 - ASSERT_GE(self->fan_fd, 0); 102 - 103 - ret = fanotify_mark(self->fan_fd, FAN_MARK_ADD | FAN_MARK_MNTNS, 104 - FAN_MNT_ATTACH | FAN_MNT_DETACH, self->ns_fd, NULL); 105 - ASSERT_EQ(ret, 0); 92 + for (i = 0; i < NUM_FAN_FDS; i++) { 93 + self->fan_fd[i] = fanotify_init(FAN_REPORT_MNT | FAN_NONBLOCK, 94 + 0); 95 + ASSERT_GE(self->fan_fd[i], 0); 96 + ret = fanotify_mark(self->fan_fd[i], FAN_MARK_ADD | 97 + FAN_MARK_MNTNS, 98 + FAN_MNT_ATTACH | FAN_MNT_DETACH, 99 + self->ns_fd, NULL); 100 + ASSERT_EQ(ret, 0); 101 + // On fd[0] we do an extra ADD that changes nothing. 102 + // On fd[1]/fd[2] we REMOVE/FLUSH which removes the mark. 
103 + ret = fanotify_mark(self->fan_fd[i], mark_cmds[i] | 104 + FAN_MARK_MNTNS, 105 + FAN_MNT_ATTACH | FAN_MNT_DETACH, 106 + self->ns_fd, NULL); 107 + ASSERT_EQ(ret, 0); 108 + } 106 109 107 110 self->rem = 0; 108 111 } 109 112 110 113 FIXTURE_TEARDOWN(fanotify) 111 114 { 115 + int i; 116 + 112 117 ASSERT_EQ(self->rem, 0); 113 - close(self->fan_fd); 118 + for (i = 0; i < NUM_FAN_FDS; i++) 119 + close(self->fan_fd[i]); 114 120 115 121 ASSERT_EQ(fchdir(self->orig_root), 0); 116 122 ··· 145 123 unsigned int thislen; 146 124 147 125 if (!self->rem) { 148 - ssize_t len = read(self->fan_fd, self->buf, sizeof(self->buf)); 149 - ASSERT_GT(len, 0); 126 + ssize_t len; 127 + int i; 128 + 129 + for (i = NUM_FAN_FDS - 1; i >= 0; i--) { 130 + len = read(self->fan_fd[i], self->buf, 131 + sizeof(self->buf)); 132 + if (i > 0) { 133 + // Groups 1,2 should get EAGAIN 134 + ASSERT_EQ(len, -1); 135 + ASSERT_EQ(errno, EAGAIN); 136 + } else { 137 + // Group 0 should get events 138 + ASSERT_GT(len, 0); 139 + } 140 + } 150 141 151 142 self->rem = len; 152 143 self->next = (void *) self->buf;
+95 -1
tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh
··· 1 1 #!/bin/bash 2 2 # SPDX-License-Identifier: GPL-2.0 3 3 4 - ALL_TESTS="ping_ipv4 ping_ipv6 learning flooding vlan_deletion extern_learn other_tpid" 4 + ALL_TESTS="ping_ipv4 ping_ipv6 learning flooding vlan_deletion extern_learn other_tpid 8021p drop_untagged" 5 5 NUM_NETIFS=4 6 6 CHECK_TC="yes" 7 7 source lib.sh ··· 190 190 log_test "Reception of VLAN with other TPID as untagged (no PVID)" 191 191 192 192 bridge vlan add dev $swp1 vid 1 pvid untagged 193 + ip link set $h2 promisc off 194 + tc qdisc del dev $h2 clsact 195 + } 196 + 197 + 8021p_do() 198 + { 199 + local should_fail=$1; shift 200 + local mac=de:ad:be:ef:13:37 201 + 202 + tc filter add dev $h2 ingress protocol all pref 1 handle 101 \ 203 + flower dst_mac $mac action drop 204 + 205 + $MZ -q $h1 -c 1 -b $mac -a own "81:00 00:00 08:00 aa-aa-aa-aa-aa-aa-aa-aa-aa" 206 + sleep 1 207 + 208 + tc -j -s filter show dev $h2 ingress \ 209 + | jq -e ".[] | select(.options.handle == 101) \ 210 + | select(.options.actions[0].stats.packets == 1)" &> /dev/null 211 + check_err_fail $should_fail $? "802.1p-tagged reception" 212 + 213 + tc filter del dev $h2 ingress pref 1 214 + } 215 + 216 + 8021p() 217 + { 218 + RET=0 219 + 220 + tc qdisc add dev $h2 clsact 221 + ip link set $h2 promisc on 222 + 223 + # Test that with the default_pvid, 1, packets tagged with VID 0 are 224 + # accepted. 225 + 8021p_do 0 226 + 227 + # Test that packets tagged with VID 0 are still accepted after changing 228 + # the default_pvid. 229 + ip link set br0 type bridge vlan_default_pvid 10 230 + 8021p_do 0 231 + 232 + log_test "Reception of 802.1p-tagged traffic" 233 + 234 + ip link set $h2 promisc off 235 + tc qdisc del dev $h2 clsact 236 + } 237 + 238 + send_untagged_and_8021p() 239 + { 240 + ping_do $h1 192.0.2.2 241 + check_fail $? 
242 + 243 + 8021p_do 1 244 + } 245 + 246 + drop_untagged() 247 + { 248 + RET=0 249 + 250 + tc qdisc add dev $h2 clsact 251 + ip link set $h2 promisc on 252 + 253 + # Test that with no PVID, untagged and 802.1p-tagged traffic is 254 + # dropped. 255 + ip link set br0 type bridge vlan_default_pvid 1 256 + 257 + # First we reconfigure the default_pvid, 1, as a non-PVID VLAN. 258 + bridge vlan add dev $swp1 vid 1 untagged 259 + send_untagged_and_8021p 260 + bridge vlan add dev $swp1 vid 1 pvid untagged 261 + 262 + # Next we try to delete VID 1 altogether 263 + bridge vlan del dev $swp1 vid 1 264 + send_untagged_and_8021p 265 + bridge vlan add dev $swp1 vid 1 pvid untagged 266 + 267 + # Set up the bridge without a default_pvid, then check that the 8021q 268 + # module, when the bridge port goes down and then up again, does not 269 + # accidentally re-enable untagged packet reception. 270 + ip link set br0 type bridge vlan_default_pvid 0 271 + ip link set $swp1 down 272 + ip link set $swp1 up 273 + setup_wait 274 + send_untagged_and_8021p 275 + 276 + # Remove swp1 as a bridge port and let it rejoin the bridge while it 277 + # has no default_pvid. 278 + ip link set $swp1 nomaster 279 + ip link set $swp1 master br0 280 + send_untagged_and_8021p 281 + 282 + # Restore settings 283 + ip link set br0 type bridge vlan_default_pvid 1 284 + 285 + log_test "Dropping of untagged and 802.1p-tagged traffic with no PVID" 286 + 193 287 ip link set $h2 promisc off 194 288 tc qdisc del dev $h2 clsact 195 289 }
+421
tools/testing/selftests/net/forwarding/tc_taprio.sh
··· 1 + #!/bin/bash 2 + # SPDX-License-Identifier: GPL-2.0 3 + 4 + ALL_TESTS=" \ 5 + test_clock_jump_backward \ 6 + test_taprio_after_ptp \ 7 + test_max_sdu \ 8 + test_clock_jump_backward_forward \ 9 + " 10 + NUM_NETIFS=4 11 + source tc_common.sh 12 + source lib.sh 13 + source tsn_lib.sh 14 + 15 + require_command python3 16 + 17 + # The test assumes the usual topology from the README, where h1 is connected to 18 + # swp1, h2 to swp2, and swp1 and swp2 are together in a bridge. 19 + # Additional assumption: h1 and h2 use the same PHC, and so do swp1 and swp2. 20 + # By synchronizing h1 to swp1 via PTP, h2 is also implicitly synchronized to 21 + # swp1 (and both to CLOCK_REALTIME). 22 + h1=${NETIFS[p1]} 23 + swp1=${NETIFS[p2]} 24 + swp2=${NETIFS[p3]} 25 + h2=${NETIFS[p4]} 26 + 27 + UDS_ADDRESS_H1="/var/run/ptp4l_h1" 28 + UDS_ADDRESS_SWP1="/var/run/ptp4l_swp1" 29 + 30 + H1_IPV4="192.0.2.1" 31 + H2_IPV4="192.0.2.2" 32 + H1_IPV6="2001:db8:1::1" 33 + H2_IPV6="2001:db8:1::2" 34 + 35 + # Tunables 36 + NUM_PKTS=100 37 + STREAM_VID=10 38 + STREAM_PRIO_1=6 39 + STREAM_PRIO_2=5 40 + STREAM_PRIO_3=4 41 + # PTP uses TC 0 42 + ALL_GATES=$((1 << 0 | 1 << STREAM_PRIO_1 | 1 << STREAM_PRIO_2)) 43 + # Use a conservative cycle of 10 ms to allow the test to still pass when the 44 + # kernel has some extra overhead like lockdep etc 45 + CYCLE_TIME_NS=10000000 46 + # Create two Gate Control List entries, one OPEN and one CLOSE, of equal 47 + # durations 48 + GATE_DURATION_NS=$((CYCLE_TIME_NS / 2)) 49 + # Give 2/3 of the cycle time to user space and 1/3 to the kernel 50 + FUDGE_FACTOR=$((CYCLE_TIME_NS / 3)) 51 + # Shift the isochron base time by half the gate time, so that packets are 52 + # always received by swp1 close to the middle of the time slot, to minimize 53 + # inaccuracies due to network sync 54 + SHIFT_TIME_NS=$((GATE_DURATION_NS / 2)) 55 + 56 + path_delay= 57 + 58 + h1_create() 59 + { 60 + simple_if_init $h1 $H1_IPV4/24 $H1_IPV6/64 61 + } 62 + 63 + h1_destroy() 64 + { 65 + 
simple_if_fini $h1 $H1_IPV4/24 $H1_IPV6/64 66 + } 67 + 68 + h2_create() 69 + { 70 + simple_if_init $h2 $H2_IPV4/24 $H2_IPV6/64 71 + } 72 + 73 + h2_destroy() 74 + { 75 + simple_if_fini $h2 $H2_IPV4/24 $H2_IPV6/64 76 + } 77 + 78 + switch_create() 79 + { 80 + local h2_mac_addr=$(mac_get $h2) 81 + 82 + ip link set $swp1 up 83 + ip link set $swp2 up 84 + 85 + ip link add br0 type bridge vlan_filtering 1 86 + ip link set $swp1 master br0 87 + ip link set $swp2 master br0 88 + ip link set br0 up 89 + 90 + bridge vlan add dev $swp2 vid $STREAM_VID 91 + bridge vlan add dev $swp1 vid $STREAM_VID 92 + bridge fdb add dev $swp2 \ 93 + $h2_mac_addr vlan $STREAM_VID static master 94 + } 95 + 96 + switch_destroy() 97 + { 98 + ip link del br0 99 + } 100 + 101 + ptp_setup() 102 + { 103 + # Set up swp1 as a master PHC for h1, synchronized to the local 104 + # CLOCK_REALTIME. 105 + phc2sys_start $UDS_ADDRESS_SWP1 106 + ptp4l_start $h1 true $UDS_ADDRESS_H1 107 + ptp4l_start $swp1 false $UDS_ADDRESS_SWP1 108 + } 109 + 110 + ptp_cleanup() 111 + { 112 + ptp4l_stop $swp1 113 + ptp4l_stop $h1 114 + phc2sys_stop 115 + } 116 + 117 + txtime_setup() 118 + { 119 + local if_name=$1 120 + 121 + tc qdisc add dev $if_name clsact 122 + # Classify PTP on TC 7 and isochron on TC 6 123 + tc filter add dev $if_name egress protocol 0x88f7 \ 124 + flower action skbedit priority 7 125 + tc filter add dev $if_name egress protocol 802.1Q \ 126 + flower vlan_ethtype 0xdead action skbedit priority 6 127 + tc qdisc add dev $if_name handle 100: parent root mqprio num_tc 8 \ 128 + queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 \ 129 + map 0 1 2 3 4 5 6 7 \ 130 + hw 1 131 + # Set up TC 5, 6, 7 for SO_TXTIME. tc-mqprio queues count from 1. 
132 + tc qdisc replace dev $if_name parent 100:$((STREAM_PRIO_1 + 1)) etf \ 133 + clockid CLOCK_TAI offload delta $FUDGE_FACTOR 134 + tc qdisc replace dev $if_name parent 100:$((STREAM_PRIO_2 + 1)) etf \ 135 + clockid CLOCK_TAI offload delta $FUDGE_FACTOR 136 + tc qdisc replace dev $if_name parent 100:$((STREAM_PRIO_3 + 1)) etf \ 137 + clockid CLOCK_TAI offload delta $FUDGE_FACTOR 138 + } 139 + 140 + txtime_cleanup() 141 + { 142 + local if_name=$1 143 + 144 + tc qdisc del dev $if_name clsact 145 + tc qdisc del dev $if_name root 146 + } 147 + 148 + taprio_replace() 149 + { 150 + local if_name="$1"; shift 151 + local extra_args="$1"; shift 152 + 153 + # STREAM_PRIO_1 always has an open gate. 154 + # STREAM_PRIO_2 has a gate open for GATE_DURATION_NS (half the cycle time) 155 + # STREAM_PRIO_3 always has a closed gate. 156 + tc qdisc replace dev $if_name root stab overhead 24 taprio num_tc 8 \ 157 + queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 \ 158 + map 0 1 2 3 4 5 6 7 \ 159 + sched-entry S $(printf "%x" $ALL_GATES) $GATE_DURATION_NS \ 160 + sched-entry S $(printf "%x" $((ALL_GATES & ~(1 << STREAM_PRIO_2)))) $GATE_DURATION_NS \ 161 + base-time 0 flags 0x2 $extra_args 162 + taprio_wait_for_admin $if_name 163 + } 164 + 165 + taprio_cleanup() 166 + { 167 + local if_name=$1 168 + 169 + tc qdisc del dev $if_name root 170 + } 171 + 172 + probe_path_delay() 173 + { 174 + local isochron_dat="$(mktemp)" 175 + local received 176 + 177 + log_info "Probing path delay" 178 + 179 + isochron_do "$h1" "$h2" "$UDS_ADDRESS_H1" "" 0 \ 180 + "$CYCLE_TIME_NS" "" "" "$NUM_PKTS" \ 181 + "$STREAM_VID" "$STREAM_PRIO_1" "" "$isochron_dat" 182 + 183 + received=$(isochron_report_num_received "$isochron_dat") 184 + if [ "$received" != "$NUM_PKTS" ]; then 185 + echo "Cannot establish basic data path between $h1 and $h2" 186 + exit $ksft_fail 187 + fi 188 + 189 + printf "pdelay = {}\n" > isochron_data.py 190 + isochron report --input-file "$isochron_dat" \ 191 + --printf-format "pdelay[%u] = %d - %d\n" 
\ 192 + --printf-args "qRT" \ 193 + >> isochron_data.py 194 + cat <<-'EOF' > isochron_postprocess.py 195 + #!/usr/bin/env python3 196 + 197 + from isochron_data import pdelay 198 + import numpy as np 199 + 200 + w = np.array(list(pdelay.values())) 201 + print("{}".format(np.max(w))) 202 + EOF 203 + path_delay=$(python3 ./isochron_postprocess.py) 204 + 205 + log_info "Path delay from $h1 to $h2 estimated at $path_delay ns" 206 + 207 + if [ "$path_delay" -gt "$GATE_DURATION_NS" ]; then 208 + echo "Path delay larger than gate duration, aborting" 209 + exit $ksft_fail 210 + fi 211 + 212 + rm -f ./isochron_data.py 2> /dev/null 213 + rm -f ./isochron_postprocess.py 2> /dev/null 214 + rm -f "$isochron_dat" 2> /dev/null 215 + } 216 + 217 + setup_prepare() 218 + { 219 + vrf_prepare 220 + 221 + h1_create 222 + h2_create 223 + switch_create 224 + 225 + txtime_setup $h1 226 + 227 + # Temporarily set up PTP just to probe the end-to-end path delay. 228 + ptp_setup 229 + probe_path_delay 230 + ptp_cleanup 231 + } 232 + 233 + cleanup() 234 + { 235 + pre_cleanup 236 + 237 + isochron_recv_stop 238 + txtime_cleanup $h1 239 + 240 + switch_destroy 241 + h2_destroy 242 + h1_destroy 243 + 244 + vrf_cleanup 245 + } 246 + 247 + run_test() 248 + { 249 + local base_time=$1; shift 250 + local stream_prio=$1; shift 251 + local expected_delay=$1; shift 252 + local should_fail=$1; shift 253 + local test_name=$1; shift 254 + local isochron_dat="$(mktemp)" 255 + local received 256 + local median_delay 257 + 258 + RET=0 259 + 260 + # Set the shift time equal to the cycle time, which effectively 261 + # cancels the default advance time. Packets won't be sent early in 262 + # software, which ensures that they won't prematurely enter through 263 + # the open gate in __test_out_of_band(). Also, the gate is open for 264 + # long enough that this won't cause a problem in __test_in_band(). 
265 + isochron_do "$h1" "$h2" "$UDS_ADDRESS_H1" "" "$base_time" \ 266 + "$CYCLE_TIME_NS" "$SHIFT_TIME_NS" "$GATE_DURATION_NS" \ 267 + "$NUM_PKTS" "$STREAM_VID" "$stream_prio" "" "$isochron_dat" 268 + 269 + received=$(isochron_report_num_received "$isochron_dat") 270 + [ "$received" = "$NUM_PKTS" ] 271 + check_err_fail $should_fail $? "Reception of $NUM_PKTS packets" 272 + 273 + if [ $should_fail = 0 ] && [ "$received" = "$NUM_PKTS" ]; then 274 + printf "pdelay = {}\n" > isochron_data.py 275 + isochron report --input-file "$isochron_dat" \ 276 + --printf-format "pdelay[%u] = %d - %d\n" \ 277 + --printf-args "qRT" \ 278 + >> isochron_data.py 279 + cat <<-'EOF' > isochron_postprocess.py 280 + #!/usr/bin/env python3 281 + 282 + from isochron_data import pdelay 283 + import numpy as np 284 + 285 + w = np.array(list(pdelay.values())) 286 + print("{}".format(int(np.median(w)))) 287 + EOF 288 + median_delay=$(python3 ./isochron_postprocess.py) 289 + 290 + # If the condition below is true, packets were delayed by a closed gate 291 + [ "$median_delay" -gt $((path_delay + expected_delay)) ] 292 + check_fail $? "Median delay $median_delay is greater than expected delay $expected_delay plus path delay $path_delay" 293 + 294 + # If the condition below is true, packets were sent expecting them to 295 + # hit a closed gate in the switch, but were not delayed 296 + [ "$expected_delay" -gt 0 ] && [ "$median_delay" -lt "$expected_delay" ] 297 + check_fail $? 
"Median delay $median_delay is less than expected delay $expected_delay" 298 + fi 299 + 300 + log_test "$test_name" 301 + 302 + rm -f ./isochron_data.py 2> /dev/null 303 + rm -f ./isochron_postprocess.py 2> /dev/null 304 + rm -f "$isochron_dat" 2> /dev/null 305 + } 306 + 307 + __test_always_open() 308 + { 309 + run_test 0.000000000 $STREAM_PRIO_1 0 0 "Gate always open" 310 + } 311 + 312 + __test_always_closed() 313 + { 314 + run_test 0.000000000 $STREAM_PRIO_3 0 1 "Gate always closed" 315 + } 316 + 317 + __test_in_band() 318 + { 319 + # Send packets in-band with the OPEN gate entry 320 + run_test 0.000000000 $STREAM_PRIO_2 0 0 "In band with gate" 321 + } 322 + 323 + __test_out_of_band() 324 + { 325 + # Send packets in-band with the CLOSE gate entry 326 + run_test 0.005000000 $STREAM_PRIO_2 \ 327 + $((GATE_DURATION_NS - SHIFT_TIME_NS)) 0 \ 328 + "Out of band with gate" 329 + } 330 + 331 + run_subtests() 332 + { 333 + __test_always_open 334 + __test_always_closed 335 + __test_in_band 336 + __test_out_of_band 337 + } 338 + 339 + test_taprio_after_ptp() 340 + { 341 + log_info "Setting up taprio after PTP" 342 + ptp_setup 343 + taprio_replace $swp2 344 + run_subtests 345 + taprio_cleanup $swp2 346 + ptp_cleanup 347 + } 348 + 349 + __test_under_max_sdu() 350 + { 351 + # Limit max-sdu for STREAM_PRIO_1 352 + taprio_replace "$swp2" "max-sdu 0 0 0 0 0 0 100 0" 353 + run_test 0.000000000 $STREAM_PRIO_1 0 0 "Under maximum SDU" 354 + } 355 + 356 + __test_over_max_sdu() 357 + { 358 + # Limit max-sdu for STREAM_PRIO_1 359 + taprio_replace "$swp2" "max-sdu 0 0 0 0 0 0 20 0" 360 + run_test 0.000000000 $STREAM_PRIO_1 0 1 "Over maximum SDU" 361 + } 362 + 363 + test_max_sdu() 364 + { 365 + ptp_setup 366 + __test_under_max_sdu 367 + __test_over_max_sdu 368 + taprio_cleanup $swp2 369 + ptp_cleanup 370 + } 371 + 372 + # Perform a clock jump in the past without synchronization running, so that the 373 + # time base remains where it was set by phc_ctl. 
374 + test_clock_jump_backward() 375 + { 376 + # This is a more complex schedule specifically crafted in a way that 377 + # has been problematic on NXP LS1028A. Not much to test with it other 378 + # than the fact that it passes traffic. 379 + tc qdisc replace dev $swp2 root stab overhead 24 taprio num_tc 8 \ 380 + queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 map 0 1 2 3 4 5 6 7 \ 381 + base-time 0 sched-entry S 20 300000 sched-entry S 10 200000 \ 382 + sched-entry S 20 300000 sched-entry S 48 200000 \ 383 + sched-entry S 20 300000 sched-entry S 83 200000 \ 384 + sched-entry S 40 300000 sched-entry S 00 200000 flags 2 385 + 386 + log_info "Forcing a backward clock jump" 387 + phc_ctl $swp1 set 0 388 + 389 + ping_test $h1 192.0.2.2 390 + taprio_cleanup $swp2 391 + } 392 + 393 + # Test that taprio tolerates clock jumps. 394 + # Since ptp4l and phc2sys are running, it is expected for the time to 395 + # eventually recover (through yet another clock jump). Isochron waits 396 + # until that is the case. 397 + test_clock_jump_backward_forward() 398 + { 399 + log_info "Forcing a backward and a forward clock jump" 400 + taprio_replace $swp2 401 + phc_ctl $swp1 set 0 402 + ptp_setup 403 + ping_test $h1 192.0.2.2 404 + run_subtests 405 + ptp_cleanup 406 + taprio_cleanup $swp2 407 + } 408 + 409 + tc_offload_check 410 + if [[ $? -ne 0 ]]; then 411 + log_test_skip "Could not test offloaded functionality" 412 + exit $EXIT_STATUS 413 + fi 414 + 415 + trap cleanup EXIT 416 + 417 + setup_prepare 418 + setup_wait 419 + tests_run 420 + 421 + exit $EXIT_STATUS
+26
tools/testing/selftests/net/forwarding/tsn_lib.sh
··· 2 2 # SPDX-License-Identifier: GPL-2.0 3 3 # Copyright 2021-2022 NXP 4 4 5 + tc_testing_scripts_dir=$(dirname $0)/../../tc-testing/scripts 6 + 5 7 REQUIRE_ISOCHRON=${REQUIRE_ISOCHRON:=yes} 6 8 REQUIRE_LINUXPTP=${REQUIRE_LINUXPTP:=yes} 7 9 ··· 20 18 if [[ "$REQUIRE_LINUXPTP" = "yes" ]]; then 21 19 require_command phc2sys 22 20 require_command ptp4l 21 + require_command phc_ctl 23 22 fi 24 23 25 24 phc2sys_start() ··· 185 182 local base_time=$1; shift 186 183 local cycle_time=$1; shift 187 184 local shift_time=$1; shift 185 + local window_size=$1; shift 188 186 local num_pkts=$1; shift 189 187 local vid=$1; shift 190 188 local priority=$1; shift ··· 214 210 215 211 if ! [ -z "${shift_time}" ]; then 216 212 extra_args="${extra_args} --shift-time=${shift_time}" 213 + fi 214 + 215 + if ! [ -z "${window_size}" ]; then 216 + extra_args="${extra_args} --window-size=${window_size}" 217 217 fi 218 218 219 219 if [ "${use_l2}" = "true" ]; then ··· 254 246 isochron_recv_stop 5000 255 247 256 248 cpufreq_restore ${ISOCHRON_CPU} 249 + } 250 + 251 + isochron_report_num_received() 252 + { 253 + local isochron_dat=$1; shift 254 + 255 + # Count all received packets by looking at the non-zero RX timestamps 256 + isochron report \ 257 + --input-file "${isochron_dat}" \ 258 + --printf-format "%u\n" --printf-args "R" | \ 259 + grep -w -v '0' | wc -l 260 + } 261 + 262 + taprio_wait_for_admin() 263 + { 264 + local if_name="$1"; shift 265 + 266 + "$tc_testing_scripts_dir/taprio_wait_for_admin.sh" "$(which tc)" "$if_name" 257 267 }
+186
tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json
··· 352 352 "$TC qdisc del dev $DUMMY handle 1:0 root", 353 353 "$IP addr del 10.10.10.10/24 dev $DUMMY || true" 354 354 ] 355 + }, 356 + { 357 + "id": "90ec", 358 + "name": "Test DRR's enqueue reentrant behaviour with netem", 359 + "category": [ 360 + "qdisc", 361 + "drr" 362 + ], 363 + "plugins": { 364 + "requires": "nsPlugin" 365 + }, 366 + "setup": [ 367 + "$IP link set dev $DUMMY up || true", 368 + "$IP addr add 10.10.10.10/24 dev $DUMMY || true", 369 + "$TC qdisc add dev $DUMMY handle 1:0 root drr", 370 + "$TC class replace dev $DUMMY parent 1:0 classid 1:1 drr", 371 + "$TC qdisc add dev $DUMMY parent 1:1 handle 2:0 netem duplicate 100%", 372 + "$TC filter add dev $DUMMY parent 1:0 protocol ip prio 1 u32 match ip protocol 1 0xff flowid 1:1" 373 + ], 374 + "cmdUnderTest": "ping -c 1 -I $DUMMY 10.10.10.1 > /dev/null || true", 375 + "expExitCode": "0", 376 + "verifyCmd": "$TC -j -s qdisc ls dev $DUMMY handle 1:0", 377 + "matchJSON": [ 378 + { 379 + "kind": "drr", 380 + "handle": "1:", 381 + "bytes": 196, 382 + "packets": 2 383 + } 384 + ], 385 + "matchCount": "1", 386 + "teardown": [ 387 + "$TC qdisc del dev $DUMMY handle 1:0 root", 388 + "$IP addr del 10.10.10.10/24 dev $DUMMY || true" 389 + ] 390 + }, 391 + { 392 + "id": "1f1f", 393 + "name": "Test ETS's enqueue reentrant behaviour with netem", 394 + "category": [ 395 + "qdisc", 396 + "ets" 397 + ], 398 + "plugins": { 399 + "requires": "nsPlugin" 400 + }, 401 + "setup": [ 402 + "$IP link set dev $DUMMY up || true", 403 + "$IP addr add 10.10.10.10/24 dev $DUMMY || true", 404 + "$TC qdisc add dev $DUMMY handle 1:0 root ets bands 2", 405 + "$TC class replace dev $DUMMY parent 1:0 classid 1:1 ets quantum 1500", 406 + "$TC qdisc add dev $DUMMY parent 1:1 handle 2:0 netem duplicate 100%", 407 + "$TC filter add dev $DUMMY parent 1:0 protocol ip prio 1 u32 match ip protocol 1 0xff flowid 1:1" 408 + ], 409 + "cmdUnderTest": "ping -c 1 -I $DUMMY 10.10.10.1 > /dev/null || true", 410 + "expExitCode": "0", 411 + 
"verifyCmd": "$TC -j -s class show dev $DUMMY", 412 + "matchJSON": [ 413 + { 414 + "class": "ets", 415 + "handle": "1:1", 416 + "stats": { 417 + "bytes": 196, 418 + "packets": 2 419 + } 420 + } 421 + ], 422 + "matchCount": "1", 423 + "teardown": [ 424 + "$TC qdisc del dev $DUMMY handle 1:0 root", 425 + "$IP addr del 10.10.10.10/24 dev $DUMMY || true" 426 + ] 427 + }, 428 + { 429 + "id": "5e6d", 430 + "name": "Test QFQ's enqueue reentrant behaviour with netem", 431 + "category": [ 432 + "qdisc", 433 + "qfq" 434 + ], 435 + "plugins": { 436 + "requires": "nsPlugin" 437 + }, 438 + "setup": [ 439 + "$IP link set dev $DUMMY up || true", 440 + "$IP addr add 10.10.10.10/24 dev $DUMMY || true", 441 + "$TC qdisc add dev $DUMMY handle 1:0 root qfq", 442 + "$TC class replace dev $DUMMY parent 1:0 classid 1:1 qfq weight 100 maxpkt 1500", 443 + "$TC qdisc add dev $DUMMY parent 1:1 handle 2:0 netem duplicate 100%", 444 + "$TC filter add dev $DUMMY parent 1:0 protocol ip prio 1 u32 match ip protocol 1 0xff flowid 1:1" 445 + ], 446 + "cmdUnderTest": "ping -c 1 -I $DUMMY 10.10.10.1 > /dev/null || true", 447 + "expExitCode": "0", 448 + "verifyCmd": "$TC -j -s qdisc ls dev $DUMMY handle 1:0", 449 + "matchJSON": [ 450 + { 451 + "kind": "qfq", 452 + "handle": "1:", 453 + "bytes": 196, 454 + "packets": 2 455 + } 456 + ], 457 + "matchCount": "1", 458 + "teardown": [ 459 + "$TC qdisc del dev $DUMMY handle 1:0 root", 460 + "$IP addr del 10.10.10.10/24 dev $DUMMY || true" 461 + ] 462 + }, 463 + { 464 + "id": "bf1d", 465 + "name": "Test HFSC's enqueue reentrant behaviour with netem", 466 + "category": [ 467 + "qdisc", 468 + "hfsc" 469 + ], 470 + "plugins": { 471 + "requires": "nsPlugin" 472 + }, 473 + "setup": [ 474 + "$IP link set dev $DUMMY up || true", 475 + "$IP addr add 10.10.10.10/24 dev $DUMMY || true", 476 + "$TC qdisc add dev $DUMMY handle 1:0 root hfsc", 477 + "$TC class add dev $DUMMY parent 1:0 classid 1:1 hfsc ls m2 10Mbit", 478 + "$TC qdisc add dev $DUMMY parent 1:1 handle 2:0 
netem duplicate 100%", 479 + "$TC filter add dev $DUMMY parent 1:0 protocol ip prio 1 u32 match ip dst 10.10.10.1/32 flowid 1:1", 480 + "$TC class add dev $DUMMY parent 1:0 classid 1:2 hfsc ls m2 10Mbit", 481 + "$TC qdisc add dev $DUMMY parent 1:2 handle 3:0 netem duplicate 100%", 482 + "$TC filter add dev $DUMMY parent 1:0 protocol ip prio 2 u32 match ip dst 10.10.10.2/32 flowid 1:2", 483 + "ping -c 1 10.10.10.1 -I$DUMMY > /dev/null || true", 484 + "$TC filter del dev $DUMMY parent 1:0 protocol ip prio 1", 485 + "$TC class del dev $DUMMY classid 1:1" 486 + ], 487 + "cmdUnderTest": "ping -c 1 10.10.10.2 -I$DUMMY > /dev/null || true", 488 + "expExitCode": "0", 489 + "verifyCmd": "$TC -j -s qdisc ls dev $DUMMY handle 1:0", 490 + "matchJSON": [ 491 + { 492 + "kind": "hfsc", 493 + "handle": "1:", 494 + "bytes": 392, 495 + "packets": 4 496 + } 497 + ], 498 + "matchCount": "1", 499 + "teardown": [ 500 + "$TC qdisc del dev $DUMMY handle 1:0 root", 501 + "$IP addr del 10.10.10.10/24 dev $DUMMY || true" 502 + ] 503 + }, 504 + { 505 + "id": "7c3b", 506 + "name": "Test nested DRR's enqueue reentrant behaviour with netem", 507 + "category": [ 508 + "qdisc", 509 + "drr" 510 + ], 511 + "plugins": { 512 + "requires": "nsPlugin" 513 + }, 514 + "setup": [ 515 + "$IP link set dev $DUMMY up || true", 516 + "$IP addr add 10.10.10.10/24 dev $DUMMY || true", 517 + "$TC qdisc add dev $DUMMY handle 1:0 root drr", 518 + "$TC class add dev $DUMMY parent 1:0 classid 1:1 drr", 519 + "$TC filter add dev $DUMMY parent 1:0 protocol ip prio 1 u32 match ip protocol 1 0xff flowid 1:1", 520 + "$TC qdisc add dev $DUMMY handle 2:0 parent 1:1 drr", 521 + "$TC class add dev $DUMMY classid 2:1 parent 2:0 drr", 522 + "$TC filter add dev $DUMMY parent 2:0 protocol ip prio 1 u32 match ip protocol 1 0xff flowid 2:1", 523 + "$TC qdisc add dev $DUMMY parent 2:1 handle 3:0 netem duplicate 100%" 524 + ], 525 + "cmdUnderTest": "ping -c 1 -I $DUMMY 10.10.10.1 > /dev/null || true", 526 + "expExitCode": "0", 527 + 
"verifyCmd": "$TC -j -s qdisc ls dev $DUMMY handle 1:0", 528 + "matchJSON": [ 529 + { 530 + "kind": "drr", 531 + "handle": "1:", 532 + "bytes": 196, 533 + "packets": 2 534 + } 535 + ], 536 + "matchCount": "1", 537 + "teardown": [ 538 + "$TC qdisc del dev $DUMMY handle 1:0 root", 539 + "$IP addr del 10.10.10.10/24 dev $DUMMY || true" 540 + ] 355 541 } 356 542 ]
+1
tools/testing/selftests/ublk/Makefile
··· 9 9 TEST_PROGS += test_generic_04.sh 10 10 TEST_PROGS += test_generic_05.sh 11 11 TEST_PROGS += test_generic_06.sh 12 + TEST_PROGS += test_generic_07.sh 12 13 13 14 TEST_PROGS += test_null_01.sh 14 15 TEST_PROGS += test_null_02.sh
+14 -8
tools/testing/selftests/ublk/kublk.c
··· 536 536 if (!(io->flags & UBLKSRV_IO_FREE)) 537 537 return 0; 538 538 539 - /* we issue because we need either fetching or committing */ 539 + /* 540 + * we issue because we need either fetching or committing or 541 + * getting data 542 + */ 540 543 if (!(io->flags & 541 - (UBLKSRV_NEED_FETCH_RQ | UBLKSRV_NEED_COMMIT_RQ_COMP))) 544 + (UBLKSRV_NEED_FETCH_RQ | UBLKSRV_NEED_COMMIT_RQ_COMP | UBLKSRV_NEED_GET_DATA))) 542 545 return 0; 543 546 544 - if (io->flags & UBLKSRV_NEED_COMMIT_RQ_COMP) 547 + if (io->flags & UBLKSRV_NEED_GET_DATA) 548 + cmd_op = UBLK_U_IO_NEED_GET_DATA; 549 + else if (io->flags & UBLKSRV_NEED_COMMIT_RQ_COMP) 545 550 cmd_op = UBLK_U_IO_COMMIT_AND_FETCH_REQ; 546 551 else if (io->flags & UBLKSRV_NEED_FETCH_RQ) 547 552 cmd_op = UBLK_U_IO_FETCH_REQ; ··· 663 658 assert(tag < q->q_depth); 664 659 if (q->tgt_ops->queue_io) 665 660 q->tgt_ops->queue_io(q, tag); 661 + } else if (cqe->res == UBLK_IO_RES_NEED_GET_DATA) { 662 + io->flags |= UBLKSRV_NEED_GET_DATA | UBLKSRV_IO_FREE; 663 + ublk_queue_io_cmd(q, io, tag); 666 664 } else { 667 665 /* 668 666 * COMMIT_REQ will be completed immediately since no fetching ··· 1245 1237 1246 1238 printf("%s %s -t [null|loop|stripe|fault_inject] [-q nr_queues] [-d depth] [-n dev_id]\n", 1247 1239 exe, recovery ? 
"recover" : "add"); 1248 - printf("\t[--foreground] [--quiet] [-z] [--debug_mask mask] [-r 0|1 ] [-g 0|1]\n"); 1240 + printf("\t[--foreground] [--quiet] [-z] [--debug_mask mask] [-r 0|1 ] [-g]\n"); 1249 1241 printf("\t[-e 0|1 ] [-i 0|1]\n"); 1250 1242 printf("\t[target options] [backfile1] [backfile2] ...\n"); 1251 1243 printf("\tdefault: nr_queues=2(max 32), depth=128(max 1024), dev_id=-1(auto allocation)\n"); ··· 1321 1313 1322 1314 opterr = 0; 1323 1315 optind = 2; 1324 - while ((opt = getopt_long(argc, argv, "t:n:d:q:r:e:i:az", 1316 + while ((opt = getopt_long(argc, argv, "t:n:d:q:r:e:i:gaz", 1325 1317 longopts, &option_idx)) != -1) { 1326 1318 switch (opt) { 1327 1319 case 'a': ··· 1359 1351 ctx.flags |= UBLK_F_USER_RECOVERY | UBLK_F_USER_RECOVERY_REISSUE; 1360 1352 break; 1361 1353 case 'g': 1362 - value = strtol(optarg, NULL, 10); 1363 - if (value) 1364 - ctx.flags |= UBLK_F_NEED_GET_DATA; 1354 + ctx.flags |= UBLK_F_NEED_GET_DATA; 1365 1355 break; 1366 1356 case 0: 1367 1357 if (!strcmp(longopts[option_idx].name, "debug_mask"))
+1
tools/testing/selftests/ublk/kublk.h
··· 115 115 #define UBLKSRV_NEED_FETCH_RQ (1UL << 0) 116 116 #define UBLKSRV_NEED_COMMIT_RQ_COMP (1UL << 1) 117 117 #define UBLKSRV_IO_FREE (1UL << 2) 118 + #define UBLKSRV_NEED_GET_DATA (1UL << 3) 118 119 unsigned short flags; 119 120 unsigned short refs; /* used by target code only */ 120 121
+28
tools/testing/selftests/ublk/test_generic_07.sh
··· 1 + #!/bin/bash 2 + # SPDX-License-Identifier: GPL-2.0 3 + 4 + . "$(cd "$(dirname "$0")" && pwd)"/test_common.sh 5 + 6 + TID="generic_07" 7 + ERR_CODE=0 8 + 9 + if ! _have_program fio; then 10 + exit "$UBLK_SKIP_CODE" 11 + fi 12 + 13 + _prep_test "generic" "test UBLK_F_NEED_GET_DATA" 14 + 15 + _create_backfile 0 256M 16 + dev_id=$(_add_ublk_dev -t loop -q 2 -g "${UBLK_BACKFILES[0]}") 17 + _check_add_dev $TID $? 18 + 19 + # run fio over the ublk disk 20 + _run_fio_verify_io --filename=/dev/ublkb"${dev_id}" --size=256M 21 + ERR_CODE=$? 22 + if [ "$ERR_CODE" -eq 0 ]; then 23 + _mkfs_mount_test /dev/ublkb"${dev_id}" 24 + ERR_CODE=$? 25 + fi 26 + 27 + _cleanup_test "generic" 28 + _show_result $TID $ERR_CODE
+4 -4
tools/testing/selftests/ublk/test_stress_05.sh
··· 47 47 _create_backfile 1 256M 48 48 49 49 for reissue in $(seq 0 1); do 50 - ublk_io_and_remove 8G -t null -q 4 -g 1 -r 1 -i "$reissue" & 51 - ublk_io_and_remove 256M -t loop -q 4 -g 1 -r 1 -i "$reissue" "${UBLK_BACKFILES[0]}" & 50 + ublk_io_and_remove 8G -t null -q 4 -g -r 1 -i "$reissue" & 51 + ublk_io_and_remove 256M -t loop -q 4 -g -r 1 -i "$reissue" "${UBLK_BACKFILES[0]}" & 52 52 wait 53 53 done 54 54 55 55 if _have_feature "ZERO_COPY"; then 56 56 for reissue in $(seq 0 1); do 57 - ublk_io_and_remove 8G -t null -q 4 -g 1 -z -r 1 -i "$reissue" & 58 - ublk_io_and_remove 256M -t loop -q 4 -g 1 -z -r 1 -i "$reissue" "${UBLK_BACKFILES[1]}" & 57 + ublk_io_and_remove 8G -t null -q 4 -g -z -r 1 -i "$reissue" & 58 + ublk_io_and_remove 256M -t loop -q 4 -g -z -r 1 -i "$reissue" "${UBLK_BACKFILES[1]}" & 59 59 wait 60 60 done 61 61 fi