Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR (net-6.17-rc3).

No conflicts or adjacent changes.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+2980 -1481
+1 -1
Documentation/ABI/stable/sysfs-block
··· 731 731 Description: 732 732 [RW] If the device is registered for writeback throttling, then 733 733 this file shows the target minimum read latency. If this latency 734 - is exceeded in a given window of time (see wb_window_usec), then 734 + is exceeded in a given window of time (see curr_win_nsec), then 735 735 the writeback throttling will start scaling back writes. Writing 736 736 a value of '0' to this file disables the feature. Writing a 737 737 value of '-1' to this file resets the value to the default
+1 -1
Documentation/admin-guide/blockdev/zoned_loop.rst
··· 79 79 the zone size. Default: zone size. 80 80 conv_zones Total number of conventioanl zones starting from sector 0. 81 81 Default: 8. 82 - base_dir Path to the base directoy where to create the directory 82 + base_dir Path to the base directory where to create the directory 83 83 containing the zone files of the device. 84 84 Default=/var/local/zloop. 85 85 The device directory containing the zone files is always
+1 -1
Documentation/admin-guide/hw-vuln/attack_vector_controls.rst
··· 214 214 Spectre_v2 X X 215 215 Spectre_v2_user X X * (Note 1) 216 216 SRBDS X X X X 217 - SRSO X X 217 + SRSO X X X X 218 218 SSB (Note 4) 219 219 TAA X X X X * (Note 2) 220 220 TSA X X X X
+6 -5
Documentation/core-api/symbol-namespaces.rst
··· 76 76 within the corresponding compilation unit before the #include for 77 77 <linux/export.h>. Typically it's placed before the first #include statement. 78 78 79 - Using the EXPORT_SYMBOL_GPL_FOR_MODULES() macro 80 - ----------------------------------------------- 79 + Using the EXPORT_SYMBOL_FOR_MODULES() macro 80 + ------------------------------------------- 81 81 82 82 Symbols exported using this macro are put into a module namespace. This 83 - namespace cannot be imported. 83 + namespace cannot be imported. These exports are GPL-only as they are only 84 + intended for in-tree modules. 84 85 85 86 The macro takes a comma separated list of module names, allowing only those 86 87 modules to access this symbol. Simple tail-globs are supported. 87 88 88 89 For example:: 89 90 90 - EXPORT_SYMBOL_GPL_FOR_MODULES(preempt_notifier_inc, "kvm,kvm-*") 91 + EXPORT_SYMBOL_FOR_MODULES(preempt_notifier_inc, "kvm,kvm-*") 91 92 92 - will limit usage of this symbol to modules whoes name matches the given 93 + will limit usage of this symbol to modules whose name matches the given 93 94 patterns. 94 95 95 96 How to use Symbols exported in Namespaces
+2
Documentation/networking/mptcp-sysctl.rst
··· 12 12 resent to an MPTCP peer that has not acknowledged a previous 13 13 ADD_ADDR message. 14 14 15 + Do not retransmit if set to 0. 16 + 15 17 The default value matches TCP_RTO_MAX. This is a per-namespace 16 18 sysctl. 17 19
+1 -1
MAINTAINERS
··· 22183 22183 22184 22184 S390 NETWORK DRIVERS 22185 22185 M: Alexandra Winter <wintera@linux.ibm.com> 22186 - M: Thorsten Winkler <twinkler@linux.ibm.com> 22186 + R: Aswin Karuvally <aswin@linux.ibm.com> 22187 22187 L: linux-s390@vger.kernel.org 22188 22188 L: netdev@vger.kernel.org 22189 22189 S: Supported
+1 -1
Makefile
··· 2 2 VERSION = 6 3 3 PATCHLEVEL = 17 4 4 SUBLEVEL = 0 5 - EXTRAVERSION = -rc1 5 + EXTRAVERSION = -rc2 6 6 NAME = Baby Opossum Posse 7 7 8 8 # *DOCUMENTATION*
+6
arch/loongarch/Makefile
··· 102 102 103 103 ifdef CONFIG_OBJTOOL 104 104 ifdef CONFIG_CC_HAS_ANNOTATE_TABLEJUMP 105 + # The annotate-tablejump option can not be passed to LLVM backend when LTO is enabled. 106 + # Ensure it is aware of linker with LTO, '--loongarch-annotate-tablejump' also needs to 107 + # be passed via '-mllvm' to ld.lld. 105 108 KBUILD_CFLAGS += -mannotate-tablejump 109 + ifdef CONFIG_LTO_CLANG 110 + KBUILD_LDFLAGS += -mllvm --loongarch-annotate-tablejump 111 + endif 106 112 else 107 113 KBUILD_CFLAGS += -fno-jump-tables # keep compatibility with older compilers 108 114 endif
+1 -1
arch/loongarch/include/asm/stackframe.h
··· 58 58 .endm 59 59 60 60 .macro STACKLEAK_ERASE 61 - #ifdef CONFIG_GCC_PLUGIN_STACKLEAK 61 + #ifdef CONFIG_KSTACK_ERASE 62 62 bl stackleak_erase_on_task_stack 63 63 #endif 64 64 .endm
+8
arch/loongarch/include/uapi/asm/setup.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 2 + 3 + #ifndef _UAPI_ASM_LOONGARCH_SETUP_H 4 + #define _UAPI_ASM_LOONGARCH_SETUP_H 5 + 6 + #define COMMAND_LINE_SIZE 4096 7 + 8 + #endif /* _UAPI_ASM_LOONGARCH_SETUP_H */
+19 -19
arch/loongarch/kernel/module-sections.c
··· 8 8 #include <linux/module.h> 9 9 #include <linux/moduleloader.h> 10 10 #include <linux/ftrace.h> 11 + #include <linux/sort.h> 11 12 12 13 Elf_Addr module_emit_got_entry(struct module *mod, Elf_Shdr *sechdrs, Elf_Addr val) 13 14 { ··· 62 61 return (Elf_Addr)&plt[nr]; 63 62 } 64 63 65 - static int is_rela_equal(const Elf_Rela *x, const Elf_Rela *y) 64 + #define cmp_3way(a, b) ((a) < (b) ? -1 : (a) > (b)) 65 + 66 + static int compare_rela(const void *x, const void *y) 66 67 { 67 - return x->r_info == y->r_info && x->r_addend == y->r_addend; 68 - } 68 + int ret; 69 + const Elf_Rela *rela_x = x, *rela_y = y; 69 70 70 - static bool duplicate_rela(const Elf_Rela *rela, int idx) 71 - { 72 - int i; 71 + ret = cmp_3way(rela_x->r_info, rela_y->r_info); 72 + if (ret == 0) 73 + ret = cmp_3way(rela_x->r_addend, rela_y->r_addend); 73 74 74 - for (i = 0; i < idx; i++) { 75 - if (is_rela_equal(&rela[i], &rela[idx])) 76 - return true; 77 - } 78 - 79 - return false; 75 + return ret; 80 76 } 81 77 82 78 static void count_max_entries(Elf_Rela *relas, int num, 83 79 unsigned int *plts, unsigned int *gots) 84 80 { 85 - unsigned int i, type; 81 + unsigned int i; 82 + 83 + sort(relas, num, sizeof(Elf_Rela), compare_rela, NULL); 86 84 87 85 for (i = 0; i < num; i++) { 88 - type = ELF_R_TYPE(relas[i].r_info); 89 - switch (type) { 86 + if (i && !compare_rela(&relas[i-1], &relas[i])) 87 + continue; 88 + 89 + switch (ELF_R_TYPE(relas[i].r_info)) { 90 90 case R_LARCH_SOP_PUSH_PLT_PCREL: 91 91 case R_LARCH_B26: 92 - if (!duplicate_rela(relas, i)) 93 - (*plts)++; 92 + (*plts)++; 94 93 break; 95 94 case R_LARCH_GOT_PC_HI20: 96 - if (!duplicate_rela(relas, i)) 97 - (*gots)++; 95 + (*gots)++; 98 96 break; 99 97 default: 100 98 break; /* Do nothing. */
+5 -5
arch/loongarch/kernel/signal.c
··· 677 677 for (i = 1; i < 32; i++) 678 678 err |= __put_user(regs->regs[i], &sc->sc_regs[i]); 679 679 680 + #ifdef CONFIG_CPU_HAS_LBT 681 + if (extctx->lbt.addr) 682 + err |= protected_save_lbt_context(extctx); 683 + #endif 684 + 680 685 if (extctx->lasx.addr) 681 686 err |= protected_save_lasx_context(extctx); 682 687 else if (extctx->lsx.addr) 683 688 err |= protected_save_lsx_context(extctx); 684 689 else if (extctx->fpu.addr) 685 690 err |= protected_save_fpu_context(extctx); 686 - 687 - #ifdef CONFIG_CPU_HAS_LBT 688 - if (extctx->lbt.addr) 689 - err |= protected_save_lbt_context(extctx); 690 - #endif 691 691 692 692 /* Set the "end" magic */ 693 693 info = (struct sctx_info *)extctx->end.addr;
+22
arch/loongarch/kernel/time.c
··· 5 5 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited 6 6 */ 7 7 #include <linux/clockchips.h> 8 + #include <linux/cpuhotplug.h> 8 9 #include <linux/delay.h> 9 10 #include <linux/export.h> 10 11 #include <linux/init.h> ··· 103 102 return 0; 104 103 } 105 104 105 + static int arch_timer_starting(unsigned int cpu) 106 + { 107 + set_csr_ecfg(ECFGF_TIMER); 108 + 109 + return 0; 110 + } 111 + 112 + static int arch_timer_dying(unsigned int cpu) 113 + { 114 + constant_set_state_shutdown(this_cpu_ptr(&constant_clockevent_device)); 115 + 116 + /* Clear Timer Interrupt */ 117 + write_csr_tintclear(CSR_TINTCLR_TI); 118 + 119 + return 0; 120 + } 121 + 106 122 static unsigned long get_loops_per_jiffy(void) 107 123 { 108 124 unsigned long lpj = (unsigned long)const_clock_freq; ··· 189 171 190 172 lpj_fine = get_loops_per_jiffy(); 191 173 pr_info("Constant clock event device register\n"); 174 + 175 + cpuhp_setup_state(CPUHP_AP_LOONGARCH_ARCH_TIMER_STARTING, 176 + "clockevents/loongarch/timer:starting", 177 + arch_timer_starting, arch_timer_dying); 192 178 193 179 return 0; 194 180 }
+6 -1
arch/loongarch/kvm/intc/eiointc.c
··· 45 45 } 46 46 47 47 cpu = s->sw_coremap[irq]; 48 - vcpu = kvm_get_vcpu(s->kvm, cpu); 48 + vcpu = kvm_get_vcpu_by_id(s->kvm, cpu); 49 + if (unlikely(vcpu == NULL)) { 50 + kvm_err("%s: invalid target cpu: %d\n", __func__, cpu); 51 + return; 52 + } 53 + 49 54 if (level) { 50 55 /* if not enable return false */ 51 56 if (!test_bit(irq, (unsigned long *)s->enable.reg_u32))
+4 -4
arch/loongarch/kvm/intc/ipi.c
··· 99 99 static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data) 100 100 { 101 101 int i, idx, ret; 102 - uint32_t val = 0, mask = 0; 102 + uint64_t val = 0, mask = 0; 103 103 104 104 /* 105 105 * Bit 27-30 is mask for byte writing. ··· 108 108 if ((data >> 27) & 0xf) { 109 109 /* Read the old val */ 110 110 idx = srcu_read_lock(&vcpu->kvm->srcu); 111 - ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val); 111 + ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, 4, &val); 112 112 srcu_read_unlock(&vcpu->kvm->srcu, idx); 113 113 if (unlikely(ret)) { 114 114 kvm_err("%s: : read data from addr %llx failed\n", __func__, addr); ··· 124 124 } 125 125 val |= ((uint32_t)(data >> 32) & ~mask); 126 126 idx = srcu_read_lock(&vcpu->kvm->srcu); 127 - ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val); 127 + ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, 4, &val); 128 128 srcu_read_unlock(&vcpu->kvm->srcu, idx); 129 129 if (unlikely(ret)) 130 130 kvm_err("%s: : write data to addr %llx failed\n", __func__, addr); ··· 298 298 cpu = (attr->attr >> 16) & 0x3ff; 299 299 addr = attr->attr & 0xff; 300 300 301 - vcpu = kvm_get_vcpu(dev->kvm, cpu); 301 + vcpu = kvm_get_vcpu_by_id(dev->kvm, cpu); 302 302 if (unlikely(vcpu == NULL)) { 303 303 kvm_err("%s: invalid target cpu: %d\n", __func__, cpu); 304 304 return -EINVAL;
+10
arch/loongarch/kvm/intc/pch_pic.c
··· 195 195 return -EINVAL; 196 196 } 197 197 198 + if (addr & (len - 1)) { 199 + kvm_err("%s: pch pic not aligned addr %llx len %d\n", __func__, addr, len); 200 + return -EINVAL; 201 + } 202 + 198 203 /* statistics of pch pic reading */ 199 204 vcpu->stat.pch_pic_read_exits++; 200 205 ret = loongarch_pch_pic_read(s, addr, len, val); ··· 304 299 305 300 if (!s) { 306 301 kvm_err("%s: pch pic irqchip not valid!\n", __func__); 302 + return -EINVAL; 303 + } 304 + 305 + if (addr & (len - 1)) { 306 + kvm_err("%s: pch pic not aligned addr %llx len %d\n", __func__, addr, len); 307 307 return -EINVAL; 308 308 } 309 309
+5 -3
arch/loongarch/kvm/vcpu.c
··· 1283 1283 return -EINVAL; 1284 1284 1285 1285 preempt_disable(); 1286 - set_csr_euen(CSR_EUEN_LBTEN); 1287 - _restore_lbt(&vcpu->arch.lbt); 1288 - vcpu->arch.aux_inuse |= KVM_LARCH_LBT; 1286 + if (!(vcpu->arch.aux_inuse & KVM_LARCH_LBT)) { 1287 + set_csr_euen(CSR_EUEN_LBTEN); 1288 + _restore_lbt(&vcpu->arch.lbt); 1289 + vcpu->arch.aux_inuse |= KVM_LARCH_LBT; 1290 + } 1289 1291 preempt_enable(); 1290 1292 1291 1293 return 0;
+1
arch/x86/boot/startup/sev-shared.c
··· 785 785 pc->entry[0].page_size = RMP_PG_SIZE_4K; 786 786 pc->entry[0].action = validate; 787 787 pc->entry[0].ignore_cf = 0; 788 + pc->entry[0].rsvd = 0; 788 789 pc->entry[0].pfn = paddr >> PAGE_SHIFT; 789 790 790 791 /* Protocol 0, Call ID 1 */
+2
arch/x86/coco/sev/core.c
··· 227 227 pe->page_size = RMP_PG_SIZE_4K; 228 228 pe->action = action; 229 229 pe->ignore_cf = 0; 230 + pe->rsvd = 0; 230 231 pe->pfn = pfn; 231 232 232 233 pe++; ··· 258 257 pe->page_size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K; 259 258 pe->action = e->operation == SNP_PAGE_STATE_PRIVATE; 260 259 pe->ignore_cf = 0; 260 + pe->rsvd = 0; 261 261 pe->pfn = e->gfn; 262 262 263 263 pe++;
+17 -16
arch/x86/coco/sev/vc-handle.c
··· 371 371 * executing with Secure TSC enabled, so special handling is required for 372 372 * accesses of MSR_IA32_TSC and MSR_AMD64_GUEST_TSC_FREQ. 373 373 */ 374 - static enum es_result __vc_handle_secure_tsc_msrs(struct pt_regs *regs, bool write) 374 + static enum es_result __vc_handle_secure_tsc_msrs(struct es_em_ctxt *ctxt, bool write) 375 375 { 376 + struct pt_regs *regs = ctxt->regs; 376 377 u64 tsc; 377 378 378 379 /* 379 - * GUEST_TSC_FREQ should not be intercepted when Secure TSC is enabled. 380 - * Terminate the SNP guest when the interception is enabled. 380 + * Writing to MSR_IA32_TSC can cause subsequent reads of the TSC to 381 + * return undefined values, and GUEST_TSC_FREQ is read-only. Generate 382 + * a #GP on all writes. 383 + */ 384 + if (write) { 385 + ctxt->fi.vector = X86_TRAP_GP; 386 + ctxt->fi.error_code = 0; 387 + return ES_EXCEPTION; 388 + } 389 + 390 + /* 391 + * GUEST_TSC_FREQ read should not be intercepted when Secure TSC is 392 + * enabled. Terminate the guest if a read is attempted. 381 393 */ 382 394 if (regs->cx == MSR_AMD64_GUEST_TSC_FREQ) 383 395 return ES_VMM_ERROR; 384 396 385 - /* 386 - * Writes: Writing to MSR_IA32_TSC can cause subsequent reads of the TSC 387 - * to return undefined values, so ignore all writes. 388 - * 389 - * Reads: Reads of MSR_IA32_TSC should return the current TSC value, use 390 - * the value returned by rdtsc_ordered(). 391 - */ 392 - if (write) { 393 - WARN_ONCE(1, "TSC MSR writes are verboten!\n"); 394 - return ES_OK; 395 - } 396 - 397 + /* Reads of MSR_IA32_TSC should return the current TSC value. */ 397 398 tsc = rdtsc_ordered(); 398 399 regs->ax = lower_32_bits(tsc); 399 400 regs->dx = upper_32_bits(tsc); ··· 417 416 case MSR_IA32_TSC: 418 417 case MSR_AMD64_GUEST_TSC_FREQ: 419 418 if (sev_status & MSR_AMD64_SNP_SECURE_TSC) 420 - return __vc_handle_secure_tsc_msrs(regs, write); 419 + return __vc_handle_secure_tsc_msrs(ctxt, write); 421 420 break; 422 421 default: 423 422 break;
-8
arch/x86/include/asm/cpuid.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - 3 - #ifndef _ASM_X86_CPUID_H 4 - #define _ASM_X86_CPUID_H 5 - 6 - #include <asm/cpuid/api.h> 7 - 8 - #endif /* _ASM_X86_CPUID_H */
+11 -2
arch/x86/kernel/cpu/bugs.c
··· 386 386 387 387 case X86_BUG_SPECTRE_V2: 388 388 case X86_BUG_RETBLEED: 389 - case X86_BUG_SRSO: 390 389 case X86_BUG_L1TF: 391 390 case X86_BUG_ITS: 392 391 return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) || ··· 3183 3184 } 3184 3185 3185 3186 if (srso_mitigation == SRSO_MITIGATION_AUTO) { 3186 - if (should_mitigate_vuln(X86_BUG_SRSO)) { 3187 + /* 3188 + * Use safe-RET if user->kernel or guest->host protection is 3189 + * required. Otherwise the 'microcode' mitigation is sufficient 3190 + * to protect the user->user and guest->guest vectors. 3191 + */ 3192 + if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) || 3193 + (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) && 3194 + !boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO))) { 3187 3195 srso_mitigation = SRSO_MITIGATION_SAFE_RET; 3196 + } else if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) || 3197 + cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST)) { 3198 + srso_mitigation = SRSO_MITIGATION_MICROCODE; 3188 3199 } else { 3189 3200 srso_mitigation = SRSO_MITIGATION_NONE; 3190 3201 return;
+10 -9
arch/x86/kernel/fpu/xstate.c
··· 1881 1881 #ifdef CONFIG_PROC_PID_ARCH_STATUS 1882 1882 /* 1883 1883 * Report the amount of time elapsed in millisecond since last AVX512 1884 - * use in the task. 1884 + * use in the task. Report -1 if no AVX-512 usage. 1885 1885 */ 1886 1886 static void avx512_status(struct seq_file *m, struct task_struct *task) 1887 1887 { 1888 - unsigned long timestamp = READ_ONCE(x86_task_fpu(task)->avx512_timestamp); 1889 - long delta; 1888 + unsigned long timestamp; 1889 + long delta = -1; 1890 1890 1891 - if (!timestamp) { 1892 - /* 1893 - * Report -1 if no AVX512 usage 1894 - */ 1895 - delta = -1; 1896 - } else { 1891 + /* AVX-512 usage is not tracked for kernel threads. Don't report anything. */ 1892 + if (task->flags & (PF_KTHREAD | PF_USER_WORKER)) 1893 + return; 1894 + 1895 + timestamp = READ_ONCE(x86_task_fpu(task)->avx512_timestamp); 1896 + 1897 + if (timestamp) { 1897 1898 delta = (long)(jiffies - timestamp); 1898 1899 /* 1899 1900 * Cap to LONG_MAX if time difference > LONG_MAX
+1 -2
block/bfq-iosched.c
··· 5847 5847 goto out; 5848 5848 } 5849 5849 5850 - bfqq = kmem_cache_alloc_node(bfq_pool, 5851 - GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN, 5850 + bfqq = kmem_cache_alloc_node(bfq_pool, GFP_NOWAIT | __GFP_ZERO, 5852 5851 bfqd->queue->node); 5853 5852 5854 5853 if (bfqq) {
+3 -3
block/blk-cgroup.c
··· 394 394 395 395 /* allocate */ 396 396 if (!new_blkg) { 397 - new_blkg = blkg_alloc(blkcg, disk, GFP_NOWAIT | __GFP_NOWARN); 397 + new_blkg = blkg_alloc(blkcg, disk, GFP_NOWAIT); 398 398 if (unlikely(!new_blkg)) { 399 399 ret = -ENOMEM; 400 400 goto err_put_css; ··· 1467 1467 1468 1468 spin_lock_init(&blkcg->lock); 1469 1469 refcount_set(&blkcg->online_pin, 1); 1470 - INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN); 1470 + INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT); 1471 1471 INIT_HLIST_HEAD(&blkcg->blkg_list); 1472 1472 #ifdef CONFIG_CGROUP_WRITEBACK 1473 1473 INIT_LIST_HEAD(&blkcg->cgwb_list); ··· 1630 1630 pd_prealloc = NULL; 1631 1631 } else { 1632 1632 pd = pol->pd_alloc_fn(disk, blkg->blkcg, 1633 - GFP_NOWAIT | __GFP_NOWARN); 1633 + GFP_NOWAIT); 1634 1634 } 1635 1635 1636 1636 if (!pd) {
+6 -8
block/blk-sysfs.c
··· 847 847 /* nothing to do here, all data is associated with the parent gendisk */ 848 848 } 849 849 850 - static const struct kobj_type blk_queue_ktype = { 850 + const struct kobj_type blk_queue_ktype = { 851 851 .default_groups = blk_queue_attr_groups, 852 852 .sysfs_ops = &queue_sysfs_ops, 853 853 .release = blk_queue_release, ··· 875 875 struct request_queue *q = disk->queue; 876 876 int ret; 877 877 878 - kobject_init(&disk->queue_kobj, &blk_queue_ktype); 879 878 ret = kobject_add(&disk->queue_kobj, &disk_to_dev(disk)->kobj, "queue"); 880 879 if (ret < 0) 881 - goto out_put_queue_kobj; 880 + return ret; 882 881 883 882 if (queue_is_mq(q)) { 884 883 ret = blk_mq_sysfs_register(disk); 885 884 if (ret) 886 - goto out_put_queue_kobj; 885 + goto out_del_queue_kobj; 887 886 } 888 887 mutex_lock(&q->sysfs_lock); 889 888 ··· 902 903 903 904 if (queue_is_mq(q)) 904 905 elevator_set_default(q); 905 - wbt_enable_default(disk); 906 906 907 907 blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q); 908 + wbt_enable_default(disk); 908 909 909 910 /* Now everything is ready and send out KOBJ_ADD uevent */ 910 911 kobject_uevent(&disk->queue_kobj, KOBJ_ADD); ··· 933 934 mutex_unlock(&q->sysfs_lock); 934 935 if (queue_is_mq(q)) 935 936 blk_mq_sysfs_unregister(disk); 936 - out_put_queue_kobj: 937 - kobject_put(&disk->queue_kobj); 937 + out_del_queue_kobj: 938 + kobject_del(&disk->queue_kobj); 938 939 return ret; 939 940 } 940 941 ··· 985 986 elevator_set_none(q); 986 987 987 988 blk_debugfs_remove(disk); 988 - kobject_put(&disk->queue_kobj); 989 989 }
+8 -7
block/blk-wbt.c
··· 85 85 u64 sync_issue; 86 86 void *sync_cookie; 87 87 88 - unsigned long last_issue; /* last non-throttled issue */ 89 - unsigned long last_comp; /* last non-throttled comp */ 88 + unsigned long last_issue; /* issue time of last read rq */ 89 + unsigned long last_comp; /* completion time of last read rq */ 90 90 unsigned long min_lat_nsec; 91 91 struct rq_qos rqos; 92 92 struct rq_wait rq_wait[WBT_NUM_RWQ]; ··· 248 248 struct rq_wb *rwb = RQWB(rqos); 249 249 250 250 if (!wbt_is_tracked(rq)) { 251 - if (rwb->sync_cookie == rq) { 252 - rwb->sync_issue = 0; 253 - rwb->sync_cookie = NULL; 254 - } 251 + if (wbt_is_read(rq)) { 252 + if (rwb->sync_cookie == rq) { 253 + rwb->sync_issue = 0; 254 + rwb->sync_cookie = NULL; 255 + } 255 256 256 - if (wbt_is_read(rq)) 257 257 wb_timestamp(rwb, &rwb->last_comp); 258 + } 258 259 } else { 259 260 WARN_ON_ONCE(rq == rwb->sync_cookie); 260 261 __wbt_done(rqos, wbt_flags(rq));
+1
block/blk.h
··· 29 29 /* Max future timer expiry for timeouts */ 30 30 #define BLK_MAX_TIMEOUT (5 * HZ) 31 31 32 + extern const struct kobj_type blk_queue_ktype; 32 33 extern struct dentry *blk_debugfs_root; 33 34 34 35 struct blk_flush_queue {
+2
block/genhd.c
··· 1303 1303 disk_free_zone_resources(disk); 1304 1304 xa_destroy(&disk->part_tbl); 1305 1305 1306 + kobject_put(&disk->queue_kobj); 1306 1307 disk->queue->disk = NULL; 1307 1308 blk_put_queue(disk->queue); 1308 1309 ··· 1487 1486 INIT_LIST_HEAD(&disk->slave_bdevs); 1488 1487 #endif 1489 1488 mutex_init(&disk->rqos_state_mutex); 1489 + kobject_init(&disk->queue_kobj, &blk_queue_ktype); 1490 1490 return disk; 1491 1491 1492 1492 out_erase_part0:
+7 -3
drivers/acpi/ec.c
··· 2033 2033 goto out; 2034 2034 } 2035 2035 2036 - if (!strstarts(ecdt_ptr->id, "\\")) { 2036 + if (!strlen(ecdt_ptr->id)) { 2037 2037 /* 2038 2038 * The ECDT table on some MSI notebooks contains invalid data, together 2039 2039 * with an empty ID string (""). ··· 2042 2042 * a "fully qualified reference to the (...) embedded controller device", 2043 2043 * so this string always has to start with a backslash. 2044 2044 * 2045 - * By verifying this we can avoid such faulty ECDT tables in a safe way. 2045 + * However some ThinkBook machines have a ECDT table with a valid EC 2046 + * description but an invalid ID string ("_SB.PC00.LPCB.EC0"). 2047 + * 2048 + * Because of this we only check if the ID string is empty in order to 2049 + * avoid the obvious cases. 2046 2050 */ 2047 - pr_err(FW_BUG "Ignoring ECDT due to invalid ID string \"%s\"\n", ecdt_ptr->id); 2051 + pr_err(FW_BUG "Ignoring ECDT due to empty ID string\n"); 2048 2052 goto out; 2049 2053 } 2050 2054
+4 -1
drivers/acpi/processor_perflib.c
··· 180 180 struct acpi_processor *pr = per_cpu(processors, cpu); 181 181 int ret; 182 182 183 - if (!pr || !pr->performance) 183 + if (!pr) 184 184 continue; 185 185 186 186 /* ··· 196 196 if (ret < 0) 197 197 pr_err("Failed to add freq constraint for CPU%d (%d)\n", 198 198 cpu, ret); 199 + 200 + if (!pr->performance) 201 + continue; 199 202 200 203 ret = acpi_processor_get_platform_limit(pr); 201 204 if (ret)
+7 -2
drivers/ata/libata-eh.c
··· 2075 2075 * Check if a link is established. This is a relaxed version of 2076 2076 * ata_phys_link_online() which accounts for the fact that this is potentially 2077 2077 * called after changing the link power management policy, which may not be 2078 - * reflected immediately in the SSTAUS register (e.g., we may still be seeing 2078 + * reflected immediately in the SStatus register (e.g., we may still be seeing 2079 2079 * the PHY in partial, slumber or devsleep Partial power management state. 2080 2080 * So check that: 2081 2081 * - A device is still present, that is, DET is 1h (Device presence detected ··· 2089 2089 u32 sstatus; 2090 2090 u8 det, ipm; 2091 2091 2092 + /* 2093 + * For old IDE/PATA adapters that do not have a valid scr_read method, 2094 + * or if reading the SStatus register fails, assume that the device is 2095 + * present. Device probe will determine if that is really the case. 2096 + */ 2092 2097 if (sata_scr_read(link, SCR_STATUS, &sstatus)) 2093 - return false; 2098 + return true; 2094 2099 2095 2100 det = sstatus & 0x0f; 2096 2101 ipm = (sstatus >> 8) & 0x0f;
+3 -8
drivers/ata/libata-scsi.c
··· 3904 3904 /* Check cdl_ctrl */ 3905 3905 switch (buf[0] & 0x03) { 3906 3906 case 0: 3907 - /* Disable CDL if it is enabled */ 3908 - if (!(dev->flags & ATA_DFLAG_CDL_ENABLED)) 3909 - return 0; 3907 + /* Disable CDL */ 3910 3908 ata_dev_dbg(dev, "Disabling CDL\n"); 3911 3909 cdl_action = 0; 3912 3910 dev->flags &= ~ATA_DFLAG_CDL_ENABLED; 3913 3911 break; 3914 3912 case 0x02: 3915 3913 /* 3916 - * Enable CDL if not already enabled. Since this is mutually 3917 - * exclusive with NCQ priority, allow this only if NCQ priority 3918 - * is disabled. 3914 + * Enable CDL. Since CDL is mutually exclusive with NCQ 3915 + * priority, allow this only if NCQ priority is disabled. 3919 3916 */ 3920 - if (dev->flags & ATA_DFLAG_CDL_ENABLED) 3921 - return 0; 3922 3917 if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED) { 3923 3918 ata_dev_err(dev, 3924 3919 "NCQ priority must be disabled to enable CDL\n");
+6 -33
drivers/block/drbd/drbd_int.h
··· 380 380 /* this is/was a write request */ 381 381 __EE_WRITE, 382 382 383 + /* hand back using mempool_free(e, drbd_buffer_page_pool) */ 384 + __EE_RELEASE_TO_MEMPOOL, 385 + 383 386 /* this is/was a write same request */ 384 387 __EE_WRITE_SAME, 385 388 ··· 405 402 #define EE_IN_INTERVAL_TREE (1<<__EE_IN_INTERVAL_TREE) 406 403 #define EE_SUBMITTED (1<<__EE_SUBMITTED) 407 404 #define EE_WRITE (1<<__EE_WRITE) 405 + #define EE_RELEASE_TO_MEMPOOL (1<<__EE_RELEASE_TO_MEMPOOL) 408 406 #define EE_WRITE_SAME (1<<__EE_WRITE_SAME) 409 407 #define EE_APPLICATION (1<<__EE_APPLICATION) 410 408 #define EE_RS_THIN_REQ (1<<__EE_RS_THIN_REQ) ··· 862 858 struct list_head sync_ee; /* IO in progress (P_RS_DATA_REPLY gets written to disk) */ 863 859 struct list_head done_ee; /* need to send P_WRITE_ACK */ 864 860 struct list_head read_ee; /* [RS]P_DATA_REQUEST being read */ 865 - struct list_head net_ee; /* zero-copy network send in progress */ 866 861 867 862 struct list_head resync_reads; 868 863 atomic_t pp_in_use; /* allocated from page pool */ ··· 1332 1329 extern mempool_t drbd_request_mempool; 1333 1330 extern mempool_t drbd_ee_mempool; 1334 1331 1335 - /* drbd's page pool, used to buffer data received from the peer, 1336 - * or data requested by the peer. 1337 - * 1338 - * This does not have an emergency reserve. 1339 - * 1340 - * When allocating from this pool, it first takes pages from the pool. 1341 - * Only if the pool is depleted will try to allocate from the system. 1342 - * 1343 - * The assumption is that pages taken from this pool will be processed, 1344 - * and given back, "quickly", and then can be recycled, so we can avoid 1345 - * frequent calls to alloc_page(), and still will be able to make progress even 1346 - * under memory pressure. 
1347 - */ 1348 - extern struct page *drbd_pp_pool; 1349 - extern spinlock_t drbd_pp_lock; 1350 - extern int drbd_pp_vacant; 1351 - extern wait_queue_head_t drbd_pp_wait; 1352 - 1353 1332 /* We also need a standard (emergency-reserve backed) page pool 1354 1333 * for meta data IO (activity log, bitmap). 1355 1334 * We can keep it global, as long as it is used as "N pages at a time". ··· 1339 1354 */ 1340 1355 #define DRBD_MIN_POOL_PAGES 128 1341 1356 extern mempool_t drbd_md_io_page_pool; 1357 + extern mempool_t drbd_buffer_page_pool; 1342 1358 1343 1359 /* We also need to make sure we get a bio 1344 1360 * when we need it for housekeeping purposes */ ··· 1474 1488 sector_t, unsigned int, 1475 1489 unsigned int, 1476 1490 gfp_t) __must_hold(local); 1477 - extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request *, 1478 - int); 1479 - #define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0) 1480 - #define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1) 1491 + extern void drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *req); 1481 1492 extern struct page *drbd_alloc_pages(struct drbd_peer_device *, unsigned int, bool); 1482 1493 extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed); 1483 1494 extern int drbd_connected(struct drbd_peer_device *); ··· 1592 1609 #define page_chain_for_each_safe(page, n) \ 1593 1610 for (; page && ({ n = page_chain_next(page); 1; }); page = n) 1594 1611 1595 - 1596 - static inline int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_req) 1597 - { 1598 - struct page *page = peer_req->pages; 1599 - page_chain_for_each(page) { 1600 - if (page_count(page) > 1) 1601 - return 1; 1602 - } 1603 - return 0; 1604 - } 1605 1612 1606 1613 static inline union drbd_state drbd_read_state(struct drbd_device *device) 1607 1614 {
+15 -44
drivers/block/drbd/drbd_main.c
··· 114 114 mempool_t drbd_request_mempool; 115 115 mempool_t drbd_ee_mempool; 116 116 mempool_t drbd_md_io_page_pool; 117 + mempool_t drbd_buffer_page_pool; 117 118 struct bio_set drbd_md_io_bio_set; 118 119 struct bio_set drbd_io_bio_set; 119 - 120 - /* I do not use a standard mempool, because: 121 - 1) I want to hand out the pre-allocated objects first. 122 - 2) I want to be able to interrupt sleeping allocation with a signal. 123 - Note: This is a single linked list, the next pointer is the private 124 - member of struct page. 125 - */ 126 - struct page *drbd_pp_pool; 127 - DEFINE_SPINLOCK(drbd_pp_lock); 128 - int drbd_pp_vacant; 129 - wait_queue_head_t drbd_pp_wait; 130 120 131 121 DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5); 132 122 ··· 1601 1611 static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device, 1602 1612 struct drbd_peer_request *peer_req) 1603 1613 { 1614 + bool use_sendpage = !(peer_req->flags & EE_RELEASE_TO_MEMPOOL); 1604 1615 struct page *page = peer_req->pages; 1605 1616 unsigned len = peer_req->i.size; 1606 1617 int err; ··· 1610 1619 page_chain_for_each(page) { 1611 1620 unsigned l = min_t(unsigned, len, PAGE_SIZE); 1612 1621 1613 - err = _drbd_send_page(peer_device, page, 0, l, 1614 - page_chain_next(page) ? MSG_MORE : 0); 1622 + if (likely(use_sendpage)) 1623 + err = _drbd_send_page(peer_device, page, 0, l, 1624 + page_chain_next(page) ? MSG_MORE : 0); 1625 + else 1626 + err = _drbd_no_send_page(peer_device, page, 0, l, 1627 + page_chain_next(page) ? 
MSG_MORE : 0); 1628 + 1615 1629 if (err) 1616 1630 return err; 1617 1631 len -= l; ··· 1958 1962 INIT_LIST_HEAD(&device->sync_ee); 1959 1963 INIT_LIST_HEAD(&device->done_ee); 1960 1964 INIT_LIST_HEAD(&device->read_ee); 1961 - INIT_LIST_HEAD(&device->net_ee); 1962 1965 INIT_LIST_HEAD(&device->resync_reads); 1963 1966 INIT_LIST_HEAD(&device->resync_work.list); 1964 1967 INIT_LIST_HEAD(&device->unplug_work.list); ··· 2038 2043 D_ASSERT(device, list_empty(&device->sync_ee)); 2039 2044 D_ASSERT(device, list_empty(&device->done_ee)); 2040 2045 D_ASSERT(device, list_empty(&device->read_ee)); 2041 - D_ASSERT(device, list_empty(&device->net_ee)); 2042 2046 D_ASSERT(device, list_empty(&device->resync_reads)); 2043 2047 D_ASSERT(device, list_empty(&first_peer_device(device)->connection->sender_work.q)); 2044 2048 D_ASSERT(device, list_empty(&device->resync_work.list)); ··· 2049 2055 2050 2056 static void drbd_destroy_mempools(void) 2051 2057 { 2052 - struct page *page; 2053 - 2054 - while (drbd_pp_pool) { 2055 - page = drbd_pp_pool; 2056 - drbd_pp_pool = (struct page *)page_private(page); 2057 - __free_page(page); 2058 - drbd_pp_vacant--; 2059 - } 2060 - 2061 2058 /* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */ 2062 2059 2063 2060 bioset_exit(&drbd_io_bio_set); 2064 2061 bioset_exit(&drbd_md_io_bio_set); 2062 + mempool_exit(&drbd_buffer_page_pool); 2065 2063 mempool_exit(&drbd_md_io_page_pool); 2066 2064 mempool_exit(&drbd_ee_mempool); 2067 2065 mempool_exit(&drbd_request_mempool); ··· 2072 2086 2073 2087 static int drbd_create_mempools(void) 2074 2088 { 2075 - struct page *page; 2076 2089 const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count; 2077 - int i, ret; 2090 + int ret; 2078 2091 2079 2092 /* caches */ 2080 2093 drbd_request_cache = kmem_cache_create( ··· 2110 2125 if (ret) 2111 2126 goto Enomem; 2112 2127 2128 + ret = mempool_init_page_pool(&drbd_buffer_page_pool, number, 0); 2129 + if (ret) 2130 + goto Enomem; 2131 + 2113 2132 ret = 
mempool_init_slab_pool(&drbd_request_mempool, number, 2114 2133 drbd_request_cache); 2115 2134 if (ret) ··· 2122 2133 ret = mempool_init_slab_pool(&drbd_ee_mempool, number, drbd_ee_cache); 2123 2134 if (ret) 2124 2135 goto Enomem; 2125 - 2126 - for (i = 0; i < number; i++) { 2127 - page = alloc_page(GFP_HIGHUSER); 2128 - if (!page) 2129 - goto Enomem; 2130 - set_page_private(page, (unsigned long)drbd_pp_pool); 2131 - drbd_pp_pool = page; 2132 - } 2133 - drbd_pp_vacant = number; 2134 2136 2135 2137 return 0; 2136 2138 ··· 2149 2169 rr = drbd_free_peer_reqs(device, &device->done_ee); 2150 2170 if (rr) 2151 2171 drbd_err(device, "%d EEs in done list found!\n", rr); 2152 - 2153 - rr = drbd_free_peer_reqs(device, &device->net_ee); 2154 - if (rr) 2155 - drbd_err(device, "%d EEs in net list found!\n", rr); 2156 2172 } 2157 2173 2158 2174 /* caution. no locking. */ ··· 2838 2862 DRBD_MAJOR); 2839 2863 return err; 2840 2864 } 2841 - 2842 - /* 2843 - * allocate all necessary structs 2844 - */ 2845 - init_waitqueue_head(&drbd_pp_wait); 2846 2865 2847 2866 drbd_proc = NULL; /* play safe for drbd_cleanup */ 2848 2867 idr_init(&drbd_devices);
+31 -231
drivers/block/drbd/drbd_receiver.c
··· 33 33 #include <linux/string.h> 34 34 #include <linux/scatterlist.h> 35 35 #include <linux/part_stat.h> 36 + #include <linux/mempool.h> 36 37 #include "drbd_int.h" 37 38 #include "drbd_protocol.h" 38 39 #include "drbd_req.h" ··· 64 63 65 64 #define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN) 66 65 67 - /* 68 - * some helper functions to deal with single linked page lists, 69 - * page->private being our "next" pointer. 70 - */ 71 - 72 - /* If at least n pages are linked at head, get n pages off. 73 - * Otherwise, don't modify head, and return NULL. 74 - * Locking is the responsibility of the caller. 75 - */ 76 - static struct page *page_chain_del(struct page **head, int n) 77 - { 78 - struct page *page; 79 - struct page *tmp; 80 - 81 - BUG_ON(!n); 82 - BUG_ON(!head); 83 - 84 - page = *head; 85 - 86 - if (!page) 87 - return NULL; 88 - 89 - while (page) { 90 - tmp = page_chain_next(page); 91 - if (--n == 0) 92 - break; /* found sufficient pages */ 93 - if (tmp == NULL) 94 - /* insufficient pages, don't use any of them. */ 95 - return NULL; 96 - page = tmp; 97 - } 98 - 99 - /* add end of list marker for the returned list */ 100 - set_page_private(page, 0); 101 - /* actual return value, and adjustment of head */ 102 - page = *head; 103 - *head = tmp; 104 - return page; 105 - } 106 - 107 - /* may be used outside of locks to find the tail of a (usually short) 108 - * "private" page chain, before adding it back to a global chain head 109 - * with page_chain_add() under a spinlock. 
*/ 110 - static struct page *page_chain_tail(struct page *page, int *len) 111 - { 112 - struct page *tmp; 113 - int i = 1; 114 - while ((tmp = page_chain_next(page))) { 115 - ++i; 116 - page = tmp; 117 - } 118 - if (len) 119 - *len = i; 120 - return page; 121 - } 122 - 123 - static int page_chain_free(struct page *page) 124 - { 125 - struct page *tmp; 126 - int i = 0; 127 - page_chain_for_each_safe(page, tmp) { 128 - put_page(page); 129 - ++i; 130 - } 131 - return i; 132 - } 133 - 134 - static void page_chain_add(struct page **head, 135 - struct page *chain_first, struct page *chain_last) 136 - { 137 - #if 1 138 - struct page *tmp; 139 - tmp = page_chain_tail(chain_first, NULL); 140 - BUG_ON(tmp != chain_last); 141 - #endif 142 - 143 - /* add chain to head */ 144 - set_page_private(chain_last, (unsigned long)*head); 145 - *head = chain_first; 146 - } 147 - 148 - static struct page *__drbd_alloc_pages(struct drbd_device *device, 149 - unsigned int number) 66 + static struct page *__drbd_alloc_pages(unsigned int number) 150 67 { 151 68 struct page *page = NULL; 152 69 struct page *tmp = NULL; 153 70 unsigned int i = 0; 154 71 155 - /* Yes, testing drbd_pp_vacant outside the lock is racy. 156 - * So what. It saves a spin_lock. */ 157 - if (drbd_pp_vacant >= number) { 158 - spin_lock(&drbd_pp_lock); 159 - page = page_chain_del(&drbd_pp_pool, number); 160 - if (page) 161 - drbd_pp_vacant -= number; 162 - spin_unlock(&drbd_pp_lock); 163 - if (page) 164 - return page; 165 - } 166 - 167 72 /* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD 168 73 * "criss-cross" setup, that might cause write-out on some other DRBD, 169 74 * which in turn might block on the other node at this very place. 
*/ 170 75 for (i = 0; i < number; i++) { 171 - tmp = alloc_page(GFP_TRY); 76 + tmp = mempool_alloc(&drbd_buffer_page_pool, GFP_TRY); 172 77 if (!tmp) 173 - break; 78 + goto fail; 174 79 set_page_private(tmp, (unsigned long)page); 175 80 page = tmp; 176 81 } 177 - 178 - if (i == number) 179 - return page; 180 - 181 - /* Not enough pages immediately available this time. 182 - * No need to jump around here, drbd_alloc_pages will retry this 183 - * function "soon". */ 184 - if (page) { 185 - tmp = page_chain_tail(page, NULL); 186 - spin_lock(&drbd_pp_lock); 187 - page_chain_add(&drbd_pp_pool, page, tmp); 188 - drbd_pp_vacant += i; 189 - spin_unlock(&drbd_pp_lock); 82 + return page; 83 + fail: 84 + page_chain_for_each_safe(page, tmp) { 85 + set_page_private(page, 0); 86 + mempool_free(page, &drbd_buffer_page_pool); 190 87 } 191 88 return NULL; 192 - } 193 - 194 - static void reclaim_finished_net_peer_reqs(struct drbd_device *device, 195 - struct list_head *to_be_freed) 196 - { 197 - struct drbd_peer_request *peer_req, *tmp; 198 - 199 - /* The EEs are always appended to the end of the list. Since 200 - they are sent in order over the wire, they have to finish 201 - in order. As soon as we see the first not finished we can 202 - stop to examine the list... 
*/ 203 - 204 - list_for_each_entry_safe(peer_req, tmp, &device->net_ee, w.list) { 205 - if (drbd_peer_req_has_active_page(peer_req)) 206 - break; 207 - list_move(&peer_req->w.list, to_be_freed); 208 - } 209 - } 210 - 211 - static void drbd_reclaim_net_peer_reqs(struct drbd_device *device) 212 - { 213 - LIST_HEAD(reclaimed); 214 - struct drbd_peer_request *peer_req, *t; 215 - 216 - spin_lock_irq(&device->resource->req_lock); 217 - reclaim_finished_net_peer_reqs(device, &reclaimed); 218 - spin_unlock_irq(&device->resource->req_lock); 219 - list_for_each_entry_safe(peer_req, t, &reclaimed, w.list) 220 - drbd_free_net_peer_req(device, peer_req); 221 - } 222 - 223 - static void conn_reclaim_net_peer_reqs(struct drbd_connection *connection) 224 - { 225 - struct drbd_peer_device *peer_device; 226 - int vnr; 227 - 228 - rcu_read_lock(); 229 - idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { 230 - struct drbd_device *device = peer_device->device; 231 - if (!atomic_read(&device->pp_in_use_by_net)) 232 - continue; 233 - 234 - kref_get(&device->kref); 235 - rcu_read_unlock(); 236 - drbd_reclaim_net_peer_reqs(device); 237 - kref_put(&device->kref, drbd_destroy_device); 238 - rcu_read_lock(); 239 - } 240 - rcu_read_unlock(); 241 89 } 242 90 243 91 /** ··· 113 263 bool retry) 114 264 { 115 265 struct drbd_device *device = peer_device->device; 116 - struct page *page = NULL; 266 + struct page *page; 117 267 struct net_conf *nc; 118 - DEFINE_WAIT(wait); 119 268 unsigned int mxb; 120 269 121 270 rcu_read_lock(); ··· 122 273 mxb = nc ? nc->max_buffers : 1000000; 123 274 rcu_read_unlock(); 124 275 125 - if (atomic_read(&device->pp_in_use) < mxb) 126 - page = __drbd_alloc_pages(device, number); 127 - 128 - /* Try to keep the fast path fast, but occasionally we need 129 - * to reclaim the pages we lended to the network stack. 
*/ 130 - if (page && atomic_read(&device->pp_in_use_by_net) > 512) 131 - drbd_reclaim_net_peer_reqs(device); 132 - 133 - while (page == NULL) { 134 - prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE); 135 - 136 - drbd_reclaim_net_peer_reqs(device); 137 - 138 - if (atomic_read(&device->pp_in_use) < mxb) { 139 - page = __drbd_alloc_pages(device, number); 140 - if (page) 141 - break; 142 - } 143 - 144 - if (!retry) 145 - break; 146 - 147 - if (signal_pending(current)) { 148 - drbd_warn(device, "drbd_alloc_pages interrupted!\n"); 149 - break; 150 - } 151 - 152 - if (schedule_timeout(HZ/10) == 0) 153 - mxb = UINT_MAX; 154 - } 155 - finish_wait(&drbd_pp_wait, &wait); 276 + if (atomic_read(&device->pp_in_use) >= mxb) 277 + schedule_timeout_interruptible(HZ / 10); 278 + page = __drbd_alloc_pages(number); 156 279 157 280 if (page) 158 281 atomic_add(number, &device->pp_in_use); ··· 135 314 * Is also used from inside an other spin_lock_irq(&resource->req_lock); 136 315 * Either links the page chain back to the global pool, 137 316 * or returns all pages to the system. */ 138 - static void drbd_free_pages(struct drbd_device *device, struct page *page, int is_net) 317 + static void drbd_free_pages(struct drbd_device *device, struct page *page) 139 318 { 140 - atomic_t *a = is_net ? 
&device->pp_in_use_by_net : &device->pp_in_use; 141 - int i; 319 + struct page *tmp; 320 + int i = 0; 142 321 143 322 if (page == NULL) 144 323 return; 145 324 146 - if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count) 147 - i = page_chain_free(page); 148 - else { 149 - struct page *tmp; 150 - tmp = page_chain_tail(page, &i); 151 - spin_lock(&drbd_pp_lock); 152 - page_chain_add(&drbd_pp_pool, page, tmp); 153 - drbd_pp_vacant += i; 154 - spin_unlock(&drbd_pp_lock); 325 + page_chain_for_each_safe(page, tmp) { 326 + set_page_private(page, 0); 327 + if (page_count(page) == 1) 328 + mempool_free(page, &drbd_buffer_page_pool); 329 + else 330 + put_page(page); 331 + i++; 155 332 } 156 - i = atomic_sub_return(i, a); 333 + i = atomic_sub_return(i, &device->pp_in_use); 157 334 if (i < 0) 158 - drbd_warn(device, "ASSERTION FAILED: %s: %d < 0\n", 159 - is_net ? "pp_in_use_by_net" : "pp_in_use", i); 160 - wake_up(&drbd_pp_wait); 335 + drbd_warn(device, "ASSERTION FAILED: pp_in_use: %d < 0\n", i); 161 336 } 162 337 163 338 /* ··· 197 380 gfpflags_allow_blocking(gfp_mask)); 198 381 if (!page) 199 382 goto fail; 383 + if (!mempool_is_saturated(&drbd_buffer_page_pool)) 384 + peer_req->flags |= EE_RELEASE_TO_MEMPOOL; 200 385 } 201 386 202 387 memset(peer_req, 0, sizeof(*peer_req)); ··· 222 403 return NULL; 223 404 } 224 405 225 - void __drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req, 226 - int is_net) 406 + void drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req) 227 407 { 228 408 might_sleep(); 229 409 if (peer_req->flags & EE_HAS_DIGEST) 230 410 kfree(peer_req->digest); 231 - drbd_free_pages(device, peer_req->pages, is_net); 411 + drbd_free_pages(device, peer_req->pages); 232 412 D_ASSERT(device, atomic_read(&peer_req->pending_bios) == 0); 233 413 D_ASSERT(device, drbd_interval_empty(&peer_req->i)); 234 414 if (!expect(device, !(peer_req->flags & EE_CALL_AL_COMPLETE_IO))) { ··· 242 424 
LIST_HEAD(work_list); 243 425 struct drbd_peer_request *peer_req, *t; 244 426 int count = 0; 245 - int is_net = list == &device->net_ee; 246 427 247 428 spin_lock_irq(&device->resource->req_lock); 248 429 list_splice_init(list, &work_list); 249 430 spin_unlock_irq(&device->resource->req_lock); 250 431 251 432 list_for_each_entry_safe(peer_req, t, &work_list, w.list) { 252 - __drbd_free_peer_req(device, peer_req, is_net); 433 + drbd_free_peer_req(device, peer_req); 253 434 count++; 254 435 } 255 436 return count; ··· 260 443 static int drbd_finish_peer_reqs(struct drbd_device *device) 261 444 { 262 445 LIST_HEAD(work_list); 263 - LIST_HEAD(reclaimed); 264 446 struct drbd_peer_request *peer_req, *t; 265 447 int err = 0; 266 448 267 449 spin_lock_irq(&device->resource->req_lock); 268 - reclaim_finished_net_peer_reqs(device, &reclaimed); 269 450 list_splice_init(&device->done_ee, &work_list); 270 451 spin_unlock_irq(&device->resource->req_lock); 271 - 272 - list_for_each_entry_safe(peer_req, t, &reclaimed, w.list) 273 - drbd_free_net_peer_req(device, peer_req); 274 452 275 453 /* possible callbacks here: 276 454 * e_end_block, and e_end_resync_block, e_send_superseded. ··· 1787 1975 data_size -= len; 1788 1976 } 1789 1977 kunmap(page); 1790 - drbd_free_pages(peer_device->device, page, 0); 1978 + drbd_free_pages(peer_device->device, page); 1791 1979 return err; 1792 1980 } 1793 1981 ··· 5036 5224 put_ldev(device); 5037 5225 } 5038 5226 5039 - /* tcp_close and release of sendpage pages can be deferred. I don't 5040 - * want to use SO_LINGER, because apparently it can be deferred for 5041 - * more than 20 seconds (longest time I checked). 5042 - * 5043 - * Actually we don't care for exactly when the network stack does its 5044 - * put_page(), but release our reference on these pages right here. 
5045 - */ 5046 - i = drbd_free_peer_reqs(device, &device->net_ee); 5047 - if (i) 5048 - drbd_info(device, "net_ee not empty, killed %u entries\n", i); 5049 5227 i = atomic_read(&device->pp_in_use_by_net); 5050 5228 if (i) 5051 5229 drbd_info(device, "pp_in_use_by_net = %d, expected 0\n", i); ··· 5781 5979 5782 5980 while (get_t_state(thi) == RUNNING) { 5783 5981 drbd_thread_current_set_cpu(thi); 5784 - 5785 - conn_reclaim_net_peer_reqs(connection); 5786 5982 5787 5983 if (test_and_clear_bit(SEND_PING, &connection->flags)) { 5788 5984 if (drbd_send_ping(connection)) {
+18 -38
drivers/block/drbd/drbd_worker.c
··· 1030 1030 return 1; 1031 1031 } 1032 1032 1033 - /* helper */ 1034 - static void move_to_net_ee_or_free(struct drbd_device *device, struct drbd_peer_request *peer_req) 1035 - { 1036 - if (drbd_peer_req_has_active_page(peer_req)) { 1037 - /* This might happen if sendpage() has not finished */ 1038 - int i = PFN_UP(peer_req->i.size); 1039 - atomic_add(i, &device->pp_in_use_by_net); 1040 - atomic_sub(i, &device->pp_in_use); 1041 - spin_lock_irq(&device->resource->req_lock); 1042 - list_add_tail(&peer_req->w.list, &device->net_ee); 1043 - spin_unlock_irq(&device->resource->req_lock); 1044 - wake_up(&drbd_pp_wait); 1045 - } else 1046 - drbd_free_peer_req(device, peer_req); 1047 - } 1048 - 1049 1033 /** 1050 1034 * w_e_end_data_req() - Worker callback, to send a P_DATA_REPLY packet in response to a P_DATA_REQUEST 1051 1035 * @w: work object. ··· 1043 1059 int err; 1044 1060 1045 1061 if (unlikely(cancel)) { 1046 - drbd_free_peer_req(device, peer_req); 1047 - dec_unacked(device); 1048 - return 0; 1062 + err = 0; 1063 + goto out; 1049 1064 } 1050 1065 1051 1066 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) { ··· 1057 1074 err = drbd_send_ack(peer_device, P_NEG_DREPLY, peer_req); 1058 1075 } 1059 1076 1060 - dec_unacked(device); 1061 - 1062 - move_to_net_ee_or_free(device, peer_req); 1063 - 1064 1077 if (unlikely(err)) 1065 1078 drbd_err(device, "drbd_send_block() failed\n"); 1079 + out: 1080 + dec_unacked(device); 1081 + drbd_free_peer_req(device, peer_req); 1082 + 1066 1083 return err; 1067 1084 } 1068 1085 ··· 1103 1120 int err; 1104 1121 1105 1122 if (unlikely(cancel)) { 1106 - drbd_free_peer_req(device, peer_req); 1107 - dec_unacked(device); 1108 - return 0; 1123 + err = 0; 1124 + goto out; 1109 1125 } 1110 1126 1111 1127 if (get_ldev_if_state(device, D_FAILED)) { ··· 1137 1155 /* update resync data with failure */ 1138 1156 drbd_rs_failed_io(peer_device, peer_req->i.sector, peer_req->i.size); 1139 1157 } 1140 - 1141 - dec_unacked(device); 1142 - 1143 - 
move_to_net_ee_or_free(device, peer_req); 1144 - 1145 1158 if (unlikely(err)) 1146 1159 drbd_err(device, "drbd_send_block() failed\n"); 1160 + out: 1161 + dec_unacked(device); 1162 + drbd_free_peer_req(device, peer_req); 1163 + 1147 1164 return err; 1148 1165 } 1149 1166 ··· 1157 1176 int err, eq = 0; 1158 1177 1159 1178 if (unlikely(cancel)) { 1160 - drbd_free_peer_req(device, peer_req); 1161 - dec_unacked(device); 1162 - return 0; 1179 + err = 0; 1180 + goto out; 1163 1181 } 1164 1182 1165 1183 if (get_ldev(device)) { ··· 1200 1220 if (drbd_ratelimit()) 1201 1221 drbd_err(device, "Sending NegDReply. I guess it gets messy.\n"); 1202 1222 } 1203 - 1204 - dec_unacked(device); 1205 - move_to_net_ee_or_free(device, peer_req); 1206 - 1207 1223 if (unlikely(err)) 1208 1224 drbd_err(device, "drbd_send_block/ack() failed\n"); 1225 + out: 1226 + dec_unacked(device); 1227 + drbd_free_peer_req(device, peer_req); 1228 + 1209 1229 return err; 1210 1230 } 1211 1231
+12 -16
drivers/block/ublk_drv.c
··· 235 235 236 236 struct completion completion; 237 237 unsigned int nr_queues_ready; 238 - unsigned int nr_privileged_daemon; 238 + bool unprivileged_daemons; 239 239 struct mutex cancel_mutex; 240 240 bool canceling; 241 241 pid_t ublksrv_tgid; ··· 1389 1389 { 1390 1390 blk_status_t res; 1391 1391 1392 - if (unlikely(ubq->fail_io)) 1392 + if (unlikely(READ_ONCE(ubq->fail_io))) 1393 1393 return BLK_STS_TARGET; 1394 1394 1395 1395 /* With recovery feature enabled, force_abort is set in ··· 1401 1401 * Note: force_abort is guaranteed to be seen because it is set 1402 1402 * before request queue is unqiuesced. 1403 1403 */ 1404 - if (ublk_nosrv_should_queue_io(ubq) && unlikely(ubq->force_abort)) 1404 + if (ublk_nosrv_should_queue_io(ubq) && 1405 + unlikely(READ_ONCE(ubq->force_abort))) 1405 1406 return BLK_STS_IOERR; 1406 1407 1407 1408 if (check_cancel && unlikely(ubq->canceling)) ··· 1551 1550 /* set to NULL, otherwise new tasks cannot mmap io_cmd_buf */ 1552 1551 ub->mm = NULL; 1553 1552 ub->nr_queues_ready = 0; 1554 - ub->nr_privileged_daemon = 0; 1553 + ub->unprivileged_daemons = false; 1555 1554 ub->ublksrv_tgid = -1; 1556 1555 } 1557 1556 ··· 1645 1644 * Transition the device to the nosrv state. 
What exactly this 1646 1645 * means depends on the recovery flags 1647 1646 */ 1648 - blk_mq_quiesce_queue(disk->queue); 1649 1647 if (ublk_nosrv_should_stop_dev(ub)) { 1650 1648 /* 1651 1649 * Allow any pending/future I/O to pass through quickly ··· 1652 1652 * waits for all pending I/O to complete 1653 1653 */ 1654 1654 for (i = 0; i < ub->dev_info.nr_hw_queues; i++) 1655 - ublk_get_queue(ub, i)->force_abort = true; 1656 - blk_mq_unquiesce_queue(disk->queue); 1655 + WRITE_ONCE(ublk_get_queue(ub, i)->force_abort, true); 1657 1656 1658 1657 ublk_stop_dev_unlocked(ub); 1659 1658 } else { ··· 1662 1663 } else { 1663 1664 ub->dev_info.state = UBLK_S_DEV_FAIL_IO; 1664 1665 for (i = 0; i < ub->dev_info.nr_hw_queues; i++) 1665 - ublk_get_queue(ub, i)->fail_io = true; 1666 + WRITE_ONCE(ublk_get_queue(ub, i)->fail_io, true); 1666 1667 } 1667 - blk_mq_unquiesce_queue(disk->queue); 1668 1668 } 1669 1669 unlock: 1670 1670 mutex_unlock(&ub->mutex); ··· 1978 1980 __must_hold(&ub->mutex) 1979 1981 { 1980 1982 ubq->nr_io_ready++; 1981 - if (ublk_queue_ready(ubq)) { 1983 + if (ublk_queue_ready(ubq)) 1982 1984 ub->nr_queues_ready++; 1983 - 1984 - if (capable(CAP_SYS_ADMIN)) 1985 - ub->nr_privileged_daemon++; 1986 - } 1985 + if (!ub->unprivileged_daemons && !capable(CAP_SYS_ADMIN)) 1986 + ub->unprivileged_daemons = true; 1987 1987 1988 1988 if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues) { 1989 1989 /* now we are ready for handling ublk io request */ ··· 2876 2880 2877 2881 ublk_apply_params(ub); 2878 2882 2879 - /* don't probe partitions if any one ubq daemon is un-trusted */ 2880 - if (ub->nr_privileged_daemon != ub->nr_queues_ready) 2883 + /* don't probe partitions if any daemon task is un-trusted */ 2884 + if (ub->unprivileged_daemons) 2881 2885 set_bit(GD_SUPPRESS_PART_SCAN, &disk->state); 2882 2886 2883 2887 ublk_get_device(ub);
+1 -6
drivers/bluetooth/btmtk.c
··· 642 642 * WMT command. 643 643 */ 644 644 err = wait_on_bit_timeout(&data->flags, BTMTK_TX_WAIT_VND_EVT, 645 - TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT); 646 - if (err == -EINTR) { 647 - bt_dev_err(hdev, "Execution of wmt command interrupted"); 648 - clear_bit(BTMTK_TX_WAIT_VND_EVT, &data->flags); 649 - goto err_free_wc; 650 - } 645 + TASK_UNINTERRUPTIBLE, HCI_INIT_TIMEOUT); 651 646 652 647 if (err) { 653 648 bt_dev_err(hdev, "Execution of wmt command timed out");
+4 -4
drivers/bluetooth/btnxpuart.c
··· 543 543 } 544 544 545 545 if (psdata->wakeup_source) { 546 - ret = devm_request_irq(&serdev->dev, psdata->irq_handler, 547 - ps_host_wakeup_irq_handler, 548 - IRQF_ONESHOT | IRQF_TRIGGER_FALLING, 549 - dev_name(&serdev->dev), nxpdev); 546 + ret = devm_request_threaded_irq(&serdev->dev, psdata->irq_handler, 547 + NULL, ps_host_wakeup_irq_handler, 548 + IRQF_ONESHOT, 549 + dev_name(&serdev->dev), nxpdev); 550 550 if (ret) 551 551 bt_dev_info(hdev, "error setting wakeup IRQ handler, ignoring\n"); 552 552 disable_irq(psdata->irq_handler);
+1
drivers/cpufreq/intel_pstate.c
··· 2793 2793 X86_MATCH(INTEL_GRANITERAPIDS_X, core_funcs), 2794 2794 X86_MATCH(INTEL_ATOM_CRESTMONT, core_funcs), 2795 2795 X86_MATCH(INTEL_ATOM_CRESTMONT_X, core_funcs), 2796 + X86_MATCH(INTEL_ATOM_DARKMONT_X, core_funcs), 2796 2797 {} 2797 2798 }; 2798 2799 #endif
+17 -4
drivers/cpuidle/governors/menu.c
··· 97 97 98 98 static DEFINE_PER_CPU(struct menu_device, menu_devices); 99 99 100 + static void menu_update_intervals(struct menu_device *data, unsigned int interval_us) 101 + { 102 + /* Update the repeating-pattern data. */ 103 + data->intervals[data->interval_ptr++] = interval_us; 104 + if (data->interval_ptr >= INTERVALS) 105 + data->interval_ptr = 0; 106 + } 107 + 100 108 static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev); 101 109 102 110 /* ··· 230 222 if (data->needs_update) { 231 223 menu_update(drv, dev); 232 224 data->needs_update = 0; 225 + } else if (!dev->last_residency_ns) { 226 + /* 227 + * This happens when the driver rejects the previously selected 228 + * idle state and returns an error, so update the recent 229 + * intervals table to prevent invalid information from being 230 + * used going forward. 231 + */ 232 + menu_update_intervals(data, UINT_MAX); 233 233 } 234 234 235 235 /* Find the shortest expected idle interval. */ ··· 498 482 499 483 data->correction_factor[data->bucket] = new_factor; 500 484 501 - /* update the repeating-pattern data */ 502 - data->intervals[data->interval_ptr++] = ktime_to_us(measured_ns); 503 - if (data->interval_ptr >= INTERVALS) 504 - data->interval_ptr = 0; 485 + menu_update_intervals(data, ktime_to_us(measured_ns)); 505 486 } 506 487 507 488 /**
+81 -10
drivers/firewire/core-transaction.c
··· 550 550 { .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, }; 551 551 #endif /* 0 */ 552 552 553 + static void complete_address_handler(struct kref *kref) 554 + { 555 + struct fw_address_handler *handler = container_of(kref, struct fw_address_handler, kref); 556 + 557 + complete(&handler->done); 558 + } 559 + 560 + static void get_address_handler(struct fw_address_handler *handler) 561 + { 562 + kref_get(&handler->kref); 563 + } 564 + 565 + static int put_address_handler(struct fw_address_handler *handler) 566 + { 567 + return kref_put(&handler->kref, complete_address_handler); 568 + } 569 + 553 570 /** 554 571 * fw_core_add_address_handler() - register for incoming requests 555 572 * @handler: callback ··· 613 596 if (other != NULL) { 614 597 handler->offset += other->length; 615 598 } else { 599 + init_completion(&handler->done); 600 + kref_init(&handler->kref); 616 601 list_add_tail_rcu(&handler->link, &address_handler_list); 617 602 ret = 0; 618 603 break; ··· 640 621 list_del_rcu(&handler->link); 641 622 642 623 synchronize_rcu(); 624 + 625 + if (!put_address_handler(handler)) 626 + wait_for_completion(&handler->done); 643 627 } 644 628 EXPORT_SYMBOL(fw_core_remove_address_handler); 645 629 ··· 936 914 handler = lookup_enclosing_address_handler(&address_handler_list, offset, 937 915 request->length); 938 916 if (handler) 939 - handler->address_callback(card, request, tcode, destination, source, 940 - p->generation, offset, request->data, 941 - request->length, handler->callback_data); 917 + get_address_handler(handler); 942 918 } 943 919 944 - if (!handler) 920 + if (!handler) { 945 921 fw_send_response(card, request, RCODE_ADDRESS_ERROR); 922 + return; 923 + } 924 + 925 + // Outside the RCU read-side critical section. Without spinlock. With reference count. 
926 + handler->address_callback(card, request, tcode, destination, source, p->generation, offset, 927 + request->data, request->length, handler->callback_data); 928 + put_address_handler(handler); 946 929 } 930 + 931 + // To use kmalloc allocator efficiently, this should be power of two. 932 + #define BUFFER_ON_KERNEL_STACK_SIZE 4 947 933 948 934 static void handle_fcp_region_request(struct fw_card *card, 949 935 struct fw_packet *p, 950 936 struct fw_request *request, 951 937 unsigned long long offset) 952 938 { 953 - struct fw_address_handler *handler; 954 - int tcode, destination, source; 939 + struct fw_address_handler *buffer_on_kernel_stack[BUFFER_ON_KERNEL_STACK_SIZE]; 940 + struct fw_address_handler *handler, **handlers; 941 + int tcode, destination, source, i, count, buffer_size; 955 942 956 943 if ((offset != (CSR_REGISTER_BASE | CSR_FCP_COMMAND) && 957 944 offset != (CSR_REGISTER_BASE | CSR_FCP_RESPONSE)) || ··· 981 950 return; 982 951 } 983 952 953 + count = 0; 954 + handlers = buffer_on_kernel_stack; 955 + buffer_size = ARRAY_SIZE(buffer_on_kernel_stack); 984 956 scoped_guard(rcu) { 985 957 list_for_each_entry_rcu(handler, &address_handler_list, link) { 986 - if (is_enclosing_handler(handler, offset, request->length)) 987 - handler->address_callback(card, request, tcode, destination, source, 988 - p->generation, offset, request->data, 989 - request->length, handler->callback_data); 958 + if (is_enclosing_handler(handler, offset, request->length)) { 959 + if (count >= buffer_size) { 960 + int next_size = buffer_size * 2; 961 + struct fw_address_handler **buffer_on_kernel_heap; 962 + 963 + if (handlers == buffer_on_kernel_stack) 964 + buffer_on_kernel_heap = NULL; 965 + else 966 + buffer_on_kernel_heap = handlers; 967 + 968 + buffer_on_kernel_heap = 969 + krealloc_array(buffer_on_kernel_heap, next_size, 970 + sizeof(*buffer_on_kernel_heap), GFP_ATOMIC); 971 + // FCP is used for purposes unrelated to significant system 972 + // resources (e.g. 
storage or networking), so allocation 973 + // failures are not considered so critical. 974 + if (!buffer_on_kernel_heap) 975 + break; 976 + 977 + if (handlers == buffer_on_kernel_stack) { 978 + memcpy(buffer_on_kernel_heap, buffer_on_kernel_stack, 979 + sizeof(buffer_on_kernel_stack)); 980 + } 981 + 982 + handlers = buffer_on_kernel_heap; 983 + buffer_size = next_size; 984 + } 985 + get_address_handler(handler); 986 + handlers[count++] = handler; 987 + } 990 988 } 991 989 } 990 + 991 + for (i = 0; i < count; ++i) { 992 + handler = handlers[i]; 993 + handler->address_callback(card, request, tcode, destination, source, 994 + p->generation, offset, request->data, 995 + request->length, handler->callback_data); 996 + put_address_handler(handler); 997 + } 998 + 999 + if (handlers != buffer_on_kernel_stack) 1000 + kfree(handlers); 992 1001 993 1002 fw_send_response(card, request, RCODE_COMPLETE); 994 1003 }
+18 -34
drivers/gpio/gpio-mlxbf3.c
··· 190 190 struct mlxbf3_gpio_context *gs; 191 191 struct gpio_irq_chip *girq; 192 192 struct gpio_chip *gc; 193 - char *colon_ptr; 194 193 int ret, irq; 195 - long num; 196 194 197 195 gs = devm_kzalloc(dev, sizeof(*gs), GFP_KERNEL); 198 196 if (!gs) ··· 227 229 gc->owner = THIS_MODULE; 228 230 gc->add_pin_ranges = mlxbf3_gpio_add_pin_ranges; 229 231 230 - colon_ptr = strchr(dev_name(dev), ':'); 231 - if (!colon_ptr) { 232 - dev_err(dev, "invalid device name format\n"); 233 - return -EINVAL; 234 - } 232 + irq = platform_get_irq_optional(pdev, 0); 233 + if (irq >= 0) { 234 + girq = &gs->gc.irq; 235 + gpio_irq_chip_set_chip(girq, &gpio_mlxbf3_irqchip); 236 + girq->default_type = IRQ_TYPE_NONE; 237 + /* This will let us handle the parent IRQ in the driver */ 238 + girq->num_parents = 0; 239 + girq->parents = NULL; 240 + girq->parent_handler = NULL; 241 + girq->handler = handle_bad_irq; 235 242 236 - ret = kstrtol(++colon_ptr, 16, &num); 237 - if (ret) { 238 - dev_err(dev, "invalid device instance\n"); 239 - return ret; 240 - } 241 - 242 - if (!num) { 243 - irq = platform_get_irq(pdev, 0); 244 - if (irq >= 0) { 245 - girq = &gs->gc.irq; 246 - gpio_irq_chip_set_chip(girq, &gpio_mlxbf3_irqchip); 247 - girq->default_type = IRQ_TYPE_NONE; 248 - /* This will let us handle the parent IRQ in the driver */ 249 - girq->num_parents = 0; 250 - girq->parents = NULL; 251 - girq->parent_handler = NULL; 252 - girq->handler = handle_bad_irq; 253 - 254 - /* 255 - * Directly request the irq here instead of passing 256 - * a flow-handler because the irq is shared. 257 - */ 258 - ret = devm_request_irq(dev, irq, mlxbf3_gpio_irq_handler, 259 - IRQF_SHARED, dev_name(dev), gs); 260 - if (ret) 261 - return dev_err_probe(dev, ret, "failed to request IRQ"); 262 - } 243 + /* 244 + * Directly request the irq here instead of passing 245 + * a flow-handler because the irq is shared. 
246 + */ 247 + ret = devm_request_irq(dev, irq, mlxbf3_gpio_irq_handler, 248 + IRQF_SHARED, dev_name(dev), gs); 249 + if (ret) 250 + return dev_err_probe(dev, ret, "failed to request IRQ"); 263 251 } 264 252 265 253 platform_set_drvdata(pdev, gs);
+3
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
··· 1139 1139 } 1140 1140 } 1141 1141 1142 + if (!amdgpu_vm_ready(vm)) 1143 + return -EINVAL; 1144 + 1142 1145 r = amdgpu_vm_clear_freed(adev, vm, NULL); 1143 1146 if (r) 1144 1147 return r;
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
··· 88 88 } 89 89 90 90 r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size, 91 - AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE | 92 - AMDGPU_PTE_EXECUTABLE); 91 + AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE | 92 + AMDGPU_VM_PAGE_EXECUTABLE); 93 93 94 94 if (r) { 95 95 DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
+16 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
··· 1039 1039 { 1040 1040 int ret; 1041 1041 uint64_t reserv_addr, reserv_addr_ext; 1042 - uint32_t reserv_size, reserv_size_ext; 1042 + uint32_t reserv_size, reserv_size_ext, mp0_ip_ver; 1043 1043 struct amdgpu_device *adev = psp->adev; 1044 + 1045 + mp0_ip_ver = amdgpu_ip_version(adev, MP0_HWIP, 0); 1044 1046 1045 1047 if (amdgpu_sriov_vf(psp->adev)) 1046 1048 return 0; 1047 1049 1048 - if ((amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(14, 0, 2)) && 1049 - (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(14, 0, 3))) 1050 + switch (mp0_ip_ver) { 1051 + case IP_VERSION(14, 0, 2): 1052 + if (adev->psp.sos.fw_version < 0x3b0e0d) 1053 + return 0; 1054 + break; 1055 + 1056 + case IP_VERSION(14, 0, 3): 1057 + if (adev->psp.sos.fw_version < 0x3a0e14) 1058 + return 0; 1059 + break; 1060 + 1061 + default: 1050 1062 return 0; 1063 + } 1051 1064 1052 1065 ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_ADDR, &reserv_addr, &reserv_size); 1053 1066 if (ret)
+11 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
··· 654 654 * Check if all VM PDs/PTs are ready for updates 655 655 * 656 656 * Returns: 657 - * True if VM is not evicting. 657 + * True if VM is not evicting and all VM entities are not stopped 658 658 */ 659 659 bool amdgpu_vm_ready(struct amdgpu_vm *vm) 660 660 { 661 - bool empty; 662 661 bool ret; 663 662 664 663 amdgpu_vm_eviction_lock(vm); ··· 665 666 amdgpu_vm_eviction_unlock(vm); 666 667 667 668 spin_lock(&vm->status_lock); 668 - empty = list_empty(&vm->evicted); 669 + ret &= list_empty(&vm->evicted); 669 670 spin_unlock(&vm->status_lock); 670 671 671 - return ret && empty; 672 + spin_lock(&vm->immediate.lock); 673 + ret &= !vm->immediate.stopped; 674 + spin_unlock(&vm->immediate.lock); 675 + 676 + spin_lock(&vm->delayed.lock); 677 + ret &= !vm->delayed.stopped; 678 + spin_unlock(&vm->delayed.lock); 679 + 680 + return ret; 672 681 } 673 682 674 683 /**
+1 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
··· 648 648 list_for_each_entry(block, &vres->blocks, link) 649 649 vis_usage += amdgpu_vram_mgr_vis_size(adev, block); 650 650 651 - amdgpu_vram_mgr_do_reserve(man); 652 - 653 651 drm_buddy_free_list(mm, &vres->blocks, vres->flags); 652 + amdgpu_vram_mgr_do_reserve(man); 654 653 mutex_unlock(&mgr->lock); 655 654 656 655 atomic64_sub(vis_usage, &mgr->vis_usage);
+2
drivers/gpu/drm/bridge/aux-bridge.c
··· 18 18 { 19 19 struct auxiliary_device *adev = to_auxiliary_dev(dev); 20 20 21 + of_node_put(dev->of_node); 21 22 ida_free(&drm_aux_bridge_ida, adev->id); 22 23 23 24 kfree(adev); ··· 66 65 67 66 ret = auxiliary_device_init(adev); 68 67 if (ret) { 68 + of_node_put(adev->dev.of_node); 69 69 ida_free(&drm_aux_bridge_ida, adev->id); 70 70 kfree(adev); 71 71 return ret;
+1
drivers/gpu/drm/drm_bridge.c
··· 1227 1227 /** 1228 1228 * drm_bridge_detect - check if anything is attached to the bridge output 1229 1229 * @bridge: bridge control structure 1230 + * @connector: attached connector 1230 1231 * 1231 1232 * If the bridge supports output detection, as reported by the 1232 1233 * DRM_BRIDGE_OP_DETECT bridge ops flag, call &drm_bridge_funcs.detect for the
+4 -4
drivers/gpu/drm/i915/display/intel_fbc.c
··· 552 552 if (dpfc_ctl & DPFC_CTL_EN) { 553 553 dpfc_ctl &= ~DPFC_CTL_EN; 554 554 intel_de_write(display, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl); 555 - 556 - /* wa_18038517565 Enable DPFC clock gating after FBC disable */ 557 - if (display->platform.dg2 || DISPLAY_VER(display) >= 14) 558 - fbc_compressor_clkgate_disable_wa(fbc, false); 559 555 } 560 556 } 561 557 ··· 1705 1709 intel_fbc_invalidate_dirty_rect(fbc); 1706 1710 1707 1711 __intel_fbc_cleanup_cfb(fbc); 1712 + 1713 + /* wa_18038517565 Enable DPFC clock gating after FBC disable */ 1714 + if (display->platform.dg2 || DISPLAY_VER(display) >= 14) 1715 + fbc_compressor_clkgate_disable_wa(fbc, false); 1708 1716 1709 1717 fbc->state.plane = NULL; 1710 1718 fbc->flip_pending = false;
+9 -5
drivers/gpu/drm/i915/display/intel_psr.c
··· 3275 3275 3276 3276 static void _psr_invalidate_handle(struct intel_dp *intel_dp) 3277 3277 { 3278 - if (intel_dp->psr.psr2_sel_fetch_enabled) { 3278 + struct intel_display *display = to_intel_display(intel_dp); 3279 + 3280 + if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_enabled) { 3279 3281 if (!intel_dp->psr.psr2_sel_fetch_cff_enabled) { 3280 3282 intel_dp->psr.psr2_sel_fetch_cff_enabled = true; 3281 3283 intel_psr_configure_full_frame_update(intel_dp); ··· 3363 3361 { 3364 3362 struct intel_display *display = to_intel_display(intel_dp); 3365 3363 3366 - if (intel_dp->psr.psr2_sel_fetch_enabled) { 3364 + if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_enabled) { 3367 3365 if (intel_dp->psr.psr2_sel_fetch_cff_enabled) { 3368 3366 /* can we turn CFF off? */ 3369 3367 if (intel_dp->psr.busy_frontbuffer_bits == 0) ··· 3380 3378 * existing SU configuration 3381 3379 */ 3382 3380 intel_psr_configure_full_frame_update(intel_dp); 3381 + 3382 + intel_psr_force_update(intel_dp); 3383 + } else { 3384 + intel_psr_exit(intel_dp); 3383 3385 } 3384 3386 3385 - intel_psr_force_update(intel_dp); 3386 - 3387 - if (!intel_dp->psr.psr2_sel_fetch_enabled && !intel_dp->psr.active && 3387 + if ((!intel_dp->psr.psr2_sel_fetch_enabled || DISPLAY_VER(display) >= 20) && 3388 3388 !intel_dp->psr.busy_frontbuffer_bits) 3389 3389 queue_work(display->wq.unordered, &intel_dp->psr.work); 3390 3390 }
+3 -6
drivers/gpu/drm/nouveau/nouveau_display.c
··· 253 253 254 254 int 255 255 nouveau_framebuffer_new(struct drm_device *dev, 256 + const struct drm_format_info *info, 256 257 const struct drm_mode_fb_cmd2 *mode_cmd, 257 258 struct drm_gem_object *gem, 258 259 struct drm_framebuffer **pfb) ··· 261 260 struct nouveau_drm *drm = nouveau_drm(dev); 262 261 struct nouveau_bo *nvbo = nouveau_gem_object(gem); 263 262 struct drm_framebuffer *fb; 264 - const struct drm_format_info *info; 265 263 unsigned int height, i; 266 264 uint32_t tile_mode; 267 265 uint8_t kind; ··· 295 295 kind = nvbo->kind; 296 296 } 297 297 298 - info = drm_get_format_info(dev, mode_cmd->pixel_format, 299 - mode_cmd->modifier[0]); 300 - 301 298 for (i = 0; i < info->num_planes; i++) { 302 299 height = drm_format_info_plane_height(info, 303 300 mode_cmd->height, ··· 318 321 if (!(fb = *pfb = kzalloc(sizeof(*fb), GFP_KERNEL))) 319 322 return -ENOMEM; 320 323 321 - drm_helper_mode_fill_fb_struct(dev, fb, NULL, mode_cmd); 324 + drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd); 322 325 fb->obj[0] = gem; 323 326 324 327 ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs); ··· 341 344 if (!gem) 342 345 return ERR_PTR(-ENOENT); 343 346 344 - ret = nouveau_framebuffer_new(dev, mode_cmd, gem, &fb); 347 + ret = nouveau_framebuffer_new(dev, info, mode_cmd, gem, &fb); 345 348 if (ret == 0) 346 349 return fb; 347 350
+3
drivers/gpu/drm/nouveau/nouveau_display.h
··· 8 8 9 9 #include <drm/drm_framebuffer.h> 10 10 11 + struct drm_format_info; 12 + 11 13 int 12 14 nouveau_framebuffer_new(struct drm_device *dev, 15 + const struct drm_format_info *info, 13 16 const struct drm_mode_fb_cmd2 *mode_cmd, 14 17 struct drm_gem_object *gem, 15 18 struct drm_framebuffer **pfb);
+10 -13
drivers/gpu/drm/omapdrm/omap_fb.c
··· 351 351 } 352 352 } 353 353 354 - fb = omap_framebuffer_init(dev, mode_cmd, bos); 354 + fb = omap_framebuffer_init(dev, info, mode_cmd, bos); 355 355 if (IS_ERR(fb)) 356 356 goto error; 357 357 ··· 365 365 } 366 366 367 367 struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, 368 + const struct drm_format_info *info, 368 369 const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos) 369 370 { 370 - const struct drm_format_info *format = NULL; 371 371 struct omap_framebuffer *omap_fb = NULL; 372 372 struct drm_framebuffer *fb = NULL; 373 373 unsigned int pitch = mode_cmd->pitches[0]; ··· 377 377 dev, mode_cmd, mode_cmd->width, mode_cmd->height, 378 378 (char *)&mode_cmd->pixel_format); 379 379 380 - format = drm_get_format_info(dev, mode_cmd->pixel_format, 381 - mode_cmd->modifier[0]); 382 - 383 380 for (i = 0; i < ARRAY_SIZE(formats); i++) { 384 381 if (formats[i] == mode_cmd->pixel_format) 385 382 break; 386 383 } 387 384 388 - if (!format || i == ARRAY_SIZE(formats)) { 385 + if (i == ARRAY_SIZE(formats)) { 389 386 dev_dbg(dev->dev, "unsupported pixel format: %4.4s\n", 390 387 (char *)&mode_cmd->pixel_format); 391 388 ret = -EINVAL; ··· 396 399 } 397 400 398 401 fb = &omap_fb->base; 399 - omap_fb->format = format; 402 + omap_fb->format = info; 400 403 mutex_init(&omap_fb->lock); 401 404 402 405 /* ··· 404 407 * that the two planes of multiplane formats need the same number of 405 408 * bytes per pixel. 
406 409 */ 407 - if (format->num_planes == 2 && pitch != mode_cmd->pitches[1]) { 410 + if (info->num_planes == 2 && pitch != mode_cmd->pitches[1]) { 408 411 dev_dbg(dev->dev, "pitches differ between planes 0 and 1\n"); 409 412 ret = -EINVAL; 410 413 goto fail; 411 414 } 412 415 413 - if (pitch % format->cpp[0]) { 416 + if (pitch % info->cpp[0]) { 414 417 dev_dbg(dev->dev, 415 418 "buffer pitch (%u bytes) is not a multiple of pixel size (%u bytes)\n", 416 - pitch, format->cpp[0]); 419 + pitch, info->cpp[0]); 417 420 ret = -EINVAL; 418 421 goto fail; 419 422 } 420 423 421 - for (i = 0; i < format->num_planes; i++) { 424 + for (i = 0; i < info->num_planes; i++) { 422 425 struct plane *plane = &omap_fb->planes[i]; 423 - unsigned int vsub = i == 0 ? 1 : format->vsub; 426 + unsigned int vsub = i == 0 ? 1 : info->vsub; 424 427 unsigned int size; 425 428 426 429 size = pitch * mode_cmd->height / vsub; ··· 437 440 plane->dma_addr = 0; 438 441 } 439 442 440 - drm_helper_mode_fill_fb_struct(dev, fb, NULL, mode_cmd); 443 + drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd); 441 444 442 445 ret = drm_framebuffer_init(dev, fb, &omap_framebuffer_funcs); 443 446 if (ret) {
+2
drivers/gpu/drm/omapdrm/omap_fb.h
··· 13 13 struct drm_device; 14 14 struct drm_file; 15 15 struct drm_framebuffer; 16 + struct drm_format_info; 16 17 struct drm_gem_object; 17 18 struct drm_mode_fb_cmd2; 18 19 struct drm_plane_state; ··· 24 23 struct drm_file *file, const struct drm_format_info *info, 25 24 const struct drm_mode_fb_cmd2 *mode_cmd); 26 25 struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, 26 + const struct drm_format_info *info, 27 27 const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos); 28 28 int omap_framebuffer_pin(struct drm_framebuffer *fb); 29 29 void omap_framebuffer_unpin(struct drm_framebuffer *fb);
+4 -1
drivers/gpu/drm/omapdrm/omap_fbdev.c
··· 197 197 goto fail; 198 198 } 199 199 200 - fb = omap_framebuffer_init(dev, &mode_cmd, &bo); 200 + fb = omap_framebuffer_init(dev, 201 + drm_get_format_info(dev, mode_cmd.pixel_format, 202 + mode_cmd.modifier[0]), 203 + &mode_cmd, &bo); 201 204 if (IS_ERR(fb)) { 202 205 dev_err(dev->dev, "failed to allocate fb\n"); 203 206 /* note: if fb creation failed, we can't rely on fb destroy
+1 -1
drivers/gpu/drm/panfrost/panfrost_gem.c
··· 432 432 if (!refcount) 433 433 return; 434 434 435 - resident_size = bo->base.pages ? bo->base.base.size : 0; 435 + resident_size = panfrost_gem_rss(&bo->base.base); 436 436 437 437 snprintf(creator_info, sizeof(creator_info), 438 438 "%s/%d", bo->debugfs.creator.process_name, bo->debugfs.creator.tgid);
+3 -2
drivers/gpu/drm/radeon/radeon_display.c
··· 1297 1297 int 1298 1298 radeon_framebuffer_init(struct drm_device *dev, 1299 1299 struct drm_framebuffer *fb, 1300 + const struct drm_format_info *info, 1300 1301 const struct drm_mode_fb_cmd2 *mode_cmd, 1301 1302 struct drm_gem_object *obj) 1302 1303 { 1303 1304 int ret; 1304 1305 fb->obj[0] = obj; 1305 - drm_helper_mode_fill_fb_struct(dev, fb, NULL, mode_cmd); 1306 + drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd); 1306 1307 ret = drm_framebuffer_init(dev, fb, &radeon_fb_funcs); 1307 1308 if (ret) { 1308 1309 fb->obj[0] = NULL; ··· 1342 1341 return ERR_PTR(-ENOMEM); 1343 1342 } 1344 1343 1345 - ret = radeon_framebuffer_init(dev, fb, mode_cmd, obj); 1344 + ret = radeon_framebuffer_init(dev, fb, info, mode_cmd, obj); 1346 1345 if (ret) { 1347 1346 kfree(fb); 1348 1347 drm_gem_object_put(obj);
+6 -5
drivers/gpu/drm/radeon/radeon_fbdev.c
··· 53 53 } 54 54 55 55 static int radeon_fbdev_create_pinned_object(struct drm_fb_helper *fb_helper, 56 + const struct drm_format_info *info, 56 57 struct drm_mode_fb_cmd2 *mode_cmd, 57 58 struct drm_gem_object **gobj_p) 58 59 { 59 - const struct drm_format_info *info; 60 60 struct radeon_device *rdev = fb_helper->dev->dev_private; 61 61 struct drm_gem_object *gobj = NULL; 62 62 struct radeon_bo *rbo = NULL; ··· 67 67 int height = mode_cmd->height; 68 68 u32 cpp; 69 69 70 - info = drm_get_format_info(rdev_to_drm(rdev), mode_cmd->pixel_format, 71 - mode_cmd->modifier[0]); 72 70 cpp = info->cpp[0]; 73 71 74 72 /* need to align pitch with crtc limits */ ··· 204 206 struct drm_fb_helper_surface_size *sizes) 205 207 { 206 208 struct radeon_device *rdev = fb_helper->dev->dev_private; 209 + const struct drm_format_info *format_info; 207 210 struct drm_mode_fb_cmd2 mode_cmd = { }; 208 211 struct fb_info *info; 209 212 struct drm_gem_object *gobj; ··· 223 224 mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, 224 225 sizes->surface_depth); 225 226 226 - ret = radeon_fbdev_create_pinned_object(fb_helper, &mode_cmd, &gobj); 227 + format_info = drm_get_format_info(rdev_to_drm(rdev), mode_cmd.pixel_format, 228 + mode_cmd.modifier[0]); 229 + ret = radeon_fbdev_create_pinned_object(fb_helper, format_info, &mode_cmd, &gobj); 227 230 if (ret) { 228 231 DRM_ERROR("failed to create fbcon object %d\n", ret); 229 232 return ret; ··· 237 236 ret = -ENOMEM; 238 237 goto err_radeon_fbdev_destroy_pinned_object; 239 238 } 240 - ret = radeon_framebuffer_init(rdev_to_drm(rdev), fb, &mode_cmd, gobj); 239 + ret = radeon_framebuffer_init(rdev_to_drm(rdev), fb, format_info, &mode_cmd, gobj); 241 240 if (ret) { 242 241 DRM_ERROR("failed to initialize framebuffer %d\n", ret); 243 242 goto err_kfree;
+2
drivers/gpu/drm/radeon/radeon_mode.h
··· 40 40 41 41 struct drm_fb_helper; 42 42 struct drm_fb_helper_surface_size; 43 + struct drm_format_info; 43 44 44 45 struct edid; 45 46 struct drm_edid; ··· 891 890 radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on); 892 891 int radeon_framebuffer_init(struct drm_device *dev, 893 892 struct drm_framebuffer *rfb, 893 + const struct drm_format_info *info, 894 894 const struct drm_mode_fb_cmd2 *mode_cmd, 895 895 struct drm_gem_object *obj); 896 896
+1
drivers/gpu/drm/xe/regs/xe_bars.h
··· 7 7 8 8 #define GTTMMADR_BAR 0 /* MMIO + GTT */ 9 9 #define LMEM_BAR 2 /* VRAM */ 10 + #define VF_LMEM_BAR 9 /* VF VRAM */ 10 11 11 12 #endif
+29
drivers/gpu/drm/xe/xe_hwmon.c
··· 332 332 int ret = 0; 333 333 u32 reg_val, max; 334 334 struct xe_reg rapl_limit; 335 + u64 max_supp_power_limit = 0; 335 336 336 337 mutex_lock(&hwmon->hwmon_lock); 337 338 ··· 355 354 ret = -EOPNOTSUPP; 356 355 } 357 356 goto unlock; 357 + } 358 + 359 + /* 360 + * If the sysfs value exceeds the maximum pcode supported power limit value, clamp it to 361 + * the supported maximum (U12.3 format). 362 + * This is to avoid truncation during reg_val calculation below and ensure the valid 363 + * power limit is sent for pcode which would clamp it to card-supported value. 364 + */ 365 + max_supp_power_limit = ((PWR_LIM_VAL) >> hwmon->scl_shift_power) * SF_POWER; 366 + if (value > max_supp_power_limit) { 367 + value = max_supp_power_limit; 368 + drm_info(&hwmon->xe->drm, 369 + "Power limit clamped as selected %s exceeds channel %d limit\n", 370 + PWR_ATTR_TO_STR(attr), channel); 358 371 } 359 372 360 373 /* Computation in 64-bits to avoid overflow. Round to nearest. */ ··· 754 739 { 755 740 int ret; 756 741 u32 uval; 742 + u64 max_crit_power_curr = 0; 757 743 758 744 mutex_lock(&hwmon->hwmon_lock); 759 745 746 + /* 747 + * If the sysfs value exceeds the pcode mailbox cmd POWER_SETUP_SUBCOMMAND_WRITE_I1 748 + * max supported value, clamp it to the command's max (U10.6 format). 749 + * This is to avoid truncation during uval calculation below and ensure the valid power 750 + * limit is sent for pcode which would clamp it to card-supported value. 751 + */ 752 + max_crit_power_curr = (POWER_SETUP_I1_DATA_MASK >> POWER_SETUP_I1_SHIFT) * scale_factor; 753 + if (value > max_crit_power_curr) { 754 + value = max_crit_power_curr; 755 + drm_info(&hwmon->xe->drm, 756 + "Power limit clamped as selected exceeds channel %d limit\n", 757 + channel); 758 + } 760 759 uval = DIV_ROUND_CLOSEST_ULL(value << POWER_SETUP_I1_SHIFT, scale_factor); 761 760 ret = xe_hwmon_pcode_write_i1(hwmon, uval); 762 761
+27 -15
drivers/gpu/drm/xe/xe_migrate.c
··· 1820 1820 if (!IS_ALIGNED(len, XE_CACHELINE_BYTES) || 1821 1821 !IS_ALIGNED((unsigned long)buf + offset, XE_CACHELINE_BYTES)) { 1822 1822 int buf_offset = 0; 1823 + void *bounce; 1824 + int err; 1825 + 1826 + BUILD_BUG_ON(!is_power_of_2(XE_CACHELINE_BYTES)); 1827 + bounce = kmalloc(XE_CACHELINE_BYTES, GFP_KERNEL); 1828 + if (!bounce) 1829 + return -ENOMEM; 1823 1830 1824 1831 /* 1825 1832 * Less than ideal for large unaligned access but this should be 1826 1833 * fairly rare, can fixup if this becomes common. 1827 1834 */ 1828 1835 do { 1829 - u8 bounce[XE_CACHELINE_BYTES]; 1830 - void *ptr = (void *)bounce; 1831 - int err; 1832 1836 int copy_bytes = min_t(int, bytes_left, 1833 1837 XE_CACHELINE_BYTES - 1834 1838 (offset & XE_CACHELINE_MASK)); ··· 1841 1837 err = xe_migrate_access_memory(m, bo, 1842 1838 offset & 1843 1839 ~XE_CACHELINE_MASK, 1844 - (void *)ptr, 1845 - sizeof(bounce), 0); 1840 + bounce, 1841 + XE_CACHELINE_BYTES, 0); 1846 1842 if (err) 1847 - return err; 1843 + break; 1848 1844 1849 1845 if (write) { 1850 - memcpy(ptr + ptr_offset, buf + buf_offset, copy_bytes); 1846 + memcpy(bounce + ptr_offset, buf + buf_offset, copy_bytes); 1851 1847 1852 1848 err = xe_migrate_access_memory(m, bo, 1853 1849 offset & ~XE_CACHELINE_MASK, 1854 - (void *)ptr, 1855 - sizeof(bounce), write); 1850 + bounce, 1851 + XE_CACHELINE_BYTES, write); 1856 1852 if (err) 1857 - return err; 1853 + break; 1858 1854 } else { 1859 - memcpy(buf + buf_offset, ptr + ptr_offset, 1855 + memcpy(buf + buf_offset, bounce + ptr_offset, 1860 1856 copy_bytes); 1861 1857 } 1862 1858 ··· 1865 1861 offset += copy_bytes; 1866 1862 } while (bytes_left); 1867 1863 1868 - return 0; 1864 + kfree(bounce); 1865 + return err; 1869 1866 } 1870 1867 1871 1868 dma_addr = xe_migrate_dma_map(xe, buf, len + page_offset, write); ··· 1887 1882 else 1888 1883 current_bytes = min_t(int, bytes_left, cursor.size); 1889 1884 1890 - if (fence) 1891 - dma_fence_put(fence); 1885 + if (current_bytes & ~PAGE_MASK) { 
1886 + int pitch = 4; 1887 + 1888 + current_bytes = min_t(int, current_bytes, S16_MAX * pitch); 1889 + } 1892 1890 1893 1891 __fence = xe_migrate_vram(m, current_bytes, 1894 1892 (unsigned long)buf & ~PAGE_MASK, ··· 1900 1892 XE_MIGRATE_COPY_TO_VRAM : 1901 1893 XE_MIGRATE_COPY_TO_SRAM); 1902 1894 if (IS_ERR(__fence)) { 1903 - if (fence) 1895 + if (fence) { 1904 1896 dma_fence_wait(fence, false); 1897 + dma_fence_put(fence); 1898 + } 1905 1899 fence = __fence; 1906 1900 goto out_err; 1907 1901 } 1902 + 1903 + dma_fence_put(fence); 1908 1904 fence = __fence; 1909 1905 1910 1906 buf += current_bytes;
+22
drivers/gpu/drm/xe/xe_pci_sriov.c
··· 3 3 * Copyright © 2023-2024 Intel Corporation 4 4 */ 5 5 6 + #include <linux/bitops.h> 7 + #include <linux/pci.h> 8 + 9 + #include "regs/xe_bars.h" 6 10 #include "xe_assert.h" 7 11 #include "xe_device.h" 8 12 #include "xe_gt_sriov_pf_config.h" ··· 132 128 } 133 129 } 134 130 131 + static int resize_vf_vram_bar(struct xe_device *xe, int num_vfs) 132 + { 133 + struct pci_dev *pdev = to_pci_dev(xe->drm.dev); 134 + u32 sizes; 135 + 136 + sizes = pci_iov_vf_bar_get_sizes(pdev, VF_LMEM_BAR, num_vfs); 137 + if (!sizes) 138 + return 0; 139 + 140 + return pci_iov_vf_bar_set_size(pdev, VF_LMEM_BAR, __fls(sizes)); 141 + } 142 + 135 143 static int pf_enable_vfs(struct xe_device *xe, int num_vfs) 136 144 { 137 145 struct pci_dev *pdev = to_pci_dev(xe->drm.dev); ··· 173 157 err = pf_provision_vfs(xe, num_vfs); 174 158 if (err < 0) 175 159 goto failed; 160 + 161 + if (IS_DGFX(xe)) { 162 + err = resize_vf_vram_bar(xe, num_vfs); 163 + if (err) 164 + xe_sriov_info(xe, "Failed to set VF LMEM BAR size: %d\n", err); 165 + } 176 166 177 167 err = pci_enable_sriov(pdev, num_vfs); 178 168 if (err < 0)
+47 -4
drivers/gpu/drm/xe/xe_shrinker.c
··· 54 54 write_unlock(&shrinker->lock); 55 55 } 56 56 57 - static s64 xe_shrinker_walk(struct xe_device *xe, 58 - struct ttm_operation_ctx *ctx, 59 - const struct xe_bo_shrink_flags flags, 60 - unsigned long to_scan, unsigned long *scanned) 57 + static s64 __xe_shrinker_walk(struct xe_device *xe, 58 + struct ttm_operation_ctx *ctx, 59 + const struct xe_bo_shrink_flags flags, 60 + unsigned long to_scan, unsigned long *scanned) 61 61 { 62 62 unsigned int mem_type; 63 63 s64 freed = 0, lret; ··· 88 88 } 89 89 /* Trylocks should never error, just fail. */ 90 90 xe_assert(xe, !IS_ERR(ttm_bo)); 91 + } 92 + 93 + return freed; 94 + } 95 + 96 + /* 97 + * Try shrinking idle objects without writeback first, then if not sufficient, 98 + * try also non-idle objects and finally if that's not sufficient either, 99 + * add writeback. This avoids stalls and explicit writebacks with light or 100 + * moderate memory pressure. 101 + */ 102 + static s64 xe_shrinker_walk(struct xe_device *xe, 103 + struct ttm_operation_ctx *ctx, 104 + const struct xe_bo_shrink_flags flags, 105 + unsigned long to_scan, unsigned long *scanned) 106 + { 107 + bool no_wait_gpu = true; 108 + struct xe_bo_shrink_flags save_flags = flags; 109 + s64 lret, freed; 110 + 111 + swap(no_wait_gpu, ctx->no_wait_gpu); 112 + save_flags.writeback = false; 113 + lret = __xe_shrinker_walk(xe, ctx, save_flags, to_scan, scanned); 114 + swap(no_wait_gpu, ctx->no_wait_gpu); 115 + if (lret < 0 || *scanned >= to_scan) 116 + return lret; 117 + 118 + freed = lret; 119 + if (!ctx->no_wait_gpu) { 120 + lret = __xe_shrinker_walk(xe, ctx, save_flags, to_scan, scanned); 121 + if (lret < 0) 122 + return lret; 123 + freed += lret; 124 + if (*scanned >= to_scan) 125 + return freed; 126 + } 127 + 128 + if (flags.writeback) { 129 + lret = __xe_shrinker_walk(xe, ctx, flags, to_scan, scanned); 130 + if (lret < 0) 131 + return lret; 132 + freed += lret; 91 133 } 92 134 93 135 return freed; ··· 241 199 runtime_pm = 
xe_shrinker_runtime_pm_get(shrinker, true, 0, can_backup); 242 200 243 201 shrink_flags.purge = false; 202 + 244 203 lret = xe_shrinker_walk(shrinker->xe, &ctx, shrink_flags, 245 204 nr_to_scan, &nr_scanned); 246 205 if (lret >= 0)
+1 -1
drivers/idle/intel_idle.c
··· 1679 1679 }; 1680 1680 1681 1681 static const struct x86_cpu_id intel_mwait_ids[] __initconst = { 1682 - X86_MATCH_VENDOR_FAM_FEATURE(INTEL, 6, X86_FEATURE_MWAIT, NULL), 1682 + X86_MATCH_VENDOR_FAM_FEATURE(INTEL, X86_FAMILY_ANY, X86_FEATURE_MWAIT, NULL), 1683 1683 {} 1684 1684 }; 1685 1685
+49 -18
drivers/net/bonding/bond_3ad.c
··· 95 95 static void ad_mux_machine(struct port *port, bool *update_slave_arr); 96 96 static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port); 97 97 static void ad_tx_machine(struct port *port); 98 - static void ad_periodic_machine(struct port *port, struct bond_params *bond_params); 98 + static void ad_periodic_machine(struct port *port); 99 99 static void ad_port_selection_logic(struct port *port, bool *update_slave_arr); 100 100 static void ad_agg_selection_logic(struct aggregator *aggregator, 101 101 bool *update_slave_arr); 102 102 static void ad_clear_agg(struct aggregator *aggregator); 103 103 static void ad_initialize_agg(struct aggregator *aggregator); 104 - static void ad_initialize_port(struct port *port, int lacp_fast); 104 + static void ad_initialize_port(struct port *port, const struct bond_params *bond_params); 105 105 static void ad_enable_collecting(struct port *port); 106 106 static void ad_disable_distributing(struct port *port, 107 107 bool *update_slave_arr); ··· 1307 1307 * case of EXPIRED even if LINK_DOWN didn't arrive for 1308 1308 * the port. 
1309 1309 */ 1310 - port->partner_oper.port_state &= ~LACP_STATE_SYNCHRONIZATION; 1311 1310 port->sm_vars &= ~AD_PORT_MATCHED; 1311 + /* Based on IEEE 8021AX-2014, Figure 6-18 - Receive 1312 + * machine state diagram, the state should be 1313 + * Partner_Oper_Port_State.Synchronization = FALSE; 1314 + * Partner_Oper_Port_State.LACP_Timeout = Short Timeout; 1315 + * start current_while_timer(Short Timeout); 1316 + * Actor_Oper_Port_State.Expired = TRUE; 1317 + */ 1318 + port->partner_oper.port_state &= ~LACP_STATE_SYNCHRONIZATION; 1312 1319 port->partner_oper.port_state |= LACP_STATE_LACP_TIMEOUT; 1313 - port->partner_oper.port_state |= LACP_STATE_LACP_ACTIVITY; 1314 1320 port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(AD_SHORT_TIMEOUT)); 1315 1321 port->actor_oper_port_state |= LACP_STATE_EXPIRED; 1316 1322 port->sm_vars |= AD_PORT_CHURNED; ··· 1423 1417 /** 1424 1418 * ad_periodic_machine - handle a port's periodic state machine 1425 1419 * @port: the port we're looking at 1426 - * @bond_params: bond parameters we will use 1427 1420 * 1428 1421 * Turn ntt flag on periodically to perform periodic transmission of lacpdu's. 
1429 1422 */ 1430 - static void ad_periodic_machine(struct port *port, struct bond_params *bond_params) 1423 + static void ad_periodic_machine(struct port *port) 1431 1424 { 1432 1425 periodic_states_t last_state; 1433 1426 ··· 1435 1430 1436 1431 /* check if port was reinitialized */ 1437 1432 if (((port->sm_vars & AD_PORT_BEGIN) || !(port->sm_vars & AD_PORT_LACP_ENABLED) || !port->is_enabled) || 1438 - (!(port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & LACP_STATE_LACP_ACTIVITY)) || 1439 - !bond_params->lacp_active) { 1433 + (!(port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & LACP_STATE_LACP_ACTIVITY))) { 1440 1434 port->sm_periodic_state = AD_NO_PERIODIC; 1441 1435 } 1442 1436 /* check if state machine should change state */ ··· 1959 1955 /** 1960 1956 * ad_initialize_port - initialize a given port's parameters 1961 1957 * @port: the port we're looking at 1962 - * @lacp_fast: boolean. whether fast periodic should be used 1958 + * @bond_params: bond parameters we will use 1963 1959 */ 1964 - static void ad_initialize_port(struct port *port, int lacp_fast) 1960 + static void ad_initialize_port(struct port *port, const struct bond_params *bond_params) 1965 1961 { 1966 1962 static const struct port_params tmpl = { 1967 1963 .system_priority = 0xffff, 1968 1964 .key = 1, 1969 1965 .port_number = 1, 1970 1966 .port_priority = 0xff, 1971 - .port_state = 1, 1967 + .port_state = 0, 1972 1968 }; 1973 1969 static const struct lacpdu lacpdu = { 1974 1970 .subtype = 0x01, ··· 1986 1982 port->actor_port_priority = 0xff; 1987 1983 port->actor_port_aggregator_identifier = 0; 1988 1984 port->ntt = false; 1989 - port->actor_admin_port_state = LACP_STATE_AGGREGATION | 1990 - LACP_STATE_LACP_ACTIVITY; 1991 - port->actor_oper_port_state = LACP_STATE_AGGREGATION | 1992 - LACP_STATE_LACP_ACTIVITY; 1985 + port->actor_admin_port_state = LACP_STATE_AGGREGATION; 1986 + port->actor_oper_port_state = 
LACP_STATE_AGGREGATION; 1987 + if (bond_params->lacp_active) { 1988 + port->actor_admin_port_state |= LACP_STATE_LACP_ACTIVITY; 1989 + port->actor_oper_port_state |= LACP_STATE_LACP_ACTIVITY; 1990 + } 1993 1991 1994 - if (lacp_fast) 1992 + if (bond_params->lacp_fast) 1995 1993 port->actor_oper_port_state |= LACP_STATE_LACP_TIMEOUT; 1996 1994 1997 1995 memcpy(&port->partner_admin, &tmpl, sizeof(tmpl)); ··· 2207 2201 /* port initialization */ 2208 2202 port = &(SLAVE_AD_INFO(slave)->port); 2209 2203 2210 - ad_initialize_port(port, bond->params.lacp_fast); 2204 + ad_initialize_port(port, &bond->params); 2211 2205 2212 2206 port->slave = slave; 2213 2207 port->actor_port_number = SLAVE_AD_INFO(slave)->id; ··· 2519 2513 } 2520 2514 2521 2515 ad_rx_machine(NULL, port); 2522 - ad_periodic_machine(port, &bond->params); 2516 + ad_periodic_machine(port); 2523 2517 ad_port_selection_logic(port, &update_slave_arr); 2524 2518 ad_mux_machine(port, &update_slave_arr); 2525 2519 ad_tx_machine(port); ··· 2885 2879 port->actor_oper_port_state |= LACP_STATE_LACP_TIMEOUT; 2886 2880 else 2887 2881 port->actor_oper_port_state &= ~LACP_STATE_LACP_TIMEOUT; 2882 + } 2883 + spin_unlock_bh(&bond->mode_lock); 2884 + } 2885 + 2886 + /** 2887 + * bond_3ad_update_lacp_active - change the lacp active 2888 + * @bond: bonding struct 2889 + * 2890 + * Update actor_oper_port_state when lacp_active is modified. 
2891 + */ 2892 + void bond_3ad_update_lacp_active(struct bonding *bond) 2893 + { 2894 + struct port *port = NULL; 2895 + struct list_head *iter; 2896 + struct slave *slave; 2897 + int lacp_active; 2898 + 2899 + lacp_active = bond->params.lacp_active; 2900 + spin_lock_bh(&bond->mode_lock); 2901 + bond_for_each_slave(bond, slave, iter) { 2902 + port = &(SLAVE_AD_INFO(slave)->port); 2903 + if (lacp_active) 2904 + port->actor_oper_port_state |= LACP_STATE_LACP_ACTIVITY; 2905 + else 2906 + port->actor_oper_port_state &= ~LACP_STATE_LACP_ACTIVITY; 2888 2907 } 2889 2908 spin_unlock_bh(&bond->mode_lock); 2890 2909 }
+1
drivers/net/bonding/bond_options.c
··· 1660 1660 netdev_dbg(bond->dev, "Setting LACP active to %s (%llu)\n", 1661 1661 newval->string, newval->value); 1662 1662 bond->params.lacp_active = newval->value; 1663 + bond_3ad_update_lacp_active(bond); 1663 1664 1664 1665 return 0; 1665 1666 }
+1 -1
drivers/net/dsa/b53/b53_common.c
··· 2078 2078 2079 2079 /* Start search operation */ 2080 2080 reg = ARL_SRCH_STDN; 2081 - b53_write8(priv, offset, B53_ARL_SRCH_CTL, reg); 2081 + b53_write8(priv, B53_ARLIO_PAGE, offset, reg); 2082 2082 2083 2083 do { 2084 2084 ret = b53_arl_search_wait(priv);
+6
drivers/net/dsa/microchip/ksz_common.c
··· 2457 2457 dev->dev_ops->cfg_port_member(dev, i, val | cpu_port); 2458 2458 } 2459 2459 2460 + /* HSR ports are setup once so need to use the assigned membership 2461 + * when the port is enabled. 2462 + */ 2463 + if (!port_member && p->stp_state == BR_STATE_FORWARDING && 2464 + (dev->hsr_ports & BIT(port))) 2465 + port_member = dev->hsr_ports; 2460 2466 dev->dev_ops->cfg_port_member(dev, port, port_member | cpu_port); 2461 2467 } 2462 2468
+1 -3
drivers/net/ethernet/airoha/airoha_ppe.c
··· 781 781 continue; 782 782 } 783 783 784 - if (commit_done || !airoha_ppe_foe_compare_entry(e, hwe)) { 785 - e->hash = 0xffff; 784 + if (!airoha_ppe_foe_compare_entry(e, hwe)) 786 785 continue; 787 - } 788 786 789 787 airoha_ppe_foe_commit_entry(ppe, &e->data, hash); 790 788 commit_done = true;
+1 -1
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 5336 5336 { 5337 5337 int i; 5338 5338 5339 - netdev_assert_locked(bp->dev); 5339 + netdev_assert_locked_or_invisible(bp->dev); 5340 5340 5341 5341 /* Under netdev instance lock and all our NAPIs have been disabled. 5342 5342 * It's safe to delete the hash table.
+2 -1
drivers/net/ethernet/cadence/macb_main.c
··· 5349 5349 5350 5350 static const struct macb_config sama7g5_emac_config = { 5351 5351 .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | 5352 - MACB_CAPS_MIIONRGMII | MACB_CAPS_GEM_HAS_PTP, 5352 + MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_MIIONRGMII | 5353 + MACB_CAPS_GEM_HAS_PTP, 5353 5354 .dma_burst_length = 16, 5354 5355 .clk_init = macb_clk_init, 5355 5356 .init = macb_init,
+2
drivers/net/ethernet/google/gve/gve_main.c
··· 2870 2870 struct gve_priv *priv = netdev_priv(netdev); 2871 2871 bool was_up = netif_running(priv->dev); 2872 2872 2873 + netif_device_detach(netdev); 2874 + 2873 2875 rtnl_lock(); 2874 2876 netdev_lock(netdev); 2875 2877 if (was_up && gve_close(priv->dev)) {
+7 -7
drivers/net/ethernet/intel/igc/igc_main.c
··· 7149 7149 adapter->port_num = hw->bus.func; 7150 7150 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 7151 7151 7152 + /* PCI config space info */ 7153 + hw->vendor_id = pdev->vendor; 7154 + hw->device_id = pdev->device; 7155 + hw->revision_id = pdev->revision; 7156 + hw->subsystem_vendor_id = pdev->subsystem_vendor; 7157 + hw->subsystem_device_id = pdev->subsystem_device; 7158 + 7152 7159 /* Disable ASPM L1.2 on I226 devices to avoid packet loss */ 7153 7160 if (igc_is_device_id_i226(hw)) 7154 7161 pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2); ··· 7181 7174 7182 7175 netdev->mem_start = pci_resource_start(pdev, 0); 7183 7176 netdev->mem_end = pci_resource_end(pdev, 0); 7184 - 7185 - /* PCI config space info */ 7186 - hw->vendor_id = pdev->vendor; 7187 - hw->device_id = pdev->device; 7188 - hw->revision_id = pdev->revision; 7189 - hw->subsystem_vendor_id = pdev->subsystem_vendor; 7190 - hw->subsystem_device_id = pdev->subsystem_device; 7191 7177 7192 7178 /* Copy the default MAC and PHY function pointers */ 7193 7179 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
+11 -23
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
··· 968 968 for (i = 0; i < adapter->num_tx_queues; i++) 969 969 clear_bit(__IXGBE_HANG_CHECK_ARMED, 970 970 &adapter->tx_ring[i]->state); 971 - 972 - for (i = 0; i < adapter->num_xdp_queues; i++) 973 - clear_bit(__IXGBE_HANG_CHECK_ARMED, 974 - &adapter->xdp_ring[i]->state); 975 971 } 976 972 977 973 static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter) ··· 1210 1214 struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev); 1211 1215 struct ixgbe_hw *hw = &adapter->hw; 1212 1216 1213 - e_err(drv, "Detected Tx Unit Hang%s\n" 1217 + e_err(drv, "Detected Tx Unit Hang\n" 1214 1218 " Tx Queue <%d>\n" 1215 1219 " TDH, TDT <%x>, <%x>\n" 1216 1220 " next_to_use <%x>\n" ··· 1218 1222 "tx_buffer_info[next_to_clean]\n" 1219 1223 " time_stamp <%lx>\n" 1220 1224 " jiffies <%lx>\n", 1221 - ring_is_xdp(tx_ring) ? " (XDP)" : "", 1222 1225 tx_ring->queue_index, 1223 1226 IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)), 1224 1227 IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)), 1225 1228 tx_ring->next_to_use, next, 1226 1229 tx_ring->tx_buffer_info[next].time_stamp, jiffies); 1227 1230 1228 - if (!ring_is_xdp(tx_ring)) 1229 - netif_stop_subqueue(tx_ring->netdev, 1230 - tx_ring->queue_index); 1231 + netif_stop_subqueue(tx_ring->netdev, 1232 + tx_ring->queue_index); 1231 1233 } 1232 1234 1233 1235 /** ··· 1445 1451 total_bytes); 1446 1452 adapter->tx_ipsec += total_ipsec; 1447 1453 1454 + if (ring_is_xdp(tx_ring)) 1455 + return !!budget; 1456 + 1448 1457 if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) { 1449 1458 if (adapter->hw.mac.type == ixgbe_mac_e610) 1450 1459 ixgbe_handle_mdd_event(adapter, tx_ring); ··· 1464 1467 /* the adapter is about to reset, no point in enabling stuff */ 1465 1468 return true; 1466 1469 } 1467 - 1468 - if (ring_is_xdp(tx_ring)) 1469 - return !!budget; 1470 1470 1471 1471 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) 1472 1472 txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index); ··· 7968 7974 return; 7969 7975 
7970 7976 /* Force detection of hung controller */ 7971 - if (netif_carrier_ok(adapter->netdev)) { 7977 + if (netif_carrier_ok(adapter->netdev)) 7972 7978 for (i = 0; i < adapter->num_tx_queues; i++) 7973 7979 set_check_for_tx_hang(adapter->tx_ring[i]); 7974 - for (i = 0; i < adapter->num_xdp_queues; i++) 7975 - set_check_for_tx_hang(adapter->xdp_ring[i]); 7976 - } 7977 7980 7978 7981 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { 7979 7982 /* ··· 8187 8196 struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; 8188 8197 8189 8198 if (tx_ring->next_to_use != tx_ring->next_to_clean) 8190 - return true; 8191 - } 8192 - 8193 - for (i = 0; i < adapter->num_xdp_queues; i++) { 8194 - struct ixgbe_ring *ring = adapter->xdp_ring[i]; 8195 - 8196 - if (ring->next_to_use != ring->next_to_clean) 8197 8199 return true; 8198 8200 } 8199 8201 ··· 10987 11003 int i; 10988 11004 10989 11005 if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state))) 11006 + return -ENETDOWN; 11007 + 11008 + if (!netif_carrier_ok(adapter->netdev) || 11009 + !netif_running(adapter->netdev)) 10990 11010 return -ENETDOWN; 10991 11011 10992 11012 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+3 -1
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
··· 398 398 dma_addr_t dma; 399 399 u32 cmd_type; 400 400 401 - while (budget-- > 0) { 401 + while (likely(budget)) { 402 402 if (unlikely(!ixgbe_desc_unused(xdp_ring))) { 403 403 work_done = false; 404 404 break; ··· 433 433 xdp_ring->next_to_use++; 434 434 if (xdp_ring->next_to_use == xdp_ring->count) 435 435 xdp_ring->next_to_use = 0; 436 + 437 + budget--; 436 438 } 437 439 438 440 if (tx_desc) {
+2 -2
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
··· 606 606 if (!npc_check_field(rvu, blkaddr, NPC_LB, intf)) 607 607 *features &= ~BIT_ULL(NPC_OUTER_VID); 608 608 609 - /* Set SPI flag only if AH/ESP and IPSEC_SPI are in the key */ 610 - if (npc_check_field(rvu, blkaddr, NPC_IPSEC_SPI, intf) && 609 + /* Allow extracting SPI field from AH and ESP headers at same offset */ 610 + if (npc_is_field_present(rvu, NPC_IPSEC_SPI, intf) && 611 611 (*features & (BIT_ULL(NPC_IPPROTO_ESP) | BIT_ULL(NPC_IPPROTO_AH)))) 612 612 *features |= BIT_ULL(NPC_IPSEC_SPI); 613 613
+2
drivers/net/ethernet/mediatek/mtk_ppe_offload.c
··· 101 101 if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)) 102 102 return -1; 103 103 104 + rcu_read_lock(); 104 105 err = dev_fill_forward_path(dev, addr, &stack); 106 + rcu_read_unlock(); 105 107 if (err) 106 108 return err; 107 109
-1
drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
··· 26 26 u8 cap; 27 27 28 28 /* Buffer configuration */ 29 - bool manual_buffer; 30 29 u32 cable_len; 31 30 u32 xoff; 32 31 u16 port_buff_cell_sz;
+8 -10
drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
··· 272 272 /* Total shared buffer size is split in a ratio of 3:1 between 273 273 * lossy and lossless pools respectively. 274 274 */ 275 - lossy_epool_size = (shared_buffer_size / 4) * 3; 276 275 lossless_ipool_size = shared_buffer_size / 4; 276 + lossy_epool_size = shared_buffer_size - lossless_ipool_size; 277 277 278 278 mlx5e_port_set_sbpr(mdev, 0, MLX5_EGRESS_DIR, MLX5_LOSSY_POOL, 0, 279 279 lossy_epool_size); ··· 288 288 u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz; 289 289 struct mlx5_core_dev *mdev = priv->mdev; 290 290 int sz = MLX5_ST_SZ_BYTES(pbmc_reg); 291 - u32 new_headroom_size = 0; 292 - u32 current_headroom_size; 291 + u32 current_headroom_cells = 0; 292 + u32 new_headroom_cells = 0; 293 293 void *in; 294 294 int err; 295 295 int i; 296 - 297 - current_headroom_size = port_buffer->headroom_size; 298 296 299 297 in = kzalloc(sz, GFP_KERNEL); 300 298 if (!in) ··· 304 306 305 307 for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) { 306 308 void *buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]); 309 + current_headroom_cells += MLX5_GET(bufferx_reg, buffer, size); 310 + 307 311 u64 size = port_buffer->buffer[i].size; 308 312 u64 xoff = port_buffer->buffer[i].xoff; 309 313 u64 xon = port_buffer->buffer[i].xon; 310 314 311 - new_headroom_size += size; 312 315 do_div(size, port_buff_cell_sz); 316 + new_headroom_cells += size; 313 317 do_div(xoff, port_buff_cell_sz); 314 318 do_div(xon, port_buff_cell_sz); 315 319 MLX5_SET(bufferx_reg, buffer, size, size); ··· 320 320 MLX5_SET(bufferx_reg, buffer, xon_threshold, xon); 321 321 } 322 322 323 - new_headroom_size /= port_buff_cell_sz; 324 - current_headroom_size /= port_buff_cell_sz; 325 - err = port_update_shared_buffer(priv->mdev, current_headroom_size, 326 - new_headroom_size); 323 + err = port_update_shared_buffer(priv->mdev, current_headroom_cells, 324 + new_headroom_cells); 327 325 if (err) 328 326 goto out; 329 327
+2
drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c
··· 173 173 174 174 memset(rule_actions, 0, NUM_CT_HMFS_RULES * sizeof(*rule_actions)); 175 175 rule_actions[0].action = mlx5_fc_get_hws_action(fs_hmfs->ctx, attr->counter); 176 + rule_actions[0].counter.offset = 177 + attr->counter->id - attr->counter->bulk->base_id; 176 178 /* Modify header is special, it may require extra arguments outside the action itself. */ 177 179 if (mh_action->mh_data) { 178 180 rule_actions[1].modify_header.offset = mh_action->mh_data->offset;
+9 -3
drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
··· 362 362 static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev, 363 363 struct ieee_pfc *pfc) 364 364 { 365 + u8 buffer_ownership = MLX5_BUF_OWNERSHIP_UNKNOWN; 365 366 struct mlx5e_priv *priv = netdev_priv(dev); 366 367 struct mlx5_core_dev *mdev = priv->mdev; 367 368 u32 old_cable_len = priv->dcbx.cable_len; ··· 390 389 391 390 if (MLX5_BUFFER_SUPPORTED(mdev)) { 392 391 pfc_new.pfc_en = (changed & MLX5E_PORT_BUFFER_PFC) ? pfc->pfc_en : curr_pfc_en; 393 - if (priv->dcbx.manual_buffer) 392 + ret = mlx5_query_port_buffer_ownership(mdev, 393 + &buffer_ownership); 394 + if (ret) 395 + netdev_err(dev, 396 + "%s, Failed to get buffer ownership: %d\n", 397 + __func__, ret); 398 + 399 + if (buffer_ownership == MLX5_BUF_OWNERSHIP_SW_OWNED) 394 400 ret = mlx5e_port_manual_buffer_config(priv, changed, 395 401 dev->mtu, &pfc_new, 396 402 NULL, NULL); ··· 990 982 if (!changed) 991 983 return 0; 992 984 993 - priv->dcbx.manual_buffer = true; 994 985 err = mlx5e_port_manual_buffer_config(priv, changed, dev->mtu, NULL, 995 986 buffer_size, prio2buffer); 996 987 return err; ··· 1259 1252 priv->dcbx.cap |= DCB_CAP_DCBX_HOST; 1260 1253 1261 1254 priv->dcbx.port_buff_cell_sz = mlx5e_query_port_buffers_cell_size(priv); 1262 - priv->dcbx.manual_buffer = false; 1263 1255 priv->dcbx.cable_len = MLX5E_DEFAULT_CABLE_LEN; 1264 1256 1265 1257 mlx5e_ets_init(priv);
+98 -85
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
··· 102 102 u8 level; 103 103 /* Valid only when this node represents a traffic class. */ 104 104 u8 tc; 105 + /* Valid only for a TC arbiter node or vport TC arbiter. */ 106 + u32 tc_bw[DEVLINK_RATE_TCS_MAX]; 105 107 }; 106 108 107 109 static void esw_qos_node_attach_to_parent(struct mlx5_esw_sched_node *node) ··· 464 462 esw_qos_vport_create_sched_element(struct mlx5_esw_sched_node *vport_node, 465 463 struct netlink_ext_ack *extack) 466 464 { 465 + struct mlx5_esw_sched_node *parent = vport_node->parent; 467 466 u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; 468 467 struct mlx5_core_dev *dev = vport_node->esw->dev; 469 468 void *attr; ··· 480 477 attr = MLX5_ADDR_OF(scheduling_context, sched_ctx, element_attributes); 481 478 MLX5_SET(vport_element, attr, vport_number, vport_node->vport->vport); 482 479 MLX5_SET(scheduling_context, sched_ctx, parent_element_id, 483 - vport_node->parent->ix); 480 + parent ? parent->ix : vport_node->esw->qos.root_tsar_ix); 484 481 MLX5_SET(scheduling_context, sched_ctx, max_average_bw, 485 482 vport_node->max_rate); 486 483 ··· 611 608 esw_qos_tc_arbiter_get_bw_shares(struct mlx5_esw_sched_node *tc_arbiter_node, 612 609 u32 *tc_bw) 613 610 { 614 - struct mlx5_esw_sched_node *vports_tc_node; 615 - 616 - list_for_each_entry(vports_tc_node, &tc_arbiter_node->children, entry) 617 - tc_bw[vports_tc_node->tc] = vports_tc_node->bw_share; 611 + memcpy(tc_bw, tc_arbiter_node->tc_bw, sizeof(tc_arbiter_node->tc_bw)); 618 612 } 619 613 620 614 static void ··· 628 628 u8 tc = vports_tc_node->tc; 629 629 u32 bw_share; 630 630 631 + tc_arbiter_node->tc_bw[tc] = tc_bw[tc]; 631 632 bw_share = tc_bw[tc] * fw_max_bw_share; 632 633 bw_share = esw_qos_calc_bw_share(bw_share, divider, 633 634 fw_max_bw_share); ··· 787 786 return err; 788 787 } 789 788 790 - if (MLX5_CAP_QOS(dev, log_esw_max_sched_depth)) { 791 - esw->qos.node0 = __esw_qos_create_vports_sched_node(esw, NULL, extack); 792 - } else { 793 - /* The eswitch doesn't support scheduling 
nodes. 794 - * Create a software-only node0 using the root TSAR to attach vport QoS to. 795 - */ 796 - if (!__esw_qos_alloc_node(esw, 797 - esw->qos.root_tsar_ix, 798 - SCHED_NODE_TYPE_VPORTS_TSAR, 799 - NULL)) 800 - esw->qos.node0 = ERR_PTR(-ENOMEM); 801 - else 802 - list_add_tail(&esw->qos.node0->entry, 803 - &esw->qos.domain->nodes); 804 - } 805 - if (IS_ERR(esw->qos.node0)) { 806 - err = PTR_ERR(esw->qos.node0); 807 - esw_warn(dev, "E-Switch create rate node 0 failed (%d)\n", err); 808 - goto err_node0; 809 - } 810 789 refcount_set(&esw->qos.refcnt, 1); 811 790 812 791 return 0; 813 - 814 - err_node0: 815 - if (mlx5_destroy_scheduling_element_cmd(esw->dev, SCHEDULING_HIERARCHY_E_SWITCH, 816 - esw->qos.root_tsar_ix)) 817 - esw_warn(esw->dev, "E-Switch destroy root TSAR failed.\n"); 818 - 819 - return err; 820 792 } 821 793 822 794 static void esw_qos_destroy(struct mlx5_eswitch *esw) 823 795 { 824 796 int err; 825 - 826 - if (esw->qos.node0->ix != esw->qos.root_tsar_ix) 827 - __esw_qos_destroy_node(esw->qos.node0, NULL); 828 - else 829 - __esw_qos_free_node(esw->qos.node0); 830 - esw->qos.node0 = NULL; 831 797 832 798 err = mlx5_destroy_scheduling_element_cmd(esw->dev, 833 799 SCHEDULING_HIERARCHY_E_SWITCH, ··· 958 990 struct netlink_ext_ack *extack) 959 991 { 960 992 struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; 961 - int err, new_level, max_level; 993 + struct mlx5_esw_sched_node *parent = vport_node->parent; 994 + int err; 962 995 963 996 if (type == SCHED_NODE_TYPE_TC_ARBITER_TSAR) { 997 + int new_level, max_level; 998 + 964 999 /* Increase the parent's level by 2 to account for both the 965 1000 * TC arbiter and the vports TC scheduling element. 966 1001 */ 967 - new_level = vport_node->parent->level + 2; 1002 + new_level = (parent ? 
parent->level : 2) + 2; 968 1003 max_level = 1 << MLX5_CAP_QOS(vport_node->esw->dev, 969 1004 log_esw_max_sched_depth); 970 1005 if (new_level > max_level) { ··· 1004 1033 err_sched_nodes: 1005 1034 if (type == SCHED_NODE_TYPE_RATE_LIMITER) { 1006 1035 esw_qos_node_destroy_sched_element(vport_node, NULL); 1007 - list_add_tail(&vport_node->entry, 1008 - &vport_node->parent->children); 1009 - vport_node->level = vport_node->parent->level + 1; 1036 + esw_qos_node_attach_to_parent(vport_node); 1010 1037 } else { 1011 1038 esw_qos_tc_arbiter_scheduling_teardown(vport_node, NULL); 1012 1039 } ··· 1052 1083 static void esw_qos_vport_disable(struct mlx5_vport *vport, struct netlink_ext_ack *extack) 1053 1084 { 1054 1085 struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; 1055 - struct mlx5_esw_sched_node *parent = vport_node->parent; 1056 1086 enum sched_node_type curr_type = vport_node->type; 1057 1087 1058 1088 if (curr_type == SCHED_NODE_TYPE_VPORT) ··· 1060 1092 esw_qos_vport_tc_disable(vport, extack); 1061 1093 1062 1094 vport_node->bw_share = 0; 1095 + memset(vport_node->tc_bw, 0, sizeof(vport_node->tc_bw)); 1063 1096 list_del_init(&vport_node->entry); 1064 - esw_qos_normalize_min_rate(parent->esw, parent, extack); 1097 + esw_qos_normalize_min_rate(vport_node->esw, vport_node->parent, extack); 1065 1098 1066 1099 trace_mlx5_esw_vport_qos_destroy(vport_node->esw->dev, vport); 1067 1100 } ··· 1072 1103 struct mlx5_esw_sched_node *parent, 1073 1104 struct netlink_ext_ack *extack) 1074 1105 { 1106 + struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; 1075 1107 int err; 1076 1108 1077 1109 esw_assert_qos_lock_held(vport->dev->priv.eswitch); 1078 1110 1079 - esw_qos_node_set_parent(vport->qos.sched_node, parent); 1080 - if (type == SCHED_NODE_TYPE_VPORT) { 1081 - err = esw_qos_vport_create_sched_element(vport->qos.sched_node, 1082 - extack); 1083 - } else { 1111 + esw_qos_node_set_parent(vport_node, parent); 1112 + if (type == SCHED_NODE_TYPE_VPORT) 
1113 + err = esw_qos_vport_create_sched_element(vport_node, extack); 1114 + else 1084 1115 err = esw_qos_vport_tc_enable(vport, type, extack); 1085 - } 1086 1116 if (err) 1087 1117 return err; 1088 1118 1089 - vport->qos.sched_node->type = type; 1090 - esw_qos_normalize_min_rate(parent->esw, parent, extack); 1091 - trace_mlx5_esw_vport_qos_create(vport->dev, vport, 1092 - vport->qos.sched_node->max_rate, 1093 - vport->qos.sched_node->bw_share); 1119 + vport_node->type = type; 1120 + esw_qos_normalize_min_rate(vport_node->esw, parent, extack); 1121 + trace_mlx5_esw_vport_qos_create(vport->dev, vport, vport_node->max_rate, 1122 + vport_node->bw_share); 1094 1123 1095 1124 return 0; 1096 1125 } ··· 1099 1132 { 1100 1133 struct mlx5_eswitch *esw = vport->dev->priv.eswitch; 1101 1134 struct mlx5_esw_sched_node *sched_node; 1135 + struct mlx5_eswitch *parent_esw; 1102 1136 int err; 1103 1137 1104 1138 esw_assert_qos_lock_held(esw); ··· 1107 1139 if (err) 1108 1140 return err; 1109 1141 1110 - parent = parent ?: esw->qos.node0; 1111 - sched_node = __esw_qos_alloc_node(parent->esw, 0, type, parent); 1112 - if (!sched_node) 1142 + parent_esw = parent ? 
parent->esw : esw; 1143 + sched_node = __esw_qos_alloc_node(parent_esw, 0, type, parent); 1144 + if (!sched_node) { 1145 + esw_qos_put(esw); 1113 1146 return -ENOMEM; 1147 + } 1148 + if (!parent) 1149 + list_add_tail(&sched_node->entry, &esw->qos.domain->nodes); 1114 1150 1115 1151 sched_node->max_rate = max_rate; 1116 1152 sched_node->min_rate = min_rate; ··· 1122 1150 vport->qos.sched_node = sched_node; 1123 1151 err = esw_qos_vport_enable(vport, type, parent, extack); 1124 1152 if (err) { 1153 + __esw_qos_free_node(sched_node); 1125 1154 esw_qos_put(esw); 1126 1155 vport->qos.sched_node = NULL; 1127 1156 } 1128 1157 1129 1158 return err; 1159 + } 1160 + 1161 + static void mlx5_esw_qos_vport_disable_locked(struct mlx5_vport *vport) 1162 + { 1163 + struct mlx5_eswitch *esw = vport->dev->priv.eswitch; 1164 + 1165 + esw_assert_qos_lock_held(esw); 1166 + if (!vport->qos.sched_node) 1167 + return; 1168 + 1169 + esw_qos_vport_disable(vport, NULL); 1170 + mlx5_esw_qos_vport_qos_free(vport); 1171 + esw_qos_put(esw); 1130 1172 } 1131 1173 1132 1174 void mlx5_esw_qos_vport_disable(struct mlx5_vport *vport) ··· 1154 1168 goto unlock; 1155 1169 1156 1170 parent = vport->qos.sched_node->parent; 1157 - WARN(parent != esw->qos.node0, "Disabling QoS on port before detaching it from node"); 1171 + WARN(parent, "Disabling QoS on port before detaching it from node"); 1158 1172 1159 - esw_qos_vport_disable(vport, NULL); 1160 - mlx5_esw_qos_vport_qos_free(vport); 1161 - esw_qos_put(esw); 1173 + mlx5_esw_qos_vport_disable_locked(vport); 1162 1174 unlock: 1163 1175 esw_qos_unlock(esw); 1164 1176 } ··· 1246 1262 struct mlx5_esw_sched_node *parent, 1247 1263 struct netlink_ext_ack *extack) 1248 1264 { 1249 - struct mlx5_esw_sched_node *curr_parent = vport->qos.sched_node->parent; 1250 - enum sched_node_type curr_type = vport->qos.sched_node->type; 1265 + struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; 1266 + struct mlx5_esw_sched_node *curr_parent = vport_node->parent; 
1267 + enum sched_node_type curr_type = vport_node->type; 1251 1268 u32 curr_tc_bw[DEVLINK_RATE_TCS_MAX] = {0}; 1252 1269 int err; 1253 1270 1254 1271 esw_assert_qos_lock_held(vport->dev->priv.eswitch); 1255 - parent = parent ?: curr_parent; 1256 1272 if (curr_type == type && curr_parent == parent) 1257 1273 return 0; 1258 1274 ··· 1260 1276 if (err) 1261 1277 return err; 1262 1278 1263 - if (curr_type == SCHED_NODE_TYPE_TC_ARBITER_TSAR && curr_type == type) { 1264 - esw_qos_tc_arbiter_get_bw_shares(vport->qos.sched_node, 1265 - curr_tc_bw); 1266 - } 1279 + if (curr_type == SCHED_NODE_TYPE_TC_ARBITER_TSAR && curr_type == type) 1280 + esw_qos_tc_arbiter_get_bw_shares(vport_node, curr_tc_bw); 1267 1281 1268 1282 esw_qos_vport_disable(vport, extack); 1269 1283 ··· 1272 1290 } 1273 1291 1274 1292 if (curr_type == SCHED_NODE_TYPE_TC_ARBITER_TSAR && curr_type == type) { 1275 - esw_qos_set_tc_arbiter_bw_shares(vport->qos.sched_node, 1276 - curr_tc_bw, extack); 1293 + esw_qos_set_tc_arbiter_bw_shares(vport_node, curr_tc_bw, 1294 + extack); 1277 1295 } 1278 1296 1279 1297 return err; ··· 1288 1306 1289 1307 esw_assert_qos_lock_held(esw); 1290 1308 curr_parent = vport->qos.sched_node->parent; 1291 - parent = parent ?: esw->qos.node0; 1292 1309 if (curr_parent == parent) 1293 1310 return 0; 1294 1311 1295 1312 /* Set vport QoS type based on parent node type if different from 1296 1313 * default QoS; otherwise, use the vport's current QoS type. 
1297 1314 */ 1298 - if (parent->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR) 1315 + if (parent && parent->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR) 1299 1316 type = SCHED_NODE_TYPE_RATE_LIMITER; 1300 - else if (curr_parent->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR) 1317 + else if (curr_parent && 1318 + curr_parent->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR) 1301 1319 type = SCHED_NODE_TYPE_VPORT; 1302 1320 else 1303 1321 type = vport->qos.sched_node->type; ··· 1636 1654 static bool esw_qos_vport_validate_unsupported_tc_bw(struct mlx5_vport *vport, 1637 1655 u32 *tc_bw) 1638 1656 { 1639 - struct mlx5_eswitch *esw = vport->qos.sched_node ? 1640 - vport->qos.sched_node->parent->esw : 1641 - vport->dev->priv.eswitch; 1657 + struct mlx5_esw_sched_node *node = vport->qos.sched_node; 1658 + struct mlx5_eswitch *esw = vport->dev->priv.eswitch; 1659 + 1660 + esw = (node && node->parent) ? node->parent->esw : esw; 1642 1661 1643 1662 return esw_qos_validate_unsupported_tc_bw(esw, tc_bw); 1644 1663 } ··· 1654 1671 } 1655 1672 1656 1673 return true; 1674 + } 1675 + 1676 + static void esw_vport_qos_prune_empty(struct mlx5_vport *vport) 1677 + { 1678 + struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; 1679 + 1680 + esw_assert_qos_lock_held(vport->dev->priv.eswitch); 1681 + if (!vport_node) 1682 + return; 1683 + 1684 + if (vport_node->parent || vport_node->max_rate || 1685 + vport_node->min_rate || !esw_qos_tc_bw_disabled(vport_node->tc_bw)) 1686 + return; 1687 + 1688 + mlx5_esw_qos_vport_disable_locked(vport); 1657 1689 } 1658 1690 1659 1691 int mlx5_esw_qos_init(struct mlx5_eswitch *esw) ··· 1704 1706 1705 1707 esw_qos_lock(esw); 1706 1708 err = mlx5_esw_qos_set_vport_min_rate(vport, tx_share, extack); 1709 + if (err) 1710 + goto out; 1711 + esw_vport_qos_prune_empty(vport); 1712 + out: 1707 1713 esw_qos_unlock(esw); 1708 1714 return err; 1709 1715 } ··· 1729 1727 1730 1728 esw_qos_lock(esw); 1731 1729 err = mlx5_esw_qos_set_vport_max_rate(vport, tx_max, extack); 1730 + 
if (err) 1731 + goto out; 1732 + esw_vport_qos_prune_empty(vport); 1733 + out: 1732 1734 esw_qos_unlock(esw); 1733 1735 return err; 1734 1736 } ··· 1769 1763 if (disable) { 1770 1764 if (vport_node->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR) 1771 1765 err = esw_qos_vport_update(vport, SCHED_NODE_TYPE_VPORT, 1772 - NULL, extack); 1766 + vport_node->parent, extack); 1767 + esw_vport_qos_prune_empty(vport); 1773 1768 goto unlock; 1774 1769 } 1775 1770 ··· 1782 1775 } else { 1783 1776 err = esw_qos_vport_update(vport, 1784 1777 SCHED_NODE_TYPE_TC_ARBITER_TSAR, 1785 - NULL, extack); 1778 + vport_node->parent, extack); 1786 1779 } 1787 1780 if (!err) 1788 1781 esw_qos_set_tc_arbiter_bw_shares(vport_node, tc_bw, extack); ··· 1931 1924 void *priv, void *parent_priv, 1932 1925 struct netlink_ext_ack *extack) 1933 1926 { 1934 - struct mlx5_esw_sched_node *node; 1927 + struct mlx5_esw_sched_node *node = parent ? parent_priv : NULL; 1935 1928 struct mlx5_vport *vport = priv; 1929 + int err; 1936 1930 1937 - if (!parent) 1938 - return mlx5_esw_qos_vport_update_parent(vport, NULL, extack); 1931 + err = mlx5_esw_qos_vport_update_parent(vport, node, extack); 1932 + if (!err) { 1933 + struct mlx5_eswitch *esw = vport->dev->priv.eswitch; 1939 1934 1940 - node = parent_priv; 1941 - return mlx5_esw_qos_vport_update_parent(vport, node, extack); 1935 + esw_qos_lock(esw); 1936 + esw_vport_qos_prune_empty(vport); 1937 + esw_qos_unlock(esw); 1938 + } 1939 + 1940 + return err; 1942 1941 } 1943 1942 1944 1943 static bool esw_qos_is_node_empty(struct mlx5_esw_sched_node *node)
-5
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
··· 380 380 refcount_t refcnt; 381 381 u32 root_tsar_ix; 382 382 struct mlx5_qos_domain *domain; 383 - /* Contains all vports with QoS enabled but no explicit node. 384 - * Cannot be NULL if QoS is enabled, but may be a fake node 385 - * referencing the root TSAR if the esw doesn't support nodes. 386 - */ 387 - struct mlx5_esw_sched_node *node0; 388 383 } qos; 389 384 390 385 struct mlx5_esw_bridge_offloads *br_offloads;
+2
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
··· 367 367 int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in); 368 368 int mlx5_set_trust_state(struct mlx5_core_dev *mdev, u8 trust_state); 369 369 int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state); 370 + int mlx5_query_port_buffer_ownership(struct mlx5_core_dev *mdev, 371 + u8 *buffer_ownership); 370 372 int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio); 371 373 int mlx5_query_dscp2prio(struct mlx5_core_dev *mdev, u8 *dscp2prio); 372 374
+20
drivers/net/ethernet/mellanox/mlx5/core/port.c
··· 968 968 return err; 969 969 } 970 970 971 + int mlx5_query_port_buffer_ownership(struct mlx5_core_dev *mdev, 972 + u8 *buffer_ownership) 973 + { 974 + u32 out[MLX5_ST_SZ_DW(pfcc_reg)] = {}; 975 + int err; 976 + 977 + if (!MLX5_CAP_PCAM_FEATURE(mdev, buffer_ownership)) { 978 + *buffer_ownership = MLX5_BUF_OWNERSHIP_UNKNOWN; 979 + return 0; 980 + } 981 + 982 + err = mlx5_query_pfcc_reg(mdev, out, sizeof(out)); 983 + if (err) 984 + return err; 985 + 986 + *buffer_ownership = MLX5_GET(pfcc_reg, out, buf_ownership); 987 + 988 + return 0; 989 + } 990 + 971 991 int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio) 972 992 { 973 993 int sz = MLX5_ST_SZ_BYTES(qpdpm_reg);
+62 -19
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
··· 74 74 static int 75 75 hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_matcher) 76 76 { 77 - bool move_error = false, poll_error = false, drain_error = false; 78 77 struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx; 79 78 struct mlx5hws_matcher *matcher = bwc_matcher->matcher; 79 + int drain_error = 0, move_error = 0, poll_error = 0; 80 80 u16 bwc_queues = mlx5hws_bwc_queues(ctx); 81 81 struct mlx5hws_rule_attr rule_attr; 82 82 struct mlx5hws_bwc_rule *bwc_rule; ··· 84 84 struct list_head *rules_list; 85 85 u32 pending_rules; 86 86 int i, ret = 0; 87 + bool drain; 87 88 88 89 mlx5hws_bwc_rule_fill_attr(bwc_matcher, 0, 0, &rule_attr); 89 90 ··· 100 99 ret = mlx5hws_matcher_resize_rule_move(matcher, 101 100 bwc_rule->rule, 102 101 &rule_attr); 103 - if (unlikely(ret && !move_error)) { 104 - mlx5hws_err(ctx, 105 - "Moving BWC rule: move failed (%d), attempting to move rest of the rules\n", 106 - ret); 107 - move_error = true; 102 + if (unlikely(ret)) { 103 + if (!move_error) { 104 + mlx5hws_err(ctx, 105 + "Moving BWC rule: move failed (%d), attempting to move rest of the rules\n", 106 + ret); 107 + move_error = ret; 108 + } 109 + /* Rule wasn't queued, no need to poll */ 110 + continue; 108 111 } 109 112 110 113 pending_rules++; 114 + drain = pending_rules >= 115 + hws_bwc_get_burst_th(ctx, rule_attr.queue_id); 111 116 ret = mlx5hws_bwc_queue_poll(ctx, 112 117 rule_attr.queue_id, 113 118 &pending_rules, 114 - false); 115 - if (unlikely(ret && !poll_error)) { 116 - mlx5hws_err(ctx, 117 - "Moving BWC rule: poll failed (%d), attempting to move rest of the rules\n", 118 - ret); 119 - poll_error = true; 119 + drain); 120 + if (unlikely(ret)) { 121 + if (ret == -ETIMEDOUT) { 122 + mlx5hws_err(ctx, 123 + "Moving BWC rule: timeout polling for completions (%d), aborting rehash\n", 124 + ret); 125 + return ret; 126 + } 127 + if (!poll_error) { 128 + mlx5hws_err(ctx, 129 + "Moving BWC rule: polling for completions failed (%d), attempting to move 
rest of the rules\n", 130 + ret); 131 + poll_error = ret; 132 + } 120 133 } 121 134 } 122 135 ··· 141 126 rule_attr.queue_id, 142 127 &pending_rules, 143 128 true); 144 - if (unlikely(ret && !drain_error)) { 145 - mlx5hws_err(ctx, 146 - "Moving BWC rule: drain failed (%d), attempting to move rest of the rules\n", 147 - ret); 148 - drain_error = true; 129 + if (unlikely(ret)) { 130 + if (ret == -ETIMEDOUT) { 131 + mlx5hws_err(ctx, 132 + "Moving bwc rule: timeout draining completions (%d), aborting rehash\n", 133 + ret); 134 + return ret; 135 + } 136 + if (!drain_error) { 137 + mlx5hws_err(ctx, 138 + "Moving bwc rule: drain failed (%d), attempting to move rest of the rules\n", 139 + ret); 140 + drain_error = ret; 141 + } 149 142 } 150 143 } 151 144 } 152 145 153 - if (move_error || poll_error || drain_error) 154 - ret = -EINVAL; 146 + /* Return the first error that happened */ 147 + if (unlikely(move_error)) 148 + return move_error; 149 + if (unlikely(poll_error)) 150 + return poll_error; 151 + if (unlikely(drain_error)) 152 + return drain_error; 155 153 156 154 return ret; 157 155 } ··· 1061 1033 hws_bwc_rule_list_add(bwc_rule, bwc_queue_idx); 1062 1034 mutex_unlock(queue_lock); 1063 1035 return 0; /* rule inserted successfully */ 1036 + } 1037 + 1038 + /* Rule insertion could fail due to queue being full, timeout, or 1039 + * matcher in resize. In such cases, no point in trying to rehash. 1040 + */ 1041 + if (ret == -EBUSY || ret == -ETIMEDOUT || ret == -EAGAIN) { 1042 + mutex_unlock(queue_lock); 1043 + mlx5hws_err(ctx, 1044 + "BWC rule insertion failed - %s (%d)\n", 1045 + ret == -EBUSY ? "queue is full" : 1046 + ret == -ETIMEDOUT ? "timeout" : 1047 + ret == -EAGAIN ? "matcher in resize" : "N/A", 1048 + ret); 1049 + hws_bwc_rule_cnt_dec(bwc_rule); 1050 + return ret; 1064 1051 } 1065 1052 1066 1053 /* At this point the rule wasn't added.
+28 -13
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
··· 1328 1328 { 1329 1329 struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx; 1330 1330 struct mlx5hws_matcher *matcher = bwc_matcher->matcher; 1331 - bool move_error = false, poll_error = false; 1332 1331 u16 bwc_queues = mlx5hws_bwc_queues(ctx); 1333 1332 struct mlx5hws_bwc_rule *tmp_bwc_rule; 1334 1333 struct mlx5hws_rule_attr rule_attr; 1335 1334 struct mlx5hws_table *isolated_tbl; 1335 + int move_error = 0, poll_error = 0; 1336 1336 struct mlx5hws_rule *tmp_rule; 1337 1337 struct list_head *rules_list; 1338 1338 u32 expected_completions = 1; ··· 1391 1391 ret = mlx5hws_matcher_resize_rule_move(matcher, 1392 1392 tmp_rule, 1393 1393 &rule_attr); 1394 - if (unlikely(ret && !move_error)) { 1395 - mlx5hws_err(ctx, 1396 - "Moving complex BWC rule failed (%d), attempting to move rest of the rules\n", 1397 - ret); 1398 - move_error = true; 1394 + if (unlikely(ret)) { 1395 + if (!move_error) { 1396 + mlx5hws_err(ctx, 1397 + "Moving complex BWC rule: move failed (%d), attempting to move rest of the rules\n", 1398 + ret); 1399 + move_error = ret; 1400 + } 1401 + /* Rule wasn't queued, no need to poll */ 1402 + continue; 1399 1403 } 1400 1404 1401 1405 expected_completions = 1; ··· 1407 1403 rule_attr.queue_id, 1408 1404 &expected_completions, 1409 1405 true); 1410 - if (unlikely(ret && !poll_error)) { 1411 - mlx5hws_err(ctx, 1412 - "Moving complex BWC rule: poll failed (%d), attempting to move rest of the rules\n", 1413 - ret); 1414 - poll_error = true; 1406 + if (unlikely(ret)) { 1407 + if (ret == -ETIMEDOUT) { 1408 + mlx5hws_err(ctx, 1409 + "Moving complex BWC rule: timeout polling for completions (%d), aborting rehash\n", 1410 + ret); 1411 + return ret; 1412 + } 1413 + if (!poll_error) { 1414 + mlx5hws_err(ctx, 1415 + "Moving complex BWC rule: polling for completions failed (%d), attempting to move rest of the rules\n", 1416 + ret); 1417 + poll_error = ret; 1418 + } 1415 1419 } 1416 1420 1417 1421 /* Done moving the rule to the new matcher, ··· 1434 1422 } 
1435 1423 } 1436 1424 1437 - if (move_error || poll_error) 1438 - ret = -EINVAL; 1425 + /* Return the first error that happened */ 1426 + if (unlikely(move_error)) 1427 + return move_error; 1428 + if (unlikely(poll_error)) 1429 + return poll_error; 1439 1430 1440 1431 return ret; 1441 1432 }
+1
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
··· 55 55 56 56 MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE); 57 57 MLX5_SET(create_flow_table_in, in, table_type, ft_attr->type); 58 + MLX5_SET(create_flow_table_in, in, uid, ft_attr->uid); 58 59 59 60 ft_ctx = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context); 60 61 MLX5_SET(flow_table_context, ft_ctx, level, ft_attr->level);
+1
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h
··· 36 36 struct mlx5hws_cmd_ft_create_attr { 37 37 u8 type; 38 38 u8 level; 39 + u16 uid; 39 40 bool rtc_valid; 40 41 bool decap_en; 41 42 bool reformat_en;
+1
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
··· 267 267 268 268 tbl_attr.type = MLX5HWS_TABLE_TYPE_FDB; 269 269 tbl_attr.level = ft_attr->level; 270 + tbl_attr.uid = ft_attr->uid; 270 271 tbl = mlx5hws_table_create(ctx, &tbl_attr); 271 272 if (!tbl) { 272 273 mlx5_core_err(ns->dev, "Failed creating hws flow_table\n");
+4 -1
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
··· 85 85 86 86 ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, 87 87 tbl, 88 + 0, 88 89 &matcher->end_ft_id); 89 90 if (ret) { 90 91 mlx5hws_err(tbl->ctx, "Isolated matcher: failed to create end flow table\n"); ··· 113 112 if (mlx5hws_matcher_is_isolated(matcher)) 114 113 ret = hws_matcher_create_end_ft_isolated(matcher); 115 114 else 116 - ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, tbl, 115 + ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, 116 + tbl, 117 + 0, 117 118 &matcher->end_ft_id); 118 119 119 120 if (ret) {
+1
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
··· 75 75 struct mlx5hws_table_attr { 76 76 enum mlx5hws_table_type type; 77 77 u32 level; 78 + u16 uid; 78 79 }; 79 80 80 81 enum mlx5hws_matcher_flow_src {
-1
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
··· 964 964 return -ENOMEM; 965 965 966 966 MLX5_SET(cqc, cqc_data, uar_page, mdev->priv.uar->index); 967 - MLX5_SET(cqc, cqc_data, cqe_sz, queue->num_entries); 968 967 MLX5_SET(cqc, cqc_data, log_cq_size, ilog2(queue->num_entries)); 969 968 970 969 err = hws_send_ring_alloc_cq(mdev, numa_node, queue, cqc_data, cq);
+10 -3
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c
··· 9 9 } 10 10 11 11 static void hws_table_init_next_ft_attr(struct mlx5hws_table *tbl, 12 + u16 uid, 12 13 struct mlx5hws_cmd_ft_create_attr *ft_attr) 13 14 { 14 15 ft_attr->type = tbl->fw_ft_type; ··· 17 16 ft_attr->level = tbl->ctx->caps->fdb_ft.max_level - 1; 18 17 else 19 18 ft_attr->level = tbl->ctx->caps->nic_ft.max_level - 1; 19 + 20 20 ft_attr->rtc_valid = true; 21 + ft_attr->uid = uid; 21 22 } 22 23 23 24 static void hws_table_set_cap_attr(struct mlx5hws_table *tbl, ··· 122 119 123 120 int mlx5hws_table_create_default_ft(struct mlx5_core_dev *mdev, 124 121 struct mlx5hws_table *tbl, 125 - u32 *ft_id) 122 + u16 uid, u32 *ft_id) 126 123 { 127 124 struct mlx5hws_cmd_ft_create_attr ft_attr = {0}; 128 125 int ret; 129 126 130 - hws_table_init_next_ft_attr(tbl, &ft_attr); 127 + hws_table_init_next_ft_attr(tbl, uid, &ft_attr); 131 128 hws_table_set_cap_attr(tbl, &ft_attr); 132 129 133 130 ret = mlx5hws_cmd_flow_table_create(mdev, &ft_attr, ft_id); ··· 192 189 } 193 190 194 191 mutex_lock(&ctx->ctrl_lock); 195 - ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, tbl, &tbl->ft_id); 192 + ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, 193 + tbl, 194 + tbl->uid, 195 + &tbl->ft_id); 196 196 if (ret) { 197 197 mlx5hws_err(tbl->ctx, "Failed to create flow table object\n"); 198 198 mutex_unlock(&ctx->ctrl_lock); ··· 245 239 tbl->ctx = ctx; 246 240 tbl->type = attr->type; 247 241 tbl->level = attr->level; 242 + tbl->uid = attr->uid; 248 243 249 244 ret = hws_table_init(tbl); 250 245 if (ret) {
+2 -1
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h
··· 18 18 enum mlx5hws_table_type type; 19 19 u32 fw_ft_type; 20 20 u32 level; 21 + u16 uid; 21 22 struct list_head matchers_list; 22 23 struct list_head tbl_list_node; 23 24 struct mlx5hws_default_miss default_miss; ··· 48 47 49 48 int mlx5hws_table_create_default_ft(struct mlx5_core_dev *mdev, 50 49 struct mlx5hws_table *tbl, 51 - u32 *ft_id); 50 + u16 uid, u32 *ft_id); 52 51 53 52 void mlx5hws_table_destroy_default_ft(struct mlx5hws_table *tbl, 54 53 u32 ft_id);
+2
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
··· 2375 2375 ROUTER_EXP, false), 2376 2376 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD, 2377 2377 ROUTER_EXP, false), 2378 + MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_LINK_LOCAL, FORWARD, 2379 + ROUTER_EXP, false), 2378 2380 /* Multicast Router Traps */ 2379 2381 MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false), 2380 2382 MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
+1
drivers/net/ethernet/mellanox/mlxsw/trap.h
··· 94 94 MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_SIP_BC = 0x16A, 95 95 MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_DIP_LOCAL_NET = 0x16B, 96 96 MLXSW_TRAP_ID_DISCARD_ING_ROUTER_DIP_LINK_LOCAL = 0x16C, 97 + MLXSW_TRAP_ID_DISCARD_ING_ROUTER_SIP_LINK_LOCAL = 0x16D, 97 98 MLXSW_TRAP_ID_DISCARD_ROUTER_IRIF_EN = 0x178, 98 99 MLXSW_TRAP_ID_DISCARD_ROUTER_ERIF_EN = 0x179, 99 100 MLXSW_TRAP_ID_DISCARD_ROUTER_LPM4 = 0x17B,
+21
drivers/net/ethernet/microchip/lan865x/lan865x.c
··· 32 32 /* MAC Specific Addr 1 Top Reg */ 33 33 #define LAN865X_REG_MAC_H_SADDR1 0x00010023 34 34 35 + /* MAC TSU Timer Increment Register */ 36 + #define LAN865X_REG_MAC_TSU_TIMER_INCR 0x00010077 37 + #define MAC_TSU_TIMER_INCR_COUNT_NANOSECONDS 0x0028 38 + 35 39 struct lan865x_priv { 36 40 struct work_struct multicast_work; 37 41 struct net_device *netdev; ··· 315 311 316 312 phy_start(netdev->phydev); 317 313 314 + netif_start_queue(netdev); 315 + 318 316 return 0; 319 317 } 320 318 ··· 348 342 if (!priv->tc6) { 349 343 ret = -ENODEV; 350 344 goto free_netdev; 345 + } 346 + 347 + /* LAN865x Rev.B0/B1 configuration parameters from AN1760 348 + * As per the Configuration Application Note AN1760 published in the 349 + * link, https://www.microchip.com/en-us/application-notes/an1760 350 + * Revision F (DS60001760G - June 2024), configure the MAC to set time 351 + * stamping at the end of the Start of Frame Delimiter (SFD) and set the 352 + * Timer Increment reg to 40 ns to be used as a 25 MHz internal clock. 353 + */ 354 + ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_TSU_TIMER_INCR, 355 + MAC_TSU_TIMER_INCR_COUNT_NANOSECONDS); 356 + if (ret) { 357 + dev_err(&spi->dev, "Failed to config TSU Timer Incr reg: %d\n", 358 + ret); 359 + goto oa_tc6_exit; 351 360 } 352 361 353 362 /* As per the point s3 in the below errata, SPI receive Ethernet frame
+1 -1
drivers/net/ethernet/realtek/rtase/rtase.h
··· 241 241 #define RTASE_RX_RES BIT(20) 242 242 #define RTASE_RX_RUNT BIT(19) 243 243 #define RTASE_RX_RWT BIT(18) 244 - #define RTASE_RX_CRC BIT(16) 244 + #define RTASE_RX_CRC BIT(17) 245 245 #define RTASE_RX_V6F BIT(31) 246 246 #define RTASE_RX_V4F BIT(30) 247 247 #define RTASE_RX_UDPT BIT(29)
+8 -1
drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
··· 152 152 static int thead_dwmac_enable_clk(struct plat_stmmacenet_data *plat) 153 153 { 154 154 struct thead_dwmac *dwmac = plat->bsp_priv; 155 - u32 reg; 155 + u32 reg, div; 156 156 157 157 switch (plat->mac_interface) { 158 158 case PHY_INTERFACE_MODE_MII: ··· 164 164 case PHY_INTERFACE_MODE_RGMII_RXID: 165 165 case PHY_INTERFACE_MODE_RGMII_TXID: 166 166 /* use pll */ 167 + div = clk_get_rate(plat->stmmac_clk) / rgmii_clock(SPEED_1000); 168 + reg = FIELD_PREP(GMAC_PLLCLK_DIV_EN, 1) | 169 + FIELD_PREP(GMAC_PLLCLK_DIV_NUM, div); 170 + 171 + writel(0, dwmac->apb_base + GMAC_PLLCLK_DIV); 172 + writel(reg, dwmac->apb_base + GMAC_PLLCLK_DIV); 173 + 167 174 writel(GMAC_GTXCLK_SEL_PLL, dwmac->apb_base + GMAC_GTXCLK_SEL); 168 175 reg = GMAC_TX_CLK_EN | GMAC_TX_CLK_N_EN | GMAC_TX_CLK_OUT_EN | 169 176 GMAC_RX_CLK_EN | GMAC_RX_CLK_N_EN;
+41 -31
drivers/net/ethernet/ti/icssg/icssg_prueth.c
··· 203 203 } 204 204 } 205 205 206 + static void icssg_enable_fw_offload(struct prueth *prueth) 207 + { 208 + struct prueth_emac *emac; 209 + int mac; 210 + 211 + for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) { 212 + emac = prueth->emac[mac]; 213 + if (prueth->is_hsr_offload_mode) { 214 + if (emac->ndev->features & NETIF_F_HW_HSR_TAG_RM) 215 + icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_ENABLE); 216 + else 217 + icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_DISABLE); 218 + } 219 + 220 + if (prueth->is_switch_mode || prueth->is_hsr_offload_mode) { 221 + if (netif_running(emac->ndev)) { 222 + icssg_fdb_add_del(emac, eth_stp_addr, prueth->default_vlan, 223 + ICSSG_FDB_ENTRY_P0_MEMBERSHIP | 224 + ICSSG_FDB_ENTRY_P1_MEMBERSHIP | 225 + ICSSG_FDB_ENTRY_P2_MEMBERSHIP | 226 + ICSSG_FDB_ENTRY_BLOCK, 227 + true); 228 + icssg_vtbl_modify(emac, emac->port_vlan | DEFAULT_VID, 229 + BIT(emac->port_id) | DEFAULT_PORT_MASK, 230 + BIT(emac->port_id) | DEFAULT_UNTAG_MASK, 231 + true); 232 + if (prueth->is_hsr_offload_mode) 233 + icssg_vtbl_modify(emac, DEFAULT_VID, 234 + DEFAULT_PORT_MASK, 235 + DEFAULT_UNTAG_MASK, true); 236 + icssg_set_pvid(prueth, emac->port_vlan, emac->port_id); 237 + if (prueth->is_switch_mode) 238 + icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE); 239 + } 240 + } 241 + } 242 + } 243 + 206 244 static int prueth_emac_common_start(struct prueth *prueth) 207 245 { 208 246 struct prueth_emac *emac; ··· 791 753 ret = prueth_emac_common_start(prueth); 792 754 if (ret) 793 755 goto free_rx_irq; 756 + icssg_enable_fw_offload(prueth); 794 757 } 795 758 796 759 flow_cfg = emac->dram.va + ICSSG_CONFIG_OFFSET + PSI_L_REGULAR_FLOW_ID_BASE_OFFSET; ··· 1399 1360 1400 1361 static void icssg_change_mode(struct prueth *prueth) 1401 1362 { 1402 - struct prueth_emac *emac; 1403 - int mac, ret; 1363 + int ret; 1404 1364 1405 1365 ret = prueth_emac_restart(prueth); 1406 1366 if (ret) { ··· 1407 1369 return; 1408 1370 } 1409 1371 1410 - for (mac 
= PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) { 1411 - emac = prueth->emac[mac]; 1412 - if (prueth->is_hsr_offload_mode) { 1413 - if (emac->ndev->features & NETIF_F_HW_HSR_TAG_RM) 1414 - icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_ENABLE); 1415 - else 1416 - icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_DISABLE); 1417 - } 1418 - 1419 - if (netif_running(emac->ndev)) { 1420 - icssg_fdb_add_del(emac, eth_stp_addr, prueth->default_vlan, 1421 - ICSSG_FDB_ENTRY_P0_MEMBERSHIP | 1422 - ICSSG_FDB_ENTRY_P1_MEMBERSHIP | 1423 - ICSSG_FDB_ENTRY_P2_MEMBERSHIP | 1424 - ICSSG_FDB_ENTRY_BLOCK, 1425 - true); 1426 - icssg_vtbl_modify(emac, emac->port_vlan | DEFAULT_VID, 1427 - BIT(emac->port_id) | DEFAULT_PORT_MASK, 1428 - BIT(emac->port_id) | DEFAULT_UNTAG_MASK, 1429 - true); 1430 - if (prueth->is_hsr_offload_mode) 1431 - icssg_vtbl_modify(emac, DEFAULT_VID, 1432 - DEFAULT_PORT_MASK, 1433 - DEFAULT_UNTAG_MASK, true); 1434 - icssg_set_pvid(prueth, emac->port_vlan, emac->port_id); 1435 - if (prueth->is_switch_mode) 1436 - icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE); 1437 - } 1438 - } 1372 + icssg_enable_fw_offload(prueth); 1439 1373 } 1440 1374 1441 1375 static int prueth_netdevice_port_link(struct net_device *ndev,
+1 -1
drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c
··· 192 192 u8 i, j; 193 193 194 194 /* Fill out hash function seeds */ 195 - netdev_rss_key_fill(wx->rss_key, sizeof(wx->rss_key)); 195 + netdev_rss_key_fill(wx->rss_key, WX_RSS_KEY_SIZE); 196 196 for (i = 0; i < WX_RSS_KEY_SIZE / 4; i++) 197 197 wr32(wx, WX_VXRSSRK(i), wx->rss_key[i]); 198 198
+6 -2
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
··· 1160 1160 struct axienet_local *lp = data; 1161 1161 struct sk_buff *skb; 1162 1162 u32 *app_metadata; 1163 + int i; 1163 1164 1164 1165 skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_tail++); 1165 1166 skb = skbuf_dma->skb; ··· 1179 1178 u64_stats_add(&lp->rx_packets, 1); 1180 1179 u64_stats_add(&lp->rx_bytes, rx_len); 1181 1180 u64_stats_update_end(&lp->rx_stat_sync); 1182 - axienet_rx_submit_desc(lp->ndev); 1181 + 1182 + for (i = 0; i < CIRC_SPACE(lp->rx_ring_head, lp->rx_ring_tail, 1183 + RX_BUF_NUM_DEFAULT); i++) 1184 + axienet_rx_submit_desc(lp->ndev); 1183 1185 dma_async_issue_pending(lp->rx_chan); 1184 1186 } 1185 1187 ··· 1461 1457 if (!skbuf_dma) 1462 1458 return; 1463 1459 1464 - lp->rx_ring_head++; 1465 1460 skb = netdev_alloc_skb(ndev, lp->max_frm_size); 1466 1461 if (!skb) 1467 1462 return; ··· 1485 1482 skbuf_dma->desc = dma_rx_desc; 1486 1483 dma_rx_desc->callback_param = lp; 1487 1484 dma_rx_desc->callback_result = axienet_dma_rx_cb; 1485 + lp->rx_ring_head++; 1488 1486 dmaengine_submit(dma_rx_desc); 1489 1487 1490 1488 return;
+12
drivers/net/phy/mscc/mscc.h
··· 365 365 u16 mask; 366 366 }; 367 367 368 + struct vsc8531_skb_cb { 369 + u32 ns; 370 + }; 371 + 372 + #define VSC8531_SKB_CB(skb) \ 373 + ((struct vsc8531_skb_cb *)((skb)->cb)) 374 + 368 375 struct vsc8531_private { 369 376 int rate_magic; 370 377 u16 supp_led_modes; ··· 420 413 */ 421 414 struct mutex ts_lock; 422 415 struct mutex phc_lock; 416 + 417 + /* list of skbs that were received and need timestamp information but it 418 + * didn't received it yet 419 + */ 420 + struct sk_buff_head rx_skbs_list; 423 421 }; 424 422 425 423 /* Shared structure between the PHYs of the same package.
+12
drivers/net/phy/mscc/mscc_main.c
··· 2357 2357 return vsc85xx_dt_led_modes_get(phydev, default_mode); 2358 2358 } 2359 2359 2360 + static void vsc85xx_remove(struct phy_device *phydev) 2361 + { 2362 + struct vsc8531_private *priv = phydev->priv; 2363 + 2364 + skb_queue_purge(&priv->rx_skbs_list); 2365 + } 2366 + 2360 2367 /* Microsemi VSC85xx PHYs */ 2361 2368 static struct phy_driver vsc85xx_driver[] = { 2362 2369 { ··· 2626 2619 .config_intr = &vsc85xx_config_intr, 2627 2620 .suspend = &genphy_suspend, 2628 2621 .resume = &genphy_resume, 2622 + .remove = &vsc85xx_remove, 2629 2623 .probe = &vsc8574_probe, 2630 2624 .set_wol = &vsc85xx_wol_set, 2631 2625 .get_wol = &vsc85xx_wol_get, ··· 2654 2646 .config_intr = &vsc85xx_config_intr, 2655 2647 .suspend = &genphy_suspend, 2656 2648 .resume = &genphy_resume, 2649 + .remove = &vsc85xx_remove, 2657 2650 .probe = &vsc8574_probe, 2658 2651 .set_wol = &vsc85xx_wol_set, 2659 2652 .get_wol = &vsc85xx_wol_get, ··· 2682 2673 .config_intr = &vsc85xx_config_intr, 2683 2674 .suspend = &genphy_suspend, 2684 2675 .resume = &genphy_resume, 2676 + .remove = &vsc85xx_remove, 2685 2677 .probe = &vsc8584_probe, 2686 2678 .get_tunable = &vsc85xx_get_tunable, 2687 2679 .set_tunable = &vsc85xx_set_tunable, ··· 2708 2698 .config_intr = &vsc85xx_config_intr, 2709 2699 .suspend = &genphy_suspend, 2710 2700 .resume = &genphy_resume, 2701 + .remove = &vsc85xx_remove, 2711 2702 .probe = &vsc8584_probe, 2712 2703 .get_tunable = &vsc85xx_get_tunable, 2713 2704 .set_tunable = &vsc85xx_set_tunable, ··· 2734 2723 .config_intr = &vsc85xx_config_intr, 2735 2724 .suspend = &genphy_suspend, 2736 2725 .resume = &genphy_resume, 2726 + .remove = &vsc85xx_remove, 2737 2727 .probe = &vsc8584_probe, 2738 2728 .get_tunable = &vsc85xx_get_tunable, 2739 2729 .set_tunable = &vsc85xx_set_tunable,
+37 -12
drivers/net/phy/mscc/mscc_ptp.c
··· 1194 1194 { 1195 1195 struct vsc8531_private *vsc8531 = 1196 1196 container_of(mii_ts, struct vsc8531_private, mii_ts); 1197 - struct skb_shared_hwtstamps *shhwtstamps = NULL; 1198 1197 struct vsc85xx_ptphdr *ptphdr; 1199 - struct timespec64 ts; 1200 1198 unsigned long ns; 1201 1199 1202 1200 if (!vsc8531->ptp->configured) ··· 1204 1206 type == PTP_CLASS_NONE) 1205 1207 return false; 1206 1208 1207 - vsc85xx_gettime(&vsc8531->ptp->caps, &ts); 1208 - 1209 1209 ptphdr = get_ptp_header_rx(skb, vsc8531->ptp->rx_filter); 1210 1210 if (!ptphdr) 1211 1211 return false; 1212 1212 1213 - shhwtstamps = skb_hwtstamps(skb); 1214 - memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps)); 1215 - 1216 1213 ns = ntohl(ptphdr->rsrvd2); 1217 1214 1218 - /* nsec is in reserved field */ 1219 - if (ts.tv_nsec < ns) 1220 - ts.tv_sec--; 1215 + VSC8531_SKB_CB(skb)->ns = ns; 1216 + skb_queue_tail(&vsc8531->rx_skbs_list, skb); 1221 1217 1222 - shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ns); 1223 - netif_rx(skb); 1218 + ptp_schedule_worker(vsc8531->ptp->ptp_clock, 0); 1224 1219 1225 1220 return true; 1221 + } 1222 + 1223 + static long vsc85xx_do_aux_work(struct ptp_clock_info *info) 1224 + { 1225 + struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps); 1226 + struct skb_shared_hwtstamps *shhwtstamps = NULL; 1227 + struct phy_device *phydev = ptp->phydev; 1228 + struct vsc8531_private *priv = phydev->priv; 1229 + struct sk_buff_head received; 1230 + struct sk_buff *rx_skb; 1231 + struct timespec64 ts; 1232 + unsigned long flags; 1233 + 1234 + __skb_queue_head_init(&received); 1235 + spin_lock_irqsave(&priv->rx_skbs_list.lock, flags); 1236 + skb_queue_splice_tail_init(&priv->rx_skbs_list, &received); 1237 + spin_unlock_irqrestore(&priv->rx_skbs_list.lock, flags); 1238 + 1239 + vsc85xx_gettime(info, &ts); 1240 + while ((rx_skb = __skb_dequeue(&received)) != NULL) { 1241 + shhwtstamps = skb_hwtstamps(rx_skb); 1242 + memset(shhwtstamps, 0, sizeof(struct 
skb_shared_hwtstamps)); 1243 + 1244 + if (ts.tv_nsec < VSC8531_SKB_CB(rx_skb)->ns) 1245 + ts.tv_sec--; 1246 + 1247 + shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, 1248 + VSC8531_SKB_CB(rx_skb)->ns); 1249 + netif_rx(rx_skb); 1250 + } 1251 + 1252 + return -1; 1226 1253 } 1227 1254 1228 1255 static const struct ptp_clock_info vsc85xx_clk_caps = { ··· 1263 1240 .adjfine = &vsc85xx_adjfine, 1264 1241 .gettime64 = &vsc85xx_gettime, 1265 1242 .settime64 = &vsc85xx_settime, 1243 + .do_aux_work = &vsc85xx_do_aux_work, 1266 1244 }; 1267 1245 1268 1246 static struct vsc8531_private *vsc8584_base_priv(struct phy_device *phydev) ··· 1591 1567 1592 1568 mutex_init(&vsc8531->phc_lock); 1593 1569 mutex_init(&vsc8531->ts_lock); 1570 + skb_queue_head_init(&vsc8531->rx_skbs_list); 1594 1571 1595 1572 /* Retrieve the shared load/save GPIO. Request it as non exclusive as 1596 1573 * the same GPIO can be requested by all the PHYs of the same package.
+11 -6
drivers/net/ppp/ppp_generic.c
··· 33 33 #include <linux/ppp_channel.h> 34 34 #include <linux/ppp-comp.h> 35 35 #include <linux/skbuff.h> 36 + #include <linux/rculist.h> 36 37 #include <linux/rtnetlink.h> 37 38 #include <linux/if_arp.h> 38 39 #include <linux/ip.h> ··· 1599 1598 if (ppp->flags & SC_MULTILINK) 1600 1599 return -EOPNOTSUPP; 1601 1600 1602 - if (list_empty(&ppp->channels)) 1601 + pch = list_first_or_null_rcu(&ppp->channels, struct channel, clist); 1602 + if (!pch) 1603 1603 return -ENODEV; 1604 1604 1605 - pch = list_first_entry(&ppp->channels, struct channel, clist); 1606 - chan = pch->chan; 1605 + chan = READ_ONCE(pch->chan); 1606 + if (!chan) 1607 + return -ENODEV; 1608 + 1607 1609 if (!chan->ops->fill_forward_path) 1608 1610 return -EOPNOTSUPP; 1609 1611 ··· 2998 2994 */ 2999 2995 down_write(&pch->chan_sem); 3000 2996 spin_lock_bh(&pch->downl); 3001 - pch->chan = NULL; 2997 + WRITE_ONCE(pch->chan, NULL); 3002 2998 spin_unlock_bh(&pch->downl); 3003 2999 up_write(&pch->chan_sem); 3004 3000 ppp_disconnect_channel(pch); ··· 3519 3515 hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */ 3520 3516 if (hdrlen > ppp->dev->hard_header_len) 3521 3517 ppp->dev->hard_header_len = hdrlen; 3522 - list_add_tail(&pch->clist, &ppp->channels); 3518 + list_add_tail_rcu(&pch->clist, &ppp->channels); 3523 3519 ++ppp->n_channels; 3524 3520 pch->ppp = ppp; 3525 3521 refcount_inc(&ppp->file.refcnt); ··· 3549 3545 if (ppp) { 3550 3546 /* remove it from the ppp unit's list */ 3551 3547 ppp_lock(ppp); 3552 - list_del(&pch->clist); 3548 + list_del_rcu(&pch->clist); 3553 3549 if (--ppp->n_channels == 0) 3554 3550 wake_up_interruptible(&ppp->file.rwait); 3555 3551 ppp_unlock(ppp); 3552 + synchronize_net(); 3556 3553 if (refcount_dec_and_test(&ppp->file.refcnt)) 3557 3554 ppp_destroy_interface(ppp); 3558 3555 err = 0;
+48 -15
drivers/net/pse-pd/pd692x0.c
··· 1041 1041 int pw_budget; 1042 1042 1043 1043 pw_budget = regulator_get_unclaimed_power_budget(supply); 1044 + if (!pw_budget) 1045 + /* Do nothing if no power budget */ 1046 + continue; 1047 + 1044 1048 /* Max power budget per manager */ 1045 1049 if (pw_budget > 6000000) 1046 1050 pw_budget = 6000000; ··· 1166 1162 return 0; 1167 1163 } 1168 1164 1165 + static void pd692x0_of_put_managers(struct pd692x0_priv *priv, 1166 + struct pd692x0_manager *manager, 1167 + int nmanagers) 1168 + { 1169 + int i, j; 1170 + 1171 + for (i = 0; i < nmanagers; i++) { 1172 + for (j = 0; j < manager[i].nports; j++) 1173 + of_node_put(manager[i].port_node[j]); 1174 + of_node_put(manager[i].node); 1175 + } 1176 + } 1177 + 1178 + static void pd692x0_managers_free_pw_budget(struct pd692x0_priv *priv) 1179 + { 1180 + int i; 1181 + 1182 + for (i = 0; i < PD692X0_MAX_MANAGERS; i++) { 1183 + struct regulator *supply; 1184 + 1185 + if (!priv->manager_reg[i] || !priv->manager_pw_budget[i]) 1186 + continue; 1187 + 1188 + supply = priv->manager_reg[i]->supply; 1189 + if (!supply) 1190 + continue; 1191 + 1192 + regulator_free_power_budget(supply, 1193 + priv->manager_pw_budget[i]); 1194 + } 1195 + } 1196 + 1169 1197 static int pd692x0_setup_pi_matrix(struct pse_controller_dev *pcdev) 1170 1198 { 1171 1199 struct pd692x0_manager *manager __free(kfree) = NULL; 1172 1200 struct pd692x0_priv *priv = to_pd692x0_priv(pcdev); 1173 1201 struct pd692x0_matrix port_matrix[PD692X0_MAX_PIS]; 1174 - int ret, i, j, nmanagers; 1202 + int ret, nmanagers; 1175 1203 1176 1204 /* Should we flash the port matrix */ 1177 1205 if (priv->fw_state != PD692X0_FW_OK && ··· 1221 1185 nmanagers = ret; 1222 1186 ret = pd692x0_register_managers_regulator(priv, manager, nmanagers); 1223 1187 if (ret) 1224 - goto out; 1188 + goto err_of_managers; 1225 1189 1226 1190 ret = pd692x0_configure_managers(priv, nmanagers); 1227 1191 if (ret) 1228 - goto out; 1192 + goto err_of_managers; 1229 1193 1230 1194 ret = 
pd692x0_set_ports_matrix(priv, manager, nmanagers, port_matrix); 1231 1195 if (ret) 1232 - goto out; 1196 + goto err_managers_req_pw; 1233 1197 1234 1198 ret = pd692x0_write_ports_matrix(priv, port_matrix); 1235 1199 if (ret) 1236 - goto out; 1200 + goto err_managers_req_pw; 1237 1201 1238 - out: 1239 - for (i = 0; i < nmanagers; i++) { 1240 - struct regulator *supply = priv->manager_reg[i]->supply; 1202 + pd692x0_of_put_managers(priv, manager, nmanagers); 1203 + return 0; 1241 1204 1242 - regulator_free_power_budget(supply, 1243 - priv->manager_pw_budget[i]); 1244 - 1245 - for (j = 0; j < manager[i].nports; j++) 1246 - of_node_put(manager[i].port_node[j]); 1247 - of_node_put(manager[i].node); 1248 - } 1205 + err_managers_req_pw: 1206 + pd692x0_managers_free_pw_budget(priv); 1207 + err_of_managers: 1208 + pd692x0_of_put_managers(priv, manager, nmanagers); 1249 1209 return ret; 1250 1210 } 1251 1211 ··· 1780 1748 { 1781 1749 struct pd692x0_priv *priv = i2c_get_clientdata(client); 1782 1750 1751 + pd692x0_managers_free_pw_budget(priv); 1783 1752 firmware_upload_unregister(priv->fwl); 1784 1753 } 1785 1754
+1 -1
drivers/net/usb/asix_devices.c
··· 676 676 priv->mdio->read = &asix_mdio_bus_read; 677 677 priv->mdio->write = &asix_mdio_bus_write; 678 678 priv->mdio->name = "Asix MDIO Bus"; 679 - priv->mdio->phy_mask = ~(BIT(priv->phy_addr) | BIT(AX_EMBD_PHY_ADDR)); 679 + priv->mdio->phy_mask = ~(BIT(priv->phy_addr & 0x1f) | BIT(AX_EMBD_PHY_ADDR)); 680 680 /* mii bus name is usb-<usb bus number>-<usb device number> */ 681 681 snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "usb-%03d:%03d", 682 682 dev->udev->bus->busnum, dev->udev->devnum);
+7
drivers/net/usb/cdc_ncm.c
··· 2087 2087 .driver_info = (unsigned long)&wwan_info, 2088 2088 }, 2089 2089 2090 + /* Intel modem (label from OEM reads Fibocom L850-GL) */ 2091 + { USB_DEVICE_AND_INTERFACE_INFO(0x8087, 0x095a, 2092 + USB_CLASS_COMM, 2093 + USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE), 2094 + .driver_info = (unsigned long)&wwan_info, 2095 + }, 2096 + 2090 2097 /* DisplayLink docking stations */ 2091 2098 { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO 2092 2099 | USB_DEVICE_ID_MATCH_VENDOR,
+1 -1
drivers/pci/controller/pcie-xilinx.c
··· 400 400 if (val & XILINX_PCIE_RPIFR1_MSI_INTR) { 401 401 val = pcie_read(pcie, XILINX_PCIE_REG_RPIFR2) & 402 402 XILINX_PCIE_RPIFR2_MSG_DATA; 403 - domain = pcie->msi_domain->parent; 403 + domain = pcie->msi_domain; 404 404 } else { 405 405 val = (val & XILINX_PCIE_RPIFR1_INTR_MASK) >> 406 406 XILINX_PCIE_RPIFR1_INTR_SHIFT;
-3
drivers/pci/controller/vmd.c
··· 306 306 struct irq_domain *real_parent, 307 307 struct msi_domain_info *info) 308 308 { 309 - if (WARN_ON_ONCE(info->bus_token != DOMAIN_BUS_PCI_DEVICE_MSIX)) 310 - return false; 311 - 312 309 if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info)) 313 310 return false; 314 311
-1
drivers/scsi/lpfc/lpfc_debugfs.c
··· 6280 6280 } 6281 6281 phba->nvmeio_trc_on = 1; 6282 6282 phba->nvmeio_trc_output_idx = 0; 6283 - phba->nvmeio_trc = NULL; 6284 6283 } else { 6285 6284 nvmeio_off: 6286 6285 phba->nvmeio_trc_size = 0;
+1 -1
drivers/scsi/lpfc/lpfc_vport.c
··· 666 666 * Take early refcount for outstanding I/O requests we schedule during 667 667 * delete processing for unreg_vpi. Always keep this before 668 668 * scsi_remove_host() as we can no longer obtain a reference through 669 - * scsi_host_get() after scsi_host_remove as shost is set to SHOST_DEL. 669 + * scsi_host_get() after scsi_remove_host as shost is set to SHOST_DEL. 670 670 */ 671 671 if (!scsi_host_get(shost)) 672 672 return VPORT_INVAL;
+57 -34
drivers/scsi/scsi_debug.c
··· 2674 2674 2675 2675 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target) 2676 2676 { /* Read-Write Error Recovery page for mode_sense */ 2677 - unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0, 2678 - 5, 0, 0xff, 0xff}; 2677 + static const unsigned char err_recov_pg[] = { 2678 + 0x1, 0xa, 0xc0, 11, 240, 0, 0, 0, 2679 + 5, 0, 0xff, 0xff 2680 + }; 2679 2681 2680 2682 memcpy(p, err_recov_pg, sizeof(err_recov_pg)); 2681 2683 if (1 == pcontrol) ··· 2687 2685 2688 2686 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target) 2689 2687 { /* Disconnect-Reconnect page for mode_sense */ 2690 - unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0, 2691 - 0, 0, 0, 0, 0, 0, 0, 0}; 2688 + static const unsigned char disconnect_pg[] = { 2689 + 0x2, 0xe, 128, 128, 0, 10, 0, 0, 2690 + 0, 0, 0, 0, 0, 0, 0, 0 2691 + }; 2692 2692 2693 2693 memcpy(p, disconnect_pg, sizeof(disconnect_pg)); 2694 2694 if (1 == pcontrol) ··· 2700 2696 2701 2697 static int resp_format_pg(unsigned char *p, int pcontrol, int target) 2702 2698 { /* Format device page for mode_sense */ 2703 - unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0, 2704 - 0, 0, 0, 0, 0, 0, 0, 0, 2705 - 0, 0, 0, 0, 0x40, 0, 0, 0}; 2699 + static const unsigned char format_pg[] = { 2700 + 0x3, 0x16, 0, 0, 0, 0, 0, 0, 2701 + 0, 0, 0, 0, 0, 0, 0, 0, 2702 + 0, 0, 0, 0, 0x40, 0, 0, 0 2703 + }; 2706 2704 2707 2705 memcpy(p, format_pg, sizeof(format_pg)); 2708 2706 put_unaligned_be16(sdebug_sectors_per, p + 10); ··· 2722 2716 2723 2717 static int resp_caching_pg(unsigned char *p, int pcontrol, int target) 2724 2718 { /* Caching page for mode_sense */ 2725 - unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0, 2726 - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; 2727 - unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0, 2728 - 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0}; 2719 + static const unsigned char ch_caching_pg[] = { 2720 + /* 0x8, 
18, */ 0x4, 0, 0, 0, 0, 0, 2721 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 2722 + }; 2723 + static const unsigned char d_caching_pg[] = { 2724 + 0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0, 2725 + 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0 2726 + }; 2729 2727 2730 2728 if (SDEBUG_OPT_N_WCE & sdebug_opts) 2731 2729 caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */ ··· 2748 2738 { /* Control mode page for mode_sense */ 2749 2739 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0, 2750 2740 0, 0, 0, 0}; 2751 - unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0, 2752 - 0, 0, 0x2, 0x4b}; 2741 + static const unsigned char d_ctrl_m_pg[] = { 2742 + 0xa, 10, 2, 0, 0, 0, 0, 0, 2743 + 0, 0, 0x2, 0x4b 2744 + }; 2753 2745 2754 2746 if (sdebug_dsense) 2755 2747 ctrl_m_pg[2] |= 0x4; ··· 2806 2794 2807 2795 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target) 2808 2796 { /* Informational Exceptions control mode page for mode_sense */ 2809 - unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0, 2810 - 0, 0, 0x0, 0x0}; 2811 - unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0, 2812 - 0, 0, 0x0, 0x0}; 2797 + static const unsigned char ch_iec_m_pg[] = { 2798 + /* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0, 2799 + 0, 0, 0x0, 0x0 2800 + }; 2801 + static const unsigned char d_iec_m_pg[] = { 2802 + 0x1c, 0xa, 0x08, 0, 0, 0, 0, 0, 2803 + 0, 0, 0x0, 0x0 2804 + }; 2813 2805 2814 2806 memcpy(p, iec_m_pg, sizeof(iec_m_pg)); 2815 2807 if (1 == pcontrol) ··· 2825 2809 2826 2810 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target) 2827 2811 { /* SAS SSP mode page - short format for mode_sense */ 2828 - unsigned char sas_sf_m_pg[] = {0x19, 0x6, 2829 - 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0}; 2812 + static const unsigned char sas_sf_m_pg[] = { 2813 + 0x19, 0x6, 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0 2814 + }; 2830 2815 2831 2816 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg)); 2832 2817 if (1 == pcontrol) ··· 2871 2854 2872 2855 static int 
resp_sas_sha_m_spg(unsigned char *p, int pcontrol) 2873 2856 { /* SAS SSP shared protocol specific port mode subpage */ 2874 - unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0, 2875 - 0, 0, 0, 0, 0, 0, 0, 0, 2876 - }; 2857 + static const unsigned char sas_sha_m_pg[] = { 2858 + 0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0, 2859 + 0, 0, 0, 0, 0, 0, 0, 0, 2860 + }; 2877 2861 2878 2862 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg)); 2879 2863 if (1 == pcontrol) ··· 2941 2923 static int resp_compression_m_pg(unsigned char *p, int pcontrol, int target, 2942 2924 unsigned char dce) 2943 2925 { /* Compression page for mode_sense (tape) */ 2944 - unsigned char compression_pg[] = {0x0f, 14, 0x40, 0, 0, 0, 0, 0, 2945 - 0, 0, 0, 0, 00, 00}; 2926 + static const unsigned char compression_pg[] = { 2927 + 0x0f, 14, 0x40, 0, 0, 0, 0, 0, 2928 + 0, 0, 0, 0, 0, 0 2929 + }; 2946 2930 2947 2931 memcpy(p, compression_pg, sizeof(compression_pg)); 2948 2932 if (dce) ··· 3302 3282 3303 3283 static int resp_temp_l_pg(unsigned char *arr) 3304 3284 { 3305 - unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38, 3306 - 0x0, 0x1, 0x3, 0x2, 0x0, 65, 3307 - }; 3285 + static const unsigned char temp_l_pg[] = { 3286 + 0x0, 0x0, 0x3, 0x2, 0x0, 38, 3287 + 0x0, 0x1, 0x3, 0x2, 0x0, 65, 3288 + }; 3308 3289 3309 3290 memcpy(arr, temp_l_pg, sizeof(temp_l_pg)); 3310 3291 return sizeof(temp_l_pg); ··· 3313 3292 3314 3293 static int resp_ie_l_pg(unsigned char *arr) 3315 3294 { 3316 - unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38, 3317 - }; 3295 + static const unsigned char ie_l_pg[] = { 3296 + 0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38, 3297 + }; 3318 3298 3319 3299 memcpy(arr, ie_l_pg, sizeof(ie_l_pg)); 3320 3300 if (iec_m_pg[2] & 0x4) { /* TEST bit set */ ··· 3327 3305 3328 3306 static int resp_env_rep_l_spg(unsigned char *arr) 3329 3307 { 3330 - unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8, 3331 - 0x0, 40, 72, 0xff, 45, 18, 0, 0, 3332 - 0x1, 0x0, 0x23, 0x8, 3333 - 0x0, 55, 72, 35, 55, 
45, 0, 0, 3334 - }; 3308 + static const unsigned char env_rep_l_spg[] = { 3309 + 0x0, 0x0, 0x23, 0x8, 3310 + 0x0, 40, 72, 0xff, 45, 18, 0, 0, 3311 + 0x1, 0x0, 0x23, 0x8, 3312 + 0x0, 55, 72, 35, 55, 45, 0, 0, 3313 + }; 3335 3314 3336 3315 memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg)); 3337 3316 return sizeof(env_rep_l_spg);
+2 -2
drivers/scsi/scsi_sysfs.c
··· 265 265 return show_shost_mode(supported_mode, buf); 266 266 } 267 267 268 - static DEVICE_ATTR(supported_mode, S_IRUGO | S_IWUSR, show_shost_supported_mode, NULL); 268 + static DEVICE_ATTR(supported_mode, S_IRUGO, show_shost_supported_mode, NULL); 269 269 270 270 static ssize_t 271 271 show_shost_active_mode(struct device *dev, ··· 279 279 return show_shost_mode(shost->active_mode, buf); 280 280 } 281 281 282 - static DEVICE_ATTR(active_mode, S_IRUGO | S_IWUSR, show_shost_active_mode, NULL); 282 + static DEVICE_ATTR(active_mode, S_IRUGO, show_shost_active_mode, NULL); 283 283 284 284 static int check_reset_type(const char *str) 285 285 {
+29 -22
drivers/soc/tegra/pmc.c
··· 1232 1232 } 1233 1233 1234 1234 static int tegra_powergate_of_get_resets(struct tegra_powergate *pg, 1235 - struct device_node *np, bool off) 1235 + struct device_node *np) 1236 1236 { 1237 1237 struct device *dev = pg->pmc->dev; 1238 1238 int err; ··· 1247 1247 err = reset_control_acquire(pg->reset); 1248 1248 if (err < 0) { 1249 1249 pr_err("failed to acquire resets: %d\n", err); 1250 - goto out; 1251 - } 1252 - 1253 - if (off) { 1254 - err = reset_control_assert(pg->reset); 1255 - } else { 1256 - err = reset_control_deassert(pg->reset); 1257 - if (err < 0) 1258 - goto out; 1259 - 1260 - reset_control_release(pg->reset); 1261 - } 1262 - 1263 - out: 1264 - if (err) { 1265 - reset_control_release(pg->reset); 1266 1250 reset_control_put(pg->reset); 1267 1251 } 1268 1252 ··· 1292 1308 goto set_available; 1293 1309 } 1294 1310 1295 - err = tegra_powergate_of_get_resets(pg, np, off); 1311 + err = tegra_powergate_of_get_resets(pg, np); 1296 1312 if (err < 0) { 1297 1313 dev_err(dev, "failed to get resets for %pOFn: %d\n", np, err); 1298 1314 goto remove_clks; 1299 1315 } 1300 1316 1301 - if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) { 1302 - if (off) 1303 - WARN_ON(tegra_powergate_power_up(pg, true)); 1317 + /* 1318 + * If the power-domain is off, then ensure the resets are asserted. 1319 + * If the power-domain is on, then power down to ensure that when is 1320 + * it turned on the power-domain, clocks and resets are all in the 1321 + * expected state. 1322 + */ 1323 + if (off) { 1324 + err = reset_control_assert(pg->reset); 1325 + if (err) { 1326 + pr_err("failed to assert resets: %d\n", err); 1327 + goto remove_resets; 1328 + } 1329 + } else { 1330 + err = tegra_powergate_power_down(pg); 1331 + if (err) { 1332 + dev_err(dev, "failed to turn off PM domain %s: %d\n", 1333 + pg->genpd.name, err); 1334 + goto remove_resets; 1335 + } 1336 + } 1304 1337 1338 + /* 1339 + * If PM_GENERIC_DOMAINS is not enabled, power-on 1340 + * the domain and skip the genpd registration. 
1341 + */ 1342 + if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) { 1343 + WARN_ON(tegra_powergate_power_up(pg, true)); 1305 1344 goto remove_resets; 1306 1345 } 1307 1346 1308 - err = pm_genpd_init(&pg->genpd, NULL, off); 1347 + err = pm_genpd_init(&pg->genpd, NULL, true); 1309 1348 if (err < 0) { 1310 1349 dev_err(dev, "failed to initialise PM domain %pOFn: %d\n", np, 1311 1350 err);
+4 -4
drivers/tty/serial/8250/8250_rsa.c
··· 147 147 if (up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16) 148 148 serial_out(up, UART_RSA_FRR, 0); 149 149 } 150 - EXPORT_SYMBOL_GPL_FOR_MODULES(rsa_enable, "8250_base"); 150 + EXPORT_SYMBOL_FOR_MODULES(rsa_enable, "8250_base"); 151 151 152 152 /* 153 153 * Attempts to turn off the RSA FIFO and resets the RSA board back to 115kbps compat mode. It is ··· 179 179 up->port.uartclk = SERIAL_RSA_BAUD_BASE_LO * 16; 180 180 uart_port_unlock_irq(&up->port); 181 181 } 182 - EXPORT_SYMBOL_GPL_FOR_MODULES(rsa_disable, "8250_base"); 182 + EXPORT_SYMBOL_FOR_MODULES(rsa_disable, "8250_base"); 183 183 184 184 void rsa_autoconfig(struct uart_8250_port *up) 185 185 { ··· 192 192 if (__rsa_enable(up)) 193 193 up->port.type = PORT_RSA; 194 194 } 195 - EXPORT_SYMBOL_GPL_FOR_MODULES(rsa_autoconfig, "8250_base"); 195 + EXPORT_SYMBOL_FOR_MODULES(rsa_autoconfig, "8250_base"); 196 196 197 197 void rsa_reset(struct uart_8250_port *up) 198 198 { ··· 201 201 202 202 serial_out(up, UART_RSA_FRR, 0); 203 203 } 204 - EXPORT_SYMBOL_GPL_FOR_MODULES(rsa_reset, "8250_base"); 204 + EXPORT_SYMBOL_FOR_MODULES(rsa_reset, "8250_base"); 205 205 206 206 #ifdef CONFIG_SERIAL_8250_DEPRECATED_OPTIONS 207 207 #ifndef MODULE
+8 -4
drivers/ufs/core/ufshcd.c
··· 7138 7138 static irqreturn_t ufshcd_intr(int irq, void *__hba) 7139 7139 { 7140 7140 struct ufs_hba *hba = __hba; 7141 + u32 intr_status, enabled_intr_status; 7141 7142 7142 7143 /* Move interrupt handling to thread when MCQ & ESI are not enabled */ 7143 7144 if (!hba->mcq_enabled || !hba->mcq_esi_enabled) 7144 7145 return IRQ_WAKE_THREAD; 7145 7146 7147 + intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); 7148 + enabled_intr_status = intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE); 7149 + 7150 + ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS); 7151 + 7146 7152 /* Directly handle interrupts since MCQ ESI handlers does the hard job */ 7147 - return ufshcd_sl_intr(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS) & 7148 - ufshcd_readl(hba, REG_INTERRUPT_ENABLE)); 7153 + return ufshcd_sl_intr(hba, enabled_intr_status); 7149 7154 } 7150 7155 7151 7156 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag) ··· 10521 10516 err = devm_add_action_or_reset(dev, ufshcd_devres_release, 10522 10517 host); 10523 10518 if (err) 10524 - return dev_err_probe(dev, err, 10525 - "failed to add ufshcd dealloc action\n"); 10519 + return err; 10526 10520 10527 10521 host->nr_maps = HCTX_TYPE_POLL + 1; 10528 10522 hba = shost_priv(host);
+1 -1
drivers/ufs/host/ufs-mediatek.c
··· 818 818 unsigned int q_index; 819 819 820 820 q_index = map->mq_map[cpu]; 821 - if (q_index > nr) { 821 + if (q_index >= nr) { 822 822 dev_err(hba->dev, "hwq index %d exceed %d\n", 823 823 q_index, nr); 824 824 return MTK_MCQ_INVALID_IRQ;
+12 -15
drivers/virt/coco/sev-guest/sev-guest.c
··· 116 116 117 117 static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg) 118 118 { 119 + struct snp_derived_key_resp *derived_key_resp __free(kfree) = NULL; 119 120 struct snp_derived_key_req *derived_key_req __free(kfree) = NULL; 120 - struct snp_derived_key_resp derived_key_resp = {0}; 121 121 struct snp_msg_desc *mdesc = snp_dev->msg_desc; 122 122 struct snp_guest_req req = {}; 123 123 int rc, resp_len; 124 - /* Response data is 64 bytes and max authsize for GCM is 16 bytes. */ 125 - u8 buf[64 + 16]; 126 124 127 125 if (!arg->req_data || !arg->resp_data) 128 126 return -EINVAL; ··· 130 132 * response payload. Make sure that it has enough space to cover the 131 133 * authtag. 132 134 */ 133 - resp_len = sizeof(derived_key_resp.data) + mdesc->ctx->authsize; 134 - if (sizeof(buf) < resp_len) 135 + resp_len = sizeof(derived_key_resp->data) + mdesc->ctx->authsize; 136 + derived_key_resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT); 137 + if (!derived_key_resp) 135 138 return -ENOMEM; 136 139 137 140 derived_key_req = kzalloc(sizeof(*derived_key_req), GFP_KERNEL_ACCOUNT); ··· 148 149 req.vmpck_id = mdesc->vmpck_id; 149 150 req.req_buf = derived_key_req; 150 151 req.req_sz = sizeof(*derived_key_req); 151 - req.resp_buf = buf; 152 + req.resp_buf = derived_key_resp; 152 153 req.resp_sz = resp_len; 153 154 req.exit_code = SVM_VMGEXIT_GUEST_REQUEST; 154 155 155 156 rc = snp_send_guest_request(mdesc, &req); 156 157 arg->exitinfo2 = req.exitinfo2; 157 - if (rc) 158 - return rc; 159 - 160 - memcpy(derived_key_resp.data, buf, sizeof(derived_key_resp.data)); 161 - if (copy_to_user((void __user *)arg->resp_data, &derived_key_resp, 162 - sizeof(derived_key_resp))) 163 - rc = -EFAULT; 158 + if (!rc) { 159 + if (copy_to_user((void __user *)arg->resp_data, derived_key_resp, 160 + sizeof(derived_key_resp->data))) 161 + rc = -EFAULT; 162 + } 164 163 165 164 /* The response buffer contains the sensitive data, explicitly clear it. 
*/ 166 - memzero_explicit(buf, sizeof(buf)); 167 - memzero_explicit(&derived_key_resp, sizeof(derived_key_resp)); 165 + memzero_explicit(derived_key_resp, sizeof(*derived_key_resp)); 166 + 168 167 return rc; 169 168 } 170 169
+1 -1
fs/anon_inodes.c
··· 129 129 } 130 130 return inode; 131 131 } 132 - EXPORT_SYMBOL_GPL_FOR_MODULES(anon_inode_make_secure_inode, "kvm"); 132 + EXPORT_SYMBOL_FOR_MODULES(anon_inode_make_secure_inode, "kvm"); 133 133 134 134 static struct file *__anon_inode_getfile(const char *name, 135 135 const struct file_operations *fops,
+19 -5
fs/btrfs/extent_io.c
··· 1512 1512 1513 1513 /* 1514 1514 * Return 0 if we have submitted or queued the sector for submission. 1515 - * Return <0 for critical errors. 1515 + * Return <0 for critical errors, and the sector will have its dirty flag cleared. 1516 1516 * 1517 1517 * Caller should make sure filepos < i_size and handle filepos >= i_size case. 1518 1518 */ ··· 1535 1535 ASSERT(filepos < i_size); 1536 1536 1537 1537 em = btrfs_get_extent(inode, NULL, filepos, sectorsize); 1538 - if (IS_ERR(em)) 1538 + if (IS_ERR(em)) { 1539 + /* 1540 + * When submission failed, we should still clear the folio dirty. 1541 + * Or the folio will be written back again but without any 1542 + * ordered extent. 1543 + */ 1544 + btrfs_folio_clear_dirty(fs_info, folio, filepos, sectorsize); 1545 + btrfs_folio_set_writeback(fs_info, folio, filepos, sectorsize); 1546 + btrfs_folio_clear_writeback(fs_info, folio, filepos, sectorsize); 1539 1547 return PTR_ERR(em); 1548 + } 1540 1549 1541 1550 extent_offset = filepos - em->start; 1542 1551 em_end = btrfs_extent_map_end(em); ··· 1618 1609 folio_unlock(folio); 1619 1610 return 1; 1620 1611 } 1621 - if (ret < 0) 1612 + if (ret < 0) { 1613 + btrfs_folio_clear_dirty(fs_info, folio, start, len); 1614 + btrfs_folio_set_writeback(fs_info, folio, start, len); 1615 + btrfs_folio_clear_writeback(fs_info, folio, start, len); 1622 1616 return ret; 1617 + } 1623 1618 1624 1619 for (cur = start; cur < start + len; cur += fs_info->sectorsize) 1625 1620 set_bit((cur - folio_start) >> fs_info->sectorsize_bits, &range_bitmap); ··· 1679 1666 * Here we set writeback and clear for the range. If the full folio 1680 1667 * is no longer dirty then we clear the PAGECACHE_TAG_DIRTY tag. 1681 1668 * 1682 - * If we hit any error, the corresponding sector will still be dirty 1683 - * thus no need to clear PAGECACHE_TAG_DIRTY. 1669 + * If we hit any error, the corresponding sector will have its dirty 1670 + * flag cleared and writeback finished, thus no need to handle the error case. 
1684 1671 */ 1685 1672 if (!submitted_io && !error) { 1686 1673 btrfs_folio_set_writeback(fs_info, folio, start, len); ··· 1826 1813 xas_load(&xas); 1827 1814 xas_set_mark(&xas, PAGECACHE_TAG_WRITEBACK); 1828 1815 xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY); 1816 + xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE); 1829 1817 xas_unlock_irqrestore(&xas, flags); 1830 1818 1831 1819 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
+19 -10
fs/btrfs/inode.c
··· 4189 4189 return ret; 4190 4190 } 4191 4191 4192 + static void update_time_after_link_or_unlink(struct btrfs_inode *dir) 4193 + { 4194 + struct timespec64 now; 4195 + 4196 + /* 4197 + * If we are replaying a log tree, we do not want to update the mtime 4198 + * and ctime of the parent directory with the current time, since the 4199 + * log replay procedure is responsible for setting them to their correct 4200 + * values (the ones it had when the fsync was done). 4201 + */ 4202 + if (test_bit(BTRFS_FS_LOG_RECOVERING, &dir->root->fs_info->flags)) 4203 + return; 4204 + 4205 + now = inode_set_ctime_current(&dir->vfs_inode); 4206 + inode_set_mtime_to_ts(&dir->vfs_inode, now); 4207 + } 4208 + 4192 4209 /* 4193 4210 * unlink helper that gets used here in inode.c and in the tree logging 4194 4211 * recovery code. It remove a link in a directory with a given name, and ··· 4306 4289 inode_inc_iversion(&inode->vfs_inode); 4307 4290 inode_set_ctime_current(&inode->vfs_inode); 4308 4291 inode_inc_iversion(&dir->vfs_inode); 4309 - inode_set_mtime_to_ts(&dir->vfs_inode, inode_set_ctime_current(&dir->vfs_inode)); 4292 + update_time_after_link_or_unlink(dir); 4310 4293 4311 4294 return btrfs_update_inode(trans, dir); 4312 4295 } ··· 6700 6683 btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size + 6701 6684 name->len * 2); 6702 6685 inode_inc_iversion(&parent_inode->vfs_inode); 6703 - /* 6704 - * If we are replaying a log tree, we do not want to update the mtime 6705 - * and ctime of the parent directory with the current time, since the 6706 - * log replay procedure is responsible for setting them to their correct 6707 - * values (the ones it had when the fsync was done). 
6708 - */ 6709 - if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) 6710 - inode_set_mtime_to_ts(&parent_inode->vfs_inode, 6711 - inode_set_ctime_current(&parent_inode->vfs_inode)); 6686 + update_time_after_link_or_unlink(parent_inode); 6712 6687 6713 6688 ret = btrfs_update_inode(trans, parent_inode); 6714 6689 if (ret)
+18 -1
fs/btrfs/subpage.c
··· 448 448 449 449 spin_lock_irqsave(&bfs->lock, flags); 450 450 bitmap_set(bfs->bitmaps, start_bit, len >> fs_info->sectorsize_bits); 451 + 452 + /* 453 + * Don't clear the TOWRITE tag when starting writeback on a still-dirty 454 + * folio. Doing so can cause WB_SYNC_ALL writepages() to overlook it, 455 + * assume writeback is complete, and exit too early — violating sync 456 + * ordering guarantees. 457 + */ 451 458 if (!folio_test_writeback(folio)) 452 - folio_start_writeback(folio); 459 + __folio_start_writeback(folio, true); 460 + if (!folio_test_dirty(folio)) { 461 + struct address_space *mapping = folio_mapping(folio); 462 + XA_STATE(xas, &mapping->i_pages, folio->index); 463 + unsigned long flags; 464 + 465 + xas_lock_irqsave(&xas, flags); 466 + xas_load(&xas); 467 + xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE); 468 + xas_unlock_irqrestore(&xas, flags); 469 + } 453 470 spin_unlock_irqrestore(&bfs->lock, flags); 454 471 } 455 472
+8 -5
fs/btrfs/super.c
··· 88 88 refcount_t refs; 89 89 }; 90 90 91 + static void btrfs_emit_options(struct btrfs_fs_info *info, 92 + struct btrfs_fs_context *old); 93 + 91 94 enum { 92 95 Opt_acl, 93 96 Opt_clear_cache, ··· 701 698 702 699 if (!test_bit(BTRFS_FS_STATE_REMOUNTING, &info->fs_state)) { 703 700 if (btrfs_raw_test_opt(*mount_opt, SPACE_CACHE)) { 704 - btrfs_info(info, "disk space caching is enabled"); 705 701 btrfs_warn(info, 706 702 "space cache v1 is being deprecated and will be removed in a future release, please use -o space_cache=v2"); 707 703 } 708 - if (btrfs_raw_test_opt(*mount_opt, FREE_SPACE_TREE)) 709 - btrfs_info(info, "using free-space-tree"); 710 704 } 711 705 712 706 return ret; ··· 979 979 btrfs_err(fs_info, "open_ctree failed: %d", ret); 980 980 return ret; 981 981 } 982 + 983 + btrfs_emit_options(fs_info, NULL); 982 984 983 985 inode = btrfs_iget(BTRFS_FIRST_FREE_OBJECTID, fs_info->fs_root); 984 986 if (IS_ERR(inode)) { ··· 1439 1437 { 1440 1438 btrfs_info_if_set(info, old, NODATASUM, "setting nodatasum"); 1441 1439 btrfs_info_if_set(info, old, DEGRADED, "allowing degraded mounts"); 1442 - btrfs_info_if_set(info, old, NODATASUM, "setting nodatasum"); 1440 + btrfs_info_if_set(info, old, NODATACOW, "setting nodatacow"); 1443 1441 btrfs_info_if_set(info, old, SSD, "enabling ssd optimizations"); 1444 1442 btrfs_info_if_set(info, old, SSD_SPREAD, "using spread ssd allocation scheme"); 1445 1443 btrfs_info_if_set(info, old, NOBARRIER, "turning off barriers"); ··· 1461 1459 btrfs_info_if_set(info, old, IGNOREMETACSUMS, "ignoring meta csums"); 1462 1460 btrfs_info_if_set(info, old, IGNORESUPERFLAGS, "ignoring unknown super block flags"); 1463 1461 1462 + btrfs_info_if_unset(info, old, NODATASUM, "setting datasum"); 1464 1463 btrfs_info_if_unset(info, old, NODATACOW, "setting datacow"); 1465 1464 btrfs_info_if_unset(info, old, SSD, "not using ssd optimizations"); 1466 1465 btrfs_info_if_unset(info, old, SSD_SPREAD, "not using spread ssd allocation scheme"); 1467 - 
btrfs_info_if_unset(info, old, NOBARRIER, "turning off barriers"); 1466 + btrfs_info_if_unset(info, old, NOBARRIER, "turning on barriers"); 1468 1467 btrfs_info_if_unset(info, old, NOTREELOG, "enabling tree log"); 1469 1468 btrfs_info_if_unset(info, old, SPACE_CACHE, "disabling disk space caching"); 1470 1469 btrfs_info_if_unset(info, old, FREE_SPACE_TREE, "disabling free space tree");
+99 -34
fs/btrfs/zoned.c
··· 17 17 #include "accessors.h" 18 18 #include "bio.h" 19 19 #include "transaction.h" 20 + #include "sysfs.h" 20 21 21 22 /* Maximum number of zones to report per blkdev_report_zones() call */ 22 23 #define BTRFS_REPORT_NR_ZONES 4096 ··· 42 41 43 42 /* Number of superblock log zones */ 44 43 #define BTRFS_NR_SB_LOG_ZONES 2 44 + 45 + /* Default number of max active zones when the device has no limits. */ 46 + #define BTRFS_DEFAULT_MAX_ACTIVE_ZONES 128 45 47 46 48 /* 47 49 * Minimum of active zones we need: ··· 420 416 if (!IS_ALIGNED(nr_sectors, zone_sectors)) 421 417 zone_info->nr_zones++; 422 418 423 - max_active_zones = bdev_max_active_zones(bdev); 419 + max_active_zones = min_not_zero(bdev_max_active_zones(bdev), 420 + bdev_max_open_zones(bdev)); 421 + if (!max_active_zones && zone_info->nr_zones > BTRFS_DEFAULT_MAX_ACTIVE_ZONES) 422 + max_active_zones = BTRFS_DEFAULT_MAX_ACTIVE_ZONES; 424 423 if (max_active_zones && max_active_zones < BTRFS_MIN_ACTIVE_ZONES) { 425 424 btrfs_err(fs_info, 426 425 "zoned: %s: max active zones %u is too small, need at least %u active zones", ··· 2175 2168 goto out_unlock; 2176 2169 } 2177 2170 2178 - /* No space left */ 2179 - if (btrfs_zoned_bg_is_full(block_group)) { 2180 - ret = false; 2181 - goto out_unlock; 2171 + if (block_group->flags & BTRFS_BLOCK_GROUP_DATA) { 2172 + /* The caller should check if the block group is full. */ 2173 + if (WARN_ON_ONCE(btrfs_zoned_bg_is_full(block_group))) { 2174 + ret = false; 2175 + goto out_unlock; 2176 + } 2177 + } else { 2178 + /* Since it is already written, it should have been active. 
*/ 2179 + WARN_ON_ONCE(block_group->meta_write_pointer != block_group->start); 2182 2180 } 2183 2181 2184 2182 for (i = 0; i < map->num_stripes; i++) { ··· 2242 2230 struct btrfs_fs_info *fs_info = block_group->fs_info; 2243 2231 const u64 end = block_group->start + block_group->length; 2244 2232 struct extent_buffer *eb; 2245 - unsigned long index, start = (block_group->start >> fs_info->sectorsize_bits); 2233 + unsigned long index, start = (block_group->start >> fs_info->nodesize_bits); 2246 2234 2247 2235 rcu_read_lock(); 2248 2236 xa_for_each_start(&fs_info->buffer_tree, index, eb, start) { ··· 2255 2243 rcu_read_lock(); 2256 2244 } 2257 2245 rcu_read_unlock(); 2246 + } 2247 + 2248 + static int call_zone_finish(struct btrfs_block_group *block_group, 2249 + struct btrfs_io_stripe *stripe) 2250 + { 2251 + struct btrfs_device *device = stripe->dev; 2252 + const u64 physical = stripe->physical; 2253 + struct btrfs_zoned_device_info *zinfo = device->zone_info; 2254 + int ret; 2255 + 2256 + if (!device->bdev) 2257 + return 0; 2258 + 2259 + if (zinfo->max_active_zones == 0) 2260 + return 0; 2261 + 2262 + if (btrfs_dev_is_sequential(device, physical)) { 2263 + unsigned int nofs_flags; 2264 + 2265 + nofs_flags = memalloc_nofs_save(); 2266 + ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_FINISH, 2267 + physical >> SECTOR_SHIFT, 2268 + zinfo->zone_size >> SECTOR_SHIFT); 2269 + memalloc_nofs_restore(nofs_flags); 2270 + 2271 + if (ret) 2272 + return ret; 2273 + } 2274 + 2275 + if (!(block_group->flags & BTRFS_BLOCK_GROUP_DATA)) 2276 + zinfo->reserved_active_zones++; 2277 + btrfs_dev_clear_active_zone(device, physical); 2278 + 2279 + return 0; 2258 2280 } 2259 2281 2260 2282 static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written) ··· 2375 2329 down_read(&dev_replace->rwsem); 2376 2330 map = block_group->physical_map; 2377 2331 for (i = 0; i < map->num_stripes; i++) { 2378 - struct btrfs_device *device = map->stripes[i].dev; 2379 - const u64 
physical = map->stripes[i].physical; 2380 - struct btrfs_zoned_device_info *zinfo = device->zone_info; 2381 - unsigned int nofs_flags; 2382 2332 2383 - if (!device->bdev) 2384 - continue; 2385 - 2386 - if (zinfo->max_active_zones == 0) 2387 - continue; 2388 - 2389 - nofs_flags = memalloc_nofs_save(); 2390 - ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_FINISH, 2391 - physical >> SECTOR_SHIFT, 2392 - zinfo->zone_size >> SECTOR_SHIFT); 2393 - memalloc_nofs_restore(nofs_flags); 2394 - 2333 + ret = call_zone_finish(block_group, &map->stripes[i]); 2395 2334 if (ret) { 2396 2335 up_read(&dev_replace->rwsem); 2397 2336 return ret; 2398 2337 } 2399 - 2400 - if (!(block_group->flags & BTRFS_BLOCK_GROUP_DATA)) 2401 - zinfo->reserved_active_zones++; 2402 - btrfs_dev_clear_active_zone(device, physical); 2403 2338 } 2404 2339 up_read(&dev_replace->rwsem); 2405 2340 ··· 2531 2504 void btrfs_zoned_reserve_data_reloc_bg(struct btrfs_fs_info *fs_info) 2532 2505 { 2533 2506 struct btrfs_space_info *data_sinfo = fs_info->data_sinfo; 2534 - struct btrfs_space_info *space_info = data_sinfo->sub_group[0]; 2507 + struct btrfs_space_info *space_info = data_sinfo; 2535 2508 struct btrfs_trans_handle *trans; 2536 2509 struct btrfs_block_group *bg; 2537 2510 struct list_head *bg_list; 2538 2511 u64 alloc_flags; 2539 - bool initial = false; 2512 + bool first = true; 2540 2513 bool did_chunk_alloc = false; 2541 2514 int index; 2542 2515 int ret; ··· 2550 2523 if (sb_rdonly(fs_info->sb)) 2551 2524 return; 2552 2525 2553 - ASSERT(space_info->subgroup_id == BTRFS_SUB_GROUP_DATA_RELOC); 2554 2526 alloc_flags = btrfs_get_alloc_profile(fs_info, space_info->flags); 2555 2527 index = btrfs_bg_flags_to_raid_index(alloc_flags); 2556 2528 2557 - bg_list = &data_sinfo->block_groups[index]; 2529 + /* Scan the data space_info to find empty block groups. Take the second one. 
*/ 2558 2530 again: 2531 + bg_list = &space_info->block_groups[index]; 2559 2532 list_for_each_entry(bg, bg_list, list) { 2560 - if (bg->used > 0) 2533 + if (bg->alloc_offset != 0) 2561 2534 continue; 2562 2535 2563 - if (!initial) { 2564 - initial = true; 2536 + if (first) { 2537 + first = false; 2565 2538 continue; 2539 + } 2540 + 2541 + if (space_info == data_sinfo) { 2542 + /* Migrate the block group to the data relocation space_info. */ 2543 + struct btrfs_space_info *reloc_sinfo = data_sinfo->sub_group[0]; 2544 + int factor; 2545 + 2546 + ASSERT(reloc_sinfo->subgroup_id == BTRFS_SUB_GROUP_DATA_RELOC); 2547 + factor = btrfs_bg_type_to_factor(bg->flags); 2548 + 2549 + down_write(&space_info->groups_sem); 2550 + list_del_init(&bg->list); 2551 + /* We can assume this as we choose the second empty one. */ 2552 + ASSERT(!list_empty(&space_info->block_groups[index])); 2553 + up_write(&space_info->groups_sem); 2554 + 2555 + spin_lock(&space_info->lock); 2556 + space_info->total_bytes -= bg->length; 2557 + space_info->disk_total -= bg->length * factor; 2558 + /* There is no allocation ever happened. */ 2559 + ASSERT(bg->used == 0); 2560 + ASSERT(bg->zone_unusable == 0); 2561 + /* No super block in a block group on the zoned setup. */ 2562 + ASSERT(bg->bytes_super == 0); 2563 + spin_unlock(&space_info->lock); 2564 + 2565 + bg->space_info = reloc_sinfo; 2566 + if (reloc_sinfo->block_group_kobjs[index] == NULL) 2567 + btrfs_sysfs_add_block_group_type(bg); 2568 + 2569 + btrfs_add_bg_to_space_info(fs_info, bg); 2566 2570 } 2567 2571 2568 2572 fs_info->data_reloc_bg = bg->start; ··· 2610 2552 if (IS_ERR(trans)) 2611 2553 return; 2612 2554 2555 + /* Allocate new BG in the data relocation space_info. 
*/ 2556 + space_info = data_sinfo->sub_group[0]; 2557 + ASSERT(space_info->subgroup_id == BTRFS_SUB_GROUP_DATA_RELOC); 2613 2558 ret = btrfs_chunk_alloc(trans, space_info, alloc_flags, CHUNK_ALLOC_FORCE); 2614 2559 btrfs_end_transaction(trans); 2615 2560 if (ret == 1) { 2561 + /* 2562 + * We allocated a new block group in the data relocation space_info. We 2563 + * can take that one. 2564 + */ 2565 + first = false; 2616 2566 did_chunk_alloc = true; 2617 - bg_list = &space_info->block_groups[index]; 2618 2567 goto again; 2619 2568 } 2620 2569 }
+1 -1
fs/buffer.c
··· 157 157 */ 158 158 void end_buffer_read_sync(struct buffer_head *bh, int uptodate) 159 159 { 160 - __end_buffer_read_notouch(bh, uptodate); 161 160 put_bh(bh); 161 + __end_buffer_read_notouch(bh, uptodate); 162 162 } 163 163 EXPORT_SYMBOL(end_buffer_read_sync); 164 164
+1 -1
fs/coredump.c
··· 345 345 was_space = false; 346 346 err = cn_printf(cn, "%c", '\0'); 347 347 if (err) 348 - return err; 348 + return false; 349 349 (*argv)[(*argc)++] = cn->used; 350 350 } 351 351 }
+3
fs/dax.c
··· 1743 1743 loff_t done = 0; 1744 1744 int ret; 1745 1745 1746 + if (WARN_ON_ONCE(iocb->ki_flags & IOCB_ATOMIC)) 1747 + return -EIO; 1748 + 1746 1749 if (!iomi.len) 1747 1750 return 0; 1748 1751
+20 -3
fs/ext4/fsmap.c
··· 393 393 /* Reserved GDT blocks */ 394 394 if (!ext4_has_feature_meta_bg(sb) || metagroup < first_meta_bg) { 395 395 len = le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks); 396 + 397 + /* 398 + * mkfs.ext4 can set s_reserved_gdt_blocks as 0 in some cases, 399 + * check for that. 400 + */ 401 + if (!len) 402 + return 0; 403 + 396 404 error = ext4_getfsmap_fill(meta_list, fsb, len, 397 405 EXT4_FMR_OWN_RESV_GDT); 398 406 if (error) ··· 534 526 ext4_group_t end_ag; 535 527 ext4_grpblk_t first_cluster; 536 528 ext4_grpblk_t last_cluster; 529 + struct ext4_fsmap irec; 537 530 int error = 0; 538 531 539 532 bofs = le32_to_cpu(sbi->s_es->s_first_data_block); ··· 618 609 goto err; 619 610 } 620 611 621 - /* Report any gaps at the end of the bg */ 612 + /* 613 + * The dummy record below will cause ext4_getfsmap_helper() to report 614 + * any allocated blocks at the end of the range. 615 + */ 616 + irec.fmr_device = 0; 617 + irec.fmr_physical = end_fsb + 1; 618 + irec.fmr_length = 0; 619 + irec.fmr_owner = EXT4_FMR_OWN_FREE; 620 + irec.fmr_flags = 0; 621 + 622 622 info->gfi_last = true; 623 - error = ext4_getfsmap_datadev_helper(sb, end_ag, last_cluster + 1, 624 - 0, info); 623 + error = ext4_getfsmap_helper(sb, info, &irec); 625 624 if (error) 626 625 goto err; 627 626
+2 -2
fs/ext4/indirect.c
··· 539 539 int indirect_blks; 540 540 int blocks_to_boundary = 0; 541 541 int depth; 542 - int count = 0; 542 + u64 count = 0; 543 543 ext4_fsblk_t first_block = 0; 544 544 545 545 trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); ··· 588 588 count++; 589 589 /* Fill in size of a hole we found */ 590 590 map->m_pblk = 0; 591 - map->m_len = min_t(unsigned int, map->m_len, count); 591 + map->m_len = umin(map->m_len, count); 592 592 goto cleanup; 593 593 } 594 594
+2 -2
fs/ext4/inode.c
··· 146 146 */ 147 147 int ext4_inode_is_fast_symlink(struct inode *inode) 148 148 { 149 - if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) { 149 + if (!ext4_has_feature_ea_inode(inode->i_sb)) { 150 150 int ea_blocks = EXT4_I(inode)->i_file_acl ? 151 151 EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0; 152 152 ··· 3155 3155 folio_unlock(folio); 3156 3156 folio_put(folio); 3157 3157 /* 3158 - * block_write_begin may have instantiated a few blocks 3158 + * ext4_block_write_begin may have instantiated a few blocks 3159 3159 * outside i_size. Trim these off again. Don't need 3160 3160 * i_size_read because we hold inode lock. 3161 3161 */
-4
fs/ext4/namei.c
··· 2965 2965 struct inode *inode) 2966 2966 { 2967 2967 struct buffer_head *dir_block = NULL; 2968 - struct ext4_dir_entry_2 *de; 2969 2968 ext4_lblk_t block = 0; 2970 2969 int err; 2971 2970 ··· 2981 2982 dir_block = ext4_append(handle, inode, &block); 2982 2983 if (IS_ERR(dir_block)) 2983 2984 return PTR_ERR(dir_block); 2984 - de = (struct ext4_dir_entry_2 *)dir_block->b_data; 2985 2985 err = ext4_init_dirblock(handle, inode, dir_block, dir->i_ino, NULL, 0); 2986 - if (err) 2987 - goto out; 2988 2986 out: 2989 2987 brelse(dir_block); 2990 2988 return err;
+3 -2
fs/ext4/orphan.c
··· 589 589 } 590 590 oi->of_blocks = inode->i_size >> sb->s_blocksize_bits; 591 591 oi->of_csum_seed = EXT4_I(inode)->i_csum_seed; 592 - oi->of_binfo = kmalloc(oi->of_blocks*sizeof(struct ext4_orphan_block), 593 - GFP_KERNEL); 592 + oi->of_binfo = kmalloc_array(oi->of_blocks, 593 + sizeof(struct ext4_orphan_block), 594 + GFP_KERNEL); 594 595 if (!oi->of_binfo) { 595 596 ret = -ENOMEM; 596 597 goto out_put;
+1 -1
fs/ext4/page-io.c
··· 547 547 * first page of the bio. Otherwise it can deadlock. 548 548 */ 549 549 if (io->io_bio) 550 - gfp_flags = GFP_NOWAIT | __GFP_NOWARN; 550 + gfp_flags = GFP_NOWAIT; 551 551 retry_encrypt: 552 552 bounce_page = fscrypt_encrypt_pagecache_blocks(folio, 553 553 enc_bytes, 0, gfp_flags);
+8 -4
fs/ext4/super.c
··· 268 268 void ext4_sb_breadahead_unmovable(struct super_block *sb, sector_t block) 269 269 { 270 270 struct buffer_head *bh = bdev_getblk(sb->s_bdev, block, 271 - sb->s_blocksize, GFP_NOWAIT | __GFP_NOWARN); 271 + sb->s_blocksize, GFP_NOWAIT); 272 272 273 273 if (likely(bh)) { 274 274 if (trylock_buffer(bh)) ··· 1998 1998 fc->fs_private = ctx; 1999 1999 fc->ops = &ext4_context_ops; 2000 2000 2001 + /* i_version is always enabled now */ 2002 + fc->sb_flags |= SB_I_VERSION; 2003 + 2001 2004 return 0; 2002 2005 } 2003 2006 ··· 2978 2975 SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time); 2979 2976 if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME) 2980 2977 SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time); 2978 + if (nodefs && sb->s_flags & SB_I_VERSION) 2979 + SEQ_OPTS_PUTS("i_version"); 2981 2980 if (nodefs || sbi->s_stripe) 2982 2981 SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe); 2983 2982 if (nodefs || EXT4_MOUNT_DATA_FLAGS & ··· 5319 5314 sb->s_flags = (sb->s_flags & ~SB_POSIXACL) | 5320 5315 (test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0); 5321 5316 5322 - /* i_version is always enabled now */ 5323 - sb->s_flags |= SB_I_VERSION; 5324 - 5325 5317 /* HSM events are allowed by default. */ 5326 5318 sb->s_iflags |= SB_I_ALLOW_HSM; 5327 5319 ··· 5416 5414 err = ext4_load_and_init_journal(sb, es, ctx); 5417 5415 if (err) 5418 5416 goto failed_mount3a; 5417 + if (bdev_read_only(sb->s_bdev)) 5418 + needs_recovery = 0; 5419 5419 } else if (test_opt(sb, NOLOAD) && !sb_rdonly(sb) && 5420 5420 ext4_has_feature_journal_needs_recovery(sb)) { 5421 5421 ext4_msg(sb, KERN_ERR, "required journal recovery "
+1 -1
fs/fhandle.c
··· 402 402 if (retval) 403 403 return retval; 404 404 405 - CLASS(get_unused_fd, fd)(O_CLOEXEC); 405 + CLASS(get_unused_fd, fd)(open_flag); 406 406 if (fd < 0) 407 407 return fd; 408 408
+5 -4
fs/fs-writeback.c
··· 2608 2608 wakeup_bdi = inode_io_list_move_locked(inode, wb, 2609 2609 dirty_list); 2610 2610 2611 - spin_unlock(&wb->list_lock); 2612 - spin_unlock(&inode->i_lock); 2613 - trace_writeback_dirty_inode_enqueue(inode); 2614 - 2615 2611 /* 2616 2612 * If this is the first dirty inode for this bdi, 2617 2613 * we have to wake-up the corresponding bdi thread ··· 2617 2621 if (wakeup_bdi && 2618 2622 (wb->bdi->capabilities & BDI_CAP_WRITEBACK)) 2619 2623 wb_wakeup_delayed(wb); 2624 + 2625 + spin_unlock(&wb->list_lock); 2626 + spin_unlock(&inode->i_lock); 2627 + trace_writeback_dirty_inode_enqueue(inode); 2628 + 2620 2629 return; 2621 2630 } 2622 2631 }
-5
fs/fuse/inode.c
··· 289 289 } 290 290 } 291 291 292 - if (attr->blksize != 0) 293 - inode->i_blkbits = ilog2(attr->blksize); 294 - else 295 - inode->i_blkbits = inode->i_sb->s_blocksize_bits; 296 - 297 292 /* 298 293 * Don't set the sticky bit in i_mode, unless we want the VFS 299 294 * to check permissions. This prevents failures due to the
+7 -7
fs/iomap/direct-io.c
··· 363 363 if (iomap->flags & IOMAP_F_SHARED) 364 364 dio->flags |= IOMAP_DIO_COW; 365 365 366 - if (iomap->flags & IOMAP_F_NEW) { 366 + if (iomap->flags & IOMAP_F_NEW) 367 367 need_zeroout = true; 368 - } else if (iomap->type == IOMAP_MAPPED) { 369 - if (iomap_dio_can_use_fua(iomap, dio)) 370 - bio_opf |= REQ_FUA; 371 - else 372 - dio->flags &= ~IOMAP_DIO_WRITE_THROUGH; 373 - } 368 + else if (iomap->type == IOMAP_MAPPED && 369 + iomap_dio_can_use_fua(iomap, dio)) 370 + bio_opf |= REQ_FUA; 371 + 372 + if (!(bio_opf & REQ_FUA)) 373 + dio->flags &= ~IOMAP_DIO_WRITE_THROUGH; 374 374 375 375 /* 376 376 * We can only do deferred completion for pure overwrites that
+1
fs/jbd2/checkpoint.c
··· 285 285 retry: 286 286 if (batch_count) 287 287 __flush_batch(journal, &batch_count); 288 + cond_resched(); 288 289 spin_lock(&journal->j_list_lock); 289 290 goto restart; 290 291 }
+2 -2
fs/kernfs/inode.c
··· 142 142 struct kernfs_node *kn = kernfs_dentry_node(dentry); 143 143 struct kernfs_iattrs *attrs; 144 144 145 - attrs = kernfs_iattrs_noalloc(kn); 145 + attrs = kernfs_iattrs(kn); 146 146 if (!attrs) 147 - return -ENODATA; 147 + return -ENOMEM; 148 148 149 149 return simple_xattr_list(d_inode(dentry), &attrs->xattrs, buf, size); 150 150 }
+44 -32
fs/namespace.c
··· 1197 1197 1198 1198 if (!mnt_ns_attached(mnt)) { 1199 1199 for (struct mount *m = mnt; m; m = next_mnt(m, mnt)) 1200 - if (unlikely(mnt_ns_attached(m))) 1201 - m = skip_mnt_tree(m); 1202 - else 1203 - mnt_add_to_ns(n, m); 1200 + mnt_add_to_ns(n, m); 1204 1201 n->nr_mounts += n->pending_mounts; 1205 1202 n->pending_mounts = 0; 1206 1203 } ··· 2701 2704 lock_mnt_tree(child); 2702 2705 q = __lookup_mnt(&child->mnt_parent->mnt, 2703 2706 child->mnt_mountpoint); 2707 + commit_tree(child); 2704 2708 if (q) { 2705 2709 struct mountpoint *mp = root.mp; 2706 2710 struct mount *r = child; ··· 2711 2713 mp = shorter; 2712 2714 mnt_change_mountpoint(r, mp, q); 2713 2715 } 2714 - commit_tree(child); 2715 2716 } 2716 2717 unpin_mountpoint(&root); 2717 2718 unlock_mount_hash(); ··· 2859 2862 return attach_recursive_mnt(mnt, p, mp); 2860 2863 } 2861 2864 2865 + static int may_change_propagation(const struct mount *m) 2866 + { 2867 + struct mnt_namespace *ns = m->mnt_ns; 2868 + 2869 + // it must be mounted in some namespace 2870 + if (IS_ERR_OR_NULL(ns)) // is_mounted() 2871 + return -EINVAL; 2872 + // and the caller must be admin in userns of that namespace 2873 + if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN)) 2874 + return -EPERM; 2875 + return 0; 2876 + } 2877 + 2862 2878 /* 2863 2879 * Sanity check the flags to change_mnt_propagation. 
2864 2880 */ ··· 2908 2898 return -EINVAL; 2909 2899 2910 2900 namespace_lock(); 2911 - if (!check_mnt(mnt)) { 2912 - err = -EINVAL; 2901 + err = may_change_propagation(mnt); 2902 + if (err) 2913 2903 goto out_unlock; 2914 - } 2904 + 2915 2905 if (type == MS_SHARED) { 2916 2906 err = invent_group_ids(mnt, recurse); 2917 2907 if (err) ··· 3357 3347 3358 3348 namespace_lock(); 3359 3349 3360 - err = -EINVAL; 3361 - /* To and From must be mounted */ 3362 - if (!is_mounted(&from->mnt)) 3350 + err = may_change_propagation(from); 3351 + if (err) 3363 3352 goto out; 3364 - if (!is_mounted(&to->mnt)) 3365 - goto out; 3366 - 3367 - err = -EPERM; 3368 - /* We should be allowed to modify mount namespaces of both mounts */ 3369 - if (!ns_capable(from->mnt_ns->user_ns, CAP_SYS_ADMIN)) 3370 - goto out; 3371 - if (!ns_capable(to->mnt_ns->user_ns, CAP_SYS_ADMIN)) 3353 + err = may_change_propagation(to); 3354 + if (err) 3372 3355 goto out; 3373 3356 3374 3357 err = -EINVAL; ··· 4554 4551 if (flags & MOVE_MOUNT_SET_GROUP) mflags |= MNT_TREE_PROPAGATION; 4555 4552 if (flags & MOVE_MOUNT_BENEATH) mflags |= MNT_TREE_BENEATH; 4556 4553 4557 - lflags = 0; 4558 - if (flags & MOVE_MOUNT_F_SYMLINKS) lflags |= LOOKUP_FOLLOW; 4559 - if (flags & MOVE_MOUNT_F_AUTOMOUNTS) lflags |= LOOKUP_AUTOMOUNT; 4560 4554 uflags = 0; 4561 - if (flags & MOVE_MOUNT_F_EMPTY_PATH) uflags = AT_EMPTY_PATH; 4562 - from_name = getname_maybe_null(from_pathname, uflags); 4563 - if (IS_ERR(from_name)) 4564 - return PTR_ERR(from_name); 4555 + if (flags & MOVE_MOUNT_T_EMPTY_PATH) 4556 + uflags = AT_EMPTY_PATH; 4565 4557 4566 - lflags = 0; 4567 - if (flags & MOVE_MOUNT_T_SYMLINKS) lflags |= LOOKUP_FOLLOW; 4568 - if (flags & MOVE_MOUNT_T_AUTOMOUNTS) lflags |= LOOKUP_AUTOMOUNT; 4569 - uflags = 0; 4570 - if (flags & MOVE_MOUNT_T_EMPTY_PATH) uflags = AT_EMPTY_PATH; 4571 4558 to_name = getname_maybe_null(to_pathname, uflags); 4572 4559 if (IS_ERR(to_name)) 4573 4560 return PTR_ERR(to_name); ··· 4570 4577 to_path = 
fd_file(f_to)->f_path; 4571 4578 path_get(&to_path); 4572 4579 } else { 4580 + lflags = 0; 4581 + if (flags & MOVE_MOUNT_T_SYMLINKS) 4582 + lflags |= LOOKUP_FOLLOW; 4583 + if (flags & MOVE_MOUNT_T_AUTOMOUNTS) 4584 + lflags |= LOOKUP_AUTOMOUNT; 4573 4585 ret = filename_lookup(to_dfd, to_name, lflags, &to_path, NULL); 4574 4586 if (ret) 4575 4587 return ret; 4576 4588 } 4589 + 4590 + uflags = 0; 4591 + if (flags & MOVE_MOUNT_F_EMPTY_PATH) 4592 + uflags = AT_EMPTY_PATH; 4593 + 4594 + from_name = getname_maybe_null(from_pathname, uflags); 4595 + if (IS_ERR(from_name)) 4596 + return PTR_ERR(from_name); 4577 4597 4578 4598 if (!from_name && from_dfd >= 0) { 4579 4599 CLASS(fd_raw, f_from)(from_dfd); ··· 4596 4590 return vfs_move_mount(&fd_file(f_from)->f_path, &to_path, mflags); 4597 4591 } 4598 4592 4593 + lflags = 0; 4594 + if (flags & MOVE_MOUNT_F_SYMLINKS) 4595 + lflags |= LOOKUP_FOLLOW; 4596 + if (flags & MOVE_MOUNT_F_AUTOMOUNTS) 4597 + lflags |= LOOKUP_AUTOMOUNT; 4599 4598 ret = filename_lookup(from_dfd, from_name, lflags, &from_path, NULL); 4600 4599 if (ret) 4601 4600 return ret; ··· 5187 5176 int ret; 5188 5177 struct mount_kattr kattr = {}; 5189 5178 5190 - kattr.kflags = MOUNT_KATTR_IDMAP_REPLACE; 5179 + if (flags & OPEN_TREE_CLONE) 5180 + kattr.kflags = MOUNT_KATTR_IDMAP_REPLACE; 5191 5181 if (flags & AT_RECURSIVE) 5192 5182 kattr.kflags |= MOUNT_KATTR_RECURSE; 5193 5183
+3 -1
fs/netfs/read_collect.c
··· 281 281 } else if (test_bit(NETFS_RREQ_SHORT_TRANSFER, &rreq->flags)) { 282 282 notes |= MADE_PROGRESS; 283 283 } else { 284 - if (!stream->failed) 284 + if (!stream->failed) { 285 285 stream->transferred += transferred; 286 + stream->transferred_valid = true; 287 + } 286 288 if (front->transferred < front->len) 287 289 set_bit(NETFS_RREQ_SHORT_TRANSFER, &rreq->flags); 288 290 notes |= MADE_PROGRESS;
+8 -2
fs/netfs/write_collect.c
··· 254 254 if (front->start + front->transferred > stream->collected_to) { 255 255 stream->collected_to = front->start + front->transferred; 256 256 stream->transferred = stream->collected_to - wreq->start; 257 + stream->transferred_valid = true; 257 258 notes |= MADE_PROGRESS; 258 259 } 259 260 if (test_bit(NETFS_SREQ_FAILED, &front->flags)) { ··· 357 356 { 358 357 struct netfs_inode *ictx = netfs_inode(wreq->inode); 359 358 size_t transferred; 359 + bool transferred_valid = false; 360 360 int s; 361 361 362 362 _enter("R=%x", wreq->debug_id); ··· 378 376 continue; 379 377 if (!list_empty(&stream->subrequests)) 380 378 return false; 381 - if (stream->transferred < transferred) 379 + if (stream->transferred_valid && 380 + stream->transferred < transferred) { 382 381 transferred = stream->transferred; 382 + transferred_valid = true; 383 + } 383 384 } 384 385 385 386 /* Okay, declare that all I/O is complete. */ 386 - wreq->transferred = transferred; 387 + if (transferred_valid) 388 + wreq->transferred = transferred; 387 389 trace_netfs_rreq(wreq, netfs_rreq_trace_write_done); 388 390 389 391 if (wreq->io_streams[1].active &&
+2 -2
fs/netfs/write_issue.c
··· 118 118 wreq->io_streams[0].prepare_write = ictx->ops->prepare_write; 119 119 wreq->io_streams[0].issue_write = ictx->ops->issue_write; 120 120 wreq->io_streams[0].collected_to = start; 121 - wreq->io_streams[0].transferred = LONG_MAX; 121 + wreq->io_streams[0].transferred = 0; 122 122 123 123 wreq->io_streams[1].stream_nr = 1; 124 124 wreq->io_streams[1].source = NETFS_WRITE_TO_CACHE; 125 125 wreq->io_streams[1].collected_to = start; 126 - wreq->io_streams[1].transferred = LONG_MAX; 126 + wreq->io_streams[1].transferred = 0; 127 127 if (fscache_resources_valid(&wreq->cache_resources)) { 128 128 wreq->io_streams[1].avail = true; 129 129 wreq->io_streams[1].active = true;
+1 -1
fs/overlayfs/dir.c
··· 225 225 struct ovl_cattr *attr) 226 226 { 227 227 struct dentry *ret; 228 - inode_lock(workdir->d_inode); 228 + inode_lock_nested(workdir->d_inode, I_MUTEX_PARENT); 229 229 ret = ovl_create_real(ofs, workdir, 230 230 ovl_lookup_temp(ofs, workdir), attr); 231 231 inode_unlock(workdir->d_inode);
+2 -1
fs/overlayfs/util.c
··· 1552 1552 int ovl_parent_lock(struct dentry *parent, struct dentry *child) 1553 1553 { 1554 1554 inode_lock_nested(parent->d_inode, I_MUTEX_PARENT); 1555 - if (!child || child->d_parent == parent) 1555 + if (!child || 1556 + (!d_unhashed(child) && child->d_parent == parent)) 1556 1557 return 0; 1557 1558 1558 1559 inode_unlock(parent->d_inode);
+1 -1
fs/pidfs.c
··· 296 296 static long pidfd_info(struct file *file, unsigned int cmd, unsigned long arg) 297 297 { 298 298 struct pidfd_info __user *uinfo = (struct pidfd_info __user *)arg; 299 + struct task_struct *task __free(put_task) = NULL; 299 300 struct pid *pid = pidfd_pid(file); 300 301 size_t usize = _IOC_SIZE(cmd); 301 302 struct pidfd_info kinfo = {}; 302 303 struct pidfs_exit_info *exit_info; 303 304 struct user_namespace *user_ns; 304 - struct task_struct *task; 305 305 struct pidfs_attr *attr; 306 306 const struct cred *c; 307 307 __u64 mask;
+6 -4
fs/pnode.c
··· 111 111 return; 112 112 } 113 113 if (IS_MNT_SHARED(mnt)) { 114 - m = propagation_source(mnt); 114 + if (type == MS_SLAVE || !hlist_empty(&mnt->mnt_slave_list)) 115 + m = propagation_source(mnt); 115 116 if (list_empty(&mnt->mnt_share)) { 116 117 mnt_release_group_id(mnt); 117 118 } else { ··· 638 637 } 639 638 640 639 // now to_umount consists of all acceptable candidates 641 - // deal with reparenting of remaining overmounts on those 640 + // deal with reparenting of surviving overmounts on those 642 641 list_for_each_entry(m, &to_umount, mnt_list) { 643 - if (m->overmount) 644 - reparent(m->overmount); 642 + struct mount *over = m->overmount; 643 + if (over && !will_be_unmounted(over)) 644 + reparent(over); 645 645 } 646 646 647 647 // and fold them into the set
+18 -29
fs/smb/client/cifs_spnego.c
··· 124 124 dp = description; 125 125 /* start with version and hostname portion of UNC string */ 126 126 spnego_key = ERR_PTR(-EINVAL); 127 - sprintf(dp, "ver=0x%x;host=%s;", CIFS_SPNEGO_UPCALL_VERSION, 128 - hostname); 129 - dp = description + strlen(description); 127 + dp += sprintf(dp, "ver=0x%x;host=%s;", CIFS_SPNEGO_UPCALL_VERSION, 128 + hostname); 130 129 131 130 /* add the server address */ 132 131 if (server->dstaddr.ss_family == AF_INET) 133 - sprintf(dp, "ip4=%pI4", &sa->sin_addr); 132 + dp += sprintf(dp, "ip4=%pI4", &sa->sin_addr); 134 133 else if (server->dstaddr.ss_family == AF_INET6) 135 - sprintf(dp, "ip6=%pI6", &sa6->sin6_addr); 134 + dp += sprintf(dp, "ip6=%pI6", &sa6->sin6_addr); 136 135 else 137 136 goto out; 138 137 139 - dp = description + strlen(description); 140 - 141 138 /* for now, only sec=krb5 and sec=mskrb5 and iakerb are valid */ 142 139 if (server->sec_kerberos) 143 - sprintf(dp, ";sec=krb5"); 140 + dp += sprintf(dp, ";sec=krb5"); 144 141 else if (server->sec_mskerberos) 145 - sprintf(dp, ";sec=mskrb5"); 142 + dp += sprintf(dp, ";sec=mskrb5"); 146 143 else if (server->sec_iakerb) 147 - sprintf(dp, ";sec=iakerb"); 144 + dp += sprintf(dp, ";sec=iakerb"); 148 145 else { 149 146 cifs_dbg(VFS, "unknown or missing server auth type, use krb5\n"); 150 - sprintf(dp, ";sec=krb5"); 147 + dp += sprintf(dp, ";sec=krb5"); 151 148 } 152 149 153 - dp = description + strlen(description); 154 - sprintf(dp, ";uid=0x%x", 155 - from_kuid_munged(&init_user_ns, sesInfo->linux_uid)); 150 + dp += sprintf(dp, ";uid=0x%x", 151 + from_kuid_munged(&init_user_ns, sesInfo->linux_uid)); 156 152 157 - dp = description + strlen(description); 158 - sprintf(dp, ";creduid=0x%x", 153 + dp += sprintf(dp, ";creduid=0x%x", 159 154 from_kuid_munged(&init_user_ns, sesInfo->cred_uid)); 160 155 161 - if (sesInfo->user_name) { 162 - dp = description + strlen(description); 163 - sprintf(dp, ";user=%s", sesInfo->user_name); 164 - } 156 + if (sesInfo->user_name) 157 + dp += 
sprintf(dp, ";user=%s", sesInfo->user_name); 165 158 166 - dp = description + strlen(description); 167 - sprintf(dp, ";pid=0x%x", current->pid); 159 + dp += sprintf(dp, ";pid=0x%x", current->pid); 168 160 169 - if (sesInfo->upcall_target == UPTARGET_MOUNT) { 170 - dp = description + strlen(description); 171 - sprintf(dp, ";upcall_target=mount"); 172 - } else { 173 - dp = description + strlen(description); 174 - sprintf(dp, ";upcall_target=app"); 175 - } 161 + if (sesInfo->upcall_target == UPTARGET_MOUNT) 162 + dp += sprintf(dp, ";upcall_target=mount"); 163 + else 164 + dp += sprintf(dp, ";upcall_target=app"); 176 165 177 166 cifs_dbg(FYI, "key description = %s\n", description); 178 167 saved_cred = override_creds(spnego_cred);
+2 -2
fs/smb/client/cifsfs.h
··· 145 145 #endif /* CONFIG_CIFS_NFSD_EXPORT */ 146 146 147 147 /* when changing internal version - update following two lines at same time */ 148 - #define SMB3_PRODUCT_BUILD 55 149 - #define CIFS_VERSION "2.55" 148 + #define SMB3_PRODUCT_BUILD 56 149 + #define CIFS_VERSION "2.56" 150 150 #endif /* _CIFSFS_H */
+21
fs/smb/client/cifsglob.h
··· 1732 1732 int mid_rc; /* rc for MID_RC */ 1733 1733 __le16 command; /* smb command code */ 1734 1734 unsigned int optype; /* operation type */ 1735 + spinlock_t mid_lock; 1735 1736 bool wait_cancelled:1; /* Cancelled while waiting for response */ 1736 1737 bool deleted_from_q:1; /* Whether Mid has been dequeued frem pending_mid_q */ 1737 1738 bool large_buf:1; /* if valid response, is pointer to large buf */ ··· 2037 2036 * cifsFileInfo->file_info_lock cifsFileInfo->count cifs_new_fileinfo 2038 2037 * ->invalidHandle initiate_cifs_search 2039 2038 * ->oplock_break_cancelled 2039 + * mid_q_entry->mid_lock mid_q_entry->callback alloc_mid 2040 + * smb2_mid_entry_alloc 2041 + * (Any fields of mid_q_entry that will need protection) 2040 2042 ****************************************************************************/ 2041 2043 2042 2044 #ifdef DECLARE_GLOBALS_HERE ··· 2377 2373 } 2378 2374 } 2379 2375 return ret; 2376 + } 2377 + 2378 + /* 2379 + * Execute mid callback atomically - ensures callback runs exactly once 2380 + * and prevents sleeping in atomic context. 2381 + */ 2382 + static inline void mid_execute_callback(struct mid_q_entry *mid) 2383 + { 2384 + void (*callback)(struct mid_q_entry *mid); 2385 + 2386 + spin_lock(&mid->mid_lock); 2387 + callback = mid->callback; 2388 + mid->callback = NULL; /* Mark as executed, */ 2389 + spin_unlock(&mid->mid_lock); 2390 + 2391 + if (callback) 2392 + callback(mid); 2380 2393 } 2381 2394 2382 2395 #define CIFS_REPARSE_SUPPORT(tcon) \
+9 -10
fs/smb/client/cifstransport.c
··· 46 46 temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS); 47 47 memset(temp, 0, sizeof(struct mid_q_entry)); 48 48 kref_init(&temp->refcount); 49 + spin_lock_init(&temp->mid_lock); 49 50 temp->mid = get_mid(smb_buffer); 50 51 temp->pid = current->pid; 51 52 temp->command = cpu_to_le16(smb_buffer->Command); ··· 346 345 rc = wait_for_response(server, midQ); 347 346 if (rc != 0) { 348 347 send_cancel(server, &rqst, midQ); 349 - spin_lock(&server->mid_queue_lock); 350 - if (midQ->mid_state == MID_REQUEST_SUBMITTED || 351 - midQ->mid_state == MID_RESPONSE_RECEIVED) { 348 + spin_lock(&midQ->mid_lock); 349 + if (midQ->callback) { 352 350 /* no longer considered to be "in-flight" */ 353 351 midQ->callback = release_mid; 354 - spin_unlock(&server->mid_queue_lock); 352 + spin_unlock(&midQ->mid_lock); 355 353 add_credits(server, &credits, 0); 356 354 return rc; 357 355 } 358 - spin_unlock(&server->mid_queue_lock); 356 + spin_unlock(&midQ->mid_lock); 359 357 } 360 358 361 359 rc = cifs_sync_mid_result(midQ, server); ··· 527 527 rc = wait_for_response(server, midQ); 528 528 if (rc) { 529 529 send_cancel(server, &rqst, midQ); 530 - spin_lock(&server->mid_queue_lock); 531 - if (midQ->mid_state == MID_REQUEST_SUBMITTED || 532 - midQ->mid_state == MID_RESPONSE_RECEIVED) { 530 + spin_lock(&midQ->mid_lock); 531 + if (midQ->callback) { 533 532 /* no longer considered to be "in-flight" */ 534 533 midQ->callback = release_mid; 535 - spin_unlock(&server->mid_queue_lock); 534 + spin_unlock(&midQ->mid_lock); 536 535 return rc; 537 536 } 538 - spin_unlock(&server->mid_queue_lock); 537 + spin_unlock(&midQ->mid_lock); 539 538 } 540 539 541 540 /* We got the response - restart system call. */
+16 -45
fs/smb/client/compress.c
··· 155 155 } 156 156 157 157 /* 158 - * TODO: 159 - * Support other iter types, if required. 160 - * Only ITER_XARRAY is supported for now. 158 + * Collect some 2K samples with 2K gaps between. 161 159 */ 162 - static int collect_sample(const struct iov_iter *iter, ssize_t max, u8 *sample) 160 + static int collect_sample(const struct iov_iter *source, ssize_t max, u8 *sample) 163 161 { 164 - struct folio *folios[16], *folio; 165 - unsigned int nr, i, j, npages; 166 - loff_t start = iter->xarray_start + iter->iov_offset; 167 - pgoff_t last, index = start / PAGE_SIZE; 168 - size_t len, off, foff; 169 - void *p; 170 - int s = 0; 162 + struct iov_iter iter = *source; 163 + size_t s = 0; 171 164 172 - last = (start + max - 1) / PAGE_SIZE; 173 - do { 174 - nr = xa_extract(iter->xarray, (void **)folios, index, last, ARRAY_SIZE(folios), 175 - XA_PRESENT); 176 - if (nr == 0) 177 - return -EIO; 165 + while (iov_iter_count(&iter) >= SZ_2K) { 166 + size_t part = umin(umin(iov_iter_count(&iter), SZ_2K), max); 167 + size_t n; 178 168 179 - for (i = 0; i < nr; i++) { 180 - folio = folios[i]; 181 - npages = folio_nr_pages(folio); 182 - foff = start - folio_pos(folio); 183 - off = foff % PAGE_SIZE; 169 + n = copy_from_iter(sample + s, part, &iter); 170 + if (n != part) 171 + return -EFAULT; 184 172 185 - for (j = foff / PAGE_SIZE; j < npages; j++) { 186 - size_t len2; 173 + s += n; 174 + max -= n; 187 175 188 - len = min_t(size_t, max, PAGE_SIZE - off); 189 - len2 = min_t(size_t, len, SZ_2K); 176 + if (iov_iter_count(&iter) < PAGE_SIZE - SZ_2K) 177 + break; 190 178 191 - p = kmap_local_page(folio_page(folio, j)); 192 - memcpy(&sample[s], p, len2); 193 - kunmap_local(p); 194 - 195 - s += len2; 196 - 197 - if (len2 < SZ_2K || s >= max - SZ_2K) 198 - return s; 199 - 200 - max -= len; 201 - if (max <= 0) 202 - return s; 203 - 204 - start += len; 205 - off = 0; 206 - index++; 207 - } 208 - } 209 - } while (nr == ARRAY_SIZE(folios)); 179 + iov_iter_advance(&iter, SZ_2K); 180 + } 210 181 
211 182 return s; 212 183 }
+4 -5
fs/smb/client/connect.c
··· 335 335 cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__); 336 336 list_for_each_entry_safe(mid, nmid, &retry_list, qhead) { 337 337 list_del_init(&mid->qhead); 338 - mid->callback(mid); 338 + mid_execute_callback(mid); 339 339 release_mid(mid); 340 340 } 341 341 ··· 919 919 list_del_init(&mid->qhead); 920 920 mid->mid_rc = mid_rc; 921 921 mid->mid_state = MID_RC; 922 - mid->callback(mid); 922 + mid_execute_callback(mid); 923 923 release_mid(mid); 924 924 } 925 925 ··· 1117 1117 mid_entry = list_entry(tmp, struct mid_q_entry, qhead); 1118 1118 cifs_dbg(FYI, "Callback mid %llu\n", mid_entry->mid); 1119 1119 list_del_init(&mid_entry->qhead); 1120 - mid_entry->callback(mid_entry); 1120 + mid_execute_callback(mid_entry); 1121 1121 release_mid(mid_entry); 1122 1122 } 1123 1123 /* 1/8th of sec is more than enough time for them to exit */ ··· 1394 1394 } 1395 1395 1396 1396 if (!mids[i]->multiRsp || mids[i]->multiEnd) 1397 - mids[i]->callback(mids[i]); 1397 + mid_execute_callback(mids[i]); 1398 1398 1399 1399 release_mid(mids[i]); 1400 1400 } else if (server->ops->is_oplock_break && ··· 4205 4205 return 0; 4206 4206 } 4207 4207 4208 - server->lstrp = jiffies; 4209 4208 server->tcpStatus = CifsInNegotiate; 4210 4209 server->neg_start = jiffies; 4211 4210 spin_unlock(&server->srv_lock);
+32 -2
fs/smb/client/inode.c
··· 1943 1943 struct cifs_sb_info *cifs_sb = CIFS_SB(sb); 1944 1944 struct tcon_link *tlink; 1945 1945 struct cifs_tcon *tcon; 1946 + __u32 dosattr = 0, origattr = 0; 1946 1947 struct TCP_Server_Info *server; 1947 1948 struct iattr *attrs = NULL; 1948 - __u32 dosattr = 0, origattr = 0; 1949 + bool rehash = false; 1949 1950 1950 1951 cifs_dbg(FYI, "cifs_unlink, dir=0x%p, dentry=0x%p\n", dir, dentry); 1951 1952 1952 1953 if (unlikely(cifs_forced_shutdown(cifs_sb))) 1953 1954 return -EIO; 1955 + 1956 + /* Unhash dentry in advance to prevent any concurrent opens */ 1957 + spin_lock(&dentry->d_lock); 1958 + if (!d_unhashed(dentry)) { 1959 + __d_drop(dentry); 1960 + rehash = true; 1961 + } 1962 + spin_unlock(&dentry->d_lock); 1954 1963 1955 1964 tlink = cifs_sb_tlink(cifs_sb); 1956 1965 if (IS_ERR(tlink)) ··· 2012 2003 cifs_drop_nlink(inode); 2013 2004 } 2014 2005 } else if (rc == -ENOENT) { 2015 - d_drop(dentry); 2006 + if (simple_positive(dentry)) 2007 + d_delete(dentry); 2016 2008 } else if (rc == -EBUSY) { 2017 2009 if (server->ops->rename_pending_delete) { 2018 2010 rc = server->ops->rename_pending_delete(full_path, ··· 2066 2056 kfree(attrs); 2067 2057 free_xid(xid); 2068 2058 cifs_put_tlink(tlink); 2059 + if (rehash) 2060 + d_rehash(dentry); 2069 2061 return rc; 2070 2062 } 2071 2063 ··· 2474 2462 struct cifs_sb_info *cifs_sb; 2475 2463 struct tcon_link *tlink; 2476 2464 struct cifs_tcon *tcon; 2465 + bool rehash = false; 2477 2466 unsigned int xid; 2478 2467 int rc, tmprc; 2479 2468 int retry_count = 0; ··· 2489 2476 cifs_sb = CIFS_SB(source_dir->i_sb); 2490 2477 if (unlikely(cifs_forced_shutdown(cifs_sb))) 2491 2478 return -EIO; 2479 + 2480 + /* 2481 + * Prevent any concurrent opens on the target by unhashing the dentry. 2482 + * VFS already unhashes the target when renaming directories. 
2483 + */ 2484 + if (d_is_positive(target_dentry) && !d_is_dir(target_dentry)) { 2485 + if (!d_unhashed(target_dentry)) { 2486 + d_drop(target_dentry); 2487 + rehash = true; 2488 + } 2489 + } 2492 2490 2493 2491 tlink = cifs_sb_tlink(cifs_sb); 2494 2492 if (IS_ERR(tlink)) ··· 2542 2518 } 2543 2519 } 2544 2520 2521 + if (!rc) 2522 + rehash = false; 2545 2523 /* 2546 2524 * No-replace is the natural behavior for CIFS, so skip unlink hacks. 2547 2525 */ ··· 2602 2576 goto cifs_rename_exit; 2603 2577 rc = cifs_do_rename(xid, source_dentry, from_name, 2604 2578 target_dentry, to_name); 2579 + if (!rc) 2580 + rehash = false; 2605 2581 } 2606 2582 2607 2583 /* force revalidate to go get info when needed */ 2608 2584 CIFS_I(source_dir)->time = CIFS_I(target_dir)->time = 0; 2609 2585 2610 2586 cifs_rename_exit: 2587 + if (rehash) 2588 + d_rehash(target_dentry); 2611 2589 kfree(info_buf_source); 2612 2590 free_dentry_path(page2); 2613 2591 free_dentry_path(page1);
+12 -3
fs/smb/client/smb2ops.c
··· 772 772 bytes_left -= sizeof(*p); 773 773 break; 774 774 } 775 + /* Validate that Next doesn't point beyond the buffer */ 776 + if (next > bytes_left) { 777 + cifs_dbg(VFS, "%s: invalid Next pointer %zu > %zd\n", 778 + __func__, next, bytes_left); 779 + rc = -EINVAL; 780 + goto out; 781 + } 775 782 p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next); 776 783 bytes_left -= next; 777 784 } ··· 790 783 } 791 784 792 785 /* Azure rounds the buffer size up 8, to a 16 byte boundary */ 793 - if ((bytes_left > 8) || p->Next) 786 + if ((bytes_left > 8) || 787 + (bytes_left >= offsetof(struct network_interface_info_ioctl_rsp, Next) 788 + + sizeof(p->Next) && p->Next)) 794 789 cifs_dbg(VFS, "%s: incomplete interface info\n", __func__); 795 790 796 791 ses->iface_last_update = jiffies; ··· 4814 4805 dw->server->ops->is_network_name_deleted(dw->buf, 4815 4806 dw->server); 4816 4807 4817 - mid->callback(mid); 4808 + mid_execute_callback(mid); 4818 4809 } else { 4819 4810 spin_lock(&dw->server->srv_lock); 4820 4811 if (dw->server->tcpStatus == CifsNeedReconnect) { ··· 4822 4813 mid->mid_state = MID_RETRY_NEEDED; 4823 4814 spin_unlock(&dw->server->mid_queue_lock); 4824 4815 spin_unlock(&dw->server->srv_lock); 4825 - mid->callback(mid); 4816 + mid_execute_callback(mid); 4826 4817 } else { 4827 4818 spin_lock(&dw->server->mid_queue_lock); 4828 4819 mid->mid_state = MID_REQUEST_SUBMITTED;
+1
fs/smb/client/smb2transport.c
··· 771 771 temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS); 772 772 memset(temp, 0, sizeof(struct mid_q_entry)); 773 773 kref_init(&temp->refcount); 774 + spin_lock_init(&temp->mid_lock); 774 775 temp->mid = le64_to_cpu(shdr->MessageId); 775 776 temp->credits = credits > 0 ? credits : 1; 776 777 temp->pid = current->pid;
+5 -5
fs/smb/client/smbdirect.c
··· 1337 1337 log_rdma_event(INFO, "cancelling idle timer\n"); 1338 1338 cancel_delayed_work_sync(&info->idle_timer_work); 1339 1339 1340 - log_rdma_event(INFO, "wait for all send posted to IB to finish\n"); 1341 - wait_event(info->wait_send_pending, 1342 - atomic_read(&info->send_pending) == 0); 1343 - 1344 1340 /* It's not possible for upper layer to get to reassembly */ 1345 1341 log_rdma_event(INFO, "drain the reassembly queue\n"); 1346 1342 do { ··· 1982 1986 */ 1983 1987 1984 1988 wait_event(info->wait_send_pending, 1985 - atomic_read(&info->send_pending) == 0); 1989 + atomic_read(&info->send_pending) == 0 || 1990 + sc->status != SMBDIRECT_SOCKET_CONNECTED); 1991 + 1992 + if (sc->status != SMBDIRECT_SOCKET_CONNECTED && rc == 0) 1993 + rc = -EAGAIN; 1986 1994 1987 1995 return rc; 1988 1996 }
+3 -4
fs/smb/client/transport.c
··· 1005 1005 cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n", 1006 1006 midQ[i]->mid, le16_to_cpu(midQ[i]->command)); 1007 1007 send_cancel(server, &rqst[i], midQ[i]); 1008 - spin_lock(&server->mid_queue_lock); 1008 + spin_lock(&midQ[i]->mid_lock); 1009 1009 midQ[i]->wait_cancelled = true; 1010 - if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED || 1011 - midQ[i]->mid_state == MID_RESPONSE_RECEIVED) { 1010 + if (midQ[i]->callback) { 1012 1011 midQ[i]->callback = cifs_cancelled_callback; 1013 1012 cancelled_mid[i] = true; 1014 1013 credits[i].value = 0; 1015 1014 } 1016 - spin_unlock(&server->mid_queue_lock); 1015 + spin_unlock(&midQ[i]->mid_lock); 1017 1016 } 1018 1017 } 1019 1018
+2 -1
fs/smb/server/connection.c
··· 504 504 { 505 505 mutex_lock(&init_lock); 506 506 ksmbd_tcp_destroy(); 507 - ksmbd_rdma_destroy(); 507 + ksmbd_rdma_stop_listening(); 508 508 stop_sessions(); 509 + ksmbd_rdma_destroy(); 509 510 mutex_unlock(&init_lock); 510 511 }
+6 -1
fs/smb/server/connection.h
··· 46 46 struct mutex srv_mutex; 47 47 int status; 48 48 unsigned int cli_cap; 49 - __be32 inet_addr; 49 + union { 50 + __be32 inet_addr; 51 + #if IS_ENABLED(CONFIG_IPV6) 52 + u8 inet6_addr[16]; 53 + #endif 54 + }; 50 55 char *request_buf; 51 56 struct ksmbd_transport *transport; 52 57 struct nls_table *local_nls;
+10 -3
fs/smb/server/oplock.c
··· 1102 1102 if (!atomic_inc_not_zero(&opinfo->refcount)) 1103 1103 continue; 1104 1104 1105 - if (ksmbd_conn_releasing(opinfo->conn)) 1105 + if (ksmbd_conn_releasing(opinfo->conn)) { 1106 + opinfo_put(opinfo); 1106 1107 continue; 1108 + } 1107 1109 1108 1110 oplock_break(opinfo, SMB2_OPLOCK_LEVEL_NONE, NULL); 1109 1111 opinfo_put(opinfo); ··· 1141 1139 if (!atomic_inc_not_zero(&opinfo->refcount)) 1142 1140 continue; 1143 1141 1144 - if (ksmbd_conn_releasing(opinfo->conn)) 1142 + if (ksmbd_conn_releasing(opinfo->conn)) { 1143 + opinfo_put(opinfo); 1145 1144 continue; 1145 + } 1146 + 1146 1147 oplock_break(opinfo, SMB2_OPLOCK_LEVEL_NONE, NULL); 1147 1148 opinfo_put(opinfo); 1148 1149 } ··· 1348 1343 if (!atomic_inc_not_zero(&brk_op->refcount)) 1349 1344 continue; 1350 1345 1351 - if (ksmbd_conn_releasing(brk_op->conn)) 1346 + if (ksmbd_conn_releasing(brk_op->conn)) { 1347 + opinfo_put(brk_op); 1352 1348 continue; 1349 + } 1353 1350 1354 1351 if (brk_op->is_lease && (brk_op->o_lease->state & 1355 1352 (~(SMB2_LEASE_READ_CACHING_LE |
+4 -1
fs/smb/server/transport_rdma.c
··· 2194 2194 return 0; 2195 2195 } 2196 2196 2197 - void ksmbd_rdma_destroy(void) 2197 + void ksmbd_rdma_stop_listening(void) 2198 2198 { 2199 2199 if (!smb_direct_listener.cm_id) 2200 2200 return; ··· 2203 2203 rdma_destroy_id(smb_direct_listener.cm_id); 2204 2204 2205 2205 smb_direct_listener.cm_id = NULL; 2206 + } 2206 2207 2208 + void ksmbd_rdma_destroy(void) 2209 + { 2207 2210 if (smb_direct_wq) { 2208 2211 destroy_workqueue(smb_direct_wq); 2209 2212 smb_direct_wq = NULL;
+3 -1
fs/smb/server/transport_rdma.h
··· 54 54 55 55 #ifdef CONFIG_SMB_SERVER_SMBDIRECT 56 56 int ksmbd_rdma_init(void); 57 + void ksmbd_rdma_stop_listening(void); 57 58 void ksmbd_rdma_destroy(void); 58 59 bool ksmbd_rdma_capable_netdev(struct net_device *netdev); 59 60 void init_smbd_max_io_size(unsigned int sz); 60 61 unsigned int get_smbd_max_read_write_size(void); 61 62 #else 62 63 static inline int ksmbd_rdma_init(void) { return 0; } 63 - static inline int ksmbd_rdma_destroy(void) { return 0; } 64 + static inline void ksmbd_rdma_stop_listening(void) { } 65 + static inline void ksmbd_rdma_destroy(void) { } 64 66 static inline bool ksmbd_rdma_capable_netdev(struct net_device *netdev) { return false; } 65 67 static inline void init_smbd_max_io_size(unsigned int sz) { } 66 68 static inline unsigned int get_smbd_max_read_write_size(void) { return 0; }
+23 -3
fs/smb/server/transport_tcp.c
··· 85 85 return NULL; 86 86 } 87 87 88 + #if IS_ENABLED(CONFIG_IPV6) 89 + if (client_sk->sk->sk_family == AF_INET6) 90 + memcpy(&conn->inet6_addr, &client_sk->sk->sk_v6_daddr, 16); 91 + else 92 + conn->inet_addr = inet_sk(client_sk->sk)->inet_daddr; 93 + #else 88 94 conn->inet_addr = inet_sk(client_sk->sk)->inet_daddr; 95 + #endif 89 96 conn->transport = KSMBD_TRANS(t); 90 97 KSMBD_TRANS(t)->conn = conn; 91 98 KSMBD_TRANS(t)->ops = &ksmbd_tcp_transport_ops; ··· 236 229 { 237 230 struct socket *client_sk = NULL; 238 231 struct interface *iface = (struct interface *)p; 239 - struct inet_sock *csk_inet; 240 232 struct ksmbd_conn *conn; 241 233 int ret; 242 234 ··· 258 252 /* 259 253 * Limits repeated connections from clients with the same IP. 260 254 */ 261 - csk_inet = inet_sk(client_sk->sk); 262 255 down_read(&conn_list_lock); 263 256 list_for_each_entry(conn, &conn_list, conns_list) 264 - if (csk_inet->inet_daddr == conn->inet_addr) { 257 + #if IS_ENABLED(CONFIG_IPV6) 258 + if (client_sk->sk->sk_family == AF_INET6) { 259 + if (memcmp(&client_sk->sk->sk_v6_daddr, 260 + &conn->inet6_addr, 16) == 0) { 261 + ret = -EAGAIN; 262 + break; 263 + } 264 + } else if (inet_sk(client_sk->sk)->inet_daddr == 265 + conn->inet_addr) { 265 266 ret = -EAGAIN; 266 267 break; 267 268 } 269 + #else 270 + if (inet_sk(client_sk->sk)->inet_daddr == 271 + conn->inet_addr) { 272 + ret = -EAGAIN; 273 + break; 274 + } 275 + #endif 268 276 up_read(&conn_list_lock); 269 277 if (ret == -EAGAIN) 270 278 continue;
+3
fs/splice.c
··· 739 739 sd.pos = kiocb.ki_pos; 740 740 if (ret <= 0) 741 741 break; 742 + WARN_ONCE(ret > sd.total_len - left, 743 + "Splice Exceeded! ret=%zd tot=%zu left=%zu\n", 744 + ret, sd.total_len, left); 742 745 743 746 sd.num_spliced += ret; 744 747 sd.total_len -= ret;
+1 -1
fs/xfs/scrub/trace.h
··· 479 479 __field(xfs_exntst_t, state) 480 480 ), 481 481 TP_fast_assign( 482 - __entry->dev = cursor->sc->ip->i_mount->m_super->s_dev; 482 + __entry->dev = cursor->sc->mp->m_super->s_dev; 483 483 __entry->dqtype = cursor->dqtype; 484 484 __entry->ino = cursor->quota_ip->i_ino; 485 485 __entry->cur_id = cursor->id;
+3 -3
fs/xfs/xfs_file.c
··· 1101 1101 if (xfs_is_shutdown(ip->i_mount)) 1102 1102 return -EIO; 1103 1103 1104 - if (IS_DAX(inode)) 1105 - return xfs_file_dax_write(iocb, from); 1106 - 1107 1104 if (iocb->ki_flags & IOCB_ATOMIC) { 1108 1105 if (ocount < xfs_get_atomic_write_min(ip)) 1109 1106 return -EINVAL; ··· 1112 1115 if (ret) 1113 1116 return ret; 1114 1117 } 1118 + 1119 + if (IS_DAX(inode)) 1120 + return xfs_file_dax_write(iocb, from); 1115 1121 1116 1122 if (iocb->ki_flags & IOCB_DIRECT) { 1117 1123 /*
+11
fs/xfs/xfs_inode.h
··· 358 358 359 359 static inline bool xfs_inode_can_hw_atomic_write(const struct xfs_inode *ip) 360 360 { 361 + if (IS_DAX(VFS_IC(ip))) 362 + return false; 363 + 361 364 return xfs_inode_buftarg(ip)->bt_awu_max > 0; 365 + } 366 + 367 + static inline bool xfs_inode_can_sw_atomic_write(const struct xfs_inode *ip) 368 + { 369 + if (IS_DAX(VFS_IC(ip))) 370 + return false; 371 + 372 + return xfs_can_sw_atomic_write(ip->i_mount); 362 373 } 363 374 364 375 /*
+1 -1
fs/xfs/xfs_ioctl.c
··· 219 219 else if (XFS_INO_TO_AGNO(mp, breq->startino) < hdr->agno) 220 220 return -EINVAL; 221 221 222 - breq->flags |= XFS_IBULK_SAME_AG; 222 + breq->iwalk_flags |= XFS_IWALK_SAME_AG; 223 223 224 224 /* Asking for an inode past the end of the AG? We're done! */ 225 225 if (XFS_INO_TO_AGNO(mp, breq->startino) > hdr->agno)
+3 -2
fs/xfs/xfs_iops.c
··· 616 616 * write of exactly one single fsblock if the bdev will make that 617 617 * guarantee for us. 618 618 */ 619 - if (xfs_inode_can_hw_atomic_write(ip) || xfs_can_sw_atomic_write(mp)) 619 + if (xfs_inode_can_hw_atomic_write(ip) || 620 + xfs_inode_can_sw_atomic_write(ip)) 620 621 return mp->m_sb.sb_blocksize; 621 622 622 623 return 0; ··· 634 633 * write of exactly one single fsblock if the bdev will make that 635 634 * guarantee for us. 636 635 */ 637 - if (!xfs_can_sw_atomic_write(mp)) { 636 + if (!xfs_inode_can_sw_atomic_write(ip)) { 638 637 if (xfs_inode_can_hw_atomic_write(ip)) 639 638 return mp->m_sb.sb_blocksize; 640 639 return 0;
+2 -6
fs/xfs/xfs_itable.c
··· 307 307 .breq = breq, 308 308 }; 309 309 struct xfs_trans *tp; 310 - unsigned int iwalk_flags = 0; 311 310 int error; 312 311 313 312 if (breq->idmap != &nop_mnt_idmap) { ··· 327 328 * locking abilities to detect cycles in the inobt without deadlocking. 328 329 */ 329 330 tp = xfs_trans_alloc_empty(breq->mp); 330 - if (breq->flags & XFS_IBULK_SAME_AG) 331 - iwalk_flags |= XFS_IWALK_SAME_AG; 332 - 333 - error = xfs_iwalk(breq->mp, tp, breq->startino, iwalk_flags, 331 + error = xfs_iwalk(breq->mp, tp, breq->startino, breq->iwalk_flags, 334 332 xfs_bulkstat_iwalk, breq->icount, &bc); 335 333 xfs_trans_cancel(tp); 336 334 kfree(bc.buf); ··· 453 457 * locking abilities to detect cycles in the inobt without deadlocking. 454 458 */ 455 459 tp = xfs_trans_alloc_empty(breq->mp); 456 - error = xfs_inobt_walk(breq->mp, tp, breq->startino, breq->flags, 460 + error = xfs_inobt_walk(breq->mp, tp, breq->startino, breq->iwalk_flags, 457 461 xfs_inumbers_walk, breq->icount, &ic); 458 462 xfs_trans_cancel(tp); 459 463
+4 -6
fs/xfs/xfs_itable.h
··· 13 13 xfs_ino_t startino; /* start with this inode */ 14 14 unsigned int icount; /* number of elements in ubuffer */ 15 15 unsigned int ocount; /* number of records returned */ 16 - unsigned int flags; /* see XFS_IBULK_FLAG_* */ 16 + unsigned int flags; /* XFS_IBULK_FLAG_* */ 17 + unsigned int iwalk_flags; /* XFS_IWALK_FLAG_* */ 17 18 }; 18 19 19 - /* Only iterate within the same AG as startino */ 20 - #define XFS_IBULK_SAME_AG (1U << 0) 21 - 22 20 /* Fill out the bs_extents64 field if set. */ 23 - #define XFS_IBULK_NREXT64 (1U << 1) 21 + #define XFS_IBULK_NREXT64 (1U << 0) 24 22 25 23 /* Signal that we can return metadata directories. */ 26 - #define XFS_IBULK_METADIR (1U << 2) 24 + #define XFS_IBULK_METADIR (1U << 1) 27 25 28 26 /* 29 27 * Advance the user buffer pointer by one record of the given size. If the
+19
fs/xfs/xfs_mount.c
··· 779 779 return -EINVAL; 780 780 } 781 781 782 + if (xfs_has_reflink(mp)) 783 + goto set_limit; 784 + 785 + if (new_max_fsbs == 1) { 786 + if (mp->m_ddev_targp->bt_awu_max || 787 + (mp->m_rtdev_targp && mp->m_rtdev_targp->bt_awu_max)) { 788 + } else { 789 + xfs_warn(mp, 790 + "cannot support atomic writes of size %lluk with no reflink or HW support", 791 + new_max_bytes >> 10); 792 + return -EINVAL; 793 + } 794 + } else { 795 + xfs_warn(mp, 796 + "cannot support atomic writes of size %lluk with no reflink support", 797 + new_max_bytes >> 10); 798 + return -EINVAL; 799 + } 800 + 782 801 set_limit: 783 802 error = xfs_calc_atomic_write_reservation(mp, new_max_fsbs); 784 803 if (error) {
+1
fs/xfs/xfs_trace.h
··· 455 455 xfs_extlen_t len), \ 456 456 TP_ARGS(oz, rgbno, len)) 457 457 DEFINE_ZONE_ALLOC_EVENT(xfs_zone_record_blocks); 458 + DEFINE_ZONE_ALLOC_EVENT(xfs_zone_skip_blocks); 458 459 DEFINE_ZONE_ALLOC_EVENT(xfs_zone_alloc_blocks); 459 460 460 461 TRACE_EVENT(xfs_zone_gc_select_victim,
+1 -1
fs/xfs/xfs_trans.c
··· 253 253 * by doing GFP_KERNEL allocations inside sb_start_intwrite(). 254 254 */ 255 255 retry: 256 - WARN_ON(mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE); 257 256 tp = __xfs_trans_alloc(mp, flags); 257 + WARN_ON(mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE); 258 258 error = xfs_trans_reserve(tp, resp, blocks, rtextents); 259 259 if (error == -ENOSPC && want_retry) { 260 260 xfs_trans_cancel(tp);
+29 -13
fs/xfs/xfs_zone_alloc.c
··· 166 166 static void 167 167 xfs_zone_record_blocks( 168 168 struct xfs_trans *tp, 169 - xfs_fsblock_t fsbno, 170 - xfs_filblks_t len, 171 169 struct xfs_open_zone *oz, 172 - bool used) 170 + xfs_fsblock_t fsbno, 171 + xfs_filblks_t len) 173 172 { 174 173 struct xfs_mount *mp = tp->t_mountp; 175 174 struct xfs_rtgroup *rtg = oz->oz_rtg; ··· 178 179 179 180 xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP); 180 181 xfs_rtgroup_trans_join(tp, rtg, XFS_RTGLOCK_RMAP); 181 - if (used) { 182 - rmapip->i_used_blocks += len; 183 - ASSERT(rmapip->i_used_blocks <= rtg_blocks(rtg)); 184 - } else { 185 - xfs_add_frextents(mp, len); 186 - } 182 + rmapip->i_used_blocks += len; 183 + ASSERT(rmapip->i_used_blocks <= rtg_blocks(rtg)); 187 184 oz->oz_written += len; 188 185 if (oz->oz_written == rtg_blocks(rtg)) 189 186 xfs_open_zone_mark_full(oz); 190 187 xfs_trans_log_inode(tp, rmapip, XFS_ILOG_CORE); 188 + } 189 + 190 + /* 191 + * Called for blocks that have been written to disk, but not actually linked to 192 + * an inode, which can happen when garbage collection races with user data 193 + * writes to a file. 194 + */ 195 + static void 196 + xfs_zone_skip_blocks( 197 + struct xfs_open_zone *oz, 198 + xfs_filblks_t len) 199 + { 200 + struct xfs_rtgroup *rtg = oz->oz_rtg; 201 + 202 + trace_xfs_zone_skip_blocks(oz, 0, len); 203 + 204 + xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP); 205 + oz->oz_written += len; 206 + if (oz->oz_written == rtg_blocks(rtg)) 207 + xfs_open_zone_mark_full(oz); 208 + xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_RMAP); 209 + 210 + xfs_add_frextents(rtg_mount(rtg), len); 191 211 } 192 212 193 213 static int ··· 268 250 } 269 251 } 270 252 271 - xfs_zone_record_blocks(tp, new->br_startblock, new->br_blockcount, oz, 272 - true); 253 + xfs_zone_record_blocks(tp, oz, new->br_startblock, new->br_blockcount); 273 254 274 255 /* Map the new blocks into the data fork. 
*/ 275 256 xfs_bmap_map_extent(tp, ip, XFS_DATA_FORK, new); ··· 276 259 277 260 skip: 278 261 trace_xfs_reflink_cow_remap_skip(ip, new); 279 - xfs_zone_record_blocks(tp, new->br_startblock, new->br_blockcount, oz, 280 - false); 262 + xfs_zone_skip_blocks(oz, new->br_blockcount); 281 263 return 0; 282 264 } 283 265
+48
include/drm/drm_bridge.h
··· 866 866 struct drm_connector *connector, 867 867 bool enable, int direction); 868 868 869 + /** 870 + * @hdmi_cec_init: 871 + * 872 + * Initialize CEC part of the bridge. 873 + * 874 + * This callback is optional, it can be implemented by bridges that 875 + * set the @DRM_BRIDGE_OP_HDMI_CEC_ADAPTER flag in their 876 + * &drm_bridge->ops. 877 + * 878 + * Returns: 879 + * 0 on success, a negative error code otherwise 880 + */ 869 881 int (*hdmi_cec_init)(struct drm_bridge *bridge, 870 882 struct drm_connector *connector); 871 883 884 + /** 885 + * @hdmi_cec_enable: 886 + * 887 + * Enable or disable the CEC adapter inside the bridge. 888 + * 889 + * This callback is optional, it can be implemented by bridges that 890 + * set the @DRM_BRIDGE_OP_HDMI_CEC_ADAPTER flag in their 891 + * &drm_bridge->ops. 892 + * 893 + * Returns: 894 + * 0 on success, a negative error code otherwise 895 + */ 872 896 int (*hdmi_cec_enable)(struct drm_bridge *bridge, bool enable); 873 897 898 + /** 899 + * @hdmi_cec_log_addr: 900 + * 901 + * Set the logical address of the CEC adapter inside the bridge. 902 + * 903 + * This callback is optional, it can be implemented by bridges that 904 + * set the @DRM_BRIDGE_OP_HDMI_CEC_ADAPTER flag in their 905 + * &drm_bridge->ops. 906 + * 907 + * Returns: 908 + * 0 on success, a negative error code otherwise 909 + */ 874 910 int (*hdmi_cec_log_addr)(struct drm_bridge *bridge, u8 logical_addr); 875 911 912 + /** 913 + * @hdmi_cec_transmit: 914 + * 915 + * Transmit the message using the CEC adapter inside the bridge. 916 + * 917 + * This callback is optional, it can be implemented by bridges that 918 + * set the @DRM_BRIDGE_OP_HDMI_CEC_ADAPTER flag in their 919 + * &drm_bridge->ops. 920 + * 921 + * Returns: 922 + * 0 on success, a negative error code otherwise 923 + */ 876 924 int (*hdmi_cec_transmit)(struct drm_bridge *bridge, u8 attempts, 877 925 u32 signal_free_time, struct cec_msg *msg); 878 926
+1
include/linux/cpuhotplug.h
··· 168 168 CPUHP_AP_QCOM_TIMER_STARTING, 169 169 CPUHP_AP_TEGRA_TIMER_STARTING, 170 170 CPUHP_AP_ARMADA_TIMER_STARTING, 171 + CPUHP_AP_LOONGARCH_ARCH_TIMER_STARTING, 171 172 CPUHP_AP_MIPS_GIC_TIMER_STARTING, 172 173 CPUHP_AP_ARC_TIMER_STARTING, 173 174 CPUHP_AP_REALTEK_TIMER_STARTING,
+1 -1
include/linux/export.h
··· 91 91 #define EXPORT_SYMBOL_NS(sym, ns) __EXPORT_SYMBOL(sym, "", ns) 92 92 #define EXPORT_SYMBOL_NS_GPL(sym, ns) __EXPORT_SYMBOL(sym, "GPL", ns) 93 93 94 - #define EXPORT_SYMBOL_GPL_FOR_MODULES(sym, mods) __EXPORT_SYMBOL(sym, "GPL", "module:" mods) 94 + #define EXPORT_SYMBOL_FOR_MODULES(sym, mods) __EXPORT_SYMBOL(sym, "GPL", "module:" mods) 95 95 96 96 #endif /* _LINUX_EXPORT_H */
+4
include/linux/firewire.h
··· 341 341 u64 length; 342 342 fw_address_callback_t address_callback; 343 343 void *callback_data; 344 + 345 + // Only for core functions. 344 346 struct list_head link; 347 + struct kref kref; 348 + struct completion done; 345 349 }; 346 350 347 351 struct fw_address_region {
+1
include/linux/netfs.h
··· 150 150 bool active; /* T if stream is active */ 151 151 bool need_retry; /* T if this stream needs retrying */ 152 152 bool failed; /* T if this stream failed */ 153 + bool transferred_valid; /* T if ->transferred is valid */ 153 154 }; 154 155 155 156 /*
+17 -12
include/linux/sched.h
··· 2152 2152 2153 2153 static inline void __set_task_blocked_on(struct task_struct *p, struct mutex *m) 2154 2154 { 2155 + struct mutex *blocked_on = READ_ONCE(p->blocked_on); 2156 + 2155 2157 WARN_ON_ONCE(!m); 2156 2158 /* The task should only be setting itself as blocked */ 2157 2159 WARN_ON_ONCE(p != current); ··· 2164 2162 * with a different mutex. Note, setting it to the same 2165 2163 * lock repeatedly is ok. 2166 2164 */ 2167 - WARN_ON_ONCE(p->blocked_on && p->blocked_on != m); 2168 - p->blocked_on = m; 2165 + WARN_ON_ONCE(blocked_on && blocked_on != m); 2166 + WRITE_ONCE(p->blocked_on, m); 2169 2167 } 2170 2168 2171 2169 static inline void set_task_blocked_on(struct task_struct *p, struct mutex *m) ··· 2176 2174 2177 2175 static inline void __clear_task_blocked_on(struct task_struct *p, struct mutex *m) 2178 2176 { 2179 - WARN_ON_ONCE(!m); 2180 - /* Currently we serialize blocked_on under the mutex::wait_lock */ 2181 - lockdep_assert_held_once(&m->wait_lock); 2182 - /* 2183 - * There may be cases where we re-clear already cleared 2184 - * blocked_on relationships, but make sure we are not 2185 - * clearing the relationship with a different lock. 2186 - */ 2187 - WARN_ON_ONCE(m && p->blocked_on && p->blocked_on != m); 2188 - p->blocked_on = NULL; 2177 + if (m) { 2178 + struct mutex *blocked_on = READ_ONCE(p->blocked_on); 2179 + 2180 + /* Currently we serialize blocked_on under the mutex::wait_lock */ 2181 + lockdep_assert_held_once(&m->wait_lock); 2182 + /* 2183 + * There may be cases where we re-clear already cleared 2184 + * blocked_on relationships, but make sure we are not 2185 + * clearing the relationship with a different lock. 2186 + */ 2187 + WARN_ON_ONCE(blocked_on && blocked_on != m); 2188 + } 2189 + WRITE_ONCE(p->blocked_on, NULL); 2189 2190 } 2190 2191 2191 2192 static inline void clear_task_blocked_on(struct task_struct *p, struct mutex *m)
+2 -2
include/net/bluetooth/bluetooth.h
··· 647 647 #if IS_ENABLED(CONFIG_BT_LE) 648 648 int iso_init(void); 649 649 int iso_exit(void); 650 - bool iso_enabled(void); 650 + bool iso_inited(void); 651 651 #else 652 652 static inline int iso_init(void) 653 653 { ··· 659 659 return 0; 660 660 } 661 661 662 - static inline bool iso_enabled(void) 662 + static inline bool iso_inited(void) 663 663 { 664 664 return false; 665 665 }
+38 -6
include/net/bluetooth/hci_core.h
··· 129 129 struct list_head list; 130 130 unsigned int acl_num; 131 131 unsigned int sco_num; 132 - unsigned int iso_num; 132 + unsigned int cis_num; 133 + unsigned int bis_num; 134 + unsigned int pa_num; 133 135 unsigned int le_num; 134 136 unsigned int le_num_peripheral; 135 137 }; ··· 1016 1014 h->sco_num++; 1017 1015 break; 1018 1016 case CIS_LINK: 1017 + h->cis_num++; 1018 + break; 1019 1019 case BIS_LINK: 1020 + h->bis_num++; 1021 + break; 1020 1022 case PA_LINK: 1021 - h->iso_num++; 1023 + h->pa_num++; 1022 1024 break; 1023 1025 } 1024 1026 } ··· 1048 1042 h->sco_num--; 1049 1043 break; 1050 1044 case CIS_LINK: 1045 + h->cis_num--; 1046 + break; 1051 1047 case BIS_LINK: 1048 + h->bis_num--; 1049 + break; 1052 1050 case PA_LINK: 1053 - h->iso_num--; 1051 + h->pa_num--; 1054 1052 break; 1055 1053 } 1056 1054 } ··· 1071 1061 case ESCO_LINK: 1072 1062 return h->sco_num; 1073 1063 case CIS_LINK: 1064 + return h->cis_num; 1074 1065 case BIS_LINK: 1066 + return h->bis_num; 1075 1067 case PA_LINK: 1076 - return h->iso_num; 1068 + return h->pa_num; 1077 1069 default: 1078 1070 return 0; 1079 1071 } ··· 1085 1073 { 1086 1074 struct hci_conn_hash *c = &hdev->conn_hash; 1087 1075 1088 - return c->acl_num + c->sco_num + c->le_num + c->iso_num; 1076 + return c->acl_num + c->sco_num + c->le_num + c->cis_num + c->bis_num + 1077 + c->pa_num; 1078 + } 1079 + 1080 + static inline unsigned int hci_iso_count(struct hci_dev *hdev) 1081 + { 1082 + struct hci_conn_hash *c = &hdev->conn_hash; 1083 + 1084 + return c->cis_num + c->bis_num; 1089 1085 } 1090 1086 1091 1087 static inline bool hci_conn_valid(struct hci_dev *hdev, struct hci_conn *conn) ··· 1935 1915 !hci_dev_test_flag(dev, HCI_RPA_EXPIRED)) 1936 1916 #define adv_rpa_valid(adv) (bacmp(&adv->random_addr, BDADDR_ANY) && \ 1937 1917 !adv->rpa_expired) 1918 + #define le_enabled(dev) (lmp_le_capable(dev) && \ 1919 + hci_dev_test_flag(dev, HCI_LE_ENABLED)) 1938 1920 1939 1921 #define scan_1m(dev) (((dev)->le_tx_def_phys & 
HCI_LE_SET_PHY_1M) || \ 1940 1922 ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_1M)) ··· 1954 1932 ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_CODED)) 1955 1933 1956 1934 #define ll_privacy_capable(dev) ((dev)->le_features[0] & HCI_LE_LL_PRIVACY) 1935 + #define ll_privacy_enabled(dev) (le_enabled(dev) && ll_privacy_capable(dev)) 1957 1936 1958 1937 #define privacy_mode_capable(dev) (ll_privacy_capable(dev) && \ 1959 1938 ((dev)->commands[39] & 0x04)) ··· 2004 1981 2005 1982 /* CIS Master/Slave and BIS support */ 2006 1983 #define iso_capable(dev) (cis_capable(dev) || bis_capable(dev)) 1984 + #define iso_enabled(dev) (le_enabled(dev) && iso_capable(dev)) 2007 1985 #define cis_capable(dev) \ 2008 1986 (cis_central_capable(dev) || cis_peripheral_capable(dev)) 1987 + #define cis_enabled(dev) (le_enabled(dev) && cis_capable(dev)) 2009 1988 #define cis_central_capable(dev) \ 2010 1989 ((dev)->le_features[3] & HCI_LE_CIS_CENTRAL) 1990 + #define cis_central_enabled(dev) \ 1991 + (le_enabled(dev) && cis_central_capable(dev)) 2011 1992 #define cis_peripheral_capable(dev) \ 2012 1993 ((dev)->le_features[3] & HCI_LE_CIS_PERIPHERAL) 1994 + #define cis_peripheral_enabled(dev) \ 1995 + (le_enabled(dev) && cis_peripheral_capable(dev)) 2013 1996 #define bis_capable(dev) ((dev)->le_features[3] & HCI_LE_ISO_BROADCASTER) 2014 - #define sync_recv_capable(dev) ((dev)->le_features[3] & HCI_LE_ISO_SYNC_RECEIVER) 1997 + #define bis_enabled(dev) (le_enabled(dev) && bis_capable(dev)) 1998 + #define sync_recv_capable(dev) \ 1999 + ((dev)->le_features[3] & HCI_LE_ISO_SYNC_RECEIVER) 2000 + #define sync_recv_enabled(dev) (le_enabled(dev) && sync_recv_capable(dev)) 2015 2001 2016 2002 #define mws_transport_config_capable(dev) (((dev)->commands[30] & 0x08) && \ 2017 2003 (!hci_test_quirk((dev), HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG)))
+1
include/net/bond_3ad.h
··· 307 307 struct slave *slave); 308 308 int bond_3ad_set_carrier(struct bonding *bond); 309 309 void bond_3ad_update_lacp_rate(struct bonding *bond); 310 + void bond_3ad_update_lacp_active(struct bonding *bond); 310 311 void bond_3ad_update_ad_actor_settings(struct bonding *bond); 311 312 int bond_3ad_stats_fill(struct sk_buff *skb, struct bond_3ad_stats *stats); 312 313 size_t bond_3ad_stats_size(void);
+8 -3
include/net/sch_generic.h
··· 1038 1038 skb = __skb_dequeue(&sch->gso_skb); 1039 1039 if (skb) { 1040 1040 sch->q.qlen--; 1041 + qdisc_qstats_backlog_dec(sch, skb); 1041 1042 return skb; 1042 1043 } 1043 - if (direct) 1044 - return __qdisc_dequeue_head(&sch->q); 1045 - else 1044 + if (direct) { 1045 + skb = __qdisc_dequeue_head(&sch->q); 1046 + if (skb) 1047 + qdisc_qstats_backlog_dec(sch, skb); 1048 + return skb; 1049 + } else { 1046 1050 return sch->dequeue(sch); 1051 + } 1047 1052 } 1048 1053 1049 1054 static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
+8
io_uring/io-wq.c
··· 357 357 worker = container_of(cb, struct io_worker, create_work); 358 358 wq = worker->wq; 359 359 acct = worker->acct; 360 + 361 + rcu_read_lock(); 362 + do_create = !io_acct_activate_free_worker(acct); 363 + rcu_read_unlock(); 364 + if (!do_create) 365 + goto no_need_create; 366 + 360 367 raw_spin_lock(&acct->workers_lock); 361 368 362 369 if (acct->nr_workers < acct->max_workers) { ··· 374 367 if (do_create) { 375 368 create_io_worker(wq, acct); 376 369 } else { 370 + no_need_create: 377 371 atomic_dec(&acct->nr_running); 378 372 io_worker_ref_put(wq); 379 373 }
+15 -12
io_uring/net.c
··· 494 494 return nbufs; 495 495 } 496 496 497 + static int io_net_kbuf_recyle(struct io_kiocb *req, 498 + struct io_async_msghdr *kmsg, int len) 499 + { 500 + req->flags |= REQ_F_BL_NO_RECYCLE; 501 + if (req->flags & REQ_F_BUFFERS_COMMIT) 502 + io_kbuf_commit(req, req->buf_list, len, io_bundle_nbufs(kmsg, len)); 503 + return IOU_RETRY; 504 + } 505 + 497 506 static inline bool io_send_finish(struct io_kiocb *req, int *ret, 498 507 struct io_async_msghdr *kmsg, 499 508 unsigned issue_flags) ··· 571 562 kmsg->msg.msg_controllen = 0; 572 563 kmsg->msg.msg_control = NULL; 573 564 sr->done_io += ret; 574 - req->flags |= REQ_F_BL_NO_RECYCLE; 575 - return -EAGAIN; 565 + return io_net_kbuf_recyle(req, kmsg, ret); 576 566 } 577 567 if (ret == -ERESTARTSYS) 578 568 ret = -EINTR; ··· 682 674 sr->len -= ret; 683 675 sr->buf += ret; 684 676 sr->done_io += ret; 685 - req->flags |= REQ_F_BL_NO_RECYCLE; 686 - return -EAGAIN; 677 + return io_net_kbuf_recyle(req, kmsg, ret); 687 678 } 688 679 if (ret == -ERESTARTSYS) 689 680 ret = -EINTR; ··· 1078 1071 } 1079 1072 if (ret > 0 && io_net_retry(sock, flags)) { 1080 1073 sr->done_io += ret; 1081 - req->flags |= REQ_F_BL_NO_RECYCLE; 1082 - return IOU_RETRY; 1074 + return io_net_kbuf_recyle(req, kmsg, ret); 1083 1075 } 1084 1076 if (ret == -ERESTARTSYS) 1085 1077 ret = -EINTR; ··· 1224 1218 sr->len -= ret; 1225 1219 sr->buf += ret; 1226 1220 sr->done_io += ret; 1227 - req->flags |= REQ_F_BL_NO_RECYCLE; 1228 - return -EAGAIN; 1221 + return io_net_kbuf_recyle(req, kmsg, ret); 1229 1222 } 1230 1223 if (ret == -ERESTARTSYS) 1231 1224 ret = -EINTR; ··· 1505 1500 zc->len -= ret; 1506 1501 zc->buf += ret; 1507 1502 zc->done_io += ret; 1508 - req->flags |= REQ_F_BL_NO_RECYCLE; 1509 - return -EAGAIN; 1503 + return io_net_kbuf_recyle(req, kmsg, ret); 1510 1504 } 1511 1505 if (ret == -ERESTARTSYS) 1512 1506 ret = -EINTR; ··· 1575 1571 1576 1572 if (ret > 0 && io_net_retry(sock, flags)) { 1577 1573 sr->done_io += ret; 1578 - req->flags |= 
REQ_F_BL_NO_RECYCLE; 1579 - return -EAGAIN; 1574 + return io_net_kbuf_recyle(req, kmsg, ret); 1580 1575 } 1581 1576 if (ret == -ERESTARTSYS) 1582 1577 ret = -EINTR;
+3 -3
kernel/futex/futex.h
··· 319 319 { 320 320 if (can_do_masked_user_access()) 321 321 to = masked_user_access_begin(to); 322 - else if (!user_read_access_begin(to, sizeof(*to))) 322 + else if (!user_write_access_begin(to, sizeof(*to))) 323 323 return -EFAULT; 324 324 unsafe_put_user(val, to, Efault); 325 - user_read_access_end(); 325 + user_write_access_end(); 326 326 return 0; 327 327 Efault: 328 - user_read_access_end(); 328 + user_write_access_end(); 329 329 return -EFAULT; 330 330 } 331 331
+5 -1
kernel/locking/ww_mutex.h
··· 342 342 * When waking up the task to wound, be sure to clear the 343 343 * blocked_on pointer. Otherwise we can see circular 344 344 * blocked_on relationships that can't resolve. 345 + * 346 + * NOTE: We pass NULL here instead of lock, because we 347 + * are waking the mutex owner, who may be currently 348 + * blocked on a different mutex. 345 349 */ 346 - __clear_task_blocked_on(owner, lock); 350 + __clear_task_blocked_on(owner, NULL); 347 351 wake_q_add(wake_q, owner); 348 352 } 349 353 return true;
+5 -1
kernel/signal.c
··· 4067 4067 { 4068 4068 struct pid *pid; 4069 4069 enum pid_type type; 4070 + int ret; 4070 4071 4071 4072 /* Enforce flags be set to 0 until we add an extension. */ 4072 4073 if (flags & ~PIDFD_SEND_SIGNAL_FLAGS) ··· 4109 4108 } 4110 4109 } 4111 4110 4112 - return do_pidfd_send_signal(pid, sig, type, info, flags); 4111 + ret = do_pidfd_send_signal(pid, sig, type, info, flags); 4112 + put_pid(pid); 4113 + 4114 + return ret; 4113 4115 } 4114 4116 4115 4117 static int
+1 -1
kernel/trace/trace.h
··· 2204 2204 static inline void sanitize_event_name(char *name) 2205 2205 { 2206 2206 while (*name++ != '\0') 2207 - if (*name == ':' || *name == '.') 2207 + if (*name == ':' || *name == '.' || *name == '*') 2208 2208 *name = '_'; 2209 2209 } 2210 2210
+5 -5
lib/crypto/Kconfig
··· 140 140 config CRYPTO_LIB_SHA1 141 141 tristate 142 142 help 143 - The SHA-1 library functions. Select this if your module uses any of 144 - the functions from <crypto/sha1.h>. 143 + The SHA-1 and HMAC-SHA1 library functions. Select this if your module 144 + uses any of the functions from <crypto/sha1.h>. 145 145 146 146 config CRYPTO_LIB_SHA1_ARCH 147 147 bool ··· 157 157 config CRYPTO_LIB_SHA256 158 158 tristate 159 159 help 160 - Enable the SHA-256 library interface. This interface may be fulfilled 161 - by either the generic implementation or an arch-specific one, if one 162 - is available and enabled. 160 + The SHA-224, SHA-256, HMAC-SHA224, and HMAC-SHA256 library functions. 161 + Select this if your module uses any of these functions from 162 + <crypto/sha2.h>. 163 163 164 164 config CRYPTO_LIB_SHA256_ARCH 165 165 bool
+4 -4
lib/crypto/Makefile
··· 100 100 libsha256-y += arm/sha256-ce.o arm/sha256-core.o 101 101 $(obj)/arm/sha256-core.S: $(src)/arm/sha256-armv4.pl 102 102 $(call cmd,perlasm) 103 - clean-files += arm/sha256-core.S 104 103 AFLAGS_arm/sha256-core.o += $(aflags-thumb2-y) 105 104 endif 106 105 ··· 107 108 libsha256-y += arm64/sha256-core.o 108 109 $(obj)/arm64/sha256-core.S: $(src)/arm64/sha2-armv8.pl 109 110 $(call cmd,perlasm_with_args) 110 - clean-files += arm64/sha256-core.S 111 111 libsha256-$(CONFIG_KERNEL_MODE_NEON) += arm64/sha256-ce.o 112 112 endif 113 113 ··· 130 132 libsha512-y += arm/sha512-core.o 131 133 $(obj)/arm/sha512-core.S: $(src)/arm/sha512-armv4.pl 132 134 $(call cmd,perlasm) 133 - clean-files += arm/sha512-core.S 134 135 AFLAGS_arm/sha512-core.o += $(aflags-thumb2-y) 135 136 endif 136 137 ··· 137 140 libsha512-y += arm64/sha512-core.o 138 141 $(obj)/arm64/sha512-core.S: $(src)/arm64/sha2-armv8.pl 139 142 $(call cmd,perlasm_with_args) 140 - clean-files += arm64/sha512-core.S 141 143 libsha512-$(CONFIG_KERNEL_MODE_NEON) += arm64/sha512-ce-core.o 142 144 endif 143 145 ··· 163 167 obj-$(CONFIG_RISCV) += riscv/ 164 168 obj-$(CONFIG_S390) += s390/ 165 169 obj-$(CONFIG_X86) += x86/ 170 + 171 + # clean-files must be defined unconditionally 172 + clean-files += arm/sha256-core.S arm/sha512-core.S 173 + clean-files += arm64/sha256-core.S arm64/sha512-core.S
+14 -3
net/bluetooth/hci_conn.c
··· 339 339 case BT_CODEC_TRANSPARENT: 340 340 if (!find_next_esco_param(conn, esco_param_msbc, 341 341 ARRAY_SIZE(esco_param_msbc))) 342 - return false; 342 + return -EINVAL; 343 + 343 344 param = &esco_param_msbc[conn->attempt - 1]; 344 345 cp.tx_coding_format.id = 0x03; 345 346 cp.rx_coding_format.id = 0x03; ··· 831 830 /* Check if ISO connection is a BIS and terminate advertising 832 831 * set and BIG if there are no other connections using it. 833 832 */ 834 - bis = hci_conn_hash_lookup_big(hdev, conn->iso_qos.bcast.big); 833 + bis = hci_conn_hash_lookup_big_state(hdev, 834 + conn->iso_qos.bcast.big, 835 + BT_CONNECTED, 836 + HCI_ROLE_MASTER); 837 + if (bis) 838 + return; 839 + 840 + bis = hci_conn_hash_lookup_big_state(hdev, 841 + conn->iso_qos.bcast.big, 842 + BT_CONNECT, 843 + HCI_ROLE_MASTER); 835 844 if (bis) 836 845 return; 837 846 ··· 2260 2249 * the start periodic advertising and create BIG commands have 2261 2250 * been queued 2262 2251 */ 2263 - hci_conn_hash_list_state(hdev, bis_mark_per_adv, PA_LINK, 2252 + hci_conn_hash_list_state(hdev, bis_mark_per_adv, BIS_LINK, 2264 2253 BT_BOUND, &data); 2265 2254 2266 2255 /* Queue start periodic advertising and create BIG */
+10 -5
net/bluetooth/hci_event.c
··· 6745 6745 qos->ucast.out.latency = 6746 6746 DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency), 6747 6747 1000); 6748 - qos->ucast.in.sdu = le16_to_cpu(ev->c_mtu); 6749 - qos->ucast.out.sdu = le16_to_cpu(ev->p_mtu); 6748 + qos->ucast.in.sdu = ev->c_bn ? le16_to_cpu(ev->c_mtu) : 0; 6749 + qos->ucast.out.sdu = ev->p_bn ? le16_to_cpu(ev->p_mtu) : 0; 6750 6750 qos->ucast.in.phy = ev->c_phy; 6751 6751 qos->ucast.out.phy = ev->p_phy; 6752 6752 break; ··· 6760 6760 qos->ucast.in.latency = 6761 6761 DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency), 6762 6762 1000); 6763 - qos->ucast.out.sdu = le16_to_cpu(ev->c_mtu); 6764 - qos->ucast.in.sdu = le16_to_cpu(ev->p_mtu); 6763 + qos->ucast.out.sdu = ev->c_bn ? le16_to_cpu(ev->c_mtu) : 0; 6764 + qos->ucast.in.sdu = ev->p_bn ? le16_to_cpu(ev->p_mtu) : 0; 6765 6765 qos->ucast.out.phy = ev->c_phy; 6766 6766 qos->ucast.in.phy = ev->p_phy; 6767 6767 break; ··· 6957 6957 continue; 6958 6958 } 6959 6959 6960 - if (ev->status != 0x42) 6960 + if (ev->status != 0x42) { 6961 6961 /* Mark PA sync as established */ 6962 6962 set_bit(HCI_CONN_PA_SYNC, &bis->flags); 6963 + /* Reset cleanup callback of PA Sync so it doesn't 6964 + * terminate the sync when deleting the connection. 6965 + */ 6966 + conn->cleanup = NULL; 6967 + } 6963 6968 6964 6969 bis->sync_handle = conn->sync_handle; 6965 6970 bis->iso_qos.bcast.big = ev->handle;
+16 -9
net/bluetooth/hci_sync.c
··· 3344 3344 * advertising data. This also applies to the case 3345 3345 * where BR/EDR was toggled during the AUTO_OFF phase. 3346 3346 */ 3347 - if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || 3347 + if (hci_dev_test_flag(hdev, HCI_ADVERTISING) && 3348 3348 list_empty(&hdev->adv_instances)) { 3349 3349 if (ext_adv_capable(hdev)) { 3350 3350 err = hci_setup_ext_adv_instance_sync(hdev, 0x00); ··· 4531 4531 { 4532 4532 struct hci_cp_le_set_host_feature cp; 4533 4533 4534 - if (!cis_capable(hdev)) 4534 + if (!iso_capable(hdev)) 4535 4535 return 0; 4536 4536 4537 4537 memset(&cp, 0, sizeof(cp)); 4538 4538 4539 4539 /* Connected Isochronous Channels (Host Support) */ 4540 4540 cp.bit_number = 32; 4541 - cp.bit_value = 1; 4541 + cp.bit_value = iso_enabled(hdev) ? 0x01 : 0x00; 4542 4542 4543 4543 return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_HOST_FEATURE, 4544 4544 sizeof(cp), &cp, HCI_CMD_TIMEOUT); ··· 6985 6985 6986 6986 hci_dev_lock(hdev); 6987 6987 6988 - hci_dev_clear_flag(hdev, HCI_PA_SYNC); 6989 - 6990 6988 if (!hci_conn_valid(hdev, conn)) 6991 6989 clear_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags); 6992 6990 ··· 7045 7047 /* SID has not been set listen for HCI_EV_LE_EXT_ADV_REPORT to update 7046 7048 * it. 
7047 7049 */ 7048 - if (conn->sid == HCI_SID_INVALID) 7049 - __hci_cmd_sync_status_sk(hdev, HCI_OP_NOP, 0, NULL, 7050 - HCI_EV_LE_EXT_ADV_REPORT, 7051 - conn->conn_timeout, NULL); 7050 + if (conn->sid == HCI_SID_INVALID) { 7051 + err = __hci_cmd_sync_status_sk(hdev, HCI_OP_NOP, 0, NULL, 7052 + HCI_EV_LE_EXT_ADV_REPORT, 7053 + conn->conn_timeout, NULL); 7054 + if (err == -ETIMEDOUT) 7055 + goto done; 7056 + } 7052 7057 7053 7058 memset(&cp, 0, sizeof(cp)); 7054 7059 cp.options = qos->bcast.options; ··· 7080 7079 if (err == -ETIMEDOUT) 7081 7080 __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC_CANCEL, 7082 7081 0, NULL, HCI_CMD_TIMEOUT); 7082 + 7083 + done: 7084 + hci_dev_clear_flag(hdev, HCI_PA_SYNC); 7085 + 7086 + /* Update passive scan since HCI_PA_SYNC flag has been cleared */ 7087 + hci_update_passive_scan_sync(hdev); 7083 7088 7084 7089 return err; 7085 7090 }
+8 -8
net/bluetooth/iso.c
··· 1347 1347 bacpy(&sa->iso_bdaddr, &iso_pi(sk)->dst); 1348 1348 sa->iso_bdaddr_type = iso_pi(sk)->dst_type; 1349 1349 1350 - if (hcon && hcon->type == BIS_LINK) { 1350 + if (hcon && (hcon->type == BIS_LINK || hcon->type == PA_LINK)) { 1351 1351 sa->iso_bc->bc_sid = iso_pi(sk)->bc_sid; 1352 1352 sa->iso_bc->bc_num_bis = iso_pi(sk)->bc_num_bis; 1353 1353 memcpy(sa->iso_bc->bc_bis, iso_pi(sk)->bc_bis, ··· 2483 2483 .create = iso_sock_create, 2484 2484 }; 2485 2485 2486 - static bool iso_inited; 2486 + static bool inited; 2487 2487 2488 - bool iso_enabled(void) 2488 + bool iso_inited(void) 2489 2489 { 2490 - return iso_inited; 2490 + return inited; 2491 2491 } 2492 2492 2493 2493 int iso_init(void) ··· 2496 2496 2497 2497 BUILD_BUG_ON(sizeof(struct sockaddr_iso) > sizeof(struct sockaddr)); 2498 2498 2499 - if (iso_inited) 2499 + if (inited) 2500 2500 return -EALREADY; 2501 2501 2502 2502 err = proto_register(&iso_proto, 0); ··· 2524 2524 iso_debugfs = debugfs_create_file("iso", 0444, bt_debugfs, 2525 2525 NULL, &iso_debugfs_fops); 2526 2526 2527 - iso_inited = true; 2527 + inited = true; 2528 2528 2529 2529 return 0; 2530 2530 ··· 2535 2535 2536 2536 int iso_exit(void) 2537 2537 { 2538 - if (!iso_inited) 2538 + if (!inited) 2539 2539 return -EALREADY; 2540 2540 2541 2541 bt_procfs_cleanup(&init_net, "iso"); ··· 2549 2549 2550 2550 proto_unregister(&iso_proto); 2551 2551 2552 - iso_inited = false; 2552 + inited = false; 2553 2553 2554 2554 return 0; 2555 2555 }
+6 -6
net/bluetooth/mgmt.c
··· 922 922 if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED)) 923 923 settings |= MGMT_SETTING_WIDEBAND_SPEECH; 924 924 925 - if (cis_central_capable(hdev)) 925 + if (cis_central_enabled(hdev)) 926 926 settings |= MGMT_SETTING_CIS_CENTRAL; 927 927 928 - if (cis_peripheral_capable(hdev)) 928 + if (cis_peripheral_enabled(hdev)) 929 929 settings |= MGMT_SETTING_CIS_PERIPHERAL; 930 930 931 - if (bis_capable(hdev)) 931 + if (bis_enabled(hdev)) 932 932 settings |= MGMT_SETTING_ISO_BROADCASTER; 933 933 934 - if (sync_recv_capable(hdev)) 934 + if (sync_recv_enabled(hdev)) 935 935 settings |= MGMT_SETTING_ISO_SYNC_RECEIVER; 936 936 937 - if (ll_privacy_capable(hdev)) 937 + if (ll_privacy_enabled(hdev)) 938 938 settings |= MGMT_SETTING_LL_PRIVACY; 939 939 940 940 return settings; ··· 4513 4513 } 4514 4514 4515 4515 if (IS_ENABLED(CONFIG_BT_LE)) { 4516 - flags = iso_enabled() ? BIT(0) : 0; 4516 + flags = iso_inited() ? BIT(0) : 0; 4517 4517 memcpy(rp->features[idx].uuid, iso_socket_uuid, 16); 4518 4518 rp->features[idx].flags = cpu_to_le32(flags); 4519 4519 idx++;
+16
net/bridge/br_multicast.c
··· 4815 4815 intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MIN; 4816 4816 } 4817 4817 4818 + if (intvl_jiffies > BR_MULTICAST_QUERY_INTVL_MAX) { 4819 + br_info(brmctx->br, 4820 + "trying to set multicast query interval above maximum, setting to %lu (%ums)\n", 4821 + jiffies_to_clock_t(BR_MULTICAST_QUERY_INTVL_MAX), 4822 + jiffies_to_msecs(BR_MULTICAST_QUERY_INTVL_MAX)); 4823 + intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MAX; 4824 + } 4825 + 4818 4826 brmctx->multicast_query_interval = intvl_jiffies; 4819 4827 } 4820 4828 ··· 4837 4829 jiffies_to_clock_t(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN), 4838 4830 jiffies_to_msecs(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN)); 4839 4831 intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MIN; 4832 + } 4833 + 4834 + if (intvl_jiffies > BR_MULTICAST_STARTUP_QUERY_INTVL_MAX) { 4835 + br_info(brmctx->br, 4836 + "trying to set multicast startup query interval above maximum, setting to %lu (%ums)\n", 4837 + jiffies_to_clock_t(BR_MULTICAST_STARTUP_QUERY_INTVL_MAX), 4838 + jiffies_to_msecs(BR_MULTICAST_STARTUP_QUERY_INTVL_MAX)); 4839 + intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MAX; 4840 4840 } 4841 4841 4842 4842 brmctx->multicast_startup_query_interval = intvl_jiffies;
+2
net/bridge/br_private.h
··· 31 31 #define BR_MULTICAST_DEFAULT_HASH_MAX 4096 32 32 #define BR_MULTICAST_QUERY_INTVL_MIN msecs_to_jiffies(1000) 33 33 #define BR_MULTICAST_STARTUP_QUERY_INTVL_MIN BR_MULTICAST_QUERY_INTVL_MIN 34 + #define BR_MULTICAST_QUERY_INTVL_MAX msecs_to_jiffies(86400000) /* 24 hours */ 35 + #define BR_MULTICAST_STARTUP_QUERY_INTVL_MAX BR_MULTICAST_QUERY_INTVL_MAX 34 36 35 37 #define BR_HWDOM_MAX BITS_PER_LONG 36 38
+12
net/core/dev.c
··· 3779 3779 features &= ~NETIF_F_TSO_MANGLEID; 3780 3780 } 3781 3781 3782 + /* NETIF_F_IPV6_CSUM does not support IPv6 extension headers, 3783 + * so neither does TSO that depends on it. 3784 + */ 3785 + if (features & NETIF_F_IPV6_CSUM && 3786 + (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6 || 3787 + (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 && 3788 + vlan_get_protocol(skb) == htons(ETH_P_IPV6))) && 3789 + skb_transport_header_was_set(skb) && 3790 + skb_network_header_len(skb) != sizeof(struct ipv6hdr) && 3791 + !ipv6_has_hopopt_jumbo(skb)) 3792 + features &= ~(NETIF_F_IPV6_CSUM | NETIF_F_TSO6 | NETIF_F_GSO_UDP_L4); 3793 + 3782 3794 return features; 3783 3795 } 3784 3796
+7 -1
net/hsr/hsr_slave.c
··· 63 63 skb_push(skb, ETH_HLEN); 64 64 skb_reset_mac_header(skb); 65 65 if ((!hsr->prot_version && protocol == htons(ETH_P_PRP)) || 66 - protocol == htons(ETH_P_HSR)) 66 + protocol == htons(ETH_P_HSR)) { 67 + if (!pskb_may_pull(skb, ETH_HLEN + HSR_HLEN)) { 68 + kfree_skb(skb); 69 + goto finish_consume; 70 + } 71 + 67 72 skb_set_network_header(skb, ETH_HLEN + HSR_HLEN); 73 + } 68 74 skb_reset_mac_len(skb); 69 75 70 76 /* Only the frames received over the interlink port will assign a
+2 -4
net/ipv4/netfilter/nf_reject_ipv4.c
··· 247 247 if (!oth) 248 248 return; 249 249 250 - if ((hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) && 251 - nf_reject_fill_skb_dst(oldskb) < 0) 250 + if (!skb_dst(oldskb) && nf_reject_fill_skb_dst(oldskb) < 0) 252 251 return; 253 252 254 253 if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) ··· 320 321 if (iph->frag_off & htons(IP_OFFSET)) 321 322 return; 322 323 323 - if ((hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) && 324 - nf_reject_fill_skb_dst(skb_in) < 0) 324 + if (!skb_dst(skb_in) && nf_reject_fill_skb_dst(skb_in) < 0) 325 325 return; 326 326 327 327 if (skb_csum_unnecessary(skb_in) ||
+2 -3
net/ipv6/netfilter/nf_reject_ipv6.c
··· 293 293 fl6.fl6_sport = otcph->dest; 294 294 fl6.fl6_dport = otcph->source; 295 295 296 - if (hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) { 296 + if (!skb_dst(oldskb)) { 297 297 nf_ip6_route(net, &dst, flowi6_to_flowi(&fl6), false); 298 298 if (!dst) 299 299 return; ··· 397 397 if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL) 398 398 skb_in->dev = net->loopback_dev; 399 399 400 - if ((hooknum == NF_INET_PRE_ROUTING || hooknum == NF_INET_INGRESS) && 401 - nf_reject6_fill_skb_dst(skb_in) < 0) 400 + if (!skb_dst(skb_in) && nf_reject6_fill_skb_dst(skb_in) < 0) 402 401 return; 403 402 404 403 icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
+5 -1
net/ipv6/seg6_hmac.c
··· 35 35 #include <net/xfrm.h> 36 36 37 37 #include <crypto/hash.h> 38 + #include <crypto/utils.h> 38 39 #include <net/seg6.h> 39 40 #include <net/genetlink.h> 40 41 #include <net/seg6_hmac.h> ··· 281 280 if (seg6_hmac_compute(hinfo, srh, &ipv6_hdr(skb)->saddr, hmac_output)) 282 281 return false; 283 282 284 - if (memcmp(hmac_output, tlv->hmac, SEG6_HMAC_FIELD_LEN) != 0) 283 + if (crypto_memneq(hmac_output, tlv->hmac, SEG6_HMAC_FIELD_LEN)) 285 284 return false; 286 285 287 286 return true; ··· 304 303 { 305 304 struct seg6_pernet_data *sdata = seg6_pernet(net); 306 305 int err; 306 + 307 + if (!__hmac_get_algo(hinfo->alg_id)) 308 + return -EINVAL; 307 309 308 310 err = rhashtable_lookup_insert_fast(&sdata->hmac_infos, &hinfo->node, 309 311 rht_params);
+4 -2
net/mptcp/options.c
··· 1118 1118 return hmac == mp_opt->ahmac; 1119 1119 } 1120 1120 1121 - /* Return false if a subflow has been reset, else return true */ 1121 + /* Return false in case of error (or subflow has been reset), 1122 + * else return true. 1123 + */ 1122 1124 bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb) 1123 1125 { 1124 1126 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); ··· 1224 1222 1225 1223 mpext = skb_ext_add(skb, SKB_EXT_MPTCP); 1226 1224 if (!mpext) 1227 - return true; 1225 + return false; 1228 1226 1229 1227 memset(mpext, 0, sizeof(*mpext)); 1230 1228
+12 -6
net/mptcp/pm.c
··· 274 274 add_timer); 275 275 struct mptcp_sock *msk = entry->sock; 276 276 struct sock *sk = (struct sock *)msk; 277 + unsigned int timeout; 277 278 278 279 pr_debug("msk=%p\n", msk); 279 280 ··· 292 291 goto out; 293 292 } 294 293 294 + timeout = mptcp_get_add_addr_timeout(sock_net(sk)); 295 + if (!timeout) 296 + goto out; 297 + 295 298 spin_lock_bh(&msk->pm.lock); 296 299 297 300 if (!mptcp_pm_should_add_signal_addr(msk)) { ··· 307 302 308 303 if (entry->retrans_times < ADD_ADDR_RETRANS_MAX) 309 304 sk_reset_timer(sk, timer, 310 - jiffies + mptcp_get_add_addr_timeout(sock_net(sk))); 305 + jiffies + timeout); 311 306 312 307 spin_unlock_bh(&msk->pm.lock); 313 308 ··· 349 344 struct mptcp_pm_add_entry *add_entry = NULL; 350 345 struct sock *sk = (struct sock *)msk; 351 346 struct net *net = sock_net(sk); 347 + unsigned int timeout; 352 348 353 349 lockdep_assert_held(&msk->pm.lock); 354 350 ··· 359 353 if (WARN_ON_ONCE(mptcp_pm_is_kernel(msk))) 360 354 return false; 361 355 362 - sk_reset_timer(sk, &add_entry->add_timer, 363 - jiffies + mptcp_get_add_addr_timeout(net)); 364 - return true; 356 + goto reset_timer; 365 357 } 366 358 367 359 add_entry = kmalloc(sizeof(*add_entry), GFP_ATOMIC); ··· 373 369 add_entry->retrans_times = 0; 374 370 375 371 timer_setup(&add_entry->add_timer, mptcp_pm_add_timer, 0); 376 - sk_reset_timer(sk, &add_entry->add_timer, 377 - jiffies + mptcp_get_add_addr_timeout(net)); 372 + reset_timer: 373 + timeout = mptcp_get_add_addr_timeout(net); 374 + if (timeout) 375 + sk_reset_timer(sk, &add_entry->add_timer, jiffies + timeout); 378 376 379 377 return true; 380 378 }
-1
net/mptcp/pm_kernel.c
··· 1085 1085 static void __reset_counters(struct pm_nl_pernet *pernet) 1086 1086 { 1087 1087 WRITE_ONCE(pernet->add_addr_signal_max, 0); 1088 - WRITE_ONCE(pernet->add_addr_accept_max, 0); 1089 1088 WRITE_ONCE(pernet->local_addr_max, 0); 1090 1089 pernet->addrs = 0; 1091 1090 }
+12 -2
net/sched/sch_cake.c
··· 1750 1750 ktime_t now = ktime_get(); 1751 1751 struct cake_tin_data *b; 1752 1752 struct cake_flow *flow; 1753 - u32 idx; 1753 + u32 idx, tin; 1754 1754 1755 1755 /* choose flow to insert into */ 1756 1756 idx = cake_classify(sch, &b, skb, q->flow_mode, &ret); ··· 1760 1760 __qdisc_drop(skb, to_free); 1761 1761 return ret; 1762 1762 } 1763 + tin = (u32)(b - q->tins); 1763 1764 idx--; 1764 1765 flow = &b->flows[idx]; 1765 1766 ··· 1928 1927 q->buffer_max_used = q->buffer_used; 1929 1928 1930 1929 if (q->buffer_used > q->buffer_limit) { 1930 + bool same_flow = false; 1931 1931 u32 dropped = 0; 1932 + u32 drop_id; 1932 1933 1933 1934 while (q->buffer_used > q->buffer_limit) { 1934 1935 dropped++; 1935 - cake_drop(sch, to_free); 1936 + drop_id = cake_drop(sch, to_free); 1937 + 1938 + if ((drop_id >> 16) == tin && 1939 + (drop_id & 0xFFFF) == idx) 1940 + same_flow = true; 1936 1941 } 1937 1942 b->drop_overlimit += dropped; 1943 + 1944 + if (same_flow) 1945 + return NET_XMIT_CN; 1938 1946 } 1939 1947 return NET_XMIT_SUCCESS; 1940 1948 }
+7 -5
net/sched/sch_codel.c
··· 101 101 static int codel_change(struct Qdisc *sch, struct nlattr *opt, 102 102 struct netlink_ext_ack *extack) 103 103 { 104 + unsigned int dropped_pkts = 0, dropped_bytes = 0; 104 105 struct codel_sched_data *q = qdisc_priv(sch); 105 106 struct nlattr *tb[TCA_CODEL_MAX + 1]; 106 - unsigned int qlen, dropped = 0; 107 107 int err; 108 108 109 109 err = nla_parse_nested_deprecated(tb, TCA_CODEL_MAX, opt, ··· 142 142 WRITE_ONCE(q->params.ecn, 143 143 !!nla_get_u32(tb[TCA_CODEL_ECN])); 144 144 145 - qlen = sch->q.qlen; 146 145 while (sch->q.qlen > sch->limit) { 147 146 struct sk_buff *skb = qdisc_dequeue_internal(sch, true); 148 147 149 - dropped += qdisc_pkt_len(skb); 150 - qdisc_qstats_backlog_dec(sch, skb); 148 + if (!skb) 149 + break; 150 + 151 + dropped_pkts++; 152 + dropped_bytes += qdisc_pkt_len(skb); 151 153 rtnl_qdisc_drop(skb, sch); 152 154 } 153 - qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped); 155 + qdisc_tree_reduce_backlog(sch, dropped_pkts, dropped_bytes); 154 156 155 157 sch_tree_unlock(sch); 156 158 return 0;
+3 -2
net/sched/sch_dualpi2.c
··· 927 927 928 928 q->sch = sch; 929 929 dualpi2_reset_default(sch); 930 - hrtimer_setup(&q->pi2_timer, dualpi2_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); 930 + hrtimer_setup(&q->pi2_timer, dualpi2_timer, CLOCK_MONOTONIC, 931 + HRTIMER_MODE_ABS_PINNED_SOFT); 931 932 932 933 if (opt && nla_len(opt)) { 933 934 err = dualpi2_change(sch, opt, extack); ··· 938 937 } 939 938 940 939 hrtimer_start(&q->pi2_timer, next_pi2_timeout(q), 941 - HRTIMER_MODE_ABS_PINNED); 940 + HRTIMER_MODE_ABS_PINNED_SOFT); 942 941 return 0; 943 942 } 944 943
+7 -5
net/sched/sch_fq.c
··· 1013 1013 static int fq_change(struct Qdisc *sch, struct nlattr *opt, 1014 1014 struct netlink_ext_ack *extack) 1015 1015 { 1016 + unsigned int dropped_pkts = 0, dropped_bytes = 0; 1016 1017 struct fq_sched_data *q = qdisc_priv(sch); 1017 1018 struct nlattr *tb[TCA_FQ_MAX + 1]; 1018 - int err, drop_count = 0; 1019 - unsigned drop_len = 0; 1020 1019 u32 fq_log; 1020 + int err; 1021 1021 1022 1022 err = nla_parse_nested_deprecated(tb, TCA_FQ_MAX, opt, fq_policy, 1023 1023 NULL); ··· 1135 1135 err = fq_resize(sch, fq_log); 1136 1136 sch_tree_lock(sch); 1137 1137 } 1138 + 1138 1139 while (sch->q.qlen > sch->limit) { 1139 1140 struct sk_buff *skb = qdisc_dequeue_internal(sch, false); 1140 1141 1141 1142 if (!skb) 1142 1143 break; 1143 - drop_len += qdisc_pkt_len(skb); 1144 + 1145 + dropped_pkts++; 1146 + dropped_bytes += qdisc_pkt_len(skb); 1144 1147 rtnl_kfree_skbs(skb, skb); 1145 - drop_count++; 1146 1148 } 1147 - qdisc_tree_reduce_backlog(sch, drop_count, drop_len); 1149 + qdisc_tree_reduce_backlog(sch, dropped_pkts, dropped_bytes); 1148 1150 1149 1151 sch_tree_unlock(sch); 1150 1152 return err;
+7 -5
net/sched/sch_fq_codel.c
··· 366 366 static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt, 367 367 struct netlink_ext_ack *extack) 368 368 { 369 + unsigned int dropped_pkts = 0, dropped_bytes = 0; 369 370 struct fq_codel_sched_data *q = qdisc_priv(sch); 370 371 struct nlattr *tb[TCA_FQ_CODEL_MAX + 1]; 371 372 u32 quantum = 0; ··· 444 443 q->memory_usage > q->memory_limit) { 445 444 struct sk_buff *skb = qdisc_dequeue_internal(sch, false); 446 445 447 - q->cstats.drop_len += qdisc_pkt_len(skb); 446 + if (!skb) 447 + break; 448 + 449 + dropped_pkts++; 450 + dropped_bytes += qdisc_pkt_len(skb); 448 451 rtnl_kfree_skbs(skb, skb); 449 - q->cstats.drop_count++; 450 452 } 451 - qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len); 452 - q->cstats.drop_count = 0; 453 - q->cstats.drop_len = 0; 453 + qdisc_tree_reduce_backlog(sch, dropped_pkts, dropped_bytes); 454 454 455 455 sch_tree_unlock(sch); 456 456 return 0;
+7 -5
net/sched/sch_fq_pie.c
··· 287 287 static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt, 288 288 struct netlink_ext_ack *extack) 289 289 { 290 + unsigned int dropped_pkts = 0, dropped_bytes = 0; 290 291 struct fq_pie_sched_data *q = qdisc_priv(sch); 291 292 struct nlattr *tb[TCA_FQ_PIE_MAX + 1]; 292 - unsigned int len_dropped = 0; 293 - unsigned int num_dropped = 0; 294 293 int err; 295 294 296 295 err = nla_parse_nested(tb, TCA_FQ_PIE_MAX, opt, fq_pie_policy, extack); ··· 367 368 while (sch->q.qlen > sch->limit) { 368 369 struct sk_buff *skb = qdisc_dequeue_internal(sch, false); 369 370 370 - len_dropped += qdisc_pkt_len(skb); 371 - num_dropped += 1; 371 + if (!skb) 372 + break; 373 + 374 + dropped_pkts++; 375 + dropped_bytes += qdisc_pkt_len(skb); 372 376 rtnl_kfree_skbs(skb, skb); 373 377 } 374 - qdisc_tree_reduce_backlog(sch, num_dropped, len_dropped); 378 + qdisc_tree_reduce_backlog(sch, dropped_pkts, dropped_bytes); 375 379 376 380 sch_tree_unlock(sch); 377 381 return 0;
+7 -5
net/sched/sch_hhf.c
··· 508 508 static int hhf_change(struct Qdisc *sch, struct nlattr *opt, 509 509 struct netlink_ext_ack *extack) 510 510 { 511 + unsigned int dropped_pkts = 0, dropped_bytes = 0; 511 512 struct hhf_sched_data *q = qdisc_priv(sch); 512 513 struct nlattr *tb[TCA_HHF_MAX + 1]; 513 - unsigned int qlen, prev_backlog; 514 514 int err; 515 515 u64 non_hh_quantum; 516 516 u32 new_quantum = q->quantum; ··· 561 561 usecs_to_jiffies(us)); 562 562 } 563 563 564 - qlen = sch->q.qlen; 565 - prev_backlog = sch->qstats.backlog; 566 564 while (sch->q.qlen > sch->limit) { 567 565 struct sk_buff *skb = qdisc_dequeue_internal(sch, false); 568 566 567 + if (!skb) 568 + break; 569 + 570 + dropped_pkts++; 571 + dropped_bytes += qdisc_pkt_len(skb); 569 572 rtnl_kfree_skbs(skb, skb); 570 573 } 571 - qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, 572 - prev_backlog - sch->qstats.backlog); 574 + qdisc_tree_reduce_backlog(sch, dropped_pkts, dropped_bytes); 573 575 574 576 sch_tree_unlock(sch); 575 577 return 0;
+1 -1
net/sched/sch_htb.c
··· 592 592 */ 593 593 static inline void htb_activate(struct htb_sched *q, struct htb_class *cl) 594 594 { 595 - WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen); 595 + WARN_ON(cl->level || !cl->leaf.q); 596 596 597 597 if (!cl->prio_activity) { 598 598 cl->prio_activity = 1 << cl->prio;
+7 -5
net/sched/sch_pie.c
··· 141 141 static int pie_change(struct Qdisc *sch, struct nlattr *opt, 142 142 struct netlink_ext_ack *extack) 143 143 { 144 + unsigned int dropped_pkts = 0, dropped_bytes = 0; 144 145 struct pie_sched_data *q = qdisc_priv(sch); 145 146 struct nlattr *tb[TCA_PIE_MAX + 1]; 146 - unsigned int qlen, dropped = 0; 147 147 int err; 148 148 149 149 err = nla_parse_nested_deprecated(tb, TCA_PIE_MAX, opt, pie_policy, ··· 193 193 nla_get_u32(tb[TCA_PIE_DQ_RATE_ESTIMATOR])); 194 194 195 195 /* Drop excess packets if new limit is lower */ 196 - qlen = sch->q.qlen; 197 196 while (sch->q.qlen > sch->limit) { 198 197 struct sk_buff *skb = qdisc_dequeue_internal(sch, true); 199 198 200 - dropped += qdisc_pkt_len(skb); 201 - qdisc_qstats_backlog_dec(sch, skb); 199 + if (!skb) 200 + break; 201 + 202 + dropped_pkts++; 203 + dropped_bytes += qdisc_pkt_len(skb); 202 204 rtnl_qdisc_drop(skb, sch); 203 205 } 204 - qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped); 206 + qdisc_tree_reduce_backlog(sch, dropped_pkts, dropped_bytes); 205 207 206 208 sch_tree_unlock(sch); 207 209 return 0;
+2 -1
net/smc/af_smc.c
··· 2568 2568 goto out_decl; 2569 2569 } 2570 2570 2571 - smc_listen_out_connected(new_smc); 2572 2571 SMC_STAT_SERV_SUCC_INC(sock_net(newclcsock->sk), ini); 2572 + /* smc_listen_out() will release smcsk */ 2573 + smc_listen_out_connected(new_smc); 2573 2574 goto out_free; 2574 2575 2575 2576 out_unlock:
+6 -1
net/tls/tls_sw.c
··· 1808 1808 return tls_decrypt_sg(sk, NULL, sgout, &darg); 1809 1809 } 1810 1810 1811 + /* All records returned from a recvmsg() call must have the same type. 1812 + * 0 is not a valid content type. Use it as "no type reported, yet". 1813 + */ 1811 1814 static int tls_record_content_type(struct msghdr *msg, struct tls_msg *tlm, 1812 1815 u8 *control) 1813 1816 { ··· 2054 2051 if (err < 0) 2055 2052 goto end; 2056 2053 2054 + /* process_rx_list() will set @control if it processed any records */ 2057 2055 copied = err; 2058 - if (len <= copied || (copied && control != TLS_RECORD_TYPE_DATA) || rx_more) 2056 + if (len <= copied || rx_more || 2057 + (control && control != TLS_RECORD_TYPE_DATA)) 2059 2058 goto end; 2060 2059 2061 2060 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
+13 -3
rust/Makefile
··· 65 65 66 66 core-edition := $(if $(call rustc-min-version,108700),2024,2021) 67 67 68 + # `rustdoc` did not save the target modifiers, thus workaround for 69 + # the time being (https://github.com/rust-lang/rust/issues/144521). 70 + rustdoc_modifiers_workaround := $(if $(call rustc-min-version,108800),-Cunsafe-allow-abi-mismatch=fixed-x18) 71 + 68 72 # `rustc` recognizes `--remap-path-prefix` since 1.26.0, but `rustdoc` only 69 73 # since Rust 1.81.0. Moreover, `rustdoc` ICEs on out-of-tree builds since Rust 70 74 # 1.82.0 (https://github.com/rust-lang/rust/issues/138520). Thus workaround both ··· 81 77 -Zunstable-options --generate-link-to-definition \ 82 78 --output $(rustdoc_output) \ 83 79 --crate-name $(subst rustdoc-,,$@) \ 80 + $(rustdoc_modifiers_workaround) \ 84 81 $(if $(rustdoc_host),,--sysroot=/dev/null) \ 85 82 @$(objtree)/include/generated/rustc_cfg $< 86 83 ··· 111 106 rustdoc-macros: private rustdoc_host = yes 112 107 rustdoc-macros: private rustc_target_flags = --crate-type proc-macro \ 113 108 --extern proc_macro 114 - rustdoc-macros: $(src)/macros/lib.rs FORCE 109 + rustdoc-macros: $(src)/macros/lib.rs rustdoc-clean FORCE 115 110 +$(call if_changed,rustdoc) 116 111 117 112 # Starting with Rust 1.82.0, skipping `-Wrustdoc::unescaped_backticks` should 118 113 # not be needed -- see https://github.com/rust-lang/rust/pull/128307. 
119 114 rustdoc-core: private skip_flags = --edition=2021 -Wrustdoc::unescaped_backticks 120 115 rustdoc-core: private rustc_target_flags = --edition=$(core-edition) $(core-cfgs) 121 - rustdoc-core: $(RUST_LIB_SRC)/core/src/lib.rs FORCE 116 + rustdoc-core: $(RUST_LIB_SRC)/core/src/lib.rs rustdoc-clean FORCE 122 117 +$(call if_changed,rustdoc) 123 118 124 119 rustdoc-compiler_builtins: $(src)/compiler_builtins.rs rustdoc-core FORCE ··· 130 125 rustdoc-pin_init_internal: private rustdoc_host = yes 131 126 rustdoc-pin_init_internal: private rustc_target_flags = --cfg kernel \ 132 127 --extern proc_macro --crate-type proc-macro 133 - rustdoc-pin_init_internal: $(src)/pin-init/internal/src/lib.rs FORCE 128 + rustdoc-pin_init_internal: $(src)/pin-init/internal/src/lib.rs \ 129 + rustdoc-clean FORCE 134 130 +$(call if_changed,rustdoc) 135 131 136 132 rustdoc-pin_init: private rustdoc_host = yes ··· 148 142 rustdoc-pin_init rustdoc-compiler_builtins $(obj)/$(libmacros_name) \ 149 143 $(obj)/bindings.o FORCE 150 144 +$(call if_changed,rustdoc) 145 + 146 + rustdoc-clean: FORCE 147 + $(Q)rm -rf $(rustdoc_output) 151 148 152 149 quiet_cmd_rustc_test_library = $(RUSTC_OR_CLIPPY_QUIET) TL $< 153 150 cmd_rustc_test_library = \ ··· 224 215 --extern bindings --extern uapi \ 225 216 --no-run --crate-name kernel -Zunstable-options \ 226 217 --sysroot=/dev/null \ 218 + $(rustdoc_modifiers_workaround) \ 227 219 --test-builder $(objtree)/scripts/rustdoc_test_builder \ 228 220 $< $(rustdoc_test_kernel_quiet); \ 229 221 $(objtree)/scripts/rustdoc_test_gen
+2
sound/hda/codecs/realtek/alc269.c
··· 7140 7140 SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC), 7141 7141 SND_PCI_QUIRK(0x1d72, 0x1945, "Redmi G", ALC256_FIXUP_ASUS_HEADSET_MIC), 7142 7142 SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC), 7143 + SND_PCI_QUIRK(0x1ee7, 0x2078, "HONOR BRB-X M1010", ALC2XX_FIXUP_HEADSET_MIC), 7143 7144 SND_PCI_QUIRK(0x1f66, 0x0105, "Ayaneo Portable Game Player", ALC287_FIXUP_CS35L41_I2C_2), 7144 7145 SND_PCI_QUIRK(0x2014, 0x800a, "Positivo ARN50", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 7145 7146 SND_PCI_QUIRK(0x2782, 0x0214, "VAIO VJFE-CL", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), ··· 7159 7158 SND_PCI_QUIRK(0xf111, 0x0001, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE), 7160 7159 SND_PCI_QUIRK(0xf111, 0x0006, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE), 7161 7160 SND_PCI_QUIRK(0xf111, 0x0009, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE), 7161 + SND_PCI_QUIRK(0xf111, 0x000b, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE), 7162 7162 SND_PCI_QUIRK(0xf111, 0x000c, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE), 7163 7163 7164 7164 #if 0
+1 -1
sound/hda/codecs/side-codecs/tas2781_hda_i2c.c
··· 265 265 }; 266 266 267 267 static const struct snd_kcontrol_new tas2781_snd_controls[] = { 268 - ACARD_SINGLE_RANGE_EXT_TLV("Speaker Analog Gain", TAS2781_AMP_LEVEL, 268 + ACARD_SINGLE_RANGE_EXT_TLV("Speaker Analog Volume", TAS2781_AMP_LEVEL, 269 269 1, 0, 20, 0, tas2781_amp_getvol, 270 270 tas2781_amp_putvol, amp_vol_tlv), 271 271 ACARD_SINGLE_BOOL_EXT("Speaker Force Firmware Load", 0,
-1
sound/hda/controllers/intel.c
··· 2077 2077 { PCI_DEVICE_SUB(0x1022, 0x1487, 0x1043, 0x874f) }, /* ASUS ROG Zenith II / Strix */ 2078 2078 { PCI_DEVICE_SUB(0x1022, 0x1487, 0x1462, 0xcb59) }, /* MSI TRX40 Creator */ 2079 2079 { PCI_DEVICE_SUB(0x1022, 0x1487, 0x1462, 0xcb60) }, /* MSI TRX40 */ 2080 - { PCI_DEVICE_SUB(0x1022, 0x15e3, 0x1022, 0xd601) }, /* ASRock X670E Taichi */ 2081 2080 {} 2082 2081 }; 2083 2082
+4 -4
sound/pci/azt3328.c
··· 412 412 outl(value, chip->ctrl_io + reg); 413 413 } 414 414 415 - static inline void 415 + static inline void __maybe_unused 416 416 snd_azf3328_game_outb(const struct snd_azf3328 *chip, unsigned reg, u8 value) 417 417 { 418 418 outb(value, chip->game_io + reg); 419 419 } 420 420 421 - static inline void 421 + static inline void __maybe_unused 422 422 snd_azf3328_game_outw(const struct snd_azf3328 *chip, unsigned reg, u16 value) 423 423 { 424 424 outw(value, chip->game_io + reg); 425 425 } 426 426 427 - static inline u8 427 + static inline u8 __maybe_unused 428 428 snd_azf3328_game_inb(const struct snd_azf3328 *chip, unsigned reg) 429 429 { 430 430 return inb(chip->game_io + reg); 431 431 } 432 432 433 - static inline u16 433 + static inline u16 __maybe_unused 434 434 snd_azf3328_game_inw(const struct snd_azf3328 *chip, unsigned reg) 435 435 { 436 436 return inw(chip->game_io + reg);
+3 -1
sound/soc/Kconfig
··· 111 111 source "sound/soc/cirrus/Kconfig" 112 112 source "sound/soc/dwc/Kconfig" 113 113 source "sound/soc/fsl/Kconfig" 114 - source "sound/soc/generic/Kconfig" 115 114 source "sound/soc/google/Kconfig" 116 115 source "sound/soc/hisilicon/Kconfig" 117 116 source "sound/soc/jz4740/Kconfig" ··· 147 148 source "sound/soc/codecs/Kconfig" 148 149 149 150 source "sound/soc/sdw_utils/Kconfig" 151 + 152 + # generic frame-work 153 + source "sound/soc/generic/Kconfig" 150 154 151 155 endif # SND_SOC 152 156
+3 -5
sound/soc/codecs/aw87390.c
··· 177 177 { 178 178 struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol); 179 179 struct aw87390 *aw87390 = snd_soc_component_get_drvdata(codec); 180 - char *prof_name, *name; 180 + char *prof_name; 181 181 int count, ret; 182 182 183 183 uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; ··· 194 194 if (uinfo->value.enumerated.item >= count) 195 195 uinfo->value.enumerated.item = count - 1; 196 196 197 - name = uinfo->value.enumerated.name; 198 197 count = uinfo->value.enumerated.item; 199 198 200 199 ret = aw87390_dev_get_prof_name(aw87390->aw_pa, count, &prof_name); 201 200 if (ret) { 202 - strscpy(uinfo->value.enumerated.name, "null", 203 - strlen("null") + 1); 201 + strscpy(uinfo->value.enumerated.name, "null"); 204 202 return 0; 205 203 } 206 204 207 - strscpy(name, prof_name, sizeof(uinfo->value.enumerated.name)); 205 + strscpy(uinfo->value.enumerated.name, prof_name); 208 206 209 207 return 0; 210 208 }
+2 -3
sound/soc/codecs/aw88081.c
··· 914 914 915 915 ret = aw88081_dev_get_prof_name(aw88081->aw_pa, count, &prof_name); 916 916 if (ret) { 917 - strscpy(uinfo->value.enumerated.name, "null", 918 - sizeof(uinfo->value.enumerated.name)); 917 + strscpy(uinfo->value.enumerated.name, "null"); 919 918 return 0; 920 919 } 921 920 922 - strscpy(uinfo->value.enumerated.name, prof_name, sizeof(uinfo->value.enumerated.name)); 921 + strscpy(uinfo->value.enumerated.name, prof_name); 923 922 924 923 return 0; 925 924 }
+3 -5
sound/soc/codecs/aw88166.c
··· 1478 1478 { 1479 1479 struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol); 1480 1480 struct aw88166 *aw88166 = snd_soc_component_get_drvdata(codec); 1481 - char *prof_name, *name; 1481 + char *prof_name; 1482 1482 int count, ret; 1483 1483 1484 1484 uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; ··· 1495 1495 if (uinfo->value.enumerated.item >= count) 1496 1496 uinfo->value.enumerated.item = count - 1; 1497 1497 1498 - name = uinfo->value.enumerated.name; 1499 1498 count = uinfo->value.enumerated.item; 1500 1499 1501 1500 ret = aw88166_dev_get_prof_name(aw88166->aw_pa, count, &prof_name); 1502 1501 if (ret) { 1503 - strscpy(uinfo->value.enumerated.name, "null", 1504 - strlen("null") + 1); 1502 + strscpy(uinfo->value.enumerated.name, "null"); 1505 1503 return 0; 1506 1504 } 1507 1505 1508 - strscpy(name, prof_name, sizeof(uinfo->value.enumerated.name)); 1506 + strscpy(uinfo->value.enumerated.name, prof_name); 1509 1507 1510 1508 return 0; 1511 1509 }
+3 -5
sound/soc/codecs/aw88261.c
··· 819 819 { 820 820 struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol); 821 821 struct aw88261 *aw88261 = snd_soc_component_get_drvdata(codec); 822 - char *prof_name, *name; 822 + char *prof_name; 823 823 int count, ret; 824 824 825 825 uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; ··· 836 836 if (uinfo->value.enumerated.item >= count) 837 837 uinfo->value.enumerated.item = count - 1; 838 838 839 - name = uinfo->value.enumerated.name; 840 839 count = uinfo->value.enumerated.item; 841 840 842 841 ret = aw88261_dev_get_prof_name(aw88261->aw_pa, count, &prof_name); 843 842 if (ret) { 844 - strscpy(uinfo->value.enumerated.name, "null", 845 - strlen("null") + 1); 843 + strscpy(uinfo->value.enumerated.name, "null"); 846 844 return 0; 847 845 } 848 846 849 - strscpy(name, prof_name, sizeof(uinfo->value.enumerated.name)); 847 + strscpy(uinfo->value.enumerated.name, prof_name); 850 848 851 849 return 0; 852 850 }
+3 -5
sound/soc/codecs/aw88395/aw88395.c
··· 175 175 { 176 176 struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol); 177 177 struct aw88395 *aw88395 = snd_soc_component_get_drvdata(codec); 178 - char *prof_name, *name; 178 + char *prof_name; 179 179 int count, ret; 180 180 181 181 uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; ··· 192 192 if (uinfo->value.enumerated.item >= count) 193 193 uinfo->value.enumerated.item = count - 1; 194 194 195 - name = uinfo->value.enumerated.name; 196 195 count = uinfo->value.enumerated.item; 197 196 198 197 ret = aw88395_dev_get_prof_name(aw88395->aw_pa, count, &prof_name); 199 198 if (ret) { 200 - strscpy(uinfo->value.enumerated.name, "null", 201 - strlen("null") + 1); 199 + strscpy(uinfo->value.enumerated.name, "null"); 202 200 return 0; 203 201 } 204 202 205 - strscpy(name, prof_name, sizeof(uinfo->value.enumerated.name)); 203 + strscpy(uinfo->value.enumerated.name, prof_name); 206 204 207 205 return 0; 208 206 }
+3 -5
sound/soc/codecs/aw88399.c
··· 1831 1831 { 1832 1832 struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol); 1833 1833 struct aw88399 *aw88399 = snd_soc_component_get_drvdata(codec); 1834 - char *prof_name, *name; 1834 + char *prof_name; 1835 1835 int count, ret; 1836 1836 1837 1837 uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; ··· 1848 1848 if (uinfo->value.enumerated.item >= count) 1849 1849 uinfo->value.enumerated.item = count - 1; 1850 1850 1851 - name = uinfo->value.enumerated.name; 1852 1851 count = uinfo->value.enumerated.item; 1853 1852 1854 1853 ret = aw88399_dev_get_prof_name(aw88399->aw_pa, count, &prof_name); 1855 1854 if (ret) { 1856 - strscpy(uinfo->value.enumerated.name, "null", 1857 - strlen("null") + 1); 1855 + strscpy(uinfo->value.enumerated.name, "null"); 1858 1856 return 0; 1859 1857 } 1860 1858 1861 - strscpy(name, prof_name, sizeof(uinfo->value.enumerated.name)); 1859 + strscpy(uinfo->value.enumerated.name, prof_name); 1862 1860 1863 1861 return 0; 1864 1862 }
+1 -1
sound/soc/codecs/lpass-tx-macro.c
··· 2229 2229 } 2230 2230 2231 2231 static const struct snd_soc_component_driver tx_macro_component_drv = { 2232 - .name = "RX-MACRO", 2232 + .name = "TX-MACRO", 2233 2233 .probe = tx_macro_component_probe, 2234 2234 .controls = tx_macro_snd_controls, 2235 2235 .num_controls = ARRAY_SIZE(tx_macro_snd_controls),
+2 -1
sound/soc/codecs/rt1320-sdw.c
··· 109 109 { 0x0000d540, 0x01 }, 110 110 { 0xd172, 0x2a }, 111 111 { 0xc5d6, 0x01 }, 112 + { 0xd478, 0xff }, 112 113 }; 113 114 114 115 static const struct reg_sequence rt1320_vc_blind_write[] = { ··· 160 159 { 0xd471, 0x3a }, 161 160 { 0xd474, 0x11 }, 162 161 { 0xd475, 0x32 }, 163 - { 0xd478, 0x64 }, 162 + { 0xd478, 0xff }, 164 163 { 0xd479, 0x20 }, 165 164 { 0xd47a, 0x10 }, 166 165 { 0xd47c, 0xff },
+2
sound/soc/codecs/rt721-sdca.c
··· 278 278 RT721_ENT_FLOAT_CTL1, 0x4040); 279 279 rt_sdca_index_write(rt721->mbq_regmap, RT721_HDA_SDCA_FLOAT, 280 280 RT721_ENT_FLOAT_CTL4, 0x1201); 281 + rt_sdca_index_write(rt721->mbq_regmap, RT721_BOOST_CTRL, 282 + RT721_BST_4CH_TOP_GATING_CTRL1, 0x002a); 281 283 regmap_write(rt721->regmap, 0x2f58, 0x07); 282 284 } 283 285
+4
sound/soc/codecs/rt721-sdca.h
··· 56 56 #define RT721_CBJ_CTRL 0x0a 57 57 #define RT721_CAP_PORT_CTRL 0x0c 58 58 #define RT721_CLASD_AMP_CTRL 0x0d 59 + #define RT721_BOOST_CTRL 0x0f 59 60 #define RT721_VENDOR_REG 0x20 60 61 #define RT721_RC_CALIB_CTRL 0x40 61 62 #define RT721_VENDOR_EQ_L 0x53 ··· 93 92 94 93 /* Index (NID:0dh) */ 95 94 #define RT721_CLASD_AMP_2CH_CAL 0x14 95 + 96 + /* Index (NID:0fh) */ 97 + #define RT721_BST_4CH_TOP_GATING_CTRL1 0x05 96 98 97 99 /* Index (NID:20h) */ 98 100 #define RT721_JD_PRODUCT_NUM 0x00
+1 -1
sound/soc/codecs/sma1307.c
··· 1749 1749 sma1307->set.header_size * sizeof(int)); 1750 1750 1751 1751 if ((sma1307->set.checksum >> 8) != SMA1307_SETTING_CHECKSUM) { 1752 - dev_err(sma1307->dev, "%s: failed by dismatch \"%s\"\n", 1752 + dev_err(sma1307->dev, "%s: checksum failed \"%s\"\n", 1753 1753 __func__, setting_file); 1754 1754 sma1307->set.status = false; 1755 1755 return;
+3 -3
sound/soc/codecs/tas2781-i2c.c
··· 908 908 }; 909 909 910 910 static const struct snd_kcontrol_new tas2781_snd_controls[] = { 911 - SOC_SINGLE_RANGE_EXT_TLV("Speaker Analog Gain", TAS2781_AMP_LEVEL, 911 + SOC_SINGLE_RANGE_EXT_TLV("Speaker Analog Volume", TAS2781_AMP_LEVEL, 912 912 1, 0, 20, 0, tas2781_amp_getvol, 913 913 tas2781_amp_putvol, amp_vol_tlv), 914 - SOC_SINGLE_RANGE_EXT_TLV("Speaker Digital Gain", TAS2781_DVC_LVL, 914 + SOC_SINGLE_RANGE_EXT_TLV("Speaker Digital Volume", TAS2781_DVC_LVL, 915 915 0, 0, 200, 1, tas2781_digital_getvol, 916 916 tas2781_digital_putvol, dvc_tlv), 917 917 }; ··· 1480 1480 return PTR_ERR(src); 1481 1481 1482 1482 if (src[0] > max_pkg_len && src[0] != count) { 1483 - dev_err(priv->dev, "pkg(%u), max(%u), count(%u) dismatch.\n", 1483 + dev_err(priv->dev, "pkg(%u), max(%u), count(%u) mismatch.\n", 1484 1484 src[0], max_pkg_len, (unsigned int)count); 1485 1485 ret = 0; 1486 1486 goto exit;
+10 -10
sound/soc/fsl/fsl_sai.c
··· 809 809 * are running concurrently. 810 810 */ 811 811 /* Software Reset */ 812 - regmap_write(sai->regmap, FSL_SAI_xCSR(tx, ofs), FSL_SAI_CSR_SR); 812 + regmap_update_bits(sai->regmap, FSL_SAI_xCSR(tx, ofs), FSL_SAI_CSR_SR, FSL_SAI_CSR_SR); 813 813 /* Clear SR bit to finish the reset */ 814 - regmap_write(sai->regmap, FSL_SAI_xCSR(tx, ofs), 0); 814 + regmap_update_bits(sai->regmap, FSL_SAI_xCSR(tx, ofs), FSL_SAI_CSR_SR, 0); 815 815 } 816 816 817 817 static int fsl_sai_trigger(struct snd_pcm_substream *substream, int cmd, ··· 930 930 unsigned int ofs = sai->soc_data->reg_offset; 931 931 932 932 /* Software Reset for both Tx and Rx */ 933 - regmap_write(sai->regmap, FSL_SAI_TCSR(ofs), FSL_SAI_CSR_SR); 934 - regmap_write(sai->regmap, FSL_SAI_RCSR(ofs), FSL_SAI_CSR_SR); 933 + regmap_update_bits(sai->regmap, FSL_SAI_TCSR(ofs), FSL_SAI_CSR_SR, FSL_SAI_CSR_SR); 934 + regmap_update_bits(sai->regmap, FSL_SAI_RCSR(ofs), FSL_SAI_CSR_SR, FSL_SAI_CSR_SR); 935 935 /* Clear SR bit to finish the reset */ 936 - regmap_write(sai->regmap, FSL_SAI_TCSR(ofs), 0); 937 - regmap_write(sai->regmap, FSL_SAI_RCSR(ofs), 0); 936 + regmap_update_bits(sai->regmap, FSL_SAI_TCSR(ofs), FSL_SAI_CSR_SR, 0); 937 + regmap_update_bits(sai->regmap, FSL_SAI_RCSR(ofs), FSL_SAI_CSR_SR, 0); 938 938 939 939 regmap_update_bits(sai->regmap, FSL_SAI_TCR1(ofs), 940 940 FSL_SAI_CR1_RFW_MASK(sai->soc_data->fifo_depth), ··· 1824 1824 1825 1825 regcache_cache_only(sai->regmap, false); 1826 1826 regcache_mark_dirty(sai->regmap); 1827 - regmap_write(sai->regmap, FSL_SAI_TCSR(ofs), FSL_SAI_CSR_SR); 1828 - regmap_write(sai->regmap, FSL_SAI_RCSR(ofs), FSL_SAI_CSR_SR); 1827 + regmap_update_bits(sai->regmap, FSL_SAI_TCSR(ofs), FSL_SAI_CSR_SR, FSL_SAI_CSR_SR); 1828 + regmap_update_bits(sai->regmap, FSL_SAI_RCSR(ofs), FSL_SAI_CSR_SR, FSL_SAI_CSR_SR); 1829 1829 usleep_range(1000, 2000); 1830 - regmap_write(sai->regmap, FSL_SAI_TCSR(ofs), 0); 1831 - regmap_write(sai->regmap, FSL_SAI_RCSR(ofs), 0); 1830 + 
regmap_update_bits(sai->regmap, FSL_SAI_TCSR(ofs), FSL_SAI_CSR_SR, 0); 1831 + regmap_update_bits(sai->regmap, FSL_SAI_RCSR(ofs), FSL_SAI_CSR_SR, 0); 1832 1832 1833 1833 ret = regcache_sync(sai->regmap); 1834 1834 if (ret)
+2 -5
sound/soc/stm/stm32_i2s.c
··· 469 469 int ret; 470 470 471 471 ret = stm32_i2s_calc_clk_div(i2s, req->best_parent_rate, req->rate); 472 - if (ret) { 473 - req->rate = ret; 474 - 475 - return 0; 476 - } 472 + if (ret) 473 + return ret; 477 474 478 475 mclk->freq = req->best_parent_rate / i2s->divider; 479 476
+22 -3
sound/usb/stream.c
··· 341 341 342 342 len = le16_to_cpu(cluster->wLength); 343 343 c = 0; 344 - p += sizeof(struct uac3_cluster_header_descriptor); 344 + p += sizeof(*cluster); 345 + len -= sizeof(*cluster); 345 346 346 - while (((p - (void *)cluster) < len) && (c < channels)) { 347 + while (len > 0 && (c < channels)) { 347 348 struct uac3_cluster_segment_descriptor *cs_desc = p; 348 349 u16 cs_len; 349 350 u8 cs_type; 350 351 352 + if (len < sizeof(*p)) 353 + break; 351 354 cs_len = le16_to_cpu(cs_desc->wLength); 355 + if (len < cs_len) 356 + break; 352 357 cs_type = cs_desc->bSegmentType; 353 358 354 359 if (cs_type == UAC3_CHANNEL_INFORMATION) { 355 360 struct uac3_cluster_information_segment_descriptor *is = p; 356 361 unsigned char map; 362 + 363 + if (cs_len < sizeof(*is)) 364 + break; 357 365 358 366 /* 359 367 * TODO: this conversion is not complete, update it ··· 464 456 chmap->map[c++] = map; 465 457 } 466 458 p += cs_len; 459 + len -= cs_len; 467 460 } 468 461 469 462 if (channels < c) ··· 890 881 u64 badd_formats = 0; 891 882 unsigned int num_channels; 892 883 struct audioformat *fp; 893 - u16 cluster_id, wLength; 884 + u16 cluster_id, wLength, cluster_wLength; 894 885 int clock = 0; 895 886 int err; 896 887 ··· 1015 1006 } else if (err != wLength) { 1016 1007 dev_err(&dev->dev, 1017 1008 "%u:%d : can't get Cluster Descriptor\n", 1009 + iface_no, altno); 1010 + kfree(cluster); 1011 + return ERR_PTR(-EIO); 1012 + } 1013 + 1014 + cluster_wLength = le16_to_cpu(cluster->wLength); 1015 + if (cluster_wLength < sizeof(*cluster) || 1016 + cluster_wLength > wLength) { 1017 + dev_err(&dev->dev, 1018 + "%u:%d : invalid Cluster Descriptor size\n", 1018 1019 iface_no, altno); 1019 1020 kfree(cluster); 1020 1021 return ERR_PTR(-EIO);
+12
sound/usb/validate.c
··· 221 221 return d->bLength >= sizeof(*d) + 4 + 2; 222 222 } 223 223 224 + static bool validate_uac3_power_domain_unit(const void *p, 225 + const struct usb_desc_validator *v) 226 + { 227 + const struct uac3_power_domain_descriptor *d = p; 228 + 229 + if (d->bLength < sizeof(*d)) 230 + return false; 231 + /* baEntities[] + wPDomainDescrStr */ 232 + return d->bLength >= sizeof(*d) + d->bNrEntities + 2; 233 + } 234 + 224 235 static bool validate_midi_out_jack(const void *p, 225 236 const struct usb_desc_validator *v) 226 237 { ··· 296 285 struct uac3_clock_multiplier_descriptor), 297 286 /* UAC_VERSION_3, UAC3_SAMPLE_RATE_CONVERTER: not implemented yet */ 298 287 /* UAC_VERSION_3, UAC3_CONNECTORS: not implemented yet */ 288 + FUNC(UAC_VERSION_3, UAC3_POWER_DOMAIN, validate_uac3_power_domain_unit), 299 289 { } /* terminator */ 300 290 }; 301 291
+2 -2
tools/bootconfig/main.c
··· 193 193 if (stat.st_size < BOOTCONFIG_FOOTER_SIZE) 194 194 return 0; 195 195 196 - if (lseek(fd, -BOOTCONFIG_MAGIC_LEN, SEEK_END) < 0) 196 + if (lseek(fd, -(off_t)BOOTCONFIG_MAGIC_LEN, SEEK_END) < 0) 197 197 return pr_errno("Failed to lseek for magic", -errno); 198 198 199 199 if (read(fd, magic, BOOTCONFIG_MAGIC_LEN) < 0) ··· 203 203 if (memcmp(magic, BOOTCONFIG_MAGIC, BOOTCONFIG_MAGIC_LEN) != 0) 204 204 return 0; 205 205 206 - if (lseek(fd, -BOOTCONFIG_FOOTER_SIZE, SEEK_END) < 0) 206 + if (lseek(fd, -(off_t)BOOTCONFIG_FOOTER_SIZE, SEEK_END) < 0) 207 207 return pr_errno("Failed to lseek for size", -errno); 208 208 209 209 if (read(fd, &size, sizeof(uint32_t)) < 0)
+23
tools/objtool/arch/loongarch/special.c
··· 27 27 struct table_info *next_table; 28 28 unsigned long tmp_insn_offset; 29 29 unsigned long tmp_rodata_offset; 30 + bool is_valid_list = false; 30 31 31 32 rsec = find_section_by_name(file->elf, ".rela.discard.tablejump_annotate"); 32 33 if (!rsec) ··· 36 35 INIT_LIST_HEAD(&table_list); 37 36 38 37 for_each_reloc(rsec, reloc) { 38 + if (reloc->sym->sec->rodata) 39 + continue; 40 + 41 + if (strcmp(insn->sec->name, reloc->sym->sec->name)) 42 + continue; 43 + 39 44 orig_table = malloc(sizeof(struct table_info)); 40 45 if (!orig_table) { 41 46 WARN("malloc failed"); ··· 56 49 57 50 if (reloc_idx(reloc) + 1 == sec_num_entries(rsec)) 58 51 break; 52 + 53 + if (strcmp(insn->sec->name, (reloc + 1)->sym->sec->name)) { 54 + list_for_each_entry(orig_table, &table_list, jump_info) { 55 + if (orig_table->insn_offset == insn->offset) { 56 + is_valid_list = true; 57 + break; 58 + } 59 + } 60 + 61 + if (!is_valid_list) { 62 + list_del_init(&table_list); 63 + continue; 64 + } 65 + 66 + break; 67 + } 59 68 } 60 69 61 70 list_for_each_entry(orig_table, &table_list, jump_info) {
-3
tools/testing/selftests/coredump/stackdump_test.c
··· 446 446 if (info.coredump_mask & PIDFD_COREDUMPED) 447 447 goto out; 448 448 449 - if (read(fd_coredump, &c, 1) < 1) 450 - goto out; 451 - 452 449 exit_code = EXIT_SUCCESS; 453 450 out: 454 451 if (fd_peer_pidfd >= 0)
+2 -1
tools/testing/selftests/drivers/net/bonding/Makefile
··· 10 10 mode-2-recovery-updelay.sh \ 11 11 bond_options.sh \ 12 12 bond-eth-type-change.sh \ 13 - bond_macvlan_ipvlan.sh 13 + bond_macvlan_ipvlan.sh \ 14 + bond_passive_lacp.sh 14 15 15 16 TEST_FILES := \ 16 17 lag_lib.sh \
+105
tools/testing/selftests/drivers/net/bonding/bond_passive_lacp.sh
··· 1 + #!/bin/bash 2 + # SPDX-License-Identifier: GPL-2.0 3 + # 4 + # Test if a bond interface works with lacp_active=off. 5 + 6 + # shellcheck disable=SC2034 7 + REQUIRE_MZ=no 8 + NUM_NETIFS=0 9 + lib_dir=$(dirname "$0") 10 + # shellcheck disable=SC1091 11 + source "$lib_dir"/../../../net/forwarding/lib.sh 12 + 13 + # shellcheck disable=SC2317 14 + check_port_state() 15 + { 16 + local netns=$1 17 + local port=$2 18 + local state=$3 19 + 20 + ip -n "${netns}" -d -j link show "$port" | \ 21 + jq -e ".[].linkinfo.info_slave_data.ad_actor_oper_port_state_str | index(\"${state}\") != null" > /dev/null 22 + } 23 + 24 + check_pkt_count() 25 + { 26 + RET=0 27 + local ns="$1" 28 + local iface="$2" 29 + 30 + # wait 65s, one per 30s 31 + slowwait_for_counter 65 2 tc_rule_handle_stats_get \ 32 + "dev ${iface} egress" 101 ".packets" "-n ${ns}" &> /dev/null 33 + } 34 + 35 + setup() { 36 + setup_ns c_ns s_ns 37 + 38 + # shellcheck disable=SC2154 39 + ip -n "${c_ns}" link add eth0 type veth peer name eth0 netns "${s_ns}" 40 + ip -n "${c_ns}" link add eth1 type veth peer name eth1 netns "${s_ns}" 41 + 42 + # Add tc filter to count the pkts 43 + tc -n "${c_ns}" qdisc add dev eth0 clsact 44 + tc -n "${c_ns}" filter add dev eth0 egress handle 101 protocol 0x8809 matchall action pass 45 + tc -n "${s_ns}" qdisc add dev eth1 clsact 46 + tc -n "${s_ns}" filter add dev eth1 egress handle 101 protocol 0x8809 matchall action pass 47 + 48 + ip -n "${s_ns}" link add bond0 type bond mode 802.3ad lacp_active on lacp_rate fast 49 + ip -n "${s_ns}" link set eth0 master bond0 50 + ip -n "${s_ns}" link set eth1 master bond0 51 + 52 + ip -n "${c_ns}" link add bond0 type bond mode 802.3ad lacp_active off lacp_rate fast 53 + ip -n "${c_ns}" link set eth0 master bond0 54 + ip -n "${c_ns}" link set eth1 master bond0 55 + 56 + } 57 + 58 + trap cleanup_all_ns EXIT 59 + setup 60 + 61 + # The bond will send 2 lacpdu pkts during init time, let's wait at least 2s 62 + # after interface up 63 + ip -n 
"${c_ns}" link set bond0 up 64 + sleep 2 65 + 66 + # 1. The passive side shouldn't send LACPDU. 67 + check_pkt_count "${c_ns}" "eth0" && RET=1 68 + log_test "802.3ad lacp_active off" "init port" 69 + 70 + ip -n "${s_ns}" link set bond0 up 71 + # 2. The passive side should not have the 'active' flag. 72 + RET=0 73 + slowwait 2 check_port_state "${c_ns}" "eth0" "active" && RET=1 74 + log_test "802.3ad lacp_active off" "port state active" 75 + 76 + # 3. The active side should have the 'active' flag. 77 + RET=0 78 + slowwait 2 check_port_state "${s_ns}" "eth0" "active" || RET=1 79 + log_test "802.3ad lacp_active on" "port state active" 80 + 81 + # 4. Make sure the connection is not expired. 82 + RET=0 83 + slowwait 5 check_port_state "${s_ns}" "eth0" "distributing" 84 + slowwait 10 check_port_state "${s_ns}" "eth0" "expired" && RET=1 85 + log_test "bond 802.3ad lacp_active off" "port connection" 86 + 87 + # After testing, disconnect one port on each side to check the state. 88 + ip -n "${s_ns}" link set eth0 nomaster 89 + ip -n "${s_ns}" link set eth0 up 90 + ip -n "${c_ns}" link set eth1 nomaster 91 + ip -n "${c_ns}" link set eth1 up 92 + # Due to Periodic Machine and Rx Machine state change, the bond will still 93 + # send lacpdu pkts in a few seconds. sleep at lease 5s to make sure 94 + # negotiation finished 95 + sleep 5 96 + 97 + # 5. The active side should keep sending LACPDU. 98 + check_pkt_count "${s_ns}" "eth1" || RET=1 99 + log_test "bond 802.3ad lacp_active on" "port pkt after disconnect" 100 + 101 + # 6. The passive side shouldn't send LACPDU anymore. 102 + check_pkt_count "${c_ns}" "eth0" && RET=1 103 + log_test "bond 802.3ad lacp_active off" "port pkt after disconnect" 104 + 105 + exit "$EXIT_STATUS"
+1
tools/testing/selftests/drivers/net/bonding/config
··· 6 6 CONFIG_IPVLAN=y 7 7 CONFIG_NET_ACT_GACT=y 8 8 CONFIG_NET_CLS_FLOWER=y 9 + CONFIG_NET_CLS_MATCHALL=m 9 10 CONFIG_NET_SCH_INGRESS=y 10 11 CONFIG_NLMON=y 11 12 CONFIG_VETH=y
+64 -13
tools/testing/selftests/mount_setattr/mount_setattr_test.c
··· 107 107 #endif 108 108 #endif 109 109 110 + #ifndef __NR_open_tree_attr 111 + #if defined __alpha__ 112 + #define __NR_open_tree_attr 577 113 + #elif defined _MIPS_SIM 114 + #if _MIPS_SIM == _MIPS_SIM_ABI32 /* o32 */ 115 + #define __NR_open_tree_attr (467 + 4000) 116 + #endif 117 + #if _MIPS_SIM == _MIPS_SIM_NABI32 /* n32 */ 118 + #define __NR_open_tree_attr (467 + 6000) 119 + #endif 120 + #if _MIPS_SIM == _MIPS_SIM_ABI64 /* n64 */ 121 + #define __NR_open_tree_attr (467 + 5000) 122 + #endif 123 + #elif defined __ia64__ 124 + #define __NR_open_tree_attr (467 + 1024) 125 + #else 126 + #define __NR_open_tree_attr 467 127 + #endif 128 + #endif 129 + 110 130 #ifndef MOUNT_ATTR_IDMAP 111 131 #define MOUNT_ATTR_IDMAP 0x00100000 112 132 #endif ··· 139 119 struct mount_attr *attr, size_t size) 140 120 { 141 121 return syscall(__NR_mount_setattr, dfd, path, flags, attr, size); 122 + } 123 + 124 + static inline int sys_open_tree_attr(int dfd, const char *path, unsigned int flags, 125 + struct mount_attr *attr, size_t size) 126 + { 127 + return syscall(__NR_open_tree_attr, dfd, path, flags, attr, size); 142 128 } 143 129 144 130 static ssize_t write_nointr(int fd, const void *buf, size_t count) ··· 1248 1222 attr.userns_fd = get_userns_fd(0, 10000, 10000); 1249 1223 ASSERT_GE(attr.userns_fd, 0); 1250 1224 ASSERT_NE(sys_mount_setattr(open_tree_fd, "", AT_EMPTY_PATH, &attr, sizeof(attr)), 0); 1225 + /* 1226 + * Make sure that open_tree_attr() without OPEN_TREE_CLONE is not a way 1227 + * to bypass this mount_setattr() restriction. 
1228 + */ 1229 + ASSERT_LT(sys_open_tree_attr(open_tree_fd, "", AT_EMPTY_PATH, &attr, sizeof(attr)), 0); 1230 + 1251 1231 ASSERT_EQ(close(attr.userns_fd), 0); 1252 1232 ASSERT_EQ(close(open_tree_fd), 0); 1253 1233 } ··· 1287 1255 ASSERT_GE(attr.userns_fd, 0); 1288 1256 ASSERT_NE(sys_mount_setattr(open_tree_fd, "", AT_EMPTY_PATH, &attr, 1289 1257 sizeof(attr)), 0); 1258 + /* 1259 + * Make sure that open_tree_attr() without OPEN_TREE_CLONE is not a way 1260 + * to bypass this mount_setattr() restriction. 1261 + */ 1262 + ASSERT_LT(sys_open_tree_attr(open_tree_fd, "", AT_EMPTY_PATH, &attr, sizeof(attr)), 0); 1263 + 1290 1264 ASSERT_EQ(close(attr.userns_fd), 0); 1291 1265 ASSERT_EQ(close(open_tree_fd), 0); 1292 1266 } ··· 1359 1321 ASSERT_EQ(close(open_tree_fd), 0); 1360 1322 } 1361 1323 1324 + static bool expected_uid_gid(int dfd, const char *path, int flags, 1325 + uid_t expected_uid, gid_t expected_gid) 1326 + { 1327 + int ret; 1328 + struct stat st; 1329 + 1330 + ret = fstatat(dfd, path, &st, flags); 1331 + if (ret < 0) 1332 + return false; 1333 + 1334 + return st.st_uid == expected_uid && st.st_gid == expected_gid; 1335 + } 1336 + 1362 1337 /** 1363 1338 * Validate that currently changing the idmapping of an idmapped mount fails. 1364 1339 */ ··· 1381 1330 struct mount_attr attr = { 1382 1331 .attr_set = MOUNT_ATTR_IDMAP, 1383 1332 }; 1333 + 1334 + ASSERT_TRUE(expected_uid_gid(-EBADF, "/mnt/D", 0, 0, 0)); 1384 1335 1385 1336 if (!mount_setattr_supported()) 1386 1337 SKIP(return, "mount_setattr syscall not supported"); ··· 1401 1348 AT_EMPTY_PATH, &attr, sizeof(attr)), 0); 1402 1349 ASSERT_EQ(close(attr.userns_fd), 0); 1403 1350 1351 + EXPECT_FALSE(expected_uid_gid(open_tree_fd, ".", 0, 0, 0)); 1352 + EXPECT_TRUE(expected_uid_gid(open_tree_fd, ".", 0, 10000, 10000)); 1353 + 1404 1354 /* Change idmapping on a detached mount that is already idmapped. 
*/ 1405 1355 attr.userns_fd = get_userns_fd(0, 20000, 10000); 1406 1356 ASSERT_GE(attr.userns_fd, 0); 1407 1357 ASSERT_NE(sys_mount_setattr(open_tree_fd, "", AT_EMPTY_PATH, &attr, sizeof(attr)), 0); 1358 + /* 1359 + * Make sure that open_tree_attr() without OPEN_TREE_CLONE is not a way 1360 + * to bypass this mount_setattr() restriction. 1361 + */ 1362 + EXPECT_LT(sys_open_tree_attr(open_tree_fd, "", AT_EMPTY_PATH, &attr, sizeof(attr)), 0); 1363 + EXPECT_FALSE(expected_uid_gid(open_tree_fd, ".", 0, 20000, 20000)); 1364 + EXPECT_TRUE(expected_uid_gid(open_tree_fd, ".", 0, 10000, 10000)); 1365 + 1408 1366 ASSERT_EQ(close(attr.userns_fd), 0); 1409 1367 ASSERT_EQ(close(open_tree_fd), 0); 1410 - } 1411 - 1412 - static bool expected_uid_gid(int dfd, const char *path, int flags, 1413 - uid_t expected_uid, gid_t expected_gid) 1414 - { 1415 - int ret; 1416 - struct stat st; 1417 - 1418 - ret = fstatat(dfd, path, &st, flags); 1419 - if (ret < 0) 1420 - return false; 1421 - 1422 - return st.st_uid == expected_uid && st.st_gid == expected_gid; 1423 1368 } 1424 1369 1425 1370 TEST_F(mount_setattr_idmapped, idmap_mount_tree_invalid)
+29
tools/testing/selftests/net/forwarding/router.sh
··· 18 18 # | 2001:db8:1::1/64 2001:db8:2::1/64 | 19 19 # | | 20 20 # +-----------------------------------------------------------------+ 21 + # 22 + #shellcheck disable=SC2034 # SC doesn't see our uses of global variables 21 23 22 24 ALL_TESTS=" 23 25 ping_ipv4 ··· 29 27 ipv4_sip_equal_dip 30 28 ipv6_sip_equal_dip 31 29 ipv4_dip_link_local 30 + ipv4_sip_link_local 32 31 " 33 32 34 33 NUM_NETIFS=4 ··· 331 328 ip route del 169.254.1.0/24 dev $rp2 332 329 ip neigh del 169.254.1.1 lladdr 00:11:22:33:44:55 dev $rp2 333 330 tc filter del dev $rp2 egress protocol ip pref 1 handle 101 flower 331 + } 332 + 333 + ipv4_sip_link_local() 334 + { 335 + local sip=169.254.1.1 336 + 337 + RET=0 338 + 339 + # Disable rpfilter to prevent packets to be dropped because of it. 340 + sysctl_set net.ipv4.conf.all.rp_filter 0 341 + sysctl_set net.ipv4.conf."$rp1".rp_filter 0 342 + 343 + tc filter add dev "$rp2" egress protocol ip pref 1 handle 101 \ 344 + flower src_ip "$sip" action pass 345 + 346 + $MZ "$h1" -t udp "sp=54321,dp=12345" -c 5 -d 1msec -b "$rp1mac" \ 347 + -A "$sip" -B 198.51.100.2 -q 348 + 349 + tc_check_packets "dev $rp2 egress" 101 5 350 + check_err $? "Packets were dropped" 351 + 352 + log_test "IPv4 source IP is link-local" 353 + 354 + tc filter del dev "$rp2" egress protocol ip pref 1 handle 101 flower 355 + sysctl_restore net.ipv4.conf."$rp1".rp_filter 356 + sysctl_restore net.ipv4.conf.all.rp_filter 334 357 } 335 358 336 359 trap cleanup EXIT
+3 -2
tools/testing/selftests/net/mptcp/mptcp_connect.c
··· 183 183 struct addrinfo *hints, 184 184 struct addrinfo **res) 185 185 { 186 - again: 187 - int err = getaddrinfo(node, service, hints, res); 186 + int err; 188 187 188 + again: 189 + err = getaddrinfo(node, service, hints, res); 189 190 if (err) { 190 191 const char *errstr; 191 192
+3 -2
tools/testing/selftests/net/mptcp/mptcp_inq.c
··· 75 75 struct addrinfo *hints, 76 76 struct addrinfo **res) 77 77 { 78 - again: 79 - int err = getaddrinfo(node, service, hints, res); 78 + int err; 80 79 80 + again: 81 + err = getaddrinfo(node, service, hints, res); 81 82 if (err) { 82 83 const char *errstr; 83 84
+1
tools/testing/selftests/net/mptcp/mptcp_join.sh
··· 3842 3842 # remove and re-add 3843 3843 if reset_with_events "delete re-add signal" && 3844 3844 mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then 3845 + ip netns exec $ns1 sysctl -q net.mptcp.add_addr_timeout=0 3845 3846 pm_nl_set_limits $ns1 0 3 3846 3847 pm_nl_set_limits $ns2 3 3 3847 3848 pm_nl_add_endpoint $ns1 10.0.2.1 id 1 flags signal
+3 -2
tools/testing/selftests/net/mptcp/mptcp_sockopt.c
··· 162 162 struct addrinfo *hints, 163 163 struct addrinfo **res) 164 164 { 165 - again: 166 - int err = getaddrinfo(node, service, hints, res); 165 + int err; 167 166 167 + again: 168 + err = getaddrinfo(node, service, hints, res); 168 169 if (err) { 169 170 const char *errstr; 170 171
+1
tools/testing/selftests/net/mptcp/pm_netlink.sh
··· 198 198 check "get_limits" "${default_limits}" "subflows above hard limit" 199 199 200 200 set_limits 8 8 201 + flush_endpoint ## to make sure it doesn't affect the limits 201 202 check "get_limits" "$(format_limits 8 8)" "set limits" 202 203 203 204 flush_endpoint
+300 -12
tools/testing/selftests/net/tls.c
··· 181 181 return sendmsg(fd, &msg, flags); 182 182 } 183 183 184 - static int tls_recv_cmsg(struct __test_metadata *_metadata, 185 - int fd, unsigned char record_type, 186 - void *data, size_t len, int flags) 184 + static int __tls_recv_cmsg(struct __test_metadata *_metadata, 185 + int fd, unsigned char *ctype, 186 + void *data, size_t len, int flags) 187 187 { 188 188 char cbuf[CMSG_SPACE(sizeof(char))]; 189 189 struct cmsghdr *cmsg; 190 - unsigned char ctype; 191 190 struct msghdr msg; 192 191 struct iovec vec; 193 192 int n; ··· 205 206 EXPECT_NE(cmsg, NULL); 206 207 EXPECT_EQ(cmsg->cmsg_level, SOL_TLS); 207 208 EXPECT_EQ(cmsg->cmsg_type, TLS_GET_RECORD_TYPE); 208 - ctype = *((unsigned char *)CMSG_DATA(cmsg)); 209 + if (ctype) 210 + *ctype = *((unsigned char *)CMSG_DATA(cmsg)); 211 + 212 + return n; 213 + } 214 + 215 + static int tls_recv_cmsg(struct __test_metadata *_metadata, 216 + int fd, unsigned char record_type, 217 + void *data, size_t len, int flags) 218 + { 219 + unsigned char ctype; 220 + int n; 221 + 222 + n = __tls_recv_cmsg(_metadata, fd, &ctype, data, len, flags); 209 223 EXPECT_EQ(ctype, record_type); 210 224 211 225 return n; ··· 2181 2169 } 2182 2170 } 2183 2171 2172 + struct raw_rec { 2173 + unsigned int plain_len; 2174 + unsigned char plain_data[100]; 2175 + unsigned int cipher_len; 2176 + unsigned char cipher_data[128]; 2177 + }; 2178 + 2179 + /* TLS 1.2, AES_CCM, data, seqno:0, plaintext: 'Hello world' */ 2180 + static const struct raw_rec id0_data_l11 = { 2181 + .plain_len = 11, 2182 + .plain_data = { 2183 + 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 2184 + 0x72, 0x6c, 0x64, 2185 + }, 2186 + .cipher_len = 40, 2187 + .cipher_data = { 2188 + 0x17, 0x03, 0x03, 0x00, 0x23, 0x00, 0x00, 0x00, 2189 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x26, 0xa2, 0x33, 2190 + 0xde, 0x8d, 0x94, 0xf0, 0x29, 0x6c, 0xb1, 0xaf, 2191 + 0x6a, 0x75, 0xb2, 0x93, 0xad, 0x45, 0xd5, 0xfd, 2192 + 0x03, 0x51, 0x57, 0x8f, 0xf9, 0xcc, 0x3b, 0x42, 2193 + }, 2194 + }; 2195 + 2196 
+ /* TLS 1.2, AES_CCM, ctrl, seqno:0, plaintext: '' */ 2197 + static const struct raw_rec id0_ctrl_l0 = { 2198 + .plain_len = 0, 2199 + .plain_data = { 2200 + }, 2201 + .cipher_len = 29, 2202 + .cipher_data = { 2203 + 0x16, 0x03, 0x03, 0x00, 0x18, 0x00, 0x00, 0x00, 2204 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x38, 0x7b, 2205 + 0xa6, 0x1c, 0xdd, 0xa7, 0x19, 0x33, 0xab, 0xae, 2206 + 0x88, 0xe1, 0xd2, 0x08, 0x4f, 2207 + }, 2208 + }; 2209 + 2210 + /* TLS 1.2, AES_CCM, data, seqno:0, plaintext: '' */ 2211 + static const struct raw_rec id0_data_l0 = { 2212 + .plain_len = 0, 2213 + .plain_data = { 2214 + }, 2215 + .cipher_len = 29, 2216 + .cipher_data = { 2217 + 0x17, 0x03, 0x03, 0x00, 0x18, 0x00, 0x00, 0x00, 2218 + 0x00, 0x00, 0x00, 0x00, 0x00, 0xc5, 0x37, 0x90, 2219 + 0x70, 0x45, 0x89, 0xfb, 0x5c, 0xc7, 0x89, 0x03, 2220 + 0x68, 0x80, 0xd3, 0xd8, 0xcc, 2221 + }, 2222 + }; 2223 + 2224 + /* TLS 1.2, AES_CCM, data, seqno:1, plaintext: 'Hello world' */ 2225 + static const struct raw_rec id1_data_l11 = { 2226 + .plain_len = 11, 2227 + .plain_data = { 2228 + 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 2229 + 0x72, 0x6c, 0x64, 2230 + }, 2231 + .cipher_len = 40, 2232 + .cipher_data = { 2233 + 0x17, 0x03, 0x03, 0x00, 0x23, 0x00, 0x00, 0x00, 2234 + 0x00, 0x00, 0x00, 0x00, 0x01, 0x3a, 0x1a, 0x9c, 2235 + 0xd0, 0xa8, 0x9a, 0xd6, 0x69, 0xd6, 0x1a, 0xe3, 2236 + 0xb5, 0x1f, 0x0d, 0x2c, 0xe2, 0x97, 0x46, 0xff, 2237 + 0x2b, 0xcc, 0x5a, 0xc4, 0xa3, 0xb9, 0xef, 0xba, 2238 + }, 2239 + }; 2240 + 2241 + /* TLS 1.2, AES_CCM, ctrl, seqno:1, plaintext: '' */ 2242 + static const struct raw_rec id1_ctrl_l0 = { 2243 + .plain_len = 0, 2244 + .plain_data = { 2245 + }, 2246 + .cipher_len = 29, 2247 + .cipher_data = { 2248 + 0x16, 0x03, 0x03, 0x00, 0x18, 0x00, 0x00, 0x00, 2249 + 0x00, 0x00, 0x00, 0x00, 0x01, 0x3e, 0xf0, 0xfe, 2250 + 0xee, 0xd9, 0xe2, 0x5d, 0xc7, 0x11, 0x4c, 0xe6, 2251 + 0xb4, 0x7e, 0xef, 0x40, 0x2b, 2252 + }, 2253 + }; 2254 + 2255 + /* TLS 1.2, AES_CCM, data, seqno:1, plaintext: '' 
*/ 2256 + static const struct raw_rec id1_data_l0 = { 2257 + .plain_len = 0, 2258 + .plain_data = { 2259 + }, 2260 + .cipher_len = 29, 2261 + .cipher_data = { 2262 + 0x17, 0x03, 0x03, 0x00, 0x18, 0x00, 0x00, 0x00, 2263 + 0x00, 0x00, 0x00, 0x00, 0x01, 0xce, 0xfc, 0x86, 2264 + 0xc8, 0xf0, 0x55, 0xf9, 0x47, 0x3f, 0x74, 0xdc, 2265 + 0xc9, 0xbf, 0xfe, 0x5b, 0xb1, 2266 + }, 2267 + }; 2268 + 2269 + /* TLS 1.2, AES_CCM, ctrl, seqno:2, plaintext: 'Hello world' */ 2270 + static const struct raw_rec id2_ctrl_l11 = { 2271 + .plain_len = 11, 2272 + .plain_data = { 2273 + 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 2274 + 0x72, 0x6c, 0x64, 2275 + }, 2276 + .cipher_len = 40, 2277 + .cipher_data = { 2278 + 0x16, 0x03, 0x03, 0x00, 0x23, 0x00, 0x00, 0x00, 2279 + 0x00, 0x00, 0x00, 0x00, 0x02, 0xe5, 0x3d, 0x19, 2280 + 0x3d, 0xca, 0xb8, 0x16, 0xb6, 0xff, 0x79, 0x87, 2281 + 0x2a, 0x04, 0x11, 0x3d, 0xf8, 0x64, 0x5f, 0x36, 2282 + 0x8b, 0xa8, 0xee, 0x4c, 0x6d, 0x62, 0xa5, 0x00, 2283 + }, 2284 + }; 2285 + 2286 + /* TLS 1.2, AES_CCM, data, seqno:2, plaintext: 'Hello world' */ 2287 + static const struct raw_rec id2_data_l11 = { 2288 + .plain_len = 11, 2289 + .plain_data = { 2290 + 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 2291 + 0x72, 0x6c, 0x64, 2292 + }, 2293 + .cipher_len = 40, 2294 + .cipher_data = { 2295 + 0x17, 0x03, 0x03, 0x00, 0x23, 0x00, 0x00, 0x00, 2296 + 0x00, 0x00, 0x00, 0x00, 0x02, 0xe5, 0x3d, 0x19, 2297 + 0x3d, 0xca, 0xb8, 0x16, 0xb6, 0xff, 0x79, 0x87, 2298 + 0x8e, 0xa1, 0xd0, 0xcd, 0x33, 0xb5, 0x86, 0x2b, 2299 + 0x17, 0xf1, 0x52, 0x2a, 0x55, 0x62, 0x65, 0x11, 2300 + }, 2301 + }; 2302 + 2303 + /* TLS 1.2, AES_CCM, ctrl, seqno:2, plaintext: '' */ 2304 + static const struct raw_rec id2_ctrl_l0 = { 2305 + .plain_len = 0, 2306 + .plain_data = { 2307 + }, 2308 + .cipher_len = 29, 2309 + .cipher_data = { 2310 + 0x16, 0x03, 0x03, 0x00, 0x18, 0x00, 0x00, 0x00, 2311 + 0x00, 0x00, 0x00, 0x00, 0x02, 0xdc, 0x5c, 0x0e, 2312 + 0x41, 0xdd, 0xba, 0xd3, 0xcc, 0xcf, 0x6d, 0xd9, 2313 + 
0x06, 0xdb, 0x79, 0xe5, 0x5d, 2314 + }, 2315 + }; 2316 + 2317 + /* TLS 1.2, AES_CCM, data, seqno:2, plaintext: '' */ 2318 + static const struct raw_rec id2_data_l0 = { 2319 + .plain_len = 0, 2320 + .plain_data = { 2321 + }, 2322 + .cipher_len = 29, 2323 + .cipher_data = { 2324 + 0x17, 0x03, 0x03, 0x00, 0x18, 0x00, 0x00, 0x00, 2325 + 0x00, 0x00, 0x00, 0x00, 0x02, 0xc3, 0xca, 0x26, 2326 + 0x22, 0xe4, 0x25, 0xfb, 0x5f, 0x6d, 0xbf, 0x83, 2327 + 0x30, 0x48, 0x69, 0x1a, 0x47, 2328 + }, 2329 + }; 2330 + 2331 + FIXTURE(zero_len) 2332 + { 2333 + int fd, cfd; 2334 + bool notls; 2335 + }; 2336 + 2337 + FIXTURE_VARIANT(zero_len) 2338 + { 2339 + const struct raw_rec *recs[4]; 2340 + ssize_t recv_ret[4]; 2341 + }; 2342 + 2343 + FIXTURE_VARIANT_ADD(zero_len, data_data_data) 2344 + { 2345 + .recs = { &id0_data_l11, &id1_data_l11, &id2_data_l11, }, 2346 + .recv_ret = { 33, -EAGAIN, }, 2347 + }; 2348 + 2349 + FIXTURE_VARIANT_ADD(zero_len, data_0ctrl_data) 2350 + { 2351 + .recs = { &id0_data_l11, &id1_ctrl_l0, &id2_data_l11, }, 2352 + .recv_ret = { 11, 0, 11, -EAGAIN, }, 2353 + }; 2354 + 2355 + FIXTURE_VARIANT_ADD(zero_len, 0data_0data_0data) 2356 + { 2357 + .recs = { &id0_data_l0, &id1_data_l0, &id2_data_l0, }, 2358 + .recv_ret = { -EAGAIN, }, 2359 + }; 2360 + 2361 + FIXTURE_VARIANT_ADD(zero_len, 0data_0data_ctrl) 2362 + { 2363 + .recs = { &id0_data_l0, &id1_data_l0, &id2_ctrl_l11, }, 2364 + .recv_ret = { 0, 11, -EAGAIN, }, 2365 + }; 2366 + 2367 + FIXTURE_VARIANT_ADD(zero_len, 0data_0data_0ctrl) 2368 + { 2369 + .recs = { &id0_data_l0, &id1_data_l0, &id2_ctrl_l0, }, 2370 + .recv_ret = { 0, 0, -EAGAIN, }, 2371 + }; 2372 + 2373 + FIXTURE_VARIANT_ADD(zero_len, 0ctrl_0ctrl_0ctrl) 2374 + { 2375 + .recs = { &id0_ctrl_l0, &id1_ctrl_l0, &id2_ctrl_l0, }, 2376 + .recv_ret = { 0, 0, 0, -EAGAIN, }, 2377 + }; 2378 + 2379 + FIXTURE_VARIANT_ADD(zero_len, 0data_0data_data) 2380 + { 2381 + .recs = { &id0_data_l0, &id1_data_l0, &id2_data_l11, }, 2382 + .recv_ret = { 11, -EAGAIN, }, 2383 + }; 2384 + 
2385 + FIXTURE_VARIANT_ADD(zero_len, data_0data_0data) 2386 + { 2387 + .recs = { &id0_data_l11, &id1_data_l0, &id2_data_l0, }, 2388 + .recv_ret = { 11, -EAGAIN, }, 2389 + }; 2390 + 2391 + FIXTURE_SETUP(zero_len) 2392 + { 2393 + struct tls_crypto_info_keys tls12; 2394 + int ret; 2395 + 2396 + tls_crypto_info_init(TLS_1_2_VERSION, TLS_CIPHER_AES_CCM_128, 2397 + &tls12, 0); 2398 + 2399 + ulp_sock_pair(_metadata, &self->fd, &self->cfd, &self->notls); 2400 + if (self->notls) 2401 + return; 2402 + 2403 + /* Don't install keys on fd, we'll send raw records */ 2404 + ret = setsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12, tls12.len); 2405 + ASSERT_EQ(ret, 0); 2406 + } 2407 + 2408 + FIXTURE_TEARDOWN(zero_len) 2409 + { 2410 + close(self->fd); 2411 + close(self->cfd); 2412 + } 2413 + 2414 + TEST_F(zero_len, test) 2415 + { 2416 + const struct raw_rec *const *rec; 2417 + unsigned char buf[128]; 2418 + int rec_off; 2419 + int i; 2420 + 2421 + for (i = 0; i < 4 && variant->recs[i]; i++) 2422 + EXPECT_EQ(send(self->fd, variant->recs[i]->cipher_data, 2423 + variant->recs[i]->cipher_len, 0), 2424 + variant->recs[i]->cipher_len); 2425 + 2426 + rec = &variant->recs[0]; 2427 + rec_off = 0; 2428 + for (i = 0; i < 4; i++) { 2429 + int j, ret; 2430 + 2431 + ret = variant->recv_ret[i] >= 0 ? 
variant->recv_ret[i] : -1; 2432 + EXPECT_EQ(__tls_recv_cmsg(_metadata, self->cfd, NULL, 2433 + buf, sizeof(buf), MSG_DONTWAIT), ret); 2434 + if (ret == -1) 2435 + EXPECT_EQ(errno, -variant->recv_ret[i]); 2436 + if (variant->recv_ret[i] == -EAGAIN) 2437 + break; 2438 + 2439 + for (j = 0; j < ret; j++) { 2440 + while (rec_off == (*rec)->plain_len) { 2441 + rec++; 2442 + rec_off = 0; 2443 + } 2444 + EXPECT_EQ(buf[j], (*rec)->plain_data[rec_off]); 2445 + rec_off++; 2446 + } 2447 + } 2448 + }; 2449 + 2184 2450 FIXTURE(tls_err) 2185 2451 { 2186 2452 int fd, cfd; ··· 3043 2753 pid = fork(); 3044 2754 ASSERT_GE(pid, 0); 3045 2755 if (!pid) { 3046 - EXPECT_EQ(recv(cfd, buf, sizeof(buf), MSG_WAITALL), 3047 - sizeof(buf)); 2756 + EXPECT_EQ(recv(cfd, buf, sizeof(buf) / 2, MSG_WAITALL), 2757 + sizeof(buf) / 2); 3048 2758 exit(!__test_passed(_metadata)); 3049 2759 } 3050 2760 3051 - usleep(2000); 2761 + usleep(10000); 3052 2762 ASSERT_EQ(setsockopt(fd, SOL_TLS, TLS_TX, &tls, tls.len), 0); 3053 2763 ASSERT_EQ(setsockopt(cfd, SOL_TLS, TLS_RX, &tls, tls.len), 0); 3054 2764 3055 2765 EXPECT_EQ(send(fd, buf, sizeof(buf), 0), sizeof(buf)); 3056 - usleep(2000); 2766 + EXPECT_EQ(wait(&status), pid); 2767 + EXPECT_EQ(status, 0); 3057 2768 EXPECT_EQ(recv(cfd, buf2, sizeof(buf2), MSG_DONTWAIT), -1); 3058 2769 /* Don't check errno, the error will be different depending 3059 2770 * on what random bytes TLS interpreted as the record length. ··· 3062 2771 3063 2772 close(fd); 3064 2773 close(cfd); 3065 - 3066 - EXPECT_EQ(wait(&status), pid); 3067 - EXPECT_EQ(status, 0); 3068 2774 } 3069 2775 3070 2776 static void __attribute__((constructor)) fips_check(void) {
+198
tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json
··· 186 186 ] 187 187 }, 188 188 { 189 + "id": "34c0", 190 + "name": "Test TBF with HHF Backlog Accounting in gso_skb case against underflow", 191 + "category": [ 192 + "qdisc", 193 + "tbf", 194 + "hhf" 195 + ], 196 + "plugins": { 197 + "requires": [ 198 + "nsPlugin" 199 + ] 200 + }, 201 + "setup": [ 202 + "$IP link set dev $DUMMY up || true", 203 + "$IP addr add 10.10.11.10/24 dev $DUMMY || true", 204 + "$TC qdisc add dev $DUMMY root handle 1: tbf rate 8bit burst 100b latency 100ms", 205 + "$TC qdisc replace dev $DUMMY handle 2: parent 1:1 hhf limit 1000", 206 + [ 207 + "ping -I $DUMMY -c2 10.10.11.11", 208 + 1 209 + ], 210 + "$TC qdisc change dev $DUMMY handle 2: parent 1:1 hhf limit 1" 211 + ], 212 + "cmdUnderTest": "$TC qdisc del dev $DUMMY handle 2: parent 1:1", 213 + "expExitCode": "0", 214 + "verifyCmd": "$TC -s qdisc show dev $DUMMY", 215 + "matchPattern": "backlog 0b 0p", 216 + "matchCount": "1", 217 + "teardown": [ 218 + "$TC qdisc del dev $DUMMY handle 1: root" 219 + ] 220 + }, 221 + { 222 + "id": "fd68", 223 + "name": "Test TBF with CODEL Backlog Accounting in gso_skb case against underflow", 224 + "category": [ 225 + "qdisc", 226 + "tbf", 227 + "codel" 228 + ], 229 + "plugins": { 230 + "requires": [ 231 + "nsPlugin" 232 + ] 233 + }, 234 + "setup": [ 235 + "$IP link set dev $DUMMY up || true", 236 + "$IP addr add 10.10.11.10/24 dev $DUMMY || true", 237 + "$TC qdisc add dev $DUMMY root handle 1: tbf rate 8bit burst 100b latency 100ms", 238 + "$TC qdisc replace dev $DUMMY handle 2: parent 1:1 codel limit 1000", 239 + [ 240 + "ping -I $DUMMY -c2 10.10.11.11", 241 + 1 242 + ], 243 + "$TC qdisc change dev $DUMMY handle 2: parent 1:1 codel limit 1" 244 + ], 245 + "cmdUnderTest": "$TC qdisc del dev $DUMMY handle 2: parent 1:1", 246 + "expExitCode": "0", 247 + "verifyCmd": "$TC -s qdisc show dev $DUMMY", 248 + "matchPattern": "backlog 0b 0p", 249 + "matchCount": "1", 250 + "teardown": [ 251 + "$TC qdisc del dev $DUMMY handle 1: root" 252 + ] 253 + }, 254 + { 
255 + "id": "514e", 256 + "name": "Test TBF with PIE Backlog Accounting in gso_skb case against underflow", 257 + "category": [ 258 + "qdisc", 259 + "tbf", 260 + "pie" 261 + ], 262 + "plugins": { 263 + "requires": [ 264 + "nsPlugin" 265 + ] 266 + }, 267 + "setup": [ 268 + "$IP link set dev $DUMMY up || true", 269 + "$IP addr add 10.10.11.10/24 dev $DUMMY || true", 270 + "$TC qdisc add dev $DUMMY root handle 1: tbf rate 8bit burst 100b latency 100ms", 271 + "$TC qdisc replace dev $DUMMY handle 2: parent 1:1 pie limit 1000", 272 + [ 273 + "ping -I $DUMMY -c2 10.10.11.11", 274 + 1 275 + ], 276 + "$TC qdisc change dev $DUMMY handle 2: parent 1:1 pie limit 1" 277 + ], 278 + "cmdUnderTest": "$TC qdisc del dev $DUMMY handle 2: parent 1:1", 279 + "expExitCode": "0", 280 + "verifyCmd": "$TC -s qdisc show dev $DUMMY", 281 + "matchPattern": "backlog 0b 0p", 282 + "matchCount": "1", 283 + "teardown": [ 284 + "$TC qdisc del dev $DUMMY handle 1: root" 285 + ] 286 + }, 287 + { 288 + "id": "6c97", 289 + "name": "Test TBF with FQ Backlog Accounting in gso_skb case against underflow", 290 + "category": [ 291 + "qdisc", 292 + "tbf", 293 + "fq" 294 + ], 295 + "plugins": { 296 + "requires": [ 297 + "nsPlugin" 298 + ] 299 + }, 300 + "setup": [ 301 + "$IP link set dev $DUMMY up || true", 302 + "$IP addr add 10.10.11.10/24 dev $DUMMY || true", 303 + "$TC qdisc add dev $DUMMY root handle 1: tbf rate 8bit burst 100b latency 100ms", 304 + "$TC qdisc replace dev $DUMMY handle 2: parent 1:1 fq limit 1000", 305 + [ 306 + "ping -I $DUMMY -c2 10.10.11.11", 307 + 1 308 + ], 309 + "$TC qdisc change dev $DUMMY handle 2: parent 1:1 fq limit 1" 310 + ], 311 + "cmdUnderTest": "$TC qdisc del dev $DUMMY handle 2: parent 1:1", 312 + "expExitCode": "0", 313 + "verifyCmd": "$TC -s qdisc show dev $DUMMY", 314 + "matchPattern": "backlog 0b 0p", 315 + "matchCount": "1", 316 + "teardown": [ 317 + "$TC qdisc del dev $DUMMY handle 1: root" 318 + ] 319 + }, 320 + { 321 + "id": "5d0b", 322 + "name": "Test TBF with 
FQ_CODEL Backlog Accounting in gso_skb case against underflow", 323 + "category": [ 324 + "qdisc", 325 + "tbf", 326 + "fq_codel" 327 + ], 328 + "plugins": { 329 + "requires": [ 330 + "nsPlugin" 331 + ] 332 + }, 333 + "setup": [ 334 + "$IP link set dev $DUMMY up || true", 335 + "$IP addr add 10.10.11.10/24 dev $DUMMY || true", 336 + "$TC qdisc add dev $DUMMY root handle 1: tbf rate 8bit burst 100b latency 100ms", 337 + "$TC qdisc replace dev $DUMMY handle 2: parent 1:1 fq_codel limit 1000", 338 + [ 339 + "ping -I $DUMMY -c2 10.10.11.11", 340 + 1 341 + ], 342 + "$TC qdisc change dev $DUMMY handle 2: parent 1:1 fq_codel limit 1" 343 + ], 344 + "cmdUnderTest": "$TC qdisc del dev $DUMMY handle 2: parent 1:1", 345 + "expExitCode": "0", 346 + "verifyCmd": "$TC -s qdisc show dev $DUMMY", 347 + "matchPattern": "backlog 0b 0p", 348 + "matchCount": "1", 349 + "teardown": [ 350 + "$TC qdisc del dev $DUMMY handle 1: root" 351 + ] 352 + }, 353 + { 354 + "id": "21c3", 355 + "name": "Test TBF with FQ_PIE Backlog Accounting in gso_skb case against underflow", 356 + "category": [ 357 + "qdisc", 358 + "tbf", 359 + "fq_pie" 360 + ], 361 + "plugins": { 362 + "requires": [ 363 + "nsPlugin" 364 + ] 365 + }, 366 + "setup": [ 367 + "$IP link set dev $DUMMY up || true", 368 + "$IP addr add 10.10.11.10/24 dev $DUMMY || true", 369 + "$TC qdisc add dev $DUMMY root handle 1: tbf rate 8bit burst 100b latency 100ms", 370 + "$TC qdisc replace dev $DUMMY handle 2: parent 1:1 fq_pie limit 1000", 371 + [ 372 + "ping -I $DUMMY -c2 10.10.11.11", 373 + 1 374 + ], 375 + "$TC qdisc change dev $DUMMY handle 2: parent 1:1 fq_pie limit 1" 376 + ], 377 + "cmdUnderTest": "$TC qdisc del dev $DUMMY handle 2: parent 1:1", 378 + "expExitCode": "0", 379 + "verifyCmd": "$TC -s qdisc show dev $DUMMY", 380 + "matchPattern": "backlog 0b 0p", 381 + "matchCount": "1", 382 + "teardown": [ 383 + "$TC qdisc del dev $DUMMY handle 1: root" 384 + ] 385 + }, 386 + { 189 387 "id": "a4bb", 190 388 "name": "Test FQ_CODEL with HTB 
parent - force packet drop with empty queue", 191 389 "category": [