
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR.

Conflicts:

drivers/net/ethernet/faraday/ftgmac100.c
4186c8d9e6af ("net: ftgmac100: Ensure tx descriptor updates are visible")
e24a6c874601 ("net: ftgmac100: Get link speed and duplex for NC-SI")
https://lore.kernel.org/0b851ec5-f91d-4dd3-99da-e81b98c9ed28@kernel.org

net/ipv4/tcp.c
bac76cf89816 ("tcp: fix forever orphan socket caused by tcp_abort")
edefba66d929 ("tcp: rstreason: introduce SK_RST_REASON_TCP_STATE for active reset")
https://lore.kernel.org/20240828112207.5c199d41@canb.auug.org.au

No adjacent changes.

Link: https://patch.msgid.link/20240829130829.39148-1-pabeni@redhat.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+2619 -1193
+1
.mailmap
··· 614 614 Sricharan Ramabadhran <quic_srichara@quicinc.com> <sricharan@codeaurora.org> 615 615 Srinivas Ramana <quic_sramana@quicinc.com> <sramana@codeaurora.org> 616 616 Sriram R <quic_srirrama@quicinc.com> <srirrama@codeaurora.org> 617 + Sriram Yagnaraman <sriram.yagnaraman@ericsson.com> <sriram.yagnaraman@est.tech> 617 618 Stanislav Fomichev <sdf@fomichev.me> <sdf@google.com> 618 619 Stefan Wahren <wahrenst@gmx.net> <stefan.wahren@i2se.com> 619 620 Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr>
+1 -1
Documentation/core-api/workqueue.rst
··· 260 260 is in flight at any given time and the work items are processed in 261 261 queueing order. While the combination of ``@max_active`` of 1 and 262 262 ``WQ_UNBOUND`` used to achieve this behavior, this is no longer the 263 - case. Use ``alloc_ordered_queue()`` instead. 263 + case. Use alloc_ordered_workqueue() instead. 264 264 265 265 266 266 Example Execution Scenarios
+1
Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml
··· 42 42 - focaltech,ft5426 43 43 - focaltech,ft5452 44 44 - focaltech,ft6236 45 + - focaltech,ft8201 45 46 - focaltech,ft8719 46 47 47 48 reg:
-12
Documentation/process/coding-style.rst
··· 629 629 * with beginning and ending almost-blank lines. 630 630 */ 631 631 632 - For files in net/ and drivers/net/ the preferred style for long (multi-line) 633 - comments is a little different. 634 - 635 - .. code-block:: c 636 - 637 - /* The preferred comment style for files in net/ and drivers/net 638 - * looks like this. 639 - * 640 - * It is nearly the same as the generally preferred comment style, 641 - * but there is no initial almost-blank line. 642 - */ 643 - 644 632 It's also important to comment data, whether they are basic types or derived 645 633 types. To this end, use just one data declaration per line (no commas for 646 634 multiple data declarations). This leaves you room for a small comment on each
-17
Documentation/process/maintainer-netdev.rst
··· 355 355 with better review coverage. Re-posting large series also increases the mailing 356 356 list traffic. 357 357 358 - Multi-line comments 359 - ~~~~~~~~~~~~~~~~~~~ 360 - 361 - Comment style convention is slightly different for networking and most of 362 - the tree. Instead of this:: 363 - 364 - /* 365 - * foobar blah blah blah 366 - * another line of text 367 - */ 368 - 369 - it is requested that you make it look like this:: 370 - 371 - /* foobar blah blah blah 372 - * another line of text 373 - */ 374 - 375 358 Local variable ordering ("reverse xmas tree", "RCS") 376 359 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 377 360
+3 -3
MAINTAINERS
··· 10176 10176 F: drivers/infiniband/hw/hns/ 10177 10177 10178 10178 HISILICON SAS Controller 10179 - M: Xiang Chen <chenxiang66@hisilicon.com> 10179 + M: Yihang Li <liyihang9@huawei.com> 10180 10180 S: Supported 10181 10181 W: http://www.hisilicon.com 10182 10182 F: Documentation/devicetree/bindings/scsi/hisilicon-sas.txt ··· 12168 12168 M: Chuck Lever <chuck.lever@oracle.com> 12169 12169 M: Jeff Layton <jlayton@kernel.org> 12170 12170 R: Neil Brown <neilb@suse.de> 12171 - R: Olga Kornievskaia <kolga@netapp.com> 12171 + R: Olga Kornievskaia <okorniev@redhat.com> 12172 12172 R: Dai Ngo <Dai.Ngo@oracle.com> 12173 12173 R: Tom Talpey <tom@talpey.com> 12174 12174 L: linux-nfs@vger.kernel.org ··· 18547 18547 18548 18548 QCOM AUDIO (ASoC) DRIVERS 18549 18549 M: Srinivas Kandagatla <srinivas.kandagatla@linaro.org> 18550 - M: Banajit Goswami <bgoswami@quicinc.com> 18551 18550 L: alsa-devel@alsa-project.org (moderated for non-subscribers) 18552 18551 L: linux-arm-msm@vger.kernel.org 18553 18552 S: Supported ··· 20376 20377 F: drivers/scsi/ 20377 20378 F: drivers/ufs/ 20378 20379 F: include/scsi/ 20380 + F: include/uapi/scsi/ 20379 20381 20380 20382 SCSI TAPE DRIVER 20381 20383 M: Kai Mäkisara <Kai.Makisara@kolumbus.fi>
+1 -1
Makefile
··· 2 2 VERSION = 6 3 3 PATCHLEVEL = 11 4 4 SUBLEVEL = 0 5 - EXTRAVERSION = -rc4 5 + EXTRAVERSION = -rc5 6 6 NAME = Baby Opossum Posse 7 7 8 8 # *DOCUMENTATION*
+8 -1
arch/arm64/kvm/mmu.c
··· 1540 1540 vma_pagesize = min(vma_pagesize, (long)max_map_size); 1541 1541 } 1542 1542 1543 - if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE) 1543 + /* 1544 + * Both the canonical IPA and fault IPA must be hugepage-aligned to 1545 + * ensure we find the right PFN and lay down the mapping in the right 1546 + * place. 1547 + */ 1548 + if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE) { 1544 1549 fault_ipa &= ~(vma_pagesize - 1); 1550 + ipa &= ~(vma_pagesize - 1); 1551 + } 1545 1552 1546 1553 gfn = ipa >> PAGE_SHIFT; 1547 1554 mte_allowed = kvm_vma_mte_allowed(vma);
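The masking in the hunk above is the usual power-of-two round-down idiom; a minimal standalone sketch of that arithmetic, with an assumed 2 MiB PMD_SIZE and an arbitrary example address (illustrative only, not kernel code):

#include <inttypes.h>
#include <stdio.h>

#define PMD_SIZE ((uint64_t)1 << 21)	/* assumed 2 MiB block size (4K-page arm64) */

int main(void)
{
	uint64_t ipa = 0x40212345;			/* arbitrary example guest address */
	uint64_t aligned = ipa & ~(PMD_SIZE - 1);	/* round down to the block boundary */

	/* prints 0x40212345 -> 0x40200000 */
	printf("0x%" PRIx64 " -> 0x%" PRIx64 "\n", ipa, aligned);
	return 0;
}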
+6
arch/arm64/kvm/sys_regs.c
··· 33 33 #include <trace/events/kvm.h> 34 34 35 35 #include "sys_regs.h" 36 + #include "vgic/vgic.h" 36 37 37 38 #include "trace.h" 38 39 ··· 435 434 const struct sys_reg_desc *r) 436 435 { 437 436 bool g1; 437 + 438 + if (!kvm_has_gicv3(vcpu->kvm)) { 439 + kvm_inject_undefined(vcpu); 440 + return false; 441 + } 438 442 439 443 if (!p->is_write) 440 444 return read_from_write_only(vcpu, p, r);
+1 -1
arch/arm64/kvm/vgic/vgic-debug.c
··· 85 85 struct vgic_irq *irq; 86 86 unsigned long intid; 87 87 88 - xa_for_each(&dist->lpi_xa, intid, irq) { 88 + xa_for_each_marked(&dist->lpi_xa, intid, irq, LPI_XA_MARK_DEBUG_ITER) { 89 89 xa_clear_mark(&dist->lpi_xa, intid, LPI_XA_MARK_DEBUG_ITER); 90 90 vgic_put_irq(kvm, irq); 91 91 }
+6 -3
arch/arm64/kvm/vgic/vgic-init.c
··· 417 417 kfree(vgic_cpu->private_irqs); 418 418 vgic_cpu->private_irqs = NULL; 419 419 420 - if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) { 421 - vgic_unregister_redist_iodev(vcpu); 420 + if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) 422 421 vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF; 423 - } 424 422 } 425 423 426 424 void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu) ··· 446 448 kvm_vgic_dist_destroy(kvm); 447 449 448 450 mutex_unlock(&kvm->arch.config_lock); 451 + 452 + if (kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) 453 + kvm_for_each_vcpu(i, vcpu, kvm) 454 + vgic_unregister_redist_iodev(vcpu); 455 + 449 456 mutex_unlock(&kvm->slots_lock); 450 457 } 451 458
+5
arch/arm64/kvm/vgic/vgic.c
··· 36 36 * we have to disable IRQs before taking this lock and everything lower 37 37 * than it. 38 38 * 39 + * The config_lock has additional ordering requirements: 40 + * kvm->slots_lock 41 + * kvm->srcu 42 + * kvm->arch.config_lock 43 + * 39 44 * If you need to take multiple locks, always take the upper lock first, 40 45 * then the lower ones, e.g. first take the its_lock, then the irq_lock. 41 46 * If you are already holding a lock and need to take a higher one, you
+7
arch/arm64/kvm/vgic/vgic.h
··· 346 346 void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val); 347 347 int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq); 348 348 349 + static inline bool kvm_has_gicv3(struct kvm *kvm) 350 + { 351 + return (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) && 352 + irqchip_in_kernel(kvm) && 353 + kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3); 354 + } 355 + 349 356 #endif
-11
arch/loongarch/include/asm/dma-direct.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - /* 3 - * Copyright (C) 2020-2022 Loongson Technology Corporation Limited 4 - */ 5 - #ifndef _LOONGARCH_DMA_DIRECT_H 6 - #define _LOONGARCH_DMA_DIRECT_H 7 - 8 - dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr); 9 - phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr); 10 - 11 - #endif /* _LOONGARCH_DMA_DIRECT_H */
+2
arch/loongarch/include/asm/hw_irq.h
··· 9 9 10 10 extern atomic_t irq_err_count; 11 11 12 + #define ARCH_IRQ_INIT_FLAGS IRQ_NOPROBE 13 + 12 14 /* 13 15 * interrupt-retrigger: NOP for now. This may not be appropriate for all 14 16 * machines, we'll see ...
-1
arch/loongarch/include/asm/kvm_vcpu.h
··· 76 76 #endif 77 77 78 78 void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz); 79 - void kvm_reset_timer(struct kvm_vcpu *vcpu); 80 79 void kvm_save_timer(struct kvm_vcpu *vcpu); 81 80 void kvm_restore_timer(struct kvm_vcpu *vcpu); 82 81
+4
arch/loongarch/kernel/fpu.S
··· 530 530 531 531 #ifdef CONFIG_CPU_HAS_LBT 532 532 STACK_FRAME_NON_STANDARD _restore_fp 533 + #ifdef CONFIG_CPU_HAS_LSX 533 534 STACK_FRAME_NON_STANDARD _restore_lsx 535 + #endif 536 + #ifdef CONFIG_CPU_HAS_LASX 534 537 STACK_FRAME_NON_STANDARD _restore_lasx 538 + #endif 535 539 #endif
-3
arch/loongarch/kernel/irq.c
··· 102 102 mp_ops.init_ipi(); 103 103 #endif 104 104 105 - for (i = 0; i < NR_IRQS; i++) 106 - irq_set_noprobe(i); 107 - 108 105 for_each_possible_cpu(i) { 109 106 page = alloc_pages_node(cpu_to_node(i), GFP_KERNEL, order); 110 107
+4
arch/loongarch/kvm/switch.S
··· 277 277 278 278 #ifdef CONFIG_CPU_HAS_LBT 279 279 STACK_FRAME_NON_STANDARD kvm_restore_fpu 280 + #ifdef CONFIG_CPU_HAS_LSX 280 281 STACK_FRAME_NON_STANDARD kvm_restore_lsx 282 + #endif 283 + #ifdef CONFIG_CPU_HAS_LASX 281 284 STACK_FRAME_NON_STANDARD kvm_restore_lasx 285 + #endif 282 286 #endif
-7
arch/loongarch/kvm/timer.c
··· 188 188 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ESTAT); 189 189 preempt_enable(); 190 190 } 191 - 192 - void kvm_reset_timer(struct kvm_vcpu *vcpu) 193 - { 194 - write_gcsr_timercfg(0); 195 - kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TCFG, 0); 196 - hrtimer_cancel(&vcpu->arch.swtimer); 197 - }
+1 -1
arch/loongarch/kvm/vcpu.c
··· 647 647 vcpu->kvm->arch.time_offset = (signed long)(v - drdtime()); 648 648 break; 649 649 case KVM_REG_LOONGARCH_VCPU_RESET: 650 - kvm_reset_timer(vcpu); 650 + vcpu->arch.st.guest_addr = 0; 651 651 memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending)); 652 652 memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear)); 653 653 break;
+7 -8
arch/mips/kernel/cevt-r4k.c
··· 303 303 if (!c0_compare_int_usable()) 304 304 return -ENXIO; 305 305 306 - /* 307 - * With vectored interrupts things are getting platform specific. 308 - * get_c0_compare_int is a hook to allow a platform to return the 309 - * interrupt number of its liking. 310 - */ 311 - irq = get_c0_compare_int(); 312 - 313 306 cd = &per_cpu(mips_clockevent_device, cpu); 314 307 315 308 cd->name = "MIPS"; ··· 313 320 min_delta = calculate_min_delta(); 314 321 315 322 cd->rating = 300; 316 - cd->irq = irq; 317 323 cd->cpumask = cpumask_of(cpu); 318 324 cd->set_next_event = mips_next_event; 319 325 cd->event_handler = mips_event_handler; ··· 323 331 return 0; 324 332 325 333 cp0_timer_irq_installed = 1; 334 + 335 + /* 336 + * With vectored interrupts things are getting platform specific. 337 + * get_c0_compare_int is a hook to allow a platform to return the 338 + * interrupt number of its liking. 339 + */ 340 + irq = get_c0_compare_int(); 326 341 327 342 if (request_irq(irq, c0_compare_interrupt, flags, "timer", 328 343 c0_compare_interrupt))
+4
arch/mips/kernel/cpu-probe.c
··· 1724 1724 c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM | 1725 1725 MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2); 1726 1726 c->ases &= ~MIPS_ASE_VZ; /* VZ of Loongson-3A2000/3000 is incomplete */ 1727 + change_c0_config6(LOONGSON_CONF6_EXTIMER | LOONGSON_CONF6_INTIMER, 1728 + LOONGSON_CONF6_INTIMER); 1727 1729 break; 1728 1730 case PRID_IMP_LOONGSON_64G: 1729 1731 __cpu_name[cpu] = "ICT Loongson-3"; 1730 1732 set_elf_platform(cpu, "loongson3a"); 1731 1733 set_isa(c, MIPS_CPU_ISA_M64R2); 1732 1734 decode_cpucfg(c); 1735 + change_c0_config6(LOONGSON_CONF6_EXTIMER | LOONGSON_CONF6_INTIMER, 1736 + LOONGSON_CONF6_INTIMER); 1733 1737 break; 1734 1738 default: 1735 1739 panic("Unknown Loongson Processor ID!");
+13
arch/s390/Kconfig
··· 604 604 as a security feature that deters exploit attempts relying on 605 605 knowledge of the location of kernel internals. 606 606 607 + config RANDOMIZE_IDENTITY_BASE 608 + bool "Randomize the address of the identity mapping base" 609 + depends on RANDOMIZE_BASE 610 + default DEBUG_VM 611 + help 612 + The identity mapping base address is pinned to zero by default. 613 + Allow randomization of that base to expose otherwise missed 614 + notion of physical and virtual addresses of data structures. 615 + That does not have any impact on the base address at which the 616 + kernel image is loaded. 617 + 618 + If unsure, say N 619 + 607 620 config KERNEL_IMAGE_BASE 608 621 hex "Kernel image base address" 609 622 range 0x100000 0x1FFFFFE0000000 if !KASAN
+32 -26
arch/s390/boot/startup.c
··· 162 162 loc = (long)*reloc + phys_offset; 163 163 if (loc < min_addr || loc > max_addr) 164 164 error("64-bit relocation outside of kernel!\n"); 165 - *(u64 *)loc += offset - __START_KERNEL; 165 + *(u64 *)loc += offset; 166 166 } 167 167 } 168 168 ··· 177 177 */ 178 178 for (entry = (u64 *)vmlinux.got_start; entry < (u64 *)vmlinux.got_end; entry++) { 179 179 if (*entry) 180 - *entry += offset - __START_KERNEL; 180 + *entry += offset; 181 181 } 182 182 } 183 183 ··· 252 252 vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page); 253 253 254 254 /* choose kernel address space layout: 4 or 3 levels. */ 255 - BUILD_BUG_ON(!IS_ALIGNED(__START_KERNEL, THREAD_SIZE)); 255 + BUILD_BUG_ON(!IS_ALIGNED(TEXT_OFFSET, THREAD_SIZE)); 256 256 BUILD_BUG_ON(!IS_ALIGNED(__NO_KASLR_START_KERNEL, THREAD_SIZE)); 257 257 BUILD_BUG_ON(__NO_KASLR_END_KERNEL > _REGION1_SIZE); 258 258 vsize = get_vmem_size(ident_map_size, vmemmap_size, vmalloc_size, _REGION3_SIZE); ··· 341 341 BUILD_BUG_ON(MAX_DCSS_ADDR > (1UL << MAX_PHYSMEM_BITS)); 342 342 max_mappable = max(ident_map_size, MAX_DCSS_ADDR); 343 343 max_mappable = min(max_mappable, vmemmap_start); 344 - __identity_base = round_down(vmemmap_start - max_mappable, rte_size); 344 + if (IS_ENABLED(CONFIG_RANDOMIZE_IDENTITY_BASE)) 345 + __identity_base = round_down(vmemmap_start - max_mappable, rte_size); 345 346 346 347 return asce_limit; 347 348 } ··· 389 388 #endif 390 389 } 391 390 392 - static void fixup_vmlinux_info(void) 393 - { 394 - vmlinux.entry -= __START_KERNEL; 395 - kaslr_adjust_vmlinux_info(-__START_KERNEL); 396 - } 397 - 398 391 void startup_kernel(void) 399 392 { 400 - unsigned long kernel_size = vmlinux.image_size + vmlinux.bss_size; 401 - unsigned long nokaslr_offset_phys, kaslr_large_page_offset; 402 - unsigned long amode31_lma = 0; 393 + unsigned long vmlinux_size = vmlinux.image_size + vmlinux.bss_size; 394 + unsigned long nokaslr_text_lma, text_lma = 0, amode31_lma = 0; 395 + unsigned long kernel_size = TEXT_OFFSET + vmlinux_size; 396 + unsigned long kaslr_large_page_offset; 403 397 unsigned long max_physmem_end; 404 398 unsigned long asce_limit; 405 399 unsigned long safe_addr; 406 400 psw_t psw; 407 401 408 - fixup_vmlinux_info(); 409 402 setup_lpp(); 410 403 411 404 /* 412 405 * Non-randomized kernel physical start address must be _SEGMENT_SIZE 413 406 * aligned (see blow). 
414 407 */ 415 - nokaslr_offset_phys = ALIGN(mem_safe_offset(), _SEGMENT_SIZE); 416 - safe_addr = PAGE_ALIGN(nokaslr_offset_phys + kernel_size); 408 + nokaslr_text_lma = ALIGN(mem_safe_offset(), _SEGMENT_SIZE); 409 + safe_addr = PAGE_ALIGN(nokaslr_text_lma + vmlinux_size); 417 410 418 411 /* 419 412 * Reserve decompressor memory together with decompression heap, ··· 451 456 */ 452 457 kaslr_large_page_offset = __kaslr_offset & ~_SEGMENT_MASK; 453 458 if (kaslr_enabled()) { 454 - unsigned long end = ident_map_size - kaslr_large_page_offset; 459 + unsigned long size = vmlinux_size + kaslr_large_page_offset; 455 460 456 - __kaslr_offset_phys = randomize_within_range(kernel_size, _SEGMENT_SIZE, 0, end); 461 + text_lma = randomize_within_range(size, _SEGMENT_SIZE, TEXT_OFFSET, ident_map_size); 457 462 } 458 - if (!__kaslr_offset_phys) 459 - __kaslr_offset_phys = nokaslr_offset_phys; 460 - __kaslr_offset_phys |= kaslr_large_page_offset; 463 + if (!text_lma) 464 + text_lma = nokaslr_text_lma; 465 + text_lma |= kaslr_large_page_offset; 466 + 467 + /* 468 + * [__kaslr_offset_phys..__kaslr_offset_phys + TEXT_OFFSET] region is 469 + * never accessed via the kernel image mapping as per the linker script: 470 + * 471 + * . = TEXT_OFFSET; 472 + * 473 + * Therefore, this region could be used for something else and does 474 + * not need to be reserved. See how it is skipped in setup_vmem(). 475 + */ 476 + __kaslr_offset_phys = text_lma - TEXT_OFFSET; 461 477 kaslr_adjust_vmlinux_info(__kaslr_offset_phys); 462 - physmem_reserve(RR_VMLINUX, __kaslr_offset_phys, kernel_size); 463 - deploy_kernel((void *)__kaslr_offset_phys); 478 + physmem_reserve(RR_VMLINUX, text_lma, vmlinux_size); 479 + deploy_kernel((void *)text_lma); 464 480 465 481 /* vmlinux decompression is done, shrink reserved low memory */ 466 482 physmem_reserve(RR_DECOMPRESSOR, 0, (unsigned long)_decompressor_end); ··· 494 488 amode31_lma = randomize_within_range(vmlinux.amode31_size, PAGE_SIZE, amode31_min, SZ_2G); 495 489 } 496 490 if (!amode31_lma) 497 - amode31_lma = __kaslr_offset_phys - vmlinux.amode31_size; 491 + amode31_lma = text_lma - vmlinux.amode31_size; 498 492 physmem_reserve(RR_AMODE31, amode31_lma, vmlinux.amode31_size); 499 493 500 494 /* ··· 510 504 * - copy_bootdata() must follow setup_vmem() to propagate changes 511 505 * to bootdata made by setup_vmem() 512 506 */ 513 - clear_bss_section(__kaslr_offset_phys); 514 - kaslr_adjust_relocs(__kaslr_offset_phys, __kaslr_offset_phys + vmlinux.image_size, 507 + clear_bss_section(text_lma); 508 + kaslr_adjust_relocs(text_lma, text_lma + vmlinux.image_size, 515 509 __kaslr_offset, __kaslr_offset_phys); 516 510 kaslr_adjust_got(__kaslr_offset); 517 511 setup_vmem(__kaslr_offset, __kaslr_offset + kernel_size, asce_limit);
+12 -2
arch/s390/boot/vmem.c
··· 90 90 } 91 91 memgap_start = end; 92 92 } 93 - kasan_populate(kernel_start, kernel_end, POPULATE_KASAN_MAP_SHADOW); 93 + kasan_populate(kernel_start + TEXT_OFFSET, kernel_end, POPULATE_KASAN_MAP_SHADOW); 94 94 kasan_populate(0, (unsigned long)__identity_va(0), POPULATE_KASAN_ZERO_SHADOW); 95 95 kasan_populate(AMODE31_START, AMODE31_END, POPULATE_KASAN_ZERO_SHADOW); 96 96 if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) { ··· 475 475 (unsigned long)__identity_va(end), 476 476 POPULATE_IDENTITY); 477 477 } 478 - pgtable_populate(kernel_start, kernel_end, POPULATE_KERNEL); 478 + 479 + /* 480 + * [kernel_start..kernel_start + TEXT_OFFSET] region is never 481 + * accessed as per the linker script: 482 + * 483 + * . = TEXT_OFFSET; 484 + * 485 + * Therefore, skip mapping TEXT_OFFSET bytes to prevent access to 486 + * [__kaslr_offset_phys..__kaslr_offset_phys + TEXT_OFFSET] region. 487 + */ 488 + pgtable_populate(kernel_start + TEXT_OFFSET, kernel_end, POPULATE_KERNEL); 479 489 pgtable_populate(AMODE31_START, AMODE31_END, POPULATE_DIRECT); 480 490 pgtable_populate(__abs_lowcore, __abs_lowcore + sizeof(struct lowcore), 481 491 POPULATE_ABS_LOWCORE);
+6 -1
arch/s390/boot/vmlinux.lds.S
··· 109 109 #ifdef CONFIG_KERNEL_UNCOMPRESSED 110 110 . = ALIGN(PAGE_SIZE); 111 111 . += AMODE31_SIZE; /* .amode31 section */ 112 - . = ALIGN(1 << 20); /* _SEGMENT_SIZE */ 112 + 113 + /* 114 + * Make sure the location counter is not less than TEXT_OFFSET. 115 + * _SEGMENT_SIZE is not available, use ALIGN(1 << 20) instead. 116 + */ 117 + . = MAX(TEXT_OFFSET, ALIGN(1 << 20)); 113 118 #else 114 119 . = ALIGN(8); 115 120 #endif
+2 -1
arch/s390/include/asm/page.h
··· 279 279 #define AMODE31_SIZE (3 * PAGE_SIZE) 280 280 281 281 #define KERNEL_IMAGE_SIZE (512 * 1024 * 1024) 282 - #define __START_KERNEL 0x100000 283 282 #define __NO_KASLR_START_KERNEL CONFIG_KERNEL_IMAGE_BASE 284 283 #define __NO_KASLR_END_KERNEL (__NO_KASLR_START_KERNEL + KERNEL_IMAGE_SIZE) 284 + 285 + #define TEXT_OFFSET 0x100000 285 286 286 287 #endif /* _S390_PAGE_H */
+18 -1
arch/s390/kernel/setup.c
··· 734 734 } 735 735 736 736 /* 737 - * Reserve memory used for lowcore/command line/kernel image. 737 + * Reserve memory used for lowcore. 738 + */ 739 + static void __init reserve_lowcore(void) 740 + { 741 + void *lowcore_start = get_lowcore(); 742 + void *lowcore_end = lowcore_start + sizeof(struct lowcore); 743 + void *start, *end; 744 + 745 + if ((void *)__identity_base < lowcore_end) { 746 + start = max(lowcore_start, (void *)__identity_base); 747 + end = min(lowcore_end, (void *)(__identity_base + ident_map_size)); 748 + memblock_reserve(__pa(start), __pa(end)); 749 + } 750 + } 751 + 752 + /* 753 + * Reserve memory used for absolute lowcore/command line/kernel image. 738 754 */ 739 755 static void __init reserve_kernel(void) 740 756 { ··· 934 918 935 919 /* Do some memory reservations *before* memory is added to memblock */ 936 920 reserve_pgtables(); 921 + reserve_lowcore(); 937 922 reserve_kernel(); 938 923 reserve_initrd(); 939 924 reserve_certificate_list();
+1 -1
arch/s390/kernel/vmlinux.lds.S
··· 39 39 40 40 SECTIONS 41 41 { 42 - . = __START_KERNEL; 42 + . = TEXT_OFFSET; 43 43 .text : { 44 44 _stext = .; /* Start of text section */ 45 45 _text = .; /* Text and read-only data */
+1 -1
arch/s390/tools/relocs.c
··· 280 280 case R_390_GOTOFF64: 281 281 break; 282 282 case R_390_64: 283 - add_reloc(&relocs64, offset - ehdr.e_entry); 283 + add_reloc(&relocs64, offset); 284 284 break; 285 285 default: 286 286 die("Unsupported relocation type: %d\n", r_type);
+18 -7
block/blk-lib.c
··· 111 111 (UINT_MAX >> SECTOR_SHIFT) & ~bs_mask); 112 112 } 113 113 114 + /* 115 + * There is no reliable way for the SCSI subsystem to determine whether a 116 + * device supports a WRITE SAME operation without actually performing a write 117 + * to media. As a result, write_zeroes is enabled by default and will be 118 + * disabled if a zeroing operation subsequently fails. This means that this 119 + * queue limit is likely to change at runtime. 120 + */ 114 121 static void __blkdev_issue_write_zeroes(struct block_device *bdev, 115 122 sector_t sector, sector_t nr_sects, gfp_t gfp_mask, 116 - struct bio **biop, unsigned flags) 123 + struct bio **biop, unsigned flags, sector_t limit) 117 124 { 125 + 118 126 while (nr_sects) { 119 - unsigned int len = min_t(sector_t, nr_sects, 120 - bio_write_zeroes_limit(bdev)); 127 + unsigned int len = min(nr_sects, limit); 121 128 struct bio *bio; 122 129 123 130 if ((flags & BLKDEV_ZERO_KILLABLE) && ··· 148 141 static int blkdev_issue_write_zeroes(struct block_device *bdev, sector_t sector, 149 142 sector_t nr_sects, gfp_t gfp, unsigned flags) 150 143 { 144 + sector_t limit = bio_write_zeroes_limit(bdev); 151 145 struct bio *bio = NULL; 152 146 struct blk_plug plug; 153 147 int ret = 0; 154 148 155 149 blk_start_plug(&plug); 156 - __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp, &bio, flags); 150 + __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp, &bio, 151 + flags, limit); 157 152 if (bio) { 158 153 if ((flags & BLKDEV_ZERO_KILLABLE) && 159 154 fatal_signal_pending(current)) { ··· 174 165 * on an I/O error, in which case we'll turn any error into 175 166 * "not supported" here. 176 167 */ 177 - if (ret && !bdev_write_zeroes_sectors(bdev)) 168 + if (ret && !limit) 178 169 return -EOPNOTSUPP; 179 170 return ret; 180 171 } ··· 274 265 sector_t nr_sects, gfp_t gfp_mask, struct bio **biop, 275 266 unsigned flags) 276 267 { 268 + sector_t limit = bio_write_zeroes_limit(bdev); 269 + 277 270 if (bdev_read_only(bdev)) 278 271 return -EPERM; 279 272 280 - if (bdev_write_zeroes_sectors(bdev)) { 273 + if (limit) { 281 274 __blkdev_issue_write_zeroes(bdev, sector, nr_sects, 282 - gfp_mask, biop, flags); 275 + gfp_mask, biop, flags, limit); 283 276 } else { 284 277 if (flags & BLKDEV_ZERO_NOFALLBACK) 285 278 return -EOPNOTSUPP;
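The hunk above samples the mutable write-zeroes limit once and caps every chunk against that snapshot; a minimal self-contained sketch of that pattern follows (the names are illustrative, not the block layer API):

#include <inttypes.h>
#include <stdio.h>

/* issue a long request as chunks no larger than a snapshotted limit */
static void issue_in_chunks(uint64_t nr_sects, uint64_t limit)
{
	while (nr_sects) {
		uint64_t len = nr_sects < limit ? nr_sects : limit;

		printf("submit chunk of %" PRIu64 " sectors\n", len);
		nr_sects -= len;
	}
}

int main(void)
{
	/* the limit is read once up front, so every chunk is sized against
	 * the same value even if the device limit changes mid-operation */
	issue_in_chunks(10000, 4096);
	return 0;
}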
+22
drivers/acpi/video_detect.c
··· 54 54 acpi_backlight_cmdline = acpi_backlight_nvidia_wmi_ec; 55 55 if (!strcmp("apple_gmux", acpi_video_backlight_string)) 56 56 acpi_backlight_cmdline = acpi_backlight_apple_gmux; 57 + if (!strcmp("dell_uart", acpi_video_backlight_string)) 58 + acpi_backlight_cmdline = acpi_backlight_dell_uart; 57 59 if (!strcmp("none", acpi_video_backlight_string)) 58 60 acpi_backlight_cmdline = acpi_backlight_none; 59 61 } ··· 824 822 }, 825 823 826 824 /* 825 + * Dell AIO (All in Ones) which advertise an UART attached backlight 826 + * controller board in their ACPI tables (and may even have one), but 827 + * which need native backlight control nevertheless. 828 + */ 829 + { 830 + /* https://bugzilla.redhat.com/show_bug.cgi?id=2303936 */ 831 + .callback = video_detect_force_native, 832 + /* Dell OptiPlex 7760 AIO */ 833 + .matches = { 834 + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 835 + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 7760 AIO"), 836 + }, 837 + }, 838 + 839 + /* 827 840 * Models which have nvidia-ec-wmi support, but should not use it. 828 841 * Note this indicates a likely firmware bug on these models and should 829 842 * be revisited if/when Linux gets support for dynamic mux mode. ··· 935 918 static DEFINE_MUTEX(init_mutex); 936 919 static bool nvidia_wmi_ec_present; 937 920 static bool apple_gmux_present; 921 + static bool dell_uart_present; 938 922 static bool native_available; 939 923 static bool init_done; 940 924 static long video_caps; ··· 950 932 &video_caps, NULL); 951 933 nvidia_wmi_ec_present = nvidia_wmi_ec_supported(); 952 934 apple_gmux_present = apple_gmux_detect(NULL, NULL); 935 + dell_uart_present = acpi_dev_present("DELL0501", NULL, -1); 953 936 init_done = true; 954 937 } 955 938 if (native) ··· 980 961 981 962 if (apple_gmux_present) 982 963 return acpi_backlight_apple_gmux; 964 + 965 + if (dell_uart_present) 966 + return acpi_backlight_dell_uart; 983 967 984 968 /* Use ACPI video if available, except when native should be preferred. */ 985 969 if ((video_caps & ACPI_VIDEO_BACKLIGHT) &&
+20 -10
drivers/ata/pata_macio.c
··· 208 208 /* Don't let a DMA segment go all the way to 64K */ 209 209 #define MAX_DBDMA_SEG 0xff00 210 210 211 + #ifdef CONFIG_PAGE_SIZE_64KB 212 + /* 213 + * The SCSI core requires the segment size to cover at least a page, so 214 + * for 64K page size kernels it must be at least 64K. However the 215 + * hardware can't handle 64K, so pata_macio_qc_prep() will split large 216 + * requests. To handle the split requests the tablesize must be halved. 217 + */ 218 + #define PATA_MACIO_MAX_SEGMENT_SIZE SZ_64K 219 + #define PATA_MACIO_SG_TABLESIZE (MAX_DCMDS / 2) 220 + #else 221 + #define PATA_MACIO_MAX_SEGMENT_SIZE MAX_DBDMA_SEG 222 + #define PATA_MACIO_SG_TABLESIZE MAX_DCMDS 223 + #endif 211 224 212 225 /* 213 226 * Wait 1s for disk to answer on IDE bus after a hard reset ··· 554 541 555 542 while (sg_len) { 556 543 /* table overflow should never happen */ 557 - BUG_ON (pi++ >= MAX_DCMDS); 544 + if (WARN_ON_ONCE(pi >= MAX_DCMDS)) 545 + return AC_ERR_SYSTEM; 558 546 559 547 len = (sg_len < MAX_DBDMA_SEG) ? sg_len : MAX_DBDMA_SEG; 560 548 table->command = cpu_to_le16(write ? OUTPUT_MORE: INPUT_MORE); ··· 567 553 addr += len; 568 554 sg_len -= len; 569 555 ++table; 556 + ++pi; 570 557 } 571 558 } 572 559 573 560 /* Should never happen according to Tejun */ 574 - BUG_ON(!pi); 561 + if (WARN_ON_ONCE(!pi)) 562 + return AC_ERR_SYSTEM; 575 563 576 564 /* Convert the last command to an input/output */ 577 565 table--; ··· 928 912 929 913 static const struct scsi_host_template pata_macio_sht = { 930 914 __ATA_BASE_SHT(DRV_NAME), 931 - .sg_tablesize = MAX_DCMDS, 915 + .sg_tablesize = PATA_MACIO_SG_TABLESIZE, 932 916 /* We may not need that strict one */ 933 917 .dma_boundary = ATA_DMA_BOUNDARY, 934 - /* 935 - * The SCSI core requires the segment size to cover at least a page, so 936 - * for 64K page size kernels this must be at least 64K. However the 937 - * hardware can't handle 64K, so pata_macio_qc_prep() will split large 938 - * requests. 939 - */ 940 - .max_segment_size = SZ_64K, 918 + .max_segment_size = PATA_MACIO_MAX_SEGMENT_SIZE, 941 919 .device_configure = pata_macio_device_configure, 942 920 .sdev_groups = ata_common_sdev_groups, 943 921 .can_queue = ATA_DEF_QUEUE,
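A rough worked check of the 64K-page sizing above, as a small standalone program (the MAX_DCMDS value here is assumed for illustration):

#include <stdio.h>

#define SZ_64K		0x10000
#define MAX_DBDMA_SEG	0xff00	/* hardware limit per DBDMA descriptor */
#define MAX_DCMDS	256	/* assumed descriptor table size, for illustration */

int main(void)
{
	/* one 64K SCSI segment splits into two DBDMA descriptors
	 * (0xff00 + 0x0100), so the sg table is halved to keep the
	 * descriptor table from overflowing */
	int per_seg = (SZ_64K + MAX_DBDMA_SEG - 1) / MAX_DBDMA_SEG;

	printf("descriptors per 64K segment: %d, sg_tablesize: %d\n",
	       per_seg, MAX_DCMDS / per_seg);
	return 0;
}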
+124
drivers/bluetooth/btintel.c
··· 12 12 #include <linux/acpi.h> 13 13 #include <acpi/acpi_bus.h> 14 14 #include <asm/unaligned.h> 15 + #include <linux/efi.h> 15 16 16 17 #include <net/bluetooth/bluetooth.h> 17 18 #include <net/bluetooth/hci_core.h> ··· 26 25 #define CSS_HEADER_OFFSET 8 27 26 #define ECDSA_OFFSET 644 28 27 #define ECDSA_HEADER_LEN 320 28 + 29 + #define BTINTEL_EFI_DSBR L"UefiCnvCommonDSBR" 29 30 30 31 enum { 31 32 DSM_SET_WDISABLE2_DELAY = 1, ··· 2619 2616 return hci_skb_pkt_type(skb); 2620 2617 } 2621 2618 2619 + /* 2620 + * UefiCnvCommonDSBR UEFI variable provides information from the OEM platforms 2621 + * if they have replaced the BRI (Bluetooth Radio Interface) resistor to 2622 + * overcome the potential STEP errors on their designs. Based on the 2623 + * configauration, bluetooth firmware shall adjust the BRI response line drive 2624 + * strength. The below structure represents DSBR data. 2625 + * struct { 2626 + * u8 header; 2627 + * u32 dsbr; 2628 + * } __packed; 2629 + * 2630 + * header - defines revision number of the structure 2631 + * dsbr - defines drive strength BRI response 2632 + * bit0 2633 + * 0 - instructs bluetooth firmware to use default values 2634 + * 1 - instructs bluetooth firmware to override default values 2635 + * bit3:1 2636 + * Reserved 2637 + * bit7:4 2638 + * DSBR override values (only if bit0 is set. Default value is 0xF 2639 + * bit31:7 2640 + * Reserved 2641 + * Expected values for dsbr field: 2642 + * 1. 0xF1 - indicates that the resistor on board is 33 Ohm 2643 + * 2. 0x00 or 0xB1 - indicates that the resistor on board is 10 Ohm 2644 + * 3. Non existing UEFI variable or invalid (none of the above) - indicates 2645 + * that the resistor on board is 10 Ohm 2646 + * Even if uefi variable is not present, driver shall send 0xfc0a command to 2647 + * firmware to use default values. 2648 + * 2649 + */ 2650 + static int btintel_uefi_get_dsbr(u32 *dsbr_var) 2651 + { 2652 + struct btintel_dsbr { 2653 + u8 header; 2654 + u32 dsbr; 2655 + } __packed data; 2656 + 2657 + efi_status_t status; 2658 + unsigned long data_size = 0; 2659 + efi_guid_t guid = EFI_GUID(0xe65d8884, 0xd4af, 0x4b20, 0x8d, 0x03, 2660 + 0x77, 0x2e, 0xcc, 0x3d, 0xa5, 0x31); 2661 + 2662 + if (!IS_ENABLED(CONFIG_EFI)) 2663 + return -EOPNOTSUPP; 2664 + 2665 + if (!efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE)) 2666 + return -EOPNOTSUPP; 2667 + 2668 + status = efi.get_variable(BTINTEL_EFI_DSBR, &guid, NULL, &data_size, 2669 + NULL); 2670 + 2671 + if (status != EFI_BUFFER_TOO_SMALL || !data_size) 2672 + return -EIO; 2673 + 2674 + status = efi.get_variable(BTINTEL_EFI_DSBR, &guid, NULL, &data_size, 2675 + &data); 2676 + 2677 + if (status != EFI_SUCCESS) 2678 + return -ENXIO; 2679 + 2680 + *dsbr_var = data.dsbr; 2681 + return 0; 2682 + } 2683 + 2684 + static int btintel_set_dsbr(struct hci_dev *hdev, struct intel_version_tlv *ver) 2685 + { 2686 + struct btintel_dsbr_cmd { 2687 + u8 enable; 2688 + u8 dsbr; 2689 + } __packed; 2690 + 2691 + struct btintel_dsbr_cmd cmd; 2692 + struct sk_buff *skb; 2693 + u8 status; 2694 + u32 dsbr; 2695 + bool apply_dsbr; 2696 + int err; 2697 + 2698 + /* DSBR command needs to be sent for BlazarI + B0 step product after 2699 + * downloading IML image. 
2700 + */ 2701 + apply_dsbr = (ver->img_type == BTINTEL_IMG_IML && 2702 + ((ver->cnvi_top & 0xfff) == BTINTEL_CNVI_BLAZARI) && 2703 + INTEL_CNVX_TOP_STEP(ver->cnvi_top) == 0x01); 2704 + 2705 + if (!apply_dsbr) 2706 + return 0; 2707 + 2708 + dsbr = 0; 2709 + err = btintel_uefi_get_dsbr(&dsbr); 2710 + if (err < 0) 2711 + bt_dev_dbg(hdev, "Error reading efi: %ls (%d)", 2712 + BTINTEL_EFI_DSBR, err); 2713 + 2714 + cmd.enable = dsbr & BIT(0); 2715 + cmd.dsbr = dsbr >> 4 & 0xF; 2716 + 2717 + bt_dev_info(hdev, "dsbr: enable: 0x%2.2x value: 0x%2.2x", cmd.enable, 2718 + cmd.dsbr); 2719 + 2720 + skb = __hci_cmd_sync(hdev, 0xfc0a, sizeof(cmd), &cmd, HCI_CMD_TIMEOUT); 2721 + if (IS_ERR(skb)) 2722 + return -bt_to_errno(PTR_ERR(skb)); 2723 + 2724 + status = skb->data[0]; 2725 + kfree_skb(skb); 2726 + 2727 + if (status) 2728 + return -bt_to_errno(status); 2729 + 2730 + return 0; 2731 + } 2732 + 2622 2733 int btintel_bootloader_setup_tlv(struct hci_dev *hdev, 2623 2734 struct intel_version_tlv *ver) 2624 2735 { ··· 2766 2649 err = btintel_read_version_tlv(hdev, ver); 2767 2650 if (err) 2768 2651 return err; 2652 + 2653 + /* set drive strength of BRI response */ 2654 + err = btintel_set_dsbr(hdev, ver); 2655 + if (err) { 2656 + bt_dev_err(hdev, "Failed to send dsbr command (%d)", err); 2657 + return err; 2658 + } 2769 2659 2770 2660 /* If image type returned is BTINTEL_IMG_IML, then controller supports 2771 2661 * intermediate loader image
+18 -2
drivers/bluetooth/btnxpuart.c
··· 449 449 return false; 450 450 } 451 451 452 + static void ps_cleanup(struct btnxpuart_dev *nxpdev) 453 + { 454 + struct ps_data *psdata = &nxpdev->psdata; 455 + u8 ps_state; 456 + 457 + mutex_lock(&psdata->ps_lock); 458 + ps_state = psdata->ps_state; 459 + mutex_unlock(&psdata->ps_lock); 460 + 461 + if (ps_state != PS_STATE_AWAKE) 462 + ps_control(psdata->hdev, PS_STATE_AWAKE); 463 + 464 + ps_cancel_timer(nxpdev); 465 + cancel_work_sync(&psdata->work); 466 + mutex_destroy(&psdata->ps_lock); 467 + } 468 + 452 469 static int send_ps_cmd(struct hci_dev *hdev, void *data) 453 470 { 454 471 struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); ··· 1380 1363 { 1381 1364 struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); 1382 1365 1383 - ps_wakeup(nxpdev); 1384 1366 serdev_device_close(nxpdev->serdev); 1385 1367 skb_queue_purge(&nxpdev->txq); 1386 1368 if (!IS_ERR_OR_NULL(nxpdev->rx_skb)) { ··· 1532 1516 nxpdev->new_baudrate = nxpdev->fw_init_baudrate; 1533 1517 nxp_set_baudrate_cmd(hdev, NULL); 1534 1518 } 1535 - ps_cancel_timer(nxpdev); 1536 1519 } 1520 + ps_cleanup(nxpdev); 1537 1521 hci_unregister_dev(hdev); 1538 1522 hci_free_dev(hdev); 1539 1523 }
+4
drivers/char/tpm/tpm_ibmvtpm.c
··· 698 698 rc = tpm2_get_cc_attrs_tbl(chip); 699 699 if (rc) 700 700 goto init_irq_cleanup; 701 + 702 + rc = tpm2_sessions_init(chip); 703 + if (rc) 704 + goto init_irq_cleanup; 701 705 } 702 706 703 707 return tpm_chip_register(chip);
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
··· 278 278 msg = RREG32(mmMP0_SMN_C2PMSG_33); 279 279 if (msg & 0x80000000) 280 280 break; 281 - usleep_range(1000, 1100); 281 + msleep(1); 282 282 } 283 283 } 284 284
+3
drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
··· 166 166 if (ret) 167 167 return -EFAULT; 168 168 169 + if (ta_bin_len > PSP_1_MEG) 170 + return -EINVAL; 171 + 169 172 copy_pos += sizeof(uint32_t); 170 173 171 174 ta_bin = kzalloc(ta_bin_len, GFP_KERNEL);
+3 -2
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
··· 4116 4116 4117 4117 static int gfx_v10_0_init_microcode(struct amdgpu_device *adev) 4118 4118 { 4119 + char fw_name[53]; 4119 4120 char ucode_prefix[30]; 4120 4121 const char *wks = ""; 4121 4122 int err; ··· 4150 4149 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_CE); 4151 4150 4152 4151 if (!amdgpu_sriov_vf(adev)) { 4153 - err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, 4154 - "amdgpu/%s_rlc.bin", ucode_prefix); 4152 + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix); 4153 + err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev); 4155 4154 if (err) 4156 4155 goto out; 4157 4156
+10 -8
drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
··· 176 176 DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n", 177 177 ring->doorbell_index, ring->wptr << 2); 178 178 WDOORBELL64(ring->doorbell_index, ring->wptr << 2); 179 - /* SDMA seems to miss doorbells sometimes when powergating kicks in. 180 - * Updating the wptr directly will wake it. This is only safe because 181 - * we disallow gfxoff in begin_use() and then allow it again in end_use(). 182 - */ 183 - WREG32(sdma_v5_2_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR), 184 - lower_32_bits(ring->wptr << 2)); 185 - WREG32(sdma_v5_2_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI), 186 - upper_32_bits(ring->wptr << 2)); 179 + if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(5, 2, 1)) { 180 + /* SDMA seems to miss doorbells sometimes when powergating kicks in. 181 + * Updating the wptr directly will wake it. This is only safe because 182 + * we disallow gfxoff in begin_use() and then allow it again in end_use(). 183 + */ 184 + WREG32(sdma_v5_2_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR), 185 + lower_32_bits(ring->wptr << 2)); 186 + WREG32(sdma_v5_2_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI), 187 + upper_32_bits(ring->wptr << 2)); 188 + } 187 189 } else { 188 190 DRM_DEBUG("Not using doorbell -- " 189 191 "mmSDMA%i_GFX_RB_WPTR == 0x%08x "
+3 -1
drivers/gpu/drm/i915/display/intel_dp_hdcp.c
··· 39 39 static void intel_dp_hdcp_wait_for_cp_irq(struct intel_connector *connector, 40 40 int timeout) 41 41 { 42 - struct intel_hdcp *hdcp = &connector->hdcp; 42 + struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 43 + struct intel_dp *dp = &dig_port->dp; 44 + struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; 43 45 long ret; 44 46 45 47 #define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count))
+1 -1
drivers/gpu/drm/msm/adreno/adreno_gpu.c
··· 99 99 * was a bad idea, and is only provided for backwards 100 100 * compatibility for older targets. 101 101 */ 102 - return -ENODEV; 102 + return -ENOENT; 103 103 } 104 104 105 105 if (IS_ERR(fw)) {
+2 -2
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
··· 1171 1171 1172 1172 cstate->num_mixers = num_lm; 1173 1173 1174 - dpu_enc->connector = conn_state->connector; 1175 - 1176 1174 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1177 1175 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 1178 1176 ··· 1267 1269 mutex_lock(&dpu_enc->enc_lock); 1268 1270 1269 1271 dpu_enc->commit_done_timedout = false; 1272 + 1273 + dpu_enc->connector = drm_atomic_get_new_connector_for_encoder(state, drm_enc); 1270 1274 1271 1275 cur_mode = &dpu_enc->base.crtc->state->adjusted_mode; 1272 1276
+2 -2
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
··· 308 308 { \ 309 309 .maxdwnscale = SSPP_UNITY_SCALE, \ 310 310 .maxupscale = SSPP_UNITY_SCALE, \ 311 - .format_list = plane_formats_yuv, \ 312 - .num_formats = ARRAY_SIZE(plane_formats_yuv), \ 311 + .format_list = plane_formats, \ 312 + .num_formats = ARRAY_SIZE(plane_formats), \ 313 313 .virt_format_list = plane_formats, \ 314 314 .virt_num_formats = ARRAY_SIZE(plane_formats), \ 315 315 }
+2 -12
drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
··· 31 31 * @fmt: Pointer to format string 32 32 */ 33 33 #define DPU_DEBUG(fmt, ...) \ 34 - do { \ 35 - if (drm_debug_enabled(DRM_UT_KMS)) \ 36 - DRM_DEBUG(fmt, ##__VA_ARGS__); \ 37 - else \ 38 - pr_debug(fmt, ##__VA_ARGS__); \ 39 - } while (0) 34 + DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__) 40 35 41 36 /** 42 37 * DPU_DEBUG_DRIVER - macro for hardware driver logging 43 38 * @fmt: Pointer to format string 44 39 */ 45 40 #define DPU_DEBUG_DRIVER(fmt, ...) \ 46 - do { \ 47 - if (drm_debug_enabled(DRM_UT_DRIVER)) \ 48 - DRM_ERROR(fmt, ##__VA_ARGS__); \ 49 - else \ 50 - pr_debug(fmt, ##__VA_ARGS__); \ 51 - } while (0) 41 + DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__) 52 42 53 43 #define DPU_ERROR(fmt, ...) pr_err("[dpu error]" fmt, ##__VA_ARGS__) 54 44 #define DPU_ERROR_RATELIMITED(fmt, ...) pr_err_ratelimited("[dpu error]" fmt, ##__VA_ARGS__)
+17 -3
drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
··· 681 681 new_state->fb, &layout); 682 682 if (ret) { 683 683 DPU_ERROR_PLANE(pdpu, "failed to get format layout, %d\n", ret); 684 + if (pstate->aspace) 685 + msm_framebuffer_cleanup(new_state->fb, pstate->aspace, 686 + pstate->needs_dirtyfb); 684 687 return ret; 685 688 } 686 689 ··· 747 744 min_src_size = MSM_FORMAT_IS_YUV(fmt) ? 2 : 1; 748 745 749 746 if (MSM_FORMAT_IS_YUV(fmt) && 750 - (!pipe->sspp->cap->sblk->scaler_blk.len || 751 - !pipe->sspp->cap->sblk->csc_blk.len)) { 747 + !pipe->sspp->cap->sblk->csc_blk.len) { 752 748 DPU_DEBUG_PLANE(pdpu, 753 - "plane doesn't have scaler/csc for yuv\n"); 749 + "plane doesn't have csc for yuv\n"); 754 750 return -EINVAL; 755 751 } 756 752 ··· 866 864 867 865 max_linewidth = pdpu->catalog->caps->max_linewidth; 868 866 867 + drm_rect_rotate(&pipe_cfg->src_rect, 868 + new_plane_state->fb->width, new_plane_state->fb->height, 869 + new_plane_state->rotation); 870 + 869 871 if ((drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) || 870 872 _dpu_plane_calc_clk(&crtc_state->adjusted_mode, pipe_cfg) > max_mdp_clk_rate) { 871 873 /* ··· 918 912 r_pipe_cfg->src_rect.x1 = pipe_cfg->src_rect.x2; 919 913 r_pipe_cfg->dst_rect.x1 = pipe_cfg->dst_rect.x2; 920 914 } 915 + 916 + drm_rect_rotate_inv(&pipe_cfg->src_rect, 917 + new_plane_state->fb->width, new_plane_state->fb->height, 918 + new_plane_state->rotation); 919 + if (r_pipe->sspp) 920 + drm_rect_rotate_inv(&r_pipe_cfg->src_rect, 921 + new_plane_state->fb->width, new_plane_state->fb->height, 922 + new_plane_state->rotation); 921 923 922 924 ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg, fmt, &crtc_state->adjusted_mode); 923 925 if (ret)
+2
drivers/gpu/drm/msm/dp/dp_ctrl.c
··· 1286 1286 link_info.rate = ctrl->link->link_params.rate; 1287 1287 link_info.capabilities = DP_LINK_CAP_ENHANCED_FRAMING; 1288 1288 1289 + dp_link_reset_phy_params_vx_px(ctrl->link); 1290 + 1289 1291 dp_aux_link_configure(ctrl->aux, &link_info); 1290 1292 1291 1293 if (drm_dp_max_downspread(dpcd))
+10 -9
drivers/gpu/drm/msm/dp/dp_panel.c
··· 90 90 static u32 dp_panel_get_supported_bpp(struct dp_panel *dp_panel, 91 91 u32 mode_edid_bpp, u32 mode_pclk_khz) 92 92 { 93 - struct dp_link_info *link_info; 93 + const struct dp_link_info *link_info; 94 94 const u32 max_supported_bpp = 30, min_supported_bpp = 18; 95 - u32 bpp = 0, data_rate_khz = 0; 95 + u32 bpp, data_rate_khz; 96 96 97 - bpp = min_t(u32, mode_edid_bpp, max_supported_bpp); 97 + bpp = min(mode_edid_bpp, max_supported_bpp); 98 98 99 99 link_info = &dp_panel->link_info; 100 100 data_rate_khz = link_info->num_lanes * link_info->rate * 8; 101 101 102 - while (bpp > min_supported_bpp) { 102 + do { 103 103 if (mode_pclk_khz * bpp <= data_rate_khz) 104 - break; 104 + return bpp; 105 105 bpp -= 6; 106 - } 106 + } while (bpp > min_supported_bpp); 107 107 108 - return bpp; 108 + return min_supported_bpp; 109 109 } 110 110 111 111 int dp_panel_read_sink_caps(struct dp_panel *dp_panel, ··· 423 423 drm_mode->clock); 424 424 drm_dbg_dp(panel->drm_dev, "bpp = %d\n", dp_panel->dp_mode.bpp); 425 425 426 - dp_panel->dp_mode.bpp = max_t(u32, 18, 427 - min_t(u32, dp_panel->dp_mode.bpp, 30)); 426 + dp_panel->dp_mode.bpp = dp_panel_get_mode_bpp(dp_panel, dp_panel->dp_mode.bpp, 427 + dp_panel->dp_mode.drm_mode.clock); 428 + 428 429 drm_dbg_dp(panel->drm_dev, "updated bpp = %d\n", 429 430 dp_panel->dp_mode.bpp); 430 431
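A standalone sketch mirroring the reworked bpp selection above, using made-up link numbers to show the stepping; the per-lane rate and its units are assumptions and only need to be consistent with the pixel clock units:

#include <stdio.h>

static unsigned int pick_bpp(unsigned int mode_bpp, unsigned int mode_pclk_khz,
			     unsigned int data_rate_khz)
{
	const unsigned int max_bpp = 30, min_bpp = 18;
	unsigned int bpp = mode_bpp < max_bpp ? mode_bpp : max_bpp;

	do {
		if (mode_pclk_khz * bpp <= data_rate_khz)
			return bpp;	/* highest bpp the link can carry */
		bpp -= 6;		/* drop 2 bits per colour component */
	} while (bpp > min_bpp);

	return min_bpp;
}

int main(void)
{
	/* e.g. a 594 MHz pixel clock against 4 lanes x 540000 x 8:
	 * 30 bpp does not fit, 24 bpp does */
	printf("%u\n", pick_bpp(30, 594000, 4 * 540000 * 8));
	return 0;
}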
+1 -1
drivers/gpu/drm/msm/msm_mdss.c
··· 577 577 .ubwc_enc_version = UBWC_2_0, 578 578 .ubwc_dec_version = UBWC_2_0, 579 579 .ubwc_static = 0x1e, 580 - .highest_bank_bit = 0x3, 580 + .highest_bank_bit = 0x1, 581 581 .reg_bus_bw = 76800, 582 582 }; 583 583
+6 -3
drivers/gpu/drm/nouveau/nvkm/core/firmware.c
··· 205 205 break; 206 206 case NVKM_FIRMWARE_IMG_DMA: 207 207 nvkm_memory_unref(&memory); 208 - dma_free_coherent(fw->device->dev, sg_dma_len(&fw->mem.sgl), fw->img, fw->phys); 208 + dma_free_noncoherent(fw->device->dev, sg_dma_len(&fw->mem.sgl), 209 + fw->img, fw->phys, DMA_TO_DEVICE); 209 210 break; 210 211 case NVKM_FIRMWARE_IMG_SGT: 211 212 nvkm_memory_unref(&memory); ··· 237 236 break; 238 237 case NVKM_FIRMWARE_IMG_DMA: { 239 238 dma_addr_t addr; 240 - 241 239 len = ALIGN(fw->len, PAGE_SIZE); 242 240 243 - fw->img = dma_alloc_coherent(fw->device->dev, len, &addr, GFP_KERNEL); 241 + fw->img = dma_alloc_noncoherent(fw->device->dev, 242 + len, &addr, 243 + DMA_TO_DEVICE, 244 + GFP_KERNEL); 244 245 if (fw->img) { 245 246 memcpy(fw->img, src, fw->len); 246 247 fw->phys = addr;
+6
drivers/gpu/drm/nouveau/nvkm/falcon/fw.c
··· 89 89 nvkm_falcon_fw_dtor_sigs(fw); 90 90 } 91 91 92 + /* after last write to the img, sync dma mappings */ 93 + dma_sync_single_for_device(fw->fw.device->dev, 94 + fw->fw.phys, 95 + sg_dma_len(&fw->fw.mem.sgl), 96 + DMA_TO_DEVICE); 97 + 92 98 FLCNFW_DBG(fw, "resetting"); 93 99 fw->func->reset(fw); 94 100
+2
drivers/gpu/drm/xe/Makefile
··· 25 25 26 26 uses_generated_oob := \ 27 27 $(obj)/xe_ggtt.o \ 28 + $(obj)/xe_device.o \ 28 29 $(obj)/xe_gsc.o \ 29 30 $(obj)/xe_gt.o \ 30 31 $(obj)/xe_guc.o \ 31 32 $(obj)/xe_guc_ads.o \ 32 33 $(obj)/xe_guc_pc.o \ 33 34 $(obj)/xe_migrate.o \ 35 + $(obj)/xe_pat.o \ 34 36 $(obj)/xe_ring_ops.o \ 35 37 $(obj)/xe_vm.o \ 36 38 $(obj)/xe_wa.o \
+27 -1
drivers/gpu/drm/xe/display/xe_display.c
··· 132 132 return; 133 133 134 134 intel_display_driver_remove_noirq(xe); 135 + intel_opregion_cleanup(xe); 135 136 } 136 137 137 138 int xe_display_init_noirq(struct xe_device *xe) ··· 158 157 intel_display_device_info_runtime_init(xe); 159 158 160 159 err = intel_display_driver_probe_noirq(xe); 161 - if (err) 160 + if (err) { 161 + intel_opregion_cleanup(xe); 162 162 return err; 163 + } 163 164 164 165 return devm_add_action_or_reset(xe->drm.dev, xe_display_fini_noirq, xe); 165 166 } ··· 283 280 return false; 284 281 } 285 282 283 + static void xe_display_flush_cleanup_work(struct xe_device *xe) 284 + { 285 + struct intel_crtc *crtc; 286 + 287 + for_each_intel_crtc(&xe->drm, crtc) { 288 + struct drm_crtc_commit *commit; 289 + 290 + spin_lock(&crtc->base.commit_lock); 291 + commit = list_first_entry_or_null(&crtc->base.commit_list, 292 + struct drm_crtc_commit, commit_entry); 293 + if (commit) 294 + drm_crtc_commit_get(commit); 295 + spin_unlock(&crtc->base.commit_lock); 296 + 297 + if (commit) { 298 + wait_for_completion(&commit->cleanup_done); 299 + drm_crtc_commit_put(commit); 300 + } 301 + } 302 + } 303 + 286 304 void xe_display_pm_suspend(struct xe_device *xe, bool runtime) 287 305 { 288 306 bool s2idle = suspend_to_idle(); ··· 320 296 321 297 if (!runtime) 322 298 intel_display_driver_suspend(xe); 299 + 300 + xe_display_flush_cleanup_work(xe); 323 301 324 302 intel_dp_mst_suspend(xe); 325 303
+8
drivers/gpu/drm/xe/display/xe_dsb_buffer.c
··· 7 7 #include "intel_display_types.h" 8 8 #include "intel_dsb_buffer.h" 9 9 #include "xe_bo.h" 10 + #include "xe_device.h" 11 + #include "xe_device_types.h" 10 12 #include "xe_gt.h" 11 13 12 14 u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf) ··· 18 16 19 17 void intel_dsb_buffer_write(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val) 20 18 { 19 + struct xe_device *xe = dsb_buf->vma->bo->tile->xe; 20 + 21 21 iosys_map_wr(&dsb_buf->vma->bo->vmap, idx * 4, u32, val); 22 + xe_device_l2_flush(xe); 22 23 } 23 24 24 25 u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx) ··· 31 26 32 27 void intel_dsb_buffer_memset(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size) 33 28 { 29 + struct xe_device *xe = dsb_buf->vma->bo->tile->xe; 30 + 34 31 WARN_ON(idx > (dsb_buf->buf_size - size) / sizeof(*dsb_buf->cmd_buf)); 35 32 36 33 iosys_map_memset(&dsb_buf->vma->bo->vmap, idx * 4, val, size); 34 + xe_device_l2_flush(xe); 37 35 } 38 36 39 37 bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *dsb_buf, size_t size)
+3
drivers/gpu/drm/xe/display/xe_fb_pin.c
··· 10 10 #include "intel_fb.h" 11 11 #include "intel_fb_pin.h" 12 12 #include "xe_bo.h" 13 + #include "xe_device.h" 13 14 #include "xe_ggtt.h" 14 15 #include "xe_gt.h" 15 16 #include "xe_pm.h" ··· 305 304 if (ret) 306 305 goto err_unpin; 307 306 307 + /* Ensure DPT writes are flushed */ 308 + xe_device_l2_flush(xe); 308 309 return vma; 309 310 310 311 err_unpin:
+9
drivers/gpu/drm/xe/regs/xe_gt_regs.h
··· 80 80 #define LE_CACHEABILITY_MASK REG_GENMASK(1, 0) 81 81 #define LE_CACHEABILITY(value) REG_FIELD_PREP(LE_CACHEABILITY_MASK, value) 82 82 83 + #define XE2_GAMREQSTRM_CTRL XE_REG(0x4194) 84 + #define CG_DIS_CNTLBUS REG_BIT(6) 85 + 83 86 #define CCS_AUX_INV XE_REG(0x4208) 84 87 85 88 #define VD0_AUX_INV XE_REG(0x4218) ··· 375 372 376 373 #define XEHPC_L3CLOS_MASK(i) XE_REG_MCR(0xb194 + (i) * 8) 377 374 375 + #define XE2_GLOBAL_INVAL XE_REG(0xb404) 376 + 377 + #define SCRATCH1LPFC XE_REG(0xb474) 378 + #define EN_L3_RW_CCS_CACHE_FLUSH REG_BIT(0) 379 + 378 380 #define XE2LPM_L3SQCREG5 XE_REG_MCR(0xb658) 379 381 380 382 #define XE2_TDF_CTRL XE_REG(0xb418) ··· 437 429 #define DIS_FIX_EOT1_FLUSH REG_BIT(9) 438 430 439 431 #define TDL_TSL_CHICKEN XE_REG_MCR(0xe4c4, XE_REG_OPTION_MASKED) 432 + #define STK_ID_RESTRICT REG_BIT(12) 440 433 #define SLM_WMTP_RESTORE REG_BIT(11) 441 434 442 435 #define ROW_CHICKEN XE_REG_MCR(0xe4f0, XE_REG_OPTION_MASKED)
+3 -3
drivers/gpu/drm/xe/xe_bo.c
··· 1575 1575 return bo; 1576 1576 } 1577 1577 1578 - static void __xe_bo_unpin_map_no_vm(struct drm_device *drm, void *arg) 1578 + static void __xe_bo_unpin_map_no_vm(void *arg) 1579 1579 { 1580 1580 xe_bo_unpin_map_no_vm(arg); 1581 1581 } ··· 1590 1590 if (IS_ERR(bo)) 1591 1591 return bo; 1592 1592 1593 - ret = drmm_add_action_or_reset(&xe->drm, __xe_bo_unpin_map_no_vm, bo); 1593 + ret = devm_add_action_or_reset(xe->drm.dev, __xe_bo_unpin_map_no_vm, bo); 1594 1594 if (ret) 1595 1595 return ERR_PTR(ret); 1596 1596 ··· 1638 1638 if (IS_ERR(bo)) 1639 1639 return PTR_ERR(bo); 1640 1640 1641 - drmm_release_action(&xe->drm, __xe_bo_unpin_map_no_vm, *src); 1641 + devm_release_action(xe->drm.dev, __xe_bo_unpin_map_no_vm, *src); 1642 1642 *src = bo; 1643 1643 1644 1644 return 0;
+32
drivers/gpu/drm/xe/xe_device.c
··· 54 54 #include "xe_vm.h" 55 55 #include "xe_vram.h" 56 56 #include "xe_wait_user_fence.h" 57 + #include "xe_wa.h" 58 + 59 + #include <generated/xe_wa_oob.h> 57 60 58 61 static int xe_file_open(struct drm_device *dev, struct drm_file *file) 59 62 { ··· 823 820 if (!IS_DGFX(xe) || GRAPHICS_VER(xe) < 20) 824 821 return; 825 822 823 + if (XE_WA(xe_root_mmio_gt(xe), 16023588340)) { 824 + xe_device_l2_flush(xe); 825 + return; 826 + } 827 + 826 828 for_each_gt(gt, xe, id) { 827 829 if (xe_gt_is_media_type(gt)) 828 830 continue; ··· 849 841 850 842 xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); 851 843 } 844 + } 845 + 846 + void xe_device_l2_flush(struct xe_device *xe) 847 + { 848 + struct xe_gt *gt; 849 + int err; 850 + 851 + gt = xe_root_mmio_gt(xe); 852 + 853 + if (!XE_WA(gt, 16023588340)) 854 + return; 855 + 856 + err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); 857 + if (err) 858 + return; 859 + 860 + spin_lock(&gt->global_invl_lock); 861 + xe_mmio_write32(gt, XE2_GLOBAL_INVAL, 0x1); 862 + 863 + if (xe_mmio_wait32(gt, XE2_GLOBAL_INVAL, 0x1, 0x0, 150, NULL, true)) 864 + xe_gt_err_once(gt, "Global invalidation timeout\n"); 865 + spin_unlock(&gt->global_invl_lock); 866 + 867 + xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); 852 868 } 853 869 854 870 u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size)
+1
drivers/gpu/drm/xe/xe_device.h
··· 162 162 u64 xe_device_uncanonicalize_addr(struct xe_device *xe, u64 address); 163 163 164 164 void xe_device_td_flush(struct xe_device *xe); 165 + void xe_device_l2_flush(struct xe_device *xe); 165 166 166 167 static inline bool xe_device_wedged(struct xe_device *xe) 167 168 {
+14 -10
drivers/gpu/drm/xe/xe_exec_queue.c
··· 105 105 106 106 static int __xe_exec_queue_init(struct xe_exec_queue *q) 107 107 { 108 + struct xe_vm *vm = q->vm; 108 109 int i, err; 110 + 111 + if (vm) { 112 + err = xe_vm_lock(vm, true); 113 + if (err) 114 + return err; 115 + } 109 116 110 117 for (i = 0; i < q->width; ++i) { 111 118 q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K); 112 119 if (IS_ERR(q->lrc[i])) { 113 120 err = PTR_ERR(q->lrc[i]); 114 - goto err_lrc; 121 + goto err_unlock; 115 122 } 116 123 } 124 + 125 + if (vm) 126 + xe_vm_unlock(vm); 117 127 118 128 err = q->ops->init(q); 119 129 if (err) ··· 131 121 132 122 return 0; 133 123 124 + err_unlock: 125 + if (vm) 126 + xe_vm_unlock(vm); 134 127 err_lrc: 135 128 for (i = i - 1; i >= 0; --i) 136 129 xe_lrc_put(q->lrc[i]); ··· 153 140 if (IS_ERR(q)) 154 141 return q; 155 142 156 - if (vm) { 157 - err = xe_vm_lock(vm, true); 158 - if (err) 159 - goto err_post_alloc; 160 - } 161 - 162 143 err = __xe_exec_queue_init(q); 163 - if (vm) 164 - xe_vm_unlock(vm); 165 144 if (err) 166 145 goto err_post_alloc; 167 146 ··· 643 638 644 639 if (xe_vm_in_preempt_fence_mode(vm)) { 645 640 q->lr.context = dma_fence_context_alloc(1); 646 - spin_lock_init(&q->lr.lock); 647 641 648 642 err = xe_vm_add_compute_exec_queue(vm, q); 649 643 if (XE_IOCTL_DBG(xe, err))
-2
drivers/gpu/drm/xe/xe_exec_queue_types.h
··· 126 126 u32 seqno; 127 127 /** @lr.link: link into VM's list of exec queues */ 128 128 struct list_head link; 129 - /** @lr.lock: preemption fences lock */ 130 - spinlock_t lock; 131 129 } lr; 132 130 133 131 /** @ops: submission backend exec queue operations */
+4 -4
drivers/gpu/drm/xe/xe_gsc.c
··· 260 260 struct xe_tile *tile = gt_to_tile(gt); 261 261 int ret; 262 262 263 - if (XE_WA(gt, 14018094691)) { 263 + if (XE_WA(tile->primary_gt, 14018094691)) { 264 264 ret = xe_force_wake_get(gt_to_fw(tile->primary_gt), XE_FORCEWAKE_ALL); 265 265 266 266 /* ··· 278 278 279 279 ret = gsc_upload(gsc); 280 280 281 - if (XE_WA(gt, 14018094691)) 281 + if (XE_WA(tile->primary_gt, 14018094691)) 282 282 xe_force_wake_put(gt_to_fw(tile->primary_gt), XE_FORCEWAKE_ALL); 283 283 284 284 if (ret) ··· 437 437 return ret; 438 438 } 439 439 440 - static void free_resources(struct drm_device *drm, void *arg) 440 + static void free_resources(void *arg) 441 441 { 442 442 struct xe_gsc *gsc = arg; 443 443 ··· 501 501 gsc->q = q; 502 502 gsc->wq = wq; 503 503 504 - err = drmm_add_action_or_reset(&xe->drm, free_resources, gsc); 504 + err = devm_add_action_or_reset(xe->drm.dev, free_resources, gsc); 505 505 if (err) 506 506 return err; 507 507
+55
drivers/gpu/drm/xe/xe_gt.c
··· 11 11 #include <drm/xe_drm.h> 12 12 #include <generated/xe_wa_oob.h> 13 13 14 + #include <generated/xe_wa_oob.h> 15 + 14 16 #include "instructions/xe_gfxpipe_commands.h" 15 17 #include "instructions/xe_mi_commands.h" 16 18 #include "regs/xe_gt_regs.h" ··· 97 95 gt->uc.guc.submission_state.enabled = false; 98 96 } 99 97 98 + static void xe_gt_enable_host_l2_vram(struct xe_gt *gt) 99 + { 100 + u32 reg; 101 + int err; 102 + 103 + if (!XE_WA(gt, 16023588340)) 104 + return; 105 + 106 + err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); 107 + if (WARN_ON(err)) 108 + return; 109 + 110 + if (!xe_gt_is_media_type(gt)) { 111 + xe_mmio_write32(gt, SCRATCH1LPFC, EN_L3_RW_CCS_CACHE_FLUSH); 112 + reg = xe_mmio_read32(gt, XE2_GAMREQSTRM_CTRL); 113 + reg |= CG_DIS_CNTLBUS; 114 + xe_mmio_write32(gt, XE2_GAMREQSTRM_CTRL, reg); 115 + } 116 + 117 + xe_gt_mcr_multicast_write(gt, XEHPC_L3CLOS_MASK(3), 0x3); 118 + xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); 119 + } 120 + 121 + static void xe_gt_disable_host_l2_vram(struct xe_gt *gt) 122 + { 123 + u32 reg; 124 + int err; 125 + 126 + if (!XE_WA(gt, 16023588340)) 127 + return; 128 + 129 + if (xe_gt_is_media_type(gt)) 130 + return; 131 + 132 + err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); 133 + if (WARN_ON(err)) 134 + return; 135 + 136 + reg = xe_mmio_read32(gt, XE2_GAMREQSTRM_CTRL); 137 + reg &= ~CG_DIS_CNTLBUS; 138 + xe_mmio_write32(gt, XE2_GAMREQSTRM_CTRL, reg); 139 + 140 + xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); 141 + } 142 + 100 143 /** 101 144 * xe_gt_remove() - Clean up the GT structures before driver removal 102 145 * @gt: the GT object ··· 158 111 159 112 for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i) 160 113 xe_hw_fence_irq_finish(&gt->fence_irq[i]); 114 + 115 + xe_gt_disable_host_l2_vram(gt); 161 116 } 162 117 163 118 static void gt_reset_worker(struct work_struct *w); ··· 388 339 389 340 xe_force_wake_init_gt(gt, gt_to_fw(gt)); 390 341 xe_pcode_init(gt); 342 + spin_lock_init(&gt->global_invl_lock); 391 343 392 344 return 0; 393 345 } ··· 558 508 559 509 xe_gt_mcr_init_early(gt); 560 510 xe_pat_init(gt); 511 + xe_gt_enable_host_l2_vram(gt); 561 512 562 513 err = xe_uc_init(&gt->uc); 563 514 if (err) ··· 693 642 return vf_gt_restart(gt); 694 643 695 644 xe_pat_init(gt); 645 + 646 + xe_gt_enable_host_l2_vram(gt); 696 647 697 648 xe_gt_mcr_set_implicit_defaults(gt); 698 649 xe_reg_sr_apply_mmio(&gt->reg_sr, gt); ··· 848 795 goto err_force_wake; 849 796 850 797 xe_gt_idle_disable_pg(gt); 798 + 799 + xe_gt_disable_host_l2_vram(gt); 851 800 852 801 XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); 853 802 xe_gt_dbg(gt, "suspended\n");
+16 -2
drivers/gpu/drm/xe/xe_gt_pagefault.c
··· 382 382 383 383 static void acc_queue_work_func(struct work_struct *w); 384 384 385 + static void pagefault_fini(void *arg) 386 + { 387 + struct xe_gt *gt = arg; 388 + struct xe_device *xe = gt_to_xe(gt); 389 + 390 + if (!xe->info.has_usm) 391 + return; 392 + 393 + destroy_workqueue(gt->usm.acc_wq); 394 + destroy_workqueue(gt->usm.pf_wq); 395 + } 396 + 385 397 int xe_gt_pagefault_init(struct xe_gt *gt) 386 398 { 387 399 struct xe_device *xe = gt_to_xe(gt); ··· 421 409 gt->usm.acc_wq = alloc_workqueue("xe_gt_access_counter_work_queue", 422 410 WQ_UNBOUND | WQ_HIGHPRI, 423 411 NUM_ACC_QUEUE); 424 - if (!gt->usm.acc_wq) 412 + if (!gt->usm.acc_wq) { 413 + destroy_workqueue(gt->usm.pf_wq); 425 414 return -ENOMEM; 415 + } 426 416 427 - return 0; 417 + return devm_add_action_or_reset(xe->drm.dev, pagefault_fini, gt); 428 418 } 429 419 430 420 void xe_gt_pagefault_reset(struct xe_gt *gt)
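The pagefault-init fix above follows the usual rule for multi-step setup: undo the steps that already succeeded when a later one fails, then hand the final teardown to devm. A rough sketch under assumed names (my_queues and the "my_a"/"my_b" workqueues are illustrative, not real xe objects):

    #include <linux/device.h>
    #include <linux/errno.h>
    #include <linux/workqueue.h>

    struct my_queues {
            struct workqueue_struct *a;
            struct workqueue_struct *b;
    };

    static void my_queues_fini(void *arg)
    {
            struct my_queues *q = arg;

            destroy_workqueue(q->b);
            destroy_workqueue(q->a);
    }

    static int my_queues_init(struct device *dev, struct my_queues *q)
    {
            q->a = alloc_workqueue("my_a", WQ_UNBOUND, 0);
            if (!q->a)
                    return -ENOMEM;

            q->b = alloc_workqueue("my_b", WQ_UNBOUND, 0);
            if (!q->b) {
                    /* undo the step that already succeeded */
                    destroy_workqueue(q->a);
                    return -ENOMEM;
            }

            return devm_add_action_or_reset(dev, my_queues_fini, q);
    }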
+6
drivers/gpu/drm/xe/xe_gt_types.h
··· 362 362 */ 363 363 spinlock_t mcr_lock; 364 364 365 + /** 366 + * @global_invl_lock: protects the register for the duration 367 + * of a global invalidation of l2 cache 368 + */ 369 + spinlock_t global_invl_lock; 370 + 365 371 /** @wa_active: keep track of active workarounds */ 366 372 struct { 367 373 /** @wa_active.gt: bitmap with active GT workarounds */
+2 -2
drivers/gpu/drm/xe/xe_guc_submit.c
··· 284 284 free_submit_wq(guc); 285 285 } 286 286 287 - static void guc_submit_wedged_fini(struct drm_device *drm, void *arg) 287 + static void guc_submit_wedged_fini(void *arg) 288 288 { 289 289 struct xe_guc *guc = arg; 290 290 struct xe_exec_queue *q; ··· 877 877 878 878 xe_gt_assert(guc_to_gt(guc), guc_to_xe(guc)->wedged.mode); 879 879 880 - err = drmm_add_action_or_reset(&guc_to_xe(guc)->drm, 880 + err = devm_add_action_or_reset(guc_to_xe(guc)->drm.dev, 881 881 guc_submit_wedged_fini, guc); 882 882 if (err) { 883 883 drm_err(&xe->drm, "Failed to register xe_guc_submit clean-up on wedged.mode=2. Although device is wedged.\n");
+5 -4
drivers/gpu/drm/xe/xe_hw_fence.c
··· 148 148 { 149 149 struct xe_hw_fence *fence = to_xe_hw_fence(dma_fence); 150 150 151 - return dev_name(gt_to_xe(fence->ctx->gt)->drm.dev); 151 + return dev_name(fence->xe->drm.dev); 152 152 } 153 153 154 154 static const char *xe_hw_fence_get_timeline_name(struct dma_fence *dma_fence) 155 155 { 156 156 struct xe_hw_fence *fence = to_xe_hw_fence(dma_fence); 157 157 158 - return fence->ctx->name; 158 + return fence->name; 159 159 } 160 160 161 161 static bool xe_hw_fence_signaled(struct dma_fence *dma_fence) 162 162 { 163 163 struct xe_hw_fence *fence = to_xe_hw_fence(dma_fence); 164 - struct xe_device *xe = gt_to_xe(fence->ctx->gt); 164 + struct xe_device *xe = fence->xe; 165 165 u32 seqno = xe_map_rd(xe, &fence->seqno_map, 0, u32); 166 166 167 167 return dma_fence->error || ··· 253 253 struct xe_hw_fence *hw_fence = 254 254 container_of(fence, typeof(*hw_fence), dma); 255 255 256 - hw_fence->ctx = ctx; 256 + hw_fence->xe = gt_to_xe(ctx->gt); 257 + snprintf(hw_fence->name, sizeof(hw_fence->name), "%s", ctx->name); 257 258 hw_fence->seqno_map = seqno_map; 258 259 INIT_LIST_HEAD(&hw_fence->irq_link); 259 260
+5 -2
drivers/gpu/drm/xe/xe_hw_fence_types.h
··· 12 12 #include <linux/list.h> 13 13 #include <linux/spinlock.h> 14 14 15 + struct xe_device; 15 16 struct xe_gt; 16 17 17 18 /** ··· 62 61 struct xe_hw_fence { 63 62 /** @dma: base dma fence for hardware fence context */ 64 63 struct dma_fence dma; 65 - /** @ctx: hardware fence context */ 66 - struct xe_hw_fence_ctx *ctx; 64 + /** @xe: Xe device for hw fence driver name */ 65 + struct xe_device *xe; 66 + /** @name: name of hardware fence context */ 67 + char name[MAX_FENCE_NAME_LEN]; 67 68 /** @seqno_map: I/O map for seqno */ 68 69 struct iosys_map seqno_map; 69 70 /** @irq_link: Link in struct xe_hw_fence_irq.pending */
+27 -1
drivers/gpu/drm/xe/xe_mmio.c
··· 30 30 int id; 31 31 32 32 for_each_tile(tile, xe, id) 33 - tile->mmio.regs = NULL; 33 + if (tile != xe_device_get_root_tile(xe)) 34 + tile->mmio.regs = NULL; 34 35 } 35 36 36 37 int xe_mmio_probe_tiles(struct xe_device *xe) ··· 92 91 static void mmio_fini(void *arg) 93 92 { 94 93 struct xe_device *xe = arg; 94 + struct xe_tile *root_tile = xe_device_get_root_tile(xe); 95 95 96 96 pci_iounmap(to_pci_dev(xe->drm.dev), xe->mmio.regs); 97 97 xe->mmio.regs = NULL; 98 + root_tile->mmio.regs = NULL; 98 99 } 99 100 100 101 int xe_mmio_init(struct xe_device *xe) ··· 124 121 return devm_add_action_or_reset(xe->drm.dev, mmio_fini, xe); 125 122 } 126 123 124 + static void mmio_flush_pending_writes(struct xe_gt *gt) 125 + { 126 + #define DUMMY_REG_OFFSET 0x130030 127 + struct xe_tile *tile = gt_to_tile(gt); 128 + int i; 129 + 130 + if (tile->xe->info.platform != XE_LUNARLAKE) 131 + return; 132 + 133 + /* 4 dummy writes */ 134 + for (i = 0; i < 4; i++) 135 + writel(0, tile->mmio.regs + DUMMY_REG_OFFSET); 136 + } 137 + 127 138 u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg) 128 139 { 129 140 struct xe_tile *tile = gt_to_tile(gt); 130 141 u32 addr = xe_mmio_adjusted_addr(gt, reg.addr); 131 142 u8 val; 143 + 144 + /* Wa_15015404425 */ 145 + mmio_flush_pending_writes(gt); 132 146 133 147 val = readb((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr); 134 148 trace_xe_reg_rw(gt, false, addr, val, sizeof(val)); ··· 158 138 struct xe_tile *tile = gt_to_tile(gt); 159 139 u32 addr = xe_mmio_adjusted_addr(gt, reg.addr); 160 140 u16 val; 141 + 142 + /* Wa_15015404425 */ 143 + mmio_flush_pending_writes(gt); 161 144 162 145 val = readw((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr); 163 146 trace_xe_reg_rw(gt, false, addr, val, sizeof(val)); ··· 182 159 struct xe_tile *tile = gt_to_tile(gt); 183 160 u32 addr = xe_mmio_adjusted_addr(gt, reg.addr); 184 161 u32 val; 162 + 163 + /* Wa_15015404425 */ 164 + mmio_flush_pending_writes(gt); 185 165 186 166 if (!reg.vf && IS_SRIOV_VF(gt_to_xe(gt))) 187 167 val = xe_gt_sriov_vf_read32(gt, reg);
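The MMIO hunk flushes pending posted writes with a few dummy register writes before every read on the affected platform. A stripped-down sketch of that idea, with an invented register offset and no claim about which hardware needs it:

    #include <linux/io.h>
    #include <linux/types.h>

    /* Hypothetical scratch offset; any register that is safe to write works. */
    #define MY_DUMMY_REG    0x0

    static void my_flush_posted_writes(void __iomem *regs)
    {
            int i;

            /* A handful of harmless writes push earlier posted writes out
             * to the device before the read below samples its state.
             */
            for (i = 0; i < 4; i++)
                    writel(0, regs + MY_DUMMY_REG);
    }

    static u32 my_read32(void __iomem *regs, u32 offset)
    {
            my_flush_posted_writes(regs);
            return readl(regs + offset);
    }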
-1
drivers/gpu/drm/xe/xe_observation.c
··· 66 66 .extra1 = SYSCTL_ZERO, 67 67 .extra2 = SYSCTL_ONE, 68 68 }, 69 - {} 70 69 }; 71 70 72 71 /**
+10 -1
drivers/gpu/drm/xe/xe_pat.c
··· 7 7 8 8 #include <drm/xe_drm.h> 9 9 10 + #include <generated/xe_wa_oob.h> 11 + 10 12 #include "regs/xe_reg_defs.h" 11 13 #include "xe_assert.h" 12 14 #include "xe_device.h" ··· 17 15 #include "xe_gt_mcr.h" 18 16 #include "xe_mmio.h" 19 17 #include "xe_sriov.h" 18 + #include "xe_wa.h" 20 19 21 20 #define _PAT_ATS 0x47fc 22 21 #define _PAT_INDEX(index) _PICK_EVEN_2RANGES(index, 8, \ ··· 385 382 if (GRAPHICS_VER(xe) == 20) { 386 383 xe->pat.ops = &xe2_pat_ops; 387 384 xe->pat.table = xe2_pat_table; 388 - xe->pat.n_entries = ARRAY_SIZE(xe2_pat_table); 385 + 386 + /* Wa_16023588340. XXX: Should use XE_WA */ 387 + if (GRAPHICS_VERx100(xe) == 2001) 388 + xe->pat.n_entries = 28; /* Disable CLOS3 */ 389 + else 390 + xe->pat.n_entries = ARRAY_SIZE(xe2_pat_table); 391 + 389 392 xe->pat.idx[XE_CACHE_NONE] = 3; 390 393 xe->pat.idx[XE_CACHE_WT] = 15; 391 394 xe->pat.idx[XE_CACHE_WB] = 2;
+6 -5
drivers/gpu/drm/xe/xe_pm.c
··· 91 91 for_each_gt(gt, xe, id) 92 92 xe_gt_suspend_prepare(gt); 93 93 94 + xe_display_pm_suspend(xe, false); 95 + 94 96 /* FIXME: Super racey... */ 95 97 err = xe_bo_evict_all(xe); 96 98 if (err) 97 99 goto err; 98 - 99 - xe_display_pm_suspend(xe, false); 100 100 101 101 for_each_gt(gt, xe, id) { 102 102 err = xe_gt_suspend(gt); ··· 151 151 152 152 xe_irq_resume(xe); 153 153 154 - xe_display_pm_resume(xe, false); 155 - 156 154 for_each_gt(gt, xe, id) 157 155 xe_gt_resume(gt); 156 + 157 + xe_display_pm_resume(xe, false); 158 158 159 159 err = xe_bo_restore_user(xe); 160 160 if (err) ··· 363 363 mutex_unlock(&xe->mem_access.vram_userfault.lock); 364 364 365 365 if (xe->d3cold.allowed) { 366 + xe_display_pm_suspend(xe, true); 367 + 366 368 err = xe_bo_evict_all(xe); 367 369 if (err) 368 370 goto out; 369 - xe_display_pm_suspend(xe, true); 370 371 } 371 372 372 373 for_each_gt(gt, xe, id) {
+2 -1
drivers/gpu/drm/xe/xe_preempt_fence.c
··· 128 128 { 129 129 list_del_init(&pfence->link); 130 130 pfence->q = xe_exec_queue_get(q); 131 + spin_lock_init(&pfence->lock); 131 132 dma_fence_init(&pfence->base, &preempt_fence_ops, 132 - &q->lr.lock, context, seqno); 133 + &pfence->lock, context, seqno); 133 134 134 135 return &pfence->base; 135 136 }
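The preempt-fence fix embeds the spinlock passed to dma_fence_init() in the fence itself, so the lock can no longer be freed while the fence is still referenced. A minimal sketch of the same ownership choice, with made-up structure names:

    #include <linux/dma-fence.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct my_fence {
            struct dma_fence base;
            spinlock_t lock;        /* lives exactly as long as the fence */
    };

    static struct dma_fence *my_fence_create(const struct dma_fence_ops *ops,
                                             u64 context, u64 seqno)
    {
            struct my_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

            if (!f)
                    return NULL;

            spin_lock_init(&f->lock);
            /* The lock passed here must outlive the fence; embedding it in
             * the same allocation guarantees that.
             */
            dma_fence_init(&f->base, ops, &f->lock, context, seqno);
            return &f->base;
    }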
+2
drivers/gpu/drm/xe/xe_preempt_fence_types.h
··· 25 25 struct xe_exec_queue *q; 26 26 /** @preempt_work: work struct which issues preemption */ 27 27 struct work_struct preempt_work; 28 + /** @lock: dma-fence fence lock */ 29 + spinlock_t lock; 28 30 /** @error: preempt fence is in error state */ 29 31 int error; 30 32 };
+2 -1
drivers/gpu/drm/xe/xe_sched_job.c
··· 171 171 struct xe_sched_job *job = 172 172 container_of(ref, struct xe_sched_job, refcount); 173 173 struct xe_device *xe = job_to_xe(job); 174 + struct xe_exec_queue *q = job->q; 174 175 175 176 xe_sched_job_free_fences(job); 176 - xe_exec_queue_put(job->q); 177 177 dma_fence_put(job->fence); 178 178 drm_sched_job_cleanup(&job->drm); 179 179 job_free(job); 180 + xe_exec_queue_put(q); 180 181 xe_pm_runtime_put(xe); 181 182 } 182 183
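The scheduler-job fix keeps a local copy of the exec-queue pointer and drops that reference only after the job is fully freed. The general shape, sketched with a hypothetical kref-counted queue rather than the xe types:

    #include <linux/container_of.h>
    #include <linux/kref.h>
    #include <linux/slab.h>

    struct my_queue {
            struct kref ref;
            /* ... resources that back jobs submitted on this queue ... */
    };

    struct my_job {
            struct my_queue *q;     /* the job holds a reference on its queue */
    };

    static void my_queue_release(struct kref *ref)
    {
            kfree(container_of(ref, struct my_queue, ref));
    }

    static void my_job_destroy(struct my_job *job)
    {
            struct my_queue *q = job->q;    /* local copy survives the kfree below */

            kfree(job);
            /* Drop the queue reference only after the job, which may depend
             * on queue-owned resources, is fully torn down.
             */
            kref_put(&q->ref, my_queue_release);
    }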
+1 -1
drivers/gpu/drm/xe/xe_trace.h
··· 309 309 TP_ARGS(fence), 310 310 311 311 TP_STRUCT__entry( 312 - __string(dev, __dev_name_gt(fence->ctx->gt)) 312 + __string(dev, __dev_name_xe(fence->xe)) 313 313 __field(u64, ctx) 314 314 __field(u32, seqno) 315 315 __field(struct xe_hw_fence *, fence)
+18
drivers/gpu/drm/xe/xe_wa.c
··· 486 486 XE_RTP_RULES(GRAPHICS_VERSION(2004), FUNC(xe_rtp_match_first_render_or_compute)), 487 487 XE_RTP_ACTIONS(SET(TDL_TSL_CHICKEN, SLM_WMTP_RESTORE)) 488 488 }, 489 + { XE_RTP_NAME("14021402888"), 490 + XE_RTP_RULES(GRAPHICS_VERSION(2004), ENGINE_CLASS(RENDER)), 491 + XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN7, CLEAR_OPTIMIZATION_DISABLE)) 492 + }, 489 493 490 494 /* Xe2_HPG */ 491 495 ··· 541 537 { XE_RTP_NAME("14021402888"), 542 538 XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)), 543 539 XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN7, CLEAR_OPTIMIZATION_DISABLE)) 540 + }, 541 + { XE_RTP_NAME("14021821874"), 542 + XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)), 543 + XE_RTP_ACTIONS(SET(TDL_TSL_CHICKEN, STK_ID_RESTRICT)) 544 + }, 545 + 546 + /* Xe2_LPM */ 547 + 548 + { XE_RTP_NAME("16021639441"), 549 + XE_RTP_RULES(MEDIA_VERSION(2000)), 550 + XE_RTP_ACTIONS(SET(CSFE_CHICKEN1(0), 551 + GHWSP_CSB_REPORT_DIS | 552 + PPHWSP_CSB_AND_TIMESTAMP_REPORT_DIS, 553 + XE_RTP_ACTION_FLAG(ENGINE_BASE))) 544 554 }, 545 555 546 556 /* Xe2_HPM */
+1
drivers/gpu/drm/xe/xe_wa_oob.rules
··· 29 29 13011645652 GRAPHICS_VERSION(2004) 30 30 22019338487 MEDIA_VERSION(2000) 31 31 GRAPHICS_VERSION(2001) 32 + 16023588340 GRAPHICS_VERSION(2001)
+5 -2
drivers/input/joystick/adc-joystick.c
··· 182 182 swap(range[0], range[1]); 183 183 } 184 184 185 - fwnode_property_read_u32(child, "abs-fuzz", &fuzz); 186 - fwnode_property_read_u32(child, "abs-flat", &flat); 185 + if (fwnode_property_read_u32(child, "abs-fuzz", &fuzz)) 186 + fuzz = 0; 187 + 188 + if (fwnode_property_read_u32(child, "abs-flat", &flat)) 189 + flat = 0; 187 190 188 191 input_set_abs_params(joy->input, axes[i].code, 189 192 range[0], range[1], fuzz, flat);
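The adc-joystick fix initialises fuzz/flat explicitly when the optional properties are absent, since the property helpers leave the output untouched on failure. A small sketch of reading optional firmware properties with explicit defaults (the helper name is invented; the property strings follow the hunk above):

    #include <linux/property.h>
    #include <linux/types.h>

    static void my_read_axis_params(struct fwnode_handle *child,
                                    u32 *fuzz, u32 *flat)
    {
            /* fwnode_property_read_u32() leaves *val untouched on failure,
             * so set explicit defaults instead of relying on whatever
             * happened to be on the stack.
             */
            if (fwnode_property_read_u32(child, "abs-fuzz", fuzz))
                    *fuzz = 0;

            if (fwnode_property_read_u32(child, "abs-flat", flat))
                    *flat = 0;
    }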
+14
drivers/input/misc/uinput.c
··· 417 417 return -EINVAL; 418 418 } 419 419 420 + /* 421 + * Limit number of contacts to a reasonable value (100). This 422 + * ensures that we need less than 2 pages for struct input_mt 423 + * (we are not using in-kernel slot assignment so not going to 424 + * allocate memory for the "red" table), and we should have no 425 + * trouble getting this much memory. 426 + */ 427 + if (code == ABS_MT_SLOT && max > 99) { 428 + printk(KERN_DEBUG 429 + "%s: unreasonably large number of slots requested: %d\n", 430 + UINPUT_NAME, max); 431 + return -EINVAL; 432 + } 433 + 420 434 return 0; 421 435 } 422 436
+1
drivers/input/mouse/synaptics.c
··· 189 189 "LEN2054", /* E480 */ 190 190 "LEN2055", /* E580 */ 191 191 "LEN2068", /* T14 Gen 1 */ 192 + "SYN3015", /* HP EliteBook 840 G2 */ 192 193 "SYN3052", /* HP EliteBook 840 G4 */ 193 194 "SYN3221", /* HP 15-ay000 */ 194 195 "SYN323d", /* HP Spectre X360 13-w013dx */
+17 -12
drivers/input/serio/i8042-acpipnpio.h
··· 83 83 #define SERIO_QUIRK_KBDRESET BIT(12) 84 84 #define SERIO_QUIRK_DRITEK BIT(13) 85 85 #define SERIO_QUIRK_NOPNP BIT(14) 86 + #define SERIO_QUIRK_FORCENORESTORE BIT(15) 86 87 87 88 /* Quirk table for different mainboards. Options similar or identical to i8042 88 89 * module parameters. ··· 628 627 .driver_data = (void *)(SERIO_QUIRK_NOMUX) 629 628 }, 630 629 { 630 + /* Fujitsu Lifebook E756 */ 631 + /* https://bugzilla.suse.com/show_bug.cgi?id=1229056 */ 632 + .matches = { 633 + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), 634 + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E756"), 635 + }, 636 + .driver_data = (void *)(SERIO_QUIRK_NOMUX) 637 + }, 638 + { 631 639 /* Fujitsu Lifebook E5411 */ 632 640 .matches = { 633 641 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU CLIENT COMPUTING LIMITED"), ··· 1159 1149 SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) 1160 1150 }, 1161 1151 { 1162 - /* 1163 - * Setting SERIO_QUIRK_NOMUX or SERIO_QUIRK_RESET_ALWAYS makes 1164 - * the keyboard very laggy for ~5 seconds after boot and 1165 - * sometimes also after resume. 1166 - * However both are required for the keyboard to not fail 1167 - * completely sometimes after boot or resume. 1168 - */ 1169 1152 .matches = { 1170 1153 DMI_MATCH(DMI_BOARD_NAME, "N150CU"), 1171 1154 }, 1172 - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | 1173 - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) 1155 + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) 1174 1156 }, 1175 1157 { 1176 1158 .matches = { ··· 1687 1685 if (quirks & SERIO_QUIRK_NOPNP) 1688 1686 i8042_nopnp = true; 1689 1687 #endif 1688 + if (quirks & SERIO_QUIRK_FORCENORESTORE) 1689 + i8042_forcenorestore = true; 1690 1690 } 1691 1691 #else 1692 1692 static inline void i8042_check_quirks(void) {} ··· 1722 1718 1723 1719 i8042_check_quirks(); 1724 1720 1725 - pr_debug("Active quirks (empty means none):%s%s%s%s%s%s%s%s%s%s%s%s%s\n", 1721 + pr_debug("Active quirks (empty means none):%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", 1726 1722 i8042_nokbd ? " nokbd" : "", 1727 1723 i8042_noaux ? " noaux" : "", 1728 1724 i8042_nomux ? " nomux" : "", ··· 1742 1738 "", 1743 1739 #endif 1744 1740 #ifdef CONFIG_PNP 1745 - i8042_nopnp ? " nopnp" : ""); 1741 + i8042_nopnp ? " nopnp" : "", 1746 1742 #else 1747 - ""); 1743 + "", 1748 1744 #endif 1745 + i8042_forcenorestore ? " forcenorestore" : ""); 1749 1746 1750 1747 retval = i8042_pnp_init(); 1751 1748 if (retval)
+7 -3
drivers/input/serio/i8042.c
··· 115 115 MODULE_PARM_DESC(nopnp, "Do not use PNP to detect controller settings"); 116 116 #endif 117 117 118 + static bool i8042_forcenorestore; 119 + module_param_named(forcenorestore, i8042_forcenorestore, bool, 0); 120 + MODULE_PARM_DESC(forcenorestore, "Force no restore on s3 resume, copying s2idle behaviour"); 121 + 118 122 #define DEBUG 119 123 #ifdef DEBUG 120 124 static bool i8042_debug; ··· 1236 1232 { 1237 1233 int i; 1238 1234 1239 - if (pm_suspend_via_firmware()) 1235 + if (!i8042_forcenorestore && pm_suspend_via_firmware()) 1240 1236 i8042_controller_reset(true); 1241 1237 1242 1238 /* Set up serio interrupts for system wakeup. */ ··· 1252 1248 1253 1249 static int i8042_pm_resume_noirq(struct device *dev) 1254 1250 { 1255 - if (!pm_resume_via_firmware()) 1251 + if (i8042_forcenorestore || !pm_resume_via_firmware()) 1256 1252 i8042_interrupt(0, NULL); 1257 1253 1258 1254 return 0; ··· 1275 1271 * not restore the controller state to whatever it had been at boot 1276 1272 * time, so we do not need to do anything. 1277 1273 */ 1278 - if (!pm_suspend_via_firmware()) 1274 + if (i8042_forcenorestore || !pm_suspend_via_firmware()) 1279 1275 return 0; 1280 1276 1281 1277 /*
+1 -1
drivers/input/touchscreen/ads7846.c
··· 824 824 m = &ts->msg[msg_idx]; 825 825 error = spi_sync(ts->spi, m); 826 826 if (error) { 827 - dev_err(&ts->spi->dev, "spi_sync --> %d\n", error); 827 + dev_err_ratelimited(&ts->spi->dev, "spi_sync --> %d\n", error); 828 828 packet->ignore = true; 829 829 return; 830 830 }
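The ads7846 change switches a per-sample error message to the rate-limited variant so a persistently failing SPI bus cannot flood the log. Roughly, in a hypothetical handler:

    #include <linux/device.h>

    static void my_handle_transfer(struct device *dev, int status)
    {
            if (status) {
                    /* Emits at most a small burst per interval instead of
                     * one line for every failed transfer on a busy
                     * sampling path.
                     */
                    dev_err_ratelimited(dev, "transfer failed: %d\n", status);
                    return;
            }

            /* ... process the completed transfer ... */
    }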
+6
drivers/input/touchscreen/edt-ft5x06.c
··· 1474 1474 .max_support_points = 2, 1475 1475 }; 1476 1476 1477 + static const struct edt_i2c_chip_data edt_ft8201_data = { 1478 + .max_support_points = 10, 1479 + }; 1480 + 1477 1481 static const struct edt_i2c_chip_data edt_ft8719_data = { 1478 1482 .max_support_points = 10, 1479 1483 }; ··· 1489 1485 { .name = "ft5452", .driver_data = (long)&edt_ft5452_data }, 1490 1486 /* Note no edt- prefix for compatibility with the ft6236.c driver */ 1491 1487 { .name = "ft6236", .driver_data = (long)&edt_ft6236_data }, 1488 + { .name = "ft8201", .driver_data = (long)&edt_ft8201_data }, 1492 1489 { .name = "ft8719", .driver_data = (long)&edt_ft8719_data }, 1493 1490 { /* sentinel */ } 1494 1491 }; ··· 1505 1500 { .compatible = "focaltech,ft5452", .data = &edt_ft5452_data }, 1506 1501 /* Note focaltech vendor prefix for compatibility with ft6236.c */ 1507 1502 { .compatible = "focaltech,ft6236", .data = &edt_ft6236_data }, 1503 + { .compatible = "focaltech,ft8201", .data = &edt_ft8201_data }, 1508 1504 { .compatible = "focaltech,ft8719", .data = &edt_ft8719_data }, 1509 1505 { /* sentinel */ } 1510 1506 };
+2 -12
drivers/input/touchscreen/himax_hx83112b.c
··· 130 130 return 0; 131 131 } 132 132 133 - static int himax_read_mcu(struct himax_ts_data *ts, u32 address, u32 *dst) 134 - { 135 - int error; 136 - 137 - error = himax_bus_read(ts, address, dst, sizeof(dst)); 138 - if (error) 139 - return error; 140 - 141 - return 0; 142 - } 143 - 144 133 static void himax_reset(struct himax_ts_data *ts) 145 134 { 146 135 gpiod_set_value_cansleep(ts->gpiod_rst, 1); ··· 149 160 { 150 161 int error; 151 162 152 - error = himax_read_mcu(ts, HIMAX_REG_ADDR_ICID, product_id); 163 + error = himax_bus_read(ts, HIMAX_REG_ADDR_ICID, product_id, 164 + sizeof(*product_id)); 153 165 if (error) 154 166 return error; 155 167
+5 -4
drivers/mmc/core/mmc_test.c
··· 3125 3125 test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL); 3126 3126 #ifdef CONFIG_HIGHMEM 3127 3127 test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER); 3128 + if (!test->highmem) { 3129 + count = -ENOMEM; 3130 + goto free_test_buffer; 3131 + } 3128 3132 #endif 3129 3133 3130 - #ifdef CONFIG_HIGHMEM 3131 - if (test->buffer && test->highmem) { 3132 - #else 3133 3134 if (test->buffer) { 3134 - #endif 3135 3135 mutex_lock(&mmc_test_lock); 3136 3136 mmc_test_run(test, testcase); 3137 3137 mutex_unlock(&mmc_test_lock); ··· 3139 3139 3140 3140 #ifdef CONFIG_HIGHMEM 3141 3141 __free_pages(test->highmem, BUFFER_ORDER); 3142 + free_test_buffer: 3142 3143 #endif 3143 3144 kfree(test->buffer); 3144 3145 kfree(test);
+8
drivers/mmc/host/dw_mmc.c
··· 3299 3299 host->biu_clk = devm_clk_get(host->dev, "biu"); 3300 3300 if (IS_ERR(host->biu_clk)) { 3301 3301 dev_dbg(host->dev, "biu clock not available\n"); 3302 + ret = PTR_ERR(host->biu_clk); 3303 + if (ret == -EPROBE_DEFER) 3304 + return ret; 3305 + 3302 3306 } else { 3303 3307 ret = clk_prepare_enable(host->biu_clk); 3304 3308 if (ret) { ··· 3314 3310 host->ciu_clk = devm_clk_get(host->dev, "ciu"); 3315 3311 if (IS_ERR(host->ciu_clk)) { 3316 3312 dev_dbg(host->dev, "ciu clock not available\n"); 3313 + ret = PTR_ERR(host->ciu_clk); 3314 + if (ret == -EPROBE_DEFER) 3315 + goto err_clk_biu; 3316 + 3317 3317 host->bus_hz = host->pdata->bus_hz; 3318 3318 } else { 3319 3319 ret = clk_prepare_enable(host->ciu_clk);
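The dw_mmc fix treats the clocks as optional but still propagates -EPROBE_DEFER, so a clock provider that has not probed yet causes a retry instead of the driver silently running without the clock. A sketch of that pattern with an assumed clock name:

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/err.h>

    static int my_get_optional_clk(struct device *dev, struct clk **out)
    {
            struct clk *clk = devm_clk_get(dev, "biu");

            if (IS_ERR(clk)) {
                    /* A missing clock is acceptable here, but a provider
                     * that is not ready yet must make the caller retry.
                     */
                    if (PTR_ERR(clk) == -EPROBE_DEFER)
                            return -EPROBE_DEFER;
                    clk = NULL;
            }

            *out = clk;
            return 0;
    }

For new code, devm_clk_get_optional() expresses the same intent more directly: it returns NULL when the clock is simply not described while still returning -EPROBE_DEFER and other real errors.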
+4 -4
drivers/mmc/host/mtk-sd.c
··· 1230 1230 } 1231 1231 1232 1232 if (!sbc_error && !(events & MSDC_INT_CMDRDY)) { 1233 - if (events & MSDC_INT_CMDTMO || 1233 + if ((events & MSDC_INT_CMDTMO && !host->hs400_tuning) || 1234 1234 (!mmc_op_tuning(cmd->opcode) && !host->hs400_tuning)) 1235 1235 /* 1236 1236 * should not clear fifo/interrupt as the tune data ··· 1323 1323 static void msdc_cmd_next(struct msdc_host *host, 1324 1324 struct mmc_request *mrq, struct mmc_command *cmd) 1325 1325 { 1326 - if ((cmd->error && 1327 - !(cmd->error == -EILSEQ && 1328 - (mmc_op_tuning(cmd->opcode) || host->hs400_tuning))) || 1326 + if ((cmd->error && !host->hs400_tuning && 1327 + !(cmd->error == -EILSEQ && 1328 + mmc_op_tuning(cmd->opcode))) || 1329 1329 (mrq->sbc && mrq->sbc->error)) 1330 1330 msdc_request_done(host, mrq); 1331 1331 else if (cmd == mrq->sbc)
+105 -54
drivers/net/bonding/bond_main.c
··· 427 427 struct netlink_ext_ack *extack) 428 428 { 429 429 struct net_device *bond_dev = xs->xso.dev; 430 + struct net_device *real_dev; 431 + netdevice_tracker tracker; 430 432 struct bond_ipsec *ipsec; 431 433 struct bonding *bond; 432 434 struct slave *slave; ··· 440 438 rcu_read_lock(); 441 439 bond = netdev_priv(bond_dev); 442 440 slave = rcu_dereference(bond->curr_active_slave); 443 - if (!slave) { 444 - rcu_read_unlock(); 445 - return -ENODEV; 441 + real_dev = slave ? slave->dev : NULL; 442 + netdev_hold(real_dev, &tracker, GFP_ATOMIC); 443 + rcu_read_unlock(); 444 + if (!real_dev) { 445 + err = -ENODEV; 446 + goto out; 446 447 } 447 448 448 - if (!slave->dev->xfrmdev_ops || 449 - !slave->dev->xfrmdev_ops->xdo_dev_state_add || 450 - netif_is_bond_master(slave->dev)) { 449 + if (!real_dev->xfrmdev_ops || 450 + !real_dev->xfrmdev_ops->xdo_dev_state_add || 451 + netif_is_bond_master(real_dev)) { 451 452 NL_SET_ERR_MSG_MOD(extack, "Slave does not support ipsec offload"); 452 - rcu_read_unlock(); 453 - return -EINVAL; 453 + err = -EINVAL; 454 + goto out; 454 455 } 455 456 456 - ipsec = kmalloc(sizeof(*ipsec), GFP_ATOMIC); 457 + ipsec = kmalloc(sizeof(*ipsec), GFP_KERNEL); 457 458 if (!ipsec) { 458 - rcu_read_unlock(); 459 - return -ENOMEM; 459 + err = -ENOMEM; 460 + goto out; 460 461 } 461 - xs->xso.real_dev = slave->dev; 462 462 463 - err = slave->dev->xfrmdev_ops->xdo_dev_state_add(xs, extack); 463 + xs->xso.real_dev = real_dev; 464 + err = real_dev->xfrmdev_ops->xdo_dev_state_add(xs, extack); 464 465 if (!err) { 465 466 ipsec->xs = xs; 466 467 INIT_LIST_HEAD(&ipsec->list); 467 - spin_lock_bh(&bond->ipsec_lock); 468 + mutex_lock(&bond->ipsec_lock); 468 469 list_add(&ipsec->list, &bond->ipsec_list); 469 - spin_unlock_bh(&bond->ipsec_lock); 470 + mutex_unlock(&bond->ipsec_lock); 470 471 } else { 471 472 kfree(ipsec); 472 473 } 473 - rcu_read_unlock(); 474 + out: 475 + netdev_put(real_dev, &tracker); 474 476 return err; 475 477 } 476 478 477 479 static void bond_ipsec_add_sa_all(struct bonding *bond) 478 480 { 479 481 struct net_device *bond_dev = bond->dev; 482 + struct net_device *real_dev; 480 483 struct bond_ipsec *ipsec; 481 484 struct slave *slave; 482 485 483 - rcu_read_lock(); 484 - slave = rcu_dereference(bond->curr_active_slave); 485 - if (!slave) 486 - goto out; 486 + slave = rtnl_dereference(bond->curr_active_slave); 487 + real_dev = slave ? 
slave->dev : NULL; 488 + if (!real_dev) 489 + return; 487 490 488 - if (!slave->dev->xfrmdev_ops || 489 - !slave->dev->xfrmdev_ops->xdo_dev_state_add || 490 - netif_is_bond_master(slave->dev)) { 491 - spin_lock_bh(&bond->ipsec_lock); 491 + mutex_lock(&bond->ipsec_lock); 492 + if (!real_dev->xfrmdev_ops || 493 + !real_dev->xfrmdev_ops->xdo_dev_state_add || 494 + netif_is_bond_master(real_dev)) { 492 495 if (!list_empty(&bond->ipsec_list)) 493 - slave_warn(bond_dev, slave->dev, 496 + slave_warn(bond_dev, real_dev, 494 497 "%s: no slave xdo_dev_state_add\n", 495 498 __func__); 496 - spin_unlock_bh(&bond->ipsec_lock); 497 499 goto out; 498 500 } 499 501 500 - spin_lock_bh(&bond->ipsec_lock); 501 502 list_for_each_entry(ipsec, &bond->ipsec_list, list) { 502 - ipsec->xs->xso.real_dev = slave->dev; 503 - if (slave->dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs, NULL)) { 504 - slave_warn(bond_dev, slave->dev, "%s: failed to add SA\n", __func__); 503 + /* If new state is added before ipsec_lock acquired */ 504 + if (ipsec->xs->xso.real_dev == real_dev) 505 + continue; 506 + 507 + ipsec->xs->xso.real_dev = real_dev; 508 + if (real_dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs, NULL)) { 509 + slave_warn(bond_dev, real_dev, "%s: failed to add SA\n", __func__); 505 510 ipsec->xs->xso.real_dev = NULL; 506 511 } 507 512 } 508 - spin_unlock_bh(&bond->ipsec_lock); 509 513 out: 510 - rcu_read_unlock(); 514 + mutex_unlock(&bond->ipsec_lock); 511 515 } 512 516 513 517 /** ··· 523 515 static void bond_ipsec_del_sa(struct xfrm_state *xs) 524 516 { 525 517 struct net_device *bond_dev = xs->xso.dev; 518 + struct net_device *real_dev; 519 + netdevice_tracker tracker; 526 520 struct bond_ipsec *ipsec; 527 521 struct bonding *bond; 528 522 struct slave *slave; ··· 535 525 rcu_read_lock(); 536 526 bond = netdev_priv(bond_dev); 537 527 slave = rcu_dereference(bond->curr_active_slave); 528 + real_dev = slave ? 
slave->dev : NULL; 529 + netdev_hold(real_dev, &tracker, GFP_ATOMIC); 530 + rcu_read_unlock(); 538 531 539 532 if (!slave) 540 533 goto out; ··· 545 532 if (!xs->xso.real_dev) 546 533 goto out; 547 534 548 - WARN_ON(xs->xso.real_dev != slave->dev); 535 + WARN_ON(xs->xso.real_dev != real_dev); 549 536 550 - if (!slave->dev->xfrmdev_ops || 551 - !slave->dev->xfrmdev_ops->xdo_dev_state_delete || 552 - netif_is_bond_master(slave->dev)) { 553 - slave_warn(bond_dev, slave->dev, "%s: no slave xdo_dev_state_delete\n", __func__); 537 + if (!real_dev->xfrmdev_ops || 538 + !real_dev->xfrmdev_ops->xdo_dev_state_delete || 539 + netif_is_bond_master(real_dev)) { 540 + slave_warn(bond_dev, real_dev, "%s: no slave xdo_dev_state_delete\n", __func__); 554 541 goto out; 555 542 } 556 543 557 - slave->dev->xfrmdev_ops->xdo_dev_state_delete(xs); 544 + real_dev->xfrmdev_ops->xdo_dev_state_delete(xs); 558 545 out: 559 - spin_lock_bh(&bond->ipsec_lock); 546 + netdev_put(real_dev, &tracker); 547 + mutex_lock(&bond->ipsec_lock); 560 548 list_for_each_entry(ipsec, &bond->ipsec_list, list) { 561 549 if (ipsec->xs == xs) { 562 550 list_del(&ipsec->list); ··· 565 551 break; 566 552 } 567 553 } 568 - spin_unlock_bh(&bond->ipsec_lock); 569 - rcu_read_unlock(); 554 + mutex_unlock(&bond->ipsec_lock); 570 555 } 571 556 572 557 static void bond_ipsec_del_sa_all(struct bonding *bond) 573 558 { 574 559 struct net_device *bond_dev = bond->dev; 560 + struct net_device *real_dev; 575 561 struct bond_ipsec *ipsec; 576 562 struct slave *slave; 577 563 578 - rcu_read_lock(); 579 - slave = rcu_dereference(bond->curr_active_slave); 580 - if (!slave) { 581 - rcu_read_unlock(); 564 + slave = rtnl_dereference(bond->curr_active_slave); 565 + real_dev = slave ? slave->dev : NULL; 566 + if (!real_dev) 582 567 return; 583 - } 584 568 585 - spin_lock_bh(&bond->ipsec_lock); 569 + mutex_lock(&bond->ipsec_lock); 586 570 list_for_each_entry(ipsec, &bond->ipsec_list, list) { 587 571 if (!ipsec->xs->xso.real_dev) 588 572 continue; 589 573 590 - if (!slave->dev->xfrmdev_ops || 591 - !slave->dev->xfrmdev_ops->xdo_dev_state_delete || 592 - netif_is_bond_master(slave->dev)) { 593 - slave_warn(bond_dev, slave->dev, 574 + if (!real_dev->xfrmdev_ops || 575 + !real_dev->xfrmdev_ops->xdo_dev_state_delete || 576 + netif_is_bond_master(real_dev)) { 577 + slave_warn(bond_dev, real_dev, 594 578 "%s: no slave xdo_dev_state_delete\n", 595 579 __func__); 596 580 } else { 597 - slave->dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs); 581 + real_dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs); 582 + if (real_dev->xfrmdev_ops->xdo_dev_state_free) 583 + real_dev->xfrmdev_ops->xdo_dev_state_free(ipsec->xs); 598 584 } 599 585 } 600 - spin_unlock_bh(&bond->ipsec_lock); 586 + mutex_unlock(&bond->ipsec_lock); 587 + } 588 + 589 + static void bond_ipsec_free_sa(struct xfrm_state *xs) 590 + { 591 + struct net_device *bond_dev = xs->xso.dev; 592 + struct net_device *real_dev; 593 + netdevice_tracker tracker; 594 + struct bonding *bond; 595 + struct slave *slave; 596 + 597 + if (!bond_dev) 598 + return; 599 + 600 + rcu_read_lock(); 601 + bond = netdev_priv(bond_dev); 602 + slave = rcu_dereference(bond->curr_active_slave); 603 + real_dev = slave ? 
slave->dev : NULL; 604 + netdev_hold(real_dev, &tracker, GFP_ATOMIC); 601 605 rcu_read_unlock(); 606 + 607 + if (!slave) 608 + goto out; 609 + 610 + if (!xs->xso.real_dev) 611 + goto out; 612 + 613 + WARN_ON(xs->xso.real_dev != real_dev); 614 + 615 + if (real_dev && real_dev->xfrmdev_ops && 616 + real_dev->xfrmdev_ops->xdo_dev_state_free) 617 + real_dev->xfrmdev_ops->xdo_dev_state_free(xs); 618 + out: 619 + netdev_put(real_dev, &tracker); 602 620 } 603 621 604 622 /** ··· 673 627 static const struct xfrmdev_ops bond_xfrmdev_ops = { 674 628 .xdo_dev_state_add = bond_ipsec_add_sa, 675 629 .xdo_dev_state_delete = bond_ipsec_del_sa, 630 + .xdo_dev_state_free = bond_ipsec_free_sa, 676 631 .xdo_dev_offload_ok = bond_ipsec_offload_ok, 677 632 }; 678 633 #endif /* CONFIG_XFRM_OFFLOAD */ ··· 5924 5877 /* set up xfrm device ops (only supported in active-backup right now) */ 5925 5878 bond_dev->xfrmdev_ops = &bond_xfrmdev_ops; 5926 5879 INIT_LIST_HEAD(&bond->ipsec_list); 5927 - spin_lock_init(&bond->ipsec_lock); 5880 + mutex_init(&bond->ipsec_lock); 5928 5881 #endif /* CONFIG_XFRM_OFFLOAD */ 5929 5882 5930 5883 /* don't acquire bond device's netif_tx_lock when transmitting */ ··· 5972 5925 bond_for_each_slave(bond, slave, iter) 5973 5926 __bond_release_one(bond_dev, slave->dev, true, true); 5974 5927 netdev_info(bond_dev, "Released all slaves\n"); 5928 + 5929 + #ifdef CONFIG_XFRM_OFFLOAD 5930 + mutex_destroy(&bond->ipsec_lock); 5931 + #endif /* CONFIG_XFRM_OFFLOAD */ 5975 5932 5976 5933 bond_set_slave_arr(bond, NULL, NULL); 5977 5934
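The bonding rework above stops doing sleeping work (GFP_KERNEL allocation, xdo callbacks, now a mutex) under rcu_read_lock(): it pins the slave device with netdev_hold(), leaves the RCU section, and releases the reference when done. A condensed sketch of that pattern with invented structures, not the bonding driver's own code:

    #include <linux/mutex.h>
    #include <linux/netdevice.h>
    #include <linux/rcupdate.h>

    struct my_priv {
            struct net_device __rcu *active_dev;
            struct mutex offload_lock;      /* initialised at setup; may sleep */
    };

    /* Assumed helper that programs hardware and may sleep. */
    static int my_program_hw(struct net_device *dev)
    {
            return 0;
    }

    static int my_offload_add(struct my_priv *priv)
    {
            netdevice_tracker tracker;
            struct net_device *dev;
            int err;

            rcu_read_lock();
            dev = rcu_dereference(priv->active_dev);
            netdev_hold(dev, &tracker, GFP_ATOMIC);     /* NULL-safe */
            rcu_read_unlock();

            if (!dev)
                    return -ENODEV;

            /* The reference keeps dev alive, so sleeping is now allowed. */
            mutex_lock(&priv->offload_lock);
            err = my_program_hw(dev);
            mutex_unlock(&priv->offload_lock);

            netdev_put(dev, &tracker);
            return err;
    }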
+18 -8
drivers/net/ethernet/faraday/ftgmac100.c
··· 582 582 (*processed)++; 583 583 return true; 584 584 585 - drop: 585 + drop: 586 586 /* Clean rxdes0 (which resets own bit) */ 587 587 rxdes->rxdes0 = cpu_to_le32(status & priv->rxdes0_edorr_mask); 588 588 priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer); ··· 665 665 netdev->stats.tx_bytes += skb->len; 666 666 ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat); 667 667 txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask); 668 + 669 + /* Ensure the descriptor config is visible before setting the tx 670 + * pointer. 671 + */ 672 + smp_wmb(); 668 673 669 674 priv->tx_clean_pointer = ftgmac100_next_tx_pointer(priv, pointer); 670 675 ··· 824 819 dma_wmb(); 825 820 first->txdes0 = cpu_to_le32(f_ctl_stat); 826 821 822 + /* Ensure the descriptor config is visible before setting the tx 823 + * pointer. 824 + */ 825 + smp_wmb(); 826 + 827 827 /* Update next TX pointer */ 828 828 priv->tx_pointer = pointer; 829 829 ··· 849 839 850 840 return NETDEV_TX_OK; 851 841 852 - dma_err: 842 + dma_err: 853 843 if (net_ratelimit()) 854 844 netdev_err(netdev, "map tx fragment failed\n"); 855 845 ··· 871 861 * last fragment, so we know ftgmac100_free_tx_packet() 872 862 * hasn't freed the skb yet. 873 863 */ 874 - drop: 864 + drop: 875 865 /* Drop the packet */ 876 866 dev_kfree_skb_any(skb); 877 867 netdev->stats.tx_dropped++; ··· 1364 1354 ftgmac100_init_all(priv, true); 1365 1355 1366 1356 netdev_dbg(netdev, "Reset done !\n"); 1367 - bail: 1357 + bail: 1368 1358 if (priv->mii_bus) 1369 1359 mutex_unlock(&priv->mii_bus->mdio_lock); 1370 1360 if (netdev->phydev) ··· 1564 1554 1565 1555 return 0; 1566 1556 1567 - err_ncsi: 1557 + err_ncsi: 1568 1558 phy_stop(netdev->phydev); 1569 1559 napi_disable(&priv->napi); 1570 1560 netif_stop_queue(netdev); 1571 - err_alloc: 1561 + err_alloc: 1572 1562 ftgmac100_free_buffers(priv); 1573 1563 free_irq(netdev->irq, netdev); 1574 - err_irq: 1564 + err_irq: 1575 1565 netif_napi_del(&priv->napi); 1576 - err_hw: 1566 + err_hw: 1577 1567 iowrite32(0, priv->base + FTGMAC100_OFFSET_IER); 1578 1568 ftgmac100_free_rings(priv); 1579 1569 return err;
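The ftgmac100 change inserts smp_wmb() so that a CPU which observes the updated tx pointer also observes the descriptor writes that preceded it. The producer/consumer pairing, in a self-contained sketch with an invented ring layout:

    #include <asm/barrier.h>
    #include <linux/compiler.h>
    #include <linux/types.h>

    struct my_ring {
            u32 desc[64];
            unsigned int tail;      /* producer publishes work through this index */
    };

    static void my_produce(struct my_ring *r, unsigned int slot, u32 val)
    {
            r->desc[slot] = val;
            /* Make the descriptor contents visible before the index that
             * advertises them to the consumer CPU.
             */
            smp_wmb();
            WRITE_ONCE(r->tail, slot);
    }

    static u32 my_consume(struct my_ring *r)
    {
            unsigned int slot = READ_ONCE(r->tail);

            smp_rmb();      /* pairs with the smp_wmb() in my_produce() */
            return r->desc[slot];
    }

Note the distinction in the hunk itself: smp_wmb() orders visibility between CPUs (the cleanup path reading the pointer), while dma_wmb(), also present there, orders writes as seen by the device.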
+37 -31
drivers/net/ethernet/microsoft/mana/hw_channel.c
··· 52 52 return 0; 53 53 } 54 54 55 - static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len, 56 - const struct gdma_resp_hdr *resp_msg) 57 - { 58 - struct hwc_caller_ctx *ctx; 59 - int err; 60 - 61 - if (!test_bit(resp_msg->response.hwc_msg_id, 62 - hwc->inflight_msg_res.map)) { 63 - dev_err(hwc->dev, "hwc_rx: invalid msg_id = %u\n", 64 - resp_msg->response.hwc_msg_id); 65 - return; 66 - } 67 - 68 - ctx = hwc->caller_ctx + resp_msg->response.hwc_msg_id; 69 - err = mana_hwc_verify_resp_msg(ctx, resp_msg, resp_len); 70 - if (err) 71 - goto out; 72 - 73 - ctx->status_code = resp_msg->status; 74 - 75 - memcpy(ctx->output_buf, resp_msg, resp_len); 76 - out: 77 - ctx->error = err; 78 - complete(&ctx->comp_event); 79 - } 80 - 81 55 static int mana_hwc_post_rx_wqe(const struct hwc_wq *hwc_rxq, 82 56 struct hwc_work_request *req) 83 57 { ··· 73 99 if (err) 74 100 dev_err(dev, "Failed to post WQE on HWC RQ: %d\n", err); 75 101 return err; 102 + } 103 + 104 + static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len, 105 + struct hwc_work_request *rx_req) 106 + { 107 + const struct gdma_resp_hdr *resp_msg = rx_req->buf_va; 108 + struct hwc_caller_ctx *ctx; 109 + int err; 110 + 111 + if (!test_bit(resp_msg->response.hwc_msg_id, 112 + hwc->inflight_msg_res.map)) { 113 + dev_err(hwc->dev, "hwc_rx: invalid msg_id = %u\n", 114 + resp_msg->response.hwc_msg_id); 115 + mana_hwc_post_rx_wqe(hwc->rxq, rx_req); 116 + return; 117 + } 118 + 119 + ctx = hwc->caller_ctx + resp_msg->response.hwc_msg_id; 120 + err = mana_hwc_verify_resp_msg(ctx, resp_msg, resp_len); 121 + if (err) 122 + goto out; 123 + 124 + ctx->status_code = resp_msg->status; 125 + 126 + memcpy(ctx->output_buf, resp_msg, resp_len); 127 + out: 128 + ctx->error = err; 129 + 130 + /* Must post rx wqe before complete(), otherwise the next rx may 131 + * hit no_wqe error. 132 + */ 133 + mana_hwc_post_rx_wqe(hwc->rxq, rx_req); 134 + 135 + complete(&ctx->comp_event); 76 136 } 77 137 78 138 static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self, ··· 243 235 return; 244 236 } 245 237 246 - mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, resp); 238 + mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, rx_req); 247 239 248 - /* Do no longer use 'resp', because the buffer is posted to the HW 249 - * in the below mana_hwc_post_rx_wqe(). 240 + /* Can no longer use 'resp', because the buffer is posted to the HW 241 + * in mana_hwc_handle_resp() above. 250 242 */ 251 243 resp = NULL; 252 - 253 - mana_hwc_post_rx_wqe(hwc_rxq, rx_req); 254 244 } 255 245 256 246 static void mana_hwc_tx_event_handler(void *ctx, u32 gdma_txq_id,
+1 -1
drivers/net/ethernet/pensando/ionic/ionic_dev.h
··· 32 32 #define IONIC_ADMIN_DOORBELL_DEADLINE (HZ / 2) /* 500ms */ 33 33 #define IONIC_TX_DOORBELL_DEADLINE (HZ / 100) /* 10ms */ 34 34 #define IONIC_RX_MIN_DOORBELL_DEADLINE (HZ / 100) /* 10ms */ 35 - #define IONIC_RX_MAX_DOORBELL_DEADLINE (HZ * 5) /* 5s */ 35 + #define IONIC_RX_MAX_DOORBELL_DEADLINE (HZ * 4) /* 4s */ 36 36 37 37 struct ionic_dev_bar { 38 38 void __iomem *vaddr;
+1 -1
drivers/net/ethernet/pensando/ionic/ionic_lif.c
··· 3220 3220 netdev->netdev_ops = &ionic_netdev_ops; 3221 3221 ionic_ethtool_set_ops(netdev); 3222 3222 3223 - netdev->watchdog_timeo = 2 * HZ; 3223 + netdev->watchdog_timeo = 5 * HZ; 3224 3224 netif_carrier_off(netdev); 3225 3225 3226 3226 lif->identity = lid;
+1
drivers/net/ethernet/ti/icssg/icssg_prueth.c
··· 1459 1459 1460 1460 static const struct prueth_pdata am64x_icssg_pdata = { 1461 1461 .fdqring_mode = K3_RINGACC_RING_MODE_RING, 1462 + .quirk_10m_link_issue = 1, 1462 1463 .switch_mode = 1, 1463 1464 }; 1464 1465
+1 -1
drivers/net/gtp.c
··· 1653 1653 sock = sockfd_lookup(fd, &err); 1654 1654 if (!sock) { 1655 1655 pr_debug("gtp socket fd=%d not found\n", fd); 1656 - return NULL; 1656 + return ERR_PTR(err); 1657 1657 } 1658 1658 1659 1659 sk = sock->sk;
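The gtp fix returns ERR_PTR(err) instead of NULL so the caller learns why the socket lookup failed. The ERR_PTR/IS_ERR/PTR_ERR convention in a tiny sketch (the lookup helper is hypothetical):

    #include <linux/err.h>

    struct my_obj;

    /* Assumed lookup helper that reports its failure reason through @err. */
    struct my_obj *my_find(int id, int *err);

    static struct my_obj *my_lookup(int id)
    {
            int err;
            struct my_obj *obj = my_find(id, &err);

            /* Encode the precise errno in the pointer instead of returning
             * a bare NULL that the caller cannot distinguish.
             */
            return obj ? obj : ERR_PTR(err);
    }

    static int my_use(int id)
    {
            struct my_obj *obj = my_lookup(id);

            if (IS_ERR(obj))
                    return PTR_ERR(obj);

            /* ... use obj ... */
            return 0;
    }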
+8 -5
drivers/net/wireless/intel/iwlwifi/fw/acpi.c
··· 725 725 entry = &wifi_pkg->package.elements[entry_idx]; 726 726 entry_idx++; 727 727 if (entry->type != ACPI_TYPE_INTEGER || 728 - entry->integer.value > num_profiles) { 728 + entry->integer.value > num_profiles || 729 + entry->integer.value < 730 + rev_data[idx].min_profiles) { 729 731 ret = -EINVAL; 730 732 goto out_free; 731 733 } 732 - num_profiles = entry->integer.value; 733 734 734 735 /* 735 - * this also validates >= min_profiles since we 736 - * otherwise wouldn't have gotten the data when 737 - * looking up in ACPI 736 + * Check to see if we received package count 737 + * same as max # of profiles 738 738 */ 739 739 if (wifi_pkg->package.count != 740 740 hdr_size + profile_size * num_profiles) { 741 741 ret = -EINVAL; 742 742 goto out_free; 743 743 } 744 + 745 + /* Number of valid profiles */ 746 + num_profiles = entry->integer.value; 744 747 } 745 748 goto read_table; 746 749 }
+1 -1
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
··· 3348 3348 { 3349 3349 int ret __maybe_unused = 0; 3350 3350 3351 - if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) 3351 + if (!iwl_trans_fw_running(fwrt->trans)) 3352 3352 return; 3353 3353 3354 3354 if (fw_has_capa(&fwrt->fw->ucode_capa,
+12
drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
··· 85 85 * May sleep 86 86 * @wimax_active: invoked when WiMax becomes active. May sleep 87 87 * @time_point: called when transport layer wants to collect debug data 88 + * @device_powered_off: called upon resume from hibernation but not only. 89 + * Op_mode needs to reset its internal state because the device did not 90 + * survive the system state transition. The firmware is no longer running, 91 + * etc... 88 92 */ 89 93 struct iwl_op_mode_ops { 90 94 struct iwl_op_mode *(*start)(struct iwl_trans *trans, ··· 111 107 void (*time_point)(struct iwl_op_mode *op_mode, 112 108 enum iwl_fw_ini_time_point tp_id, 113 109 union iwl_dbg_tlv_tp_data *tp_data); 110 + void (*device_powered_off)(struct iwl_op_mode *op_mode); 114 111 }; 115 112 116 113 int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops); ··· 207 202 if (!op_mode || !op_mode->ops || !op_mode->ops->time_point) 208 203 return; 209 204 op_mode->ops->time_point(op_mode, tp_id, tp_data); 205 + } 206 + 207 + static inline void iwl_op_mode_device_powered_off(struct iwl_op_mode *op_mode) 208 + { 209 + if (!op_mode || !op_mode->ops || !op_mode->ops->device_powered_off) 210 + return; 211 + op_mode->ops->device_powered_off(op_mode); 210 212 } 211 213 212 214 #endif /* __iwl_op_mode_h__ */
+1 -1
drivers/net/wireless/intel/iwlwifi/iwl-trans.h
··· 1128 1128 1129 1129 /* prevent double restarts due to the same erroneous FW */ 1130 1130 if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) { 1131 - iwl_op_mode_nic_error(trans->op_mode, sync); 1132 1131 trans->state = IWL_TRANS_NO_FW; 1132 + iwl_op_mode_nic_error(trans->op_mode, sync); 1133 1133 } 1134 1134 } 1135 1135
+10
drivers/net/wireless/intel/iwlwifi/mvm/d3.c
··· 3439 3439 3440 3440 mutex_lock(&mvm->mutex); 3441 3441 3442 + /* Apparently, the device went away and device_powered_off() was called, 3443 + * don't even try to read the rt_status, the device is currently 3444 + * inaccessible. 3445 + */ 3446 + if (!test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status)) { 3447 + IWL_INFO(mvm, 3448 + "Can't resume, device_powered_off() was called during wowlan\n"); 3449 + goto err; 3450 + } 3451 + 3442 3452 mvm->last_reset_or_resume_time_jiffies = jiffies; 3443 3453 3444 3454 /* get the BSS vif pointer again */
+8 -1
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
··· 5818 5818 int i; 5819 5819 5820 5820 if (!iwl_mvm_has_new_tx_api(mvm)) { 5821 + /* we can't ask the firmware anything if it is dead */ 5822 + if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, 5823 + &mvm->status)) 5824 + return; 5821 5825 if (drop) { 5822 5826 guard(mvm)(mvm); 5823 5827 iwl_mvm_flush_tx_path(mvm, ··· 5915 5911 5916 5912 /* this can take a while, and we may need/want other operations 5917 5913 * to succeed while doing this, so do it without the mutex held 5914 + * If the firmware is dead, this can't work... 5918 5915 */ 5919 - if (!drop && !iwl_mvm_has_new_tx_api(mvm)) 5916 + if (!drop && !iwl_mvm_has_new_tx_api(mvm) && 5917 + !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, 5918 + &mvm->status)) 5920 5919 iwl_trans_wait_tx_queues_empty(mvm->trans, msk); 5921 5920 } 5922 5921
+20 -1
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
··· 1198 1198 struct iwl_mvm *mvm = 1199 1199 container_of(wk, struct iwl_mvm, trig_link_selection_wk); 1200 1200 1201 + mutex_lock(&mvm->mutex); 1201 1202 ieee80211_iterate_active_interfaces(mvm->hw, 1202 1203 IEEE80211_IFACE_ITER_NORMAL, 1203 1204 iwl_mvm_find_link_selection_vif, 1204 1205 NULL); 1206 + mutex_unlock(&mvm->mutex); 1205 1207 } 1206 1208 1207 1209 static struct iwl_op_mode * ··· 1512 1510 iwl_fw_cancel_timestamp(&mvm->fwrt); 1513 1511 1514 1512 clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); 1513 + 1514 + iwl_mvm_pause_tcm(mvm, false); 1515 1515 1516 1516 iwl_fw_dbg_stop_sync(&mvm->fwrt); 1517 1517 iwl_trans_stop_device(mvm->trans); ··· 2094 2090 iwl_dbg_tlv_time_point(&mvm->fwrt, tp_id, tp_data); 2095 2091 } 2096 2092 2093 + static void iwl_op_mode_mvm_device_powered_off(struct iwl_op_mode *op_mode) 2094 + { 2095 + struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); 2096 + 2097 + mutex_lock(&mvm->mutex); 2098 + clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status); 2099 + mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; 2100 + iwl_mvm_stop_device(mvm); 2101 + #ifdef CONFIG_PM 2102 + mvm->fast_resume = false; 2103 + #endif 2104 + mutex_unlock(&mvm->mutex); 2105 + } 2106 + 2097 2107 #define IWL_MVM_COMMON_OPS \ 2098 2108 /* these could be differentiated */ \ 2099 2109 .queue_full = iwl_mvm_stop_sw_queue, \ ··· 2120 2102 /* as we only register one, these MUST be common! */ \ 2121 2103 .start = iwl_op_mode_mvm_start, \ 2122 2104 .stop = iwl_op_mode_mvm_stop, \ 2123 - .time_point = iwl_op_mode_mvm_time_point 2105 + .time_point = iwl_op_mode_mvm_time_point, \ 2106 + .device_powered_off = iwl_op_mode_mvm_device_powered_off 2124 2107 2125 2108 static const struct iwl_op_mode_ops iwl_mvm_ops = { 2126 2109 IWL_MVM_COMMON_OPS,
+27 -15
drivers/net/wireless/intel/iwlwifi/mvm/scan.c
··· 48 48 /* Number of iterations on the channel for mei filtered scan */ 49 49 #define IWL_MEI_SCAN_NUM_ITER 5U 50 50 51 + #define WFA_TPC_IE_LEN 9 52 + 51 53 struct iwl_mvm_scan_timing_params { 52 54 u32 suspend_time; 53 55 u32 max_out_time; ··· 305 303 306 304 max_probe_len = SCAN_OFFLOAD_PROBE_REQ_SIZE; 307 305 308 - /* we create the 802.11 header and SSID element */ 309 - max_probe_len -= 24 + 2; 306 + /* we create the 802.11 header SSID element and WFA TPC element */ 307 + max_probe_len -= 24 + 2 + WFA_TPC_IE_LEN; 310 308 311 309 /* DS parameter set element is added on 2.4GHZ band if required */ 312 310 if (iwl_mvm_rrm_scan_needed(mvm)) ··· 733 731 return newpos; 734 732 } 735 733 736 - #define WFA_TPC_IE_LEN 9 737 - 738 734 static void iwl_mvm_add_tpc_report_ie(u8 *pos) 739 735 { 740 736 pos[0] = WLAN_EID_VENDOR_SPECIFIC; ··· 837 837 return ((n_ssids <= PROBE_OPTION_MAX) && 838 838 (n_channels <= mvm->fw->ucode_capa.n_scan_channels) & 839 839 (ies->common_ie_len + 840 - ies->len[NL80211_BAND_2GHZ] + 841 - ies->len[NL80211_BAND_5GHZ] <= 840 + ies->len[NL80211_BAND_2GHZ] + ies->len[NL80211_BAND_5GHZ] + 841 + ies->len[NL80211_BAND_6GHZ] <= 842 842 iwl_mvm_max_scan_ie_fw_cmd_room(mvm))); 843 843 } 844 844 ··· 1659 1659 cfg->v2.channel_num = channels[i]->hw_value; 1660 1660 if (cfg80211_channel_is_psc(channels[i])) 1661 1661 cfg->flags = 0; 1662 + 1663 + if (band == NL80211_BAND_6GHZ) { 1664 + /* 6 GHz channels should only appear in a scan request 1665 + * that has scan_6ghz set. The only exception is MLO 1666 + * scan, which has to be passive. 1667 + */ 1668 + WARN_ON_ONCE(cfg->flags != 0); 1669 + cfg->flags = 1670 + cpu_to_le32(IWL_UHB_CHAN_CFG_FLAG_FORCE_PASSIVE); 1671 + } 1672 + 1662 1673 cfg->v2.iter_count = 1; 1663 1674 cfg->v2.iter_interval = 0; 1664 1675 if (version < 17) ··· 3179 3168 params.n_channels = j; 3180 3169 } 3181 3170 3182 - if (non_psc_included && 3183 - !iwl_mvm_scan_fits(mvm, req->n_ssids, ies, params.n_channels)) { 3184 - kfree(params.channels); 3185 - return -ENOBUFS; 3171 + if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, params.n_channels)) { 3172 + ret = -ENOBUFS; 3173 + goto out; 3186 3174 } 3187 3175 3188 3176 uid = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, &params, type); 3189 - 3190 - if (non_psc_included) 3191 - kfree(params.channels); 3192 - if (uid < 0) 3193 - return uid; 3177 + if (uid < 0) { 3178 + ret = uid; 3179 + goto out; 3180 + } 3194 3181 3195 3182 ret = iwl_mvm_send_cmd(mvm, &hcmd); 3196 3183 if (!ret) { ··· 3206 3197 mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; 3207 3198 } 3208 3199 3200 + out: 3201 + if (non_psc_included) 3202 + kfree(params.channels); 3209 3203 return ret; 3210 3204 } 3211 3205
+2 -1
drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
··· 89 89 } 90 90 break; 91 91 default: 92 - IWL_ERR(trans, "WRT: Invalid buffer destination\n"); 92 + IWL_DEBUG_FW(trans, "WRT: Invalid buffer destination (%d)\n", 93 + le32_to_cpu(fw_mon_cfg->buf_location)); 93 94 } 94 95 out: 95 96 if (dbg_flags)
+38 -3
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
··· 1577 1577 return 0; 1578 1578 } 1579 1579 1580 - static int iwl_pci_resume(struct device *device) 1580 + static int _iwl_pci_resume(struct device *device, bool restore) 1581 1581 { 1582 1582 struct pci_dev *pdev = to_pci_dev(device); 1583 1583 struct iwl_trans *trans = pci_get_drvdata(pdev); 1584 1584 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1585 + bool device_was_powered_off = false; 1585 1586 1586 1587 /* Before you put code here, think about WoWLAN. You cannot check here 1587 1588 * whether WoWLAN is enabled or not, and your code will run even if ··· 1597 1596 1598 1597 if (!trans->op_mode) 1599 1598 return 0; 1599 + 1600 + /* 1601 + * Scratch value was altered, this means the device was powered off, we 1602 + * need to reset it completely. 1603 + * Note: MAC (bits 0:7) will be cleared upon suspend even with wowlan, 1604 + * so assume that any bits there mean that the device is usable. 1605 + */ 1606 + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ && 1607 + !iwl_read32(trans, CSR_FUNC_SCRATCH)) 1608 + device_was_powered_off = true; 1609 + 1610 + if (restore || device_was_powered_off) { 1611 + trans->state = IWL_TRANS_NO_FW; 1612 + /* Hope for the best here ... If one of those steps fails we 1613 + * won't really know how to recover. 1614 + */ 1615 + iwl_pcie_prepare_card_hw(trans); 1616 + iwl_finish_nic_init(trans); 1617 + iwl_op_mode_device_powered_off(trans->op_mode); 1618 + } 1600 1619 1601 1620 /* In WOWLAN, let iwl_trans_pcie_d3_resume do the rest of the work */ 1602 1621 if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) ··· 1638 1617 return 0; 1639 1618 } 1640 1619 1620 + static int iwl_pci_restore(struct device *device) 1621 + { 1622 + return _iwl_pci_resume(device, true); 1623 + } 1624 + 1625 + static int iwl_pci_resume(struct device *device) 1626 + { 1627 + return _iwl_pci_resume(device, false); 1628 + } 1629 + 1641 1630 static const struct dev_pm_ops iwl_dev_pm_ops = { 1642 - SET_SYSTEM_SLEEP_PM_OPS(iwl_pci_suspend, 1643 - iwl_pci_resume) 1631 + .suspend = pm_sleep_ptr(iwl_pci_suspend), 1632 + .resume = pm_sleep_ptr(iwl_pci_resume), 1633 + .freeze = pm_sleep_ptr(iwl_pci_suspend), 1634 + .thaw = pm_sleep_ptr(iwl_pci_resume), 1635 + .poweroff = pm_sleep_ptr(iwl_pci_suspend), 1636 + .restore = pm_sleep_ptr(iwl_pci_restore), 1644 1637 }; 1645 1638 1646 1639 #define IWL_PM_OPS (&iwl_dev_pm_ops)
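The iwlwifi PCI change wires a dedicated .restore handler, because resume from hibernation cannot assume the device kept any state. A bare-bones dev_pm_ops sketch of that split (the callbacks are stubs, not the driver's logic):

    #include <linux/device.h>
    #include <linux/pm.h>

    static int my_suspend(struct device *dev)
    {
            return 0;       /* quiesce the device */
    }

    static int my_resume(struct device *dev)
    {
            return 0;       /* light re-init: device state survived */
    }

    static int my_restore(struct device *dev)
    {
            /* After hibernation the device may have lost power entirely,
             * so take the heavier full re-initialisation path here.
             */
            return 0;
    }

    static const struct dev_pm_ops my_pm_ops = {
            .suspend  = pm_sleep_ptr(my_suspend),
            .resume   = pm_sleep_ptr(my_resume),
            .freeze   = pm_sleep_ptr(my_suspend),
            .thaw     = pm_sleep_ptr(my_resume),
            .poweroff = pm_sleep_ptr(my_suspend),
            .restore  = pm_sleep_ptr(my_restore),
    };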
+26 -6
drivers/net/wireless/marvell/mwifiex/cfg80211.c
··· 4363 4363 if (ISSUPP_ADHOC_ENABLED(adapter->fw_cap_info)) 4364 4364 wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC); 4365 4365 4366 - wiphy->bands[NL80211_BAND_2GHZ] = &mwifiex_band_2ghz; 4367 - if (adapter->config_bands & BAND_A) 4368 - wiphy->bands[NL80211_BAND_5GHZ] = &mwifiex_band_5ghz; 4369 - else 4366 + wiphy->bands[NL80211_BAND_2GHZ] = devm_kmemdup(adapter->dev, 4367 + &mwifiex_band_2ghz, 4368 + sizeof(mwifiex_band_2ghz), 4369 + GFP_KERNEL); 4370 + if (!wiphy->bands[NL80211_BAND_2GHZ]) { 4371 + ret = -ENOMEM; 4372 + goto err; 4373 + } 4374 + 4375 + if (adapter->config_bands & BAND_A) { 4376 + wiphy->bands[NL80211_BAND_5GHZ] = devm_kmemdup(adapter->dev, 4377 + &mwifiex_band_5ghz, 4378 + sizeof(mwifiex_band_5ghz), 4379 + GFP_KERNEL); 4380 + if (!wiphy->bands[NL80211_BAND_5GHZ]) { 4381 + ret = -ENOMEM; 4382 + goto err; 4383 + } 4384 + } else { 4370 4385 wiphy->bands[NL80211_BAND_5GHZ] = NULL; 4386 + } 4371 4387 4372 4388 if (adapter->drcs_enabled && ISSUPP_DRCS_ENABLED(adapter->fw_cap_info)) 4373 4389 wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta_drcs; ··· 4477 4461 if (ret < 0) { 4478 4462 mwifiex_dbg(adapter, ERROR, 4479 4463 "%s: wiphy_register failed: %d\n", __func__, ret); 4480 - wiphy_free(wiphy); 4481 - return ret; 4464 + goto err; 4482 4465 } 4483 4466 4484 4467 if (!adapter->regd) { ··· 4518 4503 wiphy->retry_long = (u8) retry; 4519 4504 4520 4505 adapter->wiphy = wiphy; 4506 + return ret; 4507 + 4508 + err: 4509 + wiphy_free(wiphy); 4510 + 4521 4511 return ret; 4522 4512 }
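The mwifiex fix duplicates the static band templates per adapter with devm_kmemdup(), so one wiphy's modifications cannot leak into another and the copy is freed with the device. A generic sketch of the template-copy pattern under invented names:

    #include <linux/device.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct my_band_caps {
            u32 n_channels;
            /* ... per-band defaults ... */
    };

    static const struct my_band_caps my_band_template = {
            .n_channels = 14,
    };

    static int my_setup_band(struct device *dev, struct my_band_caps **out)
    {
            struct my_band_caps *caps;

            caps = devm_kmemdup(dev, &my_band_template,
                                sizeof(my_band_template), GFP_KERNEL);
            if (!caps)
                    return -ENOMEM;

            /* Each device instance can now modify its own copy; the memory
             * is released automatically when the device is unbound.
             */
            *out = caps;
            return 0;
    }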
+4 -1
drivers/net/wireless/silabs/wfx/sta.c
··· 352 352 353 353 ptr = (u16 *)cfg80211_find_ie(WLAN_EID_RSN, skb->data + ieoffset, 354 354 skb->len - ieoffset); 355 - if (unlikely(!ptr)) 355 + if (!ptr) { 356 + /* No RSN IE is fine in open networks */ 357 + ret = 0; 356 358 goto free_skb; 359 + } 357 360 358 361 ptr += pairwise_cipher_suite_count_offset; 359 362 if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
+5
drivers/nfc/pn533/pn533.c
··· 1723 1723 } 1724 1724 1725 1725 pn533_poll_create_mod_list(dev, im_protocols, tm_protocols); 1726 + if (!dev->poll_mod_count) { 1727 + nfc_err(dev->dev, 1728 + "Poll mod list is empty\n"); 1729 + return -EINVAL; 1730 + } 1726 1731 1727 1732 /* Do not always start polling from the same modulation */ 1728 1733 get_random_bytes(&rand_mod, sizeof(rand_mod));
+1 -1
drivers/nvme/host/core.c
··· 4612 4612 { 4613 4613 nvme_mpath_stop(ctrl); 4614 4614 nvme_auth_stop(ctrl); 4615 - nvme_stop_keep_alive(ctrl); 4616 4615 nvme_stop_failfast_work(ctrl); 4617 4616 flush_work(&ctrl->async_event_work); 4618 4617 cancel_work_sync(&ctrl->fw_act_work); ··· 4647 4648 4648 4649 void nvme_uninit_ctrl(struct nvme_ctrl *ctrl) 4649 4650 { 4651 + nvme_stop_keep_alive(ctrl); 4650 4652 nvme_hwmon_exit(ctrl); 4651 4653 nvme_fault_inject_fini(&ctrl->fault_inject); 4652 4654 dev_pm_qos_hide_latency_tolerance(ctrl->device);
-1
drivers/nvme/host/nvme.h
··· 301 301 302 302 struct opal_dev *opal_dev; 303 303 304 - char name[12]; 305 304 u16 cntlid; 306 305 307 306 u16 mtfa;
+29 -26
drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
··· 709 709 { 710 710 int err, rsel_val; 711 711 712 - if (!pullup && arg == MTK_DISABLE) 713 - return 0; 714 - 715 712 if (hw->rsel_si_unit) { 716 713 /* find pin rsel_index from pin_rsel array*/ 717 714 err = mtk_hw_pin_rsel_lookup(hw, desc, pullup, arg, &rsel_val); 718 715 if (err) 719 - goto out; 716 + return err; 720 717 } else { 721 - if (arg < MTK_PULL_SET_RSEL_000 || 722 - arg > MTK_PULL_SET_RSEL_111) { 723 - err = -EINVAL; 724 - goto out; 725 - } 718 + if (arg < MTK_PULL_SET_RSEL_000 || arg > MTK_PULL_SET_RSEL_111) 719 + return -EINVAL; 726 720 727 721 rsel_val = arg - MTK_PULL_SET_RSEL_000; 728 722 } 729 723 730 - err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_RSEL, rsel_val); 731 - if (err) 732 - goto out; 724 + return mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_RSEL, rsel_val); 725 + } 733 726 734 - err = mtk_pinconf_bias_set_pu_pd(hw, desc, pullup, MTK_ENABLE); 727 + static int mtk_pinconf_bias_set_pu_pd_rsel(struct mtk_pinctrl *hw, 728 + const struct mtk_pin_desc *desc, 729 + u32 pullup, u32 arg) 730 + { 731 + u32 enable = arg == MTK_DISABLE ? MTK_DISABLE : MTK_ENABLE; 732 + int err; 735 733 736 - out: 737 - return err; 734 + if (arg != MTK_DISABLE) { 735 + err = mtk_pinconf_bias_set_rsel(hw, desc, pullup, arg); 736 + if (err) 737 + return err; 738 + } 739 + 740 + return mtk_pinconf_bias_set_pu_pd(hw, desc, pullup, enable); 738 741 } 739 742 740 743 int mtk_pinconf_bias_set_combo(struct mtk_pinctrl *hw, ··· 753 750 try_all_type = MTK_PULL_TYPE_MASK; 754 751 755 752 if (try_all_type & MTK_PULL_RSEL_TYPE) { 756 - err = mtk_pinconf_bias_set_rsel(hw, desc, pullup, arg); 753 + err = mtk_pinconf_bias_set_pu_pd_rsel(hw, desc, pullup, arg); 757 754 if (!err) 758 - return err; 755 + return 0; 759 756 } 760 757 761 758 if (try_all_type & MTK_PULL_PU_PD_TYPE) { 762 759 err = mtk_pinconf_bias_set_pu_pd(hw, desc, pullup, arg); 763 760 if (!err) 764 - return err; 761 + return 0; 765 762 } 766 763 767 764 if (try_all_type & MTK_PULL_PULLSEL_TYPE) { 768 765 err = mtk_pinconf_bias_set_pullsel_pullen(hw, desc, 769 766 pullup, arg); 770 767 if (!err) 771 - return err; 768 + return 0; 772 769 } 773 770 774 771 if (try_all_type & MTK_PULL_PUPD_R1R0_TYPE) ··· 806 803 return 0; 807 804 } 808 805 809 - static int mtk_pinconf_bias_get_rsel(struct mtk_pinctrl *hw, 810 - const struct mtk_pin_desc *desc, 811 - u32 *pullup, u32 *enable) 806 + static int mtk_pinconf_bias_get_pu_pd_rsel(struct mtk_pinctrl *hw, 807 + const struct mtk_pin_desc *desc, 808 + u32 *pullup, u32 *enable) 812 809 { 813 810 int pu, pd, rsel, err; 814 811 ··· 942 939 try_all_type = MTK_PULL_TYPE_MASK; 943 940 944 941 if (try_all_type & MTK_PULL_RSEL_TYPE) { 945 - err = mtk_pinconf_bias_get_rsel(hw, desc, pullup, enable); 942 + err = mtk_pinconf_bias_get_pu_pd_rsel(hw, desc, pullup, enable); 946 943 if (!err) 947 - return err; 944 + return 0; 948 945 } 949 946 950 947 if (try_all_type & MTK_PULL_PU_PD_TYPE) { 951 948 err = mtk_pinconf_bias_get_pu_pd(hw, desc, pullup, enable); 952 949 if (!err) 953 - return err; 950 + return 0; 954 951 } 955 952 956 953 if (try_all_type & MTK_PULL_PULLSEL_TYPE) { 957 954 err = mtk_pinconf_bias_get_pullsel_pullen(hw, desc, 958 955 pullup, enable); 959 956 if (!err) 960 - return err; 957 + return 0; 961 958 } 962 959 963 960 if (try_all_type & MTK_PULL_PUPD_R1R0_TYPE)
+4 -1
drivers/pinctrl/pinctrl-at91.c
··· 1403 1403 1404 1404 /* We will handle a range of GPIO pins */ 1405 1405 for (i = 0; i < gpio_banks; i++) 1406 - if (gpio_chips[i]) 1406 + if (gpio_chips[i]) { 1407 1407 pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range); 1408 + gpiochip_add_pin_range(&gpio_chips[i]->chip, dev_name(info->pctl->dev), 0, 1409 + gpio_chips[i]->range.pin_base, gpio_chips[i]->range.npins); 1410 + } 1408 1411 1409 1412 dev_info(dev, "initialized AT91 pinctrl driver\n"); 1410 1413
+1 -1
drivers/pinctrl/pinctrl-rockchip.c
··· 3795 3795 PIN_BANK_IOMUX_FLAGS(0, 32, "gpio0", 0, 0, 0, 0), 3796 3796 PIN_BANK_IOMUX_FLAGS(1, 32, "gpio1", 0, 0, 0, 0), 3797 3797 PIN_BANK_IOMUX_FLAGS(2, 32, "gpio2", 0, 3798 - 0, 3798 + IOMUX_WIDTH_2BIT, 3799 3799 IOMUX_WIDTH_3BIT, 3800 3800 0), 3801 3801 PIN_BANK_IOMUX_FLAGS(3, 32, "gpio3",
+2
drivers/pinctrl/pinctrl-single.c
··· 345 345 return -ENOTSUPP; 346 346 fselector = setting->func; 347 347 function = pinmux_generic_get_function(pctldev, fselector); 348 + if (!function) 349 + return -EINVAL; 348 350 *func = function->data; 349 351 if (!(*func)) { 350 352 dev_err(pcs->dev, "%s could not find function%i\n",
+19 -16
drivers/pinctrl/qcom/pinctrl-x1e80100.c
··· 1805 1805 [235] = PINGROUP(235, aon_cci, qdss_gpio, _, _, _, _, _, _, _), 1806 1806 [236] = PINGROUP(236, aon_cci, qdss_gpio, _, _, _, _, _, _, _), 1807 1807 [237] = PINGROUP(237, _, _, _, _, _, _, _, _, _), 1808 - [238] = UFS_RESET(ufs_reset, 0x1f9000), 1809 - [239] = SDC_QDSD_PINGROUP(sdc2_clk, 0x1f2000, 14, 6), 1810 - [240] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x1f2000, 11, 3), 1811 - [241] = SDC_QDSD_PINGROUP(sdc2_data, 0x1f2000, 9, 0), 1808 + [238] = UFS_RESET(ufs_reset, 0xf9000), 1809 + [239] = SDC_QDSD_PINGROUP(sdc2_clk, 0xf2000, 14, 6), 1810 + [240] = SDC_QDSD_PINGROUP(sdc2_cmd, 0xf2000, 11, 3), 1811 + [241] = SDC_QDSD_PINGROUP(sdc2_data, 0xf2000, 9, 0), 1812 1812 }; 1813 1813 1814 1814 static const struct msm_gpio_wakeirq_map x1e80100_pdc_map[] = { 1815 1815 { 0, 72 }, { 2, 70 }, { 3, 71 }, { 6, 123 }, { 7, 67 }, { 11, 85 }, 1816 - { 15, 68 }, { 18, 122 }, { 19, 69 }, { 21, 158 }, { 23, 143 }, { 26, 129 }, 1817 - { 27, 144 }, { 28, 77 }, { 29, 78 }, { 30, 92 }, { 32, 145 }, { 33, 115 }, 1818 - { 34, 130 }, { 35, 146 }, { 36, 147 }, { 39, 80 }, { 43, 148 }, { 47, 149 }, 1819 - { 51, 79 }, { 53, 89 }, { 59, 87 }, { 64, 90 }, { 65, 106 }, { 66, 142 }, 1820 - { 67, 88 }, { 71, 91 }, { 75, 152 }, { 79, 153 }, { 80, 125 }, { 81, 128 }, 1821 - { 84, 137 }, { 85, 155 }, { 87, 156 }, { 91, 157 }, { 92, 138 }, { 94, 140 }, 1822 - { 95, 141 }, { 113, 84 }, { 121, 73 }, { 123, 74 }, { 129, 76 }, { 131, 82 }, 1823 - { 134, 83 }, { 141, 93 }, { 144, 94 }, { 147, 96 }, { 148, 97 }, { 150, 102 }, 1824 - { 151, 103 }, { 153, 104 }, { 156, 105 }, { 157, 107 }, { 163, 98 }, { 166, 112 }, 1825 - { 172, 99 }, { 181, 101 }, { 184, 116 }, { 193, 40 }, { 193, 117 }, { 196, 108 }, 1826 - { 203, 133 }, { 212, 120 }, { 213, 150 }, { 214, 121 }, { 215, 118 }, { 217, 109 }, 1827 - { 220, 110 }, { 221, 111 }, { 222, 124 }, { 224, 131 }, { 225, 132 }, 1816 + { 13, 86 }, { 15, 68 }, { 18, 122 }, { 19, 69 }, { 21, 158 }, { 23, 143 }, 1817 + { 24, 126 }, { 26, 129 }, { 27, 144 }, { 28, 77 }, { 29, 78 }, { 30, 92 }, 1818 + { 31, 159 }, { 32, 145 }, { 33, 115 }, { 34, 130 }, { 35, 146 }, { 36, 147 }, 1819 + { 38, 113 }, { 39, 80 }, { 43, 148 }, { 47, 149 }, { 51, 79 }, { 53, 89 }, 1820 + { 55, 81 }, { 59, 87 }, { 64, 90 }, { 65, 106 }, { 66, 142 }, { 67, 88 }, 1821 + { 68, 151 }, { 71, 91 }, { 75, 152 }, { 79, 153 }, { 80, 125 }, { 81, 128 }, 1822 + { 83, 154 }, { 84, 137 }, { 85, 155 }, { 87, 156 }, { 91, 157 }, { 92, 138 }, 1823 + { 93, 139 }, { 94, 140 }, { 95, 141 }, { 113, 84 }, { 121, 73 }, { 123, 74 }, 1824 + { 125, 75 }, { 129, 76 }, { 131, 82 }, { 134, 83 }, { 141, 93 }, { 144, 94 }, 1825 + { 145, 95 }, { 147, 96 }, { 148, 97 }, { 150, 102 }, { 151, 103 }, { 153, 104 }, 1826 + { 154, 100 }, { 156, 105 }, { 157, 107 }, { 163, 98 }, { 166, 112 }, { 172, 99 }, 1827 + { 175, 114 }, { 181, 101 }, { 184, 116 }, { 193, 117 }, { 196, 108 }, { 203, 133 }, 1828 + { 208, 134 }, { 212, 120 }, { 213, 150 }, { 214, 121 }, { 215, 118 }, { 217, 109 }, 1829 + { 219, 119 }, { 220, 110 }, { 221, 111 }, { 222, 124 }, { 224, 131 }, { 225, 132 }, 1830 + { 228, 135 }, { 230, 136 }, { 232, 162 }, 1828 1831 }; 1829 1832 1830 1833 static const struct msm_pinctrl_soc_data x1e80100_pinctrl = {
+2 -2
drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
··· 793 793 case IRQ_TYPE_LEVEL_HIGH: 794 794 irq_type = 0; /* 0: level triggered */ 795 795 edge_both = 0; /* 0: ignored */ 796 - polarity = mask; /* 1: high level */ 796 + polarity = 0; /* 0: high level */ 797 797 break; 798 798 case IRQ_TYPE_LEVEL_LOW: 799 799 irq_type = 0; /* 0: level triggered */ 800 800 edge_both = 0; /* 0: ignored */ 801 - polarity = 0; /* 0: low level */ 801 + polarity = mask; /* 1: low level */ 802 802 break; 803 803 default: 804 804 return -EINVAL;
+3
drivers/platform/x86/amd/pmc/pmc.c
··· 359 359 dev->smu_msg = 0x538; 360 360 break; 361 361 case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT: 362 + case PCI_DEVICE_ID_AMD_1AH_M60H_ROOT: 362 363 dev->num_ips = 22; 363 364 dev->s2d_msg_id = 0xDE; 364 365 dev->smu_msg = 0x938; ··· 598 597 val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_YC); 599 598 break; 600 599 case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT: 600 + case PCI_DEVICE_ID_AMD_1AH_M60H_ROOT: 601 601 val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_1AH); 602 602 break; 603 603 default: ··· 632 630 case AMD_CPU_ID_CB: 633 631 case AMD_CPU_ID_PS: 634 632 case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT: 633 + case PCI_DEVICE_ID_AMD_1AH_M60H_ROOT: 635 634 return true; 636 635 default: 637 636 return false;
+19 -1
drivers/platform/x86/asus-nb-wmi.c
··· 145 145 .wmi_ignore_fan = true, 146 146 }; 147 147 148 + static struct quirk_entry quirk_asus_zenbook_duo_kbd = { 149 + .ignore_key_wlan = true, 150 + }; 151 + 148 152 static int dmi_matched(const struct dmi_system_id *dmi) 149 153 { 150 154 pr_info("Identified laptop model '%s'\n", dmi->ident); ··· 520 516 }, 521 517 .driver_data = &quirk_asus_ignore_fan, 522 518 }, 519 + { 520 + .callback = dmi_matched, 521 + .ident = "ASUS Zenbook Duo UX8406MA", 522 + .matches = { 523 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), 524 + DMI_MATCH(DMI_PRODUCT_NAME, "UX8406MA"), 525 + }, 526 + .driver_data = &quirk_asus_zenbook_duo_kbd, 527 + }, 523 528 {}, 524 529 }; 525 530 ··· 643 630 case 0x32: /* Volume Mute */ 644 631 if (atkbd_reports_vol_keys) 645 632 *code = ASUS_WMI_KEY_IGNORE; 646 - 633 + break; 634 + case 0x5D: /* Wireless console Toggle */ 635 + case 0x5E: /* Wireless console Enable */ 636 + case 0x5F: /* Wireless console Disable */ 637 + if (quirks->ignore_key_wlan) 638 + *code = ASUS_WMI_KEY_IGNORE; 647 639 break; 648 640 } 649 641 }
+1
drivers/platform/x86/asus-wmi.h
··· 40 40 bool wmi_force_als_set; 41 41 bool wmi_ignore_fan; 42 42 bool filter_i8042_e1_extended_codes; 43 + bool ignore_key_wlan; 43 44 enum asus_wmi_tablet_switch_mode tablet_switch_mode; 44 45 int wapf; 45 46 /*
+1
drivers/platform/x86/dell/Kconfig
··· 161 161 config DELL_UART_BACKLIGHT 162 162 tristate "Dell AIO UART Backlight driver" 163 163 depends on ACPI 164 + depends on ACPI_VIDEO 164 165 depends on BACKLIGHT_CLASS_DEVICE 165 166 depends on SERIAL_DEV_BUS 166 167 help
+8
drivers/platform/x86/dell/dell-uart-backlight.c
··· 20 20 #include <linux/string.h> 21 21 #include <linux/types.h> 22 22 #include <linux/wait.h> 23 + #include <acpi/video.h> 23 24 #include "../serdev_helpers.h" 24 25 25 26 /* The backlight controller must respond within 1 second */ ··· 333 332 334 333 static int dell_uart_bl_pdev_probe(struct platform_device *pdev) 335 334 { 335 + enum acpi_backlight_type bl_type; 336 336 struct serdev_device *serdev; 337 337 struct device *ctrl_dev; 338 338 int ret; 339 + 340 + bl_type = acpi_video_get_backlight_type(); 341 + if (bl_type != acpi_backlight_dell_uart) { 342 + dev_dbg(&pdev->dev, "Not loading (ACPI backlight type = %d)\n", bl_type); 343 + return -ENODEV; 344 + } 339 345 340 346 ctrl_dev = get_serdev_controller("DELL0501", NULL, 0, "serial0"); 341 347 if (IS_ERR(ctrl_dev))
-1
drivers/platform/x86/x86-android-tablets/dmi.c
··· 140 140 /* Lenovo Yoga Tab 3 Pro YT3-X90F */ 141 141 .matches = { 142 142 DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), 143 - DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"), 144 143 DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"), 145 144 }, 146 145 .driver_data = (void *)&lenovo_yt3_info,
+3 -2
drivers/pmdomain/imx/imx93-pd.c
··· 20 20 #define FUNC_STAT_PSW_STAT_MASK BIT(0) 21 21 #define FUNC_STAT_RST_STAT_MASK BIT(2) 22 22 #define FUNC_STAT_ISO_STAT_MASK BIT(4) 23 + #define FUNC_STAT_SSAR_STAT_MASK BIT(8) 23 24 24 25 struct imx93_power_domain { 25 26 struct generic_pm_domain genpd; ··· 51 50 writel(val, addr + MIX_SLICE_SW_CTRL_OFF); 52 51 53 52 ret = readl_poll_timeout(addr + MIX_FUNC_STAT_OFF, val, 54 - !(val & FUNC_STAT_ISO_STAT_MASK), 1, 10000); 53 + !(val & FUNC_STAT_SSAR_STAT_MASK), 1, 10000); 55 54 if (ret) { 56 55 dev_err(domain->dev, "pd_on timeout: name: %s, stat: %x\n", genpd->name, val); 57 56 return ret; ··· 73 72 writel(val, addr + MIX_SLICE_SW_CTRL_OFF); 74 73 75 74 ret = readl_poll_timeout(addr + MIX_FUNC_STAT_OFF, val, 76 - val & FUNC_STAT_PSW_STAT_MASK, 1, 1000); 75 + val & FUNC_STAT_PSW_STAT_MASK, 1, 10000); 77 76 if (ret) { 78 77 dev_err(domain->dev, "pd_off timeout: name: %s, stat: %x\n", genpd->name, val); 79 78 return ret;
-5
drivers/pmdomain/imx/scu-pd.c
··· 223 223 { "lvds1-pwm", IMX_SC_R_LVDS_1_PWM_0, 1, false, 0 }, 224 224 { "lvds1-lpi2c", IMX_SC_R_LVDS_1_I2C_0, 2, true, 0 }, 225 225 226 - { "mipi1", IMX_SC_R_MIPI_1, 1, 0 }, 227 - { "mipi1-pwm0", IMX_SC_R_MIPI_1_PWM_0, 1, 0 }, 228 - { "mipi1-i2c", IMX_SC_R_MIPI_1_I2C_0, 2, 1 }, 229 - { "lvds1", IMX_SC_R_LVDS_1, 1, 0 }, 230 - 231 226 /* DC SS */ 232 227 { "dc0", IMX_SC_R_DC_0, 1, false, 0 }, 233 228 { "dc0-pll", IMX_SC_R_DC_0_PLL_0, 2, true, 0 },
+1 -1
drivers/power/sequencing/pwrseq-qcom-wcn.c
··· 283 283 "Failed to get the Bluetooth enable GPIO\n"); 284 284 285 285 ctx->wlan_gpio = devm_gpiod_get_optional(dev, "wlan-enable", 286 - GPIOD_OUT_LOW); 286 + GPIOD_ASIS); 287 287 if (IS_ERR(ctx->wlan_gpio)) 288 288 return dev_err_probe(dev, PTR_ERR(ctx->wlan_gpio), 289 289 "Failed to get the WLAN enable GPIO\n");
+6 -1
drivers/s390/crypto/ap_bus.c
··· 971 971 char *name) 972 972 { 973 973 struct device_driver *drv = &ap_drv->driver; 974 + int rc; 974 975 975 976 drv->bus = &ap_bus_type; 976 977 drv->owner = owner; 977 978 drv->name = name; 978 - return driver_register(drv); 979 + rc = driver_register(drv); 980 + 981 + ap_check_bindings_complete(); 982 + 983 + return rc; 979 984 } 980 985 EXPORT_SYMBOL(ap_driver_register); 981 986
+3
drivers/scsi/sd.c
··· 3308 3308 3309 3309 static unsigned int sd_discard_mode(struct scsi_disk *sdkp) 3310 3310 { 3311 + if (!sdkp->lbpme) 3312 + return SD_LBP_FULL; 3313 + 3311 3314 if (!sdkp->lbpvpd) { 3312 3315 /* LBP VPD page not provided */ 3313 3316 if (sdkp->max_unmap_blocks)
+13 -1
drivers/spi/spi-cadence-quadspi.c
··· 2000 2000 static int cqspi_suspend(struct device *dev) 2001 2001 { 2002 2002 struct cqspi_st *cqspi = dev_get_drvdata(dev); 2003 + int ret; 2003 2004 2004 - return spi_controller_suspend(cqspi->host); 2005 + ret = spi_controller_suspend(cqspi->host); 2006 + if (ret) 2007 + return ret; 2008 + 2009 + return pm_runtime_force_suspend(dev); 2005 2010 } 2006 2011 2007 2012 static int cqspi_resume(struct device *dev) 2008 2013 { 2009 2014 struct cqspi_st *cqspi = dev_get_drvdata(dev); 2015 + int ret; 2016 + 2017 + ret = pm_runtime_force_resume(dev); 2018 + if (ret) { 2019 + dev_err(dev, "pm_runtime_force_resume failed on resume\n"); 2020 + return ret; 2021 + } 2010 2022 2011 2023 return spi_controller_resume(cqspi->host); 2012 2024 }
+29 -2
drivers/spi/spi-fsl-lpspi.c
··· 82 82 #define TCR_RXMSK BIT(19) 83 83 #define TCR_TXMSK BIT(18) 84 84 85 + struct fsl_lpspi_devtype_data { 86 + u8 prescale_max; 87 + }; 88 + 85 89 struct lpspi_config { 86 90 u8 bpw; 87 91 u8 chip_select; ··· 123 119 bool usedma; 124 120 struct completion dma_rx_completion; 125 121 struct completion dma_tx_completion; 122 + 123 + const struct fsl_lpspi_devtype_data *devtype_data; 124 + }; 125 + 126 + /* 127 + * ERR051608 fixed or not: 128 + * https://www.nxp.com/docs/en/errata/i.MX93_1P87f.pdf 129 + */ 130 + static struct fsl_lpspi_devtype_data imx93_lpspi_devtype_data = { 131 + .prescale_max = 1, 132 + }; 133 + 134 + static struct fsl_lpspi_devtype_data imx7ulp_lpspi_devtype_data = { 135 + .prescale_max = 8, 126 136 }; 127 137 128 138 static const struct of_device_id fsl_lpspi_dt_ids[] = { 129 - { .compatible = "fsl,imx7ulp-spi", }, 139 + { .compatible = "fsl,imx7ulp-spi", .data = &imx7ulp_lpspi_devtype_data,}, 140 + { .compatible = "fsl,imx93-spi", .data = &imx93_lpspi_devtype_data,}, 130 141 { /* sentinel */ } 131 142 }; 132 143 MODULE_DEVICE_TABLE(of, fsl_lpspi_dt_ids); ··· 316 297 { 317 298 struct lpspi_config config = fsl_lpspi->config; 318 299 unsigned int perclk_rate, scldiv, div; 300 + u8 prescale_max; 319 301 u8 prescale; 320 302 321 303 perclk_rate = clk_get_rate(fsl_lpspi->clk_per); 304 + prescale_max = fsl_lpspi->devtype_data->prescale_max; 322 305 323 306 if (!config.speed_hz) { 324 307 dev_err(fsl_lpspi->dev, ··· 336 315 337 316 div = DIV_ROUND_UP(perclk_rate, config.speed_hz); 338 317 339 - for (prescale = 0; prescale < 8; prescale++) { 318 + for (prescale = 0; prescale < prescale_max; prescale++) { 340 319 scldiv = div / (1 << prescale) - 2; 341 320 if (scldiv < 256) { 342 321 fsl_lpspi->config.prescale = prescale; ··· 843 822 844 823 static int fsl_lpspi_probe(struct platform_device *pdev) 845 824 { 825 + const struct fsl_lpspi_devtype_data *devtype_data; 846 826 struct fsl_lpspi_data *fsl_lpspi; 847 827 struct spi_controller *controller; 848 828 struct resource *res; ··· 851 829 u32 num_cs; 852 830 u32 temp; 853 831 bool is_target; 832 + 833 + devtype_data = of_device_get_match_data(&pdev->dev); 834 + if (!devtype_data) 835 + return -ENODEV; 854 836 855 837 is_target = of_property_read_bool((&pdev->dev)->of_node, "spi-slave"); 856 838 if (is_target) ··· 874 848 fsl_lpspi->is_target = is_target; 875 849 fsl_lpspi->is_only_cs1 = of_property_read_bool((&pdev->dev)->of_node, 876 850 "fsl,spi-only-use-cs1-sel"); 851 + fsl_lpspi->devtype_data = devtype_data; 877 852 878 853 init_completion(&fsl_lpspi->xfer_done); 879 854
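The new fsl,imx93-spi match data caps the prescaler search at 1 because of erratum ERR051608, while i.MX7ULP keeps the full range of 8. Below is a minimal standalone sketch of that divider search; pick_prescale() and the clock rates are invented for illustration, and the real driver goes on to program the chosen prescale and divider into the controller.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Find the smallest prescaler whose SCKDIV value still fits in 8 bits.
 * prescale_max is 8 on i.MX7ULP and only 1 on i.MX93 (ERR051608).
 * Returns the prescaler, or -1 if no setting fits.
 */
static int pick_prescale(unsigned int perclk_rate, unsigned int speed_hz,
			 unsigned int prescale_max)
{
	unsigned int div = DIV_ROUND_UP(perclk_rate, speed_hz);

	for (unsigned int prescale = 0; prescale < prescale_max; prescale++) {
		unsigned int scldiv = div / (1u << prescale) - 2;

		if (scldiv < 256)
			return prescale;
	}
	return -1;
}

int main(void)
{
	printf("%d\n", pick_prescale(24000000, 1000000, 1));	/* 0: 24 / 1 - 2 = 22 fits */
	printf("%d\n", pick_prescale(24000000, 10000, 1));	/* -1: 2398 never fits with prescale_max == 1 */
	return 0;
}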
+14 -1
drivers/spi/spi-pxa2xx-pci.c
··· 11 11 #include <linux/module.h> 12 12 #include <linux/pci.h> 13 13 #include <linux/pm.h> 14 + #include <linux/pm_runtime.h> 14 15 #include <linux/sprintf.h> 15 16 #include <linux/string.h> 16 17 #include <linux/types.h> ··· 298 297 return ret; 299 298 ssp->irq = pci_irq_vector(dev, 0); 300 299 301 - return pxa2xx_spi_probe(&dev->dev, ssp); 300 + ret = pxa2xx_spi_probe(&dev->dev, ssp, pdata); 301 + if (ret) 302 + return ret; 303 + 304 + pm_runtime_set_autosuspend_delay(&dev->dev, 50); 305 + pm_runtime_use_autosuspend(&dev->dev); 306 + pm_runtime_put_autosuspend(&dev->dev); 307 + pm_runtime_allow(&dev->dev); 308 + 309 + return 0; 302 310 } 303 311 304 312 static void pxa2xx_spi_pci_remove(struct pci_dev *dev) 305 313 { 314 + pm_runtime_forbid(&dev->dev); 315 + pm_runtime_get_noresume(&dev->dev); 316 + 306 317 pxa2xx_spi_remove(&dev->dev); 307 318 } 308 319
+21 -5
drivers/spi/spi-pxa2xx-platform.c
··· 7 7 #include <linux/init.h> 8 8 #include <linux/mod_devicetable.h> 9 9 #include <linux/platform_device.h> 10 + #include <linux/pm_runtime.h> 10 11 #include <linux/property.h> 11 12 #include <linux/types.h> 12 13 ··· 64 63 65 64 ssp = pxa_ssp_request(pdev->id, pdev->name); 66 65 if (!ssp) 67 - return ssp; 66 + return NULL; 68 67 69 68 status = devm_add_action_or_reset(&pdev->dev, pxa2xx_spi_ssp_release, ssp); 70 69 if (status) ··· 143 142 struct pxa2xx_spi_controller *platform_info; 144 143 struct device *dev = &pdev->dev; 145 144 struct ssp_device *ssp; 145 + int ret; 146 146 147 147 platform_info = dev_get_platdata(dev); 148 148 if (!platform_info) { 149 149 platform_info = pxa2xx_spi_init_pdata(pdev); 150 150 if (IS_ERR(platform_info)) 151 151 return dev_err_probe(dev, PTR_ERR(platform_info), "missing platform data\n"); 152 - 153 - dev->platform_data = platform_info; 154 152 } 155 153 156 154 ssp = pxa2xx_spi_ssp_request(pdev); ··· 158 158 if (!ssp) 159 159 ssp = &platform_info->ssp; 160 160 161 - return pxa2xx_spi_probe(dev, ssp); 161 + pm_runtime_set_autosuspend_delay(dev, 50); 162 + pm_runtime_use_autosuspend(dev); 163 + pm_runtime_set_active(dev); 164 + pm_runtime_enable(dev); 165 + 166 + ret = pxa2xx_spi_probe(dev, ssp, platform_info); 167 + if (ret) 168 + pm_runtime_disable(dev); 169 + 170 + return ret; 162 171 } 163 172 164 173 static void pxa2xx_spi_platform_remove(struct platform_device *pdev) 165 174 { 166 - pxa2xx_spi_remove(&pdev->dev); 175 + struct device *dev = &pdev->dev; 176 + 177 + pm_runtime_get_sync(dev); 178 + 179 + pxa2xx_spi_remove(dev); 180 + 181 + pm_runtime_put_noidle(dev); 182 + pm_runtime_disable(dev); 167 183 } 168 184 169 185 static const struct acpi_device_id pxa2xx_spi_acpi_match[] = {
+3 -17
drivers/spi/spi-pxa2xx.c
··· 1277 1277 return MAX_DMA_LEN; 1278 1278 } 1279 1279 1280 - int pxa2xx_spi_probe(struct device *dev, struct ssp_device *ssp) 1280 + int pxa2xx_spi_probe(struct device *dev, struct ssp_device *ssp, 1281 + struct pxa2xx_spi_controller *platform_info) 1281 1282 { 1282 - struct pxa2xx_spi_controller *platform_info; 1283 1283 struct spi_controller *controller; 1284 1284 struct driver_data *drv_data; 1285 1285 const struct lpss_config *config; 1286 1286 int status; 1287 1287 u32 tmp; 1288 1288 1289 - platform_info = dev_get_platdata(dev); 1290 1289 if (platform_info->is_target) 1291 1290 controller = devm_spi_alloc_target(dev, sizeof(*drv_data)); 1292 1291 else ··· 1449 1450 } 1450 1451 } 1451 1452 1452 - pm_runtime_set_autosuspend_delay(dev, 50); 1453 - pm_runtime_use_autosuspend(dev); 1454 - pm_runtime_set_active(dev); 1455 - pm_runtime_enable(dev); 1456 - 1457 1453 /* Register with the SPI framework */ 1458 1454 dev_set_drvdata(dev, drv_data); 1459 1455 status = spi_register_controller(controller); 1460 1456 if (status) { 1461 1457 dev_err_probe(dev, status, "problem registering SPI controller\n"); 1462 - goto out_error_pm_runtime_enabled; 1458 + goto out_error_clock_enabled; 1463 1459 } 1464 1460 1465 1461 return status; 1466 - 1467 - out_error_pm_runtime_enabled: 1468 - pm_runtime_disable(dev); 1469 1462 1470 1463 out_error_clock_enabled: 1471 1464 clk_disable_unprepare(ssp->clk); ··· 1475 1484 struct driver_data *drv_data = dev_get_drvdata(dev); 1476 1485 struct ssp_device *ssp = drv_data->ssp; 1477 1486 1478 - pm_runtime_get_sync(dev); 1479 - 1480 1487 spi_unregister_controller(drv_data->controller); 1481 1488 1482 1489 /* Disable the SSP at the peripheral and SOC level */ ··· 1484 1495 /* Release DMA */ 1485 1496 if (drv_data->controller_info->enable_dma) 1486 1497 pxa2xx_spi_dma_release(drv_data); 1487 - 1488 - pm_runtime_put_noidle(dev); 1489 - pm_runtime_disable(dev); 1490 1498 1491 1499 /* Release IRQ */ 1492 1500 free_irq(ssp->irq, drv_data);
+2 -1
drivers/spi/spi-pxa2xx.h
··· 132 132 extern int pxa2xx_spi_dma_setup(struct driver_data *drv_data); 133 133 extern void pxa2xx_spi_dma_release(struct driver_data *drv_data); 134 134 135 - int pxa2xx_spi_probe(struct device *dev, struct ssp_device *ssp); 135 + int pxa2xx_spi_probe(struct device *dev, struct ssp_device *ssp, 136 + struct pxa2xx_spi_controller *platform_info); 136 137 void pxa2xx_spi_remove(struct device *dev); 137 138 138 139 extern const struct dev_pm_ops pxa2xx_spi_pm_ops;
+24 -6
drivers/spi/spi-zynqmp-gqspi.c
··· 1033 1033 return 0; 1034 1034 } 1035 1035 1036 + static unsigned long zynqmp_qspi_timeout(struct zynqmp_qspi *xqspi, u8 bits, 1037 + unsigned long bytes) 1038 + { 1039 + unsigned long timeout; 1040 + 1041 + /* Assume we are at most 2x slower than the nominal bus speed */ 1042 + timeout = mult_frac(bytes, 2 * 8 * MSEC_PER_SEC, 1043 + bits * xqspi->speed_hz); 1044 + /* And add 100 ms for scheduling delays */ 1045 + return msecs_to_jiffies(timeout + 100); 1046 + } 1047 + 1036 1048 /** 1037 1049 * zynqmp_qspi_exec_op() - Initiates the QSPI transfer 1038 1050 * @mem: The SPI memory ··· 1061 1049 { 1062 1050 struct zynqmp_qspi *xqspi = spi_controller_get_devdata 1063 1051 (mem->spi->controller); 1052 + unsigned long timeout; 1064 1053 int err = 0, i; 1065 1054 u32 genfifoentry = 0; 1066 1055 u16 opcode = op->cmd.opcode; ··· 1090 1077 zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST, 1091 1078 GQSPI_IER_GENFIFOEMPTY_MASK | 1092 1079 GQSPI_IER_TXNOT_FULL_MASK); 1093 - if (!wait_for_completion_timeout 1094 - (&xqspi->data_completion, msecs_to_jiffies(1000))) { 1080 + timeout = zynqmp_qspi_timeout(xqspi, op->cmd.buswidth, 1081 + op->cmd.nbytes); 1082 + if (!wait_for_completion_timeout(&xqspi->data_completion, 1083 + timeout)) { 1095 1084 err = -ETIMEDOUT; 1096 1085 goto return_err; 1097 1086 } ··· 1119 1104 GQSPI_IER_TXEMPTY_MASK | 1120 1105 GQSPI_IER_GENFIFOEMPTY_MASK | 1121 1106 GQSPI_IER_TXNOT_FULL_MASK); 1122 - if (!wait_for_completion_timeout 1123 - (&xqspi->data_completion, msecs_to_jiffies(1000))) { 1107 + timeout = zynqmp_qspi_timeout(xqspi, op->addr.buswidth, 1108 + op->addr.nbytes); 1109 + if (!wait_for_completion_timeout(&xqspi->data_completion, 1110 + timeout)) { 1124 1111 err = -ETIMEDOUT; 1125 1112 goto return_err; 1126 1113 } ··· 1190 1173 GQSPI_IER_RXEMPTY_MASK); 1191 1174 } 1192 1175 } 1193 - if (!wait_for_completion_timeout 1194 - (&xqspi->data_completion, msecs_to_jiffies(1000))) 1176 + timeout = zynqmp_qspi_timeout(xqspi, op->data.buswidth, 1177 + op->data.nbytes); 1178 + if (!wait_for_completion_timeout(&xqspi->data_completion, timeout)) 1195 1179 err = -ETIMEDOUT; 1196 1180 } 1197 1181
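Instead of a fixed one-second wait per phase, the completion timeout now scales with how much data each phase moves. Below is a standalone sketch of the arithmetic with invented sizes and speeds; the driver itself uses mult_frac() so the intermediate product cannot overflow.

#include <stdio.h>

/* Assume the bus may run at half its nominal speed: bytes * 8 bits, doubled,
 * divided by (bus width in bits * clock in Hz), plus 100 ms of scheduling slack.
 * Assumes a 64-bit unsigned long so the plain multiplication below is safe.
 */
static unsigned long qspi_timeout_ms(unsigned long bytes, unsigned int bits,
				     unsigned long speed_hz)
{
	return bytes * 2 * 8 * 1000 / (bits * speed_hz) + 100;
}

int main(void)
{
	/* 4 KiB data phase on a x4 bus clocked at 10 MHz */
	printf("%lu ms\n", qspi_timeout_ms(4096, 4, 10000000));	/* 101 ms */
	/* a 16 MiB x1 read at 1 MHz needs ~134 s, far beyond the old 1 s limit */
	printf("%lu ms\n", qspi_timeout_ms(16 << 20, 1, 1000000));	/* 268535 ms */
	return 0;
}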
+3 -3
drivers/thermal/thermal_debugfs.c
··· 178 178 void thermal_debug_init(void) 179 179 { 180 180 d_root = debugfs_create_dir("thermal", NULL); 181 - if (!d_root) 181 + if (IS_ERR(d_root)) 182 182 return; 183 183 184 184 d_cdev = debugfs_create_dir("cooling_devices", d_root); 185 - if (!d_cdev) 185 + if (IS_ERR(d_cdev)) 186 186 return; 187 187 188 188 d_tz = debugfs_create_dir("thermal_zones", d_root); ··· 202 202 snprintf(ids, IDSLENGTH, "%d", id); 203 203 204 204 thermal_dbg->d_top = debugfs_create_dir(ids, d); 205 - if (!thermal_dbg->d_top) { 205 + if (IS_ERR(thermal_dbg->d_top)) { 206 206 kfree(thermal_dbg); 207 207 return NULL; 208 208 }
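The switch from !ptr to IS_ERR() matters because debugfs_create_dir() signals failure with an ERR_PTR()-encoded value rather than NULL, so the old checks never detected a failed create. Below is a small userspace re-implementation of that convention, only to show the difference between the two tests.

#include <stdio.h>

#define MAX_ERRNO	4095

/* Simplified copies of the kernel helpers: an error is encoded as a pointer
 * into the topmost page of the address space, so it is non-NULL but IS_ERR(). */
static void *ERR_PTR(long error)
{
	return (void *)error;
}

static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *d = ERR_PTR(-12);		/* what a failed directory create returns */

	printf("caught by !ptr:   %d\n", d == NULL);	/* 0, the old check misses it */
	printf("caught by IS_ERR: %d\n", IS_ERR(d));	/* 1 */
	return 0;
}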
+14 -9
drivers/thermal/thermal_of.c
··· 125 125 static struct thermal_trip *thermal_of_trips_init(struct device_node *np, int *ntrips) 126 126 { 127 127 struct thermal_trip *tt; 128 - struct device_node *trips, *trip; 128 + struct device_node *trips; 129 129 int ret, count; 130 130 131 131 trips = of_get_child_by_name(np, "trips"); ··· 150 150 *ntrips = count; 151 151 152 152 count = 0; 153 - for_each_child_of_node(trips, trip) { 153 + for_each_child_of_node_scoped(trips, trip) { 154 154 ret = thermal_of_populate_trip(trip, &tt[count++]); 155 155 if (ret) 156 156 goto out_kfree; ··· 184 184 * Search for each thermal zone, a defined sensor 185 185 * corresponding to the one passed as parameter 186 186 */ 187 - for_each_available_child_of_node(np, tz) { 187 + for_each_available_child_of_node_scoped(np, child) { 188 188 189 189 int count, i; 190 190 191 - count = of_count_phandle_with_args(tz, "thermal-sensors", 191 + count = of_count_phandle_with_args(child, "thermal-sensors", 192 192 "#thermal-sensor-cells"); 193 193 if (count <= 0) { 194 - pr_err("%pOFn: missing thermal sensor\n", tz); 194 + pr_err("%pOFn: missing thermal sensor\n", child); 195 195 tz = ERR_PTR(-EINVAL); 196 196 goto out; 197 197 } ··· 200 200 201 201 int ret; 202 202 203 - ret = of_parse_phandle_with_args(tz, "thermal-sensors", 203 + ret = of_parse_phandle_with_args(child, "thermal-sensors", 204 204 "#thermal-sensor-cells", 205 205 i, &sensor_specs); 206 206 if (ret < 0) { 207 - pr_err("%pOFn: Failed to read thermal-sensors cells: %d\n", tz, ret); 207 + pr_err("%pOFn: Failed to read thermal-sensors cells: %d\n", child, ret); 208 208 tz = ERR_PTR(ret); 209 209 goto out; 210 210 } 211 211 212 212 if ((sensor == sensor_specs.np) && id == (sensor_specs.args_count ? 213 213 sensor_specs.args[0] : 0)) { 214 - pr_debug("sensor %pOFn id=%d belongs to %pOFn\n", sensor, id, tz); 214 + pr_debug("sensor %pOFn id=%d belongs to %pOFn\n", sensor, id, child); 215 + tz = no_free_ptr(child); 215 216 goto out; 216 217 } 217 218 } ··· 492 491 trips = thermal_of_trips_init(np, &ntrips); 493 492 if (IS_ERR(trips)) { 494 493 pr_err("Failed to find trip points for %pOFn id=%d\n", sensor, id); 495 - return ERR_CAST(trips); 494 + ret = PTR_ERR(trips); 495 + goto out_of_node_put; 496 496 } 497 497 498 498 ret = thermal_of_monitor_init(np, &delay, &pdelay); ··· 521 519 goto out_kfree_trips; 522 520 } 523 521 522 + of_node_put(np); 524 523 kfree(trips); 525 524 526 525 ret = thermal_zone_device_enable(tz); ··· 536 533 537 534 out_kfree_trips: 538 535 kfree(trips); 536 + out_of_node_put: 537 + of_node_put(np); 539 538 540 539 return ERR_PTR(ret); 541 540 }
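The conversion to for_each_child_of_node_scoped() ties each child node reference to the loop variable's scope, and no_free_ptr() hands the matched node out of the loop, so the early-return paths no longer leak OF references. Below is a toy sketch of the cleanup-attribute pattern behind those helpers, using an invented refcounted type rather than the real OF API.

#include <stdio.h>

struct node { int refs; };

static void put_node(struct node **n)
{
	if (*n)
		(*n)->refs--;		/* reference dropped automatically */
}

int main(void)
{
	struct node child = { .refs = 1 };
	struct node *found = NULL;

	{	/* plain scoped use: the put runs when n leaves this block */
		__attribute__((cleanup(put_node))) struct node *n = &child;

		n->refs++;
	}
	printf("after scoped walk: refs = %d\n", child.refs);	/* 1 again */

	{	/* keeping the match: steal the pointer so the cleanup sees NULL */
		__attribute__((cleanup(put_node))) struct node *n = &child;

		n->refs++;
		found = n;
		n = NULL;	/* roughly what no_free_ptr() does */
	}
	printf("kept match:        refs = %d\n", found->refs);	/* 2 */
	return 0;
}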
+5 -1
drivers/ufs/core/ufshcd.c
··· 2426 2426 * 0h: legacy single doorbell support is available 2427 2427 * 1h: indicate that legacy single doorbell support has been removed 2428 2428 */ 2429 - hba->lsdb_sup = !FIELD_GET(MASK_LSDB_SUPPORT, hba->capabilities); 2429 + if (!(hba->quirks & UFSHCD_QUIRK_BROKEN_LSDBS_CAP)) 2430 + hba->lsdb_sup = !FIELD_GET(MASK_LSDB_SUPPORT, hba->capabilities); 2431 + else 2432 + hba->lsdb_sup = true; 2433 + 2430 2434 if (!hba->mcq_sup) 2431 2435 return 0; 2432 2436
+5 -1
drivers/ufs/host/ufs-qcom.c
··· 857 857 858 858 if (host->hw_ver.major > 0x3) 859 859 hba->quirks |= UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH; 860 + 861 + if (of_device_is_compatible(hba->dev->of_node, "qcom,sm8550-ufshc")) 862 + hba->quirks |= UFSHCD_QUIRK_BROKEN_LSDBS_CAP; 860 863 } 861 864 862 865 static void ufs_qcom_set_phy_gear(struct ufs_qcom_host *host) ··· 1850 1847 } 1851 1848 1852 1849 static const struct of_device_id ufs_qcom_of_match[] __maybe_unused = { 1853 - { .compatible = "qcom,ufshc"}, 1850 + { .compatible = "qcom,ufshc" }, 1851 + { .compatible = "qcom,sm8550-ufshc" }, 1854 1852 {}, 1855 1853 }; 1856 1854 MODULE_DEVICE_TABLE(of, ufs_qcom_of_match);
+8 -3
fs/afs/inode.c
··· 695 695 { 696 696 struct afs_vnode_param *vp = &op->file[0]; 697 697 struct afs_vnode *vnode = vp->vnode; 698 + struct inode *inode = &vnode->netfs.inode; 698 699 699 700 if (op->setattr.attr->ia_valid & ATTR_SIZE) { 700 701 loff_t size = op->setattr.attr->ia_size; 701 - loff_t i_size = op->setattr.old_i_size; 702 + loff_t old = op->setattr.old_i_size; 702 703 703 - if (size != i_size) { 704 - truncate_setsize(&vnode->netfs.inode, size); 704 + /* Note: inode->i_size was updated by afs_apply_status() inside 705 + * the I/O and callback locks. 706 + */ 707 + 708 + if (size != old) { 709 + truncate_pagecache(inode, size); 705 710 netfs_resize_file(&vnode->netfs, size, true); 706 711 fscache_resize_cookie(afs_vnode_cache(vnode), size); 707 712 }
+11 -3
fs/attr.c
··· 487 487 error = security_inode_setattr(idmap, dentry, attr); 488 488 if (error) 489 489 return error; 490 - error = try_break_deleg(inode, delegated_inode); 491 - if (error) 492 - return error; 490 + 491 + /* 492 + * If ATTR_DELEG is set, then these attributes are being set on 493 + * behalf of the holder of a write delegation. We want to avoid 494 + * breaking the delegation in this case. 495 + */ 496 + if (!(ia_valid & ATTR_DELEG)) { 497 + error = try_break_deleg(inode, delegated_inode); 498 + if (error) 499 + return error; 500 + } 493 501 494 502 if (inode->i_op->setattr) 495 503 error = inode->i_op->setattr(idmap, dentry, attr);
+4 -1
fs/backing-file.c
··· 303 303 if (WARN_ON_ONCE(!(out->f_mode & FMODE_BACKING))) 304 304 return -EIO; 305 305 306 + if (!out->f_op->splice_write) 307 + return -EINVAL; 308 + 306 309 ret = file_remove_privs(ctx->user_file); 307 310 if (ret) 308 311 return ret; 309 312 310 313 old_cred = override_creds(ctx->cred); 311 314 file_start_write(out); 312 - ret = iter_file_splice_write(pipe, out, ppos, len, flags); 315 + ret = out->f_op->splice_write(pipe, out, ppos, len, flags); 313 316 file_end_write(out); 314 317 revert_creds(old_cred); 315 318
+34 -32
fs/bcachefs/alloc_background.c
··· 240 240 int bch2_alloc_v4_validate(struct bch_fs *c, struct bkey_s_c k, 241 241 enum bch_validate_flags flags) 242 242 { 243 - struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(k); 243 + struct bch_alloc_v4 a; 244 244 int ret = 0; 245 245 246 - bkey_fsck_err_on(alloc_v4_u64s_noerror(a.v) > bkey_val_u64s(k.k), 246 + bkey_val_copy(&a, bkey_s_c_to_alloc_v4(k)); 247 + 248 + bkey_fsck_err_on(alloc_v4_u64s_noerror(&a) > bkey_val_u64s(k.k), 247 249 c, alloc_v4_val_size_bad, 248 250 "bad val size (%u > %zu)", 249 - alloc_v4_u64s_noerror(a.v), bkey_val_u64s(k.k)); 251 + alloc_v4_u64s_noerror(&a), bkey_val_u64s(k.k)); 250 252 251 - bkey_fsck_err_on(!BCH_ALLOC_V4_BACKPOINTERS_START(a.v) && 252 - BCH_ALLOC_V4_NR_BACKPOINTERS(a.v), 253 + bkey_fsck_err_on(!BCH_ALLOC_V4_BACKPOINTERS_START(&a) && 254 + BCH_ALLOC_V4_NR_BACKPOINTERS(&a), 253 255 c, alloc_v4_backpointers_start_bad, 254 256 "invalid backpointers_start"); 255 257 256 - bkey_fsck_err_on(alloc_data_type(*a.v, a.v->data_type) != a.v->data_type, 258 + bkey_fsck_err_on(alloc_data_type(a, a.data_type) != a.data_type, 257 259 c, alloc_key_data_type_bad, 258 260 "invalid data type (got %u should be %u)", 259 - a.v->data_type, alloc_data_type(*a.v, a.v->data_type)); 261 + a.data_type, alloc_data_type(a, a.data_type)); 260 262 261 263 for (unsigned i = 0; i < 2; i++) 262 - bkey_fsck_err_on(a.v->io_time[i] > LRU_TIME_MAX, 264 + bkey_fsck_err_on(a.io_time[i] > LRU_TIME_MAX, 263 265 c, alloc_key_io_time_bad, 264 266 "invalid io_time[%s]: %llu, max %llu", 265 267 i == READ ? "read" : "write", 266 - a.v->io_time[i], LRU_TIME_MAX); 268 + a.io_time[i], LRU_TIME_MAX); 267 269 268 - unsigned stripe_sectors = BCH_ALLOC_V4_BACKPOINTERS_START(a.v) * sizeof(u64) > 270 + unsigned stripe_sectors = BCH_ALLOC_V4_BACKPOINTERS_START(&a) * sizeof(u64) > 269 271 offsetof(struct bch_alloc_v4, stripe_sectors) 270 - ? a.v->stripe_sectors 272 + ? 
a.stripe_sectors 271 273 : 0; 272 274 273 - switch (a.v->data_type) { 275 + switch (a.data_type) { 274 276 case BCH_DATA_free: 275 277 case BCH_DATA_need_gc_gens: 276 278 case BCH_DATA_need_discard: 277 279 bkey_fsck_err_on(stripe_sectors || 278 - a.v->dirty_sectors || 279 - a.v->cached_sectors || 280 - a.v->stripe, 280 + a.dirty_sectors || 281 + a.cached_sectors || 282 + a.stripe, 281 283 c, alloc_key_empty_but_have_data, 282 284 "empty data type free but have data %u.%u.%u %u", 283 285 stripe_sectors, 284 - a.v->dirty_sectors, 285 - a.v->cached_sectors, 286 - a.v->stripe); 286 + a.dirty_sectors, 287 + a.cached_sectors, 288 + a.stripe); 287 289 break; 288 290 case BCH_DATA_sb: 289 291 case BCH_DATA_journal: 290 292 case BCH_DATA_btree: 291 293 case BCH_DATA_user: 292 294 case BCH_DATA_parity: 293 - bkey_fsck_err_on(!a.v->dirty_sectors && 295 + bkey_fsck_err_on(!a.dirty_sectors && 294 296 !stripe_sectors, 295 297 c, alloc_key_dirty_sectors_0, 296 298 "data_type %s but dirty_sectors==0", 297 - bch2_data_type_str(a.v->data_type)); 299 + bch2_data_type_str(a.data_type)); 298 300 break; 299 301 case BCH_DATA_cached: 300 - bkey_fsck_err_on(!a.v->cached_sectors || 301 - a.v->dirty_sectors || 302 + bkey_fsck_err_on(!a.cached_sectors || 303 + a.dirty_sectors || 302 304 stripe_sectors || 303 - a.v->stripe, 305 + a.stripe, 304 306 c, alloc_key_cached_inconsistency, 305 307 "data type inconsistency"); 306 308 307 - bkey_fsck_err_on(!a.v->io_time[READ] && 309 + bkey_fsck_err_on(!a.io_time[READ] && 308 310 c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_to_lru_refs, 309 311 c, alloc_key_cached_but_read_time_zero, 310 312 "cached bucket with read_time == 0"); ··· 558 556 struct bpos pos = alloc_gens_pos(iter.pos, &offset); 559 557 int ret2 = 0; 560 558 561 - if (have_bucket_gens_key && bkey_cmp(iter.pos, pos)) { 559 + if (have_bucket_gens_key && !bkey_eq(g.k.p, pos)) { 562 560 ret2 = bch2_btree_insert_trans(trans, BTREE_ID_bucket_gens, &g.k_i, 0) ?: 563 561 bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc); 564 562 if (ret2) ··· 831 829 if (likely(new.k->type == KEY_TYPE_alloc_v4)) { 832 830 new_a = bkey_s_to_alloc_v4(new).v; 833 831 } else { 834 - BUG_ON(!(flags & BTREE_TRIGGER_gc)); 832 + BUG_ON(!(flags & (BTREE_TRIGGER_gc|BTREE_TRIGGER_check_repair))); 835 833 836 834 struct bkey_i_alloc_v4 *new_ka = bch2_alloc_to_v4_mut_inlined(trans, new.s_c); 837 835 ret = PTR_ERR_OR_ZERO(new_ka); ··· 1874 1872 trace_discard_buckets(c, s.seen, s.open, s.need_journal_commit, s.discarded, 1875 1873 bch2_err_str(ret)); 1876 1874 1877 - bch2_write_ref_put(c, BCH_WRITE_REF_discard); 1878 1875 percpu_ref_put(&ca->io_ref); 1876 + bch2_write_ref_put(c, BCH_WRITE_REF_discard); 1879 1877 } 1880 1878 1881 1879 void bch2_dev_do_discards(struct bch_dev *ca) 1882 1880 { 1883 1881 struct bch_fs *c = ca->fs; 1884 1882 1885 - if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE)) 1883 + if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_discard)) 1886 1884 return; 1887 1885 1888 - if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_discard)) 1889 - goto put_ioref; 1886 + if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE)) 1887 + goto put_write_ref; 1890 1888 1891 1889 if (queue_work(c->write_ref_wq, &ca->discard_work)) 1892 1890 return; 1893 1891 1894 - bch2_write_ref_put(c, BCH_WRITE_REF_discard); 1895 - put_ioref: 1896 1892 percpu_ref_put(&ca->io_ref); 1893 + put_write_ref: 1894 + bch2_write_ref_put(c, BCH_WRITE_REF_discard); 1897 1895 } 1898 1896 1899 1897 void bch2_do_discards(struct bch_fs *c)
+1
fs/bcachefs/alloc_background_format.h
··· 69 69 __u64 io_time[2]; 70 70 __u32 stripe; 71 71 __u32 nr_external_backpointers; 72 + /* end of fields in original version of alloc_v4 */ 72 73 __u64 fragmentation_lru; 73 74 __u32 stripe_sectors; 74 75 __u32 pad;
+2 -1
fs/bcachefs/bcachefs_format.h
··· 677 677 x(bucket_stripe_sectors, BCH_VERSION(1, 8)) \ 678 678 x(disk_accounting_v2, BCH_VERSION(1, 9)) \ 679 679 x(disk_accounting_v3, BCH_VERSION(1, 10)) \ 680 - x(disk_accounting_inum, BCH_VERSION(1, 11)) 680 + x(disk_accounting_inum, BCH_VERSION(1, 11)) \ 681 + x(rebalance_work_acct_fix, BCH_VERSION(1, 12)) 681 682 682 683 enum bcachefs_metadata_version { 683 684 bcachefs_metadata_version_min = 9,
+25
fs/bcachefs/btree_cache.c
··· 159 159 return b; 160 160 } 161 161 162 + void bch2_btree_node_to_freelist(struct bch_fs *c, struct btree *b) 163 + { 164 + mutex_lock(&c->btree_cache.lock); 165 + list_move(&b->list, &c->btree_cache.freeable); 166 + mutex_unlock(&c->btree_cache.lock); 167 + 168 + six_unlock_write(&b->c.lock); 169 + six_unlock_intent(&b->c.lock); 170 + } 171 + 162 172 /* Btree in memory cache - hash table */ 163 173 164 174 void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b) ··· 746 736 start_time); 747 737 748 738 memalloc_nofs_restore(flags); 739 + 740 + int ret = bch2_trans_relock(trans); 741 + if (unlikely(ret)) { 742 + bch2_btree_node_to_freelist(c, b); 743 + return ERR_PTR(ret); 744 + } 745 + 749 746 return b; 750 747 err: 751 748 mutex_lock(&bc->lock); ··· 873 856 874 857 bch2_btree_node_read(trans, b, sync); 875 858 859 + int ret = bch2_trans_relock(trans); 860 + if (ret) 861 + return ERR_PTR(ret); 862 + 876 863 if (!sync) 877 864 return NULL; 878 865 ··· 994 973 need_relock = true; 995 974 996 975 bch2_btree_node_wait_on_read(b); 976 + 977 + ret = bch2_trans_relock(trans); 978 + if (ret) 979 + return ERR_PTR(ret); 997 980 998 981 /* 999 982 * should_be_locked is not set on this path yet, so we need to
+2
fs/bcachefs/btree_cache.h
··· 12 12 13 13 void bch2_recalc_btree_reserve(struct bch_fs *); 14 14 15 + void bch2_btree_node_to_freelist(struct bch_fs *, struct btree *); 16 + 15 17 void bch2_btree_node_hash_remove(struct btree_cache *, struct btree *); 16 18 int __bch2_btree_node_hash_insert(struct btree_cache *, struct btree *); 17 19 int bch2_btree_node_hash_insert(struct btree_cache *, struct btree *,
+9
fs/bcachefs/btree_iter.h
··· 569 569 bkey_s_c_to_##_type(__bch2_bkey_get_iter(_trans, _iter, \ 570 570 _btree_id, _pos, _flags, KEY_TYPE_##_type)) 571 571 572 + #define bkey_val_copy(_dst_v, _src_k) \ 573 + do { \ 574 + unsigned b = min_t(unsigned, sizeof(*_dst_v), \ 575 + bkey_val_bytes(_src_k.k)); \ 576 + memcpy(_dst_v, _src_k.v, b); \ 577 + if (b < sizeof(*_dst_v)) \ 578 + memset((void *) (_dst_v) + b, 0, sizeof(*_dst_v) - b); \ 579 + } while (0) 580 + 572 581 static inline int __bch2_bkey_get_val_typed(struct btree_trans *trans, 573 582 unsigned btree_id, struct bpos pos, 574 583 unsigned flags, unsigned type,
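The new bkey_val_copy() macro fills a fixed-size stack copy from a value that may be shorter on disk and zero-fills the rest, which is how the alloc_v4 validation change earlier in this series reads old keys that predate the newer fields. Below is a standalone sketch of that copy-and-pad pattern with invented struct names, not the real bcachefs layouts.

#include <stdio.h>
#include <string.h>

struct alloc_v1 { unsigned int gen; };				/* what an old key carries */
struct alloc_v2 { unsigned int gen; unsigned int sectors; };	/* current in-memory form */

/* Copy whatever the source actually has, then zero the tail so fields that
 * were added later read as zero instead of stack garbage. */
static void val_copy(struct alloc_v2 *dst, const void *src, size_t src_bytes)
{
	size_t n = src_bytes < sizeof(*dst) ? src_bytes : sizeof(*dst);

	memcpy(dst, src, n);
	if (n < sizeof(*dst))
		memset((char *)dst + n, 0, sizeof(*dst) - n);
}

int main(void)
{
	struct alloc_v1 on_disk = { .gen = 7 };
	struct alloc_v2 a;

	val_copy(&a, &on_disk, sizeof(on_disk));
	printf("gen=%u sectors=%u\n", a.gen, a.sectors);	/* gen=7 sectors=0 */
	return 0;
}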
+28 -3
fs/bcachefs/btree_key_cache.c
··· 726 726 727 727 mark_btree_node_locked(trans, path, 0, BTREE_NODE_UNLOCKED); 728 728 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE); 729 + path->should_be_locked = false; 729 730 } 730 731 731 732 static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink, ··· 778 777 779 778 rcu_read_lock(); 780 779 tbl = rht_dereference_rcu(bc->table.tbl, &bc->table); 780 + 781 + /* 782 + * Scanning is expensive while a rehash is in progress - most elements 783 + * will be on the new hashtable, if it's in progress 784 + * 785 + * A rehash could still start while we're scanning - that's ok, we'll 786 + * still see most elements. 787 + */ 788 + if (unlikely(tbl->nest)) { 789 + rcu_read_unlock(); 790 + srcu_read_unlock(&c->btree_trans_barrier, srcu_idx); 791 + return SHRINK_STOP; 792 + } 793 + 781 794 if (bc->shrink_iter >= tbl->size) 782 795 bc->shrink_iter = 0; 783 796 start = bc->shrink_iter; ··· 799 784 do { 800 785 struct rhash_head *pos, *next; 801 786 802 - pos = rht_ptr_rcu(rht_bucket(tbl, bc->shrink_iter)); 787 + pos = rht_ptr_rcu(&tbl->buckets[bc->shrink_iter]); 803 788 804 789 while (!rht_is_a_nulls(pos)) { 805 790 next = rht_dereference_bucket_rcu(pos->next, tbl, bc->shrink_iter); ··· 880 865 while (atomic_long_read(&bc->nr_keys)) { 881 866 rcu_read_lock(); 882 867 tbl = rht_dereference_rcu(bc->table.tbl, &bc->table); 883 - if (tbl) 868 + if (tbl) { 869 + if (tbl->nest) { 870 + /* wait for in progress rehash */ 871 + rcu_read_unlock(); 872 + mutex_lock(&bc->table.mutex); 873 + mutex_unlock(&bc->table.mutex); 874 + rcu_read_lock(); 875 + continue; 876 + } 884 877 for (i = 0; i < tbl->size; i++) 885 - rht_for_each_entry_rcu(ck, pos, tbl, i, hash) { 878 + while (pos = rht_ptr_rcu(&tbl->buckets[i]), !rht_is_a_nulls(pos)) { 879 + ck = container_of(pos, struct bkey_cached, hash); 886 880 bkey_cached_evict(bc, ck); 887 881 list_add(&ck->list, &items); 888 882 } 883 + } 889 884 rcu_read_unlock(); 890 885 } 891 886
+25 -21
fs/bcachefs/btree_update_interior.c
··· 317 317 : 0; 318 318 int ret; 319 319 320 + b = bch2_btree_node_mem_alloc(trans, interior_node); 321 + if (IS_ERR(b)) 322 + return b; 323 + 324 + BUG_ON(b->ob.nr); 325 + 320 326 mutex_lock(&c->btree_reserve_cache_lock); 321 327 if (c->btree_reserve_cache_nr > nr_reserve) { 322 328 struct btree_alloc *a = ··· 331 325 obs = a->ob; 332 326 bkey_copy(&tmp.k, &a->k); 333 327 mutex_unlock(&c->btree_reserve_cache_lock); 334 - goto mem_alloc; 328 + goto out; 335 329 } 336 330 mutex_unlock(&c->btree_reserve_cache_lock); 337 - 338 331 retry: 339 332 ret = bch2_alloc_sectors_start_trans(trans, 340 333 c->opts.metadata_target ?: ··· 346 341 c->opts.metadata_replicas_required), 347 342 watermark, 0, cl, &wp); 348 343 if (unlikely(ret)) 349 - return ERR_PTR(ret); 344 + goto err; 350 345 351 346 if (wp->sectors_free < btree_sectors(c)) { 352 347 struct open_bucket *ob; ··· 365 360 366 361 bch2_open_bucket_get(c, wp, &obs); 367 362 bch2_alloc_sectors_done(c, wp); 368 - mem_alloc: 369 - b = bch2_btree_node_mem_alloc(trans, interior_node); 363 + out: 364 + bkey_copy(&b->key, &tmp.k); 365 + b->ob = obs; 370 366 six_unlock_write(&b->c.lock); 371 367 six_unlock_intent(&b->c.lock); 372 368 373 - /* we hold cannibalize_lock: */ 374 - BUG_ON(IS_ERR(b)); 375 - BUG_ON(b->ob.nr); 376 - 377 - bkey_copy(&b->key, &tmp.k); 378 - b->ob = obs; 379 - 380 369 return b; 370 + err: 371 + bch2_btree_node_to_freelist(c, b); 372 + return ERR_PTR(ret); 381 373 } 382 374 383 375 static struct btree *bch2_btree_node_alloc(struct btree_update *as, ··· 2441 2439 } 2442 2440 2443 2441 new_hash = bch2_btree_node_mem_alloc(trans, false); 2442 + ret = PTR_ERR_OR_ZERO(new_hash); 2443 + if (ret) 2444 + goto err; 2444 2445 } 2445 2446 2446 2447 path->intent_ref++; ··· 2451 2446 commit_flags, skip_triggers); 2452 2447 --path->intent_ref; 2453 2448 2454 - if (new_hash) { 2455 - mutex_lock(&c->btree_cache.lock); 2456 - list_move(&new_hash->list, &c->btree_cache.freeable); 2457 - mutex_unlock(&c->btree_cache.lock); 2458 - 2459 - six_unlock_write(&new_hash->c.lock); 2460 - six_unlock_intent(&new_hash->c.lock); 2461 - } 2449 + if (new_hash) 2450 + bch2_btree_node_to_freelist(c, new_hash); 2451 + err: 2462 2452 closure_sync(&cl); 2463 2453 bch2_btree_cache_cannibalize_unlock(trans); 2464 2454 return ret; ··· 2522 2522 b = bch2_btree_node_mem_alloc(trans, false); 2523 2523 bch2_btree_cache_cannibalize_unlock(trans); 2524 2524 2525 + ret = PTR_ERR_OR_ZERO(b); 2526 + if (ret) 2527 + return ret; 2528 + 2525 2529 set_btree_node_fake(b); 2526 2530 set_btree_node_need_rewrite(b); 2527 2531 b->c.level = level; ··· 2557 2553 2558 2554 void bch2_btree_root_alloc_fake(struct bch_fs *c, enum btree_id id, unsigned level) 2559 2555 { 2560 - bch2_trans_run(c, bch2_btree_root_alloc_fake_trans(trans, id, level)); 2556 + bch2_trans_run(c, lockrestart_do(trans, bch2_btree_root_alloc_fake_trans(trans, id, level))); 2561 2557 } 2562 2558 2563 2559 static void bch2_btree_update_to_text(struct printbuf *out, struct btree_update *as)
+49 -25
fs/bcachefs/buckets.c
··· 699 699 static int __trigger_extent(struct btree_trans *trans, 700 700 enum btree_id btree_id, unsigned level, 701 701 struct bkey_s_c k, 702 - enum btree_iter_update_trigger_flags flags) 702 + enum btree_iter_update_trigger_flags flags, 703 + s64 *replicas_sectors) 703 704 { 704 705 bool gc = flags & BTREE_TRIGGER_gc; 705 706 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); ··· 709 708 enum bch_data_type data_type = bkey_is_btree_ptr(k.k) 710 709 ? BCH_DATA_btree 711 710 : BCH_DATA_user; 712 - s64 replicas_sectors = 0; 713 711 int ret = 0; 714 712 715 713 struct disk_accounting_pos acc_replicas_key = { ··· 739 739 if (ret) 740 740 return ret; 741 741 } else if (!p.has_ec) { 742 - replicas_sectors += disk_sectors; 742 + *replicas_sectors += disk_sectors; 743 743 acc_replicas_key.replicas.devs[acc_replicas_key.replicas.nr_devs++] = p.ptr.dev; 744 744 } else { 745 745 ret = bch2_trigger_stripe_ptr(trans, k, p, data_type, disk_sectors, flags); ··· 777 777 } 778 778 779 779 if (acc_replicas_key.replicas.nr_devs) { 780 - ret = bch2_disk_accounting_mod(trans, &acc_replicas_key, &replicas_sectors, 1, gc); 780 + ret = bch2_disk_accounting_mod(trans, &acc_replicas_key, replicas_sectors, 1, gc); 781 781 if (ret) 782 782 return ret; 783 783 } ··· 787 787 .type = BCH_DISK_ACCOUNTING_snapshot, 788 788 .snapshot.id = k.k->p.snapshot, 789 789 }; 790 - ret = bch2_disk_accounting_mod(trans, &acc_snapshot_key, &replicas_sectors, 1, gc); 790 + ret = bch2_disk_accounting_mod(trans, &acc_snapshot_key, replicas_sectors, 1, gc); 791 791 if (ret) 792 792 return ret; 793 793 } ··· 807 807 .type = BCH_DISK_ACCOUNTING_btree, 808 808 .btree.id = btree_id, 809 809 }; 810 - ret = bch2_disk_accounting_mod(trans, &acc_btree_key, &replicas_sectors, 1, gc); 810 + ret = bch2_disk_accounting_mod(trans, &acc_btree_key, replicas_sectors, 1, gc); 811 811 if (ret) 812 812 return ret; 813 813 } else { ··· 819 819 s64 v[3] = { 820 820 insert ? 1 : -1, 821 821 insert ? 
k.k->size : -((s64) k.k->size), 822 - replicas_sectors, 822 + *replicas_sectors, 823 823 }; 824 824 ret = bch2_disk_accounting_mod(trans, &acc_inum_key, v, ARRAY_SIZE(v), gc); 825 - if (ret) 826 - return ret; 827 - } 828 - 829 - if (bch2_bkey_rebalance_opts(k)) { 830 - struct disk_accounting_pos acc = { 831 - .type = BCH_DISK_ACCOUNTING_rebalance_work, 832 - }; 833 - ret = bch2_disk_accounting_mod(trans, &acc, &replicas_sectors, 1, gc); 834 825 if (ret) 835 826 return ret; 836 827 } ··· 834 843 struct bkey_s_c old, struct bkey_s new, 835 844 enum btree_iter_update_trigger_flags flags) 836 845 { 846 + struct bch_fs *c = trans->c; 837 847 struct bkey_ptrs_c new_ptrs = bch2_bkey_ptrs_c(new.s_c); 838 848 struct bkey_ptrs_c old_ptrs = bch2_bkey_ptrs_c(old); 839 849 unsigned new_ptrs_bytes = (void *) new_ptrs.end - (void *) new_ptrs.start; ··· 850 858 new_ptrs_bytes)) 851 859 return 0; 852 860 853 - if (flags & BTREE_TRIGGER_transactional) { 854 - struct bch_fs *c = trans->c; 855 - int mod = (int) bch2_bkey_needs_rebalance(c, new.s_c) - 856 - (int) bch2_bkey_needs_rebalance(c, old); 861 + if (flags & (BTREE_TRIGGER_transactional|BTREE_TRIGGER_gc)) { 862 + s64 old_replicas_sectors = 0, new_replicas_sectors = 0; 857 863 858 - if (mod) { 864 + if (old.k->type) { 865 + int ret = __trigger_extent(trans, btree, level, old, 866 + flags & ~BTREE_TRIGGER_insert, 867 + &old_replicas_sectors); 868 + if (ret) 869 + return ret; 870 + } 871 + 872 + if (new.k->type) { 873 + int ret = __trigger_extent(trans, btree, level, new.s_c, 874 + flags & ~BTREE_TRIGGER_overwrite, 875 + &new_replicas_sectors); 876 + if (ret) 877 + return ret; 878 + } 879 + 880 + int need_rebalance_delta = 0; 881 + s64 need_rebalance_sectors_delta = 0; 882 + 883 + s64 s = bch2_bkey_sectors_need_rebalance(c, old); 884 + need_rebalance_delta -= s != 0; 885 + need_rebalance_sectors_delta -= s; 886 + 887 + s = bch2_bkey_sectors_need_rebalance(c, old); 888 + need_rebalance_delta += s != 0; 889 + need_rebalance_sectors_delta += s; 890 + 891 + if ((flags & BTREE_TRIGGER_transactional) && need_rebalance_delta) { 859 892 int ret = bch2_btree_bit_mod_buffered(trans, BTREE_ID_rebalance_work, 860 - new.k->p, mod > 0); 893 + new.k->p, need_rebalance_delta > 0); 894 + if (ret) 895 + return ret; 896 + } 897 + 898 + if (need_rebalance_sectors_delta) { 899 + struct disk_accounting_pos acc = { 900 + .type = BCH_DISK_ACCOUNTING_rebalance_work, 901 + }; 902 + int ret = bch2_disk_accounting_mod(trans, &acc, &need_rebalance_sectors_delta, 1, 903 + flags & BTREE_TRIGGER_gc); 861 904 if (ret) 862 905 return ret; 863 906 } 864 907 } 865 - 866 - if (flags & (BTREE_TRIGGER_transactional|BTREE_TRIGGER_gc)) 867 - return trigger_run_overwrite_then_insert(__trigger_extent, trans, btree, level, old, new, flags); 868 908 869 909 return 0; 870 910 }
+3 -1
fs/bcachefs/buckets_waiting_for_journal.c
··· 107 107 nr_elements += t->d[i].journal_seq > flushed_seq; 108 108 109 109 new_bits = ilog2(roundup_pow_of_two(nr_elements * 3)); 110 - 110 + realloc: 111 111 n = kvmalloc(sizeof(*n) + (sizeof(n->d[0]) << new_bits), GFP_KERNEL); 112 112 if (!n) { 113 113 ret = -BCH_ERR_ENOMEM_buckets_waiting_for_journal_set; ··· 118 118 if (nr_rehashes_this_size == 3) { 119 119 new_bits++; 120 120 nr_rehashes_this_size = 0; 121 + kvfree(n); 122 + goto realloc; 121 123 } 122 124 123 125 nr_rehashes++;
+115 -94
fs/bcachefs/data_update.c
··· 20 20 #include "subvolume.h" 21 21 #include "trace.h" 22 22 23 + static void bkey_put_dev_refs(struct bch_fs *c, struct bkey_s_c k) 24 + { 25 + struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); 26 + 27 + bkey_for_each_ptr(ptrs, ptr) 28 + bch2_dev_put(bch2_dev_have_ref(c, ptr->dev)); 29 + } 30 + 31 + static bool bkey_get_dev_refs(struct bch_fs *c, struct bkey_s_c k) 32 + { 33 + struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); 34 + 35 + bkey_for_each_ptr(ptrs, ptr) { 36 + if (!bch2_dev_tryget(c, ptr->dev)) { 37 + bkey_for_each_ptr(ptrs, ptr2) { 38 + if (ptr2 == ptr) 39 + break; 40 + bch2_dev_put(bch2_dev_have_ref(c, ptr2->dev)); 41 + } 42 + return false; 43 + } 44 + } 45 + return true; 46 + } 47 + 48 + static void bkey_nocow_unlock(struct bch_fs *c, struct bkey_s_c k) 49 + { 50 + struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); 51 + 52 + bkey_for_each_ptr(ptrs, ptr) { 53 + struct bch_dev *ca = bch2_dev_have_ref(c, ptr->dev); 54 + struct bpos bucket = PTR_BUCKET_POS(ca, ptr); 55 + 56 + bch2_bucket_nocow_unlock(&c->nocow_locks, bucket, 0); 57 + } 58 + } 59 + 60 + static bool bkey_nocow_lock(struct bch_fs *c, struct moving_context *ctxt, struct bkey_s_c k) 61 + { 62 + struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); 63 + 64 + bkey_for_each_ptr(ptrs, ptr) { 65 + struct bch_dev *ca = bch2_dev_have_ref(c, ptr->dev); 66 + struct bpos bucket = PTR_BUCKET_POS(ca, ptr); 67 + 68 + if (ctxt) { 69 + bool locked; 70 + 71 + move_ctxt_wait_event(ctxt, 72 + (locked = bch2_bucket_nocow_trylock(&c->nocow_locks, bucket, 0)) || 73 + list_empty(&ctxt->ios)); 74 + 75 + if (!locked) 76 + bch2_bucket_nocow_lock(&c->nocow_locks, bucket, 0); 77 + } else { 78 + if (!bch2_bucket_nocow_trylock(&c->nocow_locks, bucket, 0)) { 79 + bkey_for_each_ptr(ptrs, ptr2) { 80 + if (ptr2 == ptr) 81 + break; 82 + 83 + bucket = PTR_BUCKET_POS(ca, ptr2); 84 + bch2_bucket_nocow_unlock(&c->nocow_locks, bucket, 0); 85 + } 86 + return false; 87 + } 88 + } 89 + } 90 + return true; 91 + } 92 + 23 93 static void trace_move_extent_finish2(struct bch_fs *c, struct bkey_s_c k) 24 94 { 25 95 if (trace_move_extent_finish_enabled()) { ··· 425 355 void bch2_data_update_exit(struct data_update *update) 426 356 { 427 357 struct bch_fs *c = update->op.c; 428 - struct bkey_ptrs_c ptrs = 429 - bch2_bkey_ptrs_c(bkey_i_to_s_c(update->k.k)); 358 + struct bkey_s_c k = bkey_i_to_s_c(update->k.k); 430 359 431 - bkey_for_each_ptr(ptrs, ptr) { 432 - struct bch_dev *ca = bch2_dev_have_ref(c, ptr->dev); 433 - if (c->opts.nocow_enabled) 434 - bch2_bucket_nocow_unlock(&c->nocow_locks, 435 - PTR_BUCKET_POS(ca, ptr), 0); 436 - bch2_dev_put(ca); 437 - } 438 - 360 + if (c->opts.nocow_enabled) 361 + bkey_nocow_unlock(c, k); 362 + bkey_put_dev_refs(c, k); 439 363 bch2_bkey_buf_exit(&update->k, c); 440 364 bch2_disk_reservation_put(c, &update->op.res); 441 365 bch2_bio_free_pages_pool(c, &update->op.wbio.bio); ··· 539 475 bch2_compression_opt_to_text(out, background_compression(*io_opts)); 540 476 prt_newline(out); 541 477 478 + prt_str(out, "opts.replicas:\t"); 479 + prt_u64(out, io_opts->data_replicas); 480 + 542 481 prt_str(out, "extra replicas:\t"); 543 482 prt_u64(out, data_opts->extra_replicas); 544 483 } ··· 610 543 const union bch_extent_entry *entry; 611 544 struct extent_ptr_decoded p; 612 545 unsigned i, reserve_sectors = k.k->size * data_opts.extra_replicas; 613 - unsigned ptrs_locked = 0; 614 546 int ret = 0; 615 547 616 548 /* ··· 619 553 */ 620 554 if (unlikely(k.k->p.snapshot && !bch2_snapshot_equiv(c, k.k->p.snapshot))) 621 555 return -BCH_ERR_data_update_done; 
556 + 557 + if (!bkey_get_dev_refs(c, k)) 558 + return -BCH_ERR_data_update_done; 559 + 560 + if (c->opts.nocow_enabled && 561 + !bkey_nocow_lock(c, ctxt, k)) { 562 + bkey_put_dev_refs(c, k); 563 + return -BCH_ERR_nocow_lock_blocked; 564 + } 622 565 623 566 bch2_bkey_buf_init(&m->k); 624 567 bch2_bkey_buf_reassemble(&m->k, c, k); ··· 650 575 m->op.compression_opt = background_compression(io_opts); 651 576 m->op.watermark = m->data_opts.btree_insert_flags & BCH_WATERMARK_MASK; 652 577 653 - bkey_for_each_ptr(ptrs, ptr) { 654 - if (!bch2_dev_tryget(c, ptr->dev)) { 655 - bkey_for_each_ptr(ptrs, ptr2) { 656 - if (ptr2 == ptr) 657 - break; 658 - bch2_dev_put(bch2_dev_have_ref(c, ptr2->dev)); 659 - } 660 - return -BCH_ERR_data_update_done; 661 - } 662 - } 663 - 664 578 unsigned durability_have = 0, durability_removing = 0; 665 579 666 580 i = 0; 667 581 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) { 668 - struct bch_dev *ca = bch2_dev_have_ref(c, p.ptr.dev); 669 - struct bpos bucket = PTR_BUCKET_POS(ca, &p.ptr); 670 - bool locked; 582 + if (!p.ptr.cached) { 583 + rcu_read_lock(); 584 + if (BIT(i) & m->data_opts.rewrite_ptrs) { 585 + if (crc_is_compressed(p.crc)) 586 + reserve_sectors += k.k->size; 671 587 672 - rcu_read_lock(); 673 - if (((1U << i) & m->data_opts.rewrite_ptrs)) { 674 - BUG_ON(p.ptr.cached); 675 - 676 - if (crc_is_compressed(p.crc)) 677 - reserve_sectors += k.k->size; 678 - 679 - m->op.nr_replicas += bch2_extent_ptr_desired_durability(c, &p); 680 - durability_removing += bch2_extent_ptr_desired_durability(c, &p); 681 - } else if (!p.ptr.cached && 682 - !((1U << i) & m->data_opts.kill_ptrs)) { 683 - bch2_dev_list_add_dev(&m->op.devs_have, p.ptr.dev); 684 - durability_have += bch2_extent_ptr_durability(c, &p); 588 + m->op.nr_replicas += bch2_extent_ptr_desired_durability(c, &p); 589 + durability_removing += bch2_extent_ptr_desired_durability(c, &p); 590 + } else if (!(BIT(i) & m->data_opts.kill_ptrs)) { 591 + bch2_dev_list_add_dev(&m->op.devs_have, p.ptr.dev); 592 + durability_have += bch2_extent_ptr_durability(c, &p); 593 + } 594 + rcu_read_unlock(); 685 595 } 686 - rcu_read_unlock(); 687 596 688 597 /* 689 598 * op->csum_type is normally initialized from the fs/file's ··· 682 623 if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible) 683 624 m->op.incompressible = true; 684 625 685 - if (c->opts.nocow_enabled) { 686 - if (ctxt) { 687 - move_ctxt_wait_event(ctxt, 688 - (locked = bch2_bucket_nocow_trylock(&c->nocow_locks, 689 - bucket, 0)) || 690 - list_empty(&ctxt->ios)); 691 - 692 - if (!locked) 693 - bch2_bucket_nocow_lock(&c->nocow_locks, bucket, 0); 694 - } else { 695 - if (!bch2_bucket_nocow_trylock(&c->nocow_locks, bucket, 0)) { 696 - ret = -BCH_ERR_nocow_lock_blocked; 697 - goto err; 698 - } 699 - } 700 - ptrs_locked |= (1U << i); 701 - } 702 - 703 626 i++; 704 627 } 705 628 ··· 695 654 * Increasing replication is an explicit operation triggered by 696 655 * rereplicate, currently, so that users don't get an unexpected -ENOSPC 697 656 */ 698 - if (!(m->data_opts.write_flags & BCH_WRITE_CACHED) && 699 - !durability_required) { 700 - m->data_opts.kill_ptrs |= m->data_opts.rewrite_ptrs; 701 - m->data_opts.rewrite_ptrs = 0; 702 - /* if iter == NULL, it's just a promote */ 703 - if (iter) 704 - ret = bch2_extent_drop_ptrs(trans, iter, k, m->data_opts); 705 - goto done; 706 - } 707 - 708 657 m->op.nr_replicas = min(durability_removing, durability_required) + 709 658 m->data_opts.extra_replicas; 710 659 ··· 706 675 if (!(durability_have + durability_removing)) 707 676 
m->op.nr_replicas = max((unsigned) m->op.nr_replicas, 1); 708 677 709 - if (!m->op.nr_replicas) { 710 - struct printbuf buf = PRINTBUF; 711 - 712 - bch2_data_update_to_text(&buf, m); 713 - WARN(1, "trying to move an extent, but nr_replicas=0\n%s", buf.buf); 714 - printbuf_exit(&buf); 715 - ret = -BCH_ERR_data_update_done; 716 - goto done; 717 - } 718 - 719 678 m->op.nr_replicas_required = m->op.nr_replicas; 679 + 680 + /* 681 + * It might turn out that we don't need any new replicas, if the 682 + * replicas or durability settings have been changed since the extent 683 + * was written: 684 + */ 685 + if (!m->op.nr_replicas) { 686 + m->data_opts.kill_ptrs |= m->data_opts.rewrite_ptrs; 687 + m->data_opts.rewrite_ptrs = 0; 688 + /* if iter == NULL, it's just a promote */ 689 + if (iter) 690 + ret = bch2_extent_drop_ptrs(trans, iter, k, m->data_opts); 691 + goto out; 692 + } 720 693 721 694 if (reserve_sectors) { 722 695 ret = bch2_disk_reservation_add(c, &m->op.res, reserve_sectors, ··· 728 693 ? 0 729 694 : BCH_DISK_RESERVATION_NOFAIL); 730 695 if (ret) 731 - goto err; 696 + goto out; 732 697 } 733 698 734 699 if (bkey_extent_is_unwritten(k)) { 735 700 bch2_update_unwritten_extent(trans, m); 736 - goto done; 701 + goto out; 737 702 } 738 703 739 704 return 0; 740 - err: 741 - i = 0; 742 - bkey_for_each_ptr_decode(k.k, ptrs, p, entry) { 743 - struct bch_dev *ca = bch2_dev_have_ref(c, p.ptr.dev); 744 - struct bpos bucket = PTR_BUCKET_POS(ca, &p.ptr); 745 - if ((1U << i) & ptrs_locked) 746 - bch2_bucket_nocow_unlock(&c->nocow_locks, bucket, 0); 747 - bch2_dev_put(ca); 748 - i++; 749 - } 750 - 751 - bch2_bkey_buf_exit(&m->k, c); 752 - bch2_bio_free_pages_pool(c, &m->op.wbio.bio); 753 - return ret; 754 - done: 705 + out: 755 706 bch2_data_update_exit(m); 756 707 return ret ?: -BCH_ERR_data_update_done; 757 708 }
+41
fs/bcachefs/extents.c
··· 1017 1017 1018 1018 prt_printf(out, "ptr: %u:%llu:%u gen %u", 1019 1019 ptr->dev, b, offset, ptr->gen); 1020 + if (ca->mi.durability != 1) 1021 + prt_printf(out, " d=%u", ca->mi.durability); 1020 1022 if (ptr->cached) 1021 1023 prt_str(out, " cached"); 1022 1024 if (ptr->unwritten) ··· 1377 1375 r = NULL; 1378 1376 1379 1377 return r != NULL; 1378 + } 1379 + 1380 + static u64 __bch2_bkey_sectors_need_rebalance(struct bch_fs *c, struct bkey_s_c k, 1381 + unsigned target, unsigned compression) 1382 + { 1383 + struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); 1384 + const union bch_extent_entry *entry; 1385 + struct extent_ptr_decoded p; 1386 + u64 sectors = 0; 1387 + 1388 + if (compression) { 1389 + unsigned compression_type = bch2_compression_opt_to_type(compression); 1390 + 1391 + bkey_for_each_ptr_decode(k.k, ptrs, p, entry) { 1392 + if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible || 1393 + p.ptr.unwritten) { 1394 + sectors = 0; 1395 + goto incompressible; 1396 + } 1397 + 1398 + if (!p.ptr.cached && p.crc.compression_type != compression_type) 1399 + sectors += p.crc.compressed_size; 1400 + } 1401 + } 1402 + incompressible: 1403 + if (target && bch2_target_accepts_data(c, BCH_DATA_user, target)) { 1404 + bkey_for_each_ptr_decode(k.k, ptrs, p, entry) 1405 + if (!p.ptr.cached && !bch2_dev_in_target(c, p.ptr.dev, target)) 1406 + sectors += p.crc.compressed_size; 1407 + } 1408 + 1409 + return sectors; 1410 + } 1411 + 1412 + u64 bch2_bkey_sectors_need_rebalance(struct bch_fs *c, struct bkey_s_c k) 1413 + { 1414 + const struct bch_extent_rebalance *r = bch2_bkey_rebalance_opts(k); 1415 + 1416 + return r ? __bch2_bkey_sectors_need_rebalance(c, k, r->target, r->compression) : 0; 1380 1417 } 1381 1418 1382 1419 int bch2_bkey_set_needs_rebalance(struct bch_fs *c, struct bkey_i *_k,
+1
fs/bcachefs/extents.h
··· 692 692 unsigned bch2_bkey_ptrs_need_rebalance(struct bch_fs *, struct bkey_s_c, 693 693 unsigned, unsigned); 694 694 bool bch2_bkey_needs_rebalance(struct bch_fs *, struct bkey_s_c); 695 + u64 bch2_bkey_sectors_need_rebalance(struct bch_fs *, struct bkey_s_c); 695 696 696 697 int bch2_bkey_set_needs_rebalance(struct bch_fs *, struct bkey_i *, 697 698 struct bch_io_opts *);
+1 -1
fs/bcachefs/fs-io-buffered.c
··· 534 534 535 535 if (f_sectors > w->tmp_sectors) { 536 536 kfree(w->tmp); 537 - w->tmp = kcalloc(f_sectors, sizeof(struct bch_folio_sector), __GFP_NOFAIL); 537 + w->tmp = kcalloc(f_sectors, sizeof(struct bch_folio_sector), GFP_NOFS|__GFP_NOFAIL); 538 538 w->tmp_sectors = f_sectors; 539 539 } 540 540
+1 -2
fs/bcachefs/fs-ioctl.c
··· 328 328 329 329 mutex_lock(&c->sb_lock); 330 330 strscpy(c->disk_sb.sb->label, label, BCH_SB_LABEL_SIZE); 331 - mutex_unlock(&c->sb_lock); 332 - 333 331 ret = bch2_write_super(c); 332 + mutex_unlock(&c->sb_lock); 334 333 335 334 mnt_drop_write_file(file); 336 335 return ret;
+3 -3
fs/bcachefs/fsck.c
··· 2006 2006 if (ret) { 2007 2007 bch_err(c, "subvol %u points to missing inode root %llu", target_subvol, target_inum); 2008 2008 ret = -BCH_ERR_fsck_repair_unimplemented; 2009 - ret = 0; 2010 2009 goto err; 2011 2010 } 2012 2011 ··· 2215 2216 NULL, NULL, 2216 2217 BCH_TRANS_COMMIT_no_enospc, 2217 2218 check_xattr(trans, &iter, k, &hash_info, &inode))); 2219 + 2220 + inode_walker_exit(&inode); 2218 2221 bch_err_fn(c, ret); 2219 2222 return ret; 2220 2223 } ··· 2470 2469 : bch2_inode_unpack(inode_k, &inode); 2471 2470 if (ret) { 2472 2471 /* Should have been caught in dirents pass */ 2473 - if (!bch2_err_matches(ret, BCH_ERR_transaction_restart)) 2474 - bch_err(c, "error looking up parent directory: %i", ret); 2472 + bch_err_msg(c, ret, "error looking up parent directory"); 2475 2473 break; 2476 2474 } 2477 2475
+1 -1
fs/bcachefs/journal.c
··· 1260 1260 } 1261 1261 1262 1262 if (!had_entries) 1263 - j->last_empty_seq = cur_seq; 1263 + j->last_empty_seq = cur_seq - 1; /* to match j->seq */ 1264 1264 1265 1265 spin_lock(&j->lock); 1266 1266
+15
fs/bcachefs/journal_sb.c
··· 104 104 struct bch_sb_field_journal_v2 *journal = field_to_type(f, journal_v2); 105 105 struct bch_member m = bch2_sb_member_get(sb, sb->dev_idx); 106 106 int ret = -BCH_ERR_invalid_sb_journal; 107 + u64 sum = 0; 107 108 unsigned nr; 108 109 unsigned i; 109 110 struct u64_range *b; ··· 120 119 for (i = 0; i < nr; i++) { 121 120 b[i].start = le64_to_cpu(journal->d[i].start); 122 121 b[i].end = b[i].start + le64_to_cpu(journal->d[i].nr); 122 + 123 + if (b[i].end <= b[i].start) { 124 + prt_printf(err, "journal buckets entry with bad nr: %llu+%llu", 125 + le64_to_cpu(journal->d[i].start), 126 + le64_to_cpu(journal->d[i].nr)); 127 + goto err; 128 + } 129 + 130 + sum += le64_to_cpu(journal->d[i].nr); 123 131 } 124 132 125 133 sort(b, nr, sizeof(*b), u64_range_cmp, NULL); ··· 156 146 b[i].start, b[i].end, b[i + 1].start, b[i + 1].end); 157 147 goto err; 158 148 } 149 + } 150 + 151 + if (sum > UINT_MAX) { 152 + prt_printf(err, "too many journal buckets: %llu > %u", sum, UINT_MAX); 153 + goto err; 159 154 } 160 155 161 156 ret = 0;
+1 -1
fs/bcachefs/movinggc.c
··· 383 383 if (min_member_capacity == U64_MAX) 384 384 min_member_capacity = 128 * 2048; 385 385 386 - bch2_trans_unlock_long(ctxt.trans); 386 + move_buckets_wait(&ctxt, buckets, true); 387 387 bch2_kthread_io_clock_wait(clock, last + (min_member_capacity >> 6), 388 388 MAX_SCHEDULE_TIMEOUT); 389 389 }
+8 -1
fs/bcachefs/recovery.c
··· 241 241 const struct journal_key *l = *((const struct journal_key **)_l); 242 242 const struct journal_key *r = *((const struct journal_key **)_r); 243 243 244 - return cmp_int(l->journal_seq, r->journal_seq); 244 + /* 245 + * Map 0 to U64_MAX, so that keys with journal_seq === 0 come last 246 + * 247 + * journal_seq == 0 means that the key comes from early repair, and 248 + * should be inserted last so as to avoid overflowing the journal 249 + */ 250 + return cmp_int(l->journal_seq - 1, r->journal_seq - 1); 245 251 } 246 252 247 253 int bch2_journal_replay(struct bch_fs *c) ··· 328 322 } 329 323 } 330 324 325 + bch2_trans_unlock_long(trans); 331 326 /* 332 327 * Now, replay any remaining keys in the order in which they appear in 333 328 * the journal, unpinning those journal entries as we go:
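The new comparator relies on unsigned wraparound: subtracting 1 maps journal_seq == 0 to U64_MAX, so keys from early repair compare greater than keys with a real sequence number. A minimal standalone sketch of that ordering, with cmp_u64() standing in for the kernel's cmp_int():

#include <stdint.h>
#include <stdio.h>

/* Stand-in for cmp_int(): -1, 0 or 1 depending on ordering. */
static int cmp_u64(uint64_t l, uint64_t r)
{
	return (l > r) - (l < r);
}

int main(void)
{
	uint64_t early_repair = 0;	/* journal_seq == 0, no journal entry */
	uint64_t replayed_key = 5;	/* key with a real journal sequence   */

	/* Plain comparison: the seq-0 key would sort first. */
	printf("%d\n", cmp_u64(early_repair, replayed_key));		/* -1 */

	/* With the "- 1" trick, 0 wraps to UINT64_MAX and sorts last. */
	printf("%d\n", cmp_u64(early_repair - 1, replayed_key - 1));	/*  1 */
	return 0;
}

Keys with nonzero sequence numbers keep their relative order, since subtracting 1 from every value >= 1 is order-preserving.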
+2 -1
fs/bcachefs/replicas.c
··· 451 451 .type = BCH_DISK_ACCOUNTING_replicas, 452 452 }; 453 453 454 - memcpy(&k.replicas, e, replicas_entry_bytes(e)); 454 + unsafe_memcpy(&k.replicas, e, replicas_entry_bytes(e), 455 + "embedded variable length struct"); 455 456 456 457 struct bpos p = disk_accounting_pos_to_bpos(&k); 457 458
+7 -1
fs/bcachefs/sb-downgrade.c
··· 75 75 BCH_FSCK_ERR_accounting_key_junk_at_end) \ 76 76 x(disk_accounting_inum, \ 77 77 BIT_ULL(BCH_RECOVERY_PASS_check_allocations), \ 78 + BCH_FSCK_ERR_accounting_mismatch) \ 79 + x(rebalance_work_acct_fix, \ 80 + BIT_ULL(BCH_RECOVERY_PASS_check_allocations), \ 78 81 BCH_FSCK_ERR_accounting_mismatch) 79 82 80 83 #define DOWNGRADE_TABLE() \ ··· 111 108 BCH_FSCK_ERR_fs_usage_persistent_reserved_wrong, \ 112 109 BCH_FSCK_ERR_fs_usage_replicas_wrong, \ 113 110 BCH_FSCK_ERR_accounting_replicas_not_marked, \ 114 - BCH_FSCK_ERR_bkey_version_in_future) 111 + BCH_FSCK_ERR_bkey_version_in_future) \ 112 + x(rebalance_work_acct_fix, \ 113 + BIT_ULL(BCH_RECOVERY_PASS_check_allocations), \ 114 + BCH_FSCK_ERR_accounting_mismatch) 115 115 116 116 struct upgrade_downgrade_entry { 117 117 u64 recovery_passes;
-1
fs/bcachefs/util.c
··· 416 416 printbuf_tabstop_push(out, TABSTOP_SIZE + 2); 417 417 418 418 prt_printf(out, "\tsince mount\r\trecent\r\n"); 419 - prt_printf(out, "recent"); 420 419 421 420 printbuf_tabstops_reset(out); 422 421 printbuf_tabstop_push(out, out->indent + 20);
+11 -1
fs/bcachefs/xattr.c
··· 612 612 name, buffer, size, true); 613 613 } 614 614 615 + /* Noop - xattrs in the bcachefs_effective namespace are inherited */ 616 + static int bch2_xattr_bcachefs_set_effective(const struct xattr_handler *handler, 617 + struct mnt_idmap *idmap, 618 + struct dentry *dentry, struct inode *vinode, 619 + const char *name, const void *value, 620 + size_t size, int flags) 621 + { 622 + return 0; 623 + } 624 + 615 625 static const struct xattr_handler bch_xattr_bcachefs_effective_handler = { 616 626 .prefix = "bcachefs_effective.", 617 627 .get = bch2_xattr_bcachefs_get_effective, 618 - .set = bch2_xattr_bcachefs_set, 628 + .set = bch2_xattr_bcachefs_set_effective, 619 629 }; 620 630 621 631 #endif /* NO_BCACHEFS_FS */
+18 -8
fs/btrfs/bio.c
··· 668 668 { 669 669 struct btrfs_inode *inode = bbio->inode; 670 670 struct btrfs_fs_info *fs_info = bbio->fs_info; 671 - struct btrfs_bio *orig_bbio = bbio; 672 671 struct bio *bio = &bbio->bio; 673 672 u64 logical = bio->bi_iter.bi_sector << SECTOR_SHIFT; 674 673 u64 length = bio->bi_iter.bi_size; ··· 705 706 bbio->saved_iter = bio->bi_iter; 706 707 ret = btrfs_lookup_bio_sums(bbio); 707 708 if (ret) 708 - goto fail_put_bio; 709 + goto fail; 709 710 } 710 711 711 712 if (btrfs_op(bio) == BTRFS_MAP_WRITE) { ··· 739 740 740 741 ret = btrfs_bio_csum(bbio); 741 742 if (ret) 742 - goto fail_put_bio; 743 + goto fail; 743 744 } else if (use_append || 744 745 (btrfs_is_zoned(fs_info) && inode && 745 746 inode->flags & BTRFS_INODE_NODATASUM)) { 746 747 ret = btrfs_alloc_dummy_sum(bbio); 747 748 if (ret) 748 - goto fail_put_bio; 749 + goto fail; 749 750 } 750 751 } 751 752 ··· 753 754 done: 754 755 return map_length == length; 755 756 756 - fail_put_bio: 757 - if (map_length < length) 758 - btrfs_cleanup_bio(bbio); 759 757 fail: 760 758 btrfs_bio_counter_dec(fs_info); 761 - btrfs_bio_end_io(orig_bbio, ret); 759 + /* 760 + * We have split the original bbio, now we have to end both the current 761 + * @bbio and remaining one, as the remaining one will never be submitted. 762 + */ 763 + if (map_length < length) { 764 + struct btrfs_bio *remaining = bbio->private; 765 + 766 + ASSERT(bbio->bio.bi_pool == &btrfs_clone_bioset); 767 + ASSERT(remaining); 768 + 769 + remaining->bio.bi_status = ret; 770 + btrfs_orig_bbio_end_io(remaining); 771 + } 772 + bbio->bio.bi_status = ret; 773 + btrfs_orig_bbio_end_io(bbio); 762 774 /* Do not submit another chunk */ 763 775 return true; 764 776 }
+1 -1
fs/btrfs/fiemap.c
··· 637 637 struct btrfs_path *path; 638 638 struct fiemap_cache cache = { 0 }; 639 639 struct btrfs_backref_share_check_ctx *backref_ctx; 640 - u64 last_extent_end; 640 + u64 last_extent_end = 0; 641 641 u64 prev_extent_end; 642 642 u64 range_start; 643 643 u64 range_end;
+2
fs/btrfs/qgroup.c
··· 4185 4185 return 0; 4186 4186 } 4187 4187 4188 + btrfs_run_delayed_iputs(root->fs_info); 4189 + btrfs_wait_on_delayed_iputs(root->fs_info); 4188 4190 ret = btrfs_start_delalloc_snapshot(root, true); 4189 4191 if (ret < 0) 4190 4192 goto out;
+5 -12
fs/btrfs/space-info.c
··· 1985 1985 return unalloc < data_chunk_size; 1986 1986 } 1987 1987 1988 - static int do_reclaim_sweep(struct btrfs_fs_info *fs_info, 1989 - struct btrfs_space_info *space_info, int raid) 1988 + static void do_reclaim_sweep(struct btrfs_fs_info *fs_info, 1989 + struct btrfs_space_info *space_info, int raid) 1990 1990 { 1991 1991 struct btrfs_block_group *bg; 1992 1992 int thresh_pct; ··· 2031 2031 } 2032 2032 2033 2033 up_read(&space_info->groups_sem); 2034 - return 0; 2035 2034 } 2036 2035 2037 2036 void btrfs_space_info_update_reclaimable(struct btrfs_space_info *space_info, s64 bytes) ··· 2073 2074 return ret; 2074 2075 } 2075 2076 2076 - int btrfs_reclaim_sweep(struct btrfs_fs_info *fs_info) 2077 + void btrfs_reclaim_sweep(struct btrfs_fs_info *fs_info) 2077 2078 { 2078 - int ret; 2079 2079 int raid; 2080 2080 struct btrfs_space_info *space_info; 2081 2081 2082 2082 list_for_each_entry(space_info, &fs_info->space_info, list) { 2083 2083 if (!btrfs_should_periodic_reclaim(space_info)) 2084 2084 continue; 2085 - for (raid = 0; raid < BTRFS_NR_RAID_TYPES; raid++) { 2086 - ret = do_reclaim_sweep(fs_info, space_info, raid); 2087 - if (ret) 2088 - return ret; 2089 - } 2085 + for (raid = 0; raid < BTRFS_NR_RAID_TYPES; raid++) 2086 + do_reclaim_sweep(fs_info, space_info, raid); 2090 2087 } 2091 - 2092 - return ret; 2093 2088 }
+1 -1
fs/btrfs/space-info.h
··· 294 294 void btrfs_set_periodic_reclaim_ready(struct btrfs_space_info *space_info, bool ready); 295 295 bool btrfs_should_periodic_reclaim(struct btrfs_space_info *space_info); 296 296 int btrfs_calc_reclaim_threshold(struct btrfs_space_info *space_info); 297 - int btrfs_reclaim_sweep(struct btrfs_fs_info *fs_info); 297 + void btrfs_reclaim_sweep(struct btrfs_fs_info *fs_info); 298 298 299 299 #endif /* BTRFS_SPACE_INFO_H */
+1
fs/ceph/inode.c
··· 695 695 696 696 percpu_counter_dec(&mdsc->metric.total_inodes); 697 697 698 + netfs_wait_for_outstanding_io(inode); 698 699 truncate_inode_pages_final(&inode->i_data); 699 700 if (inode->i_state & I_PINNING_NETFS_WB) 700 701 ceph_fscache_unuse_cookie(inode, true);
+1
fs/netfs/io.c
··· 313 313 netfs_reset_subreq_iter(rreq, subreq); 314 314 netfs_read_from_server(rreq, subreq); 315 315 } else if (test_bit(NETFS_SREQ_SHORT_IO, &subreq->flags)) { 316 + netfs_reset_subreq_iter(rreq, subreq); 316 317 netfs_rreq_short_read(rreq, subreq); 317 318 } 318 319 }
+45 -15
fs/netfs/misc.c
··· 97 97 void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length) 98 98 { 99 99 struct netfs_folio *finfo; 100 + struct netfs_inode *ctx = netfs_inode(folio_inode(folio)); 100 101 size_t flen = folio_size(folio); 101 102 102 103 _enter("{%lx},%zx,%zx", folio->index, offset, length); 104 + 105 + if (offset == 0 && length == flen) { 106 + unsigned long long i_size = i_size_read(&ctx->inode); 107 + unsigned long long fpos = folio_pos(folio), end; 108 + 109 + end = umin(fpos + flen, i_size); 110 + if (fpos < i_size && end > ctx->zero_point) 111 + ctx->zero_point = end; 112 + } 113 + 114 + folio_wait_private_2(folio); /* [DEPRECATED] */ 103 115 104 116 if (!folio_test_private(folio)) 105 117 return; ··· 125 113 /* We have a partially uptodate page from a streaming write. */ 126 114 unsigned int fstart = finfo->dirty_offset; 127 115 unsigned int fend = fstart + finfo->dirty_len; 128 - unsigned int end = offset + length; 116 + unsigned int iend = offset + length; 129 117 130 118 if (offset >= fend) 131 119 return; 132 - if (end <= fstart) 120 + if (iend <= fstart) 133 121 return; 134 - if (offset <= fstart && end >= fend) 135 - goto erase_completely; 136 - if (offset <= fstart && end > fstart) 137 - goto reduce_len; 138 - if (offset > fstart && end >= fend) 139 - goto move_start; 122 + 123 + /* The invalidation region overlaps the data. If the region 124 + * covers the start of the data, we either move along the start 125 + * or just erase the data entirely. 126 + */ 127 + if (offset <= fstart) { 128 + if (iend >= fend) 129 + goto erase_completely; 130 + /* Move the start of the data. */ 131 + finfo->dirty_len = fend - iend; 132 + finfo->dirty_offset = offset; 133 + return; 134 + } 135 + 136 + /* Reduce the length of the data if the invalidation region 137 + * covers the tail part. 138 + */ 139 + if (iend >= fend) { 140 + finfo->dirty_len = offset - fstart; 141 + return; 142 + } 143 + 140 144 /* A partial write was split. The caller has already zeroed 141 145 * it, so just absorb the hole. 142 146 */ ··· 165 137 folio_clear_uptodate(folio); 166 138 kfree(finfo); 167 139 return; 168 - reduce_len: 169 - finfo->dirty_len = offset + length - finfo->dirty_offset; 170 - return; 171 - move_start: 172 - finfo->dirty_len -= offset - finfo->dirty_offset; 173 - finfo->dirty_offset = offset; 174 140 } 175 141 EXPORT_SYMBOL(netfs_invalidate_folio); 176 142 ··· 181 159 struct netfs_inode *ctx = netfs_inode(folio_inode(folio)); 182 160 unsigned long long end; 183 161 184 - end = folio_pos(folio) + folio_size(folio); 162 + if (folio_test_dirty(folio)) 163 + return false; 164 + 165 + end = umin(folio_pos(folio) + folio_size(folio), i_size_read(&ctx->inode)); 185 166 if (end > ctx->zero_point) 186 167 ctx->zero_point = end; 187 168 188 169 if (folio_test_private(folio)) 189 170 return false; 171 + if (unlikely(folio_test_private_2(folio))) { /* [DEPRECATED] */ 172 + if (current_is_kswapd() || !(gfp & __GFP_FS)) 173 + return false; 174 + folio_wait_private_2(folio); 175 + } 190 176 fscache_note_page_release(netfs_i_cookie(ctx)); 191 177 return true; 192 178 }
+7
fs/netfs/write_collect.c
··· 33 33 int netfs_folio_written_back(struct folio *folio) 34 34 { 35 35 enum netfs_folio_trace why = netfs_folio_trace_clear; 36 + struct netfs_inode *ictx = netfs_inode(folio->mapping->host); 36 37 struct netfs_folio *finfo; 37 38 struct netfs_group *group = NULL; 38 39 int gcount = 0; ··· 42 41 /* Streaming writes cannot be redirtied whilst under writeback, 43 42 * so discard the streaming record. 44 43 */ 44 + unsigned long long fend; 45 + 46 + fend = folio_pos(folio) + finfo->dirty_offset + finfo->dirty_len; 47 + if (fend > ictx->zero_point) 48 + ictx->zero_point = fend; 49 + 45 50 folio_detach_private(folio); 46 51 group = finfo->netfs_group; 47 52 gcount++;
+4 -2
fs/nfs/callback_xdr.c
··· 118 118 if (likely(attrlen > 0)) 119 119 bitmap[0] = ntohl(*p++); 120 120 if (attrlen > 1) 121 - bitmap[1] = ntohl(*p); 121 + bitmap[1] = ntohl(*p++); 122 + if (attrlen > 2) 123 + bitmap[2] = ntohl(*p); 122 124 return 0; 123 125 } 124 126 ··· 448 446 void *argp) 449 447 { 450 448 struct cb_recallanyargs *args = argp; 451 - uint32_t bitmap[2]; 449 + uint32_t bitmap[3]; 452 450 __be32 *p, status; 453 451 454 452 p = xdr_inline_decode(xdr, 4);
+5 -10
fs/nfs/delegation.c
··· 647 647 prev = delegation; 648 648 continue; 649 649 } 650 + inode = nfs_delegation_grab_inode(delegation); 651 + if (inode == NULL) 652 + continue; 650 653 651 654 if (prev) { 652 655 struct inode *tmp = nfs_delegation_grab_inode(prev); ··· 660 657 } 661 658 } 662 659 663 - inode = nfs_delegation_grab_inode(delegation); 664 - if (inode == NULL) { 665 - rcu_read_unlock(); 666 - iput(to_put); 667 - goto restart; 668 - } 669 660 delegation = nfs_start_delegation_return_locked(NFS_I(inode)); 670 661 rcu_read_unlock(); 671 662 ··· 1181 1184 struct inode *inode; 1182 1185 restart: 1183 1186 rcu_read_lock(); 1184 - restart_locked: 1185 1187 list_for_each_entry_rcu(delegation, &server->delegations, super_list) { 1186 1188 if (test_bit(NFS_DELEGATION_INODE_FREEING, 1187 1189 &delegation->flags) || ··· 1191 1195 continue; 1192 1196 inode = nfs_delegation_grab_inode(delegation); 1193 1197 if (inode == NULL) 1194 - goto restart_locked; 1198 + continue; 1195 1199 delegation = nfs_start_delegation_return_locked(NFS_I(inode)); 1196 1200 rcu_read_unlock(); 1197 1201 if (delegation != NULL) { ··· 1314 1318 1315 1319 restart: 1316 1320 rcu_read_lock(); 1317 - restart_locked: 1318 1321 list_for_each_entry_rcu(delegation, &server->delegations, super_list) { 1319 1322 if (test_bit(NFS_DELEGATION_INODE_FREEING, 1320 1323 &delegation->flags) || ··· 1325 1330 continue; 1326 1331 inode = nfs_delegation_grab_inode(delegation); 1327 1332 if (inode == NULL) 1328 - goto restart_locked; 1333 + continue; 1329 1334 spin_lock(&delegation->lock); 1330 1335 cred = get_cred_rcu(delegation->cred); 1331 1336 nfs4_stateid_copy(&stateid, &delegation->stateid);
+8 -4
fs/nfs/nfs4proc.c
··· 3931 3931 FATTR4_WORD0_CASE_INSENSITIVE | 3932 3932 FATTR4_WORD0_CASE_PRESERVING; 3933 3933 if (minorversion) 3934 - bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT; 3934 + bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT | 3935 + FATTR4_WORD2_OPEN_ARGUMENTS; 3935 3936 3936 3937 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3937 3938 if (status == 0) { ··· 9998 9997 fallthrough; 9999 9998 default: 10000 9999 task->tk_status = 0; 10000 + lrp->res.lrs_present = 0; 10001 10001 fallthrough; 10002 10002 case 0: 10003 10003 break; ··· 10012 10010 task->tk_status = 0; 10013 10011 break; 10014 10012 case -NFS4ERR_DELAY: 10015 - if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN) 10016 - break; 10017 - goto out_restart; 10013 + if (nfs4_async_handle_error(task, server, NULL, NULL) == 10014 + -EAGAIN) 10015 + goto out_restart; 10016 + lrp->res.lrs_present = 0; 10017 + break; 10018 10018 } 10019 10019 return; 10020 10020 out_restart:
+2 -3
fs/nfs/pnfs.c
··· 1284 1284 LIST_HEAD(freeme); 1285 1285 1286 1286 spin_lock(&inode->i_lock); 1287 - if (!pnfs_layout_is_valid(lo) || 1288 - !nfs4_stateid_match_other(&lo->plh_stateid, arg_stateid)) 1287 + if (!nfs4_stateid_match_other(&lo->plh_stateid, arg_stateid)) 1289 1288 goto out_unlock; 1290 - if (stateid) { 1289 + if (stateid && pnfs_layout_is_valid(lo)) { 1291 1290 u32 seq = be32_to_cpu(arg_stateid->seqid); 1292 1291 1293 1292 pnfs_mark_matching_lsegs_invalid(lo, &freeme, range, seq);
+2
fs/nfs/super.c
··· 47 47 #include <linux/vfs.h> 48 48 #include <linux/inet.h> 49 49 #include <linux/in6.h> 50 + #include <linux/sched.h> 50 51 #include <linux/slab.h> 51 52 #include <net/ipv6.h> 52 53 #include <linux/netdevice.h> ··· 229 228 ret = fn(server, data); 230 229 if (ret) 231 230 goto out; 231 + cond_resched(); 232 232 rcu_read_lock(); 233 233 } 234 234 rcu_read_unlock();
+33 -18
fs/nfsd/nfs4state.c
··· 2789 2789 deny & NFS4_SHARE_ACCESS_READ ? "r" : "-", 2790 2790 deny & NFS4_SHARE_ACCESS_WRITE ? "w" : "-"); 2791 2791 2792 - spin_lock(&nf->fi_lock); 2793 - file = find_any_file_locked(nf); 2794 - if (file) { 2795 - nfs4_show_superblock(s, file); 2796 - seq_puts(s, ", "); 2797 - nfs4_show_fname(s, file); 2798 - seq_puts(s, ", "); 2799 - } 2800 - spin_unlock(&nf->fi_lock); 2792 + if (nf) { 2793 + spin_lock(&nf->fi_lock); 2794 + file = find_any_file_locked(nf); 2795 + if (file) { 2796 + nfs4_show_superblock(s, file); 2797 + seq_puts(s, ", "); 2798 + nfs4_show_fname(s, file); 2799 + seq_puts(s, ", "); 2800 + } 2801 + spin_unlock(&nf->fi_lock); 2802 + } else 2803 + seq_puts(s, "closed, "); 2801 2804 nfs4_show_owner(s, oo); 2802 2805 if (st->sc_status & SC_STATUS_ADMIN_REVOKED) 2803 2806 seq_puts(s, ", admin-revoked"); ··· 3078 3075 struct nfs4_delegation *dp = 3079 3076 container_of(ncf, struct nfs4_delegation, dl_cb_fattr); 3080 3077 3081 - nfs4_put_stid(&dp->dl_stid); 3082 3078 clear_bit(CB_GETATTR_BUSY, &ncf->ncf_cb_flags); 3083 3079 wake_up_bit(&ncf->ncf_cb_flags, CB_GETATTR_BUSY); 3080 + nfs4_put_stid(&dp->dl_stid); 3084 3081 } 3085 3082 3086 3083 static const struct nfsd4_callback_ops nfsd4_cb_recall_any_ops = { ··· 8815 8812 /** 8816 8813 * nfsd4_deleg_getattr_conflict - Recall if GETATTR causes conflict 8817 8814 * @rqstp: RPC transaction context 8818 - * @inode: file to be checked for a conflict 8815 + * @dentry: dentry of inode to be checked for a conflict 8819 8816 * @modified: return true if file was modified 8820 8817 * @size: new size of file if modified is true 8821 8818 * ··· 8830 8827 * code is returned. 8831 8828 */ 8832 8829 __be32 8833 - nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode, 8830 + nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct dentry *dentry, 8834 8831 bool *modified, u64 *size) 8835 8832 { 8836 8833 __be32 status; 8837 8834 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 8838 8835 struct file_lock_context *ctx; 8839 8836 struct file_lease *fl; 8840 - struct nfs4_delegation *dp; 8841 8837 struct iattr attrs; 8842 8838 struct nfs4_cb_fattr *ncf; 8839 + struct inode *inode = d_inode(dentry); 8843 8840 8844 8841 *modified = false; 8845 8842 ctx = locks_inode_context(inode); ··· 8862 8859 goto break_lease; 8863 8860 } 8864 8861 if (type == F_WRLCK) { 8865 - dp = fl->c.flc_owner; 8862 + struct nfs4_delegation *dp = fl->c.flc_owner; 8863 + 8866 8864 if (dp->dl_recall.cb_clp == *(rqstp->rq_lease_breaker)) { 8867 8865 spin_unlock(&ctx->flc_lock); 8868 8866 return 0; ··· 8871 8867 break_lease: 8872 8868 nfsd_stats_wdeleg_getattr_inc(nn); 8873 8869 dp = fl->c.flc_owner; 8870 + refcount_inc(&dp->dl_stid.sc_count); 8874 8871 ncf = &dp->dl_cb_fattr; 8875 8872 nfs4_cb_getattr(&dp->dl_cb_fattr); 8876 8873 spin_unlock(&ctx->flc_lock); ··· 8881 8876 /* Recall delegation only if client didn't respond */ 8882 8877 status = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ)); 8883 8878 if (status != nfserr_jukebox || 8884 - !nfsd_wait_for_delegreturn(rqstp, inode)) 8879 + !nfsd_wait_for_delegreturn(rqstp, inode)) { 8880 + nfs4_put_stid(&dp->dl_stid); 8885 8881 return status; 8882 + } 8886 8883 } 8887 8884 if (!ncf->ncf_file_modified && 8888 8885 (ncf->ncf_initial_cinfo != ncf->ncf_cb_change || 8889 8886 ncf->ncf_cur_fsize != ncf->ncf_cb_fsize)) 8890 8887 ncf->ncf_file_modified = true; 8891 8888 if (ncf->ncf_file_modified) { 8889 + int err; 8890 + 8892 8891 /* 8893 8892 * Per section 10.4.3 of RFC 8881, the server would 
8894 8893 * not update the file's metadata with the client's 8895 8894 * modified size 8896 8895 */ 8897 8896 attrs.ia_mtime = attrs.ia_ctime = current_time(inode); 8898 - attrs.ia_valid = ATTR_MTIME | ATTR_CTIME; 8899 - setattr_copy(&nop_mnt_idmap, inode, &attrs); 8900 - mark_inode_dirty(inode); 8897 + attrs.ia_valid = ATTR_MTIME | ATTR_CTIME | ATTR_DELEG; 8898 + inode_lock(inode); 8899 + err = notify_change(&nop_mnt_idmap, dentry, &attrs, NULL); 8900 + inode_unlock(inode); 8901 + if (err) { 8902 + nfs4_put_stid(&dp->dl_stid); 8903 + return nfserrno(err); 8904 + } 8901 8905 ncf->ncf_cur_fsize = ncf->ncf_cb_fsize; 8902 8906 *size = ncf->ncf_cur_fsize; 8903 8907 *modified = true; 8904 8908 } 8909 + nfs4_put_stid(&dp->dl_stid); 8905 8910 return 0; 8906 8911 } 8907 8912 break;
+4 -2
fs/nfsd/nfs4xdr.c
··· 3545 3545 args.dentry = dentry; 3546 3546 args.ignore_crossmnt = (ignore_crossmnt != 0); 3547 3547 args.acl = NULL; 3548 + #ifdef CONFIG_NFSD_V4_SECURITY_LABEL 3549 + args.context = NULL; 3550 + #endif 3548 3551 3549 3552 /* 3550 3553 * Make a local copy of the attribute bitmap that can be modified. ··· 3565 3562 } 3566 3563 args.size = 0; 3567 3564 if (attrmask[0] & (FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE)) { 3568 - status = nfsd4_deleg_getattr_conflict(rqstp, d_inode(dentry), 3565 + status = nfsd4_deleg_getattr_conflict(rqstp, dentry, 3569 3566 &file_modified, &size); 3570 3567 if (status) 3571 3568 goto out; ··· 3620 3617 args.contextsupport = false; 3621 3618 3622 3619 #ifdef CONFIG_NFSD_V4_SECURITY_LABEL 3623 - args.context = NULL; 3624 3620 if ((attrmask[2] & FATTR4_WORD2_SECURITY_LABEL) || 3625 3621 attrmask[0] & FATTR4_WORD0_SUPPORTED_ATTRS) { 3626 3622 if (exp->ex_flags & NFSEXP_SECURITY_LABEL)
+1 -1
fs/nfsd/state.h
··· 781 781 } 782 782 783 783 extern __be32 nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, 784 - struct inode *inode, bool *file_modified, u64 *size); 784 + struct dentry *dentry, bool *file_modified, u64 *size); 785 785 #endif /* NFSD4_STATE_H */
+12 -39
fs/overlayfs/params.c
··· 353 353 case Opt_datadir_add: 354 354 ctx->nr_data++; 355 355 fallthrough; 356 + case Opt_lowerdir: 357 + fallthrough; 356 358 case Opt_lowerdir_add: 357 359 WARN_ON(ctx->nr >= ctx->capacity); 358 360 l = &ctx->lower[ctx->nr++]; ··· 367 365 } 368 366 } 369 367 370 - static int ovl_parse_layer(struct fs_context *fc, struct fs_parameter *param, 371 - enum ovl_opt layer) 368 + static int ovl_parse_layer(struct fs_context *fc, const char *layer_name, enum ovl_opt layer) 372 369 { 373 - char *name = kstrdup(param->string, GFP_KERNEL); 370 + char *name = kstrdup(layer_name, GFP_KERNEL); 374 371 bool upper = (layer == Opt_upperdir || layer == Opt_workdir); 375 372 struct path path; 376 373 int err; ··· 377 376 if (!name) 378 377 return -ENOMEM; 379 378 380 - if (upper) 379 + if (upper || layer == Opt_lowerdir) 381 380 err = ovl_mount_dir(name, &path); 382 381 else 383 382 err = ovl_mount_dir_noesc(name, &path); ··· 433 432 { 434 433 int err; 435 434 struct ovl_fs_context *ctx = fc->fs_private; 436 - struct ovl_fs_context_layer *l; 437 435 char *dup = NULL, *iter; 438 436 ssize_t nr_lower, nr; 439 437 bool data_layer = false; ··· 449 449 return 0; 450 450 451 451 if (*name == ':') { 452 - pr_err("cannot append lower layer"); 452 + pr_err("cannot append lower layer\n"); 453 453 return -EINVAL; 454 454 } 455 455 ··· 472 472 goto out_err; 473 473 } 474 474 475 - if (nr_lower > ctx->capacity) { 476 - err = -ENOMEM; 477 - l = krealloc_array(ctx->lower, nr_lower, sizeof(*ctx->lower), 478 - GFP_KERNEL_ACCOUNT); 479 - if (!l) 480 - goto out_err; 481 - 482 - ctx->lower = l; 483 - ctx->capacity = nr_lower; 484 - } 485 - 486 475 iter = dup; 487 - l = ctx->lower; 488 - for (nr = 0; nr < nr_lower; nr++, l++) { 489 - ctx->nr++; 490 - memset(l, 0, sizeof(*l)); 491 - 492 - err = ovl_mount_dir(iter, &l->path); 476 + for (nr = 0; nr < nr_lower; nr++) { 477 + err = ovl_parse_layer(fc, iter, Opt_lowerdir); 493 478 if (err) 494 - goto out_put; 495 - 496 - err = ovl_mount_dir_check(fc, &l->path, Opt_lowerdir, iter, false); 497 - if (err) 498 - goto out_put; 499 - 500 - err = -ENOMEM; 501 - l->name = kstrdup(iter, GFP_KERNEL_ACCOUNT); 502 - if (!l->name) 503 - goto out_put; 479 + goto out_err; 504 480 505 481 if (data_layer) 506 482 ctx->nr_data++; ··· 493 517 * there are no data layers. 494 518 */ 495 519 if (ctx->nr_data > 0) { 496 - pr_err("regular lower layers cannot follow data lower layers"); 497 - goto out_put; 520 + pr_err("regular lower layers cannot follow data lower layers\n"); 521 + goto out_err; 498 522 } 499 523 500 524 data_layer = false; ··· 507 531 } 508 532 kfree(dup); 509 533 return 0; 510 - 511 - out_put: 512 - ovl_reset_lowerdirs(ctx); 513 534 514 535 out_err: 515 536 kfree(dup); ··· 555 582 case Opt_datadir_add: 556 583 case Opt_upperdir: 557 584 case Opt_workdir: 558 - err = ovl_parse_layer(fc, param, opt); 585 + err = ovl_parse_layer(fc, param->string, opt); 559 586 break; 560 587 case Opt_default_permissions: 561 588 config->default_permissions = true;
+1 -1
fs/romfs/super.c
··· 126 126 } 127 127 } 128 128 129 - buf = folio_zero_tail(folio, fillsize, buf); 129 + buf = folio_zero_tail(folio, fillsize, buf + fillsize); 130 130 kunmap_local(buf); 131 131 folio_end_read(folio, ret == 0); 132 132 return ret;
+3 -3
fs/smb/client/cifsfs.c
··· 75 75 /* 76 76 * Global transaction id (XID) information 77 77 */ 78 - unsigned int GlobalCurrentXid; /* protected by GlobalMid_Sem */ 79 - unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */ 80 - unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Sem */ 78 + unsigned int GlobalCurrentXid; /* protected by GlobalMid_Lock */ 79 + unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Lock */ 80 + unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Lock */ 81 81 spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */ 82 82 83 83 /*
+3 -4
fs/smb/client/cifsglob.h
··· 254 254 struct smb_rqst { 255 255 struct kvec *rq_iov; /* array of kvecs */ 256 256 unsigned int rq_nvec; /* number of kvecs in array */ 257 - size_t rq_iter_size; /* Amount of data in ->rq_iter */ 258 257 struct iov_iter rq_iter; /* Data iterator */ 259 258 struct xarray rq_buffer; /* Page buffer for encryption */ 260 259 }; ··· 2016 2017 /* 2017 2018 * Global transaction id (XID) information 2018 2019 */ 2019 - extern unsigned int GlobalCurrentXid; /* protected by GlobalMid_Sem */ 2020 - extern unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */ 2021 - extern unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Sem */ 2020 + extern unsigned int GlobalCurrentXid; /* protected by GlobalMid_Lock */ 2021 + extern unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Lock */ 2022 + extern unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Lock */ 2022 2023 extern spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */ 2023 2024 2024 2025 /*
-1
fs/smb/client/cifssmb.c
··· 1713 1713 rqst.rq_iov = iov; 1714 1714 rqst.rq_nvec = 2; 1715 1715 rqst.rq_iter = wdata->subreq.io_iter; 1716 - rqst.rq_iter_size = iov_iter_count(&wdata->subreq.io_iter); 1717 1716 1718 1717 cifs_dbg(FYI, "async write at %llu %zu bytes\n", 1719 1718 wdata->subreq.start, wdata->subreq.len);
+3
fs/smb/client/connect.c
··· 4194 4194 * 4195 4195 * If one doesn't exist then insert a new tcon_link struct into the tree and 4196 4196 * try to construct a new one. 4197 + * 4198 + * REMEMBER to call cifs_put_tlink() after successful calls to cifs_sb_tlink, 4199 + * to avoid refcount issues 4197 4200 */ 4198 4201 struct tcon_link * 4199 4202 cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
+1 -3
fs/smb/client/file.c
··· 2912 2912 if (!CIFS_CACHE_READ(cinode)) 2913 2913 return netfs_unbuffered_read_iter(iocb, to); 2914 2914 2915 - if (cap_unix(tcon->ses) && 2916 - (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && 2917 - ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) { 2915 + if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0) { 2918 2916 if (iocb->ki_flags & IOCB_DIRECT) 2919 2917 return netfs_unbuffered_read_iter(iocb, to); 2920 2918 return netfs_buffered_read_iter(iocb, to);
+2
fs/smb/client/ioctl.c
··· 229 229 230 230 shutdown_good: 231 231 trace_smb3_shutdown_done(flags, tcon->tid); 232 + cifs_put_tlink(tlink); 232 233 return 0; 233 234 shutdown_out_err: 234 235 trace_smb3_shutdown_err(rc, flags, tcon->tid); 236 + cifs_put_tlink(tlink); 235 237 return rc; 236 238 } 237 239
+1
fs/smb/client/link.c
··· 588 588 tlink = cifs_sb_tlink(cifs_sb); 589 589 if (IS_ERR(tlink)) { 590 590 rc = PTR_ERR(tlink); 591 + /* BB could be clearer if skipped put_tlink on error here, but harmless */ 591 592 goto symlink_exit; 592 593 } 593 594 pTcon = tlink_tcon(tlink);
+7 -4
fs/smb/client/reparse.c
··· 378 378 u32 plen, struct cifs_sb_info *cifs_sb, 379 379 bool unicode, struct cifs_open_info_data *data) 380 380 { 381 + struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); 382 + 381 383 data->reparse.buf = buf; 382 384 383 385 /* See MS-FSCC 2.1.2 */ ··· 396 394 case IO_REPARSE_TAG_LX_FIFO: 397 395 case IO_REPARSE_TAG_LX_CHR: 398 396 case IO_REPARSE_TAG_LX_BLK: 399 - return 0; 397 + break; 400 398 default: 401 - cifs_dbg(VFS, "%s: unhandled reparse tag: 0x%08x\n", 402 - __func__, le32_to_cpu(buf->ReparseTag)); 403 - return -EOPNOTSUPP; 399 + cifs_tcon_dbg(VFS | ONCE, "unhandled reparse tag: 0x%08x\n", 400 + le32_to_cpu(buf->ReparseTag)); 401 + break; 404 402 } 403 + return 0; 405 404 } 406 405 407 406 int smb2_parse_reparse_point(struct cifs_sb_info *cifs_sb,
+22 -2
fs/smb/client/smb2ops.c
··· 3305 3305 struct inode *inode = file_inode(file); 3306 3306 struct cifsFileInfo *cfile = file->private_data; 3307 3307 struct file_zero_data_information fsctl_buf; 3308 + unsigned long long end = offset + len, i_size, remote_i_size; 3308 3309 long rc; 3309 3310 unsigned int xid; 3310 3311 __u8 set_sparse = 1; ··· 3337 3336 (char *)&fsctl_buf, 3338 3337 sizeof(struct file_zero_data_information), 3339 3338 CIFSMaxBufSize, NULL, NULL); 3339 + 3340 + if (rc) 3341 + goto unlock; 3342 + 3343 + /* If there's dirty data in the buffer that would extend the EOF if it 3344 + * were written, then we need to move the EOF marker over to the lower 3345 + * of the high end of the hole and the proposed EOF. The problem is 3346 + * that we locally hole-punch the tail of the dirty data, the proposed 3347 + * EOF update will end up in the wrong place. 3348 + */ 3349 + i_size = i_size_read(inode); 3350 + remote_i_size = netfs_inode(inode)->remote_i_size; 3351 + if (end > remote_i_size && i_size > remote_i_size) { 3352 + unsigned long long extend_to = umin(end, i_size); 3353 + rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid, 3354 + cfile->fid.volatile_fid, cfile->pid, extend_to); 3355 + if (rc >= 0) 3356 + netfs_inode(inode)->remote_i_size = extend_to; 3357 + } 3358 + 3359 + unlock: 3340 3360 filemap_invalidate_unlock(inode->i_mapping); 3341 3361 out: 3342 3362 inode_unlock(inode); ··· 4468 4446 } 4469 4447 iov_iter_xarray(&new->rq_iter, ITER_SOURCE, 4470 4448 buffer, 0, size); 4471 - new->rq_iter_size = size; 4472 4449 } 4473 4450 } 4474 4451 ··· 4513 4492 rqst.rq_nvec = 2; 4514 4493 if (iter) { 4515 4494 rqst.rq_iter = *iter; 4516 - rqst.rq_iter_size = iov_iter_count(iter); 4517 4495 iter_size = iov_iter_count(iter); 4518 4496 } 4519 4497
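The added EOF handling only fires when locally dirty pagecache extends the file beyond what the server knows about; a hypothetical worked example of the umin() choice (all sizes invented):

#include <stdio.h>

int main(void)
{
	/* Hypothetical sizes: dirty pagecache extends the file locally. */
	unsigned long long remote_i_size = 100;	/* size the server currently has */
	unsigned long long i_size        = 300;	/* local size incl. dirty data   */
	unsigned long long end           = 400;	/* end of the zeroed range       */

	if (end > remote_i_size && i_size > remote_i_size) {
		unsigned long long extend_to = end < i_size ? end : i_size; /* umin() */
		/* the SMB2_set_eof() call would move the server EOF here */
		printf("move server EOF to %llu\n", extend_to);	/* 300, not 400 */
	}
	return 0;
}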
+21 -23
fs/smb/client/smb2pdu.c
··· 4441 4441 * If we want to do a RDMA write, fill in and append 4442 4442 * smbd_buffer_descriptor_v1 to the end of read request 4443 4443 */ 4444 - if (smb3_use_rdma_offload(io_parms)) { 4444 + if (rdata && smb3_use_rdma_offload(io_parms)) { 4445 4445 struct smbd_buffer_descriptor_v1 *v1; 4446 4446 bool need_invalidate = server->dialect == SMB30_PROT_ID; 4447 4447 ··· 4523 4523 4524 4524 if (rdata->got_bytes) { 4525 4525 rqst.rq_iter = rdata->subreq.io_iter; 4526 - rqst.rq_iter_size = iov_iter_count(&rdata->subreq.io_iter); 4527 4526 } 4528 4527 4529 4528 WARN_ONCE(rdata->server != mid->server, ··· 4913 4914 if (rc) 4914 4915 goto out; 4915 4916 4917 + rqst.rq_iov = iov; 4918 + rqst.rq_iter = wdata->subreq.io_iter; 4919 + 4920 + rqst.rq_iov[0].iov_len = total_len - 1; 4921 + rqst.rq_iov[0].iov_base = (char *)req; 4922 + rqst.rq_nvec += 1; 4923 + 4916 4924 if (smb3_encryption_required(tcon)) 4917 4925 flags |= CIFS_TRANSFORM_REQ; 4918 4926 ··· 4931 4925 req->WriteChannelInfoOffset = 0; 4932 4926 req->WriteChannelInfoLength = 0; 4933 4927 req->Channel = SMB2_CHANNEL_NONE; 4928 + req->Length = cpu_to_le32(io_parms->length); 4934 4929 req->Offset = cpu_to_le64(io_parms->offset); 4935 4930 req->DataOffset = cpu_to_le16( 4936 4931 offsetof(struct smb2_write_req, Buffer)); ··· 4951 4944 */ 4952 4945 if (smb3_use_rdma_offload(io_parms)) { 4953 4946 struct smbd_buffer_descriptor_v1 *v1; 4954 - size_t data_size = iov_iter_count(&wdata->subreq.io_iter); 4955 4947 bool need_invalidate = server->dialect == SMB30_PROT_ID; 4956 4948 4957 4949 wdata->mr = smbd_register_mr(server->smbd_conn, &wdata->subreq.io_iter, ··· 4959 4953 rc = -EAGAIN; 4960 4954 goto async_writev_out; 4961 4955 } 4956 + /* For RDMA read, I/O size is in RemainingBytes not in Length */ 4957 + req->RemainingBytes = req->Length; 4962 4958 req->Length = 0; 4963 4959 req->DataOffset = 0; 4964 - req->RemainingBytes = cpu_to_le32(data_size); 4965 4960 req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE; 4966 4961 if (need_invalidate) 4967 4962 req->Channel = SMB2_CHANNEL_RDMA_V1; ··· 4974 4967 v1->offset = cpu_to_le64(wdata->mr->mr->iova); 4975 4968 v1->token = cpu_to_le32(wdata->mr->mr->rkey); 4976 4969 v1->length = cpu_to_le32(wdata->mr->mr->length); 4970 + 4971 + rqst.rq_iov[0].iov_len += sizeof(*v1); 4972 + 4973 + /* 4974 + * We keep wdata->subreq.io_iter, 4975 + * but we have to truncate rqst.rq_iter 4976 + */ 4977 + iov_iter_truncate(&rqst.rq_iter, 0); 4977 4978 } 4978 4979 #endif 4979 - iov[0].iov_len = total_len - 1; 4980 - iov[0].iov_base = (char *)req; 4981 4980 4982 - rqst.rq_iov = iov; 4983 - rqst.rq_nvec = 1; 4984 - rqst.rq_iter = wdata->subreq.io_iter; 4985 - rqst.rq_iter_size = iov_iter_count(&rqst.rq_iter); 4986 4981 if (test_bit(NETFS_SREQ_RETRYING, &wdata->subreq.flags)) 4987 4982 smb2_set_replay(server, &rqst); 4988 - #ifdef CONFIG_CIFS_SMB_DIRECT 4989 - if (wdata->mr) 4990 - iov[0].iov_len += sizeof(struct smbd_buffer_descriptor_v1); 4991 - #endif 4992 - cifs_dbg(FYI, "async write at %llu %u bytes iter=%zx\n", 4993 - io_parms->offset, io_parms->length, iov_iter_count(&rqst.rq_iter)); 4994 4983 4995 - #ifdef CONFIG_CIFS_SMB_DIRECT 4996 - /* For RDMA read, I/O size is in RemainingBytes not in Length */ 4997 - if (!wdata->mr) 4998 - req->Length = cpu_to_le32(io_parms->length); 4999 - #else 5000 - req->Length = cpu_to_le32(io_parms->length); 5001 - #endif 4984 + cifs_dbg(FYI, "async write at %llu %u bytes iter=%zx\n", 4985 + io_parms->offset, io_parms->length, iov_iter_count(&wdata->subreq.io_iter)); 5002 4986 5003 4987 if 
(wdata->credits.value > 0) { 5004 4988 shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->subreq.len,
+1 -1
fs/smb/server/oplock.c
··· 1510 1510 * parse_lease_state() - parse lease context containted in file open request 1511 1511 * @open_req: buffer containing smb2 file open(create) request 1512 1512 * 1513 - * Return: oplock state, -ENOENT if create lease context not found 1513 + * Return: allocated lease context object on success, otherwise NULL 1514 1514 */ 1515 1515 struct lease_ctx_info *parse_lease_state(void *open_req) 1516 1516 {
+9 -9
fs/smb/server/smb2pdu.c
··· 519 519 * smb2_allocate_rsp_buf() - allocate smb2 response buffer 520 520 * @work: smb work containing smb request buffer 521 521 * 522 - * Return: 0 on success, otherwise -ENOMEM 522 + * Return: 0 on success, otherwise error 523 523 */ 524 524 int smb2_allocate_rsp_buf(struct ksmbd_work *work) 525 525 { ··· 2770 2770 } 2771 2771 } 2772 2772 2773 - if (((lc && (lc->req_state & SMB2_LEASE_HANDLE_CACHING_LE)) || 2774 - req_op_level == SMB2_OPLOCK_LEVEL_BATCH)) { 2773 + if ((lc && (lc->req_state & SMB2_LEASE_HANDLE_CACHING_LE)) || 2774 + req_op_level == SMB2_OPLOCK_LEVEL_BATCH) { 2775 2775 dh_info->CreateGuid = 2776 2776 durable_v2_blob->CreateGuid; 2777 2777 dh_info->persistent = ··· 2791 2791 goto out; 2792 2792 } 2793 2793 2794 - if (((lc && (lc->req_state & SMB2_LEASE_HANDLE_CACHING_LE)) || 2795 - req_op_level == SMB2_OPLOCK_LEVEL_BATCH)) { 2794 + if ((lc && (lc->req_state & SMB2_LEASE_HANDLE_CACHING_LE)) || 2795 + req_op_level == SMB2_OPLOCK_LEVEL_BATCH) { 2796 2796 ksmbd_debug(SMB, "Request for durable open\n"); 2797 2797 dh_info->type = dh_idx; 2798 2798 } ··· 3096 3096 goto err_out; 3097 3097 } 3098 3098 3099 - file_present = true; 3100 3099 idmap = mnt_idmap(path.mnt); 3101 3100 } else { 3102 3101 if (rc != -ENOENT) ··· 3413 3414 goto err_out1; 3414 3415 } 3415 3416 } else { 3416 - if (req_op_level == SMB2_OPLOCK_LEVEL_LEASE) { 3417 + if (req_op_level == SMB2_OPLOCK_LEVEL_LEASE && lc) { 3417 3418 if (S_ISDIR(file_inode(filp)->i_mode)) { 3418 3419 lc->req_state &= ~SMB2_LEASE_WRITE_CACHING_LE; 3419 3420 lc->is_dir = true; ··· 3712 3713 kfree(name); 3713 3714 kfree(lc); 3714 3715 3715 - return 0; 3716 + return rc; 3716 3717 } 3717 3718 3718 3719 static int readdir_info_level_struct_sz(int info_level) ··· 4408 4409 rsp->OutputBufferLength = cpu_to_le32(0); 4409 4410 rsp->Buffer[0] = 0; 4410 4411 rc = ksmbd_iov_pin_rsp(work, (void *)rsp, 4411 - sizeof(struct smb2_query_directory_rsp)); 4412 + offsetof(struct smb2_query_directory_rsp, Buffer) 4413 + + 1); 4412 4414 if (rc) 4413 4415 goto err_out; 4414 4416 } else {
+2 -2
fs/super.c
··· 1802 1802 return error; 1803 1803 1804 1804 if (!fc->root) { 1805 - pr_err("Filesystem %s get_tree() didn't set fc->root\n", 1806 - fc->fs_type->name); 1805 + pr_err("Filesystem %s get_tree() didn't set fc->root, returned %i\n", 1806 + fc->fs_type->name, error); 1807 1807 /* We don't know what the locking state of the superblock is - 1808 1808 * if there is a superblock. 1809 1809 */
+1
include/acpi/video.h
··· 50 50 acpi_backlight_native, 51 51 acpi_backlight_nvidia_wmi_ec, 52 52 acpi_backlight_apple_gmux, 53 + acpi_backlight_dell_uart, 53 54 }; 54 55 55 56 #if IS_ENABLED(CONFIG_ACPI_VIDEO)
+1 -6
include/linux/blkdev.h
··· 1296 1296 1297 1297 static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev) 1298 1298 { 1299 - struct request_queue *q = bdev_get_queue(bdev); 1300 - 1301 - if (q) 1302 - return q->limits.max_write_zeroes_sectors; 1303 - 1304 - return 0; 1299 + return bdev_get_queue(bdev)->limits.max_write_zeroes_sectors; 1305 1300 } 1306 1301 1307 1302 static inline bool bdev_nonrot(struct block_device *bdev)
+1
include/linux/fs.h
··· 210 210 #define ATTR_OPEN (1 << 15) /* Truncating from open(O_TRUNC) */ 211 211 #define ATTR_TIMES_SET (1 << 16) 212 212 #define ATTR_TOUCH (1 << 17) 213 + #define ATTR_DELEG (1 << 18) /* Delegated attrs. Don't break write delegations */ 213 214 214 215 /* 215 216 * Whiteout is represented by a char device. The following constants define the
+1 -1
include/net/bonding.h
··· 260 260 #ifdef CONFIG_XFRM_OFFLOAD 261 261 struct list_head ipsec_list; 262 262 /* protecting ipsec_list */ 263 - spinlock_t ipsec_lock; 263 + struct mutex ipsec_lock; 264 264 #endif /* CONFIG_XFRM_OFFLOAD */ 265 265 struct bpf_prog *xdp_prog; 266 266 };
+1 -1
include/net/busy_poll.h
··· 68 68 static inline unsigned long busy_loop_current_time(void) 69 69 { 70 70 #ifdef CONFIG_NET_RX_BUSY_POLL 71 - return (unsigned long)(local_clock() >> 10); 71 + return (unsigned long)(ktime_get_ns() >> 10); 72 72 #else 73 73 return 0; 74 74 #endif
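Either clock source ends up in the same roughly-microsecond units, since >> 10 divides nanoseconds by 1024 rather than 1000; for instance:

#include <stdio.h>

int main(void)
{
	unsigned long long ns = 1000000;	/* 1 ms */
	/* busy-poll "time units" are ns >> 10, i.e. approximately microseconds */
	printf("%llu\n", ns >> 10);		/* prints 976 */
	return 0;
}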
+6 -4
include/net/netfilter/nf_tables_ipv4.h
··· 19 19 static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt) 20 20 { 21 21 struct iphdr *iph, _iph; 22 - u32 len, thoff; 22 + u32 len, thoff, skb_len; 23 23 24 24 iph = skb_header_pointer(pkt->skb, skb_network_offset(pkt->skb), 25 25 sizeof(*iph), &_iph); ··· 30 30 return -1; 31 31 32 32 len = iph_totlen(pkt->skb, iph); 33 - thoff = skb_network_offset(pkt->skb) + (iph->ihl * 4); 34 - if (pkt->skb->len < len) 33 + thoff = iph->ihl * 4; 34 + skb_len = pkt->skb->len - skb_network_offset(pkt->skb); 35 + 36 + if (skb_len < len) 35 37 return -1; 36 38 else if (len < thoff) 37 39 return -1; ··· 42 40 43 41 pkt->flags = NFT_PKTINFO_L4PROTO; 44 42 pkt->tprot = iph->protocol; 45 - pkt->thoff = thoff; 43 + pkt->thoff = skb_network_offset(pkt->skb) + thoff; 46 44 pkt->fragoff = ntohs(iph->frag_off) & IP_OFFSET; 47 45 48 46 return 0;
+3 -2
include/net/netfilter/nf_tables_ipv6.h
··· 31 31 struct ipv6hdr *ip6h, _ip6h; 32 32 unsigned int thoff = 0; 33 33 unsigned short frag_off; 34 + u32 pkt_len, skb_len; 34 35 int protohdr; 35 - u32 pkt_len; 36 36 37 37 ip6h = skb_header_pointer(pkt->skb, skb_network_offset(pkt->skb), 38 38 sizeof(*ip6h), &_ip6h); ··· 43 43 return -1; 44 44 45 45 pkt_len = ntohs(ip6h->payload_len); 46 - if (pkt_len + sizeof(*ip6h) > pkt->skb->len) 46 + skb_len = pkt->skb->len - skb_network_offset(pkt->skb); 47 + if (pkt_len + sizeof(*ip6h) > skb_len) 47 48 return -1; 48 49 49 50 protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
+1 -1
include/scsi/scsi_cmnd.h
··· 234 234 235 235 static inline unsigned int scsi_logical_block_count(struct scsi_cmnd *scmd) 236 236 { 237 - unsigned int shift = ilog2(scmd->device->sector_size) - SECTOR_SHIFT; 237 + unsigned int shift = ilog2(scmd->device->sector_size); 238 238 239 239 return blk_rq_bytes(scsi_cmd_to_rq(scmd)) >> shift; 240 240 }
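With the fix, the request size in bytes is shifted by log2 of the device sector size; a worked example for a 4 KiB request on a 512-byte-sector device, with ilog2_u() as a local stand-in for the kernel's ilog2():

#include <stdio.h>

/* Local stand-in for ilog2(): log2 of a power of two. */
static unsigned int ilog2_u(unsigned int v)
{
	unsigned int r = 0;
	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int bytes = 4096, sector_size = 512;

	/* Fixed formula: 4096 >> 9 == 8 logical blocks. */
	printf("%u\n", bytes >> ilog2_u(sector_size));

	/* The old formula also subtracted SECTOR_SHIFT (9), so the shift was
	 * 0 here and the result (4096) was bytes rather than blocks. */
	printf("%u\n", bytes >> (ilog2_u(sector_size) - 9));
	return 0;
}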
+36
include/trace/events/rpcrdma.h
··· 2277 2277 DEFINE_CLIENT_DEVICE_EVENT(rpcrdma_client_wait_on); 2278 2278 DEFINE_CLIENT_DEVICE_EVENT(rpcrdma_client_remove_one_done); 2279 2279 2280 + DECLARE_EVENT_CLASS(rpcrdma_client_register_class, 2281 + TP_PROTO( 2282 + const struct ib_device *device, 2283 + const struct rpcrdma_notification *rn 2284 + ), 2285 + 2286 + TP_ARGS(device, rn), 2287 + 2288 + TP_STRUCT__entry( 2289 + __string(name, device->name) 2290 + __field(void *, callback) 2291 + __field(u32, index) 2292 + ), 2293 + 2294 + TP_fast_assign( 2295 + __assign_str(name); 2296 + __entry->callback = rn->rn_done; 2297 + __entry->index = rn->rn_index; 2298 + ), 2299 + 2300 + TP_printk("device=%s index=%u done callback=%pS\n", 2301 + __get_str(name), __entry->index, __entry->callback 2302 + ) 2303 + ); 2304 + 2305 + #define DEFINE_CLIENT_REGISTER_EVENT(name) \ 2306 + DEFINE_EVENT(rpcrdma_client_register_class, name, \ 2307 + TP_PROTO( \ 2308 + const struct ib_device *device, \ 2309 + const struct rpcrdma_notification *rn \ 2310 + ), \ 2311 + TP_ARGS(device, rn)) 2312 + 2313 + DEFINE_CLIENT_REGISTER_EVENT(rpcrdma_client_register); 2314 + DEFINE_CLIENT_REGISTER_EVENT(rpcrdma_client_unregister); 2315 + 2280 2316 #endif /* _TRACE_RPCRDMA_H */ 2281 2317 2282 2318 #include <trace/define_trace.h>
+4 -4
include/uapi/drm/xe_drm.h
··· 1590 1590 * b. Counter select c. Counter size and d. BC report. Also refer to the 1591 1591 * oa_formats array in drivers/gpu/drm/xe/xe_oa.c. 1592 1592 */ 1593 - #define DRM_XE_OA_FORMAT_MASK_FMT_TYPE (0xff << 0) 1594 - #define DRM_XE_OA_FORMAT_MASK_COUNTER_SEL (0xff << 8) 1595 - #define DRM_XE_OA_FORMAT_MASK_COUNTER_SIZE (0xff << 16) 1596 - #define DRM_XE_OA_FORMAT_MASK_BC_REPORT (0xff << 24) 1593 + #define DRM_XE_OA_FORMAT_MASK_FMT_TYPE (0xffu << 0) 1594 + #define DRM_XE_OA_FORMAT_MASK_COUNTER_SEL (0xffu << 8) 1595 + #define DRM_XE_OA_FORMAT_MASK_COUNTER_SIZE (0xffu << 16) 1596 + #define DRM_XE_OA_FORMAT_MASK_BC_REPORT (0xffu << 24) 1597 1597 1598 1598 /** 1599 1599 * @DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT: Requests periodic OA unit
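The added `u` suffix matters because 0xff << 24 is evaluated as a signed int: the bit pattern lands in the sign bit and sign-extends when the mask is later widened to 64 bits (strictly, that signed shift is undefined). A small illustration of the typical two's-complement outcome:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Signed shift: typically yields a negative int that sign-extends. */
	uint64_t signed_mask   = (uint64_t)(0xff  << 24);
	/* Unsigned shift: stays exactly 0xff000000 when widened. */
	uint64_t unsigned_mask = (uint64_t)(0xffu << 24);

	printf("%#llx\n", (unsigned long long)signed_mask);	/* 0xffffffffff000000 */
	printf("%#llx\n", (unsigned long long)unsigned_mask);	/* 0xff000000 */
	return 0;
}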
+8
include/ufs/ufshcd.h
··· 676 676 * the standard best practice for managing keys). 677 677 */ 678 678 UFSHCD_QUIRK_KEYS_IN_PRDT = 1 << 24, 679 + 680 + /* 681 + * This quirk indicates that the controller reports the value 1 (not 682 + * supported) in the Legacy Single DoorBell Support (LSDBS) bit of the 683 + * Controller Capabilities register although it supports the legacy 684 + * single doorbell mode. 685 + */ 686 + UFSHCD_QUIRK_BROKEN_LSDBS_CAP = 1 << 25, 679 687 }; 680 688 681 689 enum ufshcd_caps {
+6 -3
io_uring/kbuf.c
··· 218 218 219 219 buf = io_ring_head_to_buf(br, head, bl->mask); 220 220 if (arg->max_len) { 221 - int needed; 221 + u32 len = READ_ONCE(buf->len); 222 + size_t needed; 222 223 223 - needed = (arg->max_len + buf->len - 1) / buf->len; 224 - needed = min(needed, PEEK_MAX_IMPORT); 224 + if (unlikely(!len)) 225 + return -ENOBUFS; 226 + needed = (arg->max_len + len - 1) / len; 227 + needed = min_not_zero(needed, (size_t) PEEK_MAX_IMPORT); 225 228 if (nr_avail > needed) 226 229 nr_avail = needed; 227 230 }
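The computation is a plain ceiling division — how many buffers of the advertised length are needed to cover max_len — which is also why a zero buffer length must now be rejected up front:

#include <stdio.h>

/* Ceiling division: buffers of size len needed to cover max_len bytes. */
static size_t bufs_needed(size_t max_len, size_t len)
{
	return (max_len + len - 1) / len;
}

int main(void)
{
	printf("%zu\n", bufs_needed(4096, 4096));	/* 1 */
	printf("%zu\n", bufs_needed(4097, 4096));	/* 2 */
	/* len == 0 has to be caught beforehand (the new -ENOBUFS check),
	 * otherwise this expression divides by zero. */
	return 0;
}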
+21 -17
kernel/cgroup/cpuset.c
··· 233 233 static struct list_head remote_children; 234 234 235 235 /* 236 + * A flag to force sched domain rebuild at the end of an operation while 237 + * inhibiting it in the intermediate stages when set. Currently it is only 238 + * set in hotplug code. 239 + */ 240 + static bool force_sd_rebuild; 241 + 242 + /* 236 243 * Partition root states: 237 244 * 238 245 * 0 - member (not a partition root) ··· 1482 1475 clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); 1483 1476 } 1484 1477 1485 - if (rebuild_domains) 1478 + if (rebuild_domains && !force_sd_rebuild) 1486 1479 rebuild_sched_domains_locked(); 1487 1480 } 1488 1481 ··· 1840 1833 remote_partition_disable(child, tmp); 1841 1834 disable_cnt++; 1842 1835 } 1843 - if (disable_cnt) 1836 + if (disable_cnt && !force_sd_rebuild) 1844 1837 rebuild_sched_domains_locked(); 1845 1838 } 1846 1839 ··· 1998 1991 part_error = PERR_CPUSEMPTY; 1999 1992 goto write_error; 2000 1993 } 1994 + /* Check newmask again, whether cpus are available for parent/cs */ 1995 + nocpu |= tasks_nocpu_error(parent, cs, newmask); 2001 1996 2002 1997 /* 2003 1998 * partcmd_update with newmask: ··· 2449 2440 } 2450 2441 rcu_read_unlock(); 2451 2442 2452 - if (need_rebuild_sched_domains && !(flags & HIER_NO_SD_REBUILD)) 2443 + if (need_rebuild_sched_domains && !(flags & HIER_NO_SD_REBUILD) && 2444 + !force_sd_rebuild) 2453 2445 rebuild_sched_domains_locked(); 2454 2446 } 2455 2447 ··· 2533 2523 */ 2534 2524 if (!*buf) { 2535 2525 cpumask_clear(trialcs->cpus_allowed); 2536 - cpumask_clear(trialcs->effective_xcpus); 2526 + if (cpumask_empty(trialcs->exclusive_cpus)) 2527 + cpumask_clear(trialcs->effective_xcpus); 2537 2528 } else { 2538 2529 retval = cpulist_parse(buf, trialcs->cpus_allowed); 2539 2530 if (retval < 0) ··· 3112 3101 cs->flags = trialcs->flags; 3113 3102 spin_unlock_irq(&callback_lock); 3114 3103 3115 - if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) 3104 + if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed && 3105 + !force_sd_rebuild) 3116 3106 rebuild_sched_domains_locked(); 3117 3107 3118 3108 if (spread_flag_changed) ··· 4510 4498 update_tasks_nodemask(cs); 4511 4499 } 4512 4500 4513 - static bool force_rebuild; 4514 - 4515 4501 void cpuset_force_rebuild(void) 4516 4502 { 4517 - force_rebuild = true; 4503 + force_sd_rebuild = true; 4518 4504 } 4519 4505 4520 4506 /** ··· 4660 4650 !cpumask_empty(subpartitions_cpus); 4661 4651 mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems); 4662 4652 4663 - /* 4664 - * In the rare case that hotplug removes all the cpus in 4665 - * subpartitions_cpus, we assumed that cpus are updated. 4666 - */ 4667 - if (!cpus_updated && !cpumask_empty(subpartitions_cpus)) 4668 - cpus_updated = true; 4669 - 4670 4653 /* For v1, synchronize cpus_allowed to cpu_active_mask */ 4671 4654 if (cpus_updated) { 4655 + cpuset_force_rebuild(); 4672 4656 spin_lock_irq(&callback_lock); 4673 4657 if (!on_dfl) 4674 4658 cpumask_copy(top_cpuset.cpus_allowed, &new_cpus); ··· 4718 4714 } 4719 4715 4720 4716 /* rebuild sched domains if cpus_allowed has changed */ 4721 - if (cpus_updated || force_rebuild) { 4722 - force_rebuild = false; 4717 + if (force_sd_rebuild) { 4718 + force_sd_rebuild = false; 4723 4719 rebuild_sched_domains_cpuslocked(); 4724 4720 } 4725 4721
+3 -22
kernel/fork.c
··· 2053 2053 */ 2054 2054 int pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret) 2055 2055 { 2056 - if (!pid) 2056 + bool thread = flags & PIDFD_THREAD; 2057 + 2058 + if (!pid || !pid_has_task(pid, thread ? PIDTYPE_PID : PIDTYPE_TGID)) 2057 2059 return -EINVAL; 2058 - 2059 - scoped_guard(rcu) { 2060 - struct task_struct *tsk; 2061 - 2062 - if (flags & PIDFD_THREAD) 2063 - tsk = pid_task(pid, PIDTYPE_PID); 2064 - else 2065 - tsk = pid_task(pid, PIDTYPE_TGID); 2066 - if (!tsk) 2067 - return -EINVAL; 2068 - 2069 - /* Don't create pidfds for kernel threads for now. */ 2070 - if (tsk->flags & PF_KTHREAD) 2071 - return -EINVAL; 2072 - } 2073 2060 2074 2061 return __pidfd_prepare(pid, flags, ret); 2075 2062 } ··· 2402 2415 */ 2403 2416 if (clone_flags & CLONE_PIDFD) { 2404 2417 int flags = (clone_flags & CLONE_THREAD) ? PIDFD_THREAD : 0; 2405 - 2406 - /* Don't create pidfds for kernel threads for now. */ 2407 - if (args->kthread) { 2408 - retval = -EINVAL; 2409 - goto bad_fork_free_pid; 2410 - } 2411 2418 2412 2419 /* Note that no task has been attached to @pid yet. */ 2413 2420 retval = __pidfd_prepare(pid, flags, &pidfile);
+27 -23
kernel/workqueue.c
··· 377 377 378 378 /* hot fields used during command issue, aligned to cacheline */ 379 379 unsigned int flags ____cacheline_aligned; /* WQ: WQ_* flags */ 380 - struct pool_workqueue __percpu __rcu **cpu_pwq; /* I: per-cpu pwqs */ 380 + struct pool_workqueue __rcu * __percpu *cpu_pwq; /* I: per-cpu pwqs */ 381 381 struct wq_node_nr_active *node_nr_active[]; /* I: per-node nr_active */ 382 382 }; 383 383 ··· 897 897 898 898 static unsigned long shift_and_mask(unsigned long v, u32 shift, u32 bits) 899 899 { 900 - return (v >> shift) & ((1 << bits) - 1); 900 + return (v >> shift) & ((1U << bits) - 1); 901 901 } 902 902 903 903 static void work_offqd_unpack(struct work_offq_data *offqd, unsigned long data) ··· 3351 3351 set_pf_worker(false); 3352 3352 3353 3353 ida_free(&pool->worker_ida, worker->id); 3354 - WARN_ON_ONCE(!list_empty(&worker->entry)); 3355 3354 return 0; 3356 3355 } 3357 3356 ··· 4166 4167 static bool __flush_work(struct work_struct *work, bool from_cancel) 4167 4168 { 4168 4169 struct wq_barrier barr; 4169 - unsigned long data; 4170 4170 4171 4171 if (WARN_ON(!wq_online)) 4172 4172 return false; ··· 4183 4185 * was queued on a BH workqueue, we also know that it was running in the 4184 4186 * BH context and thus can be busy-waited. 4185 4187 */ 4186 - data = *work_data_bits(work); 4187 - if (from_cancel && 4188 - !WARN_ON_ONCE(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_BH)) { 4189 - /* 4190 - * On RT, prevent a live lock when %current preempted soft 4191 - * interrupt processing or prevents ksoftirqd from running by 4192 - * keeping flipping BH. If the BH work item runs on a different 4193 - * CPU then this has no effect other than doing the BH 4194 - * disable/enable dance for nothing. This is copied from 4195 - * kernel/softirq.c::tasklet_unlock_spin_wait(). 4196 - */ 4197 - while (!try_wait_for_completion(&barr.done)) { 4198 - if (IS_ENABLED(CONFIG_PREEMPT_RT)) { 4199 - local_bh_disable(); 4200 - local_bh_enable(); 4201 - } else { 4202 - cpu_relax(); 4188 + if (from_cancel) { 4189 + unsigned long data = *work_data_bits(work); 4190 + 4191 + if (!WARN_ON_ONCE(data & WORK_STRUCT_PWQ) && 4192 + (data & WORK_OFFQ_BH)) { 4193 + /* 4194 + * On RT, prevent a live lock when %current preempted 4195 + * soft interrupt processing or prevents ksoftirqd from 4196 + * running by keeping flipping BH. If the BH work item 4197 + * runs on a different CPU then this has no effect other 4198 + * than doing the BH disable/enable dance for nothing. 4199 + * This is copied from 4200 + * kernel/softirq.c::tasklet_unlock_spin_wait(). 4201 + */ 4202 + while (!try_wait_for_completion(&barr.done)) { 4203 + if (IS_ENABLED(CONFIG_PREEMPT_RT)) { 4204 + local_bh_disable(); 4205 + local_bh_enable(); 4206 + } else { 4207 + cpu_relax(); 4208 + } 4203 4209 } 4210 + goto out_destroy; 4204 4211 } 4205 - } else { 4206 - wait_for_completion(&barr.done); 4207 4212 } 4208 4213 4214 + wait_for_completion(&barr.done); 4215 + 4216 + out_destroy: 4209 4217 destroy_work_on_stack(&barr.work); 4210 4218 return true; 4211 4219 }
+4
lib/vdso/getrandom.c
··· 85 85 if (unlikely(((unsigned long)opaque_state & ~PAGE_MASK) + sizeof(*state) > PAGE_SIZE)) 86 86 return -EFAULT; 87 87 88 + /* Handle unexpected flags by falling back to the kernel. */ 89 + if (unlikely(flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))) 90 + goto fallback_syscall; 91 + 88 92 /* If the caller passes the wrong size, which might happen due to CRIU, fallback. */ 89 93 if (unlikely(opaque_len != sizeof(*state))) 90 94 goto fallback_syscall;
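The new test is the usual unknown-flag mask: any bit outside the three flags the vDSO implementation understands forces the regular syscall, so future flags degrade gracefully rather than being silently ignored. A tiny sketch using the uapi flag values (the 0x0100 bit below is an invented future flag):

#include <stdio.h>

#define GRND_NONBLOCK	0x0001
#define GRND_RANDOM	0x0002
#define GRND_INSECURE	0x0004
#define KNOWN_FLAGS	(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE)

int main(void)
{
	unsigned int ok  = GRND_NONBLOCK;
	unsigned int bad = GRND_NONBLOCK | 0x0100;	/* hypothetical future flag */

	printf("%d\n", !!(ok  & ~KNOWN_FLAGS));	/* 0: handled in the vDSO      */
	printf("%d\n", !!(bad & ~KNOWN_FLAGS));	/* 1: fall back to the syscall */
	return 0;
}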
+2 -2
mm/truncate.c
··· 157 157 if (folio_mapped(folio)) 158 158 unmap_mapping_folio(folio); 159 159 160 - if (folio_has_private(folio)) 160 + if (folio_needs_release(folio)) 161 161 folio_invalidate(folio, 0, folio_size(folio)); 162 162 163 163 /* ··· 219 219 if (!mapping_inaccessible(folio->mapping)) 220 220 folio_zero_range(folio, offset, length); 221 221 222 - if (folio_has_private(folio)) 222 + if (folio_needs_release(folio)) 223 223 folio_invalidate(folio, offset, length); 224 224 if (!folio_test_large(folio)) 225 225 return true;
+8 -2
net/bluetooth/hci_core.c
··· 2406 2406 /* To avoid a potential race with hci_unregister_dev. */ 2407 2407 hci_dev_hold(hdev); 2408 2408 2409 - if (action == PM_SUSPEND_PREPARE) 2409 + switch (action) { 2410 + case PM_HIBERNATION_PREPARE: 2411 + case PM_SUSPEND_PREPARE: 2410 2412 ret = hci_suspend_dev(hdev); 2411 - else if (action == PM_POST_SUSPEND) 2413 + break; 2414 + case PM_POST_HIBERNATION: 2415 + case PM_POST_SUSPEND: 2412 2416 ret = hci_resume_dev(hdev); 2417 + break; 2418 + } 2413 2419 2414 2420 if (ret) 2415 2421 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
+1 -1
net/core/net-sysfs.c
··· 235 235 if (!rtnl_trylock()) 236 236 return restart_syscall(); 237 237 238 - if (netif_running(netdev) && netif_device_present(netdev)) { 238 + if (netif_running(netdev)) { 239 239 struct ethtool_link_ksettings cmd; 240 240 241 241 if (!__ethtool_get_link_ksettings(netdev, &cmd))
+3 -1
net/core/pktgen.c
··· 3654 3654 struct pktgen_dev *pkt_dev = NULL; 3655 3655 int cpu = t->cpu; 3656 3656 3657 - WARN_ON(smp_processor_id() != cpu); 3657 + WARN_ON_ONCE(smp_processor_id() != cpu); 3658 3658 3659 3659 init_waitqueue_head(&t->queue); 3660 3660 complete(&t->start_done); ··· 3989 3989 goto remove; 3990 3990 } 3991 3991 3992 + cpus_read_lock(); 3992 3993 for_each_online_cpu(cpu) { 3993 3994 int err; 3994 3995 ··· 3998 3997 pr_warn("Cannot create thread for cpu %d (%d)\n", 3999 3998 cpu, err); 4000 3999 } 4000 + cpus_read_unlock(); 4001 4001 4002 4002 if (list_empty(&pn->pktgen_threads)) { 4003 4003 pr_err("Initialization failed for all threads\n");
+3
net/ethtool/ioctl.c
··· 442 442 if (!dev->ethtool_ops->get_link_ksettings) 443 443 return -EOPNOTSUPP; 444 444 445 + if (!netif_device_present(dev)) 446 + return -ENODEV; 447 + 445 448 memset(link_ksettings, 0, sizeof(*link_ksettings)); 446 449 return dev->ethtool_ops->get_link_ksettings(dev, link_ksettings); 447 450 }
+11 -7
net/ipv4/tcp.c
··· 4640 4640 /* Don't race with userspace socket closes such as tcp_close. */ 4641 4641 lock_sock(sk); 4642 4642 4643 + /* Avoid closing the same socket twice. */ 4644 + if (sk->sk_state == TCP_CLOSE) { 4645 + if (!has_current_bpf_ctx()) 4646 + release_sock(sk); 4647 + return -ENOENT; 4648 + } 4649 + 4643 4650 if (sk->sk_state == TCP_LISTEN) { 4644 4651 tcp_set_state(sk, TCP_CLOSE); 4645 4652 inet_csk_listen_stop(sk); ··· 4656 4649 local_bh_disable(); 4657 4650 bh_lock_sock(sk); 4658 4651 4659 - if (!sock_flag(sk, SOCK_DEAD)) { 4660 - if (tcp_need_reset(sk->sk_state)) 4661 - tcp_send_active_reset(sk, GFP_ATOMIC, 4662 - SK_RST_REASON_TCP_STATE); 4663 - tcp_done_with_error(sk, err); 4664 - } 4652 + if (tcp_need_reset(sk->sk_state)) 4653 + tcp_send_active_reset(sk, GFP_ATOMIC, 4654 + SK_RST_REASON_TCP_STATE); 4655 + tcp_done_with_error(sk, err); 4665 4656 4666 4657 bh_unlock_sock(sk); 4667 4658 local_bh_enable(); 4668 - tcp_write_queue_purge(sk); 4669 4659 if (!has_current_bpf_ctx()) 4670 4660 release_sock(sk); 4671 4661 return 0;
+1 -1
net/mac80211/mlme.c
··· 6664 6664 return true; 6665 6665 6666 6666 /* hidden SSID: zeroed out */ 6667 - if (memcmp(elems->ssid, zero_ssid, elems->ssid_len)) 6667 + if (!memcmp(elems->ssid, zero_ssid, elems->ssid_len)) 6668 6668 return false; 6669 6669 6670 6670 return memcmp(elems->ssid, cfg->ssid, cfg->ssid_len);
+3 -1
net/mac80211/tx.c
··· 5348 5348 if (beacon->tail) 5349 5349 skb_put_data(skb, beacon->tail, beacon->tail_len); 5350 5350 5351 - if (ieee80211_beacon_protect(skb, local, sdata, link) < 0) 5351 + if (ieee80211_beacon_protect(skb, local, sdata, link) < 0) { 5352 + dev_kfree_skb(skb); 5352 5353 return NULL; 5354 + } 5353 5355 5354 5356 ieee80211_beacon_get_finish(hw, vif, link, offs, beacon, skb, 5355 5357 chanctx_conf, csa_off_base);
+2 -2
net/mptcp/fastopen.c
··· 68 68 skb = skb_peek_tail(&sk->sk_receive_queue); 69 69 if (skb) { 70 70 WARN_ON_ONCE(MPTCP_SKB_CB(skb)->end_seq); 71 - pr_debug("msk %p moving seq %llx -> %llx end_seq %llx -> %llx", sk, 71 + pr_debug("msk %p moving seq %llx -> %llx end_seq %llx -> %llx\n", sk, 72 72 MPTCP_SKB_CB(skb)->map_seq, MPTCP_SKB_CB(skb)->map_seq + msk->ack_seq, 73 73 MPTCP_SKB_CB(skb)->end_seq, MPTCP_SKB_CB(skb)->end_seq + msk->ack_seq); 74 74 MPTCP_SKB_CB(skb)->map_seq += msk->ack_seq; 75 75 MPTCP_SKB_CB(skb)->end_seq += msk->ack_seq; 76 76 } 77 77 78 - pr_debug("msk=%p ack_seq=%llx", msk, msk->ack_seq); 78 + pr_debug("msk=%p ack_seq=%llx\n", msk, msk->ack_seq); 79 79 }
+25 -25
net/mptcp/options.c
··· 117 117 mp_opt->suboptions |= OPTION_MPTCP_CSUMREQD; 118 118 ptr += 2; 119 119 } 120 - pr_debug("MP_CAPABLE version=%x, flags=%x, optlen=%d sndr=%llu, rcvr=%llu len=%d csum=%u", 120 + pr_debug("MP_CAPABLE version=%x, flags=%x, optlen=%d sndr=%llu, rcvr=%llu len=%d csum=%u\n", 121 121 version, flags, opsize, mp_opt->sndr_key, 122 122 mp_opt->rcvr_key, mp_opt->data_len, mp_opt->csum); 123 123 break; ··· 131 131 ptr += 4; 132 132 mp_opt->nonce = get_unaligned_be32(ptr); 133 133 ptr += 4; 134 - pr_debug("MP_JOIN bkup=%u, id=%u, token=%u, nonce=%u", 134 + pr_debug("MP_JOIN bkup=%u, id=%u, token=%u, nonce=%u\n", 135 135 mp_opt->backup, mp_opt->join_id, 136 136 mp_opt->token, mp_opt->nonce); 137 137 } else if (opsize == TCPOLEN_MPTCP_MPJ_SYNACK) { ··· 142 142 ptr += 8; 143 143 mp_opt->nonce = get_unaligned_be32(ptr); 144 144 ptr += 4; 145 - pr_debug("MP_JOIN bkup=%u, id=%u, thmac=%llu, nonce=%u", 145 + pr_debug("MP_JOIN bkup=%u, id=%u, thmac=%llu, nonce=%u\n", 146 146 mp_opt->backup, mp_opt->join_id, 147 147 mp_opt->thmac, mp_opt->nonce); 148 148 } else if (opsize == TCPOLEN_MPTCP_MPJ_ACK) { 149 149 mp_opt->suboptions |= OPTION_MPTCP_MPJ_ACK; 150 150 ptr += 2; 151 151 memcpy(mp_opt->hmac, ptr, MPTCPOPT_HMAC_LEN); 152 - pr_debug("MP_JOIN hmac"); 152 + pr_debug("MP_JOIN hmac\n"); 153 153 } 154 154 break; 155 155 156 156 case MPTCPOPT_DSS: 157 - pr_debug("DSS"); 157 + pr_debug("DSS\n"); 158 158 ptr++; 159 159 160 160 /* we must clear 'mpc_map' be able to detect MP_CAPABLE ··· 169 169 mp_opt->ack64 = (flags & MPTCP_DSS_ACK64) != 0; 170 170 mp_opt->use_ack = (flags & MPTCP_DSS_HAS_ACK); 171 171 172 - pr_debug("data_fin=%d dsn64=%d use_map=%d ack64=%d use_ack=%d", 172 + pr_debug("data_fin=%d dsn64=%d use_map=%d ack64=%d use_ack=%d\n", 173 173 mp_opt->data_fin, mp_opt->dsn64, 174 174 mp_opt->use_map, mp_opt->ack64, 175 175 mp_opt->use_ack); ··· 207 207 ptr += 4; 208 208 } 209 209 210 - pr_debug("data_ack=%llu", mp_opt->data_ack); 210 + pr_debug("data_ack=%llu\n", mp_opt->data_ack); 211 211 } 212 212 213 213 if (mp_opt->use_map) { ··· 231 231 ptr += 2; 232 232 } 233 233 234 - pr_debug("data_seq=%llu subflow_seq=%u data_len=%u csum=%d:%u", 234 + pr_debug("data_seq=%llu subflow_seq=%u data_len=%u csum=%d:%u\n", 235 235 mp_opt->data_seq, mp_opt->subflow_seq, 236 236 mp_opt->data_len, !!(mp_opt->suboptions & OPTION_MPTCP_CSUMREQD), 237 237 mp_opt->csum); ··· 293 293 mp_opt->ahmac = get_unaligned_be64(ptr); 294 294 ptr += 8; 295 295 } 296 - pr_debug("ADD_ADDR%s: id=%d, ahmac=%llu, echo=%d, port=%d", 296 + pr_debug("ADD_ADDR%s: id=%d, ahmac=%llu, echo=%d, port=%d\n", 297 297 (mp_opt->addr.family == AF_INET6) ? 
"6" : "", 298 298 mp_opt->addr.id, mp_opt->ahmac, mp_opt->echo, ntohs(mp_opt->addr.port)); 299 299 break; ··· 309 309 mp_opt->rm_list.nr = opsize - TCPOLEN_MPTCP_RM_ADDR_BASE; 310 310 for (i = 0; i < mp_opt->rm_list.nr; i++) 311 311 mp_opt->rm_list.ids[i] = *ptr++; 312 - pr_debug("RM_ADDR: rm_list_nr=%d", mp_opt->rm_list.nr); 312 + pr_debug("RM_ADDR: rm_list_nr=%d\n", mp_opt->rm_list.nr); 313 313 break; 314 314 315 315 case MPTCPOPT_MP_PRIO: ··· 318 318 319 319 mp_opt->suboptions |= OPTION_MPTCP_PRIO; 320 320 mp_opt->backup = *ptr++ & MPTCP_PRIO_BKUP; 321 - pr_debug("MP_PRIO: prio=%d", mp_opt->backup); 321 + pr_debug("MP_PRIO: prio=%d\n", mp_opt->backup); 322 322 break; 323 323 324 324 case MPTCPOPT_MP_FASTCLOSE: ··· 329 329 mp_opt->rcvr_key = get_unaligned_be64(ptr); 330 330 ptr += 8; 331 331 mp_opt->suboptions |= OPTION_MPTCP_FASTCLOSE; 332 - pr_debug("MP_FASTCLOSE: recv_key=%llu", mp_opt->rcvr_key); 332 + pr_debug("MP_FASTCLOSE: recv_key=%llu\n", mp_opt->rcvr_key); 333 333 break; 334 334 335 335 case MPTCPOPT_RST: ··· 343 343 flags = *ptr++; 344 344 mp_opt->reset_transient = flags & MPTCP_RST_TRANSIENT; 345 345 mp_opt->reset_reason = *ptr; 346 - pr_debug("MP_RST: transient=%u reason=%u", 346 + pr_debug("MP_RST: transient=%u reason=%u\n", 347 347 mp_opt->reset_transient, mp_opt->reset_reason); 348 348 break; 349 349 ··· 354 354 ptr += 2; 355 355 mp_opt->suboptions |= OPTION_MPTCP_FAIL; 356 356 mp_opt->fail_seq = get_unaligned_be64(ptr); 357 - pr_debug("MP_FAIL: data_seq=%llu", mp_opt->fail_seq); 357 + pr_debug("MP_FAIL: data_seq=%llu\n", mp_opt->fail_seq); 358 358 break; 359 359 360 360 default: ··· 417 417 *size = TCPOLEN_MPTCP_MPC_SYN; 418 418 return true; 419 419 } else if (subflow->request_join) { 420 - pr_debug("remote_token=%u, nonce=%u", subflow->remote_token, 420 + pr_debug("remote_token=%u, nonce=%u\n", subflow->remote_token, 421 421 subflow->local_nonce); 422 422 opts->suboptions = OPTION_MPTCP_MPJ_SYN; 423 423 opts->join_id = subflow->local_id; ··· 500 500 *size = TCPOLEN_MPTCP_MPC_ACK; 501 501 } 502 502 503 - pr_debug("subflow=%p, local_key=%llu, remote_key=%llu map_len=%d", 503 + pr_debug("subflow=%p, local_key=%llu, remote_key=%llu map_len=%d\n", 504 504 subflow, subflow->local_key, subflow->remote_key, 505 505 data_len); 506 506 ··· 509 509 opts->suboptions = OPTION_MPTCP_MPJ_ACK; 510 510 memcpy(opts->hmac, subflow->hmac, MPTCPOPT_HMAC_LEN); 511 511 *size = TCPOLEN_MPTCP_MPJ_ACK; 512 - pr_debug("subflow=%p", subflow); 512 + pr_debug("subflow=%p\n", subflow); 513 513 514 514 /* we can use the full delegate action helper only from BH context 515 515 * If we are in process context - sk is flushing the backlog at ··· 675 675 676 676 *size = len; 677 677 if (drop_other_suboptions) { 678 - pr_debug("drop other suboptions"); 678 + pr_debug("drop other suboptions\n"); 679 679 opts->suboptions = 0; 680 680 681 681 /* note that e.g. 
DSS could have written into the memory ··· 695 695 } else { 696 696 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ECHOADDTX); 697 697 } 698 - pr_debug("addr_id=%d, ahmac=%llu, echo=%d, port=%d", 698 + pr_debug("addr_id=%d, ahmac=%llu, echo=%d, port=%d\n", 699 699 opts->addr.id, opts->ahmac, echo, ntohs(opts->addr.port)); 700 700 701 701 return true; ··· 726 726 opts->rm_list = rm_list; 727 727 728 728 for (i = 0; i < opts->rm_list.nr; i++) 729 - pr_debug("rm_list_ids[%d]=%d", i, opts->rm_list.ids[i]); 729 + pr_debug("rm_list_ids[%d]=%d\n", i, opts->rm_list.ids[i]); 730 730 MPTCP_ADD_STATS(sock_net(sk), MPTCP_MIB_RMADDRTX, opts->rm_list.nr); 731 731 return true; 732 732 } ··· 752 752 opts->suboptions |= OPTION_MPTCP_PRIO; 753 753 opts->backup = subflow->request_bkup; 754 754 755 - pr_debug("prio=%d", opts->backup); 755 + pr_debug("prio=%d\n", opts->backup); 756 756 757 757 return true; 758 758 } ··· 794 794 opts->suboptions |= OPTION_MPTCP_FASTCLOSE; 795 795 opts->rcvr_key = READ_ONCE(msk->remote_key); 796 796 797 - pr_debug("FASTCLOSE key=%llu", opts->rcvr_key); 797 + pr_debug("FASTCLOSE key=%llu\n", opts->rcvr_key); 798 798 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFASTCLOSETX); 799 799 return true; 800 800 } ··· 816 816 opts->suboptions |= OPTION_MPTCP_FAIL; 817 817 opts->fail_seq = subflow->map_seq; 818 818 819 - pr_debug("MP_FAIL fail_seq=%llu", opts->fail_seq); 819 + pr_debug("MP_FAIL fail_seq=%llu\n", opts->fail_seq); 820 820 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFAILTX); 821 821 822 822 return true; ··· 904 904 opts->csum_reqd = subflow_req->csum_reqd; 905 905 opts->allow_join_id0 = subflow_req->allow_join_id0; 906 906 *size = TCPOLEN_MPTCP_MPC_SYNACK; 907 - pr_debug("subflow_req=%p, local_key=%llu", 907 + pr_debug("subflow_req=%p, local_key=%llu\n", 908 908 subflow_req, subflow_req->local_key); 909 909 return true; 910 910 } else if (subflow_req->mp_join) { ··· 913 913 opts->join_id = subflow_req->local_id; 914 914 opts->thmac = subflow_req->thmac; 915 915 opts->nonce = subflow_req->local_nonce; 916 - pr_debug("req=%p, bkup=%u, id=%u, thmac=%llu, nonce=%u", 916 + pr_debug("req=%p, bkup=%u, id=%u, thmac=%llu, nonce=%u\n", 917 917 subflow_req, opts->backup, opts->join_id, 918 918 opts->thmac, opts->nonce); 919 919 *size = TCPOLEN_MPTCP_MPJ_SYNACK;
+17 -15
net/mptcp/pm.c
··· 19 19 { 20 20 u8 add_addr = READ_ONCE(msk->pm.addr_signal); 21 21 22 - pr_debug("msk=%p, local_id=%d, echo=%d", msk, addr->id, echo); 22 + pr_debug("msk=%p, local_id=%d, echo=%d\n", msk, addr->id, echo); 23 23 24 24 lockdep_assert_held(&msk->pm.lock); 25 25 ··· 45 45 { 46 46 u8 rm_addr = READ_ONCE(msk->pm.addr_signal); 47 47 48 - pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr); 48 + pr_debug("msk=%p, rm_list_nr=%d\n", msk, rm_list->nr); 49 49 50 50 if (rm_addr) { 51 51 MPTCP_ADD_STATS(sock_net((struct sock *)msk), ··· 66 66 { 67 67 struct mptcp_pm_data *pm = &msk->pm; 68 68 69 - pr_debug("msk=%p, token=%u side=%d", msk, READ_ONCE(msk->token), server_side); 69 + pr_debug("msk=%p, token=%u side=%d\n", msk, READ_ONCE(msk->token), server_side); 70 70 71 71 WRITE_ONCE(pm->server_side, server_side); 72 72 mptcp_event(MPTCP_EVENT_CREATED, msk, ssk, GFP_ATOMIC); ··· 90 90 91 91 subflows_max = mptcp_pm_get_subflows_max(msk); 92 92 93 - pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows, 93 + pr_debug("msk=%p subflows=%d max=%d allow=%d\n", msk, pm->subflows, 94 94 subflows_max, READ_ONCE(pm->accept_subflow)); 95 95 96 96 /* try to avoid acquiring the lock below */ ··· 114 114 static bool mptcp_pm_schedule_work(struct mptcp_sock *msk, 115 115 enum mptcp_pm_status new_status) 116 116 { 117 - pr_debug("msk=%p status=%x new=%lx", msk, msk->pm.status, 117 + pr_debug("msk=%p status=%x new=%lx\n", msk, msk->pm.status, 118 118 BIT(new_status)); 119 119 if (msk->pm.status & BIT(new_status)) 120 120 return false; ··· 129 129 struct mptcp_pm_data *pm = &msk->pm; 130 130 bool announce = false; 131 131 132 - pr_debug("msk=%p", msk); 132 + pr_debug("msk=%p\n", msk); 133 133 134 134 spin_lock_bh(&pm->lock); 135 135 ··· 153 153 154 154 void mptcp_pm_connection_closed(struct mptcp_sock *msk) 155 155 { 156 - pr_debug("msk=%p", msk); 156 + pr_debug("msk=%p\n", msk); 157 157 } 158 158 159 159 void mptcp_pm_subflow_established(struct mptcp_sock *msk) 160 160 { 161 161 struct mptcp_pm_data *pm = &msk->pm; 162 162 163 - pr_debug("msk=%p", msk); 163 + pr_debug("msk=%p\n", msk); 164 164 165 165 if (!READ_ONCE(pm->work_pending)) 166 166 return; ··· 212 212 struct mptcp_sock *msk = mptcp_sk(subflow->conn); 213 213 struct mptcp_pm_data *pm = &msk->pm; 214 214 215 - pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id, 215 + pr_debug("msk=%p remote_id=%d accept=%d\n", msk, addr->id, 216 216 READ_ONCE(pm->accept_addr)); 217 217 218 218 mptcp_event_addr_announced(ssk, addr); ··· 226 226 } else { 227 227 __MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP); 228 228 } 229 - } else if (!READ_ONCE(pm->accept_addr)) { 229 + /* id0 should not have a different address */ 230 + } else if ((addr->id == 0 && !mptcp_pm_nl_is_init_remote_addr(msk, addr)) || 231 + (addr->id > 0 && !READ_ONCE(pm->accept_addr))) { 230 232 mptcp_pm_announce_addr(msk, addr, true); 231 233 mptcp_pm_add_addr_send_ack(msk); 232 234 } else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) { ··· 245 243 { 246 244 struct mptcp_pm_data *pm = &msk->pm; 247 245 248 - pr_debug("msk=%p", msk); 246 + pr_debug("msk=%p\n", msk); 249 247 250 248 spin_lock_bh(&pm->lock); 251 249 ··· 269 267 struct mptcp_pm_data *pm = &msk->pm; 270 268 u8 i; 271 269 272 - pr_debug("msk=%p remote_ids_nr=%d", msk, rm_list->nr); 270 + pr_debug("msk=%p remote_ids_nr=%d\n", msk, rm_list->nr); 273 271 274 272 for (i = 0; i < rm_list->nr; i++) 275 273 mptcp_event_addr_removed(msk, rm_list->ids[i]); ··· 301 299 struct mptcp_subflow_context *subflow =
mptcp_subflow_ctx(sk); 302 300 struct mptcp_sock *msk = mptcp_sk(subflow->conn); 303 301 304 - pr_debug("fail_seq=%llu", fail_seq); 302 + pr_debug("fail_seq=%llu\n", fail_seq); 305 303 306 304 if (!READ_ONCE(msk->allow_infinite_fallback)) 307 305 return; 308 306 309 307 if (!subflow->fail_tout) { 310 - pr_debug("send MP_FAIL response and infinite map"); 308 + pr_debug("send MP_FAIL response and infinite map\n"); 311 309 312 310 subflow->send_mp_fail = 1; 313 311 subflow->send_infinite_map = 1; 314 312 tcp_send_ack(sk); 315 313 } else { 316 - pr_debug("MP_FAIL response received"); 314 + pr_debug("MP_FAIL response received\n"); 317 315 WRITE_ONCE(subflow->fail_tout, 0); 318 316 } 319 317 }
+74 -33
net/mptcp/pm_netlink.c
··· 130 130 { 131 131 struct mptcp_subflow_context *subflow; 132 132 struct mptcp_addr_info cur; 133 - struct sock_common *skc; 134 133 135 134 list_for_each_entry(subflow, list, node) { 136 - skc = (struct sock_common *)mptcp_subflow_tcp_sock(subflow); 135 + struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 137 136 138 - remote_address(skc, &cur); 137 + if (!((1 << inet_sk_state_load(ssk)) & 138 + (TCPF_ESTABLISHED | TCPF_SYN_SENT | TCPF_SYN_RECV))) 139 + continue; 140 + 141 + remote_address((struct sock_common *)ssk, &cur); 139 142 if (mptcp_addresses_equal(&cur, daddr, daddr->port)) 140 143 return true; 141 144 } ··· 290 287 struct mptcp_sock *msk = entry->sock; 291 288 struct sock *sk = (struct sock *)msk; 292 289 293 - pr_debug("msk=%p", msk); 290 + pr_debug("msk=%p\n", msk); 294 291 295 292 if (!msk) 296 293 return; ··· 309 306 spin_lock_bh(&msk->pm.lock); 310 307 311 308 if (!mptcp_pm_should_add_signal_addr(msk)) { 312 - pr_debug("retransmit ADD_ADDR id=%d", entry->addr.id); 309 + pr_debug("retransmit ADD_ADDR id=%d\n", entry->addr.id); 313 310 mptcp_pm_announce_addr(msk, &entry->addr, false); 314 311 mptcp_pm_add_addr_send_ack(msk); 315 312 entry->retrans_times++; ··· 390 387 struct sock *sk = (struct sock *)msk; 391 388 LIST_HEAD(free_list); 392 389 393 - pr_debug("msk=%p", msk); 390 + pr_debug("msk=%p\n", msk); 394 391 395 392 spin_lock_bh(&msk->pm.lock); 396 393 list_splice_init(&msk->pm.anno_list, &free_list); ··· 476 473 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 477 474 bool slow; 478 475 479 - pr_debug("send ack for %s", 476 + pr_debug("send ack for %s\n", 480 477 prio ? "mp_prio" : (mptcp_pm_should_add_signal(msk) ? "add_addr" : "rm_addr")); 481 478 482 479 slow = lock_sock_fast(ssk); ··· 588 585 589 586 __clear_bit(local.addr.id, msk->pm.id_avail_bitmap); 590 587 msk->pm.add_addr_signaled++; 588 + 589 + /* Special case for ID0: set the correct ID */ 590 + if (local.addr.id == msk->mpc_endpoint_id) 591 + local.addr.id = 0; 592 + 591 593 mptcp_pm_announce_addr(msk, &local.addr, false); 592 594 mptcp_pm_nl_addr_send_ack(msk); 593 595 ··· 615 607 616 608 fullmesh = !!(local.flags & MPTCP_PM_ADDR_FLAG_FULLMESH); 617 609 618 - msk->pm.local_addr_used++; 619 610 __clear_bit(local.addr.id, msk->pm.id_avail_bitmap); 611 + 612 + /* Special case for ID0: set the correct ID */ 613 + if (local.addr.id == msk->mpc_endpoint_id) 614 + local.addr.id = 0; 615 + else /* local_addr_used is not decr for ID 0 */ 616 + msk->pm.local_addr_used++; 617 + 620 618 nr = fill_remote_addresses_vec(msk, &local.addr, fullmesh, addrs); 621 619 if (nr == 0) 622 620 continue; ··· 722 708 add_addr_accept_max = mptcp_pm_get_add_addr_accept_max(msk); 723 709 subflows_max = mptcp_pm_get_subflows_max(msk); 724 710 725 - pr_debug("accepted %d:%d remote family %d", 711 + pr_debug("accepted %d:%d remote family %d\n", 726 712 msk->pm.add_addr_accepted, add_addr_accept_max, 727 713 msk->pm.remote.family); 728 714 ··· 751 737 spin_lock_bh(&msk->pm.lock); 752 738 753 739 if (sf_created) { 754 - msk->pm.add_addr_accepted++; 740 + /* add_addr_accepted is not decr for ID 0 */ 741 + if (remote.id) 742 + msk->pm.add_addr_accepted++; 755 743 if (msk->pm.add_addr_accepted >= add_addr_accept_max || 756 744 msk->pm.subflows >= subflows_max) 757 745 WRITE_ONCE(msk->pm.accept_addr, false); 758 746 } 747 + } 748 + 749 + bool mptcp_pm_nl_is_init_remote_addr(struct mptcp_sock *msk, 750 + const struct mptcp_addr_info *remote) 751 + { 752 + struct mptcp_addr_info mpc_remote; 753 + 754 + remote_address((struct sock_common 
*)msk, &mpc_remote); 755 + return mptcp_addresses_equal(&mpc_remote, remote, remote->port); 759 756 } 760 757 761 758 void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk) ··· 780 755 !mptcp_pm_should_rm_signal(msk)) 781 756 return; 782 757 783 - subflow = list_first_entry_or_null(&msk->conn_list, typeof(*subflow), node); 784 - if (subflow) 785 - mptcp_pm_send_ack(msk, subflow, false, false); 758 + mptcp_for_each_subflow(msk, subflow) { 759 + if (__mptcp_subflow_active(subflow)) { 760 + mptcp_pm_send_ack(msk, subflow, false, false); 761 + break; 762 + } 763 + } 786 764 } 787 765 788 766 int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk, ··· 795 767 { 796 768 struct mptcp_subflow_context *subflow; 797 769 798 - pr_debug("bkup=%d", bkup); 770 + pr_debug("bkup=%d\n", bkup); 799 771 800 772 mptcp_for_each_subflow(msk, subflow) { 801 773 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); ··· 818 790 return -EINVAL; 819 791 } 820 792 821 - static bool mptcp_local_id_match(const struct mptcp_sock *msk, u8 local_id, u8 id) 822 - { 823 - return local_id == id || (!local_id && msk->mpc_endpoint_id == id); 824 - } 825 - 826 793 static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk, 827 794 const struct mptcp_rm_list *rm_list, 828 795 enum linux_mptcp_mib_field rm_type) ··· 826 803 struct sock *sk = (struct sock *)msk; 827 804 u8 i; 828 805 829 - pr_debug("%s rm_list_nr %d", 806 + pr_debug("%s rm_list_nr %d\n", 830 807 rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow", rm_list->nr); 831 808 832 809 msk_owned_by_me(msk); ··· 850 827 int how = RCV_SHUTDOWN | SEND_SHUTDOWN; 851 828 u8 id = subflow_get_local_id(subflow); 852 829 830 + if (inet_sk_state_load(ssk) == TCP_CLOSE) 831 + continue; 853 832 if (rm_type == MPTCP_MIB_RMADDR && remote_id != rm_id) 854 833 continue; 855 - if (rm_type == MPTCP_MIB_RMSUBFLOW && !mptcp_local_id_match(msk, id, rm_id)) 834 + if (rm_type == MPTCP_MIB_RMSUBFLOW && id != rm_id) 856 835 continue; 857 836 858 - pr_debug(" -> %s rm_list_ids[%d]=%u local_id=%u remote_id=%u mpc_id=%u", 837 + pr_debug(" -> %s rm_list_ids[%d]=%u local_id=%u remote_id=%u mpc_id=%u\n", 859 838 rm_type == MPTCP_MIB_RMADDR ?
"address" : "subflow", 860 839 i, rm_id, id, remote_id, msk->mpc_endpoint_id); 861 840 spin_unlock_bh(&msk->pm.lock); ··· 914 889 915 890 spin_lock_bh(&msk->pm.lock); 916 891 917 - pr_debug("msk=%p status=%x", msk, pm->status); 892 + pr_debug("msk=%p status=%x\n", msk, pm->status); 918 893 if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) { 919 894 pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED); 920 895 mptcp_pm_nl_add_addr_received(msk); ··· 1332 1307 return pm_nl_get_pernet(genl_info_net(info)); 1333 1308 } 1334 1309 1335 - static int mptcp_nl_add_subflow_or_signal_addr(struct net *net) 1310 + static int mptcp_nl_add_subflow_or_signal_addr(struct net *net, 1311 + struct mptcp_addr_info *addr) 1336 1312 { 1337 1313 struct mptcp_sock *msk; 1338 1314 long s_slot = 0, s_num = 0; 1339 1315 1340 1316 while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) { 1341 1317 struct sock *sk = (struct sock *)msk; 1318 + struct mptcp_addr_info mpc_addr; 1342 1319 1343 1320 if (!READ_ONCE(msk->fully_established) || 1344 1321 mptcp_pm_is_userspace(msk)) 1345 1322 goto next; 1346 1323 1324 + /* if the endp linked to the init sf is re-added with a != ID */ 1325 + mptcp_local_address((struct sock_common *)msk, &mpc_addr); 1326 + 1347 1327 lock_sock(sk); 1348 1328 spin_lock_bh(&msk->pm.lock); 1329 + if (mptcp_addresses_equal(addr, &mpc_addr, addr->port)) 1330 + msk->mpc_endpoint_id = addr->id; 1349 1331 mptcp_pm_create_subflow_or_signal_addr(msk); 1350 1332 spin_unlock_bh(&msk->pm.lock); 1351 1333 release_sock(sk); ··· 1425 1393 goto out_free; 1426 1394 } 1427 1395 1428 - mptcp_nl_add_subflow_or_signal_addr(sock_net(skb->sk)); 1396 + mptcp_nl_add_subflow_or_signal_addr(sock_net(skb->sk), &entry->addr); 1429 1397 return 0; 1430 1398 1431 1399 out_free: ··· 1470 1438 return false; 1471 1439 } 1472 1440 1441 + static u8 mptcp_endp_get_local_id(struct mptcp_sock *msk, 1442 + const struct mptcp_addr_info *addr) 1443 + { 1444 + return msk->mpc_endpoint_id == addr->id ? 
0 : addr->id; 1445 + } 1446 + 1473 1447 static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk, 1474 1448 const struct mptcp_addr_info *addr, 1475 1449 bool force) ··· 1483 1445 struct mptcp_rm_list list = { .nr = 0 }; 1484 1446 bool ret; 1485 1447 1486 - list.ids[list.nr++] = addr->id; 1448 + list.ids[list.nr++] = mptcp_endp_get_local_id(msk, addr); 1487 1449 1488 1450 ret = remove_anno_list_by_saddr(msk, addr); 1489 1451 if (ret || force) { ··· 1510 1472 const struct mptcp_pm_addr_entry *entry) 1511 1473 { 1512 1474 const struct mptcp_addr_info *addr = &entry->addr; 1513 - struct mptcp_rm_list list = { .nr = 0 }; 1475 + struct mptcp_rm_list list = { .nr = 1 }; 1514 1476 long s_slot = 0, s_num = 0; 1515 1477 struct mptcp_sock *msk; 1516 1478 1517 - pr_debug("remove_id=%d", addr->id); 1518 - 1519 - list.ids[list.nr++] = addr->id; 1479 + pr_debug("remove_id=%d\n", addr->id); 1520 1480 1521 1481 while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) { 1522 1482 struct sock *sk = (struct sock *)msk; ··· 1533 1497 mptcp_pm_remove_anno_addr(msk, addr, remove_subflow && 1534 1498 !(entry->flags & MPTCP_PM_ADDR_FLAG_IMPLICIT)); 1535 1499 1500 + list.ids[0] = mptcp_endp_get_local_id(msk, addr); 1536 1501 if (remove_subflow) { 1537 1502 spin_lock_bh(&msk->pm.lock); 1538 1503 mptcp_pm_nl_rm_subflow_received(msk, &list); ··· 1546 1509 spin_unlock_bh(&msk->pm.lock); 1547 1510 } 1548 1511 1512 + if (msk->mpc_endpoint_id == entry->addr.id) 1513 + msk->mpc_endpoint_id = 0; 1549 1514 release_sock(sk); 1550 1515 1551 1516 next: ··· 1642 1603 return ret; 1643 1604 } 1644 1605 1606 + /* Called from the userspace PM only */ 1645 1607 void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list) 1646 1608 { 1647 1609 struct mptcp_rm_list alist = { .nr = 0 }; ··· 1671 1631 } 1672 1632 } 1673 1633 1634 + /* Called from the in-kernel PM only */ 1674 1635 static void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk, 1675 1636 struct list_head *rm_list) 1676 1637 { ··· 1681 1640 list_for_each_entry(entry, rm_list, list) { 1682 1641 if (slist.nr < MPTCP_RM_IDS_MAX && 1683 1642 lookup_subflow_by_saddr(&msk->conn_list, &entry->addr)) 1684 - slist.ids[slist.nr++] = entry->addr.id; 1643 + slist.ids[slist.nr++] = mptcp_endp_get_local_id(msk, &entry->addr); 1685 1644 1686 1645 if (alist.nr < MPTCP_RM_IDS_MAX && 1687 1646 remove_anno_list_by_saddr(msk, &entry->addr)) 1688 - alist.ids[alist.nr++] = entry->addr.id; 1647 + alist.ids[alist.nr++] = mptcp_endp_get_local_id(msk, &entry->addr); 1689 1648 } 1690 1649 1691 1650 spin_lock_bh(&msk->pm.lock); ··· 1982 1941 { 1983 1942 struct mptcp_rm_list list = { .nr = 0 }; 1984 1943 1985 - list.ids[list.nr++] = addr->id; 1944 + list.ids[list.nr++] = mptcp_endp_get_local_id(msk, addr); 1986 1945 1987 1946 spin_lock_bh(&msk->pm.lock); 1988 1947 mptcp_pm_nl_rm_subflow_received(msk, &list);
+37 -28
net/mptcp/protocol.c
··· 139 139 !skb_try_coalesce(to, from, &fragstolen, &delta)) 140 140 return false; 141 141 142 - pr_debug("colesced seq %llx into %llx new len %d new end seq %llx", 142 + pr_debug("colesced seq %llx into %llx new len %d new end seq %llx\n", 143 143 MPTCP_SKB_CB(from)->map_seq, MPTCP_SKB_CB(to)->map_seq, 144 144 to->len, MPTCP_SKB_CB(from)->end_seq); 145 145 MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq; ··· 217 217 end_seq = MPTCP_SKB_CB(skb)->end_seq; 218 218 max_seq = atomic64_read(&msk->rcv_wnd_sent); 219 219 220 - pr_debug("msk=%p seq=%llx limit=%llx empty=%d", msk, seq, max_seq, 220 + pr_debug("msk=%p seq=%llx limit=%llx empty=%d\n", msk, seq, max_seq, 221 221 RB_EMPTY_ROOT(&msk->out_of_order_queue)); 222 222 if (after64(end_seq, max_seq)) { 223 223 /* out of window */ ··· 643 643 } 644 644 } 645 645 646 - pr_debug("msk=%p ssk=%p", msk, ssk); 646 + pr_debug("msk=%p ssk=%p\n", msk, ssk); 647 647 tp = tcp_sk(ssk); 648 648 do { 649 649 u32 map_remaining, offset; ··· 724 724 u64 end_seq; 725 725 726 726 p = rb_first(&msk->out_of_order_queue); 727 - pr_debug("msk=%p empty=%d", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue)); 727 + pr_debug("msk=%p empty=%d\n", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue)); 728 728 while (p) { 729 729 skb = rb_to_skb(p); 730 730 if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq)) ··· 746 746 int delta = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq; 747 747 748 748 /* skip overlapping data, if any */ 749 - pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d", 749 + pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d\n", 750 750 MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq, 751 751 delta); 752 752 MPTCP_SKB_CB(skb)->offset += delta; ··· 1240 1240 size_t copy; 1241 1241 int i; 1242 1242 1243 - pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u", 1243 + pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u\n", 1244 1244 msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent); 1245 1245 1246 1246 if (WARN_ON_ONCE(info->sent > info->limit || ··· 1341 1341 mpext->use_map = 1; 1342 1342 mpext->dsn64 = 1; 1343 1343 1344 - pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d", 1344 + pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d\n", 1345 1345 mpext->data_seq, mpext->subflow_seq, mpext->data_len, 1346 1346 mpext->dsn64); 1347 1347 ··· 1892 1892 if (!msk->first_pending) 1893 1893 WRITE_ONCE(msk->first_pending, dfrag); 1894 1894 } 1895 - pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d", msk, 1895 + pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d\n", msk, 1896 1896 dfrag->data_seq, dfrag->data_len, dfrag->already_sent, 1897 1897 !dfrag_collapsed); 1898 1898 ··· 2248 2248 } 2249 2249 } 2250 2250 2251 - pr_debug("block timeout %ld", timeo); 2251 + pr_debug("block timeout %ld\n", timeo); 2252 2252 sk_wait_data(sk, &timeo, NULL); 2253 2253 } 2254 2254 ··· 2264 2264 } 2265 2265 } 2266 2266 2267 - pr_debug("msk=%p rx queue empty=%d:%d copied=%d", 2267 + pr_debug("msk=%p rx queue empty=%d:%d copied=%d\n", 2268 2268 msk, skb_queue_empty_lockless(&sk->sk_receive_queue), 2269 2269 skb_queue_empty(&msk->receive_queue), copied); 2270 2270 if (!(flags & MSG_PEEK)) ··· 2326 2326 continue; 2327 2327 } 2328 2328 2329 - if (subflow->backup) { 2329 + if (subflow->backup || subflow->request_bkup) { 2330 2330 if (!backup) 2331 2331 backup = ssk; 2332 2332 continue; ··· 2508 2508 void mptcp_close_ssk(struct sock *sk, struct sock *ssk, 2509 2509 struct mptcp_subflow_context *subflow) 2510 2510 { 2511 + /* 
The first subflow can already be closed and still in the list */ 2512 + if (subflow->close_event_done) 2513 + return; 2514 + 2515 + subflow->close_event_done = true; 2516 + 2511 2517 if (sk->sk_state == TCP_ESTABLISHED) 2512 2518 mptcp_event(MPTCP_EVENT_SUB_CLOSED, mptcp_sk(sk), ssk, GFP_KERNEL); 2513 2519 ··· 2539 2533 2540 2534 mptcp_for_each_subflow_safe(msk, subflow, tmp) { 2541 2535 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 2536 + int ssk_state = inet_sk_state_load(ssk); 2542 2537 2543 - if (inet_sk_state_load(ssk) != TCP_CLOSE) 2538 + if (ssk_state != TCP_CLOSE && 2539 + (ssk_state != TCP_CLOSE_WAIT || 2540 + inet_sk_state_load(sk) != TCP_ESTABLISHED)) 2544 2541 continue; 2545 2542 2546 2543 /* 'subflow_data_ready' will re-sched once rx queue is empty */ ··· 2723 2714 if (!ssk) 2724 2715 return; 2725 2716 2726 - pr_debug("MP_FAIL doesn't respond, reset the subflow"); 2717 + pr_debug("MP_FAIL doesn't respond, reset the subflow\n"); 2727 2718 2728 2719 slow = lock_sock_fast(ssk); 2729 2720 mptcp_subflow_reset(ssk); ··· 2897 2888 break; 2898 2889 default: 2899 2890 if (__mptcp_check_fallback(mptcp_sk(sk))) { 2900 - pr_debug("Fallback"); 2891 + pr_debug("Fallback\n"); 2901 2892 ssk->sk_shutdown |= how; 2902 2893 tcp_shutdown(ssk, how); 2903 2894 ··· 2907 2898 WRITE_ONCE(mptcp_sk(sk)->snd_una, mptcp_sk(sk)->snd_nxt); 2908 2899 mptcp_schedule_work(sk); 2909 2900 } else { 2910 - pr_debug("Sending DATA_FIN on subflow %p", ssk); 2901 + pr_debug("Sending DATA_FIN on subflow %p\n", ssk); 2911 2902 tcp_send_ack(ssk); 2912 2903 if (!mptcp_rtx_timer_pending(sk)) 2913 2904 mptcp_reset_rtx_timer(sk); ··· 2973 2964 struct mptcp_subflow_context *subflow; 2974 2965 struct mptcp_sock *msk = mptcp_sk(sk); 2975 2966 2976 - pr_debug("msk=%p snd_data_fin_enable=%d pending=%d snd_nxt=%llu write_seq=%llu", 2967 + pr_debug("msk=%p snd_data_fin_enable=%d pending=%d snd_nxt=%llu write_seq=%llu\n", 2977 2968 msk, msk->snd_data_fin_enable, !!mptcp_send_head(sk), 2978 2969 msk->snd_nxt, msk->write_seq); 2979 2970 ··· 2997 2988 { 2998 2989 struct mptcp_sock *msk = mptcp_sk(sk); 2999 2990 3000 - pr_debug("msk=%p snd_data_fin_enable=%d shutdown=%x state=%d pending=%d", 2991 + pr_debug("msk=%p snd_data_fin_enable=%d shutdown=%x state=%d pending=%d\n", 3001 2992 msk, msk->snd_data_fin_enable, sk->sk_shutdown, sk->sk_state, 3002 2993 !!mptcp_send_head(sk)); 3003 2994 ··· 3012 3003 { 3013 3004 struct mptcp_sock *msk = mptcp_sk(sk); 3014 3005 3015 - pr_debug("msk=%p", msk); 3006 + pr_debug("msk=%p\n", msk); 3016 3007 3017 3008 might_sleep(); 3018 3009 ··· 3120 3111 mptcp_set_state(sk, TCP_CLOSE); 3121 3112 3122 3113 sock_hold(sk); 3123 - pr_debug("msk=%p state=%d", sk, sk->sk_state); 3114 + pr_debug("msk=%p state=%d\n", sk, sk->sk_state); 3124 3115 if (msk->token) 3125 3116 mptcp_event(MPTCP_EVENT_CLOSED, msk, NULL, GFP_KERNEL); 3126 3117 ··· 3552 3543 { 3553 3544 struct mptcp_sock *msk = mptcp_sk(sk); 3554 3545 3555 - pr_debug("msk=%p, ssk=%p", msk, msk->first); 3546 + pr_debug("msk=%p, ssk=%p\n", msk, msk->first); 3556 3547 if (WARN_ON_ONCE(!msk->first)) 3557 3548 return -EINVAL; 3558 3549 ··· 3569 3560 sk = subflow->conn; 3570 3561 msk = mptcp_sk(sk); 3571 3562 3572 - pr_debug("msk=%p, token=%u", sk, subflow->token); 3563 + pr_debug("msk=%p, token=%u\n", sk, subflow->token); 3573 3564 3574 3565 subflow->map_seq = subflow->iasn; 3575 3566 subflow->map_subflow_seq = 1; ··· 3598 3589 struct sock *parent = (void *)msk; 3599 3590 bool ret = true; 3600 3591 3601 - pr_debug("msk=%p, subflow=%p", msk, subflow); 3592 + 
pr_debug("msk=%p, subflow=%p\n", msk, subflow); 3602 3593 3603 3594 /* mptcp socket already closing? */ 3604 3595 if (!mptcp_is_fully_established(parent)) { ··· 3644 3635 3645 3636 static void mptcp_shutdown(struct sock *sk, int how) 3646 3637 { 3647 - pr_debug("sk=%p, how=%d", sk, how); 3638 + pr_debug("sk=%p, how=%d\n", sk, how); 3648 3639 3649 3640 if ((how & SEND_SHUTDOWN) && mptcp_close_state(sk)) 3650 3641 __mptcp_wr_shutdown(sk); ··· 3865 3856 struct sock *ssk; 3866 3857 int err; 3867 3858 3868 - pr_debug("msk=%p", msk); 3859 + pr_debug("msk=%p\n", msk); 3869 3860 3870 3861 lock_sock(sk); 3871 3862 ··· 3904 3895 struct mptcp_sock *msk = mptcp_sk(sock->sk); 3905 3896 struct sock *ssk, *newsk; 3906 3897 3907 - pr_debug("msk=%p", msk); 3898 + pr_debug("msk=%p\n", msk); 3908 3899 3909 3900 /* Buggy applications can call accept on socket states other then LISTEN 3910 3901 * but no need to allocate the first subflow just to error out. ··· 3913 3904 if (!ssk) 3914 3905 return -EINVAL; 3915 3906 3916 - pr_debug("ssk=%p, listener=%p", ssk, mptcp_subflow_ctx(ssk)); 3907 + pr_debug("ssk=%p, listener=%p\n", ssk, mptcp_subflow_ctx(ssk)); 3917 3908 newsk = inet_csk_accept(ssk, arg); 3918 3909 if (!newsk) 3919 3910 return arg->err; 3920 3911 3921 - pr_debug("newsk=%p, subflow is mptcp=%d", newsk, sk_is_mptcp(newsk)); 3912 + pr_debug("newsk=%p, subflow is mptcp=%d\n", newsk, sk_is_mptcp(newsk)); 3922 3913 if (sk_is_mptcp(newsk)) { 3923 3914 struct mptcp_subflow_context *subflow; 3924 3915 struct sock *new_mptcp_sock; ··· 4011 4002 sock_poll_wait(file, sock, wait); 4012 4003 4013 4004 state = inet_sk_state_load(sk); 4014 - pr_debug("msk=%p state=%d flags=%lx", msk, state, msk->flags); 4005 + pr_debug("msk=%p state=%d flags=%lx\n", msk, state, msk->flags); 4015 4006 if (state == TCP_LISTEN) { 4016 4007 struct sock *ssk = READ_ONCE(msk->first); 4017 4008
+6 -3
net/mptcp/protocol.h
··· 524 524 stale : 1, /* unable to snd/rcv data, do not use for xmit */ 525 525 valid_csum_seen : 1, /* at least one csum validated */ 526 526 is_mptfo : 1, /* subflow is doing TFO */ 527 - __unused : 10; 527 + close_event_done : 1, /* has done the post-closed part */ 528 + __unused : 9; 528 529 bool data_avail; 529 530 bool scheduled; 530 531 u32 remote_nonce; ··· 993 992 void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk, 994 993 const struct mptcp_addr_info *addr); 995 994 void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk); 995 + bool mptcp_pm_nl_is_init_remote_addr(struct mptcp_sock *msk, 996 + const struct mptcp_addr_info *remote); 996 997 void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk); 997 998 void mptcp_pm_rm_addr_received(struct mptcp_sock *msk, 998 999 const struct mptcp_rm_list *rm_list); ··· 1179 1176 static inline void __mptcp_do_fallback(struct mptcp_sock *msk) 1180 1177 { 1181 1178 if (__mptcp_check_fallback(msk)) { 1182 - pr_debug("TCP fallback already done (msk=%p)", msk); 1179 + pr_debug("TCP fallback already done (msk=%p)\n", msk); 1183 1180 return; 1184 1181 } 1185 1182 set_bit(MPTCP_FALLBACK_DONE, &msk->flags); ··· 1215 1212 } 1216 1213 } 1217 1214 1218 - #define pr_fallback(a) pr_debug("%s:fallback to TCP (msk=%p)", __func__, a) 1215 + #define pr_fallback(a) pr_debug("%s:fallback to TCP (msk=%p)\n", __func__, a) 1219 1216 1220 1217 static inline bool mptcp_check_infinite_map(struct sk_buff *skb) 1221 1218 {
+2 -2
net/mptcp/sched.c
··· 86 86 list_add_tail_rcu(&sched->list, &mptcp_sched_list); 87 87 spin_unlock(&mptcp_sched_list_lock); 88 88 89 - pr_debug("%s registered", sched->name); 89 + pr_debug("%s registered\n", sched->name); 90 90 return 0; 91 91 } 92 92 ··· 118 118 if (msk->sched->init) 119 119 msk->sched->init(msk); 120 120 121 - pr_debug("sched=%s", msk->sched->name); 121 + pr_debug("sched=%s\n", msk->sched->name); 122 122 123 123 return 0; 124 124 }
+2 -2
net/mptcp/sockopt.c
··· 873 873 struct mptcp_sock *msk = mptcp_sk(sk); 874 874 struct sock *ssk; 875 875 876 - pr_debug("msk=%p", msk); 876 + pr_debug("msk=%p\n", msk); 877 877 878 878 if (level == SOL_SOCKET) 879 879 return mptcp_setsockopt_sol_socket(msk, optname, optval, optlen); ··· 1453 1453 struct mptcp_sock *msk = mptcp_sk(sk); 1454 1454 struct sock *ssk; 1455 1455 1456 - pr_debug("msk=%p", msk); 1456 + pr_debug("msk=%p\n", msk); 1457 1457 1458 1458 /* @@ the meaning of setsockopt() when the socket is connected and 1459 1459 * there are multiple subflows is not yet defined. It is up to the
+30 -26
net/mptcp/subflow.c
··· 39 39 { 40 40 struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); 41 41 42 - pr_debug("subflow_req=%p", subflow_req); 42 + pr_debug("subflow_req=%p\n", subflow_req); 43 43 44 44 if (subflow_req->msk) 45 45 sock_put((struct sock *)subflow_req->msk); ··· 146 146 struct mptcp_options_received mp_opt; 147 147 bool opt_mp_capable, opt_mp_join; 148 148 149 - pr_debug("subflow_req=%p, listener=%p", subflow_req, listener); 149 + pr_debug("subflow_req=%p, listener=%p\n", subflow_req, listener); 150 150 151 151 #ifdef CONFIG_TCP_MD5SIG 152 152 /* no MPTCP if MD5SIG is enabled on this socket or we may run out of ··· 221 221 } 222 222 223 223 if (subflow_use_different_sport(subflow_req->msk, sk_listener)) { 224 - pr_debug("syn inet_sport=%d %d", 224 + pr_debug("syn inet_sport=%d %d\n", 225 225 ntohs(inet_sk(sk_listener)->inet_sport), 226 226 ntohs(inet_sk((struct sock *)subflow_req->msk)->inet_sport)); 227 227 if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener)) { ··· 243 243 subflow_init_req_cookie_join_save(subflow_req, skb); 244 244 } 245 245 246 - pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token, 246 + pr_debug("token=%u, remote_nonce=%u msk=%p\n", subflow_req->token, 247 247 subflow_req->remote_nonce, subflow_req->msk); 248 248 } 249 249 ··· 527 527 subflow->rel_write_seq = 1; 528 528 subflow->conn_finished = 1; 529 529 subflow->ssn_offset = TCP_SKB_CB(skb)->seq; 530 - pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset); 530 + pr_debug("subflow=%p synack seq=%x\n", subflow, subflow->ssn_offset); 531 531 532 532 mptcp_get_options(skb, &mp_opt); 533 533 if (subflow->request_mptcp) { ··· 559 559 subflow->thmac = mp_opt.thmac; 560 560 subflow->remote_nonce = mp_opt.nonce; 561 561 WRITE_ONCE(subflow->remote_id, mp_opt.join_id); 562 - pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d", 562 + pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d\n", 563 563 subflow, subflow->thmac, subflow->remote_nonce, 564 564 subflow->backup); 565 565 ··· 585 585 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKBACKUPRX); 586 586 587 587 if (subflow_use_different_dport(msk, sk)) { 588 - pr_debug("synack inet_dport=%d %d", 588 + pr_debug("synack inet_dport=%d %d\n", 589 589 ntohs(inet_sk(sk)->inet_dport), 590 590 ntohs(inet_sk(parent)->inet_dport)); 591 591 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINPORTSYNACKRX); ··· 655 655 { 656 656 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 657 657 658 - pr_debug("subflow=%p", subflow); 658 + pr_debug("subflow=%p\n", subflow); 659 659 660 660 /* Never answer to SYNs sent to broadcast or multicast */ 661 661 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) ··· 686 686 { 687 687 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 688 688 689 - pr_debug("subflow=%p", subflow); 689 + pr_debug("subflow=%p\n", subflow); 690 690 691 691 if (skb->protocol == htons(ETH_P_IP)) 692 692 return subflow_v4_conn_request(sk, skb); ··· 807 807 struct mptcp_sock *owner; 808 808 struct sock *child; 809 809 810 - pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn); 810 + pr_debug("listener=%p, req=%p, conn=%p\n", listener, req, listener->conn); 811 811 812 812 /* After child creation we must look for MPC even when options 813 813 * are not parsed ··· 898 898 ctx->conn = (struct sock *)owner; 899 899 900 900 if (subflow_use_different_sport(owner, sk)) { 901 - pr_debug("ack inet_sport=%d %d", 901 + pr_debug("ack inet_sport=%d %d\n", 902 902 
ntohs(inet_sk(sk)->inet_sport), 903 903 ntohs(inet_sk((struct sock *)owner)->inet_sport)); 904 904 if (!mptcp_pm_sport_in_anno_list(owner, sk)) { ··· 961 961 962 962 static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn) 963 963 { 964 - pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d", 964 + pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d\n", 965 965 ssn, subflow->map_subflow_seq, subflow->map_data_len); 966 966 } 967 967 ··· 1121 1121 1122 1122 data_len = mpext->data_len; 1123 1123 if (data_len == 0) { 1124 - pr_debug("infinite mapping received"); 1124 + pr_debug("infinite mapping received\n"); 1125 1125 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX); 1126 1126 subflow->map_data_len = 0; 1127 1127 return MAPPING_INVALID; ··· 1133 1133 if (data_len == 1) { 1134 1134 bool updated = mptcp_update_rcv_data_fin(msk, mpext->data_seq, 1135 1135 mpext->dsn64); 1136 - pr_debug("DATA_FIN with no payload seq=%llu", mpext->data_seq); 1136 + pr_debug("DATA_FIN with no payload seq=%llu\n", mpext->data_seq); 1137 1137 if (subflow->map_valid) { 1138 1138 /* A DATA_FIN might arrive in a DSS 1139 1139 * option before the previous mapping ··· 1159 1159 data_fin_seq &= GENMASK_ULL(31, 0); 1160 1160 1161 1161 mptcp_update_rcv_data_fin(msk, data_fin_seq, mpext->dsn64); 1162 - pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d", 1162 + pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d\n", 1163 1163 data_fin_seq, mpext->dsn64); 1164 1164 1165 1165 /* Adjust for DATA_FIN using 1 byte of sequence space */ ··· 1205 1205 if (unlikely(subflow->map_csum_reqd != csum_reqd)) 1206 1206 return MAPPING_INVALID; 1207 1207 1208 - pr_debug("new map seq=%llu subflow_seq=%u data_len=%u csum=%d:%u", 1208 + pr_debug("new map seq=%llu subflow_seq=%u data_len=%u csum=%d:%u\n", 1209 1209 subflow->map_seq, subflow->map_subflow_seq, 1210 1210 subflow->map_data_len, subflow->map_csum_reqd, 1211 1211 subflow->map_data_csum); ··· 1240 1240 avail_len = skb->len - offset; 1241 1241 incr = limit >= avail_len ?
avail_len + fin : limit; 1242 1242 1243 - pr_debug("discarding=%d len=%d offset=%d seq=%d", incr, skb->len, 1243 + pr_debug("discarding=%d len=%d offset=%d seq=%d\n", incr, skb->len, 1244 1244 offset, subflow->map_subflow_seq); 1245 1245 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA); 1246 1246 tcp_sk(ssk)->copied_seq += incr; ··· 1255 1255 /* sched mptcp worker to remove the subflow if no more data is pending */ 1256 1256 static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk) 1257 1257 { 1258 - if (likely(ssk->sk_state != TCP_CLOSE)) 1258 + struct sock *sk = (struct sock *)msk; 1259 + 1260 + if (likely(ssk->sk_state != TCP_CLOSE && 1261 + (ssk->sk_state != TCP_CLOSE_WAIT || 1262 + inet_sk_state_load(sk) != TCP_ESTABLISHED))) 1259 1263 return; 1260 1264 1261 1265 if (skb_queue_empty(&ssk->sk_receive_queue) && 1262 1266 !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags)) 1263 - mptcp_schedule_work((struct sock *)msk); 1267 + mptcp_schedule_work(sk); 1264 1268 } 1265 1269 1266 1270 static bool subflow_can_fallback(struct mptcp_subflow_context *subflow) ··· 1341 1337 1342 1338 old_ack = READ_ONCE(msk->ack_seq); 1343 1339 ack_seq = mptcp_subflow_get_mapped_dsn(subflow); 1344 - pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack, 1340 + pr_debug("msk ack_seq=%llx subflow ack_seq=%llx\n", old_ack, 1345 1341 ack_seq); 1346 1342 if (unlikely(before64(ack_seq, old_ack))) { 1347 1343 mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq); ··· 1413 1409 subflow->map_valid = 0; 1414 1410 WRITE_ONCE(subflow->data_avail, false); 1415 1411 1416 - pr_debug("Done with mapping: seq=%u data_len=%u", 1412 + pr_debug("Done with mapping: seq=%u data_len=%u\n", 1417 1413 subflow->map_subflow_seq, 1418 1414 subflow->map_data_len); 1419 1415 } ··· 1523 1519 1524 1520 target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk); 1525 1521 1526 - pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d", 1522 + pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d\n", 1527 1523 subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped); 1528 1524 1529 1525 if (likely(icsk->icsk_af_ops == target)) ··· 1616 1612 goto failed; 1617 1613 1618 1614 mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL); 1619 - pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d", msk, 1615 + pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d\n", msk, 1620 1616 remote_token, local_id, remote_id); 1621 1617 subflow->remote_token = remote_token; 1622 1618 WRITE_ONCE(subflow->remote_id, remote_id); ··· 1751 1747 SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid; 1752 1748 1753 1749 subflow = mptcp_subflow_ctx(sf->sk); 1754 - pr_debug("subflow=%p", subflow); 1750 + pr_debug("subflow=%p\n", subflow); 1755 1751 1756 1752 *new_sock = sf; 1757 1753 sock_hold(sk); ··· 1780 1776 INIT_LIST_HEAD(&ctx->node); 1781 1777 INIT_LIST_HEAD(&ctx->delegated_node); 1782 1778 1783 - pr_debug("subflow=%p", ctx); 1779 + pr_debug("subflow=%p\n", ctx); 1784 1780 1785 1781 ctx->tcp_sock = sk; 1786 1782 WRITE_ONCE(ctx->local_id, -1); ··· 1931 1927 goto out; 1932 1928 } 1933 1929 1934 - pr_debug("subflow=%p, family=%d", ctx, sk->sk_family); 1930 + pr_debug("subflow=%p, family=%d\n", ctx, sk->sk_family); 1935 1931 1936 1932 tp->is_mptcp = 1; 1937 1933 ctx->icsk_af_ops = icsk->icsk_af_ops;
+3 -1
net/sched/sch_fq.c
··· 663 663 pband = &q->band_flows[q->band_nr]; 664 664 pband->credit = min(pband->credit + pband->quantum, 665 665 pband->quantum); 666 - goto begin; 666 + if (pband->credit > 0) 667 + goto begin; 668 + retry = 0; 667 669 } 668 670 if (q->time_next_delayed_flow != ~0ULL) 669 671 qdisc_watchdog_schedule_range_ns(&q->watchdog,
+16 -6
net/sctp/sm_statefuns.c
··· 2260 2260 } 2261 2261 } 2262 2262 2263 - /* Update socket peer label if first association. */ 2264 - if (security_sctp_assoc_request(new_asoc, chunk->head_skb ?: chunk->skb)) { 2265 - sctp_association_free(new_asoc); 2266 - return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); 2267 - } 2268 - 2269 2263 /* Set temp so that it won't be added into hashtable */ 2270 2264 new_asoc->temp = 1; 2271 2265 ··· 2267 2273 * current association. 2268 2274 */ 2269 2275 action = sctp_tietags_compare(new_asoc, asoc); 2276 + 2277 + /* In cases C and E the association doesn't enter the ESTABLISHED 2278 + * state, so there is no need to call security_sctp_assoc_request(). 2279 + */ 2280 + switch (action) { 2281 + case 'A': /* Association restart. */ 2282 + case 'B': /* Collision case B. */ 2283 + case 'D': /* Collision case D. */ 2284 + /* Update socket peer label if first association. */ 2285 + if (security_sctp_assoc_request((struct sctp_association *)asoc, 2286 + chunk->head_skb ?: chunk->skb)) { 2287 + sctp_association_free(new_asoc); 2288 + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); 2289 + } 2290 + break; 2291 + } 2270 2292 2271 2293 switch (action) { 2272 2294 case 'A': /* Association restart. */
+4 -2
net/sunrpc/xprtrdma/ib_client.c
··· 62 62 if (!rd || test_bit(RPCRDMA_RD_F_REMOVING, &rd->rd_flags)) 63 63 return -ENETUNREACH; 64 64 65 - kref_get(&rd->rd_kref); 66 65 if (xa_alloc(&rd->rd_xa, &rn->rn_index, rn, xa_limit_32b, GFP_KERNEL) < 0) 67 66 return -ENOMEM; 67 + kref_get(&rd->rd_kref); 68 68 rn->rn_done = done; 69 + trace_rpcrdma_client_register(device, rn); 69 70 return 0; 70 71 } 71 72 ··· 92 91 if (!rd) 93 92 return; 94 93 94 + trace_rpcrdma_client_unregister(device, rn); 95 95 xa_erase(&rd->rd_xa, rn->rn_index); 96 96 kref_put(&rd->rd_kref, rpcrdma_rn_release); 97 97 } ··· 113 111 return -ENOMEM; 114 112 115 113 kref_init(&rd->rd_kref); 116 - xa_init_flags(&rd->rd_xa, XA_FLAGS_ALLOC1); 114 + xa_init_flags(&rd->rd_xa, XA_FLAGS_ALLOC); 117 115 rd->rd_device = device; 118 116 init_completion(&rd->rd_done); 119 117 ib_set_client_data(device, &rpcrdma_ib_client, rd);
-10
scripts/checkpatch.pl
··· 4015 4015 } 4016 4016 } 4017 4017 4018 - # Block comment styles 4019 - # Networking with an initial /* 4020 - if ($realfile =~ m@^(drivers/net/|net/)@ && 4021 - $prevrawline =~ /^\+[ \t]*\/\*[ \t]*$/ && 4022 - $rawline =~ /^\+[ \t]*\*/ && 4023 - $realline > 3) { # Do not warn about the initial copyright comment block after SPDX-License-Identifier 4024 - WARN("NETWORKING_BLOCK_COMMENT_STYLE", 4025 - "networking block comments don't use an empty /* line, use /* Comment...\n" . $hereprev); 4026 - } 4027 - 4028 4018 # Block comments use * on subsequent lines 4029 4019 if ($prevline =~ /$;[ \t]*$/ && #ends in comment 4030 4020 $prevrawline =~ /^\+.*?\/\*/ && #starting /*
+3
sound/core/seq/seq_clientmgr.c
··· 537 537 return NULL; 538 538 if (! dest->accept_input) 539 539 goto __not_avail; 540 + if (snd_seq_ev_is_ump(event)) 541 + return dest; /* ok - no filter checks */ 542 + 540 543 if ((dest->filter & SNDRV_SEQ_FILTER_USE_EVENT) && 541 544 ! test_bit(event->type, dest->event_filter)) 542 545 goto __not_avail;
+1 -1
sound/pci/hda/cs35l56_hda.c
··· 1003 1003 goto err; 1004 1004 } 1005 1005 1006 - cs35l56->base.cal_index = cs35l56->index; 1006 + cs35l56->base.cal_index = -1; 1007 1007 1008 1008 cs35l56_init_cs_dsp(&cs35l56->base, &cs35l56->cs_dsp); 1009 1009 cs35l56->cs_dsp.client_ops = &cs35l56_hda_client_ops;
+3 -2
sound/pci/hda/hda_component.c
··· 141 141 int ret; 142 142 143 143 /* Init shared and component specific data */ 144 - memset(parent, 0, sizeof(*parent)); 145 - mutex_init(&parent->mutex); 144 + memset(parent->comps, 0, sizeof(parent->comps)); 146 145 parent->codec = cdc; 147 146 148 147 mutex_lock(&parent->mutex); ··· 162 163 struct component_match *match = NULL; 163 164 struct hda_scodec_match *sm; 164 165 int ret, i; 166 + 167 + mutex_init(&parent->mutex); 165 168 166 169 for (i = 0; i < count; i++) { 167 170 sm = devm_kmalloc(dev, sizeof(*sm), GFP_KERNEL);
+54 -25
sound/pci/hda/patch_realtek.c
··· 4930 4930 } 4931 4931 } 4932 4932 4933 + static void alc_hp_mute_disable(struct hda_codec *codec, unsigned int delay) 4934 + { 4935 + if (delay <= 0) 4936 + delay = 75; 4937 + snd_hda_codec_write(codec, 0x21, 0, 4938 + AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE); 4939 + msleep(delay); 4940 + snd_hda_codec_write(codec, 0x21, 0, 4941 + AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0); 4942 + msleep(delay); 4943 + } 4944 + 4945 + static void alc_hp_enable_unmute(struct hda_codec *codec, unsigned int delay) 4946 + { 4947 + if (delay <= 0) 4948 + delay = 75; 4949 + snd_hda_codec_write(codec, 0x21, 0, 4950 + AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT); 4951 + msleep(delay); 4952 + snd_hda_codec_write(codec, 0x21, 0, 4953 + AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE); 4954 + msleep(delay); 4955 + } 4956 + 4933 4957 static const struct coef_fw alc225_pre_hsmode[] = { 4934 4958 UPDATE_COEF(0x4a, 1<<8, 0), 4935 4959 UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), ··· 5055 5031 case 0x10ec0236: 5056 5032 case 0x10ec0256: 5057 5033 case 0x19e58326: 5034 + alc_hp_mute_disable(codec, 75); 5058 5035 alc_process_coef_fw(codec, coef0256); 5059 5036 break; 5060 5037 case 0x10ec0234: ··· 5090 5065 case 0x10ec0295: 5091 5066 case 0x10ec0289: 5092 5067 case 0x10ec0299: 5068 + alc_hp_mute_disable(codec, 75); 5093 5069 alc_process_coef_fw(codec, alc225_pre_hsmode); 5094 5070 alc_process_coef_fw(codec, coef0225); 5095 5071 break; ··· 5316 5290 case 0x10ec0299: 5317 5291 alc_process_coef_fw(codec, alc225_pre_hsmode); 5318 5292 alc_process_coef_fw(codec, coef0225); 5293 + alc_hp_enable_unmute(codec, 75); 5319 5294 break; 5320 5295 case 0x10ec0255: 5321 5296 alc_process_coef_fw(codec, coef0255); ··· 5329 5302 alc_write_coef_idx(codec, 0x45, 0xc089); 5330 5303 msleep(50); 5331 5304 alc_process_coef_fw(codec, coef0256); 5305 + alc_hp_enable_unmute(codec, 75); 5332 5306 break; 5333 5307 case 0x10ec0234: 5334 5308 case 0x10ec0274: ··· 5427 5399 case 0x10ec0256: 5428 5400 case 0x19e58326: 5429 5401 alc_process_coef_fw(codec, coef0256); 5402 + alc_hp_enable_unmute(codec, 75); 5430 5403 break; 5431 5404 case 0x10ec0234: 5432 5405 case 0x10ec0274: ··· 5476 5447 alc_process_coef_fw(codec, coef0225_2); 5477 5448 else 5478 5449 alc_process_coef_fw(codec, coef0225_1); 5450 + alc_hp_enable_unmute(codec, 75); 5479 5451 break; 5480 5452 case 0x10ec0867: 5481 5453 alc_update_coefex_idx(codec, 0x57, 0x5, 1<<14, 0); ··· 5544 5514 case 0x10ec0256: 5545 5515 case 0x19e58326: 5546 5516 alc_process_coef_fw(codec, coef0256); 5517 + alc_hp_enable_unmute(codec, 75); 5547 5518 break; 5548 5519 case 0x10ec0234: 5549 5520 case 0x10ec0274: ··· 5582 5551 case 0x10ec0289: 5583 5552 case 0x10ec0299: 5584 5553 alc_process_coef_fw(codec, coef0225); 5554 + alc_hp_enable_unmute(codec, 75); 5585 5555 break; 5586 5556 } 5587 5557 codec_dbg(codec, "Headset jack set to Nokia-style headset mode.\n"); ··· 5651 5619 alc_write_coef_idx(codec, 0x06, 0x6104); 5652 5620 alc_write_coefex_idx(codec, 0x57, 0x3, 0x09a3); 5653 5621 5654 - snd_hda_codec_write(codec, 0x21, 0, 5655 - AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE); 5656 - msleep(80); 5657 - snd_hda_codec_write(codec, 0x21, 0, 5658 - AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0); 5659 - 5660 5622 alc_process_coef_fw(codec, coef0255); 5661 5623 msleep(300); 5662 5624 val = alc_read_coef_idx(codec, 0x46); 5663 5625 is_ctia = (val & 0x0070) == 0x0070; 5664 - 5626 + if (!is_ctia) { 5627 + alc_write_coef_idx(codec, 0x45, 0xe089); 5628 + msleep(100); 5629 + val = alc_read_coef_idx(codec, 0x46); 5630 + if ((val & 0x0070) == 0x0070) 5631 + is_ctia = 
false; 5632 + else 5633 + is_ctia = true; 5634 + } 5665 5635 alc_write_coefex_idx(codec, 0x57, 0x3, 0x0da3); 5666 5636 alc_update_coefex_idx(codec, 0x57, 0x5, 1<<14, 0); 5667 - 5668 - snd_hda_codec_write(codec, 0x21, 0, 5669 - AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT); 5670 - msleep(80); 5671 - snd_hda_codec_write(codec, 0x21, 0, 5672 - AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE); 5673 5637 break; 5674 5638 case 0x10ec0234: 5675 5639 case 0x10ec0274: ··· 5742 5714 case 0x10ec0295: 5743 5715 case 0x10ec0289: 5744 5716 case 0x10ec0299: 5745 - snd_hda_codec_write(codec, 0x21, 0, 5746 - AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE); 5747 - msleep(80); 5748 - snd_hda_codec_write(codec, 0x21, 0, 5749 - AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0); 5750 - 5751 5717 alc_process_coef_fw(codec, alc225_pre_hsmode); 5752 5718 alc_update_coef_idx(codec, 0x67, 0xf000, 0x1000); 5753 5719 val = alc_read_coef_idx(codec, 0x45); ··· 5758 5736 val = alc_read_coef_idx(codec, 0x46); 5759 5737 is_ctia = (val & 0x00f0) == 0x00f0; 5760 5738 } 5739 + if (!is_ctia) { 5740 + alc_update_coef_idx(codec, 0x45, 0x3f<<10, 0x38<<10); 5741 + alc_update_coef_idx(codec, 0x49, 3<<8, 1<<8); 5742 + msleep(100); 5743 + val = alc_read_coef_idx(codec, 0x46); 5744 + if ((val & 0x00f0) == 0x00f0) 5745 + is_ctia = false; 5746 + else 5747 + is_ctia = true; 5748 + } 5761 5749 alc_update_coef_idx(codec, 0x4a, 7<<6, 7<<6); 5762 5750 alc_update_coef_idx(codec, 0x4a, 3<<4, 3<<4); 5763 5751 alc_update_coef_idx(codec, 0x67, 0xf000, 0x3000); 5764 5752 5765 - snd_hda_codec_write(codec, 0x21, 0, 5766 - AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT); 5767 - msleep(80); 5768 - snd_hda_codec_write(codec, 0x21, 0, 5769 - AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE); 5770 5753 break; 5771 5754 case 0x10ec0867: 5772 5755 is_ctia = true; ··· 10341 10315 SND_PCI_QUIRK(0x103c, 0x8c15, "HP Spectre x360 2-in-1 Laptop 14-eu0xxx", ALC245_FIXUP_HP_SPECTRE_X360_EU0XXX), 10342 10316 SND_PCI_QUIRK(0x103c, 0x8c16, "HP Spectre 16", ALC287_FIXUP_CS35L41_I2C_2), 10343 10317 SND_PCI_QUIRK(0x103c, 0x8c17, "HP Spectre 16", ALC287_FIXUP_CS35L41_I2C_2), 10318 + SND_PCI_QUIRK(0x103c, 0x8c21, "HP Pavilion Plus Laptop 14-ey0XXX", ALC245_FIXUP_HP_X360_MUTE_LEDS), 10344 10319 SND_PCI_QUIRK(0x103c, 0x8c46, "HP EliteBook 830 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), 10345 10320 SND_PCI_QUIRK(0x103c, 0x8c47, "HP EliteBook 840 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), 10346 10321 SND_PCI_QUIRK(0x103c, 0x8c48, "HP EliteBook 860 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), ··· 10380 10353 SND_PCI_QUIRK(0x103c, 0x8ca2, "HP ZBook Power", ALC236_FIXUP_HP_GPIO_LED), 10381 10354 SND_PCI_QUIRK(0x103c, 0x8ca4, "HP ZBook Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), 10382 10355 SND_PCI_QUIRK(0x103c, 0x8ca7, "HP ZBook Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), 10356 + SND_PCI_QUIRK(0x103c, 0x8cbd, "HP Pavilion Aero Laptop 13-bg0xxx", ALC245_FIXUP_HP_X360_MUTE_LEDS), 10383 10357 SND_PCI_QUIRK(0x103c, 0x8cdd, "HP Spectre", ALC287_FIXUP_CS35L41_I2C_2), 10384 10358 SND_PCI_QUIRK(0x103c, 0x8cde, "HP Spectre", ALC287_FIXUP_CS35L41_I2C_2), 10385 10359 SND_PCI_QUIRK(0x103c, 0x8cdf, "HP SnowWhite", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED), ··· 10541 10513 SND_PCI_QUIRK(0x144d, 0xca03, "Samsung Galaxy Book2 Pro 360 (NP930QED)", ALC298_FIXUP_SAMSUNG_AMP), 10542 10514 SND_PCI_QUIRK(0x144d, 0xc868, "Samsung Galaxy Book2 Pro (NP930XED)", ALC298_FIXUP_SAMSUNG_AMP), 10543 10515 SND_PCI_QUIRK(0x144d, 0xc1ca, "Samsung Galaxy Book3 Pro 360 (NP960QFG-KB1US)", ALC298_FIXUP_SAMSUNG_AMP2), 10516 + SND_PCI_QUIRK(0x144d,
0xc1cc, "Samsung Galaxy Book3 Ultra (NT960XFH-XD92G))", ALC298_FIXUP_SAMSUNG_AMP2), 10544 10517 SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC), 10545 10518 SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC), 10546 10519 SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
+2
sound/soc/amd/acp/acp-legacy-mach.c
··· 227 227 }, 228 228 { } 229 229 }; 230 + MODULE_DEVICE_TABLE(platform, board_ids); 231 + 230 232 static struct platform_driver acp_asoc_audio = { 231 233 .driver = { 232 234 .pm = &snd_soc_pm_ops,
+2
sound/soc/amd/acp/acp-sof-mach.c
··· 158 158 }, 159 159 { } 160 160 }; 161 + MODULE_DEVICE_TABLE(platform, board_ids); 162 + 161 163 static struct platform_driver acp_asoc_audio = { 162 164 .driver = { 163 165 .name = "sof_mach",
+1
sound/soc/au1x/db1200.c
··· 44 44 }, 45 45 {}, 46 46 }; 47 + MODULE_DEVICE_TABLE(platform, db1200_pids); 47 48 48 49 /*------------------------- AC97 PART ---------------------------*/ 49 50
+9
sound/soc/codecs/cs-amp-lib-test.c
··· 38 38 { 39 39 struct cs_amp_lib_test_priv *priv = test->priv; 40 40 unsigned int blob_size; 41 + int i; 41 42 42 43 blob_size = offsetof(struct cirrus_amp_efi_data, data) + 43 44 sizeof(struct cirrus_amp_cal_data) * num_amps; ··· 50 49 priv->cal_blob->count = num_amps; 51 50 52 51 get_random_bytes(priv->cal_blob->data, sizeof(struct cirrus_amp_cal_data) * num_amps); 52 + 53 + /* Ensure all timestamps are non-zero to mark the entry valid. */ 54 + for (i = 0; i < num_amps; i++) 55 + priv->cal_blob->data[i].calTime[0] |= 1; 56 + 57 + /* Ensure that all UIDs are non-zero and unique. */ 58 + for (i = 0; i < num_amps; i++) 59 + *(u8 *)&priv->cal_blob->data[i].calTarget[0] = i + 1; 53 60 } 54 61 55 62 static u64 cs_amp_lib_test_get_target_uid(struct kunit *test)
+6 -1
sound/soc/codecs/cs-amp-lib.c
··· 182 182 for (i = 0; i < efi_data->count; ++i) { 183 183 u64 cal_target = cs_amp_cal_target_u64(&efi_data->data[i]); 184 184 185 + /* Skip empty entries */ 186 + if (!efi_data->data[i].calTime[0] && !efi_data->data[i].calTime[1]) 187 + continue; 188 + 185 189 /* Skip entries with unpopulated silicon ID */ 186 190 if (cal_target == 0) 187 191 continue; ··· 197 193 } 198 194 } 199 195 200 - if (!cal && (amp_index >= 0) && (amp_index < efi_data->count)) { 196 + if (!cal && (amp_index >= 0) && (amp_index < efi_data->count) && 197 + (efi_data->data[amp_index].calTime[0] || efi_data->data[amp_index].calTime[1])) { 201 198 u64 cal_target = cs_amp_cal_target_u64(&efi_data->data[amp_index]); 202 199 203 200 /*
+6
sound/soc/codecs/lpass-macro-common.h
··· 49 49 static inline const char *lpass_macro_get_codec_version_string(int version)
50 50 {
51 51 switch (version) {
52 + case LPASS_CODEC_VERSION_1_0:
53 + return "v1.0";
54 + case LPASS_CODEC_VERSION_1_1:
55 + return "v1.1";
56 + case LPASS_CODEC_VERSION_1_2:
57 + return "v1.2";
52 58 case LPASS_CODEC_VERSION_2_0:
53 59 return "v2.0";
54 60 case LPASS_CODEC_VERSION_2_1:
+4
sound/soc/codecs/lpass-va-macro.c
··· 1485 1485 if ((core_id_0 == 0x02) && (core_id_1 == 0x0F) && (core_id_2 == 0x80 || core_id_2 == 0x81))
1486 1486 version = LPASS_CODEC_VERSION_2_8;
1487 1487
1488 + if (version == LPASS_CODEC_VERSION_UNKNOWN)
1489 + dev_warn(va->dev, "Unknown Codec version, ID: %02x / %02x / %02x\n",
1490 + core_id_0, core_id_1, core_id_2);
1491 +
1488 1492 lpass_macro_set_codec_version(version);
1489 1493
1490 1494 dev_dbg(va->dev, "LPASS Codec Version %s\n", lpass_macro_get_codec_version_string(version));
+2 -3
sound/soc/codecs/wcd937x.c
··· 242 242
243 243 static void wcd937x_reset(struct wcd937x_priv *wcd937x)
244 244 {
245 - usleep_range(20, 30);
246 -
247 245 gpiod_set_value(wcd937x->reset_gpio, 1);
248 -
246 + usleep_range(20, 30);
247 + gpiod_set_value(wcd937x->reset_gpio, 0);
249 248 usleep_range(20, 30);
250 249 }
251 250
+1
sound/soc/mediatek/mt8188/mt8188-afe-pcm.c
··· 2748 2748 case AFE_ASRC12_NEW_CON9:
2749 2749 case AFE_LRCK_CNT:
2750 2750 case AFE_DAC_MON0:
2751 + case AFE_DAC_CON0:
2751 2752 case AFE_DL2_CUR:
2752 2753 case AFE_DL3_CUR:
2753 2754 case AFE_DL6_CUR:
+4 -2
sound/soc/sof/amd/acp-dsp-offset.h
··· 76 76 #define DSP_SW_INTR_CNTL_OFFSET 0x0
77 77 #define DSP_SW_INTR_STAT_OFFSET 0x4
78 78 #define DSP_SW_INTR_TRIG_OFFSET 0x8
79 - #define ACP_ERROR_STATUS 0x18C4
79 + #define ACP3X_ERROR_STATUS 0x18C4
80 + #define ACP6X_ERROR_STATUS 0x1A4C
80 81 #define ACP3X_AXI2DAGB_SEM_0 0x1880
81 82 #define ACP5X_AXI2DAGB_SEM_0 0x1884
82 83 #define ACP6X_AXI2DAGB_SEM_0 0x1874
83 84
84 85 /* ACP common registers to report errors related to I2S & SoundWire interfaces */
85 - #define ACP_SW0_I2S_ERROR_REASON 0x18B4
86 + #define ACP3X_SW_I2S_ERROR_REASON 0x18C8
87 + #define ACP6X_SW0_I2S_ERROR_REASON 0x18B4
86 88 #define ACP_SW1_I2S_ERROR_REASON 0x1A50
87 89
88 90 /* Registers from ACP_SHA block */
+35 -17
sound/soc/sof/amd/acp.c
··· 92 92 unsigned int idx, unsigned int dscr_count)
93 93 {
94 94 struct snd_sof_dev *sdev = adata->dev;
95 + const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
95 96 unsigned int val, status;
96 97 int ret;
97 98
··· 103 102 val & (1 << ch), ACP_REG_POLL_INTERVAL,
104 103 ACP_REG_POLL_TIMEOUT_US);
105 104 if (ret < 0) {
106 - status = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_ERROR_STATUS);
105 + status = snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->acp_error_stat);
107 106 val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_DMA_ERR_STS_0 + ch * sizeof(u32));
108 107
109 108 dev_err(sdev->dev, "ACP_DMA_ERR_STS :0x%x ACP_ERROR_STATUS :0x%x\n", val, status);
··· 264 263 snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_STRT_ADDR, start_addr);
265 264 snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_DESTINATION_ADDR, dest_addr);
266 265 snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_MSG_LENGTH, image_length);
267 +
268 + /* psp_send_cmd only required for vangogh platform (rev - 5) */
269 + if (desc->rev == 5 && !(adata->quirks && adata->quirks->skip_iram_dram_size_mod)) {
270 + /* Modify IRAM and DRAM size */
271 + ret = psp_send_cmd(adata, MBOX_ACP_IRAM_DRAM_FENCE_COMMAND | IRAM_DRAM_FENCE_2);
272 + if (ret)
273 + return ret;
274 + ret = psp_send_cmd(adata, MBOX_ACP_IRAM_DRAM_FENCE_COMMAND | MBOX_ISREADY_FLAG);
275 + if (ret)
276 + return ret;
277 + }
267 278 snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_CMD, ACP_SHA_RUN);
268 279
269 280 ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SHA_TRANSFER_BYTE_CNT,
··· 288 276 /* psp_send_cmd only required for renoir platform (rev - 3) */
289 277 if (desc->rev == 3) {
290 278 ret = psp_send_cmd(adata, MBOX_ACP_SHA_DMA_COMMAND);
291 - if (ret)
292 - return ret;
293 - }
294 -
295 - /* psp_send_cmd only required for vangogh platform (rev - 5) */
296 - if (desc->rev == 5 && !(adata->quirks && adata->quirks->skip_iram_dram_size_mod)) {
297 - /* Modify IRAM and DRAM size */
298 - ret = psp_send_cmd(adata, MBOX_ACP_IRAM_DRAM_FENCE_COMMAND | IRAM_DRAM_FENCE_2);
299 - if (ret)
300 - return ret;
301 - ret = psp_send_cmd(adata, MBOX_ACP_IRAM_DRAM_FENCE_COMMAND | MBOX_ISREADY_FLAG);
302 279 if (ret)
303 280 return ret;
304 281 }
··· 403 402
404 403 if (val & ACP_ERROR_IRQ_MASK) {
405 404 snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->ext_intr_stat, ACP_ERROR_IRQ_MASK);
406 - snd_sof_dsp_write(sdev, ACP_DSP_BAR, base + ACP_SW0_I2S_ERROR_REASON, 0);
407 - snd_sof_dsp_write(sdev, ACP_DSP_BAR, base + ACP_SW1_I2S_ERROR_REASON, 0);
408 - snd_sof_dsp_write(sdev, ACP_DSP_BAR, base + ACP_ERROR_STATUS, 0);
405 + snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->acp_sw0_i2s_err_reason, 0);
406 + /* ACP_SW1_I2S_ERROR_REASON is newly added register from rmb platform onwards */
407 + if (desc->rev >= 6)
408 + snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SW1_I2S_ERROR_REASON, 0);
409 + snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->acp_error_stat, 0);
409 410 irq_flag = 1;
410 411 }
··· 433 430 const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
434 431 unsigned int base = desc->pgfsm_base;
435 432 unsigned int val;
433 + unsigned int acp_pgfsm_status_mask, acp_pgfsm_cntl_mask;
436 434 int ret;
437 435
438 436 val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, base + PGFSM_STATUS_OFFSET);
··· 441 437 if (val == ACP_POWERED_ON)
442 438 return 0;
443 439
444 - if (val & ACP_PGFSM_STATUS_MASK)
440 + switch (desc->rev) {
441 + case 3:
442 + case 5:
443 + acp_pgfsm_status_mask = ACP3X_PGFSM_STATUS_MASK;
444 + acp_pgfsm_cntl_mask = ACP3X_PGFSM_CNTL_POWER_ON_MASK;
445 + break;
446 + case 6:
447 + acp_pgfsm_status_mask = ACP6X_PGFSM_STATUS_MASK;
448 + acp_pgfsm_cntl_mask = ACP6X_PGFSM_CNTL_POWER_ON_MASK;
449 + break;
450 + default:
451 + return -EINVAL;
452 + }
453 +
454 + if (val & acp_pgfsm_status_mask)
445 455 snd_sof_dsp_write(sdev, ACP_DSP_BAR, base + PGFSM_CONTROL_OFFSET,
446 - ACP_PGFSM_CNTL_POWER_ON_MASK);
456 + acp_pgfsm_cntl_mask);
447 457
448 458 ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, base + PGFSM_STATUS_OFFSET, val,
449 459 !val, ACP_REG_POLL_INTERVAL, ACP_REG_POLL_TIMEOUT_US);
+7 -2
sound/soc/sof/amd/acp.h
··· 25 25 #define ACP_REG_POLL_TIMEOUT_US 2000
26 26 #define ACP_DMA_COMPLETE_TIMEOUT_US 5000
27 27
28 - #define ACP_PGFSM_CNTL_POWER_ON_MASK 0x01
29 - #define ACP_PGFSM_STATUS_MASK 0x03
28 + #define ACP3X_PGFSM_CNTL_POWER_ON_MASK 0x01
29 + #define ACP3X_PGFSM_STATUS_MASK 0x03
30 + #define ACP6X_PGFSM_CNTL_POWER_ON_MASK 0x07
31 + #define ACP6X_PGFSM_STATUS_MASK 0x0F
32 +
30 33 #define ACP_POWERED_ON 0x00
31 34 #define ACP_ASSERT_RESET 0x01
32 35 #define ACP_RELEASE_RESET 0x00
··· 206 203 u32 probe_reg_offset;
207 204 u32 reg_start_addr;
208 205 u32 reg_end_addr;
206 + u32 acp_error_stat;
207 + u32 acp_sw0_i2s_err_reason;
209 208 u32 sdw_max_link_count;
210 209 u64 sdw_acpi_dev_addr;
211 210 };
+2
sound/soc/sof/amd/pci-acp63.c
··· 35 35 .ext_intr_cntl = ACP6X_EXTERNAL_INTR_CNTL,
36 36 .ext_intr_stat = ACP6X_EXT_INTR_STAT,
37 37 .ext_intr_stat1 = ACP6X_EXT_INTR_STAT1,
38 + .acp_error_stat = ACP6X_ERROR_STATUS,
39 + .acp_sw0_i2s_err_reason = ACP6X_SW0_I2S_ERROR_REASON,
38 40 .dsp_intr_base = ACP6X_DSP_SW_INTR_BASE,
39 41 .sram_pte_offset = ACP6X_SRAM_PTE_OFFSET,
40 42 .hw_semaphore_offset = ACP6X_AXI2DAGB_SEM_0,
+2
sound/soc/sof/amd/pci-rmb.c
··· 33 33 .pgfsm_base = ACP6X_PGFSM_BASE,
34 34 .ext_intr_stat = ACP6X_EXT_INTR_STAT,
35 35 .dsp_intr_base = ACP6X_DSP_SW_INTR_BASE,
36 + .acp_error_stat = ACP6X_ERROR_STATUS,
37 + .acp_sw0_i2s_err_reason = ACP6X_SW0_I2S_ERROR_REASON,
36 38 .sram_pte_offset = ACP6X_SRAM_PTE_OFFSET,
37 39 .hw_semaphore_offset = ACP6X_AXI2DAGB_SEM_0,
38 40 .fusion_dsp_offset = ACP6X_DSP_FUSION_RUNSTALL,
+2
sound/soc/sof/amd/pci-rn.c
··· 33 33 .pgfsm_base = ACP3X_PGFSM_BASE,
34 34 .ext_intr_stat = ACP3X_EXT_INTR_STAT,
35 35 .dsp_intr_base = ACP3X_DSP_SW_INTR_BASE,
36 + .acp_error_stat = ACP3X_ERROR_STATUS,
37 + .acp_sw0_i2s_err_reason = ACP3X_SW_I2S_ERROR_REASON,
36 38 .sram_pte_offset = ACP3X_SRAM_PTE_OFFSET,
37 39 .hw_semaphore_offset = ACP3X_AXI2DAGB_SEM_0,
38 40 .acp_clkmux_sel = ACP3X_CLKMUX_SEL,
+3
sound/soc/sof/mediatek/mt8195/mt8195.c
··· 575 575 .compatible = "google,tomato",
576 576 .sof_tplg_filename = "sof-mt8195-mt6359-rt1019-rt5682.tplg"
577 577 }, {
578 + .compatible = "google,dojo",
579 + .sof_tplg_filename = "sof-mt8195-mt6359-max98390-rt5682.tplg"
580 + }, {
578 581 .compatible = "mediatek,mt8195",
579 582 .sof_tplg_filename = "sof-mt8195.tplg"
580 583 }, {
+2 -5
tools/testing/selftests/livepatch/test-livepatch.sh
··· 139 139 grep 'live patched' /proc/cmdline > /dev/kmsg
140 140 grep 'live patched' /proc/meminfo > /dev/kmsg
141 141
142 - mods=(/sys/kernel/livepatch/*)
143 - nmods=${#mods[@]}
144 - if [ "$nmods" -ne 1 ]; then
145 - die "Expecting only one moduled listed, found $nmods"
146 - fi
142 + loop_until 'mods=(/sys/kernel/livepatch/*); nmods=${#mods[@]}; [[ "$nmods" -eq 1 ]]' ||
143 + die "Expecting only one moduled listed, found $nmods"
147 144
148 145 # These modules were disabled by the atomic replace
149 146 for mod in $MOD_LIVEPATCH3 $MOD_LIVEPATCH2 $MOD_LIVEPATCH1; do
+4
tools/testing/selftests/net/forwarding/local_termination.sh
··· 571 571 cleanup()
572 572 {
573 573 pre_cleanup
574 +
575 + ip link set $h2 down
576 + ip link set $h1 down
577 +
574 578 vrf_cleanup
575 579 }
576 580
+3
tools/testing/selftests/net/forwarding/no_forwarding.sh
··· 233 233 {
234 234 pre_cleanup
235 235
236 + ip link set dev $swp2 down
237 + ip link set dev $swp1 down
238 +
236 239 h2_destroy
237 240 h1_destroy
238 241
+132 -32
tools/testing/selftests/net/mptcp/mptcp_join.sh
··· 420 420 fi
421 421 }
422 422
423 + start_events()
424 + {
425 + mptcp_lib_events "${ns1}" "${evts_ns1}" evts_ns1_pid
426 + mptcp_lib_events "${ns2}" "${evts_ns2}" evts_ns2_pid
427 + }
428 +
423 429 reset_with_events()
424 430 {
425 431 reset "${1}" || return 1
426 432
427 - mptcp_lib_events "${ns1}" "${evts_ns1}" evts_ns1_pid
428 - mptcp_lib_events "${ns2}" "${evts_ns2}" evts_ns2_pid
433 + start_events
429 434 }
430 435
431 436 reset_with_tcp_filter()
··· 1117 1112
1118 1113 print_check "sum"
1119 1114 count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtDataCsumErr")
1120 - if [ "$count" != "$csum_ns1" ]; then
1115 + if [ -n "$count" ] && [ "$count" != "$csum_ns1" ]; then
1121 1116 extra_msg+=" ns1=$count"
1122 1117 fi
1123 1118 if [ -z "$count" ]; then
1124 1119 print_skip
1125 1120 elif { [ "$count" != $csum_ns1 ] && [ $allow_multi_errors_ns1 -eq 0 ]; } ||
1126 - { [ "$count" -lt $csum_ns1 ] && [ $allow_multi_errors_ns1 -eq 1 ]; }; then
1121 + { [ "$count" -lt $csum_ns1 ] && [ $allow_multi_errors_ns1 -eq 1 ]; }; then
1127 1122 fail_test "got $count data checksum error[s] expected $csum_ns1"
1128 1123 else
1129 1124 print_ok
1130 1125 fi
1131 1126 print_check "csum"
1132 1127 count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtDataCsumErr")
1133 - if [ "$count" != "$csum_ns2" ]; then
1128 + if [ -n "$count" ] && [ "$count" != "$csum_ns2" ]; then
1134 1129 extra_msg+=" ns2=$count"
1135 1130 fi
1136 1131 if [ -z "$count" ]; then
1137 1132 print_skip
1138 1133 elif { [ "$count" != $csum_ns2 ] && [ $allow_multi_errors_ns2 -eq 0 ]; } ||
1139 - { [ "$count" -lt $csum_ns2 ] && [ $allow_multi_errors_ns2 -eq 1 ]; }; then
1134 + { [ "$count" -lt $csum_ns2 ] && [ $allow_multi_errors_ns2 -eq 1 ]; }; then
1140 1135 fail_test "got $count data checksum error[s] expected $csum_ns2"
1141 1136 else
1142 1137 print_ok
··· 1174 1169
1175 1170 print_check "ftx"
1176 1171 count=$(mptcp_lib_get_counter ${ns_tx} "MPTcpExtMPFailTx")
1177 - if [ "$count" != "$fail_tx" ]; then
1172 + if [ -n "$count" ] && [ "$count" != "$fail_tx" ]; then
1178 1173 extra_msg+=",tx=$count"
1179 1174 fi
1180 1175 if [ -z "$count" ]; then
1181 1176 print_skip
1182 1177 elif { [ "$count" != "$fail_tx" ] && [ $allow_tx_lost -eq 0 ]; } ||
1183 - { [ "$count" -gt "$fail_tx" ] && [ $allow_tx_lost -eq 1 ]; }; then
1178 + { [ "$count" -gt "$fail_tx" ] && [ $allow_tx_lost -eq 1 ]; }; then
1184 1179 fail_test "got $count MP_FAIL[s] TX expected $fail_tx"
1185 1180 else
1186 1181 print_ok
··· 1188 1183
1189 1184 print_check "failrx"
1190 1185 count=$(mptcp_lib_get_counter ${ns_rx} "MPTcpExtMPFailRx")
1191 - if [ "$count" != "$fail_rx" ]; then
1186 + if [ -n "$count" ] && [ "$count" != "$fail_rx" ]; then
1192 1187 extra_msg+=",rx=$count"
1193 1188 fi
1194 1189 if [ -z "$count" ]; then
1195 1190 print_skip
1196 1191 elif { [ "$count" != "$fail_rx" ] && [ $allow_rx_lost -eq 0 ]; } ||
1197 - { [ "$count" -gt "$fail_rx" ] && [ $allow_rx_lost -eq 1 ]; }; then
1192 + { [ "$count" -gt "$fail_rx" ] && [ $allow_rx_lost -eq 1 ]; }; then
1198 1193 fail_test "got $count MP_FAIL[s] RX expected $fail_rx"
1199 1194 else
1200 1195 print_ok
··· 3338 3333 fi
3339 3334 }
3340 3335
3336 + # $1: ns ; $2: event type ; $3: count
3337 + chk_evt_nr()
3338 + {
3339 + local ns=${1}
3340 + local evt_name="${2}"
3341 + local exp="${3}"
3342 +
3343 + local evts="${evts_ns1}"
3344 + local evt="${!evt_name}"
3345 + local count
3346 +
3347 + evt_name="${evt_name:16}" # without MPTCP_LIB_EVENT_
3348 + [ "${ns}" == "ns2" ] && evts="${evts_ns2}"
3349 +
3350 + print_check "event ${ns} ${evt_name} (${exp})"
3351 +
3352 + if [[ "${evt_name}" = "LISTENER_"* ]] &&
3353 + ! mptcp_lib_kallsyms_has "mptcp_event_pm_listener$"; then
3354 + print_skip "event not supported"
3355 + return
3356 + fi
3357 +
3358 + count=$(grep -cw "type:${evt}" "${evts}")
3359 + if [ "${count}" != "${exp}" ]; then
3360 + fail_test "got ${count} events, expected ${exp}"
3361 + else
3362 + print_ok
3363 + fi
3364 + }
3365 +
3341 3366 userspace_tests()
3342 3367 {
3343 3368 # userspace pm type prevents add_addr
··· 3464 3429 "signal"
3465 3430 userspace_pm_chk_get_addr "${ns1}" "10" "id 10 flags signal 10.0.2.1"
3466 3431 userspace_pm_chk_get_addr "${ns1}" "20" "id 20 flags signal 10.0.3.1"
3467 - userspace_pm_rm_addr $ns1 10
3468 3432 userspace_pm_rm_sf $ns1 "::ffff:10.0.2.1" $MPTCP_LIB_EVENT_SUB_ESTABLISHED
3469 3433 userspace_pm_chk_dump_addr "${ns1}" \
3470 - "id 20 flags signal 10.0.3.1" "after rm_addr 10"
3434 + "id 20 flags signal 10.0.3.1" "after rm_sf 10"
3471 3435 userspace_pm_rm_addr $ns1 20
3472 - userspace_pm_rm_sf $ns1 10.0.3.1 $MPTCP_LIB_EVENT_SUB_ESTABLISHED
3473 3436 userspace_pm_chk_dump_addr "${ns1}" "" "after rm_addr 20"
3474 - chk_rm_nr 2 2 invert
3437 + chk_rm_nr 1 1 invert
3475 3438 chk_mptcp_info subflows 0 subflows 0
3476 3439 chk_subflows_total 1 1
3477 3440 kill_events_pids
··· 3493 3460 "id 20 flags subflow 10.0.3.2" \
3494 3461 "subflow"
3495 3462 userspace_pm_chk_get_addr "${ns2}" "20" "id 20 flags subflow 10.0.3.2"
3496 - userspace_pm_rm_addr $ns2 20
3497 3463 userspace_pm_rm_sf $ns2 10.0.3.2 $MPTCP_LIB_EVENT_SUB_ESTABLISHED
3498 3464 userspace_pm_chk_dump_addr "${ns2}" \
3499 3465 "" \
3500 - "after rm_addr 20"
3466 + "after rm_sf 20"
3501 - chk_rm_nr 1 1
3467 + chk_rm_nr 0 1
3502 3468 chk_mptcp_info subflows 0 subflows 0
3503 3469 chk_subflows_total 1 1
3504 3470 kill_events_pids
··· 3607 3575
3608 3576 if reset_with_tcp_filter "delete and re-add" ns2 10.0.3.2 REJECT OUTPUT &&
3609 3577 mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
3610 - pm_nl_set_limits $ns1 0 2
3611 - pm_nl_set_limits $ns2 0 2
3578 + start_events
3579 + pm_nl_set_limits $ns1 0 3
3580 + pm_nl_set_limits $ns2 0 3
3581 + pm_nl_add_endpoint $ns2 10.0.1.2 id 1 dev ns2eth1 flags subflow
3612 3582 pm_nl_add_endpoint $ns2 10.0.2.2 id 2 dev ns2eth2 flags subflow
3613 - test_linkfail=4 speed=20 \
3583 + test_linkfail=4 speed=5 \
3614 3584 run_tests $ns1 $ns2 10.0.1.1 &
3615 3585 local tests_pid=$!
3616 3586
3617 3587 wait_mpj $ns2
3618 3588 pm_nl_check_endpoint "creation" \
3619 3589 $ns2 10.0.2.2 id 2 flags subflow dev ns2eth2
3620 - chk_subflow_nr "before delete" 2
3590 + chk_subflow_nr "before delete id 2" 2
3621 3591 chk_mptcp_info subflows 1 subflows 1
3622 3592
3623 3593 pm_nl_del_endpoint $ns2 2 10.0.2.2
3624 3594 sleep 0.5
3625 - chk_subflow_nr "after delete" 1
3595 + chk_subflow_nr "after delete id 2" 1
3626 3596 chk_mptcp_info subflows 0 subflows 0
3627 3597
3628 3598 pm_nl_add_endpoint $ns2 10.0.2.2 id 2 dev ns2eth2 flags subflow
3629 3599 wait_mpj $ns2
3630 - chk_subflow_nr "after re-add" 2
3600 + chk_subflow_nr "after re-add id 2" 2
3631 3601 chk_mptcp_info subflows 1 subflows 1
3632 3602
3633 3603 pm_nl_add_endpoint $ns2 10.0.3.2 id 3 flags subflow
··· 3644 3610 chk_subflow_nr "after no reject" 3
3645 3611 chk_mptcp_info subflows 2 subflows 2
3646 3612
3613 + local i
3614 + for i in $(seq 3); do
3615 + pm_nl_del_endpoint $ns2 1 10.0.1.2
3616 + sleep 0.5
3617 + chk_subflow_nr "after delete id 0 ($i)" 2
3618 + chk_mptcp_info subflows 2 subflows 2 # only decr for additional sf
3619 +
3620 + pm_nl_add_endpoint $ns2 10.0.1.2 id 1 dev ns2eth1 flags subflow
3621 + wait_mpj $ns2
3622 + chk_subflow_nr "after re-add id 0 ($i)" 3
3623 + chk_mptcp_info subflows 3 subflows 3
3624 + done
3625 +
3647 3626 mptcp_lib_kill_wait $tests_pid
3648 3627
3649 - chk_join_nr 3 3 3
3650 - chk_rm_nr 1 1
3628 + kill_events_pids
3629 + chk_evt_nr ns1 MPTCP_LIB_EVENT_LISTENER_CREATED 1
3630 + chk_evt_nr ns1 MPTCP_LIB_EVENT_CREATED 1
3631 + chk_evt_nr ns1 MPTCP_LIB_EVENT_ESTABLISHED 1
3632 + chk_evt_nr ns1 MPTCP_LIB_EVENT_ANNOUNCED 0
3633 + chk_evt_nr ns1 MPTCP_LIB_EVENT_REMOVED 4
3634 + chk_evt_nr ns1 MPTCP_LIB_EVENT_SUB_ESTABLISHED 6
3635 + chk_evt_nr ns1 MPTCP_LIB_EVENT_SUB_CLOSED 4
3636 +
3637 + chk_evt_nr ns2 MPTCP_LIB_EVENT_CREATED 1
3638 + chk_evt_nr ns2 MPTCP_LIB_EVENT_ESTABLISHED 1
3639 + chk_evt_nr ns2 MPTCP_LIB_EVENT_ANNOUNCED 0
3640 + chk_evt_nr ns2 MPTCP_LIB_EVENT_REMOVED 0
3641 + chk_evt_nr ns2 MPTCP_LIB_EVENT_SUB_ESTABLISHED 6
3642 + chk_evt_nr ns2 MPTCP_LIB_EVENT_SUB_CLOSED 5 # one has been closed before estab
3643 +
3644 + chk_join_nr 6 6 6
3645 + chk_rm_nr 4 4
3651 3646 fi
3652 3647
3653 3648 # remove and re-add
3654 - if reset "delete re-add signal" &&
3649 + if reset_with_events "delete re-add signal" &&
3655 3650 mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
3656 - pm_nl_set_limits $ns1 0 2
3657 - pm_nl_set_limits $ns2 2 2
3651 + pm_nl_set_limits $ns1 0 3
3652 + pm_nl_set_limits $ns2 3 3
3658 3653 pm_nl_add_endpoint $ns1 10.0.2.1 id 1 flags signal
3659 3654 # broadcast IP: no packet for this address will be received on ns1
3660 3655 pm_nl_add_endpoint $ns1 224.0.0.1 id 2 flags signal
3661 - test_linkfail=4 speed=20 \
3656 + pm_nl_add_endpoint $ns1 10.0.1.1 id 42 flags signal
3657 + test_linkfail=4 speed=5 \
3662 3658 run_tests $ns1 $ns2 10.0.1.1 &
3663 3659 local tests_pid=$!
3664 3660
··· 3709 3645 wait_mpj $ns2
3710 3646 chk_subflow_nr "after re-add" 3
3711 3647 chk_mptcp_info subflows 2 subflows 2
3648 +
3649 + pm_nl_del_endpoint $ns1 42 10.0.1.1
3650 + sleep 0.5
3651 + chk_subflow_nr "after delete ID 0" 2
3652 + chk_mptcp_info subflows 2 subflows 2
3653 +
3654 + pm_nl_add_endpoint $ns1 10.0.1.1 id 99 flags signal
3655 + wait_mpj $ns2
3656 + chk_subflow_nr "after re-add ID 0" 3
3657 + chk_mptcp_info subflows 3 subflows 3
3658 +
3659 + pm_nl_del_endpoint $ns1 99 10.0.1.1
3660 + sleep 0.5
3661 + chk_subflow_nr "after re-delete ID 0" 2
3662 + chk_mptcp_info subflows 2 subflows 2
3663 +
3664 + pm_nl_add_endpoint $ns1 10.0.1.1 id 88 flags signal
3665 + wait_mpj $ns2
3666 + chk_subflow_nr "after re-re-add ID 0" 3
3667 + chk_mptcp_info subflows 3 subflows 3
3712 3668 mptcp_lib_kill_wait $tests_pid
3713 3669
3714 - chk_join_nr 3 3 3
3715 - chk_add_nr 4 4
3716 - chk_rm_nr 2 1 invert
3670 + kill_events_pids
3671 + chk_evt_nr ns1 MPTCP_LIB_EVENT_LISTENER_CREATED 1
3672 + chk_evt_nr ns1 MPTCP_LIB_EVENT_CREATED 1
3673 + chk_evt_nr ns1 MPTCP_LIB_EVENT_ESTABLISHED 1
3674 + chk_evt_nr ns1 MPTCP_LIB_EVENT_ANNOUNCED 0
3675 + chk_evt_nr ns1 MPTCP_LIB_EVENT_REMOVED 0
3676 + chk_evt_nr ns1 MPTCP_LIB_EVENT_SUB_ESTABLISHED 5
3677 + chk_evt_nr ns1 MPTCP_LIB_EVENT_SUB_CLOSED 3
3678 +
3679 + chk_evt_nr ns2 MPTCP_LIB_EVENT_CREATED 1
3680 + chk_evt_nr ns2 MPTCP_LIB_EVENT_ESTABLISHED 1
3681 + chk_evt_nr ns2 MPTCP_LIB_EVENT_ANNOUNCED 6
3682 + chk_evt_nr ns2 MPTCP_LIB_EVENT_REMOVED 4
3683 + chk_evt_nr ns2 MPTCP_LIB_EVENT_SUB_ESTABLISHED 5
3684 + chk_evt_nr ns2 MPTCP_LIB_EVENT_SUB_CLOSED 3
3685 +
3686 + chk_join_nr 5 5 5
3687 + chk_add_nr 6 6
3688 + chk_rm_nr 4 3 invert
3717 3689 fi
3718 3690
3719 3691 # flush and re-add
+4
tools/testing/selftests/net/mptcp/mptcp_lib.sh
··· 12 12 readonly KSFT_TEST="${MPTCP_LIB_KSFT_TEST:-$(basename "${0}" .sh)}"
13 13
14 14 # These variables are used in some selftests, read-only
15 + declare -rx MPTCP_LIB_EVENT_CREATED=1 # MPTCP_EVENT_CREATED
16 + declare -rx MPTCP_LIB_EVENT_ESTABLISHED=2 # MPTCP_EVENT_ESTABLISHED
17 + declare -rx MPTCP_LIB_EVENT_CLOSED=3 # MPTCP_EVENT_CLOSED
15 18 declare -rx MPTCP_LIB_EVENT_ANNOUNCED=6 # MPTCP_EVENT_ANNOUNCED
16 19 declare -rx MPTCP_LIB_EVENT_REMOVED=7 # MPTCP_EVENT_REMOVED
17 20 declare -rx MPTCP_LIB_EVENT_SUB_ESTABLISHED=10 # MPTCP_EVENT_SUB_ESTABLISHED
18 21 declare -rx MPTCP_LIB_EVENT_SUB_CLOSED=11 # MPTCP_EVENT_SUB_CLOSED
22 + declare -rx MPTCP_LIB_EVENT_SUB_PRIORITY=13 # MPTCP_EVENT_SUB_PRIORITY
19 23 declare -rx MPTCP_LIB_EVENT_LISTENER_CREATED=15 # MPTCP_EVENT_LISTENER_CREATED
20 24 declare -rx MPTCP_LIB_EVENT_LISTENER_CLOSED=16 # MPTCP_EVENT_LISTENER_CLOSED
21 25