Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR.

Conflicts:

net/sched/sch_taprio.c
d636fc5dd692 ("net: sched: add rcu annotations around qdisc->qdisc_sleeping")
dced11ef84fb ("net/sched: taprio: don't overwrite "sch" variable in taprio_dump_class_stats()")

net/ipv4/sysctl_net_ipv4.c
e209fee4118f ("net/ipv4: ping_group_range: allow GID from 2147483648 to 4294967294")
ccce324dabfe ("tcp: make the first N SYN RTO backoffs linear")
https://lore.kernel.org/all/20230605100816.08d41a7b@canb.auug.org.au/

No adjacent changes.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+2819 -1355
+1 -1
Documentation/devicetree/bindings/fpga/lattice,sysconfig.yaml
··· 7 7 title: Lattice Slave SPI sysCONFIG FPGA manager 8 8 9 9 maintainers: 10 - - Ivan Bornyakov <i.bornyakov@metrotek.ru> 10 + - Vladimir Georgiev <v.georgiev@metrotek.ru> 11 11 12 12 description: | 13 13 Lattice sysCONFIG port, which is used for FPGA configuration, among others,
+1 -1
Documentation/devicetree/bindings/fpga/microchip,mpf-spi-fpga-mgr.yaml
··· 7 7 title: Microchip Polarfire FPGA manager. 8 8 9 9 maintainers: 10 - - Ivan Bornyakov <i.bornyakov@metrotek.ru> 10 + - Vladimir Georgiev <v.georgiev@metrotek.ru> 11 11 12 12 description: 13 13 Device Tree Bindings for Microchip Polarfire FPGA Manager using slave SPI to
+7
Documentation/devicetree/bindings/iio/adc/nxp,imx8qxp-adc.yaml
··· 39 39 power-domains: 40 40 maxItems: 1 41 41 42 + vref-supply: 43 + description: | 44 + External ADC reference voltage supply on VREFH pad. If VERID[MVI] is 45 + set, there are additional, internal reference voltages selectable. 46 + VREFH1 is always from VREFH pad. 47 + 42 48 "#io-channel-cells": 43 49 const: 1 44 50 ··· 78 72 assigned-clocks = <&clk IMX_SC_R_ADC_0>; 79 73 assigned-clock-rates = <24000000>; 80 74 power-domains = <&pd IMX_SC_R_ADC_0>; 75 + vref-supply = <&reg_1v8>; 81 76 #io-channel-cells = <1>; 82 77 }; 83 78 };
+1 -1
Documentation/devicetree/bindings/iio/adc/renesas,rcar-gyroadc.yaml
··· 90 90 of the MAX chips to the GyroADC, while MISO line of each Maxim 91 91 ADC connects to a shared input pin of the GyroADC. 92 92 enum: 93 - - adi,7476 93 + - adi,ad7476 94 94 - fujitsu,mb88101a 95 95 - maxim,max1162 96 96 - maxim,max11100
+1
Documentation/devicetree/bindings/serial/8250_omap.yaml
··· 70 70 dsr-gpios: true 71 71 rng-gpios: true 72 72 dcd-gpios: true 73 + rs485-rts-active-high: true 73 74 rts-gpio: true 74 75 power-domains: true 75 76 clock-frequency: true
+1 -1
Documentation/devicetree/bindings/usb/snps,dwc3.yaml
··· 287 287 description: 288 288 High-Speed PHY interface selection between UTMI+ and ULPI when the 289 289 DWC_USB3_HSPHY_INTERFACE has value 3. 290 - $ref: /schemas/types.yaml#/definitions/uint8 290 + $ref: /schemas/types.yaml#/definitions/string 291 291 enum: [utmi, ulpi] 292 292 293 293 snps,quirk-frame-length-adjustment:
+19
Documentation/mm/page_table_check.rst
··· 52 52 53 53 Optionally, build kernel with PAGE_TABLE_CHECK_ENFORCED in order to have page 54 54 table support without extra kernel parameter. 55 + 56 + Implementation notes 57 + ==================== 58 + 59 + We specifically decided not to use VMA information in order to avoid relying on 60 + MM states (except for limited "struct page" info). The page table check is a 61 + separate from Linux-MM state machine that verifies that the user accessible 62 + pages are not falsely shared. 63 + 64 + PAGE_TABLE_CHECK depends on EXCLUSIVE_SYSTEM_RAM. The reason is that without 65 + EXCLUSIVE_SYSTEM_RAM, users are allowed to map arbitrary physical memory 66 + regions into the userspace via /dev/mem. At the same time, pages may change 67 + their properties (e.g., from anonymous pages to named pages) while they are 68 + still being mapped in the userspace, leading to "corruption" detected by the 69 + page table check. 70 + 71 + Even with EXCLUSIVE_SYSTEM_RAM, I/O pages may be still allowed to be mapped via 72 + /dev/mem. However, these pages are always considered as named pages, so they 73 + won't break the logic used in the page table check.
+16 -16
Documentation/netlink/specs/ethtool.yaml
··· 223 223 name: tx-min-frag-size 224 224 type: u32 225 225 - 226 - name: tx-min-frag-size 226 + name: rx-min-frag-size 227 227 type: u32 228 228 - 229 229 name: verify-enabled ··· 294 294 name: master-slave-state 295 295 type: u8 296 296 - 297 - name: master-slave-lanes 297 + name: lanes 298 298 type: u32 299 299 - 300 300 name: rate-matching ··· 322 322 name: ext-substate 323 323 type: u8 324 324 - 325 - name: down-cnt 325 + name: ext-down-cnt 326 326 type: u32 327 327 - 328 328 name: debug ··· 577 577 name: phc-index 578 578 type: u32 579 579 - 580 - name: cable-test-nft-nest-result 580 + name: cable-test-ntf-nest-result 581 581 attributes: 582 582 - 583 583 name: pair ··· 586 586 name: code 587 587 type: u8 588 588 - 589 - name: cable-test-nft-nest-fault-length 589 + name: cable-test-ntf-nest-fault-length 590 590 attributes: 591 591 - 592 592 name: pair ··· 595 595 name: cm 596 596 type: u32 597 597 - 598 - name: cable-test-nft-nest 598 + name: cable-test-ntf-nest 599 599 attributes: 600 600 - 601 601 name: result 602 602 type: nest 603 - nested-attributes: cable-test-nft-nest-result 603 + nested-attributes: cable-test-ntf-nest-result 604 604 - 605 605 name: fault-length 606 606 type: nest 607 - nested-attributes: cable-test-nft-nest-fault-length 607 + nested-attributes: cable-test-ntf-nest-fault-length 608 608 - 609 609 name: cable-test 610 610 attributes: ··· 618 618 - 619 619 name: nest 620 620 type: nest 621 - nested-attributes: cable-test-nft-nest 621 + nested-attributes: cable-test-ntf-nest 622 622 - 623 623 name: cable-test-tdr-cfg 624 624 attributes: ··· 776 776 name: hist-bkt-hi 777 777 type: u32 778 778 - 779 - name: hist-bkt-val 779 + name: hist-val 780 780 type: u64 781 781 - 782 782 name: stats ··· 965 965 - duplex 966 966 - master-slave-cfg 967 967 - master-slave-state 968 - - master-slave-lanes 968 + - lanes 969 969 - rate-matching 970 970 dump: *linkmodes-get-op 971 971 - ··· 999 999 - sqi-max 1000 1000 - ext-state 1001 1001 - ext-substate 1002 
- - down-cnt 1002 + - ext-down-cnt 1003 1003 dump: *linkstate-get-op 1004 1004 - 1005 1005 name: debug-get ··· 1351 1351 reply: 1352 1352 attributes: 1353 1353 - header 1354 - - cable-test-nft-nest 1354 + - cable-test-ntf-nest 1355 1355 - 1356 1356 name: cable-test-tdr-act 1357 1357 doc: Cable test TDR. ··· 1539 1539 - hkey 1540 1540 dump: *rss-get-op 1541 1541 - 1542 - name: plca-get 1542 + name: plca-get-cfg 1543 1543 doc: Get PLCA params. 1544 1544 1545 1545 attribute-set: plca ··· 1561 1561 - burst-tmr 1562 1562 dump: *plca-get-op 1563 1563 - 1564 - name: plca-set 1564 + name: plca-set-cfg 1565 1565 doc: Set PLCA params. 1566 1566 1567 1567 attribute-set: plca ··· 1585 1585 - 1586 1586 name: plca-ntf 1587 1587 doc: Notification for change in PLCA params. 1588 - notify: plca-get 1588 + notify: plca-get-cfg 1589 1589 - 1590 1590 name: mm-get 1591 1591 doc: Get MAC Merge configuration and state
+2 -2
Documentation/networking/ip-sysctl.rst
··· 1363 1363 Restrict ICMP_PROTO datagram sockets to users in the group range. 1364 1364 The default is "1 0", meaning, that nobody (not even root) may 1365 1365 create ping sockets. Setting it to "100 100" would grant permissions 1366 - to the single group. "0 4294967295" would enable it for the world, "100 1367 - 4294967295" would enable it for the users, but not daemons. 1366 + to the single group. "0 4294967294" would enable it for the world, "100 1367 + 4294967294" would enable it for the users, but not daemons. 1368 1368 1369 1369 tcp_early_demux - BOOLEAN 1370 1370 Enable early demux for established TCP sockets.
+13 -5
MAINTAINERS
··· 1594 1594 1595 1595 ARASAN NAND CONTROLLER DRIVER 1596 1596 M: Miquel Raynal <miquel.raynal@bootlin.com> 1597 - M: Naga Sureshkumar Relli <nagasure@xilinx.com> 1597 + R: Michal Simek <michal.simek@amd.com> 1598 1598 L: linux-mtd@lists.infradead.org 1599 1599 S: Maintained 1600 1600 F: Documentation/devicetree/bindings/mtd/arasan,nand-controller.yaml ··· 1757 1757 1758 1758 ARM PRIMECELL PL35X NAND CONTROLLER DRIVER 1759 1759 M: Miquel Raynal <miquel.raynal@bootlin.com> 1760 - M: Naga Sureshkumar Relli <nagasure@xilinx.com> 1760 + R: Michal Simek <michal.simek@amd.com> 1761 1761 L: linux-mtd@lists.infradead.org 1762 1762 S: Maintained 1763 1763 F: Documentation/devicetree/bindings/mtd/arm,pl353-nand-r2p1.yaml ··· 1765 1765 1766 1766 ARM PRIMECELL PL35X SMC DRIVER 1767 1767 M: Miquel Raynal <miquel.raynal@bootlin.com> 1768 - M: Naga Sureshkumar Relli <nagasure@xilinx.com> 1768 + R: Michal Simek <michal.simek@amd.com> 1769 1769 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1770 1770 S: Maintained 1771 1771 F: Documentation/devicetree/bindings/memory-controllers/arm,pl35x-smc.yaml ··· 5720 5720 F: include/linux/tfrc.h 5721 5721 F: include/uapi/linux/dccp.h 5722 5722 F: net/dccp/ 5723 + 5724 + DEBUGOBJECTS: 5725 + M: Thomas Gleixner <tglx@linutronix.de> 5726 + L: linux-kernel@vger.kernel.org 5727 + S: Maintained 5728 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git core/debugobjects 5729 + F: lib/debugobjects.c 5730 + F: include/linux/debugobjects.h 5723 5731 5724 5732 DECSTATION PLATFORM SUPPORT 5725 5733 M: "Maciej W. 
Rozycki" <macro@orcam.me.uk> ··· 10122 10114 F: Documentation/process/kernel-docs.rst 10123 10115 10124 10116 INDUSTRY PACK SUBSYSTEM (IPACK) 10125 - M: Samuel Iglesias Gonsalvez <siglesias@igalia.com> 10117 + M: Vaibhav Gupta <vaibhavgupta40@gmail.com> 10126 10118 M: Jens Taprogge <jens.taprogge@taprogge.org> 10127 10119 M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 10128 10120 L: industrypack-devel@lists.sourceforge.net ··· 13850 13842 13851 13843 MICROCHIP POLARFIRE FPGA DRIVERS 13852 13844 M: Conor Dooley <conor.dooley@microchip.com> 13853 - R: Ivan Bornyakov <i.bornyakov@metrotek.ru> 13845 + R: Vladimir Georgiev <v.georgiev@metrotek.ru> 13854 13846 L: linux-fpga@vger.kernel.org 13855 13847 S: Supported 13856 13848 F: Documentation/devicetree/bindings/fpga/microchip,mpf-spi-fpga-mgr.yaml
+1 -1
Makefile
··· 2 2 VERSION = 6 3 3 PATCHLEVEL = 4 4 4 SUBLEVEL = 0 5 - EXTRAVERSION = -rc4 5 + EXTRAVERSION = -rc5 6 6 NAME = Hurr durr I'ma ninja sloth 7 7 8 8 # *DOCUMENTATION*
+3 -3
arch/arm64/include/asm/kvm_pgtable.h
··· 632 632 * 633 633 * The walker will walk the page-table entries corresponding to the input 634 634 * address range specified, visiting entries according to the walker flags. 635 - * Invalid entries are treated as leaf entries. Leaf entries are reloaded 636 - * after invoking the walker callback, allowing the walker to descend into 637 - * a newly installed table. 635 + * Invalid entries are treated as leaf entries. The visited page table entry is 636 + * reloaded after invoking the walker callback, allowing the walker to descend 637 + * into a newly installed table. 638 638 * 639 639 * Returning a negative error code from the walker callback function will 640 640 * terminate the walk immediately with the same error code.
+6
arch/arm64/include/asm/sysreg.h
··· 115 115 #define SB_BARRIER_INSN __SYS_BARRIER_INSN(0, 7, 31) 116 116 117 117 #define SYS_DC_ISW sys_insn(1, 0, 7, 6, 2) 118 + #define SYS_DC_IGSW sys_insn(1, 0, 7, 6, 4) 119 + #define SYS_DC_IGDSW sys_insn(1, 0, 7, 6, 6) 118 120 #define SYS_DC_CSW sys_insn(1, 0, 7, 10, 2) 121 + #define SYS_DC_CGSW sys_insn(1, 0, 7, 10, 4) 122 + #define SYS_DC_CGDSW sys_insn(1, 0, 7, 10, 6) 119 123 #define SYS_DC_CISW sys_insn(1, 0, 7, 14, 2) 124 + #define SYS_DC_CIGSW sys_insn(1, 0, 7, 14, 4) 125 + #define SYS_DC_CIGDSW sys_insn(1, 0, 7, 14, 6) 120 126 121 127 /* 122 128 * Automatically generated definitions for system registers, the
+6 -2
arch/arm64/kvm/hyp/include/hyp/switch.h
··· 412 412 return false; 413 413 } 414 414 415 - static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code) 415 + static bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu, u64 *exit_code) 416 416 { 417 417 if (!__populate_fault_info(vcpu)) 418 418 return true; 419 419 420 420 return false; 421 421 } 422 + static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code) 423 + __alias(kvm_hyp_handle_memory_fault); 424 + static bool kvm_hyp_handle_watchpt_low(struct kvm_vcpu *vcpu, u64 *exit_code) 425 + __alias(kvm_hyp_handle_memory_fault); 422 426 423 427 static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code) 424 428 { 425 - if (!__populate_fault_info(vcpu)) 429 + if (kvm_hyp_handle_memory_fault(vcpu, exit_code)) 426 430 return true; 427 431 428 432 if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
+7 -7
arch/arm64/kvm/hyp/nvhe/mem_protect.c
··· 575 575 576 576 struct check_walk_data { 577 577 enum pkvm_page_state desired; 578 - enum pkvm_page_state (*get_page_state)(kvm_pte_t pte); 578 + enum pkvm_page_state (*get_page_state)(kvm_pte_t pte, u64 addr); 579 579 }; 580 580 581 581 static int __check_page_state_visitor(const struct kvm_pgtable_visit_ctx *ctx, ··· 583 583 { 584 584 struct check_walk_data *d = ctx->arg; 585 585 586 - if (kvm_pte_valid(ctx->old) && !addr_is_allowed_memory(kvm_pte_to_phys(ctx->old))) 587 - return -EINVAL; 588 - 589 - return d->get_page_state(ctx->old) == d->desired ? 0 : -EPERM; 586 + return d->get_page_state(ctx->old, ctx->addr) == d->desired ? 0 : -EPERM; 590 587 } 591 588 592 589 static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size, ··· 598 601 return kvm_pgtable_walk(pgt, addr, size, &walker); 599 602 } 600 603 601 - static enum pkvm_page_state host_get_page_state(kvm_pte_t pte) 604 + static enum pkvm_page_state host_get_page_state(kvm_pte_t pte, u64 addr) 602 605 { 606 + if (!addr_is_allowed_memory(addr)) 607 + return PKVM_NOPAGE; 608 + 603 609 if (!kvm_pte_valid(pte) && pte) 604 610 return PKVM_NOPAGE; 605 611 ··· 709 709 return host_stage2_set_owner_locked(addr, size, host_id); 710 710 } 711 711 712 - static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte) 712 + static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte, u64 addr) 713 713 { 714 714 if (!kvm_pte_valid(pte)) 715 715 return PKVM_NOPAGE;
+2
arch/arm64/kvm/hyp/nvhe/switch.c
··· 186 186 [ESR_ELx_EC_FP_ASIMD] = kvm_hyp_handle_fpsimd, 187 187 [ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low, 188 188 [ESR_ELx_EC_DABT_LOW] = kvm_hyp_handle_dabt_low, 189 + [ESR_ELx_EC_WATCHPT_LOW] = kvm_hyp_handle_watchpt_low, 189 190 [ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth, 190 191 }; 191 192 ··· 197 196 [ESR_ELx_EC_FP_ASIMD] = kvm_hyp_handle_fpsimd, 198 197 [ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low, 199 198 [ESR_ELx_EC_DABT_LOW] = kvm_hyp_handle_dabt_low, 199 + [ESR_ELx_EC_WATCHPT_LOW] = kvm_hyp_handle_watchpt_low, 200 200 [ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth, 201 201 }; 202 202
+16 -1
arch/arm64/kvm/hyp/pgtable.c
··· 209 209 .flags = flags, 210 210 }; 211 211 int ret = 0; 212 + bool reload = false; 212 213 kvm_pteref_t childp; 213 214 bool table = kvm_pte_table(ctx.old, level); 214 215 215 - if (table && (ctx.flags & KVM_PGTABLE_WALK_TABLE_PRE)) 216 + if (table && (ctx.flags & KVM_PGTABLE_WALK_TABLE_PRE)) { 216 217 ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_TABLE_PRE); 218 + reload = true; 219 + } 217 220 218 221 if (!table && (ctx.flags & KVM_PGTABLE_WALK_LEAF)) { 219 222 ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_LEAF); 223 + reload = true; 224 + } 225 + 226 + /* 227 + * Reload the page table after invoking the walker callback for leaf 228 + * entries or after pre-order traversal, to allow the walker to descend 229 + * into a newly installed or replaced table. 230 + */ 231 + if (reload) { 220 232 ctx.old = READ_ONCE(*ptep); 221 233 table = kvm_pte_table(ctx.old, level); 222 234 } ··· 1332 1320 }; 1333 1321 1334 1322 WARN_ON(__kvm_pgtable_walk(&data, mm_ops, ptep, level + 1)); 1323 + 1324 + WARN_ON(mm_ops->page_count(pgtable) != 1); 1325 + mm_ops->put_page(pgtable); 1335 1326 }
+1
arch/arm64/kvm/hyp/vhe/switch.c
··· 110 110 [ESR_ELx_EC_FP_ASIMD] = kvm_hyp_handle_fpsimd, 111 111 [ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low, 112 112 [ESR_ELx_EC_DABT_LOW] = kvm_hyp_handle_dabt_low, 113 + [ESR_ELx_EC_WATCHPT_LOW] = kvm_hyp_handle_watchpt_low, 113 114 [ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth, 114 115 }; 115 116
+23 -35
arch/arm64/kvm/pmu-emul.c
··· 694 694 695 695 static struct arm_pmu *kvm_pmu_probe_armpmu(void) 696 696 { 697 - struct perf_event_attr attr = { }; 698 - struct perf_event *event; 699 - struct arm_pmu *pmu = NULL; 697 + struct arm_pmu *tmp, *pmu = NULL; 698 + struct arm_pmu_entry *entry; 699 + int cpu; 700 700 701 - /* 702 - * Create a dummy event that only counts user cycles. As we'll never 703 - * leave this function with the event being live, it will never 704 - * count anything. But it allows us to probe some of the PMU 705 - * details. Yes, this is terrible. 706 - */ 707 - attr.type = PERF_TYPE_RAW; 708 - attr.size = sizeof(attr); 709 - attr.pinned = 1; 710 - attr.disabled = 0; 711 - attr.exclude_user = 0; 712 - attr.exclude_kernel = 1; 713 - attr.exclude_hv = 1; 714 - attr.exclude_host = 1; 715 - attr.config = ARMV8_PMUV3_PERFCTR_CPU_CYCLES; 716 - attr.sample_period = GENMASK(63, 0); 701 + mutex_lock(&arm_pmus_lock); 717 702 718 - event = perf_event_create_kernel_counter(&attr, -1, current, 719 - kvm_pmu_perf_overflow, &attr); 703 + cpu = smp_processor_id(); 704 + list_for_each_entry(entry, &arm_pmus, entry) { 705 + tmp = entry->arm_pmu; 720 706 721 - if (IS_ERR(event)) { 722 - pr_err_once("kvm: pmu event creation failed %ld\n", 723 - PTR_ERR(event)); 724 - return NULL; 707 + if (cpumask_test_cpu(cpu, &tmp->supported_cpus)) { 708 + pmu = tmp; 709 + break; 710 + } 725 711 } 726 712 727 - if (event->pmu) { 728 - pmu = to_arm_pmu(event->pmu); 729 - if (pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_NI || 730 - pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF) 731 - pmu = NULL; 732 - } 733 - 734 - perf_event_disable(event); 735 - perf_event_release_kernel(event); 713 + mutex_unlock(&arm_pmus_lock); 736 714 737 715 return pmu; 738 716 } ··· 890 912 return -EBUSY; 891 913 892 914 if (!kvm->arch.arm_pmu) { 893 - /* No PMU set, get the default one */ 915 + /* 916 + * No PMU set, get the default one. 
917 + * 918 + * The observant among you will notice that the supported_cpus 919 + * mask does not get updated for the default PMU even though it 920 + * is quite possible the selected instance supports only a 921 + * subset of cores in the system. This is intentional, and 922 + * upholds the preexisting behavior on heterogeneous systems 923 + * where vCPUs can be scheduled on any core but the guest 924 + * counters could stop working. 925 + */ 894 926 kvm->arch.arm_pmu = kvm_pmu_probe_armpmu(); 895 927 if (!kvm->arch.arm_pmu) 896 928 return -ENODEV;
+19
arch/arm64/kvm/sys_regs.c
··· 211 211 return true; 212 212 } 213 213 214 + static bool access_dcgsw(struct kvm_vcpu *vcpu, 215 + struct sys_reg_params *p, 216 + const struct sys_reg_desc *r) 217 + { 218 + if (!kvm_has_mte(vcpu->kvm)) { 219 + kvm_inject_undefined(vcpu); 220 + return false; 221 + } 222 + 223 + /* Treat MTE S/W ops as we treat the classic ones: with contempt */ 224 + return access_dcsw(vcpu, p, r); 225 + } 226 + 214 227 static void get_access_mask(const struct sys_reg_desc *r, u64 *mask, u64 *shift) 215 228 { 216 229 switch (r->aarch32_map) { ··· 1769 1756 */ 1770 1757 static const struct sys_reg_desc sys_reg_descs[] = { 1771 1758 { SYS_DESC(SYS_DC_ISW), access_dcsw }, 1759 + { SYS_DESC(SYS_DC_IGSW), access_dcgsw }, 1760 + { SYS_DESC(SYS_DC_IGDSW), access_dcgsw }, 1772 1761 { SYS_DESC(SYS_DC_CSW), access_dcsw }, 1762 + { SYS_DESC(SYS_DC_CGSW), access_dcgsw }, 1763 + { SYS_DESC(SYS_DC_CGDSW), access_dcgsw }, 1773 1764 { SYS_DESC(SYS_DC_CISW), access_dcsw }, 1765 + { SYS_DESC(SYS_DC_CIGSW), access_dcgsw }, 1766 + { SYS_DESC(SYS_DC_CIGDSW), access_dcgsw }, 1774 1767 1775 1768 DBG_BCR_BVR_WCR_WVR_EL1(0), 1776 1769 DBG_BCR_BVR_WCR_WVR_EL1(1),
+21 -6
arch/arm64/kvm/vgic/vgic-init.c
··· 235 235 * KVM io device for the redistributor that belongs to this VCPU. 236 236 */ 237 237 if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) { 238 - mutex_lock(&vcpu->kvm->arch.config_lock); 238 + mutex_lock(&vcpu->kvm->slots_lock); 239 239 ret = vgic_register_redist_iodev(vcpu); 240 - mutex_unlock(&vcpu->kvm->arch.config_lock); 240 + mutex_unlock(&vcpu->kvm->slots_lock); 241 241 } 242 242 return ret; 243 243 } ··· 406 406 407 407 /** 408 408 * vgic_lazy_init: Lazy init is only allowed if the GIC exposed to the guest 409 - * is a GICv2. A GICv3 must be explicitly initialized by the guest using the 409 + * is a GICv2. A GICv3 must be explicitly initialized by userspace using the 410 410 * KVM_DEV_ARM_VGIC_GRP_CTRL KVM_DEVICE group. 411 411 * @kvm: kvm struct pointer 412 412 */ ··· 446 446 int kvm_vgic_map_resources(struct kvm *kvm) 447 447 { 448 448 struct vgic_dist *dist = &kvm->arch.vgic; 449 + gpa_t dist_base; 449 450 int ret = 0; 450 451 451 452 if (likely(vgic_ready(kvm))) 452 453 return 0; 453 454 455 + mutex_lock(&kvm->slots_lock); 454 456 mutex_lock(&kvm->arch.config_lock); 455 457 if (vgic_ready(kvm)) 456 458 goto out; ··· 465 463 else 466 464 ret = vgic_v3_map_resources(kvm); 467 465 468 - if (ret) 466 + if (ret) { 469 467 __kvm_vgic_destroy(kvm); 470 - else 471 - dist->ready = true; 468 + goto out; 469 + } 470 + dist->ready = true; 471 + dist_base = dist->vgic_dist_base; 472 + mutex_unlock(&kvm->arch.config_lock); 473 + 474 + ret = vgic_register_dist_iodev(kvm, dist_base, 475 + kvm_vgic_global_state.type); 476 + if (ret) { 477 + kvm_err("Unable to register VGIC dist MMIO regions\n"); 478 + kvm_vgic_destroy(kvm); 479 + } 480 + mutex_unlock(&kvm->slots_lock); 481 + return ret; 472 482 473 483 out: 474 484 mutex_unlock(&kvm->arch.config_lock); 485 + mutex_unlock(&kvm->slots_lock); 475 486 return ret; 476 487 } 477 488
+10 -4
arch/arm64/kvm/vgic/vgic-its.c
··· 1936 1936 1937 1937 static int vgic_its_create(struct kvm_device *dev, u32 type) 1938 1938 { 1939 + int ret; 1939 1940 struct vgic_its *its; 1940 1941 1941 1942 if (type != KVM_DEV_TYPE_ARM_VGIC_ITS) ··· 1946 1945 if (!its) 1947 1946 return -ENOMEM; 1948 1947 1948 + mutex_lock(&dev->kvm->arch.config_lock); 1949 + 1949 1950 if (vgic_initialized(dev->kvm)) { 1950 - int ret = vgic_v4_init(dev->kvm); 1951 + ret = vgic_v4_init(dev->kvm); 1951 1952 if (ret < 0) { 1953 + mutex_unlock(&dev->kvm->arch.config_lock); 1952 1954 kfree(its); 1953 1955 return ret; 1954 1956 } ··· 1964 1960 1965 1961 /* Yep, even more trickery for lock ordering... */ 1966 1962 #ifdef CONFIG_LOCKDEP 1967 - mutex_lock(&dev->kvm->arch.config_lock); 1968 1963 mutex_lock(&its->cmd_lock); 1969 1964 mutex_lock(&its->its_lock); 1970 1965 mutex_unlock(&its->its_lock); 1971 1966 mutex_unlock(&its->cmd_lock); 1972 - mutex_unlock(&dev->kvm->arch.config_lock); 1973 1967 #endif 1974 1968 1975 1969 its->vgic_its_base = VGIC_ADDR_UNDEF; ··· 1988 1986 1989 1987 dev->private = its; 1990 1988 1991 - return vgic_its_set_abi(its, NR_ITS_ABIS - 1); 1989 + ret = vgic_its_set_abi(its, NR_ITS_ABIS - 1); 1990 + 1991 + mutex_unlock(&dev->kvm->arch.config_lock); 1992 + 1993 + return ret; 1992 1994 } 1993 1995 1994 1996 static void vgic_its_destroy(struct kvm_device *kvm_dev)
+8 -2
arch/arm64/kvm/vgic/vgic-kvm-device.c
··· 102 102 if (get_user(addr, uaddr)) 103 103 return -EFAULT; 104 104 105 - mutex_lock(&kvm->arch.config_lock); 105 + /* 106 + * Since we can't hold config_lock while registering the redistributor 107 + * iodevs, take the slots_lock immediately. 108 + */ 109 + mutex_lock(&kvm->slots_lock); 106 110 switch (attr->attr) { 107 111 case KVM_VGIC_V2_ADDR_TYPE_DIST: 108 112 r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2); ··· 186 182 if (r) 187 183 goto out; 188 184 185 + mutex_lock(&kvm->arch.config_lock); 189 186 if (write) { 190 187 r = vgic_check_iorange(kvm, *addr_ptr, addr, alignment, size); 191 188 if (!r) ··· 194 189 } else { 195 190 addr = *addr_ptr; 196 191 } 192 + mutex_unlock(&kvm->arch.config_lock); 197 193 198 194 out: 199 - mutex_unlock(&kvm->arch.config_lock); 195 + mutex_unlock(&kvm->slots_lock); 200 196 201 197 if (!r && !write) 202 198 r = put_user(addr, uaddr);
+21 -10
arch/arm64/kvm/vgic/vgic-mmio-v3.c
··· 769 769 struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev; 770 770 struct vgic_redist_region *rdreg; 771 771 gpa_t rd_base; 772 - int ret; 772 + int ret = 0; 773 + 774 + lockdep_assert_held(&kvm->slots_lock); 775 + mutex_lock(&kvm->arch.config_lock); 773 776 774 777 if (!IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr)) 775 - return 0; 778 + goto out_unlock; 776 779 777 780 /* 778 781 * We may be creating VCPUs before having set the base address for the ··· 785 782 */ 786 783 rdreg = vgic_v3_rdist_free_slot(&vgic->rd_regions); 787 784 if (!rdreg) 788 - return 0; 785 + goto out_unlock; 789 786 790 - if (!vgic_v3_check_base(kvm)) 791 - return -EINVAL; 787 + if (!vgic_v3_check_base(kvm)) { 788 + ret = -EINVAL; 789 + goto out_unlock; 790 + } 792 791 793 792 vgic_cpu->rdreg = rdreg; 794 793 vgic_cpu->rdreg_index = rdreg->free_index; ··· 804 799 rd_dev->nr_regions = ARRAY_SIZE(vgic_v3_rd_registers); 805 800 rd_dev->redist_vcpu = vcpu; 806 801 807 - mutex_lock(&kvm->slots_lock); 802 + mutex_unlock(&kvm->arch.config_lock); 803 + 808 804 ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, rd_base, 809 805 2 * SZ_64K, &rd_dev->dev); 810 - mutex_unlock(&kvm->slots_lock); 811 - 812 806 if (ret) 813 807 return ret; 814 808 809 + /* Protected by slots_lock */ 815 810 rdreg->free_index++; 816 811 return 0; 812 + 813 + out_unlock: 814 + mutex_unlock(&kvm->arch.config_lock); 815 + return ret; 817 816 } 818 817 819 818 static void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu) ··· 843 834 /* The current c failed, so iterate over the previous ones. 
*/ 844 835 int i; 845 836 846 - mutex_lock(&kvm->slots_lock); 847 837 for (i = 0; i < c; i++) { 848 838 vcpu = kvm_get_vcpu(kvm, i); 849 839 vgic_unregister_redist_iodev(vcpu); 850 840 } 851 - mutex_unlock(&kvm->slots_lock); 852 841 } 853 842 854 843 return ret; ··· 945 938 { 946 939 int ret; 947 940 941 + mutex_lock(&kvm->arch.config_lock); 948 942 ret = vgic_v3_alloc_redist_region(kvm, index, addr, count); 943 + mutex_unlock(&kvm->arch.config_lock); 949 944 if (ret) 950 945 return ret; 951 946 ··· 959 950 if (ret) { 960 951 struct vgic_redist_region *rdreg; 961 952 953 + mutex_lock(&kvm->arch.config_lock); 962 954 rdreg = vgic_v3_rdist_region_from_index(kvm, index); 963 955 vgic_v3_free_redist_region(rdreg); 956 + mutex_unlock(&kvm->arch.config_lock); 964 957 return ret; 965 958 } 966 959
+2 -7
arch/arm64/kvm/vgic/vgic-mmio.c
··· 1096 1096 enum vgic_type type) 1097 1097 { 1098 1098 struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev; 1099 - int ret = 0; 1100 1099 unsigned int len; 1101 1100 1102 1101 switch (type) { ··· 1113 1114 io_device->iodev_type = IODEV_DIST; 1114 1115 io_device->redist_vcpu = NULL; 1115 1116 1116 - mutex_lock(&kvm->slots_lock); 1117 - ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address, 1118 - len, &io_device->dev); 1119 - mutex_unlock(&kvm->slots_lock); 1120 - 1121 - return ret; 1117 + return kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address, 1118 + len, &io_device->dev); 1122 1119 }
-6
arch/arm64/kvm/vgic/vgic-v2.c
··· 312 312 return ret; 313 313 } 314 314 315 - ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V2); 316 - if (ret) { 317 - kvm_err("Unable to register VGIC MMIO regions\n"); 318 - return ret; 319 - } 320 - 321 315 if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) { 322 316 ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base, 323 317 kvm_vgic_global_state.vcpu_base,
-7
arch/arm64/kvm/vgic/vgic-v3.c
··· 539 539 { 540 540 struct vgic_dist *dist = &kvm->arch.vgic; 541 541 struct kvm_vcpu *vcpu; 542 - int ret = 0; 543 542 unsigned long c; 544 543 545 544 kvm_for_each_vcpu(c, vcpu, kvm) { ··· 566 567 */ 567 568 if (!vgic_initialized(kvm)) { 568 569 return -EBUSY; 569 - } 570 - 571 - ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3); 572 - if (ret) { 573 - kvm_err("Unable to register VGICv3 dist MMIO regions\n"); 574 - return ret; 575 570 } 576 571 577 572 if (kvm_vgic_global_state.has_gicv4_1)
+2 -1
arch/arm64/kvm/vgic/vgic-v4.c
··· 184 184 } 185 185 } 186 186 187 - /* Must be called with the kvm lock held */ 188 187 void vgic_v4_configure_vsgis(struct kvm *kvm) 189 188 { 190 189 struct vgic_dist *dist = &kvm->arch.vgic; 191 190 struct kvm_vcpu *vcpu; 192 191 unsigned long i; 192 + 193 + lockdep_assert_held(&kvm->arch.config_lock); 193 194 194 195 kvm_arm_halt_guest(kvm); 195 196
+5 -5
arch/powerpc/crypto/Makefile
··· 22 22 sha256-ppc-spe-y := sha256-spe-asm.o sha256-spe-glue.o 23 23 crc32c-vpmsum-y := crc32c-vpmsum_asm.o crc32c-vpmsum_glue.o 24 24 crct10dif-vpmsum-y := crct10dif-vpmsum_asm.o crct10dif-vpmsum_glue.o 25 - aes-gcm-p10-crypto-y := aes-gcm-p10-glue.o aes-gcm-p10.o ghashp8-ppc.o aesp8-ppc.o 25 + aes-gcm-p10-crypto-y := aes-gcm-p10-glue.o aes-gcm-p10.o ghashp10-ppc.o aesp10-ppc.o 26 26 27 27 quiet_cmd_perl = PERL $@ 28 28 cmd_perl = $(PERL) $< $(if $(CONFIG_CPU_LITTLE_ENDIAN), linux-ppc64le, linux-ppc64) > $@ 29 29 30 - targets += aesp8-ppc.S ghashp8-ppc.S 30 + targets += aesp10-ppc.S ghashp10-ppc.S 31 31 32 - $(obj)/aesp8-ppc.S $(obj)/ghashp8-ppc.S: $(obj)/%.S: $(src)/%.pl FORCE 32 + $(obj)/aesp10-ppc.S $(obj)/ghashp10-ppc.S: $(obj)/%.S: $(src)/%.pl FORCE 33 33 $(call if_changed,perl) 34 34 35 - OBJECT_FILES_NON_STANDARD_aesp8-ppc.o := y 36 - OBJECT_FILES_NON_STANDARD_ghashp8-ppc.o := y 35 + OBJECT_FILES_NON_STANDARD_aesp10-ppc.o := y 36 + OBJECT_FILES_NON_STANDARD_ghashp10-ppc.o := y
+9 -9
arch/powerpc/crypto/aes-gcm-p10-glue.c
··· 30 30 MODULE_LICENSE("GPL v2"); 31 31 MODULE_ALIAS_CRYPTO("aes"); 32 32 33 - asmlinkage int aes_p8_set_encrypt_key(const u8 *userKey, const int bits, 33 + asmlinkage int aes_p10_set_encrypt_key(const u8 *userKey, const int bits, 34 34 void *key); 35 - asmlinkage void aes_p8_encrypt(const u8 *in, u8 *out, const void *key); 35 + asmlinkage void aes_p10_encrypt(const u8 *in, u8 *out, const void *key); 36 36 asmlinkage void aes_p10_gcm_encrypt(u8 *in, u8 *out, size_t len, 37 37 void *rkey, u8 *iv, void *Xi); 38 38 asmlinkage void aes_p10_gcm_decrypt(u8 *in, u8 *out, size_t len, 39 39 void *rkey, u8 *iv, void *Xi); 40 40 asmlinkage void gcm_init_htable(unsigned char htable[256], unsigned char Xi[16]); 41 - asmlinkage void gcm_ghash_p8(unsigned char *Xi, unsigned char *Htable, 41 + asmlinkage void gcm_ghash_p10(unsigned char *Xi, unsigned char *Htable, 42 42 unsigned char *aad, unsigned int alen); 43 43 44 44 struct aes_key { ··· 93 93 gctx->aadLen = alen; 94 94 i = alen & ~0xf; 95 95 if (i) { 96 - gcm_ghash_p8(nXi, hash->Htable+32, aad, i); 96 + gcm_ghash_p10(nXi, hash->Htable+32, aad, i); 97 97 aad += i; 98 98 alen -= i; 99 99 } ··· 102 102 nXi[i] ^= aad[i]; 103 103 104 104 memset(gctx->aad_hash, 0, 16); 105 - gcm_ghash_p8(gctx->aad_hash, hash->Htable+32, nXi, 16); 105 + gcm_ghash_p10(gctx->aad_hash, hash->Htable+32, nXi, 16); 106 106 } else { 107 107 memcpy(gctx->aad_hash, nXi, 16); 108 108 } ··· 115 115 { 116 116 __be32 counter = cpu_to_be32(1); 117 117 118 - aes_p8_encrypt(hash->H, hash->H, rdkey); 118 + aes_p10_encrypt(hash->H, hash->H, rdkey); 119 119 set_subkey(hash->H); 120 120 gcm_init_htable(hash->Htable+32, hash->H); 121 121 ··· 126 126 /* 127 127 * Encrypt counter vector as iv tag and increment counter. 
128 128 */ 129 - aes_p8_encrypt(iv, gctx->ivtag, rdkey); 129 + aes_p10_encrypt(iv, gctx->ivtag, rdkey); 130 130 131 131 counter = cpu_to_be32(2); 132 132 *((__be32 *)(iv+12)) = counter; ··· 160 160 /* 161 161 * hash (AAD len and len) 162 162 */ 163 - gcm_ghash_p8(hash->Htable, hash->Htable+32, aclen, 16); 163 + gcm_ghash_p10(hash->Htable, hash->Htable+32, aclen, 16); 164 164 165 165 for (i = 0; i < 16; i++) 166 166 hash->Htable[i] ^= gctx->ivtag[i]; ··· 192 192 int ret; 193 193 194 194 vsx_begin(); 195 - ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); 195 + ret = aes_p10_set_encrypt_key(key, keylen * 8, &ctx->enc_key); 196 196 vsx_end(); 197 197 198 198 return ret ? -EINVAL : 0;
+1 -1
arch/powerpc/crypto/aesp8-ppc.pl arch/powerpc/crypto/aesp10-ppc.pl
··· 110 110 open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!"; 111 111 112 112 $FRAME=8*$SIZE_T; 113 - $prefix="aes_p8"; 113 + $prefix="aes_p10"; 114 114 115 115 $sp="r1"; 116 116 $vrsave="r12";
+6 -6
arch/powerpc/crypto/ghashp8-ppc.pl arch/powerpc/crypto/ghashp10-ppc.pl
··· 64 64 65 65 .text 66 66 67 - .globl .gcm_init_p8 67 + .globl .gcm_init_p10 68 68 lis r0,0xfff0 69 69 li r8,0x10 70 70 mfspr $vrsave,256 ··· 110 110 .long 0 111 111 .byte 0,12,0x14,0,0,0,2,0 112 112 .long 0 113 - .size .gcm_init_p8,.-.gcm_init_p8 113 + .size .gcm_init_p10,.-.gcm_init_p10 114 114 115 115 .globl .gcm_init_htable 116 116 lis r0,0xfff0 ··· 237 237 .long 0 238 238 .size .gcm_init_htable,.-.gcm_init_htable 239 239 240 - .globl .gcm_gmult_p8 240 + .globl .gcm_gmult_p10 241 241 lis r0,0xfff8 242 242 li r8,0x10 243 243 mfspr $vrsave,256 ··· 283 283 .long 0 284 284 .byte 0,12,0x14,0,0,0,2,0 285 285 .long 0 286 - .size .gcm_gmult_p8,.-.gcm_gmult_p8 286 + .size .gcm_gmult_p10,.-.gcm_gmult_p10 287 287 288 - .globl .gcm_ghash_p8 288 + .globl .gcm_ghash_p10 289 289 lis r0,0xfff8 290 290 li r8,0x10 291 291 mfspr $vrsave,256 ··· 350 350 .long 0 351 351 .byte 0,12,0x14,0,0,0,4,0 352 352 .long 0 353 - .size .gcm_ghash_p8,.-.gcm_ghash_p8 353 + .size .gcm_ghash_p10,.-.gcm_ghash_p10 354 354 355 355 .asciz "GHASH for PowerISA 2.07, CRYPTOGAMS by <appro\@openssl.org>" 356 356 .align 2
+11 -2
arch/powerpc/platforms/pseries/iommu.c
··· 317 317 static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages) 318 318 { 319 319 u64 rc; 320 + long rpages = npages; 321 + unsigned long limit; 320 322 321 323 if (!firmware_has_feature(FW_FEATURE_STUFF_TCE)) 322 324 return tce_free_pSeriesLP(tbl->it_index, tcenum, 323 325 tbl->it_page_shift, npages); 324 326 325 - rc = plpar_tce_stuff((u64)tbl->it_index, 326 - (u64)tcenum << tbl->it_page_shift, 0, npages); 327 + do { 328 + limit = min_t(unsigned long, rpages, 512); 329 + 330 + rc = plpar_tce_stuff((u64)tbl->it_index, 331 + (u64)tcenum << tbl->it_page_shift, 0, limit); 332 + 333 + rpages -= limit; 334 + tcenum += limit; 335 + } while (rpages > 0 && !rc); 327 336 328 337 if (rc && printk_ratelimit()) { 329 338 printk("tce_freemulti_pSeriesLP: plpar_tce_stuff failed\n");
+1 -1
arch/powerpc/xmon/xmon.c
··· 88 88 static unsigned long nidump = 16; 89 89 static unsigned long ncsum = 4096; 90 90 static int termch; 91 - static char tmpstr[128]; 91 + static char tmpstr[KSYM_NAME_LEN]; 92 92 static int tracing_enabled; 93 93 94 94 static long bus_error_jmp[JMP_BUF_LEN];
+4 -1
arch/riscv/Kconfig
··· 799 799 800 800 source "kernel/power/Kconfig" 801 801 802 + # Hibernation is only possible on systems where the SBI implementation has 803 + # marked its reserved memory as not accessible from, or does not run 804 + # from the same memory as, Linux 802 805 config ARCH_HIBERNATION_POSSIBLE 803 - def_bool y 806 + def_bool NONPORTABLE 804 807 805 808 config ARCH_HIBERNATION_HEADER 806 809 def_bool HIBERNATION
+4
arch/riscv/errata/Makefile
··· 1 + ifdef CONFIG_RELOCATABLE 2 + KBUILD_CFLAGS += -fno-pie 3 + endif 4 + 1 5 obj-$(CONFIG_ERRATA_SIFIVE) += sifive/ 2 6 obj-$(CONFIG_ERRATA_THEAD) += thead/
+3
arch/riscv/include/asm/hugetlb.h
··· 36 36 unsigned long addr, pte_t *ptep, 37 37 pte_t pte, int dirty); 38 38 39 + #define __HAVE_ARCH_HUGE_PTEP_GET 40 + pte_t huge_ptep_get(pte_t *ptep); 41 + 39 42 pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags); 40 43 #define arch_make_huge_pte arch_make_huge_pte 41 44
+7
arch/riscv/include/asm/perf_event.h
··· 10 10 11 11 #include <linux/perf_event.h> 12 12 #define perf_arch_bpf_user_pt_regs(regs) (struct user_regs_struct *)regs 13 + 14 + #define perf_arch_fetch_caller_regs(regs, __ip) { \ 15 + (regs)->epc = (__ip); \ 16 + (regs)->s0 = (unsigned long) __builtin_frame_address(0); \ 17 + (regs)->sp = current_stack_pointer; \ 18 + (regs)->status = SR_PP; \ 19 + } 13 20 #endif /* _ASM_RISCV_PERF_EVENT_H */
+4
arch/riscv/kernel/Makefile
··· 23 23 CFLAGS_REMOVE_alternative.o = $(CC_FLAGS_FTRACE) 24 24 CFLAGS_REMOVE_cpufeature.o = $(CC_FLAGS_FTRACE) 25 25 endif 26 + ifdef CONFIG_RELOCATABLE 27 + CFLAGS_alternative.o += -fno-pie 28 + CFLAGS_cpufeature.o += -fno-pie 29 + endif 26 30 ifdef CONFIG_KASAN 27 31 KASAN_SANITIZE_alternative.o := n 28 32 KASAN_SANITIZE_cpufeature.o := n
+29 -1
arch/riscv/mm/hugetlbpage.c
··· 3 3 #include <linux/err.h> 4 4 5 5 #ifdef CONFIG_RISCV_ISA_SVNAPOT 6 + pte_t huge_ptep_get(pte_t *ptep) 7 + { 8 + unsigned long pte_num; 9 + int i; 10 + pte_t orig_pte = ptep_get(ptep); 11 + 12 + if (!pte_present(orig_pte) || !pte_napot(orig_pte)) 13 + return orig_pte; 14 + 15 + pte_num = napot_pte_num(napot_cont_order(orig_pte)); 16 + 17 + for (i = 0; i < pte_num; i++, ptep++) { 18 + pte_t pte = ptep_get(ptep); 19 + 20 + if (pte_dirty(pte)) 21 + orig_pte = pte_mkdirty(orig_pte); 22 + 23 + if (pte_young(pte)) 24 + orig_pte = pte_mkyoung(orig_pte); 25 + } 26 + 27 + return orig_pte; 28 + } 29 + 6 30 pte_t *huge_pte_alloc(struct mm_struct *mm, 7 31 struct vm_area_struct *vma, 8 32 unsigned long addr, ··· 242 218 { 243 219 pte_t pte = ptep_get(ptep); 244 220 unsigned long order; 221 + pte_t orig_pte; 245 222 int i, pte_num; 246 223 247 224 if (!pte_napot(pte)) { ··· 253 228 order = napot_cont_order(pte); 254 229 pte_num = napot_pte_num(order); 255 230 ptep = huge_pte_offset(mm, addr, napot_cont_size(order)); 231 + orig_pte = get_clear_contig_flush(mm, addr, ptep, pte_num); 232 + 233 + orig_pte = pte_wrprotect(orig_pte); 256 234 257 235 for (i = 0; i < pte_num; i++, addr += PAGE_SIZE, ptep++) 258 - ptep_set_wrprotect(mm, addr, ptep); 236 + set_pte_at(mm, addr, ptep, orig_pte); 259 237 } 260 238 261 239 pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
+1 -1
arch/riscv/mm/init.c
··· 922 922 static void __init create_fdt_early_page_table(uintptr_t fix_fdt_va, 923 923 uintptr_t dtb_pa) 924 924 { 925 + #ifndef CONFIG_BUILTIN_DTB 925 926 uintptr_t pa = dtb_pa & ~(PMD_SIZE - 1); 926 927 927 - #ifndef CONFIG_BUILTIN_DTB 928 928 /* Make sure the fdt fixmap address is always aligned on PMD size */ 929 929 BUILD_BUG_ON(FIX_FDT % (PMD_SIZE / PAGE_SIZE)); 930 930
+18 -2
arch/x86/kvm/lapic.c
··· 229 229 u32 physical_id; 230 230 231 231 /* 232 + * For simplicity, KVM always allocates enough space for all possible 233 + * xAPIC IDs. Yell, but don't kill the VM, as KVM can continue on 234 + * without the optimized map. 235 + */ 236 + if (WARN_ON_ONCE(xapic_id > new->max_apic_id)) 237 + return -EINVAL; 238 + 239 + /* 240 + * Bail if a vCPU was added and/or enabled its APIC between allocating 241 + * the map and doing the actual calculations for the map. Note, KVM 242 + * hardcodes the x2APIC ID to vcpu_id, i.e. there's no TOCTOU bug if 243 + * the compiler decides to reload x2apic_id after this check. 244 + */ 245 + if (x2apic_id > new->max_apic_id) 246 + return -E2BIG; 247 + 248 + /* 232 249 * Deliberately truncate the vCPU ID when detecting a mismatched APIC 233 250 * ID to avoid false positives if the vCPU ID, i.e. x2APIC ID, is a 234 251 * 32-bit value. Any unwanted aliasing due to truncation results will ··· 270 253 */ 271 254 if (vcpu->kvm->arch.x2apic_format) { 272 255 /* See also kvm_apic_match_physical_addr(). */ 273 - if ((apic_x2apic_mode(apic) || x2apic_id > 0xff) && 274 - x2apic_id <= new->max_apic_id) 256 + if (apic_x2apic_mode(apic) || x2apic_id > 0xff) 275 257 new->phys_map[x2apic_id] = apic; 276 258 277 259 if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id])
+4 -1
arch/x86/kvm/mmu/mmu.c
··· 7091 7091 */ 7092 7092 slot = NULL; 7093 7093 if (atomic_read(&kvm->nr_memslots_dirty_logging)) { 7094 - slot = gfn_to_memslot(kvm, sp->gfn); 7094 + struct kvm_memslots *slots; 7095 + 7096 + slots = kvm_memslots_for_spte_role(kvm, sp->role); 7097 + slot = __gfn_to_memslot(slots, sp->gfn); 7095 7098 WARN_ON_ONCE(!slot); 7096 7099 } 7097 7100
+1 -1
arch/x86/kvm/svm/svm.c
··· 3510 3510 if (!is_vnmi_enabled(svm)) 3511 3511 return false; 3512 3512 3513 - return !!(svm->vmcb->control.int_ctl & V_NMI_BLOCKING_MASK); 3513 + return !!(svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK); 3514 3514 } 3515 3515 3516 3516 static bool svm_set_vnmi_pending(struct kvm_vcpu *vcpu)
+3
arch/x86/kvm/x86.c
··· 10758 10758 exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED; 10759 10759 break; 10760 10760 } 10761 + 10762 + /* Note, VM-Exits that go down the "slow" path are accounted below. */ 10763 + ++vcpu->stat.exits; 10761 10764 } 10762 10765 10763 10766 /*
+2 -1
block/blk-settings.c
··· 915 915 void disk_set_zoned(struct gendisk *disk, enum blk_zoned_model model) 916 916 { 917 917 struct request_queue *q = disk->queue; 918 + unsigned int old_model = q->limits.zoned; 918 919 919 920 switch (model) { 920 921 case BLK_ZONED_HM: ··· 953 952 */ 954 953 blk_queue_zone_write_granularity(q, 955 954 queue_logical_block_size(q)); 956 - } else { 955 + } else if (old_model != BLK_ZONED_NONE) { 957 956 disk_clear_zone_settings(disk); 958 957 } 959 958 }
+21 -17
crypto/asymmetric_keys/public_key.c
··· 380 380 struct crypto_wait cwait; 381 381 struct crypto_akcipher *tfm; 382 382 struct akcipher_request *req; 383 - struct scatterlist src_sg[2]; 383 + struct scatterlist src_sg; 384 384 char alg_name[CRYPTO_MAX_ALG_NAME]; 385 - char *key, *ptr; 385 + char *buf, *ptr; 386 + size_t buf_len; 386 387 int ret; 387 388 388 389 pr_devel("==>%s()\n", __func__); ··· 421 420 if (!req) 422 421 goto error_free_tfm; 423 422 424 - key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen, 425 - GFP_KERNEL); 426 - if (!key) 423 + buf_len = max_t(size_t, pkey->keylen + sizeof(u32) * 2 + pkey->paramlen, 424 + sig->s_size + sig->digest_size); 425 + 426 + buf = kmalloc(buf_len, GFP_KERNEL); 427 + if (!buf) 427 428 goto error_free_req; 428 429 429 - memcpy(key, pkey->key, pkey->keylen); 430 - ptr = key + pkey->keylen; 430 + memcpy(buf, pkey->key, pkey->keylen); 431 + ptr = buf + pkey->keylen; 431 432 ptr = pkey_pack_u32(ptr, pkey->algo); 432 433 ptr = pkey_pack_u32(ptr, pkey->paramlen); 433 434 memcpy(ptr, pkey->params, pkey->paramlen); 434 435 435 436 if (pkey->key_is_private) 436 - ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen); 437 + ret = crypto_akcipher_set_priv_key(tfm, buf, pkey->keylen); 437 438 else 438 - ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen); 439 + ret = crypto_akcipher_set_pub_key(tfm, buf, pkey->keylen); 439 440 if (ret) 440 - goto error_free_key; 441 + goto error_free_buf; 441 442 442 443 if (strcmp(pkey->pkey_algo, "sm2") == 0 && sig->data_size) { 443 444 ret = cert_sig_digest_update(sig, tfm); 444 445 if (ret) 445 - goto error_free_key; 446 + goto error_free_buf; 446 447 } 447 448 448 - sg_init_table(src_sg, 2); 449 - sg_set_buf(&src_sg[0], sig->s, sig->s_size); 450 - sg_set_buf(&src_sg[1], sig->digest, sig->digest_size); 451 - akcipher_request_set_crypt(req, src_sg, NULL, sig->s_size, 449 + memcpy(buf, sig->s, sig->s_size); 450 + memcpy(buf + sig->s_size, sig->digest, sig->digest_size); 451 + 452 + sg_init_one(&src_sg, buf, 
sig->s_size + sig->digest_size); 453 + akcipher_request_set_crypt(req, &src_sg, NULL, sig->s_size, 452 454 sig->digest_size); 453 455 crypto_init_wait(&cwait); 454 456 akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | ··· 459 455 crypto_req_done, &cwait); 460 456 ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait); 461 457 462 - error_free_key: 463 - kfree(key); 458 + error_free_buf: 459 + kfree(buf); 464 460 error_free_req: 465 461 akcipher_request_free(req); 466 462 error_free_tfm:
-6
drivers/acpi/apei/apei-internal.h
··· 7 7 #ifndef APEI_INTERNAL_H 8 8 #define APEI_INTERNAL_H 9 9 10 - #include <linux/cper.h> 11 10 #include <linux/acpi.h> 12 11 13 12 struct apei_exec_context; ··· 128 129 else 129 130 return sizeof(*estatus) + estatus->data_length; 130 131 } 131 - 132 - void cper_estatus_print(const char *pfx, 133 - const struct acpi_hest_generic_status *estatus); 134 - int cper_estatus_check_header(const struct acpi_hest_generic_status *estatus); 135 - int cper_estatus_check(const struct acpi_hest_generic_status *estatus); 136 132 137 133 int apei_osc_setup(void); 138 134 #endif
+1
drivers/acpi/apei/bert.c
··· 23 23 #include <linux/module.h> 24 24 #include <linux/init.h> 25 25 #include <linux/acpi.h> 26 + #include <linux/cper.h> 26 27 #include <linux/io.h> 27 28 28 29 #include "apei-internal.h"
+26
drivers/base/cacheinfo.c
··· 388 388 continue;/* skip if itself or no cacheinfo */ 389 389 for (sib_index = 0; sib_index < cache_leaves(i); sib_index++) { 390 390 sib_leaf = per_cpu_cacheinfo_idx(i, sib_index); 391 + 392 + /* 393 + * Comparing cache IDs only makes sense if the leaves 394 + * belong to the same cache level of same type. Skip 395 + * the check if level and type do not match. 396 + */ 397 + if (sib_leaf->level != this_leaf->level || 398 + sib_leaf->type != this_leaf->type) 399 + continue; 400 + 391 401 if (cache_leaves_are_shared(this_leaf, sib_leaf)) { 392 402 cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map); 393 403 cpumask_set_cpu(i, &this_leaf->shared_cpu_map); ··· 410 400 coherency_max_size = this_leaf->coherency_line_size; 411 401 } 412 402 403 + /* shared_cpu_map is now populated for the cpu */ 404 + this_cpu_ci->cpu_map_populated = true; 413 405 return 0; 414 406 } 415 407 416 408 static void cache_shared_cpu_map_remove(unsigned int cpu) 417 409 { 410 + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); 418 411 struct cacheinfo *this_leaf, *sib_leaf; 419 412 unsigned int sibling, index, sib_index; 420 413 ··· 432 419 433 420 for (sib_index = 0; sib_index < cache_leaves(sibling); sib_index++) { 434 421 sib_leaf = per_cpu_cacheinfo_idx(sibling, sib_index); 422 + 423 + /* 424 + * Comparing cache IDs only makes sense if the leaves 425 + * belong to the same cache level of same type. Skip 426 + * the check if level and type do not match. 427 + */ 428 + if (sib_leaf->level != this_leaf->level || 429 + sib_leaf->type != this_leaf->type) 430 + continue; 431 + 435 432 if (cache_leaves_are_shared(this_leaf, sib_leaf)) { 436 433 cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map); 437 434 cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map); ··· 450 427 } 451 428 } 452 429 } 430 + 431 + /* cpu is no longer populated in the shared map */ 432 + this_cpu_ci->cpu_map_populated = false; 453 433 } 454 434 455 435 static void free_cache_attributes(unsigned int cpu)
+1 -1
drivers/base/firmware_loader/main.c
··· 812 812 char *outbuf; 813 813 814 814 alg = crypto_alloc_shash("sha256", 0, 0); 815 - if (!alg) 815 + if (IS_ERR(alg)) 816 816 return; 817 817 818 818 sha256buf = kmalloc(SHA256_DIGEST_SIZE, GFP_KERNEL);
+5 -1
drivers/bluetooth/hci_qca.c
··· 78 78 QCA_HW_ERROR_EVENT, 79 79 QCA_SSR_TRIGGERED, 80 80 QCA_BT_OFF, 81 - QCA_ROM_FW 81 + QCA_ROM_FW, 82 + QCA_DEBUGFS_CREATED, 82 83 }; 83 84 84 85 enum qca_capabilities { ··· 634 633 umode_t mode; 635 634 636 635 if (!hdev->debugfs) 636 + return; 637 + 638 + if (test_and_set_bit(QCA_DEBUGFS_CREATED, &qca->flags)) 637 639 return; 638 640 639 641 ibs_dir = debugfs_create_dir("ibs", hdev->debugfs);
+4 -4
drivers/char/tpm/tpm_tis_core.h
··· 84 84 #define ILB_REMAP_SIZE 0x100 85 85 86 86 enum tpm_tis_flags { 87 - TPM_TIS_ITPM_WORKAROUND = BIT(0), 88 - TPM_TIS_INVALID_STATUS = BIT(1), 89 - TPM_TIS_DEFAULT_CANCELLATION = BIT(2), 90 - TPM_TIS_IRQ_TESTED = BIT(3), 87 + TPM_TIS_ITPM_WORKAROUND = 0, 88 + TPM_TIS_INVALID_STATUS = 1, 89 + TPM_TIS_DEFAULT_CANCELLATION = 2, 90 + TPM_TIS_IRQ_TESTED = 3, 91 91 }; 92 92 93 93 struct tpm_tis_data {
+2 -1
drivers/firmware/efi/libstub/Makefile.zboot
··· 32 32 $(obj)/vmlinuz: $(obj)/vmlinux.bin FORCE 33 33 $(call if_changed,$(zboot-method-y)) 34 34 35 - OBJCOPYFLAGS_vmlinuz.o := -I binary -O $(EFI_ZBOOT_BFD_TARGET) $(EFI_ZBOOT_OBJCOPY_FLAGS) \ 35 + # avoid eager evaluation to prevent references to non-existent build artifacts 36 + OBJCOPYFLAGS_vmlinuz.o = -I binary -O $(EFI_ZBOOT_BFD_TARGET) $(EFI_ZBOOT_OBJCOPY_FLAGS) \ 36 37 --rename-section .data=.gzdata,load,alloc,readonly,contents 37 38 $(obj)/vmlinuz.o: $(obj)/vmlinuz FORCE 38 39 $(call if_changed,objcopy)
+3
drivers/firmware/efi/libstub/efistub.h
··· 1133 1133 void efi_remap_image(unsigned long image_base, unsigned alloc_size, 1134 1134 unsigned long code_size); 1135 1135 1136 + asmlinkage efi_status_t __efiapi 1137 + efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab); 1138 + 1136 1139 #endif
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
··· 593 593 case IP_VERSION(9, 3, 0): 594 594 /* GC 10.3.7 */ 595 595 case IP_VERSION(10, 3, 7): 596 + /* GC 11.0.1 */ 597 + case IP_VERSION(11, 0, 1): 596 598 if (amdgpu_tmz == 0) { 597 599 adev->gmc.tmz_enabled = false; 598 600 dev_info(adev->dev, ··· 618 616 case IP_VERSION(10, 3, 1): 619 617 /* YELLOW_CARP*/ 620 618 case IP_VERSION(10, 3, 3): 621 - case IP_VERSION(11, 0, 1): 622 619 case IP_VERSION(11, 0, 4): 623 620 /* Don't enable it by default yet. 624 621 */
+26 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
··· 241 241 return 0; 242 242 } 243 243 244 + int amdgpu_jpeg_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block) 245 + { 246 + int r, i; 247 + 248 + r = amdgpu_ras_block_late_init(adev, ras_block); 249 + if (r) 250 + return r; 251 + 252 + if (amdgpu_ras_is_supported(adev, ras_block->block)) { 253 + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { 254 + if (adev->jpeg.harvest_config & (1 << i)) 255 + continue; 256 + 257 + r = amdgpu_irq_get(adev, &adev->jpeg.inst[i].ras_poison_irq, 0); 258 + if (r) 259 + goto late_fini; 260 + } 261 + } 262 + return 0; 263 + 264 + late_fini: 265 + amdgpu_ras_block_late_fini(adev, ras_block); 266 + return r; 267 + } 268 + 244 269 int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev) 245 270 { 246 271 int err; ··· 287 262 adev->jpeg.ras_if = &ras->ras_block.ras_comm; 288 263 289 264 if (!ras->ras_block.ras_late_init) 290 - ras->ras_block.ras_late_init = amdgpu_ras_block_late_init; 265 + ras->ras_block.ras_late_init = amdgpu_jpeg_ras_late_init; 291 266 292 267 return 0; 293 268 }
+3
drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
··· 38 38 struct amdgpu_jpeg_inst { 39 39 struct amdgpu_ring ring_dec; 40 40 struct amdgpu_irq_src irq; 41 + struct amdgpu_irq_src ras_poison_irq; 41 42 struct amdgpu_jpeg_reg external; 42 43 }; 43 44 ··· 73 72 int amdgpu_jpeg_process_poison_irq(struct amdgpu_device *adev, 74 73 struct amdgpu_irq_src *source, 75 74 struct amdgpu_iv_entry *entry); 75 + int amdgpu_jpeg_ras_late_init(struct amdgpu_device *adev, 76 + struct ras_common_if *ras_block); 76 77 int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev); 77 78 78 79 #endif /*__AMDGPU_JPEG_H__*/
+26 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
··· 1181 1181 return 0; 1182 1182 } 1183 1183 1184 + int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block) 1185 + { 1186 + int r, i; 1187 + 1188 + r = amdgpu_ras_block_late_init(adev, ras_block); 1189 + if (r) 1190 + return r; 1191 + 1192 + if (amdgpu_ras_is_supported(adev, ras_block->block)) { 1193 + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { 1194 + if (adev->vcn.harvest_config & (1 << i)) 1195 + continue; 1196 + 1197 + r = amdgpu_irq_get(adev, &adev->vcn.inst[i].ras_poison_irq, 0); 1198 + if (r) 1199 + goto late_fini; 1200 + } 1201 + } 1202 + return 0; 1203 + 1204 + late_fini: 1205 + amdgpu_ras_block_late_fini(adev, ras_block); 1206 + return r; 1207 + } 1208 + 1184 1209 int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev) 1185 1210 { 1186 1211 int err; ··· 1227 1202 adev->vcn.ras_if = &ras->ras_block.ras_comm; 1228 1203 1229 1204 if (!ras->ras_block.ras_late_init) 1230 - ras->ras_block.ras_late_init = amdgpu_ras_block_late_init; 1205 + ras->ras_block.ras_late_init = amdgpu_vcn_ras_late_init; 1231 1206 1232 1207 return 0; 1233 1208 }
+3
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
··· 234 234 struct amdgpu_ring ring_enc[AMDGPU_VCN_MAX_ENC_RINGS]; 235 235 atomic_t sched_score; 236 236 struct amdgpu_irq_src irq; 237 + struct amdgpu_irq_src ras_poison_irq; 237 238 struct amdgpu_vcn_reg external; 238 239 struct amdgpu_bo *dpg_sram_bo; 239 240 struct dpg_pause_state pause_state; ··· 401 400 int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev, 402 401 struct amdgpu_irq_src *source, 403 402 struct amdgpu_iv_entry *entry); 403 + int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev, 404 + struct ras_common_if *ras_block); 404 405 int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev); 405 406 406 407 #endif
+22 -6
drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
··· 102 102 103 103 /* JPEG DJPEG POISON EVENT */ 104 104 r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i], 105 - VCN_2_6__SRCID_DJPEG0_POISON, &adev->jpeg.inst[i].irq); 105 + VCN_2_6__SRCID_DJPEG0_POISON, &adev->jpeg.inst[i].ras_poison_irq); 106 106 if (r) 107 107 return r; 108 108 109 109 /* JPEG EJPEG POISON EVENT */ 110 110 r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i], 111 - VCN_2_6__SRCID_EJPEG0_POISON, &adev->jpeg.inst[i].irq); 111 + VCN_2_6__SRCID_EJPEG0_POISON, &adev->jpeg.inst[i].ras_poison_irq); 112 112 if (r) 113 113 return r; 114 114 } ··· 221 221 if (adev->jpeg.cur_state != AMD_PG_STATE_GATE && 222 222 RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS)) 223 223 jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE); 224 + 225 + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG)) 226 + amdgpu_irq_put(adev, &adev->jpeg.inst[i].ras_poison_irq, 0); 224 227 } 225 228 226 229 return 0; ··· 572 569 return 0; 573 570 } 574 571 572 + static int jpeg_v2_6_set_ras_interrupt_state(struct amdgpu_device *adev, 573 + struct amdgpu_irq_src *source, 574 + unsigned int type, 575 + enum amdgpu_interrupt_state state) 576 + { 577 + return 0; 578 + } 579 + 575 580 static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev, 576 581 struct amdgpu_irq_src *source, 577 582 struct amdgpu_iv_entry *entry) ··· 603 592 switch (entry->src_id) { 604 593 case VCN_2_0__SRCID__JPEG_DECODE: 605 594 amdgpu_fence_process(&adev->jpeg.inst[ip_instance].ring_dec); 606 - break; 607 - case VCN_2_6__SRCID_DJPEG0_POISON: 608 - case VCN_2_6__SRCID_EJPEG0_POISON: 609 - amdgpu_jpeg_process_poison_irq(adev, source, entry); 610 595 break; 611 596 default: 612 597 DRM_ERROR("Unhandled interrupt: %d %d\n", ··· 732 725 .process = jpeg_v2_5_process_interrupt, 733 726 }; 734 727 728 + static const struct amdgpu_irq_src_funcs jpeg_v2_6_ras_irq_funcs = { 729 + .set = jpeg_v2_6_set_ras_interrupt_state, 730 + .process = amdgpu_jpeg_process_poison_irq, 731 + }; 732 + 735 733 static void 
jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev) 736 734 { 737 735 int i; ··· 747 735 748 736 adev->jpeg.inst[i].irq.num_types = 1; 749 737 adev->jpeg.inst[i].irq.funcs = &jpeg_v2_5_irq_funcs; 738 + 739 + adev->jpeg.inst[i].ras_poison_irq.num_types = 1; 740 + adev->jpeg.inst[i].ras_poison_irq.funcs = &jpeg_v2_6_ras_irq_funcs; 750 741 } 751 742 } 752 743 ··· 815 800 static struct amdgpu_jpeg_ras jpeg_v2_6_ras = { 816 801 .ras_block = { 817 802 .hw_ops = &jpeg_v2_6_ras_hw_ops, 803 + .ras_late_init = amdgpu_jpeg_ras_late_init, 818 804 }, 819 805 }; 820 806
+21 -7
drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
··· 87 87 88 88 /* JPEG DJPEG POISON EVENT */ 89 89 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 90 - VCN_4_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->irq); 90 + VCN_4_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq); 91 91 if (r) 92 92 return r; 93 93 94 94 /* JPEG EJPEG POISON EVENT */ 95 95 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 96 - VCN_4_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->irq); 96 + VCN_4_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq); 97 97 if (r) 98 98 return r; 99 99 ··· 202 202 RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS)) 203 203 jpeg_v4_0_set_powergating_state(adev, AMD_PG_STATE_GATE); 204 204 } 205 - amdgpu_irq_put(adev, &adev->jpeg.inst->irq, 0); 205 + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG)) 206 + amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0); 206 207 207 208 return 0; 208 209 } ··· 671 670 return 0; 672 671 } 673 672 673 + static int jpeg_v4_0_set_ras_interrupt_state(struct amdgpu_device *adev, 674 + struct amdgpu_irq_src *source, 675 + unsigned int type, 676 + enum amdgpu_interrupt_state state) 677 + { 678 + return 0; 679 + } 680 + 674 681 static int jpeg_v4_0_process_interrupt(struct amdgpu_device *adev, 675 682 struct amdgpu_irq_src *source, 676 683 struct amdgpu_iv_entry *entry) ··· 688 679 switch (entry->src_id) { 689 680 case VCN_4_0__SRCID__JPEG_DECODE: 690 681 amdgpu_fence_process(&adev->jpeg.inst->ring_dec); 691 - break; 692 - case VCN_4_0__SRCID_DJPEG0_POISON: 693 - case VCN_4_0__SRCID_EJPEG0_POISON: 694 - amdgpu_jpeg_process_poison_irq(adev, source, entry); 695 682 break; 696 683 default: 697 684 DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n", ··· 758 753 .process = jpeg_v4_0_process_interrupt, 759 754 }; 760 755 756 + static const struct amdgpu_irq_src_funcs jpeg_v4_0_ras_irq_funcs = { 757 + .set = jpeg_v4_0_set_ras_interrupt_state, 758 + .process = amdgpu_jpeg_process_poison_irq, 759 + }; 760 + 761 761 static void jpeg_v4_0_set_irq_funcs(struct amdgpu_device 
*adev) 762 762 { 763 763 adev->jpeg.inst->irq.num_types = 1; 764 764 adev->jpeg.inst->irq.funcs = &jpeg_v4_0_irq_funcs; 765 + 766 + adev->jpeg.inst->ras_poison_irq.num_types = 1; 767 + adev->jpeg.inst->ras_poison_irq.funcs = &jpeg_v4_0_ras_irq_funcs; 765 768 } 766 769 767 770 const struct amdgpu_ip_block_version jpeg_v4_0_ip_block = { ··· 824 811 static struct amdgpu_jpeg_ras jpeg_v4_0_ras = { 825 812 .ras_block = { 826 813 .hw_ops = &jpeg_v4_0_ras_hw_ops, 814 + .ras_late_init = amdgpu_jpeg_ras_late_init, 827 815 }, 828 816 }; 829 817
+21 -4
drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
··· 143 143 144 144 /* VCN POISON TRAP */ 145 145 r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j], 146 - VCN_2_6__SRCID_UVD_POISON, &adev->vcn.inst[j].irq); 146 + VCN_2_6__SRCID_UVD_POISON, &adev->vcn.inst[j].ras_poison_irq); 147 147 if (r) 148 148 return r; 149 149 } ··· 354 354 (adev->vcn.cur_state != AMD_PG_STATE_GATE && 355 355 RREG32_SOC15(VCN, i, mmUVD_STATUS))) 356 356 vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE); 357 + 358 + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) 359 + amdgpu_irq_put(adev, &adev->vcn.inst[i].ras_poison_irq, 0); 357 360 } 358 361 359 362 return 0; ··· 1810 1807 return 0; 1811 1808 } 1812 1809 1810 + static int vcn_v2_6_set_ras_interrupt_state(struct amdgpu_device *adev, 1811 + struct amdgpu_irq_src *source, 1812 + unsigned int type, 1813 + enum amdgpu_interrupt_state state) 1814 + { 1815 + return 0; 1816 + } 1817 + 1813 1818 static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev, 1814 1819 struct amdgpu_irq_src *source, 1815 1820 struct amdgpu_iv_entry *entry) ··· 1848 1837 case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY: 1849 1838 amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]); 1850 1839 break; 1851 - case VCN_2_6__SRCID_UVD_POISON: 1852 - amdgpu_vcn_process_poison_irq(adev, source, entry); 1853 - break; 1854 1840 default: 1855 1841 DRM_ERROR("Unhandled interrupt: %d %d\n", 1856 1842 entry->src_id, entry->src_data[0]); ··· 1862 1854 .process = vcn_v2_5_process_interrupt, 1863 1855 }; 1864 1856 1857 + static const struct amdgpu_irq_src_funcs vcn_v2_6_ras_irq_funcs = { 1858 + .set = vcn_v2_6_set_ras_interrupt_state, 1859 + .process = amdgpu_vcn_process_poison_irq, 1860 + }; 1861 + 1865 1862 static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev) 1866 1863 { 1867 1864 int i; ··· 1876 1863 continue; 1877 1864 adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1; 1878 1865 adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs; 1866 + 1867 + 
adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.num_enc_rings + 1; 1868 + adev->vcn.inst[i].ras_poison_irq.funcs = &vcn_v2_6_ras_irq_funcs; 1879 1869 } 1880 1870 } 1881 1871 ··· 1981 1965 static struct amdgpu_vcn_ras vcn_v2_6_ras = { 1982 1966 .ras_block = { 1983 1967 .hw_ops = &vcn_v2_6_ras_hw_ops, 1968 + .ras_late_init = amdgpu_vcn_ras_late_init, 1984 1969 }, 1985 1970 }; 1986 1971
+30 -6
drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
··· 139 139 140 140 /* VCN POISON TRAP */ 141 141 r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i], 142 - VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst[i].irq); 142 + VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst[i].ras_poison_irq); 143 143 if (r) 144 144 return r; 145 145 ··· 305 305 vcn_v4_0_set_powergating_state(adev, AMD_PG_STATE_GATE); 306 306 } 307 307 } 308 - 309 - amdgpu_irq_put(adev, &adev->vcn.inst[i].irq, 0); 308 + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) 309 + amdgpu_irq_put(adev, &adev->vcn.inst[i].ras_poison_irq, 0); 310 310 } 311 311 312 312 return 0; ··· 1976 1976 } 1977 1977 1978 1978 /** 1979 + * vcn_v4_0_set_ras_interrupt_state - set VCN block RAS interrupt state 1980 + * 1981 + * @adev: amdgpu_device pointer 1982 + * @source: interrupt sources 1983 + * @type: interrupt types 1984 + * @state: interrupt states 1985 + * 1986 + * Set VCN block RAS interrupt state 1987 + */ 1988 + static int vcn_v4_0_set_ras_interrupt_state(struct amdgpu_device *adev, 1989 + struct amdgpu_irq_src *source, 1990 + unsigned int type, 1991 + enum amdgpu_interrupt_state state) 1992 + { 1993 + return 0; 1994 + } 1995 + 1996 + /** 1979 1997 * vcn_v4_0_process_interrupt - process VCN block interrupt 1980 1998 * 1981 1999 * @adev: amdgpu_device pointer ··· 2025 2007 case VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE: 2026 2008 amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]); 2027 2009 break; 2028 - case VCN_4_0__SRCID_UVD_POISON: 2029 - amdgpu_vcn_process_poison_irq(adev, source, entry); 2030 - break; 2031 2010 default: 2032 2011 DRM_ERROR("Unhandled interrupt: %d %d\n", 2033 2012 entry->src_id, entry->src_data[0]); ··· 2037 2022 static const struct amdgpu_irq_src_funcs vcn_v4_0_irq_funcs = { 2038 2023 .set = vcn_v4_0_set_interrupt_state, 2039 2024 .process = vcn_v4_0_process_interrupt, 2025 + }; 2026 + 2027 + static const struct amdgpu_irq_src_funcs vcn_v4_0_ras_irq_funcs = { 2028 + .set = vcn_v4_0_set_ras_interrupt_state, 2029 + .process = 
amdgpu_vcn_process_poison_irq, 2040 2030 }; 2041 2031 2042 2032 /** ··· 2061 2041 2062 2042 adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1; 2063 2043 adev->vcn.inst[i].irq.funcs = &vcn_v4_0_irq_funcs; 2044 + 2045 + adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.num_enc_rings + 1; 2046 + adev->vcn.inst[i].ras_poison_irq.funcs = &vcn_v4_0_ras_irq_funcs; 2064 2047 } 2065 2048 } 2066 2049 ··· 2137 2114 static struct amdgpu_vcn_ras vcn_v4_0_ras = { 2138 2115 .ras_block = { 2139 2116 .hw_ops = &vcn_v4_0_ras_hw_ops, 2117 + .ras_late_init = amdgpu_vcn_ras_late_init, 2140 2118 }, 2141 2119 }; 2142 2120
-9
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
··· 2113 2113 if (hubbub->funcs->program_compbuf_size) 2114 2114 hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true); 2115 2115 2116 - if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) { 2117 - dc_dmub_srv_p_state_delegate(dc, 2118 - true, context); 2119 - context->bw_ctx.bw.dcn.clk.p_state_change_support = true; 2120 - dc->clk_mgr->clks.fw_based_mclk_switching = true; 2121 - } else { 2122 - dc->clk_mgr->clks.fw_based_mclk_switching = false; 2123 - } 2124 - 2125 2116 dc->clk_mgr->funcs->update_clocks( 2126 2117 dc->clk_mgr, 2127 2118 context,
+1 -24
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
··· 983 983 } 984 984 985 985 void dcn30_prepare_bandwidth(struct dc *dc, 986 - struct dc_state *context) 986 + struct dc_state *context) 987 987 { 988 - bool p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support; 989 - /* Any transition into an FPO config should disable MCLK switching first to avoid 990 - * driver and FW P-State synchronization issues. 991 - */ 992 - if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) { 993 - dc->optimized_required = true; 994 - context->bw_ctx.bw.dcn.clk.p_state_change_support = false; 995 - } 996 - 997 988 if (dc->clk_mgr->dc_mode_softmax_enabled) 998 989 if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 && 999 990 context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000) 1000 991 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz); 1001 992 1002 993 dcn20_prepare_bandwidth(dc, context); 1003 - /* 1004 - * enabled -> enabled: do not disable 1005 - * enabled -> disabled: disable 1006 - * disabled -> enabled: don't care 1007 - * disabled -> disabled: don't care 1008 - */ 1009 - if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) 1010 - dc_dmub_srv_p_state_delegate(dc, false, context); 1011 - 1012 - if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) { 1013 - /* After disabling P-State, restore the original value to ensure we get the correct P-State 1014 - * on the next optimize. */ 1015 - context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support; 1016 - } 1017 994 } 1018 995
-29
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
··· 6925 6925 return 0; 6926 6926 } 6927 6927 6928 - static int si_set_temperature_range(struct amdgpu_device *adev) 6929 - { 6930 - int ret; 6931 - 6932 - ret = si_thermal_enable_alert(adev, false); 6933 - if (ret) 6934 - return ret; 6935 - ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); 6936 - if (ret) 6937 - return ret; 6938 - ret = si_thermal_enable_alert(adev, true); 6939 - if (ret) 6940 - return ret; 6941 - 6942 - return ret; 6943 - } 6944 - 6945 6928 static void si_dpm_disable(struct amdgpu_device *adev) 6946 6929 { 6947 6930 struct rv7xx_power_info *pi = rv770_get_pi(adev); ··· 7609 7626 7610 7627 static int si_dpm_late_init(void *handle) 7611 7628 { 7612 - int ret; 7613 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 7614 - 7615 - if (!adev->pm.dpm_enabled) 7616 - return 0; 7617 - 7618 - ret = si_set_temperature_range(adev); 7619 - if (ret) 7620 - return ret; 7621 - #if 0 //TODO ? 7622 - si_dpm_powergate_uvd(adev, true); 7623 - #endif 7624 7629 return 0; 7625 7630 } 7626 7631
+6 -4
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
··· 582 582 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 583 583 SmuMetrics_legacy_t metrics; 584 584 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 585 - int i, size = 0, ret = 0; 585 + int i, idx, size = 0, ret = 0; 586 586 uint32_t cur_value = 0, value = 0, count = 0; 587 587 bool cur_value_match_level = false; 588 588 ··· 656 656 case SMU_MCLK: 657 657 case SMU_FCLK: 658 658 for (i = 0; i < count; i++) { 659 - ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value); 659 + idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i; 660 + ret = vangogh_get_dpm_clk_limited(smu, clk_type, idx, &value); 660 661 if (ret) 661 662 return ret; 662 663 if (!value) ··· 684 683 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 685 684 SmuMetrics_t metrics; 686 685 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 687 - int i, size = 0, ret = 0; 686 + int i, idx, size = 0, ret = 0; 688 687 uint32_t cur_value = 0, value = 0, count = 0; 689 688 bool cur_value_match_level = false; 690 689 uint32_t min, max; ··· 766 765 case SMU_MCLK: 767 766 case SMU_FCLK: 768 767 for (i = 0; i < count; i++) { 769 - ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value); 768 + idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i; 769 + ret = vangogh_get_dpm_clk_limited(smu, clk_type, idx, &value); 770 770 if (ret) 771 771 return ret; 772 772 if (!value)
+3 -2
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
··· 494 494 static int renoir_print_clk_levels(struct smu_context *smu, 495 495 enum smu_clk_type clk_type, char *buf) 496 496 { 497 - int i, size = 0, ret = 0; 497 + int i, idx, size = 0, ret = 0; 498 498 uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0; 499 499 SmuMetrics_t metrics; 500 500 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); ··· 594 594 case SMU_VCLK: 595 595 case SMU_DCLK: 596 596 for (i = 0; i < count; i++) { 597 - ret = renoir_get_dpm_clk_limited(smu, clk_type, i, &value); 597 + idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i; 598 + ret = renoir_get_dpm_clk_limited(smu, clk_type, idx, &value); 598 599 if (ret) 599 600 return ret; 600 601 if (!value)
+3 -2
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
··· 478 478 static int smu_v13_0_4_print_clk_levels(struct smu_context *smu, 479 479 enum smu_clk_type clk_type, char *buf) 480 480 { 481 - int i, size = 0, ret = 0; 481 + int i, idx, size = 0, ret = 0; 482 482 uint32_t cur_value = 0, value = 0, count = 0; 483 483 uint32_t min, max; 484 484 ··· 512 512 break; 513 513 514 514 for (i = 0; i < count; i++) { 515 - ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type, i, &value); 515 + idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i; 516 + ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type, idx, &value); 516 517 if (ret) 517 518 break; 518 519
+3 -2
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
··· 866 866 static int smu_v13_0_5_print_clk_levels(struct smu_context *smu, 867 867 enum smu_clk_type clk_type, char *buf) 868 868 { 869 - int i, size = 0, ret = 0; 869 + int i, idx, size = 0, ret = 0; 870 870 uint32_t cur_value = 0, value = 0, count = 0; 871 871 uint32_t min = 0, max = 0; 872 872 ··· 898 898 goto print_clk_out; 899 899 900 900 for (i = 0; i < count; i++) { 901 - ret = smu_v13_0_5_get_dpm_freq_by_index(smu, clk_type, i, &value); 901 + idx = (clk_type == SMU_MCLK) ? (count - i - 1) : i; 902 + ret = smu_v13_0_5_get_dpm_freq_by_index(smu, clk_type, idx, &value); 902 903 if (ret) 903 904 goto print_clk_out; 904 905
+3 -2
drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
··· 1000 1000 static int yellow_carp_print_clk_levels(struct smu_context *smu, 1001 1001 enum smu_clk_type clk_type, char *buf) 1002 1002 { 1003 - int i, size = 0, ret = 0; 1003 + int i, idx, size = 0, ret = 0; 1004 1004 uint32_t cur_value = 0, value = 0, count = 0; 1005 1005 uint32_t min, max; 1006 1006 ··· 1033 1033 goto print_clk_out; 1034 1034 1035 1035 for (i = 0; i < count; i++) { 1036 - ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, i, &value); 1036 + idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i; 1037 + ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, idx, &value); 1037 1038 if (ret) 1038 1039 goto print_clk_out; 1039 1040
+11 -6
drivers/gpu/drm/i915/i915_perf.c
··· 877 877 stream->oa_buffer.last_ctx_id = ctx_id; 878 878 } 879 879 880 - /* 881 - * Clear out the report id and timestamp as a means to detect unlanded 882 - * reports. 883 - */ 884 - oa_report_id_clear(stream, report32); 885 - oa_timestamp_clear(stream, report32); 880 + if (is_power_of_2(report_size)) { 881 + /* 882 + * Clear out the report id and timestamp as a means 883 + * to detect unlanded reports. 884 + */ 885 + oa_report_id_clear(stream, report32); 886 + oa_timestamp_clear(stream, report32); 887 + } else { 888 + /* Zero out the entire report */ 889 + memset(report32, 0, report_size); 890 + } 886 891 } 887 892 888 893 if (start_offset != *offset) {
+6 -7
drivers/hid/hid-logitech-hidpp.c
··· 286 286 struct hidpp_report *message, 287 287 struct hidpp_report *response) 288 288 { 289 - int ret; 289 + int ret = -1; 290 290 int max_retries = 3; 291 291 292 292 mutex_lock(&hidpp->send_mutex); ··· 300 300 */ 301 301 *response = *message; 302 302 303 - for (; max_retries != 0; max_retries--) { 303 + for (; max_retries != 0 && ret; max_retries--) { 304 304 ret = __hidpp_send_report(hidpp->hid_dev, message); 305 305 306 306 if (ret) { 307 307 dbg_hid("__hidpp_send_report returned err: %d\n", ret); 308 308 memset(response, 0, sizeof(struct hidpp_report)); 309 - goto exit; 309 + break; 310 310 } 311 311 312 312 if (!wait_event_timeout(hidpp->wait, hidpp->answer_available, ··· 314 314 dbg_hid("%s:timeout waiting for response\n", __func__); 315 315 memset(response, 0, sizeof(struct hidpp_report)); 316 316 ret = -ETIMEDOUT; 317 - goto exit; 317 + break; 318 318 } 319 319 320 320 if (response->report_id == REPORT_ID_HIDPP_SHORT && 321 321 response->rap.sub_id == HIDPP_ERROR) { 322 322 ret = response->rap.params[1]; 323 323 dbg_hid("%s:got hidpp error %02X\n", __func__, ret); 324 - goto exit; 324 + break; 325 325 } 326 326 327 327 if ((response->report_id == REPORT_ID_HIDPP_LONG || ··· 330 330 ret = response->fap.params[1]; 331 331 if (ret != HIDPP20_ERROR_BUSY) { 332 332 dbg_hid("%s:got hidpp 2.0 error %02X\n", __func__, ret); 333 - goto exit; 333 + break; 334 334 } 335 335 dbg_hid("%s:got busy hidpp 2.0 error %02X, retrying\n", __func__, ret); 336 336 } 337 337 } 338 338 339 - exit: 340 339 mutex_unlock(&hidpp->send_mutex); 341 340 return ret; 342 341
+1 -1
drivers/iio/accel/kionix-kx022a.c
··· 1048 1048 data->ien_reg = KX022A_REG_INC4; 1049 1049 } else { 1050 1050 irq = fwnode_irq_get_byname(fwnode, "INT2"); 1051 - if (irq <= 0) 1051 + if (irq < 0) 1052 1052 return dev_err_probe(dev, irq, "No suitable IRQ\n"); 1053 1053 1054 1054 data->inc_reg = KX022A_REG_INC5;
+2 -2
drivers/iio/accel/st_accel_core.c
··· 1291 1291 1292 1292 adev = ACPI_COMPANION(indio_dev->dev.parent); 1293 1293 if (!adev) 1294 - return 0; 1294 + return -ENXIO; 1295 1295 1296 1296 /* Read _ONT data, which should be a package of 6 integers. */ 1297 1297 status = acpi_evaluate_object(adev->handle, "_ONT", NULL, &buffer); 1298 1298 if (status == AE_NOT_FOUND) { 1299 - return 0; 1299 + return -ENXIO; 1300 1300 } else if (ACPI_FAILURE(status)) { 1301 1301 dev_warn(&indio_dev->dev, "failed to execute _ONT: %d\n", 1302 1302 status);
+11 -1
drivers/iio/adc/ad4130.c
··· 1817 1817 .unprepare = ad4130_int_clk_unprepare, 1818 1818 }; 1819 1819 1820 + static void ad4130_clk_del_provider(void *of_node) 1821 + { 1822 + of_clk_del_provider(of_node); 1823 + } 1824 + 1820 1825 static int ad4130_setup_int_clk(struct ad4130_state *st) 1821 1826 { 1822 1827 struct device *dev = &st->spi->dev; ··· 1829 1824 struct clk_init_data init; 1830 1825 const char *clk_name; 1831 1826 struct clk *clk; 1827 + int ret; 1832 1828 1833 1829 if (st->int_pin_sel == AD4130_INT_PIN_CLK || 1834 1830 st->mclk_sel != AD4130_MCLK_76_8KHZ) ··· 1849 1843 if (IS_ERR(clk)) 1850 1844 return PTR_ERR(clk); 1851 1845 1852 - return of_clk_add_provider(of_node, of_clk_src_simple_get, clk); 1846 + ret = of_clk_add_provider(of_node, of_clk_src_simple_get, clk); 1847 + if (ret) 1848 + return ret; 1849 + 1850 + return devm_add_action_or_reset(dev, ad4130_clk_del_provider, of_node); 1853 1851 } 1854 1852 1855 1853 static int ad4130_setup(struct iio_dev *indio_dev)
+2 -6
drivers/iio/adc/ad7192.c
··· 897 897 __AD719x_CHANNEL(_si, _channel1, -1, _address, NULL, IIO_VOLTAGE, \ 898 898 BIT(IIO_CHAN_INFO_SCALE), ad7192_calibsys_ext_info) 899 899 900 - #define AD719x_SHORTED_CHANNEL(_si, _channel1, _address) \ 901 - __AD719x_CHANNEL(_si, _channel1, -1, _address, "shorted", IIO_VOLTAGE, \ 902 - BIT(IIO_CHAN_INFO_SCALE), ad7192_calibsys_ext_info) 903 - 904 900 #define AD719x_TEMP_CHANNEL(_si, _address) \ 905 901 __AD719x_CHANNEL(_si, 0, -1, _address, NULL, IIO_TEMP, 0, NULL) 906 902 ··· 904 908 AD719x_DIFF_CHANNEL(0, 1, 2, AD7192_CH_AIN1P_AIN2M), 905 909 AD719x_DIFF_CHANNEL(1, 3, 4, AD7192_CH_AIN3P_AIN4M), 906 910 AD719x_TEMP_CHANNEL(2, AD7192_CH_TEMP), 907 - AD719x_SHORTED_CHANNEL(3, 2, AD7192_CH_AIN2P_AIN2M), 911 + AD719x_DIFF_CHANNEL(3, 2, 2, AD7192_CH_AIN2P_AIN2M), 908 912 AD719x_CHANNEL(4, 1, AD7192_CH_AIN1), 909 913 AD719x_CHANNEL(5, 2, AD7192_CH_AIN2), 910 914 AD719x_CHANNEL(6, 3, AD7192_CH_AIN3), ··· 918 922 AD719x_DIFF_CHANNEL(2, 5, 6, AD7193_CH_AIN5P_AIN6M), 919 923 AD719x_DIFF_CHANNEL(3, 7, 8, AD7193_CH_AIN7P_AIN8M), 920 924 AD719x_TEMP_CHANNEL(4, AD7193_CH_TEMP), 921 - AD719x_SHORTED_CHANNEL(5, 2, AD7193_CH_AIN2P_AIN2M), 925 + AD719x_DIFF_CHANNEL(5, 2, 2, AD7193_CH_AIN2P_AIN2M), 922 926 AD719x_CHANNEL(6, 1, AD7193_CH_AIN1), 923 927 AD719x_CHANNEL(7, 2, AD7193_CH_AIN2), 924 928 AD719x_CHANNEL(8, 3, AD7193_CH_AIN3),
+4
drivers/iio/adc/ad_sigma_delta.c
··· 584 584 init_completion(&sigma_delta->completion); 585 585 586 586 sigma_delta->irq_dis = true; 587 + 588 + /* the IRQ core clears IRQ_DISABLE_UNLAZY flag when freeing an IRQ */ 589 + irq_set_status_flags(sigma_delta->spi->irq, IRQ_DISABLE_UNLAZY); 590 + 587 591 ret = devm_request_irq(dev, sigma_delta->spi->irq, 588 592 ad_sd_data_rdy_trig_poll, 589 593 sigma_delta->info->irq_flags | IRQF_NO_AUTOEN,
+3 -4
drivers/iio/adc/imx93_adc.c
··· 236 236 { 237 237 struct imx93_adc *adc = iio_priv(indio_dev); 238 238 struct device *dev = adc->dev; 239 - long ret; 240 - u32 vref_uv; 239 + int ret; 241 240 242 241 switch (mask) { 243 242 case IIO_CHAN_INFO_RAW: ··· 252 253 return IIO_VAL_INT; 253 254 254 255 case IIO_CHAN_INFO_SCALE: 255 - ret = vref_uv = regulator_get_voltage(adc->vref); 256 + ret = regulator_get_voltage(adc->vref); 256 257 if (ret < 0) 257 258 return ret; 258 - *val = vref_uv / 1000; 259 + *val = ret / 1000; 259 260 *val2 = 12; 260 261 return IIO_VAL_FRACTIONAL_LOG2; 261 262
+51 -2
drivers/iio/adc/mt6370-adc.c
··· 19 19 20 20 #include <dt-bindings/iio/adc/mediatek,mt6370_adc.h> 21 21 22 + #define MT6370_REG_DEV_INFO 0x100 22 23 #define MT6370_REG_CHG_CTRL3 0x113 23 24 #define MT6370_REG_CHG_CTRL7 0x117 24 25 #define MT6370_REG_CHG_ADC 0x121 ··· 28 27 #define MT6370_ADC_START_MASK BIT(0) 29 28 #define MT6370_ADC_IN_SEL_MASK GENMASK(7, 4) 30 29 #define MT6370_AICR_ICHG_MASK GENMASK(7, 2) 30 + #define MT6370_VENID_MASK GENMASK(7, 4) 31 31 32 32 #define MT6370_AICR_100_mA 0x0 33 33 #define MT6370_AICR_150_mA 0x1 ··· 49 47 #define ADC_CONV_TIME_MS 35 50 48 #define ADC_CONV_POLLING_TIME_US 1000 51 49 50 + #define MT6370_VID_RT5081 0x8 51 + #define MT6370_VID_RT5081A 0xA 52 + #define MT6370_VID_MT6370 0xE 53 + 52 54 struct mt6370_adc_data { 53 55 struct device *dev; 54 56 struct regmap *regmap; ··· 61 55 * from being read at the same time. 62 56 */ 63 57 struct mutex adc_lock; 58 + unsigned int vid; 64 59 }; 65 60 66 61 static int mt6370_adc_read_channel(struct mt6370_adc_data *priv, int chan, ··· 105 98 return ret; 106 99 } 107 100 101 + static int mt6370_adc_get_ibus_scale(struct mt6370_adc_data *priv) 102 + { 103 + switch (priv->vid) { 104 + case MT6370_VID_RT5081: 105 + case MT6370_VID_RT5081A: 106 + case MT6370_VID_MT6370: 107 + return 3350; 108 + default: 109 + return 3875; 110 + } 111 + } 112 + 113 + static int mt6370_adc_get_ibat_scale(struct mt6370_adc_data *priv) 114 + { 115 + switch (priv->vid) { 116 + case MT6370_VID_RT5081: 117 + case MT6370_VID_RT5081A: 118 + case MT6370_VID_MT6370: 119 + return 2680; 120 + default: 121 + return 3870; 122 + } 123 + } 124 + 108 125 static int mt6370_adc_read_scale(struct mt6370_adc_data *priv, 109 126 int chan, int *val1, int *val2) 110 127 { ··· 154 123 case MT6370_AICR_250_mA: 155 124 case MT6370_AICR_300_mA: 156 125 case MT6370_AICR_350_mA: 157 - *val1 = 3350; 126 + *val1 = mt6370_adc_get_ibus_scale(priv); 158 127 break; 159 128 default: 160 129 *val1 = 5000; ··· 181 150 case MT6370_ICHG_600_mA: 182 151 case MT6370_ICHG_700_mA: 
183 152 case MT6370_ICHG_800_mA: 184 - *val1 = 2680; 153 + *val1 = mt6370_adc_get_ibat_scale(priv); 185 154 break; 186 155 default: 187 156 *val1 = 5000; ··· 282 251 MT6370_ADC_CHAN(TEMP_JC, IIO_TEMP, 12, BIT(IIO_CHAN_INFO_OFFSET)), 283 252 }; 284 253 254 + static int mt6370_get_vendor_info(struct mt6370_adc_data *priv) 255 + { 256 + unsigned int dev_info; 257 + int ret; 258 + 259 + ret = regmap_read(priv->regmap, MT6370_REG_DEV_INFO, &dev_info); 260 + if (ret) 261 + return ret; 262 + 263 + priv->vid = FIELD_GET(MT6370_VENID_MASK, dev_info); 264 + 265 + return 0; 266 + } 267 + 285 268 static int mt6370_adc_probe(struct platform_device *pdev) 286 269 { 287 270 struct device *dev = &pdev->dev; ··· 316 271 priv->dev = dev; 317 272 priv->regmap = regmap; 318 273 mutex_init(&priv->adc_lock); 274 + 275 + ret = mt6370_get_vendor_info(priv); 276 + if (ret) 277 + return dev_err_probe(dev, ret, "Failed to get vid\n"); 319 278 320 279 ret = regmap_write(priv->regmap, MT6370_REG_CHG_ADC, 0); 321 280 if (ret)
+5 -5
drivers/iio/adc/mxs-lradc-adc.c
··· 757 757 758 758 ret = mxs_lradc_adc_trigger_init(iio); 759 759 if (ret) 760 - goto err_trig; 760 + return ret; 761 761 762 762 ret = iio_triggered_buffer_setup(iio, &iio_pollfunc_store_time, 763 763 &mxs_lradc_adc_trigger_handler, 764 764 &mxs_lradc_adc_buffer_ops); 765 765 if (ret) 766 - return ret; 766 + goto err_trig; 767 767 768 768 adc->vref_mv = mxs_lradc_adc_vref_mv[lradc->soc]; 769 769 ··· 801 801 802 802 err_dev: 803 803 mxs_lradc_adc_hw_stop(adc); 804 - mxs_lradc_adc_trigger_remove(iio); 805 - err_trig: 806 804 iio_triggered_buffer_cleanup(iio); 805 + err_trig: 806 + mxs_lradc_adc_trigger_remove(iio); 807 807 return ret; 808 808 } 809 809 ··· 814 814 815 815 iio_device_unregister(iio); 816 816 mxs_lradc_adc_hw_stop(adc); 817 - mxs_lradc_adc_trigger_remove(iio); 818 817 iio_triggered_buffer_cleanup(iio); 818 + mxs_lradc_adc_trigger_remove(iio); 819 819 820 820 return 0; 821 821 }
+5 -5
drivers/iio/adc/palmas_gpadc.c
··· 547 547 int adc_chan = chan->channel; 548 548 int ret = 0; 549 549 550 - if (adc_chan > PALMAS_ADC_CH_MAX) 550 + if (adc_chan >= PALMAS_ADC_CH_MAX) 551 551 return -EINVAL; 552 552 553 553 mutex_lock(&adc->lock); ··· 595 595 int adc_chan = chan->channel; 596 596 int ret = 0; 597 597 598 - if (adc_chan > PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH) 598 + if (adc_chan >= PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH) 599 599 return -EINVAL; 600 600 601 601 mutex_lock(&adc->lock); ··· 684 684 int adc_chan = chan->channel; 685 685 int ret; 686 686 687 - if (adc_chan > PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH) 687 + if (adc_chan >= PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH) 688 688 return -EINVAL; 689 689 690 690 mutex_lock(&adc->lock); ··· 710 710 int adc_chan = chan->channel; 711 711 int ret; 712 712 713 - if (adc_chan > PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH) 713 + if (adc_chan >= PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH) 714 714 return -EINVAL; 715 715 716 716 mutex_lock(&adc->lock); ··· 744 744 int old; 745 745 int ret; 746 746 747 - if (adc_chan > PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH) 747 + if (adc_chan >= PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH) 748 748 return -EINVAL; 749 749 750 750 mutex_lock(&adc->lock);
+32 -29
drivers/iio/adc/stm32-adc.c
··· 2006 2006 * to get the *real* number of channels. 2007 2007 */ 2008 2008 ret = device_property_count_u32(dev, "st,adc-diff-channels"); 2009 - if (ret < 0) 2010 - return ret; 2011 - 2012 - ret /= (int)(sizeof(struct stm32_adc_diff_channel) / sizeof(u32)); 2013 - if (ret > adc_info->max_channels) { 2014 - dev_err(&indio_dev->dev, "Bad st,adc-diff-channels?\n"); 2015 - return -EINVAL; 2016 - } else if (ret > 0) { 2017 - adc->num_diff = ret; 2018 - num_channels += ret; 2009 + if (ret > 0) { 2010 + ret /= (int)(sizeof(struct stm32_adc_diff_channel) / sizeof(u32)); 2011 + if (ret > adc_info->max_channels) { 2012 + dev_err(&indio_dev->dev, "Bad st,adc-diff-channels?\n"); 2013 + return -EINVAL; 2014 + } else if (ret > 0) { 2015 + adc->num_diff = ret; 2016 + num_channels += ret; 2017 + } 2019 2018 } 2020 2019 2021 2020 /* Optional sample time is provided either for each, or all channels */ ··· 2036 2037 struct stm32_adc_diff_channel diff[STM32_ADC_CH_MAX]; 2037 2038 struct device *dev = &indio_dev->dev; 2038 2039 u32 num_diff = adc->num_diff; 2040 + int num_se = nchans - num_diff; 2039 2041 int size = num_diff * sizeof(*diff) / sizeof(u32); 2040 2042 int scan_index = 0, ret, i, c; 2041 2043 u32 smp = 0, smps[STM32_ADC_CH_MAX], chans[STM32_ADC_CH_MAX]; ··· 2063 2063 scan_index++; 2064 2064 } 2065 2065 } 2066 - 2067 - ret = device_property_read_u32_array(dev, "st,adc-channels", chans, 2068 - nchans); 2069 - if (ret) 2070 - return ret; 2071 - 2072 - for (c = 0; c < nchans; c++) { 2073 - if (chans[c] >= adc_info->max_channels) { 2074 - dev_err(&indio_dev->dev, "Invalid channel %d\n", 2075 - chans[c]); 2076 - return -EINVAL; 2066 + if (num_se > 0) { 2067 + ret = device_property_read_u32_array(dev, "st,adc-channels", chans, num_se); 2068 + if (ret) { 2069 + dev_err(&indio_dev->dev, "Failed to get st,adc-channels %d\n", ret); 2070 + return ret; 2077 2071 } 2078 2072 2079 - /* Channel can't be configured both as single-ended & diff */ 2080 - for (i = 0; i < num_diff; i++) { 
2081 - if (chans[c] == diff[i].vinp) { 2082 - dev_err(&indio_dev->dev, "channel %d misconfigured\n", chans[c]); 2073 + for (c = 0; c < num_se; c++) { 2074 + if (chans[c] >= adc_info->max_channels) { 2075 + dev_err(&indio_dev->dev, "Invalid channel %d\n", 2076 + chans[c]); 2083 2077 return -EINVAL; 2084 2078 } 2079 + 2080 + /* Channel can't be configured both as single-ended & diff */ 2081 + for (i = 0; i < num_diff; i++) { 2082 + if (chans[c] == diff[i].vinp) { 2083 + dev_err(&indio_dev->dev, "channel %d misconfigured\n", 2084 + chans[c]); 2085 + return -EINVAL; 2086 + } 2087 + } 2088 + stm32_adc_chan_init_one(indio_dev, &channels[scan_index], 2089 + chans[c], 0, scan_index, false); 2090 + scan_index++; 2085 2091 } 2086 - stm32_adc_chan_init_one(indio_dev, &channels[scan_index], 2087 - chans[c], 0, scan_index, false); 2088 - scan_index++; 2089 2092 } 2090 2093 2091 2094 if (adc->nsmps > 0) { ··· 2309 2306 2310 2307 if (legacy) 2311 2308 ret = stm32_adc_legacy_chan_init(indio_dev, adc, channels, 2312 - num_channels); 2309 + timestamping ? num_channels - 1 : num_channels); 2313 2310 else 2314 2311 ret = stm32_adc_generic_chan_init(indio_dev, adc, channels); 2315 2312 if (ret < 0)
+1 -1
drivers/iio/addac/ad74413r.c
··· 1007 1007 1008 1008 ret = ad74413r_get_single_adc_result(indio_dev, chan->channel, 1009 1009 val); 1010 - if (ret) 1010 + if (ret < 0) 1011 1011 return ret; 1012 1012 1013 1013 ad74413r_adc_to_resistance_result(*val, val);
+1 -1
drivers/iio/dac/Makefile
··· 17 17 obj-$(CONFIG_AD5592R) += ad5592r.o 18 18 obj-$(CONFIG_AD5593R) += ad5593r.o 19 19 obj-$(CONFIG_AD5755) += ad5755.o 20 - obj-$(CONFIG_AD5755) += ad5758.o 20 + obj-$(CONFIG_AD5758) += ad5758.o 21 21 obj-$(CONFIG_AD5761) += ad5761.o 22 22 obj-$(CONFIG_AD5764) += ad5764.o 23 23 obj-$(CONFIG_AD5766) += ad5766.o
+14 -2
drivers/iio/dac/mcp4725.c
··· 47 47 struct mcp4725_data *data = iio_priv(i2c_get_clientdata( 48 48 to_i2c_client(dev))); 49 49 u8 outbuf[2]; 50 + int ret; 50 51 51 52 outbuf[0] = (data->powerdown_mode + 1) << 4; 52 53 outbuf[1] = 0; 53 54 data->powerdown = true; 54 55 55 - return i2c_master_send(data->client, outbuf, 2); 56 + ret = i2c_master_send(data->client, outbuf, 2); 57 + if (ret < 0) 58 + return ret; 59 + else if (ret != 2) 60 + return -EIO; 61 + return 0; 56 62 } 57 63 58 64 static int mcp4725_resume(struct device *dev) ··· 66 60 struct mcp4725_data *data = iio_priv(i2c_get_clientdata( 67 61 to_i2c_client(dev))); 68 62 u8 outbuf[2]; 63 + int ret; 69 64 70 65 /* restore previous DAC value */ 71 66 outbuf[0] = (data->dac_value >> 8) & 0xf; 72 67 outbuf[1] = data->dac_value & 0xff; 73 68 data->powerdown = false; 74 69 75 - return i2c_master_send(data->client, outbuf, 2); 70 + ret = i2c_master_send(data->client, outbuf, 2); 71 + if (ret < 0) 72 + return ret; 73 + else if (ret != 2) 74 + return -EIO; 75 + return 0; 76 76 } 77 77 static DEFINE_SIMPLE_DEV_PM_OPS(mcp4725_pm_ops, mcp4725_suspend, 78 78 mcp4725_resume);
+5 -5
drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
··· 275 275 { 276 276 struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev); 277 277 struct device *dev = regmap_get_device(st->map); 278 + struct inv_icm42600_timestamp *ts = iio_priv(indio_dev); 278 279 279 280 pm_runtime_get_sync(dev); 281 + 282 + mutex_lock(&st->lock); 283 + inv_icm42600_timestamp_reset(ts); 284 + mutex_unlock(&st->lock); 280 285 281 286 return 0; 282 287 } ··· 380 375 struct device *dev = regmap_get_device(st->map); 381 376 unsigned int sensor; 382 377 unsigned int *watermark; 383 - struct inv_icm42600_timestamp *ts; 384 378 struct inv_icm42600_sensor_conf conf = INV_ICM42600_SENSOR_CONF_INIT; 385 379 unsigned int sleep_temp = 0; 386 380 unsigned int sleep_sensor = 0; ··· 389 385 if (indio_dev == st->indio_gyro) { 390 386 sensor = INV_ICM42600_SENSOR_GYRO; 391 387 watermark = &st->fifo.watermark.gyro; 392 - ts = iio_priv(st->indio_gyro); 393 388 } else if (indio_dev == st->indio_accel) { 394 389 sensor = INV_ICM42600_SENSOR_ACCEL; 395 390 watermark = &st->fifo.watermark.accel; 396 - ts = iio_priv(st->indio_accel); 397 391 } else { 398 392 return -EINVAL; 399 393 } ··· 418 416 /* if FIFO is off, turn temperature off */ 419 417 if (!st->fifo.on) 420 418 ret = inv_icm42600_set_temp_conf(st, false, &sleep_temp); 421 - 422 - inv_icm42600_timestamp_reset(ts); 423 419 424 420 out_unlock: 425 421 mutex_unlock(&st->lock);
+32 -10
drivers/iio/industrialio-gts-helper.c
··· 337 337 return ret; 338 338 } 339 339 340 + static void iio_gts_us_to_int_micro(int *time_us, int *int_micro_times, 341 + int num_times) 342 + { 343 + int i; 344 + 345 + for (i = 0; i < num_times; i++) { 346 + int_micro_times[i * 2] = time_us[i] / 1000000; 347 + int_micro_times[i * 2 + 1] = time_us[i] % 1000000; 348 + } 349 + } 350 + 340 351 /** 341 352 * iio_gts_build_avail_time_table - build table of available integration times 342 353 * @gts: Gain time scale descriptor ··· 362 351 */ 363 352 static int iio_gts_build_avail_time_table(struct iio_gts *gts) 364 353 { 365 - int *times, i, j, idx = 0; 354 + int *times, i, j, idx = 0, *int_micro_times; 366 355 367 356 if (!gts->num_itime) 368 357 return 0; ··· 389 378 } 390 379 } 391 380 } 392 - gts->avail_time_tables = times; 393 - /* 394 - * This is just to survive a unlikely corner-case where times in the 395 - * given time table were not unique. Else we could just trust the 396 - * gts->num_itime. 397 - */ 398 - gts->num_avail_time_tables = idx; 381 + 382 + /* create a list of times formatted as list of IIO_VAL_INT_PLUS_MICRO */ 383 + int_micro_times = kcalloc(idx, sizeof(int) * 2, GFP_KERNEL); 384 + if (int_micro_times) { 385 + /* 386 + * This is just to survive a unlikely corner-case where times in 387 + * the given time table were not unique. Else we could just 388 + * trust the gts->num_itime. 389 + */ 390 + gts->num_avail_time_tables = idx; 391 + iio_gts_us_to_int_micro(times, int_micro_times, idx); 392 + } 393 + 394 + gts->avail_time_tables = int_micro_times; 395 + kfree(times); 396 + 397 + if (!int_micro_times) 398 + return -ENOMEM; 399 399 400 400 return 0; 401 401 } ··· 705 683 return -EINVAL; 706 684 707 685 *vals = gts->avail_time_tables; 708 - *type = IIO_VAL_INT; 709 - *length = gts->num_avail_time_tables; 686 + *type = IIO_VAL_INT_PLUS_MICRO; 687 + *length = gts->num_avail_time_tables * 2; 710 688 711 689 return IIO_AVAIL_LIST; 712 690 }
+20 -6
drivers/iio/light/rohm-bu27034.c
··· 231 231 232 232 static const struct regmap_range bu27034_volatile_ranges[] = { 233 233 { 234 + .range_min = BU27034_REG_SYSTEM_CONTROL, 235 + .range_max = BU27034_REG_SYSTEM_CONTROL, 236 + }, { 234 237 .range_min = BU27034_REG_MODE_CONTROL4, 235 238 .range_max = BU27034_REG_MODE_CONTROL4, 236 239 }, { ··· 1170 1167 1171 1168 switch (mask) { 1172 1169 case IIO_CHAN_INFO_INT_TIME: 1173 - *val = bu27034_get_int_time(data); 1174 - if (*val < 0) 1175 - return *val; 1170 + *val = 0; 1171 + *val2 = bu27034_get_int_time(data); 1172 + if (*val2 < 0) 1173 + return *val2; 1176 1174 1177 - return IIO_VAL_INT; 1175 + return IIO_VAL_INT_PLUS_MICRO; 1178 1176 1179 1177 case IIO_CHAN_INFO_SCALE: 1180 1178 return bu27034_get_scale(data, chan->channel, val, val2); ··· 1233 1229 ret = bu27034_set_scale(data, chan->channel, val, val2); 1234 1230 break; 1235 1231 case IIO_CHAN_INFO_INT_TIME: 1236 - ret = bu27034_try_set_int_time(data, val); 1232 + if (!val) 1233 + ret = bu27034_try_set_int_time(data, val2); 1234 + else 1235 + ret = -EINVAL; 1237 1236 break; 1238 1237 default: 1239 1238 ret = -EINVAL; ··· 1275 1268 int ret, sel; 1276 1269 1277 1270 /* Reset */ 1278 - ret = regmap_update_bits(data->regmap, BU27034_REG_SYSTEM_CONTROL, 1271 + ret = regmap_write_bits(data->regmap, BU27034_REG_SYSTEM_CONTROL, 1279 1272 BU27034_MASK_SW_RESET, BU27034_MASK_SW_RESET); 1280 1273 if (ret) 1281 1274 return dev_err_probe(data->dev, ret, "Sensor reset failed\n"); 1282 1275 1283 1276 msleep(1); 1277 + 1278 + ret = regmap_reinit_cache(data->regmap, &bu27034_regmap); 1279 + if (ret) { 1280 + dev_err(data->dev, "Failed to reinit reg cache\n"); 1281 + return ret; 1282 + } 1283 + 1284 1284 /* 1285 1285 * Read integration time here to ensure it is in regmap cache. We do 1286 1286 * this to speed-up the int-time acquisition in the start of the buffer
+3
drivers/iio/light/vcnl4035.c
··· 8 8 * TODO: Proximity 9 9 */ 10 10 #include <linux/bitops.h> 11 + #include <linux/bitfield.h> 11 12 #include <linux/i2c.h> 12 13 #include <linux/module.h> 13 14 #include <linux/pm_runtime.h> ··· 43 42 #define VCNL4035_ALS_PERS_MASK GENMASK(3, 2) 44 43 #define VCNL4035_INT_ALS_IF_H_MASK BIT(12) 45 44 #define VCNL4035_INT_ALS_IF_L_MASK BIT(13) 45 + #define VCNL4035_DEV_ID_MASK GENMASK(7, 0) 46 46 47 47 /* Default values */ 48 48 #define VCNL4035_MODE_ALS_ENABLE BIT(0) ··· 415 413 return ret; 416 414 } 417 415 416 + id = FIELD_GET(VCNL4035_DEV_ID_MASK, id); 418 417 if (id != VCNL4035_DEV_ID_VAL) { 419 418 dev_err(&data->client->dev, "Wrong id, got %x, expected %x\n", 420 419 id, VCNL4035_DEV_ID_VAL);
+3 -2
drivers/iio/magnetometer/tmag5273.c
··· 296 296 return ret; 297 297 298 298 ret = tmag5273_get_measure(data, &t, &x, &y, &z, &angle, &magnitude); 299 - if (ret) 300 - return ret; 301 299 302 300 pm_runtime_mark_last_busy(data->dev); 303 301 pm_runtime_put_autosuspend(data->dev); 302 + 303 + if (ret) 304 + return ret; 304 305 305 306 switch (chan->address) { 306 307 case TEMPERATURE:
+1 -1
drivers/input/input.c
··· 703 703 704 704 __input_release_device(handle); 705 705 706 - if (!dev->inhibited && !--dev->users) { 706 + if (!--dev->users && !dev->inhibited) { 707 707 if (dev->poller) 708 708 input_dev_poller_stop(dev->poller); 709 709 if (dev->close)
-1
drivers/input/joystick/xpad.c
··· 281 281 { 0x1430, 0xf801, "RedOctane Controller", 0, XTYPE_XBOX360 }, 282 282 { 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 }, 283 283 { 0x146b, 0x0604, "Bigben Interactive DAIJA Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, 284 - { 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 }, 285 284 { 0x1532, 0x0a00, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE }, 286 285 { 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE }, 287 286 { 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 },
+30
drivers/input/misc/soc_button_array.c
··· 109 109 }; 110 110 111 111 /* 112 + * Some devices have a wrong entry which points to a GPIO which is 113 + * required in another driver, so this driver must not claim it. 114 + */ 115 + static const struct dmi_system_id dmi_invalid_acpi_index[] = { 116 + { 117 + /* 118 + * Lenovo Yoga Book X90F / X90L, the PNP0C40 home button entry 119 + * points to a GPIO which is not a home button and which is 120 + * required by the lenovo-yogabook driver. 121 + */ 122 + .matches = { 123 + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), 124 + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"), 125 + DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"), 126 + }, 127 + .driver_data = (void *)1l, 128 + }, 129 + {} /* Terminating entry */ 130 + }; 131 + 132 + /* 112 133 * Get the Nth GPIO number from the ACPI object. 113 134 */ 114 135 static int soc_button_lookup_gpio(struct device *dev, int acpi_index, ··· 158 137 struct platform_device *pd; 159 138 struct gpio_keys_button *gpio_keys; 160 139 struct gpio_keys_platform_data *gpio_keys_pdata; 140 + const struct dmi_system_id *dmi_id; 141 + int invalid_acpi_index = -1; 161 142 int error, gpio, irq; 162 143 int n_buttons = 0; 163 144 ··· 177 154 gpio_keys = (void *)(gpio_keys_pdata + 1); 178 155 n_buttons = 0; 179 156 157 + dmi_id = dmi_first_match(dmi_invalid_acpi_index); 158 + if (dmi_id) 159 + invalid_acpi_index = (long)dmi_id->driver_data; 160 + 180 161 for (info = button_info; info->name; info++) { 181 162 if (info->autorepeat != autorepeat) 163 + continue; 164 + 165 + if (info->acpi_index == invalid_acpi_index) 182 166 continue; 183 167 184 168 error = soc_button_lookup_gpio(&pdev->dev, info->acpi_index, &gpio, &irq);
+5 -4
drivers/input/mouse/elantech.c
··· 674 674 struct input_dev *dev = psmouse->dev; 675 675 struct elantech_data *etd = psmouse->private; 676 676 unsigned char *packet = psmouse->packet; 677 - int id = ((packet[3] & 0xe0) >> 5) - 1; 677 + int id; 678 678 int pres, traces; 679 679 680 - if (id < 0) 680 + id = ((packet[3] & 0xe0) >> 5) - 1; 681 + if (id < 0 || id >= ETP_MAX_FINGERS) 681 682 return; 682 683 683 684 etd->mt[id].x = ((packet[1] & 0x0f) << 8) | packet[2]; ··· 708 707 int id, sid; 709 708 710 709 id = ((packet[0] & 0xe0) >> 5) - 1; 711 - if (id < 0) 710 + if (id < 0 || id >= ETP_MAX_FINGERS) 712 711 return; 713 712 714 713 sid = ((packet[3] & 0xe0) >> 5) - 1; ··· 729 728 input_report_abs(dev, ABS_MT_POSITION_X, etd->mt[id].x); 730 729 input_report_abs(dev, ABS_MT_POSITION_Y, etd->mt[id].y); 731 730 732 - if (sid >= 0) { 731 + if (sid >= 0 && sid < ETP_MAX_FINGERS) { 733 732 etd->mt[sid].x += delta_x2 * weight; 734 733 etd->mt[sid].y -= delta_y2 * weight; 735 734 input_mt_slot(dev, sid);
+1 -1
drivers/input/touchscreen/cyttsp5.c
··· 560 560 static int cyttsp5_hid_output_bl_launch_app(struct cyttsp5 *ts) 561 561 { 562 562 int rc; 563 - u8 cmd[HID_OUTPUT_BL_LAUNCH_APP]; 563 + u8 cmd[HID_OUTPUT_BL_LAUNCH_APP_SIZE]; 564 564 u16 crc; 565 565 566 566 put_unaligned_le16(HID_OUTPUT_BL_LAUNCH_APP_SIZE, cmd);
+1
drivers/iommu/Kconfig
··· 282 282 config IPMMU_VMSA 283 283 bool "Renesas VMSA-compatible IPMMU" 284 284 depends on ARCH_RENESAS || COMPILE_TEST 285 + depends on ARM || ARM64 || COMPILE_TEST 285 286 depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE 286 287 select IOMMU_API 287 288 select IOMMU_IO_PGTABLE_LPAE
+1 -3
drivers/iommu/amd/amd_iommu.h
··· 15 15 extern irqreturn_t amd_iommu_int_handler(int irq, void *data); 16 16 extern void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid); 17 17 extern void amd_iommu_restart_event_logging(struct amd_iommu *iommu); 18 - extern int amd_iommu_init_devices(void); 19 - extern void amd_iommu_uninit_devices(void); 20 - extern void amd_iommu_init_notifier(void); 18 + extern void amd_iommu_restart_ga_log(struct amd_iommu *iommu); 21 19 extern void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid); 22 20 23 21 #ifdef CONFIG_AMD_IOMMU_DEBUGFS
+24
drivers/iommu/amd/init.c
··· 759 759 } 760 760 761 761 /* 762 + * This function restarts event logging in case the IOMMU experienced 763 + * an GA log overflow. 764 + */ 765 + void amd_iommu_restart_ga_log(struct amd_iommu *iommu) 766 + { 767 + u32 status; 768 + 769 + status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); 770 + if (status & MMIO_STATUS_GALOG_RUN_MASK) 771 + return; 772 + 773 + pr_info_ratelimited("IOMMU GA Log restarting\n"); 774 + 775 + iommu_feature_disable(iommu, CONTROL_GALOG_EN); 776 + iommu_feature_disable(iommu, CONTROL_GAINT_EN); 777 + 778 + writel(MMIO_STATUS_GALOG_OVERFLOW_MASK, 779 + iommu->mmio_base + MMIO_STATUS_OFFSET); 780 + 781 + iommu_feature_enable(iommu, CONTROL_GAINT_EN); 782 + iommu_feature_enable(iommu, CONTROL_GALOG_EN); 783 + } 784 + 785 + /* 762 786 * This function resets the command buffer if the IOMMU stopped fetching 763 787 * commands from it. 764 788 */
+25 -6
drivers/iommu/amd/iommu.c
··· 845 845 (MMIO_STATUS_EVT_OVERFLOW_INT_MASK | \ 846 846 MMIO_STATUS_EVT_INT_MASK | \ 847 847 MMIO_STATUS_PPR_INT_MASK | \ 848 + MMIO_STATUS_GALOG_OVERFLOW_MASK | \ 848 849 MMIO_STATUS_GALOG_INT_MASK) 849 850 850 851 irqreturn_t amd_iommu_int_thread(int irq, void *data) ··· 869 868 } 870 869 871 870 #ifdef CONFIG_IRQ_REMAP 872 - if (status & MMIO_STATUS_GALOG_INT_MASK) { 871 + if (status & (MMIO_STATUS_GALOG_INT_MASK | 872 + MMIO_STATUS_GALOG_OVERFLOW_MASK)) { 873 873 pr_devel("Processing IOMMU GA Log\n"); 874 874 iommu_poll_ga_log(iommu); 875 + } 876 + 877 + if (status & MMIO_STATUS_GALOG_OVERFLOW_MASK) { 878 + pr_info_ratelimited("IOMMU GA Log overflow\n"); 879 + amd_iommu_restart_ga_log(iommu); 875 880 } 876 881 #endif 877 882 ··· 2074 2067 { 2075 2068 struct io_pgtable_ops *pgtbl_ops; 2076 2069 struct protection_domain *domain; 2077 - int pgtable = amd_iommu_pgtable; 2070 + int pgtable; 2078 2071 int mode = DEFAULT_PGTABLE_LEVEL; 2079 2072 int ret; 2080 2073 ··· 2091 2084 mode = PAGE_MODE_NONE; 2092 2085 } else if (type == IOMMU_DOMAIN_UNMANAGED) { 2093 2086 pgtable = AMD_IOMMU_V1; 2087 + } else if (type == IOMMU_DOMAIN_DMA || type == IOMMU_DOMAIN_DMA_FQ) { 2088 + pgtable = amd_iommu_pgtable; 2089 + } else { 2090 + return NULL; 2094 2091 } 2095 2092 2096 2093 switch (pgtable) { ··· 2129 2118 return NULL; 2130 2119 } 2131 2120 2121 + static inline u64 dma_max_address(void) 2122 + { 2123 + if (amd_iommu_pgtable == AMD_IOMMU_V1) 2124 + return ~0ULL; 2125 + 2126 + /* V2 with 4/5 level page table */ 2127 + return ((1ULL << PM_LEVEL_SHIFT(amd_iommu_gpt_level)) - 1); 2128 + } 2129 + 2132 2130 static struct iommu_domain *amd_iommu_domain_alloc(unsigned type) 2133 2131 { 2134 2132 struct protection_domain *domain; ··· 2154 2134 return NULL; 2155 2135 2156 2136 domain->domain.geometry.aperture_start = 0; 2157 - domain->domain.geometry.aperture_end = ~0ULL; 2137 + domain->domain.geometry.aperture_end = dma_max_address(); 2158 2138 domain->domain.geometry.force_aperture 
= true; 2159 2139 2160 2140 return &domain->domain; ··· 2407 2387 unsigned long flags; 2408 2388 2409 2389 spin_lock_irqsave(&dom->lock, flags); 2410 - domain_flush_pages(dom, gather->start, gather->end - gather->start, 1); 2390 + domain_flush_pages(dom, gather->start, gather->end - gather->start + 1, 1); 2411 2391 amd_iommu_domain_flush_complete(dom); 2412 2392 spin_unlock_irqrestore(&dom->lock, flags); 2413 2393 } ··· 3513 3493 struct irte_ga *entry = (struct irte_ga *) ir_data->entry; 3514 3494 u64 valid; 3515 3495 3516 - if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) || 3517 - !entry || entry->lo.fields_vapic.guest_mode) 3496 + if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) || !entry) 3518 3497 return 0; 3519 3498 3520 3499 valid = entry->lo.fields_vapic.valid;
+2 -1
drivers/iommu/mtk_iommu.c
··· 781 781 { 782 782 struct mtk_iommu_domain *dom = to_mtk_domain(domain); 783 783 784 - mtk_iommu_tlb_flush_all(dom->bank->parent_data); 784 + if (dom->bank) 785 + mtk_iommu_tlb_flush_all(dom->bank->parent_data); 785 786 } 786 787 787 788 static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
+8 -6
drivers/iommu/rockchip-iommu.c
··· 1335 1335 for (i = 0; i < iommu->num_irq; i++) { 1336 1336 int irq = platform_get_irq(pdev, i); 1337 1337 1338 - if (irq < 0) 1339 - return irq; 1338 + if (irq < 0) { 1339 + err = irq; 1340 + goto err_pm_disable; 1341 + } 1340 1342 1341 1343 err = devm_request_irq(iommu->dev, irq, rk_iommu_irq, 1342 1344 IRQF_SHARED, dev_name(dev), iommu); 1343 - if (err) { 1344 - pm_runtime_disable(dev); 1345 - goto err_remove_sysfs; 1346 - } 1345 + if (err) 1346 + goto err_pm_disable; 1347 1347 } 1348 1348 1349 1349 dma_set_mask_and_coherent(dev, rk_ops->dma_bit_mask); 1350 1350 1351 1351 return 0; 1352 + err_pm_disable: 1353 + pm_runtime_disable(dev); 1352 1354 err_remove_sysfs: 1353 1355 iommu_device_sysfs_remove(&iommu->iommu); 1354 1356 err_put_group:
+2
drivers/irqchip/irq-gic-common.c
··· 16 16 const struct gic_quirk *quirks, void *data) 17 17 { 18 18 for (; quirks->desc; quirks++) { 19 + if (!quirks->compatible && !quirks->property) 20 + continue; 19 21 if (quirks->compatible && 20 22 !of_device_is_compatible(np, quirks->compatible)) 21 23 continue;
+4 -4
drivers/leds/rgb/leds-qcom-lpg.c
··· 312 312 max_res = LPG_RESOLUTION_9BIT; 313 313 } 314 314 315 - min_period = (u64)NSEC_PER_SEC * 316 - div64_u64((1 << pwm_resolution_arr[0]), clk_rate_arr[clk_len - 1]); 315 + min_period = div64_u64((u64)NSEC_PER_SEC * (1 << pwm_resolution_arr[0]), 316 + clk_rate_arr[clk_len - 1]); 317 317 if (period <= min_period) 318 318 return -EINVAL; 319 319 320 320 /* Limit period to largest possible value, to avoid overflows */ 321 - max_period = (u64)NSEC_PER_SEC * max_res * LPG_MAX_PREDIV * 322 - div64_u64((1 << LPG_MAX_M), 1024); 321 + max_period = div64_u64((u64)NSEC_PER_SEC * max_res * LPG_MAX_PREDIV * (1 << LPG_MAX_M), 322 + 1024); 323 323 if (period > max_period) 324 324 period = max_period; 325 325
+1 -1
drivers/md/raid5.c
··· 5516 5516 5517 5517 sector = raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector, 0, 5518 5518 &dd_idx, NULL); 5519 - end_sector = bio_end_sector(raid_bio); 5519 + end_sector = sector + bio_sectors(raid_bio); 5520 5520 5521 5521 rcu_read_lock(); 5522 5522 if (r5c_big_stripe_cached(conf, sector))
+6 -2
drivers/media/cec/core/cec-adap.c
··· 1091 1091 mutex_lock(&adap->lock); 1092 1092 dprintk(2, "%s: %*ph\n", __func__, msg->len, msg->msg); 1093 1093 1094 - adap->last_initiator = 0xff; 1094 + if (!adap->transmit_in_progress) 1095 + adap->last_initiator = 0xff; 1095 1096 1096 1097 /* Check if this message was for us (directed or broadcast). */ 1097 1098 if (!cec_msg_is_broadcast(msg)) { ··· 1586 1585 * 1587 1586 * This function is called with adap->lock held. 1588 1587 */ 1589 - static int cec_adap_enable(struct cec_adapter *adap) 1588 + int cec_adap_enable(struct cec_adapter *adap) 1590 1589 { 1591 1590 bool enable; 1592 1591 int ret = 0; ··· 1595 1594 adap->log_addrs.num_log_addrs; 1596 1595 if (adap->needs_hpd) 1597 1596 enable = enable && adap->phys_addr != CEC_PHYS_ADDR_INVALID; 1597 + 1598 + if (adap->devnode.unregistered) 1599 + enable = false; 1598 1600 1599 1601 if (enable == adap->is_enabled) 1600 1602 return 0;
+2
drivers/media/cec/core/cec-core.c
··· 191 191 mutex_lock(&adap->lock); 192 192 __cec_s_phys_addr(adap, CEC_PHYS_ADDR_INVALID, false); 193 193 __cec_s_log_addrs(adap, NULL, false); 194 + // Disable the adapter (since adap->devnode.unregistered is true) 195 + cec_adap_enable(adap); 194 196 mutex_unlock(&adap->lock); 195 197 196 198 cdev_device_del(&devnode->cdev, &devnode->dev);
+1
drivers/media/cec/core/cec-priv.h
··· 47 47 void cec_monitor_pin_cnt_dec(struct cec_adapter *adap); 48 48 int cec_adap_status(struct seq_file *file, void *priv); 49 49 int cec_thread_func(void *_adap); 50 + int cec_adap_enable(struct cec_adapter *adap); 50 51 void __cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block); 51 52 int __cec_s_log_addrs(struct cec_adapter *adap, 52 53 struct cec_log_addrs *log_addrs, bool block);
+3
drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateful.c
··· 584 584 585 585 if (!(ctx->dev->dec_capability & VCODEC_CAPABILITY_4K_DISABLED)) { 586 586 for (i = 0; i < num_supported_formats; i++) { 587 + if (mtk_video_formats[i].type != MTK_FMT_DEC) 588 + continue; 589 + 587 590 mtk_video_formats[i].frmsize.max_width = 588 591 VCODEC_DEC_4K_CODED_WIDTH; 589 592 mtk_video_formats[i].frmsize.max_height =
-1
drivers/media/platform/qcom/camss/camss-video.c
··· 353 353 if (subdev == NULL) 354 354 return -EPIPE; 355 355 356 - memset(&fmt, 0, sizeof(fmt)); 357 356 fmt.pad = pad; 358 357 359 358 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
+4 -2
drivers/media/platform/verisilicon/hantro_v4l2.c
··· 397 397 if (!raw_vpu_fmt) 398 398 return -EINVAL; 399 399 400 - if (ctx->is_encoder) 400 + if (ctx->is_encoder) { 401 401 encoded_fmt = &ctx->dst_fmt; 402 - else 402 + ctx->vpu_src_fmt = raw_vpu_fmt; 403 + } else { 403 404 encoded_fmt = &ctx->src_fmt; 405 + } 404 406 405 407 hantro_reset_fmt(&raw_fmt, raw_vpu_fmt); 406 408 raw_fmt.width = encoded_fmt->width;
+11 -5
drivers/media/usb/uvc/uvc_driver.c
··· 251 251 /* Find the format descriptor from its GUID. */ 252 252 fmtdesc = uvc_format_by_guid(&buffer[5]); 253 253 254 - if (fmtdesc != NULL) { 255 - format->fcc = fmtdesc->fcc; 256 - } else { 254 + if (!fmtdesc) { 255 + /* 256 + * Unknown video formats are not fatal errors, the 257 + * caller will skip this descriptor. 258 + */ 257 259 dev_info(&streaming->intf->dev, 258 260 "Unknown video format %pUl\n", &buffer[5]); 259 - format->fcc = 0; 261 + return 0; 260 262 } 261 263 264 + format->fcc = fmtdesc->fcc; 262 265 format->bpp = buffer[21]; 263 266 264 267 /* ··· 678 675 interval = (u32 *)&frame[nframes]; 679 676 680 677 streaming->format = format; 681 - streaming->nformats = nformats; 678 + streaming->nformats = 0; 682 679 683 680 /* Parse the format descriptors. */ 684 681 while (buflen > 2 && buffer[1] == USB_DT_CS_INTERFACE) { ··· 692 689 &interval, buffer, buflen); 693 690 if (ret < 0) 694 691 goto error; 692 + if (!ret) 693 + break; 695 694 695 + streaming->nformats++; 696 696 frame += format->nframes; 697 697 format++; 698 698
+1 -2
drivers/media/v4l2-core/v4l2-mc.c
··· 314 314 { 315 315 struct fwnode_handle *endpoint; 316 316 317 - if (!(sink->flags & MEDIA_PAD_FL_SINK) || 318 - !is_media_entity_v4l2_subdev(sink->entity)) 317 + if (!(sink->flags & MEDIA_PAD_FL_SINK)) 319 318 return -EINVAL; 320 319 321 320 fwnode_graph_for_each_endpoint(dev_fwnode(src_sd->dev), endpoint) {
+23 -8
drivers/misc/fastrpc.c
··· 316 316 if (map->table) { 317 317 if (map->attr & FASTRPC_ATTR_SECUREMAP) { 318 318 struct qcom_scm_vmperm perm; 319 + int vmid = map->fl->cctx->vmperms[0].vmid; 320 + u64 src_perms = BIT(QCOM_SCM_VMID_HLOS) | BIT(vmid); 319 321 int err = 0; 320 322 321 323 perm.vmid = QCOM_SCM_VMID_HLOS; 322 324 perm.perm = QCOM_SCM_PERM_RWX; 323 325 err = qcom_scm_assign_mem(map->phys, map->size, 324 - &map->fl->cctx->perms, &perm, 1); 326 + &src_perms, &perm, 1); 325 327 if (err) { 326 328 dev_err(map->fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d", 327 329 map->phys, map->size, err); ··· 789 787 goto map_err; 790 788 } 791 789 792 - map->phys = sg_dma_address(map->table->sgl); 793 - map->phys += ((u64)fl->sctx->sid << 32); 790 + if (attr & FASTRPC_ATTR_SECUREMAP) { 791 + map->phys = sg_phys(map->table->sgl); 792 + } else { 793 + map->phys = sg_dma_address(map->table->sgl); 794 + map->phys += ((u64)fl->sctx->sid << 32); 795 + } 794 796 map->size = len; 795 797 map->va = sg_virt(map->table->sgl); 796 798 map->len = len; ··· 804 798 * If subsystem VMIDs are defined in DTSI, then do 805 799 * hyp_assign from HLOS to those VM(s) 806 800 */ 801 + u64 src_perms = BIT(QCOM_SCM_VMID_HLOS); 802 + struct qcom_scm_vmperm dst_perms[2] = {0}; 803 + 804 + dst_perms[0].vmid = QCOM_SCM_VMID_HLOS; 805 + dst_perms[0].perm = QCOM_SCM_PERM_RW; 806 + dst_perms[1].vmid = fl->cctx->vmperms[0].vmid; 807 + dst_perms[1].perm = QCOM_SCM_PERM_RWX; 807 808 map->attr = attr; 808 - err = qcom_scm_assign_mem(map->phys, (u64)map->size, &fl->cctx->perms, 809 - fl->cctx->vmperms, fl->cctx->vmcount); 809 + err = qcom_scm_assign_mem(map->phys, (u64)map->size, &src_perms, dst_perms, 2); 810 810 if (err) { 811 811 dev_err(sess->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d", 812 812 map->phys, map->size, err); ··· 1904 1892 req.vaddrout = rsp_msg.vaddr; 1905 1893 1906 1894 /* Add memory to static PD pool, protection thru hypervisor */ 1907 - if (req.flags != 
ADSP_MMAP_REMOTE_HEAP_ADDR && fl->cctx->vmcount) { 1895 + if (req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR && fl->cctx->vmcount) { 1908 1896 struct qcom_scm_vmperm perm; 1909 1897 1910 1898 perm.vmid = QCOM_SCM_VMID_HLOS; ··· 2349 2337 struct fastrpc_invoke_ctx *ctx; 2350 2338 2351 2339 spin_lock(&user->lock); 2352 - list_for_each_entry(ctx, &user->pending, node) 2340 + list_for_each_entry(ctx, &user->pending, node) { 2341 + ctx->retval = -EPIPE; 2353 2342 complete(&ctx->work); 2343 + } 2354 2344 spin_unlock(&user->lock); 2355 2345 } 2356 2346 ··· 2363 2349 struct fastrpc_user *user; 2364 2350 unsigned long flags; 2365 2351 2352 + /* No invocations past this point */ 2366 2353 spin_lock_irqsave(&cctx->lock, flags); 2354 + cctx->rpdev = NULL; 2367 2355 list_for_each_entry(user, &cctx->users, user) 2368 2356 fastrpc_notify_users(user); 2369 2357 spin_unlock_irqrestore(&cctx->lock, flags); ··· 2384 2368 2385 2369 of_platform_depopulate(&rpdev->dev); 2386 2370 2387 - cctx->rpdev = NULL; 2388 2371 fastrpc_channel_ctx_put(cctx); 2389 2372 } 2390 2373
+26 -8
drivers/mmc/core/pwrseq_sd8787.c
··· 28 28 struct mmc_pwrseq pwrseq; 29 29 struct gpio_desc *reset_gpio; 30 30 struct gpio_desc *pwrdn_gpio; 31 - u32 reset_pwrdwn_delay_ms; 32 31 }; 33 32 34 33 #define to_pwrseq_sd8787(p) container_of(p, struct mmc_pwrseq_sd8787, pwrseq) ··· 38 39 39 40 gpiod_set_value_cansleep(pwrseq->reset_gpio, 1); 40 41 41 - msleep(pwrseq->reset_pwrdwn_delay_ms); 42 + msleep(300); 42 43 gpiod_set_value_cansleep(pwrseq->pwrdn_gpio, 1); 43 44 } 44 45 ··· 50 51 gpiod_set_value_cansleep(pwrseq->reset_gpio, 0); 51 52 } 52 53 54 + static void mmc_pwrseq_wilc1000_pre_power_on(struct mmc_host *host) 55 + { 56 + struct mmc_pwrseq_sd8787 *pwrseq = to_pwrseq_sd8787(host->pwrseq); 57 + 58 + /* The pwrdn_gpio is really CHIP_EN, reset_gpio is RESETN */ 59 + gpiod_set_value_cansleep(pwrseq->pwrdn_gpio, 1); 60 + msleep(5); 61 + gpiod_set_value_cansleep(pwrseq->reset_gpio, 1); 62 + } 63 + 64 + static void mmc_pwrseq_wilc1000_power_off(struct mmc_host *host) 65 + { 66 + struct mmc_pwrseq_sd8787 *pwrseq = to_pwrseq_sd8787(host->pwrseq); 67 + 68 + gpiod_set_value_cansleep(pwrseq->reset_gpio, 0); 69 + gpiod_set_value_cansleep(pwrseq->pwrdn_gpio, 0); 70 + } 71 + 53 72 static const struct mmc_pwrseq_ops mmc_pwrseq_sd8787_ops = { 54 73 .pre_power_on = mmc_pwrseq_sd8787_pre_power_on, 55 74 .power_off = mmc_pwrseq_sd8787_power_off, 56 75 }; 57 76 58 - static const u32 sd8787_delay_ms = 300; 59 - static const u32 wilc1000_delay_ms = 5; 77 + static const struct mmc_pwrseq_ops mmc_pwrseq_wilc1000_ops = { 78 + .pre_power_on = mmc_pwrseq_wilc1000_pre_power_on, 79 + .power_off = mmc_pwrseq_wilc1000_power_off, 80 + }; 60 81 61 82 static const struct of_device_id mmc_pwrseq_sd8787_of_match[] = { 62 - { .compatible = "mmc-pwrseq-sd8787", .data = &sd8787_delay_ms }, 63 - { .compatible = "mmc-pwrseq-wilc1000", .data = &wilc1000_delay_ms }, 83 + { .compatible = "mmc-pwrseq-sd8787", .data = &mmc_pwrseq_sd8787_ops }, 84 + { .compatible = "mmc-pwrseq-wilc1000", .data = &mmc_pwrseq_wilc1000_ops }, 64 85 {/* sentinel 
*/}, 65 86 }; 66 87 MODULE_DEVICE_TABLE(of, mmc_pwrseq_sd8787_of_match); ··· 96 77 return -ENOMEM; 97 78 98 79 match = of_match_node(mmc_pwrseq_sd8787_of_match, pdev->dev.of_node); 99 - pwrseq->reset_pwrdwn_delay_ms = *(u32 *)match->data; 100 80 101 81 pwrseq->pwrdn_gpio = devm_gpiod_get(dev, "powerdown", GPIOD_OUT_LOW); 102 82 if (IS_ERR(pwrseq->pwrdn_gpio)) ··· 106 88 return PTR_ERR(pwrseq->reset_gpio); 107 89 108 90 pwrseq->pwrseq.dev = dev; 109 - pwrseq->pwrseq.ops = &mmc_pwrseq_sd8787_ops; 91 + pwrseq->pwrseq.ops = match->data; 110 92 pwrseq->pwrseq.owner = THIS_MODULE; 111 93 platform_set_drvdata(pdev, pwrseq); 112 94
+3
drivers/mmc/host/vub300.c
··· 1713 1713 int bytes = 3 & less_cmd; 1714 1714 int words = less_cmd >> 2; 1715 1715 u8 *r = vub300->resp.response.command_response; 1716 + 1717 + if (!resp_len) 1718 + return; 1716 1719 if (bytes == 3) { 1717 1720 cmd->resp[words] = (r[1 + (words << 2)] << 24) 1718 1721 | (r[2 + (words << 2)] << 16)
+4 -4
drivers/mtd/mtdchar.c
··· 590 590 (end_page - start_page + 1) * oob_per_page); 591 591 } 592 592 593 - static int mtdchar_write_ioctl(struct mtd_info *mtd, 594 - struct mtd_write_req __user *argp) 593 + static noinline_for_stack int 594 + mtdchar_write_ioctl(struct mtd_info *mtd, struct mtd_write_req __user *argp) 595 595 { 596 596 struct mtd_info *master = mtd_get_master(mtd); 597 597 struct mtd_write_req req; ··· 688 688 return ret; 689 689 } 690 690 691 - static int mtdchar_read_ioctl(struct mtd_info *mtd, 692 - struct mtd_read_req __user *argp) 691 + static noinline_for_stack int 692 + mtdchar_read_ioctl(struct mtd_info *mtd, struct mtd_read_req __user *argp) 693 693 { 694 694 struct mtd_info *master = mtd_get_master(mtd); 695 695 struct mtd_read_req req;
+4 -4
drivers/mtd/nand/raw/ingenic/ingenic_ecc.h
··· 36 36 void ingenic_ecc_release(struct ingenic_ecc *ecc); 37 37 struct ingenic_ecc *of_ingenic_ecc_get(struct device_node *np); 38 38 #else /* CONFIG_MTD_NAND_INGENIC_ECC */ 39 - int ingenic_ecc_calculate(struct ingenic_ecc *ecc, 39 + static inline int ingenic_ecc_calculate(struct ingenic_ecc *ecc, 40 40 struct ingenic_ecc_params *params, 41 41 const u8 *buf, u8 *ecc_code) 42 42 { 43 43 return -ENODEV; 44 44 } 45 45 46 - int ingenic_ecc_correct(struct ingenic_ecc *ecc, 46 + static inline int ingenic_ecc_correct(struct ingenic_ecc *ecc, 47 47 struct ingenic_ecc_params *params, u8 *buf, 48 48 u8 *ecc_code) 49 49 { 50 50 return -ENODEV; 51 51 } 52 52 53 - void ingenic_ecc_release(struct ingenic_ecc *ecc) 53 + static inline void ingenic_ecc_release(struct ingenic_ecc *ecc) 54 54 { 55 55 } 56 56 57 - struct ingenic_ecc *of_ingenic_ecc_get(struct device_node *np) 57 + static inline struct ingenic_ecc *of_ingenic_ecc_get(struct device_node *np) 58 58 { 59 59 return ERR_PTR(-ENODEV); 60 60 }
+6 -4
drivers/mtd/nand/raw/marvell_nand.c
··· 2457 2457 NDTR1_WAIT_MODE; 2458 2458 } 2459 2459 2460 + /* 2461 + * Reset nfc->selected_chip so the next command will cause the timing 2462 + * registers to be updated in marvell_nfc_select_target(). 2463 + */ 2464 + nfc->selected_chip = NULL; 2465 + 2460 2466 return 0; 2461 2467 } 2462 2468 ··· 2900 2894 regmap_update_bits(sysctrl_base, GENCONF_CLK_GATING_CTRL, 2901 2895 GENCONF_CLK_GATING_CTRL_ND_GATE, 2902 2896 GENCONF_CLK_GATING_CTRL_ND_GATE); 2903 - 2904 - regmap_update_bits(sysctrl_base, GENCONF_ND_CLK_CTRL, 2905 - GENCONF_ND_CLK_CTRL_EN, 2906 - GENCONF_ND_CLK_CTRL_EN); 2907 2897 } 2908 2898 2909 2899 /* Configure the DMA if appropriate */
+4 -1
drivers/mtd/spi-nor/core.c
··· 2018 2018 2019 2019 static const struct flash_info spi_nor_generic_flash = { 2020 2020 .name = "spi-nor-generic", 2021 + .n_banks = 1, 2021 2022 /* 2022 2023 * JESD216 rev A doesn't specify the page size, therefore we need a 2023 2024 * sane default. ··· 2922 2921 if (nor->flags & SNOR_F_HAS_LOCK && !nor->params->locking_ops) 2923 2922 spi_nor_init_default_locking_ops(nor); 2924 2923 2925 - nor->params->bank_size = div64_u64(nor->params->size, nor->info->n_banks); 2924 + if (nor->info->n_banks > 1) 2925 + params->bank_size = div64_u64(params->size, nor->info->n_banks); 2926 2926 } 2927 2927 2928 2928 /** ··· 2989 2987 /* Set SPI NOR sizes. */ 2990 2988 params->writesize = 1; 2991 2989 params->size = (u64)info->sector_size * info->n_sectors; 2990 + params->bank_size = params->size; 2992 2991 params->page_size = info->page_size; 2993 2992 2994 2993 if (!(info->flags & SPI_NOR_NO_FR)) {
+2 -2
drivers/mtd/spi-nor/spansion.c
··· 361 361 */ 362 362 static int cypress_nor_set_addr_mode_nbytes(struct spi_nor *nor) 363 363 { 364 - struct spi_mem_op op; 364 + struct spi_mem_op op = {}; 365 365 u8 addr_mode; 366 366 int ret; 367 367 ··· 492 492 const struct sfdp_parameter_header *bfpt_header, 493 493 const struct sfdp_bfpt *bfpt) 494 494 { 495 - struct spi_mem_op op; 495 + struct spi_mem_op op = {}; 496 496 int ret; 497 497 498 498 ret = cypress_nor_set_addr_mode_nbytes(nor);
-4
drivers/net/dsa/lan9303-core.c
··· 1188 1188 struct lan9303 *chip = ds->priv; 1189 1189 1190 1190 dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, addr, vid); 1191 - if (vid) 1192 - return -EOPNOTSUPP; 1193 1191 1194 1192 return lan9303_alr_add_port(chip, addr, port, false); 1195 1193 } ··· 1199 1201 struct lan9303 *chip = ds->priv; 1200 1202 1201 1203 dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, addr, vid); 1202 - if (vid) 1203 - return -EOPNOTSUPP; 1204 1204 lan9303_alr_del_port(chip, addr, port); 1205 1205 1206 1206 return 0;
+1
drivers/net/dsa/qca/Kconfig
··· 20 20 bool "Qualcomm Atheros QCA8K Ethernet switch family LEDs support" 21 21 depends on NET_DSA_QCA8K 22 22 depends on LEDS_CLASS=y || LEDS_CLASS=NET_DSA_QCA8K 23 + depends on LEDS_TRIGGERS 23 24 help 24 25 This enabled support for LEDs present on the Qualcomm Atheros 25 26 QCA8K Ethernet switch chips.
+8 -2
drivers/net/ethernet/amd/pds_core/dev.c
··· 68 68 69 69 bool pdsc_is_fw_good(struct pdsc *pdsc) 70 70 { 71 - u8 gen = pdsc->fw_status & PDS_CORE_FW_STS_F_GENERATION; 71 + bool fw_running = pdsc_is_fw_running(pdsc); 72 + u8 gen; 72 73 73 - return pdsc_is_fw_running(pdsc) && gen == pdsc->fw_generation; 74 + /* Make sure to update the cached fw_status by calling 75 + * pdsc_is_fw_running() before getting the generation 76 + */ 77 + gen = pdsc->fw_status & PDS_CORE_FW_STS_F_GENERATION; 78 + 79 + return fw_running && gen == pdsc->fw_generation; 74 80 } 75 81 76 82 static u8 pdsc_devcmd_status(struct pdsc *pdsc)
+2 -2
drivers/net/ethernet/broadcom/bcmsysport.c
··· 2531 2531 priv->irq0 = platform_get_irq(pdev, 0); 2532 2532 if (!priv->is_lite) { 2533 2533 priv->irq1 = platform_get_irq(pdev, 1); 2534 - priv->wol_irq = platform_get_irq(pdev, 2); 2534 + priv->wol_irq = platform_get_irq_optional(pdev, 2); 2535 2535 } else { 2536 - priv->wol_irq = platform_get_irq(pdev, 1); 2536 + priv->wol_irq = platform_get_irq_optional(pdev, 1); 2537 2537 } 2538 2538 if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) { 2539 2539 ret = -EINVAL;
+32 -10
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 692 692 693 693 __netif_txq_completed_wake(txq, nr_pkts, tx_bytes, 694 694 bnxt_tx_avail(bp, txr), bp->tx_wake_thresh, 695 - READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING); 695 + READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING); 696 696 } 697 697 698 698 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, ··· 2360 2360 if (BNXT_PTP_USE_RTC(bp)) { 2361 2361 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 2362 2362 u64 ns; 2363 + 2364 + if (!ptp) 2365 + goto async_event_process_exit; 2363 2366 2364 2367 spin_lock_bh(&ptp->ptp_lock); 2365 2368 bnxt_ptp_update_current_time(bp); ··· 4762 4759 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY && 4763 4760 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 4764 4761 continue; 4762 + if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE && 4763 + !bp->ptp_cfg) 4764 + continue; 4765 4765 __set_bit(bnxt_async_events_arr[i], async_events_bmap); 4766 4766 } 4767 4767 if (bmap && bmap_size) { ··· 5352 5346 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG)) 5353 5347 return; 5354 5348 5349 + req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); 5355 5350 /* all contexts configured to same hash_type, zero always exists */ 5356 5351 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 5357 5352 resp = hwrm_req_hold(bp, req); ··· 8815 8808 goto err_out; 8816 8809 } 8817 8810 8811 + if (BNXT_VF(bp)) 8812 + bnxt_hwrm_func_qcfg(bp); 8813 + 8818 8814 rc = bnxt_setup_vnic(bp, 0); 8819 8815 if (rc) 8820 8816 goto err_out; ··· 11604 11594 static void bnxt_fw_health_check(struct bnxt *bp) 11605 11595 { 11606 11596 struct bnxt_fw_health *fw_health = bp->fw_health; 11597 + struct pci_dev *pdev = bp->pdev; 11607 11598 u32 val; 11608 11599 11609 11600 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) ··· 11618 11607 } 11619 11608 11620 11609 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); 11621 - if (val == fw_health->last_fw_heartbeat) { 11610 + if (val == fw_health->last_fw_heartbeat 
&& pci_device_is_present(pdev)) { 11622 11611 fw_health->arrests++; 11623 11612 goto fw_reset; 11624 11613 } ··· 11626 11615 fw_health->last_fw_heartbeat = val; 11627 11616 11628 11617 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 11629 - if (val != fw_health->last_fw_reset_cnt) { 11618 + if (val != fw_health->last_fw_reset_cnt && pci_device_is_present(pdev)) { 11630 11619 fw_health->discoveries++; 11631 11620 goto fw_reset; 11632 11621 } ··· 13032 13021 13033 13022 #endif /* CONFIG_RFS_ACCEL */ 13034 13023 13035 - static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table) 13024 + static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table, 13025 + unsigned int entry, struct udp_tunnel_info *ti) 13036 13026 { 13037 13027 struct bnxt *bp = netdev_priv(netdev); 13038 - struct udp_tunnel_info ti; 13039 13028 unsigned int cmd; 13040 13029 13041 - udp_tunnel_nic_get_port(netdev, table, 0, &ti); 13042 - if (ti.type == UDP_TUNNEL_TYPE_VXLAN) 13030 + if (ti->type == UDP_TUNNEL_TYPE_VXLAN) 13043 13031 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN; 13044 13032 else 13045 13033 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE; 13046 13034 13047 - if (ti.port) 13048 - return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd); 13035 + return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd); 13036 + } 13037 + 13038 + static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table, 13039 + unsigned int entry, struct udp_tunnel_info *ti) 13040 + { 13041 + struct bnxt *bp = netdev_priv(netdev); 13042 + unsigned int cmd; 13043 + 13044 + if (ti->type == UDP_TUNNEL_TYPE_VXLAN) 13045 + cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN; 13046 + else 13047 + cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE; 13049 13048 13050 13049 return bnxt_hwrm_tunnel_dst_port_free(bp, cmd); 13051 13050 } 13052 13051 13053 13052 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = { 13054 - .sync_table = 
bnxt_udp_tunnel_sync, 13053 + .set_port = bnxt_udp_tunnel_set_port, 13054 + .unset_port = bnxt_udp_tunnel_unset_port, 13055 13055 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP | 13056 13056 UDP_TUNNEL_NIC_INFO_OPEN_ONLY, 13057 13057 .tables = {
+1 -1
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
··· 3831 3831 } 3832 3832 } 3833 3833 3834 - if (req & BNXT_FW_RESET_AP) { 3834 + if (!BNXT_CHIP_P4_PLUS(bp) && (req & BNXT_FW_RESET_AP)) { 3835 3835 /* This feature is not supported in older firmware versions */ 3836 3836 if (bp->hwrm_spec_code >= 0x10803) { 3837 3837 if (!bnxt_firmware_reset_ap(dev)) {
+1
drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
··· 952 952 bnxt_ptp_timecounter_init(bp, true); 953 953 bnxt_ptp_adjfine_rtc(bp, 0); 954 954 } 955 + bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true); 955 956 956 957 ptp->ptp_info = bnxt_ptp_caps; 957 958 if ((bp->fw_cap & BNXT_FW_CAP_PTP_PPS)) {
+8 -14
drivers/net/ethernet/broadcom/genet/bcmgenet.c
··· 1272 1272 } 1273 1273 } 1274 1274 1275 - static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable) 1275 + void bcmgenet_eee_enable_set(struct net_device *dev, bool enable, 1276 + bool tx_lpi_enabled) 1276 1277 { 1277 1278 struct bcmgenet_priv *priv = netdev_priv(dev); 1278 1279 u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL; ··· 1293 1292 1294 1293 /* Enable EEE and switch to a 27Mhz clock automatically */ 1295 1294 reg = bcmgenet_readl(priv->base + off); 1296 - if (enable) 1295 + if (tx_lpi_enabled) 1297 1296 reg |= TBUF_EEE_EN | TBUF_PM_EN; 1298 1297 else 1299 1298 reg &= ~(TBUF_EEE_EN | TBUF_PM_EN); ··· 1314 1313 1315 1314 priv->eee.eee_enabled = enable; 1316 1315 priv->eee.eee_active = enable; 1316 + priv->eee.tx_lpi_enabled = tx_lpi_enabled; 1317 1317 } 1318 1318 1319 1319 static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e) ··· 1330 1328 1331 1329 e->eee_enabled = p->eee_enabled; 1332 1330 e->eee_active = p->eee_active; 1331 + e->tx_lpi_enabled = p->tx_lpi_enabled; 1333 1332 e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER); 1334 1333 1335 1334 return phy_ethtool_get_eee(dev->phydev, e); ··· 1340 1337 { 1341 1338 struct bcmgenet_priv *priv = netdev_priv(dev); 1342 1339 struct ethtool_eee *p = &priv->eee; 1343 - int ret = 0; 1344 1340 1345 1341 if (GENET_IS_V1(priv)) 1346 1342 return -EOPNOTSUPP; ··· 1350 1348 p->eee_enabled = e->eee_enabled; 1351 1349 1352 1350 if (!p->eee_enabled) { 1353 - bcmgenet_eee_enable_set(dev, false); 1351 + bcmgenet_eee_enable_set(dev, false, false); 1354 1352 } else { 1355 - ret = phy_init_eee(dev->phydev, false); 1356 - if (ret) { 1357 - netif_err(priv, hw, dev, "EEE initialization failed\n"); 1358 - return ret; 1359 - } 1360 - 1353 + p->eee_active = phy_init_eee(dev->phydev, false) >= 0; 1361 1354 bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER); 1362 - bcmgenet_eee_enable_set(dev, true); 1355 + bcmgenet_eee_enable_set(dev, p->eee_active, 
e->tx_lpi_enabled); 1363 1356 } 1364 1357 1365 1358 return phy_ethtool_set_eee(dev->phydev, e); ··· 4275 4278 4276 4279 if (!device_may_wakeup(d)) 4277 4280 phy_resume(dev->phydev); 4278 - 4279 - if (priv->eee.eee_enabled) 4280 - bcmgenet_eee_enable_set(dev, true); 4281 4281 4282 4282 bcmgenet_netif_start(dev); 4283 4283
+3
drivers/net/ethernet/broadcom/genet/bcmgenet.h
··· 703 703 void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv, 704 704 enum bcmgenet_power_mode mode); 705 705 706 + void bcmgenet_eee_enable_set(struct net_device *dev, bool enable, 707 + bool tx_lpi_enabled); 708 + 706 709 #endif /* __BCMGENET_H__ */
+5
drivers/net/ethernet/broadcom/genet/bcmmii.c
··· 87 87 reg |= CMD_TX_EN | CMD_RX_EN; 88 88 } 89 89 bcmgenet_umac_writel(priv, reg, UMAC_CMD); 90 + 91 + priv->eee.eee_active = phy_init_eee(phydev, 0) >= 0; 92 + bcmgenet_eee_enable_set(dev, 93 + priv->eee.eee_enabled && priv->eee.eee_active, 94 + priv->eee.tx_lpi_enabled); 90 95 } 91 96 92 97 /* setup netdev link state when PHY link status change and
+15 -1
drivers/net/ethernet/freescale/enetc/enetc.c
··· 1229 1229 if (!skb) 1230 1230 break; 1231 1231 1232 - rx_byte_cnt += skb->len; 1232 + /* When set, the outer VLAN header is extracted and reported 1233 + * in the receive buffer descriptor. So rx_byte_cnt should 1234 + * add the length of the extracted VLAN header. 1235 + */ 1236 + if (bd_status & ENETC_RXBD_FLAG_VLAN) 1237 + rx_byte_cnt += VLAN_HLEN; 1238 + rx_byte_cnt += skb->len + ETH_HLEN; 1233 1239 rx_frm_cnt++; 1234 1240 1235 1241 napi_gro_receive(napi, skb); ··· 1569 1563 1570 1564 enetc_build_xdp_buff(rx_ring, bd_status, &rxbd, &i, 1571 1565 &cleaned_cnt, &xdp_buff); 1566 + 1567 + /* When set, the outer VLAN header is extracted and reported 1568 + * in the receive buffer descriptor. So rx_byte_cnt should 1569 + * add the length of the extracted VLAN header. 1570 + */ 1571 + if (bd_status & ENETC_RXBD_FLAG_VLAN) 1572 + rx_byte_cnt += VLAN_HLEN; 1573 + rx_byte_cnt += xdp_get_buff_len(&xdp_buff); 1572 1574 1573 1575 xdp_act = bpf_prog_run_xdp(prog, &xdp_buff); 1574 1576
+1 -1
drivers/net/ethernet/intel/ice/ice_common.c
··· 5160 5160 */ 5161 5161 int 5162 5162 ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr, 5163 - u16 bus_addr, __le16 addr, u8 params, u8 *data, 5163 + u16 bus_addr, __le16 addr, u8 params, const u8 *data, 5164 5164 struct ice_sq_cd *cd) 5165 5165 { 5166 5166 struct ice_aq_desc desc = { 0 };
+1 -1
drivers/net/ethernet/intel/ice/ice_common.h
··· 229 229 struct ice_sq_cd *cd); 230 230 int 231 231 ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr, 232 - u16 bus_addr, __le16 addr, u8 params, u8 *data, 232 + u16 bus_addr, __le16 addr, u8 params, const u8 *data, 233 233 struct ice_sq_cd *cd); 234 234 bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw); 235 235 #endif /* _ICE_COMMON_H_ */
+4 -60
drivers/net/ethernet/intel/ice/ice_gnss.c
··· 16 16 * * number of bytes written - success 17 17 * * negative - error code 18 18 */ 19 - static unsigned int 20 - ice_gnss_do_write(struct ice_pf *pf, unsigned char *buf, unsigned int size) 19 + static int 20 + ice_gnss_do_write(struct ice_pf *pf, const unsigned char *buf, unsigned int size) 21 21 { 22 22 struct ice_aqc_link_topo_addr link_topo; 23 23 struct ice_hw *hw = &pf->hw; ··· 72 72 dev_err(ice_pf_to_dev(pf), "GNSS failed to write, offset=%u, size=%u, err=%d\n", 73 73 offset, size, err); 74 74 75 - return offset; 76 - } 77 - 78 - /** 79 - * ice_gnss_write_pending - Write all pending data to internal GNSS 80 - * @work: GNSS write work structure 81 - */ 82 - static void ice_gnss_write_pending(struct kthread_work *work) 83 - { 84 - struct gnss_serial *gnss = container_of(work, struct gnss_serial, 85 - write_work); 86 - struct ice_pf *pf = gnss->back; 87 - 88 - if (!pf) 89 - return; 90 - 91 - if (!test_bit(ICE_FLAG_GNSS, pf->flags)) 92 - return; 93 - 94 - if (!list_empty(&gnss->queue)) { 95 - struct gnss_write_buf *write_buf = NULL; 96 - unsigned int bytes; 97 - 98 - write_buf = list_first_entry(&gnss->queue, 99 - struct gnss_write_buf, queue); 100 - 101 - bytes = ice_gnss_do_write(pf, write_buf->buf, write_buf->size); 102 - dev_dbg(ice_pf_to_dev(pf), "%u bytes written to GNSS\n", bytes); 103 - 104 - list_del(&write_buf->queue); 105 - kfree(write_buf->buf); 106 - kfree(write_buf); 107 - } 75 + return err; 108 76 } 109 77 110 78 /** ··· 188 220 pf->gnss_serial = gnss; 189 221 190 222 kthread_init_delayed_work(&gnss->read_work, ice_gnss_read); 191 - INIT_LIST_HEAD(&gnss->queue); 192 - kthread_init_work(&gnss->write_work, ice_gnss_write_pending); 193 223 kworker = kthread_create_worker(0, "ice-gnss-%s", dev_name(dev)); 194 224 if (IS_ERR(kworker)) { 195 225 kfree(gnss); ··· 247 281 if (!gnss) 248 282 return; 249 283 250 - kthread_cancel_work_sync(&gnss->write_work); 251 284 kthread_cancel_delayed_work_sync(&gnss->read_work); 252 285 } 253 286 ··· 265 300 
size_t count) 266 301 { 267 302 struct ice_pf *pf = gnss_get_drvdata(gdev); 268 - struct gnss_write_buf *write_buf; 269 303 struct gnss_serial *gnss; 270 - unsigned char *cmd_buf; 271 - int err = count; 272 304 273 305 /* We cannot write a single byte using our I2C implementation. */ 274 306 if (count <= 1 || count > ICE_GNSS_TTY_WRITE_BUF) ··· 281 319 if (!gnss) 282 320 return -ENODEV; 283 321 284 - cmd_buf = kcalloc(count, sizeof(*buf), GFP_KERNEL); 285 - if (!cmd_buf) 286 - return -ENOMEM; 287 - 288 - memcpy(cmd_buf, buf, count); 289 - write_buf = kzalloc(sizeof(*write_buf), GFP_KERNEL); 290 - if (!write_buf) { 291 - kfree(cmd_buf); 292 - return -ENOMEM; 293 - } 294 - 295 - write_buf->buf = cmd_buf; 296 - write_buf->size = count; 297 - INIT_LIST_HEAD(&write_buf->queue); 298 - list_add_tail(&write_buf->queue, &gnss->queue); 299 - kthread_queue_work(gnss->kworker, &gnss->write_work); 300 - 301 - return err; 322 + return ice_gnss_do_write(pf, buf, count); 302 323 } 303 324 304 325 static const struct gnss_operations ice_gnss_ops = { ··· 377 432 if (pf->gnss_serial) { 378 433 struct gnss_serial *gnss = pf->gnss_serial; 379 434 380 - kthread_cancel_work_sync(&gnss->write_work); 381 435 kthread_cancel_delayed_work_sync(&gnss->read_work); 382 436 kthread_destroy_worker(gnss->kworker); 383 437 gnss->kworker = NULL;
-10
drivers/net/ethernet/intel/ice/ice_gnss.h
··· 22 22 */ 23 23 #define ICE_GNSS_UBX_WRITE_BYTES (ICE_MAX_I2C_WRITE_BYTES + 1) 24 24 25 - struct gnss_write_buf { 26 - struct list_head queue; 27 - unsigned int size; 28 - unsigned char *buf; 29 - }; 30 - 31 25 /** 32 26 * struct gnss_serial - data used to initialize GNSS TTY port 33 27 * @back: back pointer to PF 34 28 * @kworker: kwork thread for handling periodic work 35 29 * @read_work: read_work function for handling GNSS reads 36 - * @write_work: write_work function for handling GNSS writes 37 - * @queue: write buffers queue 38 30 */ 39 31 struct gnss_serial { 40 32 struct ice_pf *back; 41 33 struct kthread_worker *kworker; 42 34 struct kthread_delayed_work read_work; 43 - struct kthread_work write_work; 44 - struct list_head queue; 45 35 }; 46 36 47 37 #if IS_ENABLED(CONFIG_GNSS)
+1 -1
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
··· 1256 1256 if (!__netif_txq_completed_wake(txq, total_packets, total_bytes, 1257 1257 ixgbe_desc_unused(tx_ring), 1258 1258 TX_WAKE_THRESHOLD, 1259 - netif_carrier_ok(tx_ring->netdev) && 1259 + !netif_carrier_ok(tx_ring->netdev) || 1260 1260 test_bit(__IXGBE_DOWN, &adapter->state))) 1261 1261 ++tx_ring->tx_stats.restart_queue; 1262 1262
+1 -1
drivers/net/ethernet/qlogic/qed/qed_l2.c
··· 1903 1903 { 1904 1904 u32 i; 1905 1905 1906 - if (!cdev) { 1906 + if (!cdev || cdev->recov_in_prog) { 1907 1907 memset(stats, 0, sizeof(*stats)); 1908 1908 return; 1909 1909 }
+4
drivers/net/ethernet/qlogic/qede/qede.h
··· 269 269 #define QEDE_ERR_WARN 3 270 270 271 271 struct qede_dump_info dump_info; 272 + struct delayed_work periodic_task; 273 + unsigned long stats_coal_ticks; 274 + u32 stats_coal_usecs; 275 + spinlock_t stats_lock; /* lock for vport stats access */ 272 276 }; 273 277 274 278 enum QEDE_STATE {
+22 -2
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
··· 429 429 } 430 430 } 431 431 432 + spin_lock(&edev->stats_lock); 433 + 432 434 for (i = 0; i < QEDE_NUM_STATS; i++) { 433 435 if (qede_is_irrelevant_stat(edev, i)) 434 436 continue; ··· 439 437 440 438 buf++; 441 439 } 440 + 441 + spin_unlock(&edev->stats_lock); 442 442 443 443 __qede_unlock(edev); 444 444 } ··· 833 829 834 830 coal->rx_coalesce_usecs = rx_coal; 835 831 coal->tx_coalesce_usecs = tx_coal; 832 + coal->stats_block_coalesce_usecs = edev->stats_coal_usecs; 836 833 837 834 return rc; 838 835 } ··· 846 841 struct qede_fastpath *fp; 847 842 int i, rc = 0; 848 843 u16 rxc, txc; 844 + 845 + if (edev->stats_coal_usecs != coal->stats_block_coalesce_usecs) { 846 + edev->stats_coal_usecs = coal->stats_block_coalesce_usecs; 847 + if (edev->stats_coal_usecs) { 848 + edev->stats_coal_ticks = usecs_to_jiffies(edev->stats_coal_usecs); 849 + schedule_delayed_work(&edev->periodic_task, 0); 850 + 851 + DP_INFO(edev, "Configured stats coal ticks=%lu jiffies\n", 852 + edev->stats_coal_ticks); 853 + } else { 854 + cancel_delayed_work_sync(&edev->periodic_task); 855 + } 856 + } 849 857 850 858 if (!netif_running(dev)) { 851 859 DP_INFO(edev, "Interface is down\n"); ··· 2270 2252 } 2271 2253 2272 2254 static const struct ethtool_ops qede_ethtool_ops = { 2273 - .supported_coalesce_params = ETHTOOL_COALESCE_USECS, 2255 + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | 2256 + ETHTOOL_COALESCE_STATS_BLOCK_USECS, 2274 2257 .get_link_ksettings = qede_get_link_ksettings, 2275 2258 .set_link_ksettings = qede_set_link_ksettings, 2276 2259 .get_drvinfo = qede_get_drvinfo, ··· 2322 2303 }; 2323 2304 2324 2305 static const struct ethtool_ops qede_vf_ethtool_ops = { 2325 - .supported_coalesce_params = ETHTOOL_COALESCE_USECS, 2306 + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | 2307 + ETHTOOL_COALESCE_STATS_BLOCK_USECS, 2326 2308 .get_link_ksettings = qede_get_link_ksettings, 2327 2309 .get_drvinfo = qede_get_drvinfo, 2328 2310 .get_msglevel = qede_get_msglevel,
+33 -1
drivers/net/ethernet/qlogic/qede/qede_main.c
··· 307 307 308 308 edev->ops->get_vport_stats(edev->cdev, &stats); 309 309 310 + spin_lock(&edev->stats_lock); 311 + 310 312 p_common->no_buff_discards = stats.common.no_buff_discards; 311 313 p_common->packet_too_big_discard = stats.common.packet_too_big_discard; 312 314 p_common->ttl0_discard = stats.common.ttl0_discard; ··· 406 404 p_ah->tx_1519_to_max_byte_packets = 407 405 stats.ah.tx_1519_to_max_byte_packets; 408 406 } 407 + 408 + spin_unlock(&edev->stats_lock); 409 409 } 410 410 411 411 static void qede_get_stats64(struct net_device *dev, ··· 416 412 struct qede_dev *edev = netdev_priv(dev); 417 413 struct qede_stats_common *p_common; 418 414 419 - qede_fill_by_demand_stats(edev); 420 415 p_common = &edev->stats.common; 416 + 417 + spin_lock(&edev->stats_lock); 421 418 422 419 stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts + 423 420 p_common->rx_bcast_pkts; ··· 439 434 stats->collisions = edev->stats.bb.tx_total_collisions; 440 435 stats->rx_crc_errors = p_common->rx_crc_errors; 441 436 stats->rx_frame_errors = p_common->rx_align_errors; 437 + 438 + spin_unlock(&edev->stats_lock); 442 439 } 443 440 444 441 #ifdef CONFIG_QED_SRIOV ··· 1070 1063 rtnl_unlock(); 1071 1064 } 1072 1065 1066 + static void qede_periodic_task(struct work_struct *work) 1067 + { 1068 + struct qede_dev *edev = container_of(work, struct qede_dev, 1069 + periodic_task.work); 1070 + 1071 + qede_fill_by_demand_stats(edev); 1072 + schedule_delayed_work(&edev->periodic_task, edev->stats_coal_ticks); 1073 + } 1074 + 1075 + static void qede_init_periodic_task(struct qede_dev *edev) 1076 + { 1077 + INIT_DELAYED_WORK(&edev->periodic_task, qede_periodic_task); 1078 + spin_lock_init(&edev->stats_lock); 1079 + edev->stats_coal_usecs = USEC_PER_SEC; 1080 + edev->stats_coal_ticks = usecs_to_jiffies(USEC_PER_SEC); 1081 + } 1082 + 1073 1083 static void qede_sp_task(struct work_struct *work) 1074 1084 { 1075 1085 struct qede_dev *edev = container_of(work, struct qede_dev, ··· 1106 
1082 */ 1107 1083 1108 1084 if (test_and_clear_bit(QEDE_SP_RECOVERY, &edev->sp_flags)) { 1085 + cancel_delayed_work_sync(&edev->periodic_task); 1109 1086 #ifdef CONFIG_QED_SRIOV 1110 1087 /* SRIOV must be disabled outside the lock to avoid a deadlock. 1111 1088 * The recovery of the active VFs is currently not supported. ··· 1297 1272 */ 1298 1273 INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task); 1299 1274 mutex_init(&edev->qede_lock); 1275 + qede_init_periodic_task(edev); 1300 1276 1301 1277 rc = register_netdev(edev->ndev); 1302 1278 if (rc) { ··· 1322 1296 edev->rx_copybreak = QEDE_RX_HDR_SIZE; 1323 1297 1324 1298 qede_log_probe(edev); 1299 + 1300 + /* retain user config (for example - after recovery) */ 1301 + if (edev->stats_coal_usecs) 1302 + schedule_delayed_work(&edev->periodic_task, 0); 1303 + 1325 1304 return 0; 1326 1305 1327 1306 err4: ··· 1395 1364 unregister_netdev(ndev); 1396 1365 1397 1366 cancel_delayed_work_sync(&edev->sp_task); 1367 + cancel_delayed_work_sync(&edev->periodic_task); 1398 1368 1399 1369 edev->ops->common->set_power_state(cdev, PCI_D0); 1400 1370
+2 -1
drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
··· 644 644 plat_dat->fix_mac_speed = ethqos_fix_mac_speed; 645 645 plat_dat->dump_debug_regs = rgmii_dump; 646 646 plat_dat->has_gmac4 = 1; 647 - plat_dat->dwmac4_addrs = &data->dwmac4_addrs; 647 + if (ethqos->has_emac3) 648 + plat_dat->dwmac4_addrs = &data->dwmac4_addrs; 648 649 plat_dat->pmt = 1; 649 650 plat_dat->tso_en = of_property_read_bool(np, "snps,tso"); 650 651 if (of_device_is_compatible(np, "qcom,qcs404-ethqos"))
+10 -5
drivers/net/phy/phylink.c
··· 2259 2259 2260 2260 ASSERT_RTNL(); 2261 2261 2262 - /* Mask out unsupported advertisements */ 2263 - linkmode_and(config.advertising, kset->link_modes.advertising, 2264 - pl->supported); 2265 - 2266 2262 if (pl->phydev) { 2263 + struct ethtool_link_ksettings phy_kset = *kset; 2264 + 2265 + linkmode_and(phy_kset.link_modes.advertising, 2266 + phy_kset.link_modes.advertising, 2267 + pl->supported); 2268 + 2267 2269 /* We can rely on phylib for this update; we also do not need 2268 2270 * to update the pl->link_config settings: 2269 2271 * - the configuration returned via ksettings_get() will come ··· 2284 2282 * the presence of a PHY, this should not be changed as that 2285 2283 * should be determined from the media side advertisement. 2286 2284 */ 2287 - return phy_ethtool_ksettings_set(pl->phydev, kset); 2285 + return phy_ethtool_ksettings_set(pl->phydev, &phy_kset); 2288 2286 } 2289 2287 2290 2288 config = pl->link_config; 2289 + /* Mask out unsupported advertisements */ 2290 + linkmode_and(config.advertising, kset->link_modes.advertising, 2291 + pl->supported); 2291 2292 2292 2293 /* FIXME: should we reject autoneg if phy/mac does not support it? */ 2293 2294 switch (kset->base.autoneg) {
+8 -8
drivers/net/virtio_net.c
··· 205 205 __virtio16 vid; 206 206 __virtio64 offloads; 207 207 struct virtio_net_ctrl_rss rss; 208 + struct virtio_net_ctrl_coal_tx coal_tx; 209 + struct virtio_net_ctrl_coal_rx coal_rx; 208 210 }; 209 211 210 212 struct virtnet_info { ··· 3045 3043 struct ethtool_coalesce *ec) 3046 3044 { 3047 3045 struct scatterlist sgs_tx, sgs_rx; 3048 - struct virtio_net_ctrl_coal_tx coal_tx; 3049 - struct virtio_net_ctrl_coal_rx coal_rx; 3050 3046 3051 - coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs); 3052 - coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames); 3053 - sg_init_one(&sgs_tx, &coal_tx, sizeof(coal_tx)); 3047 + vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs); 3048 + vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames); 3049 + sg_init_one(&sgs_tx, &vi->ctrl->coal_tx, sizeof(vi->ctrl->coal_tx)); 3054 3050 3055 3051 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, 3056 3052 VIRTIO_NET_CTRL_NOTF_COAL_TX_SET, ··· 3059 3059 vi->tx_usecs = ec->tx_coalesce_usecs; 3060 3060 vi->tx_max_packets = ec->tx_max_coalesced_frames; 3061 3061 3062 - coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs); 3063 - coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames); 3064 - sg_init_one(&sgs_rx, &coal_rx, sizeof(coal_rx)); 3062 + vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs); 3063 + vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames); 3064 + sg_init_one(&sgs_rx, &vi->ctrl->coal_rx, sizeof(vi->ctrl->coal_rx)); 3065 3065 3066 3066 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, 3067 3067 VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
+2 -6
drivers/net/wireless/intel/iwlwifi/mvm/d3.c
··· 2732 2732 if (wowlan_info_ver < 2) { 2733 2733 struct iwl_wowlan_info_notif_v1 *notif_v1 = (void *)pkt->data; 2734 2734 2735 - notif = kmemdup(notif_v1, 2736 - offsetofend(struct iwl_wowlan_info_notif, 2737 - received_beacons), 2738 - GFP_ATOMIC); 2739 - 2735 + notif = kmemdup(notif_v1, sizeof(*notif), GFP_ATOMIC); 2740 2736 if (!notif) 2741 2737 return false; 2742 2738 2743 2739 notif->tid_tear_down = notif_v1->tid_tear_down; 2744 2740 notif->station_id = notif_v1->station_id; 2745 - 2741 + memset_after(notif, 0, station_id); 2746 2742 } else { 2747 2743 notif = (void *)pkt->data; 2748 2744 }
+3
drivers/net/wireless/mediatek/mt76/mt7615/mac.c
··· 914 914 915 915 msta = list_first_entry(&sta_poll_list, struct mt7615_sta, 916 916 poll_list); 917 + 918 + spin_lock_bh(&dev->sta_poll_lock); 917 919 list_del_init(&msta->poll_list); 920 + spin_unlock_bh(&dev->sta_poll_lock); 918 921 919 922 addr = mt7615_mac_wtbl_addr(dev, msta->wcid.idx) + 19 * 4; 920 923
+12 -7
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
··· 1004 1004 { 1005 1005 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1006 1006 struct ieee80211_vif *vif = info->control.vif; 1007 - struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; 1008 1007 u8 band_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2; 1009 1008 u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0; 1010 1009 bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP; 1010 + struct mt7996_vif *mvif; 1011 1011 u16 tx_count = 15; 1012 1012 u32 val; 1013 1013 bool beacon = !!(changed & (BSS_CHANGED_BEACON | ··· 1015 1015 bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP | 1016 1016 BSS_CHANGED_FILS_DISCOVERY)); 1017 1017 1018 - if (vif) { 1018 + mvif = vif ? (struct mt7996_vif *)vif->drv_priv : NULL; 1019 + if (mvif) { 1019 1020 omac_idx = mvif->mt76.omac_idx; 1020 1021 wmm_idx = mvif->mt76.wmm_idx; 1021 1022 band_idx = mvif->mt76.band_idx; ··· 1082 1081 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1083 1082 bool mcast = ieee80211_is_data(hdr->frame_control) && 1084 1083 is_multicast_ether_addr(hdr->addr1); 1085 - u8 idx = mvif->basic_rates_idx; 1084 + u8 idx = MT7996_BASIC_RATES_TBL; 1086 1085 1087 - if (mcast && mvif->mcast_rates_idx) 1088 - idx = mvif->mcast_rates_idx; 1089 - else if (beacon && mvif->beacon_rates_idx) 1090 - idx = mvif->beacon_rates_idx; 1086 + if (mvif) { 1087 + if (mcast && mvif->mcast_rates_idx) 1088 + idx = mvif->mcast_rates_idx; 1089 + else if (beacon && mvif->beacon_rates_idx) 1090 + idx = mvif->beacon_rates_idx; 1091 + else 1092 + idx = mvif->basic_rates_idx; 1093 + } 1091 1094 1092 1095 txwi[6] |= cpu_to_le32(FIELD_PREP(MT_TXD6_TX_RATE, idx)); 1093 1096 txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
+5 -9
drivers/net/wireless/realtek/rtw88/mac80211.c
··· 88 88 } 89 89 } 90 90 91 - if (changed & IEEE80211_CONF_CHANGE_PS) { 92 - if (hw->conf.flags & IEEE80211_CONF_PS) { 93 - rtwdev->ps_enabled = true; 94 - } else { 95 - rtwdev->ps_enabled = false; 96 - rtw_leave_lps(rtwdev); 97 - } 98 - } 99 - 100 91 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) 101 92 rtw_set_channel(rtwdev); 102 93 ··· 206 215 config |= PORT_SET_BCN_CTRL; 207 216 rtw_vif_port_config(rtwdev, rtwvif, config); 208 217 rtw_core_port_switch(rtwdev, vif); 218 + rtw_recalc_lps(rtwdev, vif); 209 219 210 220 mutex_unlock(&rtwdev->mutex); 211 221 ··· 238 246 config |= PORT_SET_BCN_CTRL; 239 247 rtw_vif_port_config(rtwdev, rtwvif, config); 240 248 clear_bit(rtwvif->port, rtwdev->hw_port); 249 + rtw_recalc_lps(rtwdev, NULL); 241 250 242 251 mutex_unlock(&rtwdev->mutex); 243 252 } ··· 432 439 433 440 if (changed & BSS_CHANGED_ERP_SLOT) 434 441 rtw_conf_tx(rtwdev, rtwvif); 442 + 443 + if (changed & BSS_CHANGED_PS) 444 + rtw_recalc_lps(rtwdev, NULL); 435 445 436 446 rtw_vif_port_config(rtwdev, rtwvif, config); 437 447
+2 -2
drivers/net/wireless/realtek/rtw88/main.c
··· 271 271 * more than two stations associated to the AP, then we can not enter 272 272 * lps, because fw does not handle the overlapped beacon interval 273 273 * 274 - * mac80211 should iterate vifs and determine if driver can enter 275 - * ps by passing IEEE80211_CONF_PS to us, all we need to do is to 274 + * rtw_recalc_lps() iterate vifs and determine if driver can enter 275 + * ps by vif->type and vif->cfg.ps, all we need to do here is to 276 276 * get that vif and check if device is having traffic more than the 277 277 * threshold. 278 278 */
+43
drivers/net/wireless/realtek/rtw88/ps.c
··· 299 299 300 300 __rtw_leave_lps_deep(rtwdev); 301 301 } 302 + 303 + struct rtw_vif_recalc_lps_iter_data { 304 + struct rtw_dev *rtwdev; 305 + struct ieee80211_vif *found_vif; 306 + int count; 307 + }; 308 + 309 + static void __rtw_vif_recalc_lps(struct rtw_vif_recalc_lps_iter_data *data, 310 + struct ieee80211_vif *vif) 311 + { 312 + if (data->count < 0) 313 + return; 314 + 315 + if (vif->type != NL80211_IFTYPE_STATION) { 316 + data->count = -1; 317 + return; 318 + } 319 + 320 + data->count++; 321 + data->found_vif = vif; 322 + } 323 + 324 + static void rtw_vif_recalc_lps_iter(void *data, u8 *mac, 325 + struct ieee80211_vif *vif) 326 + { 327 + __rtw_vif_recalc_lps(data, vif); 328 + } 329 + 330 + void rtw_recalc_lps(struct rtw_dev *rtwdev, struct ieee80211_vif *new_vif) 331 + { 332 + struct rtw_vif_recalc_lps_iter_data data = { .rtwdev = rtwdev }; 333 + 334 + if (new_vif) 335 + __rtw_vif_recalc_lps(&data, new_vif); 336 + rtw_iterate_vifs(rtwdev, rtw_vif_recalc_lps_iter, &data); 337 + 338 + if (data.count == 1 && data.found_vif->cfg.ps) { 339 + rtwdev->ps_enabled = true; 340 + } else { 341 + rtwdev->ps_enabled = false; 342 + rtw_leave_lps(rtwdev); 343 + } 344 + }
+2
drivers/net/wireless/realtek/rtw88/ps.h
··· 23 23 void rtw_leave_lps(struct rtw_dev *rtwdev); 24 24 void rtw_leave_lps_deep(struct rtw_dev *rtwdev); 25 25 enum rtw_lps_deep_mode rtw_get_lps_deep_mode(struct rtw_dev *rtwdev); 26 + void rtw_recalc_lps(struct rtw_dev *rtwdev, struct ieee80211_vif *new_vif); 27 + 26 28 #endif
-3
drivers/net/wireless/realtek/rtw89/core.c
··· 2603 2603 rtwvif->tdls_peer) 2604 2604 return; 2605 2605 2606 - if (rtwdev->total_sta_assoc > 1) 2607 - return; 2608 - 2609 2606 if (rtwvif->offchan) 2610 2607 return; 2611 2608
+6 -9
drivers/net/wireless/realtek/rtw89/mac80211.c
··· 89 89 !(hw->conf.flags & IEEE80211_CONF_IDLE)) 90 90 rtw89_leave_ips(rtwdev); 91 91 92 - if (changed & IEEE80211_CONF_CHANGE_PS) { 93 - if (hw->conf.flags & IEEE80211_CONF_PS) { 94 - rtwdev->lps_enabled = true; 95 - } else { 96 - rtw89_leave_lps(rtwdev); 97 - rtwdev->lps_enabled = false; 98 - } 99 - } 100 - 101 92 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { 102 93 rtw89_config_entity_chandef(rtwdev, RTW89_SUB_ENTITY_0, 103 94 &hw->conf.chandef); ··· 159 168 rtw89_core_txq_init(rtwdev, vif->txq); 160 169 161 170 rtw89_btc_ntfy_role_info(rtwdev, rtwvif, NULL, BTC_ROLE_START); 171 + 172 + rtw89_recalc_lps(rtwdev); 162 173 out: 163 174 mutex_unlock(&rtwdev->mutex); 164 175 ··· 185 192 rtw89_mac_remove_vif(rtwdev, rtwvif); 186 193 rtw89_core_release_bit_map(rtwdev->hw_port, rtwvif->port); 187 194 list_del_init(&rtwvif->list); 195 + rtw89_recalc_lps(rtwdev); 188 196 rtw89_enter_ips_by_hwflags(rtwdev); 189 197 190 198 mutex_unlock(&rtwdev->mutex); ··· 444 450 445 451 if (changed & BSS_CHANGED_CQM) 446 452 rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, vif, true); 453 + 454 + if (changed & BSS_CHANGED_PS) 455 + rtw89_recalc_lps(rtwdev); 447 456 448 457 mutex_unlock(&rtwdev->mutex); 449 458 }
+26
drivers/net/wireless/realtek/rtw89/ps.c
··· 252 252 rtw89_p2p_disable_all_noa(rtwdev, vif); 253 253 rtw89_p2p_update_noa(rtwdev, vif); 254 254 } 255 + 256 + void rtw89_recalc_lps(struct rtw89_dev *rtwdev) 257 + { 258 + struct ieee80211_vif *vif, *found_vif = NULL; 259 + struct rtw89_vif *rtwvif; 260 + int count = 0; 261 + 262 + rtw89_for_each_rtwvif(rtwdev, rtwvif) { 263 + vif = rtwvif_to_vif(rtwvif); 264 + 265 + if (vif->type != NL80211_IFTYPE_STATION) { 266 + count = 0; 267 + break; 268 + } 269 + 270 + count++; 271 + found_vif = vif; 272 + } 273 + 274 + if (count == 1 && found_vif->cfg.ps) { 275 + rtwdev->lps_enabled = true; 276 + } else { 277 + rtw89_leave_lps(rtwdev); 278 + rtwdev->lps_enabled = false; 279 + } 280 + }
+1
drivers/net/wireless/realtek/rtw89/ps.h
··· 15 15 void rtw89_leave_ips(struct rtw89_dev *rtwdev); 16 16 void rtw89_set_coex_ctrl_lps(struct rtw89_dev *rtwdev, bool btc_ctrl); 17 17 void rtw89_process_p2p_ps(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif); 18 + void rtw89_recalc_lps(struct rtw89_dev *rtwdev); 18 19 19 20 static inline void rtw89_leave_ips_by_hwflags(struct rtw89_dev *rtwdev) 20 21 {
+1 -1
drivers/nvme/host/constants.c
··· 21 21 [nvme_cmd_resv_release] = "Reservation Release", 22 22 [nvme_cmd_zone_mgmt_send] = "Zone Management Send", 23 23 [nvme_cmd_zone_mgmt_recv] = "Zone Management Receive", 24 - [nvme_cmd_zone_append] = "Zone Management Append", 24 + [nvme_cmd_zone_append] = "Zone Append", 25 25 }; 26 26 27 27 static const char * const nvme_admin_ops[] = {
+48 -4
drivers/nvme/host/core.c
··· 397 397 trace_nvme_complete_rq(req); 398 398 nvme_cleanup_cmd(req); 399 399 400 - if (ctrl->kas) 400 + /* 401 + * Completions of long-running commands should not be able to 402 + * defer sending of periodic keep alives, since the controller 403 + * may have completed processing such commands a long time ago 404 + * (arbitrarily close to command submission time). 405 + * req->deadline - req->timeout is the command submission time 406 + * in jiffies. 407 + */ 408 + if (ctrl->kas && 409 + req->deadline - req->timeout >= ctrl->ka_last_check_time) 401 410 ctrl->comp_seen = true; 402 411 403 412 switch (nvme_decide_disposition(req)) { ··· 1124 1115 } 1125 1116 EXPORT_SYMBOL_NS_GPL(nvme_passthru_start, NVME_TARGET_PASSTHRU); 1126 1117 1127 - void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects, 1118 + void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects, 1128 1119 struct nvme_command *cmd, int status) 1129 1120 { 1130 1121 if (effects & NVME_CMD_EFFECTS_CSE_MASK) { ··· 1141 1132 nvme_queue_scan(ctrl); 1142 1133 flush_work(&ctrl->scan_work); 1143 1134 } 1135 + if (ns) 1136 + return; 1144 1137 1145 1138 switch (cmd->common.opcode) { 1146 1139 case nvme_admin_set_features: ··· 1172 1161 * The host should send Keep Alive commands at half of the Keep Alive Timeout 1173 1162 * accounting for transport roundtrip times [..]. 1174 1163 */ 1164 + static unsigned long nvme_keep_alive_work_period(struct nvme_ctrl *ctrl) 1165 + { 1166 + unsigned long delay = ctrl->kato * HZ / 2; 1167 + 1168 + /* 1169 + * When using Traffic Based Keep Alive, we need to run 1170 + * nvme_keep_alive_work at twice the normal frequency, as one 1171 + * command completion can postpone sending a keep alive command 1172 + * by up to twice the delay between runs. 
1173 + */ 1174 + if (ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) 1175 + delay /= 2; 1176 + return delay; 1177 + } 1178 + 1175 1179 static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl) 1176 1180 { 1177 - queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ / 2); 1181 + queue_delayed_work(nvme_wq, &ctrl->ka_work, 1182 + nvme_keep_alive_work_period(ctrl)); 1178 1183 } 1179 1184 1180 1185 static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq, ··· 1199 1172 struct nvme_ctrl *ctrl = rq->end_io_data; 1200 1173 unsigned long flags; 1201 1174 bool startka = false; 1175 + unsigned long rtt = jiffies - (rq->deadline - rq->timeout); 1176 + unsigned long delay = nvme_keep_alive_work_period(ctrl); 1177 + 1178 + /* 1179 + * Subtract off the keepalive RTT so nvme_keep_alive_work runs 1180 + * at the desired frequency. 1181 + */ 1182 + if (rtt <= delay) { 1183 + delay -= rtt; 1184 + } else { 1185 + dev_warn(ctrl->device, "long keepalive RTT (%u ms)\n", 1186 + jiffies_to_msecs(rtt)); 1187 + delay = 0; 1188 + } 1202 1189 1203 1190 blk_mq_free_request(rq); 1204 1191 ··· 1223 1182 return RQ_END_IO_NONE; 1224 1183 } 1225 1184 1185 + ctrl->ka_last_check_time = jiffies; 1226 1186 ctrl->comp_seen = false; 1227 1187 spin_lock_irqsave(&ctrl->lock, flags); 1228 1188 if (ctrl->state == NVME_CTRL_LIVE || ··· 1231 1189 startka = true; 1232 1190 spin_unlock_irqrestore(&ctrl->lock, flags); 1233 1191 if (startka) 1234 - nvme_queue_keep_alive_work(ctrl); 1192 + queue_delayed_work(nvme_wq, &ctrl->ka_work, delay); 1235 1193 return RQ_END_IO_NONE; 1236 1194 } 1237 1195 ··· 1241 1199 struct nvme_ctrl, ka_work); 1242 1200 bool comp_seen = ctrl->comp_seen; 1243 1201 struct request *rq; 1202 + 1203 + ctrl->ka_last_check_time = jiffies; 1244 1204 1245 1205 if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) { 1246 1206 dev_dbg(ctrl->device,
+1 -1
drivers/nvme/host/ioctl.c
··· 254 254 blk_mq_free_request(req); 255 255 256 256 if (effects) 257 - nvme_passthru_end(ctrl, effects, cmd, ret); 257 + nvme_passthru_end(ctrl, ns, effects, cmd, ret); 258 258 259 259 return ret; 260 260 }
+2 -1
drivers/nvme/host/nvme.h
··· 328 328 struct delayed_work ka_work; 329 329 struct delayed_work failfast_work; 330 330 struct nvme_command ka_cmd; 331 + unsigned long ka_last_check_time; 331 332 struct work_struct fw_act_work; 332 333 unsigned long events; 333 334 ··· 1078 1077 u8 opcode); 1079 1078 u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode); 1080 1079 int nvme_execute_rq(struct request *rq, bool at_head); 1081 - void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects, 1080 + void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects, 1082 1081 struct nvme_command *cmd, int status); 1083 1082 struct nvme_ctrl *nvme_ctrl_from_file(struct file *file); 1084 1083 struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
+1 -1
drivers/nvme/target/passthru.c
··· 243 243 blk_mq_free_request(rq); 244 244 245 245 if (effects) 246 - nvme_passthru_end(ctrl, effects, req->cmd, status); 246 + nvme_passthru_end(ctrl, ns, effects, req->cmd, status); 247 247 } 248 248 249 249 static enum rq_end_io_ret nvmet_passthru_req_done(struct request *rq,
+1 -1
drivers/platform/surface/aggregator/controller.c
··· 825 825 826 826 cplt->dev = dev; 827 827 828 - cplt->wq = create_workqueue(SSAM_CPLT_WQ_NAME); 828 + cplt->wq = alloc_workqueue(SSAM_CPLT_WQ_NAME, WQ_UNBOUND | WQ_MEM_RECLAIM, 0); 829 829 if (!cplt->wq) 830 830 return -ENOMEM; 831 831
+10
drivers/platform/surface/surface_aggregator_tabletsw.c
··· 210 210 SSAM_KIP_COVER_STATE_LAPTOP = 0x03, 211 211 SSAM_KIP_COVER_STATE_FOLDED_CANVAS = 0x04, 212 212 SSAM_KIP_COVER_STATE_FOLDED_BACK = 0x05, 213 + SSAM_KIP_COVER_STATE_BOOK = 0x06, 213 214 }; 214 215 215 216 static const char *ssam_kip_cover_state_name(struct ssam_tablet_sw *sw, ··· 232 231 case SSAM_KIP_COVER_STATE_FOLDED_BACK: 233 232 return "folded-back"; 234 233 234 + case SSAM_KIP_COVER_STATE_BOOK: 235 + return "book"; 236 + 235 237 default: 236 238 dev_warn(&sw->sdev->dev, "unknown KIP cover state: %u\n", state->state); 237 239 return "<unknown>"; ··· 248 244 case SSAM_KIP_COVER_STATE_DISCONNECTED: 249 245 case SSAM_KIP_COVER_STATE_FOLDED_CANVAS: 250 246 case SSAM_KIP_COVER_STATE_FOLDED_BACK: 247 + case SSAM_KIP_COVER_STATE_BOOK: 251 248 return true; 252 249 253 250 case SSAM_KIP_COVER_STATE_CLOSED: ··· 340 335 SSAM_POS_COVER_LAPTOP = 0x03, 341 336 SSAM_POS_COVER_FOLDED_CANVAS = 0x04, 342 337 SSAM_POS_COVER_FOLDED_BACK = 0x05, 338 + SSAM_POS_COVER_BOOK = 0x06, 343 339 }; 344 340 345 341 enum ssam_pos_state_sls { ··· 372 366 373 367 case SSAM_POS_COVER_FOLDED_BACK: 374 368 return "folded-back"; 369 + 370 + case SSAM_POS_COVER_BOOK: 371 + return "book"; 375 372 376 373 default: 377 374 dev_warn(&sw->sdev->dev, "unknown device posture for type-cover: %u\n", state); ··· 425 416 case SSAM_POS_COVER_DISCONNECTED: 426 417 case SSAM_POS_COVER_FOLDED_CANVAS: 427 418 case SSAM_POS_COVER_FOLDED_BACK: 419 + case SSAM_POS_COVER_BOOK: 428 420 return true; 429 421 430 422 case SSAM_POS_COVER_CLOSED:
+8 -5
drivers/platform/x86/intel/int3472/clk_and_regulator.c
··· 101 101 102 102 int3472->clock.ena_gpio = acpi_get_and_request_gpiod(path, agpio->pin_table[0], 103 103 "int3472,clk-enable"); 104 - if (IS_ERR(int3472->clock.ena_gpio)) 105 - return dev_err_probe(int3472->dev, PTR_ERR(int3472->clock.ena_gpio), 106 - "getting clk-enable GPIO\n"); 104 + if (IS_ERR(int3472->clock.ena_gpio)) { 105 + ret = PTR_ERR(int3472->clock.ena_gpio); 106 + int3472->clock.ena_gpio = NULL; 107 + return dev_err_probe(int3472->dev, ret, "getting clk-enable GPIO\n"); 108 + } 107 109 108 110 if (polarity == GPIO_ACTIVE_LOW) 109 111 gpiod_toggle_active_low(int3472->clock.ena_gpio); ··· 201 199 int3472->regulator.gpio = acpi_get_and_request_gpiod(path, agpio->pin_table[0], 202 200 "int3472,regulator"); 203 201 if (IS_ERR(int3472->regulator.gpio)) { 204 - dev_err(int3472->dev, "Failed to get regulator GPIO line\n"); 205 - return PTR_ERR(int3472->regulator.gpio); 202 + ret = PTR_ERR(int3472->regulator.gpio); 203 + int3472->regulator.gpio = NULL; 204 + return dev_err_probe(int3472->dev, ret, "getting regulator GPIO\n"); 206 205 } 207 206 208 207 /* Ensure the pin is in output mode and non-active state */
+1
drivers/scsi/qla2xxx/qla_def.h
··· 3796 3796 uint64_t retry_term_jiff; 3797 3797 struct qla_tgt_counters tgt_counters; 3798 3798 uint16_t cpuid; 3799 + bool cpu_mapped; 3799 3800 struct qla_fw_resources fwres ____cacheline_aligned; 3800 3801 struct qla_buf_pool buf_pool; 3801 3802 u32 cmd_cnt;
+3
drivers/scsi/qla2xxx/qla_init.c
··· 9426 9426 qpair->rsp->req = qpair->req; 9427 9427 qpair->rsp->qpair = qpair; 9428 9428 9429 + if (!qpair->cpu_mapped) 9430 + qla_cpu_update(qpair, raw_smp_processor_id()); 9431 + 9429 9432 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { 9430 9433 if (ha->fw_attributes & BIT_4) 9431 9434 qpair->difdix_supported = 1;
+3
drivers/scsi/qla2xxx/qla_inline.h
··· 539 539 if (!ha->qp_cpu_map) 540 540 return; 541 541 mask = pci_irq_get_affinity(ha->pdev, msix->vector_base0); 542 + if (!mask) 543 + return; 542 544 qpair->cpuid = cpumask_first(mask); 543 545 for_each_cpu(cpu, mask) { 544 546 ha->qp_cpu_map[cpu] = qpair; 545 547 } 546 548 msix->cpuid = qpair->cpuid; 549 + qpair->cpu_mapped = true; 547 550 } 548 551 549 552 static inline void
+3
drivers/scsi/qla2xxx/qla_isr.c
··· 3770 3770 3771 3771 if (rsp->qpair->cpuid != smp_processor_id() || !rsp->qpair->rcv_intr) { 3772 3772 rsp->qpair->rcv_intr = 1; 3773 + 3774 + if (!rsp->qpair->cpu_mapped) 3775 + qla_cpu_update(rsp->qpair, raw_smp_processor_id()); 3773 3776 } 3774 3777 3775 3778 #define __update_rsp_in(_is_shadow_hba, _rsp, _rsp_in) \
+4
drivers/scsi/stex.c
··· 109 109 TASK_ATTRIBUTE_HEADOFQUEUE = 0x1, 110 110 TASK_ATTRIBUTE_ORDERED = 0x2, 111 111 TASK_ATTRIBUTE_ACA = 0x4, 112 + }; 112 113 114 + enum { 113 115 SS_STS_NORMAL = 0x80000000, 114 116 SS_STS_DONE = 0x40000000, 115 117 SS_STS_HANDSHAKE = 0x20000000, ··· 123 121 SS_I2H_REQUEST_RESET = 0x2000, 124 122 125 123 SS_MU_OPERATIONAL = 0x80000000, 124 + }; 126 125 126 + enum { 127 127 STEX_CDB_LENGTH = 16, 128 128 STATUS_VAR_LEN = 128, 129 129
+2 -2
drivers/soc/fsl/qe/Kconfig
··· 36 36 config CPM_TSA 37 37 tristate "CPM TSA support" 38 38 depends on OF && HAS_IOMEM 39 - depends on CPM1 || COMPILE_TEST 39 + depends on CPM1 || (CPM && COMPILE_TEST) 40 40 help 41 41 Freescale CPM Time Slot Assigner (TSA) 42 42 controller. ··· 47 47 config CPM_QMC 48 48 tristate "CPM QMC support" 49 49 depends on OF && HAS_IOMEM 50 - depends on CPM1 || (FSL_SOC && COMPILE_TEST) 50 + depends on CPM1 || (FSL_SOC && CPM && COMPILE_TEST) 51 51 depends on CPM_TSA 52 52 help 53 53 Freescale CPM QUICC Multichannel Controller
+6 -1
drivers/spi/spi-fsl-lpspi.c
··· 910 910 ret = fsl_lpspi_dma_init(&pdev->dev, fsl_lpspi, controller); 911 911 if (ret == -EPROBE_DEFER) 912 912 goto out_pm_get; 913 - 914 913 if (ret < 0) 915 914 dev_err(&pdev->dev, "dma setup error %d, use pio\n", ret); 915 + else 916 + /* 917 + * disable LPSPI module IRQ when enable DMA mode successfully, 918 + * to prevent the unexpected LPSPI module IRQ events. 919 + */ 920 + disable_irq(irq); 916 921 917 922 ret = devm_spi_register_controller(&pdev->dev, controller); 918 923 if (ret < 0) {
+3
drivers/spi/spi-mt65xx.c
··· 1275 1275 struct mtk_spi *mdata = spi_master_get_devdata(master); 1276 1276 int ret; 1277 1277 1278 + if (mdata->use_spimem && !completion_done(&mdata->spimem_done)) 1279 + complete(&mdata->spimem_done); 1280 + 1278 1281 ret = pm_runtime_resume_and_get(&pdev->dev); 1279 1282 if (ret < 0) 1280 1283 return ret;
+18 -19
drivers/spi/spi-qup.c
··· 1028 1028 return -ENXIO; 1029 1029 } 1030 1030 1031 - ret = clk_prepare_enable(cclk); 1032 - if (ret) { 1033 - dev_err(dev, "cannot enable core clock\n"); 1034 - return ret; 1035 - } 1036 - 1037 - ret = clk_prepare_enable(iclk); 1038 - if (ret) { 1039 - clk_disable_unprepare(cclk); 1040 - dev_err(dev, "cannot enable iface clock\n"); 1041 - return ret; 1042 - } 1043 - 1044 1031 master = spi_alloc_master(dev, sizeof(struct spi_qup)); 1045 1032 if (!master) { 1046 - clk_disable_unprepare(cclk); 1047 - clk_disable_unprepare(iclk); 1048 1033 dev_err(dev, "cannot allocate master\n"); 1049 1034 return -ENOMEM; 1050 1035 } ··· 1077 1092 spin_lock_init(&controller->lock); 1078 1093 init_completion(&controller->done); 1079 1094 1095 + ret = clk_prepare_enable(cclk); 1096 + if (ret) { 1097 + dev_err(dev, "cannot enable core clock\n"); 1098 + goto error_dma; 1099 + } 1100 + 1101 + ret = clk_prepare_enable(iclk); 1102 + if (ret) { 1103 + clk_disable_unprepare(cclk); 1104 + dev_err(dev, "cannot enable iface clock\n"); 1105 + goto error_dma; 1106 + } 1107 + 1080 1108 iomode = readl_relaxed(base + QUP_IO_M_MODES); 1081 1109 1082 1110 size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode); ··· 1119 1121 ret = spi_qup_set_state(controller, QUP_STATE_RESET); 1120 1122 if (ret) { 1121 1123 dev_err(dev, "cannot set RESET state\n"); 1122 - goto error_dma; 1124 + goto error_clk; 1123 1125 } 1124 1126 1125 1127 writel_relaxed(0, base + QUP_OPERATIONAL); ··· 1143 1145 ret = devm_request_irq(dev, irq, spi_qup_qup_irq, 1144 1146 IRQF_TRIGGER_HIGH, pdev->name, controller); 1145 1147 if (ret) 1146 - goto error_dma; 1148 + goto error_clk; 1147 1149 1148 1150 pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC); 1149 1151 pm_runtime_use_autosuspend(dev); ··· 1158 1160 1159 1161 disable_pm: 1160 1162 pm_runtime_disable(&pdev->dev); 1163 + error_clk: 1164 + clk_disable_unprepare(cclk); 1165 + clk_disable_unprepare(iclk); 1161 1166 error_dma: 1162 1167 spi_qup_release_dma(master); 1163 1168 error: 1164 - 
clk_disable_unprepare(cclk); 1165 - clk_disable_unprepare(iclk); 1166 1169 spi_master_put(master); 1167 1170 return ret; 1168 1171 }
+2 -2
drivers/staging/media/atomisp/i2c/atomisp-ov2680.c
··· 373 373 static int ov2680_detect(struct i2c_client *client) 374 374 { 375 375 struct i2c_adapter *adapter = client->adapter; 376 - u32 high, low; 376 + u32 high = 0, low = 0; 377 377 int ret; 378 378 u16 id; 379 379 u8 revision; ··· 383 383 384 384 ret = ov_read_reg8(client, OV2680_SC_CMMN_CHIP_ID_H, &high); 385 385 if (ret) { 386 - dev_err(&client->dev, "sensor_id_high = 0x%x\n", high); 386 + dev_err(&client->dev, "sensor_id_high read failed (%d)\n", ret); 387 387 return -ENODEV; 388 388 } 389 389 ret = ov_read_reg8(client, OV2680_SC_CMMN_CHIP_ID_L, &low);
+1 -1
drivers/staging/media/imx/imx8mq-mipi-csi2.c
··· 354 354 struct v4l2_subdev_state *sd_state) 355 355 { 356 356 int ret; 357 - u32 hs_settle; 357 + u32 hs_settle = 0; 358 358 359 359 ret = imx8mq_mipi_csi_sw_reset(state); 360 360 if (ret)
-2
drivers/target/iscsi/iscsi_target.c
··· 364 364 init_completion(&np->np_restart_comp); 365 365 INIT_LIST_HEAD(&np->np_list); 366 366 367 - timer_setup(&np->np_login_timer, iscsi_handle_login_thread_timeout, 0); 368 - 369 367 ret = iscsi_target_setup_login_socket(np, sockaddr); 370 368 if (ret != 0) { 371 369 kfree(np);
+5 -58
drivers/target/iscsi/iscsi_target_login.c
··· 811 811 iscsit_dec_conn_usage_count(conn); 812 812 } 813 813 814 - void iscsi_handle_login_thread_timeout(struct timer_list *t) 815 - { 816 - struct iscsi_np *np = from_timer(np, t, np_login_timer); 817 - 818 - spin_lock_bh(&np->np_thread_lock); 819 - pr_err("iSCSI Login timeout on Network Portal %pISpc\n", 820 - &np->np_sockaddr); 821 - 822 - if (np->np_login_timer_flags & ISCSI_TF_STOP) { 823 - spin_unlock_bh(&np->np_thread_lock); 824 - return; 825 - } 826 - 827 - if (np->np_thread) 828 - send_sig(SIGINT, np->np_thread, 1); 829 - 830 - np->np_login_timer_flags &= ~ISCSI_TF_RUNNING; 831 - spin_unlock_bh(&np->np_thread_lock); 832 - } 833 - 834 - static void iscsi_start_login_thread_timer(struct iscsi_np *np) 835 - { 836 - /* 837 - * This used the TA_LOGIN_TIMEOUT constant because at this 838 - * point we do not have access to ISCSI_TPG_ATTRIB(tpg)->login_timeout 839 - */ 840 - spin_lock_bh(&np->np_thread_lock); 841 - np->np_login_timer_flags &= ~ISCSI_TF_STOP; 842 - np->np_login_timer_flags |= ISCSI_TF_RUNNING; 843 - mod_timer(&np->np_login_timer, jiffies + TA_LOGIN_TIMEOUT * HZ); 844 - 845 - pr_debug("Added timeout timer to iSCSI login request for" 846 - " %u seconds.\n", TA_LOGIN_TIMEOUT); 847 - spin_unlock_bh(&np->np_thread_lock); 848 - } 849 - 850 - static void iscsi_stop_login_thread_timer(struct iscsi_np *np) 851 - { 852 - spin_lock_bh(&np->np_thread_lock); 853 - if (!(np->np_login_timer_flags & ISCSI_TF_RUNNING)) { 854 - spin_unlock_bh(&np->np_thread_lock); 855 - return; 856 - } 857 - np->np_login_timer_flags |= ISCSI_TF_STOP; 858 - spin_unlock_bh(&np->np_thread_lock); 859 - 860 - del_timer_sync(&np->np_login_timer); 861 - 862 - spin_lock_bh(&np->np_thread_lock); 863 - np->np_login_timer_flags &= ~ISCSI_TF_RUNNING; 864 - spin_unlock_bh(&np->np_thread_lock); 865 - } 866 - 867 814 int iscsit_setup_np( 868 815 struct iscsi_np *np, 869 816 struct sockaddr_storage *sockaddr) ··· 1070 1123 spin_lock_init(&conn->nopin_timer_lock); 1071 1124 
spin_lock_init(&conn->response_queue_lock); 1072 1125 spin_lock_init(&conn->state_lock); 1126 + spin_lock_init(&conn->login_worker_lock); 1127 + spin_lock_init(&conn->login_timer_lock); 1073 1128 1074 1129 timer_setup(&conn->nopin_response_timer, 1075 1130 iscsit_handle_nopin_response_timeout, 0); 1076 1131 timer_setup(&conn->nopin_timer, iscsit_handle_nopin_timeout, 0); 1132 + timer_setup(&conn->login_timer, iscsit_login_timeout, 0); 1077 1133 1078 1134 if (iscsit_conn_set_transport(conn, np->np_transport) < 0) 1079 1135 goto free_conn; ··· 1254 1304 goto new_sess_out; 1255 1305 } 1256 1306 1257 - iscsi_start_login_thread_timer(np); 1307 + iscsit_start_login_timer(conn, current); 1258 1308 1259 1309 pr_debug("Moving to TARG_CONN_STATE_XPT_UP.\n"); 1260 1310 conn->conn_state = TARG_CONN_STATE_XPT_UP; ··· 1367 1417 if (ret < 0) 1368 1418 goto new_sess_out; 1369 1419 1370 - iscsi_stop_login_thread_timer(np); 1371 - 1372 1420 if (ret == 1) { 1373 1421 tpg_np = conn->tpg_np; 1374 1422 ··· 1382 1434 new_sess_out: 1383 1435 new_sess = true; 1384 1436 old_sess_out: 1385 - iscsi_stop_login_thread_timer(np); 1437 + iscsit_stop_login_timer(conn); 1386 1438 tpg_np = conn->tpg_np; 1387 1439 iscsi_target_login_sess_out(conn, zero_tsih, new_sess); 1388 1440 new_sess = false; ··· 1396 1448 return 1; 1397 1449 1398 1450 exit: 1399 - iscsi_stop_login_thread_timer(np); 1400 1451 spin_lock_bh(&np->np_thread_lock); 1401 1452 np->np_thread_state = ISCSI_NP_THREAD_EXIT; 1402 1453 spin_unlock_bh(&np->np_thread_lock);
+42 -32
drivers/target/iscsi/iscsi_target_nego.c
··· 535 535 iscsi_target_login_sess_out(conn, zero_tsih, true); 536 536 } 537 537 538 - struct conn_timeout { 539 - struct timer_list timer; 540 - struct iscsit_conn *conn; 541 - }; 542 - 543 - static void iscsi_target_login_timeout(struct timer_list *t) 544 - { 545 - struct conn_timeout *timeout = from_timer(timeout, t, timer); 546 - struct iscsit_conn *conn = timeout->conn; 547 - 548 - pr_debug("Entering iscsi_target_login_timeout >>>>>>>>>>>>>>>>>>>\n"); 549 - 550 - if (conn->login_kworker) { 551 - pr_debug("Sending SIGINT to conn->login_kworker %s/%d\n", 552 - conn->login_kworker->comm, conn->login_kworker->pid); 553 - send_sig(SIGINT, conn->login_kworker, 1); 554 - } 555 - } 556 - 557 538 static void iscsi_target_do_login_rx(struct work_struct *work) 558 539 { 559 540 struct iscsit_conn *conn = container_of(work, ··· 543 562 struct iscsi_np *np = login->np; 544 563 struct iscsi_portal_group *tpg = conn->tpg; 545 564 struct iscsi_tpg_np *tpg_np = conn->tpg_np; 546 - struct conn_timeout timeout; 547 565 int rc, zero_tsih = login->zero_tsih; 548 566 bool state; 549 567 550 568 pr_debug("entering iscsi_target_do_login_rx, conn: %p, %s:%d\n", 551 569 conn, current->comm, current->pid); 570 + 571 + spin_lock(&conn->login_worker_lock); 572 + set_bit(LOGIN_FLAGS_WORKER_RUNNING, &conn->login_flags); 573 + spin_unlock(&conn->login_worker_lock); 552 574 /* 553 575 * If iscsi_target_do_login_rx() has been invoked by ->sk_data_ready() 554 576 * before initial PDU processing in iscsi_target_start_negotiation() ··· 581 597 goto err; 582 598 } 583 599 584 - conn->login_kworker = current; 585 600 allow_signal(SIGINT); 586 - 587 - timeout.conn = conn; 588 - timer_setup_on_stack(&timeout.timer, iscsi_target_login_timeout, 0); 589 - mod_timer(&timeout.timer, jiffies + TA_LOGIN_TIMEOUT * HZ); 590 - pr_debug("Starting login timer for %s/%d\n", current->comm, current->pid); 601 + rc = iscsit_set_login_timer_kworker(conn, current); 602 + if (rc < 0) { 603 + /* The login timer has 
already expired */ 604 + pr_debug("iscsi_target_do_login_rx, login failed\n"); 605 + goto err; 606 + } 591 607 592 608 rc = conn->conn_transport->iscsit_get_login_rx(conn, login); 593 - del_timer_sync(&timeout.timer); 594 - destroy_timer_on_stack(&timeout.timer); 595 609 flush_signals(current); 596 - conn->login_kworker = NULL; 597 610 598 611 if (rc < 0) 599 612 goto err; ··· 627 646 if (iscsi_target_sk_check_and_clear(conn, 628 647 LOGIN_FLAGS_WRITE_ACTIVE)) 629 648 goto err; 649 + 650 + /* 651 + * Set the login timer thread pointer to NULL to prevent the 652 + * login process from getting stuck if the initiator 653 + * stops sending data. 654 + */ 655 + rc = iscsit_set_login_timer_kworker(conn, NULL); 656 + if (rc < 0) 657 + goto err; 630 658 } else if (rc == 1) { 659 + iscsit_stop_login_timer(conn); 631 660 cancel_delayed_work(&conn->login_work); 632 661 iscsi_target_nego_release(conn); 633 662 iscsi_post_login_handler(np, conn, zero_tsih); ··· 647 656 648 657 err: 649 658 iscsi_target_restore_sock_callbacks(conn); 659 + iscsit_stop_login_timer(conn); 650 660 cancel_delayed_work(&conn->login_work); 651 661 iscsi_target_login_drop(conn, login); 652 662 iscsit_deaccess_np(np, tpg, tpg_np); ··· 1122 1130 iscsi_target_set_sock_callbacks(conn); 1123 1131 1124 1132 login->np = np; 1133 + conn->tpg = NULL; 1125 1134 1126 1135 login_req = (struct iscsi_login_req *) login->req; 1127 1136 payload_length = ntoh24(login_req->dlength); ··· 1190 1197 */ 1191 1198 sessiontype = strncmp(s_buf, DISCOVERY, 9); 1192 1199 if (!sessiontype) { 1193 - conn->tpg = iscsit_global->discovery_tpg; 1194 1200 if (!login->leading_connection) 1195 1201 goto get_target; 1196 1202 ··· 1206 1214 * Serialize access across the discovery struct iscsi_portal_group to 1207 1215 * process login attempt. 
1208 1216 */ 1217 + conn->tpg = iscsit_global->discovery_tpg; 1209 1218 if (iscsit_access_np(np, conn->tpg) < 0) { 1210 1219 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 1211 1220 ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE); 1221 + conn->tpg = NULL; 1212 1222 ret = -1; 1213 1223 goto out; 1214 1224 } ··· 1362 1368 * and perform connection cleanup now. 1363 1369 */ 1364 1370 ret = iscsi_target_do_login(conn, login); 1365 - if (!ret && iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_INITIAL_PDU)) 1366 - ret = -1; 1371 + if (!ret) { 1372 + spin_lock(&conn->login_worker_lock); 1373 + 1374 + if (iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_INITIAL_PDU)) 1375 + ret = -1; 1376 + else if (!test_bit(LOGIN_FLAGS_WORKER_RUNNING, &conn->login_flags)) { 1377 + if (iscsit_set_login_timer_kworker(conn, NULL) < 0) { 1378 + /* 1379 + * The timeout has expired already. 1380 + * Schedule login_work to perform the cleanup. 1381 + */ 1382 + schedule_delayed_work(&conn->login_work, 0); 1383 + } 1384 + } 1385 + 1386 + spin_unlock(&conn->login_worker_lock); 1387 + } 1367 1388 1368 1389 if (ret < 0) { 1369 1390 iscsi_target_restore_sock_callbacks(conn); 1370 1391 iscsi_remove_failed_auth_entry(conn); 1371 1392 } 1372 1393 if (ret != 0) { 1394 + iscsit_stop_login_timer(conn); 1373 1395 cancel_delayed_work_sync(&conn->login_work); 1374 1396 iscsi_target_nego_release(conn); 1375 1397 }
+51
drivers/target/iscsi/iscsi_target_util.c
··· 1040 1040 spin_unlock_bh(&conn->nopin_timer_lock); 1041 1041 } 1042 1042 1043 + void iscsit_login_timeout(struct timer_list *t) 1044 + { 1045 + struct iscsit_conn *conn = from_timer(conn, t, login_timer); 1046 + struct iscsi_login *login = conn->login; 1047 + 1048 + pr_debug("Entering iscsi_target_login_timeout >>>>>>>>>>>>>>>>>>>\n"); 1049 + 1050 + spin_lock_bh(&conn->login_timer_lock); 1051 + login->login_failed = 1; 1052 + 1053 + if (conn->login_kworker) { 1054 + pr_debug("Sending SIGINT to conn->login_kworker %s/%d\n", 1055 + conn->login_kworker->comm, conn->login_kworker->pid); 1056 + send_sig(SIGINT, conn->login_kworker, 1); 1057 + } else { 1058 + schedule_delayed_work(&conn->login_work, 0); 1059 + } 1060 + spin_unlock_bh(&conn->login_timer_lock); 1061 + } 1062 + 1063 + void iscsit_start_login_timer(struct iscsit_conn *conn, struct task_struct *kthr) 1064 + { 1065 + pr_debug("Login timer started\n"); 1066 + 1067 + conn->login_kworker = kthr; 1068 + mod_timer(&conn->login_timer, jiffies + TA_LOGIN_TIMEOUT * HZ); 1069 + } 1070 + 1071 + int iscsit_set_login_timer_kworker(struct iscsit_conn *conn, struct task_struct *kthr) 1072 + { 1073 + struct iscsi_login *login = conn->login; 1074 + int ret = 0; 1075 + 1076 + spin_lock_bh(&conn->login_timer_lock); 1077 + if (login->login_failed) { 1078 + /* The timer has already expired */ 1079 + ret = -1; 1080 + } else { 1081 + conn->login_kworker = kthr; 1082 + } 1083 + spin_unlock_bh(&conn->login_timer_lock); 1084 + 1085 + return ret; 1086 + } 1087 + 1088 + void iscsit_stop_login_timer(struct iscsit_conn *conn) 1089 + { 1090 + pr_debug("Login timer stopped\n"); 1091 + timer_delete_sync(&conn->login_timer); 1092 + } 1093 + 1043 1094 int iscsit_send_tx_data( 1044 1095 struct iscsit_cmd *cmd, 1045 1096 struct iscsit_conn *conn,
+4
drivers/target/iscsi/iscsi_target_util.h
··· 56 56 extern void __iscsit_start_nopin_timer(struct iscsit_conn *); 57 57 extern void iscsit_start_nopin_timer(struct iscsit_conn *); 58 58 extern void iscsit_stop_nopin_timer(struct iscsit_conn *); 59 + extern void iscsit_login_timeout(struct timer_list *t); 60 + extern void iscsit_start_login_timer(struct iscsit_conn *, struct task_struct *kthr); 61 + extern void iscsit_stop_login_timer(struct iscsit_conn *); 62 + extern int iscsit_set_login_timer_kworker(struct iscsit_conn *, struct task_struct *kthr); 59 63 extern int iscsit_send_tx_data(struct iscsit_cmd *, struct iscsit_conn *, int); 60 64 extern int iscsit_fe_sendpage_sg(struct iscsit_cmd *, struct iscsit_conn *); 61 65 extern int iscsit_tx_login_rsp(struct iscsit_conn *, u8, u8);
+3 -1
drivers/tty/serial/8250/8250_tegra.c
··· 113 113 114 114 ret = serial8250_register_8250_port(&port8250); 115 115 if (ret < 0) 116 - goto err_clkdisable; 116 + goto err_ctrl_assert; 117 117 118 118 platform_set_drvdata(pdev, uart); 119 119 uart->line = ret; 120 120 121 121 return 0; 122 122 123 + err_ctrl_assert: 124 + reset_control_assert(uart->rst); 123 125 err_clkdisable: 124 126 clk_disable_unprepare(uart->clk); 125 127
+1 -1
drivers/tty/serial/Kconfig
··· 762 762 763 763 config SERIAL_CPM 764 764 tristate "CPM SCC/SMC serial port support" 765 - depends on CPM2 || CPM1 || (PPC32 && COMPILE_TEST) 765 + depends on CPM2 || CPM1 766 766 select SERIAL_CORE 767 767 help 768 768 This driver supports the SCC and SMC serial ports on Motorola
-2
drivers/tty/serial/cpm_uart/cpm_uart.h
··· 19 19 #include "cpm_uart_cpm2.h" 20 20 #elif defined(CONFIG_CPM1) 21 21 #include "cpm_uart_cpm1.h" 22 - #elif defined(CONFIG_COMPILE_TEST) 23 - #include "cpm_uart_cpm2.h" 24 22 #endif 25 23 26 24 #define SERIAL_CPM_MAJOR 204
+23 -21
drivers/tty/serial/fsl_lpuart.c
··· 1495 1495 1496 1496 static void lpuart32_break_ctl(struct uart_port *port, int break_state) 1497 1497 { 1498 - unsigned long temp, modem; 1499 - struct tty_struct *tty; 1500 - unsigned int cflag = 0; 1498 + unsigned long temp; 1501 1499 1502 - tty = tty_port_tty_get(&port->state->port); 1503 - if (tty) { 1504 - cflag = tty->termios.c_cflag; 1505 - tty_kref_put(tty); 1506 - } 1500 + temp = lpuart32_read(port, UARTCTRL); 1507 1501 1508 - temp = lpuart32_read(port, UARTCTRL) & ~UARTCTRL_SBK; 1509 - modem = lpuart32_read(port, UARTMODIR); 1510 - 1502 + /* 1503 + * LPUART IP now has two known bugs, one is CTS has higher priority than the 1504 + * break signal, which causes the break signal sending through UARTCTRL_SBK 1505 + * may impacted by the CTS input if the HW flow control is enabled. It 1506 + * exists on all platforms we support in this driver. 1507 + * Another bug is i.MX8QM LPUART may have an additional break character 1508 + * being sent after SBK was cleared. 1509 + * To avoid above two bugs, we use Transmit Data Inversion function to send 1510 + * the break signal instead of UARTCTRL_SBK. 1511 + */ 1511 1512 if (break_state != 0) { 1512 - temp |= UARTCTRL_SBK; 1513 1513 /* 1514 - * LPUART CTS has higher priority than SBK, need to disable CTS before 1515 - * asserting SBK to avoid any interference if flow control is enabled. 1514 + * Disable the transmitter to prevent any data from being sent out 1515 + * during break, then invert the TX line to send break. 1516 1516 */ 1517 - if (cflag & CRTSCTS && modem & UARTMODIR_TXCTSE) 1518 - lpuart32_write(port, modem & ~UARTMODIR_TXCTSE, UARTMODIR); 1517 + temp &= ~UARTCTRL_TE; 1518 + lpuart32_write(port, temp, UARTCTRL); 1519 + temp |= UARTCTRL_TXINV; 1520 + lpuart32_write(port, temp, UARTCTRL); 1519 1521 } else { 1520 - /* Re-enable the CTS when break off. 
*/ 1521 - if (cflag & CRTSCTS && !(modem & UARTMODIR_TXCTSE)) 1522 - lpuart32_write(port, modem | UARTMODIR_TXCTSE, UARTMODIR); 1522 + /* Disable the TXINV to turn off break and re-enable transmitter. */ 1523 + temp &= ~UARTCTRL_TXINV; 1524 + lpuart32_write(port, temp, UARTCTRL); 1525 + temp |= UARTCTRL_TE; 1526 + lpuart32_write(port, temp, UARTCTRL); 1523 1527 } 1524 - 1525 - lpuart32_write(port, temp, UARTCTRL); 1526 1528 } 1527 1529 1528 1530 static void lpuart_setup_watermark(struct lpuart_port *sport)
+13
drivers/usb/cdns3/cdns3-gadget.c
··· 2097 2097 else 2098 2098 priv_ep->trb_burst_size = 16; 2099 2099 2100 + /* 2101 + * In versions preceding DEV_VER_V2, for example, iMX8QM, there exit the bugs 2102 + * in the DMA. These bugs occur when the trb_burst_size exceeds 16 and the 2103 + * address is not aligned to 128 Bytes (which is a product of the 64-bit AXI 2104 + * and AXI maximum burst length of 16 or 0xF+1, dma_axi_ctrl0[3:0]). This 2105 + * results in data corruption when it crosses the 4K border. The corruption 2106 + * specifically occurs from the position (4K - (address & 0x7F)) to 4K. 2107 + * 2108 + * So force trb_burst_size to 16 at such platform. 2109 + */ 2110 + if (priv_dev->dev_ver < DEV_VER_V2) 2111 + priv_ep->trb_burst_size = 16; 2112 + 2100 2113 mult = min_t(u8, mult, EP_CFG_MULT_MAX); 2101 2114 buffering = min_t(u8, buffering, EP_CFG_BUFFERING_MAX); 2102 2115 maxburst = min_t(u8, maxburst, EP_CFG_MAXBURST_MAX);
+41
drivers/usb/core/buffer.c
··· 172 172 } 173 173 dma_free_coherent(hcd->self.sysdev, size, addr, dma); 174 174 } 175 + 176 + void *hcd_buffer_alloc_pages(struct usb_hcd *hcd, 177 + size_t size, gfp_t mem_flags, dma_addr_t *dma) 178 + { 179 + if (size == 0) 180 + return NULL; 181 + 182 + if (hcd->localmem_pool) 183 + return gen_pool_dma_alloc_align(hcd->localmem_pool, 184 + size, dma, PAGE_SIZE); 185 + 186 + /* some USB hosts just use PIO */ 187 + if (!hcd_uses_dma(hcd)) { 188 + *dma = DMA_MAPPING_ERROR; 189 + return (void *)__get_free_pages(mem_flags, 190 + get_order(size)); 191 + } 192 + 193 + return dma_alloc_coherent(hcd->self.sysdev, 194 + size, dma, mem_flags); 195 + } 196 + 197 + void hcd_buffer_free_pages(struct usb_hcd *hcd, 198 + size_t size, void *addr, dma_addr_t dma) 199 + { 200 + if (!addr) 201 + return; 202 + 203 + if (hcd->localmem_pool) { 204 + gen_pool_free(hcd->localmem_pool, 205 + (unsigned long)addr, size); 206 + return; 207 + } 208 + 209 + if (!hcd_uses_dma(hcd)) { 210 + free_pages((unsigned long)addr, get_order(size)); 211 + return; 212 + } 213 + 214 + dma_free_coherent(hcd->self.sysdev, size, addr, dma); 215 + }
+14 -6
drivers/usb/core/devio.c
··· 186 186 static void dec_usb_memory_use_count(struct usb_memory *usbm, int *count) 187 187 { 188 188 struct usb_dev_state *ps = usbm->ps; 189 + struct usb_hcd *hcd = bus_to_hcd(ps->dev->bus); 189 190 unsigned long flags; 190 191 191 192 spin_lock_irqsave(&ps->lock, flags); ··· 195 194 list_del(&usbm->memlist); 196 195 spin_unlock_irqrestore(&ps->lock, flags); 197 196 198 - usb_free_coherent(ps->dev, usbm->size, usbm->mem, 199 - usbm->dma_handle); 197 + hcd_buffer_free_pages(hcd, usbm->size, 198 + usbm->mem, usbm->dma_handle); 200 199 usbfs_decrease_memory_usage( 201 200 usbm->size + sizeof(struct usb_memory)); 202 201 kfree(usbm); ··· 235 234 size_t size = vma->vm_end - vma->vm_start; 236 235 void *mem; 237 236 unsigned long flags; 238 - dma_addr_t dma_handle; 237 + dma_addr_t dma_handle = DMA_MAPPING_ERROR; 239 238 int ret; 240 239 241 240 ret = usbfs_increase_memory_usage(size + sizeof(struct usb_memory)); ··· 248 247 goto error_decrease_mem; 249 248 } 250 249 251 - mem = usb_alloc_coherent(ps->dev, size, GFP_USER | __GFP_NOWARN, 252 - &dma_handle); 250 + mem = hcd_buffer_alloc_pages(hcd, 251 + size, GFP_USER | __GFP_NOWARN, &dma_handle); 253 252 if (!mem) { 254 253 ret = -ENOMEM; 255 254 goto error_free_usbm; ··· 265 264 usbm->vma_use_count = 1; 266 265 INIT_LIST_HEAD(&usbm->memlist); 267 266 268 - if (hcd->localmem_pool || !hcd_uses_dma(hcd)) { 267 + /* 268 + * In DMA-unavailable cases, hcd_buffer_alloc_pages allocates 269 + * normal pages and assigns DMA_MAPPING_ERROR to dma_handle. Check 270 + * whether we are in such cases, and then use remap_pfn_range (or 271 + * dma_mmap_coherent) to map normal (or DMA) pages into the user 272 + * space, respectively. 273 + */ 274 + if (dma_handle == DMA_MAPPING_ERROR) { 269 275 if (remap_pfn_range(vma, vma->vm_start, 270 276 virt_to_phys(usbm->mem) >> PAGE_SHIFT, 271 277 size, vma->vm_page_prot) < 0) {
+1 -1
drivers/usb/gadget/function/f_fs.c
··· 3535 3535 /* Drain any pending AIO completions */ 3536 3536 drain_workqueue(ffs->io_completion_wq); 3537 3537 3538 + ffs_event_add(ffs, FUNCTIONFS_UNBIND); 3538 3539 if (!--opts->refcnt) 3539 3540 functionfs_unbind(ffs); 3540 3541 ··· 3560 3559 func->function.ssp_descriptors = NULL; 3561 3560 func->interfaces_nums = NULL; 3562 3561 3563 - ffs_event_add(ffs, FUNCTIONFS_UNBIND); 3564 3562 } 3565 3563 3566 3564 static struct usb_function *ffs_alloc(struct usb_function_instance *fi)
+3
drivers/usb/gadget/udc/amd5536udc_pci.c
··· 170 170 retval = -ENODEV; 171 171 goto err_probe; 172 172 } 173 + 174 + udc = dev; 175 + 173 176 return 0; 174 177 175 178 err_probe:
+1 -1
drivers/usb/typec/tipd/core.c
··· 920 920 enable_irq(client->irq); 921 921 } 922 922 923 - if (client->irq) 923 + if (!client->irq) 924 924 queue_delayed_work(system_power_efficient_wq, &tps->wq_poll, 925 925 msecs_to_jiffies(POLL_INTERVAL)); 926 926
+2 -3
drivers/video/fbdev/arcfb.c
··· 590 590 return retval; 591 591 } 592 592 593 - static int arcfb_remove(struct platform_device *dev) 593 + static void arcfb_remove(struct platform_device *dev) 594 594 { 595 595 struct fb_info *info = platform_get_drvdata(dev); 596 596 ··· 601 601 vfree((void __force *)info->screen_base); 602 602 framebuffer_release(info); 603 603 } 604 - return 0; 605 604 } 606 605 607 606 static struct platform_driver arcfb_driver = { 608 607 .probe = arcfb_probe, 609 - .remove = arcfb_remove, 608 + .remove_new = arcfb_remove, 610 609 .driver = { 611 610 .name = "arcfb", 612 611 },
+3 -8
drivers/video/fbdev/au1100fb.c
··· 520 520 return -ENODEV; 521 521 } 522 522 523 - int au1100fb_drv_remove(struct platform_device *dev) 523 + void au1100fb_drv_remove(struct platform_device *dev) 524 524 { 525 525 struct au1100fb_device *fbdev = NULL; 526 - 527 - if (!dev) 528 - return -ENODEV; 529 526 530 527 fbdev = platform_get_drvdata(dev); 531 528 ··· 540 543 clk_disable_unprepare(fbdev->lcdclk); 541 544 clk_put(fbdev->lcdclk); 542 545 } 543 - 544 - return 0; 545 546 } 546 547 547 548 #ifdef CONFIG_PM ··· 588 593 .name = "au1100-lcd", 589 594 }, 590 595 .probe = au1100fb_drv_probe, 591 - .remove = au1100fb_drv_remove, 596 + .remove_new = au1100fb_drv_remove, 592 597 .suspend = au1100fb_drv_suspend, 593 - .resume = au1100fb_drv_resume, 598 + .resume = au1100fb_drv_resume, 594 599 }; 595 600 module_platform_driver(au1100fb_driver); 596 601
+2 -4
drivers/video/fbdev/au1200fb.c
··· 1765 1765 return ret; 1766 1766 } 1767 1767 1768 - static int au1200fb_drv_remove(struct platform_device *dev) 1768 + static void au1200fb_drv_remove(struct platform_device *dev) 1769 1769 { 1770 1770 struct au1200fb_platdata *pd = platform_get_drvdata(dev); 1771 1771 struct fb_info *fbi; ··· 1788 1788 } 1789 1789 1790 1790 free_irq(platform_get_irq(dev, 0), (void *)dev); 1791 - 1792 - return 0; 1793 1791 } 1794 1792 1795 1793 #ifdef CONFIG_PM ··· 1838 1840 .pm = AU1200FB_PMOPS, 1839 1841 }, 1840 1842 .probe = au1200fb_drv_probe, 1841 - .remove = au1200fb_drv_remove, 1843 + .remove_new = au1200fb_drv_remove, 1842 1844 }; 1843 1845 module_platform_driver(au1200fb_driver); 1844 1846
+2 -3
drivers/video/fbdev/broadsheetfb.c
··· 1193 1193 1194 1194 } 1195 1195 1196 - static int broadsheetfb_remove(struct platform_device *dev) 1196 + static void broadsheetfb_remove(struct platform_device *dev) 1197 1197 { 1198 1198 struct fb_info *info = platform_get_drvdata(dev); 1199 1199 ··· 1209 1209 module_put(par->board->owner); 1210 1210 framebuffer_release(info); 1211 1211 } 1212 - return 0; 1213 1212 } 1214 1213 1215 1214 static struct platform_driver broadsheetfb_driver = { 1216 1215 .probe = broadsheetfb_probe, 1217 - .remove = broadsheetfb_remove, 1216 + .remove_new = broadsheetfb_remove, 1218 1217 .driver = { 1219 1218 .name = "broadsheetfb", 1220 1219 },
+2 -4
drivers/video/fbdev/bw2.c
··· 352 352 return err; 353 353 } 354 354 355 - static int bw2_remove(struct platform_device *op) 355 + static void bw2_remove(struct platform_device *op) 356 356 { 357 357 struct fb_info *info = dev_get_drvdata(&op->dev); 358 358 struct bw2_par *par = info->par; ··· 363 363 of_iounmap(&op->resource[0], info->screen_base, info->fix.smem_len); 364 364 365 365 framebuffer_release(info); 366 - 367 - return 0; 368 366 } 369 367 370 368 static const struct of_device_id bw2_match[] = { ··· 379 381 .of_match_table = bw2_match, 380 382 }, 381 383 .probe = bw2_probe, 382 - .remove = bw2_remove, 384 + .remove_new = bw2_remove, 383 385 }; 384 386 385 387 static int __init bw2_init(void)
+3
drivers/video/fbdev/core/bitblit.c
··· 247 247 248 248 cursor.set = 0; 249 249 250 + if (!vc->vc_font.data) 251 + return; 252 + 250 253 c = scr_readw((u16 *) vc->vc_pos); 251 254 attribute = get_attribute(info, c); 252 255 src = vc->vc_font.data + ((c & charmask) * (w * vc->vc_font.height));
+9 -3
drivers/video/fbdev/imsttfb.c
··· 1452 1452 FBINFO_HWACCEL_FILLRECT | 1453 1453 FBINFO_HWACCEL_YPAN; 1454 1454 1455 - fb_alloc_cmap(&info->cmap, 0, 0); 1455 + if (fb_alloc_cmap(&info->cmap, 0, 0)) { 1456 + framebuffer_release(info); 1457 + return -ENODEV; 1458 + } 1456 1459 1457 1460 if (register_framebuffer(info) < 0) { 1461 + fb_dealloc_cmap(&info->cmap); 1458 1462 framebuffer_release(info); 1459 1463 return -ENODEV; 1460 1464 } ··· 1535 1531 goto error; 1536 1532 info->pseudo_palette = par->palette; 1537 1533 ret = init_imstt(info); 1538 - if (!ret) 1539 - pci_set_drvdata(pdev, info); 1534 + if (ret) 1535 + goto error; 1536 + 1537 + pci_set_drvdata(pdev, info); 1540 1538 return ret; 1541 1539 1542 1540 error:
+1 -1
drivers/video/fbdev/matrox/matroxfb_maven.c
··· 1291 1291 .driver = { 1292 1292 .name = "maven", 1293 1293 }, 1294 - .probe_new = maven_probe, 1294 + .probe = maven_probe, 1295 1295 .remove = maven_remove, 1296 1296 .id_table = maven_id, 1297 1297 };
+1 -1
drivers/video/fbdev/ssd1307fb.c
··· 844 844 MODULE_DEVICE_TABLE(i2c, ssd1307fb_i2c_id); 845 845 846 846 static struct i2c_driver ssd1307fb_driver = { 847 - .probe_new = ssd1307fb_probe, 847 + .probe = ssd1307fb_probe, 848 848 .remove = ssd1307fb_remove, 849 849 .id_table = ssd1307fb_i2c_id, 850 850 .driver = {
+3
fs/afs/dir.c
··· 1358 1358 op->dentry = dentry; 1359 1359 op->create.mode = S_IFDIR | mode; 1360 1360 op->create.reason = afs_edit_dir_for_mkdir; 1361 + op->mtime = current_time(dir); 1361 1362 op->ops = &afs_mkdir_operation; 1362 1363 return afs_do_sync_operation(op); 1363 1364 } ··· 1662 1661 op->dentry = dentry; 1663 1662 op->create.mode = S_IFREG | mode; 1664 1663 op->create.reason = afs_edit_dir_for_create; 1664 + op->mtime = current_time(dir); 1665 1665 op->ops = &afs_create_operation; 1666 1666 return afs_do_sync_operation(op); 1667 1667 ··· 1798 1796 op->ops = &afs_symlink_operation; 1799 1797 op->create.reason = afs_edit_dir_for_symlink; 1800 1798 op->create.symlink = content; 1799 + op->mtime = current_time(dir); 1801 1800 return afs_do_sync_operation(op); 1802 1801 1803 1802 error:
-4
fs/btrfs/bio.c
··· 811 811 goto fail; 812 812 813 813 if (dev_replace) { 814 - if (btrfs_op(&bbio->bio) == BTRFS_MAP_WRITE && btrfs_is_zoned(fs_info)) { 815 - bbio->bio.bi_opf &= ~REQ_OP_WRITE; 816 - bbio->bio.bi_opf |= REQ_OP_ZONE_APPEND; 817 - } 818 814 ASSERT(smap.dev == fs_info->dev_replace.srcdev); 819 815 smap.dev = fs_info->dev_replace.tgtdev; 820 816 }
+32 -16
fs/btrfs/scrub.c
··· 1137 1137 wake_up(&stripe->io_wait); 1138 1138 } 1139 1139 1140 + static void scrub_submit_write_bio(struct scrub_ctx *sctx, 1141 + struct scrub_stripe *stripe, 1142 + struct btrfs_bio *bbio, bool dev_replace) 1143 + { 1144 + struct btrfs_fs_info *fs_info = sctx->fs_info; 1145 + u32 bio_len = bbio->bio.bi_iter.bi_size; 1146 + u32 bio_off = (bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT) - 1147 + stripe->logical; 1148 + 1149 + fill_writer_pointer_gap(sctx, stripe->physical + bio_off); 1150 + atomic_inc(&stripe->pending_io); 1151 + btrfs_submit_repair_write(bbio, stripe->mirror_num, dev_replace); 1152 + if (!btrfs_is_zoned(fs_info)) 1153 + return; 1154 + /* 1155 + * For zoned writeback, queue depth must be 1, thus we must wait for 1156 + * the write to finish before the next write. 1157 + */ 1158 + wait_scrub_stripe_io(stripe); 1159 + 1160 + /* 1161 + * And also need to update the write pointer if write finished 1162 + * successfully. 1163 + */ 1164 + if (!test_bit(bio_off >> fs_info->sectorsize_bits, 1165 + &stripe->write_error_bitmap)) 1166 + sctx->write_pointer += bio_len; 1167 + } 1168 + 1140 1169 /* 1141 1170 * Submit the write bio(s) for the sectors specified by @write_bitmap. 1142 1171 * ··· 1184 1155 { 1185 1156 struct btrfs_fs_info *fs_info = stripe->bg->fs_info; 1186 1157 struct btrfs_bio *bbio = NULL; 1187 - const bool zoned = btrfs_is_zoned(fs_info); 1188 1158 int sector_nr; 1189 1159 1190 1160 for_each_set_bit(sector_nr, &write_bitmap, stripe->nr_sectors) { ··· 1196 1168 1197 1169 /* Cannot merge with previous sector, submit the current one. */ 1198 1170 if (bbio && sector_nr && !test_bit(sector_nr - 1, &write_bitmap)) { 1199 - fill_writer_pointer_gap(sctx, stripe->physical + 1200 - (sector_nr << fs_info->sectorsize_bits)); 1201 - atomic_inc(&stripe->pending_io); 1202 - btrfs_submit_repair_write(bbio, stripe->mirror_num, dev_replace); 1203 - /* For zoned writeback, queue depth must be 1. 
*/ 1204 - if (zoned) 1205 - wait_scrub_stripe_io(stripe); 1171 + scrub_submit_write_bio(sctx, stripe, bbio, dev_replace); 1206 1172 bbio = NULL; 1207 1173 } 1208 1174 if (!bbio) { ··· 1209 1187 ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff); 1210 1188 ASSERT(ret == fs_info->sectorsize); 1211 1189 } 1212 - if (bbio) { 1213 - fill_writer_pointer_gap(sctx, bbio->bio.bi_iter.bi_sector << 1214 - SECTOR_SHIFT); 1215 - atomic_inc(&stripe->pending_io); 1216 - btrfs_submit_repair_write(bbio, stripe->mirror_num, dev_replace); 1217 - if (zoned) 1218 - wait_scrub_stripe_io(stripe); 1219 - } 1190 + if (bbio) 1191 + scrub_submit_write_bio(sctx, stripe, bbio, dev_replace); 1220 1192 } 1221 1193 1222 1194 /*
+15 -1
fs/ext4/mballoc.c
··· 2062 2062 if (bex->fe_len < gex->fe_len) 2063 2063 return; 2064 2064 2065 - if (finish_group) 2065 + if (finish_group || ac->ac_found > sbi->s_mb_min_to_scan) 2066 2066 ext4_mb_use_best_found(ac, e4b); 2067 2067 } 2068 2068 ··· 2073 2073 * previous found extent and if new one is better, then it's stored 2074 2074 * in the context. Later, the best found extent will be used, if 2075 2075 * mballoc can't find good enough extent. 2076 + * 2077 + * The algorithm used is roughly as follows: 2078 + * 2079 + * * If free extent found is exactly as big as goal, then 2080 + * stop the scan and use it immediately 2081 + * 2082 + * * If free extent found is smaller than goal, then keep retrying 2083 + * upto a max of sbi->s_mb_max_to_scan times (default 200). After 2084 + * that stop scanning and use whatever we have. 2085 + * 2086 + * * If free extent found is bigger than goal, then keep retrying 2087 + * upto a max of sbi->s_mb_min_to_scan times (default 10) before 2088 + * stopping the scan and using the extent. 2089 + * 2076 2090 * 2077 2091 * FIXME: real allocation policy is to be designed yet! 2078 2092 */
+14 -3
fs/gfs2/file.c
··· 784 784 if (!user_backed_iter(i)) 785 785 return false; 786 786 787 + /* 788 + * Try to fault in multiple pages initially. When that doesn't result 789 + * in any progress, fall back to a single page. 790 + */ 787 791 size = PAGE_SIZE; 788 792 offs = offset_in_page(iocb->ki_pos); 789 - if (*prev_count != count || !*window_size) { 793 + if (*prev_count != count) { 790 794 size_t nr_dirtied; 791 795 792 796 nr_dirtied = max(current->nr_dirtied_pause - ··· 874 870 struct gfs2_inode *ip = GFS2_I(inode); 875 871 size_t prev_count = 0, window_size = 0; 876 872 size_t written = 0; 873 + bool enough_retries; 877 874 ssize_t ret; 878 875 879 876 /* ··· 918 913 if (ret > 0) 919 914 written = ret; 920 915 916 + enough_retries = prev_count == iov_iter_count(from) && 917 + window_size <= PAGE_SIZE; 921 918 if (should_fault_in_pages(from, iocb, &prev_count, &window_size)) { 922 919 gfs2_glock_dq(gh); 923 920 window_size -= fault_in_iov_iter_readable(from, window_size); 924 - if (window_size) 925 - goto retry; 921 + if (window_size) { 922 + if (!enough_retries) 923 + goto retry; 924 + /* fall back to buffered I/O */ 925 + ret = 0; 926 + } 926 927 } 927 928 out_unlock: 928 929 if (gfs2_holder_queued(gh))
+1 -6
fs/nfsd/nfsctl.c
··· 690 690 if (err != 0 || fd < 0) 691 691 return -EINVAL; 692 692 693 - if (svc_alien_sock(net, fd)) { 694 - printk(KERN_ERR "%s: socket net is different to NFSd's one\n", __func__); 695 - return -EINVAL; 696 - } 697 - 698 693 err = nfsd_create_serv(net); 699 694 if (err != 0) 700 695 return err; 701 696 702 - err = svc_addsock(nn->nfsd_serv, fd, buf, SIMPLE_TRANSACTION_LIMIT, cred); 697 + err = svc_addsock(nn->nfsd_serv, net, fd, buf, SIMPLE_TRANSACTION_LIMIT, cred); 703 698 704 699 if (err >= 0 && 705 700 !nn->nfsd_serv->sv_nrthreads && !xchg(&nn->keep_active, 1))
+9 -1
fs/nfsd/vfs.c
··· 536 536 537 537 inode_lock(inode); 538 538 for (retries = 1;;) { 539 - host_err = __nfsd_setattr(dentry, iap); 539 + struct iattr attrs; 540 + 541 + /* 542 + * notify_change() can alter its iattr argument, making 543 + * @iap unsuitable for submission multiple times. Make a 544 + * copy for every loop iteration. 545 + */ 546 + attrs = *iap; 547 + host_err = __nfsd_setattr(dentry, &attrs); 540 548 if (host_err != -EAGAIN || !retries--) 541 549 break; 542 550 if (!nfsd_wait_for_delegreturn(rqstp, inode))
+4 -1
fs/xfs/libxfs/xfs_ag.c
··· 984 984 if (err2 != -ENOSPC) 985 985 goto resv_err; 986 986 987 - __xfs_free_extent_later(*tpp, args.fsbno, delta, NULL, true); 987 + err2 = __xfs_free_extent_later(*tpp, args.fsbno, delta, NULL, 988 + true); 989 + if (err2) 990 + goto resv_err; 988 991 989 992 /* 990 993 * Roll the transaction before trying to re-init the per-ag
+65 -26
fs/xfs/libxfs/xfs_alloc.c
··· 628 628 return 0; 629 629 } 630 630 631 + /* 632 + * We do not verify the AGFL contents against AGF-based index counters here, 633 + * even though we may have access to the perag that contains shadow copies. We 634 + * don't know if the AGF based counters have been checked, and if they have they 635 + * still may be inconsistent because they haven't yet been reset on the first 636 + * allocation after the AGF has been read in. 637 + * 638 + * This means we can only check that all agfl entries contain valid or null 639 + * values because we can't reliably determine the active range to exclude 640 + * NULLAGBNO as a valid value. 641 + * 642 + * However, we can't even do that for v4 format filesystems because there are 643 + * old versions of mkfs out there that does not initialise the AGFL to known, 644 + * verifiable values. HEnce we can't tell the difference between a AGFL block 645 + * allocated by mkfs and a corrupted AGFL block here on v4 filesystems. 646 + * 647 + * As a result, we can only fully validate AGFL block numbers when we pull them 648 + * from the freelist in xfs_alloc_get_freelist(). 649 + */ 631 650 static xfs_failaddr_t 632 651 xfs_agfl_verify( 633 652 struct xfs_buf *bp) ··· 656 637 __be32 *agfl_bno = xfs_buf_to_agfl_bno(bp); 657 638 int i; 658 639 659 - /* 660 - * There is no verification of non-crc AGFLs because mkfs does not 661 - * initialise the AGFL to zero or NULL. Hence the only valid part of the 662 - * AGFL is what the AGF says is active. We can't get to the AGF, so we 663 - * can't verify just those entries are valid. 664 - */ 665 640 if (!xfs_has_crc(mp)) 666 641 return NULL; 667 642 ··· 2334 2321 } 2335 2322 2336 2323 /* 2337 - * Check the agfl fields of the agf for inconsistency or corruption. The purpose 2338 - * is to detect an agfl header padding mismatch between current and early v5 2339 - * kernels. 
This problem manifests as a 1-slot size difference between the 2340 - * on-disk flcount and the active [first, last] range of a wrapped agfl. This 2341 - * may also catch variants of agfl count corruption unrelated to padding. Either 2342 - * way, we'll reset the agfl and warn the user. 2324 + * Check the agfl fields of the agf for inconsistency or corruption. 2325 + * 2326 + * The original purpose was to detect an agfl header padding mismatch between 2327 + * current and early v5 kernels. This problem manifests as a 1-slot size 2328 + * difference between the on-disk flcount and the active [first, last] range of 2329 + * a wrapped agfl. 2330 + * 2331 + * However, we need to use these same checks to catch agfl count corruptions 2332 + * unrelated to padding. This could occur on any v4 or v5 filesystem, so either 2333 + * way, we need to reset the agfl and warn the user. 2343 2334 * 2344 2335 * Return true if a reset is required before the agfl can be used, false 2345 2336 * otherwise. ··· 2358 2341 uint32_t c = be32_to_cpu(agf->agf_flcount); 2359 2342 int agfl_size = xfs_agfl_size(mp); 2360 2343 int active; 2361 - 2362 - /* no agfl header on v4 supers */ 2363 - if (!xfs_has_crc(mp)) 2364 - return false; 2365 2344 2366 2345 /* 2367 2346 * The agf read verifier catches severe corruption of these fields. ··· 2431 2418 * the real allocation can proceed. Deferring the free disconnects freeing up 2432 2419 * the AGFL slot from freeing the block. 
2433 2420 */ 2434 - STATIC void 2421 + static int 2435 2422 xfs_defer_agfl_block( 2436 2423 struct xfs_trans *tp, 2437 2424 xfs_agnumber_t agno, ··· 2450 2437 xefi->xefi_blockcount = 1; 2451 2438 xefi->xefi_owner = oinfo->oi_owner; 2452 2439 2440 + if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbno(mp, xefi->xefi_startblock))) 2441 + return -EFSCORRUPTED; 2442 + 2453 2443 trace_xfs_agfl_free_defer(mp, agno, 0, agbno, 1); 2454 2444 2455 2445 xfs_extent_free_get_group(mp, xefi); 2456 2446 xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_AGFL_FREE, &xefi->xefi_list); 2447 + return 0; 2457 2448 } 2458 2449 2459 2450 /* 2460 2451 * Add the extent to the list of extents to be free at transaction end. 2461 2452 * The list is maintained sorted (by block number). 2462 2453 */ 2463 - void 2454 + int 2464 2455 __xfs_free_extent_later( 2465 2456 struct xfs_trans *tp, 2466 2457 xfs_fsblock_t bno, ··· 2491 2474 #endif 2492 2475 ASSERT(xfs_extfree_item_cache != NULL); 2493 2476 2477 + if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbext(mp, bno, len))) 2478 + return -EFSCORRUPTED; 2479 + 2494 2480 xefi = kmem_cache_zalloc(xfs_extfree_item_cache, 2495 2481 GFP_KERNEL | __GFP_NOFAIL); 2496 2482 xefi->xefi_startblock = bno; ··· 2517 2497 2518 2498 xfs_extent_free_get_group(mp, xefi); 2519 2499 xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_FREE, &xefi->xefi_list); 2500 + return 0; 2520 2501 } 2521 2502 2522 2503 #ifdef DEBUG ··· 2678 2657 goto out_agbp_relse; 2679 2658 2680 2659 /* defer agfl frees */ 2681 - xfs_defer_agfl_block(tp, args->agno, bno, &targs.oinfo); 2660 + error = xfs_defer_agfl_block(tp, args->agno, bno, &targs.oinfo); 2661 + if (error) 2662 + goto out_agbp_relse; 2682 2663 } 2683 2664 2684 2665 targs.tp = tp; ··· 2790 2767 */ 2791 2768 agfl_bno = xfs_buf_to_agfl_bno(agflbp); 2792 2769 bno = be32_to_cpu(agfl_bno[be32_to_cpu(agf->agf_flfirst)]); 2770 + if (XFS_IS_CORRUPT(tp->t_mountp, !xfs_verify_agbno(pag, bno))) 2771 + return -EFSCORRUPTED; 2772 + 2793 2773 be32_add_cpu(&agf->agf_flfirst, 1); 2794 2774 
xfs_trans_brelse(tp, agflbp); 2795 2775 if (be32_to_cpu(agf->agf_flfirst) == xfs_agfl_size(mp)) ··· 2915 2889 return 0; 2916 2890 } 2917 2891 2892 + /* 2893 + * Verify the AGF is consistent. 2894 + * 2895 + * We do not verify the AGFL indexes in the AGF are fully consistent here 2896 + * because of issues with variable on-disk structure sizes. Instead, we check 2897 + * the agfl indexes for consistency when we initialise the perag from the AGF 2898 + * information after a read completes. 2899 + * 2900 + * If the index is inconsistent, then we mark the perag as needing an AGFL 2901 + * reset. The first AGFL update performed then resets the AGFL indexes and 2902 + * refills the AGFL with known good free blocks, allowing the filesystem to 2903 + * continue operating normally at the cost of a few leaked free space blocks. 2904 + */ 2918 2905 static xfs_failaddr_t 2919 2906 xfs_agf_verify( 2920 2907 struct xfs_buf *bp) ··· 3001 2962 return __this_address; 3002 2963 3003 2964 return NULL; 3004 - 3005 2965 } 3006 2966 3007 2967 static void ··· 3225 3187 */ 3226 3188 static int 3227 3189 xfs_alloc_vextent_prepare_ag( 3228 - struct xfs_alloc_arg *args) 3190 + struct xfs_alloc_arg *args, 3191 + uint32_t flags) 3229 3192 { 3230 3193 bool need_pag = !args->pag; 3231 3194 int error; ··· 3235 3196 args->pag = xfs_perag_get(args->mp, args->agno); 3236 3197 3237 3198 args->agbp = NULL; 3238 - error = xfs_alloc_fix_freelist(args, 0); 3199 + error = xfs_alloc_fix_freelist(args, flags); 3239 3200 if (error) { 3240 3201 trace_xfs_alloc_vextent_nofix(args); 3241 3202 if (need_pag) ··· 3375 3336 return error; 3376 3337 } 3377 3338 3378 - error = xfs_alloc_vextent_prepare_ag(args); 3339 + error = xfs_alloc_vextent_prepare_ag(args, 0); 3379 3340 if (!error && args->agbp) 3380 3341 error = xfs_alloc_ag_vextent_size(args); 3381 3342 ··· 3419 3380 for_each_perag_wrap_range(mp, start_agno, restart_agno, 3420 3381 mp->m_sb.sb_agcount, agno, args->pag) { 3421 3382 args->agno = agno; 3422 - 
error = xfs_alloc_vextent_prepare_ag(args); 3383 + error = xfs_alloc_vextent_prepare_ag(args, flags); 3423 3384 if (error) 3424 3385 break; 3425 3386 if (!args->agbp) { ··· 3585 3546 return error; 3586 3547 } 3587 3548 3588 - error = xfs_alloc_vextent_prepare_ag(args); 3549 + error = xfs_alloc_vextent_prepare_ag(args, 0); 3589 3550 if (!error && args->agbp) 3590 3551 error = xfs_alloc_ag_vextent_exact(args); 3591 3552 ··· 3626 3587 if (needs_perag) 3627 3588 args->pag = xfs_perag_grab(mp, args->agno); 3628 3589 3629 - error = xfs_alloc_vextent_prepare_ag(args); 3590 + error = xfs_alloc_vextent_prepare_ag(args, 0); 3630 3591 if (!error && args->agbp) 3631 3592 error = xfs_alloc_ag_vextent_near(args); 3632 3593
+3 -3
fs/xfs/libxfs/xfs_alloc.h
··· 230 230 return bp->b_addr; 231 231 } 232 232 233 - void __xfs_free_extent_later(struct xfs_trans *tp, xfs_fsblock_t bno, 233 + int __xfs_free_extent_later(struct xfs_trans *tp, xfs_fsblock_t bno, 234 234 xfs_filblks_t len, const struct xfs_owner_info *oinfo, 235 235 bool skip_discard); 236 236 ··· 254 254 #define XFS_EFI_ATTR_FORK (1U << 1) /* freeing attr fork block */ 255 255 #define XFS_EFI_BMBT_BLOCK (1U << 2) /* freeing bmap btree block */ 256 256 257 - static inline void 257 + static inline int 258 258 xfs_free_extent_later( 259 259 struct xfs_trans *tp, 260 260 xfs_fsblock_t bno, 261 261 xfs_filblks_t len, 262 262 const struct xfs_owner_info *oinfo) 263 263 { 264 - __xfs_free_extent_later(tp, bno, len, oinfo, false); 264 + return __xfs_free_extent_later(tp, bno, len, oinfo, false); 265 265 } 266 266 267 267
+8 -2
fs/xfs/libxfs/xfs_bmap.c
··· 572 572 cblock = XFS_BUF_TO_BLOCK(cbp); 573 573 if ((error = xfs_btree_check_block(cur, cblock, 0, cbp))) 574 574 return error; 575 + 575 576 xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork); 576 - xfs_free_extent_later(cur->bc_tp, cbno, 1, &oinfo); 577 + error = xfs_free_extent_later(cur->bc_tp, cbno, 1, &oinfo); 578 + if (error) 579 + return error; 580 + 577 581 ip->i_nblocks--; 578 582 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L); 579 583 xfs_trans_binval(tp, cbp); ··· 5234 5230 if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) { 5235 5231 xfs_refcount_decrease_extent(tp, del); 5236 5232 } else { 5237 - __xfs_free_extent_later(tp, del->br_startblock, 5233 + error = __xfs_free_extent_later(tp, del->br_startblock, 5238 5234 del->br_blockcount, NULL, 5239 5235 (bflags & XFS_BMAPI_NODISCARD) || 5240 5236 del->br_state == XFS_EXT_UNWRITTEN); 5237 + if (error) 5238 + goto done; 5241 5239 } 5242 5240 } 5243 5241
+5 -2
fs/xfs/libxfs/xfs_bmap_btree.c
··· 268 268 struct xfs_trans *tp = cur->bc_tp; 269 269 xfs_fsblock_t fsbno = XFS_DADDR_TO_FSB(mp, xfs_buf_daddr(bp)); 270 270 struct xfs_owner_info oinfo; 271 + int error; 271 272 272 273 xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, cur->bc_ino.whichfork); 273 - xfs_free_extent_later(cur->bc_tp, fsbno, 1, &oinfo); 274 - ip->i_nblocks--; 274 + error = xfs_free_extent_later(cur->bc_tp, fsbno, 1, &oinfo); 275 + if (error) 276 + return error; 275 277 278 + ip->i_nblocks--; 276 279 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 277 280 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L); 278 281 return 0;
+16 -8
fs/xfs/libxfs/xfs_ialloc.c
··· 1834 1834 * might be sparse and only free the regions that are allocated as part of the 1835 1835 * chunk. 1836 1836 */ 1837 - STATIC void 1837 + static int 1838 1838 xfs_difree_inode_chunk( 1839 1839 struct xfs_trans *tp, 1840 1840 xfs_agnumber_t agno, ··· 1851 1851 1852 1852 if (!xfs_inobt_issparse(rec->ir_holemask)) { 1853 1853 /* not sparse, calculate extent info directly */ 1854 - xfs_free_extent_later(tp, XFS_AGB_TO_FSB(mp, agno, sagbno), 1855 - M_IGEO(mp)->ialloc_blks, 1856 - &XFS_RMAP_OINFO_INODES); 1857 - return; 1854 + return xfs_free_extent_later(tp, 1855 + XFS_AGB_TO_FSB(mp, agno, sagbno), 1856 + M_IGEO(mp)->ialloc_blks, 1857 + &XFS_RMAP_OINFO_INODES); 1858 1858 } 1859 1859 1860 1860 /* holemask is only 16-bits (fits in an unsigned long) */ ··· 1871 1871 XFS_INOBT_HOLEMASK_BITS); 1872 1872 nextbit = startidx + 1; 1873 1873 while (startidx < XFS_INOBT_HOLEMASK_BITS) { 1874 + int error; 1875 + 1874 1876 nextbit = find_next_zero_bit(holemask, XFS_INOBT_HOLEMASK_BITS, 1875 1877 nextbit); 1876 1878 /* ··· 1898 1896 1899 1897 ASSERT(agbno % mp->m_sb.sb_spino_align == 0); 1900 1898 ASSERT(contigblk % mp->m_sb.sb_spino_align == 0); 1901 - xfs_free_extent_later(tp, XFS_AGB_TO_FSB(mp, agno, agbno), 1902 - contigblk, &XFS_RMAP_OINFO_INODES); 1899 + error = xfs_free_extent_later(tp, 1900 + XFS_AGB_TO_FSB(mp, agno, agbno), 1901 + contigblk, &XFS_RMAP_OINFO_INODES); 1902 + if (error) 1903 + return error; 1903 1904 1904 1905 /* reset range to current bit and carry on... */ 1905 1906 startidx = endidx = nextbit; ··· 1910 1905 next: 1911 1906 nextbit++; 1912 1907 } 1908 + return 0; 1913 1909 } 1914 1910 1915 1911 STATIC int ··· 2009 2003 goto error0; 2010 2004 } 2011 2005 2012 - xfs_difree_inode_chunk(tp, pag->pag_agno, &rec); 2006 + error = xfs_difree_inode_chunk(tp, pag->pag_agno, &rec); 2007 + if (error) 2008 + goto error0; 2013 2009 } else { 2014 2010 xic->deleted = false; 2015 2011
+8 -1
fs/xfs/libxfs/xfs_log_format.h
··· 324 324 #define XFS_ILOG_DOWNER 0x200 /* change the data fork owner on replay */ 325 325 #define XFS_ILOG_AOWNER 0x400 /* change the attr fork owner on replay */ 326 326 327 - 328 327 /* 329 328 * The timestamps are dirty, but not necessarily anything else in the inode 330 329 * core. Unlike the other fields above this one must never make it to disk ··· 331 332 * ili_fields in the inode_log_item. 332 333 */ 333 334 #define XFS_ILOG_TIMESTAMP 0x4000 335 + 336 + /* 337 + * The version field has been changed, but not necessarily anything else of 338 + * interest. This must never make it to disk - it is used purely to ensure that 339 + * the inode item ->precommit operation can update the fsync flag triggers 340 + * in the inode item correctly. 341 + */ 342 + #define XFS_ILOG_IVERSION 0x8000 334 343 335 344 #define XFS_ILOG_NONCORE (XFS_ILOG_DDATA | XFS_ILOG_DEXT | \ 336 345 XFS_ILOG_DBROOT | XFS_ILOG_DEV | \
+10 -3
fs/xfs/libxfs/xfs_refcount.c
··· 1151 1151 fsbno = XFS_AGB_TO_FSB(cur->bc_mp, 1152 1152 cur->bc_ag.pag->pag_agno, 1153 1153 tmp.rc_startblock); 1154 - xfs_free_extent_later(cur->bc_tp, fsbno, 1154 + error = xfs_free_extent_later(cur->bc_tp, fsbno, 1155 1155 tmp.rc_blockcount, NULL); 1156 + if (error) 1157 + goto out_error; 1156 1158 } 1157 1159 1158 1160 (*agbno) += tmp.rc_blockcount; ··· 1212 1210 fsbno = XFS_AGB_TO_FSB(cur->bc_mp, 1213 1211 cur->bc_ag.pag->pag_agno, 1214 1212 ext.rc_startblock); 1215 - xfs_free_extent_later(cur->bc_tp, fsbno, 1213 + error = xfs_free_extent_later(cur->bc_tp, fsbno, 1216 1214 ext.rc_blockcount, NULL); 1215 + if (error) 1216 + goto out_error; 1217 1217 } 1218 1218 1219 1219 skip: ··· 1980 1976 rr->rr_rrec.rc_blockcount); 1981 1977 1982 1978 /* Free the block. */ 1983 - xfs_free_extent_later(tp, fsb, rr->rr_rrec.rc_blockcount, NULL); 1979 + error = xfs_free_extent_later(tp, fsb, 1980 + rr->rr_rrec.rc_blockcount, NULL); 1981 + if (error) 1982 + goto out_trans; 1984 1983 1985 1984 error = xfs_trans_commit(tp); 1986 1985 if (error)
+8 -105
fs/xfs/libxfs/xfs_trans_inode.c
··· 40 40 iip->ili_lock_flags = lock_flags; 41 41 ASSERT(!xfs_iflags_test(ip, XFS_ISTALE)); 42 42 43 - /* 44 - * Get a log_item_desc to point at the new item. 45 - */ 43 + /* Reset the per-tx dirty context and add the item to the tx. */ 44 + iip->ili_dirty_flags = 0; 46 45 xfs_trans_add_item(tp, &iip->ili_item); 47 46 } 48 47 ··· 75 76 /* 76 77 * This is called to mark the fields indicated in fieldmask as needing to be 77 78 * logged when the transaction is committed. The inode must already be 78 - * associated with the given transaction. 79 - * 80 - * The values for fieldmask are defined in xfs_inode_item.h. We always log all 81 - * of the core inode if any of it has changed, and we always log all of the 82 - * inline data/extents/b-tree root if any of them has changed. 83 - * 84 - * Grab and pin the cluster buffer associated with this inode to avoid RMW 85 - * cycles at inode writeback time. Avoid the need to add error handling to every 86 - * xfs_trans_log_inode() call by shutting down on read error. This will cause 87 - * transactions to fail and everything to error out, just like if we return a 88 - * read error in a dirty transaction and cancel it. 79 + * associated with the given transaction. All we do here is record where the 80 + * inode was dirtied and mark the transaction and inode log item dirty; 81 + * everything else is done in the ->precommit log item operation after the 82 + * changes in the transaction have been completed. 
89 83 */ 90 84 void 91 85 xfs_trans_log_inode( ··· 88 96 { 89 97 struct xfs_inode_log_item *iip = ip->i_itemp; 90 98 struct inode *inode = VFS_I(ip); 91 - uint iversion_flags = 0; 92 99 93 100 ASSERT(iip); 94 101 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 95 102 ASSERT(!xfs_iflags_test(ip, XFS_ISTALE)); 96 103 97 104 tp->t_flags |= XFS_TRANS_DIRTY; 98 - 99 - /* 100 - * Don't bother with i_lock for the I_DIRTY_TIME check here, as races 101 - * don't matter - we either will need an extra transaction in 24 hours 102 - * to log the timestamps, or will clear already cleared fields in the 103 - * worst case. 104 - */ 105 - if (inode->i_state & I_DIRTY_TIME) { 106 - spin_lock(&inode->i_lock); 107 - inode->i_state &= ~I_DIRTY_TIME; 108 - spin_unlock(&inode->i_lock); 109 - } 110 105 111 106 /* 112 107 * First time we log the inode in a transaction, bump the inode change ··· 107 128 if (!test_and_set_bit(XFS_LI_DIRTY, &iip->ili_item.li_flags)) { 108 129 if (IS_I_VERSION(inode) && 109 130 inode_maybe_inc_iversion(inode, flags & XFS_ILOG_CORE)) 110 - iversion_flags = XFS_ILOG_CORE; 131 + flags |= XFS_ILOG_IVERSION; 111 132 } 112 133 113 - /* 114 - * If we're updating the inode core or the timestamps and it's possible 115 - * to upgrade this inode to bigtime format, do so now. 116 - */ 117 - if ((flags & (XFS_ILOG_CORE | XFS_ILOG_TIMESTAMP)) && 118 - xfs_has_bigtime(ip->i_mount) && 119 - !xfs_inode_has_bigtime(ip)) { 120 - ip->i_diflags2 |= XFS_DIFLAG2_BIGTIME; 121 - flags |= XFS_ILOG_CORE; 122 - } 123 - 124 - /* 125 - * Inode verifiers do not check that the extent size hint is an integer 126 - * multiple of the rt extent size on a directory with both rtinherit 127 - * and extszinherit flags set. If we're logging a directory that is 128 - * misconfigured in this way, clear the hint. 
129 - */ 130 - if ((ip->i_diflags & XFS_DIFLAG_RTINHERIT) && 131 - (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) && 132 - (ip->i_extsize % ip->i_mount->m_sb.sb_rextsize) > 0) { 133 - ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE | 134 - XFS_DIFLAG_EXTSZINHERIT); 135 - ip->i_extsize = 0; 136 - flags |= XFS_ILOG_CORE; 137 - } 138 - 139 - /* 140 - * Record the specific change for fdatasync optimisation. This allows 141 - * fdatasync to skip log forces for inodes that are only timestamp 142 - * dirty. 143 - */ 144 - spin_lock(&iip->ili_lock); 145 - iip->ili_fsync_fields |= flags; 146 - 147 - if (!iip->ili_item.li_buf) { 148 - struct xfs_buf *bp; 149 - int error; 150 - 151 - /* 152 - * We hold the ILOCK here, so this inode is not going to be 153 - * flushed while we are here. Further, because there is no 154 - * buffer attached to the item, we know that there is no IO in 155 - * progress, so nothing will clear the ili_fields while we read 156 - * in the buffer. Hence we can safely drop the spin lock and 157 - * read the buffer knowing that the state will not change from 158 - * here. 159 - */ 160 - spin_unlock(&iip->ili_lock); 161 - error = xfs_imap_to_bp(ip->i_mount, tp, &ip->i_imap, &bp); 162 - if (error) { 163 - xfs_force_shutdown(ip->i_mount, SHUTDOWN_META_IO_ERROR); 164 - return; 165 - } 166 - 167 - /* 168 - * We need an explicit buffer reference for the log item but 169 - * don't want the buffer to remain attached to the transaction. 170 - * Hold the buffer but release the transaction reference once 171 - * we've attached the inode log item to the buffer log item 172 - * list. 173 - */ 174 - xfs_buf_hold(bp); 175 - spin_lock(&iip->ili_lock); 176 - iip->ili_item.li_buf = bp; 177 - bp->b_flags |= _XBF_INODES; 178 - list_add_tail(&iip->ili_item.li_bio_list, &bp->b_li_list); 179 - xfs_trans_brelse(tp, bp); 180 - } 181 - 182 - /* 183 - * Always OR in the bits from the ili_last_fields field. 
This is to 184 - * coordinate with the xfs_iflush() and xfs_buf_inode_iodone() routines 185 - * in the eventual clearing of the ili_fields bits. See the big comment 186 - * in xfs_iflush() for an explanation of this coordination mechanism. 187 - */ 188 - iip->ili_fields |= (flags | iip->ili_last_fields | iversion_flags); 189 - spin_unlock(&iip->ili_lock); 134 + iip->ili_dirty_flags |= flags; 190 135 } 191 136 192 137 int
+13 -12
fs/xfs/scrub/bmap.c
··· 769 769 * mapping or false if there are no more mappings. Caller must ensure that 770 770 * @info.icur is zeroed before the first call. 771 771 */ 772 - static int 772 + static bool 773 773 xchk_bmap_iext_iter( 774 774 struct xchk_bmap_info *info, 775 775 struct xfs_bmbt_irec *irec) 776 776 { 777 777 struct xfs_bmbt_irec got; 778 778 struct xfs_ifork *ifp; 779 - xfs_filblks_t prev_len; 779 + unsigned int nr = 0; 780 780 781 781 ifp = xfs_ifork_ptr(info->sc->ip, info->whichfork); 782 782 ··· 790 790 irec->br_startoff); 791 791 return false; 792 792 } 793 + nr++; 793 794 794 795 /* 795 796 * Iterate subsequent iextent records and merge them with the one 796 797 * that we just read, if possible. 797 798 */ 798 - prev_len = irec->br_blockcount; 799 799 while (xfs_iext_peek_next_extent(ifp, &info->icur, &got)) { 800 800 if (!xchk_are_bmaps_contiguous(irec, &got)) 801 801 break; ··· 805 805 got.br_startoff); 806 806 return false; 807 807 } 808 - 809 - /* 810 - * Notify the user of mergeable records in the data or attr 811 - * forks. CoW forks only exist in memory so we ignore them. 812 - */ 813 - if (info->whichfork != XFS_COW_FORK && 814 - prev_len + got.br_blockcount > BMBT_BLOCKCOUNT_MASK) 815 - xchk_ino_set_preen(info->sc, info->sc->ip->i_ino); 808 + nr++; 816 809 817 810 irec->br_blockcount += got.br_blockcount; 818 - prev_len = got.br_blockcount; 819 811 xfs_iext_next(ifp, &info->icur); 820 812 } 813 + 814 + /* 815 + * If the merged mapping could be expressed with fewer bmbt records 816 + * than we actually found, notify the user that this fork could be 817 + * optimized. CoW forks only exist in memory so we ignore them. 818 + */ 819 + if (nr > 1 && info->whichfork != XFS_COW_FORK && 820 + howmany_64(irec->br_blockcount, XFS_MAX_BMBT_EXTLEN) < nr) 821 + xchk_ino_set_preen(info->sc, info->sc->ip->i_ino); 821 822 822 823 return true; 823 824 }
+4 -4
fs/xfs/scrub/scrub.h
··· 105 105 }; 106 106 107 107 /* XCHK state flags grow up from zero, XREP state flags grown down from 2^31 */ 108 - #define XCHK_TRY_HARDER (1 << 0) /* can't get resources, try again */ 109 - #define XCHK_FSGATES_DRAIN (1 << 2) /* defer ops draining enabled */ 110 - #define XCHK_NEED_DRAIN (1 << 3) /* scrub needs to drain defer ops */ 111 - #define XREP_ALREADY_FIXED (1 << 31) /* checking our repair work */ 108 + #define XCHK_TRY_HARDER (1U << 0) /* can't get resources, try again */ 109 + #define XCHK_FSGATES_DRAIN (1U << 2) /* defer ops draining enabled */ 110 + #define XCHK_NEED_DRAIN (1U << 3) /* scrub needs to drain defer ops */ 111 + #define XREP_ALREADY_FIXED (1U << 31) /* checking our repair work */ 112 112 113 113 /* 114 114 * The XCHK_FSGATES* flags reflect functionality in the main filesystem that
+65 -23
fs/xfs/xfs_buf_item.c
··· 452 452 * This is called to pin the buffer associated with the buf log item in memory 453 453 * so it cannot be written out. 454 454 * 455 - * We also always take a reference to the buffer log item here so that the bli 456 - * is held while the item is pinned in memory. This means that we can 457 - * unconditionally drop the reference count a transaction holds when the 458 - * transaction is completed. 455 + * We take a reference to the buffer log item here so that the BLI life cycle 456 + * extends at least until the buffer is unpinned via xfs_buf_item_unpin() and 457 + * inserted into the AIL. 458 + * 459 + * We also need to take a reference to the buffer itself as the BLI unpin 460 + * processing requires accessing the buffer after the BLI has dropped the final 461 + * BLI reference. See xfs_buf_item_unpin() for an explanation. 462 + * If unpins race to drop the final BLI reference and only the 463 + * BLI owns a reference to the buffer, then the loser of the race can have the 464 + * buffer fgreed from under it (e.g. on shutdown). Taking a buffer reference per 465 + * pin count ensures the life cycle of the buffer extends for as 466 + * long as we hold the buffer pin reference in xfs_buf_item_unpin(). 459 467 */ 460 468 STATIC void 461 469 xfs_buf_item_pin( ··· 478 470 479 471 trace_xfs_buf_item_pin(bip); 480 472 473 + xfs_buf_hold(bip->bli_buf); 481 474 atomic_inc(&bip->bli_refcount); 482 475 atomic_inc(&bip->bli_buf->b_pin_count); 483 476 } 484 477 485 478 /* 486 - * This is called to unpin the buffer associated with the buf log item which 487 - * was previously pinned with a call to xfs_buf_item_pin(). 479 + * This is called to unpin the buffer associated with the buf log item which was 480 + * previously pinned with a call to xfs_buf_item_pin(). We enter this function 481 + * with a buffer pin count, a buffer reference and a BLI reference. 
482 + * 483 + * We must drop the BLI reference before we unpin the buffer because the AIL 484 + * doesn't acquire a BLI reference whenever it accesses it. Therefore if the 485 + * refcount drops to zero, the bli could still be AIL resident and the buffer 486 + * submitted for I/O at any point before we return. This can result in IO 487 + * completion freeing the buffer while we are still trying to access it here. 488 + * This race condition can also occur in shutdown situations where we abort and 489 + * unpin buffers from contexts other that journal IO completion. 490 + * 491 + * Hence we have to hold a buffer reference per pin count to ensure that the 492 + * buffer cannot be freed until we have finished processing the unpin operation. 493 + * The reference is taken in xfs_buf_item_pin(), and we must hold it until we 494 + * are done processing the buffer state. In the case of an abort (remove = 495 + * true) then we re-use the current pin reference as the IO reference we hand 496 + * off to IO failure handling. 488 497 */ 489 498 STATIC void 490 499 xfs_buf_item_unpin( ··· 518 493 519 494 trace_xfs_buf_item_unpin(bip); 520 495 521 - /* 522 - * Drop the bli ref associated with the pin and grab the hold required 523 - * for the I/O simulation failure in the abort case. We have to do this 524 - * before the pin count drops because the AIL doesn't acquire a bli 525 - * reference. Therefore if the refcount drops to zero, the bli could 526 - * still be AIL resident and the buffer submitted for I/O (and freed on 527 - * completion) at any point before we return. This can be removed once 528 - * the AIL properly holds a reference on the bli. 
529 - */ 530 496 freed = atomic_dec_and_test(&bip->bli_refcount); 531 - if (freed && !stale && remove) 532 - xfs_buf_hold(bp); 533 497 if (atomic_dec_and_test(&bp->b_pin_count)) 534 498 wake_up_all(&bp->b_waiters); 535 499 536 - /* nothing to do but drop the pin count if the bli is active */ 537 - if (!freed) 500 + /* 501 + * Nothing to do but drop the buffer pin reference if the BLI is 502 + * still active. 503 + */ 504 + if (!freed) { 505 + xfs_buf_rele(bp); 538 506 return; 507 + } 539 508 540 509 if (stale) { 541 510 ASSERT(bip->bli_flags & XFS_BLI_STALE); ··· 540 521 ASSERT(!bp->b_transp); 541 522 542 523 trace_xfs_buf_item_unpin_stale(bip); 524 + 525 + /* 526 + * The buffer has been locked and referenced since it was marked 527 + * stale so we own both lock and reference exclusively here. We 528 + * do not need the pin reference any more, so drop it now so 529 + * that we only have one reference to drop once item completion 530 + * processing is complete. 531 + */ 532 + xfs_buf_rele(bp); 543 533 544 534 /* 545 535 * If we get called here because of an IO error, we may or may ··· 566 538 ASSERT(bp->b_log_item == NULL); 567 539 } 568 540 xfs_buf_relse(bp); 569 - } else if (remove) { 541 + return; 542 + } 543 + 544 + if (remove) { 570 545 /* 571 - * The buffer must be locked and held by the caller to simulate 572 - * an async I/O failure. We acquired the hold for this case 573 - * before the buffer was unpinned. 546 + * We need to simulate an async IO failure here to ensure that 547 + * the correct error completion is run on this buffer. This 548 + * requires a reference to the buffer and for the buffer to be 549 + * locked. We can safely pass ownership of the pin reference to 550 + * the IO to ensure that nothing can free the buffer while we 551 + * wait for the lock and then run the IO failure completion. 
574 552 */ 575 553 xfs_buf_lock(bp); 576 554 bp->b_flags |= XBF_ASYNC; 577 555 xfs_buf_ioend_fail(bp); 556 + return; 578 557 } 558 + 559 + /* 560 + * BLI has no more active references - it will be moved to the AIL to 561 + * manage the remaining BLI/buffer life cycle. There is nothing left for 562 + * us to do here so drop the pin reference to the buffer. 563 + */ 564 + xfs_buf_rele(bp); 579 565 } 580 566 581 567 STATIC uint
-1
fs/xfs/xfs_filestream.c
··· 78 78 *longest = 0; 79 79 err = xfs_bmap_longest_free_extent(pag, NULL, longest); 80 80 if (err) { 81 - xfs_perag_rele(pag); 82 81 if (err != -EAGAIN) 83 82 break; 84 83 /* Couldn't lock the AGF, skip this AG. */
+37 -9
fs/xfs/xfs_icache.c
··· 454 454 return ret; 455 455 } 456 456 457 + /* Wait for all queued work and collect errors */ 458 + static int 459 + xfs_inodegc_wait_all( 460 + struct xfs_mount *mp) 461 + { 462 + int cpu; 463 + int error = 0; 464 + 465 + flush_workqueue(mp->m_inodegc_wq); 466 + for_each_online_cpu(cpu) { 467 + struct xfs_inodegc *gc; 468 + 469 + gc = per_cpu_ptr(mp->m_inodegc, cpu); 470 + if (gc->error && !error) 471 + error = gc->error; 472 + gc->error = 0; 473 + } 474 + 475 + return error; 476 + } 477 + 457 478 /* 458 479 * Check the validity of the inode we just found it the cache 459 480 */ ··· 1512 1491 if (error) 1513 1492 return error; 1514 1493 1515 - xfs_inodegc_flush(mp); 1516 - return 0; 1494 + return xfs_inodegc_flush(mp); 1517 1495 } 1518 1496 1519 1497 /* 1520 1498 * Reclaim all the free space that we can by scheduling the background blockgc 1521 1499 * and inodegc workers immediately and waiting for them all to clear. 1522 1500 */ 1523 - void 1501 + int 1524 1502 xfs_blockgc_flush_all( 1525 1503 struct xfs_mount *mp) 1526 1504 { ··· 1540 1520 for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG) 1541 1521 flush_delayed_work(&pag->pag_blockgc_work); 1542 1522 1543 - xfs_inodegc_flush(mp); 1523 + return xfs_inodegc_flush(mp); 1544 1524 } 1545 1525 1546 1526 /* ··· 1862 1842 * This is the last chance to make changes to an otherwise unreferenced file 1863 1843 * before incore reclamation happens. 
1864 1844 */ 1865 - static void 1845 + static int 1866 1846 xfs_inodegc_inactivate( 1867 1847 struct xfs_inode *ip) 1868 1848 { 1849 + int error; 1850 + 1869 1851 trace_xfs_inode_inactivating(ip); 1870 - xfs_inactive(ip); 1852 + error = xfs_inactive(ip); 1871 1853 xfs_inodegc_set_reclaimable(ip); 1854 + return error; 1855 + 1872 1856 } 1873 1857 1874 1858 void ··· 1904 1880 1905 1881 WRITE_ONCE(gc->shrinker_hits, 0); 1906 1882 llist_for_each_entry_safe(ip, n, node, i_gclist) { 1883 + int error; 1884 + 1907 1885 xfs_iflags_set(ip, XFS_INACTIVATING); 1908 - xfs_inodegc_inactivate(ip); 1886 + error = xfs_inodegc_inactivate(ip); 1887 + if (error && !gc->error) 1888 + gc->error = error; 1909 1889 } 1910 1890 1911 1891 memalloc_nofs_restore(nofs_flag); ··· 1933 1905 * Force all currently queued inode inactivation work to run immediately and 1934 1906 * wait for the work to finish. 1935 1907 */ 1936 - void 1908 + int 1937 1909 xfs_inodegc_flush( 1938 1910 struct xfs_mount *mp) 1939 1911 { 1940 1912 xfs_inodegc_push(mp); 1941 1913 trace_xfs_inodegc_flush(mp, __return_address); 1942 - flush_workqueue(mp->m_inodegc_wq); 1914 + return xfs_inodegc_wait_all(mp); 1943 1915 } 1944 1916 1945 1917 /*
+2 -2
fs/xfs/xfs_icache.h
··· 62 62 unsigned int iwalk_flags); 63 63 int xfs_blockgc_free_quota(struct xfs_inode *ip, unsigned int iwalk_flags); 64 64 int xfs_blockgc_free_space(struct xfs_mount *mp, struct xfs_icwalk *icm); 65 - void xfs_blockgc_flush_all(struct xfs_mount *mp); 65 + int xfs_blockgc_flush_all(struct xfs_mount *mp); 66 66 67 67 void xfs_inode_set_eofblocks_tag(struct xfs_inode *ip); 68 68 void xfs_inode_clear_eofblocks_tag(struct xfs_inode *ip); ··· 80 80 81 81 void xfs_inodegc_worker(struct work_struct *work); 82 82 void xfs_inodegc_push(struct xfs_mount *mp); 83 - void xfs_inodegc_flush(struct xfs_mount *mp); 83 + int xfs_inodegc_flush(struct xfs_mount *mp); 84 84 void xfs_inodegc_stop(struct xfs_mount *mp); 85 85 void xfs_inodegc_start(struct xfs_mount *mp); 86 86 void xfs_inodegc_cpu_dead(struct xfs_mount *mp, unsigned int cpu);
+6 -14
fs/xfs/xfs_inode.c
··· 1620 1620 */ 1621 1621 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1); 1622 1622 1623 - /* 1624 - * Just ignore errors at this point. There is nothing we can do except 1625 - * to try to keep going. Make sure it's not a silent error. 1626 - */ 1627 - error = xfs_trans_commit(tp); 1628 - if (error) 1629 - xfs_notice(mp, "%s: xfs_trans_commit returned error %d", 1630 - __func__, error); 1631 - 1632 - return 0; 1623 + return xfs_trans_commit(tp); 1633 1624 } 1634 1625 1635 1626 /* ··· 1684 1693 * now be truncated. Also, we clear all of the read-ahead state 1685 1694 * kept for the inode here since the file is now closed. 1686 1695 */ 1687 - void 1696 + int 1688 1697 xfs_inactive( 1689 1698 xfs_inode_t *ip) 1690 1699 { 1691 1700 struct xfs_mount *mp; 1692 - int error; 1701 + int error = 0; 1693 1702 int truncate = 0; 1694 1703 1695 1704 /* ··· 1727 1736 * reference to the inode at this point anyways. 1728 1737 */ 1729 1738 if (xfs_can_free_eofblocks(ip, true)) 1730 - xfs_free_eofblocks(ip); 1739 + error = xfs_free_eofblocks(ip); 1731 1740 1732 1741 goto out; 1733 1742 } ··· 1764 1773 /* 1765 1774 * Free the inode. 1766 1775 */ 1767 - xfs_inactive_ifree(ip); 1776 + error = xfs_inactive_ifree(ip); 1768 1777 1769 1778 out: 1770 1779 /* ··· 1772 1781 * the attached dquots. 1773 1782 */ 1774 1783 xfs_qm_dqdetach(ip); 1784 + return error; 1775 1785 } 1776 1786 1777 1787 /*
+1 -1
fs/xfs/xfs_inode.h
··· 470 470 (xfs_has_grpid((pip)->i_mount) || (VFS_I(pip)->i_mode & S_ISGID)) 471 471 472 472 int xfs_release(struct xfs_inode *ip); 473 - void xfs_inactive(struct xfs_inode *ip); 473 + int xfs_inactive(struct xfs_inode *ip); 474 474 int xfs_lookup(struct xfs_inode *dp, const struct xfs_name *name, 475 475 struct xfs_inode **ipp, struct xfs_name *ci_name); 476 476 int xfs_create(struct mnt_idmap *idmap,
+149
fs/xfs/xfs_inode_item.c
··· 29 29 return container_of(lip, struct xfs_inode_log_item, ili_item); 30 30 } 31 31 32 + static uint64_t 33 + xfs_inode_item_sort( 34 + struct xfs_log_item *lip) 35 + { 36 + return INODE_ITEM(lip)->ili_inode->i_ino; 37 + } 38 + 39 + /* 40 + * Prior to finally logging the inode, we have to ensure that all the 41 + * per-modification inode state changes are applied. This includes VFS inode 42 + * state updates, format conversions, verifier state synchronisation and 43 + * ensuring the inode buffer remains in memory whilst the inode is dirty. 44 + * 45 + * We have to be careful when we grab the inode cluster buffer due to lock 46 + * ordering constraints. The unlinked inode modifications (xfs_iunlink_item) 47 + * require AGI -> inode cluster buffer lock order. The inode cluster buffer is 48 + * not locked until ->precommit, so it happens after everything else has been 49 + * modified. 50 + * 51 + * Further, we have AGI -> AGF lock ordering, and with O_TMPFILE handling we 52 + * have AGI -> AGF -> iunlink item -> inode cluster buffer lock order. Hence we 53 + * cannot safely lock the inode cluster buffer in xfs_trans_log_inode() because 54 + * it can be called on a inode (e.g. via bumplink/droplink) before we take the 55 + * AGF lock modifying directory blocks. 56 + * 57 + * Rather than force a complete rework of all the transactions to call 58 + * xfs_trans_log_inode() once and once only at the end of every transaction, we 59 + * move the pinning of the inode cluster buffer to a ->precommit operation. This 60 + * matches how the xfs_iunlink_item locks the inode cluster buffer, and it 61 + * ensures that the inode cluster buffer locking is always done last in a 62 + * transaction. i.e. we ensure the lock order is always AGI -> AGF -> inode 63 + * cluster buffer. 
64 + * 65 + * If we return the inode number as the precommit sort key then we'll also 66 + * guarantee that the order all inode cluster buffer locking is the same all the 67 + * inodes and unlink items in the transaction. 68 + */ 69 + static int 70 + xfs_inode_item_precommit( 71 + struct xfs_trans *tp, 72 + struct xfs_log_item *lip) 73 + { 74 + struct xfs_inode_log_item *iip = INODE_ITEM(lip); 75 + struct xfs_inode *ip = iip->ili_inode; 76 + struct inode *inode = VFS_I(ip); 77 + unsigned int flags = iip->ili_dirty_flags; 78 + 79 + /* 80 + * Don't bother with i_lock for the I_DIRTY_TIME check here, as races 81 + * don't matter - we either will need an extra transaction in 24 hours 82 + * to log the timestamps, or will clear already cleared fields in the 83 + * worst case. 84 + */ 85 + if (inode->i_state & I_DIRTY_TIME) { 86 + spin_lock(&inode->i_lock); 87 + inode->i_state &= ~I_DIRTY_TIME; 88 + spin_unlock(&inode->i_lock); 89 + } 90 + 91 + /* 92 + * If we're updating the inode core or the timestamps and it's possible 93 + * to upgrade this inode to bigtime format, do so now. 94 + */ 95 + if ((flags & (XFS_ILOG_CORE | XFS_ILOG_TIMESTAMP)) && 96 + xfs_has_bigtime(ip->i_mount) && 97 + !xfs_inode_has_bigtime(ip)) { 98 + ip->i_diflags2 |= XFS_DIFLAG2_BIGTIME; 99 + flags |= XFS_ILOG_CORE; 100 + } 101 + 102 + /* 103 + * Inode verifiers do not check that the extent size hint is an integer 104 + * multiple of the rt extent size on a directory with both rtinherit 105 + * and extszinherit flags set. If we're logging a directory that is 106 + * misconfigured in this way, clear the hint. 107 + */ 108 + if ((ip->i_diflags & XFS_DIFLAG_RTINHERIT) && 109 + (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) && 110 + (ip->i_extsize % ip->i_mount->m_sb.sb_rextsize) > 0) { 111 + ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE | 112 + XFS_DIFLAG_EXTSZINHERIT); 113 + ip->i_extsize = 0; 114 + flags |= XFS_ILOG_CORE; 115 + } 116 + 117 + /* 118 + * Record the specific change for fdatasync optimisation. 
This allows 119 + * fdatasync to skip log forces for inodes that are only timestamp 120 + * dirty. Once we've processed the XFS_ILOG_IVERSION flag, convert it 121 + * to XFS_ILOG_CORE so that the actual on-disk dirty tracking 122 + * (ili_fields) correctly tracks that the version has changed. 123 + */ 124 + spin_lock(&iip->ili_lock); 125 + iip->ili_fsync_fields |= (flags & ~XFS_ILOG_IVERSION); 126 + if (flags & XFS_ILOG_IVERSION) 127 + flags = ((flags & ~XFS_ILOG_IVERSION) | XFS_ILOG_CORE); 128 + 129 + if (!iip->ili_item.li_buf) { 130 + struct xfs_buf *bp; 131 + int error; 132 + 133 + /* 134 + * We hold the ILOCK here, so this inode is not going to be 135 + * flushed while we are here. Further, because there is no 136 + * buffer attached to the item, we know that there is no IO in 137 + * progress, so nothing will clear the ili_fields while we read 138 + * in the buffer. Hence we can safely drop the spin lock and 139 + * read the buffer knowing that the state will not change from 140 + * here. 141 + */ 142 + spin_unlock(&iip->ili_lock); 143 + error = xfs_imap_to_bp(ip->i_mount, tp, &ip->i_imap, &bp); 144 + if (error) 145 + return error; 146 + 147 + /* 148 + * We need an explicit buffer reference for the log item but 149 + * don't want the buffer to remain attached to the transaction. 150 + * Hold the buffer but release the transaction reference once 151 + * we've attached the inode log item to the buffer log item 152 + * list. 153 + */ 154 + xfs_buf_hold(bp); 155 + spin_lock(&iip->ili_lock); 156 + iip->ili_item.li_buf = bp; 157 + bp->b_flags |= _XBF_INODES; 158 + list_add_tail(&iip->ili_item.li_bio_list, &bp->b_li_list); 159 + xfs_trans_brelse(tp, bp); 160 + } 161 + 162 + /* 163 + * Always OR in the bits from the ili_last_fields field. This is to 164 + * coordinate with the xfs_iflush() and xfs_buf_inode_iodone() routines 165 + * in the eventual clearing of the ili_fields bits. 
See the big comment 166 + * in xfs_iflush() for an explanation of this coordination mechanism. 167 + */ 168 + iip->ili_fields |= (flags | iip->ili_last_fields); 169 + spin_unlock(&iip->ili_lock); 170 + 171 + /* 172 + * We are done with the log item transaction dirty state, so clear it so 173 + * that it doesn't pollute future transactions. 174 + */ 175 + iip->ili_dirty_flags = 0; 176 + return 0; 177 + } 178 + 32 179 /* 33 180 * The logged size of an inode fork is always the current size of the inode 34 181 * fork. This means that when an inode fork is relogged, the size of the logged ··· 809 662 } 810 663 811 664 static const struct xfs_item_ops xfs_inode_item_ops = { 665 + .iop_sort = xfs_inode_item_sort, 666 + .iop_precommit = xfs_inode_item_precommit, 812 667 .iop_size = xfs_inode_item_size, 813 668 .iop_format = xfs_inode_item_format, 814 669 .iop_pin = xfs_inode_item_pin,
+1
fs/xfs/xfs_inode_item.h
··· 17 17 struct xfs_log_item ili_item; /* common portion */ 18 18 struct xfs_inode *ili_inode; /* inode ptr */ 19 19 unsigned short ili_lock_flags; /* inode lock flags */ 20 + unsigned int ili_dirty_flags; /* dirty in current tx */ 20 21 /* 21 22 * The ili_lock protects the interactions between the dirty state and 22 23 * the flush state of the inode log item. This allows us to do atomic
+9 -10
fs/xfs/xfs_log_recover.c
··· 2711 2711 * just to flush the inodegc queue and wait for it to 2712 2712 * complete. 2713 2713 */ 2714 - xfs_inodegc_flush(mp); 2714 + error = xfs_inodegc_flush(mp); 2715 + if (error) 2716 + break; 2715 2717 } 2716 2718 2717 2719 prev_agino = agino; ··· 2721 2719 } 2722 2720 2723 2721 if (prev_ip) { 2722 + int error2; 2723 + 2724 2724 ip->i_prev_unlinked = prev_agino; 2725 2725 xfs_irele(prev_ip); 2726 + 2727 + error2 = xfs_inodegc_flush(mp); 2728 + if (error2 && !error) 2729 + return error2; 2726 2730 } 2727 - xfs_inodegc_flush(mp); 2728 2731 return error; 2729 2732 } 2730 2733 ··· 2796 2789 * bucket and remaining inodes on it unreferenced and 2797 2790 * unfreeable. 2798 2791 */ 2799 - xfs_inodegc_flush(pag->pag_mount); 2800 2792 xlog_recover_clear_agi_bucket(pag, bucket); 2801 2793 } 2802 2794 } ··· 2812 2806 2813 2807 for_each_perag(log->l_mp, agno, pag) 2814 2808 xlog_recover_iunlink_ag(pag); 2815 - 2816 - /* 2817 - * Flush the pending unlinked inodes to ensure that the inactivations 2818 - * are fully completed on disk and the incore inodes can be reclaimed 2819 - * before we signal that recovery is complete. 2820 - */ 2821 - xfs_inodegc_flush(log->l_mp); 2822 2809 } 2823 2810 2824 2811 STATIC void
+1
fs/xfs/xfs_mount.h
··· 62 62 struct xfs_inodegc { 63 63 struct llist_head list; 64 64 struct delayed_work work; 65 + int error; 65 66 66 67 /* approximate count of inodes in the list */ 67 68 unsigned int items;
+3 -1
fs/xfs/xfs_reflink.c
··· 616 616 xfs_refcount_free_cow_extent(*tpp, del.br_startblock, 617 617 del.br_blockcount); 618 618 619 - xfs_free_extent_later(*tpp, del.br_startblock, 619 + error = xfs_free_extent_later(*tpp, del.br_startblock, 620 620 del.br_blockcount, NULL); 621 + if (error) 622 + break; 621 623 622 624 /* Roll the transaction */ 623 625 error = xfs_defer_finish(tpp);
+1
fs/xfs/xfs_super.c
··· 1100 1100 #endif 1101 1101 init_llist_head(&gc->list); 1102 1102 gc->items = 0; 1103 + gc->error = 0; 1103 1104 INIT_DELAYED_WORK(&gc->work, xfs_inodegc_worker); 1104 1105 } 1105 1106 return 0;
+8 -1
fs/xfs/xfs_trans.c
··· 290 290 * Do not perform a synchronous scan because callers can hold 291 291 * other locks. 292 292 */ 293 - xfs_blockgc_flush_all(mp); 293 + error = xfs_blockgc_flush_all(mp); 294 + if (error) 295 + return error; 294 296 want_retry = false; 295 297 goto retry; 296 298 } ··· 970 968 !(tp->t_flags & XFS_TRANS_PERM_LOG_RES)); 971 969 if (!regrant && (tp->t_flags & XFS_TRANS_PERM_LOG_RES)) { 972 970 error = xfs_defer_finish_noroll(&tp); 971 + if (error) 972 + goto out_unreserve; 973 + 974 + /* Run precommits from final tx in defer chain. */ 975 + error = xfs_trans_run_precommits(tp); 973 976 if (error) 974 977 goto out_unreserve; 975 978 }
+6
include/linux/cper.h
··· 572 572 int cper_mem_err_location(struct cper_mem_err_compact *mem, char *msg); 573 573 int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg); 574 574 575 + struct acpi_hest_generic_status; 576 + void cper_estatus_print(const char *pfx, 577 + const struct acpi_hest_generic_status *estatus); 578 + int cper_estatus_check_header(const struct acpi_hest_generic_status *estatus); 579 + int cper_estatus_check(const struct acpi_hest_generic_status *estatus); 580 + 575 581 #endif
+2
include/linux/efi.h
··· 1338 1338 return xen_efi_config_table_is_usable(guid, table); 1339 1339 } 1340 1340 1341 + umode_t efi_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n); 1342 + 1341 1343 #endif /* _LINUX_EFI_H */
+1 -1
include/linux/iio/iio-gts-helper.h
··· 135 135 /** 136 136 * iio_gts_find_sel_by_int_time - find selector matching integration time 137 137 * @gts: Gain time scale descriptor 138 - * @gain: HW-gain for which matching selector is searched for 138 + * @time: Integration time for which matching selector is searched for 139 139 * 140 140 * Return: a selector matching given integration time or -EINVAL if 141 141 * selector was not found.
+6 -3
include/linux/netdevice.h
··· 620 620 netdevice_tracker dev_tracker; 621 621 622 622 struct Qdisc __rcu *qdisc; 623 - struct Qdisc *qdisc_sleeping; 623 + struct Qdisc __rcu *qdisc_sleeping; 624 624 #ifdef CONFIG_SYSFS 625 625 struct kobject kobj; 626 626 #endif ··· 768 768 /* We only give a hint, preemption can change CPU under us */ 769 769 val |= raw_smp_processor_id(); 770 770 771 - if (table->ents[index] != val) 772 - table->ents[index] = val; 771 + /* The following WRITE_ONCE() is paired with the READ_ONCE() 772 + * here, and another one in get_rps_cpu(). 773 + */ 774 + if (READ_ONCE(table->ents[index]) != val) 775 + WRITE_ONCE(table->ents[index], val); 773 776 } 774 777 } 775 778
+6
include/linux/page-flags.h
··· 617 617 * Please note that, confusingly, "page_mapping" refers to the inode 618 618 * address_space which maps the page from disk; whereas "page_mapped" 619 619 * refers to user virtual address space into which the page is mapped. 620 + * 621 + * For slab pages, since slab reuses the bits in struct page to store its 622 + * internal states, the page->mapping does not exist as such, nor do these 623 + * flags below. So in order to avoid testing non-existent bits, please 624 + * make sure that PageSlab(page) actually evaluates to false before calling 625 + * the following functions (e.g., PageAnon). See mm/slab.h. 620 626 */ 621 627 #define PAGE_MAPPING_ANON 0x1 622 628 #define PAGE_MAPPING_MOVABLE 0x2
+13 -12
include/linux/pe.h
··· 11 11 #include <linux/types.h> 12 12 13 13 /* 14 - * Linux EFI stub v1.0 adds the following functionality: 15 - * - Loading initrd from the LINUX_EFI_INITRD_MEDIA_GUID device path, 16 - * - Loading/starting the kernel from firmware that targets a different 17 - * machine type, via the entrypoint exposed in the .compat PE/COFF section. 14 + * Starting from version v3.0, the major version field should be interpreted as 15 + * a bit mask of features supported by the kernel's EFI stub: 16 + * - 0x1: initrd loading from the LINUX_EFI_INITRD_MEDIA_GUID device path, 17 + * - 0x2: initrd loading using the initrd= command line option, where the file 18 + * may be specified using device path notation, and is not required to 19 + * reside on the same volume as the loaded kernel image. 18 20 * 19 21 * The recommended way of loading and starting v1.0 or later kernels is to use 20 22 * the LoadImage() and StartImage() EFI boot services, and expose the initrd 21 23 * via the LINUX_EFI_INITRD_MEDIA_GUID device path. 22 24 * 23 - * Versions older than v1.0 support initrd loading via the image load options 24 - * (using initrd=, limited to the volume from which the kernel itself was 25 - * loaded), or via arch specific means (bootparams, DT, etc). 25 + * Versions older than v1.0 may support initrd loading via the image load 26 + * options (using initrd=, limited to the volume from which the kernel itself 27 + * was loaded), or only via arch specific means (bootparams, DT, etc). 26 28 * 27 - * On x86, LoadImage() and StartImage() can be omitted if the EFI handover 28 - * protocol is implemented, which can be inferred from the version, 29 - * handover_offset and xloadflags fields in the bootparams structure. 29 + * The minor version field must remain 0x0. 
30 + * (https://lore.kernel.org/all/efd6f2d4-547c-1378-1faa-53c044dbd297@gmail.com/) 30 31 */ 31 - #define LINUX_EFISTUB_MAJOR_VERSION 0x1 32 - #define LINUX_EFISTUB_MINOR_VERSION 0x1 32 + #define LINUX_EFISTUB_MAJOR_VERSION 0x3 33 + #define LINUX_EFISTUB_MINOR_VERSION 0x0 33 34 34 35 /* 35 36 * LINUX_PE_MAGIC appears at offset 0x38 into the MS-DOS header of EFI bootable
+3 -4
include/linux/sunrpc/svcsock.h
··· 61 61 void svc_send(struct svc_rqst *rqstp); 62 62 void svc_drop(struct svc_rqst *); 63 63 void svc_sock_update_bufs(struct svc_serv *serv); 64 - bool svc_alien_sock(struct net *net, int fd); 65 - int svc_addsock(struct svc_serv *serv, const int fd, 66 - char *name_return, const size_t len, 67 - const struct cred *cred); 64 + int svc_addsock(struct svc_serv *serv, struct net *net, 65 + const int fd, char *name_return, const size_t len, 66 + const struct cred *cred); 68 67 void svc_init_xprt_sock(void); 69 68 void svc_cleanup_xprt_sock(void); 70 69 struct svc_xprt *svc_sock_create(struct svc_serv *serv, int prot);
+1 -5
include/linux/surface_aggregator/device.h
··· 243 243 * Return: Returns the pointer to the &struct ssam_device_driver wrapping the 244 244 * given device driver @d. 245 245 */ 246 - static inline 247 - struct ssam_device_driver *to_ssam_device_driver(struct device_driver *d) 248 - { 249 - return container_of(d, struct ssam_device_driver, driver); 250 - } 246 + #define to_ssam_device_driver(d) container_of_const(d, struct ssam_device_driver, driver) 251 247 252 248 const struct ssam_device_id *ssam_device_id_match(const struct ssam_device_id *table, 253 249 const struct ssam_device_uid uid);
+5
include/linux/usb/hcd.h
··· 501 501 void hcd_buffer_free(struct usb_bus *bus, size_t size, 502 502 void *addr, dma_addr_t dma); 503 503 504 + void *hcd_buffer_alloc_pages(struct usb_hcd *hcd, 505 + size_t size, gfp_t mem_flags, dma_addr_t *dma); 506 + void hcd_buffer_free_pages(struct usb_hcd *hcd, 507 + size_t size, void *addr, dma_addr_t dma); 508 + 504 509 /* generic bus glue, needed for host controllers that don't use PCI */ 505 510 extern irqreturn_t usb_hcd_irq(int irq, void *__hcd); 506 511
+1
include/media/v4l2-subdev.h
··· 1119 1119 * @vfh: pointer to &struct v4l2_fh 1120 1120 * @state: pointer to &struct v4l2_subdev_state 1121 1121 * @owner: module pointer to the owner of this file handle 1122 + * @client_caps: bitmask of ``V4L2_SUBDEV_CLIENT_CAP_*`` 1122 1123 */ 1123 1124 struct v4l2_subdev_fh { 1124 1125 struct v4l2_fh vfh;
+1
include/net/bluetooth/hci.h
··· 350 350 enum { 351 351 HCI_SETUP, 352 352 HCI_CONFIG, 353 + HCI_DEBUGFS_CREATED, 353 354 HCI_AUTO_OFF, 354 355 HCI_RFKILLED, 355 356 HCI_MGMT,
+3 -1
include/net/bluetooth/hci_core.h
··· 515 515 struct work_struct cmd_sync_work; 516 516 struct list_head cmd_sync_work_list; 517 517 struct mutex cmd_sync_work_lock; 518 + struct mutex unregister_lock; 518 519 struct work_struct cmd_sync_cancel_work; 519 520 struct work_struct reenable_adv_work; 520 521 ··· 1202 1201 if (id != BT_ISO_QOS_CIS_UNSET && id != c->iso_qos.ucast.cis) 1203 1202 continue; 1204 1203 1205 - if (ba_type == c->dst_type && !bacmp(&c->dst, ba)) { 1204 + /* Match destination address if set */ 1205 + if (!ba || (ba_type == c->dst_type && !bacmp(&c->dst, ba))) { 1206 1206 rcu_read_unlock(); 1207 1207 return c; 1208 1208 }
+1 -1
include/net/neighbour.h
··· 180 180 netdevice_tracker dev_tracker; 181 181 u32 flags; 182 182 u8 protocol; 183 - u8 key[]; 183 + u32 key[]; 184 184 }; 185 185 186 186 /*
+1 -1
include/net/netns/ipv6.h
··· 53 53 int seg6_flowlabel; 54 54 u32 ioam6_id; 55 55 u64 ioam6_id_wide; 56 - bool skip_notify_on_dev_down; 56 + u8 skip_notify_on_dev_down; 57 57 u8 fib_notify_on_flag_change; 58 58 u8 icmpv6_error_anycast_as_unicast; 59 59 };
+1 -5
include/net/ping.h
··· 16 16 #define PING_HTABLE_SIZE 64 17 17 #define PING_HTABLE_MASK (PING_HTABLE_SIZE-1) 18 18 19 - /* 20 - * gid_t is either uint or ushort. We want to pass it to 21 - * proc_dointvec_minmax(), so it must not be larger than MAX_INT 22 - */ 23 - #define GID_T_MAX (((gid_t)~0U) >> 1) 19 + #define GID_T_MAX (((gid_t)~0U) - 1) 24 20 25 21 /* Compatibility glue so we can support IPv6 when it's compiled as a module */ 26 22 struct pingv6_ops {
+2
include/net/pkt_sched.h
··· 127 127 } 128 128 } 129 129 130 + extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1]; 131 + 130 132 /* Calculate maximal size of packet seen by hard_start_xmit 131 133 routine of this device. 132 134 */
-3
include/net/rpl.h
··· 23 23 static inline void rpl_exit(void) {} 24 24 #endif 25 25 26 - /* Worst decompression memory usage ipv6 address (16) + pad 7 */ 27 - #define IPV6_RPL_SRH_WORST_SWAP_SIZE (sizeof(struct in6_addr) + 7) 28 - 29 26 size_t ipv6_rpl_srh_size(unsigned char n, unsigned char cmpri, 30 27 unsigned char cmpre); 31 28
+4 -2
include/net/sch_generic.h
··· 545 545 546 546 static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc) 547 547 { 548 - return qdisc->dev_queue->qdisc_sleeping; 548 + return rcu_dereference_rtnl(qdisc->dev_queue->qdisc_sleeping); 549 549 } 550 550 551 551 static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc) ··· 754 754 755 755 for (i = 0; i < dev->num_tx_queues; i++) { 756 756 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 757 - if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping) 757 + 758 + if (rcu_access_pointer(txq->qdisc) != 759 + rcu_access_pointer(txq->qdisc_sleeping)) 758 760 return true; 759 761 } 760 762 return false;
+13 -5
include/net/sock.h
··· 1152 1152 * OR an additional socket flag 1153 1153 * [1] : sk_state and sk_prot are in the same cache line. 1154 1154 */ 1155 - if (sk->sk_state == TCP_ESTABLISHED) 1156 - sock_rps_record_flow_hash(sk->sk_rxhash); 1155 + if (sk->sk_state == TCP_ESTABLISHED) { 1156 + /* This READ_ONCE() is paired with the WRITE_ONCE() 1157 + * from sock_rps_save_rxhash() and sock_rps_reset_rxhash(). 1158 + */ 1159 + sock_rps_record_flow_hash(READ_ONCE(sk->sk_rxhash)); 1160 + } 1157 1161 } 1158 1162 #endif 1159 1163 } ··· 1166 1162 const struct sk_buff *skb) 1167 1163 { 1168 1164 #ifdef CONFIG_RPS 1169 - if (unlikely(sk->sk_rxhash != skb->hash)) 1170 - sk->sk_rxhash = skb->hash; 1165 + /* The following WRITE_ONCE() is paired with the READ_ONCE() 1166 + * here, and another one in sock_rps_record_flow(). 1167 + */ 1168 + if (unlikely(READ_ONCE(sk->sk_rxhash) != skb->hash)) 1169 + WRITE_ONCE(sk->sk_rxhash, skb->hash); 1171 1170 #endif 1172 1171 } 1173 1172 1174 1173 static inline void sock_rps_reset_rxhash(struct sock *sk) 1175 1174 { 1176 1175 #ifdef CONFIG_RPS 1177 - sk->sk_rxhash = 0; 1176 + /* Paired with READ_ONCE() in sock_rps_record_flow() */ 1177 + WRITE_ONCE(sk->sk_rxhash, 0); 1178 1178 #endif 1179 1179 } 1180 1180
+4 -3
include/target/iscsi/iscsi_target_core.h
··· 562 562 #define LOGIN_FLAGS_READ_ACTIVE 2 563 563 #define LOGIN_FLAGS_WRITE_ACTIVE 3 564 564 #define LOGIN_FLAGS_CLOSED 4 565 + #define LOGIN_FLAGS_WORKER_RUNNING 5 565 566 unsigned long login_flags; 566 567 struct delayed_work login_work; 567 568 struct iscsi_login *login; 568 569 struct timer_list nopin_timer; 569 570 struct timer_list nopin_response_timer; 570 - struct timer_list transport_timer; 571 + struct timer_list login_timer; 571 572 struct task_struct *login_kworker; 572 573 /* Spinlock used for add/deleting cmd's from conn_cmd_list */ 573 574 spinlock_t cmd_lock; ··· 577 576 spinlock_t nopin_timer_lock; 578 577 spinlock_t response_queue_lock; 579 578 spinlock_t state_lock; 579 + spinlock_t login_timer_lock; 580 + spinlock_t login_worker_lock; 580 581 /* libcrypto RX and TX contexts for crc32c */ 581 582 struct ahash_request *conn_rx_hash; 582 583 struct ahash_request *conn_tx_hash; ··· 795 792 enum np_thread_state_table np_thread_state; 796 793 bool enabled; 797 794 atomic_t np_reset_count; 798 - enum iscsi_timer_flags_table np_login_timer_flags; 799 795 u32 np_exports; 800 796 enum np_flags_table np_flags; 801 797 spinlock_t np_thread_lock; ··· 802 800 struct socket *np_socket; 803 801 struct sockaddr_storage np_sockaddr; 804 802 struct task_struct *np_thread; 805 - struct timer_list np_login_timer; 806 803 void *np_context; 807 804 struct iscsit_transport *np_transport; 808 805 struct list_head np_list;
+1
include/uapi/linux/bpf.h
··· 1035 1035 BPF_TRACE_KPROBE_MULTI, 1036 1036 BPF_LSM_CGROUP, 1037 1037 BPF_STRUCT_OPS, 1038 + BPF_NETFILTER, 1038 1039 __MAX_BPF_ATTACH_TYPE 1039 1040 }; 1040 1041
-4
io_uring/epoll.c
··· 25 25 { 26 26 struct io_epoll *epoll = io_kiocb_to_cmd(req, struct io_epoll); 27 27 28 - pr_warn_once("%s: epoll_ctl support in io_uring is deprecated and will " 29 - "be removed in a future Linux kernel version.\n", 30 - current->comm); 31 - 32 28 if (sqe->buf_index || sqe->splice_fd_in) 33 29 return -EINVAL; 34 30
+6 -2
kernel/bpf/map_in_map.c
··· 69 69 /* Misc members not needed in bpf_map_meta_equal() check. */ 70 70 inner_map_meta->ops = inner_map->ops; 71 71 if (inner_map->ops == &array_map_ops) { 72 + struct bpf_array *inner_array_meta = 73 + container_of(inner_map_meta, struct bpf_array, map); 74 + struct bpf_array *inner_array = container_of(inner_map, struct bpf_array, map); 75 + 76 + inner_array_meta->index_mask = inner_array->index_mask; 77 + inner_array_meta->elem_size = inner_array->elem_size; 72 78 inner_map_meta->bypass_spec_v1 = inner_map->bypass_spec_v1; 73 - container_of(inner_map_meta, struct bpf_array, map)->index_mask = 74 - container_of(inner_map, struct bpf_array, map)->index_mask; 75 79 } 76 80 77 81 fdput(f);
+9
kernel/bpf/syscall.c
··· 2434 2434 default: 2435 2435 return -EINVAL; 2436 2436 } 2437 + case BPF_PROG_TYPE_NETFILTER: 2438 + if (expected_attach_type == BPF_NETFILTER) 2439 + return 0; 2440 + return -EINVAL; 2437 2441 case BPF_PROG_TYPE_SYSCALL: 2438 2442 case BPF_PROG_TYPE_EXT: 2439 2443 if (expected_attach_type) ··· 4617 4613 4618 4614 switch (prog->type) { 4619 4615 case BPF_PROG_TYPE_EXT: 4616 + break; 4620 4617 case BPF_PROG_TYPE_NETFILTER: 4618 + if (attr->link_create.attach_type != BPF_NETFILTER) { 4619 + ret = -EINVAL; 4620 + goto out; 4621 + } 4621 4622 break; 4622 4623 case BPF_PROG_TYPE_PERF_EVENT: 4623 4624 case BPF_PROG_TYPE_TRACEPOINT:
+1 -1
kernel/fork.c
··· 627 627 arch_release_task_struct(tsk); 628 628 if (tsk->flags & PF_KTHREAD) 629 629 free_kthread_struct(tsk); 630 + bpf_task_storage_free(tsk); 630 631 free_task_struct(tsk); 631 632 } 632 633 EXPORT_SYMBOL(free_task); ··· 980 979 cgroup_free(tsk); 981 980 task_numa_free(tsk, true); 982 981 security_task_free(tsk); 983 - bpf_task_storage_free(tsk); 984 982 exit_creds(tsk); 985 983 delayacct_tsk_free(tsk); 986 984 put_signal_struct(tsk->signal);
+1 -1
kernel/module/decompress.c
··· 257 257 do { 258 258 struct page *page = module_get_next_page(info); 259 259 260 - if (!IS_ERR(page)) { 260 + if (IS_ERR(page)) { 261 261 retval = PTR_ERR(page); 262 262 goto out; 263 263 }
+11 -1
kernel/trace/bpf_trace.c
··· 900 900 901 901 BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz) 902 902 { 903 + struct path copy; 903 904 long len; 904 905 char *p; 905 906 906 907 if (!sz) 907 908 return 0; 908 909 909 - p = d_path(path, buf, sz); 910 + /* 911 + * The path pointer is verified as trusted and safe to use, 912 + * but let's double check it's valid anyway to workaround 913 + * potentially broken verifier. 914 + */ 915 + len = copy_from_kernel_nofault(&copy, path, sizeof(*path)); 916 + if (len < 0) 917 + return len; 918 + 919 + p = d_path(&copy, buf, sz); 910 920 if (IS_ERR(p)) { 911 921 len = PTR_ERR(p); 912 922 } else {
+1 -1
kernel/trace/trace_probe.h
··· 308 308 { 309 309 struct trace_probe_event *tpe = trace_probe_event_from_call(call); 310 310 311 - return list_first_entry(&tpe->probes, struct trace_probe, list); 311 + return list_first_entry_or_null(&tpe->probes, struct trace_probe, list); 312 312 } 313 313 314 314 static inline struct list_head *trace_probe_probe_list(struct trace_probe *tp)
+1 -1
lib/cpu_rmap.c
··· 280 280 struct irq_glue *glue = 281 281 container_of(ref, struct irq_glue, notify.kref); 282 282 283 - cpu_rmap_put(glue->rmap); 284 283 glue->rmap->obj[glue->index] = NULL; 284 + cpu_rmap_put(glue->rmap); 285 285 kfree(glue); 286 286 } 287 287
+65 -20
lib/test_firmware.c
··· 45 45 bool sent; 46 46 const struct firmware *fw; 47 47 const char *name; 48 + const char *fw_buf; 48 49 struct completion completion; 49 50 struct task_struct *task; 50 51 struct device *dev; ··· 176 175 177 176 for (i = 0; i < test_fw_config->num_requests; i++) { 178 177 req = &test_fw_config->reqs[i]; 179 - if (req->fw) 178 + if (req->fw) { 179 + if (req->fw_buf) { 180 + kfree_const(req->fw_buf); 181 + req->fw_buf = NULL; 182 + } 180 183 release_firmware(req->fw); 184 + req->fw = NULL; 185 + } 181 186 } 182 187 183 188 vfree(test_fw_config->reqs); ··· 360 353 return len; 361 354 } 362 355 356 + static inline int __test_dev_config_update_bool(const char *buf, size_t size, 357 + bool *cfg) 358 + { 359 + int ret; 360 + 361 + if (kstrtobool(buf, cfg) < 0) 362 + ret = -EINVAL; 363 + else 364 + ret = size; 365 + 366 + return ret; 367 + } 368 + 363 369 static int test_dev_config_update_bool(const char *buf, size_t size, 364 370 bool *cfg) 365 371 { 366 372 int ret; 367 373 368 374 mutex_lock(&test_fw_mutex); 369 - if (kstrtobool(buf, cfg) < 0) 370 - ret = -EINVAL; 371 - else 372 - ret = size; 375 + ret = __test_dev_config_update_bool(buf, size, cfg); 373 376 mutex_unlock(&test_fw_mutex); 374 377 375 378 return ret; ··· 390 373 return snprintf(buf, PAGE_SIZE, "%d\n", val); 391 374 } 392 375 393 - static int test_dev_config_update_size_t(const char *buf, 376 + static int __test_dev_config_update_size_t( 377 + const char *buf, 394 378 size_t size, 395 379 size_t *cfg) 396 380 { ··· 402 384 if (ret) 403 385 return ret; 404 386 405 - mutex_lock(&test_fw_mutex); 406 387 *(size_t *)cfg = new; 407 - mutex_unlock(&test_fw_mutex); 408 388 409 389 /* Always return full write size even if we didn't consume all */ 410 390 return size; ··· 418 402 return snprintf(buf, PAGE_SIZE, "%d\n", val); 419 403 } 420 404 421 - static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg) 405 + static int __test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg) 422 
406 { 423 407 u8 val; 424 408 int ret; ··· 427 411 if (ret) 428 412 return ret; 429 413 430 - mutex_lock(&test_fw_mutex); 431 414 *(u8 *)cfg = val; 432 - mutex_unlock(&test_fw_mutex); 433 415 434 416 /* Always return full write size even if we didn't consume all */ 435 417 return size; 418 + } 419 + 420 + static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg) 421 + { 422 + int ret; 423 + 424 + mutex_lock(&test_fw_mutex); 425 + ret = __test_dev_config_update_u8(buf, size, cfg); 426 + mutex_unlock(&test_fw_mutex); 427 + 428 + return ret; 436 429 } 437 430 438 431 static ssize_t test_dev_config_show_u8(char *buf, u8 val) ··· 496 471 mutex_unlock(&test_fw_mutex); 497 472 goto out; 498 473 } 499 - mutex_unlock(&test_fw_mutex); 500 474 501 - rc = test_dev_config_update_u8(buf, count, 502 - &test_fw_config->num_requests); 475 + rc = __test_dev_config_update_u8(buf, count, 476 + &test_fw_config->num_requests); 477 + mutex_unlock(&test_fw_mutex); 503 478 504 479 out: 505 480 return rc; ··· 543 518 mutex_unlock(&test_fw_mutex); 544 519 goto out; 545 520 } 546 - mutex_unlock(&test_fw_mutex); 547 521 548 - rc = test_dev_config_update_size_t(buf, count, 549 - &test_fw_config->buf_size); 522 + rc = __test_dev_config_update_size_t(buf, count, 523 + &test_fw_config->buf_size); 524 + mutex_unlock(&test_fw_mutex); 550 525 551 526 out: 552 527 return rc; ··· 573 548 mutex_unlock(&test_fw_mutex); 574 549 goto out; 575 550 } 576 - mutex_unlock(&test_fw_mutex); 577 551 578 - rc = test_dev_config_update_size_t(buf, count, 579 - &test_fw_config->file_offset); 552 + rc = __test_dev_config_update_size_t(buf, count, 553 + &test_fw_config->file_offset); 554 + mutex_unlock(&test_fw_mutex); 580 555 581 556 out: 582 557 return rc; ··· 677 652 678 653 mutex_lock(&test_fw_mutex); 679 654 release_firmware(test_firmware); 655 + if (test_fw_config->reqs) 656 + __test_release_all_firmware(); 680 657 test_firmware = NULL; 681 658 rc = request_firmware(&test_firmware, name, dev); 
682 659 if (rc) { ··· 779 752 mutex_lock(&test_fw_mutex); 780 753 release_firmware(test_firmware); 781 754 test_firmware = NULL; 755 + if (test_fw_config->reqs) 756 + __test_release_all_firmware(); 782 757 rc = request_firmware_nowait(THIS_MODULE, 1, name, dev, GFP_KERNEL, 783 758 NULL, trigger_async_request_cb); 784 759 if (rc) { ··· 823 794 824 795 mutex_lock(&test_fw_mutex); 825 796 release_firmware(test_firmware); 797 + if (test_fw_config->reqs) 798 + __test_release_all_firmware(); 826 799 test_firmware = NULL; 827 800 rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOUEVENT, name, 828 801 dev, GFP_KERNEL, NULL, ··· 887 856 test_fw_config->buf_size); 888 857 if (!req->fw) 889 858 kfree(test_buf); 859 + else 860 + req->fw_buf = test_buf; 890 861 } else { 891 862 req->rc = test_fw_config->req_firmware(&req->fw, 892 863 req->name, ··· 928 895 929 896 mutex_lock(&test_fw_mutex); 930 897 898 + if (test_fw_config->reqs) { 899 + rc = -EBUSY; 900 + goto out_bail; 901 + } 902 + 931 903 test_fw_config->reqs = 932 904 vzalloc(array3_size(sizeof(struct test_batched_req), 933 905 test_fw_config->num_requests, 2)); ··· 949 911 req->fw = NULL; 950 912 req->idx = i; 951 913 req->name = test_fw_config->name; 914 + req->fw_buf = NULL; 952 915 req->dev = dev; 953 916 init_completion(&req->completion); 954 917 req->task = kthread_run(test_fw_run_batch_request, req, ··· 1032 993 1033 994 mutex_lock(&test_fw_mutex); 1034 995 996 + if (test_fw_config->reqs) { 997 + rc = -EBUSY; 998 + goto out_bail; 999 + } 1000 + 1035 1001 test_fw_config->reqs = 1036 1002 vzalloc(array3_size(sizeof(struct test_batched_req), 1037 1003 test_fw_config->num_requests, 2)); ··· 1054 1010 for (i = 0; i < test_fw_config->num_requests; i++) { 1055 1011 req = &test_fw_config->reqs[i]; 1056 1012 req->name = test_fw_config->name; 1013 + req->fw_buf = NULL; 1057 1014 req->fw = NULL; 1058 1015 req->idx = i; 1059 1016 init_completion(&req->completion);
+1
mm/Kconfig.debug
··· 98 98 config PAGE_TABLE_CHECK 99 99 bool "Check for invalid mappings in user page tables" 100 100 depends on ARCH_SUPPORTS_PAGE_TABLE_CHECK 101 + depends on EXCLUSIVE_SYSTEM_RAM 101 102 select PAGE_EXTENSION 102 103 help 103 104 Check that anonymous page is not being mapped twice with read write
+6
mm/page_table_check.c
··· 71 71 72 72 page = pfn_to_page(pfn); 73 73 page_ext = page_ext_get(page); 74 + 75 + BUG_ON(PageSlab(page)); 74 76 anon = PageAnon(page); 75 77 76 78 for (i = 0; i < pgcnt; i++) { ··· 109 107 110 108 page = pfn_to_page(pfn); 111 109 page_ext = page_ext_get(page); 110 + 111 + BUG_ON(PageSlab(page)); 112 112 anon = PageAnon(page); 113 113 114 114 for (i = 0; i < pgcnt; i++) { ··· 136 132 { 137 133 struct page_ext *page_ext; 138 134 unsigned long i; 135 + 136 + BUG_ON(PageSlab(page)); 139 137 140 138 page_ext = page_ext_get(page); 141 139 BUG_ON(!page_ext);
+1 -1
net/batman-adv/distributed-arp-table.c
··· 101 101 */ 102 102 static void batadv_dat_start_timer(struct batadv_priv *bat_priv) 103 103 { 104 - INIT_DELAYED_WORK(&bat_priv->dat.work, batadv_dat_purge); 105 104 queue_delayed_work(batadv_event_workqueue, &bat_priv->dat.work, 106 105 msecs_to_jiffies(10000)); 107 106 } ··· 818 819 if (!bat_priv->dat.hash) 819 820 return -ENOMEM; 820 821 822 + INIT_DELAYED_WORK(&bat_priv->dat.work, batadv_dat_purge); 821 823 batadv_dat_start_timer(bat_priv); 822 824 823 825 batadv_tvlv_handler_register(bat_priv, batadv_dat_tvlv_ogm_handler_v1,
+13 -9
net/bluetooth/hci_conn.c
··· 947 947 { 948 948 struct iso_list_data *d = data; 949 949 950 - /* Ignore broadcast */ 951 - if (!bacmp(&conn->dst, BDADDR_ANY)) 950 + /* Ignore broadcast or if CIG don't match */ 951 + if (!bacmp(&conn->dst, BDADDR_ANY) || d->cig != conn->iso_qos.ucast.cig) 952 952 return; 953 953 954 954 d->count++; ··· 963 963 struct hci_dev *hdev = conn->hdev; 964 964 struct iso_list_data d; 965 965 966 + if (conn->iso_qos.ucast.cig == BT_ISO_QOS_CIG_UNSET) 967 + return; 968 + 966 969 memset(&d, 0, sizeof(d)); 967 970 d.cig = conn->iso_qos.ucast.cig; 968 971 969 972 /* Check if ISO connection is a CIS and remove CIG if there are 970 973 * no other connections using it. 971 974 */ 975 + hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_BOUND, &d); 976 + hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECT, &d); 972 977 hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECTED, &d); 973 978 if (d.count) 974 979 return; ··· 1771 1766 1772 1767 memset(&data, 0, sizeof(data)); 1773 1768 1774 - /* Allocate a CIG if not set */ 1769 + /* Allocate first still reconfigurable CIG if not set */ 1775 1770 if (qos->ucast.cig == BT_ISO_QOS_CIG_UNSET) { 1776 - for (data.cig = 0x00; data.cig < 0xff; data.cig++) { 1771 + for (data.cig = 0x00; data.cig < 0xf0; data.cig++) { 1777 1772 data.count = 0; 1778 - data.cis = 0xff; 1779 1773 1780 - hci_conn_hash_list_state(hdev, cis_list, ISO_LINK, 1781 - BT_BOUND, &data); 1774 + hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, 1775 + BT_CONNECT, &data); 1782 1776 if (data.count) 1783 1777 continue; 1784 1778 1785 - hci_conn_hash_list_state(hdev, cis_list, ISO_LINK, 1779 + hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, 1786 1780 BT_CONNECTED, &data); 1787 1781 if (!data.count) 1788 1782 break; 1789 1783 } 1790 1784 1791 - if (data.cig == 0xff) 1785 + if (data.cig == 0xf0) 1792 1786 return false; 1793 1787 1794 1788 /* Update CIG */
+6 -4
net/bluetooth/hci_core.c
··· 1416 1416 1417 1417 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type) 1418 1418 { 1419 - struct smp_ltk *k; 1419 + struct smp_ltk *k, *tmp; 1420 1420 int removed = 0; 1421 1421 1422 - list_for_each_entry_rcu(k, &hdev->long_term_keys, list) { 1422 + list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) { 1423 1423 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type) 1424 1424 continue; 1425 1425 ··· 1435 1435 1436 1436 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type) 1437 1437 { 1438 - struct smp_irk *k; 1438 + struct smp_irk *k, *tmp; 1439 1439 1440 - list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) { 1440 + list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) { 1441 1441 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type) 1442 1442 continue; 1443 1443 ··· 2686 2686 { 2687 2687 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); 2688 2688 2689 + mutex_lock(&hdev->unregister_lock); 2689 2690 hci_dev_set_flag(hdev, HCI_UNREGISTER); 2691 + mutex_unlock(&hdev->unregister_lock); 2690 2692 2691 2693 write_lock(&hci_dev_list_lock); 2692 2694 list_del(&hdev->list);
+27 -19
net/bluetooth/hci_event.c
··· 3804 3804 struct sk_buff *skb) 3805 3805 { 3806 3806 struct hci_rp_le_set_cig_params *rp = data; 3807 + struct hci_cp_le_set_cig_params *cp; 3807 3808 struct hci_conn *conn; 3808 - int i = 0; 3809 + u8 status = rp->status; 3810 + int i; 3809 3811 3810 3812 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 3811 3813 3814 + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_CIG_PARAMS); 3815 + if (!cp || rp->num_handles != cp->num_cis || rp->cig_id != cp->cig_id) { 3816 + bt_dev_err(hdev, "unexpected Set CIG Parameters response data"); 3817 + status = HCI_ERROR_UNSPECIFIED; 3818 + } 3819 + 3812 3820 hci_dev_lock(hdev); 3813 3821 3814 - if (rp->status) { 3822 + if (status) { 3815 3823 while ((conn = hci_conn_hash_lookup_cig(hdev, rp->cig_id))) { 3816 3824 conn->state = BT_CLOSED; 3817 - hci_connect_cfm(conn, rp->status); 3825 + hci_connect_cfm(conn, status); 3818 3826 hci_conn_del(conn); 3819 3827 } 3820 3828 goto unlock; 3821 3829 } 3822 3830 3823 - rcu_read_lock(); 3824 - 3825 - list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { 3826 - if (conn->type != ISO_LINK || 3827 - conn->iso_qos.ucast.cig != rp->cig_id || 3828 - conn->state == BT_CONNECTED) 3831 + /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2553 3832 + * 3833 + * If the Status return parameter is zero, then the Controller shall 3834 + * set the Connection_Handle arrayed return parameter to the connection 3835 + * handle(s) corresponding to the CIS configurations specified in 3836 + * the CIS_IDs command parameter, in the same order. 
3837 + */ 3838 + for (i = 0; i < rp->num_handles; ++i) { 3839 + conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, rp->cig_id, 3840 + cp->cis[i].cis_id); 3841 + if (!conn || !bacmp(&conn->dst, BDADDR_ANY)) 3829 3842 continue; 3830 3843 3831 - conn->handle = __le16_to_cpu(rp->handle[i++]); 3844 + if (conn->state != BT_BOUND && conn->state != BT_CONNECT) 3845 + continue; 3846 + 3847 + conn->handle = __le16_to_cpu(rp->handle[i]); 3832 3848 3833 3849 bt_dev_dbg(hdev, "%p handle 0x%4.4x parent %p", conn, 3834 3850 conn->handle, conn->parent); 3835 3851 3836 3852 /* Create CIS if LE is already connected */ 3837 - if (conn->parent && conn->parent->state == BT_CONNECTED) { 3838 - rcu_read_unlock(); 3853 + if (conn->parent && conn->parent->state == BT_CONNECTED) 3839 3854 hci_le_create_cis(conn); 3840 - rcu_read_lock(); 3841 - } 3842 - 3843 - if (i == rp->num_handles) 3844 - break; 3845 3855 } 3846 - 3847 - rcu_read_unlock(); 3848 3856 3849 3857 unlock: 3850 3858 hci_dev_unlock(hdev);
+17 -6
net/bluetooth/hci_sync.c
··· 629 629 INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work); 630 630 INIT_LIST_HEAD(&hdev->cmd_sync_work_list); 631 631 mutex_init(&hdev->cmd_sync_work_lock); 632 + mutex_init(&hdev->unregister_lock); 632 633 633 634 INIT_WORK(&hdev->cmd_sync_cancel_work, hci_cmd_sync_cancel_work); 634 635 INIT_WORK(&hdev->reenable_adv_work, reenable_adv); ··· 693 692 void *data, hci_cmd_sync_work_destroy_t destroy) 694 693 { 695 694 struct hci_cmd_sync_work_entry *entry; 695 + int err = 0; 696 696 697 - if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) 698 - return -ENODEV; 697 + mutex_lock(&hdev->unregister_lock); 698 + if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) { 699 + err = -ENODEV; 700 + goto unlock; 701 + } 699 702 700 703 entry = kmalloc(sizeof(*entry), GFP_KERNEL); 701 - if (!entry) 702 - return -ENOMEM; 703 - 704 + if (!entry) { 705 + err = -ENOMEM; 706 + goto unlock; 707 + } 704 708 entry->func = func; 705 709 entry->data = data; 706 710 entry->destroy = destroy; ··· 716 710 717 711 queue_work(hdev->req_workqueue, &hdev->cmd_sync_work); 718 712 719 - return 0; 713 + unlock: 714 + mutex_unlock(&hdev->unregister_lock); 715 + return err; 720 716 } 721 717 EXPORT_SYMBOL(hci_cmd_sync_submit); 722 718 ··· 4549 4541 */ 4550 4542 if (!hci_dev_test_flag(hdev, HCI_SETUP) && 4551 4543 !hci_dev_test_flag(hdev, HCI_CONFIG)) 4544 + return 0; 4545 + 4546 + if (hci_dev_test_and_set_flag(hdev, HCI_DEBUGFS_CREATED)) 4552 4547 return 0; 4553 4548 4554 4549 hci_debugfs_create_common(hdev);
+13
net/bluetooth/l2cap_core.c
··· 4306 4306 result = __le16_to_cpu(rsp->result); 4307 4307 status = __le16_to_cpu(rsp->status); 4308 4308 4309 + if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START || 4310 + dcid > L2CAP_CID_DYN_END)) 4311 + return -EPROTO; 4312 + 4309 4313 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", 4310 4314 dcid, scid, result, status); 4311 4315 ··· 4341 4337 4342 4338 switch (result) { 4343 4339 case L2CAP_CR_SUCCESS: 4340 + if (__l2cap_get_chan_by_dcid(conn, dcid)) { 4341 + err = -EBADSLT; 4342 + break; 4343 + } 4344 + 4344 4345 l2cap_state_change(chan, BT_CONFIG); 4345 4346 chan->ident = 0; 4346 4347 chan->dcid = dcid; ··· 4672 4663 4673 4664 chan->ops->set_shutdown(chan); 4674 4665 4666 + l2cap_chan_unlock(chan); 4675 4667 mutex_lock(&conn->chan_lock); 4668 + l2cap_chan_lock(chan); 4676 4669 l2cap_chan_del(chan, ECONNRESET); 4677 4670 mutex_unlock(&conn->chan_lock); 4678 4671 ··· 4713 4702 return 0; 4714 4703 } 4715 4704 4705 + l2cap_chan_unlock(chan); 4716 4706 mutex_lock(&conn->chan_lock); 4707 + l2cap_chan_lock(chan); 4717 4708 l2cap_chan_del(chan, 0); 4718 4709 mutex_unlock(&conn->chan_lock); 4719 4710
+13 -11
net/can/j1939/main.c
··· 126 126 #define J1939_CAN_ID CAN_EFF_FLAG 127 127 #define J1939_CAN_MASK (CAN_EFF_FLAG | CAN_RTR_FLAG) 128 128 129 - static DEFINE_SPINLOCK(j1939_netdev_lock); 129 + static DEFINE_MUTEX(j1939_netdev_lock); 130 130 131 131 static struct j1939_priv *j1939_priv_create(struct net_device *ndev) 132 132 { ··· 220 220 j1939_can_rx_unregister(priv); 221 221 j1939_ecu_unmap_all(priv); 222 222 j1939_priv_set(priv->ndev, NULL); 223 - spin_unlock(&j1939_netdev_lock); 223 + mutex_unlock(&j1939_netdev_lock); 224 224 } 225 225 226 226 /* get pointer to priv without increasing ref counter */ ··· 248 248 { 249 249 struct j1939_priv *priv; 250 250 251 - spin_lock(&j1939_netdev_lock); 251 + mutex_lock(&j1939_netdev_lock); 252 252 priv = j1939_priv_get_by_ndev_locked(ndev); 253 - spin_unlock(&j1939_netdev_lock); 253 + mutex_unlock(&j1939_netdev_lock); 254 254 255 255 return priv; 256 256 } ··· 260 260 struct j1939_priv *priv, *priv_new; 261 261 int ret; 262 262 263 - spin_lock(&j1939_netdev_lock); 263 + mutex_lock(&j1939_netdev_lock); 264 264 priv = j1939_priv_get_by_ndev_locked(ndev); 265 265 if (priv) { 266 266 kref_get(&priv->rx_kref); 267 - spin_unlock(&j1939_netdev_lock); 267 + mutex_unlock(&j1939_netdev_lock); 268 268 return priv; 269 269 } 270 - spin_unlock(&j1939_netdev_lock); 270 + mutex_unlock(&j1939_netdev_lock); 271 271 272 272 priv = j1939_priv_create(ndev); 273 273 if (!priv) ··· 277 277 spin_lock_init(&priv->j1939_socks_lock); 278 278 INIT_LIST_HEAD(&priv->j1939_socks); 279 279 280 - spin_lock(&j1939_netdev_lock); 280 + mutex_lock(&j1939_netdev_lock); 281 281 priv_new = j1939_priv_get_by_ndev_locked(ndev); 282 282 if (priv_new) { 283 283 /* Someone was faster than us, use their priv and roll 284 284 * back our's. 
285 285 */ 286 286 kref_get(&priv_new->rx_kref); 287 - spin_unlock(&j1939_netdev_lock); 287 + mutex_unlock(&j1939_netdev_lock); 288 288 dev_put(ndev); 289 289 kfree(priv); 290 290 return priv_new; 291 291 } 292 292 j1939_priv_set(ndev, priv); 293 - spin_unlock(&j1939_netdev_lock); 294 293 295 294 ret = j1939_can_rx_register(priv); 296 295 if (ret < 0) 297 296 goto out_priv_put; 298 297 298 + mutex_unlock(&j1939_netdev_lock); 299 299 return priv; 300 300 301 301 out_priv_put: 302 302 j1939_priv_set(ndev, NULL); 303 + mutex_unlock(&j1939_netdev_lock); 304 + 303 305 dev_put(ndev); 304 306 kfree(priv); 305 307 ··· 310 308 311 309 void j1939_netdev_stop(struct j1939_priv *priv) 312 310 { 313 - kref_put_lock(&priv->rx_kref, __j1939_rx_release, &j1939_netdev_lock); 311 + kref_put_mutex(&priv->rx_kref, __j1939_rx_release, &j1939_netdev_lock); 314 312 j1939_priv_put(priv); 315 313 } 316 314
+5
net/can/j1939/socket.c
··· 1088 1088 1089 1089 void j1939_sk_send_loop_abort(struct sock *sk, int err) 1090 1090 { 1091 + struct j1939_sock *jsk = j1939_sk(sk); 1092 + 1093 + if (jsk->state & J1939_SOCK_ERRQUEUE) 1094 + return; 1095 + 1091 1096 sk->sk_err = err; 1092 1097 1093 1098 sk_error_report(sk);
+5 -3
net/core/dev.c
··· 4471 4471 u32 next_cpu; 4472 4472 u32 ident; 4473 4473 4474 - /* First check into global flow table if there is a match */ 4475 - ident = sock_flow_table->ents[hash & sock_flow_table->mask]; 4474 + /* First check into global flow table if there is a match. 4475 + * This READ_ONCE() pairs with WRITE_ONCE() from rps_record_sock_flow(). 4476 + */ 4477 + ident = READ_ONCE(sock_flow_table->ents[hash & sock_flow_table->mask]); 4476 4478 if ((ident ^ hash) & ~rps_cpu_mask) 4477 4479 goto try_rps; 4478 4480 ··· 10546 10544 return NULL; 10547 10545 netdev_init_one_queue(dev, queue, NULL); 10548 10546 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc); 10549 - queue->qdisc_sleeping = &noop_qdisc; 10547 + RCU_INIT_POINTER(queue->qdisc_sleeping, &noop_qdisc); 10550 10548 rcu_assign_pointer(dev->ingress_queue, queue); 10551 10549 #endif 10552 10550 return queue;
+2 -1
net/core/skmsg.c
··· 1210 1210 1211 1211 rcu_read_lock(); 1212 1212 psock = sk_psock(sk); 1213 - psock->saved_data_ready(sk); 1213 + if (psock) 1214 + psock->saved_data_ready(sk); 1214 1215 rcu_read_unlock(); 1215 1216 } 1216 1217 }
+4 -4
net/ipv4/sysctl_net_ipv4.c
··· 35 35 static int tcp_syn_retries_min = 1; 36 36 static int tcp_syn_retries_max = MAX_TCP_SYNCNT; 37 37 static int tcp_syn_linear_timeouts_max = MAX_TCP_SYNCNT; 38 - static int ip_ping_group_range_min[] = { 0, 0 }; 39 - static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX }; 38 + static unsigned long ip_ping_group_range_min[] = { 0, 0 }; 39 + static unsigned long ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX }; 40 40 static u32 u32_max_div_HZ = UINT_MAX / HZ; 41 41 static int one_day_secs = 24 * 3600; 42 42 static u32 fib_multipath_hash_fields_all_mask __maybe_unused = ··· 166 166 { 167 167 struct user_namespace *user_ns = current_user_ns(); 168 168 int ret; 169 - gid_t urange[2]; 169 + unsigned long urange[2]; 170 170 kgid_t low, high; 171 171 struct ctl_table tmp = { 172 172 .data = &urange, ··· 179 179 inet_get_ping_group_range_table(table, &low, &high); 180 180 urange[0] = from_kgid_munged(user_ns, low); 181 181 urange[1] = from_kgid_munged(user_ns, high); 182 - ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); 182 + ret = proc_doulongvec_minmax(&tmp, write, buffer, lenp, ppos); 183 183 184 184 if (write && ret == 0) { 185 185 low = make_kgid(user_ns, urange[0]);
+9 -10
net/ipv4/tcp_offload.c
··· 60 60 struct tcphdr *th; 61 61 unsigned int thlen; 62 62 unsigned int seq; 63 - __be32 delta; 64 63 unsigned int oldlen; 65 64 unsigned int mss; 66 65 struct sk_buff *gso_skb = skb; 67 66 __sum16 newcheck; 68 67 bool ooo_okay, copy_destructor; 68 + __wsum delta; 69 69 70 70 th = tcp_hdr(skb); 71 71 thlen = th->doff * 4; ··· 75 75 if (!pskb_may_pull(skb, thlen)) 76 76 goto out; 77 77 78 - oldlen = (u16)~skb->len; 78 + oldlen = ~skb->len; 79 79 __skb_pull(skb, thlen); 80 80 81 81 mss = skb_shinfo(skb)->gso_size; ··· 110 110 if (skb_is_gso(segs)) 111 111 mss *= skb_shinfo(segs)->gso_segs; 112 112 113 - delta = htonl(oldlen + (thlen + mss)); 113 + delta = (__force __wsum)htonl(oldlen + thlen + mss); 114 114 115 115 skb = segs; 116 116 th = tcp_hdr(skb); ··· 119 119 if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP)) 120 120 tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss); 121 121 122 - newcheck = ~csum_fold((__force __wsum)((__force u32)th->check + 123 - (__force u32)delta)); 122 + newcheck = ~csum_fold(csum_add(csum_unfold(th->check), delta)); 124 123 125 124 while (skb->next) { 126 125 th->fin = th->psh = 0; ··· 164 165 WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc)); 165 166 } 166 167 167 - delta = htonl(oldlen + (skb_tail_pointer(skb) - 168 - skb_transport_header(skb)) + 169 - skb->data_len); 170 - th->check = ~csum_fold((__force __wsum)((__force u32)th->check + 171 - (__force u32)delta)); 168 + delta = (__force __wsum)htonl(oldlen + 169 + (skb_tail_pointer(skb) - 170 + skb_transport_header(skb)) + 171 + skb->data_len); 172 + th->check = ~csum_fold(csum_add(csum_unfold(th->check), delta)); 172 173 if (skb->ip_summed == CHECKSUM_PARTIAL) 173 174 gso_reset_checksum(skb, ~th->check); 174 175 else
+11 -18
net/ipv6/exthdrs.c
··· 567 567 return -1; 568 568 } 569 569 570 - if (skb_cloned(skb)) { 571 - if (pskb_expand_head(skb, IPV6_RPL_SRH_WORST_SWAP_SIZE, 0, 572 - GFP_ATOMIC)) { 573 - __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), 574 - IPSTATS_MIB_OUTDISCARDS); 575 - kfree_skb(skb); 576 - return -1; 577 - } 578 - } else { 579 - err = skb_cow_head(skb, IPV6_RPL_SRH_WORST_SWAP_SIZE); 580 - if (unlikely(err)) { 581 - kfree_skb(skb); 582 - return -1; 583 - } 584 - } 585 - 586 - hdr = (struct ipv6_rpl_sr_hdr *)skb_transport_header(skb); 587 - 588 570 if (!pskb_may_pull(skb, ipv6_rpl_srh_size(n, hdr->cmpri, 589 571 hdr->cmpre))) { 590 572 kfree_skb(skb); ··· 610 628 skb_pull(skb, ((hdr->hdrlen + 1) << 3)); 611 629 skb_postpull_rcsum(skb, oldhdr, 612 630 sizeof(struct ipv6hdr) + ((hdr->hdrlen + 1) << 3)); 631 + if (unlikely(!hdr->segments_left)) { 632 + if (pskb_expand_head(skb, sizeof(struct ipv6hdr) + ((chdr->hdrlen + 1) << 3), 0, 633 + GFP_ATOMIC)) { 634 + __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_OUTDISCARDS); 635 + kfree_skb(skb); 636 + kfree(buf); 637 + return -1; 638 + } 639 + 640 + oldhdr = ipv6_hdr(skb); 641 + } 613 642 skb_push(skb, ((chdr->hdrlen + 1) << 3) + sizeof(struct ipv6hdr)); 614 643 skb_reset_network_header(skb); 615 644 skb_mac_header_rebuild(skb);
+2 -2
net/ipv6/route.c
··· 6412 6412 { 6413 6413 .procname = "skip_notify_on_dev_down", 6414 6414 .data = &init_net.ipv6.sysctl.skip_notify_on_dev_down, 6415 - .maxlen = sizeof(int), 6415 + .maxlen = sizeof(u8), 6416 6416 .mode = 0644, 6417 - .proc_handler = proc_dointvec_minmax, 6417 + .proc_handler = proc_dou8vec_minmax, 6418 6418 .extra1 = SYSCTL_ZERO, 6419 6419 .extra2 = SYSCTL_ONE, 6420 6420 },
+10 -5
net/mac80211/he.c
··· 3 3 * HE handling 4 4 * 5 5 * Copyright(c) 2017 Intel Deutschland GmbH 6 - * Copyright(c) 2019 - 2022 Intel Corporation 6 + * Copyright(c) 2019 - 2023 Intel Corporation 7 7 */ 8 8 9 9 #include "ieee80211_i.h" ··· 114 114 struct link_sta_info *link_sta) 115 115 { 116 116 struct ieee80211_sta_he_cap *he_cap = &link_sta->pub->he_cap; 117 + const struct ieee80211_sta_he_cap *own_he_cap_ptr; 117 118 struct ieee80211_sta_he_cap own_he_cap; 118 119 struct ieee80211_he_cap_elem *he_cap_ie_elem = (void *)he_cap_ie; 119 120 u8 he_ppe_size; ··· 124 123 125 124 memset(he_cap, 0, sizeof(*he_cap)); 126 125 127 - if (!he_cap_ie || 128 - !ieee80211_get_he_iftype_cap(sband, 129 - ieee80211_vif_type_p2p(&sdata->vif))) 126 + if (!he_cap_ie) 130 127 return; 131 128 132 - own_he_cap = sband->iftype_data->he_cap; 129 + own_he_cap_ptr = 130 + ieee80211_get_he_iftype_cap(sband, 131 + ieee80211_vif_type_p2p(&sdata->vif)); 132 + if (!own_he_cap_ptr) 133 + return; 134 + 135 + own_he_cap = *own_he_cap_ptr; 133 136 134 137 /* Make sure size is OK */ 135 138 mcs_nss_size = ieee80211_he_mcs_nss_size(he_cap_ie_elem);
+7 -1
net/mac80211/mlme.c
··· 1217 1217 const u16 *inner) 1218 1218 { 1219 1219 unsigned int skb_len = skb->len; 1220 + bool at_extension = false; 1220 1221 bool added = false; 1221 1222 int i, j; 1222 1223 u8 *len, *list_len = NULL; ··· 1229 1228 for (i = 0; i < PRESENT_ELEMS_MAX && outer[i]; i++) { 1230 1229 u16 elem = outer[i]; 1231 1230 bool have_inner = false; 1232 - bool at_extension = false; 1233 1231 1234 1232 /* should at least be sorted in the sense of normal -> ext */ 1235 1233 WARN_ON(at_extension && elem < PRESENT_ELEM_EXT_OFFS); ··· 1257 1257 } 1258 1258 *list_len += 1; 1259 1259 skb_put_u8(skb, (u8)elem); 1260 + added = true; 1260 1261 } 1261 1262 1263 + /* if we added a list but no extension list, make a zero-len one */ 1264 + if (added && (!at_extension || !list_len)) 1265 + skb_put_u8(skb, 0); 1266 + 1267 + /* if nothing added remove extension element completely */ 1262 1268 if (!added) 1263 1269 skb_trim(skb, skb_len); 1264 1270 else
+3 -1
net/mac80211/rx.c
··· 4965 4965 } 4966 4966 4967 4967 if (unlikely(rx->sta && rx->sta->sta.mlo) && 4968 - is_unicast_ether_addr(hdr->addr1)) { 4968 + is_unicast_ether_addr(hdr->addr1) && 4969 + !ieee80211_is_probe_resp(hdr->frame_control) && 4970 + !ieee80211_is_beacon(hdr->frame_control)) { 4969 4971 /* translate to MLD addresses */ 4970 4972 if (ether_addr_equal(link->conf->addr, hdr->addr1)) 4971 4973 ether_addr_copy(hdr->addr1, rx->sdata->vif.addr);
+1 -1
net/mac80211/tx.c
··· 5528 5528 { 5529 5529 struct ieee80211_ema_beacons *ema_beacons = NULL; 5530 5530 5531 - WARN_ON(__ieee80211_beacon_get(hw, vif, NULL, false, link_id, 0, 5531 + WARN_ON(__ieee80211_beacon_get(hw, vif, NULL, true, link_id, 0, 5532 5532 &ema_beacons)); 5533 5533 5534 5534 return ema_beacons;
+19 -4
net/mptcp/pm.c
··· 89 89 unsigned int subflows_max; 90 90 int ret = 0; 91 91 92 - if (mptcp_pm_is_userspace(msk)) 93 - return mptcp_userspace_pm_active(msk); 92 + if (mptcp_pm_is_userspace(msk)) { 93 + if (mptcp_userspace_pm_active(msk)) { 94 + spin_lock_bh(&pm->lock); 95 + pm->subflows++; 96 + spin_unlock_bh(&pm->lock); 97 + return true; 98 + } 99 + return false; 100 + } 94 101 95 102 subflows_max = mptcp_pm_get_subflows_max(msk); 96 103 ··· 190 183 struct mptcp_pm_data *pm = &msk->pm; 191 184 bool update_subflows; 192 185 193 - update_subflows = (subflow->request_join || subflow->mp_join) && 194 - mptcp_pm_is_kernel(msk); 186 + update_subflows = subflow->request_join || subflow->mp_join; 187 + if (mptcp_pm_is_userspace(msk)) { 188 + if (update_subflows) { 189 + spin_lock_bh(&pm->lock); 190 + pm->subflows--; 191 + spin_unlock_bh(&pm->lock); 192 + } 193 + return; 194 + } 195 + 195 196 if (!READ_ONCE(pm->work_pending) && !update_subflows) 196 197 return; 197 198
+18
net/mptcp/pm_netlink.c
··· 1558 1558 return ret; 1559 1559 } 1560 1560 1561 + void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list) 1562 + { 1563 + struct mptcp_rm_list alist = { .nr = 0 }; 1564 + struct mptcp_pm_addr_entry *entry; 1565 + 1566 + list_for_each_entry(entry, rm_list, list) { 1567 + remove_anno_list_by_saddr(msk, &entry->addr); 1568 + if (alist.nr < MPTCP_RM_IDS_MAX) 1569 + alist.ids[alist.nr++] = entry->addr.id; 1570 + } 1571 + 1572 + if (alist.nr) { 1573 + spin_lock_bh(&msk->pm.lock); 1574 + mptcp_pm_remove_addr(msk, &alist); 1575 + spin_unlock_bh(&msk->pm.lock); 1576 + } 1577 + } 1578 + 1561 1579 void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk, 1562 1580 struct list_head *rm_list) 1563 1581 {
+47 -1
net/mptcp/pm_userspace.c
··· 69 69 MPTCP_PM_MAX_ADDR_ID + 1, 70 70 1); 71 71 list_add_tail_rcu(&e->list, &msk->pm.userspace_pm_local_addr_list); 72 + msk->pm.local_addr_used++; 72 73 ret = e->addr.id; 73 74 } else if (match) { 74 75 ret = entry->addr.id; ··· 78 77 append_err: 79 78 spin_unlock_bh(&msk->pm.lock); 80 79 return ret; 80 + } 81 + 82 + /* If the subflow is closed from the other peer (not via a 83 + * subflow destroy command then), we want to keep the entry 84 + * not to assign the same ID to another address and to be 85 + * able to send RM_ADDR after the removal of the subflow. 86 + */ 87 + static int mptcp_userspace_pm_delete_local_addr(struct mptcp_sock *msk, 88 + struct mptcp_pm_addr_entry *addr) 89 + { 90 + struct mptcp_pm_addr_entry *entry, *tmp; 91 + 92 + list_for_each_entry_safe(entry, tmp, &msk->pm.userspace_pm_local_addr_list, list) { 93 + if (mptcp_addresses_equal(&entry->addr, &addr->addr, false)) { 94 + /* TODO: a refcount is needed because the entry can 95 + * be used multiple times (e.g. fullmesh mode). 
96 + */ 97 + list_del_rcu(&entry->list); 98 + kfree(entry); 99 + msk->pm.local_addr_used--; 100 + return 0; 101 + } 102 + } 103 + 104 + return -EINVAL; 81 105 } 82 106 83 107 int mptcp_userspace_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, ··· 197 171 spin_lock_bh(&msk->pm.lock); 198 172 199 173 if (mptcp_pm_alloc_anno_list(msk, &addr_val)) { 174 + msk->pm.add_addr_signaled++; 200 175 mptcp_pm_announce_addr(msk, &addr_val.addr, false); 201 176 mptcp_pm_nl_addr_send_ack(msk); 202 177 } ··· 259 232 260 233 list_move(&match->list, &free_list); 261 234 262 - mptcp_pm_remove_addrs_and_subflows(msk, &free_list); 235 + mptcp_pm_remove_addrs(msk, &free_list); 263 236 264 237 release_sock((struct sock *)msk); 265 238 ··· 278 251 struct nlattr *raddr = info->attrs[MPTCP_PM_ATTR_ADDR_REMOTE]; 279 252 struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN]; 280 253 struct nlattr *laddr = info->attrs[MPTCP_PM_ATTR_ADDR]; 254 + struct mptcp_pm_addr_entry local = { 0 }; 281 255 struct mptcp_addr_info addr_r; 282 256 struct mptcp_addr_info addr_l; 283 257 struct mptcp_sock *msk; ··· 330 302 goto create_err; 331 303 } 332 304 305 + local.addr = addr_l; 306 + err = mptcp_userspace_pm_append_new_local_addr(msk, &local); 307 + if (err < 0) { 308 + GENL_SET_ERR_MSG(info, "did not match address and id"); 309 + goto create_err; 310 + } 311 + 333 312 lock_sock(sk); 334 313 335 314 err = __mptcp_subflow_connect(sk, &addr_l, &addr_r); 336 315 337 316 release_sock(sk); 317 + 318 + spin_lock_bh(&msk->pm.lock); 319 + if (err) 320 + mptcp_userspace_pm_delete_local_addr(msk, &local); 321 + else 322 + msk->pm.subflows++; 323 + spin_unlock_bh(&msk->pm.lock); 338 324 339 325 create_err: 340 326 sock_put((struct sock *)msk); ··· 462 420 ssk = mptcp_nl_find_ssk(msk, &addr_l, &addr_r); 463 421 if (ssk) { 464 422 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); 423 + struct mptcp_pm_addr_entry entry = { .addr = addr_l }; 465 424 425 + spin_lock_bh(&msk->pm.lock); 426 + 
mptcp_userspace_pm_delete_local_addr(msk, &entry); 427 + spin_unlock_bh(&msk->pm.lock); 466 428 mptcp_subflow_shutdown(sk, ssk, RCV_SHUTDOWN | SEND_SHUTDOWN); 467 429 mptcp_close_ssk(sk, ssk, subflow); 468 430 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RMSUBFLOW);
+1
net/mptcp/protocol.h
··· 832 832 bool echo); 833 833 int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list); 834 834 int mptcp_pm_remove_subflow(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list); 835 + void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list); 835 836 void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk, 836 837 struct list_head *rm_list); 837 838
+8
net/netfilter/ipset/ip_set_core.c
··· 1694 1694 bool eexist = flags & IPSET_FLAG_EXIST, retried = false; 1695 1695 1696 1696 do { 1697 + if (retried) { 1698 + __ip_set_get(set); 1699 + nfnl_unlock(NFNL_SUBSYS_IPSET); 1700 + cond_resched(); 1701 + nfnl_lock(NFNL_SUBSYS_IPSET); 1702 + __ip_set_put(set); 1703 + } 1704 + 1697 1705 ip_set_lock(set); 1698 1706 ret = set->variant->uadt(set, tb, adt, &lineno, flags, retried); 1699 1707 ip_set_unlock(set);
+3
net/netfilter/nf_conntrack_core.c
··· 2260 2260 return 0; 2261 2261 2262 2262 helper = rcu_dereference(help->helper); 2263 + if (!helper) 2264 + return 0; 2265 + 2263 2266 if (!(helper->flags & NF_CT_HELPER_F_USERSPACE)) 2264 2267 return 0; 2265 2268
+3 -1
net/netfilter/nf_tables_api.c
··· 1600 1600 1601 1601 if (nft_base_chain_netdev(family, ops->hooknum)) { 1602 1602 nest_devs = nla_nest_start_noflag(skb, NFTA_HOOK_DEVS); 1603 + if (!nest_devs) 1604 + goto nla_put_failure; 1603 1605 1604 1606 if (!hook_list) 1605 1607 hook_list = &basechain->hook_list; ··· 9010 9008 continue; 9011 9009 } 9012 9010 9013 - if (WARN_ON_ONCE(data + expr->ops->size > data_boundary)) 9011 + if (WARN_ON_ONCE(data + size + expr->ops->size > data_boundary)) 9014 9012 return -ENOMEM; 9015 9013 9016 9014 memcpy(data + size, expr, expr->ops->size);
+1 -1
net/netfilter/nft_bitwise.c
··· 323 323 dreg = priv->dreg; 324 324 regcount = DIV_ROUND_UP(priv->len, NFT_REG32_SIZE); 325 325 for (i = 0; i < regcount; i++, dreg++) 326 - track->regs[priv->dreg].bitwise = expr; 326 + track->regs[dreg].bitwise = expr; 327 327 328 328 return false; 329 329 }
-19
net/openvswitch/datapath.c
··· 236 236 /* First drop references to device. */ 237 237 hlist_del_rcu(&p->dp_hash_node); 238 238 239 - /* Free percpu memory */ 240 - free_percpu(p->upcall_stats); 241 - 242 239 /* Then destroy it. */ 243 240 ovs_vport_del(p); 244 241 } ··· 1855 1858 goto err_destroy_portids; 1856 1859 } 1857 1860 1858 - vport->upcall_stats = netdev_alloc_pcpu_stats(struct vport_upcall_stats_percpu); 1859 - if (!vport->upcall_stats) { 1860 - err = -ENOMEM; 1861 - goto err_destroy_vport; 1862 - } 1863 - 1864 1861 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid, 1865 1862 info->snd_seq, 0, OVS_DP_CMD_NEW); 1866 1863 BUG_ON(err < 0); ··· 1867 1876 ovs_notify(&dp_datapath_genl_family, reply, info); 1868 1877 return 0; 1869 1878 1870 - err_destroy_vport: 1871 - ovs_dp_detach_port(vport); 1872 1879 err_destroy_portids: 1873 1880 kfree(rcu_dereference_raw(dp->upcall_portids)); 1874 1881 err_unlock_and_destroy_meters: ··· 2311 2322 goto exit_unlock_free; 2312 2323 } 2313 2324 2314 - vport->upcall_stats = netdev_alloc_pcpu_stats(struct vport_upcall_stats_percpu); 2315 - if (!vport->upcall_stats) { 2316 - err = -ENOMEM; 2317 - goto exit_unlock_free_vport; 2318 - } 2319 - 2320 2325 err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info), 2321 2326 info->snd_portid, info->snd_seq, 0, 2322 2327 OVS_VPORT_CMD_NEW, GFP_KERNEL); ··· 2328 2345 ovs_notify(&dp_vport_genl_family, reply, info); 2329 2346 return 0; 2330 2347 2331 - exit_unlock_free_vport: 2332 - ovs_dp_detach_port(vport); 2333 2348 exit_unlock_free: 2334 2349 ovs_unlock(); 2335 2350 kfree_skb(reply);
+16 -2
net/openvswitch/vport.c
··· 124 124 { 125 125 struct vport *vport; 126 126 size_t alloc_size; 127 + int err; 127 128 128 129 alloc_size = sizeof(struct vport); 129 130 if (priv_size) { ··· 136 135 if (!vport) 137 136 return ERR_PTR(-ENOMEM); 138 137 138 + vport->upcall_stats = netdev_alloc_pcpu_stats(struct vport_upcall_stats_percpu); 139 + if (!vport->upcall_stats) { 140 + err = -ENOMEM; 141 + goto err_kfree_vport; 142 + } 143 + 139 144 vport->dp = parms->dp; 140 145 vport->port_no = parms->port_no; 141 146 vport->ops = ops; 142 147 INIT_HLIST_NODE(&vport->dp_hash_node); 143 148 144 149 if (ovs_vport_set_upcall_portids(vport, parms->upcall_portids)) { 145 - kfree(vport); 146 - return ERR_PTR(-EINVAL); 150 + err = -EINVAL; 151 + goto err_free_percpu; 147 152 } 148 153 149 154 return vport; 155 + 156 + err_free_percpu: 157 + free_percpu(vport->upcall_stats); 158 + err_kfree_vport: 159 + kfree(vport); 160 + return ERR_PTR(err); 150 161 } 151 162 EXPORT_SYMBOL_GPL(ovs_vport_alloc); 152 163 ··· 178 165 * it is safe to use raw dereference. 179 166 */ 180 167 kfree(rcu_dereference_raw(vport->upcall_portids)); 168 + free_percpu(vport->upcall_stats); 181 169 kfree(vport); 182 170 } 183 171 EXPORT_SYMBOL_GPL(ovs_vport_free);
+5 -5
net/sched/act_police.c
··· 357 357 opt.burst = PSCHED_NS2TICKS(p->tcfp_burst); 358 358 if (p->rate_present) { 359 359 psched_ratecfg_getrate(&opt.rate, &p->rate); 360 - if ((police->params->rate.rate_bytes_ps >= (1ULL << 32)) && 360 + if ((p->rate.rate_bytes_ps >= (1ULL << 32)) && 361 361 nla_put_u64_64bit(skb, TCA_POLICE_RATE64, 362 - police->params->rate.rate_bytes_ps, 362 + p->rate.rate_bytes_ps, 363 363 TCA_POLICE_PAD)) 364 364 goto nla_put_failure; 365 365 } 366 366 if (p->peak_present) { 367 367 psched_ratecfg_getrate(&opt.peakrate, &p->peak); 368 - if ((police->params->peak.rate_bytes_ps >= (1ULL << 32)) && 368 + if ((p->peak.rate_bytes_ps >= (1ULL << 32)) && 369 369 nla_put_u64_64bit(skb, TCA_POLICE_PEAKRATE64, 370 - police->params->peak.rate_bytes_ps, 370 + p->peak.rate_bytes_ps, 371 371 TCA_POLICE_PAD)) 372 372 goto nla_put_failure; 373 373 } 374 374 if (p->pps_present) { 375 375 if (nla_put_u64_64bit(skb, TCA_POLICE_PKTRATE64, 376 - police->params->ppsrate.rate_pkts_ps, 376 + p->ppsrate.rate_pkts_ps, 377 377 TCA_POLICE_PAD)) 378 378 goto nla_put_failure; 379 379 if (nla_put_u64_64bit(skb, TCA_POLICE_PKTBURST64,
+1 -2
net/sched/cls_api.c
··· 43 43 #include <net/flow_offload.h> 44 44 #include <net/tc_wrapper.h> 45 45 46 - extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1]; 47 - 48 46 /* The list of all installed classifier types */ 49 47 static LIST_HEAD(tcf_proto_base); 50 48 ··· 2950 2952 return PTR_ERR(ops); 2951 2953 if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) { 2952 2954 NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier"); 2955 + module_put(ops->owner); 2953 2956 return -EOPNOTSUPP; 2954 2957 } 2955 2958
+18 -10
net/sched/sch_api.c
··· 309 309 310 310 if (dev_ingress_queue(dev)) 311 311 q = qdisc_match_from_root( 312 - dev_ingress_queue(dev)->qdisc_sleeping, 312 + rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping), 313 313 handle); 314 314 out: 315 315 return q; ··· 328 328 329 329 nq = dev_ingress_queue_rcu(dev); 330 330 if (nq) 331 - q = qdisc_match_from_root(nq->qdisc_sleeping, handle); 331 + q = qdisc_match_from_root(rcu_dereference(nq->qdisc_sleeping), 332 + handle); 332 333 out: 333 334 return q; 334 335 } ··· 635 634 void qdisc_watchdog_schedule_range_ns(struct qdisc_watchdog *wd, u64 expires, 636 635 u64 delta_ns) 637 636 { 638 - if (test_bit(__QDISC_STATE_DEACTIVATED, 639 - &qdisc_root_sleeping(wd->qdisc)->state)) 637 + bool deactivated; 638 + 639 + rcu_read_lock(); 640 + deactivated = test_bit(__QDISC_STATE_DEACTIVATED, 641 + &qdisc_root_sleeping(wd->qdisc)->state); 642 + rcu_read_unlock(); 643 + if (deactivated) 640 644 return; 641 645 642 646 if (hrtimer_is_queued(&wd->timer)) { ··· 1484 1478 } 1485 1479 q = qdisc_leaf(p, clid); 1486 1480 } else if (dev_ingress_queue(dev)) { 1487 - q = dev_ingress_queue(dev)->qdisc_sleeping; 1481 + q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping); 1488 1482 } 1489 1483 } else { 1490 1484 q = rtnl_dereference(dev->qdisc); ··· 1570 1564 } 1571 1565 q = qdisc_leaf(p, clid); 1572 1566 } else if (dev_ingress_queue_create(dev)) { 1573 - q = dev_ingress_queue(dev)->qdisc_sleeping; 1567 + q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping); 1574 1568 } 1575 1569 } else { 1576 1570 q = rtnl_dereference(dev->qdisc); ··· 1811 1805 1812 1806 dev_queue = dev_ingress_queue(dev); 1813 1807 if (dev_queue && 1814 - tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb, 1815 - &q_idx, s_q_idx, false, 1808 + tc_dump_qdisc_root(rtnl_dereference(dev_queue->qdisc_sleeping), 1809 + skb, cb, &q_idx, s_q_idx, false, 1816 1810 tca[TCA_DUMP_INVISIBLE]) < 0) 1817 1811 goto done; 1818 1812 ··· 2255 2249 2256 2250 dev_queue = dev_ingress_queue(dev); 
2257 2251 if (dev_queue && 2258 - tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb, 2259 - &t, s_t, false) < 0) 2252 + tc_dump_tclass_root(rtnl_dereference(dev_queue->qdisc_sleeping), 2253 + skb, tcm, cb, &t, s_t, false) < 0) 2260 2254 goto done; 2261 2255 2262 2256 done: ··· 2308 2302 .exit = psched_net_exit, 2309 2303 }; 2310 2304 2305 + #if IS_ENABLED(CONFIG_RETPOLINE) 2311 2306 DEFINE_STATIC_KEY_FALSE(tc_skip_wrapper); 2307 + #endif 2312 2308 2313 2309 static int __init pktsched_init(void) 2314 2310 {
+9 -1
net/sched/sch_fq_pie.c
··· 201 201 return NET_XMIT_CN; 202 202 } 203 203 204 + static struct netlink_range_validation fq_pie_q_range = { 205 + .min = 1, 206 + .max = 1 << 20, 207 + }; 208 + 204 209 static const struct nla_policy fq_pie_policy[TCA_FQ_PIE_MAX + 1] = { 205 210 [TCA_FQ_PIE_LIMIT] = {.type = NLA_U32}, 206 211 [TCA_FQ_PIE_FLOWS] = {.type = NLA_U32}, ··· 213 208 [TCA_FQ_PIE_TUPDATE] = {.type = NLA_U32}, 214 209 [TCA_FQ_PIE_ALPHA] = {.type = NLA_U32}, 215 210 [TCA_FQ_PIE_BETA] = {.type = NLA_U32}, 216 - [TCA_FQ_PIE_QUANTUM] = {.type = NLA_U32}, 211 + [TCA_FQ_PIE_QUANTUM] = 212 + NLA_POLICY_FULL_RANGE(NLA_U32, &fq_pie_q_range), 217 213 [TCA_FQ_PIE_MEMORY_LIMIT] = {.type = NLA_U32}, 218 214 [TCA_FQ_PIE_ECN_PROB] = {.type = NLA_U32}, 219 215 [TCA_FQ_PIE_ECN] = {.type = NLA_U32}, ··· 379 373 spinlock_t *root_lock; /* to lock qdisc for probability calculations */ 380 374 u32 idx; 381 375 376 + rcu_read_lock(); 382 377 root_lock = qdisc_lock(qdisc_root_sleeping(sch)); 383 378 spin_lock(root_lock); 384 379 ··· 392 385 mod_timer(&q->adapt_timer, jiffies + q->p_params.tupdate); 393 386 394 387 spin_unlock(root_lock); 388 + rcu_read_unlock(); 395 389 } 396 390 397 391 static int fq_pie_init(struct Qdisc *sch, struct nlattr *opt,
+15 -15
net/sched/sch_generic.c
··· 648 648 649 649 static struct netdev_queue noop_netdev_queue = { 650 650 RCU_POINTER_INITIALIZER(qdisc, &noop_qdisc), 651 - .qdisc_sleeping = &noop_qdisc, 651 + RCU_POINTER_INITIALIZER(qdisc_sleeping, &noop_qdisc), 652 652 }; 653 653 654 654 struct Qdisc noop_qdisc = { ··· 1103 1103 struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue, 1104 1104 struct Qdisc *qdisc) 1105 1105 { 1106 - struct Qdisc *oqdisc = dev_queue->qdisc_sleeping; 1106 + struct Qdisc *oqdisc = rtnl_dereference(dev_queue->qdisc_sleeping); 1107 1107 spinlock_t *root_lock; 1108 1108 1109 1109 root_lock = qdisc_lock(oqdisc); ··· 1112 1112 /* ... and graft new one */ 1113 1113 if (qdisc == NULL) 1114 1114 qdisc = &noop_qdisc; 1115 - dev_queue->qdisc_sleeping = qdisc; 1115 + rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc); 1116 1116 rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc); 1117 1117 1118 1118 spin_unlock_bh(root_lock); ··· 1125 1125 struct netdev_queue *dev_queue, 1126 1126 void *_qdisc_default) 1127 1127 { 1128 - struct Qdisc *qdisc = dev_queue->qdisc_sleeping; 1128 + struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping); 1129 1129 struct Qdisc *qdisc_default = _qdisc_default; 1130 1130 1131 1131 if (qdisc) { 1132 1132 rcu_assign_pointer(dev_queue->qdisc, qdisc_default); 1133 - dev_queue->qdisc_sleeping = qdisc_default; 1133 + rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc_default); 1134 1134 1135 1135 qdisc_put(qdisc); 1136 1136 } ··· 1154 1154 1155 1155 if (!netif_is_multiqueue(dev)) 1156 1156 qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; 1157 - dev_queue->qdisc_sleeping = qdisc; 1157 + rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc); 1158 1158 } 1159 1159 1160 1160 static void attach_default_qdiscs(struct net_device *dev) ··· 1167 1167 if (!netif_is_multiqueue(dev) || 1168 1168 dev->priv_flags & IFF_NO_QUEUE) { 1169 1169 netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL); 1170 - qdisc = txq->qdisc_sleeping; 1170 + qdisc = 
rtnl_dereference(txq->qdisc_sleeping); 1171 1171 rcu_assign_pointer(dev->qdisc, qdisc); 1172 1172 qdisc_refcount_inc(qdisc); 1173 1173 } else { ··· 1186 1186 netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc); 1187 1187 dev->priv_flags |= IFF_NO_QUEUE; 1188 1188 netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL); 1189 - qdisc = txq->qdisc_sleeping; 1189 + qdisc = rtnl_dereference(txq->qdisc_sleeping); 1190 1190 rcu_assign_pointer(dev->qdisc, qdisc); 1191 1191 qdisc_refcount_inc(qdisc); 1192 1192 dev->priv_flags ^= IFF_NO_QUEUE; ··· 1202 1202 struct netdev_queue *dev_queue, 1203 1203 void *_need_watchdog) 1204 1204 { 1205 - struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping; 1205 + struct Qdisc *new_qdisc = rtnl_dereference(dev_queue->qdisc_sleeping); 1206 1206 int *need_watchdog_p = _need_watchdog; 1207 1207 1208 1208 if (!(new_qdisc->flags & TCQ_F_BUILTIN)) ··· 1272 1272 struct Qdisc *qdisc; 1273 1273 bool nolock; 1274 1274 1275 - qdisc = dev_queue->qdisc_sleeping; 1275 + qdisc = rtnl_dereference(dev_queue->qdisc_sleeping); 1276 1276 if (!qdisc) 1277 1277 return; 1278 1278 ··· 1303 1303 int val; 1304 1304 1305 1305 dev_queue = netdev_get_tx_queue(dev, i); 1306 - q = dev_queue->qdisc_sleeping; 1306 + q = rtnl_dereference(dev_queue->qdisc_sleeping); 1307 1307 1308 1308 root_lock = qdisc_lock(q); 1309 1309 spin_lock_bh(root_lock); ··· 1379 1379 static int qdisc_change_tx_queue_len(struct net_device *dev, 1380 1380 struct netdev_queue *dev_queue) 1381 1381 { 1382 - struct Qdisc *qdisc = dev_queue->qdisc_sleeping; 1382 + struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping); 1383 1383 const struct Qdisc_ops *ops = qdisc->ops; 1384 1384 1385 1385 if (ops->change_tx_queue_len) ··· 1404 1404 unsigned int i; 1405 1405 1406 1406 for (i = new_real_tx; i < dev->real_num_tx_queues; i++) { 1407 - qdisc = netdev_get_tx_queue(dev, i)->qdisc_sleeping; 1407 + qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc_sleeping); 1408 1408 
/* Only update the default qdiscs we created, 1409 1409 * qdiscs with handles are always hashed. 1410 1410 */ ··· 1412 1412 qdisc_hash_del(qdisc); 1413 1413 } 1414 1414 for (i = dev->real_num_tx_queues; i < new_real_tx; i++) { 1415 - qdisc = netdev_get_tx_queue(dev, i)->qdisc_sleeping; 1415 + qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc_sleeping); 1416 1416 if (qdisc != &noop_qdisc && !qdisc->handle) 1417 1417 qdisc_hash_add(qdisc, false); 1418 1418 } ··· 1449 1449 struct Qdisc *qdisc = _qdisc; 1450 1450 1451 1451 rcu_assign_pointer(dev_queue->qdisc, qdisc); 1452 - dev_queue->qdisc_sleeping = qdisc; 1452 + rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc); 1453 1453 } 1454 1454 1455 1455 void dev_init_scheduler(struct net_device *dev)
+4 -4
net/sched/sch_mq.c
··· 141 141 * qdisc totals are added at end. 142 142 */ 143 143 for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { 144 - qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping; 144 + qdisc = rtnl_dereference(netdev_get_tx_queue(dev, ntx)->qdisc_sleeping); 145 145 spin_lock_bh(qdisc_lock(qdisc)); 146 146 147 147 gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats, ··· 202 202 { 203 203 struct netdev_queue *dev_queue = mq_queue_get(sch, cl); 204 204 205 - return dev_queue->qdisc_sleeping; 205 + return rtnl_dereference(dev_queue->qdisc_sleeping); 206 206 } 207 207 208 208 static unsigned long mq_find(struct Qdisc *sch, u32 classid) ··· 221 221 222 222 tcm->tcm_parent = TC_H_ROOT; 223 223 tcm->tcm_handle |= TC_H_MIN(cl); 224 - tcm->tcm_info = dev_queue->qdisc_sleeping->handle; 224 + tcm->tcm_info = rtnl_dereference(dev_queue->qdisc_sleeping)->handle; 225 225 return 0; 226 226 } 227 227 ··· 230 230 { 231 231 struct netdev_queue *dev_queue = mq_queue_get(sch, cl); 232 232 233 - sch = dev_queue->qdisc_sleeping; 233 + sch = rtnl_dereference(dev_queue->qdisc_sleeping); 234 234 if (gnet_stats_copy_basic(d, sch->cpu_bstats, &sch->bstats, true) < 0 || 235 235 qdisc_qstats_copy(d, sch) < 0) 236 236 return -1;
+4 -4
net/sched/sch_mqprio.c
··· 557 557 * qdisc totals are added at end. 558 558 */ 559 559 for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { 560 - qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping; 560 + qdisc = rtnl_dereference(netdev_get_tx_queue(dev, ntx)->qdisc_sleeping); 561 561 spin_lock_bh(qdisc_lock(qdisc)); 562 562 563 563 gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats, ··· 604 604 if (!dev_queue) 605 605 return NULL; 606 606 607 - return dev_queue->qdisc_sleeping; 607 + return rtnl_dereference(dev_queue->qdisc_sleeping); 608 608 } 609 609 610 610 static unsigned long mqprio_find(struct Qdisc *sch, u32 classid) ··· 637 637 tcm->tcm_parent = (tc < 0) ? 0 : 638 638 TC_H_MAKE(TC_H_MAJ(sch->handle), 639 639 TC_H_MIN(tc + TC_H_MIN_PRIORITY)); 640 - tcm->tcm_info = dev_queue->qdisc_sleeping->handle; 640 + tcm->tcm_info = rtnl_dereference(dev_queue->qdisc_sleeping)->handle; 641 641 } else { 642 642 tcm->tcm_parent = TC_H_ROOT; 643 643 tcm->tcm_info = 0; ··· 693 693 } else { 694 694 struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl); 695 695 696 - sch = dev_queue->qdisc_sleeping; 696 + sch = rtnl_dereference(dev_queue->qdisc_sleeping); 697 697 if (gnet_stats_copy_basic(d, sch->cpu_bstats, 698 698 &sch->bstats, true) < 0 || 699 699 qdisc_qstats_copy(d, sch) < 0)
+4 -1
net/sched/sch_pie.c
··· 421 421 { 422 422 struct pie_sched_data *q = from_timer(q, t, adapt_timer); 423 423 struct Qdisc *sch = q->sch; 424 - spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch)); 424 + spinlock_t *root_lock; 425 425 426 + rcu_read_lock(); 427 + root_lock = qdisc_lock(qdisc_root_sleeping(sch)); 426 428 spin_lock(root_lock); 427 429 pie_calculate_probability(&q->params, &q->vars, sch->qstats.backlog); 428 430 ··· 432 430 if (q->params.tupdate) 433 431 mod_timer(&q->adapt_timer, jiffies + q->params.tupdate); 434 432 spin_unlock(root_lock); 433 + rcu_read_unlock(); 435 434 } 436 435 437 436 static int pie_init(struct Qdisc *sch, struct nlattr *opt,
+4 -1
net/sched/sch_red.c
··· 321 321 { 322 322 struct red_sched_data *q = from_timer(q, t, adapt_timer); 323 323 struct Qdisc *sch = q->sch; 324 - spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch)); 324 + spinlock_t *root_lock; 325 325 326 + rcu_read_lock(); 327 + root_lock = qdisc_lock(qdisc_root_sleeping(sch)); 326 328 spin_lock(root_lock); 327 329 red_adaptative_algo(&q->parms, &q->vars); 328 330 mod_timer(&q->adapt_timer, jiffies + HZ/2); 329 331 spin_unlock(root_lock); 332 + rcu_read_unlock(); 330 333 } 331 334 332 335 static int red_init(struct Qdisc *sch, struct nlattr *opt,
+4 -1
net/sched/sch_sfq.c
··· 606 606 { 607 607 struct sfq_sched_data *q = from_timer(q, t, perturb_timer); 608 608 struct Qdisc *sch = q->sch; 609 - spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch)); 609 + spinlock_t *root_lock; 610 610 siphash_key_t nkey; 611 611 612 612 get_random_bytes(&nkey, sizeof(nkey)); 613 + rcu_read_lock(); 614 + root_lock = qdisc_lock(qdisc_root_sleeping(sch)); 613 615 spin_lock(root_lock); 614 616 q->perturbation = nkey; 615 617 if (!q->filter_list && q->tail) ··· 620 618 621 619 if (q->perturb_period) 622 620 mod_timer(&q->perturb_timer, jiffies + q->perturb_period); 621 + rcu_read_unlock(); 623 622 } 624 623 625 624 static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
+4 -3
net/sched/sch_taprio.c
··· 2426 2426 if (!dev_queue) 2427 2427 return NULL; 2428 2428 2429 - return dev_queue->qdisc_sleeping; 2429 + return rtnl_dereference(dev_queue->qdisc_sleeping); 2430 2430 } 2431 2431 2432 2432 static unsigned long taprio_find(struct Qdisc *sch, u32 classid) ··· 2445 2445 2446 2446 tcm->tcm_parent = TC_H_ROOT; 2447 2447 tcm->tcm_handle |= TC_H_MIN(cl); 2448 - tcm->tcm_info = dev_queue->qdisc_sleeping->handle; 2448 + tcm->tcm_info = rtnl_dereference(dev_queue->qdisc_sleeping)->handle; 2449 2449 2450 2450 return 0; 2451 2451 } ··· 2456 2456 __acquires(d->lock) 2457 2457 { 2458 2458 struct netdev_queue *dev_queue = taprio_queue_get(sch, cl); 2459 - struct Qdisc *child = dev_queue->qdisc_sleeping; 2460 2459 struct tc_taprio_qopt_offload offload = { 2461 2460 .cmd = TAPRIO_CMD_TC_STATS, 2462 2461 .tc_stats = { 2463 2462 .tc = cl - 1, 2464 2463 }, 2465 2464 }; 2465 + struct Qdisc *child; 2466 2466 2467 + child = rtnl_dereference(dev_queue->qdisc_sleeping); 2467 2468 if (gnet_stats_copy_basic(d, NULL, &child->bstats, true) < 0 || 2468 2469 qdisc_qstats_copy(d, child) < 0) 2469 2470 return -1;
+1 -1
net/sched/sch_teql.c
··· 297 297 struct net_device *slave = qdisc_dev(q); 298 298 struct netdev_queue *slave_txq = netdev_get_tx_queue(slave, 0); 299 299 300 - if (slave_txq->qdisc_sleeping != q) 300 + if (rcu_access_pointer(slave_txq->qdisc_sleeping) != q) 301 301 continue; 302 302 if (netif_xmit_stopped(netdev_get_tx_queue(slave, subq)) || 303 303 !netif_running(slave)) {
+2 -2
net/smc/smc_llc.c
··· 851 851 addc_llc->num_rkeys = *num_rkeys_todo; 852 852 n = *num_rkeys_todo; 853 853 for (i = 0; i < min_t(u8, n, SMC_LLC_RKEYS_PER_CONT_MSG); i++) { 854 + while (*buf_pos && !(*buf_pos)->used) 855 + *buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos); 854 856 if (!*buf_pos) { 855 857 addc_llc->num_rkeys = addc_llc->num_rkeys - 856 858 *num_rkeys_todo; ··· 869 867 870 868 (*num_rkeys_todo)--; 871 869 *buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos); 872 - while (*buf_pos && !(*buf_pos)->used) 873 - *buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos); 874 870 } 875 871 addc_llc->hd.common.llc_type = SMC_LLC_ADD_LINK_CONT; 876 872 addc_llc->hd.length = sizeof(struct smc_llc_msg_add_link_cont);
+6 -18
net/sunrpc/svcsock.c
··· 1480 1480 return svsk; 1481 1481 } 1482 1482 1483 - bool svc_alien_sock(struct net *net, int fd) 1484 - { 1485 - int err; 1486 - struct socket *sock = sockfd_lookup(fd, &err); 1487 - bool ret = false; 1488 - 1489 - if (!sock) 1490 - goto out; 1491 - if (sock_net(sock->sk) != net) 1492 - ret = true; 1493 - sockfd_put(sock); 1494 - out: 1495 - return ret; 1496 - } 1497 - EXPORT_SYMBOL_GPL(svc_alien_sock); 1498 - 1499 1483 /** 1500 1484 * svc_addsock - add a listener socket to an RPC service 1501 1485 * @serv: pointer to RPC service to which to add a new listener 1486 + * @net: caller's network namespace 1502 1487 * @fd: file descriptor of the new listener 1503 1488 * @name_return: pointer to buffer to fill in with name of listener 1504 1489 * @len: size of the buffer ··· 1493 1508 * Name is terminated with '\n'. On error, returns a negative errno 1494 1509 * value. 1495 1510 */ 1496 - int svc_addsock(struct svc_serv *serv, const int fd, char *name_return, 1497 - const size_t len, const struct cred *cred) 1511 + int svc_addsock(struct svc_serv *serv, struct net *net, const int fd, 1512 + char *name_return, const size_t len, const struct cred *cred) 1498 1513 { 1499 1514 int err = 0; 1500 1515 struct socket *so = sockfd_lookup(fd, &err); ··· 1505 1520 1506 1521 if (!so) 1507 1522 return err; 1523 + err = -EINVAL; 1524 + if (sock_net(so->sk) != net) 1525 + goto out; 1508 1526 err = -EAFNOSUPPORT; 1509 1527 if ((so->sk->sk_family != PF_INET) && (so->sk->sk_family != PF_INET6)) 1510 1528 goto out;
+2 -2
net/wireless/core.c
··· 368 368 rdev = container_of(work, struct cfg80211_registered_device, 369 369 sched_scan_stop_wk); 370 370 371 - rtnl_lock(); 371 + wiphy_lock(&rdev->wiphy); 372 372 list_for_each_entry_safe(req, tmp, &rdev->sched_scan_req_list, list) { 373 373 if (req->nl_owner_dead) 374 374 cfg80211_stop_sched_scan_req(rdev, req, false); 375 375 } 376 - rtnl_unlock(); 376 + wiphy_unlock(&rdev->wiphy); 377 377 } 378 378 379 379 static void cfg80211_propagate_radar_detect_wk(struct work_struct *work)
+2
net/wireless/nl80211.c
··· 10723 10723 if (!info->attrs[NL80211_ATTR_MLD_ADDR]) 10724 10724 return -EINVAL; 10725 10725 req.ap_mld_addr = nla_data(info->attrs[NL80211_ATTR_MLD_ADDR]); 10726 + if (!is_valid_ether_addr(req.ap_mld_addr)) 10727 + return -EINVAL; 10726 10728 } 10727 10729 10728 10730 req.bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len,
+2 -2
net/wireless/reg.c
··· 2440 2440 struct wireless_dev *wdev; 2441 2441 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); 2442 2442 2443 - ASSERT_RTNL(); 2444 - 2443 + wiphy_lock(wiphy); 2445 2444 list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) 2446 2445 if (!reg_wdev_chan_valid(wiphy, wdev)) 2447 2446 cfg80211_leave(rdev, wdev); 2447 + wiphy_unlock(wiphy); 2448 2448 } 2449 2449 2450 2450 static void reg_check_chans_work(struct work_struct *work)
+5 -1
security/selinux/Makefile
··· 26 26 cmd_flask = $< $(obj)/flask.h $(obj)/av_permissions.h 27 27 28 28 targets += flask.h av_permissions.h 29 - $(obj)/flask.h $(obj)/av_permissions.h &: scripts/selinux/genheaders/genheaders FORCE 29 + # once make >= 4.3 is required, we can use grouped targets in the rule below, 30 + # which basically involves adding both headers and a '&' before the colon, see 31 + # the example below: 32 + # $(obj)/flask.h $(obj)/av_permissions.h &: scripts/selinux/... 33 + $(obj)/flask.h: scripts/selinux/genheaders/genheaders FORCE 30 34 $(call if_changed,flask)
+1
tools/include/uapi/linux/bpf.h
··· 1035 1035 BPF_TRACE_KPROBE_MULTI, 1036 1036 BPF_LSM_CGROUP, 1037 1037 BPF_STRUCT_OPS, 1038 + BPF_NETFILTER, 1038 1039 __MAX_BPF_ATTACH_TYPE 1039 1040 }; 1040 1041
+2 -1
tools/lib/bpf/libbpf.c
··· 117 117 [BPF_PERF_EVENT] = "perf_event", 118 118 [BPF_TRACE_KPROBE_MULTI] = "trace_kprobe_multi", 119 119 [BPF_STRUCT_OPS] = "struct_ops", 120 + [BPF_NETFILTER] = "netfilter", 120 121 }; 121 122 122 123 static const char * const link_type_name[] = { ··· 8737 8736 SEC_DEF("struct_ops+", STRUCT_OPS, 0, SEC_NONE), 8738 8737 SEC_DEF("struct_ops.s+", STRUCT_OPS, 0, SEC_SLEEPABLE), 8739 8738 SEC_DEF("sk_lookup", SK_LOOKUP, BPF_SK_LOOKUP, SEC_ATTACHABLE), 8740 - SEC_DEF("netfilter", NETFILTER, 0, SEC_NONE), 8739 + SEC_DEF("netfilter", NETFILTER, BPF_NETFILTER, SEC_NONE), 8741 8740 }; 8742 8741 8743 8742 static size_t custom_sec_def_cnt;
+2
tools/lib/bpf/libbpf_probes.c
··· 180 180 case BPF_PROG_TYPE_SK_REUSEPORT: 181 181 case BPF_PROG_TYPE_FLOW_DISSECTOR: 182 182 case BPF_PROG_TYPE_CGROUP_SYSCTL: 183 + break; 183 184 case BPF_PROG_TYPE_NETFILTER: 185 + opts.expected_attach_type = BPF_NETFILTER; 184 186 break; 185 187 default: 186 188 return -EOPNOTSUPP;
+31
tools/testing/selftests/bpf/prog_tests/inner_array_lookup.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + 3 + #include <test_progs.h> 4 + 5 + #include "inner_array_lookup.skel.h" 6 + 7 + void test_inner_array_lookup(void) 8 + { 9 + int map1_fd, err; 10 + int key = 3; 11 + int val = 1; 12 + struct inner_array_lookup *skel; 13 + 14 + skel = inner_array_lookup__open_and_load(); 15 + if (!ASSERT_OK_PTR(skel, "open_load_skeleton")) 16 + return; 17 + 18 + err = inner_array_lookup__attach(skel); 19 + if (!ASSERT_OK(err, "skeleton_attach")) 20 + goto cleanup; 21 + 22 + map1_fd = bpf_map__fd(skel->maps.inner_map1); 23 + bpf_map_update_elem(map1_fd, &key, &val, 0); 24 + 25 + /* Probe should have set the element at index 3 to 2 */ 26 + bpf_map_lookup_elem(map1_fd, &key, &val); 27 + ASSERT_EQ(val, 2, "value_is_2"); 28 + 29 + cleanup: 30 + inner_array_lookup__destroy(skel); 31 + }
+1 -1
tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
··· 209 209 err, errno); 210 210 goto err; 211 211 } 212 - ASSERT_EQ(optlen, 4, "Unexpected NETLINK_LIST_MEMBERSHIPS value"); 212 + ASSERT_EQ(optlen, 8, "Unexpected NETLINK_LIST_MEMBERSHIPS value"); 213 213 214 214 free(big_buf); 215 215 close(fd);
+45
tools/testing/selftests/bpf/progs/inner_array_lookup.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + 3 + #include <linux/bpf.h> 4 + #include <bpf/bpf_helpers.h> 5 + 6 + struct inner_map { 7 + __uint(type, BPF_MAP_TYPE_ARRAY); 8 + __uint(max_entries, 5); 9 + __type(key, int); 10 + __type(value, int); 11 + } inner_map1 SEC(".maps"); 12 + 13 + struct outer_map { 14 + __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS); 15 + __uint(max_entries, 3); 16 + __type(key, int); 17 + __array(values, struct inner_map); 18 + } outer_map1 SEC(".maps") = { 19 + .values = { 20 + [2] = &inner_map1, 21 + }, 22 + }; 23 + 24 + SEC("raw_tp/sys_enter") 25 + int handle__sys_enter(void *ctx) 26 + { 27 + int outer_key = 2, inner_key = 3; 28 + int *val; 29 + void *map; 30 + 31 + map = bpf_map_lookup_elem(&outer_map1, &outer_key); 32 + if (!map) 33 + return 1; 34 + 35 + val = bpf_map_lookup_elem(map, &inner_key); 36 + if (!val) 37 + return 1; 38 + 39 + if (*val == 1) 40 + *val = 2; 41 + 42 + return 0; 43 + } 44 + 45 + char _license[] SEC("license") = "GPL";
+28 -19
tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc
··· 9 9 exit_fail 10 10 } 11 11 12 - echo "Test event filter function name" 12 + sample_events() { 13 + echo > trace 14 + echo 1 > events/kmem/kmem_cache_free/enable 15 + echo 1 > tracing_on 16 + ls > /dev/null 17 + echo 0 > tracing_on 18 + echo 0 > events/kmem/kmem_cache_free/enable 19 + } 20 + 13 21 echo 0 > tracing_on 14 22 echo 0 > events/enable 15 - echo > trace 16 - echo 'call_site.function == exit_mmap' > events/kmem/kmem_cache_free/filter 17 - echo 1 > events/kmem/kmem_cache_free/enable 18 - echo 1 > tracing_on 19 - ls > /dev/null 20 - echo 0 > events/kmem/kmem_cache_free/enable 21 23 22 - hitcnt=`grep kmem_cache_free trace| grep exit_mmap | wc -l` 23 - misscnt=`grep kmem_cache_free trace| grep -v exit_mmap | wc -l` 24 + echo "Get the most frequently calling function" 25 + sample_events 26 + 27 + target_func=`cut -d: -f3 trace | sed 's/call_site=\([^+]*\)+0x.*/\1/' | sort | uniq -c | sort | tail -n 1 | sed 's/^[ 0-9]*//'` 28 + if [ -z "$target_func" ]; then 29 + exit_fail 30 + fi 31 + echo > trace 32 + 33 + echo "Test event filter function name" 34 + echo "call_site.function == $target_func" > events/kmem/kmem_cache_free/filter 35 + sample_events 36 + 37 + hitcnt=`grep kmem_cache_free trace| grep $target_func | wc -l` 38 + misscnt=`grep kmem_cache_free trace| grep -v $target_func | wc -l` 24 39 25 40 if [ $hitcnt -eq 0 ]; then 26 41 exit_fail ··· 45 30 exit_fail 46 31 fi 47 32 48 - address=`grep ' exit_mmap$' /proc/kallsyms | cut -d' ' -f1` 33 + address=`grep " ${target_func}\$" /proc/kallsyms | cut -d' ' -f1` 49 34 50 35 echo "Test event filter function address" 51 - echo 0 > tracing_on 52 - echo 0 > events/enable 53 - echo > trace 54 36 echo "call_site.function == 0x$address" > events/kmem/kmem_cache_free/filter 55 - echo 1 > events/kmem/kmem_cache_free/enable 56 - echo 1 > tracing_on 57 - sleep 1 58 - echo 0 > events/kmem/kmem_cache_free/enable 37 + sample_events 59 38 60 - hitcnt=`grep kmem_cache_free trace| grep exit_mmap | wc -l` 61 - misscnt=`grep 
kmem_cache_free trace| grep -v exit_mmap | wc -l` 39 + hitcnt=`grep kmem_cache_free trace| grep $target_func | wc -l` 40 + misscnt=`grep kmem_cache_free trace| grep -v $target_func | wc -l` 62 41 63 42 if [ $hitcnt -eq 0 ]; then 64 43 exit_fail
+1
tools/testing/selftests/kvm/Makefile
··· 116 116 TEST_GEN_PROGS_x86_64 += x86_64/amx_test 117 117 TEST_GEN_PROGS_x86_64 += x86_64/max_vcpuid_cap_test 118 118 TEST_GEN_PROGS_x86_64 += x86_64/triple_fault_event_test 119 + TEST_GEN_PROGS_x86_64 += x86_64/recalc_apic_map_test 119 120 TEST_GEN_PROGS_x86_64 += access_tracking_perf_test 120 121 TEST_GEN_PROGS_x86_64 += demand_paging_test 121 122 TEST_GEN_PROGS_x86_64 += dirty_log_test
+74
tools/testing/selftests/kvm/x86_64/recalc_apic_map_test.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Test edge cases and race conditions in kvm_recalculate_apic_map(). 4 + */ 5 + 6 + #include <sys/ioctl.h> 7 + #include <pthread.h> 8 + #include <time.h> 9 + 10 + #include "processor.h" 11 + #include "test_util.h" 12 + #include "kvm_util.h" 13 + #include "apic.h" 14 + 15 + #define TIMEOUT 5 /* seconds */ 16 + 17 + #define LAPIC_DISABLED 0 18 + #define LAPIC_X2APIC (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE) 19 + #define MAX_XAPIC_ID 0xff 20 + 21 + static void *race(void *arg) 22 + { 23 + struct kvm_lapic_state lapic = {}; 24 + struct kvm_vcpu *vcpu = arg; 25 + 26 + while (1) { 27 + /* Trigger kvm_recalculate_apic_map(). */ 28 + vcpu_ioctl(vcpu, KVM_SET_LAPIC, &lapic); 29 + pthread_testcancel(); 30 + } 31 + 32 + return NULL; 33 + } 34 + 35 + int main(void) 36 + { 37 + struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; 38 + struct kvm_vcpu *vcpuN; 39 + struct kvm_vm *vm; 40 + pthread_t thread; 41 + time_t t; 42 + int i; 43 + 44 + kvm_static_assert(KVM_MAX_VCPUS > MAX_XAPIC_ID); 45 + 46 + /* 47 + * Create the max number of vCPUs supported by selftests so that KVM 48 + * has decent amount of work to do when recalculating the map, i.e. to 49 + * make the problematic window large enough to hit. 50 + */ 51 + vm = vm_create_with_vcpus(KVM_MAX_VCPUS, NULL, vcpus); 52 + 53 + /* 54 + * Enable x2APIC on all vCPUs so that KVM doesn't bail from the recalc 55 + * due to vCPUs having aliased xAPIC IDs (truncated to 8 bits). 
56 + */ 57 + for (i = 0; i < KVM_MAX_VCPUS; i++) 58 + vcpu_set_msr(vcpus[i], MSR_IA32_APICBASE, LAPIC_X2APIC); 59 + 60 + ASSERT_EQ(pthread_create(&thread, NULL, race, vcpus[0]), 0); 61 + 62 + vcpuN = vcpus[KVM_MAX_VCPUS - 1]; 63 + for (t = time(NULL) + TIMEOUT; time(NULL) < t;) { 64 + vcpu_set_msr(vcpuN, MSR_IA32_APICBASE, LAPIC_X2APIC); 65 + vcpu_set_msr(vcpuN, MSR_IA32_APICBASE, LAPIC_DISABLED); 66 + } 67 + 68 + ASSERT_EQ(pthread_cancel(thread), 0); 69 + ASSERT_EQ(pthread_join(thread, NULL), 0); 70 + 71 + kvm_vm_free(vm); 72 + 73 + return 0; 74 + }
+3
tools/testing/selftests/net/.gitignore
··· 8 8 fin_ack_lat 9 9 gro 10 10 hwtstamp_config 11 + io_uring_zerocopy_tx 11 12 ioam6_parser 12 13 ip_defrag 14 + ip_local_port_range 13 15 ipsec 14 16 ipv6_flowlabel 15 17 ipv6_flowlabel_mgr ··· 28 26 reuseport_bpf_numa 29 27 reuseport_dualstack 30 28 rxtimestamp 29 + sctp_hello 31 30 sk_bind_sendto_listen 32 31 sk_connect_zero_addr 33 32 socket
+10 -1
tools/testing/selftests/net/mptcp/mptcp_join.sh
··· 867 867 sed -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q') 868 868 ip netns exec ${listener_ns} ./pm_nl_ctl ann $addr token $tk id $id 869 869 sleep 1 870 + sp=$(grep "type:10" "$evts_ns1" | 871 + sed -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q') 872 + da=$(grep "type:10" "$evts_ns1" | 873 + sed -n 's/.*\(daddr6:\)\([0-9a-f:.]*\).*$/\2/p;q') 874 + dp=$(grep "type:10" "$evts_ns1" | 875 + sed -n 's/.*\(dport:\)\([[:digit:]]*\).*$/\2/p;q') 870 876 ip netns exec ${listener_ns} ./pm_nl_ctl rem token $tk id $id 877 + ip netns exec ${listener_ns} ./pm_nl_ctl dsf lip "::ffff:$addr" \ 878 + lport $sp rip $da rport $dp token $tk 871 879 fi 872 880 873 881 counter=$((counter + 1)) ··· 941 933 sleep 1 942 934 sp=$(grep "type:10" "$evts_ns2" | 943 935 sed -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q') 936 + ip netns exec ${connector_ns} ./pm_nl_ctl rem token $tk id $id 944 937 ip netns exec ${connector_ns} ./pm_nl_ctl dsf lip $addr lport $sp \ 945 938 rip $da rport $dp token $tk 946 939 fi ··· 3157 3148 pm_nl_set_limits $ns1 0 1 3158 3149 run_tests $ns1 $ns2 10.0.1.1 0 0 userspace_1 slow 3159 3150 chk_join_nr 1 1 1 3160 - chk_rm_nr 0 1 3151 + chk_rm_nr 1 1 3161 3152 kill_events_pids 3162 3153 fi 3163 3154 }