Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Build failure in drivers/net/wwan/mhi_wwan_mbim.c:
add missing parameter (0, assuming we don't want buffer pre-alloc).

Conflict in drivers/net/dsa/sja1105/sja1105_main.c between:
589918df9322 ("net: dsa: sja1105: be stateless with FDB entries on SJA1105P/Q/R/S/SJA1110 too")
0fac6aa098ed ("net: dsa: sja1105: delete the best_effort_vlan_filtering mode")

Follow the instructions from the commit message of the former commit
- remove the if conditions. When looking at commit 589918df9322 ("net:
dsa: sja1105: be stateless with FDB entries on SJA1105P/Q/R/S/SJA1110 too")
note that the mask_iotag fields get removed by the following patch.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+1260 -490
+17
Documentation/networking/netdev-FAQ.rst
··· 228 228 gets overloaded very easily and netdev@vger really doesn't need more 229 229 traffic if we can help it. 230 230 231 + netdevsim is great, can I extend it for my out-of-tree tests? 232 + ------------------------------------------------------------- 233 + 234 + No, `netdevsim` is a test vehicle solely for upstream tests. 235 + (Please add your tests under tools/testing/selftests/.) 236 + 237 + We also give no guarantees that `netdevsim` won't change in the future 238 + in a way which would break what would normally be considered uAPI. 239 + 240 + Is netdevsim considered a "user" of an API? 241 + ------------------------------------------- 242 + 243 + Linux kernel has a long standing rule that no API should be added unless 244 + it has a real, in-tree user. Mock-ups and tests based on `netdevsim` are 245 + strongly encouraged when adding new APIs, but `netdevsim` in itself 246 + is **not** considered a use case/user. 247 + 231 248 Any other tips to help ensure my net/net-next patch gets OK'd? 232 249 -------------------------------------------------------------- 233 250 Attention to detail. Re-read your own work as if you were the
+4 -2
Documentation/networking/operstates.rst
··· 73 73 state (f.e. VLAN). 74 74 75 75 IF_OPER_TESTING (4): 76 - Unused in current kernel. 76 + Interface is in testing mode, for example executing driver self-tests 77 + or media (cable) test. It can't be used for normal traffic until tests 78 + complete. 77 79 78 80 IF_OPER_DORMANT (5): 79 81 Interface is L1 up, but waiting for an external event, f.e. for a ··· 113 111 114 112 Note that for certain kind of soft-devices, which are not managing any 115 113 real hardware, it is possible to set this bit from userspace. One 116 - should use TVL IFLA_CARRIER to do so. 114 + should use TLV IFLA_CARRIER to do so. 117 115 118 116 netif_carrier_ok() can be used to query that bit. 119 117
+1 -2
Makefile
··· 2 2 VERSION = 5 3 3 PATCHLEVEL = 14 4 4 SUBLEVEL = 0 5 - EXTRAVERSION = -rc3 5 + EXTRAVERSION = -rc4 6 6 NAME = Opossums on Parade 7 7 8 8 # *DOCUMENTATION* ··· 546 546 PHONY += scripts_basic 547 547 scripts_basic: 548 548 $(Q)$(MAKE) $(build)=scripts/basic 549 - $(Q)rm -f .tmp_quiet_recordmcount 550 549 551 550 PHONY += outputmakefile 552 551 ifdef building_out_of_srctree
+7
arch/powerpc/kernel/vdso64/Makefile
··· 27 27 28 28 ccflags-y := -shared -fno-common -fno-builtin -nostdlib \ 29 29 -Wl,-soname=linux-vdso64.so.1 -Wl,--hash-style=both 30 + 31 + # Go prior to 1.16.x assumes r30 is not clobbered by any VDSO code. That used to be true 32 + # by accident when the VDSO was hand-written asm code, but may not be now that the VDSO is 33 + # compiler generated. To avoid breaking Go tell GCC not to use r30. Impact on code 34 + # generation is minimal, it will just use r29 instead. 35 + ccflags-y += $(call cc-option, -ffixed-r30) 36 + 30 37 asflags-y := -D__VDSO64__ -s 31 38 32 39 targets += vdso64.lds
+1 -1
arch/powerpc/platforms/pseries/setup.c
··· 77 77 #include "../../../../drivers/pci/pci.h" 78 78 79 79 DEFINE_STATIC_KEY_FALSE(shared_processor); 80 - EXPORT_SYMBOL_GPL(shared_processor); 80 + EXPORT_SYMBOL(shared_processor); 81 81 82 82 int CMO_PrPSP = -1; 83 83 int CMO_SecPSP = -1;
+1
arch/s390/boot/compressed/Makefile
··· 11 11 KASAN_SANITIZE := n 12 12 13 13 obj-y := $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) info.o 14 + obj-$(CONFIG_KERNEL_ZSTD) += clz_ctz.o 14 15 obj-all := $(obj-y) piggy.o syms.o 15 16 targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 16 17 targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
+2
arch/s390/boot/compressed/clz_ctz.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #include "../../../../lib/clz_ctz.c"
+1 -1
arch/s390/configs/debug_defconfig
··· 335 335 CONFIG_L2TP_V3=y 336 336 CONFIG_L2TP_IP=m 337 337 CONFIG_L2TP_ETH=m 338 - CONFIG_BRIDGE=m 338 + CONFIG_BRIDGE=y 339 339 CONFIG_BRIDGE_MRP=y 340 340 CONFIG_VLAN_8021Q=m 341 341 CONFIG_VLAN_8021Q_GVRP=y
+1 -1
arch/s390/configs/defconfig
··· 325 325 CONFIG_L2TP_V3=y 326 326 CONFIG_L2TP_IP=m 327 327 CONFIG_L2TP_ETH=m 328 - CONFIG_BRIDGE=m 328 + CONFIG_BRIDGE=y 329 329 CONFIG_BRIDGE_MRP=y 330 330 CONFIG_VLAN_8021Q=m 331 331 CONFIG_VLAN_8021Q_GVRP=y
+1
arch/s390/kernel/vdso32/vdso32.lds.S
··· 51 51 52 52 .rela.dyn ALIGN(8) : { *(.rela.dyn) } 53 53 .got ALIGN(8) : { *(.got .toc) } 54 + .got.plt ALIGN(8) : { *(.got.plt) } 54 55 55 56 _end = .; 56 57 PROVIDE(end = .);
+1
arch/s390/kernel/vdso64/vdso64.lds.S
··· 51 51 52 52 .rela.dyn ALIGN(8) : { *(.rela.dyn) } 53 53 .got ALIGN(8) : { *(.got .toc) } 54 + .got.plt ALIGN(8) : { *(.got.plt) } 54 55 55 56 _end = .; 56 57 PROVIDE(end = .);
+14 -4
arch/x86/kvm/hyperv.c
··· 2016 2016 2017 2017 static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result) 2018 2018 { 2019 + trace_kvm_hv_hypercall_done(result); 2019 2020 kvm_hv_hypercall_set_result(vcpu, result); 2020 2021 ++vcpu->stat.hypercalls; 2021 2022 return kvm_skip_emulated_instruction(vcpu); ··· 2140 2139 2141 2140 int kvm_hv_hypercall(struct kvm_vcpu *vcpu) 2142 2141 { 2142 + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); 2143 2143 struct kvm_hv_hcall hc; 2144 2144 u64 ret = HV_STATUS_SUCCESS; 2145 2145 ··· 2175 2173 hc.rep_idx = (hc.param >> HV_HYPERCALL_REP_START_OFFSET) & 0xfff; 2176 2174 hc.rep = !!(hc.rep_cnt || hc.rep_idx); 2177 2175 2178 - if (hc.fast && is_xmm_fast_hypercall(&hc)) 2179 - kvm_hv_hypercall_read_xmm(&hc); 2180 - 2181 2176 trace_kvm_hv_hypercall(hc.code, hc.fast, hc.rep_cnt, hc.rep_idx, 2182 2177 hc.ingpa, hc.outgpa); 2183 2178 2184 - if (unlikely(!hv_check_hypercall_access(to_hv_vcpu(vcpu), hc.code))) { 2179 + if (unlikely(!hv_check_hypercall_access(hv_vcpu, hc.code))) { 2185 2180 ret = HV_STATUS_ACCESS_DENIED; 2186 2181 goto hypercall_complete; 2182 + } 2183 + 2184 + if (hc.fast && is_xmm_fast_hypercall(&hc)) { 2185 + if (unlikely(hv_vcpu->enforce_cpuid && 2186 + !(hv_vcpu->cpuid_cache.features_edx & 2187 + HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE))) { 2188 + kvm_queue_exception(vcpu, UD_VECTOR); 2189 + return 1; 2190 + } 2191 + 2192 + kvm_hv_hypercall_read_xmm(&hc); 2187 2193 } 2188 2194 2189 2195 switch (hc.code) {
+1 -1
arch/x86/kvm/mmu/mmu.c
··· 1644 1644 * aggregate version in order to make the slab shrinker 1645 1645 * faster 1646 1646 */ 1647 - static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, unsigned long nr) 1647 + static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, long nr) 1648 1648 { 1649 1649 kvm->arch.n_used_mmu_pages += nr; 1650 1650 percpu_counter_add(&kvm_total_used_mmu_pages, nr);
+25 -20
arch/x86/kvm/svm/sev.c
··· 64 64 unsigned int max_sev_asid; 65 65 static unsigned int min_sev_asid; 66 66 static unsigned long sev_me_mask; 67 + static unsigned int nr_asids; 67 68 static unsigned long *sev_asid_bitmap; 68 69 static unsigned long *sev_reclaim_asid_bitmap; 69 70 ··· 79 78 /* Called with the sev_bitmap_lock held, or on shutdown */ 80 79 static int sev_flush_asids(int min_asid, int max_asid) 81 80 { 82 - int ret, pos, error = 0; 81 + int ret, asid, error = 0; 83 82 84 83 /* Check if there are any ASIDs to reclaim before performing a flush */ 85 - pos = find_next_bit(sev_reclaim_asid_bitmap, max_asid, min_asid); 86 - if (pos >= max_asid) 84 + asid = find_next_bit(sev_reclaim_asid_bitmap, nr_asids, min_asid); 85 + if (asid > max_asid) 87 86 return -EBUSY; 88 87 89 88 /* ··· 116 115 117 116 /* The flush process will flush all reclaimable SEV and SEV-ES ASIDs */ 118 117 bitmap_xor(sev_asid_bitmap, sev_asid_bitmap, sev_reclaim_asid_bitmap, 119 - max_sev_asid); 120 - bitmap_zero(sev_reclaim_asid_bitmap, max_sev_asid); 118 + nr_asids); 119 + bitmap_zero(sev_reclaim_asid_bitmap, nr_asids); 121 120 122 121 return true; 123 122 } 124 123 125 124 static int sev_asid_new(struct kvm_sev_info *sev) 126 125 { 127 - int pos, min_asid, max_asid, ret; 126 + int asid, min_asid, max_asid, ret; 128 127 bool retry = true; 129 128 enum misc_res_type type; 130 129 ··· 144 143 * SEV-enabled guests must use asid from min_sev_asid to max_sev_asid. 145 144 * SEV-ES-enabled guest can use from 1 to min_sev_asid - 1. 146 145 */ 147 - min_asid = sev->es_active ? 0 : min_sev_asid - 1; 146 + min_asid = sev->es_active ? 1 : min_sev_asid; 148 147 max_asid = sev->es_active ? 
min_sev_asid - 1 : max_sev_asid; 149 148 again: 150 - pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_asid); 151 - if (pos >= max_asid) { 149 + asid = find_next_zero_bit(sev_asid_bitmap, max_asid + 1, min_asid); 150 + if (asid > max_asid) { 152 151 if (retry && __sev_recycle_asids(min_asid, max_asid)) { 153 152 retry = false; 154 153 goto again; ··· 158 157 goto e_uncharge; 159 158 } 160 159 161 - __set_bit(pos, sev_asid_bitmap); 160 + __set_bit(asid, sev_asid_bitmap); 162 161 163 162 mutex_unlock(&sev_bitmap_lock); 164 163 165 - return pos + 1; 164 + return asid; 166 165 e_uncharge: 167 166 misc_cg_uncharge(type, sev->misc_cg, 1); 168 167 put_misc_cg(sev->misc_cg); ··· 180 179 static void sev_asid_free(struct kvm_sev_info *sev) 181 180 { 182 181 struct svm_cpu_data *sd; 183 - int cpu, pos; 182 + int cpu; 184 183 enum misc_res_type type; 185 184 186 185 mutex_lock(&sev_bitmap_lock); 187 186 188 - pos = sev->asid - 1; 189 - __set_bit(pos, sev_reclaim_asid_bitmap); 187 + __set_bit(sev->asid, sev_reclaim_asid_bitmap); 190 188 191 189 for_each_possible_cpu(cpu) { 192 190 sd = per_cpu(svm_data, cpu); 193 - sd->sev_vmcbs[pos] = NULL; 191 + sd->sev_vmcbs[sev->asid] = NULL; 194 192 } 195 193 196 194 mutex_unlock(&sev_bitmap_lock); ··· 1857 1857 min_sev_asid = edx; 1858 1858 sev_me_mask = 1UL << (ebx & 0x3f); 1859 1859 1860 - /* Initialize SEV ASID bitmaps */ 1861 - sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL); 1860 + /* 1861 + * Initialize SEV ASID bitmaps. Allocate space for ASID 0 in the bitmap, 1862 + * even though it's never used, so that the bitmap is indexed by the 1863 + * actual ASID. 
1864 + */ 1865 + nr_asids = max_sev_asid + 1; 1866 + sev_asid_bitmap = bitmap_zalloc(nr_asids, GFP_KERNEL); 1862 1867 if (!sev_asid_bitmap) 1863 1868 goto out; 1864 1869 1865 - sev_reclaim_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL); 1870 + sev_reclaim_asid_bitmap = bitmap_zalloc(nr_asids, GFP_KERNEL); 1866 1871 if (!sev_reclaim_asid_bitmap) { 1867 1872 bitmap_free(sev_asid_bitmap); 1868 1873 sev_asid_bitmap = NULL; ··· 1912 1907 return; 1913 1908 1914 1909 /* No need to take sev_bitmap_lock, all VMs have been destroyed. */ 1915 - sev_flush_asids(0, max_sev_asid); 1910 + sev_flush_asids(1, max_sev_asid); 1916 1911 1917 1912 bitmap_free(sev_asid_bitmap); 1918 1913 bitmap_free(sev_reclaim_asid_bitmap); ··· 1926 1921 if (!sev_enabled) 1927 1922 return 0; 1928 1923 1929 - sd->sev_vmcbs = kcalloc(max_sev_asid + 1, sizeof(void *), GFP_KERNEL); 1924 + sd->sev_vmcbs = kcalloc(nr_asids, sizeof(void *), GFP_KERNEL); 1930 1925 if (!sd->sev_vmcbs) 1931 1926 return -ENOMEM; 1932 1927
+15
arch/x86/kvm/trace.h
··· 92 92 __entry->outgpa) 93 93 ); 94 94 95 + TRACE_EVENT(kvm_hv_hypercall_done, 96 + TP_PROTO(u64 result), 97 + TP_ARGS(result), 98 + 99 + TP_STRUCT__entry( 100 + __field(__u64, result) 101 + ), 102 + 103 + TP_fast_assign( 104 + __entry->result = result; 105 + ), 106 + 107 + TP_printk("result 0x%llx", __entry->result) 108 + ); 109 + 95 110 /* 96 111 * Tracepoint for Xen hypercall. 97 112 */
+11 -2
arch/x86/kvm/x86.c
··· 4358 4358 4359 4359 static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu) 4360 4360 { 4361 - return kvm_arch_interrupt_allowed(vcpu) && 4362 - kvm_cpu_accept_dm_intr(vcpu); 4361 + /* 4362 + * Do not cause an interrupt window exit if an exception 4363 + * is pending or an event needs reinjection; userspace 4364 + * might want to inject the interrupt manually using KVM_SET_REGS 4365 + * or KVM_SET_SREGS. For that to work, we must be at an 4366 + * instruction boundary and with no events half-injected. 4367 + */ 4368 + return (kvm_arch_interrupt_allowed(vcpu) && 4369 + kvm_cpu_accept_dm_intr(vcpu) && 4370 + !kvm_event_needs_reinjection(vcpu) && 4371 + !vcpu->arch.exception.pending); 4363 4372 } 4364 4373 4365 4374 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
+1 -1
drivers/bus/mhi/core/internal.h
··· 682 682 struct image_info *img_info); 683 683 void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl); 684 684 int mhi_prepare_channel(struct mhi_controller *mhi_cntrl, 685 - struct mhi_chan *mhi_chan); 685 + struct mhi_chan *mhi_chan, unsigned int flags); 686 686 int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl, 687 687 struct mhi_chan *mhi_chan); 688 688 void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
+6 -3
drivers/bus/mhi/core/main.c
··· 1430 1430 } 1431 1431 1432 1432 int mhi_prepare_channel(struct mhi_controller *mhi_cntrl, 1433 - struct mhi_chan *mhi_chan) 1433 + struct mhi_chan *mhi_chan, unsigned int flags) 1434 1434 { 1435 1435 int ret = 0; 1436 1436 struct device *dev = &mhi_chan->mhi_dev->dev; ··· 1455 1455 if (ret) 1456 1456 goto error_pm_state; 1457 1457 1458 + if (mhi_chan->dir == DMA_FROM_DEVICE) 1459 + mhi_chan->pre_alloc = !!(flags & MHI_CH_INBOUND_ALLOC_BUFS); 1460 + 1458 1461 /* Pre-allocate buffer for xfer ring */ 1459 1462 if (mhi_chan->pre_alloc) { 1460 1463 int nr_el = get_nr_avail_ring_elements(mhi_cntrl, ··· 1613 1610 } 1614 1611 1615 1612 /* Move channel to start state */ 1616 - int mhi_prepare_for_transfer(struct mhi_device *mhi_dev) 1613 + int mhi_prepare_for_transfer(struct mhi_device *mhi_dev, unsigned int flags) 1617 1614 { 1618 1615 int ret, dir; 1619 1616 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; ··· 1624 1621 if (!mhi_chan) 1625 1622 continue; 1626 1623 1627 - ret = mhi_prepare_channel(mhi_cntrl, mhi_chan); 1624 + ret = mhi_prepare_channel(mhi_cntrl, mhi_chan, flags); 1628 1625 if (ret) 1629 1626 goto error_open_chan; 1630 1627 }
+8 -1
drivers/clk/clk-devres.c
··· 92 92 } 93 93 EXPORT_SYMBOL_GPL(devm_clk_bulk_get_optional); 94 94 95 + static void devm_clk_bulk_release_all(struct device *dev, void *res) 96 + { 97 + struct clk_bulk_devres *devres = res; 98 + 99 + clk_bulk_put_all(devres->num_clks, devres->clks); 100 + } 101 + 95 102 int __must_check devm_clk_bulk_get_all(struct device *dev, 96 103 struct clk_bulk_data **clks) 97 104 { 98 105 struct clk_bulk_devres *devres; 99 106 int ret; 100 107 101 - devres = devres_alloc(devm_clk_bulk_release, 108 + devres = devres_alloc(devm_clk_bulk_release_all, 102 109 sizeof(*devres), GFP_KERNEL); 103 110 if (!devres) 104 111 return -ENOMEM;
+5 -5
drivers/clk/clk-stm32f4.c
··· 526 526 527 527 struct stm32f4_pll_post_div_data { 528 528 int idx; 529 - u8 pll_num; 529 + int pll_idx; 530 530 const char *name; 531 531 const char *parent; 532 532 u8 flag; ··· 557 557 558 558 #define MAX_POST_DIV 3 559 559 static const struct stm32f4_pll_post_div_data post_div_data[MAX_POST_DIV] = { 560 - { CLK_I2SQ_PDIV, PLL_I2S, "plli2s-q-div", "plli2s-q", 560 + { CLK_I2SQ_PDIV, PLL_VCO_I2S, "plli2s-q-div", "plli2s-q", 561 561 CLK_SET_RATE_PARENT, STM32F4_RCC_DCKCFGR, 0, 5, 0, NULL}, 562 562 563 - { CLK_SAIQ_PDIV, PLL_SAI, "pllsai-q-div", "pllsai-q", 563 + { CLK_SAIQ_PDIV, PLL_VCO_SAI, "pllsai-q-div", "pllsai-q", 564 564 CLK_SET_RATE_PARENT, STM32F4_RCC_DCKCFGR, 8, 5, 0, NULL }, 565 565 566 - { NO_IDX, PLL_SAI, "pllsai-r-div", "pllsai-r", CLK_SET_RATE_PARENT, 566 + { NO_IDX, PLL_VCO_SAI, "pllsai-r-div", "pllsai-r", CLK_SET_RATE_PARENT, 567 567 STM32F4_RCC_DCKCFGR, 16, 2, 0, post_divr_table }, 568 568 }; 569 569 ··· 1774 1774 post_div->width, 1775 1775 post_div->flag_div, 1776 1776 post_div->div_table, 1777 - clks[post_div->pll_num], 1777 + clks[post_div->pll_idx], 1778 1778 &stm32f4_clk_lock); 1779 1779 1780 1780 if (post_div->idx != NO_IDX)
+1
drivers/clk/hisilicon/Kconfig
··· 18 18 config COMMON_CLK_HI3559A 19 19 bool "Hi3559A Clock Driver" 20 20 depends on ARCH_HISI || COMPILE_TEST 21 + select RESET_HISI 21 22 default ARCH_HISI 22 23 help 23 24 Build the clock driver for hi3559a.
+1 -1
drivers/clk/qcom/clk-smd-rpm.c
··· 467 467 468 468 static struct clk_smd_rpm *msm8936_clks[] = { 469 469 [RPM_SMD_PCNOC_CLK] = &msm8916_pcnoc_clk, 470 - [RPM_SMD_PCNOC_A_CLK] = &msm8916_pcnoc_clk, 470 + [RPM_SMD_PCNOC_A_CLK] = &msm8916_pcnoc_a_clk, 471 471 [RPM_SMD_SNOC_CLK] = &msm8916_snoc_clk, 472 472 [RPM_SMD_SNOC_A_CLK] = &msm8916_snoc_a_clk, 473 473 [RPM_SMD_BIMC_CLK] = &msm8916_bimc_clk,
+10
drivers/clk/tegra/clk-sdmmc-mux.c
··· 194 194 gate_ops->disable(gate_hw); 195 195 } 196 196 197 + static void clk_sdmmc_mux_disable_unused(struct clk_hw *hw) 198 + { 199 + struct tegra_sdmmc_mux *sdmmc_mux = to_clk_sdmmc_mux(hw); 200 + const struct clk_ops *gate_ops = sdmmc_mux->gate_ops; 201 + struct clk_hw *gate_hw = &sdmmc_mux->gate.hw; 202 + 203 + gate_ops->disable_unused(gate_hw); 204 + } 205 + 197 206 static void clk_sdmmc_mux_restore_context(struct clk_hw *hw) 198 207 { 199 208 struct clk_hw *parent = clk_hw_get_parent(hw); ··· 227 218 .is_enabled = clk_sdmmc_mux_is_enabled, 228 219 .enable = clk_sdmmc_mux_enable, 229 220 .disable = clk_sdmmc_mux_disable, 221 + .disable_unused = clk_sdmmc_mux_disable_unused, 230 222 .restore_context = clk_sdmmc_mux_restore_context, 231 223 }; 232 224
+1 -1
drivers/gpio/gpio-mpc8xxx.c
··· 405 405 406 406 ret = devm_request_irq(&pdev->dev, mpc8xxx_gc->irqn, 407 407 mpc8xxx_gpio_irq_cascade, 408 - IRQF_SHARED, "gpio-cascade", 408 + IRQF_NO_THREAD | IRQF_SHARED, "gpio-cascade", 409 409 mpc8xxx_gc); 410 410 if (ret) { 411 411 dev_err(&pdev->dev,
+3 -3
drivers/gpio/gpio-tqmx86.c
··· 238 238 struct resource *res; 239 239 int ret, irq; 240 240 241 - irq = platform_get_irq(pdev, 0); 242 - if (irq < 0) 241 + irq = platform_get_irq_optional(pdev, 0); 242 + if (irq < 0 && irq != -ENXIO) 243 243 return irq; 244 244 245 245 res = platform_get_resource(pdev, IORESOURCE_IO, 0); ··· 278 278 279 279 pm_runtime_enable(&pdev->dev); 280 280 281 - if (irq) { 281 + if (irq > 0) { 282 282 struct irq_chip *irq_chip = &gpio->irq_chip; 283 283 u8 irq_status; 284 284
+12 -1
drivers/media/common/videobuf2/videobuf2-core.c
··· 1573 1573 struct media_request *req) 1574 1574 { 1575 1575 struct vb2_buffer *vb; 1576 + enum vb2_buffer_state orig_state; 1576 1577 int ret; 1577 1578 1578 1579 if (q->error) { ··· 1674 1673 * Add to the queued buffers list, a buffer will stay on it until 1675 1674 * dequeued in dqbuf. 1676 1675 */ 1676 + orig_state = vb->state; 1677 1677 list_add_tail(&vb->queued_entry, &q->queued_list); 1678 1678 q->queued_count++; 1679 1679 q->waiting_for_buffers = false; ··· 1705 1703 if (q->streaming && !q->start_streaming_called && 1706 1704 q->queued_count >= q->min_buffers_needed) { 1707 1705 ret = vb2_start_streaming(q); 1708 - if (ret) 1706 + if (ret) { 1707 + /* 1708 + * Since vb2_core_qbuf will return with an error, 1709 + * we should return it to state DEQUEUED since 1710 + * the error indicates that the buffer wasn't queued. 1711 + */ 1712 + list_del(&vb->queued_entry); 1713 + q->queued_count--; 1714 + vb->state = orig_state; 1709 1715 return ret; 1716 + } 1710 1717 } 1711 1718 1712 1719 dprintk(q, 2, "qbuf of buffer %d succeeded\n", vb->index);
+8
drivers/media/platform/atmel/Kconfig
··· 8 8 select VIDEOBUF2_DMA_CONTIG 9 9 select REGMAP_MMIO 10 10 select V4L2_FWNODE 11 + select VIDEO_ATMEL_ISC_BASE 11 12 help 12 13 This module makes the ATMEL Image Sensor Controller available 13 14 as a v4l2 device. ··· 20 19 select VIDEOBUF2_DMA_CONTIG 21 20 select REGMAP_MMIO 22 21 select V4L2_FWNODE 22 + select VIDEO_ATMEL_ISC_BASE 23 23 help 24 24 This module makes the ATMEL eXtended Image Sensor Controller 25 25 available as a v4l2 device. 26 + 27 + config VIDEO_ATMEL_ISC_BASE 28 + tristate 29 + default n 30 + help 31 + ATMEL ISC and XISC common code base. 26 32 27 33 config VIDEO_ATMEL_ISI 28 34 tristate "ATMEL Image Sensor Interface (ISI) support"
+3 -2
drivers/media/platform/atmel/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 - atmel-isc-objs = atmel-sama5d2-isc.o atmel-isc-base.o 3 - atmel-xisc-objs = atmel-sama7g5-isc.o atmel-isc-base.o 2 + atmel-isc-objs = atmel-sama5d2-isc.o 3 + atmel-xisc-objs = atmel-sama7g5-isc.o 4 4 5 5 obj-$(CONFIG_VIDEO_ATMEL_ISI) += atmel-isi.o 6 + obj-$(CONFIG_VIDEO_ATMEL_ISC_BASE) += atmel-isc-base.o 6 7 obj-$(CONFIG_VIDEO_ATMEL_ISC) += atmel-isc.o 7 8 obj-$(CONFIG_VIDEO_ATMEL_XISC) += atmel-xisc.o
+11
drivers/media/platform/atmel/atmel-isc-base.c
··· 378 378 379 379 return 0; 380 380 } 381 + EXPORT_SYMBOL_GPL(isc_clk_init); 381 382 382 383 void isc_clk_cleanup(struct isc_device *isc) 383 384 { ··· 393 392 clk_unregister(isc_clk->clk); 394 393 } 395 394 } 395 + EXPORT_SYMBOL_GPL(isc_clk_cleanup); 396 396 397 397 static int isc_queue_setup(struct vb2_queue *vq, 398 398 unsigned int *nbuffers, unsigned int *nplanes, ··· 1580 1578 1581 1579 return ret; 1582 1580 } 1581 + EXPORT_SYMBOL_GPL(isc_interrupt); 1583 1582 1584 1583 static void isc_hist_count(struct isc_device *isc, u32 *min, u32 *max) 1585 1584 { ··· 2215 2212 .unbind = isc_async_unbind, 2216 2213 .complete = isc_async_complete, 2217 2214 }; 2215 + EXPORT_SYMBOL_GPL(isc_async_ops); 2218 2216 2219 2217 void isc_subdev_cleanup(struct isc_device *isc) 2220 2218 { ··· 2228 2224 2229 2225 INIT_LIST_HEAD(&isc->subdev_entities); 2230 2226 } 2227 + EXPORT_SYMBOL_GPL(isc_subdev_cleanup); 2231 2228 2232 2229 int isc_pipeline_init(struct isc_device *isc) 2233 2230 { ··· 2269 2264 2270 2265 return 0; 2271 2266 } 2267 + EXPORT_SYMBOL_GPL(isc_pipeline_init); 2272 2268 2273 2269 /* regmap configuration */ 2274 2270 #define ATMEL_ISC_REG_MAX 0xd5c ··· 2279 2273 .val_bits = 32, 2280 2274 .max_register = ATMEL_ISC_REG_MAX, 2281 2275 }; 2276 + EXPORT_SYMBOL_GPL(isc_regmap_config); 2282 2277 2278 + MODULE_AUTHOR("Songjun Wu"); 2279 + MODULE_AUTHOR("Eugen Hristev"); 2280 + MODULE_DESCRIPTION("Atmel ISC common code base"); 2281 + MODULE_LICENSE("GPL v2");
+11 -3
drivers/media/usb/dvb-usb-v2/rtl28xxu.c
··· 37 37 } else { 38 38 /* read */ 39 39 requesttype = (USB_TYPE_VENDOR | USB_DIR_IN); 40 - pipe = usb_rcvctrlpipe(d->udev, 0); 40 + 41 + /* 42 + * Zero-length transfers must use usb_sndctrlpipe() and 43 + * rtl28xxu_identify_state() uses a zero-length i2c read 44 + * command to determine the chip type. 45 + */ 46 + if (req->size) 47 + pipe = usb_rcvctrlpipe(d->udev, 0); 48 + else 49 + pipe = usb_sndctrlpipe(d->udev, 0); 41 50 } 42 51 43 52 ret = usb_control_msg(d->udev, pipe, 0, requesttype, req->value, ··· 621 612 static int rtl28xxu_identify_state(struct dvb_usb_device *d, const char **name) 622 613 { 623 614 struct rtl28xxu_dev *dev = d_to_priv(d); 624 - u8 buf[1]; 625 615 int ret; 626 - struct rtl28xxu_req req_demod_i2c = {0x0020, CMD_I2C_DA_RD, 1, buf}; 616 + struct rtl28xxu_req req_demod_i2c = {0x0020, CMD_I2C_DA_RD, 0, NULL}; 627 617 628 618 dev_dbg(&d->intf->dev, "\n"); 629 619
+12 -4
drivers/net/dsa/qca/ar9331.c
··· 837 837 return 0; 838 838 } 839 839 840 - ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_REG, reg, val); 841 - if (ret < 0) 842 - goto error; 843 - 840 + /* In case of this switch we work with 32bit registers on top of 16bit 841 + * bus. Some registers (for example access to forwarding database) have 842 + * trigger bit on the first 16bit half of request, the result and 843 + * configuration of request in the second half. 844 + * To make it work properly, we should do the second part of transfer 845 + * before the first one is done. 846 + */ 844 847 ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_REG, reg + 2, 845 848 val >> 16); 846 849 if (ret < 0) 847 850 goto error; 848 851 852 + ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_REG, reg, val); 853 + if (ret < 0) 854 + goto error; 855 + 849 856 return 0; 857 + 850 858 error: 851 859 dev_err_ratelimited(&sbus->dev, "Bus error. Failed to write register.\n"); 852 860 return ret;
+14 -13
drivers/net/dsa/sja1105/sja1105_dynamic_config.c
··· 304 304 hostcmd = SJA1105_HOSTCMD_INVALIDATE; 305 305 } 306 306 sja1105_packing(p, &hostcmd, 25, 23, size, op); 307 + } 308 + 309 + static void 310 + sja1105pqrs_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd, 311 + enum packing_op op) 312 + { 313 + int entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY; 314 + 315 + sja1105pqrs_common_l2_lookup_cmd_packing(buf, cmd, op, entry_size); 307 316 308 317 /* Hack - The hardware takes the 'index' field within 309 318 * struct sja1105_l2_lookup_entry as the index on which this command ··· 322 313 * such that our API doesn't need to ask for a full-blown entry 323 314 * structure when e.g. a delete is requested. 324 315 */ 325 - sja1105_packing(buf, &cmd->index, 15, 6, 326 - SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY, op); 327 - } 328 - 329 - static void 330 - sja1105pqrs_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd, 331 - enum packing_op op) 332 - { 333 - int size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY; 334 - 335 - return sja1105pqrs_common_l2_lookup_cmd_packing(buf, cmd, op, size); 316 + sja1105_packing(buf, &cmd->index, 15, 6, entry_size, op); 336 317 } 337 318 338 319 static void 339 320 sja1110_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd, 340 321 enum packing_op op) 341 322 { 342 - int size = SJA1110_SIZE_L2_LOOKUP_ENTRY; 323 + int entry_size = SJA1110_SIZE_L2_LOOKUP_ENTRY; 343 324 344 - return sja1105pqrs_common_l2_lookup_cmd_packing(buf, cmd, op, size); 325 + sja1105pqrs_common_l2_lookup_cmd_packing(buf, cmd, op, entry_size); 326 + 327 + sja1105_packing(buf, &cmd->index, 10, 1, entry_size, op); 345 328 } 346 329 347 330 /* The switch is so retarded that it makes our command/entry abstraction
+70 -24
drivers/net/dsa/sja1105/sja1105_main.c
··· 1484 1484 int sja1105et_fdb_add(struct dsa_switch *ds, int port, 1485 1485 const unsigned char *addr, u16 vid) 1486 1486 { 1487 - struct sja1105_l2_lookup_entry l2_lookup = {0}; 1487 + struct sja1105_l2_lookup_entry l2_lookup = {0}, tmp; 1488 1488 struct sja1105_private *priv = ds->priv; 1489 1489 struct device *dev = ds->dev; 1490 1490 int last_unused = -1; 1491 + int start, end, i; 1491 1492 int bin, way, rc; 1492 1493 1493 1494 bin = sja1105et_fdb_hash(priv, addr, vid); ··· 1500 1499 * mask? If yes, we need to do nothing. If not, we need 1501 1500 * to rewrite the entry by adding this port to it. 1502 1501 */ 1503 - if (l2_lookup.destports & BIT(port)) 1502 + if ((l2_lookup.destports & BIT(port)) && l2_lookup.lockeds) 1504 1503 return 0; 1505 1504 l2_lookup.destports |= BIT(port); 1506 1505 } else { ··· 1531 1530 index, NULL, false); 1532 1531 } 1533 1532 } 1533 + l2_lookup.lockeds = true; 1534 1534 l2_lookup.index = sja1105et_fdb_index(bin, way); 1535 1535 1536 1536 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, ··· 1539 1537 true); 1540 1538 if (rc < 0) 1541 1539 return rc; 1540 + 1541 + /* Invalidate a dynamically learned entry if that exists */ 1542 + start = sja1105et_fdb_index(bin, 0); 1543 + end = sja1105et_fdb_index(bin, way); 1544 + 1545 + for (i = start; i < end; i++) { 1546 + rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, 1547 + i, &tmp); 1548 + if (rc == -ENOENT) 1549 + continue; 1550 + if (rc) 1551 + return rc; 1552 + 1553 + if (tmp.macaddr != ether_addr_to_u64(addr) || tmp.vlanid != vid) 1554 + continue; 1555 + 1556 + rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, 1557 + i, NULL, false); 1558 + if (rc) 1559 + return rc; 1560 + 1561 + break; 1562 + } 1542 1563 1543 1564 return sja1105_static_fdb_change(priv, port, &l2_lookup, true); 1544 1565 } ··· 1604 1579 int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port, 1605 1580 const unsigned char *addr, u16 vid) 1606 1581 { 1607 - struct sja1105_l2_lookup_entry 
l2_lookup = {0}; 1582 + struct sja1105_l2_lookup_entry l2_lookup = {0}, tmp; 1608 1583 struct sja1105_private *priv = ds->priv; 1609 1584 int rc, i; 1610 1585 1611 1586 /* Search for an existing entry in the FDB table */ 1612 1587 l2_lookup.macaddr = ether_addr_to_u64(addr); 1613 1588 l2_lookup.vlanid = vid; 1614 - l2_lookup.iotag = SJA1105_S_TAG; 1615 1589 l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0); 1616 - if (priv->vlan_aware) { 1617 - l2_lookup.mask_vlanid = VLAN_VID_MASK; 1618 - l2_lookup.mask_iotag = BIT(0); 1619 - } else { 1620 - l2_lookup.mask_vlanid = 0; 1621 - l2_lookup.mask_iotag = 0; 1622 - } 1590 + l2_lookup.mask_vlanid = VLAN_VID_MASK; 1623 1591 l2_lookup.destports = BIT(port); 1624 1592 1593 + tmp = l2_lookup; 1594 + 1625 1595 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, 1626 - SJA1105_SEARCH, &l2_lookup); 1627 - if (rc == 0) { 1628 - /* Found and this port is already in the entry's 1596 + SJA1105_SEARCH, &tmp); 1597 + if (rc == 0 && tmp.index != SJA1105_MAX_L2_LOOKUP_COUNT - 1) { 1598 + /* Found a static entry and this port is already in the entry's 1629 1599 * port mask => job done 1630 1600 */ 1631 - if (l2_lookup.destports & BIT(port)) 1601 + if ((tmp.destports & BIT(port)) && tmp.lockeds) 1632 1602 return 0; 1603 + 1604 + l2_lookup = tmp; 1605 + 1633 1606 /* l2_lookup.index is populated by the switch in case it 1634 1607 * found something. 1635 1608 */ ··· 1649 1626 dev_err(ds->dev, "FDB is full, cannot add entry.\n"); 1650 1627 return -EINVAL; 1651 1628 } 1652 - l2_lookup.lockeds = true; 1653 1629 l2_lookup.index = i; 1654 1630 1655 1631 skip_finding_an_index: 1632 + l2_lookup.lockeds = true; 1633 + 1656 1634 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, 1657 1635 l2_lookup.index, &l2_lookup, 1658 1636 true); 1659 1637 if (rc < 0) 1660 1638 return rc; 1639 + 1640 + /* The switch learns dynamic entries and looks up the FDB left to 1641 + * right. 
It is possible that our addition was concurrent with the 1642 + * dynamic learning of the same address, so now that the static entry 1643 + * has been installed, we are certain that address learning for this 1644 + * particular address has been turned off, so the dynamic entry either 1645 + * is in the FDB at an index smaller than the static one, or isn't (it 1646 + * can also be at a larger index, but in that case it is inactive 1647 + * because the static FDB entry will match first, and the dynamic one 1648 + * will eventually age out). Search for a dynamically learned address 1649 + * prior to our static one and invalidate it. 1650 + */ 1651 + tmp = l2_lookup; 1652 + 1653 + rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, 1654 + SJA1105_SEARCH, &tmp); 1655 + if (rc < 0) { 1656 + dev_err(ds->dev, 1657 + "port %d failed to read back entry for %pM vid %d: %pe\n", 1658 + port, addr, vid, ERR_PTR(rc)); 1659 + return rc; 1660 + } 1661 + 1662 + if (tmp.index < l2_lookup.index) { 1663 + rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, 1664 + tmp.index, NULL, false); 1665 + if (rc < 0) 1666 + return rc; 1667 + } 1661 1668 1662 1669 return sja1105_static_fdb_change(priv, port, &l2_lookup, true); 1663 1670 } ··· 1702 1649 1703 1650 l2_lookup.macaddr = ether_addr_to_u64(addr); 1704 1651 l2_lookup.vlanid = vid; 1705 - l2_lookup.iotag = SJA1105_S_TAG; 1706 1652 l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0); 1707 - if (priv->vlan_aware) { 1708 - l2_lookup.mask_vlanid = VLAN_VID_MASK; 1709 - l2_lookup.mask_iotag = BIT(0); 1710 - } else { 1711 - l2_lookup.mask_vlanid = 0; 1712 - l2_lookup.mask_iotag = 0; 1713 - } 1653 + l2_lookup.mask_vlanid = VLAN_VID_MASK; 1714 1654 l2_lookup.destports = BIT(port); 1715 1655 1716 1656 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+2 -1
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
··· 2669 2669 } 2670 2670 2671 2671 /* Allocated memory for FW statistics */ 2672 - if (bnx2x_alloc_fw_stats_mem(bp)) 2672 + rc = bnx2x_alloc_fw_stats_mem(bp); 2673 + if (rc) 2673 2674 LOAD_ERROR_EXIT(bp, load_error0); 2674 2675 2675 2676 /* request pf to initialize status blocks */
+1 -1
drivers/net/ethernet/freescale/fec_main.c
··· 4017 4017 if (of_phy_is_fixed_link(np)) 4018 4018 of_phy_deregister_fixed_link(np); 4019 4019 of_node_put(fep->phy_node); 4020 - free_netdev(ndev); 4021 4020 4022 4021 clk_disable_unprepare(fep->clk_ahb); 4023 4022 clk_disable_unprepare(fep->clk_ipg); 4024 4023 pm_runtime_put_noidle(&pdev->dev); 4025 4024 pm_runtime_disable(&pdev->dev); 4026 4025 4026 + free_netdev(ndev); 4027 4027 return 0; 4028 4028 } 4029 4029
+2
drivers/net/ethernet/marvell/prestera/prestera_devlink.c
··· 530 530 prestera_trap = &prestera_trap_items_arr[i]; 531 531 devlink_traps_unregister(devlink, &prestera_trap->trap, 1); 532 532 } 533 + devlink_trap_groups_unregister(devlink, prestera_trap_groups_arr, 534 + groups_count); 533 535 err_groups_register: 534 536 kfree(trap_data->trap_items_arr); 535 537 err_trap_items_alloc:
+14 -7
drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
··· 13 13 */ 14 14 #define VSTAX 73 15 15 16 - static void ifh_encode_bitfield(void *ifh, u64 value, u32 pos, u32 width) 16 + #define ifh_encode_bitfield(ifh, value, pos, _width) \ 17 + ({ \ 18 + u32 width = (_width); \ 19 + \ 20 + /* Max width is 5 bytes - 40 bits. In worst case this will 21 + * spread over 6 bytes - 48 bits 22 + */ \ 23 + compiletime_assert(width <= 40, \ 24 + "Unsupported width, must be <= 40"); \ 25 + __ifh_encode_bitfield((ifh), (value), (pos), width); \ 26 + }) 27 + 28 + static void __ifh_encode_bitfield(void *ifh, u64 value, u32 pos, u32 width) 17 29 { 18 30 u8 *ifh_hdr = ifh; 19 31 /* Calculate the Start IFH byte position of this IFH bit position */ 20 32 u32 byte = (35 - (pos / 8)); 21 33 /* Calculate the Start bit position in the Start IFH byte */ 22 34 u32 bit = (pos % 8); 23 - u64 encode = GENMASK(bit + width - 1, bit) & (value << bit); 24 - 25 - /* Max width is 5 bytes - 40 bits. In worst case this will 26 - * spread over 6 bytes - 48 bits 27 - */ 28 - compiletime_assert(width <= 40, "Unsupported width, must be <= 40"); 35 + u64 encode = GENMASK_ULL(bit + width - 1, bit) & (value << bit); 29 36 30 37 /* The b0-b7 goes into the start IFH byte */ 31 38 if (encode & 0xFF)
+2 -6
drivers/net/ethernet/natsemi/natsemi.c
··· 819 819 printk(version); 820 820 #endif 821 821 822 - i = pci_enable_device(pdev); 822 + i = pcim_enable_device(pdev); 823 823 if (i) return i; 824 824 825 825 /* natsemi has a non-standard PM control register ··· 852 852 ioaddr = ioremap(iostart, iosize); 853 853 if (!ioaddr) { 854 854 i = -ENOMEM; 855 - goto err_ioremap; 855 + goto err_pci_request_regions; 856 856 } 857 857 858 858 /* Work around the dropped serial bit. */ ··· 973 973 974 974 err_register_netdev: 975 975 iounmap(ioaddr); 976 - 977 - err_ioremap: 978 - pci_release_regions(pdev); 979 976 980 977 err_pci_request_regions: 981 978 free_netdev(dev); ··· 3238 3241 3239 3242 NATSEMI_REMOVE_FILE(pdev, dspcfg_workaround); 3240 3243 unregister_netdev (dev); 3241 - pci_release_regions (pdev); 3242 3244 iounmap(ioaddr); 3243 3245 free_netdev (dev); 3244 3246 }
+3 -3
drivers/net/ethernet/neterion/vxge/vxge-main.c
··· 3512 3512 3513 3513 kfree(vdev->vpaths); 3514 3514 3515 - /* we are safe to free it now */ 3516 - free_netdev(dev); 3517 - 3518 3515 vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered", 3519 3516 buf); 3520 3517 vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf, 3521 3518 __func__, __LINE__); 3519 + 3520 + /* we are safe to free it now */ 3521 + free_netdev(dev); 3522 3522 } 3523 3523 3524 3524 /*
+2
drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
··· 286 286 287 287 /* Init to unknowns */ 288 288 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); 289 + ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); 290 + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); 289 291 cmd->base.port = PORT_OTHER; 290 292 cmd->base.speed = SPEED_UNKNOWN; 291 293 cmd->base.duplex = DUPLEX_UNKNOWN;
+1
drivers/net/ethernet/qlogic/qede/qede.h
··· 492 492 #define QEDE_SP_HW_ERR 4 493 493 #define QEDE_SP_ARFS_CONFIG 5 494 494 #define QEDE_SP_AER 7 495 + #define QEDE_SP_DISABLE 8 495 496 496 497 #ifdef CONFIG_RFS_ACCEL 497 498 int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+8
drivers/net/ethernet/qlogic/qede/qede_main.c
··· 1005 1005 struct qede_dev *edev = container_of(work, struct qede_dev, 1006 1006 sp_task.work); 1007 1007 1008 + /* Disable execution of this deferred work once 1009 + * qede removal is in progress, this stop any future 1010 + * scheduling of sp_task. 1011 + */ 1012 + if (test_bit(QEDE_SP_DISABLE, &edev->sp_flags)) 1013 + return; 1014 + 1008 1015 /* The locking scheme depends on the specific flag: 1009 1016 * In case of QEDE_SP_RECOVERY, acquiring the RTNL lock is required to 1010 1017 * ensure that ongoing flows are ended and new ones are not started. ··· 1299 1292 qede_rdma_dev_remove(edev, (mode == QEDE_REMOVE_RECOVERY)); 1300 1293 1301 1294 if (mode != QEDE_REMOVE_RECOVERY) { 1295 + set_bit(QEDE_SP_DISABLE, &edev->sp_flags); 1302 1296 unregister_netdev(ndev); 1303 1297 1304 1298 cancel_delayed_work_sync(&edev->sp_task);
+5 -1
drivers/net/ethernet/ti/am65-cpsw-nuss.c
··· 2061 2061 2062 2062 for (i = 1; i <= common->port_num; i++) { 2063 2063 struct am65_cpsw_port *port = am65_common_get_port(common, i); 2064 - struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(port->ndev); 2064 + struct am65_cpsw_ndev_priv *priv; 2065 2065 2066 + if (!port->ndev) 2067 + continue; 2068 + 2069 + priv = am65_ndev_to_priv(port->ndev); 2066 2070 priv->offload_fwd_mark = set_val; 2067 2071 } 2068 2072 }
+1 -1
drivers/net/mhi_net.c
··· 319 319 u64_stats_init(&mhi_netdev->stats.tx_syncp); 320 320 321 321 /* Start MHI channels */ 322 - err = mhi_prepare_for_transfer(mhi_dev); 322 + err = mhi_prepare_for_transfer(mhi_dev, 0); 323 323 if (err) 324 324 goto out_err; 325 325
+5 -5
drivers/net/phy/micrel.c
··· 401 401 } 402 402 403 403 static int ksz8051_ksz8795_match_phy_device(struct phy_device *phydev, 404 - const u32 ksz_phy_id) 404 + const bool ksz_8051) 405 405 { 406 406 int ret; 407 407 408 - if ((phydev->phy_id & MICREL_PHY_ID_MASK) != ksz_phy_id) 408 + if ((phydev->phy_id & MICREL_PHY_ID_MASK) != PHY_ID_KSZ8051) 409 409 return 0; 410 410 411 411 ret = phy_read(phydev, MII_BMSR); ··· 418 418 * the switch does not. 419 419 */ 420 420 ret &= BMSR_ERCAP; 421 - if (ksz_phy_id == PHY_ID_KSZ8051) 421 + if (ksz_8051) 422 422 return ret; 423 423 else 424 424 return !ret; ··· 426 426 427 427 static int ksz8051_match_phy_device(struct phy_device *phydev) 428 428 { 429 - return ksz8051_ksz8795_match_phy_device(phydev, PHY_ID_KSZ8051); 429 + return ksz8051_ksz8795_match_phy_device(phydev, true); 430 430 } 431 431 432 432 static int ksz8081_config_init(struct phy_device *phydev) ··· 535 535 536 536 static int ksz8795_match_phy_device(struct phy_device *phydev) 537 537 { 538 - return ksz8051_ksz8795_match_phy_device(phydev, PHY_ID_KSZ87XX); 538 + return ksz8051_ksz8795_match_phy_device(phydev, false); 539 539 } 540 540 541 541 static int ksz9021_load_values_from_of(struct phy_device *phydev,
+13 -5
drivers/net/usb/lan78xx.c
··· 1154 1154 { 1155 1155 struct phy_device *phydev = dev->net->phydev; 1156 1156 struct ethtool_link_ksettings ecmd; 1157 - int ladv, radv, ret; 1157 + int ladv, radv, ret, link; 1158 1158 u32 buf; 1159 1159 1160 1160 /* clear LAN78xx interrupt status */ ··· 1162 1162 if (unlikely(ret < 0)) 1163 1163 return -EIO; 1164 1164 1165 + mutex_lock(&phydev->lock); 1165 1166 phy_read_status(phydev); 1167 + link = phydev->link; 1168 + mutex_unlock(&phydev->lock); 1166 1169 1167 - if (!phydev->link && dev->link_on) { 1170 + if (!link && dev->link_on) { 1168 1171 dev->link_on = false; 1169 1172 1170 1173 /* reset MAC */ ··· 1180 1177 return -EIO; 1181 1178 1182 1179 del_timer(&dev->stat_monitor); 1183 - } else if (phydev->link && !dev->link_on) { 1180 + } else if (link && !dev->link_on) { 1184 1181 dev->link_on = true; 1185 1182 1186 1183 phy_ethtool_ksettings_get(phydev, &ecmd); ··· 1469 1466 1470 1467 static u32 lan78xx_get_link(struct net_device *net) 1471 1468 { 1472 - phy_read_status(net->phydev); 1469 + u32 link; 1473 1470 1474 - return net->phydev->link; 1471 + mutex_lock(&net->phydev->lock); 1472 + phy_read_status(net->phydev); 1473 + link = net->phydev->link; 1474 + mutex_unlock(&net->phydev->lock); 1475 + 1476 + return link; 1475 1477 } 1476 1478 1477 1479 static void lan78xx_get_drvinfo(struct net_device *net,
+88 -64
drivers/net/usb/pegasus.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 2 /* 3 - * Copyright (c) 1999-2013 Petko Manolov (petkan@nucleusys.com) 3 + * Copyright (c) 1999-2021 Petko Manolov (petkan@nucleusys.com) 4 4 * 5 - * ChangeLog: 6 - * .... Most of the time spent on reading sources & docs. 7 - * v0.2.x First official release for the Linux kernel. 8 - * v0.3.0 Beutified and structured, some bugs fixed. 9 - * v0.3.x URBifying bulk requests and bugfixing. First relatively 10 - * stable release. Still can touch device's registers only 11 - * from top-halves. 12 - * v0.4.0 Control messages remained unurbified are now URBs. 13 - * Now we can touch the HW at any time. 14 - * v0.4.9 Control urbs again use process context to wait. Argh... 15 - * Some long standing bugs (enable_net_traffic) fixed. 16 - * Also nasty trick about resubmiting control urb from 17 - * interrupt context used. Please let me know how it 18 - * behaves. Pegasus II support added since this version. 19 - * TODO: suppressing HCD warnings spewage on disconnect. 20 - * v0.4.13 Ethernet address is now set at probe(), not at open() 21 - * time as this seems to break dhcpd. 22 - * v0.5.0 branch to 2.5.x kernels 23 - * v0.5.1 ethtool support added 24 - * v0.5.5 rx socket buffers are in a pool and the their allocation 25 - * is out of the interrupt routine. 26 - * ... 27 - * v0.9.3 simplified [get|set]_register(s), async update registers 28 - * logic revisited, receive skb_pool removed. 
29 5 */ 30 6 31 7 #include <linux/sched.h> ··· 21 45 /* 22 46 * Version Information 23 47 */ 24 - #define DRIVER_VERSION "v0.9.3 (2013/04/25)" 25 48 #define DRIVER_AUTHOR "Petko Manolov <petkan@nucleusys.com>" 26 49 #define DRIVER_DESC "Pegasus/Pegasus II USB Ethernet driver" 27 50 ··· 107 132 static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size, 108 133 const void *data) 109 134 { 110 - return usb_control_msg_send(pegasus->usb, 0, PEGASUS_REQ_SET_REGS, 135 + int ret; 136 + 137 + ret = usb_control_msg_send(pegasus->usb, 0, PEGASUS_REQ_SET_REGS, 111 138 PEGASUS_REQT_WRITE, 0, indx, data, size, 112 139 1000, GFP_NOIO); 140 + if (ret < 0) 141 + netif_dbg(pegasus, drv, pegasus->net, "%s failed with %d\n", __func__, ret); 142 + 143 + return ret; 113 144 } 114 145 115 146 /* ··· 126 145 static int set_register(pegasus_t *pegasus, __u16 indx, __u8 data) 127 146 { 128 147 void *buf = &data; 148 + int ret; 129 149 130 - return usb_control_msg_send(pegasus->usb, 0, PEGASUS_REQ_SET_REG, 150 + ret = usb_control_msg_send(pegasus->usb, 0, PEGASUS_REQ_SET_REG, 131 151 PEGASUS_REQT_WRITE, data, indx, buf, 1, 132 152 1000, GFP_NOIO); 153 + if (ret < 0) 154 + netif_dbg(pegasus, drv, pegasus->net, "%s failed with %d\n", __func__, ret); 155 + 156 + return ret; 133 157 } 134 158 135 159 static int update_eth_regs_async(pegasus_t *pegasus) ··· 174 188 175 189 static int __mii_op(pegasus_t *p, __u8 phy, __u8 indx, __u16 *regd, __u8 cmd) 176 190 { 177 - int i; 178 - __u8 data[4] = { phy, 0, 0, indx }; 191 + int i, ret; 179 192 __le16 regdi; 180 - int ret = -ETIMEDOUT; 193 + __u8 data[4] = { phy, 0, 0, indx }; 181 194 182 195 if (cmd & PHY_WRITE) { 183 196 __le16 *t = (__le16 *) & data[1]; ··· 192 207 if (data[0] & PHY_DONE) 193 208 break; 194 209 } 195 - if (i >= REG_TIMEOUT) 210 + if (i >= REG_TIMEOUT) { 211 + ret = -ETIMEDOUT; 196 212 goto fail; 213 + } 197 214 if (cmd & PHY_READ) { 198 215 ret = get_registers(p, PhyData, 2, &regdi); 216 + if (ret < 0) 217 + goto fail; 
199 218 *regd = le16_to_cpu(regdi); 200 - return ret; 201 219 } 202 220 return 0; 203 221 fail: ··· 223 235 static int mdio_read(struct net_device *dev, int phy_id, int loc) 224 236 { 225 237 pegasus_t *pegasus = netdev_priv(dev); 238 + int ret; 226 239 u16 res; 227 240 228 - read_mii_word(pegasus, phy_id, loc, &res); 241 + ret = read_mii_word(pegasus, phy_id, loc, &res); 242 + if (ret < 0) 243 + return ret; 244 + 229 245 return (int)res; 230 246 } 231 247 ··· 243 251 244 252 static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata) 245 253 { 246 - int i; 247 - __u8 tmp = 0; 254 + int ret, i; 248 255 __le16 retdatai; 249 - int ret; 256 + __u8 tmp = 0; 250 257 251 258 set_register(pegasus, EpromCtrl, 0); 252 259 set_register(pegasus, EpromOffset, index); ··· 253 262 254 263 for (i = 0; i < REG_TIMEOUT; i++) { 255 264 ret = get_registers(pegasus, EpromCtrl, 1, &tmp); 265 + if (ret < 0) 266 + goto fail; 256 267 if (tmp & EPROM_DONE) 257 268 break; 258 - if (ret == -ESHUTDOWN) 259 - goto fail; 260 269 } 261 - if (i >= REG_TIMEOUT) 270 + if (i >= REG_TIMEOUT) { 271 + ret = -ETIMEDOUT; 262 272 goto fail; 273 + } 263 274 264 275 ret = get_registers(pegasus, EpromData, 2, &retdatai); 276 + if (ret < 0) 277 + goto fail; 265 278 *retdata = le16_to_cpu(retdatai); 266 279 return ret; 267 280 268 281 fail: 269 - netif_warn(pegasus, drv, pegasus->net, "%s failed\n", __func__); 270 - return -ETIMEDOUT; 282 + netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__); 283 + return ret; 271 284 } 272 285 273 286 #ifdef PEGASUS_WRITE_EEPROM ··· 319 324 return ret; 320 325 321 326 fail: 322 - netif_warn(pegasus, drv, pegasus->net, "%s failed\n", __func__); 327 + netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__); 323 328 return -ETIMEDOUT; 324 329 } 325 - #endif /* PEGASUS_WRITE_EEPROM */ 330 + #endif /* PEGASUS_WRITE_EEPROM */ 326 331 327 332 static inline int get_node_id(pegasus_t *pegasus, u8 *id) 328 333 { ··· 362 367 return; 363 368 err: 364 369 
eth_hw_addr_random(pegasus->net); 365 - dev_info(&pegasus->intf->dev, "software assigned MAC address.\n"); 370 + netif_dbg(pegasus, drv, pegasus->net, "software assigned MAC address.\n"); 366 371 367 372 return; 368 373 } 369 374 370 375 static inline int reset_mac(pegasus_t *pegasus) 371 376 { 377 + int ret, i; 372 378 __u8 data = 0x8; 373 - int i; 374 379 375 380 set_register(pegasus, EthCtrl1, data); 376 381 for (i = 0; i < REG_TIMEOUT; i++) { 377 - get_registers(pegasus, EthCtrl1, 1, &data); 382 + ret = get_registers(pegasus, EthCtrl1, 1, &data); 383 + if (ret < 0) 384 + goto fail; 378 385 if (~data & 0x08) { 379 386 if (loopback) 380 387 break; ··· 399 402 } 400 403 if (usb_dev_id[pegasus->dev_index].vendor == VENDOR_ELCON) { 401 404 __u16 auxmode; 402 - read_mii_word(pegasus, 3, 0x1b, &auxmode); 405 + ret = read_mii_word(pegasus, 3, 0x1b, &auxmode); 406 + if (ret < 0) 407 + goto fail; 403 408 auxmode |= 4; 404 409 write_mii_word(pegasus, 3, 0x1b, &auxmode); 405 410 } 406 411 407 412 return 0; 413 + fail: 414 + netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__); 415 + return ret; 408 416 } 409 417 410 418 static int enable_net_traffic(struct net_device *dev, struct usb_device *usb) 411 419 { 412 - __u16 linkpart; 413 - __u8 data[4]; 414 420 pegasus_t *pegasus = netdev_priv(dev); 415 421 int ret; 422 + __u16 linkpart; 423 + __u8 data[4]; 416 424 417 - read_mii_word(pegasus, pegasus->phy, MII_LPA, &linkpart); 425 + ret = read_mii_word(pegasus, pegasus->phy, MII_LPA, &linkpart); 426 + if (ret < 0) 427 + goto fail; 418 428 data[0] = 0xc8; /* TX & RX enable, append status, no CRC */ 419 429 data[1] = 0; 420 430 if (linkpart & (ADVERTISE_100FULL | ADVERTISE_10FULL)) ··· 439 435 usb_dev_id[pegasus->dev_index].vendor == VENDOR_LINKSYS2 || 440 436 usb_dev_id[pegasus->dev_index].vendor == VENDOR_DLINK) { 441 437 u16 auxmode; 442 - read_mii_word(pegasus, 0, 0x1b, &auxmode); 438 + ret = read_mii_word(pegasus, 0, 0x1b, &auxmode); 439 + if (ret < 0) 440 + goto 
fail; 443 441 auxmode |= 4; 444 442 write_mii_word(pegasus, 0, 0x1b, &auxmode); 445 443 } 446 444 445 + return 0; 446 + fail: 447 + netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__); 447 448 return ret; 448 449 } 449 450 ··· 456 447 { 457 448 pegasus_t *pegasus = urb->context; 458 449 struct net_device *net; 450 + u8 *buf = urb->transfer_buffer; 459 451 int rx_status, count = urb->actual_length; 460 452 int status = urb->status; 461 - u8 *buf = urb->transfer_buffer; 462 453 __u16 pkt_len; 463 454 464 455 if (!pegasus) ··· 744 735 set_registers(pegasus, EthCtrl0, sizeof(tmp), &tmp); 745 736 } 746 737 747 - static inline void get_interrupt_interval(pegasus_t *pegasus) 738 + static inline int get_interrupt_interval(pegasus_t *pegasus) 748 739 { 749 740 u16 data; 750 741 u8 interval; 742 + int ret; 751 743 752 - read_eprom_word(pegasus, 4, &data); 744 + ret = read_eprom_word(pegasus, 4, &data); 745 + if (ret < 0) 746 + return ret; 747 + 753 748 interval = data >> 8; 754 749 if (pegasus->usb->speed != USB_SPEED_HIGH) { 755 750 if (interval < 0x80) { ··· 768 755 } 769 756 } 770 757 pegasus->intr_interval = interval; 758 + 759 + return 0; 771 760 } 772 761 773 762 static void set_carrier(struct net_device *net) ··· 895 880 pegasus_t *pegasus = netdev_priv(dev); 896 881 897 882 strlcpy(info->driver, driver_name, sizeof(info->driver)); 898 - strlcpy(info->version, DRIVER_VERSION, sizeof(info->version)); 899 883 usb_make_path(pegasus->usb, info->bus_info, sizeof(info->bus_info)); 900 884 } 901 885 ··· 1013 999 data[0] = pegasus->phy; 1014 1000 fallthrough; 1015 1001 case SIOCDEVPRIVATE + 1: 1016 - read_mii_word(pegasus, data[0], data[1] & 0x1f, &data[3]); 1017 - res = 0; 1002 + res = read_mii_word(pegasus, data[0], data[1] & 0x1f, &data[3]); 1018 1003 break; 1019 1004 case SIOCDEVPRIVATE + 2: 1020 1005 if (!capable(CAP_NET_ADMIN)) ··· 1047 1034 1048 1035 static __u8 mii_phy_probe(pegasus_t *pegasus) 1049 1036 { 1050 - int i; 1037 + int i, ret; 1051 1038 __u16 
tmp; 1052 1039 1053 1040 for (i = 0; i < 32; i++) { 1054 - read_mii_word(pegasus, i, MII_BMSR, &tmp); 1041 + ret = read_mii_word(pegasus, i, MII_BMSR, &tmp); 1042 + if (ret < 0) 1043 + goto fail; 1055 1044 if (tmp == 0 || tmp == 0xffff || (tmp & BMSR_MEDIA) == 0) 1056 1045 continue; 1057 1046 else 1058 1047 return i; 1059 1048 } 1060 - 1049 + fail: 1061 1050 return 0xff; 1062 1051 } 1063 1052 1064 1053 static inline void setup_pegasus_II(pegasus_t *pegasus) 1065 1054 { 1055 + int ret; 1066 1056 __u8 data = 0xa5; 1067 1057 1068 1058 set_register(pegasus, Reg1d, 0); ··· 1077 1061 set_register(pegasus, Reg7b, 2); 1078 1062 1079 1063 set_register(pegasus, 0x83, data); 1080 - get_registers(pegasus, 0x83, 1, &data); 1064 + ret = get_registers(pegasus, 0x83, 1, &data); 1065 + if (ret < 0) 1066 + goto fail; 1081 1067 1082 1068 if (data == 0xa5) 1083 1069 pegasus->chip = 0x8513; ··· 1094 1076 set_register(pegasus, Reg81, 6); 1095 1077 else 1096 1078 set_register(pegasus, Reg81, 2); 1079 + 1080 + return; 1081 + fail: 1082 + netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__); 1097 1083 } 1098 1084 1099 1085 static void check_carrier(struct work_struct *work) ··· 1172 1150 | NETIF_MSG_PROBE | NETIF_MSG_LINK); 1173 1151 1174 1152 pegasus->features = usb_dev_id[dev_index].private; 1175 - get_interrupt_interval(pegasus); 1153 + res = get_interrupt_interval(pegasus); 1154 + if (res) 1155 + goto out2; 1176 1156 if (reset_mac(pegasus)) { 1177 1157 dev_err(&intf->dev, "can't reset MAC\n"); 1178 1158 res = -EIO; ··· 1321 1297 1322 1298 static int __init pegasus_init(void) 1323 1299 { 1324 - pr_info("%s: %s, " DRIVER_DESC "\n", driver_name, DRIVER_VERSION); 1300 + pr_info("%s: " DRIVER_DESC "\n", driver_name); 1325 1301 if (devid) 1326 1302 parse_id(devid); 1327 1303 return usb_register(&pegasus_driver);
+2 -2
drivers/net/wwan/iosm/iosm_ipc_mmio.h
··· 10 10 #define IOSM_CP_VERSION 0x0100UL 11 11 12 12 /* DL dir Aggregation support mask */ 13 - #define DL_AGGR BIT(23) 13 + #define DL_AGGR BIT(9) 14 14 15 15 /* UL dir Aggregation support mask */ 16 - #define UL_AGGR BIT(22) 16 + #define UL_AGGR BIT(8) 17 17 18 18 /* UL flow credit support mask */ 19 19 #define UL_FLOW_CREDIT BIT(21)
+2 -2
drivers/net/wwan/iosm/iosm_ipc_mux_codec.c
··· 320 320 return; 321 321 } 322 322 323 - ul_credits = fct->vfl.nr_of_bytes; 323 + ul_credits = le32_to_cpu(fct->vfl.nr_of_bytes); 324 324 325 325 dev_dbg(ipc_mux->dev, "Flow_Credit:: if_id[%d] Old: %d Grants: %d", 326 326 if_id, ipc_mux->session[if_id].ul_flow_credits, ul_credits); ··· 586 586 qlt->reserved[0] = 0; 587 587 qlt->reserved[1] = 0; 588 588 589 - qlt->vfl.nr_of_bytes = session->ul_list.qlen; 589 + qlt->vfl.nr_of_bytes = cpu_to_le32(session->ul_list.qlen); 590 590 591 591 /* Add QLT to the transfer list. */ 592 592 skb_queue_tail(&ipc_mux->channel->ul_list,
+1 -1
drivers/net/wwan/iosm/iosm_ipc_mux_codec.h
··· 106 106 * @nr_of_bytes: Number of bytes available to transmit in the queue. 107 107 */ 108 108 struct mux_lite_vfl { 109 - u32 nr_of_bytes; 109 + __le32 nr_of_bytes; 110 110 }; 111 111 112 112 /**
+2 -2
drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c
··· 412 412 } 413 413 414 414 if (p_td->buffer.address != IPC_CB(skb)->mapping) { 415 - dev_err(ipc_protocol->dev, "invalid buf=%p or skb=%p", 416 - (void *)p_td->buffer.address, skb->data); 415 + dev_err(ipc_protocol->dev, "invalid buf=%llx or skb=%p", 416 + (unsigned long long)p_td->buffer.address, skb->data); 417 417 ipc_pcie_kfree_skb(ipc_protocol->pcie, skb); 418 418 skb = NULL; 419 419 goto ret;
+1 -1
drivers/net/wwan/iosm/iosm_ipc_wwan.c
··· 228 228 229 229 RCU_INIT_POINTER(ipc_wwan->sub_netlist[if_id], NULL); 230 230 /* unregistering includes synchronize_net() */ 231 - unregister_netdevice(dev); 231 + unregister_netdevice_queue(dev, head); 232 232 233 233 unlock: 234 234 mutex_unlock(&ipc_wwan->if_mutex);
+1 -1
drivers/net/wwan/mhi_wwan_ctrl.c
··· 110 110 int ret; 111 111 112 112 /* Start mhi device's channel(s) */ 113 - ret = mhi_prepare_for_transfer(mhiwwan->mhi_dev); 113 + ret = mhi_prepare_for_transfer(mhiwwan->mhi_dev, 0); 114 114 if (ret) 115 115 return ret; 116 116
+1 -1
drivers/net/wwan/mhi_wwan_mbim.c
··· 608 608 INIT_DELAYED_WORK(&mbim->rx_refill, mhi_net_rx_refill_work); 609 609 610 610 /* Start MHI channels */ 611 - err = mhi_prepare_for_transfer(mhi_dev); 611 + err = mhi_prepare_for_transfer(mhi_dev, 0); 612 612 if (err) 613 613 return err; 614 614
+1
drivers/pcmcia/i82092.c
··· 112 112 for (i = 0; i < socket_count; i++) { 113 113 sockets[i].card_state = 1; /* 1 = present but empty */ 114 114 sockets[i].io_base = pci_resource_start(dev, 0); 115 + sockets[i].dev = dev; 115 116 sockets[i].socket.features |= SS_CAP_PCCARD; 116 117 sockets[i].socket.map_size = 0x1000; 117 118 sockets[i].socket.irq_mask = 0;
+2 -2
drivers/scsi/device_handler/scsi_dh_rdac.c
··· 453 453 if (!h->ctlr) 454 454 err = SCSI_DH_RES_TEMP_UNAVAIL; 455 455 else { 456 - list_add_rcu(&h->node, &h->ctlr->dh_list); 457 456 h->sdev = sdev; 457 + list_add_rcu(&h->node, &h->ctlr->dh_list); 458 458 } 459 459 spin_unlock(&list_lock); 460 460 err = SCSI_DH_OK; ··· 778 778 spin_lock(&list_lock); 779 779 if (h->ctlr) { 780 780 list_del_rcu(&h->node); 781 - h->sdev = NULL; 782 781 kref_put(&h->ctlr->kref, release_controller); 783 782 } 784 783 spin_unlock(&list_lock); 785 784 sdev->handler_data = NULL; 785 + synchronize_rcu(); 786 786 kfree(h); 787 787 } 788 788
+17 -2
drivers/scsi/ibmvscsi/ibmvfc.c
··· 807 807 for (i = 0; i < size; ++i) { 808 808 struct ibmvfc_event *evt = &pool->events[i]; 809 809 810 + /* 811 + * evt->active states 812 + * 1 = in flight 813 + * 0 = being completed 814 + * -1 = free/freed 815 + */ 816 + atomic_set(&evt->active, -1); 810 817 atomic_set(&evt->free, 1); 811 818 evt->crq.valid = 0x80; 812 819 evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i)); ··· 1024 1017 1025 1018 BUG_ON(!ibmvfc_valid_event(pool, evt)); 1026 1019 BUG_ON(atomic_inc_return(&evt->free) != 1); 1020 + BUG_ON(atomic_dec_and_test(&evt->active)); 1027 1021 1028 1022 spin_lock_irqsave(&evt->queue->l_lock, flags); 1029 1023 list_add_tail(&evt->queue_list, &evt->queue->free); ··· 1080 1072 **/ 1081 1073 static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code) 1082 1074 { 1075 + /* 1076 + * Anything we are failing should still be active. Otherwise, it 1077 + * implies we already got a response for the command and are doing 1078 + * something bad like double completing it. 1079 + */ 1080 + BUG_ON(!atomic_dec_and_test(&evt->active)); 1083 1081 if (evt->cmnd) { 1084 1082 evt->cmnd->result = (error_code << 16); 1085 1083 evt->done = ibmvfc_scsi_eh_done; ··· 1737 1723 1738 1724 evt->done(evt); 1739 1725 } else { 1726 + atomic_set(&evt->active, 1); 1740 1727 spin_unlock_irqrestore(&evt->queue->l_lock, flags); 1741 1728 ibmvfc_trc_start(evt); 1742 1729 } ··· 3266 3251 return; 3267 3252 } 3268 3253 3269 - if (unlikely(atomic_read(&evt->free))) { 3254 + if (unlikely(atomic_dec_if_positive(&evt->active))) { 3270 3255 dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n", 3271 3256 crq->ioba); 3272 3257 return; ··· 3793 3778 return; 3794 3779 } 3795 3780 3796 - if (unlikely(atomic_read(&evt->free))) { 3781 + if (unlikely(atomic_dec_if_positive(&evt->active))) { 3797 3782 dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n", 3798 3783 crq->ioba); 3799 3784 return;
+1
drivers/scsi/ibmvscsi/ibmvfc.h
··· 745 745 struct ibmvfc_target *tgt; 746 746 struct scsi_cmnd *cmnd; 747 747 atomic_t free; 748 + atomic_t active; 748 749 union ibmvfc_iu *xfer_iu; 749 750 void (*done)(struct ibmvfc_event *evt); 750 751 void (*_done)(struct ibmvfc_event *evt);
+15 -6
drivers/scsi/megaraid/megaraid_mm.c
··· 238 238 mimd_t mimd; 239 239 uint32_t adapno; 240 240 int iterator; 241 - 241 + bool is_found; 242 242 243 243 if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) { 244 244 *rval = -EFAULT; ··· 254 254 255 255 adapter = NULL; 256 256 iterator = 0; 257 + is_found = false; 257 258 258 259 list_for_each_entry(adapter, &adapters_list_g, list) { 259 - if (iterator++ == adapno) break; 260 + if (iterator++ == adapno) { 261 + is_found = true; 262 + break; 263 + } 260 264 } 261 265 262 - if (!adapter) { 266 + if (!is_found) { 263 267 *rval = -ENODEV; 264 268 return NULL; 265 269 } ··· 729 725 uint32_t adapno; 730 726 int iterator; 731 727 mraid_mmadp_t* adapter; 728 + bool is_found; 732 729 733 730 /* 734 731 * When the kioc returns from driver, make sure it still doesn't ··· 752 747 iterator = 0; 753 748 adapter = NULL; 754 749 adapno = kioc->adapno; 750 + is_found = false; 755 751 756 752 con_log(CL_ANN, ( KERN_WARNING "megaraid cmm: completed " 757 753 "ioctl that was timedout before\n")); 758 754 759 755 list_for_each_entry(adapter, &adapters_list_g, list) { 760 - if (iterator++ == adapno) break; 756 + if (iterator++ == adapno) { 757 + is_found = true; 758 + break; 759 + } 761 760 } 762 761 763 762 kioc->timedout = 0; 764 763 765 - if (adapter) { 764 + if (is_found) 766 765 mraid_mm_dealloc_kioc( adapter, kioc ); 767 - } 766 + 768 767 } 769 768 else { 770 769 wake_up(&wait_q);
+15 -17
drivers/scsi/pm8001/pm8001_sas.c
··· 684 684 685 685 void pm8001_task_done(struct sas_task *task) 686 686 { 687 - if (!del_timer(&task->slow_task->timer)) 688 - return; 687 + del_timer(&task->slow_task->timer); 689 688 complete(&task->slow_task->completion); 690 689 } 691 690 ··· 692 693 { 693 694 struct sas_task_slow *slow = from_timer(slow, t, timer); 694 695 struct sas_task *task = slow->task; 696 + unsigned long flags; 695 697 696 - task->task_state_flags |= SAS_TASK_STATE_ABORTED; 697 - complete(&task->slow_task->completion); 698 + spin_lock_irqsave(&task->task_state_lock, flags); 699 + if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { 700 + task->task_state_flags |= SAS_TASK_STATE_ABORTED; 701 + complete(&task->slow_task->completion); 702 + } 703 + spin_unlock_irqrestore(&task->task_state_lock, flags); 698 704 } 699 705 700 706 #define PM8001_TASK_TIMEOUT 20 ··· 752 748 } 753 749 res = -TMF_RESP_FUNC_FAILED; 754 750 /* Even TMF timed out, return direct. */ 755 - if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { 756 - if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { 757 - pm8001_dbg(pm8001_ha, FAIL, 758 - "TMF task[%x]timeout.\n", 759 - tmf->tmf); 760 - goto ex_err; 761 - } 751 + if (task->task_state_flags & SAS_TASK_STATE_ABORTED) { 752 + pm8001_dbg(pm8001_ha, FAIL, "TMF task[%x]timeout.\n", 753 + tmf->tmf); 754 + goto ex_err; 762 755 } 763 756 764 757 if (task->task_status.resp == SAS_TASK_COMPLETE && ··· 835 834 wait_for_completion(&task->slow_task->completion); 836 835 res = TMF_RESP_FUNC_FAILED; 837 836 /* Even TMF timed out, return direct. */ 838 - if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { 839 - if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { 840 - pm8001_dbg(pm8001_ha, FAIL, 841 - "TMF task timeout.\n"); 842 - goto ex_err; 843 - } 837 + if (task->task_state_flags & SAS_TASK_STATE_ABORTED) { 838 + pm8001_dbg(pm8001_ha, FAIL, "TMF task timeout.\n"); 839 + goto ex_err; 844 840 } 845 841 846 842 if (task->task_status.resp == SAS_TASK_COMPLETE &&
+2 -1
drivers/scsi/scsi_scan.c
··· 475 475 error = shost->hostt->target_alloc(starget); 476 476 477 477 if(error) { 478 - dev_printk(KERN_ERR, dev, "target allocation failed, error %d\n", error); 478 + if (error != -ENXIO) 479 + dev_err(dev, "target allocation failed, error %d\n", error); 479 480 /* don't want scsi_target_reap to do the final 480 481 * put because it will be under the host lock */ 481 482 scsi_target_destroy(starget);
+6 -3
drivers/scsi/scsi_sysfs.c
··· 807 807 mutex_lock(&sdev->state_mutex); 808 808 ret = scsi_device_set_state(sdev, state); 809 809 /* 810 - * If the device state changes to SDEV_RUNNING, we need to run 811 - * the queue to avoid I/O hang. 810 + * If the device state changes to SDEV_RUNNING, we need to 811 + * rescan the device to revalidate it, and run the queue to 812 + * avoid I/O hang. 812 813 */ 813 - if (ret == 0 && state == SDEV_RUNNING) 814 + if (ret == 0 && state == SDEV_RUNNING) { 815 + scsi_rescan_device(dev); 814 816 blk_mq_run_hw_queues(sdev->request_queue, true); 817 + } 815 818 mutex_unlock(&sdev->state_mutex); 816 819 817 820 return ret == 0 ? count : -EINVAL;
+1 -1
drivers/scsi/sr.c
··· 221 221 else if (med->media_event_code == 2) 222 222 return DISK_EVENT_MEDIA_CHANGE; 223 223 else if (med->media_event_code == 3) 224 - return DISK_EVENT_EJECT_REQUEST; 224 + return DISK_EVENT_MEDIA_CHANGE; 225 225 return 0; 226 226 } 227 227
+1 -1
fs/cifs/file.c
··· 4619 4619 4620 4620 static int cifs_readpage(struct file *file, struct page *page) 4621 4621 { 4622 - loff_t offset = (loff_t)page->index << PAGE_SHIFT; 4622 + loff_t offset = page_file_offset(page); 4623 4623 int rc = -EACCES; 4624 4624 unsigned int xid; 4625 4625
+7
fs/cifs/fs_context.c
··· 925 925 ctx->cred_uid = uid; 926 926 ctx->cruid_specified = true; 927 927 break; 928 + case Opt_backupuid: 929 + uid = make_kuid(current_user_ns(), result.uint_32); 930 + if (!uid_valid(uid)) 931 + goto cifs_parse_mount_err; 932 + ctx->backupuid = uid; 933 + ctx->backupuid_specified = true; 934 + break; 928 935 case Opt_backupgid: 929 936 gid = make_kgid(current_user_ns(), result.uint_32); 930 937 if (!gid_valid(gid))
+2 -1
fs/cifs/smb2ops.c
··· 3617 3617 char *buf) 3618 3618 { 3619 3619 struct cifs_io_parms io_parms = {0}; 3620 - int rc, nbytes; 3620 + int nbytes; 3621 + int rc = 0; 3621 3622 struct kvec iov[2]; 3622 3623 3623 3624 io_parms.netfid = cfile->fid.netfid;
+17 -2
fs/pipe.c
··· 32 32 #include "internal.h" 33 33 34 34 /* 35 + * New pipe buffers will be restricted to this size while the user is exceeding 36 + * their pipe buffer quota. The general pipe use case needs at least two 37 + * buffers: one for data yet to be read, and one for new data. If this is less 38 + * than two, then a write to a non-empty pipe may block even if the pipe is not 39 + * full. This can occur with GNU make jobserver or similar uses of pipes as 40 + * semaphores: multiple processes may be waiting to write tokens back to the 41 + * pipe before reading tokens: https://lore.kernel.org/lkml/1628086770.5rn8p04n6j.none@localhost/. 42 + * 43 + * Users can reduce their pipe buffers with F_SETPIPE_SZ below this at their 44 + * own risk, namely: pipe writes to non-full pipes may block until the pipe is 45 + * emptied. 46 + */ 47 + #define PIPE_MIN_DEF_BUFFERS 2 48 + 49 + /* 35 50 * The max size that a non-root user is allowed to grow the pipe. Can 36 51 * be set by root in /proc/sys/fs/pipe-max-size 37 52 */ ··· 796 781 user_bufs = account_pipe_buffers(user, 0, pipe_bufs); 797 782 798 783 if (too_many_pipe_buffers_soft(user_bufs) && pipe_is_unprivileged_user()) { 799 - user_bufs = account_pipe_buffers(user, pipe_bufs, 1); 800 - pipe_bufs = 1; 784 + user_bufs = account_pipe_buffers(user, pipe_bufs, PIPE_MIN_DEF_BUFFERS); 785 + pipe_bufs = PIPE_MIN_DEF_BUFFERS; 801 786 } 802 787 803 788 if (too_many_pipe_buffers_hard(user_bufs) && pipe_is_unprivileged_user())
+10 -1
fs/xfs/libxfs/xfs_log_format.h
··· 411 411 /* start of the extended dinode, writable fields */ 412 412 uint32_t di_crc; /* CRC of the inode */ 413 413 uint64_t di_changecount; /* number of attribute changes */ 414 - xfs_lsn_t di_lsn; /* flush sequence */ 414 + 415 + /* 416 + * The LSN we write to this field during formatting is not a reflection 417 + * of the current on-disk LSN. It should never be used for recovery 418 + * sequencing, nor should it be recovered into the on-disk inode at all. 419 + * See xlog_recover_inode_commit_pass2() and xfs_log_dinode_to_disk() 420 + * for details. 421 + */ 422 + xfs_lsn_t di_lsn; 423 + 415 424 uint64_t di_flags2; /* more random flags */ 416 425 uint32_t di_cowextsize; /* basic cow extent size for file */ 417 426 uint8_t di_pad2[12]; /* more padding for future expansion */
+13 -2
fs/xfs/xfs_buf_item_recover.c
··· 698 698 static xfs_lsn_t 699 699 xlog_recover_get_buf_lsn( 700 700 struct xfs_mount *mp, 701 - struct xfs_buf *bp) 701 + struct xfs_buf *bp, 702 + struct xfs_buf_log_format *buf_f) 702 703 { 703 704 uint32_t magic32; 704 705 uint16_t magic16; ··· 707 706 void *blk = bp->b_addr; 708 707 uuid_t *uuid; 709 708 xfs_lsn_t lsn = -1; 709 + uint16_t blft; 710 710 711 711 /* v4 filesystems always recover immediately */ 712 712 if (!xfs_sb_version_hascrc(&mp->m_sb)) 713 + goto recover_immediately; 714 + 715 + /* 716 + * realtime bitmap and summary file blocks do not have magic numbers or 717 + * UUIDs, so we must recover them immediately. 718 + */ 719 + blft = xfs_blft_from_flags(buf_f); 720 + if (blft == XFS_BLFT_RTBITMAP_BUF || blft == XFS_BLFT_RTSUMMARY_BUF) 713 721 goto recover_immediately; 714 722 715 723 magic32 = be32_to_cpu(*(__be32 *)blk); ··· 806 796 switch (magicda) { 807 797 case XFS_DIR3_LEAF1_MAGIC: 808 798 case XFS_DIR3_LEAFN_MAGIC: 799 + case XFS_ATTR3_LEAF_MAGIC: 809 800 case XFS_DA3_NODE_MAGIC: 810 801 lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn); 811 802 uuid = &((struct xfs_da3_blkinfo *)blk)->uuid; ··· 930 919 * the verifier will be reset to match whatever recover turns that 931 920 * buffer into. 932 921 */ 933 - lsn = xlog_recover_get_buf_lsn(mp, bp); 922 + lsn = xlog_recover_get_buf_lsn(mp, bp, buf_f); 934 923 if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) { 935 924 trace_xfs_log_recover_buf_skip(log, buf_f); 936 925 xlog_recover_validate_buf_type(mp, bp, buf_f, NULLCOMMITLSN);
+29 -10
fs/xfs/xfs_inode_item_recover.c
··· 145 145 STATIC void 146 146 xfs_log_dinode_to_disk( 147 147 struct xfs_log_dinode *from, 148 - struct xfs_dinode *to) 148 + struct xfs_dinode *to, 149 + xfs_lsn_t lsn) 149 150 { 150 151 to->di_magic = cpu_to_be16(from->di_magic); 151 152 to->di_mode = cpu_to_be16(from->di_mode); ··· 183 182 to->di_flags2 = cpu_to_be64(from->di_flags2); 184 183 to->di_cowextsize = cpu_to_be32(from->di_cowextsize); 185 184 to->di_ino = cpu_to_be64(from->di_ino); 186 - to->di_lsn = cpu_to_be64(from->di_lsn); 185 + to->di_lsn = cpu_to_be64(lsn); 187 186 memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2)); 188 187 uuid_copy(&to->di_uuid, &from->di_uuid); 189 188 to->di_flushiter = 0; ··· 262 261 } 263 262 264 263 /* 265 - * If the inode has an LSN in it, recover the inode only if it's less 266 - * than the lsn of the transaction we are replaying. Note: we still 267 - * need to replay an owner change even though the inode is more recent 268 - * than the transaction as there is no guarantee that all the btree 269 - * blocks are more recent than this transaction, too. 264 + * If the inode has an LSN in it, recover the inode only if the on-disk 265 + * inode's LSN is older than the lsn of the transaction we are 266 + * replaying. We can have multiple checkpoints with the same start LSN, 267 + * so the current LSN being equal to the on-disk LSN doesn't necessarily 268 + * mean that the on-disk inode is more recent than the change being 269 + * replayed. 270 + * 271 + * We must check the current_lsn against the on-disk inode 272 + * here because the we can't trust the log dinode to contain a valid LSN 273 + * (see comment below before replaying the log dinode for details). 274 + * 275 + * Note: we still need to replay an owner change even though the inode 276 + * is more recent than the transaction as there is no guarantee that all 277 + * the btree blocks are more recent than this transaction, too. 
270 278 */ 271 279 if (dip->di_version >= 3) { 272 280 xfs_lsn_t lsn = be64_to_cpu(dip->di_lsn); 273 281 274 - if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) { 282 + if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) > 0) { 275 283 trace_xfs_log_recover_inode_skip(log, in_f); 276 284 error = 0; 277 285 goto out_owner_change; ··· 378 368 goto out_release; 379 369 } 380 370 381 - /* recover the log dinode inode into the on disk inode */ 382 - xfs_log_dinode_to_disk(ldip, dip); 371 + /* 372 + * Recover the log dinode inode into the on disk inode. 373 + * 374 + * The LSN in the log dinode is garbage - it can be zero or reflect 375 + * stale in-memory runtime state that isn't coherent with the changes 376 + * logged in this transaction or the changes written to the on-disk 377 + * inode. Hence we write the current lSN into the inode because that 378 + * matches what xfs_iflush() would write inode the inode when flushing 379 + * the changes in this transaction. 380 + */ 381 + xfs_log_dinode_to_disk(ldip, dip, current_lsn); 383 382 384 383 fields = in_f->ilf_fields; 385 384 if (fields & XFS_ILOG_DEV)
+165 -86
fs/xfs/xfs_log.c
··· 78 78 STATIC void 79 79 xlog_verify_tail_lsn( 80 80 struct xlog *log, 81 - struct xlog_in_core *iclog, 82 - xfs_lsn_t tail_lsn); 81 + struct xlog_in_core *iclog); 83 82 #else 84 83 #define xlog_verify_dest_ptr(a,b) 85 84 #define xlog_verify_grant_tail(a) 86 85 #define xlog_verify_iclog(a,b,c) 87 - #define xlog_verify_tail_lsn(a,b,c) 86 + #define xlog_verify_tail_lsn(a,b) 88 87 #endif 89 88 90 89 STATIC int ··· 486 487 return error; 487 488 } 488 489 489 - static bool 490 - __xlog_state_release_iclog( 491 - struct xlog *log, 492 - struct xlog_in_core *iclog) 493 - { 494 - lockdep_assert_held(&log->l_icloglock); 495 - 496 - if (iclog->ic_state == XLOG_STATE_WANT_SYNC) { 497 - /* update tail before writing to iclog */ 498 - xfs_lsn_t tail_lsn = xlog_assign_tail_lsn(log->l_mp); 499 - 500 - iclog->ic_state = XLOG_STATE_SYNCING; 501 - iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn); 502 - xlog_verify_tail_lsn(log, iclog, tail_lsn); 503 - /* cycle incremented when incrementing curr_block */ 504 - trace_xlog_iclog_syncing(iclog, _RET_IP_); 505 - return true; 506 - } 507 - 508 - ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE); 509 - return false; 510 - } 511 - 512 490 /* 513 491 * Flush iclog to disk if this is the last reference to the given iclog and the 514 492 * it is in the WANT_SYNC state. 493 + * 494 + * If the caller passes in a non-zero @old_tail_lsn and the current log tail 495 + * does not match, there may be metadata on disk that must be persisted before 496 + * this iclog is written. To satisfy that requirement, set the 497 + * XLOG_ICL_NEED_FLUSH flag as a condition for writing this iclog with the new 498 + * log tail value. 499 + * 500 + * If XLOG_ICL_NEED_FUA is already set on the iclog, we need to ensure that the 501 + * log tail is updated correctly. NEED_FUA indicates that the iclog will be 502 + * written to stable storage, and implies that a commit record is contained 503 + * within the iclog. 
We need to ensure that the log tail does not move beyond 504 + * the tail that the first commit record in the iclog ordered against, otherwise 505 + * correct recovery of that checkpoint becomes dependent on future operations 506 + * performed on this iclog. 507 + * 508 + * Hence if NEED_FUA is set and the current iclog tail lsn is empty, write the 509 + * current tail into iclog. Once the iclog tail is set, future operations must 510 + * not modify it, otherwise they potentially violate ordering constraints for 511 + * the checkpoint commit that wrote the initial tail lsn value. The tail lsn in 512 + * the iclog will get zeroed on activation of the iclog after sync, so we 513 + * always capture the tail lsn on the iclog on the first NEED_FUA release 514 + * regardless of the number of active reference counts on this iclog. 515 515 */ 516 + 516 517 int 517 518 xlog_state_release_iclog( 518 519 struct xlog *log, 519 - struct xlog_in_core *iclog) 520 + struct xlog_in_core *iclog, 521 + xfs_lsn_t old_tail_lsn) 520 522 { 523 + xfs_lsn_t tail_lsn; 521 524 lockdep_assert_held(&log->l_icloglock); 522 525 523 526 trace_xlog_iclog_release(iclog, _RET_IP_); 524 527 if (iclog->ic_state == XLOG_STATE_IOERROR) 525 528 return -EIO; 526 529 527 - if (atomic_dec_and_test(&iclog->ic_refcnt) && 528 - __xlog_state_release_iclog(log, iclog)) { 529 - spin_unlock(&log->l_icloglock); 530 - xlog_sync(log, iclog); 531 - spin_lock(&log->l_icloglock); 530 + /* 531 + * Grabbing the current log tail needs to be atomic w.r.t. the writing 532 + * of the tail LSN into the iclog so we guarantee that the log tail does 533 + * not move between deciding if a cache flush is required and writing 534 + * the LSN into the iclog below. 
535 + */ 536 + if (old_tail_lsn || iclog->ic_state == XLOG_STATE_WANT_SYNC) { 537 + tail_lsn = xlog_assign_tail_lsn(log->l_mp); 538 + 539 + if (old_tail_lsn && tail_lsn != old_tail_lsn) 540 + iclog->ic_flags |= XLOG_ICL_NEED_FLUSH; 541 + 542 + if ((iclog->ic_flags & XLOG_ICL_NEED_FUA) && 543 + !iclog->ic_header.h_tail_lsn) 544 + iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn); 532 545 } 533 546 547 + if (!atomic_dec_and_test(&iclog->ic_refcnt)) 548 + return 0; 549 + 550 + if (iclog->ic_state != XLOG_STATE_WANT_SYNC) { 551 + ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE); 552 + return 0; 553 + } 554 + 555 + iclog->ic_state = XLOG_STATE_SYNCING; 556 + if (!iclog->ic_header.h_tail_lsn) 557 + iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn); 558 + xlog_verify_tail_lsn(log, iclog); 559 + trace_xlog_iclog_syncing(iclog, _RET_IP_); 560 + 561 + spin_unlock(&log->l_icloglock); 562 + xlog_sync(log, iclog); 563 + spin_lock(&log->l_icloglock); 534 564 return 0; 535 565 } 536 566 ··· 802 774 } 803 775 804 776 /* 777 + * Flush out the iclog to disk ensuring that device caches are flushed and 778 + * the iclog hits stable storage before any completion waiters are woken. 779 + */ 780 + static inline int 781 + xlog_force_iclog( 782 + struct xlog_in_core *iclog) 783 + { 784 + atomic_inc(&iclog->ic_refcnt); 785 + iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA; 786 + if (iclog->ic_state == XLOG_STATE_ACTIVE) 787 + xlog_state_switch_iclogs(iclog->ic_log, iclog, 0); 788 + return xlog_state_release_iclog(iclog->ic_log, iclog, 0); 789 + } 790 + 791 + /* 805 792 * Wait for the iclog and all prior iclogs to be written disk as required by the 806 793 * log force state machine. 
Waiting on ic_force_wait ensures iclog completions 807 794 * have been ordered and callbacks run before we are woken here, hence ··· 870 827 /* account for space used by record data */ 871 828 ticket->t_curr_res -= sizeof(ulf); 872 829 873 - /* 874 - * For external log devices, we need to flush the data device cache 875 - * first to ensure all metadata writeback is on stable storage before we 876 - * stamp the tail LSN into the unmount record. 877 - */ 878 - if (log->l_targ != log->l_mp->m_ddev_targp) 879 - blkdev_issue_flush(log->l_targ->bt_bdev); 880 830 return xlog_write(log, &vec, ticket, NULL, NULL, XLOG_UNMOUNT_TRANS); 881 831 } 882 832 ··· 901 865 902 866 spin_lock(&log->l_icloglock); 903 867 iclog = log->l_iclog; 904 - atomic_inc(&iclog->ic_refcnt); 905 - if (iclog->ic_state == XLOG_STATE_ACTIVE) 906 - xlog_state_switch_iclogs(log, iclog, 0); 907 - else 908 - ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC || 909 - iclog->ic_state == XLOG_STATE_IOERROR); 910 - /* 911 - * Ensure the journal is fully flushed and on stable storage once the 912 - * iclog containing the unmount record is written. 913 - */ 914 - iclog->ic_flags |= (XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA); 915 - error = xlog_state_release_iclog(log, iclog); 868 + error = xlog_force_iclog(iclog); 916 869 xlog_wait_on_iclog(iclog); 917 870 918 871 if (tic) { ··· 1821 1796 * metadata writeback and causing priority inversions. 1822 1797 */ 1823 1798 iclog->ic_bio.bi_opf = REQ_OP_WRITE | REQ_META | REQ_SYNC | REQ_IDLE; 1824 - if (iclog->ic_flags & XLOG_ICL_NEED_FLUSH) 1799 + if (iclog->ic_flags & XLOG_ICL_NEED_FLUSH) { 1825 1800 iclog->ic_bio.bi_opf |= REQ_PREFLUSH; 1801 + /* 1802 + * For external log devices, we also need to flush the data 1803 + * device cache first to ensure all metadata writeback covered 1804 + * by the LSN in this iclog is on stable storage. This is slow, 1805 + * but it *must* complete before we issue the external log IO. 
1806 + */ 1807 + if (log->l_targ != log->l_mp->m_ddev_targp) 1808 + blkdev_issue_flush(log->l_mp->m_ddev_targp->bt_bdev); 1809 + } 1826 1810 if (iclog->ic_flags & XLOG_ICL_NEED_FUA) 1827 1811 iclog->ic_bio.bi_opf |= REQ_FUA; 1812 + 1828 1813 iclog->ic_flags &= ~(XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA); 1829 1814 1830 1815 if (xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count)) { ··· 2345 2310 return 0; 2346 2311 2347 2312 release_iclog: 2348 - error = xlog_state_release_iclog(log, iclog); 2313 + error = xlog_state_release_iclog(log, iclog, 0); 2349 2314 spin_unlock(&log->l_icloglock); 2350 2315 return error; 2351 2316 } ··· 2564 2529 ASSERT(optype & XLOG_COMMIT_TRANS); 2565 2530 *commit_iclog = iclog; 2566 2531 } else { 2567 - error = xlog_state_release_iclog(log, iclog); 2532 + error = xlog_state_release_iclog(log, iclog, 0); 2568 2533 } 2569 2534 spin_unlock(&log->l_icloglock); 2570 2535 ··· 2602 2567 memset(iclog->ic_header.h_cycle_data, 0, 2603 2568 sizeof(iclog->ic_header.h_cycle_data)); 2604 2569 iclog->ic_header.h_lsn = 0; 2570 + iclog->ic_header.h_tail_lsn = 0; 2605 2571 } 2606 2572 2607 2573 /* ··· 3003 2967 * reference to the iclog. 3004 2968 */ 3005 2969 if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1)) 3006 - error = xlog_state_release_iclog(log, iclog); 2970 + error = xlog_state_release_iclog(log, iclog, 0); 3007 2971 spin_unlock(&log->l_icloglock); 3008 2972 if (error) 3009 2973 return error; ··· 3168 3132 } 3169 3133 3170 3134 /* 3135 + * Force the iclog to disk and check if the iclog has been completed before 3136 + * xlog_force_iclog() returns. This can happen on synchronous (e.g. 3137 + * pmem) or fast async storage because we drop the icloglock to issue the IO. 3138 + * If completion has already occurred, tell the caller so that it can avoid an 3139 + * unnecessary wait on the iclog. 
3140 + */ 3141 + static int 3142 + xlog_force_and_check_iclog( 3143 + struct xlog_in_core *iclog, 3144 + bool *completed) 3145 + { 3146 + xfs_lsn_t lsn = be64_to_cpu(iclog->ic_header.h_lsn); 3147 + int error; 3148 + 3149 + *completed = false; 3150 + error = xlog_force_iclog(iclog); 3151 + if (error) 3152 + return error; 3153 + 3154 + /* 3155 + * If the iclog has already been completed and reused the header LSN 3156 + * will have been rewritten by completion 3157 + */ 3158 + if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) 3159 + *completed = true; 3160 + return 0; 3161 + } 3162 + 3163 + /* 3171 3164 * Write out all data in the in-core log as of this exact moment in time. 3172 3165 * 3173 3166 * Data may be written to the in-core log during this call. However, ··· 3230 3165 { 3231 3166 struct xlog *log = mp->m_log; 3232 3167 struct xlog_in_core *iclog; 3233 - xfs_lsn_t lsn; 3234 3168 3235 3169 XFS_STATS_INC(mp, xs_log_force); 3236 3170 trace_xfs_log_force(mp, 0, _RET_IP_); ··· 3257 3193 iclog = iclog->ic_prev; 3258 3194 } else if (iclog->ic_state == XLOG_STATE_ACTIVE) { 3259 3195 if (atomic_read(&iclog->ic_refcnt) == 0) { 3260 - /* 3261 - * We are the only one with access to this iclog. 3262 - * 3263 - * Flush it out now. There should be a roundoff of zero 3264 - * to show that someone has already taken care of the 3265 - * roundoff from the previous sync. 3266 - */ 3267 - atomic_inc(&iclog->ic_refcnt); 3268 - lsn = be64_to_cpu(iclog->ic_header.h_lsn); 3269 - xlog_state_switch_iclogs(log, iclog, 0); 3270 - if (xlog_state_release_iclog(log, iclog)) 3196 + /* We have exclusive access to this iclog. */ 3197 + bool completed; 3198 + 3199 + if (xlog_force_and_check_iclog(iclog, &completed)) 3271 3200 goto out_error; 3272 3201 3273 - if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) 3202 + if (completed) 3274 3203 goto out_unlock; 3275 3204 } else { 3276 3205 /* 3277 - * Someone else is writing to this iclog. 3278 - * 3279 - * Use its call to flush out the data. 
However, the 3280 - * other thread may not force out this LR, so we mark 3281 - * it WANT_SYNC. 3206 + * Someone else is still writing to this iclog, so we 3207 + * need to ensure that when they release the iclog it 3208 + * gets synced immediately as we may be waiting on it. 3282 3209 */ 3283 3210 xlog_state_switch_iclogs(log, iclog, 0); 3284 3211 } 3285 - } else { 3286 - /* 3287 - * If the head iclog is not active nor dirty, we just attach 3288 - * ourselves to the head and go to sleep if necessary. 3289 - */ 3290 - ; 3291 3212 } 3213 + 3214 + /* 3215 + * The iclog we are about to wait on may contain the checkpoint pushed 3216 + * by the above xlog_cil_force() call, but it may not have been pushed 3217 + * to disk yet. Like the ACTIVE case above, we need to make sure caches 3218 + * are flushed when this iclog is written. 3219 + */ 3220 + if (iclog->ic_state == XLOG_STATE_WANT_SYNC) 3221 + iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA; 3292 3222 3293 3223 if (flags & XFS_LOG_SYNC) 3294 3224 return xlog_wait_on_iclog(iclog); ··· 3303 3245 bool already_slept) 3304 3246 { 3305 3247 struct xlog_in_core *iclog; 3248 + bool completed; 3306 3249 3307 3250 spin_lock(&log->l_icloglock); 3308 3251 iclog = log->l_iclog; ··· 3317 3258 goto out_unlock; 3318 3259 } 3319 3260 3320 - if (iclog->ic_state == XLOG_STATE_ACTIVE) { 3261 + switch (iclog->ic_state) { 3262 + case XLOG_STATE_ACTIVE: 3321 3263 /* 3322 3264 * We sleep here if we haven't already slept (e.g. 
this is the 3323 3265 * first time we've looked at the correct iclog buf) and the ··· 3341 3281 &log->l_icloglock); 3342 3282 return -EAGAIN; 3343 3283 } 3344 - atomic_inc(&iclog->ic_refcnt); 3345 - xlog_state_switch_iclogs(log, iclog, 0); 3346 - if (xlog_state_release_iclog(log, iclog)) 3284 + if (xlog_force_and_check_iclog(iclog, &completed)) 3347 3285 goto out_error; 3348 3286 if (log_flushed) 3349 3287 *log_flushed = 1; 3288 + if (completed) 3289 + goto out_unlock; 3290 + break; 3291 + case XLOG_STATE_WANT_SYNC: 3292 + /* 3293 + * This iclog may contain the checkpoint pushed by the 3294 + * xlog_cil_force_seq() call, but there are other writers still 3295 + * accessing it so it hasn't been pushed to disk yet. Like the 3296 + * ACTIVE case above, we need to make sure caches are flushed 3297 + * when this iclog is written. 3298 + */ 3299 + iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA; 3300 + break; 3301 + default: 3302 + /* 3303 + * The entire checkpoint was written by the CIL force and is on 3304 + * its way to disk already. It will be stable when it 3305 + * completes, so we don't need to manipulate caches here at all. 3306 + * We just need to wait for completion if necessary. 3307 + */ 3308 + break; 3350 3309 } 3351 3310 3352 3311 if (flags & XFS_LOG_SYNC) ··· 3638 3559 STATIC void 3639 3560 xlog_verify_tail_lsn( 3640 3561 struct xlog *log, 3641 - struct xlog_in_core *iclog, 3642 - xfs_lsn_t tail_lsn) 3562 + struct xlog_in_core *iclog) 3643 3563 { 3644 - int blocks; 3564 + xfs_lsn_t tail_lsn = be64_to_cpu(iclog->ic_header.h_tail_lsn); 3565 + int blocks; 3645 3566 3646 3567 if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) { 3647 3568 blocks =
+11 -2
fs/xfs/xfs_log_cil.c
··· 654 654 struct xfs_trans_header thdr; 655 655 struct xfs_log_iovec lhdr; 656 656 struct xfs_log_vec lvhdr = { NULL }; 657 + xfs_lsn_t preflush_tail_lsn; 657 658 xfs_lsn_t commit_lsn; 658 - xfs_lsn_t push_seq; 659 + xfs_csn_t push_seq; 659 660 struct bio bio; 660 661 DECLARE_COMPLETION_ONSTACK(bdev_flush); 661 662 ··· 731 730 * because we hold the flush lock exclusively. Hence we can now issue 732 731 * a cache flush to ensure all the completed metadata in the journal we 733 732 * are about to overwrite is on stable storage. 733 + * 734 + * Because we are issuing this cache flush before we've written the 735 + * tail lsn to the iclog, we can have metadata IO completions move the 736 + * tail forwards between the completion of this flush and the iclog 737 + * being written. In this case, we need to re-issue the cache flush 738 + * before the iclog write. To detect whether the log tail moves, sample 739 + * the tail LSN *before* we issue the flush. 734 740 */ 741 + preflush_tail_lsn = atomic64_read(&log->l_tail_lsn); 735 742 xfs_flush_bdev_async(&bio, log->l_mp->m_ddev_targp->bt_bdev, 736 743 &bdev_flush); 737 744 ··· 950 941 * storage. 951 942 */ 952 943 commit_iclog->ic_flags |= XLOG_ICL_NEED_FUA; 953 - xlog_state_release_iclog(log, commit_iclog); 944 + xlog_state_release_iclog(log, commit_iclog, preflush_tail_lsn); 954 945 spin_unlock(&log->l_icloglock); 955 946 return; 956 947
+12 -4
fs/xfs/xfs_log_priv.h
··· 59 59 { XLOG_STATE_DIRTY, "XLOG_STATE_DIRTY" }, \ 60 60 { XLOG_STATE_IOERROR, "XLOG_STATE_IOERROR" } 61 61 62 + /* 63 + * In core log flags 64 + */ 65 + #define XLOG_ICL_NEED_FLUSH (1 << 0) /* iclog needs REQ_PREFLUSH */ 66 + #define XLOG_ICL_NEED_FUA (1 << 1) /* iclog needs REQ_FUA */ 67 + 68 + #define XLOG_ICL_STRINGS \ 69 + { XLOG_ICL_NEED_FLUSH, "XLOG_ICL_NEED_FLUSH" }, \ 70 + { XLOG_ICL_NEED_FUA, "XLOG_ICL_NEED_FUA" } 71 + 62 72 63 73 /* 64 74 * Log ticket flags ··· 152 142 #define XLOG_STATE_COVER_DONE2 4 153 143 154 144 #define XLOG_COVER_OPS 5 155 - 156 - #define XLOG_ICL_NEED_FLUSH (1 << 0) /* iclog needs REQ_PREFLUSH */ 157 - #define XLOG_ICL_NEED_FUA (1 << 1) /* iclog needs REQ_FUA */ 158 145 159 146 /* Ticket reservation region accounting */ 160 147 #define XLOG_TIC_LEN_MAX 15 ··· 504 497 void xfs_log_ticket_ungrant(struct xlog *log, struct xlog_ticket *ticket); 505 498 void xfs_log_ticket_regrant(struct xlog *log, struct xlog_ticket *ticket); 506 499 507 - int xlog_state_release_iclog(struct xlog *log, struct xlog_in_core *iclog); 500 + int xlog_state_release_iclog(struct xlog *log, struct xlog_in_core *iclog, 501 + xfs_lsn_t log_tail_lsn); 508 502 509 503 /* 510 504 * When we crack an atomic LSN, we sample it first so that the value will not
+4 -1
fs/xfs/xfs_trace.h
··· 3944 3944 __field(uint32_t, state) 3945 3945 __field(int32_t, refcount) 3946 3946 __field(uint32_t, offset) 3947 + __field(uint32_t, flags) 3947 3948 __field(unsigned long long, lsn) 3948 3949 __field(unsigned long, caller_ip) 3949 3950 ), ··· 3953 3952 __entry->state = iclog->ic_state; 3954 3953 __entry->refcount = atomic_read(&iclog->ic_refcnt); 3955 3954 __entry->offset = iclog->ic_offset; 3955 + __entry->flags = iclog->ic_flags; 3956 3956 __entry->lsn = be64_to_cpu(iclog->ic_header.h_lsn); 3957 3957 __entry->caller_ip = caller_ip; 3958 3958 ), 3959 - TP_printk("dev %d:%d state %s refcnt %d offset %u lsn 0x%llx caller %pS", 3959 + TP_printk("dev %d:%d state %s refcnt %d offset %u lsn 0x%llx flags %s caller %pS", 3960 3960 MAJOR(__entry->dev), MINOR(__entry->dev), 3961 3961 __print_symbolic(__entry->state, XLOG_STATE_STRINGS), 3962 3962 __entry->refcount, 3963 3963 __entry->offset, 3964 3964 __entry->lsn, 3965 + __print_flags(__entry->flags, "|", XLOG_ICL_STRINGS), 3965 3966 (char *)__entry->caller_ip) 3966 3967 3967 3968 );
+6 -1
include/linux/mhi.h
··· 721 721 * host and device execution environments match and 722 722 * channels are in a DISABLED state. 723 723 * @mhi_dev: Device associated with the channels 724 + * @flags: MHI channel flags 724 725 */ 725 - int mhi_prepare_for_transfer(struct mhi_device *mhi_dev); 726 + int mhi_prepare_for_transfer(struct mhi_device *mhi_dev, 727 + unsigned int flags); 728 + 729 + /* Automatically allocate and queue inbound buffers */ 730 + #define MHI_CH_INBOUND_ALLOC_BUFS BIT(0) 726 731 727 732 /** 728 733 * mhi_unprepare_from_transfer - Reset UL and DL channels for data transfer.
+1
include/net/bluetooth/hci_core.h
··· 1230 1230 void hci_free_dev(struct hci_dev *hdev); 1231 1231 int hci_register_dev(struct hci_dev *hdev); 1232 1232 void hci_unregister_dev(struct hci_dev *hdev); 1233 + void hci_cleanup_dev(struct hci_dev *hdev); 1233 1234 int hci_suspend_dev(struct hci_dev *hdev); 1234 1235 int hci_resume_dev(struct hci_dev *hdev); 1235 1236 int hci_reset_dev(struct hci_dev *hdev);
+1 -1
include/net/flow_offload.h
··· 293 293 } 294 294 295 295 /** 296 - * flow_action_has_one_action() - check if exactly one action is present 296 + * flow_offload_has_one_action() - check if exactly one action is present 297 297 * @action: tc filter flow offload action 298 298 * 299 299 * Returns true if exactly one action is present.
+1 -1
include/net/ip6_route.h
··· 265 265 266 266 static inline unsigned int ip6_skb_dst_mtu(struct sk_buff *skb) 267 267 { 268 - int mtu; 268 + unsigned int mtu; 269 269 270 270 struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ? 271 271 inet6_sk(skb->sk) : NULL;
+1
include/net/netns/xfrm.h
··· 75 75 #endif 76 76 spinlock_t xfrm_state_lock; 77 77 seqcount_spinlock_t xfrm_state_hash_generation; 78 + seqcount_spinlock_t xfrm_policy_hash_generation; 78 79 79 80 spinlock_t xfrm_policy_lock; 80 81 struct mutex xfrm_cfg_mutex;
+4
include/net/pkt_cls.h
··· 329 329 330 330 /** 331 331 * struct tcf_pkt_info - packet information 332 + * 333 + * @ptr: start of the pkt data 334 + * @nexthdr: offset of the next header 332 335 */ 333 336 struct tcf_pkt_info { 334 337 unsigned char * ptr; ··· 350 347 * @ops: the operations lookup table of the corresponding ematch module 351 348 * @datalen: length of the ematch specific configuration data 352 349 * @data: ematch specific data 350 + * @net: the network namespace 353 351 */ 354 352 struct tcf_ematch { 355 353 struct tcf_ematch_ops * ops;
+3 -1
kernel/trace/trace.c
··· 9135 9135 return -EINVAL; 9136 9136 9137 9137 ret = event_trace_add_tracer(tr->dir, tr); 9138 - if (ret) 9138 + if (ret) { 9139 9139 tracefs_remove(tr->dir); 9140 + return ret; 9141 + } 9140 9142 9141 9143 init_tracer_tracefs(tr, tr->dir); 9142 9144 __update_tracer_options(tr);
+23 -1
kernel/trace/trace_events_hist.c
··· 65 65 C(INVALID_SORT_MODIFIER,"Invalid sort modifier"), \ 66 66 C(EMPTY_SORT_FIELD, "Empty sort field"), \ 67 67 C(TOO_MANY_SORT_FIELDS, "Too many sort fields (Max = 2)"), \ 68 - C(INVALID_SORT_FIELD, "Sort field must be a key or a val"), 68 + C(INVALID_SORT_FIELD, "Sort field must be a key or a val"), \ 69 + C(INVALID_STR_OPERAND, "String type can not be an operand in expression"), 69 70 70 71 #undef C 71 72 #define C(a, b) HIST_ERR_##a ··· 2157 2156 ret = PTR_ERR(operand1); 2158 2157 goto free; 2159 2158 } 2159 + if (operand1->flags & HIST_FIELD_FL_STRING) { 2160 + /* String type can not be the operand of unary operator. */ 2161 + hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(str)); 2162 + destroy_hist_field(operand1, 0); 2163 + ret = -EINVAL; 2164 + goto free; 2165 + } 2160 2166 2161 2167 expr->flags |= operand1->flags & 2162 2168 (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS); ··· 2265 2257 operand1 = NULL; 2266 2258 goto free; 2267 2259 } 2260 + if (operand1->flags & HIST_FIELD_FL_STRING) { 2261 + hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(operand1_str)); 2262 + ret = -EINVAL; 2263 + goto free; 2264 + } 2268 2265 2269 2266 /* rest of string could be another expression e.g. b+c in a+b+c */ 2270 2267 operand_flags = 0; ··· 2277 2264 if (IS_ERR(operand2)) { 2278 2265 ret = PTR_ERR(operand2); 2279 2266 operand2 = NULL; 2267 + goto free; 2268 + } 2269 + if (operand2->flags & HIST_FIELD_FL_STRING) { 2270 + hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(str)); 2271 + ret = -EINVAL; 2280 2272 goto free; 2281 2273 } 2282 2274 ··· 2305 2287 2306 2288 expr->operands[0] = operand1; 2307 2289 expr->operands[1] = operand2; 2290 + 2291 + /* The operand sizes should be the same, so just pick one */ 2292 + expr->size = operand1->size; 2293 + 2308 2294 expr->operator = field_op; 2309 2295 expr->name = expr_str(expr, 0); 2310 2296 expr->type = kstrdup(operand1->type, GFP_KERNEL);
+1 -1
kernel/trace/trace_hwlat.c
··· 327 327 328 328 get_online_cpus(); 329 329 cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask); 330 - next_cpu = cpumask_next(smp_processor_id(), current_mask); 330 + next_cpu = cpumask_next(raw_smp_processor_id(), current_mask); 331 331 put_online_cpus(); 332 332 333 333 if (next_cpu >= nr_cpu_ids)
+7 -3
kernel/ucount.c
··· 160 160 { 161 161 struct hlist_head *hashent = ucounts_hashentry(ns, uid); 162 162 struct ucounts *ucounts, *new; 163 + long overflow; 163 164 164 165 spin_lock_irq(&ucounts_lock); 165 166 ucounts = find_ucounts(ns, uid, hashent); ··· 185 184 return new; 186 185 } 187 186 } 187 + overflow = atomic_add_negative(1, &ucounts->count); 188 188 spin_unlock_irq(&ucounts_lock); 189 - ucounts = get_ucounts(ucounts); 189 + if (overflow) { 190 + put_ucounts(ucounts); 191 + return NULL; 192 + } 190 193 return ucounts; 191 194 } 192 195 ··· 198 193 { 199 194 unsigned long flags; 200 195 201 - if (atomic_dec_and_test(&ucounts->count)) { 202 - spin_lock_irqsave(&ucounts_lock, flags); 196 + if (atomic_dec_and_lock_irqsave(&ucounts->count, &ucounts_lock, flags)) { 203 197 hlist_del_init(&ucounts->node); 204 198 spin_unlock_irqrestore(&ucounts_lock, flags); 205 199 kfree(ucounts);
+8 -8
net/bluetooth/hci_core.c
··· 3996 3996 /* Unregister HCI device */ 3997 3997 void hci_unregister_dev(struct hci_dev *hdev) 3998 3998 { 3999 - int id; 4000 - 4001 3999 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); 4002 4000 4003 4001 hci_dev_set_flag(hdev, HCI_UNREGISTER); 4004 - 4005 - id = hdev->id; 4006 4002 4007 4003 write_lock(&hci_dev_list_lock); 4008 4004 list_del(&hdev->list); ··· 4034 4038 } 4035 4039 4036 4040 device_del(&hdev->dev); 4041 + /* Actual cleanup is deferred until hci_cleanup_dev(). */ 4042 + hci_dev_put(hdev); 4043 + } 4044 + EXPORT_SYMBOL(hci_unregister_dev); 4037 4045 4046 + /* Cleanup HCI device */ 4047 + void hci_cleanup_dev(struct hci_dev *hdev) 4048 + { 4038 4049 debugfs_remove_recursive(hdev->debugfs); 4039 4050 kfree_const(hdev->hw_info); 4040 4051 kfree_const(hdev->fw_info); ··· 4066 4063 hci_blocked_keys_clear(hdev); 4067 4064 hci_dev_unlock(hdev); 4068 4065 4069 - hci_dev_put(hdev); 4070 - 4071 - ida_simple_remove(&hci_index_ida, id); 4066 + ida_simple_remove(&hci_index_ida, hdev->id); 4072 4067 } 4073 - EXPORT_SYMBOL(hci_unregister_dev); 4074 4068 4075 4069 /* Suspend HCI device */ 4076 4070 int hci_suspend_dev(struct hci_dev *hdev)
+33 -16
net/bluetooth/hci_sock.c
··· 59 59 char comm[TASK_COMM_LEN]; 60 60 }; 61 61 62 + static struct hci_dev *hci_hdev_from_sock(struct sock *sk) 63 + { 64 + struct hci_dev *hdev = hci_pi(sk)->hdev; 65 + 66 + if (!hdev) 67 + return ERR_PTR(-EBADFD); 68 + if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) 69 + return ERR_PTR(-EPIPE); 70 + return hdev; 71 + } 72 + 62 73 void hci_sock_set_flag(struct sock *sk, int nr) 63 74 { 64 75 set_bit(nr, &hci_pi(sk)->flags); ··· 770 759 if (event == HCI_DEV_UNREG) { 771 760 struct sock *sk; 772 761 773 - /* Detach sockets from device */ 762 + /* Wake up sockets using this dead device */ 774 763 read_lock(&hci_sk_list.lock); 775 764 sk_for_each(sk, &hci_sk_list.head) { 776 - lock_sock(sk); 777 765 if (hci_pi(sk)->hdev == hdev) { 778 - hci_pi(sk)->hdev = NULL; 779 766 sk->sk_err = EPIPE; 780 - sk->sk_state = BT_OPEN; 781 767 sk->sk_state_change(sk); 782 - 783 - hci_dev_put(hdev); 784 768 } 785 - release_sock(sk); 786 769 } 787 770 read_unlock(&hci_sk_list.lock); 788 771 } ··· 935 930 static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, 936 931 unsigned long arg) 937 932 { 938 - struct hci_dev *hdev = hci_pi(sk)->hdev; 933 + struct hci_dev *hdev = hci_hdev_from_sock(sk); 939 934 940 - if (!hdev) 941 - return -EBADFD; 935 + if (IS_ERR(hdev)) 936 + return PTR_ERR(hdev); 942 937 943 938 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) 944 939 return -EBUSY; ··· 1107 1102 return -EINVAL; 1108 1103 1109 1104 lock_sock(sk); 1105 + 1106 + /* Allow detaching from dead device and attaching to alive device, if 1107 + * the caller wants to re-bind (instead of close) this socket in 1108 + * response to hci_sock_dev_event(HCI_DEV_UNREG) notification. 
1109 + */ 1110 + hdev = hci_pi(sk)->hdev; 1111 + if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) { 1112 + hci_pi(sk)->hdev = NULL; 1113 + sk->sk_state = BT_OPEN; 1114 + hci_dev_put(hdev); 1115 + } 1116 + hdev = NULL; 1110 1117 1111 1118 if (sk->sk_state == BT_BOUND) { 1112 1119 err = -EALREADY; ··· 1396 1379 1397 1380 lock_sock(sk); 1398 1381 1399 - hdev = hci_pi(sk)->hdev; 1400 - if (!hdev) { 1401 - err = -EBADFD; 1382 + hdev = hci_hdev_from_sock(sk); 1383 + if (IS_ERR(hdev)) { 1384 + err = PTR_ERR(hdev); 1402 1385 goto done; 1403 1386 } 1404 1387 ··· 1760 1743 goto done; 1761 1744 } 1762 1745 1763 - hdev = hci_pi(sk)->hdev; 1764 - if (!hdev) { 1765 - err = -EBADFD; 1746 + hdev = hci_hdev_from_sock(sk); 1747 + if (IS_ERR(hdev)) { 1748 + err = PTR_ERR(hdev); 1766 1749 goto done; 1767 1750 } 1768 1751
+3
net/bluetooth/hci_sysfs.c
··· 83 83 static void bt_host_release(struct device *dev) 84 84 { 85 85 struct hci_dev *hdev = to_hci_dev(dev); 86 + 87 + if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) 88 + hci_cleanup_dev(hdev); 86 89 kfree(hdev); 87 90 module_put(THIS_MODULE); 88 91 }
+2 -1
net/bridge/br.c
··· 166 166 case SWITCHDEV_FDB_ADD_TO_BRIDGE: 167 167 fdb_info = ptr; 168 168 err = br_fdb_external_learn_add(br, p, fdb_info->addr, 169 - fdb_info->vid, false); 169 + fdb_info->vid, 170 + fdb_info->is_local, false); 170 171 if (err) { 171 172 err = notifier_from_errno(err); 172 173 break;
+24 -6
net/bridge/br_fdb.c
··· 1011 1011 1012 1012 static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br, 1013 1013 struct net_bridge_port *p, const unsigned char *addr, 1014 - u16 nlh_flags, u16 vid, struct nlattr *nfea_tb[]) 1014 + u16 nlh_flags, u16 vid, struct nlattr *nfea_tb[], 1015 + struct netlink_ext_ack *extack) 1015 1016 { 1016 1017 int err = 0; 1017 1018 ··· 1031 1030 rcu_read_unlock(); 1032 1031 local_bh_enable(); 1033 1032 } else if (ndm->ndm_flags & NTF_EXT_LEARNED) { 1034 - err = br_fdb_external_learn_add(br, p, addr, vid, true); 1033 + if (!p && !(ndm->ndm_state & NUD_PERMANENT)) { 1034 + NL_SET_ERR_MSG_MOD(extack, 1035 + "FDB entry towards bridge must be permanent"); 1036 + return -EINVAL; 1037 + } 1038 + 1039 + err = br_fdb_external_learn_add(br, p, addr, vid, 1040 + ndm->ndm_state & NUD_PERMANENT, 1041 + true); 1035 1042 } else { 1036 1043 spin_lock_bh(&br->hash_lock); 1037 1044 err = fdb_add_entry(br, p, addr, ndm, nlh_flags, vid, nfea_tb); ··· 1111 1102 } 1112 1103 1113 1104 /* VID was specified, so use it. 
*/ 1114 - err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid, nfea_tb); 1105 + err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid, nfea_tb, 1106 + extack); 1115 1107 } else { 1116 - err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0, nfea_tb); 1108 + err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0, nfea_tb, 1109 + extack); 1117 1110 if (err || !vg || !vg->num_vlans) 1118 1111 goto out; 1119 1112 ··· 1127 1116 if (!br_vlan_should_use(v)) 1128 1117 continue; 1129 1118 err = __br_fdb_add(ndm, br, p, addr, nlh_flags, v->vid, 1130 - nfea_tb); 1119 + nfea_tb, extack); 1131 1120 if (err) 1132 1121 goto out; 1133 1122 } ··· 1267 1256 } 1268 1257 1269 1258 int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p, 1270 - const unsigned char *addr, u16 vid, 1259 + const unsigned char *addr, u16 vid, bool is_local, 1271 1260 bool swdev_notify) 1272 1261 { 1273 1262 struct net_bridge_fdb_entry *fdb; ··· 1284 1273 1285 1274 if (swdev_notify) 1286 1275 flags |= BIT(BR_FDB_ADDED_BY_USER); 1276 + 1277 + if (is_local) 1278 + flags |= BIT(BR_FDB_LOCAL); 1279 + 1287 1280 fdb = fdb_create(br, p, addr, vid, flags); 1288 1281 if (!fdb) { 1289 1282 err = -ENOMEM; ··· 1313 1298 1314 1299 if (swdev_notify) 1315 1300 set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags); 1301 + 1302 + if (is_local) 1303 + set_bit(BR_FDB_LOCAL, &fdb->flags); 1316 1304 1317 1305 if (modified) 1318 1306 fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
+1 -1
net/bridge/br_private.h
··· 770 770 int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p); 771 771 void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p); 772 772 int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p, 773 - const unsigned char *addr, u16 vid, 773 + const unsigned char *addr, u16 vid, bool is_local, 774 774 bool swdev_notify); 775 775 int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p, 776 776 const unsigned char *addr, u16 vid,
+3
net/ipv4/tcp_offload.c
··· 298 298 if (th->cwr) 299 299 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; 300 300 301 + if (skb->encapsulation) 302 + skb->inner_transport_header = skb->transport_header; 303 + 301 304 return 0; 302 305 } 303 306 EXPORT_SYMBOL(tcp_gro_complete);
+4
net/ipv4/udp_offload.c
··· 624 624 625 625 skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; 626 626 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4; 627 + 628 + if (skb->encapsulation) 629 + skb->inner_transport_header = skb->transport_header; 630 + 627 631 return 0; 628 632 } 629 633
-1
net/mptcp/pm_netlink.c
··· 27 27 struct mptcp_addr_info addr; 28 28 u8 flags; 29 29 int ifindex; 30 - struct rcu_head rcu; 31 30 struct socket *lsk; 32 31 }; 33 32
+15 -1
net/qrtr/mhi.c
··· 15 15 struct qrtr_endpoint ep; 16 16 struct mhi_device *mhi_dev; 17 17 struct device *dev; 18 + struct completion ready; 18 19 }; 19 20 20 21 /* From MHI to QRTR */ ··· 51 50 struct qrtr_mhi_dev *qdev = container_of(ep, struct qrtr_mhi_dev, ep); 52 51 int rc; 53 52 53 + rc = wait_for_completion_interruptible(&qdev->ready); 54 + if (rc) 55 + goto free_skb; 56 + 54 57 if (skb->sk) 55 58 sock_hold(skb->sk); 56 59 ··· 84 79 int rc; 85 80 86 81 /* start channels */ 87 - rc = mhi_prepare_for_transfer(mhi_dev); 82 + rc = mhi_prepare_for_transfer(mhi_dev, 0); 88 83 if (rc) 89 84 return rc; 90 85 ··· 101 96 if (rc) 102 97 return rc; 103 98 99 + /* start channels */ 100 + rc = mhi_prepare_for_transfer(mhi_dev, MHI_CH_INBOUND_ALLOC_BUFS); 101 + if (rc) { 102 + qrtr_endpoint_unregister(&qdev->ep); 103 + dev_set_drvdata(&mhi_dev->dev, NULL); 104 + return rc; 105 + } 106 + 107 + complete_all(&qdev->ready); 104 108 dev_dbg(qdev->dev, "Qualcomm MHI QRTR driver probed\n"); 105 109 106 110 return 0;
+1 -1
net/sched/sch_generic.c
··· 913 913 914 914 /* seqlock has the same scope of busylock, for NOLOCK qdisc */ 915 915 spin_lock_init(&sch->seqlock); 916 - lockdep_set_class(&sch->busylock, 916 + lockdep_set_class(&sch->seqlock, 917 917 dev->qdisc_tx_busylock ?: &qdisc_tx_busylock); 918 918 919 919 seqcount_init(&sch->running);
-2
net/sched/sch_taprio.c
··· 1739 1739 if (FULL_OFFLOAD_IS_ENABLED(q->flags)) { 1740 1740 qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; 1741 1741 old = dev_graft_qdisc(qdisc->dev_queue, qdisc); 1742 - if (ntx < dev->real_num_tx_queues) 1743 - qdisc_hash_add(qdisc, false); 1744 1742 } else { 1745 1743 old = dev_graft_qdisc(qdisc->dev_queue, sch); 1746 1744 qdisc_refcount_inc(sch);
+9 -5
net/sctp/auth.c
··· 857 857 memcpy(key->data, &auth_key->sca_key[0], auth_key->sca_keylength); 858 858 cur_key->key = key; 859 859 860 - if (replace) { 861 - list_del_init(&shkey->key_list); 862 - sctp_auth_shkey_release(shkey); 863 - if (asoc && asoc->active_key_id == auth_key->sca_keynumber) 864 - sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL); 860 + if (!replace) { 861 + list_add(&cur_key->key_list, sh_keys); 862 + return 0; 865 863 } 864 + 865 + list_del_init(&shkey->key_list); 866 + sctp_auth_shkey_release(shkey); 866 867 list_add(&cur_key->key_list, sh_keys); 868 + 869 + if (asoc && asoc->active_key_id == auth_key->sca_keynumber) 870 + sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL); 867 871 868 872 return 0; 869 873 }
+3
net/vmw_vsock/virtio_transport_common.c
··· 1079 1079 virtio_transport_recv_enqueue(vsk, pkt); 1080 1080 sk->sk_data_ready(sk); 1081 1081 return err; 1082 + case VIRTIO_VSOCK_OP_CREDIT_REQUEST: 1083 + virtio_transport_send_credit_update(vsk); 1084 + break; 1082 1085 case VIRTIO_VSOCK_OP_CREDIT_UPDATE: 1083 1086 sk->sk_write_space(sk); 1084 1087 break;
+44 -5
net/xfrm/xfrm_compat.c
··· 298 298 len = nlmsg_attrlen(nlh_src, xfrm_msg_min[type]); 299 299 300 300 nla_for_each_attr(nla, attrs, len, remaining) { 301 - int err = xfrm_xlate64_attr(dst, nla); 301 + int err; 302 302 303 + switch (type) { 304 + case XFRM_MSG_NEWSPDINFO: 305 + err = xfrm_nla_cpy(dst, nla, nla_len(nla)); 306 + break; 307 + default: 308 + err = xfrm_xlate64_attr(dst, nla); 309 + break; 310 + } 303 311 if (err) 304 312 return err; 305 313 } ··· 349 341 350 342 /* Calculates len of translated 64-bit message. */ 351 343 static size_t xfrm_user_rcv_calculate_len64(const struct nlmsghdr *src, 352 - struct nlattr *attrs[XFRMA_MAX+1]) 344 + struct nlattr *attrs[XFRMA_MAX + 1], 345 + int maxtype) 353 346 { 354 347 size_t len = nlmsg_len(src); 355 348 ··· 367 358 case XFRM_MSG_POLEXPIRE: 368 359 len += 8; 369 360 break; 361 + case XFRM_MSG_NEWSPDINFO: 362 + /* attirbutes are xfrm_spdattr_type_t, not xfrm_attr_type_t */ 363 + return len; 370 364 default: 371 365 break; 372 366 } 367 + 368 + /* Unexpected for anything, but XFRM_MSG_NEWSPDINFO, please 369 + * correct both 64=>32-bit and 32=>64-bit translators to copy 370 + * new attributes. 
371 + */ 372 + if (WARN_ON_ONCE(maxtype)) 373 + return len; 373 374 374 375 if (attrs[XFRMA_SA]) 375 376 len += 4; ··· 459 440 460 441 static int xfrm_xlate32(struct nlmsghdr *dst, const struct nlmsghdr *src, 461 442 struct nlattr *attrs[XFRMA_MAX+1], 462 - size_t size, u8 type, struct netlink_ext_ack *extack) 443 + size_t size, u8 type, int maxtype, 444 + struct netlink_ext_ack *extack) 463 445 { 464 446 size_t pos; 465 447 int i; ··· 540 520 } 541 521 pos = dst->nlmsg_len; 542 522 523 + if (maxtype) { 524 + /* attirbutes are xfrm_spdattr_type_t, not xfrm_attr_type_t */ 525 + WARN_ON_ONCE(src->nlmsg_type != XFRM_MSG_NEWSPDINFO); 526 + 527 + for (i = 1; i <= maxtype; i++) { 528 + int err; 529 + 530 + if (!attrs[i]) 531 + continue; 532 + 533 + /* just copy - no need for translation */ 534 + err = xfrm_attr_cpy32(dst, &pos, attrs[i], size, 535 + nla_len(attrs[i]), nla_len(attrs[i])); 536 + if (err) 537 + return err; 538 + } 539 + return 0; 540 + } 541 + 543 542 for (i = 1; i < XFRMA_MAX + 1; i++) { 544 543 int err; 545 544 ··· 603 564 if (err < 0) 604 565 return ERR_PTR(err); 605 566 606 - len = xfrm_user_rcv_calculate_len64(h32, attrs); 567 + len = xfrm_user_rcv_calculate_len64(h32, attrs, maxtype); 607 568 /* The message doesn't need translation */ 608 569 if (len == nlmsg_len(h32)) 609 570 return NULL; ··· 613 574 if (!h64) 614 575 return ERR_PTR(-ENOMEM); 615 576 616 - err = xfrm_xlate32(h64, h32, attrs, len, type, extack); 577 + err = xfrm_xlate32(h64, h32, attrs, len, type, maxtype, extack); 617 578 if (err < 0) { 618 579 kvfree(h64); 619 580 return ERR_PTR(err);
+1 -1
net/xfrm/xfrm_ipcomp.c
··· 241 241 break; 242 242 } 243 243 244 - WARN_ON(!pos); 244 + WARN_ON(list_entry_is_head(pos, &ipcomp_tfms_list, list)); 245 245 246 246 if (--pos->users) 247 247 return;
+12 -20
net/xfrm/xfrm_policy.c
··· 155 155 __read_mostly; 156 156 157 157 static struct kmem_cache *xfrm_dst_cache __ro_after_init; 158 - static __read_mostly seqcount_mutex_t xfrm_policy_hash_generation; 159 158 160 159 static struct rhashtable xfrm_policy_inexact_table; 161 160 static const struct rhashtable_params xfrm_pol_inexact_params; ··· 584 585 return; 585 586 586 587 spin_lock_bh(&net->xfrm.xfrm_policy_lock); 587 - write_seqcount_begin(&xfrm_policy_hash_generation); 588 + write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation); 588 589 589 590 odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table, 590 591 lockdep_is_held(&net->xfrm.xfrm_policy_lock)); ··· 595 596 rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst); 596 597 net->xfrm.policy_bydst[dir].hmask = nhashmask; 597 598 598 - write_seqcount_end(&xfrm_policy_hash_generation); 599 + write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation); 599 600 spin_unlock_bh(&net->xfrm.xfrm_policy_lock); 600 601 601 602 synchronize_rcu(); ··· 1244 1245 } while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq)); 1245 1246 1246 1247 spin_lock_bh(&net->xfrm.xfrm_policy_lock); 1247 - write_seqcount_begin(&xfrm_policy_hash_generation); 1248 + write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation); 1248 1249 1249 1250 /* make sure that we can insert the indirect policies again before 1250 1251 * we start with destructive action. 
··· 1353 1354 1354 1355 out_unlock: 1355 1356 __xfrm_policy_inexact_flush(net); 1356 - write_seqcount_end(&xfrm_policy_hash_generation); 1357 + write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation); 1357 1358 spin_unlock_bh(&net->xfrm.xfrm_policy_lock); 1358 1359 1359 1360 mutex_unlock(&hash_resize_mutex); ··· 2090 2091 if (unlikely(!daddr || !saddr)) 2091 2092 return NULL; 2092 2093 2093 - retry: 2094 - sequence = read_seqcount_begin(&xfrm_policy_hash_generation); 2095 2094 rcu_read_lock(); 2096 - 2097 - chain = policy_hash_direct(net, daddr, saddr, family, dir); 2098 - if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence)) { 2099 - rcu_read_unlock(); 2100 - goto retry; 2101 - } 2095 + retry: 2096 + do { 2097 + sequence = read_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation); 2098 + chain = policy_hash_direct(net, daddr, saddr, family, dir); 2099 + } while (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence)); 2102 2100 2103 2101 ret = NULL; 2104 2102 hlist_for_each_entry_rcu(pol, chain, bydst) { ··· 2126 2130 } 2127 2131 2128 2132 skip_inexact: 2129 - if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence)) { 2130 - rcu_read_unlock(); 2133 + if (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence)) 2131 2134 goto retry; 2132 - } 2133 2135 2134 - if (ret && !xfrm_pol_hold_rcu(ret)) { 2135 - rcu_read_unlock(); 2136 + if (ret && !xfrm_pol_hold_rcu(ret)) 2136 2137 goto retry; 2137 - } 2138 2138 fail: 2139 2139 rcu_read_unlock(); 2140 2140 ··· 4081 4089 /* Initialize the per-net locks here */ 4082 4090 spin_lock_init(&net->xfrm.xfrm_state_lock); 4083 4091 spin_lock_init(&net->xfrm.xfrm_policy_lock); 4092 + seqcount_spinlock_init(&net->xfrm.xfrm_policy_hash_generation, &net->xfrm.xfrm_policy_lock); 4084 4093 mutex_init(&net->xfrm.xfrm_cfg_mutex); 4085 4094 4086 4095 rv = xfrm_statistics_init(net); ··· 4126 4133 { 4127 4134 register_pernet_subsys(&xfrm_net_ops); 4128 4135 xfrm_dev_init(); 4129 - 
seqcount_mutex_init(&xfrm_policy_hash_generation, &hash_resize_mutex); 4130 4136 xfrm_input_init(); 4131 4137 4132 4138 #ifdef CONFIG_XFRM_ESPINTCP
+10
net/xfrm/xfrm_user.c
··· 2811 2811 2812 2812 err = link->doit(skb, nlh, attrs); 2813 2813 2814 + /* We need to free skb allocated in xfrm_alloc_compat() before 2815 + * returning from this function, because consume_skb() won't take 2816 + * care of frag_list since netlink destructor sets 2817 + * sbk->head to NULL. (see netlink_skb_destructor()) 2818 + */ 2819 + if (skb_has_frag_list(skb)) { 2820 + kfree_skb(skb_shinfo(skb)->frag_list); 2821 + skb_shinfo(skb)->frag_list = NULL; 2822 + } 2823 + 2814 2824 err: 2815 2825 kvfree(nlh64); 2816 2826 return err;
-40
scripts/recordmcount.pl
··· 173 173 my $mcount_adjust; # Address adjustment to mcount offset 174 174 my $alignment; # The .align value to use for $mcount_section 175 175 my $section_type; # Section header plus possible alignment command 176 - my $can_use_local = 0; # If we can use local function references 177 - 178 - # Shut up recordmcount if user has older objcopy 179 - my $quiet_recordmcount = ".tmp_quiet_recordmcount"; 180 - my $print_warning = 1; 181 - $print_warning = 0 if ( -f $quiet_recordmcount); 182 - 183 - ## 184 - # check_objcopy - whether objcopy supports --globalize-symbols 185 - # 186 - # --globalize-symbols came out in 2.17, we must test the version 187 - # of objcopy, and if it is less than 2.17, then we can not 188 - # record local functions. 189 - sub check_objcopy 190 - { 191 - open (IN, "$objcopy --version |") or die "error running $objcopy"; 192 - while (<IN>) { 193 - if (/objcopy.*\s(\d+)\.(\d+)/) { 194 - $can_use_local = 1 if ($1 > 2 || ($1 == 2 && $2 >= 17)); 195 - last; 196 - } 197 - } 198 - close (IN); 199 - 200 - if (!$can_use_local && $print_warning) { 201 - print STDERR "WARNING: could not find objcopy version or version " . 202 - "is less than 2.17.\n" . 203 - "\tLocal function references are disabled.\n"; 204 - open (QUIET, ">$quiet_recordmcount"); 205 - printf QUIET "Disables the warning from recordmcount.pl\n"; 206 - close QUIET; 207 - } 208 - } 209 176 210 177 if ($arch =~ /(x86(_64)?)|(i386)/) { 211 178 if ($bits == 64) { ··· 401 434 my $mcount_s = $dirname . "/.tmp_mc_" . $prefix . ".s"; 402 435 my $mcount_o = $dirname . "/.tmp_mc_" . $prefix . ".o"; 403 436 404 - check_objcopy(); 405 - 406 437 # 407 438 # Step 1: find all the local (static functions) and weak symbols. 408 439 # 't' is local, 'w/W' is weak ··· 438 473 439 474 # is this function static? If so, note this fact. 
440 475 if (defined $locals{$ref_func}) { 441 - 442 - # only use locals if objcopy supports globalize-symbols 443 - if (!$can_use_local) { 444 - return; 445 - } 446 476 $convert{$ref_func} = 1; 447 477 } 448 478
+3 -3
scripts/tracing/draw_functrace.py
··· 17 17 $ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func 18 18 Wait some times but not too much, the script is a bit slow. 19 19 Break the pipe (Ctrl + Z) 20 - $ scripts/draw_functrace.py < raw_trace_func > draw_functrace 20 + $ scripts/tracing/draw_functrace.py < ~/raw_trace_func > draw_functrace 21 21 Then you have your drawn trace in draw_functrace 22 22 """ 23 23 ··· 103 103 line = line.strip() 104 104 if line.startswith("#"): 105 105 raise CommentLineException 106 - m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line) 106 + m = re.match("[^]]+?\\] +([a-z.]+) +([0-9.]+): (\\w+) <-(\\w+)", line) 107 107 if m is None: 108 108 raise BrokenLineException 109 - return (m.group(1), m.group(2), m.group(3)) 109 + return (m.group(2), m.group(3), m.group(4)) 110 110 111 111 112 112 def main():
+4 -6
security/selinux/ss/policydb.c
··· 874 874 rc = sidtab_init(s); 875 875 if (rc) { 876 876 pr_err("SELinux: out of memory on SID table init\n"); 877 - goto out; 877 + return rc; 878 878 } 879 879 880 880 head = p->ocontexts[OCON_ISID]; ··· 885 885 if (sid == SECSID_NULL) { 886 886 pr_err("SELinux: SID 0 was assigned a context.\n"); 887 887 sidtab_destroy(s); 888 - goto out; 888 + return -EINVAL; 889 889 } 890 890 891 891 /* Ignore initial SIDs unused by this kernel. */ ··· 897 897 pr_err("SELinux: unable to load initial SID %s.\n", 898 898 name); 899 899 sidtab_destroy(s); 900 - goto out; 900 + return rc; 901 901 } 902 902 } 903 - rc = 0; 904 - out: 905 - return rc; 903 + return 0; 906 904 } 907 905 908 906 int policydb_class_isvalid(struct policydb *p, unsigned int class)
+18 -2
tools/perf/util/cs-etm.c
··· 2434 2434 return 0; 2435 2435 } 2436 2436 2437 + static void dump_queued_data(struct cs_etm_auxtrace *etm, 2438 + struct perf_record_auxtrace *event) 2439 + { 2440 + struct auxtrace_buffer *buf; 2441 + unsigned int i; 2442 + /* 2443 + * Find all buffers with same reference in the queues and dump them. 2444 + * This is because the queues can contain multiple entries of the same 2445 + * buffer that were split on aux records. 2446 + */ 2447 + for (i = 0; i < etm->queues.nr_queues; ++i) 2448 + list_for_each_entry(buf, &etm->queues.queue_array[i].head, list) 2449 + if (buf->reference == event->reference) 2450 + cs_etm__dump_event(etm, buf); 2451 + } 2452 + 2437 2453 static int cs_etm__process_auxtrace_event(struct perf_session *session, 2438 2454 union perf_event *event, 2439 2455 struct perf_tool *tool __maybe_unused) ··· 2482 2466 cs_etm__dump_event(etm, buffer); 2483 2467 auxtrace_buffer__put_data(buffer); 2484 2468 } 2485 - } 2469 + } else if (dump_trace) 2470 + dump_queued_data(etm, &event->auxtrace); 2486 2471 2487 2472 return 0; 2488 2473 } ··· 3059 3042 3060 3043 if (dump_trace) { 3061 3044 cs_etm__print_auxtrace_info(auxtrace_info->priv, num_cpu); 3062 - return 0; 3063 3045 } 3064 3046 3065 3047 err = cs_etm__synth_events(etm, session);
-2
tools/perf/util/map.c
··· 192 192 if (!(prot & PROT_EXEC)) 193 193 dso__set_loaded(dso); 194 194 } 195 - 196 - nsinfo__put(dso->nsinfo); 197 195 dso->nsinfo = nsi; 198 196 199 197 if (build_id__is_defined(bid))
+24 -9
tools/perf/util/pmu.c
··· 742 742 return perf_pmu__find_map(NULL); 743 743 } 744 744 745 - static bool perf_pmu__valid_suffix(char *pmu_name, char *tok) 745 + /* 746 + * Suffix must be in form tok_{digits}, or tok{digits}, or same as pmu_name 747 + * to be valid. 748 + */ 749 + static bool perf_pmu__valid_suffix(const char *pmu_name, char *tok) 746 750 { 747 - char *p; 751 + const char *p; 748 752 749 753 if (strncmp(pmu_name, tok, strlen(tok))) 750 754 return false; ··· 757 753 if (*p == 0) 758 754 return true; 759 755 760 - if (*p != '_') 761 - return false; 756 + if (*p == '_') 757 + ++p; 762 758 763 - ++p; 764 - if (*p == 0 || !isdigit(*p)) 765 - return false; 759 + /* Ensure we end in a number */ 760 + while (1) { 761 + if (!isdigit(*p)) 762 + return false; 763 + if (*(++p) == 0) 764 + break; 765 + } 766 766 767 767 return true; 768 768 } ··· 797 789 * match "socket" in "socketX_pmunameY" and then "pmuname" in 798 790 * "pmunameY". 799 791 */ 800 - for (; tok; name += strlen(tok), tok = strtok_r(NULL, ",", &tmp)) { 792 + while (1) { 793 + char *next_tok = strtok_r(NULL, ",", &tmp); 794 + 801 795 name = strstr(name, tok); 802 - if (!name || !perf_pmu__valid_suffix((char *)name, tok)) { 796 + if (!name || 797 + (!next_tok && !perf_pmu__valid_suffix(name, tok))) { 803 798 res = false; 804 799 goto out; 805 800 } 801 + if (!next_tok) 802 + break; 803 + tok = next_tok; 804 + name += strlen(tok); 806 805 } 807 806 808 807 res = true;
+4 -1
tools/testing/selftests/kvm/include/x86_64/hyperv.h
··· 117 117 #define HV_X64_GUEST_DEBUGGING_AVAILABLE BIT(1) 118 118 #define HV_X64_PERF_MONITOR_AVAILABLE BIT(2) 119 119 #define HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE BIT(3) 120 - #define HV_X64_HYPERCALL_PARAMS_XMM_AVAILABLE BIT(4) 120 + #define HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE BIT(4) 121 121 #define HV_X64_GUEST_IDLE_STATE_AVAILABLE BIT(5) 122 122 #define HV_FEATURE_FREQUENCY_MSRS_AVAILABLE BIT(8) 123 123 #define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE BIT(10) ··· 181 181 #define HV_STATUS_INVALID_PORT_ID 17 182 182 #define HV_STATUS_INVALID_CONNECTION_ID 18 183 183 #define HV_STATUS_INSUFFICIENT_BUFFERS 19 184 + 185 + /* hypercall options */ 186 + #define HV_HYPERCALL_FAST_BIT BIT(16) 184 187 185 188 #endif /* !SELFTEST_KVM_HYPERV_H */
+1 -1
tools/testing/selftests/kvm/x86_64/hyperv_clock.c
··· 215 215 vcpu_set_hv_cpuid(vm, VCPU_ID); 216 216 217 217 tsc_page_gva = vm_vaddr_alloc_page(vm); 218 - memset(addr_gpa2hva(vm, tsc_page_gva), 0x0, getpagesize()); 218 + memset(addr_gva2hva(vm, tsc_page_gva), 0x0, getpagesize()); 219 219 TEST_ASSERT((addr_gva2gpa(vm, tsc_page_gva) & (getpagesize() - 1)) == 0, 220 220 "TSC page has to be page aligned\n"); 221 221 vcpu_args_set(vm, VCPU_ID, 2, tsc_page_gva, addr_gva2gpa(vm, tsc_page_gva));
+38 -3
tools/testing/selftests/kvm/x86_64/hyperv_features.c
··· 47 47 } 48 48 49 49 static int nr_gp; 50 + static int nr_ud; 50 51 51 52 static inline u64 hypercall(u64 control, vm_vaddr_t input_address, 52 53 vm_vaddr_t output_address) ··· 81 80 regs->rip = (uint64_t)&wrmsr_end; 82 81 } 83 82 83 + static void guest_ud_handler(struct ex_regs *regs) 84 + { 85 + nr_ud++; 86 + regs->rip += 3; 87 + } 88 + 84 89 struct msr_data { 85 90 uint32_t idx; 86 91 bool available; ··· 97 90 struct hcall_data { 98 91 uint64_t control; 99 92 uint64_t expect; 93 + bool ud_expected; 100 94 }; 101 95 102 96 static void guest_msr(struct msr_data *msr) ··· 125 117 static void guest_hcall(vm_vaddr_t pgs_gpa, struct hcall_data *hcall) 126 118 { 127 119 int i = 0; 120 + u64 res, input, output; 128 121 129 122 wrmsr(HV_X64_MSR_GUEST_OS_ID, LINUX_OS_ID); 130 123 wrmsr(HV_X64_MSR_HYPERCALL, pgs_gpa); 131 124 132 125 while (hcall->control) { 133 - GUEST_ASSERT(hypercall(hcall->control, pgs_gpa, 134 - pgs_gpa + 4096) == hcall->expect); 126 + nr_ud = 0; 127 + if (!(hcall->control & HV_HYPERCALL_FAST_BIT)) { 128 + input = pgs_gpa; 129 + output = pgs_gpa + 4096; 130 + } else { 131 + input = output = 0; 132 + } 133 + 134 + res = hypercall(hcall->control, input, output); 135 + if (hcall->ud_expected) 136 + GUEST_ASSERT(nr_ud == 1); 137 + else 138 + GUEST_ASSERT(res == hcall->expect); 139 + 135 140 GUEST_SYNC(i++); 136 141 } 137 142 ··· 573 552 recomm.ebx = 0xfff; 574 553 hcall->expect = HV_STATUS_SUCCESS; 575 554 break; 576 - 577 555 case 17: 556 + /* XMM fast hypercall */ 557 + hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE | HV_HYPERCALL_FAST_BIT; 558 + hcall->ud_expected = true; 559 + break; 560 + case 18: 561 + feat.edx |= HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE; 562 + hcall->ud_expected = false; 563 + hcall->expect = HV_STATUS_SUCCESS; 564 + break; 565 + 566 + case 19: 578 567 /* END */ 579 568 hcall->control = 0; 580 569 break; ··· 655 624 656 625 /* Test hypercalls */ 657 626 vm = vm_create_default(VCPU_ID, 0, guest_hcall); 627 + 628 + 
vm_init_descriptor_tables(vm); 629 + vcpu_init_descriptor_tables(vm, VCPU_ID); 630 + vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler); 658 631 659 632 /* Hypercall input/output */ 660 633 hcall_page = vm_vaddr_alloc_pages(vm, 2);
+163 -2
tools/testing/selftests/net/ipsec.c
··· 484 484 MONITOR_ACQUIRE, 485 485 EXPIRE_STATE, 486 486 EXPIRE_POLICY, 487 + SPDINFO_ATTRS, 487 488 }; 488 489 const char *desc_name[] = { 489 490 "create tunnel", 490 491 "alloc spi", 491 492 "monitor acquire", 492 493 "expire state", 493 - "expire policy" 494 + "expire policy", 495 + "spdinfo attributes", 496 + "" 494 497 }; 495 498 struct xfrm_desc { 496 499 enum desc_type type; ··· 1596 1593 return ret; 1597 1594 } 1598 1595 1596 + static int xfrm_spdinfo_set_thresh(int xfrm_sock, uint32_t *seq, 1597 + unsigned thresh4_l, unsigned thresh4_r, 1598 + unsigned thresh6_l, unsigned thresh6_r, 1599 + bool add_bad_attr) 1600 + 1601 + { 1602 + struct { 1603 + struct nlmsghdr nh; 1604 + union { 1605 + uint32_t unused; 1606 + int error; 1607 + }; 1608 + char attrbuf[MAX_PAYLOAD]; 1609 + } req; 1610 + struct xfrmu_spdhthresh thresh; 1611 + 1612 + memset(&req, 0, sizeof(req)); 1613 + req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(req.unused)); 1614 + req.nh.nlmsg_type = XFRM_MSG_NEWSPDINFO; 1615 + req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK; 1616 + req.nh.nlmsg_seq = (*seq)++; 1617 + 1618 + thresh.lbits = thresh4_l; 1619 + thresh.rbits = thresh4_r; 1620 + if (rtattr_pack(&req.nh, sizeof(req), XFRMA_SPD_IPV4_HTHRESH, &thresh, sizeof(thresh))) 1621 + return -1; 1622 + 1623 + thresh.lbits = thresh6_l; 1624 + thresh.rbits = thresh6_r; 1625 + if (rtattr_pack(&req.nh, sizeof(req), XFRMA_SPD_IPV6_HTHRESH, &thresh, sizeof(thresh))) 1626 + return -1; 1627 + 1628 + if (add_bad_attr) { 1629 + BUILD_BUG_ON(XFRMA_IF_ID <= XFRMA_SPD_MAX + 1); 1630 + if (rtattr_pack(&req.nh, sizeof(req), XFRMA_IF_ID, NULL, 0)) { 1631 + pr_err("adding attribute failed: no space"); 1632 + return -1; 1633 + } 1634 + } 1635 + 1636 + if (send(xfrm_sock, &req, req.nh.nlmsg_len, 0) < 0) { 1637 + pr_err("send()"); 1638 + return -1; 1639 + } 1640 + 1641 + if (recv(xfrm_sock, &req, sizeof(req), 0) < 0) { 1642 + pr_err("recv()"); 1643 + return -1; 1644 + } else if (req.nh.nlmsg_type != NLMSG_ERROR) { 1645 + 
printk("expected NLMSG_ERROR, got %d", (int)req.nh.nlmsg_type); 1646 + return -1; 1647 + } 1648 + 1649 + if (req.error) { 1650 + printk("NLMSG_ERROR: %d: %s", req.error, strerror(-req.error)); 1651 + return -1; 1652 + } 1653 + 1654 + return 0; 1655 + } 1656 + 1657 + static int xfrm_spdinfo_attrs(int xfrm_sock, uint32_t *seq) 1658 + { 1659 + struct { 1660 + struct nlmsghdr nh; 1661 + union { 1662 + uint32_t unused; 1663 + int error; 1664 + }; 1665 + char attrbuf[MAX_PAYLOAD]; 1666 + } req; 1667 + 1668 + if (xfrm_spdinfo_set_thresh(xfrm_sock, seq, 32, 31, 120, 16, false)) { 1669 + pr_err("Can't set SPD HTHRESH"); 1670 + return KSFT_FAIL; 1671 + } 1672 + 1673 + memset(&req, 0, sizeof(req)); 1674 + 1675 + req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(req.unused)); 1676 + req.nh.nlmsg_type = XFRM_MSG_GETSPDINFO; 1677 + req.nh.nlmsg_flags = NLM_F_REQUEST; 1678 + req.nh.nlmsg_seq = (*seq)++; 1679 + if (send(xfrm_sock, &req, req.nh.nlmsg_len, 0) < 0) { 1680 + pr_err("send()"); 1681 + return KSFT_FAIL; 1682 + } 1683 + 1684 + if (recv(xfrm_sock, &req, sizeof(req), 0) < 0) { 1685 + pr_err("recv()"); 1686 + return KSFT_FAIL; 1687 + } else if (req.nh.nlmsg_type == XFRM_MSG_NEWSPDINFO) { 1688 + size_t len = NLMSG_PAYLOAD(&req.nh, sizeof(req.unused)); 1689 + struct rtattr *attr = (void *)req.attrbuf; 1690 + int got_thresh = 0; 1691 + 1692 + for (; RTA_OK(attr, len); attr = RTA_NEXT(attr, len)) { 1693 + if (attr->rta_type == XFRMA_SPD_IPV4_HTHRESH) { 1694 + struct xfrmu_spdhthresh *t = RTA_DATA(attr); 1695 + 1696 + got_thresh++; 1697 + if (t->lbits != 32 || t->rbits != 31) { 1698 + pr_err("thresh differ: %u, %u", 1699 + t->lbits, t->rbits); 1700 + return KSFT_FAIL; 1701 + } 1702 + } 1703 + if (attr->rta_type == XFRMA_SPD_IPV6_HTHRESH) { 1704 + struct xfrmu_spdhthresh *t = RTA_DATA(attr); 1705 + 1706 + got_thresh++; 1707 + if (t->lbits != 120 || t->rbits != 16) { 1708 + pr_err("thresh differ: %u, %u", 1709 + t->lbits, t->rbits); 1710 + return KSFT_FAIL; 1711 + } 1712 + } 1713 + } 1714 + 
if (got_thresh != 2) { 1715 + pr_err("only %d thresh returned by XFRM_MSG_GETSPDINFO", got_thresh); 1716 + return KSFT_FAIL; 1717 + } 1718 + } else if (req.nh.nlmsg_type != NLMSG_ERROR) { 1719 + printk("expected NLMSG_ERROR, got %d", (int)req.nh.nlmsg_type); 1720 + return KSFT_FAIL; 1721 + } else { 1722 + printk("NLMSG_ERROR: %d: %s", req.error, strerror(-req.error)); 1723 + return -1; 1724 + } 1725 + 1726 + /* Restore the default */ 1727 + if (xfrm_spdinfo_set_thresh(xfrm_sock, seq, 32, 32, 128, 128, false)) { 1728 + pr_err("Can't restore SPD HTHRESH"); 1729 + return KSFT_FAIL; 1730 + } 1731 + 1732 + /* 1733 + * At this moment xfrm uses nlmsg_parse_deprecated(), which 1734 + * implies NL_VALIDATE_LIBERAL - ignoring attributes with 1735 + * (type > maxtype). nla_parse_depricated_strict() would enforce 1736 + * it. Or even stricter nla_parse(). 1737 + * Right now it's not expected to fail, but to be ignored. 1738 + */ 1739 + if (xfrm_spdinfo_set_thresh(xfrm_sock, seq, 32, 32, 128, 128, true)) 1740 + return KSFT_PASS; 1741 + 1742 + return KSFT_PASS; 1743 + } 1744 + 1599 1745 static int child_serv(int xfrm_sock, uint32_t *seq, 1600 1746 unsigned int nr, int cmd_fd, void *buf, struct xfrm_desc *desc) 1601 1747 { ··· 1868 1716 break; 1869 1717 case EXPIRE_POLICY: 1870 1718 ret = xfrm_expire_policy(xfrm_sock, &seq, nr, &desc); 1719 + break; 1720 + case SPDINFO_ATTRS: 1721 + ret = xfrm_spdinfo_attrs(xfrm_sock, &seq); 1871 1722 break; 1872 1723 default: 1873 1724 printk("Unknown desc type %d", desc.type); ··· 2149 1994 * sizeof(xfrm_user_polexpire) = 168 | sizeof(xfrm_user_polexpire) = 176 2150 1995 * 2151 1996 * Check the affected by the UABI difference structures. 1997 + * Also, check translation for xfrm_set_spdinfo: it has it's own attributes 1998 + * which needs to be correctly copied, but not translated. 
2152 1999 */ 2153 - const unsigned int compat_plan = 4; 2000 + const unsigned int compat_plan = 5; 2154 2001 static int write_compat_struct_tests(int test_desc_fd) 2155 2002 { 2156 2003 struct xfrm_desc desc = {}; ··· 2173 2016 return -1; 2174 2017 2175 2018 desc.type = EXPIRE_POLICY; 2019 + if (__write_desc(test_desc_fd, &desc)) 2020 + return -1; 2021 + 2022 + desc.type = SPDINFO_ATTRS; 2176 2023 if (__write_desc(test_desc_fd, &desc)) 2177 2024 return -1; 2178 2025
+16 -2
virt/kvm/kvm_main.c
··· 892 892 893 893 static int kvm_create_vm_debugfs(struct kvm *kvm, int fd) 894 894 { 895 + static DEFINE_MUTEX(kvm_debugfs_lock); 896 + struct dentry *dent; 895 897 char dir_name[ITOA_MAX_LEN * 2]; 896 898 struct kvm_stat_data *stat_data; 897 899 const struct _kvm_stats_desc *pdesc; ··· 905 903 return 0; 906 904 907 905 snprintf(dir_name, sizeof(dir_name), "%d-%d", task_pid_nr(current), fd); 908 - kvm->debugfs_dentry = debugfs_create_dir(dir_name, kvm_debugfs_dir); 906 + mutex_lock(&kvm_debugfs_lock); 907 + dent = debugfs_lookup(dir_name, kvm_debugfs_dir); 908 + if (dent) { 909 + pr_warn_ratelimited("KVM: debugfs: duplicate directory %s\n", dir_name); 910 + dput(dent); 911 + mutex_unlock(&kvm_debugfs_lock); 912 + return 0; 913 + } 914 + dent = debugfs_create_dir(dir_name, kvm_debugfs_dir); 915 + mutex_unlock(&kvm_debugfs_lock); 916 + if (IS_ERR(dent)) 917 + return 0; 909 918 919 + kvm->debugfs_dentry = dent; 910 920 kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries, 911 921 sizeof(*kvm->debugfs_stat_data), 912 922 GFP_KERNEL_ACCOUNT); ··· 5215 5201 } 5216 5202 add_uevent_var(env, "PID=%d", kvm->userspace_pid); 5217 5203 5218 - if (!IS_ERR_OR_NULL(kvm->debugfs_dentry)) { 5204 + if (kvm->debugfs_dentry) { 5219 5205 char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT); 5220 5206 5221 5207 if (p) {