CREDITS
@@ ... @@
 N: Daniel Drake
 E: dsd@gentoo.org
 D: USBAT02 CompactFlash support in usb-storage
+D: ZD1211RW wireless driver
 S: UK
 
 N: Oleg Drokin
+3 -2
Documentation/core-api/irq/irq-domain.rst
@@ ... @@
 case the Linux IRQ numbers cannot be dynamically assigned and the legacy
 mapping should be used.
 
-As the name implies, the *_legacy() functions are deprecated and only
+As the name implies, the \*_legacy() functions are deprecated and only
 exist to ease the support of ancient platforms. No new users should be
-added.
+added. Same goes for the \*_simple() functions when their use results
+in the legacy behaviour.
 
 The legacy map assumes a contiguous range of IRQ numbers has already
 been allocated for the controller and that the IRQ number can be
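Editor's aside, not part of the patch above: the preferred replacement for the deprecated \*_legacy() registration is a linear domain, where Linux IRQ numbers are allocated on demand instead of being claimed as a preallocated contiguous block. A minimal sketch under assumed conditions (a hypothetical controller with 32 hwirqs; my_irq_map and my_probe are invented names):

    #include <linux/irq.h>
    #include <linux/irqdomain.h>
    #include <linux/of.h>

    /* Hypothetical per-hwirq setup: attach a chip and a flow handler. */
    static int my_irq_map(struct irq_domain *d, unsigned int virq,
                          irq_hw_number_t hwirq)
    {
            irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_simple_irq);
            return 0;
    }

    static const struct irq_domain_ops my_irq_ops = {
            .map   = my_irq_map,
            .xlate = irq_domain_xlate_onecell,
    };

    static int my_probe(struct device_node *np)
    {
            /* Linear domain: no fixed Linux IRQ range is claimed up front. */
            struct irq_domain *d =
                    irq_domain_add_linear(np, 32, &my_irq_ops, NULL);

            return d ? 0 : -ENOMEM;
    }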
Documentation/devicetree/bindings/mmc/snps,dwcmshc-sdhci.yaml
@@ ... @@
       - snps,dwcmshc-sdhci
 
   reg:
-    minItems: 1
-    items:
-      - description: Offset and length of the register set for the device
+    maxItems: 1
 
   interrupts:
     maxItems: 1
Documentation/hwmon/k10temp.rst
@@ ... @@
 Core Complex Die (CCD) temperatures. Up to 8 such temperatures are reported
 as temp{3..10}_input, labeled Tccd{1..8}. Actual support depends on the CPU
 variant.
-
-Various Family 17h and 18h CPUs report voltage and current telemetry
-information. The following attributes may be reported.
-
-Attribute       Label   Description
-=============== ======= ================
-in0_input       Vcore   Core voltage
-in1_input       Vsoc    SoC voltage
-curr1_input     Icore   Core current
-curr2_input     Isoc    SoC current
-=============== ======= ================
-
-Current values are raw (unscaled) as reported by the CPU. Core current is
-reported as multiples of 1A / LSB. SoC is reported as multiples of 0.25A
-/ LSB. The real current is board specific. Reported currents should be seen
-as rough guidance, and should be scaled using sensors3.conf as appropriate
-for a given board.
Documentation/networking/device_drivers/ethernet/intel/ice.rst
@@ ... @@
 - 0x88A8 traffic will not be received unless VLAN stripping is disabled with
   the following command::
 
-    # ethool -K <ethX> rxvlan off
+    # ethtool -K <ethX> rxvlan off
 
 - 0x88A8/0x8100 double VLANs cannot be used with 0x8100 or 0x8100/0x8100 VLANS
   configured on the same port. 0x88a8/0x8100 traffic will not be received if
+35 -38
MAINTAINERS
@@ ... @@
 F:	drivers/acpi/pmic/
 
 ACPI THERMAL DRIVER
-M:	Zhang Rui <rui.zhang@intel.com>
+M:	Rafael J. Wysocki <rafael@kernel.org>
+R:	Zhang Rui <rui.zhang@intel.com>
 L:	linux-acpi@vger.kernel.org
 S:	Supported
 W:	https://01.org/linux-acpi
@@ ... @@
 F:	drivers/dma/altera-msgdma.c
 
 ALTERA PIO DRIVER
-M:	Joyce Ooi <joyce.ooi@intel.com>
+M:	Mun Yew Tham <mun.yew.tham@intel.com>
 L:	linux-gpio@vger.kernel.org
 S:	Maintained
 F:	drivers/gpio/gpio-altera.c
@@ ... @@
 S:	Maintained
 F:	drivers/platform/x86/amd-pmc.*
 
-AMD POWERPLAY
+AMD POWERPLAY AND SWSMU
 M:	Evan Quan <evan.quan@amd.com>
 L:	amd-gfx@lists.freedesktop.org
 S:	Supported
 T:	git https://gitlab.freedesktop.org/agd5f/linux.git
-F:	drivers/gpu/drm/amd/pm/powerplay/
+F:	drivers/gpu/drm/amd/pm/
 
 AMD PTDMA DRIVER
 M:	Sanjay R Mehta <sanju.mehta@amd.com>
@@ ... @@
 
 APPLE DART IOMMU DRIVER
 M:	Sven Peter <sven@svenpeter.dev>
+R:	Alyssa Rosenzweig <alyssa@rosenzweig.io>
 L:	iommu@lists.linux-foundation.org
 S:	Maintained
 F:	Documentation/devicetree/bindings/iommu/apple,dart.yaml
@@ ... @@
 
 ARM/APPLE MACHINE SUPPORT
 M:	Hector Martin <marcan@marcan.st>
+M:	Sven Peter <sven@svenpeter.dev>
+R:	Alyssa Rosenzweig <alyssa@rosenzweig.io>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 W:	https://asahilinux.org
@@ ... @@
 
 ARM/MStar/Sigmastar Armv7 SoC support
 M:	Daniel Palmer <daniel@thingy.jp>
+M:	Romain Perier <romain.perier@gmail.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 W:	http://linux-chenxing.org/
@@ ... @@
 
 ARM/TEXAS INSTRUMENTS K3 ARCHITECTURE
 M:	Nishanth Menon <nm@ti.com>
+M:	Vignesh Raghavendra <vigneshr@ti.com>
 M:	Tero Kristo <kristo@kernel.org>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Supported
@@ ... @@
 F:	arch/arm/mach-pxa/vpac270.c
 
 ARM/VT8500 ARM ARCHITECTURE
-M:	Tony Prisk <linux@prisktech.co.nz>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S:	Maintained
+S:	Orphan
 F:	Documentation/devicetree/bindings/i2c/i2c-wmt.txt
 F:	arch/arm/mach-vt8500/
 F:	drivers/clocksource/timer-vt8500.c
@@ ... @@
 F:	include/linux/async_tx.h
 
 AT24 EEPROM DRIVER
-M:	Bartosz Golaszewski <bgolaszewski@baylibre.com>
+M:	Bartosz Golaszewski <brgl@bgdev.pl>
 L:	linux-i2c@vger.kernel.org
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/brgl/linux.git
@@ ... @@
 F:	Documentation/userspace-api/ebpf/
 F:	arch/*/net/*
 F:	include/linux/bpf*
+F:	include/linux/btf*
 F:	include/linux/filter.h
 F:	include/trace/events/xdp.h
 F:	include/uapi/linux/bpf*
+F:	include/uapi/linux/btf*
 F:	include/uapi/linux/filter.h
 F:	kernel/bpf/
 F:	kernel/trace/bpf_trace.c
@@ ... @@
 
 BROADCOM NETXTREME-E ROCE DRIVER
 M:	Selvin Xavier <selvin.xavier@broadcom.com>
-M:	Naresh Kumar PBS <nareshkumar.pbs@broadcom.com>
 L:	linux-rdma@vger.kernel.org
 S:	Supported
 W:	http://www.broadcom.com
@@ ... @@
 T:	git git://git.samba.org/sfrench/cifs-2.6.git
 F:	Documentation/admin-guide/cifs/
 F:	fs/cifs/
-F:	fs/cifs_common/
+F:	fs/smbfs_common/
 
 COMPACTPCI HOTPLUG CORE
 M:	Scott Murray <scott@spiteful.org>
@@ ... @@
 
 GPIO SUBSYSTEM
 M:	Linus Walleij <linus.walleij@linaro.org>
-M:	Bartosz Golaszewski <bgolaszewski@baylibre.com>
+M:	Bartosz Golaszewski <brgl@bgdev.pl>
 L:	linux-gpio@vger.kernel.org
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
@@ ... @@
 F:	drivers/iio/humidity/hts221*
 
 HUAWEI ETHERNET DRIVER
-M:	Bin Luo <luobin9@huawei.com>
 L:	netdev@vger.kernel.org
-S:	Supported
+S:	Orphan
 F:	Documentation/networking/device_drivers/ethernet/huawei/hinic.rst
 F:	drivers/net/ethernet/huawei/hinic/
 
@@ ... @@
 L:	linux-cifs@vger.kernel.org
 S:	Maintained
 T:	git git://git.samba.org/ksmbd.git
-F:	fs/cifs_common/
 F:	fs/ksmbd/
+F:	fs/smbfs_common/
 
 KERNEL UNIT TESTING FRAMEWORK (KUnit)
 M:	Brendan Higgins <brendanhiggins@google.com>
@@ ... @@
 F:	drivers/iio/proximity/mb1232.c
 
 MAXIM MAX77650 PMIC MFD DRIVER
-M:	Bartosz Golaszewski <bgolaszewski@baylibre.com>
+M:	Bartosz Golaszewski <brgl@bgdev.pl>
 L:	linux-kernel@vger.kernel.org
 S:	Maintained
 F:	Documentation/devicetree/bindings/*/*max77650.yaml
@@ ... @@
 F:	drivers/scsi/nsp32*
 
 NIOS2 ARCHITECTURE
-M:	Ley Foon Tan <ley.foon.tan@intel.com>
+M:	Dinh Nguyen <dinguyen@kernel.org>
 S:	Maintained
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/lftan/nios2.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/dinguyen/linux.git
 F:	arch/nios2/
 
 NITRO ENCLAVES (NE)
@@ ... @@
 S:	Supported
 F:	drivers/char/pcmcia/scr24x_cs.c
 
-SCSI CDROM DRIVER
-M:	Jens Axboe <axboe@kernel.dk>
-L:	linux-scsi@vger.kernel.org
-S:	Maintained
-W:	http://www.kernel.dk
-F:	drivers/scsi/sr*
-
 SCSI RDMA PROTOCOL (SRP) INITIATOR
 M:	Bart Van Assche <bvanassche@acm.org>
 L:	linux-rdma@vger.kernel.org
@@ ... @@
 
 SHARED MEMORY COMMUNICATIONS (SMC) SOCKETS
 M:	Karsten Graul <kgraul@linux.ibm.com>
-M:	Guvenc Gulce <guvenc@linux.ibm.com>
 L:	linux-s390@vger.kernel.org
 S:	Supported
 W:	http://www.ibm.com/developerworks/linux/linux390/
@@ ... @@
 
 STAGING - OLPC SECONDARY DISPLAY CONTROLLER (DCON)
 M:	Jens Frederich <jfrederich@gmail.com>
-M:	Daniel Drake <dsd@laptop.org>
 M:	Jon Nettleton <jon.nettleton@gmail.com>
 S:	Maintained
 W:	http://wiki.laptop.org/go/DCON
@@ ... @@
 M:	Arnaud Pouliquen <arnaud.pouliquen@foss.st.com>
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:	Maintained
-F:	Documentation/devicetree/bindings/iio/adc/st,stm32-*.yaml
+F:	Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
+F:	Documentation/devicetree/bindings/sound/st,stm32-*.yaml
 F:	sound/soc/stm/
 
 STM32 TIMER/LPTIMER DRIVERS
@@ ... @@
 F:	arch/x86/boot/video*
 
 SWIOTLB SUBSYSTEM
-M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+M:	Christoph Hellwig <hch@infradead.org>
 L:	iommu@lists.linux-foundation.org
 S:	Supported
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb.git
+W:	http://git.infradead.org/users/hch/dma-mapping.git
+T:	git git://git.infradead.org/users/hch/dma-mapping.git
 F:	arch/*/kernel/pci-swiotlb.c
 F:	include/linux/swiotlb.h
 F:	kernel/dma/swiotlb.c
@@ ... @@
 F:	drivers/media/radio/radio-raremono.c
 
 THERMAL
-M:	Zhang Rui <rui.zhang@intel.com>
+M:	Rafael J. Wysocki <rafael@kernel.org>
 M:	Daniel Lezcano <daniel.lezcano@linaro.org>
 R:	Amit Kucheria <amitk@kernel.org>
+R:	Zhang Rui <rui.zhang@intel.com>
 L:	linux-pm@vger.kernel.org
 S:	Supported
 Q:	https://patchwork.kernel.org/project/linux-pm/list/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/thermal/linux.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git thermal
 F:	Documentation/devicetree/bindings/thermal/
 F:	drivers/thermal/
 F:	include/linux/cpu_cooling.h
@@ ... @@
 
 TI DAVINCI MACHINE SUPPORT
 M:	Sekhar Nori <nsekhar@ti.com>
-R:	Bartosz Golaszewski <bgolaszewski@baylibre.com>
+R:	Bartosz Golaszewski <brgl@bgdev.pl>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Supported
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/nsekhar/linux-davinci.git
@@ ... @@
 F:	drivers/usb/misc/chaoskey.c
 
 USB CYPRESS C67X00 DRIVER
-M:	Peter Korsgaard <jacmet@sunsite.dk>
 L:	linux-usb@vger.kernel.org
-S:	Maintained
+S:	Orphan
 F:	drivers/usb/c67x00/
 
 USB DAVICOM DM9601 DRIVER
-M:	Peter Korsgaard <jacmet@sunsite.dk>
+M:	Peter Korsgaard <peter@korsgaard.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
 W:	http://www.linux-usb.org/usbnet
@@ ... @@
 F:	tools/lib/bpf/xsk*
 
 XEN BLOCK SUBSYSTEM
-M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 M:	Roger Pau Monné <roger.pau@citrix.com>
 L:	xen-devel@lists.xenproject.org (moderated for non-subscribers)
 S:	Supported
@@ ... @@
 F:	drivers/net/xen-netback/*
 
 XEN PCI SUBSYSTEM
-M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+M:	Juergen Gross <jgross@suse.com>
 L:	xen-devel@lists.xenproject.org (moderated for non-subscribers)
 S:	Supported
 F:	arch/x86/pci/*xen*
@@ ... @@
 F:	sound/xen/*
 
 XEN SWIOTLB SUBSYSTEM
-M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+M:	Juergen Gross <jgross@suse.com>
+M:	Stefano Stabellini <sstabellini@kernel.org>
 L:	xen-devel@lists.xenproject.org (moderated for non-subscribers)
 L:	iommu@lists.linux-foundation.org
 S:	Supported
@@ ... @@
 F:	mm/zbud.c
 
 ZD1211RW WIRELESS DRIVER
-M:	Daniel Drake <dsd@gentoo.org>
 M:	Ulrich Kunitz <kune@deine-taler.de>
 L:	linux-wireless@vger.kernel.org
 L:	zd1211-devs@lists.sourceforge.net (subscribers-only)
arch/arm/mach-omap1/include/mach/memory.h
@@ ... @@
 /* REVISIT: omap1 legacy drivers still rely on this */
 #include <mach/soc.h>
 
-/*
- * Bus address is physical address, except for OMAP-1510 Local Bus.
- * OMAP-1510 bus address is translated into a Local Bus address if the
- * OMAP bus type is lbus. We do the address translation based on the
- * device overriding the defaults used in the dma-mapping API.
- */
-
-/*
- * OMAP-1510 Local Bus address offset
- */
-#define OMAP1510_LB_OFFSET UL(0x30000000)
-
 #endif
+82 -34
arch/arm/mach-omap1/usb.c
@@ ... @@
 #include <linux/platform_device.h>
 #include <linux/dma-map-ops.h>
 #include <linux/io.h>
+#include <linux/delay.h>
 
 #include <asm/irq.h>
 
@@ ... @@
 
 #endif
 
-#if IS_ENABLED(CONFIG_USB_OHCI_HCD)
-
 /* The dmamask must be set for OHCI to work */
 static u64 ohci_dmamask = ~(u32)0;
 
@@ ... @@
 
 static inline void ohci_device_init(struct omap_usb_config *pdata)
 {
+	if (!IS_ENABLED(CONFIG_USB_OHCI_HCD))
+		return;
+
 	if (cpu_is_omap7xx())
 		ohci_resources[1].start = INT_7XX_USB_HHC_1;
 	pdata->ohci_device = &ohci_device;
 	pdata->ocpi_enable = &ocpi_enable;
 }
-
-#else
-
-static inline void ohci_device_init(struct omap_usb_config *pdata)
-{
-}
-
-#endif
 
 #if defined(CONFIG_USB_OTG) && defined(CONFIG_ARCH_OMAP_OTG)
 
@@ ... @@
 }
 
 #ifdef CONFIG_ARCH_OMAP15XX
+/* OMAP-1510 OHCI has its own MMU for DMA */
+#define OMAP1510_LB_MEMSIZE	32	/* Should be same as SDRAM size */
+#define OMAP1510_LB_CLOCK_DIV	0xfffec10c
+#define OMAP1510_LB_MMU_CTL	0xfffec208
+#define OMAP1510_LB_MMU_LCK	0xfffec224
+#define OMAP1510_LB_MMU_LD_TLB	0xfffec228
+#define OMAP1510_LB_MMU_CAM_H	0xfffec22c
+#define OMAP1510_LB_MMU_CAM_L	0xfffec230
+#define OMAP1510_LB_MMU_RAM_H	0xfffec234
+#define OMAP1510_LB_MMU_RAM_L	0xfffec238
+
+/*
+ * Bus address is physical address, except for OMAP-1510 Local Bus.
+ * OMAP-1510 bus address is translated into a Local Bus address if the
+ * OMAP bus type is lbus.
+ */
+#define OMAP1510_LB_OFFSET	UL(0x30000000)
+
+/*
+ * OMAP-1510 specific Local Bus clock on/off
+ */
+static int omap_1510_local_bus_power(int on)
+{
+	if (on) {
+		omap_writel((1 << 1) | (1 << 0), OMAP1510_LB_MMU_CTL);
+		udelay(200);
+	} else {
+		omap_writel(0, OMAP1510_LB_MMU_CTL);
+	}
+
+	return 0;
+}
+
+/*
+ * OMAP-1510 specific Local Bus initialization
+ * NOTE: This assumes 32MB memory size in OMAP1510LB_MEMSIZE.
+ *       See also arch/mach-omap/memory.h for __virt_to_dma() and
+ *       __dma_to_virt() which need to match with the physical
+ *       Local Bus address below.
+ */
+static int omap_1510_local_bus_init(void)
+{
+	unsigned int tlb;
+	unsigned long lbaddr, physaddr;
+
+	omap_writel((omap_readl(OMAP1510_LB_CLOCK_DIV) & 0xfffffff8) | 0x4,
+		    OMAP1510_LB_CLOCK_DIV);
+
+	/* Configure the Local Bus MMU table */
+	for (tlb = 0; tlb < OMAP1510_LB_MEMSIZE; tlb++) {
+		lbaddr = tlb * 0x00100000 + OMAP1510_LB_OFFSET;
+		physaddr = tlb * 0x00100000 + PHYS_OFFSET;
+		omap_writel((lbaddr & 0x0fffffff) >> 22, OMAP1510_LB_MMU_CAM_H);
+		omap_writel(((lbaddr & 0x003ffc00) >> 6) | 0xc,
+			    OMAP1510_LB_MMU_CAM_L);
+		omap_writel(physaddr >> 16, OMAP1510_LB_MMU_RAM_H);
+		omap_writel((physaddr & 0x0000fc00) | 0x300, OMAP1510_LB_MMU_RAM_L);
+		omap_writel(tlb << 4, OMAP1510_LB_MMU_LCK);
+		omap_writel(0x1, OMAP1510_LB_MMU_LD_TLB);
+	}
+
+	/* Enable the walking table */
+	omap_writel(omap_readl(OMAP1510_LB_MMU_CTL) | (1 << 3), OMAP1510_LB_MMU_CTL);
+	udelay(200);
+
+	return 0;
+}
+
+static void omap_1510_local_bus_reset(void)
+{
+	omap_1510_local_bus_power(1);
+	omap_1510_local_bus_init();
+}
 
 /* ULPD_DPLL_CTRL */
 #define DPLL_IOB		(1 << 13)
@@ ... @@
 
 /* ULPD_APLL_CTRL */
 #define APLL_NDPLL_SWITCH	(1 << 0)
-
-static int omap_1510_usb_ohci_notifier(struct notifier_block *nb,
-				       unsigned long event, void *data)
-{
-	struct device *dev = data;
-
-	if (event != BUS_NOTIFY_ADD_DEVICE)
-		return NOTIFY_DONE;
-
-	if (strncmp(dev_name(dev), "ohci", 4) == 0 &&
-	    dma_direct_set_offset(dev, PHYS_OFFSET, OMAP1510_LB_OFFSET,
-				  (u64)-1))
-		WARN_ONCE(1, "failed to set DMA offset\n");
-	return NOTIFY_OK;
-}
-
-static struct notifier_block omap_1510_usb_ohci_nb = {
-	.notifier_call = omap_1510_usb_ohci_notifier,
-};
 
 static void __init omap_1510_usb_init(struct omap_usb_config *config)
 {
@@ ... @@
 	}
 #endif
 
-#if IS_ENABLED(CONFIG_USB_OHCI_HCD)
-	if (config->register_host) {
+	if (IS_ENABLED(CONFIG_USB_OHCI_HCD) && config->register_host) {
 		int status;
 
-		bus_register_notifier(&platform_bus_type,
-				      &omap_1510_usb_ohci_nb);
 		ohci_device.dev.platform_data = config;
+		dma_direct_set_offset(&ohci_device.dev, PHYS_OFFSET,
+				      OMAP1510_LB_OFFSET, (u64)-1);
 		status = platform_device_register(&ohci_device);
 		if (status)
 			pr_debug("can't register OHCI device, %d\n", status);
 		/* hcd explicitly gates 48MHz */
+
+		config->lb_reset = omap_1510_local_bus_reset;
 	}
-#endif
 }
 
 #else
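Editor's aside, not part of the patch above: the dma_direct_set_offset() call registers a constant bus offset for the OHCI device, so dma-direct rebases CPU physical addresses into the 0x30000000 Local Bus window that the MMU table above maps back to SDRAM. Rough arithmetic, assuming the usual OMAP-1510 SDRAM base of 0x10000000 for PHYS_OFFSET (the value is board/SoC specific):

    #define EXAMPLE_PHYS_OFFSET	0x10000000UL	/* assumed SDRAM base */
    #define EXAMPLE_LB_OFFSET	0x30000000UL	/* OMAP1510_LB_OFFSET */

    /* What the registered offset effectively computes for this device: */
    static unsigned long phys_to_lbus(unsigned long paddr)
    {
    	return paddr - EXAMPLE_PHYS_OFFSET + EXAMPLE_LB_OFFSET;
    }
    /* e.g. a DMA buffer at physical 0x10200000 appears to the OHCI
     * controller at Local Bus address 0x30200000. */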
arch/arm64/include/asm/assembler.h
@@ ... @@
 #define EXPORT_SYMBOL_NOKASAN(name)	EXPORT_SYMBOL(name)
 #endif
 
+#ifdef CONFIG_KASAN_HW_TAGS
+#define EXPORT_SYMBOL_NOHWKASAN(name)
+#else
+#define EXPORT_SYMBOL_NOHWKASAN(name)	EXPORT_SYMBOL_NOKASAN(name)
+#endif
 /*
  * Emit a 64-bit absolute little endian symbol reference in a way that
  * ensures that it will be resolved at build time, even when building a
+6
arch/arm64/include/asm/mte.h
@@ ... @@
 
 static inline void mte_check_tfsr_entry(void)
 {
+	if (!system_supports_mte())
+		return;
+
 	mte_check_tfsr_el1();
 }
 
 static inline void mte_check_tfsr_exit(void)
 {
+	if (!system_supports_mte())
+		return;
+
 	/*
 	 * The asynchronous faults are sync'ed automatically with
 	 * TFSR_EL1 on kernel entry but for exit an explicit dsb()
arch/arm64/kernel/acpi.c
@@ ... @@
 		return __pgprot(PROT_DEVICE_nGnRnE);
 }
 
-static void __iomem *__acpi_os_ioremap(acpi_physical_address phys,
-				       acpi_size size, bool memory)
+void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
 {
 	efi_memory_desc_t *md, *region = NULL;
 	pgprot_t prot;
@@ ... @@
 	 * It is fine for AML to remap regions that are not represented in the
 	 * EFI memory map at all, as it only describes normal memory, and MMIO
 	 * regions that require a virtual mapping to make them accessible to
-	 * the EFI runtime services. Determine the region default
-	 * attributes by checking the requested memory semantics.
+	 * the EFI runtime services.
 	 */
-	prot = memory ? __pgprot(PROT_NORMAL_NC) :
-			__pgprot(PROT_DEVICE_nGnRnE);
+	prot = __pgprot(PROT_DEVICE_nGnRnE);
 	if (region) {
 		switch (region->type) {
 		case EFI_LOADER_CODE:
@@ ... @@
 		}
 	}
 	return __ioremap(phys, size, prot);
-}
-
-void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
-{
-	return __acpi_os_ioremap(phys, size, false);
-}
-
-void __iomem *acpi_os_memmap(acpi_physical_address phys, acpi_size size)
-{
-	return __acpi_os_ioremap(phys, size, true);
 }
 
 /*
+6 -2
arch/arm64/kernel/cpufeature.c
@@ ... @@
 	/*
 	 * For reasons that aren't entirely clear, enabling KPTI on Cavium
 	 * ThunderX leads to apparent I-cache corruption of kernel text, which
-	 * ends as well as you might imagine. Don't even try.
+	 * ends as well as you might imagine. Don't even try. We cannot rely
+	 * on the cpus_have_*cap() helpers here to detect the CPU erratum
+	 * because cpucap detection order may change. However, since we know
+	 * affected CPUs are always in a homogeneous configuration, it is
+	 * safe to rely on this_cpu_has_cap() here.
 	 */
-	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
+	if (this_cpu_has_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
 		str = "ARM64_WORKAROUND_CAVIUM_27456";
 		__kpti_forced = -1;
 	}
+4 -6
arch/m68k/kernel/mte.c
@@ ... @@
 #ifdef CONFIG_KASAN_HW_TAGS
 void mte_check_tfsr_el1(void)
 {
-	u64 tfsr_el1;
-
-	if (!system_supports_mte())
-		return;
-
-	tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);
+	u64 tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);
 
 	if (unlikely(tfsr_el1 & SYS_TFSR_EL1_TF1)) {
 		/*
@@ ... @@
 
 void mte_thread_switch(struct task_struct *next)
 {
+	if (!system_supports_mte())
+		return;
+
 	mte_update_sctlr_user(next);
 
 	/*
+1 -3
arch/arm64/kernel/signal.c
@@ ... @@
 		if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
 			do_signal(regs);
 
-		if (thread_flags & _TIF_NOTIFY_RESUME) {
+		if (thread_flags & _TIF_NOTIFY_RESUME)
 			tracehook_notify_resume(regs);
-			rseq_handle_notify_resume(NULL, regs);
-		}
 
 		if (thread_flags & _TIF_FOREIGN_FPSTATE)
 			fpsimd_restore_current_state();
+1 -1
arch/arm64/kvm/hyp/nvhe/Makefile
@@ ... @@
 # runtime. Because the hypervisor is part of the kernel binary, relocations
 # produce a kernel VA. We enumerate relocations targeting hyp at build time
 # and convert the kernel VAs at those positions to hyp VAs.
-$(obj)/hyp-reloc.S: $(obj)/kvm_nvhe.tmp.o $(obj)/gen-hyprel
+$(obj)/hyp-reloc.S: $(obj)/kvm_nvhe.tmp.o $(obj)/gen-hyprel FORCE
 	$(call if_changed,hyprel)
 
 # 5) Compile hyp-reloc.S and link it into the existing partially linked object.
arch/m68k/kernel/entry.S
@@ ... @@
 #include <linux/linkage.h>
 #include <asm/errno.h>
 #include <asm/setup.h>
-#include <asm/segment.h>
 #include <asm/traps.h>
 #include <asm/unistd.h>
 #include <asm/asm-offsets.h>
@@ ... @@
 
 ENTRY(sys_sigreturn)
 	SAVE_SWITCH_STACK
-	movel	%sp,%sp@-		  | switch_stack pointer
-	pea	%sp@(SWITCH_STACK_SIZE+4) | pt_regs pointer
+	movel	%sp,%a1			  | switch_stack pointer
+	lea	%sp@(SWITCH_STACK_SIZE),%a0 | pt_regs pointer
+	lea	%sp@(-84),%sp		  | leave a gap
+	movel	%a1,%sp@-
+	movel	%a0,%sp@-
 	jbsr	do_sigreturn
-	addql	#8,%sp
-	RESTORE_SWITCH_STACK
-	rts
+	jra	1f			  | shared with rt_sigreturn()
 
 ENTRY(sys_rt_sigreturn)
 	SAVE_SWITCH_STACK
-	movel	%sp,%sp@-		  | switch_stack pointer
-	pea	%sp@(SWITCH_STACK_SIZE+4) | pt_regs pointer
+	movel	%sp,%a1			  | switch_stack pointer
+	lea	%sp@(SWITCH_STACK_SIZE),%a0 | pt_regs pointer
+	lea	%sp@(-84),%sp		  | leave a gap
+	movel	%a1,%sp@-
+	movel	%a0,%sp@-
+	| stack contents:
+	|   [original pt_regs address] [original switch_stack address]
+	|   [gap] [switch_stack] [pt_regs] [exception frame]
 	jbsr	do_rt_sigreturn
-	addql	#8,%sp
+
+1:
+	| stack contents now:
+	|   [original pt_regs address] [original switch_stack address]
+	|   [unused part of the gap] [moved switch_stack] [moved pt_regs]
+	|   [replacement exception frame]
+	| return value of do_{rt_,}sigreturn() points to moved switch_stack.
+
+	movel	%d0,%sp			  | discard the leftover junk
 	RESTORE_SWITCH_STACK
+	| stack contents now is just [syscall return address] [pt_regs] [frame]
+	| return pt_regs.d0
+	movel	%sp@(PT_OFF_D0+4),%d0
 	rts
 
 ENTRY(buserr)
@@ ... @@
 	jbsr	syscall_trace
 	RESTORE_SWITCH_STACK
 	addql	#4,%sp
-	jra	.Lret_from_exception
-
-ENTRY(ret_from_signal)
-	movel	%curptr@(TASK_STACK),%a1
-	tstb	%a1@(TINFO_FLAGS+2)
-	jge	1f
-	jbsr	syscall_trace
-1:	RESTORE_SWITCH_STACK
-	addql	#4,%sp
-/* on 68040 complete pending writebacks if any */
-#ifdef CONFIG_M68040
-	bfextu	%sp@(PT_OFF_FORMATVEC){#0,#4},%d0
-	subql	#7,%d0			| bus error frame ?
-	jbne	1f
-	movel	%sp,%sp@-
-	jbsr	berr_040cleanup
-	addql	#4,%sp
-1:
-#endif
 	jra	.Lret_from_exception
 
 ENTRY(system_call)
@@ ... @@
 
 	/* save fs (sfc,%dfc) (may be pointing to kernel memory) */
 	movec	%sfc,%d0
-	movew	%d0,%a0@(TASK_THREAD+THREAD_FS)
+	movew	%d0,%a0@(TASK_THREAD+THREAD_FC)
 
 	/* save usp */
 	/* it is better to use a movel here instead of a movew 8*) */
@@ ... @@
 	movel	%a0,%usp
 
 	/* restore fs (sfc,%dfc) */
-	movew	%a1@(TASK_THREAD+THREAD_FS),%a0
+	movew	%a1@(TASK_THREAD+THREAD_FC),%a0
 	movec	%a0,%sfc
 	movec	%a0,%dfc
 
+2 -2
arch/m68k/kernel/process.c
@@ ... @@
 
 void flush_thread(void)
 {
-	current->thread.fs = __USER_DS;
+	current->thread.fc = USER_DATA;
 #ifdef CONFIG_FPU
 	if (!FPU_IS_EMU) {
 		unsigned long zero = 0;
@@ ... @@
 	 * Must save the current SFC/DFC value, NOT the value when
 	 * the parent was last descheduled - RGH 10-08-96
 	 */
-	p->thread.fs = get_fs().seg;
+	p->thread.fc = USER_DATA;
 
 	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
 		/* kernel thread */
+82 -115
arch/m68k/kernel/signal.c
@@ ... @@
 
 	if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
 		fpu_version = sc->sc_fpstate[0];
-		if (CPU_IS_020_OR_030 &&
+		if (CPU_IS_020_OR_030 && !regs->stkadj &&
 		    regs->vector >= (VEC_FPBRUC * 4) &&
 		    regs->vector <= (VEC_FPNAN * 4)) {
 			/* Clear pending exception in 68882 idle frame */
@@ ... @@
 		if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
 			context_size = fpstate[1];
 		fpu_version = fpstate[0];
-		if (CPU_IS_020_OR_030 &&
+		if (CPU_IS_020_OR_030 && !regs->stkadj &&
 		    regs->vector >= (VEC_FPBRUC * 4) &&
 		    regs->vector <= (VEC_FPNAN * 4)) {
 			/* Clear pending exception in 68882 idle frame */
@@ ... @@
 static int mangle_kernel_stack(struct pt_regs *regs, int formatvec,
 			       void __user *fp)
 {
-	int fsize = frame_extra_sizes(formatvec >> 12);
-	if (fsize < 0) {
+	int extra = frame_extra_sizes(formatvec >> 12);
+	char buf[sizeof_field(struct frame, un)];
+
+	if (extra < 0) {
 		/*
 		 * user process trying to return with weird frame format
 		 */
 		pr_debug("user process returning with weird frame format\n");
-		return 1;
+		return -1;
 	}
-	if (!fsize) {
-		regs->format = formatvec >> 12;
-		regs->vector = formatvec & 0xfff;
-	} else {
-		struct switch_stack *sw = (struct switch_stack *)regs - 1;
-		/* yes, twice as much as max(sizeof(frame.un.fmt<x>)) */
-		unsigned long buf[sizeof_field(struct frame, un) / 2];
+	if (extra && copy_from_user(buf, fp, extra))
+		return -1;
+	regs->format = formatvec >> 12;
+	regs->vector = formatvec & 0xfff;
+	if (extra) {
+		void *p = (struct switch_stack *)regs - 1;
+		struct frame *new = (void *)regs - extra;
+		int size = sizeof(struct pt_regs)+sizeof(struct switch_stack);
 
-		/* that'll make sure that expansion won't crap over data */
-		if (copy_from_user(buf + fsize / 4, fp, fsize))
-			return 1;
-
-		/* point of no return */
-		regs->format = formatvec >> 12;
-		regs->vector = formatvec & 0xfff;
-#define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))
-		__asm__ __volatile__ (
-#ifdef CONFIG_COLDFIRE
-			 "   movel %0,%/sp\n\t"
-			 "   bra ret_from_signal\n"
-#else
-			 "   movel %0,%/a0\n\t"
-			 "   subl %1,%/a0\n\t" /* make room on stack */
-			 "   movel %/a0,%/sp\n\t" /* set stack pointer */
-			 /* move switch_stack and pt_regs */
-			 "1: movel %0@+,%/a0@+\n\t"
-			 "   dbra %2,1b\n\t"
-			 "   lea %/sp@(%c3),%/a0\n\t" /* add offset of fmt */
-			 "   lsrl #2,%1\n\t"
-			 "   subql #1,%1\n\t"
-			 /* copy to the gap we'd made */
-			 "2: movel %4@+,%/a0@+\n\t"
-			 "   dbra %1,2b\n\t"
-			 "   bral ret_from_signal\n"
+		memmove(p - extra, p, size);
+		memcpy(p - extra + size, buf, extra);
+		current->thread.esp0 = (unsigned long)&new->ptregs;
+#ifdef CONFIG_M68040
+		/* on 68040 complete pending writebacks if any */
+		if (new->ptregs.format == 7) // bus error frame
+			berr_040cleanup(new);
 #endif
-			 : /* no outputs, it doesn't ever return */
-			 : "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
-			   "n" (frame_offset), "a" (buf + fsize/4)
-			 : "a0");
-#undef frame_offset
 	}
-	return 0;
+	return extra;
 }
 
 static inline int
@@ ... @@
 {
 	int formatvec;
 	struct sigcontext context;
-	int err = 0;
 
 	siginfo_build_tests();
 
@@ ... @@
 
 	/* get previous context */
 	if (copy_from_user(&context, usc, sizeof(context)))
-		goto badframe;
+		return -1;
 
 	/* restore passed registers */
 	regs->d0 = context.sc_d0;
@@ ... @@
 	wrusp(context.sc_usp);
 	formatvec = context.sc_formatvec;
 
-	err = restore_fpu_state(&context);
+	if (restore_fpu_state(&context))
+		return -1;
 
-	if (err || mangle_kernel_stack(regs, formatvec, fp))
-		goto badframe;
-
-	return 0;
-
-badframe:
-	return 1;
+	return mangle_kernel_stack(regs, formatvec, fp);
 }
 
 static inline int
@@ ... @@
 
 	err = __get_user(temp, &uc->uc_mcontext.version);
 	if (temp != MCONTEXT_VERSION)
-		goto badframe;
+		return -1;
 	/* restore passed registers */
 	err |= __get_user(regs->d0, &gregs[0]);
 	err |= __get_user(regs->d1, &gregs[1]);
@@ ... @@
 	err |= restore_altstack(&uc->uc_stack);
 
 	if (err)
-		goto badframe;
+		return -1;
 
-	if (mangle_kernel_stack(regs, temp, &uc->uc_extra))
-		goto badframe;
-
-	return 0;
-
-badframe:
-	return 1;
+	return mangle_kernel_stack(regs, temp, &uc->uc_extra);
 }
 
-asmlinkage int do_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
+asmlinkage void *do_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
 {
 	unsigned long usp = rdusp();
 	struct sigframe __user *frame = (struct sigframe __user *)(usp - 4);
 	sigset_t set;
+	int size;
 
 	if (!access_ok(frame, sizeof(*frame)))
 		goto badframe;
@@ ... @@
 
 	set_current_blocked(&set);
 
-	if (restore_sigcontext(regs, &frame->sc, frame + 1))
+	size = restore_sigcontext(regs, &frame->sc, frame + 1);
+	if (size < 0)
 		goto badframe;
-	return regs->d0;
+	return (void *)sw - size;
 
 badframe:
 	force_sig(SIGSEGV);
-	return 0;
+	return sw;
 }
 
-asmlinkage int do_rt_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
+asmlinkage void *do_rt_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
 {
 	unsigned long usp = rdusp();
 	struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4);
 	sigset_t set;
+	int size;
 
 	if (!access_ok(frame, sizeof(*frame)))
 		goto badframe;
@@ ... @@
 
 	set_current_blocked(&set);
 
-	if (rt_restore_ucontext(regs, sw, &frame->uc))
+	size = rt_restore_ucontext(regs, sw, &frame->uc);
+	if (size < 0)
 		goto badframe;
-	return regs->d0;
+	return (void *)sw - size;
 
 badframe:
 	force_sig(SIGSEGV);
-	return 0;
+	return sw;
+}
+
+static inline struct pt_regs *rte_regs(struct pt_regs *regs)
+{
+	return (void *)regs + regs->stkadj;
 }
 
 static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
 			     unsigned long mask)
 {
+	struct pt_regs *tregs = rte_regs(regs);
 	sc->sc_mask = mask;
 	sc->sc_usp = rdusp();
 	sc->sc_d0 = regs->d0;
 	sc->sc_d1 = regs->d1;
 	sc->sc_a0 = regs->a0;
 	sc->sc_a1 = regs->a1;
-	sc->sc_sr = regs->sr;
-	sc->sc_pc = regs->pc;
-	sc->sc_formatvec = regs->format << 12 | regs->vector;
+	sc->sc_sr = tregs->sr;
+	sc->sc_pc = tregs->pc;
+	sc->sc_formatvec = tregs->format << 12 | tregs->vector;
 	save_a5_state(sc, regs);
 	save_fpu_state(sc, regs);
 }
@@ ... @@
 static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs)
 {
 	struct switch_stack *sw = (struct switch_stack *)regs - 1;
+	struct pt_regs *tregs = rte_regs(regs);
 	greg_t __user *gregs = uc->uc_mcontext.gregs;
 	int err = 0;
 
@@ ... @@
 	err |= __put_user(sw->a5, &gregs[13]);
 	err |= __put_user(sw->a6, &gregs[14]);
 	err |= __put_user(rdusp(), &gregs[15]);
-	err |= __put_user(regs->pc, &gregs[16]);
-	err |= __put_user(regs->sr, &gregs[17]);
-	err |= __put_user((regs->format << 12) | regs->vector, &uc->uc_formatvec);
+	err |= __put_user(tregs->pc, &gregs[16]);
+	err |= __put_user(tregs->sr, &gregs[17]);
+	err |= __put_user((tregs->format << 12) | tregs->vector, &uc->uc_formatvec);
 	err |= rt_save_fpu_state(uc, regs);
 	return err;
 }
@@ ... @@
 			  struct pt_regs *regs)
 {
 	struct sigframe __user *frame;
-	int fsize = frame_extra_sizes(regs->format);
+	struct pt_regs *tregs = rte_regs(regs);
+	int fsize = frame_extra_sizes(tregs->format);
 	struct sigcontext context;
 	int err = 0, sig = ksig->sig;
 
 	if (fsize < 0) {
 		pr_debug("setup_frame: Unknown frame format %#x\n",
-			 regs->format);
+			 tregs->format);
 		return -EFAULT;
 	}
 
@@ ... @@
 
 	err |= __put_user(sig, &frame->sig);
 
-	err |= __put_user(regs->vector, &frame->code);
+	err |= __put_user(tregs->vector, &frame->code);
 	err |= __put_user(&frame->sc, &frame->psc);
 
 	if (_NSIG_WORDS > 1)
@@ ... @@
 	push_cache ((unsigned long) &frame->retcode);
 
 	/*
-	 * Set up registers for signal handler.  All the state we are about
-	 * to destroy is successfully copied to sigframe.
-	 */
-	wrusp ((unsigned long) frame);
-	regs->pc = (unsigned long) ksig->ka.sa.sa_handler;
-	adjustformat(regs);
-
-	/*
 	 * This is subtle; if we build more than one sigframe, all but the
 	 * first one will see frame format 0 and have fsize == 0, so we won't
 	 * screw stkadj.
 	 */
-	if (fsize)
+	if (fsize) {
 		regs->stkadj = fsize;
-
-	/* Prepare to skip over the extra stuff in the exception frame. */
-	if (regs->stkadj) {
-		struct pt_regs *tregs =
-			(struct pt_regs *)((ulong)regs + regs->stkadj);
+		tregs = rte_regs(regs);
 		pr_debug("Performing stackadjust=%04lx\n", regs->stkadj);
-		/* This must be copied with decreasing addresses to
-		   handle overlaps.  */
 		tregs->vector = 0;
 		tregs->format = 0;
-		tregs->pc = regs->pc;
 		tregs->sr = regs->sr;
 	}
+
+	/*
+	 * Set up registers for signal handler. All the state we are about
+	 * to destroy is successfully copied to sigframe.
+	 */
+	wrusp ((unsigned long) frame);
+	tregs->pc = (unsigned long) ksig->ka.sa.sa_handler;
+	adjustformat(regs);
+
 	return 0;
 }
 
@@ ... @@
 			     struct pt_regs *regs)
 {
 	struct rt_sigframe __user *frame;
-	int fsize = frame_extra_sizes(regs->format);
+	struct pt_regs *tregs = rte_regs(regs);
+	int fsize = frame_extra_sizes(tregs->format);
 	int err = 0, sig = ksig->sig;
 
 	if (fsize < 0) {
@@ ... @@
 	push_cache ((unsigned long) &frame->retcode);
 
 	/*
-	 * Set up registers for signal handler.  All the state we are about
-	 * to destroy is successfully copied to sigframe.
-	 */
-	wrusp ((unsigned long) frame);
-	regs->pc = (unsigned long) ksig->ka.sa.sa_handler;
-	adjustformat(regs);
-
-	/*
 	 * This is subtle; if we build more than one sigframe, all but the
 	 * first one will see frame format 0 and have fsize == 0, so we won't
 	 * screw stkadj.
 	 */
-	if (fsize)
+	if (fsize) {
 		regs->stkadj = fsize;
-
-	/* Prepare to skip over the extra stuff in the exception frame. */
-	if (regs->stkadj) {
-		struct pt_regs *tregs =
-			(struct pt_regs *)((ulong)regs + regs->stkadj);
+		tregs = rte_regs(regs);
 		pr_debug("Performing stackadjust=%04lx\n", regs->stkadj);
-		/* This must be copied with decreasing addresses to
-		   handle overlaps.  */
 		tregs->vector = 0;
 		tregs->format = 0;
-		tregs->pc = regs->pc;
 		tregs->sr = regs->sr;
 	}
+
+	/*
+	 * Set up registers for signal handler. All the state we are about
+	 * to destroy is successfully copied to sigframe.
+	 */
+	wrusp ((unsigned long) frame);
+	tregs->pc = (unsigned long) ksig->ka.sa.sa_handler;
+	adjustformat(regs);
 	return 0;
 }
 
+4 -9
arch/m68k/kernel/traps.c
@@ ... @@
 static inline unsigned long probe040(int iswrite, unsigned long addr, int wbs)
 {
 	unsigned long mmusr;
-	mm_segment_t old_fs = get_fs();
 
-	set_fs(MAKE_MM_SEG(wbs));
+	set_fc(wbs);
 
 	if (iswrite)
 		asm volatile (".chip 68040; ptestw (%0); .chip 68k" : : "a" (addr));
@@ ... @@
 
 	asm volatile (".chip 68040; movec %%mmusr,%0; .chip 68k" : "=r" (mmusr));
 
-	set_fs(old_fs);
+	set_fc(USER_DATA);
 
 	return mmusr;
 }
@@ ... @@
 			    unsigned long wbd)
 {
 	int res = 0;
-	mm_segment_t old_fs = get_fs();
 
-	/* set_fs can not be moved, otherwise put_user() may oops */
-	set_fs(MAKE_MM_SEG(wbs));
+	set_fc(wbs);
 
 	switch (wbs & WBSIZ_040) {
 	case BA_SIZE_BYTE:
@@ ... @@
 		break;
 	}
 
-	/* set_fs can not be moved, otherwise put_user() may oops */
-	set_fs(old_fs);
-
+	set_fc(USER_DATA);
 
 	pr_debug("do_040writeback1, res=%d\n", res);
 
arch/mips/include/asm/mips-cps.h
@@ ... @@
 #include <linux/io.h>
 #include <linux/types.h>
 
-#include <asm/mips-boards/launch.h>
-
 extern unsigned long __cps_access_bad_size(void)
 	__compiletime_error("Bad size for CPS accessor");
 
@@ ... @@
  */
 static inline unsigned int mips_cps_numcores(unsigned int cluster)
 {
-	unsigned int ncores;
-
 	if (!mips_cm_present())
 		return 0;
 
 	/* Add one before masking to handle 0xff indicating no cores */
-	ncores = (mips_cps_cluster_config(cluster) + 1) & CM_GCR_CONFIG_PCORES;
-
-	if (IS_ENABLED(CONFIG_SOC_MT7621)) {
-		struct cpulaunch *launch;
-
-		/*
-		 * Ralink MT7621S SoC is single core, but the GCR_CONFIG method
-		 * always reports 2 cores. Check the second core's LAUNCH_FREADY
-		 * flag to detect if the second core is missing. This method
-		 * only works before the core has been started.
-		 */
-		launch = (struct cpulaunch *)CKSEG0ADDR(CPULAUNCH);
-		launch += 2; /* MT7621 has 2 VPEs per core */
-		if (!(launch->flags & LAUNCH_FREADY))
-			ncores = 1;
-	}
-
-	return ncores;
+	return (mips_cps_cluster_config(cluster) + 1) & CM_GCR_CONFIG_PCORES;
 }
 
 /**
+1 -3
arch/mips/kernel/signal.c
@@ ... @@
 	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
 		do_signal(regs);
 
-	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+	if (thread_info_flags & _TIF_NOTIFY_RESUME)
 		tracehook_notify_resume(regs);
-		rseq_handle_notify_resume(NULL, regs);
-	}
 
 	user_enter();
 }
+43 -14
arch/mips/net/bpf_jit.c
@@ ... @@
 	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative : func) : \
 	 func##_positive)
 
+static bool is_bad_offset(int b_off)
+{
+	return b_off > 0x1ffff || b_off < -0x20000;
+}
+
 static int build_body(struct jit_ctx *ctx)
 {
 	const struct bpf_prog *prog = ctx->skf;
@@ ... @@
 			/* Load return register on DS for failures */
 			emit_reg_move(r_ret, r_zero, ctx);
 			/* Return with error */
-			emit_b(b_imm(prog->len, ctx), ctx);
+			b_off = b_imm(prog->len, ctx);
+			if (is_bad_offset(b_off))
+				return -E2BIG;
+			emit_b(b_off, ctx);
 			emit_nop(ctx);
 			break;
 		case BPF_LD | BPF_W | BPF_IND:
@@ ... @@
 			emit_jalr(MIPS_R_RA, r_s0, ctx);
 			emit_reg_move(MIPS_R_A0, r_skb, ctx); /* delay slot */
 			/* Check the error value */
-			emit_bcond(MIPS_COND_NE, r_ret, 0,
-				   b_imm(prog->len, ctx), ctx);
+			b_off = b_imm(prog->len, ctx);
+			if (is_bad_offset(b_off))
+				return -E2BIG;
+			emit_bcond(MIPS_COND_NE, r_ret, 0, b_off, ctx);
 			emit_reg_move(r_ret, r_zero, ctx);
 			/* We are good */
 			/* X <- P[1:K] & 0xf */
@@ ... @@
 			/* A /= X */
 			ctx->flags |= SEEN_X | SEEN_A;
 			/* Check if r_X is zero */
-			emit_bcond(MIPS_COND_EQ, r_X, r_zero,
-				   b_imm(prog->len, ctx), ctx);
+			b_off = b_imm(prog->len, ctx);
+			if (is_bad_offset(b_off))
+				return -E2BIG;
+			emit_bcond(MIPS_COND_EQ, r_X, r_zero, b_off, ctx);
 			emit_load_imm(r_ret, 0, ctx); /* delay slot */
 			emit_div(r_A, r_X, ctx);
 			break;
@@ ... @@
 			/* A %= X */
 			ctx->flags |= SEEN_X | SEEN_A;
 			/* Check if r_X is zero */
-			emit_bcond(MIPS_COND_EQ, r_X, r_zero,
-				   b_imm(prog->len, ctx), ctx);
+			b_off = b_imm(prog->len, ctx);
+			if (is_bad_offset(b_off))
+				return -E2BIG;
+			emit_bcond(MIPS_COND_EQ, r_X, r_zero, b_off, ctx);
 			emit_load_imm(r_ret, 0, ctx); /* delay slot */
 			emit_mod(r_A, r_X, ctx);
 			break;
@@ ... @@
 			break;
 		case BPF_JMP | BPF_JA:
 			/* pc += K */
-			emit_b(b_imm(i + k + 1, ctx), ctx);
+			b_off = b_imm(i + k + 1, ctx);
+			if (is_bad_offset(b_off))
+				return -E2BIG;
+			emit_b(b_off, ctx);
 			emit_nop(ctx);
 			break;
 		case BPF_JMP | BPF_JEQ | BPF_K:
@@ ... @@
 			break;
 		case BPF_RET | BPF_A:
 			ctx->flags |= SEEN_A;
-			if (i != prog->len - 1)
+			if (i != prog->len - 1) {
 				/*
 				 * If this is not the last instruction
 				 * then jump to the epilogue
 				 */
-				emit_b(b_imm(prog->len, ctx), ctx);
+				b_off = b_imm(prog->len, ctx);
+				if (is_bad_offset(b_off))
+					return -E2BIG;
+				emit_b(b_off, ctx);
+			}
 			emit_reg_move(r_ret, r_A, ctx); /* delay slot */
 			break;
 		case BPF_RET | BPF_K:
@@ ... @@
 				 * If this is not the last instruction
 				 * then jump to the epilogue
 				 */
-				emit_b(b_imm(prog->len, ctx), ctx);
+				b_off = b_imm(prog->len, ctx);
+				if (is_bad_offset(b_off))
+					return -E2BIG;
+				emit_b(b_off, ctx);
 				emit_nop(ctx);
 			}
 			break;
@@ ... @@
 			/* Load *dev pointer */
 			emit_load_ptr(r_s0, r_skb, off, ctx);
 			/* error (0) in the delay slot */
-			emit_bcond(MIPS_COND_EQ, r_s0, r_zero,
-				   b_imm(prog->len, ctx), ctx);
+			b_off = b_imm(prog->len, ctx);
+			if (is_bad_offset(b_off))
+				return -E2BIG;
+			emit_bcond(MIPS_COND_EQ, r_s0, r_zero, b_off, ctx);
 			emit_reg_move(r_ret, r_zero, ctx);
 			if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
 				BUILD_BUG_ON(sizeof_field(struct net_device, ifindex) != 4);
@@ ... @@
 
 	/* Generate the actual JIT code */
 	build_prologue(&ctx);
-	build_body(&ctx);
+	if (build_body(&ctx)) {
+		module_memfree(ctx.target);
+		goto out;
+	}
 	build_epilogue(&ctx);
 
 	/* Update the icache */
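Editor's aside, not part of the patch above: the 0x1ffff/-0x20000 bounds in is_bad_offset() follow from the MIPS branch encoding, whose displacement is a signed 16-bit count of 4-byte instruction words, i.e. byte offsets from -0x8000*4 up to 0x7fff*4. A self-contained sanity check (names invented for illustration):

    #include <assert.h>

    /* Same bounds the JIT enforces before emitting a branch. */
    static int fits_mips_branch(int byte_off)
    {
    	return byte_off >= -0x20000 && byte_off <= 0x1ffff;
    }

    int main(void)
    {
    	assert(fits_mips_branch(0x7fff * 4));	/* farthest forward */
    	assert(!fits_mips_branch(0x8000 * 4));	/* one word too far */
    	assert(fits_mips_branch(-0x8000 * 4));	/* farthest backward */
    	assert(!fits_mips_branch(-0x8001 * 4));
    	return 0;
    }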
+2 -1
arch/nios2/Kconfig.debug
@@ ... @@
 config EARLY_PRINTK
 	bool "Activate early kernel debugging"
 	default y
+	depends on TTY
 	select SERIAL_CORE_CONSOLE
 	help
-	  Enable early printk on console
+	  Enable early printk on console.
 	  This is useful for kernel debugging when your machine crashes very
 	  early before the console code is initialized.
 	  You should normally say N here, unless you want to debug such a crash.
-2
arch/nios2/kernel/setup.c
@@ ... @@
 
 void __init setup_arch(char **cmdline_p)
 {
-	int dram_start;
-
 	console_verbose();
 
 	memory_start = memblock_start_of_DRAM();
arch/s390/kvm/kvm-s390.h
@@ ... @@
 
 static inline int is_vcpu_idle(struct kvm_vcpu *vcpu)
 {
-	return test_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
+	return test_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
 }
 
 static inline int kvm_is_ucontrol(struct kvm *kvm)
+1 -1
arch/sh/include/asm/pgtable-3level.h
@@ ... @@
 
 static inline pmd_t *pud_pgtable(pud_t pud)
 {
-	return (pmd_t *)pud_val(pud);
+	return (pmd_t *)(unsigned long)pud_val(pud);
 }
 
 /* only used by the stubbed out hugetlb gup code, should never be called */
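Editor's aside, not part of the patch above: the intermediate (unsigned long) matters when pud_val() is wider than a pointer, e.g. 64-bit page-table entries on a 32-bit build; converting through unsigned long first makes the truncation explicit and avoids the compiler's cast-to-pointer-from-integer-of-different-size warning. A standalone illustration (typedef invented for the example):

    #include <stdint.h>

    typedef uint64_t pudval_t;	/* assumption: 64-bit PTE words, 32-bit CPU */

    static void *entry_to_ptr(pudval_t v)
    {
    	/* (void *)v alone would warn on a 32-bit target. */
    	return (void *)(unsigned long)v;
    }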
arch/x86/hyperv/hv_apic.c
@@ ... @@
 	ipi_arg->reserved = 0;
 	ipi_arg->vp_set.valid_bank_mask = 0;
 
-	if (!cpumask_equal(mask, cpu_present_mask)) {
+	/*
+	 * Use HV_GENERIC_SET_ALL and avoid converting cpumask to VP_SET
+	 * when the IPI is sent to all currently present CPUs.
+	 */
+	if (!cpumask_equal(mask, cpu_present_mask) || exclude_self) {
 		ipi_arg->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
 		if (exclude_self)
 			nr_bank = cpumask_to_vpset_noself(&(ipi_arg->vp_set), mask);
 		else
 			nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask);
-	}
-	if (nr_bank < 0)
-		goto ipi_mask_ex_done;
-	if (!nr_bank)
+
+		/*
+		 * 'nr_bank <= 0' means some CPUs in cpumask can't be
+		 * represented in VP_SET. Return an error and fall back to
+		 * native (architectural) method of sending IPIs.
+		 */
+		if (nr_bank <= 0)
+			goto ipi_mask_ex_done;
+	} else {
 		ipi_arg->vp_set.format = HV_GENERIC_SET_ALL;
+	}
 
 	status = hv_do_rep_hypercall(HVCALL_SEND_IPI_EX, 0, nr_bank,
 				     ipi_arg, NULL);
arch/x86/include/asm/pkeys.h
@@ ... @@
 #ifndef _ASM_X86_PKEYS_H
 #define _ASM_X86_PKEYS_H
 
-#define ARCH_DEFAULT_PKEY	0
-
 /*
  * If more than 16 keys are ever supported, a thorough audit
  * will be necessary to ensure that the types that store key
arch/x86/include/asm/xen/swiotlb-xen.h
@@ ... @@
 #define _ASM_X86_SWIOTLB_XEN_H
 
 #ifdef CONFIG_SWIOTLB_XEN
-extern int xen_swiotlb;
 extern int __init pci_xen_swiotlb_detect(void);
-extern void __init pci_xen_swiotlb_init(void);
 extern int pci_xen_swiotlb_init_late(void);
 #else
-#define xen_swiotlb (0)
-static inline int __init pci_xen_swiotlb_detect(void) { return 0; }
-static inline void __init pci_xen_swiotlb_init(void) { }
+#define pci_xen_swiotlb_detect NULL
 static inline int pci_xen_swiotlb_init_late(void) { return -ENXIO; }
 #endif
 
+2 -11
arch/x86/kernel/kvmclock.c
@@ ... @@
 static struct pvclock_vsyscall_time_info
 			hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
 static struct pvclock_wall_clock wall_clock __bss_decrypted;
-static DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
 static struct pvclock_vsyscall_time_info *hvclock_mem;
-
-static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
-{
-	return &this_cpu_read(hv_clock_per_cpu)->pvti;
-}
-
-static inline struct pvclock_vsyscall_time_info *this_cpu_hvclock(void)
-{
-	return this_cpu_read(hv_clock_per_cpu);
-}
+DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
+EXPORT_PER_CPU_SYMBOL_GPL(hv_clock_per_cpu);
 
 /*
  * The wallclock is the time of day when we booted. Since then, some time may
+14 -12
arch/x86/kernel/setup.c
@@ ... @@
 
 	x86_init.oem.arch_setup();
 
+	/*
+	 * Do some memory reservations *before* memory is added to memblock, so
+	 * memblock allocations won't overwrite it.
+	 *
+	 * After this point, everything still needed from the boot loader or
+	 * firmware or kernel text should be early reserved or marked not RAM in
+	 * e820. All other memory is free game.
+	 *
+	 * This call needs to happen before e820__memory_setup() which calls the
+	 * xen_memory_setup() on Xen dom0 which relies on the fact that those
+	 * early reservations have happened already.
+	 */
+	early_reserve_memory();
+
 	iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
 	e820__memory_setup();
 	parse_setup_data();
@@ ... @@
 	x86_configure_nx();
 
 	parse_early_param();
-
-	/*
-	 * Do some memory reservations *before* memory is added to
-	 * memblock, so memblock allocations won't overwrite it.
-	 * Do it after early param, so we could get (unlikely) panic from
-	 * serial.
-	 *
-	 * After this point everything still needed from the boot loader or
-	 * firmware or kernel text should be early reserved or marked not
-	 * RAM in e820. All other memory is free game.
-	 */
-	early_reserve_memory();
 
 #ifdef CONFIG_MEMORY_HOTPLUG
 	/*
+2 -2
arch/x86/kvm/cpuid.c
@@ ... @@
 	for (i = 0; i < nent; i++) {
 		e = &entries[i];
 
-		if (e->function == function && (e->index == index ||
-		    !(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX)))
+		if (e->function == function &&
+		    (!(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) || e->index == index))
 			return e;
 	}
 
arch/x86/kvm/mmu/paging_tmpl.h
@@ ... @@
 		if (!is_shadow_present_pte(*it.sptep)) {
 			table_gfn = gw->table_gfn[it.level - 2];
 			access = gw->pt_access[it.level - 2];
-			sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
-					      false, access);
+			sp = kvm_mmu_get_page(vcpu, table_gfn, addr,
+					      it.level-1, false, access);
+			/*
+			 * We must synchronize the pagetable before linking it
+			 * because the guest doesn't need to flush tlb when
+			 * the gpte is changed from non-present to present.
+			 * Otherwise, the guest may use the wrong mapping.
+			 *
+			 * For PG_LEVEL_4K, kvm_mmu_get_page() has already
+			 * synchronized it transiently via kvm_sync_page().
+			 *
+			 * For higher level pagetable, we synchronize it via
+			 * the slower mmu_sync_children().  If it needs to
+			 * break, some progress has been made; return
+			 * RET_PF_RETRY and retry on the next #PF.
+			 * KVM_REQ_MMU_SYNC is not necessary but it
+			 * expedites the process.
+			 */
+			if (sp->unsync_children &&
+			    mmu_sync_children(vcpu, sp, false))
+				return RET_PF_RETRY;
 		}
 
 		/*
@@ ... @@
  * Using the cached information from sp->gfns is safe because:
  * - The spte has a reference to the struct page, so the pfn for a given gfn
  *   can't change unless all sptes pointing to it are nuked first.
- *
- * Note:
- *   We should flush all tlbs if spte is dropped even though guest is
- *   responsible for it. Since if we don't, kvm_mmu_notifier_invalidate_page
- *   and kvm_mmu_notifier_invalidate_range_start detect the mapping page isn't
- *   used by guest then tlbs are not flushed, so guest is allowed to access the
- *   freed pages.
- *   And we increase kvm->tlbs_dirty to delay tlbs flush in this case.
  */
 static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
@@ ... @@
 			return 0;
 
 		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
-			/*
-			 * Update spte before increasing tlbs_dirty to make
-			 * sure no tlb flush is lost after spte is zapped; see
-			 * the comments in kvm_flush_remote_tlbs().
-			 */
-			smp_wmb();
-			vcpu->kvm->tlbs_dirty++;
+			set_spte_ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
 			continue;
 		}
@@ ... @@
 
 		if (gfn != sp->gfns[i]) {
 			drop_spte(vcpu->kvm, &sp->spt[i]);
-			/*
-			 * The same as above where we are doing
-			 * prefetch_invalid_gpte().
-			 */
-			smp_wmb();
-			vcpu->kvm->tlbs_dirty++;
+			set_spte_ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
 			continue;
 		}
 
arch/x86/kvm/svm/sev.c
@@ ... @@
 	return 0;
 }
 
+static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
+				    int *error)
+{
+	struct sev_data_launch_update_vmsa vmsa;
+	struct vcpu_svm *svm = to_svm(vcpu);
+	int ret;
+
+	/* Perform some pre-encryption checks against the VMSA */
+	ret = sev_es_sync_vmsa(svm);
+	if (ret)
+		return ret;
+
+	/*
+	 * The LAUNCH_UPDATE_VMSA command will perform in-place encryption of
+	 * the VMSA memory content (i.e it will write the same memory region
+	 * with the guest's key), so invalidate it first.
+	 */
+	clflush_cache_range(svm->vmsa, PAGE_SIZE);
+
+	vmsa.reserved = 0;
+	vmsa.handle = to_kvm_svm(kvm)->sev_info.handle;
+	vmsa.address = __sme_pa(svm->vmsa);
+	vmsa.len = PAGE_SIZE;
+	return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error);
+}
+
 static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
 {
-	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
-	struct sev_data_launch_update_vmsa vmsa;
 	struct kvm_vcpu *vcpu;
 	int i, ret;
 
 	if (!sev_es_guest(kvm))
 		return -ENOTTY;
 
-	vmsa.reserved = 0;
-
 	kvm_for_each_vcpu(i, vcpu, kvm) {
-		struct vcpu_svm *svm = to_svm(vcpu);
-
-		/* Perform some pre-encryption checks against the VMSA */
-		ret = sev_es_sync_vmsa(svm);
+		ret = mutex_lock_killable(&vcpu->mutex);
 		if (ret)
 			return ret;
 
-		/*
-		 * The LAUNCH_UPDATE_VMSA command will perform in-place
-		 * encryption of the VMSA memory content (i.e it will write
-		 * the same memory region with the guest's key), so invalidate
-		 * it first.
-		 */
-		clflush_cache_range(svm->vmsa, PAGE_SIZE);
+		ret = __sev_launch_update_vmsa(kvm, vcpu, &argp->error);
 
-		vmsa.handle = sev->handle;
-		vmsa.address = __sme_pa(svm->vmsa);
-		vmsa.len = PAGE_SIZE;
-		ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa,
-				    &argp->error);
+		mutex_unlock(&vcpu->mutex);
 		if (ret)
 			return ret;
-
-		svm->vcpu.arch.guest_state_protected = true;
 	}
 
 	return 0;
@@ ... @@
 
 	/* Bind ASID to this guest */
 	ret = sev_bind_asid(kvm, start.handle, error);
-	if (ret)
+	if (ret) {
+		sev_decommission(start.handle);
 		goto e_free_session;
+	}
 
 	params.handle = start.handle;
 	if (copy_to_user((void __user *)(uintptr_t)argp->data,
@@ ... @@
 
 	/* Pin guest memory */
 	guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
-				    PAGE_SIZE, &n, 0);
+				    PAGE_SIZE, &n, 1);
 	if (IS_ERR(guest_page)) {
 		ret = PTR_ERR(guest_page);
 		goto e_free_trans;
@@ ... @@
 	return sev_issue_cmd(kvm, SEV_CMD_RECEIVE_FINISH, &data, &argp->error);
 }
 
+static bool cmd_allowed_from_miror(u32 cmd_id)
+{
+	/*
+	 * Allow mirrors VM to call KVM_SEV_LAUNCH_UPDATE_VMSA to enable SEV-ES
+	 * active mirror VMs. Also allow the debugging and status commands.
+	 */
+	if (cmd_id == KVM_SEV_LAUNCH_UPDATE_VMSA ||
+	    cmd_id == KVM_SEV_GUEST_STATUS || cmd_id == KVM_SEV_DBG_DECRYPT ||
+	    cmd_id == KVM_SEV_DBG_ENCRYPT)
+		return true;
+
+	return false;
+}
+
 int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
 {
 	struct kvm_sev_cmd sev_cmd;
@@ ... @@
 
 	mutex_lock(&kvm->lock);
 
-	/* enc_context_owner handles all memory enc operations */
-	if (is_mirroring_enc_context(kvm)) {
+	/* Only the enc_context_owner handles some memory enc operations. */
+	if (is_mirroring_enc_context(kvm) &&
+	    !cmd_allowed_from_miror(sev_cmd.id)) {
 		r = -EINVAL;
 		goto out;
 	}
@@ ... @@
 {
 	struct file *source_kvm_file;
 	struct kvm *source_kvm;
-	struct kvm_sev_info *mirror_sev;
-	unsigned int asid;
+	struct kvm_sev_info source_sev, *mirror_sev;
 	int ret;
 
 	source_kvm_file = fget(source_fd);
@@ ... @@
 		goto e_source_unlock;
 	}
 
-	asid = to_kvm_svm(source_kvm)->sev_info.asid;
+	memcpy(&source_sev, &to_kvm_svm(source_kvm)->sev_info,
+	       sizeof(source_sev));
 
 	/*
 	 * The mirror kvm holds an enc_context_owner ref so its asid can't
@@ ... @@
 	/* Set enc_context_owner and copy its encryption context over */
 	mirror_sev = &to_kvm_svm(kvm)->sev_info;
 	mirror_sev->enc_context_owner = source_kvm;
-	mirror_sev->asid = asid;
 	mirror_sev->active = true;
+	mirror_sev->asid = source_sev.asid;
+	mirror_sev->fd = source_sev.fd;
+	mirror_sev->es_active = source_sev.es_active;
+	mirror_sev->handle = source_sev.handle;
+	/*
+	 * Do not copy ap_jump_table. Since the mirror does not share the same
+	 * KVM contexts as the original, and they may have different
+	 * memory-views.
+	 */
 
 	mutex_unlock(&kvm->lock);
 	return 0;
+74-63
arch/x86/kvm/svm/svm.c
···
 		svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl &
 			V_IRQ_INJECTION_BITS_MASK;
+
+		svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
 	}
 
 	vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
···
 	/* Both #GP cases have zero error_code */
 	if (error_code)
+		goto reinject;
+
+	/* All SVM instructions expect page aligned RAX */
+	if (svm->vmcb->save.rax & ~PAGE_MASK)
 		goto reinject;
 
 	/* Decode the instruction for usage later */
···
 	struct kvm_host_map map_save;
 	int ret;
 
-	if (is_guest_mode(vcpu)) {
-		/* FED8h - SVM Guest */
-		put_smstate(u64, smstate, 0x7ed8, 1);
-		/* FEE0h - SVM Guest VMCB Physical Address */
-		put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa);
+	if (!is_guest_mode(vcpu))
+		return 0;
 
-		svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
-		svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
-		svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
+	/* FED8h - SVM Guest */
+	put_smstate(u64, smstate, 0x7ed8, 1);
+	/* FEE0h - SVM Guest VMCB Physical Address */
+	put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa);
 
-		ret = nested_svm_vmexit(svm);
-		if (ret)
-			return ret;
+	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
+	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
+	svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
 
-		/*
-		 * KVM uses VMCB01 to store L1 host state while L2 runs but
-		 * VMCB01 is going to be used during SMM and thus the state will
-		 * be lost. Temporary save non-VMLOAD/VMSAVE state to the host save
-		 * area pointed to by MSR_VM_HSAVE_PA. APM guarantees that the
-		 * format of the area is identical to guest save area offsetted
-		 * by 0x400 (matches the offset of 'struct vmcb_save_area'
-		 * within 'struct vmcb'). Note: HSAVE area may also be used by
-		 * L1 hypervisor to save additional host context (e.g. KVM does
-		 * that, see svm_prepare_guest_switch()) which must be
-		 * preserved.
-		 */
-		if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
-				 &map_save) == -EINVAL)
-			return 1;
+	ret = nested_svm_vmexit(svm);
+	if (ret)
+		return ret;
 
-		BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);
+	/*
+	 * KVM uses VMCB01 to store L1 host state while L2 runs but
+	 * VMCB01 is going to be used during SMM and thus the state will
+	 * be lost. Temporary save non-VMLOAD/VMSAVE state to the host save
+	 * area pointed to by MSR_VM_HSAVE_PA. APM guarantees that the
+	 * format of the area is identical to guest save area offsetted
+	 * by 0x400 (matches the offset of 'struct vmcb_save_area'
+	 * within 'struct vmcb'). Note: HSAVE area may also be used by
+	 * L1 hypervisor to save additional host context (e.g. KVM does
+	 * that, see svm_prepare_guest_switch()) which must be
+	 * preserved.
+	 */
+	if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
+			 &map_save) == -EINVAL)
+		return 1;
 
-		svm_copy_vmrun_state(map_save.hva + 0x400,
-				     &svm->vmcb01.ptr->save);
+	BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);
 
-		kvm_vcpu_unmap(vcpu, &map_save, true);
-	}
+	svm_copy_vmrun_state(map_save.hva + 0x400,
+			     &svm->vmcb01.ptr->save);
+
+	kvm_vcpu_unmap(vcpu, &map_save, true);
 	return 0;
 }
···
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct kvm_host_map map, map_save;
-	int ret = 0;
+	u64 saved_efer, vmcb12_gpa;
+	struct vmcb *vmcb12;
+	int ret;
 
-	if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
-		u64 saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
-		u64 guest = GET_SMSTATE(u64, smstate, 0x7ed8);
-		u64 vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0);
-		struct vmcb *vmcb12;
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
+		return 0;
 
-		if (guest) {
-			if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
-				return 1;
+	/* Non-zero if SMI arrived while vCPU was in guest mode. */
+	if (!GET_SMSTATE(u64, smstate, 0x7ed8))
+		return 0;
 
-			if (!(saved_efer & EFER_SVME))
-				return 1;
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
+		return 1;
 
-			if (kvm_vcpu_map(vcpu,
-					 gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL)
-				return 1;
+	saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
+	if (!(saved_efer & EFER_SVME))
+		return 1;
 
-			if (svm_allocate_nested(svm))
-				return 1;
+	vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0);
+	if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL)
+		return 1;
 
-			vmcb12 = map.hva;
+	ret = 1;
+	if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save) == -EINVAL)
+		goto unmap_map;
 
-			nested_load_control_from_vmcb12(svm, &vmcb12->control);
+	if (svm_allocate_nested(svm))
+		goto unmap_save;
 
-			ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12);
-			kvm_vcpu_unmap(vcpu, &map, true);
+	/*
+	 * Restore L1 host state from L1 HSAVE area as VMCB01 was
+	 * used during SMM (see svm_enter_smm())
+	 */
 
-			/*
-			 * Restore L1 host state from L1 HSAVE area as VMCB01 was
-			 * used during SMM (see svm_enter_smm())
-			 */
-			if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
-					 &map_save) == -EINVAL)
-				return 1;
+	svm_copy_vmrun_state(&svm->vmcb01.ptr->save, map_save.hva + 0x400);
 
-			svm_copy_vmrun_state(&svm->vmcb01.ptr->save,
-					     map_save.hva + 0x400);
+	/*
+	 * Enter the nested guest now
+	 */
 
-			kvm_vcpu_unmap(vcpu, &map_save, true);
-		}
-	}
+	vmcb12 = map.hva;
+	nested_load_control_from_vmcb12(svm, &vmcb12->control);
+	ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);
 
+unmap_save:
+	kvm_vcpu_unmap(vcpu, &map_save, true);
+unmap_map:
+	kvm_vcpu_unmap(vcpu, &map, true);
 	return ret;
 }
arch/x86/kvm/vmx/evmcs.c
···
 	switch (msr_index) {
 	case MSR_IA32_VMX_EXIT_CTLS:
 	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
-		ctl_high &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
+		ctl_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL;
 		break;
 	case MSR_IA32_VMX_ENTRY_CTLS:
 	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
-		ctl_high &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
+		ctl_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL;
 		break;
 	case MSR_IA32_VMX_PROCBASED_CTLS2:
-		ctl_high &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+		ctl_high &= ~EVMCS1_UNSUPPORTED_2NDEXEC;
+		break;
+	case MSR_IA32_VMX_PINBASED_CTLS:
+		ctl_high &= ~EVMCS1_UNSUPPORTED_PINCTRL;
+		break;
+	case MSR_IA32_VMX_VMFUNC:
+		ctl_low &= ~EVMCS1_UNSUPPORTED_VMFUNC;
 		break;
 	}
+15-9
arch/x86/kvm/vmx/nested.c
···
 	 * Guest state is invalid and unrestricted guest is disabled,
 	 * which means L1 attempted VMEntry to L2 with invalid state.
 	 * Fail the VMEntry.
+	 *
+	 * However when force loading the guest state (SMM exit or
+	 * loading nested state after migration, it is possible to
+	 * have invalid guest state now, which will be later fixed by
+	 * restoring L2 register state
 	 */
-	if (CC(!vmx_guest_state_valid(vcpu))) {
+	if (CC(from_vmentry && !vmx_guest_state_valid(vcpu))) {
 		*entry_failure_code = ENTRY_FAIL_DEFAULT;
 		return -EINVAL;
 	}
···
 	if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
 				vmcs12->vm_exit_msr_load_count))
 		nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
+
+	to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu);
 }
 
 static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
···
 	return -ENOMEM;
 }
 
-/*
- * Emulate the VMXON instruction.
- * Currently, we just remember that VMX is active, and do not save or even
- * inspect the argument to VMXON (the so-called "VMXON pointer") because we
- * do not currently need to store anything in that guest-allocated memory
- * region. Consequently, VMCLEAR and VMPTRLD also do not verify that the their
- * argument is different from the VMXON pointer (which the spec says they do).
- */
+/* Emulate the VMXON instruction. */
 static int handle_vmon(struct kvm_vcpu *vcpu)
 {
 	int ret;
···
 		return true;
 	case EXIT_REASON_VMFUNC:
 		/* VM functions are emulated through L2->L0 vmexits. */
+		return true;
+	case EXIT_REASON_BUS_LOCK:
+		/*
+		 * At present, bus lock VM exit is never exposed to L1.
+		 * Handle L2's bus locks in L0 directly.
+		 */
 		return true;
 	default:
 		break;
+27-12
arch/x86/kvm/vmx/vmx.c
···
 	vmx_prepare_switch_to_host(to_vmx(vcpu));
 }
 
-static bool emulation_required(struct kvm_vcpu *vcpu)
+bool vmx_emulation_required(struct kvm_vcpu *vcpu)
 {
 	return emulate_invalid_guest_state && !vmx_guest_state_valid(vcpu);
 }
···
 	vmcs_writel(GUEST_RFLAGS, rflags);
 
 	if ((old_rflags ^ vmx->rflags) & X86_EFLAGS_VM)
-		vmx->emulation_required = emulation_required(vcpu);
+		vmx->emulation_required = vmx_emulation_required(vcpu);
 }
 
 u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
···
 				    &msr_info->data))
 			return 1;
 		/*
-		 * Enlightened VMCS v1 doesn't have certain fields, but buggy
-		 * Hyper-V versions are still trying to use corresponding
-		 * features when they are exposed. Filter out the essential
-		 * minimum.
+		 * Enlightened VMCS v1 doesn't have certain VMCS fields but
+		 * instead of just ignoring the features, different Hyper-V
+		 * versions are either trying to use them and fail or do some
+		 * sanity checking and refuse to boot. Filter all unsupported
+		 * features out.
 		 */
 		if (!msr_info->host_initiated &&
 		    vmx->nested.enlightened_vmcs_enabled)
···
 	}
 
 	/* depends on vcpu->arch.cr0 to be set to a new value */
-	vmx->emulation_required = emulation_required(vcpu);
+	vmx->emulation_required = vmx_emulation_required(vcpu);
 }
 
 static int vmx_get_max_tdp_level(void)
···
 {
 	__vmx_set_segment(vcpu, var, seg);
 
-	to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
+	to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu);
 }
 
 static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
···
 		     vmx->loaded_vmcs->soft_vnmi_blocked))
 		vmx->loaded_vmcs->entry_time = ktime_get();
 
-	/* Don't enter VMX if guest state is invalid, let the exit handler
-	   start emulation until we arrive back to a valid state */
-	if (vmx->emulation_required)
+	/*
+	 * Don't enter VMX if guest state is invalid, let the exit handler
+	 * start emulation until we arrive back to a valid state. Synthesize a
+	 * consistency check VM-Exit due to invalid guest state and bail.
+	 */
+	if (unlikely(vmx->emulation_required)) {
+
+		/* We don't emulate invalid state of a nested guest */
+		vmx->fail = is_guest_mode(vcpu);
+
+		vmx->exit_reason.full = EXIT_REASON_INVALID_STATE;
+		vmx->exit_reason.failed_vmentry = 1;
+		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1);
+		vmx->exit_qualification = ENTRY_FAIL_DEFAULT;
+		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2);
+		vmx->exit_intr_info = 0;
 		return EXIT_FASTPATH_NONE;
+	}
 
 	trace_kvm_entry(vcpu);
···
 	 */
 	tsx_ctrl = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL);
 	if (tsx_ctrl)
-		vmx->guest_uret_msrs[i].mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
+		tsx_ctrl->mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
 	}
 
 	err = alloc_loaded_vmcs(&vmx->vmcs01);
+1-4
arch/x86/kvm/vmx/vmx.h
···
 	 * only loaded into hardware when necessary, e.g. SYSCALL #UDs outside
 	 * of 64-bit mode or if EFER.SCE=1, thus the SYSCALL MSRs don't need to
 	 * be loaded into hardware if those conditions aren't met.
-	 * nr_active_uret_msrs tracks the number of MSRs that need to be loaded
-	 * into hardware when running the guest. guest_uret_msrs[] is resorted
-	 * whenever the number of "active" uret MSRs is modified.
 	 */
 	struct vmx_uret_msr guest_uret_msrs[MAX_NR_USER_RETURN_MSRS];
-	int nr_active_uret_msrs;
 	bool guest_uret_msrs_loaded;
 #ifdef CONFIG_X86_64
 	u64 msr_host_kernel_gs_base;
···
 void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
 			unsigned long fs_base, unsigned long gs_base);
 int vmx_get_cpl(struct kvm_vcpu *vcpu);
+bool vmx_emulation_required(struct kvm_vcpu *vcpu);
 unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
 void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
 u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
+26-2
arch/x86/kvm/x86.c
···
 	MSR_ARCH_PERFMON_EVENTSEL0 + 12, MSR_ARCH_PERFMON_EVENTSEL0 + 13,
 	MSR_ARCH_PERFMON_EVENTSEL0 + 14, MSR_ARCH_PERFMON_EVENTSEL0 + 15,
 	MSR_ARCH_PERFMON_EVENTSEL0 + 16, MSR_ARCH_PERFMON_EVENTSEL0 + 17,
+
+	MSR_K7_EVNTSEL0, MSR_K7_EVNTSEL1, MSR_K7_EVNTSEL2, MSR_K7_EVNTSEL3,
+	MSR_K7_PERFCTR0, MSR_K7_PERFCTR1, MSR_K7_PERFCTR2, MSR_K7_PERFCTR3,
+	MSR_F15H_PERF_CTL0, MSR_F15H_PERF_CTL1, MSR_F15H_PERF_CTL2,
+	MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, MSR_F15H_PERF_CTL5,
+	MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, MSR_F15H_PERF_CTR2,
+	MSR_F15H_PERF_CTR3, MSR_F15H_PERF_CTR4, MSR_F15H_PERF_CTR5,
 };
 
 static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_all)];
···
 			offsetof(struct compat_vcpu_info, time));
 	if (vcpu->xen.vcpu_time_info_set)
 		kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_time_info_cache, 0);
-	if (v == kvm_get_vcpu(v->kvm, 0))
+	if (!v->vcpu_idx)
 		kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
 	return 0;
 }
···
 
 		/* Process a latched INIT or SMI, if any. */
 		kvm_make_request(KVM_REQ_EVENT, vcpu);
+
+		/*
+		 * Even if KVM_SET_SREGS2 loaded PDPTRs out of band,
+		 * on SMM exit we still need to reload them from
+		 * guest memory
+		 */
+		vcpu->arch.pdptrs_from_userspace = false;
 	}
 
 	kvm_mmu_reset_context(vcpu);
···
 	int r;
 
 	vcpu->arch.last_vmentry_cpu = -1;
+	vcpu->arch.regs_avail = ~0;
+	vcpu->arch.regs_dirty = ~0;
 
 	if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))
 		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
···
 
 	kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
 	kvm_rip_write(vcpu, 0xfff0);
+
+	vcpu->arch.cr3 = 0;
+	kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
 
 	/*
 	 * CR0.CD/NW are set on RESET, preserved on INIT. Note, some versions
···
 
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
+	int ret;
+
 	if (type)
 		return -EINVAL;
+
+	ret = kvm_page_track_init(kvm);
+	if (ret)
+		return ret;
 
 	INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
 	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
···
 
 	kvm_apicv_init(kvm);
 	kvm_hv_init_vm(kvm);
-	kvm_page_track_init(kvm);
 	kvm_mmu_init_vm(kvm);
 	kvm_xen_init_vm(kvm);
+2-2
arch/x86/lib/insn.c
···
 	((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)
 
 #define __get_next(t, insn)	\
-	({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); leXX_to_cpu(t, r); })
+	({ t r; memcpy(&r, insn->next_byte, sizeof(t)); insn->next_byte += sizeof(t); leXX_to_cpu(t, r); })
 
 #define __peek_nbyte_next(t, insn, n)	\
-	({ t r = *(t*)((insn)->next_byte + n); leXX_to_cpu(t, r); })
+	({ t r; memcpy(&r, (insn)->next_byte + n, sizeof(t)); leXX_to_cpu(t, r); })
 
 #define get_next(t, insn)	\
 	({ if (unlikely(!validate_next(t, insn, 0))) goto err_out; __get_next(t, insn); })
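The insn.c change replaces a cast-and-dereference with memcpy because dereferencing a pointer that is not suitably aligned for its type is undefined behaviour (and faults on some architectures), while a memcpy of sizeof(t) bytes is always well defined and compiles down to a plain load where that is safe. A minimal userspace sketch of the same idiom; the helper name is illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t get_unaligned_32(const unsigned char *p)
{
	uint32_t r;

	memcpy(&r, p, sizeof(r));	/* no alignment assumption */
	return r;			/* byte-swap here on big-endian hosts */
}

int main(void)
{
	unsigned char buf[8] = { 0, 0x78, 0x56, 0x34, 0x12 };

	/* buf + 1 is deliberately misaligned for a 4-byte load */
	printf("0x%08x\n", get_unaligned_32(buf + 1)); /* 0x12345678 on LE */
	return 0;
}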
+18-8
arch/x86/mm/fault.c
···
 
 static noinline void
 kernelmode_fixup_or_oops(struct pt_regs *regs, unsigned long error_code,
-			 unsigned long address, int signal, int si_code)
+			 unsigned long address, int signal, int si_code,
+			 u32 pkey)
 {
 	WARN_ON_ONCE(user_mode(regs));
 
···
 
 		set_signal_archinfo(address, error_code);
 
-		/* XXX: hwpoison faults will set the wrong code. */
-		force_sig_fault(signal, si_code, (void __user *)address);
+		if (si_code == SEGV_PKUERR) {
+			force_sig_pkuerr((void __user *)address, pkey);
+		} else {
+			/* XXX: hwpoison faults will set the wrong code. */
+			force_sig_fault(signal, si_code, (void __user *)address);
+		}
 	}
 
 	/*
···
 	struct task_struct *tsk = current;
 
 	if (!user_mode(regs)) {
-		kernelmode_fixup_or_oops(regs, error_code, address, pkey, si_code);
+		kernelmode_fixup_or_oops(regs, error_code, address,
+					 SIGSEGV, si_code, pkey);
 		return;
 	}
···
 {
 	/* Kernel mode? Handle exceptions or die: */
 	if (!user_mode(regs)) {
-		kernelmode_fixup_or_oops(regs, error_code, address, SIGBUS, BUS_ADRERR);
+		kernelmode_fixup_or_oops(regs, error_code, address,
+					 SIGBUS, BUS_ADRERR, ARCH_DEFAULT_PKEY);
 		return;
 	}
···
 	 */
 	if (!user_mode(regs))
 		kernelmode_fixup_or_oops(regs, error_code, address,
-					 SIGBUS, BUS_ADRERR);
+					 SIGBUS, BUS_ADRERR,
+					 ARCH_DEFAULT_PKEY);
 	return;
 }
···
 		return;
 
 	if (fatal_signal_pending(current) && !user_mode(regs)) {
-		kernelmode_fixup_or_oops(regs, error_code, address, 0, 0);
+		kernelmode_fixup_or_oops(regs, error_code, address,
+					 0, 0, ARCH_DEFAULT_PKEY);
 		return;
 	}
···
 	/* Kernel mode? Handle exceptions or die: */
 	if (!user_mode(regs)) {
 		kernelmode_fixup_or_oops(regs, error_code, address,
-					 SIGSEGV, SEGV_MAPERR);
+					 SIGSEGV, SEGV_MAPERR,
+					 ARCH_DEFAULT_PKEY);
 		return;
 	}
+48-18
arch/x86/net/bpf_jit_comp.c
···
 		if (insn->imm == (BPF_AND | BPF_FETCH) ||
 		    insn->imm == (BPF_OR | BPF_FETCH) ||
 		    insn->imm == (BPF_XOR | BPF_FETCH)) {
-			u8 *branch_target;
 			bool is64 = BPF_SIZE(insn->code) == BPF_DW;
 			u32 real_src_reg = src_reg;
+			u32 real_dst_reg = dst_reg;
+			u8 *branch_target;
 
 			/*
 			 * Can't be implemented with a single x86 insn.
···
 			emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
 			if (src_reg == BPF_REG_0)
 				real_src_reg = BPF_REG_AX;
+			if (dst_reg == BPF_REG_0)
+				real_dst_reg = BPF_REG_AX;
 
 			branch_target = prog;
 			/* Load old value */
 			emit_ldx(&prog, BPF_SIZE(insn->code),
-				 BPF_REG_0, dst_reg, insn->off);
+				 BPF_REG_0, real_dst_reg, insn->off);
 			/*
 			 * Perform the (commutative) operation locally,
 			 * put the result in the AUX_REG.
···
 				  add_2reg(0xC0, AUX_REG, real_src_reg));
 			/* Attempt to swap in new value */
 			err = emit_atomic(&prog, BPF_CMPXCHG,
-					  dst_reg, AUX_REG, insn->off,
+					  real_dst_reg, AUX_REG,
+					  insn->off,
 					  BPF_SIZE(insn->code));
 			if (WARN_ON(err))
 				return err;
···
 			/* Restore R0 after clobbering RAX */
 			emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
 			break;
-
 		}
 
 		err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
-				   insn->off, BPF_SIZE(insn->code));
+				  insn->off, BPF_SIZE(insn->code));
 		if (err)
 			return err;
 		break;
···
 }
 
 static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
-			   struct bpf_prog *p, int stack_size, bool mod_ret)
+			   struct bpf_prog *p, int stack_size, bool save_ret)
 {
 	u8 *prog = *pprog;
 	u8 *jmp_insn;
···
 	if (emit_call(&prog, p->bpf_func, prog))
 		return -EINVAL;
 
-	/* BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
+	/*
+	 * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
 	 * of the previous call which is then passed on the stack to
 	 * the next BPF program.
+	 *
+	 * BPF_TRAMP_FENTRY trampoline may need to return the return
+	 * value of BPF_PROG_TYPE_STRUCT_OPS prog.
 	 */
-	if (mod_ret)
+	if (save_ret)
 		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
 
 	/* replace 2 nops with JE insn, since jmp target is known */
···
 }
 
 static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
-		      struct bpf_tramp_progs *tp, int stack_size)
+		      struct bpf_tramp_progs *tp, int stack_size,
+		      bool save_ret)
 {
 	int i;
 	u8 *prog = *pprog;
 
 	for (i = 0; i < tp->nr_progs; i++) {
-		if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, false))
+		if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size,
+				    save_ret))
 			return -EINVAL;
 	}
 	*pprog = prog;
···
 
 	*pprog = prog;
 	return 0;
+}
+
+static bool is_valid_bpf_tramp_flags(unsigned int flags)
+{
+	if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&
+	    (flags & BPF_TRAMP_F_SKIP_FRAME))
+		return false;
+
+	/*
+	 * BPF_TRAMP_F_RET_FENTRY_RET is only used by bpf_struct_ops,
+	 * and it must be used alone.
+	 */
+	if ((flags & BPF_TRAMP_F_RET_FENTRY_RET) &&
+	    (flags & ~BPF_TRAMP_F_RET_FENTRY_RET))
+		return false;
+
+	return true;
 }
 
 /* Example:
···
 	struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN];
 	u8 **branches = NULL;
 	u8 *prog;
+	bool save_ret;
 
 	/* x86-64 supports up to 6 arguments. 7+ can be added in the future */
 	if (nr_args > 6)
 		return -ENOTSUPP;
 
-	if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&
-	    (flags & BPF_TRAMP_F_SKIP_FRAME))
+	if (!is_valid_bpf_tramp_flags(flags))
 		return -EINVAL;
 
-	if (flags & BPF_TRAMP_F_CALL_ORIG)
-		stack_size += 8; /* room for return value of orig_call */
+	/* room for return value of orig_call or fentry prog */
+	save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
+	if (save_ret)
+		stack_size += 8;
 
 	if (flags & BPF_TRAMP_F_IP_ARG)
 		stack_size += 8; /* room for IP address argument */
···
 	}
 
 	if (fentry->nr_progs)
-		if (invoke_bpf(m, &prog, fentry, stack_size))
+		if (invoke_bpf(m, &prog, fentry, stack_size,
+			       flags & BPF_TRAMP_F_RET_FENTRY_RET))
 			return -EINVAL;
 
 	if (fmod_ret->nr_progs) {
···
 	}
 
 	if (fexit->nr_progs)
-		if (invoke_bpf(m, &prog, fexit, stack_size)) {
+		if (invoke_bpf(m, &prog, fexit, stack_size, false)) {
 			ret = -EINVAL;
 			goto cleanup;
 		}
···
 			ret = -EINVAL;
 			goto cleanup;
 		}
-		/* restore original return value back into RAX */
-		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
 	}
+
+	/* restore return value of orig_call or fentry prog back into RAX */
+	if (save_ret)
+		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
 
 	EMIT1(0x5B); /* pop rbx */
 	EMIT1(0xC9); /* leave */
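The new is_valid_bpf_tramp_flags() helper encodes two common flag rules: a pair of mutually exclusive flags, and one flag that must never be combined with any other. A minimal userspace sketch of those two checks; the flag names are illustrative stand-ins:

#include <stdbool.h>
#include <stdio.h>

#define F_RESTORE_REGS   (1u << 0)
#define F_SKIP_FRAME     (1u << 1)
#define F_RET_FENTRY_RET (1u << 2)

static bool flags_valid(unsigned int flags)
{
	/* these two cannot be combined */
	if ((flags & F_RESTORE_REGS) && (flags & F_SKIP_FRAME))
		return false;

	/* this one must be used alone: any other bit set is an error */
	if ((flags & F_RET_FENTRY_RET) && (flags & ~F_RET_FENTRY_RET))
		return false;

	return true;
}

int main(void)
{
	printf("%d\n", flags_valid(F_RET_FENTRY_RET));               /* 1 */
	printf("%d\n", flags_valid(F_RET_FENTRY_RET | F_SKIP_FRAME)); /* 0 */
	printf("%d\n", flags_valid(F_RESTORE_REGS | F_SKIP_FRAME));   /* 0 */
	return 0;
}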
+9-6
arch/x86/xen/enlighten_pv.c
···
 	preempt_enable();
 }
 
-static void xen_convert_trap_info(const struct desc_ptr *desc,
-				  struct trap_info *traps)
+static unsigned xen_convert_trap_info(const struct desc_ptr *desc,
+				      struct trap_info *traps, bool full)
 {
 	unsigned in, out, count;
 
···
 	for (in = out = 0; in < count; in++) {
 		gate_desc *entry = (gate_desc *)(desc->address) + in;
 
-		if (cvt_gate_to_trap(in, entry, &traps[out]))
+		if (cvt_gate_to_trap(in, entry, &traps[out]) || full)
 			out++;
 	}
-	traps[out].address = 0;
+
+	return out;
 }
 
 void xen_copy_trap_info(struct trap_info *traps)
 {
 	const struct desc_ptr *desc = this_cpu_ptr(&idt_desc);
 
-	xen_convert_trap_info(desc, traps);
+	xen_convert_trap_info(desc, traps, true);
 }
 
 /* Load a new IDT into Xen. In principle this can be per-CPU, so we
···
 {
 	static DEFINE_SPINLOCK(lock);
 	static struct trap_info traps[257];
+	unsigned out;
 
 	trace_xen_cpu_load_idt(desc);
 
···
 
 	memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc));
 
-	xen_convert_trap_info(desc, traps);
+	out = xen_convert_trap_info(desc, traps, false);
+	memset(&traps[out], 0, sizeof(traps[0]));
 
 	xen_mc_flush();
 	if (HYPERVISOR_set_trap_table(traps))
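The API change here moves sentinel termination out of the converter: instead of writing a zero entry itself (which can land one slot past a tightly sized caller buffer), the converter returns how many entries it produced and the caller that owns the 257-entry table appends the sentinel. A small sketch of the same contract, with an invented filter standing in for cvt_gate_to_trap():

#include <stdio.h>
#include <string.h>

struct trap { unsigned long address; };

/* converts up to n entries, returns how many were actually written */
static unsigned convert(struct trap *out, unsigned n, int keep_all)
{
	unsigned in, cnt = 0;

	for (in = 0; in < n; in++)
		if (keep_all || (in % 2 == 0))	/* stand-in filter */
			out[cnt++].address = 0x1000 + in;
	return cnt;
}

int main(void)
{
	struct trap traps[9];	/* room for 8 entries plus the sentinel */
	unsigned out = convert(traps, 8, 0);

	/* the caller, not the callee, terminates the table */
	memset(&traps[out], 0, sizeof(traps[0]));
	printf("%u entries, sentinel at index %u\n", out, out);
	return 0;
}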
+2-2
arch/x86/xen/pci-swiotlb-xen.c
···
 #endif
 #include <linux/export.h>
 
-int xen_swiotlb __read_mostly;
+static int xen_swiotlb __read_mostly;
 
 /*
  * pci_xen_swiotlb_detect - set xen_swiotlb to 1 if necessary
···
 	return xen_swiotlb;
 }
 
-void __init pci_xen_swiotlb_init(void)
+static void __init pci_xen_swiotlb_init(void)
 {
 	if (xen_swiotlb) {
 		xen_swiotlb_init_early();
-4
arch/x86/xen/smp_pv.c
···
 
 	gdt = get_cpu_gdt_rw(cpu);
 
-	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
-
 	/*
 	 * Bring up the CPU in cpu_bringup_and_idle() with the stack
 	 * pointing just below where pt_regs would be if it were a normal
···
 	ctxt->user_regs.esp = (unsigned long)task_pt_regs(idle);
 
 	xen_copy_trap_info(ctxt->trap_ctxt);
-
-	ctxt->ldt_ents = 0;
 
 	BUG_ON((unsigned long)gdt & ~PAGE_MASK);
+3-13
block/bfq-iosched.c
···
 	 * are likely to increase the throughput.
 	 */
 	bfqq->new_bfqq = new_bfqq;
-	/*
-	 * The above assignment schedules the following redirections:
-	 * each time some I/O for bfqq arrives, the process that
-	 * generated that I/O is disassociated from bfqq and
-	 * associated with new_bfqq. Here we increases new_bfqq->ref
-	 * in advance, adding the number of processes that are
-	 * expected to be associated with new_bfqq as they happen to
-	 * issue I/O.
-	 */
 	new_bfqq->ref += process_refs;
 	return new_bfqq;
 }
···
 		     void *io_struct, bool request, struct bfq_io_cq *bic)
 {
 	struct bfq_queue *in_service_bfqq, *new_bfqq;
-
-	/* if a merge has already been setup, then proceed with that first */
-	if (bfqq->new_bfqq)
-		return bfqq->new_bfqq;
 
 	/*
 	 * Check delayed stable merge for rotational or non-queueing
···
 	 */
 	if (bfq_too_late_for_merging(bfqq))
 		return NULL;
+
+	if (bfqq->new_bfqq)
+		return bfqq->new_bfqq;
 
 	if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
 		return NULL;
+1-1
block/bio.c
···
 	if (!bio_integrity_endio(bio))
 		return;
 
-	if (bio->bi_bdev)
+	if (bio->bi_bdev && bio_flagged(bio, BIO_TRACKED))
 		rq_qos_done_bio(bio->bi_bdev->bd_disk->queue, bio);
 
 	if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
block/fops.c
···
 #include <linux/task_io_accounting_ops.h>
 #include <linux/falloc.h>
 #include <linux/suspend.h>
+#include <linux/fs.h>
 #include "blk.h"
 
 static struct inode *bdev_file_inode(struct file *file)
···
 static long blkdev_fallocate(struct file *file, int mode, loff_t start,
 			     loff_t len)
 {
-	struct block_device *bdev = I_BDEV(bdev_file_inode(file));
+	struct inode *inode = bdev_file_inode(file);
+	struct block_device *bdev = I_BDEV(inode);
 	loff_t end = start + len - 1;
 	loff_t isize;
 	int error;
···
 	if ((start | len) & (bdev_logical_block_size(bdev) - 1))
 		return -EINVAL;
 
+	filemap_invalidate_lock(inode->i_mapping);
+
 	/* Invalidate the page cache, including dirty pages. */
 	error = truncate_bdev_range(bdev, file->f_mode, start, end);
 	if (error)
-		return error;
+		goto fail;
 
 	switch (mode) {
 	case FALLOC_FL_ZERO_RANGE:
···
 					     GFP_KERNEL, 0);
 		break;
 	default:
-		return -EOPNOTSUPP;
+		error = -EOPNOTSUPP;
 	}
-	if (error)
-		return error;
 
-	/*
-	 * Invalidate the page cache again; if someone wandered in and dirtied
-	 * a page, we just discard it - userspace has no way of knowing whether
-	 * the write happened before or after discard completing...
-	 */
-	return truncate_bdev_range(bdev, file->f_mode, start, end);
+ fail:
+	filemap_invalidate_unlock(inode->i_mapping);
+	return error;
 }
 
 const struct file_operations def_blk_fops = {
+12
drivers/acpi/nfit/core.c
···
 		ndr_desc->target_node = NUMA_NO_NODE;
 	}
 
+	/* Fallback to address based numa information if node lookup failed */
+	if (ndr_desc->numa_node == NUMA_NO_NODE) {
+		ndr_desc->numa_node = memory_add_physaddr_to_nid(spa->address);
+		dev_info(acpi_desc->dev, "changing numa node from %d to %d for nfit region [%pa-%pa]",
+			NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end);
+	}
+	if (ndr_desc->target_node == NUMA_NO_NODE) {
+		ndr_desc->target_node = phys_to_target_node(spa->address);
+		dev_info(acpi_desc->dev, "changing target node from %d to %d for nfit region [%pa-%pa]",
+			NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end);
+	}
+
 	/*
 	 * Persistence domain bits are hierarchical, if
 	 * ACPI_NFIT_CAPABILITY_CACHE_FLUSH is set then
+7-16
drivers/acpi/osl.c
···
 #define should_use_kmap(pfn)   page_is_ram(pfn)
 #endif
 
-static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz,
-			      bool memory)
+static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
 {
 	unsigned long pfn;
 
···
 			return NULL;
 		return (void __iomem __force *)kmap(pfn_to_page(pfn));
 	} else
-		return memory ? acpi_os_memmap(pg_off, pg_sz) :
-				acpi_os_ioremap(pg_off, pg_sz);
+		return acpi_os_ioremap(pg_off, pg_sz);
 }
 
 static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
···
 }
 
 /**
- * __acpi_os_map_iomem - Get a virtual address for a given physical address range.
+ * acpi_os_map_iomem - Get a virtual address for a given physical address range.
  * @phys: Start of the physical address range to map.
  * @size: Size of the physical address range to map.
- * @memory: true if remapping memory, false if IO
  *
  * Look up the given physical address range in the list of existing ACPI memory
  * mappings. If found, get a reference to it and return a pointer to it (its
···
 * During early init (when acpi_permanent_mmap has not been set yet) this
 * routine simply calls __acpi_map_table() to get the job done.
 */
-static void __iomem __ref
-*__acpi_os_map_iomem(acpi_physical_address phys, acpi_size size, bool memory)
+void __iomem __ref
+*acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
 {
 	struct acpi_ioremap *map;
 	void __iomem *virt;
···
 
 	pg_off = round_down(phys, PAGE_SIZE);
 	pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
-	virt = acpi_map(phys, size, memory);
+	virt = acpi_map(phys, size);
 	if (!virt) {
 		mutex_unlock(&acpi_ioremap_lock);
 		kfree(map);
···
 	mutex_unlock(&acpi_ioremap_lock);
 	return map->virt + (phys - map->phys);
 }
-
-void __iomem *__ref
-acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
-{
-	return __acpi_os_map_iomem(phys, size, false);
-}
 EXPORT_SYMBOL_GPL(acpi_os_map_iomem);
 
 void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
 {
-	return (void *)__acpi_os_map_iomem(phys, size, true);
+	return (void *)acpi_os_map_iomem(phys, size);
 }
 EXPORT_SYMBOL_GPL(acpi_os_map_memory);
 
+46-12
drivers/android/binder.c
···
 }
 
 static void binder_transaction_buffer_release(struct binder_proc *proc,
+					      struct binder_thread *thread,
 					      struct binder_buffer *buffer,
 					      binder_size_t failed_at,
 					      bool is_failure)
···
 						&proc->alloc, &fd, buffer,
 						offset, sizeof(fd));
 				WARN_ON(err);
-				if (!err)
+				if (!err) {
 					binder_deferred_fd_close(fd);
+					/*
+					 * Need to make sure the thread goes
+					 * back to userspace to complete the
+					 * deferred close
+					 */
+					if (thread)
+						thread->looper_need_return = true;
+				}
 			}
 		} break;
 		default:
···
 	if (reply) {
 		binder_enqueue_thread_work(thread, tcomplete);
 		binder_inner_proc_lock(target_proc);
-		if (target_thread->is_dead || target_proc->is_frozen) {
-			return_error = target_thread->is_dead ?
-				BR_DEAD_REPLY : BR_FROZEN_REPLY;
+		if (target_thread->is_dead) {
+			return_error = BR_DEAD_REPLY;
 			binder_inner_proc_unlock(target_proc);
 			goto err_dead_proc_or_thread;
 		}
···
 err_copy_data_failed:
 	binder_free_txn_fixups(t);
 	trace_binder_transaction_failed_buffer_release(t->buffer);
-	binder_transaction_buffer_release(target_proc, t->buffer,
+	binder_transaction_buffer_release(target_proc, NULL, t->buffer,
 					  buffer_offset, true);
 	if (target_node)
 		binder_dec_node_tmpref(target_node);
···
 * Cleanup buffer and free it.
 */
 static void
-binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
+binder_free_buf(struct binder_proc *proc,
+		struct binder_thread *thread,
+		struct binder_buffer *buffer)
 {
 	binder_inner_proc_lock(proc);
 	if (buffer->transaction) {
···
 		binder_node_inner_unlock(buf_node);
 	}
 	trace_binder_transaction_buffer_release(buffer);
-	binder_transaction_buffer_release(proc, buffer, 0, false);
+	binder_transaction_buffer_release(proc, thread, buffer, 0, false);
 	binder_alloc_free_buf(&proc->alloc, buffer);
 }
 
···
 			     proc->pid, thread->pid, (u64)data_ptr,
 			     buffer->debug_id,
 			     buffer->transaction ? "active" : "finished");
-			binder_free_buf(proc, buffer);
+			binder_free_buf(proc, thread, buffer);
 			break;
 		}
 
···
 			buffer->transaction = NULL;
 			binder_cleanup_transaction(t, "fd fixups failed",
 						   BR_FAILED_REPLY);
-			binder_free_buf(proc, buffer);
+			binder_free_buf(proc, thread, buffer);
 			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
 				     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
 				     proc->pid, thread->pid,
···
 	return 0;
 }
 
+static bool binder_txns_pending_ilocked(struct binder_proc *proc)
+{
+	struct rb_node *n;
+	struct binder_thread *thread;
+
+	if (proc->outstanding_txns > 0)
+		return true;
+
+	for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
+		thread = rb_entry(n, struct binder_thread, rb_node);
+		if (thread->transaction_stack)
+			return true;
+	}
+	return false;
+}
+
 static int binder_ioctl_freeze(struct binder_freeze_info *info,
 			       struct binder_proc *target_proc)
 {
···
 			 (!target_proc->outstanding_txns),
 			 msecs_to_jiffies(info->timeout_ms));
 
-	if (!ret && target_proc->outstanding_txns)
-		ret = -EAGAIN;
+	/* Check pending transactions that wait for reply */
+	if (ret >= 0) {
+		binder_inner_proc_lock(target_proc);
+		if (binder_txns_pending_ilocked(target_proc))
+			ret = -EAGAIN;
+		binder_inner_proc_unlock(target_proc);
+	}
 
 	if (ret < 0) {
 		binder_inner_proc_lock(target_proc);
···
 {
 	struct binder_proc *target_proc;
 	bool found = false;
+	__u32 txns_pending;
 
 	info->sync_recv = 0;
 	info->async_recv = 0;
···
 		if (target_proc->pid == info->pid) {
 			found = true;
 			binder_inner_proc_lock(target_proc);
-			info->sync_recv |= target_proc->sync_recv;
+			txns_pending = binder_txns_pending_ilocked(target_proc);
+			info->sync_recv |= target_proc->sync_recv |
+					(txns_pending << 1);
 			info->async_recv |= target_proc->async_recv;
 			binder_inner_proc_unlock(target_proc);
 		}
+2
drivers/android/binder_internal.h
···
 *                        binder transactions
 *                        (protected by @inner_lock)
 * @sync_recv:            process received sync transactions since last frozen
+ *                        bit 0: received sync transaction after being frozen
+ *                        bit 1: new pending sync transaction during freezing
 *                        (protected by @inner_lock)
 * @async_recv:           process received async transactions since last frozen
 *                        (protected by @inner_lock)
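The two-bit encoding documented above is chosen so that old userspace, which only tests the word for non-zero, still sees a failure when either bit is set. A small sketch of the packing and unpacking; the macro names are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SYNC_RECV_AFTER_FROZEN	(1u << 0)	/* bit 0 */
#define SYNC_TXNS_PENDING	(1u << 1)	/* bit 1 */

static uint32_t pack_sync_recv(bool sync_recv, bool txns_pending)
{
	/* mirrors "sync_recv | (txns_pending << 1)" in the binder hunk */
	return (uint32_t)sync_recv | ((uint32_t)txns_pending << 1);
}

int main(void)
{
	uint32_t info = pack_sync_recv(false, true);

	printf("raw=%u after_frozen=%u pending=%u\n", info,
	       info & SYNC_RECV_AFTER_FROZEN,
	       !!(info & SYNC_TXNS_PENDING));
	return 0;
}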
+63-27
drivers/base/core.c
···
 
 	list_add(&link->s_hook, &sup->consumers);
 	list_add(&link->c_hook, &con->suppliers);
+	pr_debug("%pfwP Linked as a fwnode consumer to %pfwP\n",
+		 con, sup);
 out:
 	mutex_unlock(&fwnode_link_lock);
 
 	return ret;
+}
+
+/**
+ * __fwnode_link_del - Delete a link between two fwnode_handles.
+ * @link: the fwnode_link to be deleted
+ *
+ * The fwnode_link_lock needs to be held when this function is called.
+ */
+static void __fwnode_link_del(struct fwnode_link *link)
+{
+	pr_debug("%pfwP Dropping the fwnode link to %pfwP\n",
+		 link->consumer, link->supplier);
+	list_del(&link->s_hook);
+	list_del(&link->c_hook);
+	kfree(link);
 }
 
 /**
···
 	struct fwnode_link *link, *tmp;
 
 	mutex_lock(&fwnode_link_lock);
-	list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) {
-		list_del(&link->s_hook);
-		list_del(&link->c_hook);
-		kfree(link);
-	}
+	list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook)
+		__fwnode_link_del(link);
 	mutex_unlock(&fwnode_link_lock);
 }
 
···
 	struct fwnode_link *link, *tmp;
 
 	mutex_lock(&fwnode_link_lock);
-	list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook) {
-		list_del(&link->s_hook);
-		list_del(&link->c_hook);
-		kfree(link);
-	}
+	list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook)
+		__fwnode_link_del(link);
 	mutex_unlock(&fwnode_link_lock);
 }
 
···
 {
 	struct device_link *link;
 	int ret = 0;
+	struct fwnode_handle *sup_fw;
 
 	/*
 	 * Device waiting for supplier to become available is not allowed to
···
 	mutex_lock(&fwnode_link_lock);
 	if (dev->fwnode && !list_empty(&dev->fwnode->suppliers) &&
 	    !fw_devlink_is_permissive()) {
-		dev_dbg(dev, "probe deferral - wait for supplier %pfwP\n",
-			list_first_entry(&dev->fwnode->suppliers,
-					 struct fwnode_link,
-					 c_hook)->supplier);
+		sup_fw = list_first_entry(&dev->fwnode->suppliers,
+					  struct fwnode_link,
+					  c_hook)->supplier;
+		dev_err_probe(dev, -EPROBE_DEFER, "wait for supplier %pfwP\n",
+			      sup_fw);
 		mutex_unlock(&fwnode_link_lock);
 		return -EPROBE_DEFER;
 	}
···
 		if (link->status != DL_STATE_AVAILABLE &&
 		    !(link->flags & DL_FLAG_SYNC_STATE_ONLY)) {
 			device_links_missing_supplier(dev);
-			dev_dbg(dev, "probe deferral - supplier %s not ready\n",
-				dev_name(link->supplier));
+			dev_err_probe(dev, -EPROBE_DEFER,
+				      "supplier %s not ready\n",
+				      dev_name(link->supplier));
 			ret = -EPROBE_DEFER;
 			break;
 		}
···
 	struct device *sup_dev;
 	int ret = 0;
 
+	/*
+	 * In some cases, a device P might also be a supplier to its child node
+	 * C. However, this would defer the probe of C until the probe of P
+	 * completes successfully. This is perfectly fine in the device driver
+	 * model. device_add() doesn't guarantee probe completion of the device
+	 * by the time it returns.
+	 *
+	 * However, there are a few drivers that assume C will finish probing
+	 * as soon as it's added and before P finishes probing. So, we provide
+	 * a flag to let fw_devlink know not to delay the probe of C until the
+	 * probe of P completes successfully.
+	 *
+	 * When such a flag is set, we can't create device links where P is the
+	 * supplier of C as that would delay the probe of C.
+	 */
+	if (sup_handle->flags & FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD &&
+	    fwnode_is_ancestor_of(sup_handle, con->fwnode))
+		return -EINVAL;
+
 	sup_dev = get_dev_from_fwnode(sup_handle);
 	if (sup_dev) {
 		/*
···
 	 * be broken by applying logic. Check for these types of cycles and
 	 * break them so that devices in the cycle probe properly.
 	 *
-	 * If the supplier's parent is dependent on the consumer, then
-	 * the consumer-supplier dependency is a false dependency. So,
-	 * treat it as an invalid link.
+	 * If the supplier's parent is dependent on the consumer, then the
+	 * consumer and supplier have a cyclic dependency. Since fw_devlink
+	 * can't tell which of the inferred dependencies are incorrect, don't
+	 * enforce probe ordering between any of the devices in this cyclic
+	 * dependency. Do this by relaxing all the fw_devlink device links in
+	 * this cycle and by treating the fwnode link between the consumer and
+	 * the supplier as an invalid dependency.
 	 */
 	sup_dev = fwnode_get_next_parent_dev(sup_handle);
 	if (sup_dev && device_is_dependent(con, sup_dev)) {
-		dev_dbg(con, "Not linking to %pfwP - False link\n",
-			sup_handle);
+		dev_info(con, "Fixing up cyclic dependency with %pfwP (%s)\n",
+			 sup_handle, dev_name(sup_dev));
+		device_links_write_lock();
+		fw_devlink_relax_cycle(con, sup_dev);
+		device_links_write_unlock();
 		ret = -EINVAL;
 	} else {
 		/*
···
 		if (!own_link || ret == -EAGAIN)
 			continue;
 
-		list_del(&link->s_hook);
-		list_del(&link->c_hook);
-		kfree(link);
+		__fwnode_link_del(link);
 	}
 }
 
···
 		if (!own_link || ret == -EAGAIN)
 			continue;
 
-		list_del(&link->s_hook);
-		list_del(&link->c_hook);
-		kfree(link);
+		__fwnode_link_del(link);
 
 		/* If no device link was created, nothing more to do. */
 		if (ret)
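The reason __fwnode_link_del() is worth factoring out is that a fwnode link sits on two lists at once (the supplier's consumer list via s_hook, the consumer's supplier list via c_hook), so every delete must unhook both and free exactly once. A userspace sketch with a tiny doubly linked list standing in for the kernel's list_head:

#include <stdio.h>
#include <stdlib.h>

struct node { struct node *prev, *next; };

static void list_init(struct node *h) { h->prev = h->next = h; }
static void list_add(struct node *n, struct node *h)
{
	n->next = h->next; n->prev = h;
	h->next->prev = n; h->next = n;
}
static void list_del(struct node *n)
{
	n->prev->next = n->next; n->next->prev = n->prev;
}

struct fw_link {
	struct node s_hook;	/* on the supplier's consumers list */
	struct node c_hook;	/* on the consumer's suppliers list */
};

/* single place that unhooks both memberships and frees the link */
static void fw_link_del(struct fw_link *link)
{
	list_del(&link->s_hook);
	list_del(&link->c_hook);
	free(link);
}

int main(void)
{
	struct node consumers, suppliers;
	struct fw_link *link = malloc(sizeof(*link));

	list_init(&consumers);
	list_init(&suppliers);
	list_add(&link->s_hook, &consumers);
	list_add(&link->c_hook, &suppliers);

	fw_link_del(link);
	printf("both lists empty: %d %d\n",
	       consumers.next == &consumers, suppliers.next == &suppliers);
	return 0;
}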
drivers/firmware/Kconfig
···
 
 config QCOM_SCM
 	tristate "Qcom SCM driver"
-	depends on ARM || ARM64
+	depends on ARCH_QCOM || COMPILE_TEST
 	depends on HAVE_ARM_SMCCC
 	select RESET_CONTROLLER
 
+1-1
drivers/firmware/arm_scmi/Kconfig
···
 
 config ARM_SCMI_TRANSPORT_VIRTIO
 	bool "SCMI transport based on VirtIO"
-	depends on VIRTIO
+	depends on VIRTIO=y || VIRTIO=ARM_SCMI_PROTOCOL
 	select ARM_SCMI_HAVE_TRANSPORT
 	select ARM_SCMI_HAVE_MSG
 	help
+31-13
drivers/firmware/arm_scmi/virtio.c
···
 	if (vioch->is_rx) {
 		scmi_vio_feed_vq_rx(vioch, msg);
 	} else {
-		unsigned long flags;
-
-		spin_lock_irqsave(&vioch->lock, flags);
+		/* Here IRQs are assumed to be already disabled by the caller */
+		spin_lock(&vioch->lock);
 		list_add(&msg->list, &vioch->free_list);
-		spin_unlock_irqrestore(&vioch->lock, flags);
+		spin_unlock(&vioch->lock);
 	}
 }
 
 static void scmi_vio_complete_cb(struct virtqueue *vqueue)
 {
 	unsigned long ready_flags;
-	unsigned long flags;
 	unsigned int length;
 	struct scmi_vio_channel *vioch;
 	struct scmi_vio_msg *msg;
···
 			goto unlock_ready_out;
 		}
 
-		spin_lock_irqsave(&vioch->lock, flags);
+		/* IRQs already disabled here no need to irqsave */
+		spin_lock(&vioch->lock);
 		if (cb_enabled) {
 			virtqueue_disable_cb(vqueue);
 			cb_enabled = false;
···
 				goto unlock_out;
 			cb_enabled = true;
 		}
-		spin_unlock_irqrestore(&vioch->lock, flags);
+		spin_unlock(&vioch->lock);
 
 		if (msg) {
 			msg->rx_len = length;
···
 			scmi_finalize_message(vioch, msg);
 		}
 
+		/*
+		 * Release ready_lock and re-enable IRQs between loop iterations
+		 * to allow virtio_chan_free() to possibly kick in and set the
+		 * flag vioch->ready to false even in between processing of
+		 * messages, so as to force outstanding messages to be ignored
+		 * when system is shutting down.
+		 */
 		spin_unlock_irqrestore(&vioch->ready_lock, ready_flags);
 	}
 
 unlock_out:
-	spin_unlock_irqrestore(&vioch->lock, flags);
+	spin_unlock(&vioch->lock);
 unlock_ready_out:
 	spin_unlock_irqrestore(&vioch->ready_lock, ready_flags);
 }
···
 	struct virtqueue *vqs[VIRTIO_SCMI_VQ_MAX_CNT];
 
 	/* Only one SCMI VirtiO device allowed */
-	if (scmi_vdev)
-		return -EINVAL;
+	if (scmi_vdev) {
+		dev_err(dev,
+			"One SCMI Virtio device was already initialized: only one allowed.\n");
+		return -EBUSY;
+	}
 
 	have_vq_rx = scmi_vio_have_vq_rx(vdev);
 	vq_cnt = have_vq_rx ? VIRTIO_SCMI_VQ_MAX_CNT : 1;
···
 	}
 
 	vdev->priv = channels;
-	scmi_vdev = vdev;
+	/* Ensure initialized scmi_vdev is visible */
+	smp_store_mb(scmi_vdev, vdev);
 
 	return 0;
 }
 
 static void scmi_vio_remove(struct virtio_device *vdev)
 {
+	/*
+	 * Once we get here, virtio_chan_free() will have already been called by
+	 * the SCMI core for any existing channel and, as a consequence, all the
+	 * virtio channels will have been already marked NOT ready, causing any
+	 * outstanding message on any vqueue to be ignored by complete_cb: now
+	 * we can just stop processing buffers and destroy the vqueues.
+	 */
 	vdev->config->reset(vdev);
 	vdev->config->del_vqs(vdev);
-	scmi_vdev = NULL;
+	/* Ensure scmi_vdev is visible as NULL */
+	smp_store_mb(scmi_vdev, NULL);
 }
 
 static int scmi_vio_validate(struct virtio_device *vdev)
···
 	return register_virtio_driver(&virtio_scmi_driver);
 }
 
-static void __exit virtio_scmi_exit(void)
+static void virtio_scmi_exit(void)
{
 	unregister_virtio_driver(&virtio_scmi_driver);
 }
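The switch to smp_store_mb() publishes the global device pointer with a full barrier so that any CPU observing the pointer also observes the initialization that preceded the store. A rough userspace analogue using C11 sequentially consistent atomics; this only sketches the ordering idea and is not the kernel API:

#include <stdatomic.h>
#include <stdio.h>

struct dev { int ready; };

static _Atomic(struct dev *) g_dev;

static void publish(struct dev *d)
{
	d->ready = 1;			/* initialize first ... */
	atomic_store(&g_dev, d);	/* ... then publish (seq_cst) */
}

static void unpublish(void)
{
	atomic_store(&g_dev, NULL);	/* visible as NULL to all readers */
}

int main(void)
{
	static struct dev d;
	struct dev *seen;

	publish(&d);
	seen = atomic_load(&g_dev);
	printf("seen=%p ready=%d\n", (void *)seen, seen ? seen->ready : -1);
	unpublish();
	printf("after remove: %p\n", (void *)atomic_load(&g_dev));
	return 0;
}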
+8-6
drivers/fpga/dfl.c
···
 {
 	unsigned int irq_base, nr_irqs;
 	struct dfl_feature_info *finfo;
+	u8 revision = 0;
 	int ret;
-	u8 revision;
 	u64 v;
 
-	v = readq(binfo->ioaddr + ofst);
-	revision = FIELD_GET(DFH_REVISION, v);
+	if (fid != FEATURE_ID_AFU) {
+		v = readq(binfo->ioaddr + ofst);
+		revision = FIELD_GET(DFH_REVISION, v);
 
-	/* read feature size and id if inputs are invalid */
-	size = size ? size : feature_size(v);
-	fid = fid ? fid : feature_id(v);
+		/* read feature size and id if inputs are invalid */
+		size = size ? size : feature_size(v);
+		fid = fid ? fid : feature_id(v);
+	}
 
 	if (binfo->len - ofst < size)
 		return -EINVAL;
+5-1
drivers/fpga/machxo2-spi.c
···
 		goto fail;
 
 	get_status(spi, &status);
-	if (test_bit(FAIL, &status))
+	if (test_bit(FAIL, &status)) {
+		ret = -EINVAL;
 		goto fail;
+	}
 	dump_status_reg(&status);
 
 	spi_message_init(&msg);
···
 	dump_status_reg(&status);
 	if (!test_bit(DONE, &status)) {
 		machxo2_cleanup(mgr);
+		ret = -EINVAL;
 		goto fail;
 	}
 
···
 			break;
 		if (++refreshloop == MACHXO2_MAX_REFRESH_LOOP) {
 			machxo2_cleanup(mgr);
+			ret = -EINVAL;
 			goto fail;
 		}
 	} while (1);
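These three hunks all fix the same bug class: with a shared "goto fail" exit path, every failure branch must set the error code first, or the function falls through to the label with a stale (possibly zero) ret and reports success on failure. A minimal sketch of the pattern:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool status_failed(void) { return true; }	/* simulate a failure */

static int program_device(void)
{
	int ret = 0;

	if (status_failed()) {
		ret = -EINVAL;	/* the fix: never leave ret == 0 here */
		goto fail;
	}

	return 0;
fail:
	/* common cleanup would go here */
	return ret;
}

int main(void)
{
	printf("program_device() = %d\n", program_device());	/* -EINVAL */
	return 0;
}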
+1-1
drivers/gpio/gpio-aspeed-sgpio.c
···
 		reg = ioread32(bank_reg(data, bank, reg_irq_status));
 
 		for_each_set_bit(p, &reg, 32)
-			generic_handle_domain_irq(gc->irq.domain, i * 32 + p);
+			generic_handle_domain_irq(gc->irq.domain, i * 32 + p * 2);
 	}
 
 	chained_irq_exit(ic, desc);
+2-9
drivers/gpio/gpio-pca953x.c
···
 	mutex_lock(&chip->i2c_lock);
 	ret = regmap_read(chip->regmap, inreg, &reg_val);
 	mutex_unlock(&chip->i2c_lock);
-	if (ret < 0) {
-		/*
-		 * NOTE:
-		 * diagnostic already emitted; that's all we should
-		 * do unless gpio_*_value_cansleep() calls become different
-		 * from their nonsleeping siblings (and report faults).
-		 */
-		return 0;
-	}
+	if (ret < 0)
+		return ret;
 
 	return !!(reg_val & bit);
 }
+24-2
drivers/gpio/gpio-rockchip.c
···
 	u32 data;
 
 	data = rockchip_gpio_readl_bit(bank, offset, bank->gpio_regs->port_ddr);
-	if (data & BIT(offset))
+	if (data)
 		return GPIO_LINE_DIRECTION_OUT;
 
 	return GPIO_LINE_DIRECTION_IN;
···
 	unsigned int cur_div_reg;
 	u64 div;
 
-	if (!IS_ERR(bank->db_clk)) {
+	if (bank->gpio_type == GPIO_TYPE_V2 && !IS_ERR(bank->db_clk)) {
 		div_debounce_support = true;
 		freq = clk_get_rate(bank->db_clk);
 		max_debounce = (GENMASK(23, 0) + 1) * 2 * 1000000 / freq;
···
 	struct device_node *pctlnp = of_get_parent(np);
 	struct pinctrl_dev *pctldev = NULL;
 	struct rockchip_pin_bank *bank = NULL;
+	struct rockchip_pin_output_deferred *cfg;
 	static int gpio;
 	int id, ret;
 
···
 	if (ret)
 		return ret;
 
+	/*
+	 * Prevent clashes with a deferred output setting
+	 * being added right at this moment.
+	 */
+	mutex_lock(&bank->deferred_lock);
+
 	ret = rockchip_gpiolib_register(bank);
 	if (ret) {
 		clk_disable_unprepare(bank->clk);
+		mutex_unlock(&bank->deferred_lock);
 		return ret;
 	}
+
+	while (!list_empty(&bank->deferred_output)) {
+		cfg = list_first_entry(&bank->deferred_output,
+				       struct rockchip_pin_output_deferred, head);
+		list_del(&cfg->head);
+
+		ret = rockchip_gpio_direction_output(&bank->gpio_chip, cfg->pin, cfg->arg);
+		if (ret)
+			dev_warn(dev, "setting output pin %u to %u failed\n", cfg->pin, cfg->arg);
+
+		kfree(cfg);
+	}
+
+	mutex_unlock(&bank->deferred_lock);
 
 	platform_set_drvdata(pdev, bank);
 	dev_info(dev, "probed %pOF\n", np);
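The probe hunk above drains a list of deferred output requests while holding the same lock writers take, so nothing can slip in between registering the chip and applying the queued settings. A userspace sketch of that drain-under-lock pattern; the types and names are illustrative. Build with -lpthread:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct deferred { unsigned pin, arg; struct deferred *next; };

static struct deferred *deferred_head;
static pthread_mutex_t deferred_lock = PTHREAD_MUTEX_INITIALIZER;

/* callers queue requests while the controller is not ready yet */
static void defer_output(unsigned pin, unsigned arg)
{
	struct deferred *d = malloc(sizeof(*d));

	d->pin = pin; d->arg = arg;
	pthread_mutex_lock(&deferred_lock);
	d->next = deferred_head;
	deferred_head = d;
	pthread_mutex_unlock(&deferred_lock);
}

/* probe path: register, then drain, all under the same lock */
static void probe_drain(void)
{
	pthread_mutex_lock(&deferred_lock);
	/* "register the chip" would happen here, still under the lock */
	while (deferred_head) {
		struct deferred *d = deferred_head;

		deferred_head = d->next;
		printf("set pin %u -> %u\n", d->pin, d->arg);
		free(d);
	}
	pthread_mutex_unlock(&deferred_lock);
}

int main(void)
{
	defer_output(4, 1);
	defer_output(7, 0);
	probe_drain();
	return 0;
}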
drivers/gpio/gpiolib-acpi.c
···
 
 	ret = gpio_set_debounce_timeout(desc, agpio->debounce_timeout);
 	if (ret)
-		gpiochip_free_own_desc(desc);
+		dev_warn(chip->parent,
+			 "Failed to set debounce-timeout for pin 0x%04X, err %d\n",
+			 pin, ret);
 
-	return ret ? ERR_PTR(ret) : desc;
+	return desc;
 }
 
 static bool acpi_gpio_in_ignore_list(const char *controller_in, int pin_in)
+31
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
···
 	return 0;
 }
 
+/* Mirrors the is_displayable check in radeonsi's gfx6_compute_surface */
+static int check_tiling_flags_gfx6(struct amdgpu_framebuffer *afb)
+{
+	u64 micro_tile_mode;
+
+	/* Zero swizzle mode means linear */
+	if (AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0)
+		return 0;
+
+	micro_tile_mode = AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE);
+	switch (micro_tile_mode) {
+	case 0: /* DISPLAY */
+	case 3: /* RENDER */
+		return 0;
+	default:
+		drm_dbg_kms(afb->base.dev,
+			    "Micro tile mode %llu not supported for scanout\n",
+			    micro_tile_mode);
+		return -EINVAL;
+	}
+}
+
 static void get_block_dimensions(unsigned int block_log2, unsigned int cpp,
 				 unsigned int *width, unsigned int *height)
 {
···
 				    const struct drm_mode_fb_cmd2 *mode_cmd,
 				    struct drm_gem_object *obj)
 {
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	int ret, i;
 
 	/*
···
 	ret = amdgpu_display_get_fb_info(rfb, &rfb->tiling_flags, &rfb->tmz_surface);
 	if (ret)
 		return ret;
+
+	if (!dev->mode_config.allow_fb_modifiers) {
+		drm_WARN_ONCE(dev, adev->family >= AMDGPU_FAMILY_AI,
+			      "GFX9+ requires FB check based on format modifier\n");
+		ret = check_tiling_flags_gfx6(rfb);
+		if (ret)
+			return ret;
+	}
 
 	if (dev->mode_config.allow_fb_modifiers &&
 	    !(rfb->base.flags & DRM_MODE_FB_MODIFIERS)) {
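AMDGPU_TILING_GET() above is a mask-and-shift field extraction over a packed 64-bit tiling word, and the validation is just pulling one field out and switching on it. A sketch of that idiom; the field layout below is invented for the demo and is not the real amdgpu encoding:

#include <stdint.h>
#include <stdio.h>

#define FIELD_SHIFT_SWIZZLE	0
#define FIELD_MASK_SWIZZLE	0x1f
#define FIELD_SHIFT_MICRO	5
#define FIELD_MASK_MICRO	0x7

#define TILING_GET(v, shift, mask)	(((v) >> (shift)) & (mask))

static int check_scanout(uint64_t tiling)
{
	/* zero swizzle mode means linear: always displayable */
	if (TILING_GET(tiling, FIELD_SHIFT_SWIZZLE, FIELD_MASK_SWIZZLE) == 0)
		return 0;

	switch (TILING_GET(tiling, FIELD_SHIFT_MICRO, FIELD_MASK_MICRO)) {
	case 0:	/* DISPLAY */
	case 3:	/* RENDER */
		return 0;
	default:
		return -1;	/* not displayable */
	}
}

int main(void)
{
	printf("%d\n", check_scanout(0));		/* linear: 0 */
	printf("%d\n", check_scanout(1 | (2 << 5)));	/* micro=2: -1 */
	return 0;
}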
+1-1
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
···
 
 	/* set static priority for a queue/ring */
 	gfx_v9_0_mqd_set_priority(ring, mqd);
-	mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
+	mqd->cp_hqd_quantum = RREG32_SOC15(GC, 0, mmCP_HQD_QUANTUM);
 
 	/* map_queues packet doesn't need activate the queue,
 	 * so only kiq need set this field.
+2-1
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
···
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	gmc_v10_0_gart_disable(adev);
+
 	if (amdgpu_sriov_vf(adev)) {
 		/* full access mode, so don't touch any GMC register */
 		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
···
 
 	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
 	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
-	gmc_v10_0_gart_disable(adev);
 
 	return 0;
 }
+2-1
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
···
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	gmc_v9_0_gart_disable(adev);
+
 	if (amdgpu_sriov_vf(adev)) {
 		/* full access mode, so don't touch any GMC register */
 		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
···
 
 	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
 	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
-	gmc_v9_0_gart_disable(adev);
 
 	return 0;
 }
+8
drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
···
 		msleep(1000);
 	}
 
+	/* TODO: check whether can submit a doorbell request to raise
+	 * a doorbell fence to exit gfxoff.
+	 */
+	if (adev->in_s0ix)
+		amdgpu_gfx_off_ctrl(adev, false);
+
 	sdma_v5_2_soft_reset(adev);
 	/* unhalt the MEs */
 	sdma_v5_2_enable(adev, true);
···
 
 	/* start the gfx rings and rlc compute queues */
 	r = sdma_v5_2_gfx_resume(adev);
+	if (adev->in_s0ix)
+		amdgpu_gfx_off_ctrl(adev, true);
 	if (r)
 		return r;
 	r = sdma_v5_2_rlc_resume(adev);
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
···
 	if (panel_mode == DP_PANEL_MODE_EDP) {
 		struct cp_psp *cp_psp = &stream->ctx->cp_psp;
 
-		if (cp_psp && cp_psp->funcs.enable_assr) {
-			if (!cp_psp->funcs.enable_assr(cp_psp->handle, link)) {
-				/* since eDP implies ASSR on, change panel
-				 * mode to disable ASSR
-				 */
-				panel_mode = DP_PANEL_MODE_DEFAULT;
-			}
-		}
+		if (cp_psp && cp_psp->funcs.enable_assr)
+			/* ASSR is bound to fail with unsigned PSP
+			 * verstage used during devlopment phase.
+			 * Report and continue with eDP panel mode to
+			 * perform eDP link training with right settings
+			 */
+			cp_psp->funcs.enable_assr(cp_psp->handle, link);
 	}
 #endif
 
+2-2
drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
···4242#define DC_LOGGER \4343 engine->ctx->logger44444545-#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */4545+#define DC_TRACE_LEVEL_MESSAGE(...) do { } while (0)4646#define IS_DC_I2CAUX_LOGGING_ENABLED() (false)4747#define LOG_FLAG_Error_I2cAux LOG_ERROR4848#define LOG_FLAG_I2cAux_DceAux LOG_I2C_AUX···7676#define DEFAULT_AUX_ENGINE_MULT 07777#define DEFAULT_AUX_ENGINE_LENGTH 6978787979-#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */7979+#define DC_TRACE_LEVEL_MESSAGE(...) do { } while (0)80808181static void release_engine(8282 struct dce_aux *engine)
···805805 */806806void intel_dmc_ucode_fini(struct drm_i915_private *dev_priv)807807{808808+ int id;809809+808810 if (!HAS_DMC(dev_priv))809811 return;810812811813 intel_dmc_ucode_suspend(dev_priv);812814 drm_WARN_ON(&dev_priv->drm, dev_priv->dmc.wakeref);813815814814- kfree(dev_priv->dmc.dmc_info[DMC_FW_MAIN].payload);816816+ for (id = 0; id < DMC_FW_MAX; id++)817817+ kfree(dev_priv->dmc.dmc_info[id].payload);815818}
+5-4
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
···356356{357357 struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);358358359359- if (likely(obj)) {360360- /* This releases all gem object bindings to the backend. */359359+ if (likely(obj))361360 i915_ttm_free_cached_io_st(obj);362362- __i915_gem_free_object(obj);363363- }364361}365362366363static struct intel_memory_region *···872875{873876 struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);874877878878+ /* This releases all gem object bindings to the backend. */879879+ __i915_gem_free_object(obj);880880+875881 i915_gem_object_release_memory_region(obj);876882 mutex_destroy(&obj->ttm.get_io_page.lock);883883+877884 if (obj->ttm.created)878885 call_rcu(&obj->rcu, __i915_gem_free_object_rcu);879886}
+3-2
drivers/gpu/drm/i915/gt/intel_context.c
···362362 return 0;363363}364364365365-static int sw_fence_dummy_notify(struct i915_sw_fence *sf,366366- enum i915_sw_fence_notify state)365365+static int __i915_sw_fence_call366366+sw_fence_dummy_notify(struct i915_sw_fence *sf,367367+ enum i915_sw_fence_notify state)367368{368369 return NOTIFY_DONE;369370}
-2
drivers/gpu/drm/i915/gt/intel_rps.c
···882882 if (!intel_rps_is_enabled(rps))883883 return;884884885885- GEM_BUG_ON(atomic_read(&rps->num_waiters));886886-887885 if (!intel_rps_clear_active(rps))888886 return;889887
···576576577577 /* No one is going to touch shadow bb from now on. */578578 i915_gem_object_flush_map(bb->obj);579579- i915_gem_object_unlock(bb->obj);579579+ i915_gem_ww_ctx_fini(&ww);580580 }581581 }582582 return 0;···630630 return ret;631631 }632632633633- i915_gem_object_unlock(wa_ctx->indirect_ctx.obj);633633+ i915_gem_ww_ctx_fini(&ww);634634635635 /* FIXME: we are not tracking our pinned VMA leaving it636636 * up to the core to fix up the stray pin_count upon
···255255 if (!privdata->cl_data)256256 return -ENOMEM;257257258258- rc = devm_add_action_or_reset(&pdev->dev, amd_mp2_pci_remove, privdata);258258+ mp2_select_ops(privdata);259259+260260+ rc = amd_sfh_hid_client_init(privdata);259261 if (rc)260262 return rc;261263262262- mp2_select_ops(privdata);263263-264264- return amd_sfh_hid_client_init(privdata);264264+ return devm_add_action_or_reset(&pdev->dev, amd_mp2_pci_remove, privdata);265265}266266267267static int __maybe_unused amd_mp2_pci_resume(struct device *dev)
+7
drivers/hid/hid-apple.c
···336336337337/*338338 * MacBook JIS keyboard has wrong logical maximum339339+ * Magic Keyboard JIS has wrong logical maximum339340 */340341static __u8 *apple_report_fixup(struct hid_device *hdev, __u8 *rdesc,341342 unsigned int *rsize)342343{343344 struct apple_sc *asc = hid_get_drvdata(hdev);345345+346346+ if (*rsize >= 71 && rdesc[70] == 0x65 && rdesc[64] == 0x65) {347347+ hid_info(hdev,348348+ "fixing up Magic Keyboard JIS report descriptor\n");349349+ rdesc[64] = rdesc[70] = 0xe7;350350+ }344351345352 if ((asc->quirks & APPLE_RDESC_JIS) && *rsize >= 60 &&346353 rdesc[53] == 0x65 && rdesc[59] == 0x65) {
+10-3
drivers/hid/hid-betopff.c
···5656{5757 struct betopff_device *betopff;5858 struct hid_report *report;5959- struct hid_input *hidinput =6060- list_first_entry(&hid->inputs, struct hid_input, list);5959+ struct hid_input *hidinput;6160 struct list_head *report_list =6261 &hid->report_enum[HID_OUTPUT_REPORT].report_list;6363- struct input_dev *dev = hidinput->input;6262+ struct input_dev *dev;6463 int field_count = 0;6564 int error;6665 int i, j;6666+6767+ if (list_empty(&hid->inputs)) {6868+ hid_err(hid, "no inputs found\n");6969+ return -ENODEV;7070+ }7171+7272+ hidinput = list_first_entry(&hid->inputs, struct hid_input, list);7373+ dev = hidinput->input;67746875 if (list_empty(report_list)) {6976 hid_err(hid, "no output reports found\n");
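Why the list_empty() guard above is needed: list_first_entry() never returns NULL; on an empty list it hands back the container of the list head itself, i.e. a garbage struct pointer. A self-contained sketch of the safe pattern (struct item is hypothetical):

#include <linux/errno.h>
#include <linux/list.h>

struct item {
	int value;
	struct list_head node;
};

static int first_value(struct list_head *head, int *out)
{
	struct item *it;

	if (list_empty(head))
		return -ENODEV;	/* list_first_entry() would alias the head */

	it = list_first_entry(head, struct item, node);
	*out = it->value;
	return 0;
}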
+3-1
drivers/hid/hid-u2fzero.c
···198198 }199199200200 ret = u2fzero_recv(dev, &req, &resp);201201- if (ret < 0)201201+202202+ /* ignore errors or packets without data */203203+ if (ret < offsetof(struct u2f_hid_msg, init.data))202204 return 0;203205204206 /* only take the minimum amount of data it is safe to take */
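The new lower bound folds two cases into one comparison: a negative ret (transfer error) and a short packet that ends before the payload begins. A sketch with an illustrative layout; the real struct u2f_hid_msg lives in hid-u2fzero.c and may differ:

#include <linux/stddef.h>	/* offsetof() */
#include <linux/types.h>

struct u2f_hid_msg_example {	/* hypothetical layout */
	__be32 cid;
	struct {
		u8 cmd;
		u8 bcnth;
		u8 bcntl;
		u8 data[57];
	} init;
} __packed;

/* true only when at least one payload byte was received */
static bool has_payload(int ret)
{
	return ret >= (int)offsetof(struct u2f_hid_msg_example, init.data);
}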
···989989 return ret;990990991991 /* check external clock presence */992992- extclk = devm_clk_get(st->dev, NULL);993993- if (!IS_ERR(extclk)) {992992+ extclk = devm_clk_get_optional(st->dev, NULL);993993+ if (IS_ERR(extclk))994994+ return dev_err_probe(st->dev, PTR_ERR(extclk),995995+ "Failed to get external clock\n");996996+997997+ if (extclk) {994998 unsigned long rate_hz;995999 u8 pre = 0, div, tbctl;9961000 u64 aux;
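The switch to devm_clk_get_optional() separates "no clock described" (returns NULL) from genuine failures such as -EPROBE_DEFER (returns an error pointer), so the error can be reported through dev_err_probe() while the clockless case falls through. A hedged probe fragment showing the idiom; example_get_ext_rate is a hypothetical helper:

#include <linux/clk.h>
#include <linux/device.h>

static int example_get_ext_rate(struct device *dev, unsigned long *rate_hz)
{
	struct clk *extclk = devm_clk_get_optional(dev, NULL);

	if (IS_ERR(extclk))	/* real error, including -EPROBE_DEFER */
		return dev_err_probe(dev, PTR_ERR(extclk),
				     "Failed to get external clock\n");

	if (extclk)		/* clock present: use its rate */
		*rate_hz = clk_get_rate(extclk);
	else			/* clock absent: keep internal default */
		*rate_hz = 0;

	return 0;
}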
+9-3
drivers/hwmon/mlxreg-fan.c
···315315{316316 struct mlxreg_fan *fan = cdev->devdata;317317 unsigned long cur_state;318318+ int i, config = 0;318319 u32 regval;319319- int i;320320 int err;321321322322 /*···329329 * overwritten.330330 */331331 if (state >= MLXREG_FAN_SPEED_MIN && state <= MLXREG_FAN_SPEED_MAX) {332332+ /*333333+ * This is configuration change, which is only supported through sysfs.334334+ * For configuration non-zero value is to be returned to avoid thermal335335+ * statistics update.336336+ */337337+ config = 1;332338 state -= MLXREG_FAN_MAX_STATE;333339 for (i = 0; i < state; i++)334340 fan->cooling_levels[i] = state;···349343350344 cur_state = MLXREG_FAN_PWM_DUTY2STATE(regval);351345 if (state < cur_state)352352- return 0;346346+ return config;353347354348 state = cur_state;355349 }···365359 dev_err(fan->dev, "Failed to write PWM duty\n");366360 return err;367361 }368368- return 0;362362+ return config;369363}370364371365static const struct thermal_cooling_device_ops mlxreg_fan_cooling_ops = {
+5-12
drivers/hwmon/occ/common.c
···340340 if (val == OCC_TEMP_SENSOR_FAULT)341341 return -EREMOTEIO;342342343343- /*344344- * VRM doesn't return temperature, only alarm bit. This345345- * attribute maps to tempX_alarm instead of tempX_input for346346- * VRM347347- */348348- if (temp->fru_type != OCC_FRU_TYPE_VRM) {349349- /* sensor not ready */350350- if (val == 0)351351- return -EAGAIN;343343+ /* sensor not ready */344344+ if (val == 0)345345+ return -EAGAIN;352346353353- val *= 1000;354354- }347347+ val *= 1000;355348 break;356349 case 2:357350 val = temp->fru_type;···879886 0, i);880887 attr++;881888882882- if (sensors->temp.version > 1 &&889889+ if (sensors->temp.version == 2 &&883890 temp->fru_type == OCC_FRU_TYPE_VRM) {884891 snprintf(attr->name, sizeof(attr->name),885892 "temp%d_alarm", s);
···17461746 }17471747}1748174817491749-static void cma_cancel_listens(struct rdma_id_private *id_priv)17491749+static void _cma_cancel_listens(struct rdma_id_private *id_priv)17501750{17511751 struct rdma_id_private *dev_id_priv;17521752+17531753+ lockdep_assert_held(&lock);1752175417531755 /*17541756 * Remove from listen_any_list to prevent added devices from spawning17551757 * additional listen requests.17561758 */17571757- mutex_lock(&lock);17581759 list_del(&id_priv->list);1759176017601761 while (!list_empty(&id_priv->listen_list)) {···17691768 rdma_destroy_id(&dev_id_priv->id);17701769 mutex_lock(&lock);17711770 }17711771+}17721772+17731773+static void cma_cancel_listens(struct rdma_id_private *id_priv)17741774+{17751775+ mutex_lock(&lock);17761776+ _cma_cancel_listens(id_priv);17721777 mutex_unlock(&lock);17731778}17741779···17831776{17841777 switch (state) {17851778 case RDMA_CM_ADDR_QUERY:17791779+ /*17801780+ * We can avoid doing the rdma_addr_cancel() based on state,17811781+ * only RDMA_CM_ADDR_QUERY has a work that could still execute.17821782+ * Notice that the addr_handler work could still be exiting17831783+ * outside this state, however due to the interaction with the17841784+ * handler_mutex the work is guaranteed not to touch id_priv17851785+ * during exit.17861786+ */17861787 rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);17871788 break;17881789 case RDMA_CM_ROUTE_QUERY:···18251810static void destroy_mc(struct rdma_id_private *id_priv,18261811 struct cma_multicast *mc)18271812{18131813+ bool send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);18141814+18281815 if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num))18291816 ib_sa_free_multicast(mc->sa_mc);18301817···1843182618441827 cma_set_mgid(id_priv, (struct sockaddr *)&mc->addr,18451828 &mgid);18461846- cma_igmp_send(ndev, &mgid, false);18291829+18301830+ if (!send_only)18311831+ cma_igmp_send(ndev, &mgid, false);18321832+18471833 dev_put(ndev);18481834 }18491835···25942574 return 0;2595257525962576err_listen:25972597- list_del(&id_priv->list);25772577+ _cma_cancel_listens(id_priv);25982578 mutex_unlock(&lock);25992579 if (to_destroy)26002580 rdma_destroy_id(&to_destroy->id);···34333413 if (dst_addr->sa_family == AF_IB) {34343414 ret = cma_resolve_ib_addr(id_priv);34353415 } else {34163416+ /*34173417+ * The FSM can return back to RDMA_CM_ADDR_BOUND after34183418+ * rdma_resolve_ip() is called, eg through the error34193419+ * path in addr_handler(). If this happens the existing34203420+ * request must be canceled before issuing a new one.34213421+ * Since canceling a request is a bit slow and this34223422+ * oddball path is rare, keep track once a request has34233423+ * been issued. 
The track turns out to be a permanent34243424+ * state since this is the only cancel as it is34253425+ * immediately before rdma_resolve_ip().34263426+ */34273427+ if (id_priv->used_resolve_ip)34283428+ rdma_addr_cancel(&id->route.addr.dev_addr);34293429+ else34303430+ id_priv->used_resolve_ip = 1;34363431 ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr,34373432 &id->route.addr.dev_addr,34383433 timeout_ms, addr_handler,···38063771 int ret;3807377238083773 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) {37743774+ struct sockaddr_in any_in = {37753775+ .sin_family = AF_INET,37763776+ .sin_addr.s_addr = htonl(INADDR_ANY),37773777+ };37783778+38093779 /* For a well behaved ULP state will be RDMA_CM_IDLE */38103810- id->route.addr.src_addr.ss_family = AF_INET;38113811- ret = rdma_bind_addr(id, cma_src_addr(id_priv));37803780+ ret = rdma_bind_addr(id, (struct sockaddr *)&any_in);38123781 if (ret)38133782 return ret;38143783 if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
···4501450145024502 if (err) {45034503 if (i > 0)45044504- its_vpe_irq_domain_free(domain, virq, i - 1);45044504+ its_vpe_irq_domain_free(domain, virq, i);4505450545064506 its_lpi_free(bitmap, base, nr_ids);45074507 its_free_prop_table(vprop_page);
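The off-by-one in numbers: if the allocation loop fails at index i = 3, then indices 0, 1 and 2 were set up, so three entries must be torn down. The old its_vpe_irq_domain_free(domain, virq, i - 1) released only two of them, leaking one vPE mapping; passing i matches the count of successful iterations exactly.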
+51-1
drivers/irqchip/irq-gic.c
···107107108108#endif109109110110+static DEFINE_STATIC_KEY_FALSE(needs_rmw_access);111111+110112/*111113 * The GIC mapping of CPU interfaces does not necessarily match112114 * the logical CPU numbering. Let's use a mapping as returned···776774#endif777775778776#ifdef CONFIG_SMP777777+static void rmw_writeb(u8 bval, void __iomem *addr)778778+{779779+ static DEFINE_RAW_SPINLOCK(rmw_lock);780780+ unsigned long offset = (unsigned long)addr & 3UL;781781+ unsigned long shift = offset * 8;782782+ unsigned long flags;783783+ u32 val;784784+785785+ raw_spin_lock_irqsave(&rmw_lock, flags);786786+787787+ addr -= offset;788788+ val = readl_relaxed(addr);789789+ val &= ~GENMASK(shift + 7, shift);790790+ val |= bval << shift;791791+ writel_relaxed(val, addr);792792+793793+ raw_spin_unlock_irqrestore(&rmw_lock, flags);794794+}795795+779796static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,780797 bool force)781798{···809788 if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)810789 return -EINVAL;811790812812- writeb_relaxed(gic_cpu_map[cpu], reg);791791+ if (static_branch_unlikely(&needs_rmw_access))792792+ rmw_writeb(gic_cpu_map[cpu], reg);793793+ else794794+ writeb_relaxed(gic_cpu_map[cpu], reg);813795 irq_data_update_effective_affinity(d, cpumask_of(cpu));814796815797 return IRQ_SET_MASK_OK_DONE;···13991375 return true;14001376}1401137713781378+static bool gic_enable_rmw_access(void *data)13791379+{13801380+ /*13811381+ * The EMEV2 class of machines has a broken interconnect, and13821382+ * locks up on accesses that are less than 32bit. So far, only13831383+ * the affinity setting requires it.13841384+ */13851385+ if (of_machine_is_compatible("renesas,emev2")) {13861386+ static_branch_enable(&needs_rmw_access);13871387+ return true;13881388+ }13891389+13901390+ return false;13911391+}13921392+13931393+static const struct gic_quirk gic_quirks[] = {13941394+ {13951395+ .desc = "broken byte access",13961396+ .compatible = "arm,pl390",13971397+ .init = gic_enable_rmw_access,13981398+ },13991399+ { },14001400+};14011401+14021402static int gic_of_setup(struct gic_chip_data *gic, struct device_node *node)14031403{14041404 if (!gic || !node)···1438139014391391 if (of_property_read_u32(node, "cpu-offset", &gic->percpu_offset))14401392 gic->percpu_offset = 0;13931393+13941394+ gic_enable_of_quirks(node, gic_quirks, gic);1441139514421396 return 0;14431397
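Worked example for rmw_writeb() above: a byte store of bval to base + 2 becomes a read-modify-write of the aligned word at base, touching only bits 23:16:

	offset = addr & 3        = 2
	shift  = offset * 8      = 16
	mask   = GENMASK(23, 16) = 0x00ff0000
	val    = (readl_relaxed(base) & ~mask) | (bval << 16)
	writel_relaxed(val, base)

The spinlock makes the read-modify-write atomic against concurrent affinity updates to the other byte lanes of the same word.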
+3-3
drivers/irqchip/irq-mbigen.c
···2525/* The maximum IRQ pin number of mbigen chip(start from 0) */2626#define MAXIMUM_IRQ_PIN_NUM 140727272828-/**2828+/*2929 * In mbigen vector register3030 * bit[21:12]: event id value3131 * bit[11:0]: device id···3939/* offset of vector register in mbigen node */4040#define REG_MBIGEN_VEC_OFFSET 0x20041414242-/**4242+/*4343 * offset of clear register in mbigen node4444 * This register is used to clear the status4545 * of interrupt4646 */4747#define REG_MBIGEN_CLEAR_OFFSET 0xa00048484949-/**4949+/*5050 * offset of interrupt type register5151 * This register is used to configure interrupt5252 * trigger type
···57005700 disk->flags |= GENHD_FL_EXT_DEVT;57015701 disk->events |= DISK_EVENT_MEDIA_CHANGE;57025702 mddev->gendisk = disk;57035703- /* As soon as we call add_disk(), another thread could get57045704- * through to md_open, so make sure it doesn't get too far57055705- */57065706- mutex_lock(&mddev->open_mutex);57075703 add_disk(disk);5708570457095705 error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md");···57145718 if (mddev->kobj.sd &&57155719 sysfs_create_group(&mddev->kobj, &md_bitmap_group))57165720 pr_debug("pointless warning\n");57175717- mutex_unlock(&mddev->open_mutex);57185721 abort:57195722 mutex_unlock(&disks_mutex);57205723 if (!error && mddev->kobj.sd) {
+9-9
drivers/media/platform/s5p-jpeg/jpeg-core.c
···11401140 continue;11411141 length = 0;11421142 switch (c) {11431143- /* SOF0: baseline JPEG */11441144- case SOF0:11431143+ /* JPEG_MARKER_SOF0: baseline JPEG */11441144+ case JPEG_MARKER_SOF0:11451145 if (get_word_be(&jpeg_buffer, &word))11461146 break;11471147 length = (long)word - 2;···11721172 notfound = 0;11731173 break;1174117411751175- case DQT:11751175+ case JPEG_MARKER_DQT:11761176 if (get_word_be(&jpeg_buffer, &word))11771177 break;11781178 length = (long)word - 2;···11851185 skip(&jpeg_buffer, length);11861186 break;1187118711881188- case DHT:11881188+ case JPEG_MARKER_DHT:11891189 if (get_word_be(&jpeg_buffer, &word))11901190 break;11911191 length = (long)word - 2;···11981198 skip(&jpeg_buffer, length);11991199 break;1200120012011201- case SOS:12011201+ case JPEG_MARKER_SOS:12021202 sos = jpeg_buffer.curr - 2; /* 0xffda */12031203 break;1204120412051205 /* skip payload-less markers */12061206- case RST ... RST + 7:12071207- case SOI:12081208- case EOI:12091209- case TEM:12061206+ case JPEG_MARKER_RST ... JPEG_MARKER_RST + 7:12071207+ case JPEG_MARKER_SOI:12081208+ case JPEG_MARKER_EOI:12091209+ case JPEG_MARKER_TEM:12101210 break;1211121112121212 /* skip uninteresting payload markers */
+14-14
drivers/media/platform/s5p-jpeg/jpeg-core.h
···3737#define EXYNOS3250_IRQ_TIMEOUT 0x1000000038383939/* a selection of JPEG markers */4040-#define TEM 0x014141-#define SOF0 0xc04242-#define DHT 0xc44343-#define RST 0xd04444-#define SOI 0xd84545-#define EOI 0xd94646-#define SOS 0xda4747-#define DQT 0xdb4848-#define DHP 0xde4040+#define JPEG_MARKER_TEM 0x014141+#define JPEG_MARKER_SOF0 0xc04242+#define JPEG_MARKER_DHT 0xc44343+#define JPEG_MARKER_RST 0xd04444+#define JPEG_MARKER_SOI 0xd84545+#define JPEG_MARKER_EOI 0xd94646+#define JPEG_MARKER_SOS 0xda4747+#define JPEG_MARKER_DQT 0xdb4848+#define JPEG_MARKER_DHP 0xde49495050/* Flags that indicate a format can be used for capture/output */5151#define SJPEG_FMT_FLAG_ENC_CAPTURE (1 << 0)···187187 * @fmt: driver-specific format of this queue188188 * @w: image width189189 * @h: image height190190- * @sos: SOS marker's position relative to the buffer beginning191191- * @dht: DHT markers' positions relative to the buffer beginning192192- * @dqt: DQT markers' positions relative to the buffer beginning193193- * @sof: SOF0 marker's position relative to the buffer beginning194194- * @sof_len: SOF0 marker's payload length (without length field itself)190190+ * @sos: JPEG_MARKER_SOS's position relative to the buffer beginning191191+ * @dht: JPEG_MARKER_DHT's positions relative to the buffer beginning192192+ * @dqt: JPEG_MARKER_DQT's positions relative to the buffer beginning193193+ * @sof: JPEG_MARKER_SOF0's position relative to the buffer beginning194194+ * @sof_len: JPEG_MARKER_SOF0's payload length (without length field itself)195195 * @size: image buffer size in bytes196196 */197197struct s5p_jpeg_q_data {
+20-1
drivers/media/rc/ir_toy.c
···2424// End transmit and repeat reset command so we exit sump mode2525static const u8 COMMAND_RESET[] = { 0xff, 0xff, 0, 0, 0, 0, 0 };2626static const u8 COMMAND_SMODE_ENTER[] = { 's' };2727+static const u8 COMMAND_SMODE_EXIT[] = { 0 };2728static const u8 COMMAND_TXSTART[] = { 0x26, 0x24, 0x25, 0x03 };28292930#define REPLY_XMITCOUNT 't'···310309 buf[i] = cpu_to_be16(v);311310 }312311313313- buf[count] = cpu_to_be16(0xffff);312312+ buf[count] = 0xffff;314313315314 irtoy->tx_buf = buf;316315 irtoy->tx_len = size;317316 irtoy->emitted = 0;317317+318318+ // There is an issue where if the unit is receiving IR while the319319+ // first TXSTART command is sent, the device might end up hanging320320+ // with its led on. It does not respond to any command when this321321+ // happens. To work around this, re-enter sample mode.322322+ err = irtoy_command(irtoy, COMMAND_SMODE_EXIT,323323+ sizeof(COMMAND_SMODE_EXIT), STATE_RESET);324324+ if (err) {325325+ dev_err(irtoy->dev, "exit sample mode: %d\n", err);326326+ return err;327327+ }328328+329329+ err = irtoy_command(irtoy, COMMAND_SMODE_ENTER,330330+ sizeof(COMMAND_SMODE_ENTER), STATE_COMMAND);331331+ if (err) {332332+ dev_err(irtoy->dev, "enter sample mode: %d\n", err);333333+ return err;334334+ }318335319336 err = irtoy_command(irtoy, COMMAND_TXSTART, sizeof(COMMAND_TXSTART),320337 STATE_TX);
···405405static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs)406406{407407 bool next_entry_found = false;408408- struct hl_cs *next;408408+ struct hl_cs *next, *first_cs;409409410410 if (!cs_needs_timeout(cs))411411 return;···415415 /* We need to handle tdr only once for the complete staged submission.416416 * Hence, we choose the CS that reaches this function first which is417417 * the CS marked as 'staged_last'.418418+ * In case a single staged cs was submitted which has both first and last419419+ * indications, then "cs_find_first" below will return NULL, since we420420+ * removed the cs node from the list before getting here,421421+ * in such cases just continue with the cs to cancel its TDR work.418422 */419419- if (cs->staged_cs && cs->staged_last)420420- cs = hl_staged_cs_find_first(hdev, cs->staged_sequence);423423+ if (cs->staged_cs && cs->staged_last) {424424+ first_cs = hl_staged_cs_find_first(hdev, cs->staged_sequence);425425+ if (first_cs)426426+ cs = first_cs;427427+ }421428422429 spin_unlock(&hdev->cs_mirror_lock);423430···12951288 if (rc)12961289 goto free_cs_object;1297129012911291+ /* If this is a staged submission we must return the staged sequence12921292+ * rather than the internal CS sequence12931293+ */12941294+ if (cs->staged_cs)12951295+ *cs_seq = cs->staged_sequence;12961296+12981297 /* Validate ALL the CS chunks before submitting the CS */12991298 for (i = 0 ; i < num_chunks ; i++) {13001299 struct hl_cs_chunk *chunk = &cs_chunk_array[i];···20011988 goto free_cs_chunk_array;20021989 }2003199019911991+ if (!hdev->nic_ports_mask) {19921992+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);19931993+ atomic64_inc(&cntr->validation_drop_cnt);19941994+ dev_err(hdev->dev,19951995+ "Collective operations not supported when NIC ports are disabled");19961996+ rc = -EINVAL;19971997+ goto free_cs_chunk_array;19981998+ }19991999+20042000 collective_engine_id = chunk->collective_engine_id;20052001 }20062002···20482026 spin_unlock(&ctx->sig_mgr.lock);2049202720502028 if (!handle_found) {20512051- dev_err(hdev->dev, "Cannot find encapsulated signals handle for seq 0x%llx\n",20292029+ /* treat as signal CS already finished */20302030+ dev_dbg(hdev->dev, "Cannot find encapsulated signals handle for seq 0x%llx\n",20522031 signal_seq);20532053- rc = -EINVAL;20322032+ rc = 0;20542033 goto free_cs_chunk_array;20552034 }20562035···26362613 * completed after the poll function.26372614 */26382615 if (!mcs_data.completion_bitmap) {26392639- dev_err(hdev->dev, "Multi-CS got completion on wait but no CS completed\n");26162616+ dev_warn_ratelimited(hdev->dev,26172617+ "Multi-CS got completion on wait but no CS completed\n");26402618 rc = -EFAULT;26412619 }26422620 }···27642740 else27652741 interrupt = &hdev->user_interrupt[interrupt_offset];2766274227432743+ /* Add pending user interrupt to relevant list for the interrupt27442744+ * handler to monitor27452745+ */27462746+ spin_lock_irqsave(&interrupt->wait_list_lock, flags);27472747+ list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);27482748+ spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);27492749+27502750+ /* We check for completion value as interrupt could have been received27512751+ * before we added the node to the wait list27522752+ */27672753 if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 4)) {27682754 dev_err(hdev->dev, "Failed to copy completion value from user\n");27692755 rc = -EFAULT;27702770- goto free_fence;27562756+ goto remove_pending_user_interrupt;27712757 
}2772275827732759 if (completion_value >= target_value)···27862752 *status = CS_WAIT_STATUS_BUSY;2787275327882754 if (!timeout_us || (*status == CS_WAIT_STATUS_COMPLETED))27892789- goto free_fence;27902790-27912791- /* Add pending user interrupt to relevant list for the interrupt27922792- * handler to monitor27932793- */27942794- spin_lock_irqsave(&interrupt->wait_list_lock, flags);27952795- list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);27962796- spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);27552755+ goto remove_pending_user_interrupt;2797275627982757wait_again:27992758 /* Wait for interrupt handler to signal completion */···27972770 * If comparison fails, keep waiting until timeout expires27982771 */27992772 if (completion_rc > 0) {27732773+ spin_lock_irqsave(&interrupt->wait_list_lock, flags);27742774+ /* reinit_completion must be called before we check for user27752775+ * completion value, otherwise, if interrupt is received after27762776+ * the comparison and before the next wait_for_completion,27772777+ * we will reach timeout and fail27782778+ */27792779+ reinit_completion(&pend->fence.completion);27802780+ spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);27812781+28002782 if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 4)) {28012783 dev_err(hdev->dev, "Failed to copy completion value from user\n");28022784 rc = -EFAULT;···28162780 if (completion_value >= target_value) {28172781 *status = CS_WAIT_STATUS_COMPLETED;28182782 } else {28192819- spin_lock_irqsave(&interrupt->wait_list_lock, flags);28202820- reinit_completion(&pend->fence.completion);28212783 timeout = completion_rc;28222822-28232823- spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);28242784 goto wait_again;28252785 }28262786 } else if (completion_rc == -ERESTARTSYS) {···28342802 list_del(&pend->wait_list_node);28352803 spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);2836280428372837-free_fence:28382805 kfree(pend);28392806 hl_ctx_put(ctx);28402807
+7-2
drivers/misc/habanalabs/common/hw_queue.c
···437437 struct hl_cs_compl *cs_cmpl)438438{439439 struct hl_cs_encaps_sig_handle *handle = cs->encaps_sig_hdl;440440+ u32 offset = 0;440441441442 cs_cmpl->hw_sob = handle->hw_sob;442443···447446 * set offset 1 for example he mean to wait only for the first448447 * signal only, which will be pre_sob_val, and if he set offset 2449448 * then the value required is (pre_sob_val + 1) and so on...449449+ * if user set wait offset to 0, then treat it as legacy wait cs,450450+ * wait for the next signal.450451 */451451- cs_cmpl->sob_val = handle->pre_sob_val +452452- (job->encaps_sig_wait_offset - 1);452452+ if (job->encaps_sig_wait_offset)453453+ offset = job->encaps_sig_wait_offset - 1;454454+455455+ cs_cmpl->sob_val = handle->pre_sob_val + offset;453456}454457455458static int init_wait_cs(struct hl_device *hdev, struct hl_cs *cs,
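Worked example for the wait-offset mapping above: with handle->pre_sob_val = 8, a wait offset of 2 yields sob_val = 9 (wait for the second signal), offset 1 yields 8, and offset 0, the legacy encoding, also yields 8, i.e. waiting on the next signal just as a pre-encapsulation wait CS would.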
···1802180218031803 spin_lock_irqsave(&host->irq_lock, flags);1804180418051805- if (!host->data_status)18051805+ /*18061806+ * Only inject an error if we haven't already got an error or data over18071807+ * interrupt.18081808+ */18091809+ if (!host->data_status) {18061810 host->data_status = SDMMC_INT_DCRC;18071807- set_bit(EVENT_DATA_ERROR, &host->pending_events);18081808- tasklet_schedule(&host->tasklet);18111811+ set_bit(EVENT_DATA_ERROR, &host->pending_events);18121812+ tasklet_schedule(&host->tasklet);18131813+ }1809181418101815 spin_unlock_irqrestore(&host->irq_lock, flags);18111816···27262721 }2727272227282723 if (pending & DW_MCI_DATA_ERROR_FLAGS) {27242724+ spin_lock(&host->irq_lock);27252725+27292726 /* if there is an error report DATA_ERROR */27302727 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);27312728 host->data_status = pending;27322729 smp_wmb(); /* drain writebuffer */27332730 set_bit(EVENT_DATA_ERROR, &host->pending_events);27342731 tasklet_schedule(&host->tasklet);27322732+27332733+ spin_unlock(&host->irq_lock);27352734 }2736273527372736 if (pending & SDMMC_INT_DATA_OVER) {
+2
drivers/mmc/host/renesas_sdhi_core.c
···561561 /* Unknown why but without polling reset status, it will hang */562562 read_poll_timeout(reset_control_status, ret, ret == 0, 1, 100,563563 false, priv->rstc);564564+ /* At least SDHI_VER_GEN2_SDR50 needs manual release of reset */565565+ sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);564566 priv->needs_adjust_hs400 = false;565567 renesas_sdhi_set_clock(host, host->clk_cache);566568 } else if (priv->scc_ctl) {
···6868 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);6969 unsigned int port, count = 0;70707171- for (port = 0; port < ARRAY_SIZE(priv->port_sts); port++) {7171+ for (port = 0; port < ds->num_ports; port++) {7272 if (dsa_is_cpu_port(ds, port))7373 continue;7474 if (priv->port_sts[port].enabled)···15121512{15131513 struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);1514151415151515+ if (!priv)15161516+ return 0;15171517+15151518 priv->wol_ports_mask = 0;15161519 /* Disable interrupts */15171520 bcm_sf2_intr_disable(priv);···15261523 if (priv->type == BCM7278_DEVICE_ID)15271524 reset_control_assert(priv->rcdev);1528152515261526+ platform_set_drvdata(pdev, NULL);15271527+15291528 return 0;15301529}1531153015321531static void bcm_sf2_sw_shutdown(struct platform_device *pdev)15331532{15341533 struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);15341534+15351535+ if (!priv)15361536+ return;1535153715361538 /* For a kernel about to be kexec'd we want to keep the GPHY on for a15371539 * successful MDIO bus scan to occur. If we did turn off the GPHY···15461538 */15471539 if (priv->hw_params.num_gphy == 1)15481540 bcm_sf2_gphy_enable_set(priv->dev->ds, true);15411541+15421542+ dsa_switch_shutdown(priv->dev->ds);15431543+15441544+ platform_set_drvdata(pdev, NULL);15491545}1550154615511547#ifdef CONFIG_PM_SLEEP
···11// SPDX-License-Identifier: GPL-2.022-/* Copyright 2019-2021 NXP Semiconductors22+/* Copyright 2019-2021 NXP33 *44 * This is an umbrella module for all network switches that are55 * register-compatible with Ocelot and that perform I/O to their host CPU
···413413 if (deep) {414414 /* Reinitialize Nic/Vecs objects */415415 aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);416416-417417- ret = aq_nic_init(nic);418418- if (ret)419419- goto err_exit;420416 }421417422418 if (netif_running(nic->ndev)) {419419+ ret = aq_nic_init(nic);420420+ if (ret)421421+ goto err_exit;422422+423423 ret = aq_nic_start(nic);424424 if (ret)425425 goto err_exit;
+2
drivers/net/ethernet/broadcom/bgmac-bcma.c
···129129 bcma_set_drvdata(core, bgmac);130130131131 err = of_get_mac_address(bgmac->dev->of_node, bgmac->net_dev->dev_addr);132132+ if (err == -EPROBE_DEFER)133133+ return err;132134133135 /* If no MAC address assigned via device tree, check SPROM */134136 if (err) {
+3
drivers/net/ethernet/broadcom/bgmac-platform.c
···192192 bgmac->dma_dev = &pdev->dev;193193194194 ret = of_get_mac_address(np, bgmac->net_dev->dev_addr);195195+ if (ret == -EPROBE_DEFER)196196+ return ret;197197+195198 if (ret)196199 dev_warn(&pdev->dev,197200 "MAC address not present in device tree\n");
+4-4
drivers/net/ethernet/broadcom/bnxt/bnxt.c
···391391 * netif_tx_queue_stopped().392392 */393393 smp_mb();394394- if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) {394394+ if (bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh) {395395 netif_tx_wake_queue(txq);396396 return false;397397 }···764764 smp_mb();765765766766 if (unlikely(netif_tx_queue_stopped(txq)) &&767767- bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&767767+ bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh &&768768 READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)769769 netif_tx_wake_queue(txq);770770}···24162416 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {24172417 tx_pkts++;24182418 /* return full budget so NAPI will complete. */24192419- if (unlikely(tx_pkts > bp->tx_wake_thresh)) {24192419+ if (unlikely(tx_pkts >= bp->tx_wake_thresh)) {24202420 rx_pkts = budget;24212421 raw_cons = NEXT_RAW_CMP(raw_cons);24222422 if (budget)···36403640 u16 i;3641364136423642 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,36433643- MAX_SKB_FRAGS + 1);36433643+ BNXT_MIN_TX_DESC_CNT);3644364436453645 for (i = 0; i < bp->tx_nr_rings; i++) {36463646 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
+5
drivers/net/ethernet/broadcom/bnxt/bnxt.h
···629629#define BNXT_MAX_RX_JUM_DESC_CNT (RX_DESC_CNT * MAX_RX_AGG_PAGES - 1)630630#define BNXT_MAX_TX_DESC_CNT (TX_DESC_CNT * MAX_TX_PAGES - 1)631631632632+/* Minimum TX BDs for a TX packet with MAX_SKB_FRAGS + 1. We need one extra633633+ * BD because the first TX BD is always a long BD.634634+ */635635+#define BNXT_MIN_TX_DESC_CNT (MAX_SKB_FRAGS + 2)636636+632637#define RX_RING(x) (((x) & ~(RX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))633638#define RX_IDX(x) ((x) & (RX_DESC_CNT - 1))634639
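For scale: with the common MAX_SKB_FRAGS value of 17, a maximally fragmented packet occupies 18 buffers, and the long first BD brings BNXT_MIN_TX_DESC_CNT to 19. The wake-queue comparisons in bnxt.c above switch from > to >= to match this exact threshold, and the ethtool check below rejects TX rings smaller than it.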
+1-1
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
···798798799799 if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||800800 (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||801801- (ering->tx_pending <= MAX_SKB_FRAGS))801801+ (ering->tx_pending < BNXT_MIN_TX_DESC_CNT))802802 return -EINVAL;803803804804 if (netif_running(dev))
+2-5
drivers/net/ethernet/freescale/enetc/enetc.c
···419419420420static void enetc_rx_net_dim(struct enetc_int_vector *v)421421{422422- struct dim_sample dim_sample;422422+ struct dim_sample dim_sample = {};423423424424 v->comp_cnt++;425425···18791879static int enetc_setup_irqs(struct enetc_ndev_priv *priv)18801880{18811881 struct pci_dev *pdev = priv->si->pdev;18821882- cpumask_t cpu_mask;18831882 int i, j, err;1884188318851884 for (i = 0; i < priv->bdr_int_num; i++) {···1907190819081909 enetc_wr(hw, ENETC_SIMSITRV(idx), entry);19091910 }19101910- cpumask_clear(&cpu_mask);19111911- cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);19121912- irq_set_affinity_hint(irq, &cpu_mask);19111911+ irq_set_affinity_hint(irq, get_cpu_mask(i % num_online_cpus()));19131912 }1914191319151914 return 0;
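The affinity-hint change above is a lifetime fix: irq_set_affinity_hint() stores the mask pointer rather than copying the mask, and the pointer is dereferenced later (e.g. via /proc/irq/*/affinity_hint), long after the probe stack frame is gone. get_cpu_mask() returns a pointer with static storage duration, so it may be kept. The contrast, as a sketch:

/* broken: the hint keeps pointing into this function's stack frame */
cpumask_t cpu_mask;

cpumask_clear(&cpu_mask);
cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
irq_set_affinity_hint(irq, &cpu_mask);	/* dangles after return */

/* fixed: get_cpu_mask() points into a static table */
irq_set_affinity_hint(irq, get_cpu_mask(i % num_online_cpus()));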
+1-1
drivers/net/ethernet/freescale/enetc/enetc_ierb.c
···11// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)22-/* Copyright 2021 NXP Semiconductors22+/* Copyright 2021 NXP33 *44 * The Integrated Endpoint Register Block (IERB) is configured by pre-boot55 * software and is supposed to be to ENETC what a NVRAM is to a 'real' PCIe
···541541542542 if (phy_interface_mode_is_rgmii(phy_mode)) {543543 val = enetc_port_rd(hw, ENETC_PM0_IF_MODE);544544- val &= ~ENETC_PM0_IFM_EN_AUTO;545545- val &= ENETC_PM0_IFM_IFMODE_MASK;544544+ val &= ~(ENETC_PM0_IFM_EN_AUTO | ENETC_PM0_IFM_IFMODE_MASK);546545 val |= ENETC_PM0_IFM_IFMODE_GMII | ENETC_PM0_IFM_RG;547546 enetc_port_wr(hw, ENETC_PM0_IF_MODE, val);548547 }
···780780 gve_num_tx_qpls(priv));781781782782 /* we are out of rx qpls */783783- if (id == priv->qpl_cfg.qpl_map_size)783783+ if (id == gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv))784784 return NULL;785785786786 set_bit(id, priv->qpl_cfg.qpl_id_map);
+29-16
drivers/net/ethernet/google/gve/gve_main.c
···4141{4242 struct gve_priv *priv = netdev_priv(dev);4343 unsigned int start;4444+ u64 packets, bytes;4445 int ring;45464647 if (priv->rx) {···4948 do {5049 start =5150 u64_stats_fetch_begin(&priv->rx[ring].statss);5252- s->rx_packets += priv->rx[ring].rpackets;5353- s->rx_bytes += priv->rx[ring].rbytes;5151+ packets = priv->rx[ring].rpackets;5252+ bytes = priv->rx[ring].rbytes;5453 } while (u64_stats_fetch_retry(&priv->rx[ring].statss,5554 start));5555+ s->rx_packets += packets;5656+ s->rx_bytes += bytes;5657 }5758 }5859 if (priv->tx) {···6259 do {6360 start =6461 u64_stats_fetch_begin(&priv->tx[ring].statss);6565- s->tx_packets += priv->tx[ring].pkt_done;6666- s->tx_bytes += priv->tx[ring].bytes_done;6262+ packets = priv->tx[ring].pkt_done;6363+ bytes = priv->tx[ring].bytes_done;6764 } while (u64_stats_fetch_retry(&priv->tx[ring].statss,6865 start));6666+ s->tx_packets += packets;6767+ s->tx_bytes += bytes;6968 }7069 }7170}···87828883static void gve_free_counter_array(struct gve_priv *priv)8984{8585+ if (!priv->counter_array)8686+ return;8787+9088 dma_free_coherent(&priv->pdev->dev,9189 priv->num_event_counters *9290 sizeof(*priv->counter_array),···150142151143static void gve_free_stats_report(struct gve_priv *priv)152144{145145+ if (!priv->stats_report)146146+ return;147147+153148 del_timer_sync(&priv->stats_report_timer);154149 dma_free_coherent(&priv->pdev->dev, priv->stats_report_len,155150 priv->stats_report, priv->stats_report_bus);···381370{382371 int i;383372384384- if (priv->msix_vectors) {385385- /* Free the irqs */386386- for (i = 0; i < priv->num_ntfy_blks; i++) {387387- struct gve_notify_block *block = &priv->ntfy_blocks[i];388388- int msix_idx = i;373373+ if (!priv->msix_vectors)374374+ return;389375390390- irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,391391- NULL);392392- free_irq(priv->msix_vectors[msix_idx].vector, block);393393- }394394- free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);376376+ /* Free the irqs */377377+ for (i = 0; i < priv->num_ntfy_blks; i++) {378378+ struct gve_notify_block *block = &priv->ntfy_blocks[i];379379+ int msix_idx = i;380380+381381+ irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,382382+ NULL);383383+ free_irq(priv->msix_vectors[msix_idx].vector, block);395384 }385385+ free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);396386 dma_free_coherent(&priv->pdev->dev,397387 priv->num_ntfy_blks * sizeof(*priv->ntfy_blocks),398388 priv->ntfy_blocks, priv->ntfy_block_bus);···1197118511981186void gve_handle_report_stats(struct gve_priv *priv)11991187{12001200- int idx, stats_idx = 0, tx_bytes;12011201- unsigned int start = 0;12021188 struct stats *stats = priv->stats_report->stats;11891189+ int idx, stats_idx = 0;11901190+ unsigned int start = 0;11911191+ u64 tx_bytes;1203119212041193 if (!gve_get_report_stats(priv))12051194 return;
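The local packets/bytes variables are the point of the stats hunk above: if u64_stats_fetch_retry() detects a concurrent writer, the loop body runs again, so accumulating directly into the rtnl_link_stats64 fields would add the same ring twice. Snapshot inside the loop, fold in once after it; a minimal sketch with a hypothetical ring_stats struct:

#include <linux/u64_stats_sync.h>

struct ring_stats {		/* hypothetical per-ring counters */
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

static void ring_snapshot(const struct ring_stats *r, u64 *pkts, u64 *bytes)
{
	unsigned int start;

	do {
		start  = u64_stats_fetch_begin(&r->syncp);
		*pkts  = r->packets;	/* snapshot only, no accumulation */
		*bytes = r->bytes;
	} while (u64_stats_fetch_retry(&r->syncp, start));
	/* caller adds *pkts / *bytes to the totals exactly once */
}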
+7-1
drivers/net/ethernet/google/gve/gve_rx.c
···104104 if (!rx->data.page_info)105105 return -ENOMEM;106106107107- if (!rx->data.raw_addressing)107107+ if (!rx->data.raw_addressing) {108108 rx->data.qpl = gve_assign_rx_qpl(priv);109109+ if (!rx->data.qpl) {110110+ kvfree(rx->data.page_info);111111+ rx->data.page_info = NULL;112112+ return -ENOMEM;113113+ }114114+ }109115 for (i = 0; i < slots; i++) {110116 if (!rx->data.raw_addressing) {111117 struct page *page = rx->data.qpl->pages[i];
-1
drivers/net/ethernet/hisilicon/hns3/hnae3.h
···752752 u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */753753 u16 tqp_count[HNAE3_MAX_TC];754754 u16 tqp_offset[HNAE3_MAX_TC];755755- unsigned long tc_en; /* bitmap of TC enabled */756755 u8 num_tc; /* Total number of enabled TCs */757756 bool mqprio_active;758757};
+7-9
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
···623623 return ret;624624 }625625626626- for (i = 0; i < HNAE3_MAX_TC; i++) {627627- if (!test_bit(i, &tc_info->tc_en))628628- continue;629629-626626+ for (i = 0; i < tc_info->num_tc; i++)630627 netdev_set_tc_queue(netdev, i, tc_info->tqp_count[i],631628 tc_info->tqp_offset[i]);632632- }633629 }634630635631 ret = netif_set_real_num_tx_queues(netdev, queue_size);···774778775779 if (hns3_nic_resetting(netdev))776780 return -EBUSY;781781+782782+ if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {783783+ netdev_warn(netdev, "net open repeatedly!\n");784784+ return 0;785785+ }777786778787 netif_carrier_off(netdev);779788···48664865 struct hnae3_tc_info *tc_info = &kinfo->tc_info;48674866 int i;4868486748694869- for (i = 0; i < HNAE3_MAX_TC; i++) {48684868+ for (i = 0; i < tc_info->num_tc; i++) {48704869 int j;48714871-48724872- if (!test_bit(i, &tc_info->tc_en))48734873- continue;4874487048754871 for (j = 0; j < tc_info->tqp_count[i]; j++) {48764872 struct hnae3_queue *q;
···334334335335#if IS_ENABLED(CONFIG_VLAN_8021Q)336336 /* Disable the vlan filter for selftest does not support it */337337- if (h->ae_algo->ops->enable_vlan_filter)337337+ if (h->ae_algo->ops->enable_vlan_filter &&338338+ ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER)338339 h->ae_algo->ops->enable_vlan_filter(h, false);339340#endif340341···360359 h->ae_algo->ops->halt_autoneg(h, false);361360362361#if IS_ENABLED(CONFIG_VLAN_8021Q)363363- if (h->ae_algo->ops->enable_vlan_filter)362362+ if (h->ae_algo->ops->enable_vlan_filter &&363363+ ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER)364364 h->ae_algo->ops->enable_vlan_filter(h, true);365365#endif366366
···581581 ret = hclge_cmd_send(&hdev->hw, &desc, 1);582582 if (ret) {583583 dev_err(&hdev->pdev->dev,584584- "vf%u, qs%u failed to set tx_rate:%d, ret=%d\n",584584+ "vport%u, qs%u failed to set tx_rate:%d, ret=%d\n",585585 vport->vport_id, shap_cfg_cmd->qs_id,586586 max_tx_rate, ret);587587 return ret;···687687688688 for (i = 0; i < HNAE3_MAX_TC; i++) {689689 if (hdev->hw_tc_map & BIT(i) && i < kinfo->tc_info.num_tc) {690690- set_bit(i, &kinfo->tc_info.tc_en);691690 kinfo->tc_info.tqp_offset[i] = i * kinfo->rss_size;692691 kinfo->tc_info.tqp_count[i] = kinfo->rss_size;693692 } else {694693 /* Set to default queue if TC is disable */695695- clear_bit(i, &kinfo->tc_info.tc_en);696694 kinfo->tc_info.tqp_offset[i] = 0;697695 kinfo->tc_info.tqp_count[i] = 1;698696 }···727729 for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)728730 hdev->tm_info.prio_tc[i] =729731 (i >= hdev->tm_info.num_tc) ? 0 : i;730730-731731- /* DCB is enabled if we have more than 1 TC or pfc_en is732732- * non-zero.733733- */734734- if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)735735- hdev->flag |= HCLGE_FLAG_DCB_ENABLE;736736- else737737- hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;738732}739733740734static void hclge_tm_pg_info_init(struct hclge_dev *hdev)···757767758768static void hclge_update_fc_mode_by_dcb_flag(struct hclge_dev *hdev)759769{760760- if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) {770770+ if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en) {761771 if (hdev->fc_mode_last_time == HCLGE_FC_PFC)762772 dev_warn(&hdev->pdev->dev,763763- "DCB is disable, but last mode is FC_PFC\n");773773+ "Only 1 tc used, but last mode is FC_PFC\n");764774765775 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;766776 } else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {···786796 }787797}788798789789-static void hclge_pfc_info_init(struct hclge_dev *hdev)799799+void hclge_tm_pfc_info_update(struct hclge_dev *hdev)790800{791801 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)792802 hclge_update_fc_mode(hdev);···802812803813 hclge_tm_vport_info_update(hdev);804814805805- hclge_pfc_info_init(hdev);815815+ hclge_tm_pfc_info_update(hdev);806816}807817808818static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)···15481558 hclge_tm_schd_info_init(hdev);15491559}1550156015511551-void hclge_tm_pfc_info_update(struct hclge_dev *hdev)15521552-{15531553- /* DCB is enabled if we have more than 1 TC or pfc_en is15541554- * non-zero.15551555- */15561556- if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)15571557- hdev->flag |= HCLGE_FLAG_DCB_ENABLE;15581558- else15591559- hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;15601560-15611561- hclge_pfc_info_init(hdev);15621562-}15631563-15641561int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)15651562{15661563 int ret;···15931616 if (ret)15941617 return ret;1595161815961596- if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE))16191619+ if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en)15971620 return 0;1598162115991622 return hclge_tm_bp_setup(hdev);
···816816 return 0;817817}818818819819+static int hclgevf_parse_rss_hfunc(struct hclgevf_dev *hdev, const u8 hfunc,820820+ u8 *hash_algo)821821+{822822+ switch (hfunc) {823823+ case ETH_RSS_HASH_TOP:824824+ *hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;825825+ return 0;826826+ case ETH_RSS_HASH_XOR:827827+ *hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;828828+ return 0;829829+ case ETH_RSS_HASH_NO_CHANGE:830830+ *hash_algo = hdev->rss_cfg.hash_algo;831831+ return 0;832832+ default:833833+ return -EINVAL;834834+ }835835+}836836+819837static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,820838 const u8 *key, const u8 hfunc)821839{822840 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);823841 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;842842+ u8 hash_algo;824843 int ret, i;825844826845 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {846846+ ret = hclgevf_parse_rss_hfunc(hdev, hfunc, &hash_algo);847847+ if (ret)848848+ return ret;849849+827850 /* Set the RSS Hash Key if specified by the user */828851 if (key) {829829- switch (hfunc) {830830- case ETH_RSS_HASH_TOP:831831- rss_cfg->hash_algo =832832- HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;833833- break;834834- case ETH_RSS_HASH_XOR:835835- rss_cfg->hash_algo =836836- HCLGEVF_RSS_HASH_ALGO_SIMPLE;837837- break;838838- case ETH_RSS_HASH_NO_CHANGE:839839- break;840840- default:841841- return -EINVAL;842842- }843843-844844- ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,845845- key);846846- if (ret)852852+ ret = hclgevf_set_rss_algo_key(hdev, hash_algo, key);853853+ if (ret) {854854+ dev_err(&hdev->pdev->dev,855855+ "invalid hfunc type %u\n", hfunc);847856 return ret;857857+ }848858849859 /* Update the shadow RSS key with user specified qids */850860 memcpy(rss_cfg->rss_hash_key, key,851861 HCLGEVF_RSS_KEY_SIZE);862862+ } else {863863+ ret = hclgevf_set_rss_algo_key(hdev, hash_algo,864864+ rss_cfg->rss_hash_key);865865+ if (ret)866866+ return ret;852867 }868868+ rss_cfg->hash_algo = hash_algo;853869 }854870855871 /* update the shadow RSS table with user specified qids */
+1-1
drivers/net/ethernet/hisilicon/hns_mdio.c
···354354355355 if (dev_of_node(bus->parent)) {356356 if (!mdio_dev->subctrl_vbase) {357357- dev_err(&bus->dev, "mdio sys ctl reg has not maped\n");357357+ dev_err(&bus->dev, "mdio sys ctl reg has not mapped\n");358358 return -ENODEV;359359 }360360
-8
drivers/net/ethernet/ibm/ibmvnic.c
···47084708 return 0;47094709 }4710471047114711- if (adapter->failover_pending) {47124712- adapter->init_done_rc = -EAGAIN;47134713- netdev_dbg(netdev, "Failover pending, ignoring login response\n");47144714- complete(&adapter->init_done);47154715- /* login response buffer will be released on reset */47164716- return 0;47174717- }47184718-47194711 netdev->mtu = adapter->req_mtu - ETH_HLEN;4720471247214713 netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
+1
drivers/net/ethernet/intel/Kconfig
···335335 tristate "Intel(R) Ethernet Controller I225-LM/I225-V support"336336 default n337337 depends on PCI338338+ depends on PTP_1588_CLOCK_OPTIONAL338339 help339340 This driver supports Intel(R) Ethernet Controller I225-LM/I225-V340341 family of adapters.
+15-7
drivers/net/ethernet/intel/e100.c
···24372437 sizeof(info->bus_info));24382438}2439243924402440-#define E100_PHY_REGS 0x1C24402440+#define E100_PHY_REGS 0x1D24412441static int e100_get_regs_len(struct net_device *netdev)24422442{24432443 struct nic *nic = netdev_priv(netdev);24442444- return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf);24442444+24452445+ /* We know the number of registers, and the size of the dump buffer.24462446+ * Calculate the total size in bytes.24472447+ */24482448+ return (1 + E100_PHY_REGS) * sizeof(u32) + sizeof(nic->mem->dump_buf);24452449}2446245024472451static void e100_get_regs(struct net_device *netdev,···24592455 buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |24602456 ioread8(&nic->csr->scb.cmd_lo) << 16 |24612457 ioread16(&nic->csr->scb.status);24622462- for (i = E100_PHY_REGS; i >= 0; i--)24632463- buff[1 + E100_PHY_REGS - i] =24642464- mdio_read(netdev, nic->mii.phy_id, i);24582458+ for (i = 0; i < E100_PHY_REGS; i++)24592459+ /* Note that we read the registers in reverse order. This24602460+ * ordering is the ABI apparently used by ethtool and other24612461+ * applications.24622462+ */24632463+ buff[1 + i] = mdio_read(netdev, nic->mii.phy_id,24642464+ E100_PHY_REGS - 1 - i);24652465 memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));24662466 e100_exec_cb(nic, NULL, e100_dump);24672467 msleep(10);24682468- memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,24692469- sizeof(nic->mem->dump_buf));24682468+ memcpy(&buff[1 + E100_PHY_REGS], nic->mem->dump_buf,24692469+ sizeof(nic->mem->dump_buf));24702470}2471247124722472static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
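The sizing bug in numbers: the register image is an array of u32 words, one status word plus E100_PHY_REGS (0x1D = 29) MDIO registers, i.e. (1 + 29) * 4 = 120 bytes, followed by the dump buffer. The old code added the register count to a byte total as though each register occupied a single byte, so ethtool was told the dump was far smaller than what e100_get_regs() actually writes.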
+3-2
drivers/net/ethernet/intel/i40e/i40e_main.c
···48714871{48724872 int i;4873487348744874- i40e_free_misc_vector(pf);48744874+ if (test_bit(__I40E_MISC_IRQ_REQUESTED, pf->state))48754875+ i40e_free_misc_vector(pf);4875487648764877 i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,48774878 I40E_IWARP_IRQ_PILE_ID);···1011410113 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {1011510114 /* retry with a larger buffer */1011610115 buf_len = data_size;1011710117- } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {1011610116+ } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) {1011810117 dev_info(&pf->pdev->dev,1011910118 "capability discovery failed, err %s aq_err %s\n",1012010119 i40e_stat_str(&pf->hw, err),
···1011210112 struct ixgbe_adapter *adapter = netdev_priv(dev);1011310113 struct bpf_prog *old_prog;1011410114 bool need_reset;1011510115+ int num_queues;10115101161011610117 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)1011710118 return -EINVAL;···1016210161 /* Kick start the NAPI context if there is an AF_XDP socket open1016310162 * on that queue id. This so that receiving will start.1016410163 */1016510165- if (need_reset && prog)1016610166- for (i = 0; i < adapter->num_rx_queues; i++)1016410164+ if (need_reset && prog) {1016510165+ num_queues = min_t(int, adapter->num_rx_queues,1016610166+ adapter->num_xdp_queues);1016710167+ for (i = 0; i < num_queues; i++)1016710168 if (adapter->xdp_ring[i]->xsk_pool)1016810169 (void)ixgbe_xsk_wakeup(adapter->netdev, i,1016910170 XDP_WAKEUP_RX);1017110171+ }10170101721017110173 return 0;1017210174}
+3
drivers/net/ethernet/mediatek/mtk_ppe_offload.c
···186186 int hash;187187 int i;188188189189+ if (rhashtable_lookup(ð->flow_table, &f->cookie, mtk_flow_ht_params))190190+ return -EEXIST;191191+189192 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {190193 struct flow_match_meta match;191194
+32-18
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
···372372 int nhoff = skb_network_offset(skb);373373 int ret = 0;374374375375+ if (skb->encapsulation)376376+ return -EPROTONOSUPPORT;377377+375378 if (skb->protocol != htons(ETH_P_IP))376379 return -EPROTONOSUPPORT;377380···12721269 if (!netif_carrier_ok(dev)) {12731270 if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {12741271 if (priv->port_state.link_state) {12751275- priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;12761272 netif_carrier_on(dev);12771273 en_dbg(LINK, priv, "Link Up\n");12781274 }···15591557 mutex_unlock(&mdev->state_lock);15601558}1561155915621562-static void mlx4_en_linkstate(struct work_struct *work)15601560+static void mlx4_en_linkstate(struct mlx4_en_priv *priv)15611561+{15621562+ struct mlx4_en_port_state *port_state = &priv->port_state;15631563+ struct mlx4_en_dev *mdev = priv->mdev;15641564+ struct net_device *dev = priv->dev;15651565+ bool up;15661566+15671567+ if (mlx4_en_QUERY_PORT(mdev, priv->port))15681568+ port_state->link_state = MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN;15691569+15701570+ up = port_state->link_state == MLX4_PORT_STATE_DEV_EVENT_PORT_UP;15711571+ if (up == netif_carrier_ok(dev))15721572+ netif_carrier_event(dev);15731573+ if (!up) {15741574+ en_info(priv, "Link Down\n");15751575+ netif_carrier_off(dev);15761576+ } else {15771577+ en_info(priv, "Link Up\n");15781578+ netif_carrier_on(dev);15791579+ }15801580+}15811581+15821582+static void mlx4_en_linkstate_work(struct work_struct *work)15631583{15641584 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,15651585 linkstate_task);15661586 struct mlx4_en_dev *mdev = priv->mdev;15671567- int linkstate = priv->link_state;1568158715691588 mutex_lock(&mdev->state_lock);15701570- /* If observable port state changed set carrier state and15711571- * report to system log */15721572- if (priv->last_link_state != linkstate) {15731573- if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {15741574- en_info(priv, "Link Down\n");15751575- netif_carrier_off(priv->dev);15761576- } else {15771577- en_info(priv, "Link Up\n");15781578- netif_carrier_on(priv->dev);15791579- }15801580- }15811581- priv->last_link_state = linkstate;15891589+ mlx4_en_linkstate(priv);15821590 mutex_unlock(&mdev->state_lock);15831591}15841592···20912079 mlx4_en_clear_stats(dev);2092208020932081 err = mlx4_en_start_port(dev);20942094- if (err)20822082+ if (err) {20952083 en_err(priv, "Failed starting port:%d\n", priv->port);20962096-20842084+ goto out;20852085+ }20862086+ mlx4_en_linkstate(priv);20972087out:20982088 mutex_unlock(&mdev->state_lock);20992089 return err;···31823168 spin_lock_init(&priv->stats_lock);31833169 INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);31843170 INIT_WORK(&priv->restart_task, mlx4_en_restart);31853185- INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);31713171+ INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate_work);31863172 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);31873173 INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);31883174#ifdef CONFIG_RFS_ACCEL
···20362036 }2037203720382038 new_params = priv->channels.params;20392039+ /* Don't allow enabling TX-port-TS if MQPRIO mode channel offload is20402040+ * active, since it defines explicitly which TC accepts the packet.20412041+ * This conflicts with TX-port-TS hijacking the PTP traffic to a specific20422042+ * HW TX-queue.20432043+ */20442044+ if (enable && new_params.mqprio.mode == TC_MQPRIO_MODE_CHANNEL) {20452045+ netdev_err(priv->netdev,20462046+ "%s: MQPRIO mode channel offload is active, cannot set the TX-port-TS\n",20472047+ __func__);20482048+ return -EINVAL;20492049+ }20392050 MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_TX_PORT_TS, enable);20402051 /* No need to verify SQ stop room as20412052 * ptpsq.txqsq.stop_room <= generic_sq->stop_room, and both
+130-48
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
···
 }
 
 static int mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc,
-                                struct tc_mqprio_qopt_offload *mqprio)
+                                struct netdev_tc_txq *tc_to_txq)
 {
     int tc, err;
···
     for (tc = 0; tc < ntc; tc++) {
         u16 count, offset;
 
-        /* For DCB mode, map netdev TCs to offset 0
-         * We have our own UP to TXQ mapping for QoS
-         */
-        count = mqprio ? mqprio->qopt.count[tc] : nch;
-        offset = mqprio ? mqprio->qopt.offset[tc] : 0;
+        count = tc_to_txq[tc].count;
+        offset = tc_to_txq[tc].offset;
         netdev_set_tc_queue(netdev, tc, count, offset);
     }
···
 
 static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
 {
+    struct netdev_tc_txq old_tc_to_txq[TC_MAX_QUEUE], *tc_to_txq;
     struct net_device *netdev = priv->netdev;
     int old_num_txqs, old_ntc;
     int num_rxqs, nch, ntc;
     int err;
+    int i;
 
     old_num_txqs = netdev->real_num_tx_queues;
     old_ntc = netdev->num_tc ? : 1;
+    for (i = 0; i < ARRAY_SIZE(old_tc_to_txq); i++)
+        old_tc_to_txq[i] = netdev->tc_to_txq[i];
 
     nch = priv->channels.params.num_channels;
-    ntc = mlx5e_get_dcb_num_tc(&priv->channels.params);
+    ntc = priv->channels.params.mqprio.num_tc;
     num_rxqs = nch * priv->profile->rq_groups;
+    tc_to_txq = priv->channels.params.mqprio.tc_to_txq;
 
-    err = mlx5e_netdev_set_tcs(netdev, nch, ntc, NULL);
+    err = mlx5e_netdev_set_tcs(netdev, nch, ntc, tc_to_txq);
     if (err)
         goto err_out;
     err = mlx5e_update_tx_netdev_queues(priv);
···
     WARN_ON_ONCE(netif_set_real_num_tx_queues(netdev, old_num_txqs));
 
 err_tcs:
-    mlx5e_netdev_set_tcs(netdev, old_num_txqs / old_ntc, old_ntc, NULL);
+    WARN_ON_ONCE(mlx5e_netdev_set_tcs(netdev, old_num_txqs / old_ntc, old_ntc,
+                                      old_tc_to_txq));
 err_out:
     return err;
 }
+
+static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_update_netdev_queues);
 
 static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
                                            struct mlx5e_params *params)
···
     return 0;
 }
 
+static void mlx5e_mqprio_build_default_tc_to_txq(struct netdev_tc_txq *tc_to_txq,
+                                                 int ntc, int nch)
+{
+    int tc;
+
+    memset(tc_to_txq, 0, sizeof(*tc_to_txq) * TC_MAX_QUEUE);
+
+    /* Map netdev TCs to offset 0.
+     * We have our own UP to TXQ mapping for DCB mode of QoS
+     */
+    for (tc = 0; tc < ntc; tc++) {
+        tc_to_txq[tc] = (struct netdev_tc_txq) {
+            .count = nch,
+            .offset = 0,
+        };
+    }
+}
+
+static void mlx5e_mqprio_build_tc_to_txq(struct netdev_tc_txq *tc_to_txq,
+                                         struct tc_mqprio_qopt *qopt)
+{
+    int tc;
+
+    for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
+        tc_to_txq[tc] = (struct netdev_tc_txq) {
+            .count = qopt->count[tc],
+            .offset = qopt->offset[tc],
+        };
+    }
+}
+
+static void mlx5e_params_mqprio_dcb_set(struct mlx5e_params *params, u8 num_tc)
+{
+    params->mqprio.mode = TC_MQPRIO_MODE_DCB;
+    params->mqprio.num_tc = num_tc;
+    mlx5e_mqprio_build_default_tc_to_txq(params->mqprio.tc_to_txq, num_tc,
+                                         params->num_channels);
+}
+
+static void mlx5e_params_mqprio_channel_set(struct mlx5e_params *params,
+                                            struct tc_mqprio_qopt *qopt)
+{
+    params->mqprio.mode = TC_MQPRIO_MODE_CHANNEL;
+    params->mqprio.num_tc = qopt->num_tc;
+    mlx5e_mqprio_build_tc_to_txq(params->mqprio.tc_to_txq, qopt);
+}
+
+static void mlx5e_params_mqprio_reset(struct mlx5e_params *params)
+{
+    mlx5e_params_mqprio_dcb_set(params, 1);
+}
+
 static int mlx5e_setup_tc_mqprio_dcb(struct mlx5e_priv *priv,
                                      struct tc_mqprio_qopt *mqprio)
 {
···
         return -EINVAL;
 
     new_params = priv->channels.params;
-    new_params.mqprio.mode = TC_MQPRIO_MODE_DCB;
-    new_params.mqprio.num_tc = tc ? tc : 1;
+    mlx5e_params_mqprio_dcb_set(&new_params, tc ? tc : 1);
 
     err = mlx5e_safe_switch_params(priv, &new_params,
                                    mlx5e_num_channels_changed_ctx, NULL, true);
···
                                        struct tc_mqprio_qopt_offload *mqprio)
 {
     struct net_device *netdev = priv->netdev;
+    struct mlx5e_ptp *ptp_channel;
     int agg_count = 0;
     int i;
+
+    ptp_channel = priv->channels.ptp;
+    if (ptp_channel && test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state)) {
+        netdev_err(netdev,
+                   "Cannot activate MQPRIO mode channel since it conflicts with TX port TS\n");
+        return -EINVAL;
+    }
 
     if (mqprio->qopt.offset[0] != 0 || mqprio->qopt.num_tc < 1 ||
         mqprio->qopt.num_tc > MLX5E_MAX_NUM_MQPRIO_CH_TC)
···
     return 0;
 }
 
-static int mlx5e_mqprio_channel_set_tcs_ctx(struct mlx5e_priv *priv, void *ctx)
-{
-    struct tc_mqprio_qopt_offload *mqprio = (struct tc_mqprio_qopt_offload *)ctx;
-    struct net_device *netdev = priv->netdev;
-    u8 num_tc;
-
-    if (priv->channels.params.mqprio.mode != TC_MQPRIO_MODE_CHANNEL)
-        return -EINVAL;
-
-    num_tc = priv->channels.params.mqprio.num_tc;
-    mlx5e_netdev_set_tcs(netdev, 0, num_tc, mqprio);
-
-    return 0;
-}
-
 static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
                                          struct tc_mqprio_qopt_offload *mqprio)
 {
+    mlx5e_fp_preactivate preactivate;
     struct mlx5e_params new_params;
+    bool nch_changed;
     int err;
 
     err = mlx5e_mqprio_channel_validate(priv, mqprio);
···
         return err;
 
     new_params = priv->channels.params;
-    new_params.mqprio.mode = TC_MQPRIO_MODE_CHANNEL;
-    new_params.mqprio.num_tc = mqprio->qopt.num_tc;
-    err = mlx5e_safe_switch_params(priv, &new_params,
-                                   mlx5e_mqprio_channel_set_tcs_ctx, mqprio, true);
+    mlx5e_params_mqprio_channel_set(&new_params, &mqprio->qopt);
 
-    return err;
+    nch_changed = mlx5e_get_dcb_num_tc(&priv->channels.params) > 1;
+    preactivate = nch_changed ? mlx5e_num_channels_changed_ctx :
+                                mlx5e_update_netdev_queues_ctx;
+    return mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true);
 }
 
 static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
···
 {
     int i;
 
-    for (i = 0; i < priv->max_nch; i++) {
+    for (i = 0; i < priv->stats_nch; i++) {
         struct mlx5e_channel_stats *channel_stats = &priv->channel_stats[i];
         struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq;
         struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
···
     struct mlx5_core_dev *mdev = priv->mdev;
     u8 rx_cq_period_mode;
 
-    priv->max_nch = mlx5e_calc_max_nch(priv, priv->profile);
-
     params->sw_mtu = mtu;
     params->hard_mtu = MLX5E_ETH_HARD_MTU;
     params->num_channels = min_t(unsigned int, MLX5E_MAX_NUM_CHANNELS / 2,
                                  priv->max_nch);
-    params->mqprio.num_tc = 1;
+    mlx5e_params_mqprio_reset(params);
 
     /* Set an initial non-zero value, so that mlx5e_select_queue won't
      * divide by zero if called before first activating channels.
···
     .rx_ptp_support = true,
 };
 
+static unsigned int
+mlx5e_calc_max_nch(struct mlx5_core_dev *mdev, struct net_device *netdev,
+                   const struct mlx5e_profile *profile)
+
+{
+    unsigned int max_nch, tmp;
+
+    /* core resources */
+    max_nch = mlx5e_get_max_num_channels(mdev);
+
+    /* netdev rx queues */
+    tmp = netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1);
+    max_nch = min_t(unsigned int, max_nch, tmp);
+
+    /* netdev tx queues */
+    tmp = netdev->num_tx_queues;
+    if (mlx5_qos_is_supported(mdev))
+        tmp -= mlx5e_qos_max_leaf_nodes(mdev);
+    if (MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn))
+        tmp -= profile->max_tc;
+    tmp = tmp / profile->max_tc;
+    max_nch = min_t(unsigned int, max_nch, tmp);
+
+    return max_nch;
+}
+
 /* mlx5e generic netdev management API (move to en_common.c) */
 int mlx5e_priv_init(struct mlx5e_priv *priv,
+                    const struct mlx5e_profile *profile,
                     struct net_device *netdev,
                     struct mlx5_core_dev *mdev)
 {
···
     priv->mdev = mdev;
     priv->netdev = netdev;
     priv->msglevel = MLX5E_MSG_LEVEL;
+    priv->max_nch = mlx5e_calc_max_nch(mdev, netdev, profile);
+    priv->stats_nch = priv->max_nch;
     priv->max_opened_tc = 1;
 
     if (!alloc_cpumask_var(&priv->scratchpad.cpumask, GFP_KERNEL))
···
 }
 
 struct net_device *
-mlx5e_create_netdev(struct mlx5_core_dev *mdev, unsigned int txqs, unsigned int rxqs)
+mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
+                    unsigned int txqs, unsigned int rxqs)
 {
     struct net_device *netdev;
     int err;
···
         return NULL;
     }
 
-    err = mlx5e_priv_init(netdev_priv(netdev), netdev, mdev);
+    err = mlx5e_priv_init(netdev_priv(netdev), profile, netdev, mdev);
     if (err) {
         mlx5_core_err(mdev, "mlx5e_priv_init failed, err=%d\n", err);
         goto err_free_netdev;
···
     clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
 
     /* max number of channels may have changed */
-    max_nch = mlx5e_get_max_num_channels(priv->mdev);
+    max_nch = mlx5e_calc_max_nch(priv->mdev, priv->netdev, profile);
     if (priv->channels.params.num_channels > max_nch) {
         mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch);
         /* Reducing the number of channels - RXFH has to be reset, and
···
          */
         priv->netdev->priv_flags &= ~IFF_RXFH_CONFIGURED;
         priv->channels.params.num_channels = max_nch;
+        if (priv->channels.params.mqprio.mode == TC_MQPRIO_MODE_CHANNEL) {
+            mlx5_core_warn(priv->mdev, "MLX5E: Disabling MQPRIO channel mode\n");
+            mlx5e_params_mqprio_reset(&priv->channels.params);
+        }
     }
+    if (max_nch != priv->max_nch) {
+        mlx5_core_warn(priv->mdev,
+                       "MLX5E: Updating max number of channels from %u to %u\n",
+                       priv->max_nch, max_nch);
+        priv->max_nch = max_nch;
+    }
+
     /* 1. Set the real number of queues in the kernel the first time.
      * 2. Set our default XPS cpumask.
      * 3. Build the RQT.
···
     struct mlx5e_priv *priv = netdev_priv(netdev);
     int err;
 
-    err = mlx5e_priv_init(priv, netdev, mdev);
+    err = mlx5e_priv_init(priv, new_profile, netdev, mdev);
     if (err) {
         mlx5_core_err(mdev, "mlx5e_priv_init failed, err=%d\n", err);
         return err;
···
 int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
                                 const struct mlx5e_profile *new_profile, void *new_ppriv)
 {
-    unsigned int new_max_nch = mlx5e_calc_max_nch(priv, new_profile);
     const struct mlx5e_profile *orig_profile = priv->profile;
     struct net_device *netdev = priv->netdev;
     struct mlx5_core_dev *mdev = priv->mdev;
     void *orig_ppriv = priv->ppriv;
     int err, rollback_err;
-
-    /* sanity */
-    if (new_max_nch != priv->max_nch) {
-        netdev_warn(netdev, "%s: Replacing profile with different max channels\n",
-                    __func__);
-        return -EINVAL;
-    }
 
     /* cleanup old profile */
     mlx5e_detach_netdev(priv);
···
     nch = mlx5e_get_max_num_channels(mdev);
     txqs = nch * profile->max_tc + ptp_txqs + qos_sqs;
     rxqs = nch * profile->rq_groups;
-    netdev = mlx5e_create_netdev(mdev, txqs, rxqs);
+    netdev = mlx5e_create_netdev(mdev, profile, txqs, rxqs);
     if (!netdev) {
         mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
         return -ENOMEM;
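To make the new mapping concrete: each netdev TC owns the txq range [offset, offset + count). The DCB helper above points every TC at offset 0 with count = nch (the driver keeps its own UP-to-TXQ mapping), while channel mode takes disjoint ranges straight from the mqprio qopt. A tiny user-space illustration with made-up numbers (nch = 4, two TCs), not part of the patch:

    #include <stdio.h>

    /* Miniature stand-in for struct netdev_tc_txq. */
    struct tc_txq { unsigned int count, offset; };

    int main(void)
    {
        struct tc_txq dcb[2]     = { {4, 0}, {4, 0} };  /* all TCs -> txq 0..3 */
        struct tc_txq channel[2] = { {4, 0}, {4, 4} };  /* disjoint ranges     */

        for (int tc = 0; tc < 2; tc++)
            printf("tc %d: dcb txq %u..%u, channel txq %u..%u\n", tc,
                   dcb[tc].offset, dcb[tc].offset + dcb[tc].count - 1,
                   channel[tc].offset,
                   channel[tc].offset + channel[tc].count - 1);
        return 0;
    }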
···
 #include "en.h"
 #include "en_accel/tls.h"
 #include "en_accel/en_accel.h"
+#include "en/ptp.h"
 
 static unsigned int stats_grps_num(struct mlx5e_priv *priv)
 {
···
 
     memset(s, 0, sizeof(*s));
 
-    for (i = 0; i < priv->max_nch; i++) {
+    for (i = 0; i < priv->stats_nch; i++) {
         struct mlx5e_channel_stats *channel_stats =
             &priv->channel_stats[i];
         int j;
···
     if (priv->rx_ptp_opened) {
         for (i = 0; i < NUM_PTP_RQ_STATS; i++)
             sprintf(data + (idx++) * ETH_GSTRING_LEN,
-                    ptp_rq_stats_desc[i].format);
+                    ptp_rq_stats_desc[i].format, MLX5E_PTP_CHANNEL_IX);
     }
     return idx;
 }
···
 
 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels)
 {
-    int max_nch = priv->max_nch;
+    int max_nch = priv->stats_nch;
 
     return (NUM_RQ_STATS * max_nch) +
            (NUM_CH_STATS * max_nch) +
···
 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels)
 {
     bool is_xsk = priv->xsk.ever_used;
-    int max_nch = priv->max_nch;
+    int max_nch = priv->stats_nch;
     int i, j, tc;
 
     for (i = 0; i < max_nch; i++)
···
 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
 {
     bool is_xsk = priv->xsk.ever_used;
-    int max_nch = priv->max_nch;
+    int max_nch = priv->stats_nch;
     int i, j, tc;
 
     for (i = 0; i < max_nch; i++)
···
 }
 
 struct ocelot_vcap_filter *
-ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block, int cookie,
-                                    bool tc_offload)
+ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block,
+                                    unsigned long cookie, bool tc_offload)
 {
     struct ocelot_vcap_filter *filter;
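The prototype change above matters because tc filter cookies are unsigned long in the kernel; on a 64-bit target an int parameter silently truncates them, so a lookup can miss (or match the wrong) filter. A minimal user-space sketch of the truncation, with a hypothetical cookie value:

    #include <stdio.h>

    int main(void)
    {
        unsigned long cookie = 0x100000001UL;  /* plausible 64-bit cookie */
        int truncated = (int)cookie;           /* the old parameter type  */

        /* On LP64 only the low 32 bits survive the conversion. */
        printf("%lx vs %x\n", cookie, (unsigned int)truncated);
        return 0;
    }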
+3-1
drivers/net/ethernet/pensando/ionic/ionic_lif.c
···
     if (err && err != -EEXIST) {
         /* set the state back to NEW so we can try again later */
         f = ionic_rx_filter_by_addr(lif, addr);
-        if (f && f->state == IONIC_FILTER_STATE_SYNCED)
+        if (f && f->state == IONIC_FILTER_STATE_SYNCED) {
             f->state = IONIC_FILTER_STATE_NEW;
+            set_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state);
+        }
 
     spin_unlock_bh(&lif->rx_filters.lock);
···
                                   &ionic_dbg_intr_stats_desc[i]);
         (*buf)++;
     }
-    for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++) {
-        **buf = IONIC_READ_STAT64(&txqcq->napi_stats,
-                                  &ionic_dbg_napi_stats_desc[i]);
-        (*buf)++;
-    }
-    for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++) {
-        **buf = txqcq->napi_stats.work_done_cntr[i];
-        (*buf)++;
-    }
     for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++) {
         **buf = txstats->sg_cntr[i];
         (*buf)++;
+8
drivers/net/ethernet/qlogic/qed/qed_iwarp.c
···
     prev_weight = weight;
 
     while (weight) {
+        /* If the HW device is during recovery, all resources are
+         * immediately reset without receiving a per-cid indication
+         * from HW. In this case we don't expect the cid_map to be
+         * cleared.
+         */
+        if (p_hwfn->cdev->recov_in_prog)
+            return 0;
+
         msleep(QED_IWARP_MAX_CID_CLEAN_TIME);
 
         weight = bitmap_weight(bmap->bitmap, bmap->max_count);
+8
drivers/net/ethernet/qlogic/qed/qed_roce.c
···
      * Beyond the added delay we clear the bitmap anyway.
      */
     while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) {
+        /* If the HW device is during recovery, all resources are
+         * immediately reset without receiving a per-cid indication
+         * from HW. In this case we don't expect the cid bitmap to be
+         * cleared.
+         */
+        if (p_hwfn->cdev->recov_in_prog)
+            return;
+
         msleep(100);
         if (wait_count++ > 20) {
             DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n");
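Both qed hunks above add the same early exit: once recovery is in progress the firmware resets all resources at once and will never deliver per-cid completions, so continuing to poll the bitmap only burns the full timeout. A small user-space sketch of this poll-with-abort pattern, with hypothetical names standing in for the driver state:

    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static bool recovery_in_progress;           /* stand-in for recov_in_prog */
    static unsigned int pending_resources = 5;  /* drained elsewhere          */

    /* Poll until the resources drain, but bail out as soon as recovery
     * starts: nothing per-resource will ever arrive after that point. */
    static int wait_for_resources(void)
    {
        int tries = 0;

        while (pending_resources) {
            if (recovery_in_progress)
                return 0;               /* nothing left to wait for    */
            usleep(100 * 1000);         /* mirrors the msleep(100)     */
            if (++tries > 20) {
                fprintf(stderr, "resource wait timed out\n");
                return -1;
            }
        }
        return 0;
    }

    int main(void)
    {
        recovery_in_progress = true;    /* simulate recovery kicking in */
        return wait_for_resources();
    }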
···
 config SUNVNET_COMMON
     tristate "Common routines to support Sun Virtual Networking"
     depends on SUN_LDOMS
+    depends on INET
     default m
 
 config SUNVNET
+1
drivers/net/hamradio/Kconfig
···
 config DMASCC
     tristate "High-speed (DMA) SCC driver for AX.25"
     depends on ISA && AX25 && BROKEN_ON_SMP && ISA_DMA_API
+    depends on VIRT_TO_BUS
     help
       This is a driver for high-speed SCC boards, i.e. those supporting
       DMA on one port. You usually use those boards to connect your
+5-1
drivers/net/mdio/mdio-ipq4019.c
···
 {
     struct ipq4019_mdio_data *priv;
     struct mii_bus *bus;
+    struct resource *res;
     int ret;
 
     bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*priv));
···
         return PTR_ERR(priv->mdio_clk);
 
     /* The platform resource is provided on the chipset IPQ5018 */
-    priv->eth_ldo_rdy = devm_platform_ioremap_resource(pdev, 1);
+    /* This resource is optional */
+    res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+    if (res)
+        priv->eth_ldo_rdy = devm_ioremap_resource(&pdev->dev, res);
 
     bus->name = "ipq4019_mdio";
     bus->read = ipq4019_mdio_read;
+10-5
drivers/net/mdio/mdio-mscc-miim.c
···
 
 static int mscc_miim_probe(struct platform_device *pdev)
 {
-    struct mii_bus *bus;
     struct mscc_miim_dev *dev;
+    struct resource *res;
+    struct mii_bus *bus;
     int ret;
 
     bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*dev));
···
         return PTR_ERR(dev->regs);
     }
 
-    dev->phy_regs = devm_platform_ioremap_resource(pdev, 1);
-    if (IS_ERR(dev->phy_regs)) {
-        dev_err(&pdev->dev, "Unable to map internal phy registers\n");
-        return PTR_ERR(dev->phy_regs);
+    /* This resource is optional */
+    res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+    if (res) {
+        dev->phy_regs = devm_ioremap_resource(&pdev->dev, res);
+        if (IS_ERR(dev->phy_regs)) {
+            dev_err(&pdev->dev, "Unable to map internal phy registers\n");
+            return PTR_ERR(dev->phy_regs);
+        }
     }
 
     ret = of_mdiobus_register(bus, pdev->dev.of_node);
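Both MDIO probe fixes above replace devm_platform_ioremap_resource(), which fails (and logs an error) when the region is absent, with an explicit platform_get_resource() check, so the second register window stays genuinely optional. A kernel-style sketch of the idiom, assuming only the standard platform-device APIs shown in the hunks; the helper name is hypothetical:

    /* Map a memory region only if the platform device provides it.
     * Returns NULL when absent (not an error); otherwise a valid
     * mapping or an ERR_PTR() the caller must check with IS_ERR(). */
    static void __iomem *map_optional_mem(struct platform_device *pdev,
                                          unsigned int index)
    {
        struct resource *res;

        res = platform_get_resource(pdev, IORESOURCE_MEM, index);
        if (!res)
            return NULL;

        return devm_ioremap_resource(&pdev->dev, res);
    }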
+1-5
drivers/net/mhi_net.c
···
     /* Start MHI channels */
     err = mhi_prepare_for_transfer(mhi_dev);
     if (err)
-        goto out_err;
+        return err;
 
     /* Number of transfer descriptors determines size of the queue */
     mhi_netdev->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
···
         return err;
 
     return 0;
-
-out_err:
-    free_netdev(ndev);
-    return err;
 }
 
 static void mhi_net_dellink(struct mhi_device *mhi_dev, struct net_device *ndev)
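Judging from the hunk, the mhi_net change removes a double free: by this point in the link-creation path the caller owns the net_device and releases it itself on failure, so the out_err label calling free_netdev() freed the same netdev twice. Returning the error directly leaves cleanup to the single owner.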
···
 {
     int ret;
 
+    ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL0);
+    if (ret < 0)
+        return ret;
+
     if (enable) {
     /* Enable EEE */
         ret = DW_VR_MII_EEE_LTX_EN | DW_VR_MII_EEE_LRX_EN |
···
               DW_VR_MII_EEE_TX_EN_CTRL | DW_VR_MII_EEE_RX_EN_CTRL |
               mult_fact_100ns << DW_VR_MII_EEE_MULT_FACT_100NS_SHIFT;
     } else {
-        ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL0);
-        if (ret < 0)
-            return ret;
         ret &= ~(DW_VR_MII_EEE_LTX_EN | DW_VR_MII_EEE_LRX_EN |
                  DW_VR_MII_EEE_TX_QUIET_EN | DW_VR_MII_EEE_RX_QUIET_EN |
                  DW_VR_MII_EEE_TX_EN_CTRL | DW_VR_MII_EEE_RX_EN_CTRL |
···
     if (ret < 0)
         return ret;
 
-    ret |= DW_VR_MII_EEE_TRN_LPI;
+    if (enable)
+        ret |= DW_VR_MII_EEE_TRN_LPI;
+    else
+        ret &= ~DW_VR_MII_EEE_TRN_LPI;
+
     return xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL1, ret);
 }
 EXPORT_SYMBOL_GPL(xpcs_config_eee);
 
 static int xpcs_config_aneg_c37_sgmii(struct dw_xpcs *xpcs, unsigned int mode)
 {
-    int ret;
+    int ret, mdio_ctrl;
 
     /* For AN for C37 SGMII mode, the settings are :-
-     * 1) VR_MII_AN_CTRL Bit(2:1)[PCS_MODE] = 10b (SGMII AN)
-     * 2) VR_MII_AN_CTRL Bit(3) [TX_CONFIG] = 0b (MAC side SGMII)
+     * 1) VR_MII_MMD_CTRL Bit(12) [AN_ENABLE] = 0b (Disable SGMII AN in case
+          it is already enabled)
+     * 2) VR_MII_AN_CTRL Bit(2:1)[PCS_MODE] = 10b (SGMII AN)
+     * 3) VR_MII_AN_CTRL Bit(3) [TX_CONFIG] = 0b (MAC side SGMII)
      *    DW xPCS used with DW EQoS MAC is always MAC side SGMII.
-     * 3) VR_MII_DIG_CTRL1 Bit(9) [MAC_AUTO_SW] = 1b (Automatic
+     * 4) VR_MII_DIG_CTRL1 Bit(9) [MAC_AUTO_SW] = 1b (Automatic
      *    speed/duplex mode change by HW after SGMII AN complete)
+     * 5) VR_MII_MMD_CTRL Bit(12) [AN_ENABLE] = 1b (Enable SGMII AN)
      *
      * Note: Since it is MAC side SGMII, there is no need to set
      * SR_MII_AN_ADV. MAC side SGMII receives AN Tx Config from
···
      * between PHY and Link Partner. There is also no need to
      * trigger AN restart for MAC-side SGMII.
      */
+    mdio_ctrl = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_CTRL);
+    if (mdio_ctrl < 0)
+        return mdio_ctrl;
+
+    if (mdio_ctrl & AN_CL37_EN) {
+        ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_CTRL,
+                         mdio_ctrl & ~AN_CL37_EN);
+        if (ret < 0)
+            return ret;
+    }
+
     ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_CTRL);
     if (ret < 0)
         return ret;
···
     else
         ret &= ~DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW;
 
-    return xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL1, ret);
+    ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL1, ret);
+    if (ret < 0)
+        return ret;
+
+    if (phylink_autoneg_inband(mode))
+        ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_CTRL,
+                         mdio_ctrl | AN_CL37_EN);
+
+    return ret;
 }
 
 static int xpcs_config_2500basex(struct dw_xpcs *xpcs)
+110-4
drivers/net/phy/bcm7xxx.c
···
 #define MII_BCM7XXX_SHD_2_ADDR_CTRL    0xe
 #define MII_BCM7XXX_SHD_2_CTRL_STAT    0xf
 #define MII_BCM7XXX_SHD_2_BIAS_TRIM    0x1a
+#define MII_BCM7XXX_SHD_3_PCS_CTRL     0x0
+#define MII_BCM7XXX_SHD_3_PCS_STATUS   0x1
+#define MII_BCM7XXX_SHD_3_EEE_CAP      0x2
 #define MII_BCM7XXX_SHD_3_AN_EEE_ADV   0x3
+#define MII_BCM7XXX_SHD_3_EEE_LP       0x4
+#define MII_BCM7XXX_SHD_3_EEE_WK_ERR   0x5
 #define MII_BCM7XXX_SHD_3_PCS_CTRL_2   0x6
 #define MII_BCM7XXX_PCS_CTRL_2_DEF     0x4400
 #define MII_BCM7XXX_SHD_3_AN_STAT      0xb
···
     return genphy_config_aneg(phydev);
 }
 
-static int phy_set_clr_bits(struct phy_device *dev, int location,
-                            int set_mask, int clr_mask)
+static int __phy_set_clr_bits(struct phy_device *dev, int location,
+                              int set_mask, int clr_mask)
 {
     int v, ret;
 
-    v = phy_read(dev, location);
+    v = __phy_read(dev, location);
     if (v < 0)
         return v;
 
     v &= ~clr_mask;
     v |= set_mask;
 
-    ret = phy_write(dev, location, v);
+    ret = __phy_write(dev, location, v);
     if (ret < 0)
         return ret;
 
     return v;
+}
+
+static int phy_set_clr_bits(struct phy_device *dev, int location,
+                            int set_mask, int clr_mask)
+{
+    int ret;
+
+    mutex_lock(&dev->mdio.bus->mdio_lock);
+    ret = __phy_set_clr_bits(dev, location, set_mask, clr_mask);
+    mutex_unlock(&dev->mdio.bus->mdio_lock);
+
+    return ret;
 }
 
 static int bcm7xxx_28nm_ephy_01_afe_config_init(struct phy_device *phydev)
···
         return ret;
 
     return bcm7xxx_28nm_ephy_apd_enable(phydev);
+}
+
+#define MII_BCM7XXX_REG_INVALID 0xff
+
+static u8 bcm7xxx_28nm_ephy_regnum_to_shd(u16 regnum)
+{
+    switch (regnum) {
+    case MDIO_CTRL1:
+        return MII_BCM7XXX_SHD_3_PCS_CTRL;
+    case MDIO_STAT1:
+        return MII_BCM7XXX_SHD_3_PCS_STATUS;
+    case MDIO_PCS_EEE_ABLE:
+        return MII_BCM7XXX_SHD_3_EEE_CAP;
+    case MDIO_AN_EEE_ADV:
+        return MII_BCM7XXX_SHD_3_AN_EEE_ADV;
+    case MDIO_AN_EEE_LPABLE:
+        return MII_BCM7XXX_SHD_3_EEE_LP;
+    case MDIO_PCS_EEE_WK_ERR:
+        return MII_BCM7XXX_SHD_3_EEE_WK_ERR;
+    default:
+        return MII_BCM7XXX_REG_INVALID;
+    }
+}
+
+static bool bcm7xxx_28nm_ephy_dev_valid(int devnum)
+{
+    return devnum == MDIO_MMD_AN || devnum == MDIO_MMD_PCS;
+}
+
+static int bcm7xxx_28nm_ephy_read_mmd(struct phy_device *phydev,
+                                      int devnum, u16 regnum)
+{
+    u8 shd = bcm7xxx_28nm_ephy_regnum_to_shd(regnum);
+    int ret;
+
+    if (!bcm7xxx_28nm_ephy_dev_valid(devnum) ||
+        shd == MII_BCM7XXX_REG_INVALID)
+        return -EOPNOTSUPP;
+
+    /* set shadow mode 2 */
+    ret = __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,
+                             MII_BCM7XXX_SHD_MODE_2, 0);
+    if (ret < 0)
+        return ret;
+
+    /* Access the desired shadow register address */
+    ret = __phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL, shd);
+    if (ret < 0)
+        goto reset_shadow_mode;
+
+    ret = __phy_read(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT);
+
+reset_shadow_mode:
+    /* reset shadow mode 2 */
+    __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0,
+                       MII_BCM7XXX_SHD_MODE_2);
+    return ret;
+}
+
+static int bcm7xxx_28nm_ephy_write_mmd(struct phy_device *phydev,
+                                       int devnum, u16 regnum, u16 val)
+{
+    u8 shd = bcm7xxx_28nm_ephy_regnum_to_shd(regnum);
+    int ret;
+
+    if (!bcm7xxx_28nm_ephy_dev_valid(devnum) ||
+        shd == MII_BCM7XXX_REG_INVALID)
+        return -EOPNOTSUPP;
+
+    /* set shadow mode 2 */
+    ret = __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,
+                             MII_BCM7XXX_SHD_MODE_2, 0);
+    if (ret < 0)
+        return ret;
+
+    /* Access the desired shadow register address */
+    ret = __phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL, shd);
+    if (ret < 0)
+        goto reset_shadow_mode;
+
+    /* Write the desired value in the shadow register */
+    __phy_write(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT, val);
+
+reset_shadow_mode:
+    /* reset shadow mode 2 */
+    return __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0,
+                              MII_BCM7XXX_SHD_MODE_2);
 }
 
 static int bcm7xxx_28nm_ephy_resume(struct phy_device *phydev)
···
     .get_stats      = bcm7xxx_28nm_get_phy_stats,       \
     .probe          = bcm7xxx_28nm_probe,               \
     .remove         = bcm7xxx_28nm_remove,              \
+    .read_mmd       = bcm7xxx_28nm_ephy_read_mmd,       \
+    .write_mmd      = bcm7xxx_28nm_ephy_write_mmd,      \
 }
 
 #define BCM7XXX_40NM_EPHY(_oui, _name) \
+11
drivers/net/phy/mdio_bus.c
···
         NULL == bus->read || NULL == bus->write)
         return -EINVAL;
 
+    if (bus->parent && bus->parent->of_node)
+        bus->parent->of_node->fwnode.flags |=
+            FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD;
+
     BUG_ON(bus->state != MDIOBUS_ALLOCATED &&
            bus->state != MDIOBUS_UNREGISTERED);
···
     bus->dev.class = &mdio_bus_class;
     bus->dev.groups = NULL;
     dev_set_name(&bus->dev, "%s", bus->id);
+
+    /* We need to set state to MDIOBUS_UNREGISTERED to correctly release
+     * the device in mdiobus_free()
+     *
+     * State will be updated later in this function in case of success
+     */
+    bus->state = MDIOBUS_UNREGISTERED;
 
     err = device_register(&bus->dev);
     if (err) {
+11
drivers/net/phy/mdio_device.c
···
     return 0;
 }
 
+static void mdio_shutdown(struct device *dev)
+{
+    struct mdio_device *mdiodev = to_mdio_device(dev);
+    struct device_driver *drv = mdiodev->dev.driver;
+    struct mdio_driver *mdiodrv = to_mdio_driver(drv);
+
+    if (mdiodrv->shutdown)
+        mdiodrv->shutdown(mdiodev);
+}
+
 /**
  * mdio_driver_register - register an mdio_driver with the MDIO layer
  * @drv: new mdio_driver to register
···
     mdiodrv->driver.bus = &mdio_bus_type;
     mdiodrv->driver.probe = mdio_probe;
     mdiodrv->driver.remove = mdio_remove;
+    mdiodrv->driver.shutdown = mdio_shutdown;
 
     retval = driver_register(&mdiodrv->driver);
     if (retval) {
···
              * the header's copy failed, and they are
              * sharing a slot, send an error
              */
-            if (i == 0 && sharedslot)
+            if (i == 0 && !first_shinfo && sharedslot)
                 xenvif_idx_release(queue, pending_idx,
                                    XEN_NETIF_RSP_ERROR);
             else
···
      */
     if (ctrl->ctrl.queue_count > 1) {
         nvme_stop_queues(&ctrl->ctrl);
+        nvme_sync_io_queues(&ctrl->ctrl);
         blk_mq_tagset_busy_iter(&ctrl->tag_set,
                                 nvme_fc_terminate_exchange, &ctrl->ctrl);
         blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
···
      * clean up the admin queue. Same thing as above.
      */
     blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+    blk_sync_queue(ctrl->ctrl.admin_q);
     blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
                             nvme_fc_terminate_exchange, &ctrl->ctrl);
     blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
···
     if (ctrl->ctrl.queue_count == 1)
         return 0;
 
+    if (prior_ioq_cnt != nr_io_queues) {
+        dev_info(ctrl->ctrl.device,
+                 "reconnect: revising io queue count from %d to %d\n",
+                 prior_ioq_cnt, nr_io_queues);
+        blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
+    }
+
     ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
     if (ret)
         goto out_free_io_queues;
···
     ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
     if (ret)
         goto out_delete_hw_queues;
-
-    if (prior_ioq_cnt != nr_io_queues) {
-        dev_info(ctrl->ctrl.device,
-                 "reconnect: revising io queue count from %d to %d\n",
-                 prior_ioq_cnt, nr_io_queues);
-        nvme_wait_freeze(&ctrl->ctrl);
-        blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
-        nvme_unfreeze(&ctrl->ctrl);
-    }
 
     return 0;
+6
drivers/nvme/host/nvme.h
···
      * 48 bits.
      */
     NVME_QUIRK_DMA_ADDRESS_BITS_48      = (1 << 16),
+
+    /*
+     * The controller requires the command_id value be be limited, so skip
+     * encoding the generation sequence number.
+     */
+    NVME_QUIRK_SKIP_CID_GEN             = (1 << 17),
 };
 
 /*
···
         cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
     data->ttag = pdu->ttag;
     data->command_id = nvme_cid(rq);
-    data->data_offset = cpu_to_le32(req->data_sent);
+    data->data_offset = pdu->r2t_offset;
     data->data_length = cpu_to_le32(req->pdu_len);
     return 0;
 }
···
             nvme_tcp_ddgst_update(queue->snd_hash, page,
                                   offset, ret);
 
-        /* fully successful last write*/
+        /*
+         * update the request iterator except for the last payload send
+         * in the request where we don't want to modify it as we may
+         * compete with the RX path completing the request.
+         */
+        if (req->data_sent + ret < req->data_len)
+            nvme_tcp_advance_req(req, ret);
+
+        /* fully successful last send in current PDU */
         if (last && ret == len) {
             if (queue->data_digest) {
                 nvme_tcp_ddgst_final(queue->snd_hash,
···
             }
             return 1;
         }
-        nvme_tcp_advance_req(req, ret);
     }
     return -EAGAIN;
 }
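The nvme-tcp reordering is subtle: once the final chunk of a request is handed to the socket, the RX path may complete and recycle the request at any moment, so the TX path must not touch it afterwards. Advancing the iterator before the last-send check, and only while more data remains, keeps the two paths from racing; for the same reason the R2T data_offset now comes from the immutable pdu->r2t_offset instead of the mutable req->data_sent.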
+1
drivers/nvmem/Kconfig
···
 
 config NVMEM_NINTENDO_OTP
     tristate "Nintendo Wii and Wii U OTP Support"
+    depends on WII || COMPILE_TEST
     help
       This is a driver exposing the OTP of a Nintendo Wii or Wii U console.
+1-1
drivers/pci/Kconfig
···
 
 config XEN_PCIDEV_FRONTEND
     tristate "Xen PCI Frontend"
-    depends on X86 && XEN
+    depends on XEN_PV
     select PCI_XEN
     select XEN_XENBUS_FRONTEND
     default y
+10-3
drivers/pci/controller/pci-hyperv.c
···
         return 0;
 
     if (!keep_devs) {
-        /* Delete any children which might still exist. */
+        struct list_head removed;
+
+        /* Move all present children to the list on stack */
+        INIT_LIST_HEAD(&removed);
         spin_lock_irqsave(&hbus->device_list_lock, flags);
-        list_for_each_entry_safe(hpdev, tmp, &hbus->children, list_entry) {
+        list_for_each_entry_safe(hpdev, tmp, &hbus->children, list_entry)
+            list_move_tail(&hpdev->list_entry, &removed);
+        spin_unlock_irqrestore(&hbus->device_list_lock, flags);
+
+        /* Remove all children in the list */
+        list_for_each_entry_safe(hpdev, tmp, &removed, list_entry) {
             list_del(&hpdev->list_entry);
             if (hpdev->pci_slot)
                 pci_destroy_slot(hpdev->pci_slot);
···
             put_pcichild(hpdev);
             put_pcichild(hpdev);
         }
-        spin_unlock_irqrestore(&hbus->device_list_lock, flags);
     }
 
     ret = hv_send_resources_released(hdev);
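The pci-hyperv change is a classic lock-scope fix: entries are detached onto a stack-local list while the spinlock is held, and the teardown work (which may sleep or take other locks) runs only after the lock is dropped. A self-contained user-space sketch of the same shape, with hypothetical names in place of the driver's types:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct child {                     /* stands in for the child devices */
        struct child *next;
        int id;
    };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct child *children;     /* list guarded by 'lock' */

    /* Detach the whole list under the lock, tear it down unlocked. */
    static void drain_children(void)
    {
        struct child *removed, *c;

        pthread_mutex_lock(&lock);
        removed = children;            /* move everything to a private list */
        children = NULL;
        pthread_mutex_unlock(&lock);

        while ((c = removed)) {        /* destroy outside the lock */
            removed = c->next;
            printf("destroying child %d\n", c->id);
            free(c);
        }
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++) {
            struct child *c = malloc(sizeof(*c));
            if (!c)
                return 1;
            c->id = i;
            c->next = children;
            children = c;
        }
        drain_children();
        return 0;
    }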
···
 
 /**
  * devm_pinctrl_unregister() - Resource managed version of pinctrl_unregister().
- * @dev: device for which which resource was allocated
+ * @dev: device for which resource was allocated
  * @pctldev: the pinctrl device to unregister.
  */
 void devm_pinctrl_unregister(struct device *dev, struct pinctrl_dev *pctldev)
+14-5
drivers/pinctrl/pinctrl-amd.c
···
     struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
     struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
     u32 wake_mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3);
+    int err;
 
     raw_spin_lock_irqsave(&gpio_dev->lock, flags);
     pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
···
 
     writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
     raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
+
+    if (on)
+        err = enable_irq_wake(gpio_dev->irq);
+    else
+        err = disable_irq_wake(gpio_dev->irq);
+
+    if (err)
+        dev_err(&gpio_dev->pdev->dev, "failed to %s wake-up interrupt\n",
+                on ? "enable" : "disable");
 
     return 0;
 }
···
 static int amd_gpio_probe(struct platform_device *pdev)
 {
     int ret = 0;
-    int irq_base;
     struct resource *res;
     struct amd_gpio *gpio_dev;
     struct gpio_irq_chip *girq;
···
     if (!gpio_dev->base)
         return -ENOMEM;
 
-    irq_base = platform_get_irq(pdev, 0);
-    if (irq_base < 0)
-        return irq_base;
+    gpio_dev->irq = platform_get_irq(pdev, 0);
+    if (gpio_dev->irq < 0)
+        return gpio_dev->irq;
 
 #ifdef CONFIG_PM_SLEEP
     gpio_dev->saved_regs = devm_kcalloc(&pdev->dev, amd_pinctrl_desc.npins,
···
         goto out2;
     }
 
-    ret = devm_request_irq(&pdev->dev, irq_base, amd_gpio_irq_handler,
+    ret = devm_request_irq(&pdev->dev, gpio_dev->irq, amd_gpio_irq_handler,
                            IRQF_SHARED, KBUILD_MODNAME, gpio_dev);
     if (ret)
         goto out2;
···
     return false;
 }
 
+static int rockchip_pinconf_defer_output(struct rockchip_pin_bank *bank,
+                                         unsigned int pin, u32 arg)
+{
+    struct rockchip_pin_output_deferred *cfg;
+
+    cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+    if (!cfg)
+        return -ENOMEM;
+
+    cfg->pin = pin;
+    cfg->arg = arg;
+
+    list_add_tail(&cfg->head, &bank->deferred_output);
+
+    return 0;
+}
+
 /* set the pin config settings for a specified pin */
 static int rockchip_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
                                 unsigned long *configs, unsigned num_configs)
···
                                       RK_FUNC_GPIO);
             if (rc != RK_FUNC_GPIO)
                 return -EINVAL;
+
+            /*
+             * Check for gpio driver not being probed yet.
+             * The lock makes sure that either gpio-probe has completed
+             * or the gpio driver hasn't probed yet.
+             */
+            mutex_lock(&bank->deferred_lock);
+            if (!gpio || !gpio->direction_output) {
+                rc = rockchip_pinconf_defer_output(bank, pin - bank->pin_base, arg);
+                mutex_unlock(&bank->deferred_lock);
+                if (rc)
+                    return rc;
+
+                break;
+            }
+            mutex_unlock(&bank->deferred_lock);
 
             rc = gpio->direction_output(gpio, pin - bank->pin_base,
                                         arg);
···
             rc = rockchip_get_mux(bank, pin - bank->pin_base);
             if (rc != RK_FUNC_GPIO)
                 return -EINVAL;
+
+            if (!gpio || !gpio->get) {
+                arg = 0;
+                break;
+            }
 
             rc = gpio->get(gpio, pin - bank->pin_base);
             if (rc < 0)
···
                     pin_bank->name, pin);
             pdesc++;
         }
+
+        INIT_LIST_HEAD(&pin_bank->deferred_output);
+        mutex_init(&pin_bank->deferred_lock);
     }
 
     ret = rockchip_pinctrl_parse_dt(pdev, info);
···
     if (ret) {
         dev_err(&pdev->dev, "failed to register gpio device\n");
         return ret;
+    }
+
+    return 0;
+}
+
+static int rockchip_pinctrl_remove(struct platform_device *pdev)
+{
+    struct rockchip_pinctrl *info = platform_get_drvdata(pdev);
+    struct rockchip_pin_bank *bank;
+    struct rockchip_pin_output_deferred *cfg;
+    int i;
+
+    of_platform_depopulate(&pdev->dev);
+
+    for (i = 0; i < info->ctrl->nr_banks; i++) {
+        bank = &info->ctrl->pin_banks[i];
+
+        mutex_lock(&bank->deferred_lock);
+        while (!list_empty(&bank->deferred_output)) {
+            cfg = list_first_entry(&bank->deferred_output,
+                                   struct rockchip_pin_output_deferred, head);
+            list_del(&cfg->head);
+            kfree(cfg);
+        }
+        mutex_unlock(&bank->deferred_lock);
     }
 
     return 0;
···
 
 static struct platform_driver rockchip_pinctrl_driver = {
     .probe      = rockchip_pinctrl_probe,
+    .remove     = rockchip_pinctrl_remove,
     .driver = {
         .name   = "rockchip-pinctrl",
         .pm = &rockchip_pinctrl_dev_pm_ops,
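Together with the header changes that follow, the rockchip hunks defer GPIO-output pinconf requests that arrive before the matching GPIO bank has probed: the (pin, arg) pair is queued on the bank's deferred_output list under deferred_lock and replayed once the gpio half binds, and the new remove() callback frees any entries that were never consumed. The mutex is what makes the hand-off safe: a request either sees a fully probed gpio_chip or lands on the list, never half of each.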
+10
drivers/pinctrl/pinctrl-rockchip.h
···
 * @toggle_edge_mode: bit mask to toggle (falling/rising) edge mode
 * @recalced_mask: bit mask to indicate a need to recalulate the mask
 * @route_mask: bits describing the routing pins of per bank
+ * @deferred_output: gpio output settings to be done after gpio bank probed
+ * @deferred_lock: mutex for the deferred_output shared btw gpio and pinctrl
 */
 struct rockchip_pin_bank {
     struct device               *dev;
···
     u32                         toggle_edge_mode;
     u32                         recalced_mask;
     u32                         route_mask;
+    struct list_head            deferred_output;
+    struct mutex                deferred_lock;
 };
 
 /**
···
     unsigned int                func;
     unsigned long               *configs;
     unsigned int                nconfigs;
+};
+
+struct rockchip_pin_output_deferred {
+    struct list_head head;
+    unsigned int pin;
+    u32 arg;
 };
 
 /**
···
 
 config DELL_WMI_PRIVACY
     bool "Dell WMI Hardware Privacy Support"
-    depends on DELL_WMI
-    depends on LEDS_TRIGGER_AUDIO
+    depends on LEDS_TRIGGER_AUDIO = y || DELL_WMI = LEDS_TRIGGER_AUDIO
     help
       This option adds integration with the "Dell Hardware Privacy"
       feature of Dell laptops to the dell-wmi driver.
+1
drivers/platform/x86/gigabyte-wmi.c
···
     DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE"),
     DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE V2"),
     DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 GAMING X V2"),
+    DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550I AORUS PRO AX"),
     DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M AORUS PRO-P"),
     DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M DS3H"),
     DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z390 I AORUS PRO WIFI-CF"),
+22-5
drivers/platform/x86/intel/hid.c
···
     { }
 };
 
+/*
+ * Some devices, even non convertible ones, can send incorrect SW_TABLET_MODE
+ * reports. Accept such reports only from devices in this list.
+ */
+static const struct dmi_system_id dmi_auto_add_switch[] = {
+    {
+        .matches = {
+            DMI_EXACT_MATCH(DMI_CHASSIS_TYPE, "31" /* Convertible */),
+        },
+    },
+    {
+        .matches = {
+            DMI_EXACT_MATCH(DMI_CHASSIS_TYPE, "32" /* Detachable */),
+        },
+    },
+    {} /* Array terminator */
+};
+
 struct intel_hid_priv {
     struct input_dev *input_dev;
     struct input_dev *array;
     struct input_dev *switches;
     bool wakeup_mode;
-    bool dual_accel;
+    bool auto_add_switch;
 };
 
 #define HID_EVENT_FILTER_UUID  "eeec56b3-4442-408f-a792-4edd4d758054"
···
      * Some convertible have unreliable VGBS return which could cause incorrect
      * SW_TABLET_MODE report, in these cases we enable support when receiving
      * the first event instead of during driver setup.
-     *
-     * See dual_accel_detect.h for more info on the dual_accel check.
      */
-    if (!priv->switches && !priv->dual_accel && (event == 0xcc || event == 0xcd)) {
+    if (!priv->switches && priv->auto_add_switch && (event == 0xcc || event == 0xcd)) {
         dev_info(&device->dev, "switch event received, enable switches supports\n");
         err = intel_hid_switches_setup(device);
         if (err)
···
         return -ENOMEM;
     dev_set_drvdata(&device->dev, priv);
 
-    priv->dual_accel = dual_accel_detect();
+    /* See dual_accel_detect.h for more info on the dual_accel check. */
+    priv->auto_add_switch = dmi_check_system(dmi_auto_add_switch) && !dual_accel_detect();
 
     err = intel_hid_input_setup(device);
     if (err) {
+1-2
drivers/platform/x86/intel/punit_ipc.c
···
  * which provide mailbox interface for power management usage.
  */
 
-#include <linux/acpi.h>
 #include <linux/bitops.h>
 #include <linux/delay.h>
 #include <linux/device.h>
···
     .remove = intel_punit_ipc_remove,
     .driver = {
         .name = "intel_punit_ipc",
-        .acpi_match_table = ACPI_PTR(punit_ipc_acpi_ids),
+        .acpi_match_table = punit_ipc_acpi_ids,
     },
 };
+1-1
drivers/platform/x86/lg-laptop.c
···
         goto out_platform_registered;
     }
     product = dmi_get_system_info(DMI_PRODUCT_NAME);
-    if (strlen(product) > 4)
+    if (product && strlen(product) > 4)
         switch (product[4]) {
         case '5':
         case '6':
···
     depends on I2C && MTD
     depends on SERIAL_8250
     depends on !S390
+    depends on COMMON_CLK
     select NET_DEVLINK
     help
       This driver adds support for an OpenCompute time card.
+2-7
drivers/ptp/ptp_kvm_x86.c
···
 #include <linux/ptp_clock_kernel.h>
 #include <linux/ptp_kvm.h>
 
-struct pvclock_vsyscall_time_info *hv_clock;
-
 static phys_addr_t clock_pair_gpa;
 static struct kvm_clock_pairing clock_pair;
···
         return -ENODEV;
 
     clock_pair_gpa = slow_virt_to_phys(&clock_pair);
-    hv_clock = pvclock_get_pvti_cpu0_va();
-    if (!hv_clock)
+    if (!pvclock_get_pvti_cpu0_va())
         return -ENODEV;
 
     ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, clock_pair_gpa,
···
     struct pvclock_vcpu_time_info *src;
     unsigned int version;
     long ret;
-    int cpu;
 
-    cpu = smp_processor_id();
-    src = &hv_clock[cpu].pvti;
+    src = this_cpu_pvti();
 
     do {
         /*
···
 
     if (strcmp("free", parm) == 0) {
         rc = blacklist_parse_parameters(buf, free, 0);
-        /* There could be subchannels without proper devices connected.
-         * evaluate all the entries
+        /*
+         * Evaluate the subchannels without an online device. This way,
+         * no path-verification will be triggered on those subchannels
+         * and it avoids unnecessary delays.
          */
-        css_schedule_eval_all();
+        css_schedule_eval_cond(CSS_EVAL_NOT_ONLINE, 0);
     } else if (strcmp("add", parm) == 0)
         rc = blacklist_parse_parameters(buf, add, 0);
     else if (strcmp("purge", parm) == 0)
+8-2
drivers/s390/cio/ccwgroup.c
···
 /**
  * ccwgroup_set_offline() - disable a ccwgroup device
  * @gdev: target ccwgroup device
+ * @call_gdrv: Call the registered gdrv set_offline function
  *
  * This function attempts to put the ccwgroup device into the offline state.
  * Returns:
  *  %0 on success and a negative error value on failure.
  */
-int ccwgroup_set_offline(struct ccwgroup_device *gdev)
+int ccwgroup_set_offline(struct ccwgroup_device *gdev, bool call_gdrv)
 {
     struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
     int ret = -EINVAL;
···
         return -EAGAIN;
     if (gdev->state == CCWGROUP_OFFLINE)
         goto out;
+    if (!call_gdrv) {
+        ret = 0;
+        goto offline;
+    }
     if (gdrv->set_offline)
         ret = gdrv->set_offline(gdev);
     if (ret)
         goto out;
 
+offline:
     gdev->state = CCWGROUP_OFFLINE;
 out:
     atomic_set(&gdev->onoff, 0);
···
     if (value == 1)
         ret = ccwgroup_set_online(gdev);
     else if (value == 0)
-        ret = ccwgroup_set_offline(gdev);
+        ret = ccwgroup_set_offline(gdev, true);
     else
         ret = -EINVAL;
 out:
+31-9
drivers/s390/cio/css.c
···
     return 0;
 }
 
-void css_schedule_eval_all_unreg(unsigned long delay)
+static int __unset_online(struct device *dev, void *data)
+{
+    struct idset *set = data;
+    struct subchannel *sch = to_subchannel(dev);
+    struct ccw_device *cdev = sch_get_cdev(sch);
+
+    if (cdev && cdev->online)
+        idset_sch_del(set, sch->schid);
+
+    return 0;
+}
+
+void css_schedule_eval_cond(enum css_eval_cond cond, unsigned long delay)
 {
     unsigned long flags;
-    struct idset *unreg_set;
+    struct idset *set;
 
     /* Find unregistered subchannels. */
-    unreg_set = idset_sch_new();
-    if (!unreg_set) {
+    set = idset_sch_new();
+    if (!set) {
         /* Fallback. */
         css_schedule_eval_all();
         return;
     }
-    idset_fill(unreg_set);
-    bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
+    idset_fill(set);
+    switch (cond) {
+    case CSS_EVAL_UNREG:
+        bus_for_each_dev(&css_bus_type, NULL, set, __unset_registered);
+        break;
+    case CSS_EVAL_NOT_ONLINE:
+        bus_for_each_dev(&css_bus_type, NULL, set, __unset_online);
+        break;
+    default:
+        break;
+    }
+
     /* Apply to slow_subchannel_set. */
     spin_lock_irqsave(&slow_subchannel_lock, flags);
-    idset_add_set(slow_subchannel_set, unreg_set);
+    idset_add_set(slow_subchannel_set, set);
     atomic_set(&css_eval_scheduled, 1);
     queue_delayed_work(cio_work_q, &slow_path_work, delay);
     spin_unlock_irqrestore(&slow_subchannel_lock, flags);
-    idset_free(unreg_set);
+    idset_free(set);
 }
 
 void css_wait_for_slow_path(void)
···
 void css_schedule_reprobe(void)
 {
     /* Schedule with a delay to allow merging of subsequent calls. */
-    css_schedule_eval_all_unreg(1 * HZ);
+    css_schedule_eval_cond(CSS_EVAL_UNREG, 1 * HZ);
 }
 EXPORT_SYMBOL_GPL(css_schedule_reprobe);
+9-1
drivers/s390/cio/css.h
···
 #define SNID_STATE3_MULTI_PATH     1
 #define SNID_STATE3_SINGLE_PATH    0
 
+/*
+ * Conditions used to specify which subchannels need evaluation
+ */
+enum css_eval_cond {
+    CSS_EVAL_UNREG,     /* unregistered subchannels */
+    CSS_EVAL_NOT_ONLINE /* sch without an online-device */
+};
+
 struct path_state {
     __u8  state1 : 2;   /* path state value 1 */
     __u8  state2 : 2;   /* path state value 2 */
···
 /* Helper functions to build lists for the slow path. */
 void css_schedule_eval(struct subchannel_id schid);
 void css_schedule_eval_all(void);
-void css_schedule_eval_all_unreg(unsigned long delay);
+void css_schedule_eval_cond(enum css_eval_cond, unsigned long delay);
 int css_complete_work(void);
 
 int sch_is_pseudo_sch(struct subchannel *);
···
 static int qeth_qdio_establish(struct qeth_card *);
 static void qeth_free_qdio_queues(struct qeth_card *card);
 
-static void qeth_close_dev_handler(struct work_struct *work)
-{
-    struct qeth_card *card;
-
-    card = container_of(work, struct qeth_card, close_dev_work);
-    QETH_CARD_TEXT(card, 2, "cldevhdl");
-    ccwgroup_set_offline(card->gdev);
-}
-
 static const char *qeth_get_cardname(struct qeth_card *card)
 {
     if (IS_VM_NIC(card)) {
···
     list_for_each_entry_safe(pool_entry, tmp,
                              &card->qdio.in_buf_pool.entry_list, list)
         list_del(&pool_entry->list);
+
+    if (!queue)
+        return;
 
     for (i = 0; i < ARRAY_SIZE(queue->bufs); i++)
         queue->bufs[i].pool_entry = NULL;
···
     case IPA_CMD_STOPLAN:
         if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
             dev_err(&card->gdev->dev,
-                    "Interface %s is down because the adjacent port is no longer in reflective relay mode\n",
+                    "Adjacent port of interface %s is no longer in reflective relay mode, trigger recovery\n",
                     netdev_name(card->dev));
-            schedule_work(&card->close_dev_work);
+            /* Set offline, then probably fail to set online: */
+            qeth_schedule_recovery(card);
         } else {
+            /* stay online for subsequent STARTLAN */
             dev_warn(&card->gdev->dev,
                      "The link for interface %s on CHPID 0x%X failed\n",
                      netdev_name(card->dev), card->info.chpid);
···
     INIT_LIST_HEAD(&card->ipato.entries);
     qeth_init_qdio_info(card);
     INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
-    INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
     hash_init(card->rx_mode_addrs);
     hash_init(card->local_addrs4);
     hash_init(card->local_addrs6);
···
         dev_info(&card->gdev->dev,
                  "Device successfully recovered!\n");
     } else {
-        ccwgroup_set_offline(card->gdev);
+        qeth_set_offline(card, disc, true);
+        ccwgroup_set_offline(card->gdev, false);
         dev_warn(&card->gdev->dev,
                  "The qeth device driver failed to recover an error on the device\n");
     }
-1
drivers/s390/net/qeth_l2_main.c
···
     if (gdev->state == CCWGROUP_ONLINE)
         qeth_set_offline(card, card->discipline, false);
 
-    cancel_work_sync(&card->close_dev_work);
     if (card->dev->reg_state == NETREG_REGISTERED) {
         priv = netdev_priv(card->dev);
         if (priv->brport_features & BR_LEARNING_SYNC) {
-1
drivers/s390/net/qeth_l3_main.c
···
     if (cgdev->state == CCWGROUP_ONLINE)
         qeth_set_offline(card, card->discipline, false);
 
-    cancel_work_sync(&card->close_dev_work);
     if (card->dev->reg_state == NETREG_REGISTERED)
         unregister_netdev(card->dev);
-11
drivers/scsi/arm/Kconfig
···
       This enables support for the Acorn SCSI card (aka30). If you have an
       Acorn system with one of these, say Y. If unsure, say N.
 
-config SCSI_ACORNSCSI_TAGGED_QUEUE
-    bool "Support SCSI 2 Tagged queueing"
-    depends on SCSI_ACORNSCSI_3
-    help
-      Say Y here to enable tagged queuing support on the Acorn SCSI card.
-
-      This is a feature of SCSI-2 which improves performance: the host
-      adapter can send several SCSI commands to a device's queue even if
-      previous commands haven't finished yet. Some SCSI devices don't
-      implement this properly, so the safe answer is N.
-
 config SCSI_ACORNSCSI_SYNC
     bool "Support SCSI 2 Synchronous Transfers"
     depends on SCSI_ACORNSCSI_3
+22-81
drivers/scsi/arm/acornscsi.c
···
 * You can tell if you have a device that supports tagged queueing my
 * cating (eg) /proc/scsi/acornscsi/0 and see if the SCSI revision is reported
 * as '2 TAG'.
- *
- * Also note that CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE is normally set in the config
- * scripts, but disabled here. Once debugged, remove the #undef, otherwise to debug,
- * comment out the undef.
 */
-#undef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
+
 /*
 * SCSI-II Synchronous transfer support.
 *
···
                               unsigned int result);
 static int acornscsi_reconnect_finish(AS_Host *host);
 static void acornscsi_dma_cleanup(AS_Host *host);
-static void acornscsi_abortcmd(AS_Host *host, unsigned char tag);
+static void acornscsi_abortcmd(AS_Host *host);
 
 /* ====================================================================================
 * Miscellaneous
···
 #endif
 
     if (from_queue) {
-#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
-        /*
-         * tagged queueing - allocate a new tag to this command
-         */
-        if (SCpnt->device->simple_tags) {
-            SCpnt->device->current_tag += 1;
-            if (SCpnt->device->current_tag == 0)
-                SCpnt->device->current_tag = 1;
-            SCpnt->tag = SCpnt->device->current_tag;
-        } else
-#endif
         set_bit(SCpnt->device->id * 8 +
                 (u8)(SCpnt->device->lun & 0x07), host->busyluns);
···
      * the device recognises the attention.
      */
     if (dmac_read(host, DMAC_STATUS) & STATUS_RQ0) {
-        acornscsi_abortcmd(host, host->SCpnt->tag);
+        acornscsi_abortcmd(host);
 
         dmac_write(host, DMAC_TXCNTLO, 0);
         dmac_write(host, DMAC_TXCNTHI, 0);
···
     acornscsi_sbic_issuecmd(host, CMND_ASSERTATN);
 
     switch (host->scsi.last_message) {
-#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
-    case HEAD_OF_QUEUE_TAG:
-    case ORDERED_QUEUE_TAG:
-    case SIMPLE_QUEUE_TAG:
-        /*
-         * ANSI standard says: (Section SCSI-2 Rev. 10c Sect 5.6.17)
-         * If a target does not implement tagged queuing and a queue tag
-         * message is received, it shall respond with a MESSAGE REJECT
-         * message and accept the I/O process as if it were untagged.
-         */
-        printk(KERN_NOTICE "scsi%d.%c: disabling tagged queueing\n",
-                host->host->host_no, acornscsi_target(host));
-        host->SCpnt->device->simple_tags = 0;
-        set_bit(host->SCpnt->device->id * 8 +
-                (u8)(host->SCpnt->device->lun & 0x7), host->busyluns);
-        break;
-#endif
     case EXTENDED_MESSAGE | (EXTENDED_SDTR << 8):
         /*
          * Target can't handle synchronous transfers
···
 #if 0
     /* does the device need the current command aborted */
     if (cmd_aborted) {
-        acornscsi_abortcmd(host->SCpnt->tag);
+        acornscsi_abortcmd(host);
         return;
     }
 #endif
 
-#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
-    if (host->SCpnt->tag) {
-        unsigned int tag_type;
-
-        if (host->SCpnt->cmnd[0] == REQUEST_SENSE ||
-            host->SCpnt->cmnd[0] == TEST_UNIT_READY ||
-            host->SCpnt->cmnd[0] == INQUIRY)
-            tag_type = HEAD_OF_QUEUE_TAG;
-        else
-            tag_type = SIMPLE_QUEUE_TAG;
-        msgqueue_addmsg(&host->scsi.msgs, 2, tag_type, host->SCpnt->tag);
-    }
-#endif
 
 #ifdef CONFIG_SCSI_ACORNSCSI_SYNC
     if (host->device[host->SCpnt->device->id].sync_state == SYNC_NEGOCIATE) {
···
            "to reconnect with\n",
             host->host->host_no, '0' + target);
     acornscsi_dumplog(host, target);
-    acornscsi_abortcmd(host, 0);
+    acornscsi_abortcmd(host);
     if (host->SCpnt) {
         queue_add_cmd_tail(&host->queues.disconnected, host->SCpnt);
         host->SCpnt = NULL;
···
     host->scsi.disconnectable = 0;
     if (host->SCpnt->device->id == host->scsi.reconnected.target &&
         host->SCpnt->device->lun == host->scsi.reconnected.lun &&
-        host->SCpnt->tag == host->scsi.reconnected.tag) {
+        scsi_cmd_to_tag(host->SCpnt) == host->scsi.reconnected.tag) {
 #if (DEBUG & (DEBUG_QUEUES|DEBUG_DISCON))
     DBG(host->SCpnt, printk("scsi%d.%c: reconnected",
             host->host->host_no, acornscsi_target(host)));
···
     }
 
     if (!host->SCpnt)
-        acornscsi_abortcmd(host, host->scsi.reconnected.tag);
+        acornscsi_abortcmd(host);
     else {
         /*
          * Restore data pointer from SAVED pointers.
···
 * Function: void acornscsi_abortcmd(AS_host *host, unsigned char tag)
 * Purpose : abort a currently executing command
 * Params : host - host with connected command to abort
- *          tag - tag to abort
 */
 static
-void acornscsi_abortcmd(AS_Host *host, unsigned char tag)
+void acornscsi_abortcmd(AS_Host *host)
 {
     host->scsi.phase = PHASE_ABORTED;
     sbic_arm_write(host, SBIC_CMND, CMND_ASSERTATN);
 
     msgqueue_flush(&host->scsi.msgs);
-#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
-    if (tag)
-        msgqueue_addmsg(&host->scsi.msgs, 2, ABORT_TAG, tag);
-    else
-#endif
-        msgqueue_addmsg(&host->scsi.msgs, 1, ABORT);
+    msgqueue_addmsg(&host->scsi.msgs, 1, ABORT);
 }
 
 /* ==========================================================================================
···
         printk(KERN_ERR "scsi%d.%c: PHASE_CONNECTING, SSR %02X?\n",
                 host->host->host_no, acornscsi_target(host), ssr);
         acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8);
-        acornscsi_abortcmd(host, host->SCpnt->tag);
+        acornscsi_abortcmd(host);
     }
     return INTR_PROCESSING;
···
         printk(KERN_ERR "scsi%d.%c: PHASE_CONNECTED, SSR %02X?\n",
                 host->host->host_no, acornscsi_target(host), ssr);
         acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8);
-        acornscsi_abortcmd(host, host->SCpnt->tag);
+        acornscsi_abortcmd(host);
     }
     return INTR_PROCESSING;
···
     case 0x18:                  /* -> PHASE_DATAOUT */
         /* COMMAND -> DATA OUT */
         if (host->scsi.SCp.sent_command != host->SCpnt->cmd_len)
-            acornscsi_abortcmd(host, host->SCpnt->tag);
+            acornscsi_abortcmd(host);
         acornscsi_dma_setup(host, DMA_OUT);
         if (!acornscsi_starttransfer(host))
-            acornscsi_abortcmd(host, host->SCpnt->tag);
+            acornscsi_abortcmd(host);
         host->scsi.phase = PHASE_DATAOUT;
         return INTR_IDLE;
 
     case 0x19:                  /* -> PHASE_DATAIN */
         /* COMMAND -> DATA IN */
         if (host->scsi.SCp.sent_command != host->SCpnt->cmd_len)
-            acornscsi_abortcmd(host, host->SCpnt->tag);
+            acornscsi_abortcmd(host);
         acornscsi_dma_setup(host, DMA_IN);
         if (!acornscsi_starttransfer(host))
-            acornscsi_abortcmd(host, host->SCpnt->tag);
+            acornscsi_abortcmd(host);
         host->scsi.phase = PHASE_DATAIN;
         return INTR_IDLE;
···
         /* MESSAGE IN -> DATA OUT */
         acornscsi_dma_setup(host, DMA_OUT);
         if (!acornscsi_starttransfer(host))
-            acornscsi_abortcmd(host, host->SCpnt->tag);
+            acornscsi_abortcmd(host);
         host->scsi.phase = PHASE_DATAOUT;
         return INTR_IDLE;
···
         /* MESSAGE IN -> DATA IN */
         acornscsi_dma_setup(host, DMA_IN);
         if (!acornscsi_starttransfer(host))
-            acornscsi_abortcmd(host, host->SCpnt->tag);
+            acornscsi_abortcmd(host);
         host->scsi.phase = PHASE_DATAIN;
         return INTR_IDLE;
···
     switch (ssr) {
     case 0x19:                  /* -> PHASE_DATAIN */
     case 0x89:                  /* -> PHASE_DATAIN */
-        acornscsi_abortcmd(host, host->SCpnt->tag);
+        acornscsi_abortcmd(host);
         return INTR_IDLE;
 
     case 0x1b:                  /* -> PHASE_STATUSIN */
···
     switch (ssr) {
     case 0x18:                  /* -> PHASE_DATAOUT */
     case 0x88:                  /* -> PHASE_DATAOUT */
-        acornscsi_abortcmd(host, host->SCpnt->tag);
+        acornscsi_abortcmd(host);
         return INTR_IDLE;
 
     case 0x1b:                  /* -> PHASE_STATUSIN */
···
     SCpnt->scsi_done = done;
     SCpnt->host_scribble = NULL;
     SCpnt->result = 0;
-    SCpnt->tag = 0;
     SCpnt->SCp.phase = (int)acornscsi_datadirection(SCpnt->cmnd[0]);
     SCpnt->SCp.sent_command = 0;
     SCpnt->SCp.scsi_xferred = 0;
···
         break;
 
     default:
-        acornscsi_abortcmd(host, host->SCpnt->tag);
+        acornscsi_abortcmd(host);
         res = res_snooze;
     }
     local_irq_restore(flags);
···
 #ifdef CONFIG_SCSI_ACORNSCSI_SYNC
     " SYNC"
 #endif
-#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
-    " TAG"
-#endif
 #if (DEBUG & DEBUG_NO_WRITE)
     " NOWRITE (" __stringify(NO_WRITE) ")"
 #endif
···
     seq_printf(m, "AcornSCSI driver v%d.%d.%d"
 #ifdef CONFIG_SCSI_ACORNSCSI_SYNC
     " SYNC"
-#endif
-#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
-    " TAG"
 #endif
 #if (DEBUG & DEBUG_NO_WRITE)
     " NOWRITE (" __stringify(NO_WRITE) ")"
···
     seq_printf(m, "Device/Lun TaggedQ      Sync\n");
     seq_printf(m, "     %d/%llu   ", scd->id, scd->lun);
     if (scd->tagged_supported)
-        seq_printf(m, "%3sabled(%3d) ",
-                   scd->simple_tags ? "en" : "dis",
-                   scd->current_tag);
+        seq_printf(m, "%3sabled ",
+                   scd->simple_tags ? "en" : "dis");
     else
         seq_printf(m, "unsupported ");
+8-23
drivers/scsi/arm/fas216.c
···
 * I was thinking that this was a good chip until I found this restriction ;(
 */
 #define SCSI2_SYNC
-#undef SCSI2_TAG
 
 #undef DEBUG_CONNECT
 #undef DEBUG_MESSAGES
···
     info->scsi.disconnectable = 0;
     if (info->SCpnt->device->id == target &&
         info->SCpnt->device->lun == lun &&
-        info->SCpnt->tag == tag) {
+        scsi_cmd_to_rq(info->SCpnt)->tag == tag) {
         fas216_log(info, LOG_CONNECT, "reconnected previously executing command");
     } else {
         queue_add_cmd_tail(&info->queues.disconnected, info->SCpnt);
···
     /*
      * add tag message if required
      */
-    if (SCpnt->tag)
-        msgqueue_addmsg(&info->scsi.msgs, 2, SIMPLE_QUEUE_TAG, SCpnt->tag);
+    if (SCpnt->device->simple_tags)
+        msgqueue_addmsg(&info->scsi.msgs, 2, SIMPLE_QUEUE_TAG,
+                        scsi_cmd_to_rq(SCpnt)->tag);
 
     do {
 #ifdef SCSI2_SYNC
···
 
 static void fas216_allocate_tag(FAS216_Info *info, struct scsi_cmnd *SCpnt)
 {
-#ifdef SCSI2_TAG
-    /*
-     * tagged queuing - allocate a new tag to this command
-     */
-    if (SCpnt->device->simple_tags && SCpnt->cmnd[0] != REQUEST_SENSE &&
-        SCpnt->cmnd[0] != INQUIRY) {
-        SCpnt->device->current_tag += 1;
-        if (SCpnt->device->current_tag == 0)
-            SCpnt->device->current_tag = 1;
-        SCpnt->tag = SCpnt->device->current_tag;
-    } else
-#endif
-        set_bit(SCpnt->device->id * 8 +
-                (u8)(SCpnt->device->lun & 0x7), info->busyluns);
+    set_bit(SCpnt->device->id * 8 +
+            (u8)(SCpnt->device->lun & 0x7), info->busyluns);
 
     info->stats.removes += 1;
     switch (SCpnt->cmnd[0]) {
···
     init_SCp(SCpnt);
     SCpnt->SCp.Message = 0;
     SCpnt->SCp.Status = 0;
-    SCpnt->tag = 0;
     SCpnt->host_scribble = (void *)fas216_rq_sns_done;
 
     /*
···
     init_SCp(SCpnt);
 
     info->stats.queues += 1;
-    SCpnt->tag = 0;
 
     spin_lock(&info->host_lock);
···
     dev = &info->device[scd->id];
     seq_printf(m, "     %d/%llu   ", scd->id, scd->lun);
     if (scd->tagged_supported)
-        seq_printf(m, "%3sabled(%3d) ",
-                   scd->simple_tags ? "en" : "dis",
-                   scd->current_tag);
+        seq_printf(m, "%3sabled ",
+                   scd->simple_tags ? "en" : "dis");
     else
         seq_puts(m, "unsupported ");
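Both ARM SCSI drivers above stop minting their own tag values: the per-device current_tag counters and the driver-local SCpnt->tag field are gone, and the hunks instead read the tag the block layer already assigned to the underlying request (the scsi_cmd_to_rq(SCpnt)->tag form in fas216). That tag is unique among outstanding commands by construction, which is exactly what the old hand-rolled counters were trying to guarantee.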
···928928 break;929929930930 case EFC_EVT_NPORT_TOPOLOGY_NOTIFY: {931931- enum efc_nport_topology topology =932932- (enum efc_nport_topology)arg;931931+ enum efc_nport_topology *topology = arg;933932934933 WARN_ON(node->nport->domain->attached);935934936935 WARN_ON(node->send_ls_acc != EFC_NODE_SEND_LS_ACC_PLOGI);937936938937 node_printf(node, "topology notification, topology=%d\n",939939- topology);938938+ *topology);940939941940 /* At the time the PLOGI was received, the topology was unknown,942941 * so we didn't know which node would perform the domain attach:943942 * 1. The node from which the PLOGI was sent (p2p) or944943 * 2. The node to which the FLOGI was sent (fabric).945944 */946946- if (topology == EFC_NPORT_TOPO_P2P) {945945+ if (*topology == EFC_NPORT_TOPO_P2P) {947946 /* if this is p2p, need to attach to the domain using948947 * the d_id from the PLOGI received949948 */
···40154015 be32_to_cpu(pcgd->desc_tag),40164016 be32_to_cpu(pcgd->desc_len),40174017 be32_to_cpu(pcgd->xmt_signal_capability),40184018- be32_to_cpu(pcgd->xmt_signal_frequency.count),40194019- be32_to_cpu(pcgd->xmt_signal_frequency.units),40184018+ be16_to_cpu(pcgd->xmt_signal_frequency.count),40194019+ be16_to_cpu(pcgd->xmt_signal_frequency.units),40204020 be32_to_cpu(pcgd->rcv_signal_capability),40214021- be32_to_cpu(pcgd->rcv_signal_frequency.count),40224022- be32_to_cpu(pcgd->rcv_signal_frequency.units));40214021+ be16_to_cpu(pcgd->rcv_signal_frequency.count),40224022+ be16_to_cpu(pcgd->rcv_signal_frequency.units));4023402340244024 /* Compare driver and Fport capabilities and choose40254025 * least common.···93879387 /* Extract the next WWPN from the payload */93889388 wwn = *wwnlist++;93899389 wwpn = be64_to_cpu(wwn);93909390- len += scnprintf(buf + len, LPFC_FPIN_WWPN_LINE_SZ,93909390+ len += scnprintf(buf + len, LPFC_FPIN_WWPN_LINE_SZ - len,93919391 " %016llx", wwpn);9392939293939393 /* Log a message if we are on the last WWPN
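The lpfc WWPN hunk fixes a classic bounded-append bug: when a line is built with repeated scnprintf() calls, the size argument must shrink by what was already written, otherwise later calls can write past the buffer. A runnable userspace illustration of the pattern (buffer name and sizes are made up; snprintf stands in for the kernel's scnprintf, which differs only in its return value when output is truncated):

    #include <stdio.h>

    #define LINE_SZ 64 /* arbitrary demo size */

    int main(void)
    {
            char buf[LINE_SZ];
            int len = 0;

            /* Append three fields; each call may only use the space left. */
            for (int i = 0; i < 3; i++)
                    len += snprintf(buf + len, sizeof(buf) - len,
                                    " %016llx", 0x1122334455667788ULL + i);
            printf("%s\n", buf);
            return 0;
    }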
···19161916 raid = MR_LdRaidGet(ld, local_map_ptr);1917191719181918 if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)19191919- blk_queue_update_dma_alignment(sdev->request_queue, 0x7);19191919+ blk_queue_update_dma_alignment(sdev->request_queue, 0x7);1920192019211921 mr_device_priv_data->is_tm_capable =19221922 raid->capability.tmCapable;···8033803380348034 if (instance->adapter_type != MFI_SERIES) {80358035 megasas_release_fusion(instance);80368036- pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +80368036+ pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +80378037 (sizeof(struct MR_PD_CFG_SEQ) *80388038 (MAX_PHYSICAL_DEVICES - 1));80398039 for (i = 0; i < 2 ; i++) {···8773877387748774 if (event_type & SCAN_VD_CHANNEL) {87758775 if (!instance->requestorId ||87768776- (instance->requestorId &&87778777- megasas_get_ld_vf_affiliation(instance, 0))) {87768776+ megasas_get_ld_vf_affiliation(instance, 0)) {87788777 dcmd_ret = megasas_ld_list_query(instance,87798778 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);87808779 if (dcmd_ret != DCMD_SUCCESS)
+3-1
drivers/scsi/mpt3sas/mpt3sas_base.c
···15821582 * wait for current poll to complete.15831583 */15841584 for (qid = 0; qid < iopoll_q_count; qid++) {15851585- while (atomic_read(&ioc->io_uring_poll_queues[qid].busy))15851585+ while (atomic_read(&ioc->io_uring_poll_queues[qid].busy)) {15861586+ cpu_relax();15861587 udelay(500);15881588+ }15871589 }15881590}15891591
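The mpt3sas hunk adds cpu_relax() to the loop that waits for io_uring poll queues to drain; it acts as a compiler barrier and a spin hint while udelay() provides the actual back-off. The shape of the polite busy-wait, as a sketch (kernel context assumed, 'busy' stands for the atomic_t being drained):

    /* Sketch: spin politely until the counter drops to zero. */
    static void demo_drain(atomic_t *busy)
    {
            while (atomic_read(busy)) {
                    cpu_relax();    /* compiler barrier / spin hint */
                    udelay(500);    /* give the poller time to finish */
            }
    }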
···71697169 return 0;71707170 break;71717171 case QLA2XXX_INI_MODE_DUAL:71727172- if (!qla_dual_mode_enabled(vha))71727172+ if (!qla_dual_mode_enabled(vha) &&71737173+ !qla_ini_mode_enabled(vha))71737174 return 0;71747175 break;71757176 case QLA2XXX_INI_MODE_ENABLED:
···154154155155 /*156156 * Report zone buffer size should be at most 64B times the number of157157- * zones requested plus the 64B reply header, but should be at least158158- * SECTOR_SIZE for ATA devices.157157+ * zones requested plus the 64B reply header, but should be aligned158158+ * to SECTOR_SIZE for ATA devices.159159 * Make sure that this size does not exceed the hardware capabilities.160160 * Furthermore, since the report zone command cannot be split, make161161 * sure that the allocated buffer can always be mapped by limiting the···174174 *buflen = bufsize;175175 return buf;176176 }177177- bufsize >>= 1;177177+ bufsize = rounddown(bufsize >> 1, SECTOR_SIZE);178178 }179179180180 return NULL;···280280{281281 struct scsi_disk *sdkp;282282 unsigned long flags;283283- unsigned int zno;283283+ sector_t zno;284284 int ret;285285286286 sdkp = container_of(work, struct scsi_disk, zone_wp_offset_work);
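In the sd_zbc hunk, the report-zones buffer is halved on allocation failure, but for ATA devices it must stay SECTOR_SIZE-aligned, hence the rounddown() after each halving. A runnable demo of the shrink loop; the starting size is arbitrary and the macro mirrors the kernel's rounddown() helper for positive values:

    #include <stdio.h>

    #define SECTOR_SIZE 512
    /* Round v down to a multiple of m (positive values). */
    #define rounddown(v, m) (((v) / (m)) * (m))

    int main(void)
    {
            size_t bufsize = 130560; /* arbitrary, not sector-aligned */

            while (bufsize >= SECTOR_SIZE) {
                    printf("try %zu\n", bufsize); /* every size is aligned */
                    /* allocation attempt would go here */
                    bufsize = rounddown(bufsize >> 1, SECTOR_SIZE);
            }
            return 0;
    }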
+19-5
drivers/scsi/ses.c
···8787 08888 };8989 unsigned char recv_page_code;9090+ unsigned int retries = SES_RETRIES;9191+ struct scsi_sense_hdr sshdr;90929191- ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,9292- NULL, SES_TIMEOUT, SES_RETRIES, NULL);9393+ do {9494+ ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,9595+ &sshdr, SES_TIMEOUT, 1, NULL);9696+ } while (ret > 0 && --retries && scsi_sense_valid(&sshdr) &&9797+ (sshdr.sense_key == NOT_READY ||9898+ (sshdr.sense_key == UNIT_ATTENTION && sshdr.asc == 0x29)));9999+93100 if (unlikely(ret))94101 return ret;95102···118111static int ses_send_diag(struct scsi_device *sdev, int page_code,119112 void *buf, int bufflen)120113{121121- u32 result;114114+ int result;122115123116 unsigned char cmd[] = {124117 SEND_DIAGNOSTIC,···128121 bufflen & 0xff,129122 0130123 };124124+ struct scsi_sense_hdr sshdr;125125+ unsigned int retries = SES_RETRIES;131126132132- result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, buf, bufflen,133133- NULL, SES_TIMEOUT, SES_RETRIES, NULL);127127+ do {128128+ result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, buf, bufflen,129129+ &sshdr, SES_TIMEOUT, 1, NULL);130130+ } while (result > 0 && --retries && scsi_sense_valid(&sshdr) &&131131+ (sshdr.sense_key == NOT_READY ||132132+ (sshdr.sense_key == UNIT_ATTENTION && sshdr.asc == 0x29)));133133+134134 if (result)135135 sdev_printk(KERN_ERR, sdev, "SEND DIAGNOSTIC result: %8x\n",136136 result);
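Both ses.c paths now issue the command once per scsi_execute_req() call and loop in the caller, retrying only on sense data that signals a transient condition: NOT READY, or UNIT ATTENTION with ASC 0x29 ("power on, reset, or bus device reset occurred"). A condensed sketch of that retry shape; do_cmd() is a hypothetical stand-in for the scsi_execute_req() call in the hunk:

    /* Sketch: one attempt per call, retry only transient sense data,
     * at most SES_RETRIES times. */
    static int demo_issue(struct scsi_device *sdev)
    {
            struct scsi_sense_hdr sshdr;
            unsigned int retries = SES_RETRIES;
            int ret;

            do {
                    ret = do_cmd(sdev, &sshdr);     /* hypothetical wrapper */
            } while (ret > 0 && --retries && scsi_sense_valid(&sshdr) &&
                     (sshdr.sense_key == NOT_READY ||
                      (sshdr.sense_key == UNIT_ATTENTION &&
                       sshdr.asc == 0x29)));

            return ret;
    }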
+1-1
drivers/scsi/sr_ioctl.c
···523523 return rc;524524 cd->readcd_known = 0;525525 sr_printk(KERN_INFO, cd,526526- "CDROM does'nt support READ CD (0xbe) command\n");526526+ "CDROM doesn't support READ CD (0xbe) command\n");527527 /* fall & retry the other way */528528 }529529 /* ... if this fails, we switch the blocksize using MODE SELECT */
···128128 return err;129129}130130131131+static int ufs_intel_set_lanes(struct ufs_hba *hba, u32 lanes)132132+{133133+ struct ufs_pa_layer_attr pwr_info = hba->pwr_info;134134+ int ret;135135+136136+ pwr_info.lane_rx = lanes;137137+ pwr_info.lane_tx = lanes;138138+ ret = ufshcd_config_pwr_mode(hba, &pwr_info);139139+ if (ret)140140+ dev_err(hba->dev, "%s: Setting %u lanes, err = %d\n",141141+ __func__, lanes, ret);142142+ return ret;143143+}144144+145145+static int ufs_intel_lkf_pwr_change_notify(struct ufs_hba *hba,146146+ enum ufs_notify_change_status status,147147+ struct ufs_pa_layer_attr *dev_max_params,148148+ struct ufs_pa_layer_attr *dev_req_params)149149+{150150+ int err = 0;151151+152152+ switch (status) {153153+ case PRE_CHANGE:154154+ if (ufshcd_is_hs_mode(dev_max_params) &&155155+ (hba->pwr_info.lane_rx != 2 || hba->pwr_info.lane_tx != 2))156156+ ufs_intel_set_lanes(hba, 2);157157+ memcpy(dev_req_params, dev_max_params, sizeof(*dev_req_params));158158+ break;159159+ case POST_CHANGE:160160+ if (ufshcd_is_hs_mode(dev_req_params)) {161161+ u32 peer_granularity;162162+163163+ usleep_range(1000, 1250);164164+ err = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),165165+ &peer_granularity);166166+ }167167+ break;168168+ default:169169+ break;170170+ }171171+172172+ return err;173173+}174174+175175+static int ufs_intel_lkf_apply_dev_quirks(struct ufs_hba *hba)176176+{177177+ u32 granularity, peer_granularity;178178+ u32 pa_tactivate, peer_pa_tactivate;179179+ int ret;180180+181181+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &granularity);182182+ if (ret)183183+ goto out;184184+185185+ ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &peer_granularity);186186+ if (ret)187187+ goto out;188188+189189+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);190190+ if (ret)191191+ goto out;192192+193193+ ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &peer_pa_tactivate);194194+ if (ret)195195+ goto out;196196+197197+ if (granularity == peer_granularity) {198198+ u32 new_peer_pa_tactivate = pa_tactivate + 2;199199+200200+ ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE), new_peer_pa_tactivate);201201+ }202202+out:203203+ return ret;204204+}205205+131206#define INTEL_ACTIVELTR 0x804132207#define INTEL_IDLELTR 0x808133208···426351 struct ufs_host *ufs_host;427352 int err;428353354354+ hba->nop_out_timeout = 200;429355 hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;430356 hba->caps |= UFSHCD_CAP_CRYPTO;431357 err = ufs_intel_common_init(hba);···457381 .exit = ufs_intel_common_exit,458382 .hce_enable_notify = ufs_intel_hce_enable_notify,459383 .link_startup_notify = ufs_intel_link_startup_notify,384384+ .pwr_change_notify = ufs_intel_lkf_pwr_change_notify,385385+ .apply_dev_quirks = ufs_intel_lkf_apply_dev_quirks,460386 .resume = ufs_intel_resume,461387 .device_reset = ufs_intel_device_reset,462388};
+58-61
drivers/scsi/ufs/ufshcd.c
···1717#include <linux/blk-pm.h>1818#include <linux/blkdev.h>1919#include <scsi/scsi_driver.h>2020-#include <scsi/scsi_transport.h>2121-#include "../scsi_transport_api.h"2220#include "ufshcd.h"2321#include "ufs_quirks.h"2422#include "unipro.h"···235237static irqreturn_t ufshcd_intr(int irq, void *__hba);236238static int ufshcd_change_power_mode(struct ufs_hba *hba,237239 struct ufs_pa_layer_attr *pwr_mode);240240+static void ufshcd_schedule_eh_work(struct ufs_hba *hba);238241static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);239242static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);240243static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,···318319static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,319320 enum ufs_trace_str_t str_t)320321{321321- int off = (int)tag - hba->nutrs;322322- struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[off];322322+ struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag];323323324324 if (!trace_ufshcd_upiu_enabled())325325 return;···27572759out:27582760 up_read(&hba->clk_scaling_lock);2759276127602760- if (ufs_trigger_eh())27612761- scsi_schedule_eh(hba->host);27622762+ if (ufs_trigger_eh()) {27632763+ unsigned long flags;27642764+27652765+ spin_lock_irqsave(hba->host->host_lock, flags);27662766+ ufshcd_schedule_eh_work(hba);27672767+ spin_unlock_irqrestore(hba->host->host_lock, flags);27682768+ }2762276927632770 return err;27642771}···39223919}39233920EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);3924392139253925-static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)39263926-{39273927- lockdep_assert_held(hba->host->host_lock);39283928-39293929- return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||39303930- (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));39313931-}39323932-39333933-static void ufshcd_schedule_eh(struct ufs_hba *hba)39343934-{39353935- bool schedule_eh = false;39363936- unsigned long flags;39373937-39383938- spin_lock_irqsave(hba->host->host_lock, flags);39393939- /* handle fatal errors only when link is not in error state */39403940- if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {39413941- if (hba->force_reset || ufshcd_is_link_broken(hba) ||39423942- ufshcd_is_saved_err_fatal(hba))39433943- hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;39443944- else39453945- hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;39463946- schedule_eh = true;39473947- }39483948- spin_unlock_irqrestore(hba->host->host_lock, flags);39493949-39503950- if (schedule_eh)39513951- scsi_schedule_eh(hba->host);39523952-}39533953-39543922/**39553923 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power39563924 * state) and waits for it to take effect.···39423968{39433969 DECLARE_COMPLETION_ONSTACK(uic_async_done);39443970 unsigned long flags;39453945- bool schedule_eh = false;39463971 u8 status;39473972 int ret;39483973 bool reenable_intr = false;···40114038 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);40124039 if (ret) {40134040 ufshcd_set_link_broken(hba);40144014- schedule_eh = true;40414041+ ufshcd_schedule_eh_work(hba);40154042 }40164016-40174043out_unlock:40184044 spin_unlock_irqrestore(hba->host->host_lock, flags);40194019-40204020- if (schedule_eh)40214021- ufshcd_schedule_eh(hba);40224045 mutex_unlock(&hba->uic_cmd_mutex);4023404640244047 return ret;···47454776 mutex_lock(&hba->dev_cmd.lock);47464777 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {47474778 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,47484748- 
NOP_OUT_TIMEOUT);47794779+ hba->nop_out_timeout);4749478047504781 if (!err || err == -ETIMEDOUT)47514782 break;···58805911 return err_handling;58815912}5882591359145914+/* host lock must be held before calling this func */59155915+static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)59165916+{59175917+ return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||59185918+ (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));59195919+}59205920+59215921+/* host lock must be held before calling this func */59225922+static inline void ufshcd_schedule_eh_work(struct ufs_hba *hba)59235923+{59245924+ /* handle fatal errors only when link is not in error state */59255925+ if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {59265926+ if (hba->force_reset || ufshcd_is_link_broken(hba) ||59275927+ ufshcd_is_saved_err_fatal(hba))59285928+ hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;59295929+ else59305930+ hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;59315931+ queue_work(hba->eh_wq, &hba->eh_work);59325932+ }59335933+}59345934+58835935static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)58845936{58855937 down_write(&hba->clk_scaling_lock);···6034604460356045/**60366046 * ufshcd_err_handler - handle UFS errors that require s/w attention60376037- * @host: SCSI host pointer60476047+ * @work: pointer to work structure60386048 */60396039-static void ufshcd_err_handler(struct Scsi_Host *host)60496049+static void ufshcd_err_handler(struct work_struct *work)60406050{60416041- struct ufs_hba *hba = shost_priv(host);60516051+ struct ufs_hba *hba;60426052 unsigned long flags;60436053 bool err_xfer = false;60446054 bool err_tm = false;···60466056 int tag;60476057 bool needs_reset = false, needs_restore = false;6048605860596059+ hba = container_of(work, struct ufs_hba, eh_work);60606060+60496061 down(&hba->host_sem);60506062 spin_lock_irqsave(hba->host->host_lock, flags);60516051- hba->host->host_eh_scheduled = 0;60526063 if (ufshcd_err_handling_should_stop(hba)) {60536064 if (hba->ufshcd_state != UFSHCD_STATE_ERROR)60546065 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;···63626371 "host_regs: ");63636372 ufshcd_print_pwr_info(hba);63646373 }63746374+ ufshcd_schedule_eh_work(hba);63656375 retval |= IRQ_HANDLED;63666376 }63676377 /*···63746382 hba->errors = 0;63756383 hba->uic_error = 0;63766384 spin_unlock(hba->host->host_lock);63776377-63786378- if (queue_eh_work)63796379- ufshcd_schedule_eh(hba);63806380-63816385 return retval;63826386}63836387···68646876 err = ufshcd_clear_cmd(hba, pos);68656877 if (err)68666878 break;68676867- __ufshcd_transfer_req_compl(hba, pos, /*retry_requests=*/true);68796879+ __ufshcd_transfer_req_compl(hba, 1U << pos, false);68686880 }68696881 }68706882···70367048 * will be to send LU reset which, again, is a spec violation.70377049 * To avoid these unnecessary/illegal steps, first we clean up70387050 * the lrb taken by this cmd and re-set it in outstanding_reqs,70397039- * then queue the error handler and bail.70517051+ * then queue the eh_work and bail.70407052 */70417053 if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {70427054 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);7043705570447056 spin_lock_irqsave(host->host_lock, flags);70457057 hba->force_reset = true;70587058+ ufshcd_schedule_eh_work(hba);70467059 spin_unlock_irqrestore(host->host_lock, flags);70477047-70487048- ufshcd_schedule_eh(hba);70497049-70507060 goto release;70517061 }70527062···7177719171787192 spin_lock_irqsave(hba->host->host_lock, flags);71797193 
hba->force_reset = true;71947194+ ufshcd_schedule_eh_work(hba);71807195 dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);71817196 spin_unlock_irqrestore(hba->host->host_lock, flags);7182719771837183- ufshcd_err_handler(hba->host);71987198+ flush_work(&hba->eh_work);7184719971857200 spin_lock_irqsave(hba->host->host_lock, flags);71867201 if (hba->ufshcd_state == UFSHCD_STATE_ERROR)···85918604 if (hba->is_powered) {85928605 ufshcd_exit_clk_scaling(hba);85938606 ufshcd_exit_clk_gating(hba);86078607+ if (hba->eh_wq)86088608+ destroy_workqueue(hba->eh_wq);85948609 ufs_debugfs_hba_exit(hba);85958610 ufshcd_variant_hba_exit(hba);85968611 ufshcd_setup_vreg(hba, false);···94379448 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));94389449}9439945094409440-static struct scsi_transport_template ufshcd_transport_template = {94419441- .eh_strategy_handler = ufshcd_err_handler,94429442-};94439443-94449451/**94459452 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)94469453 * @dev: pointer to device handle···94639478 err = -ENOMEM;94649479 goto out_error;94659480 }94669466- host->transportt = &ufshcd_transport_template;94679481 hba = shost_priv(host);94689482 hba->host = host;94699483 hba->dev = dev;94709484 hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;94859485+ hba->nop_out_timeout = NOP_OUT_TIMEOUT;94719486 INIT_LIST_HEAD(&hba->clk_list_head);94729487 spin_lock_init(&hba->outstanding_lock);94739488···95029517 int err;95039518 struct Scsi_Host *host = hba->host;95049519 struct device *dev = hba->dev;95209520+ char eh_wq_name[sizeof("ufs_eh_wq_00")];9505952195069522 if (!mmio_base) {95079523 dev_err(hba->dev,···9556957095579571 hba->max_pwr_info.is_valid = false;9558957295739573+ /* Initialize work queues */95749574+ snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",95759575+ hba->host->host_no);95769576+ hba->eh_wq = create_singlethread_workqueue(eh_wq_name);95779577+ if (!hba->eh_wq) {95789578+ dev_err(hba->dev, "%s: failed to create eh workqueue\n",95799579+ __func__);95809580+ err = -ENOMEM;95819581+ goto out_disable;95829582+ }95839583+ INIT_WORK(&hba->eh_work, ufshcd_err_handler);95599584 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);9560958595619586 sema_init(&hba->host_sem, 1);
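The ufshcd rework above replaces the SCSI midlayer error-handler hook (scsi_schedule_eh() via a transport template) with a driver-owned singlethread workqueue: the handler recovers its hba with container_of() on the work item, fault paths queue it under the host lock, and synchronous resets wait with flush_work(). The generic shape, as a sketch (struct and field names follow the diff, kernel context assumed):

    /* Sketch of the work-item error-handler pattern adopted above. */
    static void demo_err_handler(struct work_struct *work)
    {
            struct ufs_hba *hba = container_of(work, struct ufs_hba, eh_work);

            /* ... recover the controller; may sleep, off the I/O path ... */
    }

    static int demo_eh_init(struct ufs_hba *hba)
    {
            hba->eh_wq = create_singlethread_workqueue("demo_eh_wq");
            if (!hba->eh_wq)
                    return -ENOMEM;
            INIT_WORK(&hba->eh_work, demo_err_handler);
            return 0;
    }

    /* fault paths (host lock held): queue_work(hba->eh_wq, &hba->eh_work);
     * synchronous users then wait:  flush_work(&hba->eh_work); */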
+5
drivers/scsi/ufs/ufshcd.h
···741741 * @is_powered: flag to check if HBA is powered742742 * @shutting_down: flag to check if shutdown has been invoked743743 * @host_sem: semaphore used to serialize concurrent contexts744744+ * @eh_wq: Workqueue that eh_work works on745745+ * @eh_work: Worker to handle UFS errors that require s/w attention744746 * @eeh_work: Worker to handle exception events745747 * @errors: HBA errors746748 * @uic_error: UFS interconnect layer error status···845843 struct semaphore host_sem;846844847845 /* Work Queues */846846+ struct workqueue_struct *eh_wq;847847+ struct work_struct eh_work;848848 struct work_struct eeh_work;849849850850 /* HBA Errors */···862858 /* Device management request data */863859 struct ufs_dev_cmd dev_cmd;864860 ktime_t last_dme_cmd_tstamp;861861+ int nop_out_timeout;865862866863 /* Keeps information of the UFS device connected to this host */867864 struct ufs_dev_info dev_info;
···9898 if (ehdr->e_phnum < 2)9999 return ERR_PTR(-EINVAL);100100101101- if (phdrs[0].p_type == PT_LOAD || phdrs[1].p_type == PT_LOAD)101101+ if (phdrs[0].p_type == PT_LOAD)102102 return ERR_PTR(-EINVAL);103103104104 if ((phdrs[1].p_flags & QCOM_MDT_TYPE_MASK) != QCOM_MDT_TYPE_HASH)
+1-1
drivers/soc/qcom/socinfo.c
···628628 /* Feed the soc specific unique data into entropy pool */629629 add_device_randomness(info, item_size);630630631631- platform_set_drvdata(pdev, qs->soc_dev);631631+ platform_set_drvdata(pdev, qs);632632633633 return 0;634634}
+16-13
drivers/soc/ti/omap_prm.c
···825825 writel_relaxed(v, reset->prm->base + reset->prm->data->rstctrl);826826 spin_unlock_irqrestore(&reset->lock, flags);827827828828- if (!has_rstst)829829- goto exit;830830-831831- /* wait for the status to be set */828828+ /* wait for the reset bit to clear */832829 ret = readl_relaxed_poll_timeout_atomic(reset->prm->base +833833- reset->prm->data->rstst,834834- v, v & BIT(st_bit), 1,835835- OMAP_RESET_MAX_WAIT);830830+ reset->prm->data->rstctrl,831831+ v, !(v & BIT(id)), 1,832832+ OMAP_RESET_MAX_WAIT);836833 if (ret)837834 pr_err("%s: timedout waiting for %s:%lu\n", __func__,838835 reset->prm->data->name, id);839836840840-exit:841841- if (reset->clkdm) {842842- /* At least dra7 iva needs a delay before clkdm idle */843843- if (has_rstst)844844- udelay(1);845845- pdata->clkdm_allow_idle(reset->clkdm);837837+ /* wait for the status to be set */838838+ if (has_rstst) {839839+ ret = readl_relaxed_poll_timeout_atomic(reset->prm->base +840840+ reset->prm->data->rstst,841841+ v, v & BIT(st_bit), 1,842842+ OMAP_RESET_MAX_WAIT);843843+ if (ret)844844+ pr_err("%s: timedout waiting for %s:%lu\n", __func__,845845+ reset->prm->data->name, id);846846 }847847+848848+ if (reset->clkdm)849849+ pdata->clkdm_allow_idle(reset->clkdm);847850848851 return ret;849852}
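The omap_prm deassert path now polls two registers in order: first rstctrl until the reset bit clears, then (where a status register exists) rstst until the status bit sets, and only afterwards re-allows the clockdomain to idle. One such poll, as a sketch; readl_relaxed_poll_timeout_atomic() takes (addr, val, condition, delay_us, timeout_us):

    /* Sketch: wait up to OMAP_RESET_MAX_WAIT us for bit 'id' to clear. */
    static int demo_wait_deassert(void __iomem *base, u32 rstctrl_offs,
                                  unsigned long id)
    {
            u32 v;
            int ret;

            ret = readl_relaxed_poll_timeout_atomic(base + rstctrl_offs,
                                                    v, !(v & BIT(id)),
                                                    1, OMAP_RESET_MAX_WAIT);
            if (ret)
                    pr_err("timeout waiting for reset bit %lu to clear\n", id);
            return ret;
    }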
-8
drivers/spi/spi.c
···5858 const struct spi_device *spi = to_spi_device(dev);5959 int len;60606161- len = of_device_modalias(dev, buf, PAGE_SIZE);6262- if (len != -ENODEV)6363- return len;6464-6561 len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);6662 if (len != -ENODEV)6763 return len;···362366{363367 const struct spi_device *spi = to_spi_device(dev);364368 int rc;365365-366366- rc = of_device_uevent_modalias(dev, env);367367- if (rc != -ENODEV)368368- return rc;369369370370 rc = acpi_device_uevent_modalias(dev, env);371371 if (rc != -ENODEV)
···416416 USB_SC_UFI, USB_PR_DEVICE, NULL, US_FL_FIX_INQUIRY | US_FL_SINGLE_LUN),417417418418/*419419- * Reported by Ondrej Zary <linux@rainbow-software.org>419419+ * Reported by Ondrej Zary <linux@zary.sk>420420 * The device reports one sector more and breaks when that sector is accessed421421+ * Firmwares older than 2.6c (the latest one and the only that claims Linux422422+ * support) have also broken tag handling421423 */424424+UNUSUAL_DEV( 0x04ce, 0x0002, 0x0000, 0x026b,425425+ "ScanLogic",426426+ "SL11R-IDE",427427+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,428428+ US_FL_FIX_CAPACITY | US_FL_BULK_IGNORE_TAG),422429UNUSUAL_DEV( 0x04ce, 0x0002, 0x026c, 0x026c,423430 "ScanLogic",424431 "SL11R-IDE",
+1-1
drivers/usb/storage/unusual_uas.h
···5050 "LaCie",5151 "Rugged USB3-FW",5252 USB_SC_DEVICE, USB_PR_DEVICE, NULL,5353- US_FL_IGNORE_UAS),5353+ US_FL_NO_REPORT_OPCODES | US_FL_NO_SAME),54545555/*5656 * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI
+5
drivers/vdpa/mlx5/net/mlx5_vnet.c
···17141714 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);17151715 struct mlx5_vdpa_virtqueue *mvq;1716171617171717+ if (!mvdev->actual_features)17181718+ return;17191719+17171720 if (!is_index_valid(mvdev, idx))17181721 return;17191722···2148214521492146 for (i = 0; i < ndev->mvdev.max_vqs; i++)21502147 ndev->vqs[i].ready = false;21482148+21492149+ ndev->mvdev.cvq.ready = false;21512150}2152215121532152static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
+5-5
drivers/vdpa/vdpa_user/vduse_dev.c
···665665static int vduse_vdpa_reset(struct vdpa_device *vdpa)666666{667667 struct vduse_dev *dev = vdpa_to_vduse(vdpa);668668-669669- if (vduse_dev_set_status(dev, 0))670670- return -EIO;668668+ int ret = vduse_dev_set_status(dev, 0);671669672670 vduse_dev_reset(dev);673671674674- return 0;672672+ return ret;675673}676674677675static u32 vduse_vdpa_get_generation(struct vdpa_device *vdpa)···1591159315921594 vduse_irq_wq = alloc_workqueue("vduse-irq",15931595 WQ_HIGHPRI | WQ_SYSFS | WQ_UNBOUND, 0);15941594- if (!vduse_irq_wq)15961596+ if (!vduse_irq_wq) {15971597+ ret = -ENOMEM;15951598 goto err_wq;15991599+ }1596160015971601 ret = vduse_domain_init();15981602 if (ret)
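Two small vduse error-path fixes above: reset now always performs vduse_dev_reset() and propagates the status-write result instead of mapping any failure to -EIO, and the init path sets ret = -ENOMEM before jumping to the error label so a failed allocation cannot return a stale success code. A sketch of the latter pattern; demo_next_step() is hypothetical, alloc_workqueue() is the call used in the hunk:

    /* Sketch: every failure 'goto' must have an errno set first. */
    static int demo_init(void)
    {
            struct workqueue_struct *wq;
            int ret;

            wq = alloc_workqueue("demo", WQ_UNBOUND, 0);
            if (!wq) {
                    ret = -ENOMEM;          /* don't leak a stale 0 */
                    goto err;
            }
            ret = demo_next_step();         /* hypothetical */
            if (ret)
                    goto err_wq;
            return 0;

    err_wq:
            destroy_workqueue(wq);
    err:
            return ret;
    }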
···640640 u64 offset, map_size, map_iova = iova;641641 struct vdpa_map_file *map_file;642642 struct vm_area_struct *vma;643643- int ret;643643+ int ret = 0;644644645645 mmap_read_lock(dev->mm);646646
+6-1
drivers/virtio/virtio.c
···345345 ret = snprintf(compat, sizeof(compat), "virtio,device%x", dev->id.device);346346 BUG_ON(ret >= sizeof(compat));347347348348+ /*349349+ * On powerpc/pseries virtio devices are PCI devices so PCI350350+ * vendor/device ids play the role of the "compatible" property.351351+ * Simply don't init of_node in this case.352352+ */348353 if (!of_device_is_compatible(np, compat)) {349349- ret = -EINVAL;354354+ ret = 0;350355 goto out;351356 }352357
+1-1
drivers/watchdog/Kconfig
···1666166616671667config SIBYTE_WDOG16681668 tristate "Sibyte SoC hardware watchdog"16691669- depends on CPU_SB1 || (MIPS && COMPILE_TEST)16691669+ depends on CPU_SB116701670 help16711671 Watchdog driver for the built in watchdog hardware in Sibyte16721672 SoC processors. There are apparently two watchdog timers
···230230 /*231231 * Get IO TLB memory from any location.232232 */233233- start = memblock_alloc(PAGE_ALIGN(bytes), PAGE_SIZE);233233+ start = memblock_alloc(PAGE_ALIGN(bytes),234234+ IO_TLB_SEGSIZE << IO_TLB_SHIFT);234235 if (!start)235235- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",236236- __func__, PAGE_ALIGN(bytes), PAGE_SIZE);236236+ panic("%s: Failed to allocate %lu bytes\n",237237+ __func__, PAGE_ALIGN(bytes));237238238239 /*239240 * And replace that memory with pages under 4GB.
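The xen-swiotlb hunk aligns the bounce buffer to a full IO_TLB segment rather than just a page. With the usual IO_TLB_SHIFT of 11 (2 KiB slots) and IO_TLB_SEGSIZE of 128 slots, IO_TLB_SEGSIZE << IO_TLB_SHIFT comes to 256 KiB. A quick arithmetic check in runnable C (constants mirrored from <linux/swiotlb.h>):

    #include <stdio.h>

    #define IO_TLB_SHIFT   11   /* 2 KiB slots */
    #define IO_TLB_SEGSIZE 128

    int main(void)
    {
            unsigned long align =
                    (unsigned long)IO_TLB_SEGSIZE << IO_TLB_SHIFT;

            printf("swiotlb alignment = %lu KiB\n", align >> 10); /* 256 */
            return 0;
    }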
+4-4
fs/9p/cache.c
···2323 .version = 0,2424};25252626-/**2626+/*2727 * v9fs_random_cachetag - Generate a random tag to be associated2828 * with a new cache session.2929 *···233233 unlock_page(page);234234}235235236236-/**236236+/*237237 * __v9fs_readpage_from_fscache - read a page from cache238238 *239239 * Returns 0 if the pages are in cache and a BIO is submitted,···268268 }269269}270270271271-/**271271+/*272272 * __v9fs_readpages_from_fscache - read multiple pages from cache273273 *274274 * Returns 0 if the pages are in cache and a BIO is submitted,···308308 }309309}310310311311-/**311311+/*312312 * __v9fs_readpage_to_fscache - write a page to the cache313313 *314314 */
+7-7
fs/9p/fid.c
···1919#include "v9fs_vfs.h"2020#include "fid.h"21212222+static inline void __add_fid(struct dentry *dentry, struct p9_fid *fid)2323+{2424+ hlist_add_head(&fid->dlist, (struct hlist_head *)&dentry->d_fsdata);2525+}2626+2727+2228/**2329 * v9fs_fid_add - add a fid to a dentry2430 * @dentry: dentry that the fid is being added to2531 * @fid: fid to add2632 *2733 */2828-2929-static inline void __add_fid(struct dentry *dentry, struct p9_fid *fid)3030-{3131- hlist_add_head(&fid->dlist, (struct hlist_head *)&dentry->d_fsdata);3232-}3333-3434void v9fs_fid_add(struct dentry *dentry, struct p9_fid *fid)3535{3636 spin_lock(&dentry->d_lock);···67676868/**6969 * v9fs_open_fid_add - add an open fid to an inode7070- * @dentry: inode that the fid is being added to7070+ * @inode: inode that the fid is being added to7171 * @fid: fid to add7272 *7373 */
+3-5
fs/9p/v9fs.c
···155155/**156156 * v9fs_parse_options - parse mount options into session structure157157 * @v9ses: existing v9fs session information158158+ * @opts: The mount option string158159 *159160 * Return 0 upon success, -ERRNO upon failure.160161 */···543542static struct kobject *v9fs_kobj;544543545544#ifdef CONFIG_9P_FSCACHE546546-/**547547- * caches_show - list caches associated with a session548548- *549549- * Returns the size of buffer written.545545+/*546546+ * List caches associated with a session550547 */551551-552548static ssize_t caches_show(struct kobject *kobj,553549 struct kobj_attribute *attr,554550 char *buf)
+9-5
fs/9p/vfs_addr.c
···30303131/**3232 * v9fs_fid_readpage - read an entire page in from 9P3333- *3434- * @fid: fid being read3333+ * @data: Opaque pointer to the fid being read3534 * @page: structure to page3635 *3736 */···115116116117/**117118 * v9fs_release_page - release the private state associated with a page119119+ * @page: The page to be released120120+ * @gfp: The caller's allocation restrictions118121 *119122 * Returns 1 if the page can be released, false otherwise.120123 */···130129131130/**132131 * v9fs_invalidate_page - Invalidate a page completely or partially133133- *134134- * @page: structure to page135135- * @offset: offset in the page132132+ * @page: The page to be invalidated133133+ * @offset: offset of the invalidated region134134+ * @length: length of the invalidated region136135 */137136138137static void v9fs_invalidate_page(struct page *page, unsigned int offset,···200199201200/**202201 * v9fs_launder_page - Writeback a dirty page202202+ * @page: The page to be cleaned up203203+ *203204 * Returns 0 on success.204205 */205206···222219/**223220 * v9fs_direct_IO - 9P address space operation for direct I/O224221 * @iocb: target I/O control block222222+ * @iter: The data/buffer to use225223 *226224 * The presence of v9fs_direct_IO() in the address space ops vector227225 * allowes open() O_DIRECT flags which would have failed otherwise.
+12-21
fs/9p/vfs_file.c
···359359}360360361361/**362362- * v9fs_file_read - read from a file363363- * @filp: file pointer to read364364- * @udata: user data buffer to read data into365365- * @count: size of buffer366366- * @offset: offset at which to read data362362+ * v9fs_file_read_iter - read from a file363363+ * @iocb: The operation parameters364364+ * @to: The buffer to read into367365 *368366 */369369-370367static ssize_t371368v9fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)372369{···385388}386389387390/**388388- * v9fs_file_write - write to a file389389- * @filp: file pointer to write390390- * @data: data buffer to write data from391391- * @count: size of buffer392392- * @offset: offset at which to write data391391+ * v9fs_file_write_iter - write to a file392392+ * @iocb: The operation parameters393393+ * @from: The data to write393394 *394395 */395396static ssize_t···556561}557562558563/**559559- * v9fs_mmap_file_read - read from a file560560- * @filp: file pointer to read561561- * @data: user data buffer to read data into562562- * @count: size of buffer563563- * @offset: offset at which to read data564564+ * v9fs_mmap_file_read_iter - read from a file565565+ * @iocb: The operation parameters566566+ * @to: The buffer to read into564567 *565568 */566569static ssize_t···569576}570577571578/**572572- * v9fs_mmap_file_write - write to a file573573- * @filp: file pointer to write574574- * @data: data buffer to write data from575575- * @count: size of buffer576576- * @offset: offset at which to write data579579+ * v9fs_mmap_file_write_iter - write to a file580580+ * @iocb: The operation parameters581581+ * @from: The data to write577582 *578583 */579584static ssize_t
+16-8
fs/9p/vfs_inode.c
···218218219219/**220220 * v9fs_alloc_inode - helper function to allocate an inode221221- *221221+ * @sb: The superblock to allocate the inode from222222 */223223struct inode *v9fs_alloc_inode(struct super_block *sb)224224{···238238239239/**240240 * v9fs_free_inode - destroy an inode241241- *241241+ * @inode: The inode to be freed242242 */243243244244void v9fs_free_inode(struct inode *inode)···343343 * v9fs_get_inode - helper function to setup an inode344344 * @sb: superblock345345 * @mode: mode to setup inode with346346- *346346+ * @rdev: The device numbers to set347347 */348348349349struct inode *v9fs_get_inode(struct super_block *sb, umode_t mode, dev_t rdev)···369369}370370371371/**372372- * v9fs_clear_inode - release an inode372372+ * v9fs_evict_inode - Remove an inode from the inode cache373373 * @inode: inode to release374374 *375375 */···665665666666/**667667 * v9fs_vfs_create - VFS hook to create a regular file668668+ * @mnt_userns: The user namespace of the mount669669+ * @dir: The parent directory670670+ * @dentry: The name of file to be created671671+ * @mode: The UNIX file mode to set672672+ * @excl: True if the file must not yet exist668673 *669674 * open(.., O_CREAT) is handled in v9fs_vfs_atomic_open(). This is only called670675 * for mknod(2).671671- *672672- * @dir: directory inode that is being created673673- * @dentry: dentry that is being deleted674674- * @mode: create permissions675676 *676677 */677678···697696698697/**699698 * v9fs_vfs_mkdir - VFS mkdir hook to create a directory699699+ * @mnt_userns: The user namespace of the mount700700 * @dir: inode that is being unlinked701701 * @dentry: dentry that is being unlinked702702 * @mode: mode for new directory···902900903901/**904902 * v9fs_vfs_rename - VFS hook to rename an inode903903+ * @mnt_userns: The user namespace of the mount905904 * @old_dir: old dir inode906905 * @old_dentry: old dentry907906 * @new_dir: new dir inode908907 * @new_dentry: new dentry908908+ * @flags: RENAME_* flags909909 *910910 */911911···1013100910141010/**10151011 * v9fs_vfs_getattr - retrieve file metadata10121012+ * @mnt_userns: The user namespace of the mount10161013 * @path: Object to query10171014 * @stat: metadata structure to populate10181015 * @request_mask: Mask of STATX_xxx flags indicating the caller's interests···1055105010561051/**10571052 * v9fs_vfs_setattr - set file metadata10531053+ * @mnt_userns: The user namespace of the mount10581054 * @dentry: file whose metadata to set10591055 * @iattr: metadata assignment structure10601056 *···1291128512921286/**12931287 * v9fs_vfs_symlink - helper function to create symlinks12881288+ * @mnt_userns: The user namespace of the mount12941289 * @dir: directory inode containing symlink12951290 * @dentry: dentry for symlink12961291 * @symname: symlink data···1347134013481341/**13491342 * v9fs_vfs_mknod - create a special file13431343+ * @mnt_userns: The user namespace of the mount13501344 * @dir: inode destination for new link13511345 * @dentry: dentry for file13521346 * @mode: mode for creation
+9-2
fs/9p/vfs_inode_dotl.c
···3737 struct dentry *dentry, umode_t omode, dev_t rdev);38383939/**4040- * v9fs_get_fsgid_for_create - Helper function to get the gid for creating a4040+ * v9fs_get_fsgid_for_create - Helper function to get the gid for a new object4141+ * @dir_inode: The directory inode4242+ *4343+ * Helper function to get the gid for creating a4144 * new file system object. This checks the S_ISGID to determine the owning4245 * group of the new file system object.4346 */···214211215212/**216213 * v9fs_vfs_create_dotl - VFS hook to create files for 9P2000.L protocol.214214+ * @mnt_userns: The user namespace of the mount217215 * @dir: directory inode that is being created218216 * @dentry: dentry that is being deleted219217 * @omode: create permissions218218+ * @excl: True if the file must not yet exist220219 *221220 */222222-223221static int224222v9fs_vfs_create_dotl(struct user_namespace *mnt_userns, struct inode *dir,225223 struct dentry *dentry, umode_t omode, bool excl)···365361366362/**367363 * v9fs_vfs_mkdir_dotl - VFS mkdir hook to create a directory364364+ * @mnt_userns: The user namespace of the mount368365 * @dir: inode that is being unlinked369366 * @dentry: dentry that is being unlinked370367 * @omode: mode for new directory···542537543538/**544539 * v9fs_vfs_setattr_dotl - set file metadata540540+ * @mnt_userns: The user namespace of the mount545541 * @dentry: file whose metadata to set546542 * @iattr: metadata assignment structure547543 *···822816823817/**824818 * v9fs_vfs_mknod_dotl - create a special file819819+ * @mnt_userns: The user namespace of the mount825820 * @dir: inode destination for new link826821 * @dentry: dentry for file827822 * @omode: mode for creation
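Most of the fs/9p hunks above are kernel-doc repairs rather than behavioural changes: every @name: line must match a real parameter, a comment that is not kernel-doc must not open with /**, and renamed functions need their doc headers renamed too. The expected shape, shown for a hypothetical function:

    /**
     * v9fs_demo_op - one-line summary of a hypothetical operation
     * @mnt_userns: The user namespace of the mount
     * @dir: parent directory inode
     *
     * Longer description goes here; every parameter above must exist
     * in the function signature with exactly that name.
     */

    /* not kernel-doc: no @param lines, so open with a single asterisk */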
+2-2
fs/afs/dir_silly.c
···8686 return afs_do_sync_operation(op);8787}88888989-/**9090- * afs_sillyrename - Perform a silly-rename of a dentry8989+/*9090+ * Perform silly-rename of a dentry.9191 *9292 * AFS is stateless and the server doesn't know when the client is holding a9393 * file open. To prevent application problems when a file is unlinked while
···665665666666 if (!ordered) {667667 ordered = btrfs_lookup_ordered_extent(inode, offset);668668- BUG_ON(!ordered); /* Logic error */668668+ /*669669+ * The bio range is not covered by any ordered extent,670670+ * must be a code logic error.671671+ */672672+ if (unlikely(!ordered)) {673673+ WARN(1, KERN_WARNING674674+ "no ordered extent for root %llu ino %llu offset %llu\n",675675+ inode->root->root_key.objectid,676676+ btrfs_ino(inode), offset);677677+ kvfree(sums);678678+ return BLK_STS_IOERR;679679+ }669680 }670681671682 nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info,
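The btrfs csum hunk downgrades a BUG_ON() to a WARN() plus an error return: a missing ordered extent is still a logic error, but the bio can be failed with BLK_STS_IOERR instead of crashing the machine. The general shape, as a fragment mirroring the hunk:

    /* Sketch: fail the operation loudly instead of BUG()ing on a
     * "can't happen" condition. */
    if (unlikely(!ordered)) {
            WARN(1, KERN_WARNING
                 "no ordered extent for offset %llu\n", offset);
            kvfree(sums);           /* release what this path allocated */
            return BLK_STS_IOERR;   /* propagate as an I/O error */
    }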
+3-2
fs/btrfs/space-info.c
···414414{415415 lockdep_assert_held(&info->lock);416416417417- btrfs_info(fs_info, "space_info %llu has %llu free, is %sfull",417417+ /* The free space could be negative in case of overcommit */418418+ btrfs_info(fs_info, "space_info %llu has %lld free, is %sfull",418419 info->flags,419419- info->total_bytes - btrfs_space_info_used(info, true),420420+ (s64)(info->total_bytes - btrfs_space_info_used(info, true)),420421 info->full ? "" : "not ");421422 btrfs_info(fs_info,422423 "space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu zone_unusable=%llu",
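The space-info hunk exists because info->total_bytes minus btrfs_space_info_used() is u64 arithmetic: with overcommit the subtraction wraps, and %llu would print an enormous unsigned value where a negative free count is meant. A runnable demo of why the s64 cast plus %lld matters:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long total = 100, used = 130; /* overcommitted */

            printf("unsigned: %llu\n", total - used);   /* huge wraparound */
            printf("signed:   %lld\n", (long long)(total - used)); /* -30 */
            return 0;
    }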
+4-2
fs/btrfs/verity.c
···451451 */452452static int rollback_verity(struct btrfs_inode *inode)453453{454454- struct btrfs_trans_handle *trans;454454+ struct btrfs_trans_handle *trans = NULL;455455 struct btrfs_root *root = inode->root;456456 int ret;457457···473473 trans = btrfs_start_transaction(root, 2);474474 if (IS_ERR(trans)) {475475 ret = PTR_ERR(trans);476476+ trans = NULL;476477 btrfs_handle_fs_error(root->fs_info, ret,477478 "failed to start transaction in verity rollback %llu",478479 (u64)inode->vfs_inode.i_ino);···491490 btrfs_abort_transaction(trans, ret);492491 goto out;493492 }494494- btrfs_end_transaction(trans);495493out:494494+ if (trans)495495+ btrfs_end_transaction(trans);496496 return ret;497497}498498
+13
fs/btrfs/volumes.c
···11371137 atomic_set(&device->dev_stats_ccnt, 0);11381138 extent_io_tree_release(&device->alloc_state);1139113911401140+ /*11411141+ * Reset the flush error record. We might have a transient flush error11421142+ * in this mount, and if so we aborted the current transaction and set11431143+ * the fs to an error state, guaranteeing no super blocks can be further11441144+ * committed. However that error might be transient and if we unmount the11451145+ * filesystem and mount it again, we should allow the mount to succeed11461146+ * (btrfs_check_rw_degradable() should not fail) - if after mounting the11471147+ * filesystem again we still get flush errors, then we will again abort11481148+ * any transaction and set the error state, guaranteeing no commits of11491149+ * unsafe super blocks.11501150+ */11511151+ device->last_flush_error = 0;11521152+11401153 /* Verify the device is back in a pristine state */11411154 ASSERT(!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));11421155 ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
+6-2
fs/buffer.c
···14251425}14261426EXPORT_SYMBOL_GPL(invalidate_bh_lrus);1427142714281428-void invalidate_bh_lrus_cpu(int cpu)14281428+/*14291429+ * It's called from workqueue context so we need a bh_lru_lock to close14301430+ * the race with preemption/irq.14311431+ */14321432+void invalidate_bh_lrus_cpu(void)14291433{14301434 struct bh_lru *b;1431143514321436 bh_lru_lock();14331433- b = per_cpu_ptr(&bh_lrus, cpu);14371437+ b = this_cpu_ptr(&bh_lrus);14341438 __invalidate_bh_lrus(b);14351439 bh_lru_unlock();14361440}
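invalidate_bh_lrus_cpu() is now invoked on the CPU it targets (from workqueue context), so the cpu argument goes away and this_cpu_ptr() is used under bh_lru_lock to close the race with preemption/irq. The per-CPU access pattern, as a sketch using the names from the hunk:

    /* Sketch: per-CPU data touched from a handler pinned to that CPU. */
    static void demo_flush_this_cpu(void)
    {
            struct bh_lru *b;

            bh_lru_lock();              /* closes the preempt/irq window */
            b = this_cpu_ptr(&bh_lrus); /* safe: we cannot migrate here */
            __invalidate_bh_lrus(b);
            bh_lru_unlock();
    }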
+2-2
fs/ceph/caps.c
···22632263 list_for_each_entry(req, &ci->i_unsafe_dirops,22642264 r_unsafe_dir_item) {22652265 s = req->r_session;22662266- if (unlikely(s->s_mds > max)) {22662266+ if (unlikely(s->s_mds >= max)) {22672267 spin_unlock(&ci->i_unsafe_lock);22682268 goto retry;22692269 }···22772277 list_for_each_entry(req, &ci->i_unsafe_iops,22782278 r_unsafe_target_item) {22792279 s = req->r_session;22802280- if (unlikely(s->s_mds > max)) {22802280+ if (unlikely(s->s_mds >= max)) {22812281 spin_unlock(&ci->i_unsafe_lock);22822282 goto retry;22832283 }
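The ceph hunk is an off-by-one bounds check: s_mds indexes an array of max entries, so the valid range is 0..max-1, and the old `> max` test let s_mds == max slip one past the end. A trivial runnable demonstration:

    #include <stdio.h>

    int main(void)
    {
            int max = 4;    /* array holds indices 0..3 */
            int idx = 4;    /* one past the end */

            if (!(idx > max))
                    puts("old check '>':  index 4 slips through for max 4");
            if (idx >= max)
                    puts("new check '>=': index 4 correctly rejected");
            return 0;
    }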
+3-2
fs/cifs/connect.c
···23892389 spin_lock(&cifs_tcp_ses_lock);23902390 cifs_sb = CIFS_SB(sb);23912391 tlink = cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));23922392- if (IS_ERR(tlink)) {23922392+ if (tlink == NULL) {23932393+ /* can not match superblock if tlink were ever null */23932394 spin_unlock(&cifs_tcp_ses_lock);23942394- return rc;23952395+ return 0;23952396 }23962397 tcon = tlink_tcon(tlink);23972398 ses = tcon->ses;
···264264265265 /* Uid is not converted */266266 buffer->Uid = treeCon->ses->Suid;267267- buffer->Mid = get_next_mid(treeCon->ses->server);267267+ if (treeCon->ses->server)268268+ buffer->Mid = get_next_mid(treeCon->ses->server);268269 }269270 if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)270271 buffer->Flags2 |= SMBFLG2_DFS;···591590592591/**593592 * cifs_queue_oplock_break - queue the oplock break handler for cfile593593+ * @cfile: The file to break the oplock on594594 *595595 * This function is called from the demultiplex thread when it596596 * receives an oplock break for @cfile.···1067106510681066/**10691067 * cifs_alloc_hash - allocate hash and hash context together10681068+ * @name: The name of the crypto hash algo10691069+ * @shash: Where to put the pointer to the hash algo10701070+ * @sdesc: Where to put the pointer to the hash descriptor10701071 *10711072 * The caller has to make sure @sdesc is initialized to either NULL or10721073 * a valid context. Both can be freed via cifs_free_hash().···1108110311091104/**11101105 * cifs_free_hash - free hash and hash context together11061106+ * @shash: Where to find the pointer to the hash algo11071107+ * @sdesc: Where to find the pointer to the hash descriptor11111108 *11121109 * Freeing a NULL hash or context is safe.11131110 */···1125111811261119/**11271120 * rqst_page_get_length - obtain the length and offset for a page in smb_rqst11281128- * Input: rqst - a smb_rqst, page - a page index for rqst11291129- * Output: *len - the length for this page, *offset - the offset for this page11211121+ * @rqst: The request descriptor11221122+ * @page: The index of the page to query11231123+ * @len: Where to store the length for this page:11241124+ * @offset: Where to store the offset for this page11301125 */11311126void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page,11321127 unsigned int *len, unsigned int *offset)···1161115211621153/**11631154 * copy_path_name - copy src path to dst, possibly truncating11551155+ * @dst: The destination buffer11561156+ * @src: The source name11641157 *11651158 * returns number of bytes written (including trailing nul)11661159 */
+2-2
fs/cifs/smb2pdu.c
···23972397 buf->sd.OffsetDacl = cpu_to_le32(ptr - (__u8 *)&buf->sd);23982398 /* Ship the ACL for now. we will copy it into buf later. */23992399 aclptr = ptr;24002400- ptr += sizeof(struct cifs_acl);24002400+ ptr += sizeof(struct smb3_acl);2401240124022402 /* create one ACE to hold the mode embedded in reserved special SID */24032403 acelen = setup_special_mode_ACE((struct cifs_ace *)ptr, (__u64)mode);···24222422 acl.AclRevision = ACL_REVISION; /* See 2.4.4.1 of MS-DTYP */24232423 acl.AclSize = cpu_to_le16(acl_size);24242424 acl.AceCount = cpu_to_le16(ace_count);24252425- memcpy(aclptr, &acl, sizeof(struct cifs_acl));24252425+ memcpy(aclptr, &acl, sizeof(struct smb3_acl));2426242624272427 buf->ccontext.DataLength = cpu_to_le32(ptr - (__u8 *)&buf->sd);24282428 *len = roundup(ptr - (__u8 *)buf, 8);
+1-1
fs/debugfs/inode.c
···528528{529529 struct dentry *de = debugfs_create_file(name, mode, parent, data, fops);530530531531- if (de)531531+ if (!IS_ERR(de))532532 d_inode(de)->i_size = file_size;533533}534534EXPORT_SYMBOL_GPL(debugfs_create_file_size);
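debugfs_create_file() never returns NULL; on failure it returns an ERR_PTR() (for instance -ENODEV when debugfs is disabled), so the result must be tested with IS_ERR() before dereferencing, as the hunk does. The corrected check in isolation:

    /* Sketch: correct error check for an ERR_PTR-returning creator. */
    struct dentry *de = debugfs_create_file(name, mode, parent, data, fops);

    if (!IS_ERR(de))        /* a NULL test would wrongly pass ERR_PTRs */
            d_inode(de)->i_size = file_size;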
+1-1
fs/erofs/inode.c
···176176 }177177178178 if (vi->datalayout == EROFS_INODE_CHUNK_BASED) {179179- if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_ALL)) {179179+ if (vi->chunkformat & ~EROFS_CHUNK_FORMAT_ALL) {180180 erofs_err(inode->i_sb,181181 "unsupported chunk format %x of nid %llu",182182 vi->chunkformat, vi->nid);
···59165916}5917591759185918/* Check if *cur is a hole and if it is, skip it */59195919-static void skip_hole(struct inode *inode, ext4_lblk_t *cur)59195919+static int skip_hole(struct inode *inode, ext4_lblk_t *cur)59205920{59215921 int ret;59225922 struct ext4_map_blocks map;···59255925 map.m_len = ((inode->i_size) >> inode->i_sb->s_blocksize_bits) - *cur;5926592659275927 ret = ext4_map_blocks(NULL, inode, &map, 0);59285928+ if (ret < 0)59295929+ return ret;59285930 if (ret != 0)59295929- return;59315931+ return 0;59305932 *cur = *cur + map.m_len;59335933+ return 0;59315934}5932593559335936/* Count number of blocks used by this inode and update i_blocks */···59795976 * iblocks by total number of differences found.59805977 */59815978 cur = 0;59825982- skip_hole(inode, &cur);59795979+ ret = skip_hole(inode, &cur);59805980+ if (ret < 0)59815981+ goto out;59835982 path = ext4_find_extent(inode, cur, NULL, 0);59845983 if (IS_ERR(path))59855984 goto out;···60005995 }60015996 cur = max(cur + 1, le32_to_cpu(ex->ee_block) +60025997 ext4_ext_get_actual_len(ex));60036003- skip_hole(inode, &cur);60046004-59985998+ ret = skip_hole(inode, &cur);59995999+ if (ret < 0) {60006000+ ext4_ext_drop_refs(path);60016001+ kfree(path);60026002+ break;60036003+ }60056004 path2 = ext4_find_extent(inode, cur, NULL, 0);60066005 if (IS_ERR(path2)) {60076006 ext4_ext_drop_refs(path);
+6
fs/ext4/fast_commit.c
···892892 sizeof(lrange), (u8 *)&lrange, crc))893893 return -ENOSPC;894894 } else {895895+ unsigned int max = (map.m_flags & EXT4_MAP_UNWRITTEN) ?896896+ EXT_UNWRITTEN_MAX_LEN : EXT_INIT_MAX_LEN;897897+898898+ /* Limit the number of blocks in one extent */899899+ map.m_len = min(max, map.m_len);900900+895901 fc_ext.fc_ino = cpu_to_le32(inode->i_ino);896902 ex = (struct ext4_extent *)&fc_ext.fc_ex;897903 ex->ee_block = cpu_to_le32(map.m_lblk);
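The fast-commit hunk clamps map.m_len because an on-disk ext4 extent encodes its length in 15 bits: at most EXT_INIT_MAX_LEN (32768) blocks, or EXT_UNWRITTEN_MAX_LEN (32767) when the unwritten bit also lives in that field. A quick check of the clamp with the constants mirrored from fs/ext4/ext4_extents.h:

    #include <stdio.h>

    #define EXT_INIT_MAX_LEN      (1U << 15)              /* 32768 */
    #define EXT_UNWRITTEN_MAX_LEN (EXT_INIT_MAX_LEN - 1)  /* 32767 */

    int main(void)
    {
            unsigned int m_len = 100000, unwritten = 1;
            unsigned int max = unwritten ? EXT_UNWRITTEN_MAX_LEN
                                         : EXT_INIT_MAX_LEN;

            if (m_len > max)
                    m_len = max;    /* same effect as min(max, map.m_len) */
            printf("clamped length: %u\n", m_len);  /* 32767 */
            return 0;
    }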
+85-65
fs/ext4/inline.c
···77#include <linux/iomap.h>88#include <linux/fiemap.h>99#include <linux/iversion.h>1010+#include <linux/backing-dev.h>10111112#include "ext4_jbd2.h"1213#include "ext4.h"···734733int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,735734 unsigned copied, struct page *page)736735{737737- int ret, no_expand;736736+ handle_t *handle = ext4_journal_current_handle();737737+ int no_expand;738738 void *kaddr;739739 struct ext4_iloc iloc;740740+ int ret = 0, ret2;740741741741- if (unlikely(copied < len)) {742742- if (!PageUptodate(page)) {743743- copied = 0;742742+ if (unlikely(copied < len) && !PageUptodate(page))743743+ copied = 0;744744+745745+ if (likely(copied)) {746746+ ret = ext4_get_inode_loc(inode, &iloc);747747+ if (ret) {748748+ unlock_page(page);749749+ put_page(page);750750+ ext4_std_error(inode->i_sb, ret);744751 goto out;745752 }746746- }753753+ ext4_write_lock_xattr(inode, &no_expand);754754+ BUG_ON(!ext4_has_inline_data(inode));747755748748- ret = ext4_get_inode_loc(inode, &iloc);749749- if (ret) {750750- ext4_std_error(inode->i_sb, ret);751751- copied = 0;752752- goto out;753753- }756756+ /*757757+ * ei->i_inline_off may have changed since758758+ * ext4_write_begin() called759759+ * ext4_try_to_write_inline_data()760760+ */761761+ (void) ext4_find_inline_data_nolock(inode);754762755755- ext4_write_lock_xattr(inode, &no_expand);756756- BUG_ON(!ext4_has_inline_data(inode));763763+ kaddr = kmap_atomic(page);764764+ ext4_write_inline_data(inode, &iloc, kaddr, pos, copied);765765+ kunmap_atomic(kaddr);766766+ SetPageUptodate(page);767767+ /* clear page dirty so that writepages wouldn't work for us. */768768+ ClearPageDirty(page);769769+770770+ ext4_write_unlock_xattr(inode, &no_expand);771771+ brelse(iloc.bh);772772+773773+ /*774774+ * It's important to update i_size while still holding page775775+ * lock: page writeout could otherwise come in and zero776776+ * beyond i_size.777777+ */778778+ ext4_update_inode_size(inode, pos + copied);779779+ }780780+ unlock_page(page);781781+ put_page(page);757782758783 /*759759- * ei->i_inline_off may have changed since ext4_write_begin()760760- * called ext4_try_to_write_inline_data()784784+ * Don't mark the inode dirty under page lock. First, it unnecessarily785785+ * makes the holding time of page lock longer. Second, it forces lock786786+ * ordering of page lock and transaction start for journaling787787+ * filesystems.761788 */762762- (void) ext4_find_inline_data_nolock(inode);763763-764764- kaddr = kmap_atomic(page);765765- ext4_write_inline_data(inode, &iloc, kaddr, pos, len);766766- kunmap_atomic(kaddr);767767- SetPageUptodate(page);768768- /* clear page dirty so that writepages wouldn't work for us. 
*/769769- ClearPageDirty(page);770770-771771- ext4_write_unlock_xattr(inode, &no_expand);772772- brelse(iloc.bh);773773- mark_inode_dirty(inode);789789+ if (likely(copied))790790+ mark_inode_dirty(inode);774791out:775775- return copied;792792+ /*793793+ * If we didn't copy as much data as expected, we need to trim back794794+ * size of xattr containing inline data.795795+ */796796+ if (pos + len > inode->i_size && ext4_can_truncate(inode))797797+ ext4_orphan_add(handle, inode);798798+799799+ ret2 = ext4_journal_stop(handle);800800+ if (!ret)801801+ ret = ret2;802802+ if (pos + len > inode->i_size) {803803+ ext4_truncate_failed_write(inode);804804+ /*805805+ * If truncate failed early the inode might still be806806+ * on the orphan list; we need to make sure the inode807807+ * is removed from the orphan list in that case.808808+ */809809+ if (inode->i_nlink)810810+ ext4_orphan_del(NULL, inode);811811+ }812812+ return ret ? ret : copied;776813}777814778815struct buffer_head *···990951out:991952 brelse(iloc.bh);992953 return ret;993993-}994994-995995-int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,996996- unsigned len, unsigned copied,997997- struct page *page)998998-{999999- int ret;10001000-10011001- ret = ext4_write_inline_data_end(inode, pos, len, copied, page);10021002- if (ret < 0) {10031003- unlock_page(page);10041004- put_page(page);10051005- return ret;10061006- }10071007- copied = ret;10081008-10091009- /*10101010- * No need to use i_size_read() here, the i_size10111011- * cannot change under us because we hold i_mutex.10121012- *10131013- * But it's important to update i_size while still holding page lock:10141014- * page writeout could otherwise come in and zero beyond i_size.10151015- */10161016- if (pos+copied > inode->i_size)10171017- i_size_write(inode, pos+copied);10181018- unlock_page(page);10191019- put_page(page);10201020-10211021- /*10221022- * Don't mark the inode dirty under page lock. First, it unnecessarily10231023- * makes the holding time of page lock longer. Second, it forces lock10241024- * ordering of page lock and transaction start for journaling10251025- * filesystems.10261026- */10271027- mark_inode_dirty(inode);10281028-10291029- return copied;1030954}10319551032956#ifdef INLINE_DIR_DEBUG···19191917 EXT4_I(inode)->i_disksize = i_size;1920191819211919 if (i_size < inline_size) {19201920+ /*19211921+ * if there's inline data to truncate and this file was19221922+ * converted to extents after that inline data was written,19231923+ * the extent status cache must be cleared to avoid leaving19241924+ * behind stale delayed allocated extent entries19251925+ */19261926+ if (!ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {19271927+retry:19281928+ err = ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);19291929+ if (err == -ENOMEM) {19301930+ cond_resched();19311931+ congestion_wait(BLK_RW_ASYNC, HZ/50);19321932+ goto retry;19331933+ }19341934+ if (err)19351935+ goto out_error;19361936+ }19371937+19221938 /* Clear the content in the xattr space. */19231939 if (inline_size > EXT4_MIN_INLINE_DATA_SIZE) {19241940 if ((err = ext4_xattr_ibody_find(inode, &i, &is)) != 0)
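After this rework, ext4_write_inline_data_end() owns the whole tail of the write path: i_size is updated while the page is still locked (so writeout cannot zero beyond it), mark_inode_dirty() is deferred until the page lock is dropped to keep lock ordering with the transaction, and a short copy trims back any blocks instantiated past i_size. The cleanup tail, as a fragment following the hunk's names:

    /* Sketch of the short-copy cleanup tail (names per the hunk above). */
    if (pos + len > inode->i_size && ext4_can_truncate(inode))
            ext4_orphan_add(handle, inode); /* blocks may extend past i_size */

    ret2 = ext4_journal_stop(handle);
    if (!ret)
            ret = ret2;
    if (pos + len > inode->i_size) {
            ext4_truncate_failed_write(inode);
            if (inode->i_nlink)     /* still linked: leave the orphan list */
                    ext4_orphan_del(NULL, inode);
    }
    return ret ? ret : copied;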
+60-118
fs/ext4/inode.c
···12841284 loff_t old_size = inode->i_size;12851285 int ret = 0, ret2;12861286 int i_size_changed = 0;12871287- int inline_data = ext4_has_inline_data(inode);12881287 bool verity = ext4_verity_in_progress(inode);1289128812901289 trace_ext4_write_end(inode, pos, len, copied);12911291- if (inline_data) {12921292- ret = ext4_write_inline_data_end(inode, pos, len,12931293- copied, page);12941294- if (ret < 0) {12951295- unlock_page(page);12961296- put_page(page);12971297- goto errout;12981298- }12991299- copied = ret;13001300- } else13011301- copied = block_write_end(file, mapping, pos,13021302- len, copied, page, fsdata);12901290+12911291+ if (ext4_has_inline_data(inode))12921292+ return ext4_write_inline_data_end(inode, pos, len, copied, page);12931293+12941294+ copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);13031295 /*13041296 * it's important to update i_size while still holding page lock:13051297 * page writeout could otherwise come in and zero beyond i_size.···13121320 * ordering of page lock and transaction start for journaling13131321 * filesystems.13141322 */13151315- if (i_size_changed || inline_data)13231323+ if (i_size_changed)13161324 ret = ext4_mark_inode_dirty(handle, inode);1317132513181326 if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))···13211329 * inode->i_size. So truncate them13221330 */13231331 ext4_orphan_add(handle, inode);13241324-errout:13321332+13251333 ret2 = ext4_journal_stop(handle);13261334 if (!ret)13271335 ret = ret2;···13871395 int partial = 0;13881396 unsigned from, to;13891397 int size_changed = 0;13901390- int inline_data = ext4_has_inline_data(inode);13911398 bool verity = ext4_verity_in_progress(inode);1392139913931400 trace_ext4_journalled_write_end(inode, pos, len, copied);···1395140413961405 BUG_ON(!ext4_handle_valid(handle));1397140613981398- if (inline_data) {13991399- ret = ext4_write_inline_data_end(inode, pos, len,14001400- copied, page);14011401- if (ret < 0) {14021402- unlock_page(page);14031403- put_page(page);14041404- goto errout;14051405- }14061406- copied = ret;14071407- } else if (unlikely(copied < len) && !PageUptodate(page)) {14071407+ if (ext4_has_inline_data(inode))14081408+ return ext4_write_inline_data_end(inode, pos, len, copied, page);14091409+14101410+ if (unlikely(copied < len) && !PageUptodate(page)) {14081411 copied = 0;14091412 ext4_journalled_zero_new_buffers(handle, inode, page, from, to);14101413 } else {···14211436 if (old_size < pos && !verity)14221437 pagecache_isize_extended(inode, old_size, pos);1423143814241424- if (size_changed || inline_data) {14391439+ if (size_changed) {14251440 ret2 = ext4_mark_inode_dirty(handle, inode);14261441 if (!ret)14271442 ret = ret2;···14341449 */14351450 ext4_orphan_add(handle, inode);1436145114371437-errout:14381452 ret2 = ext4_journal_stop(handle);14391453 if (!ret)14401454 ret = ret2;···16281644 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);16291645 int ret;16301646 bool allocated = false;16471647+ bool reserved = false;1631164816321649 /*16331650 * If the cluster containing lblk is shared with a delayed,···16451660 ret = ext4_da_reserve_space(inode);16461661 if (ret != 0) /* ENOSPC */16471662 goto errout;16631663+ reserved = true;16481664 } else { /* bigalloc */16491665 if (!ext4_es_scan_clu(inode, &ext4_es_is_delonly, lblk)) {16501666 if (!ext4_es_scan_clu(inode,···16581672 ret = ext4_da_reserve_space(inode);16591673 if (ret != 0) /* ENOSPC */16601674 goto errout;16751675+ reserved = true;16611676 } else {16621677 allocated = 
true;16631678 }···16691682 }1670168316711684 ret = ext4_es_insert_delayed_block(inode, lblk, allocated);16851685+ if (ret && reserved)16861686+ ext4_da_release_space(inode, 1);1672168716731688errout:16741689 return ret;···17111722 }1712172317131724 /*17141714- * Delayed extent could be allocated by fallocate.17151715- * So we need to check it.17251725+ * the buffer head associated with a delayed and not unwritten17261726+ * block found in the extent status cache must contain an17271727+ * invalid block number and have its BH_New and BH_Delay bits17281728+ * set, reflecting the state assigned when the block was17291729+ * initially delayed allocated17161730 */17171717- if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) {17181718- map_bh(bh, inode->i_sb, invalid_block);17191719- set_buffer_new(bh);17201720- set_buffer_delay(bh);17311731+ if (ext4_es_is_delonly(&es)) {17321732+ BUG_ON(bh->b_blocknr != invalid_block);17331733+ BUG_ON(!buffer_new(bh));17341734+ BUG_ON(!buffer_delay(bh));17211735 return 0;17221736 }17231737···29242932 return 0;29252933}2926293429272927-/* We always reserve for an inode update; the superblock could be there too */29282928-static int ext4_da_write_credits(struct inode *inode, loff_t pos, unsigned len)29292929-{29302930- if (likely(ext4_has_feature_large_file(inode->i_sb)))29312931- return 1;29322932-29332933- if (pos + len <= 0x7fffffffULL)29342934- return 1;29352935-29362936- /* We might need to update the superblock to set LARGE_FILE */29372937- return 2;29382938-}29392939-29402935static int ext4_da_write_begin(struct file *file, struct address_space *mapping,29412936 loff_t pos, unsigned len, unsigned flags,29422937 struct page **pagep, void **fsdata)···29322953 struct page *page;29332954 pgoff_t index;29342955 struct inode *inode = mapping->host;29352935- handle_t *handle;2936295629372957 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))29382958 return -EIO;···29572979 return 0;29582980 }2959298129602960- /*29612961- * grab_cache_page_write_begin() can take a long time if the29622962- * system is thrashing due to memory pressure, or if the page29632963- * is being written back. So grab it first before we start29642964- * the transaction handle. This also allows us to allocate29652965- * the page (if needed) without using GFP_NOFS.29662966- */29672967-retry_grab:29822982+retry:29682983 page = grab_cache_page_write_begin(mapping, index, flags);29692984 if (!page)29702985 return -ENOMEM;29712971- unlock_page(page);2972298629732973- /*29742974- * With delayed allocation, we don't log the i_disksize update29752975- * if there is delayed block allocation. 
But we still need29762976- * to journalling the i_disksize update if writes to the end29772977- * of file which has an already mapped buffer.29782978- */29792979-retry_journal:29802980- handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,29812981- ext4_da_write_credits(inode, pos, len));29822982- if (IS_ERR(handle)) {29832983- put_page(page);29842984- return PTR_ERR(handle);29852985- }29862986-29872987- lock_page(page);29882988- if (page->mapping != mapping) {29892989- /* The page got truncated from under us */29902990- unlock_page(page);29912991- put_page(page);29922992- ext4_journal_stop(handle);29932993- goto retry_grab;29942994- }29952987 /* In case writeback began while the page was unlocked */29962988 wait_for_stable_page(page);29972989···29733025#endif29743026 if (ret < 0) {29753027 unlock_page(page);29762976- ext4_journal_stop(handle);30283028+ put_page(page);29773029 /*29783030 * block_write_begin may have instantiated a few blocks29793031 * outside i_size. Trim these off again. Don't need29802980- * i_size_read because we hold i_mutex.30323032+ * i_size_read because we hold inode lock.29813033 */29823034 if (pos + len > inode->i_size)29833035 ext4_truncate_failed_write(inode);2984303629853037 if (ret == -ENOSPC &&29863038 ext4_should_retry_alloc(inode->i_sb, &retries))29872987- goto retry_journal;29882988-29892989- put_page(page);30393039+ goto retry;29903040 return ret;29913041 }29923042···30213075 struct page *page, void *fsdata)30223076{30233077 struct inode *inode = mapping->host;30243024- int ret = 0, ret2;30253025- handle_t *handle = ext4_journal_current_handle();30263078 loff_t new_i_size;30273079 unsigned long start, end;30283080 int write_mode = (int)(unsigned long)fsdata;···30303086 len, copied, page, fsdata);3031308730323088 trace_ext4_da_write_end(inode, pos, len, copied);30333033- start = pos & (PAGE_SIZE - 1);30343034- end = start + copied - 1;30353035-30363036- /*30373037- * generic_write_end() will run mark_inode_dirty() if i_size30383038- * changes. So let's piggyback the i_disksize mark_inode_dirty30393039- * into that.30403040- */30413041- new_i_size = pos + copied;30423042- if (copied && new_i_size > EXT4_I(inode)->i_disksize) {30433043- if (ext4_has_inline_data(inode) ||30443044- ext4_da_should_update_i_disksize(page, end)) {30453045- ext4_update_i_disksize(inode, new_i_size);30463046- /* We need to mark inode dirty even if30473047- * new_i_size is less that inode->i_size30483048- * bu greater than i_disksize.(hint delalloc)30493049- */30503050- ret = ext4_mark_inode_dirty(handle, inode);30513051- }30523052- }3053308930543090 if (write_mode != CONVERT_INLINE_DATA &&30553091 ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&30563092 ext4_has_inline_data(inode))30573057- ret2 = ext4_da_write_inline_data_end(inode, pos, len, copied,30583058- page);30593059- else30603060- ret2 = generic_write_end(file, mapping, pos, len, copied,30613061- page, fsdata);30933093+ return ext4_write_inline_data_end(inode, pos, len, copied, page);3062309430633063- copied = ret2;30643064- if (ret2 < 0)30653065- ret = ret2;30663066- ret2 = ext4_journal_stop(handle);30673067- if (unlikely(ret2 && !ret))30683068- ret = ret2;30953095+ start = pos & (PAGE_SIZE - 1);30963096+ end = start + copied - 1;3069309730703070- return ret ? ret : copied;30983098+ /*30993099+ * Since we are holding inode lock, we are sure i_disksize <=31003100+ * i_size. We also know that if i_disksize < i_size, there are31013101+ * delalloc writes pending in the range upto i_size. 
If the end of31023102+ * the current write is <= i_size, there's no need to touch31033103+ * i_disksize since writeback will push i_disksize upto i_size31043104+ * eventually. If the end of the current write is > i_size and31053105+ * inside an allocated block (ext4_da_should_update_i_disksize()31063106+ * check), we need to update i_disksize here as neither31073107+ * ext4_writepage() nor certain ext4_writepages() paths not31083108+ * allocating blocks update i_disksize.31093109+ *31103110+ * Note that we defer inode dirtying to generic_write_end() /31113111+ * ext4_da_write_inline_data_end().31123112+ */31133113+ new_i_size = pos + copied;31143114+ if (copied && new_i_size > inode->i_size &&31153115+ ext4_da_should_update_i_disksize(page, end))31163116+ ext4_update_i_disksize(inode, new_i_size);31173117+31183118+ return generic_write_end(file, mapping, pos, len, copied, page, fsdata);30713119}3072312030733121/*···42764340 goto has_buffer;4277434142784342 lock_buffer(bh);43434343+ if (ext4_buffer_uptodate(bh)) {43444344+ /* Someone brought it uptodate while we waited */43454345+ unlock_buffer(bh);43464346+ goto has_buffer;43474347+ }43484348+42794349 /*42804350 * If we have all information of the inode in memory and this42814351 * is the only valid inode in the block, we need not read the
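
The ext4/inode.c hunks above share one theme: make ownership on the error paths explicit. The write_end paths now return straight into ext4_write_inline_data_end() for inline data, ext4_da_write_begin() collapses the old retry_grab/retry_journal pair into a single retry label, and ext4_insert_delayed_block() records whether it took the cluster reservation itself so it can hand the reservation back if the extent-status insert fails. A minimal userspace sketch of that last pattern; reserve_space(), insert_block() and release_space() are hypothetical stand-ins, not ext4 functions:

#include <stdbool.h>
#include <stdio.h>

static int reserve_space(void)  { return 0; }   /* 0 == success */
static int insert_block(void)   { return -1; }  /* simulate failure */
static void release_space(void) { puts("reservation released"); }

static int insert_delayed_block(bool already_allocated)
{
	bool reserved = false;
	int ret;

	if (!already_allocated) {
		ret = reserve_space();
		if (ret)
			goto out;
		reserved = true;	/* this call owns the reservation */
	}

	ret = insert_block();
	if (ret && reserved)
		release_space();	/* undo only what this call did */
out:
	return ret;
}

int main(void)
{
	return insert_delayed_block(false) ? 1 : 0;
}

Keeping ownership in a local flag keeps the unwind honest: the function releases only the reservation it acquired, never one that belongs to the caller.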
+15-6
fs/ext4/super.c
···658658 * constraints, it may not be safe to do it right here so we659659 * defer superblock flushing to a workqueue.660660 */661661- if (continue_fs)661661+ if (continue_fs && journal)662662 schedule_work(&EXT4_SB(sb)->s_error_work);663663 else664664 ext4_commit_super(sb);···13501350 true);13511351 dump_stack();13521352 }13531353+13541354+ if (EXT4_I(inode)->i_reserved_data_blocks)13551355+ ext4_msg(inode->i_sb, KERN_ERR,13561356+ "Inode %lu (%p): i_reserved_data_blocks (%u) not cleared!",13571357+ inode->i_ino, EXT4_I(inode),13581358+ EXT4_I(inode)->i_reserved_data_blocks);13531359}1354136013551361static void init_once(void *foo)···30273021 */30283022static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)30293023{30303030- loff_t res = EXT4_NDIR_BLOCKS;30243024+ unsigned long long upper_limit, res = EXT4_NDIR_BLOCKS;30313025 int meta_blocks;30323032- loff_t upper_limit;30333033- /* This is calculated to be the largest file size for a dense, block30263026+30273027+ /*30283028+ * This is calculated to be the largest file size for a dense, block30343029 * mapped file such that the file's total number of 512-byte sectors,30353030 * including data and all indirect blocks, does not exceed (2^48 - 1).30363031 *30373032 * __u32 i_blocks_lo and _u16 i_blocks_high represent the total30383033 * number of 512-byte sectors of the file.30393034 */30403040-30413035 if (!has_huge_files) {30423036 /*30433037 * !has_huge_files or implies that the inode i_block field···30803074 if (res > MAX_LFS_FILESIZE)30813075 res = MAX_LFS_FILESIZE;3082307630833083- return res;30773077+ return (loff_t)res;30843078}3085307930863080static ext4_fsblk_t descriptor_loc(struct super_block *sb,···50485042 sbi->s_ea_block_cache = NULL;5049504350505044 if (sbi->s_journal) {50455045+ /* flush s_error_work before journal destroy. */50465046+ flush_work(&sbi->s_error_work);50515047 jbd2_journal_destroy(sbi->s_journal);50525048 sbi->s_journal = NULL;50535049 }50545050failed_mount3a:50555051 ext4_es_unregister_shrinker(sbi);50565052failed_mount3:50535053+ /* flush s_error_work before sbi destroy */50575054 flush_work(&sbi->s_error_work);50585055 del_timer_sync(&sbi->s_err_report);50595056 ext4_stop_mmpd(sbi);
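
In super.c, ext4_max_bitmap_size() now does its arithmetic in unsigned long long and casts to loff_t only on return, and the two s_error_work hunks make sure the error worker is flushed before the journal and the sbi go away. The size bound itself follows from classic indirect-block geometry; a back-of-envelope check, assuming 4 KiB blocks and 32-bit block pointers and ignoring the 2^48-sector cap and meta-block overhead the real function accounts for:

#include <stdio.h>

int main(void)
{
	unsigned long long bs = 4096;		/* block size */
	unsigned long long ptrs = bs / 4;	/* 32-bit pointers per block */
	unsigned long long blocks =
		12 + ptrs + ptrs * ptrs + ptrs * ptrs * ptrs;

	printf("max dense block-mapped file: %llu bytes (~%llu TiB)\n",
	       blocks * bs, (blocks * bs) >> 40);
	return 0;
}

At 4 KiB blocks this lands around 4 TiB, which is why the function still clamps the result against upper_limit and MAX_LFS_FILESIZE.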
+1-1
fs/fscache/object.c
···7777static WORK_STATE(PARENT_READY, "PRDY", fscache_parent_ready);7878static WORK_STATE(ABORT_INIT, "ABRT", fscache_abort_initialisation);7979static WORK_STATE(LOOK_UP_OBJECT, "LOOK", fscache_look_up_object);8080-static WORK_STATE(CREATE_OBJECT, "CRTO", fscache_look_up_object);8180static WORK_STATE(OBJECT_AVAILABLE, "AVBL", fscache_object_available);8281static WORK_STATE(JUMPSTART_DEPS, "JUMP", fscache_jumpstart_dependents);8382···906907 * @object: The object to ask about907908 * @data: The auxiliary data for the object908909 * @datalen: The size of the auxiliary data910910+ * @object_size: The size of the object according to the server.909911 *910912 * This function consults the netfs about the coherency state of an object.911913 * The caller must be holding a ref on cookie->n_active (held by
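
The deleted WORK_STATE line is a copy/paste casualty: CREATE_OBJECT had been declared with fscache_look_up_object as its handler, duplicating LOOK_UP_OBJECT. The idiom binds a state to its handler in one line, which is compact but lets a wrong function pointer compile silently. A stripped-down illustration; the types and names here are illustrative, not the fscache ones:

#include <stdio.h>

struct work_state {
	const char *name;
	const char *short_name;
	void (*work)(void);
};

#define WORK_STATE(n, sn, fn) \
	static const struct work_state n = { #n, sn, fn }

static void look_up_object(void) { puts("looking up"); }

/* a second state pasted with the same handler would compile just fine */
WORK_STATE(LOOK_UP_OBJECT, "LOOK", look_up_object);

int main(void)
{
	printf("%s/%s: ", LOOK_UP_OBJECT.name, LOOK_UP_OBJECT.short_name);
	LOOK_UP_OBJECT.work();
	return 0;
}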
+3
fs/fscache/operation.c
···22222323/**2424 * fscache_operation_init - Do basic initialisation of an operation2525+ * @cookie: The cookie to operate on2526 * @op: The operation to initialise2727+ * @processor: The function to perform the operation2828+ * @cancel: A function to handle operation cancellation2629 * @release: The release function to assign2730 *2831 * Do basic initialisation of an operation. The caller must still set flags,
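
Both fscache edits are kernel-doc repairs: every parameter needs its own @name: line, in declaration order, or scripts/kernel-doc reports the parameter as not described. For reference, a minimal well-formed header on a hypothetical function:

struct widget;

/**
 * widget_init - Do basic initialisation of a widget
 * @w: The widget to initialise
 * @notify: Callback invoked when the widget changes state
 *
 * Prepare @w for use.  The caller must still register it.
 */
void widget_init(struct widget *w, void (*notify)(struct widget *));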
···403403 struct wait_queue_head cq_wait;404404 unsigned cq_extra;405405 atomic_t cq_timeouts;406406- struct fasync_struct *cq_fasync;407406 unsigned cq_last_tm_flush;408407 } ____cacheline_aligned_in_smp;409408···501502struct io_close {502503 struct file *file;503504 int fd;505505+ u32 file_slot;504506};505507506508struct io_timeout_data {···1098109810991099static int io_install_fixed_file(struct io_kiocb *req, struct file *file,11001100 unsigned int issue_flags, u32 slot_index);11011101+static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags);11021102+11011103static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer);1102110411031105static struct kmem_cache *req_cachep;···16131611 wake_up(&ctx->sq_data->wait);16141612 if (io_should_trigger_evfd(ctx))16151613 eventfd_signal(ctx->cq_ev_fd, 1);16161616- if (waitqueue_active(&ctx->poll_wait)) {16141614+ if (waitqueue_active(&ctx->poll_wait))16171615 wake_up_interruptible(&ctx->poll_wait);16181618- kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);16191619- }16201616}1621161716221618static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)···16281628 }16291629 if (io_should_trigger_evfd(ctx))16301630 eventfd_signal(ctx->cq_ev_fd, 1);16311631- if (waitqueue_active(&ctx->poll_wait)) {16311631+ if (waitqueue_active(&ctx->poll_wait))16321632 wake_up_interruptible(&ctx->poll_wait);16331633- kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);16341634- }16351633}1636163416371635/* Returns true if there are no backlogged entries after the flush */···36033605 iov_iter_save_state(iter, state);36043606 }36053607 req->result = iov_iter_count(iter);36063606- ret2 = 0;3607360836083609 /* Ensure we clear previously set non-block flag */36093610 if (!force_nonblock)···36673670 } else {36683671copy_iov:36693672 iov_iter_restore(iter, state);36703670- if (ret2 > 0)36713671- iov_iter_advance(iter, ret2);36723673 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);36733674 return ret ?: -EAGAIN;36743675 }···43824387 int i, bid = pbuf->bid;4383438843844389 for (i = 0; i < pbuf->nbufs; i++) {43854385- buf = kmalloc(sizeof(*buf), GFP_KERNEL);43904390+ buf = kmalloc(sizeof(*buf), GFP_KERNEL_ACCOUNT);43864391 if (!buf)43874392 break;43884393···45894594 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))45904595 return -EINVAL;45914596 if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||45924592- sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)45974597+ sqe->rw_flags || sqe->buf_index)45934598 return -EINVAL;45944599 if (req->flags & REQ_F_FIXED_FILE)45954600 return -EBADF;4596460145974602 req->close.fd = READ_ONCE(sqe->fd);46034603+ req->close.file_slot = READ_ONCE(sqe->file_index);46044604+ if (req->close.file_slot && req->close.fd)46054605+ return -EINVAL;46064606+45984607 return 0;45994608}46004609···46094610 struct fdtable *fdt;46104611 struct file *file = NULL;46114612 int ret = -EBADF;46134613+46144614+ if (req->close.file_slot) {46154615+ ret = io_close_fixed(req, issue_flags);46164616+ goto err;46174617+ }4612461846134619 spin_lock(&files->file_lock);46144620 fdt = files_fdtable(files);···53425338 if (req->poll.events & EPOLLONESHOT)53435339 flags = 0;53445340 if (!io_cqring_fill_event(ctx, req->user_data, error, flags)) {53455345- req->poll.done = true;53415341+ req->poll.events |= EPOLLONESHOT;53465342 flags = 0;53475343 }53485344 if (flags & IORING_CQE_F_MORE)···53715367 } else {53725368 bool done;5373536953705370+ if (req->poll.done) {53715371+ spin_unlock(&ctx->completion_lock);53725372+ return;53735373+ 
}53745374 done = __io_poll_complete(req, req->result);53755375 if (done) {53765376 io_poll_remove_double(req);53775377 hash_del(&req->hash_node);53785378+ req->poll.done = true;53785379 } else {53795380 req->result = 0;53805381 add_wait_queue(req->poll.head, &req->poll.wait);···5517550855185509 hash_del(&req->hash_node);55195510 io_poll_remove_double(req);55115511+ apoll->poll.done = true;55205512 spin_unlock(&ctx->completion_lock);5521551355225514 if (!READ_ONCE(apoll->poll.canceled))···58385828 struct io_ring_ctx *ctx = req->ctx;58395829 struct io_poll_table ipt;58405830 __poll_t mask;58315831+ bool done;5841583258425833 ipt.pt._qproc = io_poll_queue_proc;58435834···5847583658485837 if (mask) { /* no async, we'd stolen it */58495838 ipt.error = 0;58505850- io_poll_complete(req, mask);58395839+ done = io_poll_complete(req, mask);58515840 }58525841 spin_unlock(&ctx->completion_lock);5853584258545843 if (mask) {58555844 io_cqring_ev_posted(ctx);58565856- if (poll->events & EPOLLONESHOT)58455845+ if (done)58575846 io_put_req(req);58585847 }58595848 return ipt.error;···63446333 struct io_uring_rsrc_update2 up;63456334 int ret;6346633563476347- if (issue_flags & IO_URING_F_NONBLOCK)63486348- return -EAGAIN;63496349-63506336 up.offset = req->rsrc_update.offset;63516337 up.data = req->rsrc_update.arg;63526338 up.nr = 0;63536339 up.tags = 0;63546340 up.resv = 0;6355634163566356- mutex_lock(&ctx->uring_lock);63426342+ io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));63576343 ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,63586344 &up, req->rsrc_update.nr_args);63596359- mutex_unlock(&ctx->uring_lock);63456345+ io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));6360634663616347 if (ret < 0)63626348 req_set_fail(req);···84088400 return ret;84098401}8410840284038403+static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)84048404+{84058405+ unsigned int offset = req->close.file_slot - 1;84068406+ struct io_ring_ctx *ctx = req->ctx;84078407+ struct io_fixed_file *file_slot;84088408+ struct file *file;84098409+ int ret, i;84108410+84118411+ io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));84128412+ ret = -ENXIO;84138413+ if (unlikely(!ctx->file_data))84148414+ goto out;84158415+ ret = -EINVAL;84168416+ if (offset >= ctx->nr_user_files)84178417+ goto out;84188418+ ret = io_rsrc_node_switch_start(ctx);84198419+ if (ret)84208420+ goto out;84218421+84228422+ i = array_index_nospec(offset, ctx->nr_user_files);84238423+ file_slot = io_fixed_file_slot(&ctx->file_table, i);84248424+ ret = -EBADF;84258425+ if (!file_slot->file_ptr)84268426+ goto out;84278427+84288428+ file = (struct file *)(file_slot->file_ptr & FFS_MASK);84298429+ ret = io_queue_rsrc_removal(ctx->file_data, offset, ctx->rsrc_node, file);84308430+ if (ret)84318431+ goto out;84328432+84338433+ file_slot->file_ptr = 0;84348434+ io_rsrc_node_switch(ctx, ctx->file_data);84358435+ ret = 0;84368436+out:84378437+ io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));84388438+ return ret;84398439+}84408440+84118441static int __io_sqe_files_update(struct io_ring_ctx *ctx,84128442 struct io_uring_rsrc_update2 *up,84138443 unsigned nr_args)···92129166 struct io_buffer *buf;92139167 unsigned long index;9214916892159215- xa_for_each(&ctx->io_buffers, index, buf)91699169+ xa_for_each(&ctx->io_buffers, index, buf) {92169170 __io_remove_buffers(ctx, buf, index, -1U);91719171+ cond_resched();91729172+ }92179173}9218917492199175static void io_req_cache_free(struct list_head 
*list)···93389290 mask |= EPOLLIN | EPOLLRDNORM;9339929193409292 return mask;93419341-}93429342-93439343-static int io_uring_fasync(int fd, struct file *file, int on)93449344-{93459345- struct io_ring_ctx *ctx = file->private_data;93469346-93479347- return fasync_helper(fd, file, on, &ctx->cq_fasync);93489293}9349929493509295static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)···97069665 struct io_tctx_node *node;97079666 unsigned long index;9708966797099709- xa_for_each(&tctx->xa, index, node)96689668+ xa_for_each(&tctx->xa, index, node) {97109669 io_uring_del_tctx_node(index);96709670+ cond_resched();96719671+ }97119672 if (wq) {97129673 /*97139674 * Must be after io_uring_del_task_file() (removes nodes under···1013310090 .mmap_capabilities = io_uring_nommu_mmap_capabilities,1013410091#endif1013510092 .poll = io_uring_poll,1013610136- .fasync = io_uring_fasync,1013710093#ifdef CONFIG_PROC_FS1013810094 .show_fdinfo = io_uring_show_fdinfo,1013910095#endif
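
The hunks above (they appear to be fs/io_uring.c) drop the unused fasync plumbing, add cond_resched() to the xa_for_each() teardown loops, switch provided-buffer allocation to GFP_KERNEL_ACCOUNT, and teach IORING_OP_CLOSE to close a registered file slot carried in sqe->file_index instead of a real fd. From userspace the slot close looks roughly like this, assuming a liburing recent enough (around 2.1) to wrap it as io_uring_prep_close_direct(); error handling is trimmed:

#include <fcntl.h>
#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int fd = open("/dev/null", O_RDONLY);

	io_uring_queue_init(4, &ring, 0);
	io_uring_register_files(&ring, &fd, 1);	/* fd becomes slot 0 */

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_close_direct(sqe, 0);	/* close slot 0, not an fd */
	io_uring_submit(&ring);
	io_uring_wait_cqe(&ring, &cqe);
	printf("close_direct res: %d\n", cqe->res);

	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}

The kernel side (io_close_fixed() above) routes the removal through the rsrc node machinery, so dropping the registered copy never needs a close(2) on an ordinary descriptor.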
+7-2
fs/kernfs/dir.c
···11161116 if (!inode)11171117 inode = ERR_PTR(-ENOMEM);11181118 }11191119- /* Needed only for negative dentry validation */11201120- if (!inode)11191119+ /*11201120+ * Needed for negative dentry validation.11211121+ * The negative dentry can be created in kernfs_iop_lookup()11221122+ * or transforms from positive dentry in dentry_unlink_inode()11231123+ * called from vfs_rmdir().11241124+ */11251125+ if (!IS_ERR(inode))11211126 kernfs_set_rev(parent, dentry);11221127 up_read(&kernfs_rwsem);11231128
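
The kernfs fix turns on ERR_PTR semantics: the guard changes from !inode, which catches only the NULL negative-dentry case, to !IS_ERR(inode), which covers everything except a real error, and the distinction matters because an error pointer is non-NULL. The encoding, re-implemented in userspace:

#include <stdio.h>

#define MAX_ERRNO 4095UL

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline int IS_ERR(const void *ptr)
{
	/* errors live in the top MAX_ERRNO values of the pointer range */
	return (unsigned long)ptr >= -MAX_ERRNO;
}

int main(void)
{
	void *inode = ERR_PTR(-12);	/* -ENOMEM */

	printf("!inode        = %d  (old guard: misses the error)\n", !inode);
	printf("IS_ERR(inode) = %d  (new guard: rejects it)\n", IS_ERR(inode));
	return 0;
}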
-205
fs/ksmbd/auth.c
···6868 memcpy(buf, NEGOTIATE_GSS_HEADER, AUTH_GSS_LENGTH);6969}70707171-static void7272-str_to_key(unsigned char *str, unsigned char *key)7373-{7474- int i;7575-7676- key[0] = str[0] >> 1;7777- key[1] = ((str[0] & 0x01) << 6) | (str[1] >> 2);7878- key[2] = ((str[1] & 0x03) << 5) | (str[2] >> 3);7979- key[3] = ((str[2] & 0x07) << 4) | (str[3] >> 4);8080- key[4] = ((str[3] & 0x0F) << 3) | (str[4] >> 5);8181- key[5] = ((str[4] & 0x1F) << 2) | (str[5] >> 6);8282- key[6] = ((str[5] & 0x3F) << 1) | (str[6] >> 7);8383- key[7] = str[6] & 0x7F;8484- for (i = 0; i < 8; i++)8585- key[i] = (key[i] << 1);8686-}8787-8888-static int8989-smbhash(unsigned char *out, const unsigned char *in, unsigned char *key)9090-{9191- unsigned char key2[8];9292- struct des_ctx ctx;9393-9494- if (fips_enabled) {9595- ksmbd_debug(AUTH, "FIPS compliance enabled: DES not permitted\n");9696- return -ENOENT;9797- }9898-9999- str_to_key(key, key2);100100- des_expand_key(&ctx, key2, DES_KEY_SIZE);101101- des_encrypt(&ctx, out, in);102102- memzero_explicit(&ctx, sizeof(ctx));103103- return 0;104104-}105105-106106-static int ksmbd_enc_p24(unsigned char *p21, const unsigned char *c8, unsigned char *p24)107107-{108108- int rc;109109-110110- rc = smbhash(p24, c8, p21);111111- if (rc)112112- return rc;113113- rc = smbhash(p24 + 8, c8, p21 + 7);114114- if (rc)115115- return rc;116116- return smbhash(p24 + 16, c8, p21 + 14);117117-}118118-119119-/* produce a md4 message digest from data of length n bytes */120120-static int ksmbd_enc_md4(unsigned char *md4_hash, unsigned char *link_str,121121- int link_len)122122-{123123- int rc;124124- struct ksmbd_crypto_ctx *ctx;125125-126126- ctx = ksmbd_crypto_ctx_find_md4();127127- if (!ctx) {128128- ksmbd_debug(AUTH, "Crypto md4 allocation error\n");129129- return -ENOMEM;130130- }131131-132132- rc = crypto_shash_init(CRYPTO_MD4(ctx));133133- if (rc) {134134- ksmbd_debug(AUTH, "Could not init md4 shash\n");135135- goto out;136136- }137137-138138- rc = crypto_shash_update(CRYPTO_MD4(ctx), link_str, link_len);139139- if (rc) {140140- ksmbd_debug(AUTH, "Could not update with link_str\n");141141- goto out;142142- }143143-144144- rc = crypto_shash_final(CRYPTO_MD4(ctx), md4_hash);145145- if (rc)146146- ksmbd_debug(AUTH, "Could not generate md4 hash\n");147147-out:148148- ksmbd_release_crypto_ctx(ctx);149149- return rc;150150-}151151-152152-static int ksmbd_enc_update_sess_key(unsigned char *md5_hash, char *nonce,153153- char *server_challenge, int len)154154-{155155- int rc;156156- struct ksmbd_crypto_ctx *ctx;157157-158158- ctx = ksmbd_crypto_ctx_find_md5();159159- if (!ctx) {160160- ksmbd_debug(AUTH, "Crypto md5 allocation error\n");161161- return -ENOMEM;162162- }163163-164164- rc = crypto_shash_init(CRYPTO_MD5(ctx));165165- if (rc) {166166- ksmbd_debug(AUTH, "Could not init md5 shash\n");167167- goto out;168168- }169169-170170- rc = crypto_shash_update(CRYPTO_MD5(ctx), server_challenge, len);171171- if (rc) {172172- ksmbd_debug(AUTH, "Could not update with challenge\n");173173- goto out;174174- }175175-176176- rc = crypto_shash_update(CRYPTO_MD5(ctx), nonce, len);177177- if (rc) {178178- ksmbd_debug(AUTH, "Could not update with nonce\n");179179- goto out;180180- }181181-182182- rc = crypto_shash_final(CRYPTO_MD5(ctx), md5_hash);183183- if (rc)184184- ksmbd_debug(AUTH, "Could not generate md5 hash\n");185185-out:186186- ksmbd_release_crypto_ctx(ctx);187187- return rc;188188-}189189-19071/**19172 * ksmbd_gen_sess_key() - function to generate session key19273 * @sess: session of 
connection···206325}207326208327/**209209- * ksmbd_auth_ntlm() - NTLM authentication handler210210- * @sess: session of connection211211- * @pw_buf: NTLM challenge response212212- * @passkey: user password213213- *214214- * Return: 0 on success, error number on error215215- */216216-int ksmbd_auth_ntlm(struct ksmbd_session *sess, char *pw_buf)217217-{218218- int rc;219219- unsigned char p21[21];220220- char key[CIFS_AUTH_RESP_SIZE];221221-222222- memset(p21, '\0', 21);223223- memcpy(p21, user_passkey(sess->user), CIFS_NTHASH_SIZE);224224- rc = ksmbd_enc_p24(p21, sess->ntlmssp.cryptkey, key);225225- if (rc) {226226- pr_err("password processing failed\n");227227- return rc;228228- }229229-230230- ksmbd_enc_md4(sess->sess_key, user_passkey(sess->user),231231- CIFS_SMB1_SESSKEY_SIZE);232232- memcpy(sess->sess_key + CIFS_SMB1_SESSKEY_SIZE, key,233233- CIFS_AUTH_RESP_SIZE);234234- sess->sequence_number = 1;235235-236236- if (strncmp(pw_buf, key, CIFS_AUTH_RESP_SIZE) != 0) {237237- ksmbd_debug(AUTH, "ntlmv1 authentication failed\n");238238- return -EINVAL;239239- }240240-241241- ksmbd_debug(AUTH, "ntlmv1 authentication pass\n");242242- return 0;243243-}244244-245245-/**246328 * ksmbd_auth_ntlmv2() - NTLMv2 authentication handler247329 * @sess: session of connection248330 * @ntlmv2: NTLMv2 challenge response···286442}287443288444/**289289- * __ksmbd_auth_ntlmv2() - NTLM2(extended security) authentication handler290290- * @sess: session of connection291291- * @client_nonce: client nonce from LM response.292292- * @ntlm_resp: ntlm response data from client.293293- *294294- * Return: 0 on success, error number on error295295- */296296-static int __ksmbd_auth_ntlmv2(struct ksmbd_session *sess, char *client_nonce,297297- char *ntlm_resp)298298-{299299- char sess_key[CIFS_SMB1_SESSKEY_SIZE] = {0};300300- int rc;301301- unsigned char p21[21];302302- char key[CIFS_AUTH_RESP_SIZE];303303-304304- rc = ksmbd_enc_update_sess_key(sess_key,305305- client_nonce,306306- (char *)sess->ntlmssp.cryptkey, 8);307307- if (rc) {308308- pr_err("password processing failed\n");309309- goto out;310310- }311311-312312- memset(p21, '\0', 21);313313- memcpy(p21, user_passkey(sess->user), CIFS_NTHASH_SIZE);314314- rc = ksmbd_enc_p24(p21, sess_key, key);315315- if (rc) {316316- pr_err("password processing failed\n");317317- goto out;318318- }319319-320320- if (memcmp(ntlm_resp, key, CIFS_AUTH_RESP_SIZE) != 0)321321- rc = -EINVAL;322322-out:323323- return rc;324324-}325325-326326-/**327445 * ksmbd_decode_ntlmssp_auth_blob() - helper function to construct328446 * authenticate blob329447 * @authblob: authenticate blob source pointer···317511 lm_off = le32_to_cpu(authblob->LmChallengeResponse.BufferOffset);318512 nt_off = le32_to_cpu(authblob->NtChallengeResponse.BufferOffset);319513 nt_len = le16_to_cpu(authblob->NtChallengeResponse.Length);320320-321321- /* process NTLM authentication */322322- if (nt_len == CIFS_AUTH_RESP_SIZE) {323323- if (le32_to_cpu(authblob->NegotiateFlags) &324324- NTLMSSP_NEGOTIATE_EXTENDED_SEC)325325- return __ksmbd_auth_ntlmv2(sess, (char *)authblob +326326- lm_off, (char *)authblob + nt_off);327327- else328328- return ksmbd_auth_ntlm(sess, (char *)authblob +329329- nt_off);330330- }331514332515 /* TODO : use domain name that imported from configuration file */333516 domain_name = smb_strndup_from_utf16((const char *)authblob +
···14511451 */14521452struct create_context *smb2_find_context_vals(void *open_req, const char *tag)14531453{14541454- char *data_offset;14551454 struct create_context *cc;14561455 unsigned int next = 0;14571456 char *name;14581457 struct smb2_create_req *req = (struct smb2_create_req *)open_req;14581458+ unsigned int remain_len, name_off, name_len, value_off, value_len,14591459+ cc_len;1459146014601460- data_offset = (char *)req + 4 + le32_to_cpu(req->CreateContextsOffset);14611461- cc = (struct create_context *)data_offset;14611461+ /*14621462+ * CreateContextsOffset and CreateContextsLength are guaranteed to14631463+ * be valid because of ksmbd_smb2_check_message().14641464+ */14651465+ cc = (struct create_context *)((char *)req + 4 +14661466+ le32_to_cpu(req->CreateContextsOffset));14671467+ remain_len = le32_to_cpu(req->CreateContextsLength);14621468 do {14631463- int val;14641464-14651469 cc = (struct create_context *)((char *)cc + next);14661466- name = le16_to_cpu(cc->NameOffset) + (char *)cc;14671467- val = le16_to_cpu(cc->NameLength);14681468- if (val < 4)14701470+ if (remain_len < offsetof(struct create_context, Buffer))14691471 return ERR_PTR(-EINVAL);1470147214711471- if (memcmp(name, tag, val) == 0)14721472- return cc;14731473 next = le32_to_cpu(cc->Next);14741474+ name_off = le16_to_cpu(cc->NameOffset);14751475+ name_len = le16_to_cpu(cc->NameLength);14761476+ value_off = le16_to_cpu(cc->DataOffset);14771477+ value_len = le32_to_cpu(cc->DataLength);14781478+ cc_len = next ? next : remain_len;14791479+14801480+ if ((next & 0x7) != 0 ||14811481+ next > remain_len ||14821482+ name_off != offsetof(struct create_context, Buffer) ||14831483+ name_len < 4 ||14841484+ name_off + name_len > cc_len ||14851485+ (value_off & 0x7) != 0 ||14861486+ (value_off && (value_off < name_off + name_len)) ||14871487+ ((u64)value_off + value_len > cc_len))14881488+ return ERR_PTR(-EINVAL);14891489+14901490+ name = (char *)cc + name_off;14911491+ if (memcmp(name, tag, name_len) == 0)14921492+ return cc;14931493+14941494+ remain_len -= next;14741495 } while (next != 0);1475149614761497 return NULL;
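
This hunk hardens smb2_find_context_vals(), which upstream lives in fs/ksmbd/oplock.c: rather than trusting NameOffset/NameLength from the client, the walk now checks that Next is 8-byte aligned, that every offset/length pair stays inside the remaining buffer, and that the name sits right after the fixed header. The same defensive shape for a generic length-prefixed record chain, using an illustrative layout rather than the SMB2 wire format:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct rec {			/* illustrative, not struct create_context */
	uint32_t next;		/* offset of next record; 0 == last */
	uint16_t name_off;	/* offset of the tag within this record */
	uint16_t name_len;
};

/* Return the offset of the matching record, or -1 for no match or a
 * malformed chain.  Every field is bounds-checked before it is used. */
static long find_rec(const uint8_t *buf, size_t remain,
		     const void *tag, size_t tag_len)
{
	size_t off = 0;

	for (;;) {
		struct rec r;
		size_t len;

		if (remain - off < sizeof(r))
			return -1;			/* header must fit */
		memcpy(&r, buf + off, sizeof(r));	/* avoid unaligned loads */
		len = r.next ? r.next : remain - off;

		if (r.next > remain - off ||		/* stay inside the buffer */
		    r.name_off < sizeof(r) ||		/* tag after the header */
		    (size_t)r.name_off + r.name_len > len)
			return -1;

		if (r.name_len == tag_len &&
		    !memcmp(buf + off + r.name_off, tag, tag_len))
			return (long)off;
		if (!r.next)
			return -1;
		off += r.next;				/* validated above */
	}
}

int main(void)
{
	uint8_t buf[16] = { 0 };
	struct rec r = { .next = 0, .name_off = 8, .name_len = 4 };

	memcpy(buf, &r, sizeof(r));
	memcpy(buf + 8, "DHnQ", 4);
	printf("match at offset %ld\n", find_rec(buf, sizeof(buf), "DHnQ", 4));
	return 0;
}

memcpy() is used here only to keep the userspace sketch free of unaligned dereferences; the pattern of checking each offset against the remaining length before use is the part the patch adds.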
+3
fs/ksmbd/server.c
···584584 ret = ksmbd_workqueue_init();585585 if (ret)586586 goto err_crypto_destroy;587587+588588+ pr_warn_once("The ksmbd server is experimental, use at your own risk.\n");589589+587590 return 0;588591589592err_crypto_destroy:
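
server.c only gains a one-shot warning at init time. pr_warn_once() keeps a static per-call-site flag so the message fires at most once per boot; a userspace sketch of the idiom (the real macro also goes through printk and its log levels):

#include <stdio.h>

#define warn_once(...) do {				\
	static int warned;				\
	if (!warned) {					\
		warned = 1;				\
		fprintf(stderr, __VA_ARGS__);		\
	}						\
} while (0)

int main(void)
{
	for (int i = 0; i < 3; i++)
		warn_once("module is experimental, printed once\n");
	return 0;
}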
+235-142
fs/ksmbd/smb2pdu.c
···433433 work->compound_pfid = KSMBD_NO_FID;434434 }435435 memset((char *)rsp_hdr + 4, 0, sizeof(struct smb2_hdr) + 2);436436- rsp_hdr->ProtocolId = rcv_hdr->ProtocolId;436436+ rsp_hdr->ProtocolId = SMB2_PROTO_NUMBER;437437 rsp_hdr->StructureSize = SMB2_HEADER_STRUCTURE_SIZE;438438 rsp_hdr->Command = rcv_hdr->Command;439439···459459bool is_chained_smb2_message(struct ksmbd_work *work)460460{461461 struct smb2_hdr *hdr = work->request_buf;462462- unsigned int len;462462+ unsigned int len, next_cmd;463463464464 if (hdr->ProtocolId != SMB2_PROTO_NUMBER)465465 return false;466466467467 hdr = ksmbd_req_buf_next(work);468468- if (le32_to_cpu(hdr->NextCommand) > 0) {468468+ next_cmd = le32_to_cpu(hdr->NextCommand);469469+ if (next_cmd > 0) {470470+ if ((u64)work->next_smb2_rcv_hdr_off + next_cmd +471471+ __SMB2_HEADER_STRUCTURE_SIZE >472472+ get_rfc1002_len(work->request_buf)) {473473+ pr_err("next command(%u) offset exceeds smb msg size\n",474474+ next_cmd);475475+ return false;476476+ }477477+469478 ksmbd_debug(SMB, "got SMB2 chained command\n");470479 init_chained_smb2_rsp(work);471480 return true;···643634smb2_get_name(struct ksmbd_share_config *share, const char *src,644635 const int maxlen, struct nls_table *local_nls)645636{646646- char *name, *norm_name, *unixname;637637+ char *name;647638648639 name = smb_strndup_from_utf16(src, maxlen, 1, local_nls);649640 if (IS_ERR(name)) {···651642 return name;652643 }653644654654- /* change it to absolute unix name */655655- norm_name = ksmbd_conv_path_to_unix(name);656656- if (IS_ERR(norm_name)) {657657- kfree(name);658658- return norm_name;659659- }660660- kfree(name);661661-662662- unixname = convert_to_unix_name(share, norm_name);663663- kfree(norm_name);664664- if (!unixname) {665665- pr_err("can not convert absolute name\n");666666- return ERR_PTR(-ENOMEM);667667- }668668-669669- ksmbd_debug(SMB, "absolute name = %s\n", unixname);670670- return unixname;645645+ ksmbd_conv_path_to_unix(name);646646+ ksmbd_strip_last_slash(name);647647+ return name;671648}672649673650int setup_async_work(struct ksmbd_work *work, void (*fn)(void **), void **arg)···10671072 struct smb2_negotiate_req *req = work->request_buf;10681073 struct smb2_negotiate_rsp *rsp = work->response_buf;10691074 int rc = 0;10751075+ unsigned int smb2_buf_len, smb2_neg_size;10701076 __le32 status;1071107710721078 ksmbd_debug(SMB, "Received negotiate request\n");···10831087 rsp->hdr.Status = STATUS_INVALID_PARAMETER;10841088 rc = -EINVAL;10851089 goto err_out;10901090+ }10911091+10921092+ smb2_buf_len = get_rfc1002_len(work->request_buf);10931093+ smb2_neg_size = offsetof(struct smb2_negotiate_req, Dialects) - 4;10941094+ if (smb2_neg_size > smb2_buf_len) {10951095+ rsp->hdr.Status = STATUS_INVALID_PARAMETER;10961096+ rc = -EINVAL;10971097+ goto err_out;10981098+ }10991099+11001100+ if (conn->dialect == SMB311_PROT_ID) {11011101+ unsigned int nego_ctxt_off = le32_to_cpu(req->NegotiateContextOffset);11021102+11031103+ if (smb2_buf_len < nego_ctxt_off) {11041104+ rsp->hdr.Status = STATUS_INVALID_PARAMETER;11051105+ rc = -EINVAL;11061106+ goto err_out;11071107+ }11081108+11091109+ if (smb2_neg_size > nego_ctxt_off) {11101110+ rsp->hdr.Status = STATUS_INVALID_PARAMETER;11111111+ rc = -EINVAL;11121112+ goto err_out;11131113+ }11141114+11151115+ if (smb2_neg_size + le16_to_cpu(req->DialectCount) * sizeof(__le16) >11161116+ nego_ctxt_off) {11171117+ rsp->hdr.Status = STATUS_INVALID_PARAMETER;11181118+ rc = -EINVAL;11191119+ goto err_out;11201120+ }11211121+ } else {11221122+ if 
(smb2_neg_size + le16_to_cpu(req->DialectCount) * sizeof(__le16) >11231123+ smb2_buf_len) {11241124+ rsp->hdr.Status = STATUS_INVALID_PARAMETER;11251125+ rc = -EINVAL;11261126+ goto err_out;11271127+ }10861128 }1087112910881130 conn->cli_cap = le32_to_cpu(req->Capabilities);···21412107 * smb2_set_ea() - handler for setting extended attributes using set21422108 * info command21432109 * @eabuf: set info command buffer21102110+ * @buf_len: set info command buffer length21442111 * @path: dentry path for get ea21452112 *21462113 * Return: 0 on success, otherwise error21472114 */21482148-static int smb2_set_ea(struct smb2_ea_info *eabuf, struct path *path)21152115+static int smb2_set_ea(struct smb2_ea_info *eabuf, unsigned int buf_len,21162116+ struct path *path)21492117{21502118 struct user_namespace *user_ns = mnt_user_ns(path->mnt);21512119 char *attr_name = NULL, *value;21522120 int rc = 0;21532153- int next = 0;21212121+ unsigned int next = 0;21222122+21232123+ if (buf_len < sizeof(struct smb2_ea_info) + eabuf->EaNameLength +21242124+ le16_to_cpu(eabuf->EaValueLength))21252125+ return -EINVAL;2154212621552127 attr_name = kmalloc(XATTR_NAME_MAX + 1, GFP_KERNEL);21562128 if (!attr_name)···2221218122222182next:22232183 next = le32_to_cpu(eabuf->NextEntryOffset);21842184+ if (next == 0 || buf_len < next)21852185+ break;21862186+ buf_len -= next;22242187 eabuf = (struct smb2_ea_info *)((char *)eabuf + next);21882188+ if (next < (u32)eabuf->EaNameLength + le16_to_cpu(eabuf->EaValueLength))21892189+ break;21902190+22252191 } while (next != 0);2226219222272193 kfree(attr_name);···23982352 return rc;23992353 }2400235424012401- rc = ksmbd_vfs_kern_path(name, 0, path, 0);23552355+ rc = ksmbd_vfs_kern_path(work, name, 0, path, 0);24022356 if (rc) {24032357 pr_err("cannot get linux path (%s), err = %d\n",24042358 name, rc);···24272381 ksmbd_debug(SMB,24282382 "Set ACLs using SMB2_CREATE_SD_BUFFER context\n");24292383 sd_buf = (struct create_sd_buf_req *)context;23842384+ if (le16_to_cpu(context->DataOffset) +23852385+ le32_to_cpu(context->DataLength) <23862386+ sizeof(struct create_sd_buf_req))23872387+ return -EINVAL;24302388 return set_info_sec(work->conn, work->tcon, path, &sd_buf->ntsd,24312389 le32_to_cpu(sd_buf->ccontext.DataLength), true);24322390}···24772427 struct oplock_info *opinfo;24782428 __le32 *next_ptr = NULL;24792429 int req_op_level = 0, open_flags = 0, may_flags = 0, file_info = 0;24802480- int rc = 0, len = 0;24302430+ int rc = 0;24812431 int contxt_cnt = 0, query_disk_id = 0;24822432 int maximal_access_ctxt = 0, posix_ctxt = 0;24832433 int s_type = 0;···25492499 goto err_out1;25502500 }25512501 } else {25522552- len = strlen(share->path);25532553- ksmbd_debug(SMB, "share path len %d\n", len);25542554- name = kmalloc(len + 1, GFP_KERNEL);25022502+ name = kstrdup("", GFP_KERNEL);25552503 if (!name) {25562556- rsp->hdr.Status = STATUS_NO_MEMORY;25572504 rc = -ENOMEM;25582505 goto err_out1;25592506 }25602560-25612561- memcpy(name, share->path, len);25622562- *(name + len) = '\0';25632507 }2564250825652509 req_op_level = req->RequestedOplockLevel;···26252581 goto err_out1;26262582 } else if (context) {26272583 ea_buf = (struct create_ea_buf_req *)context;25842584+ if (le16_to_cpu(context->DataOffset) +25852585+ le32_to_cpu(context->DataLength) <25862586+ sizeof(struct create_ea_buf_req)) {25872587+ rc = -EINVAL;25882588+ goto err_out1;25892589+ }26282590 if (req->CreateOptions & FILE_NO_EA_KNOWLEDGE_LE) {26292591 rsp->hdr.Status = STATUS_ACCESS_DENIED;26302592 rc = -EACCES;···26692619 } 
else if (context) {26702620 struct create_posix *posix =26712621 (struct create_posix *)context;26222622+ if (le16_to_cpu(context->DataOffset) +26232623+ le32_to_cpu(context->DataLength) <26242624+ sizeof(struct create_posix)) {26252625+ rc = -EINVAL;26262626+ goto err_out1;26272627+ }26722628 ksmbd_debug(SMB, "get posix context\n");2673262926742630 posix_mode = le32_to_cpu(posix->Mode);···26882632 goto err_out1;26892633 }2690263426912691- if (req->CreateOptions & FILE_DELETE_ON_CLOSE_LE) {26922692- /*26932693- * On delete request, instead of following up, need to26942694- * look the current entity26952695- */26962696- rc = ksmbd_vfs_kern_path(name, 0, &path, 1);26972697- if (!rc) {26352635+ rc = ksmbd_vfs_kern_path(work, name, LOOKUP_NO_SYMLINKS, &path, 1);26362636+ if (!rc) {26372637+ if (req->CreateOptions & FILE_DELETE_ON_CLOSE_LE) {26982638 /*26992639 * If file exists with under flags, return access27002640 * denied error.···27092657 path_put(&path);27102658 goto err_out;27112659 }27122712- }27132713- } else {27142714- if (test_share_config_flag(work->tcon->share_conf,27152715- KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS)) {27162716- /*27172717- * Use LOOKUP_FOLLOW to follow the path of27182718- * symlink in path buildup27192719- */27202720- rc = ksmbd_vfs_kern_path(name, LOOKUP_FOLLOW, &path, 1);27212721- if (rc) { /* Case for broken link ?*/27222722- rc = ksmbd_vfs_kern_path(name, 0, &path, 1);27232723- }27242724- } else {27252725- rc = ksmbd_vfs_kern_path(name, 0, &path, 1);27262726- if (!rc && d_is_symlink(path.dentry)) {27272727- rc = -EACCES;27282728- path_put(&path);27292729- goto err_out;27302730- }26602660+ } else if (d_is_symlink(path.dentry)) {26612661+ rc = -EACCES;26622662+ path_put(&path);26632663+ goto err_out;27312664 }27322665 }2733266627342667 if (rc) {27352735- if (rc == -EACCES) {27362736- ksmbd_debug(SMB,27372737- "User does not have right permission\n");26682668+ if (rc != -ENOENT)27382669 goto err_out;27392739- }27402670 ksmbd_debug(SMB, "can not get linux path for %s, rc = %d\n",27412671 name, rc);27422672 rc = 0;···28242790 created = true;28252791 user_ns = mnt_user_ns(path.mnt);28262792 if (ea_buf) {28272827- rc = smb2_set_ea(&ea_buf->ea, &path);27932793+ if (le32_to_cpu(ea_buf->ccontext.DataLength) <27942794+ sizeof(struct smb2_ea_info)) {27952795+ rc = -EINVAL;27962796+ goto err_out;27972797+ }27982798+27992799+ rc = smb2_set_ea(&ea_buf->ea,28002800+ le32_to_cpu(ea_buf->ccontext.DataLength),28012801+ &path);28282802 if (rc == -EOPNOTSUPP)28292803 rc = 0;28302804 else if (rc)···30653023 rc = PTR_ERR(az_req);30663024 goto err_out;30673025 } else if (az_req) {30683068- loff_t alloc_size = le64_to_cpu(az_req->AllocationSize);30263026+ loff_t alloc_size;30693027 int err;3070302830293029+ if (le16_to_cpu(az_req->ccontext.DataOffset) +30303030+ le32_to_cpu(az_req->ccontext.DataLength) <30313031+ sizeof(struct create_alloc_size_req)) {30323032+ rc = -EINVAL;30333033+ goto err_out;30343034+ }30353035+ alloc_size = le64_to_cpu(az_req->AllocationSize);30713036 ksmbd_debug(SMB,30723037 "request smb2 create allocate size : %llu\n",30733038 alloc_size);···32293180 rsp->hdr.Status = STATUS_INVALID_PARAMETER;32303181 else if (rc == -EOPNOTSUPP)32313182 rsp->hdr.Status = STATUS_NOT_SUPPORTED;32323232- else if (rc == -EACCES || rc == -ESTALE)31833183+ else if (rc == -EACCES || rc == -ESTALE || rc == -EXDEV)32333184 rsp->hdr.Status = STATUS_ACCESS_DENIED;32343185 else if (rc == -ENOENT)32353186 rsp->hdr.Status = STATUS_OBJECT_NAME_INVALID;···42434194static int get_file_basic_info(struct 
smb2_query_info_rsp *rsp,42444195 struct ksmbd_file *fp, void *rsp_org)42454196{42464246- struct smb2_file_all_info *basic_info;41974197+ struct smb2_file_basic_info *basic_info;42474198 struct kstat stat;42484199 u64 time;42494200···42534204 return -EACCES;42544205 }4255420642564256- basic_info = (struct smb2_file_all_info *)rsp->Buffer;42074207+ basic_info = (struct smb2_file_basic_info *)rsp->Buffer;42574208 generic_fillattr(file_mnt_user_ns(fp->filp), file_inode(fp->filp),42584209 &stat);42594210 basic_info->CreationTime = cpu_to_le64(fp->create_time);···42664217 basic_info->Attributes = fp->f_ci->m_fattr;42674218 basic_info->Pad1 = 0;42684219 rsp->OutputBufferLength =42694269- cpu_to_le32(offsetof(struct smb2_file_all_info, AllocationSize));42704270- inc_rfc1001_len(rsp_org, offsetof(struct smb2_file_all_info,42714271- AllocationSize));42204220+ cpu_to_le32(sizeof(struct smb2_file_basic_info));42214221+ inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_basic_info));42724222 return 0;42734223}42744224···43444296 return -EACCES;43454297 }4346429843474347- filename = convert_to_nt_pathname(fp->filename,43484348- work->tcon->share_conf->path);42994299+ filename = convert_to_nt_pathname(fp->filename);43494300 if (!filename)43504301 return -ENOMEM;43514302···44754428 file_info->NextEntryOffset = cpu_to_le32(next);44764429 }4477443044784478- if (nbytes) {44314431+ if (!S_ISDIR(stat.mode)) {44794432 file_info = (struct smb2_file_stream_info *)44804433 &rsp->Buffer[nbytes];44814434 streamlen = smbConvertToUTF16((__le16 *)file_info->StreamName,44824435 "::$DATA", 7, conn->local_nls, 0);44834436 streamlen *= 2;44844437 file_info->StreamNameLength = cpu_to_le32(streamlen);44854485- file_info->StreamSize = S_ISDIR(stat.mode) ? 0 :44864486- cpu_to_le64(stat.size);44874487- file_info->StreamAllocationSize = S_ISDIR(stat.mode) ? 
0 :44884488- cpu_to_le64(stat.size);44384438+ file_info->StreamSize = 0;44394439+ file_info->StreamAllocationSize = 0;44894440 nbytes += sizeof(struct smb2_file_stream_info) + streamlen;44904441 }44914442···47984753 struct path path;47994754 int rc = 0, len;48004755 int fs_infoclass_size = 0;48014801- int lookup_flags = 0;4802475648034803- if (test_share_config_flag(share, KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS))48044804- lookup_flags = LOOKUP_FOLLOW;48054805-48064806- rc = ksmbd_vfs_kern_path(share->path, lookup_flags, &path, 0);47574757+ rc = kern_path(share->path, LOOKUP_NO_SYMLINKS, &path);48074758 if (rc) {48084759 pr_err("cannot create vfs path\n");48094760 return -EIO;···53485307 goto out;5349530853505309 len = strlen(new_name);53515351- if (new_name[len - 1] != '/') {53105310+ if (len > 0 && new_name[len - 1] != '/') {53525311 pr_err("not allow base filename in rename\n");53535312 rc = -ESHARE;53545313 goto out;···53765335 }5377533653785337 ksmbd_debug(SMB, "new name %s\n", new_name);53795379- rc = ksmbd_vfs_kern_path(new_name, 0, &path, 1);53805380- if (rc)53385338+ rc = ksmbd_vfs_kern_path(work, new_name, LOOKUP_NO_SYMLINKS, &path, 1);53395339+ if (rc) {53405340+ if (rc != -ENOENT)53415341+ goto out;53815342 file_present = false;53825382- else53435343+ } else {53835344 path_put(&path);53455345+ }5384534653855347 if (ksmbd_share_veto_filename(share, new_name)) {53865348 rc = -ENOENT;···54235379static int smb2_create_link(struct ksmbd_work *work,54245380 struct ksmbd_share_config *share,54255381 struct smb2_file_link_info *file_info,54265426- struct file *filp,53825382+ unsigned int buf_len, struct file *filp,54275383 struct nls_table *local_nls)54285384{54295385 char *link_name = NULL, *target_name = NULL, *pathname = NULL;54305386 struct path path;54315387 bool file_present = true;54325388 int rc;53895389+53905390+ if (buf_len < (u64)sizeof(struct smb2_file_link_info) +53915391+ le32_to_cpu(file_info->FileNameLength))53925392+ return -EINVAL;5433539354345394 ksmbd_debug(SMB, "setting FILE_LINK_INFORMATION\n");54355395 pathname = kmalloc(PATH_MAX, GFP_KERNEL);···54575409 }5458541054595411 ksmbd_debug(SMB, "target name is %s\n", target_name);54605460- rc = ksmbd_vfs_kern_path(link_name, 0, &path, 0);54615461- if (rc)54125412+ rc = ksmbd_vfs_kern_path(work, link_name, LOOKUP_NO_SYMLINKS, &path, 0);54135413+ if (rc) {54145414+ if (rc != -ENOENT)54155415+ goto out;54625416 file_present = false;54635463- else54175417+ } else {54645418 path_put(&path);54195419+ }5465542054665421 if (file_info->ReplaceIfExists) {54675422 if (file_present) {···54945443 return rc;54955444}5496544554975497-static int set_file_basic_info(struct ksmbd_file *fp, char *buf,54465446+static int set_file_basic_info(struct ksmbd_file *fp,54475447+ struct smb2_file_basic_info *file_info,54985448 struct ksmbd_share_config *share)54995449{55005500- struct smb2_file_all_info *file_info;55015450 struct iattr attrs;55025451 struct timespec64 ctime;55035452 struct file *filp;···55085457 if (!(fp->daccess & FILE_WRITE_ATTRIBUTES_LE))55095458 return -EACCES;5510545955115511- file_info = (struct smb2_file_all_info *)buf;55125460 attrs.ia_valid = 0;55135461 filp = fp->filp;55145462 inode = file_inode(filp);···55845534}5585553555865536static int set_file_allocation_info(struct ksmbd_work *work,55875587- struct ksmbd_file *fp, char *buf)55375537+ struct ksmbd_file *fp,55385538+ struct smb2_file_alloc_info *file_alloc_info)55885539{55895540 /*55905541 * TODO : It's working fine only when store dos attributes···55935542 * properly with 
any smb.conf option55945543 */5595554455965596- struct smb2_file_alloc_info *file_alloc_info;55975545 loff_t alloc_blks;55985546 struct inode *inode;55995547 int rc;···56005550 if (!(fp->daccess & FILE_WRITE_DATA_LE))56015551 return -EACCES;5602555256035603- file_alloc_info = (struct smb2_file_alloc_info *)buf;56045553 alloc_blks = (le64_to_cpu(file_alloc_info->AllocationSize) + 511) >> 9;56055554 inode = file_inode(fp->filp);56065555···56225573 * inode size is retained by backup inode size.56235574 */56245575 size = i_size_read(inode);56255625- rc = ksmbd_vfs_truncate(work, NULL, fp, alloc_blks * 512);55765576+ rc = ksmbd_vfs_truncate(work, fp, alloc_blks * 512);56265577 if (rc) {56275578 pr_err("truncate failed! filename : %s, err %d\n",56285579 fp->filename, rc);···56355586}5636558756375588static int set_end_of_file_info(struct ksmbd_work *work, struct ksmbd_file *fp,56385638- char *buf)55895589+ struct smb2_file_eof_info *file_eof_info)56395590{56405640- struct smb2_file_eof_info *file_eof_info;56415591 loff_t newsize;56425592 struct inode *inode;56435593 int rc;···56445596 if (!(fp->daccess & FILE_WRITE_DATA_LE))56455597 return -EACCES;5646559856475647- file_eof_info = (struct smb2_file_eof_info *)buf;56485599 newsize = le64_to_cpu(file_eof_info->EndOfFile);56495600 inode = file_inode(fp->filp);56505601···56575610 if (inode->i_sb->s_magic != MSDOS_SUPER_MAGIC) {56585611 ksmbd_debug(SMB, "filename : %s truncated to newsize %lld\n",56595612 fp->filename, newsize);56605660- rc = ksmbd_vfs_truncate(work, NULL, fp, newsize);56135613+ rc = ksmbd_vfs_truncate(work, fp, newsize);56615614 if (rc) {56625615 ksmbd_debug(SMB, "truncate failed! filename : %s err %d\n",56635616 fp->filename, rc);···56705623}5671562456725625static int set_rename_info(struct ksmbd_work *work, struct ksmbd_file *fp,56735673- char *buf)56265626+ struct smb2_file_rename_info *rename_info,56275627+ unsigned int buf_len)56745628{56755629 struct user_namespace *user_ns;56765630 struct ksmbd_file *parent_fp;···56835635 pr_err("no right to delete : 0x%x\n", fp->daccess);56845636 return -EACCES;56855637 }56385638+56395639+ if (buf_len < (u64)sizeof(struct smb2_file_rename_info) +56405640+ le32_to_cpu(rename_info->FileNameLength))56415641+ return -EINVAL;5686564256875643 user_ns = file_mnt_user_ns(fp->filp);56885644 if (ksmbd_stream_fd(fp))···57105658 }57115659 }57125660next:57135713- return smb2_rename(work, fp, user_ns,57145714- (struct smb2_file_rename_info *)buf,56615661+ return smb2_rename(work, fp, user_ns, rename_info,57155662 work->sess->conn->local_nls);57165663}5717566457185718-static int set_file_disposition_info(struct ksmbd_file *fp, char *buf)56655665+static int set_file_disposition_info(struct ksmbd_file *fp,56665666+ struct smb2_file_disposition_info *file_info)57195667{57205720- struct smb2_file_disposition_info *file_info;57215668 struct inode *inode;5722566957235670 if (!(fp->daccess & FILE_DELETE_LE)) {···57255674 }5726567557275676 inode = file_inode(fp->filp);57285728- file_info = (struct smb2_file_disposition_info *)buf;57295677 if (file_info->DeletePending) {57305678 if (S_ISDIR(inode->i_mode) &&57315679 ksmbd_vfs_empty_dir(fp) == -ENOTEMPTY)···57365686 return 0;57375687}5738568857395739-static int set_file_position_info(struct ksmbd_file *fp, char *buf)56895689+static int set_file_position_info(struct ksmbd_file *fp,56905690+ struct smb2_file_pos_info *file_info)57405691{57415741- struct smb2_file_pos_info *file_info;57425692 loff_t current_byte_offset;57435693 unsigned long sector_size;57445694 struct 
inode *inode;5745569557465696 inode = file_inode(fp->filp);57475747- file_info = (struct smb2_file_pos_info *)buf;57485697 current_byte_offset = le64_to_cpu(file_info->CurrentByteOffset);57495698 sector_size = inode->i_sb->s_blocksize;57505699···57595710 return 0;57605711}5761571257625762-static int set_file_mode_info(struct ksmbd_file *fp, char *buf)57135713+static int set_file_mode_info(struct ksmbd_file *fp,57145714+ struct smb2_file_mode_info *file_info)57635715{57645764- struct smb2_file_mode_info *file_info;57655716 __le32 mode;5766571757675767- file_info = (struct smb2_file_mode_info *)buf;57685718 mode = file_info->Mode;5769571957705720 if ((mode & ~FILE_MODE_INFO_MASK) ||···57935745 * TODO: need to implement an error handling for STATUS_INFO_LENGTH_MISMATCH57945746 */57955747static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp,57965796- int info_class, char *buf,57485748+ struct smb2_set_info_req *req,57975749 struct ksmbd_share_config *share)57985750{57995799- switch (info_class) {57515751+ unsigned int buf_len = le32_to_cpu(req->BufferLength);57525752+57535753+ switch (req->FileInfoClass) {58005754 case FILE_BASIC_INFORMATION:58015801- return set_file_basic_info(fp, buf, share);57555755+ {57565756+ if (buf_len < sizeof(struct smb2_file_basic_info))57575757+ return -EINVAL;5802575857595759+ return set_file_basic_info(fp, (struct smb2_file_basic_info *)req->Buffer, share);57605760+ }58035761 case FILE_ALLOCATION_INFORMATION:58045804- return set_file_allocation_info(work, fp, buf);57625762+ {57635763+ if (buf_len < sizeof(struct smb2_file_alloc_info))57645764+ return -EINVAL;5805576557665766+ return set_file_allocation_info(work, fp,57675767+ (struct smb2_file_alloc_info *)req->Buffer);57685768+ }58065769 case FILE_END_OF_FILE_INFORMATION:58075807- return set_end_of_file_info(work, fp, buf);57705770+ {57715771+ if (buf_len < sizeof(struct smb2_file_eof_info))57725772+ return -EINVAL;5808577357745774+ return set_end_of_file_info(work, fp,57755775+ (struct smb2_file_eof_info *)req->Buffer);57765776+ }58095777 case FILE_RENAME_INFORMATION:57785778+ {58105779 if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {58115780 ksmbd_debug(SMB,58125781 "User does not have write permission\n");58135782 return -EACCES;58145783 }58155815- return set_rename_info(work, fp, buf);5816578457855785+ if (buf_len < sizeof(struct smb2_file_rename_info))57865786+ return -EINVAL;57875787+57885788+ return set_rename_info(work, fp,57895789+ (struct smb2_file_rename_info *)req->Buffer,57905790+ buf_len);57915791+ }58175792 case FILE_LINK_INFORMATION:58185818- return smb2_create_link(work, work->tcon->share_conf,58195819- (struct smb2_file_link_info *)buf, fp->filp,58205820- work->sess->conn->local_nls);57935793+ {57945794+ if (buf_len < sizeof(struct smb2_file_link_info))57955795+ return -EINVAL;5821579657975797+ return smb2_create_link(work, work->tcon->share_conf,57985798+ (struct smb2_file_link_info *)req->Buffer,57995799+ buf_len, fp->filp,58005800+ work->sess->conn->local_nls);58015801+ }58225802 case FILE_DISPOSITION_INFORMATION:58035803+ {58235804 if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {58245805 ksmbd_debug(SMB,58255806 "User does not have write permission\n");58265807 return -EACCES;58275808 }58285828- return set_file_disposition_info(fp, buf);5829580958105810+ if (buf_len < sizeof(struct smb2_file_disposition_info))58115811+ return -EINVAL;58125812+58135813+ return set_file_disposition_info(fp,58145814+ (struct 
smb2_file_disposition_info *)req->Buffer);58155815+ }58305816 case FILE_FULL_EA_INFORMATION:58315817 {58325818 if (!(fp->daccess & FILE_WRITE_EA_LE)) {···58695787 return -EACCES;58705788 }5871578958725872- return smb2_set_ea((struct smb2_ea_info *)buf,58735873- &fp->filp->f_path);58745874- }57905790+ if (buf_len < sizeof(struct smb2_ea_info))57915791+ return -EINVAL;5875579257935793+ return smb2_set_ea((struct smb2_ea_info *)req->Buffer,57945794+ buf_len, &fp->filp->f_path);57955795+ }58765796 case FILE_POSITION_INFORMATION:58775877- return set_file_position_info(fp, buf);57975797+ {57985798+ if (buf_len < sizeof(struct smb2_file_pos_info))57995799+ return -EINVAL;5878580058015801+ return set_file_position_info(fp, (struct smb2_file_pos_info *)req->Buffer);58025802+ }58795803 case FILE_MODE_INFORMATION:58805880- return set_file_mode_info(fp, buf);58045804+ {58055805+ if (buf_len < sizeof(struct smb2_file_mode_info))58065806+ return -EINVAL;58075807+58085808+ return set_file_mode_info(fp, (struct smb2_file_mode_info *)req->Buffer);58095809+ }58815810 }5882581158835883- pr_err("Unimplemented Fileinfoclass :%d\n", info_class);58125812+ pr_err("Unimplemented Fileinfoclass :%d\n", req->FileInfoClass);58845813 return -EOPNOTSUPP;58855814}58865815···59525859 switch (req->InfoType) {59535860 case SMB2_O_INFO_FILE:59545861 ksmbd_debug(SMB, "GOT SMB2_O_INFO_FILE\n");59555955- rc = smb2_set_info_file(work, fp, req->FileInfoClass,59565956- req->Buffer, work->tcon->share_conf);58625862+ rc = smb2_set_info_file(work, fp, req, work->tcon->share_conf);59575863 break;59585864 case SMB2_O_INFO_SECURITY:59595865 ksmbd_debug(SMB, "GOT SMB2_O_INFO_SECURITY\n");···59795887 return 0;5980588859815889err_out:59825982- if (rc == -EACCES || rc == -EPERM)58905890+ if (rc == -EACCES || rc == -EPERM || rc == -EXDEV)59835891 rsp->hdr.Status = STATUS_ACCESS_DENIED;59845892 else if (rc == -EINVAL)59855893 rsp->hdr.Status = STATUS_INVALID_PARAMETER;···8306821483078215 WORK_BUFFERS(work, req, rsp);8308821683098309- if (le16_to_cpu(req->Command) == SMB2_NEGOTIATE_HE)82178217+ if (le16_to_cpu(req->Command) == SMB2_NEGOTIATE_HE &&82188218+ conn->preauth_info)83108219 ksmbd_gen_preauth_integrity_hash(conn, (char *)rsp,83118220 conn->preauth_info->Preauth_HashValue);83128221···84148321 unsigned int buf_data_size = pdu_length + 4 -84158322 sizeof(struct smb2_transform_hdr);84168323 struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;84178417- unsigned int orig_len = le32_to_cpu(tr_hdr->OriginalMessageSize);84188324 int rc = 0;84198419-84208420- sess = ksmbd_session_lookup_all(conn, le64_to_cpu(tr_hdr->SessionId));84218421- if (!sess) {84228422- pr_err("invalid session id(%llx) in transform header\n",84238423- le64_to_cpu(tr_hdr->SessionId));84248424- return -ECONNABORTED;84258425- }8426832584278326 if (pdu_length + 4 <84288327 sizeof(struct smb2_transform_hdr) + sizeof(struct smb2_hdr)) {···84238338 return -ECONNABORTED;84248339 }8425834084268426- if (pdu_length + 4 < orig_len + sizeof(struct smb2_transform_hdr)) {83418341+ if (pdu_length + 4 <83428342+ le32_to_cpu(tr_hdr->OriginalMessageSize) + sizeof(struct smb2_transform_hdr)) {84278343 pr_err("Transform message is broken\n");83448344+ return -ECONNABORTED;83458345+ }83468346+83478347+ sess = ksmbd_session_lookup_all(conn, le64_to_cpu(tr_hdr->SessionId));83488348+ if (!sess) {83498349+ pr_err("invalid session id(%llx) in transform header\n",83508350+ le64_to_cpu(tr_hdr->SessionId));84288351 return -ECONNABORTED;84298352 }84308353
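
The common thread through the smb2pdu.c changes is that every client-controlled offset/length pair (negotiate-context offsets, EA buffers, create contexts, the transform header, set-info payloads) is validated before the buffer is reinterpreted as a typed struct. In smb2_set_info_file() that becomes a per-info-class minimum length check ahead of the cast; the same guard can be expressed as a table, with illustrative classes and layouts:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct basic_info { uint64_t crtime, atime, mtime, chtime; uint32_t attr, pad; };
struct eof_info   { uint64_t end_of_file; };

static const struct {
	int info_class;
	size_t min_len;
} class_min[] = {
	{ 4,  sizeof(struct basic_info) },	/* FileBasicInformation */
	{ 20, sizeof(struct eof_info) },	/* FileEndOfFileInformation */
};

/* Refuse any payload too short for the struct its class implies. */
static int check_len(int info_class, size_t buf_len)
{
	for (size_t i = 0; i < sizeof(class_min) / sizeof(class_min[0]); i++)
		if (class_min[i].info_class == info_class)
			return buf_len >= class_min[i].min_len ? 0 : -22; /* EINVAL */
	return -95;	/* EOPNOTSUPP: class not handled */
}

int main(void)
{
	printf("eof class, 4 bytes: %d\n", check_len(20, 4));	/* -22 */
	printf("eof class, 8 bytes: %d\n", check_len(20, 8));	/*   0 */
	return 0;
}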
+9
fs/ksmbd/smb2pdu.h
···14641464 char FileName[1];14651465} __packed; /* level 18 Query */1466146614671467+struct smb2_file_basic_info { /* data block encoding of response to level 18 */14681468+ __le64 CreationTime; /* Beginning of FILE_BASIC_INFO equivalent */14691469+ __le64 LastAccessTime;14701470+ __le64 LastWriteTime;14711471+ __le64 ChangeTime;14721472+ __le32 Attributes;14731473+ __u32 Pad1; /* End of FILE_BASIC_INFO_INFO equivalent */14741474+} __packed;14751475+14671476struct smb2_file_alt_name_info {14681477 __le32 FileNameLength;14691478 char FileName[0];
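
The new smb2_file_basic_info matches FILE_BASIC_INFORMATION, which is 40 bytes on the wire: four 64-bit timestamps, a 32-bit attributes word and a 32-bit pad. A compile-time size check makes the packing assumption explicit; this is a sketch, not part of the patch:

#include <stdint.h>

struct smb2_file_basic_info_wire {
	uint64_t CreationTime;
	uint64_t LastAccessTime;
	uint64_t LastWriteTime;
	uint64_t ChangeTime;
	uint32_t Attributes;
	uint32_t Pad1;
} __attribute__((packed));

_Static_assert(sizeof(struct smb2_file_basic_info_wire) == 40,
	       "FILE_BASIC_INFORMATION is 40 bytes on the wire");

int main(void) { return 0; }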
···380380{381381 int i, ret;382382 int num_aces = 0;383383- int acl_size;383383+ unsigned int acl_size;384384 char *acl_base;385385 struct smb_ace **ppace;386386 struct posix_acl_entry *cf_pace, *cf_pdace;···392392 return;393393394394 /* validate that we do not go past end of acl */395395- if (end_of_acl <= (char *)pdacl ||395395+ if (end_of_acl < (char *)pdacl + sizeof(struct smb_acl) ||396396 end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) {397397 pr_err("ACL too small to parse DACL\n");398398 return;···431431 * user/group/other have no permissions432432 */433433 for (i = 0; i < num_aces; ++i) {434434+ if (end_of_acl - acl_base < acl_size)435435+ break;436436+434437 ppace[i] = (struct smb_ace *)(acl_base + acl_size);435438 acl_base = (char *)ppace[i];439439+ acl_size = offsetof(struct smb_ace, sid) +440440+ offsetof(struct smb_sid, sub_auth);441441+442442+ if (end_of_acl - acl_base < acl_size ||443443+ ppace[i]->sid.num_subauth > SID_MAX_SUB_AUTHORITIES ||444444+ (end_of_acl - acl_base <445445+ acl_size + sizeof(__le32) * ppace[i]->sid.num_subauth) ||446446+ (le16_to_cpu(ppace[i]->size) <447447+ acl_size + sizeof(__le32) * ppace[i]->sid.num_subauth))448448+ break;449449+436450 acl_size = le16_to_cpu(ppace[i]->size);437451 ppace[i]->access_req =438452 smb_map_generic_desired_access(ppace[i]->access_req);···820806821807 if (!pntsd)822808 return -EIO;809809+810810+ if (acl_len < sizeof(struct smb_ntsd))811811+ return -EINVAL;823812824813 owner_sid_ptr = (struct smb_sid *)((char *)pntsd +825814 le32_to_cpu(pntsd->osidoffset));
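
This hunk hardens parse_dacl() in fs/ksmbd/smbacl.c: an ACE's self-declared size is trusted only after the fixed header (everything up to the first SID sub-authority) is known to fit, and the minimum is derived with offsetof() instead of a magic number. The derivation with simplified types:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct sid {			/* simplified, not struct smb_sid */
	uint8_t revision, num_subauth;
	uint8_t authority[6];
	uint32_t sub_auth[15];	/* SID_MAX_SUB_AUTHORITIES */
};

struct ace {			/* simplified, not struct smb_ace */
	uint8_t type, flags;
	uint16_t size;
	uint32_t access_req;
	struct sid sid;
};

int main(void)
{
	/* bytes that must exist before any sub-authority can be read */
	size_t min_ace = offsetof(struct ace, sid) +
			 offsetof(struct sid, sub_auth);

	printf("minimum ACE size: %zu bytes\n", min_ace);
	/* a full ACE must then carry num_subauth * 4 further bytes,
	 * which is the second check the patch adds */
	return 0;
}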
+2-2
fs/ksmbd/transport_tcp.c
···215215 * ksmbd_kthread_fn() - listen to new SMB connections and callback server216216 * @p: arguments to forker thread217217 *218218- * Return: Returns a task_struct or ERR_PTR218218+ * Return: 0 on success, error number otherwise219219 */220220static int ksmbd_kthread_fn(void *p)221221{···387387/**388388 * create_socket - create socket for ksmbd/0389389 *390390- * Return: Returns a task_struct or ERR_PTR390390+ * Return: 0 on success, error number otherwise391391 */392392static int create_socket(struct interface *iface)393393{
+88-94
fs/ksmbd/vfs.c
···1919#include <linux/sched/xacct.h>2020#include <linux/crc32c.h>21212222+#include "../internal.h" /* for vfs_path_lookup */2323+2224#include "glob.h"2325#include "oplock.h"2426#include "connection.h"···4644 p++;4745 } else {4846 p = NULL;4949- pr_err("Invalid path %s\n", path);5047 }5148 return p;5249}···156155/**157156 * ksmbd_vfs_create() - vfs helper for smb create file158157 * @work: work159159- * @name: file name158158+ * @name: file name that is relative to share160159 * @mode: file create mode161160 *162161 * Return: 0 on success, otherwise error···167166 struct dentry *dentry;168167 int err;169168170170- dentry = kern_path_create(AT_FDCWD, name, &path, 0);169169+ dentry = ksmbd_vfs_kern_path_create(work, name,170170+ LOOKUP_NO_SYMLINKS, &path);171171 if (IS_ERR(dentry)) {172172 err = PTR_ERR(dentry);173173 if (err != -ENOENT)···193191/**194192 * ksmbd_vfs_mkdir() - vfs helper for smb create directory195193 * @work: work196196- * @name: directory name194194+ * @name: directory name that is relative to share197195 * @mode: directory create mode198196 *199197 * Return: 0 on success, otherwise error···205203 struct dentry *dentry;206204 int err;207205208208- dentry = kern_path_create(AT_FDCWD, name, &path, LOOKUP_DIRECTORY);206206+ dentry = ksmbd_vfs_kern_path_create(work, name,207207+ LOOKUP_NO_SYMLINKS | LOOKUP_DIRECTORY,208208+ &path);209209 if (IS_ERR(dentry)) {210210 err = PTR_ERR(dentry);211211 if (err != -EEXIST)···582578583579/**584580 * ksmbd_vfs_remove_file() - vfs helper for smb rmdir or unlink585585- * @name: absolute directory or file name581581+ * @name: directory or file name that is relative to share586582 *587583 * Return: 0 on success, otherwise error588584 */···592588 struct path path;593589 struct dentry *parent;594590 int err;595595- int flags = 0;596591597592 if (ksmbd_override_fsids(work))598593 return -ENOMEM;599594600600- if (test_share_config_flag(work->tcon->share_conf,601601- KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS))602602- flags = LOOKUP_FOLLOW;603603-604604- err = kern_path(name, flags, &path);595595+ err = ksmbd_vfs_kern_path(work, name, LOOKUP_NO_SYMLINKS, &path, false);605596 if (err) {606597 ksmbd_debug(VFS, "can't get %s, err %d\n", name, err);607598 ksmbd_revert_fsids(work);···641642/**642643 * ksmbd_vfs_link() - vfs helper for creating smb hardlink643644 * @oldname: source file name644644- * @newname: hardlink name645645+ * @newname: hardlink name that is relative to share645646 *646647 * Return: 0 on success, otherwise error647648 */···651652 struct path oldpath, newpath;652653 struct dentry *dentry;653654 int err;654654- int flags = 0;655655656656 if (ksmbd_override_fsids(work))657657 return -ENOMEM;658658659659- if (test_share_config_flag(work->tcon->share_conf,660660- KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS))661661- flags = LOOKUP_FOLLOW;662662-663663- err = kern_path(oldname, flags, &oldpath);659659+ err = kern_path(oldname, LOOKUP_NO_SYMLINKS, &oldpath);664660 if (err) {665661 pr_err("cannot get linux path for %s, err = %d\n",666662 oldname, err);667663 goto out1;668664 }669665670670- dentry = kern_path_create(AT_FDCWD, newname, &newpath,671671- flags | LOOKUP_REVAL);666666+ dentry = ksmbd_vfs_kern_path_create(work, newname,667667+ LOOKUP_NO_SYMLINKS | LOOKUP_REVAL,668668+ &newpath);672669 if (IS_ERR(dentry)) {673670 err = PTR_ERR(dentry);674671 pr_err("path create err for %s, err %d\n", newname, err);···783788 struct dentry *src_dent, *trap_dent, *src_child;784789 char *dst_name;785790 int err;786786- int flags;787791788792 dst_name = 
extract_last_component(newname);789789- if (!dst_name)790790- return -EINVAL;793793+ if (!dst_name) {794794+ dst_name = newname;795795+ newname = "";796796+ }791797792798 src_dent_parent = dget_parent(fp->filp->f_path.dentry);793799 src_dent = fp->filp->f_path.dentry;794800795795- flags = LOOKUP_DIRECTORY;796796- if (test_share_config_flag(work->tcon->share_conf,797797- KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS))798798- flags |= LOOKUP_FOLLOW;799799-800800- err = kern_path(newname, flags, &dst_path);801801+ err = ksmbd_vfs_kern_path(work, newname,802802+ LOOKUP_NO_SYMLINKS | LOOKUP_DIRECTORY,803803+ &dst_path, false);801804 if (err) {802805 ksmbd_debug(VFS, "Cannot get path for %s [%d]\n", newname, err);803806 goto out;···841848/**842849 * ksmbd_vfs_truncate() - vfs helper for smb file truncate843850 * @work: work844844- * @name: old filename845851 * @fid: file id of old file846852 * @size: truncate to given size847853 *848854 * Return: 0 on success, otherwise error849855 */850850-int ksmbd_vfs_truncate(struct ksmbd_work *work, const char *name,856856+int ksmbd_vfs_truncate(struct ksmbd_work *work,851857 struct ksmbd_file *fp, loff_t size)852858{853853- struct path path;854859 int err = 0;860860+ struct file *filp;855861856856- if (name) {857857- err = kern_path(name, 0, &path);862862+ filp = fp->filp;863863+864864+ /* Do we need to break any of a levelII oplock? */865865+ smb_break_all_levII_oplock(work, fp, 1);866866+867867+ if (!work->tcon->posix_extensions) {868868+ struct inode *inode = file_inode(filp);869869+870870+ if (size < inode->i_size) {871871+ err = check_lock_range(filp, size,872872+ inode->i_size - 1, WRITE);873873+ } else {874874+ err = check_lock_range(filp, inode->i_size,875875+ size - 1, WRITE);876876+ }877877+858878 if (err) {859859- pr_err("cannot get linux path for %s, err %d\n",860860- name, err);861861- return err;879879+ pr_err("failed due to lock\n");880880+ return -EAGAIN;862881 }863863- err = vfs_truncate(&path, size);864864- if (err)865865- pr_err("truncate failed for %s err %d\n",866866- name, err);867867- path_put(&path);868868- } else {869869- struct file *filp;870870-871871- filp = fp->filp;872872-873873- /* Do we need to break any of a levelII oplock? 
*/874874- smb_break_all_levII_oplock(work, fp, 1);875875-876876- if (!work->tcon->posix_extensions) {877877- struct inode *inode = file_inode(filp);878878-879879- if (size < inode->i_size) {880880- err = check_lock_range(filp, size,881881- inode->i_size - 1, WRITE);882882- } else {883883- err = check_lock_range(filp, inode->i_size,884884- size - 1, WRITE);885885- }886886-887887- if (err) {888888- pr_err("failed due to lock\n");889889- return -EAGAIN;890890- }891891- }892892-893893- err = vfs_truncate(&filp->f_path, size);894894- if (err)895895- pr_err("truncate failed for filename : %s err %d\n",896896- fp->filename, err);897882 }898883884884+ err = vfs_truncate(&filp->f_path, size);885885+ if (err)886886+ pr_err("truncate failed for filename : %s err %d\n",887887+ fp->filename, err);899888 return err;900889}901890···1195122011961221/**11971222 * ksmbd_vfs_kern_path() - lookup a file and get path info11981198- * @name: name of file for lookup12231223+ * @name: file path that is relative to share11991224 * @flags: lookup flags12001225 * @path: if lookup succeed, return path info12011226 * @caseless: caseless filename lookup12021227 *12031228 * Return: 0 on success, otherwise error12041229 */12051205-int ksmbd_vfs_kern_path(char *name, unsigned int flags, struct path *path,12061206- bool caseless)12301230+int ksmbd_vfs_kern_path(struct ksmbd_work *work, char *name,12311231+ unsigned int flags, struct path *path, bool caseless)12071232{12331233+ struct ksmbd_share_config *share_conf = work->tcon->share_conf;12081234 int err;1209123512101210- if (name[0] != '/')12111211- return -EINVAL;12121212-12131213- err = kern_path(name, flags, path);12361236+ flags |= LOOKUP_BENEATH;12371237+ err = vfs_path_lookup(share_conf->vfs_path.dentry,12381238+ share_conf->vfs_path.mnt,12391239+ name,12401240+ flags,12411241+ path);12141242 if (!err)12151243 return 0;12161244···12271249 return -ENOMEM;1228125012291251 path_len = strlen(filepath);12301230- remain_len = path_len - 1;12521252+ remain_len = path_len;1231125312321232- err = kern_path("/", flags, &parent);12331233- if (err)12341234- goto out;12541254+ parent = share_conf->vfs_path;12551255+ path_get(&parent);1235125612361257 while (d_can_lookup(parent.dentry)) {12371258 char *filename = filepath + path_len - remain_len;···1243126612441267 err = ksmbd_vfs_lookup_in_dir(&parent, filename,12451268 filename_len);12461246- if (err) {12471247- path_put(&parent);12481248- goto out;12491249- }12501250-12511269 path_put(&parent);12521252- next[0] = '\0';12531253-12541254- err = kern_path(filepath, flags, &parent);12551270 if (err)12561271 goto out;1257127212581258- if (is_last) {12591259- path->mnt = parent.mnt;12601260- path->dentry = parent.dentry;12731273+ next[0] = '\0';12741274+12751275+ err = vfs_path_lookup(share_conf->vfs_path.dentry,12761276+ share_conf->vfs_path.mnt,12771277+ filepath,12781278+ flags,12791279+ &parent);12801280+ if (err)12811281+ goto out;12821282+ else if (is_last) {12831283+ *path = parent;12611284 goto out;12621285 }12631286···12711294 kfree(filepath);12721295 }12731296 return err;12971297+}12981298+12991299+struct dentry *ksmbd_vfs_kern_path_create(struct ksmbd_work *work,13001300+ const char *name,13011301+ unsigned int flags,13021302+ struct path *path)13031303+{13041304+ char *abs_name;13051305+ struct dentry *dent;13061306+13071307+ abs_name = convert_to_unix_name(work->tcon->share_conf, name);13081308+ if (!abs_name)13091309+ return ERR_PTR(-ENOMEM);13101310+13111311+ dent = kern_path_create(AT_FDCWD, abs_name, path, 
flags);13121312+ kfree(abs_name);13131313+ return dent;12741314}1275131512761316int ksmbd_vfs_remove_acl_xattrs(struct user_namespace *user_ns,
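The common thread in this file: every helper that used to resolve client-controlled names through absolute-path kern_path()/kern_path_create() now anchors the walk at the share root instead. A minimal sketch of that anchored lookup, assuming a hypothetical share_root path and ignoring ksmbd's caseless fallback:

    #include <linux/namei.h>
    #include <linux/path.h>

    /* Sketch only, not the ksmbd code: resolve 'name' relative to
     * 'share_root', refusing symlinks and any walk above the root. */
    static int lookup_in_share(struct path *share_root, const char *name,
                               struct path *out)
    {
            return vfs_path_lookup(share_root->dentry, share_root->mnt, name,
                                   LOOKUP_NO_SYMLINKS | LOOKUP_BENEATH, out);
    }

LOOKUP_BENEATH makes the walk fail on absolute components or on any ".." that would step out of share_root, which is what confines lookups to the export.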
···42424343/**4444 * locks_end_grace4545- * @net: net namespace that this lock manager belongs to4645 * @lm: who this grace period is for4746 *4847 * Call this function to state that the given lock manager is ready to
+1-1
fs/nfsd/filecache.c
···542542}543543544544/**545545- * nfsd_file_close_inode_sync - attempt to forcibly close a nfsd_file545545+ * nfsd_file_close_inode - attempt a delayed close of a nfsd_file546546 * @inode: inode of the file to attempt to remove547547 *548548 * Walk the whole hash bucket, looking for any files that correspond to "inode".
+13-3
fs/nfsd/nfs4state.c
···35703570}3571357135723572static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst,35733573- struct nfsd4_session *session, u32 req)35733573+ struct nfsd4_session *session, u32 req, struct nfsd4_conn **conn)35743574{35753575 struct nfs4_client *clp = session->se_client;35763576 struct svc_xprt *xpt = rqst->rq_xprt;···35933593 else35943594 status = nfserr_inval;35953595 spin_unlock(&clp->cl_lock);35963596+ if (status == nfs_ok && conn)35973597+ *conn = c;35963598 return status;35973599}35983600···36193617 status = nfserr_wrong_cred;36203618 if (!nfsd4_mach_creds_match(session->se_client, rqstp))36213619 goto out;36223622- status = nfsd4_match_existing_connection(rqstp, session, bcts->dir);36233623- if (status == nfs_ok || status == nfserr_inval)36203620+ status = nfsd4_match_existing_connection(rqstp, session,36213621+ bcts->dir, &conn);36223622+ if (status == nfs_ok) {36233623+ if (bcts->dir == NFS4_CDFC4_FORE_OR_BOTH ||36243624+ bcts->dir == NFS4_CDFC4_BACK)36253625+ conn->cn_flags |= NFS4_CDFC4_BACK;36263626+ nfsd4_probe_callback(session->se_client);36273627+ goto out;36283628+ }36293629+ if (status == nfserr_inval)36243630 goto out;36253631 status = nfsd4_map_bcts_dir(&bcts->dir);36263632 if (status)
+11-8
fs/nfsd/nfs4xdr.c
···35443544 goto fail;35453545 cd->rd_maxcount -= entry_bytes;35463546 /*35473547- * RFC 3530 14.2.24 describes rd_dircount as only a "hint", so35483548- * let's always let through the first entry, at least:35473547+ * RFC 3530 14.2.24 describes rd_dircount as only a "hint", and35483548+ * notes that it could be zero. If it is zero, then the server35493549+ * should enforce only the rd_maxcount value.35493550 */35503550- if (!cd->rd_dircount)35513551- goto fail;35523552- name_and_cookie = 4 + 4 * XDR_QUADLEN(namlen) + 8;35533553- if (name_and_cookie > cd->rd_dircount && cd->cookie_offset)35543554- goto fail;35553555- cd->rd_dircount -= min(cd->rd_dircount, name_and_cookie);35513551+ if (cd->rd_dircount) {35523552+ name_and_cookie = 4 + 4 * XDR_QUADLEN(namlen) + 8;35533553+ if (name_and_cookie > cd->rd_dircount && cd->cookie_offset)35543554+ goto fail;35553555+ cd->rd_dircount -= min(cd->rd_dircount, name_and_cookie);35563556+ if (!cd->rd_dircount)35573557+ cd->rd_maxcount = 0;35583558+ }3556355935573560 cd->cookie_offset = cookie_offset;35583561skip_entry:
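Worked example of the accounting above: a 10-byte name costs 4 + 4 * XDR_QUADLEN(10) + 8 = 4 + 12 + 8 = 24 bytes against rd_dircount. Once the hint is exhausted, rd_maxcount is zeroed so the next entry ends the listing cleanly, instead of a zero hint rejecting the very first entry as the old code did.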
···12191219 goto out_dput;12201220 }12211221 } else {12221222- if (!d_is_negative(newdentry) &&12231223- (!new_opaque || !ovl_is_whiteout(newdentry)))12241224- goto out_dput;12221222+ if (!d_is_negative(newdentry)) {12231223+ if (!new_opaque || !ovl_is_whiteout(newdentry))12241224+ goto out_dput;12251225+ } else {12261226+ if (flags & RENAME_EXCHANGE)12271227+ goto out_dput;12281228+ }12251229 }1226123012271231 if (olddentry == trap)
+14-1
fs/overlayfs/file.c
···296296 if (ret)297297 return ret;298298299299+ ret = -EINVAL;300300+ if (iocb->ki_flags & IOCB_DIRECT &&301301+ (!real.file->f_mapping->a_ops ||302302+ !real.file->f_mapping->a_ops->direct_IO))303303+ goto out_fdput;304304+299305 old_cred = ovl_override_creds(file_inode(file)->i_sb);300306 if (is_sync_kiocb(iocb)) {301307 ret = vfs_iter_read(real.file, iter, &iocb->ki_pos,···326320out:327321 revert_creds(old_cred);328322 ovl_file_accessed(file);329329-323323+out_fdput:330324 fdput(real);331325332326 return ret;···354348 ret = ovl_real_fdget(file, &real);355349 if (ret)356350 goto out_unlock;351351+352352+ ret = -EINVAL;353353+ if (iocb->ki_flags & IOCB_DIRECT &&354354+ (!real.file->f_mapping->a_ops ||355355+ !real.file->f_mapping->a_ops->direct_IO))356356+ goto out_fdput;357357358358 if (!ovl_should_sync(OVL_FS(inode->i_sb)))359359 ifl &= ~(IOCB_DSYNC | IOCB_SYNC);···396384 }397385out:398386 revert_creds(old_cred);387387+out_fdput:399388 fdput(real);400389401390out_unlock:
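Both hunks add the same guard; factored out, it is just a predicate over the real file's address_space. A sketch under that assumption (helper name invented):

    #include <linux/fs.h>

    /* An IOCB_DIRECT request may only be forwarded when the underlying
     * file's address_space actually implements direct I/O. */
    static bool ovl_can_direct_io(struct file *realfile)
    {
            const struct address_space_operations *aops =
                    realfile->f_mapping->a_ops;

            return aops && aops->direct_IO;
    }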
+2-10
fs/vboxsf/super.c
···21212222#define VBOXSF_SUPER_MAGIC 0x786f4256 /* 'VBox' little endian */23232424-#define VBSF_MOUNT_SIGNATURE_BYTE_0 ('\000')2525-#define VBSF_MOUNT_SIGNATURE_BYTE_1 ('\377')2626-#define VBSF_MOUNT_SIGNATURE_BYTE_2 ('\376')2727-#define VBSF_MOUNT_SIGNATURE_BYTE_3 ('\375')2424+static const unsigned char VBSF_MOUNT_SIGNATURE[4] = "\000\377\376\375";28252926static int follow_symlinks;3027module_param(follow_symlinks, int, 0444);···383386384387static int vboxsf_parse_monolithic(struct fs_context *fc, void *data)385388{386386- unsigned char *options = data;387387-388388- if (options && options[0] == VBSF_MOUNT_SIGNATURE_BYTE_0 &&389389- options[1] == VBSF_MOUNT_SIGNATURE_BYTE_1 &&390390- options[2] == VBSF_MOUNT_SIGNATURE_BYTE_2 &&391391- options[3] == VBSF_MOUNT_SIGNATURE_BYTE_3) {389389+ if (data && !memcmp(data, VBSF_MOUNT_SIGNATURE, 4)) {392390 vbg_err("vboxsf: Old binary mount data not supported, remove obsolete mount.vboxsf and/or update your VBoxService.\n");393391 return -EINVAL;394392 }
+1-1
fs/verity/enable.c
···177177 * (level 0) and ascending to the root node (level 'num_levels - 1').178178 * Then at the end (level 'num_levels'), calculate the root hash.179179 */180180- blocks = (inode->i_size + params->block_size - 1) >>180180+ blocks = ((u64)inode->i_size + params->block_size - 1) >>181181 params->log_blocksize;182182 for (level = 0; level <= params->num_levels; level++) {183183 err = build_merkle_tree_level(filp, level, blocks, params,
+1-1
fs/verity/open.c
···8989 */90909191 /* Compute number of levels and the number of blocks in each level */9292- blocks = (inode->i_size + params->block_size - 1) >> log_blocksize;9292+ blocks = ((u64)inode->i_size + params->block_size - 1) >> log_blocksize;9393 pr_debug("Data is %lld bytes (%llu blocks)\n", inode->i_size, blocks);9494 while (blocks > 1) {9595 if (params->num_levels >= FS_VERITY_MAX_LEVELS) {
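The two casts in enable.c and open.c fix the same arithmetic: for i_size close to S64_MAX, inode->i_size + params->block_size - 1 performed in signed 64-bit overflows, which is undefined behaviour in C. Widening i_size to u64 first makes the round-up-and-shift well defined for every input; for example, (u64)S64_MAX + 4095 is simply 2^63 + 4094 rather than a signed overflow.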
···578578 * programs only. Should not be used with normal calls and indirect calls.579579 */580580#define BPF_TRAMP_F_SKIP_FRAME BIT(2)581581-582581/* Store IP address of the caller on the trampoline stack,583582 * so it's available for trampoline's programs.584583 */585584#define BPF_TRAMP_F_IP_ARG BIT(3)585585+/* Return the return value of fentry prog. Only used by bpf_struct_ops. */586586+#define BPF_TRAMP_F_RET_FENTRY_RET BIT(4)586587587588/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50588589 * bytes on x86. Pick a number to fit into BPF_IMAGE_SIZE / 2
···2222 * LINKS_ADDED: The fwnode has already be parsed to add fwnode links.2323 * NOT_DEVICE: The fwnode will never be populated as a struct device.2424 * INITIALIZED: The hardware corresponding to fwnode has been initialized.2525+ * NEEDS_CHILD_BOUND_ON_ADD: For this fwnode/device to probe successfully, its2626+ * driver needs its child devices to be bound with2727+ * their respective drivers as soon as they are2828+ * added.2529 */2626-#define FWNODE_FLAG_LINKS_ADDED BIT(0)2727-#define FWNODE_FLAG_NOT_DEVICE BIT(1)2828-#define FWNODE_FLAG_INITIALIZED BIT(2)3030+#define FWNODE_FLAG_LINKS_ADDED BIT(0)3131+#define FWNODE_FLAG_NOT_DEVICE BIT(1)3232+#define FWNODE_FLAG_INITIALIZED BIT(2)3333+#define FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD BIT(3)29343035struct fwnode_handle {3136 struct fwnode_handle *secondary;
+1-1
include/linux/irqdomain.h
···251251}252252253253void irq_domain_free_fwnode(struct fwnode_handle *fwnode);254254-struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,254254+struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, unsigned int size,255255 irq_hw_number_t hwirq_max, int direct_max,256256 const struct irq_domain_ops *ops,257257 void *host_data);
-6
include/linux/kvm_host.h
···608608 unsigned long mmu_notifier_range_start;609609 unsigned long mmu_notifier_range_end;610610#endif611611- long tlbs_dirty;612611 struct list_head devices;613612 u64 manual_dirty_log_protect;614613 struct dentry *debugfs_dentry;···718719 if (vcpu->vcpu_id == id)719720 return vcpu;720721 return NULL;721721-}722722-723723-static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)724724-{725725- return vcpu->vcpu_idx;726722}727723728724#define kvm_for_each_memslot(memslot, slots) \
+3
include/linux/mdio.h
···80808181 /* Clears up any memory if needed */8282 void (*remove)(struct mdio_device *mdiodev);8383+8484+ /* Quiesces the device on system shutdown, turns off interrupts etc */8585+ void (*shutdown)(struct mdio_device *mdiodev);8386};84878588static inline struct mdio_driver *
+5-1
include/linux/migrate.h
···1919 */2020#define MIGRATEPAGE_SUCCESS 021212222+/*2323+ * Keep in sync with:2424+ * - macro MIGRATE_REASON in include/trace/events/migrate.h2525+ * - migrate_reason_names[MR_TYPES] in mm/debug.c2626+ */2227enum migrate_reason {2328 MR_COMPACTION,2429 MR_MEMORY_FAILURE,···3732 MR_TYPES3833};39344040-/* In mm/debug.c; also keep sync with include/trace/events/migrate.h */4135extern const char *migrate_reason_names[MR_TYPES];42364337#ifdef CONFIG_MIGRATION
···163163static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }164164#endif165165166166+#ifdef CONFIG_KVM167167+void kvm_host_pmu_init(struct arm_pmu *pmu);168168+#else169169+#define kvm_host_pmu_init(x) do { } while(0)170170+#endif171171+166172/* Internal functions only for core arm_pmu code */167173struct arm_pmu *armpmu_alloc(void);168174struct arm_pmu *armpmu_alloc_atomic(void);
+3-1
include/linux/perf_event.h
···683683 /*684684 * timestamp shadows the actual context timing but it can685685 * be safely used in NMI interrupt context. It reflects the686686- * context time as it was when the event was last scheduled in.686686+ * context time as it was when the event was last scheduled in,687687+ * or when ctx_sched_in failed to schedule the event because we688688+ * ran out of PMCs.687689 *688690 * ctx_time already accounts for ctx->timestamp. Therefore to689691 * compute ctx_time for a sample, simply add perf_clock().
···28182818 * Mac80211 drivers should set the @NL80211_EXT_FEATURE_CAN_REPLACE_PTK0 flag28192819 * when they are able to replace in-use PTK keys according to the following28202820 * requirements:28212821- * 1) They do not hand over frames decrypted with the old key to28222822- mac80211 once the call to set_key() with command %DISABLE_KEY has been28232823- completed when also setting @IEEE80211_KEY_FLAG_GENERATE_IV for any key,28212821+ * 1) They do not hand over frames decrypted with the old key to mac8021128222822+ once the call to set_key() with command %DISABLE_KEY has been completed,28242823 2) either drop or continue to use the old key for any outgoing frames queued28252824 at the time of the key deletion (including re-transmits),28262825 3) never send out a frame queued prior to the set_key() %SET_KEY command28272827- encrypted with the new key and28262826+ encrypted with the new key when also needing28272827+ @IEEE80211_KEY_FLAG_GENERATE_IV and28282828 4) never send out a frame unencrypted when it should be encrypted.28292829 Mac80211 will not queue any new frames for a deleted key to the driver.28302830 */
···1202120212031203void nft_obj_notify(struct net *net, const struct nft_table *table,12041204 struct nft_object *obj, u32 portid, u32 seq,12051205- int event, int family, int report, gfp_t gfp);12051205+ int event, u16 flags, int family, int report, gfp_t gfp);1206120612071207/**12081208 * struct nft_object_type - stateful object type
+6
include/net/netns/netfilter.h
···2727#if IS_ENABLED(CONFIG_DECNET)2828 struct nf_hook_entries __rcu *hooks_decnet[NF_DN_NUMHOOKS];2929#endif3030+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)3131+ unsigned int defrag_ipv4_users;3232+#endif3333+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)3434+ unsigned int defrag_ipv6_users;3535+#endif3036};3137#endif
+1-1
include/net/nexthop.h
···325325 struct fib_nh_common *nhc = &nhi->fib_nhc;326326 int weight = nhg->nh_entries[i].weight;327327328328- if (fib_add_nexthop(skb, nhc, weight, rt_family) < 0)328328+ if (fib_add_nexthop(skb, nhc, weight, rt_family, 0) < 0)329329 return -EMSGSIZE;330330 }331331
+1
include/net/pkt_sched.h
···1111#include <uapi/linux/pkt_sched.h>12121313#define DEFAULT_TX_QUEUE_LEN 10001414+#define STAB_SIZE_LOG_MAX 3014151516struct qdisc_walker {1617 int stop;
+34-1
include/net/sock.h
···307307 * @sk_priority: %SO_PRIORITY setting308308 * @sk_type: socket type (%SOCK_STREAM, etc)309309 * @sk_protocol: which protocol this socket belongs in this network family310310+ * @sk_peer_lock: lock protecting @sk_peer_pid and @sk_peer_cred310311 * @sk_peer_pid: &struct pid for this socket's peer311312 * @sk_peer_cred: %SO_PEERCRED setting312313 * @sk_rcvlowat: %SO_RCVLOWAT setting···489488 u8 sk_prefer_busy_poll;490489 u16 sk_busy_poll_budget;491490#endif491491+ spinlock_t sk_peer_lock;492492 struct pid *sk_peer_pid;493493 const struct cred *sk_peer_cred;494494+494495 long sk_rcvtimeo;495496 ktime_t sk_stamp;496497#if BITS_PER_LONG==32···16261623 SINGLE_DEPTH_NESTING)16271624#define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock))1628162516291629-bool lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock);16261626+bool __lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock);16271627+16281628+/**16291629+ * lock_sock_fast - fast version of lock_sock16301630+ * @sk: socket16311631+ *16321632+ * This version should be used for very small section, where process wont block16331633+ * return false if fast path is taken:16341634+ *16351635+ * sk_lock.slock locked, owned = 0, BH disabled16361636+ *16371637+ * return true if slow path is taken:16381638+ *16391639+ * sk_lock.slock unlocked, owned = 1, BH enabled16401640+ */16411641+static inline bool lock_sock_fast(struct sock *sk)16421642+{16431643+ /* The sk_lock has mutex_lock() semantics here. */16441644+ mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);16451645+16461646+ return __lock_sock_fast(sk);16471647+}16481648+16491649+/* fast socket lock variant for caller already holding a [different] socket lock */16501650+static inline bool lock_sock_fast_nested(struct sock *sk)16511651+{16521652+ mutex_acquire(&sk->sk_lock.dep_map, SINGLE_DEPTH_NESTING, 0, _RET_IP_);16531653+16541654+ return __lock_sock_fast(sk);16551655+}1630165616311657/**16321658 * unlock_sock_fast - complement of lock_sock_fast···16721640 release_sock(sk);16731641 __release(&sk->sk_lock.slock);16741642 } else {16431643+ mutex_release(&sk->sk_lock.dep_map, _RET_IP_);16751644 spin_unlock_bh(&sk->sk_lock.slock);16761645 }16771646}
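The split into __lock_sock_fast() plus inline wrappers keeps callers unchanged while moving the lockdep annotation in front of the lock attempt, so the fast path is tracked too. The caller shape stays the usual (sketch):

    bool slow = lock_sock_fast(sk);

    /* short, non-blocking critical section */

    unlock_sock_fast(sk, slow);

with lock_sock_fast_nested() for a caller that already holds a different socket's lock.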
-1
include/scsi/scsi_device.h
···146146 struct scsi_vpd __rcu *vpd_pg83;147147 struct scsi_vpd __rcu *vpd_pg80;148148 struct scsi_vpd __rcu *vpd_pg89;149149- unsigned char current_tag; /* current tag */150149 struct scsi_target *sdev_target;151150152151 blist_flags_t sdev_bflags; /* black/white flags as also found in
···225225226226struct binder_frozen_status_info {227227 __u32 pid;228228+229229+ /* process received sync transactions since last frozen230230+ * bit 0: received sync transaction after being frozen231231+ * bit 1: new pending sync transaction during freezing232232+ */228233 __u32 sync_recv;234234+235235+ /* process received async transactions since last frozen */229236 __u32 async_recv;230237};231238
+1-1
include/uapi/linux/hyperv.h
···2626#ifndef _UAPI_HYPERV_H2727#define _UAPI_HYPERV_H28282929-#include <linux/uuid.h>2929+#include <linux/types.h>30303131/*3232 * Framework version for util services.
···136136 * Allocates and initializes an irq_domain structure.137137 * Returns pointer to IRQ domain, or NULL on failure.138138 */139139-struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,139139+struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, unsigned int size,140140 irq_hw_number_t hwirq_max, int direct_max,141141 const struct irq_domain_ops *ops,142142 void *host_data)
+11-3
kernel/rseq.c
···282282283283 if (unlikely(t->flags & PF_EXITING))284284 return;285285- ret = rseq_ip_fixup(regs);286286- if (unlikely(ret < 0))287287- goto error;285285+286286+ /*287287+ * regs is NULL if and only if the caller is in a syscall path. Skip288288+ * fixup and leave rseq_cs as is so that rseq_syscall() will detect and289289+ * kill a misbehaving userspace on debug kernels.290290+ */291291+ if (regs) {292292+ ret = rseq_ip_fixup(regs);293293+ if (unlikely(ret < 0))294294+ goto error;295295+ }288296 if (unlikely(rseq_update_cpu_id(t)))289297 goto error;290298 return;
+7-1
kernel/sched/debug.c
···173173 size_t cnt, loff_t *ppos)174174{175175 char buf[16];176176+ unsigned int scaling;176177177178 if (cnt > 15)178179 cnt = 15;179180180181 if (copy_from_user(&buf, ubuf, cnt))181182 return -EFAULT;183183+ buf[cnt] = '\0';182184183183- if (kstrtouint(buf, 10, &sysctl_sched_tunable_scaling))185185+ if (kstrtouint(buf, 10, &scaling))184186 return -EINVAL;185187188188+ if (scaling >= SCHED_TUNABLESCALING_END)189189+ return -EINVAL;190190+191191+ sysctl_sched_tunable_scaling = scaling;186192 if (sched_update_scaling())187193 return -EINVAL;188194
+5-1
kernel/sched/fair.c
···49364936 /* update hierarchical throttle state */49374937 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);4938493849394939- if (!cfs_rq->load.weight)49394939+ /* Nothing to run but something to decay (on_list)? Complete the branch */49404940+ if (!cfs_rq->load.weight) {49414941+ if (cfs_rq->on_list)49424942+ goto unthrottle_throttle;49404943 return;49444944+ }4941494549424946 task_delta = cfs_rq->h_nr_running;49434947 idle_task_delta = cfs_rq->idle_h_nr_running;
···346346 int "Warn for stack frames larger than"347347 range 0 8192348348 default 2048 if GCC_PLUGIN_LATENT_ENTROPY349349- default 1536 if (!64BIT && PARISC)349349+ default 1536 if (!64BIT && (PARISC || XTENSA))350350 default 1024 if (!64BIT && !PARISC)351351 default 2048 if 64BIT352352 help
+2
lib/Kconfig.kasan
···6666config KASAN_GENERIC6767 bool "Generic mode"6868 depends on HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC6969+ depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS6970 select SLUB_DEBUG if SLUB7071 select CONSTRUCTORS7172 help···8786config KASAN_SW_TAGS8887 bool "Software tag-based mode"8988 depends on HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS8989+ depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS9090 select SLUB_DEBUG if SLUB9191 select CONSTRUCTORS9292 help
···306306 struct vm_area_struct *vma)307307{308308 unsigned long address = vma_address(page, vma);309309+ unsigned long ret = 0;309310 pgd_t *pgd;310311 p4d_t *p4d;311312 pud_t *pud;···330329 if (pmd_devmap(*pmd))331330 return PMD_SHIFT;332331 pte = pte_offset_map(pmd, address);333333- if (!pte_present(*pte))334334- return 0;335335- if (pte_devmap(*pte))336336- return PAGE_SHIFT;337337- return 0;332332+ if (pte_present(*pte) && pte_devmap(*pte))333333+ ret = PAGE_SHIFT;334334+ pte_unmap(pte);335335+ return ret;338336}339337340338/*···11261126 */11271127static inline bool HWPoisonHandlable(struct page *page)11281128{11291129- return PageLRU(page) || __PageMovable(page);11291129+ return PageLRU(page) || __PageMovable(page) || is_free_buddy_page(page);11301130}1131113111321132static int __get_hwpoison_page(struct page *page)
+2-2
mm/shmem.c
···490490 case SHMEM_HUGE_ALWAYS:491491 return true;492492 case SHMEM_HUGE_WITHIN_SIZE:493493- index = round_up(index, HPAGE_PMD_NR);493493+ index = round_up(index + 1, HPAGE_PMD_NR);494494 i_size = round_up(i_size_read(inode), PAGE_SIZE);495495- if (i_size >= HPAGE_PMD_SIZE && (i_size >> PAGE_SHIFT) >= index)495495+ if (i_size >> PAGE_SHIFT >= index)496496 return true;497497 fallthrough;498498 case SHMEM_HUGE_ADVISE:
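Worked example for the off-by-one: with HPAGE_PMD_NR = 512, a fault at index 512 (the first page of the second huge extent) used to compute round_up(512, 512) = 512 and so only required i_size to reach 512 pages, permitting a huge page lying entirely beyond EOF; round_up(512 + 1, 512) = 1024 now demands that i_size cover the whole extent containing the index.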
+16-3
mm/swap.c
···620620 pagevec_lru_move_fn(pvec, lru_lazyfree_fn);621621622622 activate_page_drain(cpu);623623- invalidate_bh_lrus_cpu(cpu);624623}625624626625/**···702703 local_unlock(&lru_pvecs.lock);703704}704705706706+/*707707+ * It's called from per-cpu workqueue context in SMP case so708708+ * lru_add_drain_cpu and invalidate_bh_lrus_cpu should run on709709+ * the same cpu. It shouldn't be a problem in !SMP case since710710+ * the core is only one and the locks will disable preemption.711711+ */712712+static void lru_add_and_bh_lrus_drain(void)713713+{714714+ local_lock(&lru_pvecs.lock);715715+ lru_add_drain_cpu(smp_processor_id());716716+ local_unlock(&lru_pvecs.lock);717717+ invalidate_bh_lrus_cpu();718718+}719719+705720void lru_add_drain_cpu_zone(struct zone *zone)706721{707722 local_lock(&lru_pvecs.lock);···730717731718static void lru_add_drain_per_cpu(struct work_struct *dummy)732719{733733- lru_add_drain();720720+ lru_add_and_bh_lrus_drain();734721}735722736723/*···871858 */872859 __lru_add_drain_all(true);873860#else874874- lru_add_drain();861861+ lru_add_and_bh_lrus_drain();875862#endif876863}877864
+2-2
mm/util.c
···787787 size_t *lenp, loff_t *ppos)788788{789789 struct ctl_table t;790790- int new_policy;790790+ int new_policy = -1;791791 int ret;792792793793 /*···805805 t = *table;806806 t.data = &new_policy;807807 ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);808808- if (ret)808808+ if (ret || new_policy == -1)809809 return ret;810810811811 mm_compute_batch(new_policy);
+1
mm/workingset.c
···352352353353 inc_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file);354354355355+ mem_cgroup_flush_stats();355356 /*356357 * Compare the distance to the existing workingset size. We357358 * don't activate pages that couldn't stay resident even if
+9-5
net/bpf/test_run.c
···552552 __skb->gso_segs = skb_shinfo(skb)->gso_segs;553553}554554555555+static struct proto bpf_dummy_proto = {556556+ .name = "bpf_dummy",557557+ .owner = THIS_MODULE,558558+ .obj_size = sizeof(struct sock),559559+};560560+555561int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,556562 union bpf_attr __user *uattr)557563{···602596 break;603597 }604598605605- sk = kzalloc(sizeof(struct sock), GFP_USER);599599+ sk = sk_alloc(net, AF_UNSPEC, GFP_USER, &bpf_dummy_proto, 1);606600 if (!sk) {607601 kfree(data);608602 kfree(ctx);609603 return -ENOMEM;610604 }611611- sock_net_set(sk, net);612605 sock_init_data(NULL, sk);613606614607 skb = build_skb(data, 0);615608 if (!skb) {616609 kfree(data);617610 kfree(ctx);618618- kfree(sk);611611+ sk_free(sk);619612 return -ENOMEM;620613 }621614 skb->sk = sk;···687682 if (dev && dev != net->loopback_dev)688683 dev_put(dev);689684 kfree_skb(skb);690690- bpf_sk_storage_free(sk);691691- kfree(sk);685685+ sk_free(sk);692686 kfree(ctx);693687 return ret;694688}
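Allocating the dummy socket with sk_alloc() and a real struct proto, instead of a bare kzalloc(), routes teardown through sk_free() and the common socket destruction path; that is presumably why the explicit bpf_sk_storage_free() call can be dropped here, as the storage is released during normal destruction.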
···13761376}13771377EXPORT_SYMBOL(sock_setsockopt);1378137813791379+static const struct cred *sk_get_peer_cred(struct sock *sk)13801380+{13811381+ const struct cred *cred;13821382+13831383+ spin_lock(&sk->sk_peer_lock);13841384+ cred = get_cred(sk->sk_peer_cred);13851385+ spin_unlock(&sk->sk_peer_lock);13861386+13871387+ return cred;13881388+}1379138913801390static void cred_to_ucred(struct pid *pid, const struct cred *cred,13811391 struct ucred *ucred)···15621552 struct ucred peercred;15631553 if (len > sizeof(peercred))15641554 len = sizeof(peercred);15551555+15561556+ spin_lock(&sk->sk_peer_lock);15651557 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);15581558+ spin_unlock(&sk->sk_peer_lock);15591559+15661560 if (copy_to_user(optval, &peercred, len))15671561 return -EFAULT;15681562 goto lenout;···1574156015751561 case SO_PEERGROUPS:15761562 {15631563+ const struct cred *cred;15771564 int ret, n;1578156515791579- if (!sk->sk_peer_cred)15661566+ cred = sk_get_peer_cred(sk);15671567+ if (!cred)15801568 return -ENODATA;1581156915821582- n = sk->sk_peer_cred->group_info->ngroups;15701570+ n = cred->group_info->ngroups;15831571 if (len < n * sizeof(gid_t)) {15841572 len = n * sizeof(gid_t);15731573+ put_cred(cred);15851574 return put_user(len, optlen) ? -EFAULT : -ERANGE;15861575 }15871576 len = n * sizeof(gid_t);1588157715891589- ret = groups_to_user((gid_t __user *)optval,15901590- sk->sk_peer_cred->group_info);15781578+ ret = groups_to_user((gid_t __user *)optval, cred->group_info);15791579+ put_cred(cred);15911580 if (ret)15921581 return ret;15931582 goto lenout;···19521935 sk->sk_frag.page = NULL;19531936 }1954193719551955- if (sk->sk_peer_cred)19561956- put_cred(sk->sk_peer_cred);19381938+ /* We do not need to acquire sk->sk_peer_lock, we are the last user. */19391939+ put_cred(sk->sk_peer_cred);19571940 put_pid(sk->sk_peer_pid);19411941+19581942 if (likely(sk->sk_net_refcnt))19591943 put_net(sock_net(sk));19601944 sk_prot_free(sk->sk_prot_creator, sk);···3163314531643146 sk->sk_peer_pid = NULL;31653147 sk->sk_peer_cred = NULL;31483148+ spin_lock_init(&sk->sk_peer_lock);31493149+31663150 sk->sk_write_pending = 0;31673151 sk->sk_rcvlowat = 1;31683152 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;···3199317932003180void lock_sock_nested(struct sock *sk, int subclass)32013181{31823182+ /* The sk_lock has mutex_lock() semantics here. 
*/31833183+ mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);31843184+32023185 might_sleep();32033186 spin_lock_bh(&sk->sk_lock.slock);32043187 if (sk->sk_lock.owned)32053188 __lock_sock(sk);32063189 sk->sk_lock.owned = 1;32073207- spin_unlock(&sk->sk_lock.slock);32083208- /*32093209- * The sk_lock has mutex_lock() semantics here:32103210- */32113211- mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);32123212- local_bh_enable();31903190+ spin_unlock_bh(&sk->sk_lock.slock);32133191}32143192EXPORT_SYMBOL(lock_sock_nested);32153193···32303212}32313213EXPORT_SYMBOL(release_sock);3232321432333233-/**32343234- * lock_sock_fast - fast version of lock_sock32353235- * @sk: socket32363236- *32373237- * This version should be used for very small section, where process wont block32383238- * return false if fast path is taken:32393239- *32403240- * sk_lock.slock locked, owned = 0, BH disabled32413241- *32423242- * return true if slow path is taken:32433243- *32443244- * sk_lock.slock unlocked, owned = 1, BH enabled32453245- */32463246-bool lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock)32153215+bool __lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock)32473216{32483217 might_sleep();32493218 spin_lock_bh(&sk->sk_lock.slock);3250321932513251- if (!sk->sk_lock.owned)32203220+ if (!sk->sk_lock.owned) {32523221 /*32533253- * Note : We must disable BH32223222+ * Fast path return with bottom halves disabled and32233223+ * sock::sk_lock.slock held.32243224+ *32253225+ * The 'mutex' is not contended and holding32263226+ * sock::sk_lock.slock prevents all other lockers to32273227+ * proceed so the corresponding unlock_sock_fast() can32283228+ * avoid the slow path of release_sock() completely and32293229+ * just release slock.32303230+ *32313231+ * From a semantical POV this is equivalent to 'acquiring'32323232+ * the 'mutex', hence the corresponding lockdep32333233+ * mutex_release() has to happen in the fast path of32343234+ * unlock_sock_fast().32543235 */32553236 return false;32373237+ }3256323832573239 __lock_sock(sk);32583240 sk->sk_lock.owned = 1;32593259- spin_unlock(&sk->sk_lock.slock);32603260- /*32613261- * The sk_lock has mutex_lock() semantics here:32623262- */32633263- mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);32643241 __acquire(&sk->sk_lock.slock);32653265- local_bh_enable();32423242+ spin_unlock_bh(&sk->sk_lock.slock);32663243 return true;32673244}32683268-EXPORT_SYMBOL(lock_sock_fast);32453245+EXPORT_SYMBOL(__lock_sock_fast);3269324632703247int sock_gettstamp(struct socket *sock, void __user *userstamp,32713248 bool timeval, bool time32)
+106-8
net/dsa/dsa2.c
···429429{430430 struct devlink_port *dlp = &dp->devlink_port;431431 bool dsa_port_link_registered = false;432432+ struct dsa_switch *ds = dp->ds;432433 bool dsa_port_enabled = false;433434 int err = 0;434435···438437439438 INIT_LIST_HEAD(&dp->fdbs);440439 INIT_LIST_HEAD(&dp->mdbs);440440+441441+ if (ds->ops->port_setup) {442442+ err = ds->ops->port_setup(ds, dp->index);443443+ if (err)444444+ return err;445445+ }441446442447 switch (dp->type) {443448 case DSA_PORT_TYPE_UNUSED:···487480 dsa_port_disable(dp);488481 if (err && dsa_port_link_registered)489482 dsa_port_link_unregister_of(dp);490490- if (err)483483+ if (err) {484484+ if (ds->ops->port_teardown)485485+ ds->ops->port_teardown(ds, dp->index);491486 return err;487487+ }492488493489 dp->setup = true;494490···543533static void dsa_port_teardown(struct dsa_port *dp)544534{545535 struct devlink_port *dlp = &dp->devlink_port;536536+ struct dsa_switch *ds = dp->ds;546537 struct dsa_mac_addr *a, *tmp;547538548539 if (!dp->setup)549540 return;541541+542542+ if (ds->ops->port_teardown)543543+ ds->ops->port_teardown(ds, dp->index);550544551545 devlink_port_type_clear(dlp);552546···593579 if (dp->devlink_port_setup)594580 devlink_port_unregister(dlp);595581 dp->devlink_port_setup = false;582582+}583583+584584+/* Destroy the current devlink port, and create a new one which has the UNUSED585585+ * flavour. At this point, any call to ds->ops->port_setup has been already586586+ * balanced out by a call to ds->ops->port_teardown, so we know that any587587+ * devlink port regions the driver had are now unregistered. We then call its588588+ * ds->ops->port_setup again, in order for the driver to re-create them on the589589+ * new devlink port.590590+ */591591+static int dsa_port_reinit_as_unused(struct dsa_port *dp)592592+{593593+ struct dsa_switch *ds = dp->ds;594594+ int err;595595+596596+ dsa_port_devlink_teardown(dp);597597+ dp->type = DSA_PORT_TYPE_UNUSED;598598+ err = dsa_port_devlink_setup(dp);599599+ if (err)600600+ return err;601601+602602+ if (ds->ops->port_setup) {603603+ /* On error, leave the devlink port registered,604604+ * dsa_switch_teardown will clean it up later.605605+ */606606+ err = ds->ops->port_setup(ds, dp->index);607607+ if (err)608608+ return err;609609+ }610610+611611+ return 0;596612}597613598614static int dsa_devlink_info_get(struct devlink *dl,···880836 devlink_params_publish(ds->devlink);881837882838 if (!ds->slave_mii_bus && ds->ops->phy_read) {883883- ds->slave_mii_bus = devm_mdiobus_alloc(ds->dev);839839+ ds->slave_mii_bus = mdiobus_alloc();884840 if (!ds->slave_mii_bus) {885841 err = -ENOMEM;886842 goto teardown;···890846891847 err = mdiobus_register(ds->slave_mii_bus);892848 if (err < 0)893893- goto teardown;849849+ goto free_slave_mii_bus;894850 }895851896852 ds->setup = true;897853898854 return 0;899855856856+free_slave_mii_bus:857857+ if (ds->slave_mii_bus && ds->ops->phy_read)858858+ mdiobus_free(ds->slave_mii_bus);900859teardown:901860 if (ds->ops->teardown)902861 ds->ops->teardown(ds);···924877 if (!ds->setup)925878 return;926879927927- if (ds->slave_mii_bus && ds->ops->phy_read)880880+ if (ds->slave_mii_bus && ds->ops->phy_read) {928881 mdiobus_unregister(ds->slave_mii_bus);882882+ mdiobus_free(ds->slave_mii_bus);883883+ ds->slave_mii_bus = NULL;884884+ }929885930886 dsa_switch_unregister_notifier(ds);931887···988938 list_for_each_entry(dp, &dst->ports, list) {989939 err = dsa_port_setup(dp);990940 if (err) {991991- dsa_port_devlink_teardown(dp);992992- dp->type = DSA_PORT_TYPE_UNUSED;993993- err = 
dsa_port_devlink_setup(dp);941941+ err = dsa_port_reinit_as_unused(dp);994942 if (err)995943 goto teardown;996996- continue;997944 }998945 }999946···10951048teardown_master:10961049 dsa_tree_teardown_master(dst);10971050teardown_switches:10511051+ dsa_tree_teardown_ports(dst);10981052 dsa_tree_teardown_switches(dst);10991053teardown_cpu_ports:11001054 dsa_tree_teardown_cpu_ports(dst);···16101562 mutex_unlock(&dsa2_mutex);16111563}16121564EXPORT_SYMBOL_GPL(dsa_unregister_switch);15651565+15661566+/* If the DSA master chooses to unregister its net_device on .shutdown, DSA is15671567+ * blocking that operation from completion, due to the dev_hold taken inside15681568+ * netdev_upper_dev_link. Unlink the DSA slave interfaces from being uppers of15691569+ * the DSA master, so that the system can reboot successfully.15701570+ */15711571+void dsa_switch_shutdown(struct dsa_switch *ds)15721572+{15731573+ struct net_device *master, *slave_dev;15741574+ LIST_HEAD(unregister_list);15751575+ struct dsa_port *dp;15761576+15771577+ mutex_lock(&dsa2_mutex);15781578+ rtnl_lock();15791579+15801580+ list_for_each_entry(dp, &ds->dst->ports, list) {15811581+ if (dp->ds != ds)15821582+ continue;15831583+15841584+ if (!dsa_port_is_user(dp))15851585+ continue;15861586+15871587+ master = dp->cpu_dp->master;15881588+ slave_dev = dp->slave;15891589+15901590+ netdev_upper_dev_unlink(master, slave_dev);15911591+ /* Just unlinking ourselves as uppers of the master is not15921592+ * sufficient. When the master net device unregisters, that will15931593+ * also call dev_close, which we will catch as NETDEV_GOING_DOWN15941594+ * and trigger a dev_close on our own devices (dsa_slave_close).15951595+ * In turn, that will call dev_mc_unsync on the master's net15961596+ * device. If the master is also a DSA switch port, this will15971597+ * trigger dsa_slave_set_rx_mode which will call dev_mc_sync on15981598+ * its own master. Lockdep will complain about the fact that15991599+ * all cascaded masters have the same dsa_master_addr_list_lock_key,16001600+ * which it normally would not do if the cascaded masters would16011601+ * be in a proper upper/lower relationship, which we've just16021602+ * destroyed.16031603+ * To suppress the lockdep warnings, let's actually unregister16041604+ * the DSA slave interfaces too, to avoid the nonsensical16051605+ * multicast address list synchronization on shutdown.16061606+ */16071607+ unregister_netdevice_queue(slave_dev, &unregister_list);16081608+ }16091609+ unregister_netdevice_many(&unregister_list);16101610+16111611+ rtnl_unlock();16121612+ mutex_unlock(&dsa2_mutex);16131613+}16141614+EXPORT_SYMBOL_GPL(dsa_switch_shutdown);
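dsa_switch_shutdown() is meant to be called from the switch driver's own shutdown hook. A hypothetical platform driver would wire it up roughly like this (illustrative sketch, names invented):

    #include <linux/platform_device.h>
    #include <net/dsa.h>

    static void foo_dsa_shutdown(struct platform_device *pdev)
    {
            struct dsa_switch *ds = platform_get_drvdata(pdev);

            if (ds)
                    dsa_switch_shutdown(ds);
    }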
···11// SPDX-License-Identifier: GPL-2.022-/* Copyright 2020-2021 NXP Semiconductors22+/* Copyright 2020-2021 NXP33 *44 * An implementation of the software-defined tag_8021q.c tagger format, which55 * also preserves full functionality under a vlan_filtering bridge. It does
···273273 * things we don't know, ie. tcp syn flag or ports). If the274274 * rule is also a fragment-specific rule, non-fragments won't275275 * match it. */276276+ acpar.fragoff = 0;276277 acpar.hotdrop = false;277278 acpar.state = state;278279
+1-1
net/ipv6/netfilter/nf_conntrack_reasm.c
···33333434static const char nf_frags_cache_name[] = "nf-frags";35353636-unsigned int nf_frag_pernet_id __read_mostly;3636+static unsigned int nf_frag_pernet_id __read_mostly;3737static struct inet_frags nf_frags;38383939static struct nft_ct_frag6_pernet *nf_frag_pernet(struct net *net)
+9-16
net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
···2525#include <net/netfilter/nf_conntrack_zones.h>2626#include <net/netfilter/ipv6/nf_defrag_ipv6.h>27272828-extern unsigned int nf_frag_pernet_id;2929-3028static DEFINE_MUTEX(defrag6_mutex);31293230static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,···89919092static void __net_exit defrag6_net_exit(struct net *net)9193{9292- struct nft_ct_frag6_pernet *nf_frag = net_generic(net, nf_frag_pernet_id);9393-9494- if (nf_frag->users) {9494+ if (net->nf.defrag_ipv6_users) {9595 nf_unregister_net_hooks(net, ipv6_defrag_ops,9696 ARRAY_SIZE(ipv6_defrag_ops));9797- nf_frag->users = 0;9797+ net->nf.defrag_ipv6_users = 0;9898 }9999}100100···130134131135int nf_defrag_ipv6_enable(struct net *net)132136{133133- struct nft_ct_frag6_pernet *nf_frag = net_generic(net, nf_frag_pernet_id);134137 int err = 0;135138136139 mutex_lock(&defrag6_mutex);137137- if (nf_frag->users == UINT_MAX) {140140+ if (net->nf.defrag_ipv6_users == UINT_MAX) {138141 err = -EOVERFLOW;139142 goto out_unlock;140143 }141144142142- if (nf_frag->users) {143143- nf_frag->users++;145145+ if (net->nf.defrag_ipv6_users) {146146+ net->nf.defrag_ipv6_users++;144147 goto out_unlock;145148 }146149147150 err = nf_register_net_hooks(net, ipv6_defrag_ops,148151 ARRAY_SIZE(ipv6_defrag_ops));149152 if (err == 0)150150- nf_frag->users = 1;153153+ net->nf.defrag_ipv6_users = 1;151154152155 out_unlock:153156 mutex_unlock(&defrag6_mutex);···156161157162void nf_defrag_ipv6_disable(struct net *net)158163{159159- struct nft_ct_frag6_pernet *nf_frag = net_generic(net, nf_frag_pernet_id);160160-161164 mutex_lock(&defrag6_mutex);162162- if (nf_frag->users) {163163- nf_frag->users--;164164- if (nf_frag->users == 0)165165+ if (net->nf.defrag_ipv6_users) {166166+ net->nf.defrag_ipv6_users--;167167+ if (net->nf.defrag_ipv6_users == 0)165168 nf_unregister_net_hooks(net, ipv6_defrag_ops,166169 ARRAY_SIZE(ipv6_defrag_ops));167170 }
···22/*33 * Copyright 2012-2013, Marco Porsch <marco.porsch@s2005.tu-chemnitz.de>44 * Copyright 2012-2013, cozybit Inc.55+ * Copyright (C) 2021 Intel Corporation56 */6778#include "mesh.h"···589588590589 /* only transmit to PS STA with announced, non-zero awake window */591590 if (test_sta_flag(sta, WLAN_STA_PS_STA) &&592592- (!elems->awake_window || !le16_to_cpu(*elems->awake_window)))591591+ (!elems->awake_window || !get_unaligned_le16(elems->awake_window)))593592 return;594593595594 if (!test_sta_flag(sta, WLAN_STA_MPSP_OWNER))
-4
net/mac80211/rate.c
···392392 int mcast_rate;393393 bool use_basicrate = false;394394395395- if (ieee80211_is_tx_data(txrc->skb) &&396396- info->flags & IEEE80211_TX_CTL_NO_ACK)397397- return false;398398-399395 if (!pubsta || rc_no_data_or_no_ack_use_min(txrc)) {400396 __rate_control_send_low(txrc->hw, sband, pubsta, info,401397 txrc->rate_idx_mask);
+2-1
net/mac80211/rx.c
···41314131 if (!bssid)41324132 return false;41334133 if (ether_addr_equal(sdata->vif.addr, hdr->addr2) ||41344134- ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2))41344134+ ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2) ||41354135+ !is_valid_ether_addr(hdr->addr2))41354136 return false;41364137 if (ieee80211_is_beacon(hdr->frame_control))41374138 return true;
+12
net/mac80211/tx.c
···22092209 }2210221022112211 vht_mcs = iterator.this_arg[4] >> 4;22122212+ if (vht_mcs > 11)22132213+ vht_mcs = 0;22122214 vht_nss = iterator.this_arg[4] & 0xF;22152215+ if (!vht_nss || vht_nss > 8)22162216+ vht_nss = 1;22132217 break;2214221822152219 /*···3383337933843380 if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head))33853381 goto out;33823382+33833383+ /* If n == 2, the "while (*frag_tail)" loop above didn't execute33843384+ * and frag_tail should be &skb_shinfo(head)->frag_list.33853385+ * However, ieee80211_amsdu_prepare_head() can reallocate it.33863386+ * Reload frag_tail to have it pointing to the correct place.33873387+ */33883388+ if (n == 2)33893389+ frag_tail = &skb_shinfo(head)->frag_list;3386339033873391 /*33883392 * Pad out the previous subframe to a multiple of 4 by adding the
+6
net/mac80211/wpa.c
···520520 return RX_DROP_UNUSABLE;521521 }522522523523+ /* reload hdr - skb might have been reallocated */524524+ hdr = (void *)rx->skb->data;525525+523526 data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN - mic_len;524527 if (!rx->sta || data_len < 0)525528 return RX_DROP_UNUSABLE;···751748 if (skb_linearize(rx->skb))752749 return RX_DROP_UNUSABLE;753750 }751751+752752+ /* reload hdr - skb might have been reallocated */753753+ hdr = (void *)rx->skb->data;754754755755 data_len = skb->len - hdrlen - IEEE80211_GCMP_HDR_LEN - mic_len;756756 if (!rx->sta || data_len < 0)
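Both hunks apply the same rule: any helper that may reallocate skb->data, such as the skb_linearize() call just above, invalidates pointers previously derived from the skb, so hdr has to be recomputed before the decrypt path dereferences it.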
+1-1
net/mptcp/mptcp_diag.c
···3636 struct sock *sk;37373838 net = sock_net(in_skb->sk);3939- msk = mptcp_token_get_sock(req->id.idiag_cookie[0]);3939+ msk = mptcp_token_get_sock(net, req->id.idiag_cookie[0]);4040 if (!msk)4141 goto out_nosk;4242
+1-3
net/mptcp/pm_netlink.c
···1718171817191719 list_for_each_entry(entry, &pernet->local_addr_list, list) {17201720 if (addresses_equal(&entry->addr, &addr.addr, true)) {17211721- ret = mptcp_nl_addr_backup(net, &entry->addr, bkup);17221722- if (ret)17231723- return ret;17211721+ mptcp_nl_addr_backup(net, &entry->addr, bkup);1724172217251723 if (bkup)17261724 entry->flags |= MPTCP_PM_ADDR_FLAG_BACKUP;
···709709void mptcp_token_accept(struct mptcp_subflow_request_sock *r,710710 struct mptcp_sock *msk);711711bool mptcp_token_exists(u32 token);712712-struct mptcp_sock *mptcp_token_get_sock(u32 token);712712+struct mptcp_sock *mptcp_token_get_sock(struct net *net, u32 token);713713struct mptcp_sock *mptcp_token_iter_next(const struct net *net, long *s_slot,714714 long *s_num);715715void mptcp_token_destroy(struct mptcp_sock *msk);
+1-1
net/mptcp/subflow.c
···8686 struct mptcp_sock *msk;8787 int local_id;88888989- msk = mptcp_token_get_sock(subflow_req->token);8989+ msk = mptcp_token_get_sock(sock_net(req_to_sk(req)), subflow_req->token);9090 if (!msk) {9191 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);9292 return NULL;
+1-12
net/mptcp/syncookies.c
···108108109109 e->valid = 0;110110111111- msk = mptcp_token_get_sock(e->token);111111+ msk = mptcp_token_get_sock(net, e->token);112112 if (!msk) {113113 spin_unlock_bh(&join_entry_locks[i]);114114 return false;115115 }116116-117117- /* If this fails, the token got re-used in the mean time by another118118- * mptcp socket in a different netns, i.e. entry is outdated.119119- */120120- if (!net_eq(sock_net((struct sock *)msk), net))121121- goto err_put;122116123117 subflow_req->remote_nonce = e->remote_nonce;124118 subflow_req->local_nonce = e->local_nonce;···122128 subflow_req->msk = msk;123129 spin_unlock_bh(&join_entry_locks[i]);124130 return true;125125-126126-err_put:127127- spin_unlock_bh(&join_entry_locks[i]);128128- sock_put((struct sock *)msk);129129- return false;130131}131132132133void __init mptcp_join_cookie_init(void)
+8-3
net/mptcp/token.c
···231231232232/**233233 * mptcp_token_get_sock - retrieve mptcp connection sock using its token234234+ * @net: restrict to this namespace234235 * @token: token of the mptcp connection to retrieve235236 *236237 * This function returns the mptcp connection structure with the given token.···239238 *240239 * returns NULL if no connection with the given token value exists.241240 */242242-struct mptcp_sock *mptcp_token_get_sock(u32 token)241241+struct mptcp_sock *mptcp_token_get_sock(struct net *net, u32 token)243242{244243 struct hlist_nulls_node *pos;245244 struct token_bucket *bucket;···252251again:253252 sk_nulls_for_each_rcu(sk, pos, &bucket->msk_chain) {254253 msk = mptcp_sk(sk);255255- if (READ_ONCE(msk->token) != token)254254+ if (READ_ONCE(msk->token) != token ||255255+ !net_eq(sock_net(sk), net))256256 continue;257257+257258 if (!refcount_inc_not_zero(&sk->sk_refcnt))258259 goto not_found;259259- if (READ_ONCE(msk->token) != token) {260260+261261+ if (READ_ONCE(msk->token) != token ||262262+ !net_eq(sock_net(sk), net)) {260263 sock_put(sk);261264 goto again;262265 }
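The repeated token-and-namespace comparison around refcount_inc_not_zero() is the standard RCU lookup shape: between the first match and the reference bump the socket can be freed and its slot recycled, so both keys must be re-checked once a reference is held, dropping it and retrying the walk on a mismatch.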
···130130{131131 size_t hsize;132132133133- /* We must fit both into u32 in jhash and size_t */133133+ /* We must fit both into u32 in jhash and INT_MAX in kvmalloc_node() */134134 if (hbits > 31)135135 return 0;136136 hsize = jhash_size(hbits);137137- if ((((size_t)-1) - sizeof(struct htable)) / sizeof(struct hbucket *)137137+ if ((INT_MAX - sizeof(struct htable)) / sizeof(struct hbucket *)138138 < hsize)139139 return 0;140140
+4
net/netfilter/ipvs/ip_vs_conn.c
···14681468 int idx;1469146914701470 /* Compute size and mask */14711471+ if (ip_vs_conn_tab_bits < 8 || ip_vs_conn_tab_bits > 20) {14721472+ pr_info("conn_tab_bits not in [8, 20]. Using default value\n");14731473+ ip_vs_conn_tab_bits = CONFIG_IP_VS_TAB_BITS;14741474+ }14711475 ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits;14721476 ip_vs_conn_tab_mask = ip_vs_conn_tab_size - 1;14731477
+100-54
net/netfilter/nf_conntrack_core.c
···7474static DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);7575static __read_mostly bool nf_conntrack_locks_all;76767777+/* serialize hash resizes and nf_ct_iterate_cleanup */7878+static DEFINE_MUTEX(nf_conntrack_mutex);7979+7780#define GC_SCAN_INTERVAL (120u * HZ)7881#define GC_SCAN_MAX_DURATION msecs_to_jiffies(10)79828080-#define MAX_CHAINLEN 64u8383+#define MIN_CHAINLEN 8u8484+#define MAX_CHAINLEN (32u - MIN_CHAINLEN)81858286static struct conntrack_gc_work conntrack_gc_work;8387···192188static siphash_key_t nf_conntrack_hash_rnd __read_mostly;193189194190static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,191191+ unsigned int zoneid,195192 const struct net *net)196193{197194 struct {198195 struct nf_conntrack_man src;199196 union nf_inet_addr dst_addr;197197+ unsigned int zone;200198 u32 net_mix;201199 u16 dport;202200 u16 proto;···211205 /* The direction must be ignored, so handle usable members manually. */212206 combined.src = tuple->src;213207 combined.dst_addr = tuple->dst.u3;208208+ combined.zone = zoneid;214209 combined.net_mix = net_hash_mix(net);215210 combined.dport = (__force __u16)tuple->dst.u.all;216211 combined.proto = tuple->dst.protonum;···226219227220static u32 __hash_conntrack(const struct net *net,228221 const struct nf_conntrack_tuple *tuple,222222+ unsigned int zoneid,229223 unsigned int size)230224{231231- return reciprocal_scale(hash_conntrack_raw(tuple, net), size);225225+ return reciprocal_scale(hash_conntrack_raw(tuple, zoneid, net), size);232226}233227234228static u32 hash_conntrack(const struct net *net,235235- const struct nf_conntrack_tuple *tuple)229229+ const struct nf_conntrack_tuple *tuple,230230+ unsigned int zoneid)236231{237237- return scale_hash(hash_conntrack_raw(tuple, net));232232+ return scale_hash(hash_conntrack_raw(tuple, zoneid, net));238233}239234240235static bool nf_ct_get_tuple_ports(const struct sk_buff *skb,···659650 do {660651 sequence = read_seqcount_begin(&nf_conntrack_generation);661652 hash = hash_conntrack(net,662662- &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);653653+ &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,654654+ nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_ORIGINAL));663655 reply_hash = hash_conntrack(net,664664- &ct->tuplehash[IP_CT_DIR_REPLY].tuple);656656+ &ct->tuplehash[IP_CT_DIR_REPLY].tuple,657657+ nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_REPLY));665658 } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));666659667660 clean_from_lists(ct);···830819nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,831820 const struct nf_conntrack_tuple *tuple)832821{833833- return __nf_conntrack_find_get(net, zone, tuple,834834- hash_conntrack_raw(tuple, net));822822+ unsigned int rid, zone_id = nf_ct_zone_id(zone, IP_CT_DIR_ORIGINAL);823823+ struct nf_conntrack_tuple_hash *thash;824824+825825+ thash = __nf_conntrack_find_get(net, zone, tuple,826826+ hash_conntrack_raw(tuple, zone_id, net));827827+828828+ if (thash)829829+ return thash;830830+831831+ rid = nf_ct_zone_id(zone, IP_CT_DIR_REPLY);832832+ if (rid != zone_id)833833+ return __nf_conntrack_find_get(net, zone, tuple,834834+ hash_conntrack_raw(tuple, rid, net));835835+ return thash;835836}836837EXPORT_SYMBOL_GPL(nf_conntrack_find_get);837838···865842 unsigned int hash, reply_hash;866843 struct nf_conntrack_tuple_hash *h;867844 struct hlist_nulls_node *n;845845+ unsigned int max_chainlen;868846 unsigned int chainlen = 0;869847 unsigned int sequence;870848 int err = -EEXIST;···876852 do {877853 sequence = 
read_seqcount_begin(&nf_conntrack_generation);878854 hash = hash_conntrack(net,879879- &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);855855+ &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,856856+ nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_ORIGINAL));880857 reply_hash = hash_conntrack(net,881881- &ct->tuplehash[IP_CT_DIR_REPLY].tuple);858858+ &ct->tuplehash[IP_CT_DIR_REPLY].tuple,859859+ nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_REPLY));882860 } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));861861+862862+ max_chainlen = MIN_CHAINLEN + prandom_u32_max(MAX_CHAINLEN);883863884864 /* See if there's one in the list already, including reverse */885865 hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode) {···891863 zone, net))892864 goto out;893865894894- if (chainlen++ > MAX_CHAINLEN)866866+ if (chainlen++ > max_chainlen)895867 goto chaintoolong;896868 }897869···901873 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,902874 zone, net))903875 goto out;904904- if (chainlen++ > MAX_CHAINLEN)876876+ if (chainlen++ > max_chainlen)905877 goto chaintoolong;906878 }907879···11311103int11321104__nf_conntrack_confirm(struct sk_buff *skb)11331105{11061106+ unsigned int chainlen = 0, sequence, max_chainlen;11341107 const struct nf_conntrack_zone *zone;11351135- unsigned int chainlen = 0, sequence;11361108 unsigned int hash, reply_hash;11371109 struct nf_conntrack_tuple_hash *h;11381110 struct nf_conn *ct;···11611133 hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;11621134 hash = scale_hash(hash);11631135 reply_hash = hash_conntrack(net,11641164- &ct->tuplehash[IP_CT_DIR_REPLY].tuple);11651165-11361136+ &ct->tuplehash[IP_CT_DIR_REPLY].tuple,11371137+ nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_REPLY));11661138 } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));1167113911681140 /* We're not in hash table, and we refuse to set up related···11961168 goto dying;11971169 }1198117011711171+ max_chainlen = MIN_CHAINLEN + prandom_u32_max(MAX_CHAINLEN);11991172 /* See if there's one in the list already, including reverse:12001173 NAT could have grabbed it without realizing, since we're12011174 not in the hash. If there is, we lost race. 
*/···12041175 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,12051176 zone, net))12061177 goto out;12071207- if (chainlen++ > MAX_CHAINLEN)11781178+ if (chainlen++ > max_chainlen)12081179 goto chaintoolong;12091180 }12101181···12131184 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,12141185 zone, net))12151186 goto out;12161216- if (chainlen++ > MAX_CHAINLEN) {11871187+ if (chainlen++ > max_chainlen) {12171188chaintoolong:12181189 nf_ct_add_to_dying_list(ct);12191190 NF_CT_STAT_INC(net, chaintoolong);···12751246 rcu_read_lock();12761247 begin:12771248 nf_conntrack_get_ht(&ct_hash, &hsize);12781278- hash = __hash_conntrack(net, tuple, hsize);12491249+ hash = __hash_conntrack(net, tuple, nf_ct_zone_id(zone, IP_CT_DIR_REPLY), hsize);1279125012801251 hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) {12811252 ct = nf_ct_tuplehash_to_ctrack(h);···17161687 struct nf_conntrack_tuple_hash *h;17171688 enum ip_conntrack_info ctinfo;17181689 struct nf_conntrack_zone tmp;16901690+ u32 hash, zone_id, rid;17191691 struct nf_conn *ct;17201720- u32 hash;1721169217221693 if (!nf_ct_get_tuple(skb, skb_network_offset(skb),17231694 dataoff, state->pf, protonum, state->net,···1728169917291700 /* look for tuple match */17301701 zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);17311731- hash = hash_conntrack_raw(&tuple, state->net);17021702+17031703+ zone_id = nf_ct_zone_id(zone, IP_CT_DIR_ORIGINAL);17041704+ hash = hash_conntrack_raw(&tuple, zone_id, state->net);17321705 h = __nf_conntrack_find_get(state->net, zone, &tuple, hash);17061706+17071707+ if (!h) {17081708+ rid = nf_ct_zone_id(zone, IP_CT_DIR_REPLY);17091709+ if (zone_id != rid) {17101710+ u32 tmp = hash_conntrack_raw(&tuple, rid, state->net);17111711+17121712+ h = __nf_conntrack_find_get(state->net, zone, &tuple, tmp);17131713+ }17141714+ }17151715+17331716 if (!h) {17341717 h = init_conntrack(state->net, tmpl, &tuple,17351718 skb, dataoff, hash);···22662225 spinlock_t *lockp;2267222622682227 for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {22282228+ struct hlist_nulls_head *hslot = &nf_conntrack_hash[*bucket];22292229+22302230+ if (hlist_nulls_empty(hslot))22312231+ continue;22322232+22692233 lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];22702234 local_bh_disable();22712235 nf_conntrack_lock(lockp);22722272- if (*bucket < nf_conntrack_htable_size) {22732273- hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnnode) {22742274- if (NF_CT_DIRECTION(h) != IP_CT_DIR_REPLY)22752275- continue;22762276- /* All nf_conn objects are added to hash table twice, one22772277- * for original direction tuple, once for the reply tuple.22782278- *22792279- * Exception: In the IPS_NAT_CLASH case, only the reply22802280- * tuple is added (the original tuple already existed for22812281- * a different object).22822282- *22832283- * We only need to call the iterator once for each22842284- * conntrack, so we just use the 'reply' direction22852285- * tuple while iterating.22862286- */22872287- ct = nf_ct_tuplehash_to_ctrack(h);22882288- if (iter(ct, data))22892289- goto found;22902290- }22362236+ hlist_nulls_for_each_entry(h, n, hslot, hnnode) {22372237+ if (NF_CT_DIRECTION(h) != IP_CT_DIR_REPLY)22382238+ continue;22392239+ /* All nf_conn objects are added to hash table twice, one22402240+ * for original direction tuple, once for the reply tuple.22412241+ *22422242+ * Exception: In the IPS_NAT_CLASH case, only the reply22432243+ * tuple is added (the original tuple already existed for22442244+ * a different 
object).22452245+ *22462246+ * We only need to call the iterator once for each22472247+ * conntrack, so we just use the 'reply' direction22482248+ * tuple while iterating.22492249+ */22502250+ ct = nf_ct_tuplehash_to_ctrack(h);22512251+ if (iter(ct, data))22522252+ goto found;22912253 }22922254 spin_unlock(lockp);22932255 local_bh_enable();···23082264static void nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data),23092265 void *data, u32 portid, int report)23102266{23112311- unsigned int bucket = 0, sequence;22672267+ unsigned int bucket = 0;23122268 struct nf_conn *ct;2313226923142270 might_sleep();2315227123162316- for (;;) {23172317- sequence = read_seqcount_begin(&nf_conntrack_generation);22722272+ mutex_lock(&nf_conntrack_mutex);22732273+ while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {22742274+ /* Time to push up daises... */2318227523192319- while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {23202320- /* Time to push up daises... */23212321-23222322- nf_ct_delete(ct, portid, report);23232323- nf_ct_put(ct);23242324- cond_resched();23252325- }23262326-23272327- if (!read_seqcount_retry(&nf_conntrack_generation, sequence))23282328- break;23292329- bucket = 0;22762276+ nf_ct_delete(ct, portid, report);22772277+ nf_ct_put(ct);22782278+ cond_resched();23302279 }22802280+ mutex_unlock(&nf_conntrack_mutex);23312281}2332228223332283struct iter_data {···25572519 if (!hash)25582520 return -ENOMEM;2559252125222522+ mutex_lock(&nf_conntrack_mutex);25602523 old_size = nf_conntrack_htable_size;25612524 if (old_size == hashsize) {25252525+ mutex_unlock(&nf_conntrack_mutex);25622526 kvfree(hash);25632527 return 0;25642528 }···2577253725782538 for (i = 0; i < nf_conntrack_htable_size; i++) {25792539 while (!hlist_nulls_empty(&nf_conntrack_hash[i])) {25402540+ unsigned int zone_id;25412541+25802542 h = hlist_nulls_entry(nf_conntrack_hash[i].first,25812543 struct nf_conntrack_tuple_hash, hnnode);25822544 ct = nf_ct_tuplehash_to_ctrack(h);25832545 hlist_nulls_del_rcu(&h->hnnode);25462546+25472547+ zone_id = nf_ct_zone_id(nf_ct_zone(ct), NF_CT_DIRECTION(h));25842548 bucket = __hash_conntrack(nf_ct_net(ct),25852585- &h->tuple, hashsize);25492549+ &h->tuple, zone_id, hashsize);25862550 hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);25872551 }25882552 }···25992555 write_seqcount_end(&nf_conntrack_generation);26002556 nf_conntrack_all_unlock();26012557 local_bh_enable();25582558+25592559+ mutex_unlock(&nf_conntrack_mutex);2602256026032561 synchronize_net();26042562 kvfree(old_hash);
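The conntrack changes above are two related hardening steps. First, the zone ID is now folded into the bucket hash: before this, two entries differing only by conntrack zone always hashed to the same bucket, so a setup with many zones (the case the new nft_zones_many.sh selftest at the end of this series exercises) degenerated into a few very long chains. Second, the fixed insertion limit becomes a randomized one, so a flood that engineers collisions can no longer be tuned to sit just under a known cut-off. A minimal sketch of the randomized clamp, kernel-style and not buildable on its own, using the names from the diff:

    unsigned int chainlen = 0, max_chainlen;

    /* Per-insertion cap, uniform in [MIN_CHAINLEN, MIN_CHAINLEN + MAX_CHAINLEN):
     * prandom_u32_max(n) returns a pseudo-random value in [0, n). */
    max_chainlen = MIN_CHAINLEN + prandom_u32_max(MAX_CHAINLEN);

    hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode) {
            if (chainlen++ > max_chainlen)
                    goto chaintoolong;      /* refuse the insertion, bump the stat */
    }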
+12-5
net/netfilter/nf_nat_core.c
···150150151151/* We keep an extra hash for each conntrack, for fast searching. */152152static unsigned int153153-hash_by_src(const struct net *n, const struct nf_conntrack_tuple *tuple)153153+hash_by_src(const struct net *net,154154+ const struct nf_conntrack_zone *zone,155155+ const struct nf_conntrack_tuple *tuple)154156{155157 unsigned int hash;156158 struct {157159 struct nf_conntrack_man src;158160 u32 net_mix;159161 u32 protonum;162162+ u32 zone;160163 } __aligned(SIPHASH_ALIGNMENT) combined;161164162165 get_random_once(&nf_nat_hash_rnd, sizeof(nf_nat_hash_rnd));···168165169166 /* Original src, to ensure we map it consistently if poss. */170167 combined.src = tuple->src;171171- combined.net_mix = net_hash_mix(n);168168+ combined.net_mix = net_hash_mix(net);172169 combined.protonum = tuple->dst.protonum;170170+171171+ /* Zone ID can be used provided its valid for both directions */172172+ if (zone->dir == NF_CT_DEFAULT_ZONE_DIR)173173+ combined.zone = zone->id;173174174175 hash = siphash(&combined, sizeof(combined), &nf_nat_hash_rnd);175176···279272 struct nf_conntrack_tuple *result,280273 const struct nf_nat_range2 *range)281274{282282- unsigned int h = hash_by_src(net, tuple);275275+ unsigned int h = hash_by_src(net, zone, tuple);283276 const struct nf_conn *ct;284277285278 hlist_for_each_entry_rcu(ct, &nf_nat_bysource[h], nat_bysource) {···626619 unsigned int srchash;627620 spinlock_t *lock;628621629629- srchash = hash_by_src(net,622622+ srchash = hash_by_src(net, nf_ct_zone(ct),630623 &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);631624 lock = &nf_nat_locks[srchash % CONNTRACK_LOCKS];632625 spin_lock_bh(lock);···795788{796789 unsigned int h;797790798798- h = hash_by_src(nf_ct_net(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);791791+ h = hash_by_src(nf_ct_net(ct), nf_ct_zone(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);799792 spin_lock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);800793 hlist_del_rcu(&ct->nat_bysource);801794 spin_unlock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
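hash_by_src() hashes a flat struct with siphash(), so anything that should influence the NAT source hash has to become a member of that key. The zone ID is mixed in only when zone->dir == NF_CT_DEFAULT_ZONE_DIR, i.e. when the zone is valid for both directions and every lookup path computes the same hash; directional zones keep hashing as zone 0, which stays correct (merely slower on zone-heavy setups) because the bucket walk still compares the real zone. A hedged sketch of the pattern; the zeroing matters so padding and unset members hash deterministically:

    struct {
            struct nf_conntrack_man src;
            u32 net_mix;
            u32 protonum;
            u32 zone;
    } __aligned(SIPHASH_ALIGNMENT) combined;

    memset(&combined, 0, sizeof(combined));    /* deterministic key bytes */
    combined.src = tuple->src;
    combined.net_mix = net_hash_mix(net);
    combined.protonum = tuple->dst.protonum;
    if (zone->dir == NF_CT_DEFAULT_ZONE_DIR)   /* zone valid for both directions */
            combined.zone = zone->id;

    hash = siphash(&combined, sizeof(combined), &nf_nat_hash_rnd);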
+97-71
net/netfilter/nf_nat_masquerade.c
···991010#include <net/netfilter/nf_nat_masquerade.h>11111212+struct masq_dev_work {1313+ struct work_struct work;1414+ struct net *net;1515+ union nf_inet_addr addr;1616+ int ifindex;1717+ int (*iter)(struct nf_conn *i, void *data);1818+};1919+2020+#define MAX_MASQ_WORKER_COUNT 162121+1222static DEFINE_MUTEX(masq_mutex);1323static unsigned int masq_refcnt __read_mostly;2424+static atomic_t masq_worker_count __read_mostly;14251526unsigned int1627nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,···7463}7564EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4);76657777-static int device_cmp(struct nf_conn *i, void *ifindex)6666+static void iterate_cleanup_work(struct work_struct *work)6767+{6868+ struct masq_dev_work *w;6969+7070+ w = container_of(work, struct masq_dev_work, work);7171+7272+ nf_ct_iterate_cleanup_net(w->net, w->iter, (void *)w, 0, 0);7373+7474+ put_net(w->net);7575+ kfree(w);7676+ atomic_dec(&masq_worker_count);7777+ module_put(THIS_MODULE);7878+}7979+8080+/* Iterate conntrack table in the background and remove conntrack entries8181+ * that use the device/address being removed.8282+ *8383+ * In case too many work items have been queued already or memory allocation8484+ * fails iteration is skipped, conntrack entries will time out eventually.8585+ */8686+static void nf_nat_masq_schedule(struct net *net, union nf_inet_addr *addr,8787+ int ifindex,8888+ int (*iter)(struct nf_conn *i, void *data),8989+ gfp_t gfp_flags)9090+{9191+ struct masq_dev_work *w;9292+9393+ if (atomic_read(&masq_worker_count) > MAX_MASQ_WORKER_COUNT)9494+ return;9595+9696+ net = maybe_get_net(net);9797+ if (!net)9898+ return;9999+100100+ if (!try_module_get(THIS_MODULE))101101+ goto err_module;102102+103103+ w = kzalloc(sizeof(*w), gfp_flags);104104+ if (w) {105105+ /* We can overshoot MAX_MASQ_WORKER_COUNT, no big deal */106106+ atomic_inc(&masq_worker_count);107107+108108+ INIT_WORK(&w->work, iterate_cleanup_work);109109+ w->ifindex = ifindex;110110+ w->net = net;111111+ w->iter = iter;112112+ if (addr)113113+ w->addr = *addr;114114+ schedule_work(&w->work);115115+ return;116116+ }117117+118118+ module_put(THIS_MODULE);119119+ err_module:120120+ put_net(net);121121+}122122+123123+static int device_cmp(struct nf_conn *i, void *arg)78124{79125 const struct nf_conn_nat *nat = nfct_nat(i);126126+ const struct masq_dev_work *w = arg;8012781128 if (!nat)82129 return 0;8383- return nat->masq_index == (int)(long)ifindex;130130+ return nat->masq_index == w->ifindex;84131}8513286133static int masq_device_event(struct notifier_block *this,···15485 * and forget them.15586 */15687157157- nf_ct_iterate_cleanup_net(net, device_cmp,158158- (void *)(long)dev->ifindex, 0, 0);8888+ nf_nat_masq_schedule(net, NULL, dev->ifindex,8989+ device_cmp, GFP_KERNEL);15990 }1609116192 return NOTIFY_DONE;···1639416495static int inet_cmp(struct nf_conn *ct, void *ptr)16596{166166- struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;167167- struct net_device *dev = ifa->ifa_dev->dev;16897 struct nf_conntrack_tuple *tuple;9898+ struct masq_dev_work *w = ptr;16999170170- if (!device_cmp(ct, (void *)(long)dev->ifindex))100100+ if (!device_cmp(ct, ptr))171101 return 0;172102173103 tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;174104175175- return ifa->ifa_address == tuple->dst.u3.ip;105105+ return nf_inet_addr_cmp(&w->addr, &tuple->dst.u3);176106}177107178108static int masq_inet_event(struct notifier_block *this,179109 unsigned long event,180110 void *ptr)181111{182182- struct in_device *idev = ((struct in_ifaddr *)ptr)->ifa_dev;183183- 
struct net *net = dev_net(idev->dev);112112+ const struct in_ifaddr *ifa = ptr;113113+ const struct in_device *idev;114114+ const struct net_device *dev;115115+ union nf_inet_addr addr;116116+117117+ if (event != NETDEV_DOWN)118118+ return NOTIFY_DONE;184119185120 /* The masq_dev_notifier will catch the case of the device going186121 * down. So if the inetdev is dead and being destroyed we have187122 * no work to do. Otherwise this is an individual address removal188123 * and we have to perform the flush.189124 */125125+ idev = ifa->ifa_dev;190126 if (idev->dead)191127 return NOTIFY_DONE;192128193193- if (event == NETDEV_DOWN)194194- nf_ct_iterate_cleanup_net(net, inet_cmp, ptr, 0, 0);129129+ memset(&addr, 0, sizeof(addr));130130+131131+ addr.ip = ifa->ifa_address;132132+133133+ dev = idev->dev;134134+ nf_nat_masq_schedule(dev_net(idev->dev), &addr, dev->ifindex,135135+ inet_cmp, GFP_KERNEL);195136196137 return NOTIFY_DONE;197138}···215136};216137217138#if IS_ENABLED(CONFIG_IPV6)218218-static atomic_t v6_worker_count __read_mostly;219219-220139static int221140nat_ipv6_dev_get_saddr(struct net *net, const struct net_device *dev,222141 const struct in6_addr *daddr, unsigned int srcprefs,···264187}265188EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6);266189267267-struct masq_dev_work {268268- struct work_struct work;269269- struct net *net;270270- struct in6_addr addr;271271- int ifindex;272272-};273273-274274-static int inet6_cmp(struct nf_conn *ct, void *work)275275-{276276- struct masq_dev_work *w = (struct masq_dev_work *)work;277277- struct nf_conntrack_tuple *tuple;278278-279279- if (!device_cmp(ct, (void *)(long)w->ifindex))280280- return 0;281281-282282- tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;283283-284284- return ipv6_addr_equal(&w->addr, &tuple->dst.u3.in6);285285-}286286-287287-static void iterate_cleanup_work(struct work_struct *work)288288-{289289- struct masq_dev_work *w;290290-291291- w = container_of(work, struct masq_dev_work, work);292292-293293- nf_ct_iterate_cleanup_net(w->net, inet6_cmp, (void *)w, 0, 0);294294-295295- put_net(w->net);296296- kfree(w);297297- atomic_dec(&v6_worker_count);298298- module_put(THIS_MODULE);299299-}300300-301190/* atomic notifier; can't call nf_ct_iterate_cleanup_net (it can sleep).302191 *303192 * Defer it to the system workqueue.···276233{277234 struct inet6_ifaddr *ifa = ptr;278235 const struct net_device *dev;279279- struct masq_dev_work *w;280280- struct net *net;236236+ union nf_inet_addr addr;281237282282- if (event != NETDEV_DOWN || atomic_read(&v6_worker_count) >= 16)238238+ if (event != NETDEV_DOWN)283239 return NOTIFY_DONE;284240285241 dev = ifa->idev->dev;286286- net = maybe_get_net(dev_net(dev));287287- if (!net)288288- return NOTIFY_DONE;289242290290- if (!try_module_get(THIS_MODULE))291291- goto err_module;243243+ memset(&addr, 0, sizeof(addr));292244293293- w = kmalloc(sizeof(*w), GFP_ATOMIC);294294- if (w) {295295- atomic_inc(&v6_worker_count);245245+ addr.in6 = ifa->addr;296246297297- INIT_WORK(&w->work, iterate_cleanup_work);298298- w->ifindex = dev->ifindex;299299- w->net = net;300300- w->addr = ifa->addr;301301- schedule_work(&w->work);302302-303303- return NOTIFY_DONE;304304- }305305-306306- module_put(THIS_MODULE);307307- err_module:308308- put_net(net);247247+ nf_nat_masq_schedule(dev_net(dev), &addr, dev->ifindex, inet_cmp,248248+ GFP_ATOMIC);309249 return NOTIFY_DONE;310250}311251
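The masquerade rework promotes the deferral scheme the IPv6 path already used into a shared helper: the inet6 notifier runs in atomic context and a full conntrack walk can sleep, so the walk is packaged into a work item, capped at MAX_MASQ_WORKER_COUNT outstanding jobs, and run from the system workqueue; if the cap is hit or the allocation fails, the affected entries are simply left to time out. A condensed sketch of the deferral, following the diff, with the lifetime handling that makes it safe (references taken before queuing, dropped by the worker):

    static void nf_nat_masq_schedule(struct net *net, union nf_inet_addr *addr,
                                     int ifindex,
                                     int (*iter)(struct nf_conn *i, void *data),
                                     gfp_t gfp_flags)
    {
            struct masq_dev_work *w;

            net = maybe_get_net(net);           /* netns may already be going away */
            if (!net)
                    return;
            if (!try_module_get(THIS_MODULE))   /* worker must not outlive module */
                    goto err_net;

            w = kzalloc(sizeof(*w), gfp_flags);
            if (w) {
                    INIT_WORK(&w->work, iterate_cleanup_work);
                    w->ifindex = ifindex;
                    w->net = net;
                    w->iter = iter;
                    if (addr)
                            w->addr = *addr;
                    schedule_work(&w->work);    /* worker does put_net()/module_put() */
                    return;
            }
            module_put(THIS_MODULE);
    err_net:
            put_net(net);
    }

net/netfilter/nft_compat.c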
···1919#include <linux/netfilter_bridge/ebtables.h>2020#include <linux/netfilter_arp/arp_tables.h>2121#include <net/netfilter/nf_tables.h>2222+#include <net/netfilter/nf_log.h>22232324/* Used for matches where *info is larger than X byte */2425#define NFT_MATCH_LARGE_THRESH 192···258257 nft_compat_wait_for_destructors();259258260259 ret = xt_check_target(&par, size, proto, inv);261261- if (ret < 0)260260+ if (ret < 0) {261261+ if (ret == -ENOENT) {262262+ const char *modname = NULL;263263+264264+ if (strcmp(target->name, "LOG") == 0)265265+ modname = "nf_log_syslog";266266+ else if (strcmp(target->name, "NFLOG") == 0)267267+ modname = "nfnetlink_log";268268+269269+ if (modname &&270270+ nft_request_module(ctx->net, "%s", modname) == -EAGAIN)271271+ return -EAGAIN;272272+ }273273+262274 return ret;275275+ }263276264277 /* The standard target cannot be used */265278 if (!target->target)
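net/netfilter/xt_LOG.c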
···4444static int log_tg_check(const struct xt_tgchk_param *par)4545{4646 const struct xt_log_info *loginfo = par->targinfo;4747+ int ret;47484849 if (par->family != NFPROTO_IPV4 && par->family != NFPROTO_IPV6)4950 return -EINVAL;···5958 return -EINVAL;6059 }61606262- return nf_logger_find_get(par->family, NF_LOG_TYPE_LOG);6161+ ret = nf_logger_find_get(par->family, NF_LOG_TYPE_LOG);6262+ if (ret != 0 && !par->nft_compat) {6363+ request_module("%s", "nf_log_syslog");6464+6565+ ret = nf_logger_find_get(par->family, NF_LOG_TYPE_LOG);6666+ }6767+6868+ return ret;6369}64706571static void log_tg_destroy(const struct xt_tgdtor_param *par)
+9-1
net/netfilter/xt_NFLOG.c
···4242static int nflog_tg_check(const struct xt_tgchk_param *par)4343{4444 const struct xt_nflog_info *info = par->targinfo;4545+ int ret;45464647 if (info->flags & ~XT_NFLOG_MASK)4748 return -EINVAL;4849 if (info->prefix[sizeof(info->prefix) - 1] != '\0')4950 return -EINVAL;50515151- return nf_logger_find_get(par->family, NF_LOG_TYPE_ULOG);5252+ ret = nf_logger_find_get(par->family, NF_LOG_TYPE_ULOG);5353+ if (ret != 0 && !par->nft_compat) {5454+ request_module("%s", "nfnetlink_log");5555+5656+ ret = nf_logger_find_get(par->family, NF_LOG_TYPE_ULOG);5757+ }5858+5959+ return ret;5260}53615462static void nflog_tg_destroy(const struct xt_tgdtor_param *par)
+10-4
net/netlink/af_netlink.c
···594594595595 /* We need to ensure that the socket is hashed and visible. */596596 smp_wmb();597597- nlk_sk(sk)->bound = portid;597597+ /* Paired with lockless reads from netlink_bind(),598598+ * netlink_connect() and netlink_sendmsg().599599+ */600600+ WRITE_ONCE(nlk_sk(sk)->bound, portid);598601599602err:600603 release_sock(sk);···10151012 if (nlk->ngroups < BITS_PER_LONG)10161013 groups &= (1UL << nlk->ngroups) - 1;1017101410181018- bound = nlk->bound;10151015+ /* Paired with WRITE_ONCE() in netlink_insert() */10161016+ bound = READ_ONCE(nlk->bound);10191017 if (bound) {10201018 /* Ensure nlk->portid is up-to-date. */10211019 smp_rmb();···1102109811031099 /* No need for barriers here as we return to user-space without11041100 * using any of the bound attributes.11011101+ * Paired with WRITE_ONCE() in netlink_insert().11051102 */11061106- if (!nlk->bound)11031103+ if (!READ_ONCE(nlk->bound))11071104 err = netlink_autobind(sock);1108110511091106 if (err == 0) {···18931888 dst_group = nlk->dst_group;18941889 }1895189018961896- if (!nlk->bound) {18911891+ /* Paired with WRITE_ONCE() in netlink_insert() */18921892+ if (!READ_ONCE(nlk->bound)) {18971893 err = netlink_autobind(sock);18981894 if (err)18991895 goto out;
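The af_netlink change is an annotation fix rather than new locking: nlk->bound is written once while the socket is hashed but read locklessly on the bind, connect and sendmsg fast paths, so the accesses gain WRITE_ONCE()/READ_ONCE() to rule out store/load tearing and compiler re-reads, while the pre-existing smp_wmb()/smp_rmb() pair keeps ordering the portid publication against the flag. The shape of the pattern, simplified:

    /* writer (netlink_insert(), socket being hashed): */
    nlk_sk(sk)->portid = portid;
    smp_wmb();                              /* publish portid before 'bound' */
    WRITE_ONCE(nlk_sk(sk)->bound, portid);

    /* lockless reader (bind/connect/sendmsg): */
    if (READ_ONCE(nlk->bound)) {            /* paired with the WRITE_ONCE() */
            smp_rmb();                      /* paired with the writer's smp_wmb() */
            /* nlk->portid is now safe to use */
    }

net/sched/sch_fifo.c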
···233233 if (strncmp(q->ops->id + 1, "fifo", 4) != 0)234234 return 0;235235236236+ if (!q->ops->change)237237+ return 0;238238+236239 nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);237240 if (nla) {238241 nla->nla_type = RTM_NEWQDISC;
+4
net/sched/sch_taprio.c
···16411641 list_del(&q->taprio_list);16421642 spin_unlock(&taprio_list_lock);1643164316441644+ /* Note that taprio_reset() might not be called if an error16451645+ * happens in qdisc_create(), after taprio_init() has been called.16461646+ */16471647+ hrtimer_cancel(&q->advance_timer);1644164816451649 taprio_disable_offload(dev, q, NULL);16461650
+1-1
net/sctp/input.c
···702702 ch = skb_header_pointer(skb, offset, sizeof(*ch), &_ch);703703704704 /* Break out if chunk length is less then minimal. */705705- if (ntohs(ch->length) < sizeof(_ch))705705+ if (!ch || ntohs(ch->length) < sizeof(_ch))706706 break;707707708708 ch_end = offset + SCTP_PAD4(ntohs(ch->length));
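The small SCTP fix is about the skb_header_pointer() contract: it returns NULL when the requested [offset, offset + len) span is not present in the packet at all (a truncated chunk header, here), and otherwise either a pointer into the linear data or a copy placed in the caller-supplied buffer. The result therefore must be NULL-checked before any field access; the canonical usage now reads:

    struct sctp_chunkhdr _ch;
    const struct sctp_chunkhdr *ch;

    ch = skb_header_pointer(skb, offset, sizeof(_ch), &_ch);
    if (!ch || ntohs(ch->length) < sizeof(_ch))
            break;      /* truncated or malformed chunk */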
+2-1
net/smc/smc_clc.c
···230230 goto out_rel;231231 }232232 /* get address to which the internal TCP socket is bound */233233- kernel_getsockname(clcsock, (struct sockaddr *)&addrs);233233+ if (kernel_getsockname(clcsock, (struct sockaddr *)&addrs) < 0)234234+ goto out_rel;234235 /* analyze IP specific data of net_device belonging to TCP socket */235236 addr6 = (struct sockaddr_in6 *)&addrs;236237 rcu_read_lock();
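sound/firewire/motu/amdtp-motu-trace.h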
···276276277277 /* This is just for v2/v3 protocol. */278278 for (i = 0; i < data_blocks; ++i) {279279- *frames = (be32_to_cpu(buffer[1]) << 16) |280280- (be32_to_cpu(buffer[2]) >> 16);279279+ *frames = be32_to_cpu(buffer[1]);280280+ *frames <<= 16;281281+ *frames |= be32_to_cpu(buffer[2]) >> 16;282282+ ++frames;281283 buffer += data_block_quadlets;282282- frames++;283284 }284285}285286
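The MOTU change above only alters behaviour when the destination of the store is wider than 32 bits: evaluating (a << 16) | (b >> 16) on u32 operands truncates in 32-bit arithmetic before the widening assignment, losing the top bits of the first word; assigning first and shifting in the destination's type keeps them. A standalone demonstration with made-up values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t hi = 0x00012345, lo = 0xabcd0000;
            uint64_t wrong, right;

            /* The shift happens in 32-bit arithmetic, so 0x0001 is
             * shifted out before the result is widened for storage. */
            wrong = (hi << 16) | (lo >> 16);

            /* Widen first, then shift in the destination's width. */
            right = hi;
            right <<= 16;
            right |= lo >> 16;

            /* Prints: wrong=0x2345abcd right=0x12345abcd */
            printf("wrong=0x%llx right=0x%llx\n",
                   (unsigned long long)wrong, (unsigned long long)right);
            return 0;
    }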
+8-5
sound/firewire/oxfw/oxfw.c
···184184 model = val;185185 }186186187187- /*188188- * Mackie Onyx Satellite with base station has a quirk to report a wrong189189- * value in 'dbs' field of CIP header against its format information.190190- */191191- if (vendor == VENDOR_LOUD && model == MODEL_SATELLITE)187187+ if (vendor == VENDOR_LOUD) {188188+ // Mackie Onyx Satellite with base station has a quirk to report a wrong189189+ // value in 'dbs' field of CIP header against its format information.192190 oxfw->quirks |= SND_OXFW_QUIRK_WRONG_DBS;191191+192192+ // OXFW971-based models may transfer events by blocking method.193193+ if (!(oxfw->quirks & SND_OXFW_QUIRK_JUMBO_PAYLOAD))194194+ oxfw->quirks |= SND_OXFW_QUIRK_BLOCKING_TRANSMISSION;195195+ }193196194197 return 0;195198}
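sound/soc/fsl/fsl_esai.c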
···10731073 if (ret < 0)10741074 goto err_pm_get_sync;1075107510761076+ /*10771077+ * Register platform component before registering cpu dai for there10781078+ * is not defer probe for platform component in snd_soc_add_pcm_runtime().10791079+ */10801080+ ret = imx_pcm_dma_init(pdev, IMX_ESAI_DMABUF_SIZE);10811081+ if (ret) {10821082+ dev_err(&pdev->dev, "failed to init imx pcm dma: %d\n", ret);10831083+ goto err_pm_get_sync;10841084+ }10851085+10761086 ret = devm_snd_soc_register_component(&pdev->dev, &fsl_esai_component,10771087 &fsl_esai_dai, 1);10781088 if (ret) {···10911081 }1092108210931083 INIT_WORK(&esai_priv->work, fsl_esai_hw_reset);10941094-10951095- ret = imx_pcm_dma_init(pdev, IMX_ESAI_DMABUF_SIZE);10961096- if (ret) {10971097- dev_err(&pdev->dev, "failed to init imx pcm dma: %d\n", ret);10981098- goto err_pm_get_sync;10991099- }1100108411011085 return ret;11021086
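This and the following four fsl_* patches apply the same reorder, explained by the comment they add: snd_soc_add_pcm_runtime() has no deferred-probe handling for a missing platform component, so the PCM/DMA (platform) component must already be registered when registration of the CPU DAI component triggers card instantiation, or the bind fails outright instead of being retried. The resulting probe order, in sketch form:

    /* 1. platform/DMA component first ... */
    ret = imx_pcm_dma_init(pdev, IMX_ESAI_DMABUF_SIZE);
    if (ret)
            goto err_pm_get_sync;

    /* 2. ... then the component providing the CPU DAI, which may
     *    complete the sound card. */
    ret = devm_snd_soc_register_component(&pdev->dev, &fsl_esai_component,
                                          &fsl_esai_dai, 1);
    if (ret)
            goto err_pm_get_sync;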
+10-5
sound/soc/fsl/fsl_micfil.c
···737737 pm_runtime_enable(&pdev->dev);738738 regcache_cache_only(micfil->regmap, true);739739740740+ /*741741+ * Register platform component before registering cpu dai for there742742+ * is not defer probe for platform component in snd_soc_add_pcm_runtime().743743+ */744744+ ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);745745+ if (ret) {746746+ dev_err(&pdev->dev, "failed to pcm register\n");747747+ return ret;748748+ }749749+740750 ret = devm_snd_soc_register_component(&pdev->dev, &fsl_micfil_component,741751 &fsl_micfil_dai, 1);742752 if (ret) {743753 dev_err(&pdev->dev, "failed to register component %s\n",744754 fsl_micfil_component.name);745745- return ret;746755 }747747-748748- ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);749749- if (ret)750750- dev_err(&pdev->dev, "failed to pcm register\n");751756752757 return ret;753758}
+9-5
sound/soc/fsl/fsl_sai.c
···11521152 if (ret < 0)11531153 goto err_pm_get_sync;1154115411551155- ret = devm_snd_soc_register_component(&pdev->dev, &fsl_component,11561156- &sai->cpu_dai_drv, 1);11571157- if (ret)11581158- goto err_pm_get_sync;11591159-11551155+ /*11561156+ * Register platform component before registering cpu dai for there11571157+ * is not defer probe for platform component in snd_soc_add_pcm_runtime().11581158+ */11601159 if (sai->soc_data->use_imx_pcm) {11611160 ret = imx_pcm_dma_init(pdev, IMX_SAI_DMABUF_SIZE);11621161 if (ret)···11651166 if (ret)11661167 goto err_pm_get_sync;11671168 }11691169+11701170+ ret = devm_snd_soc_register_component(&pdev->dev, &fsl_component,11711171+ &sai->cpu_dai_drv, 1);11721172+ if (ret)11731173+ goto err_pm_get_sync;1168117411691175 return ret;11701176
+10-6
sound/soc/fsl/fsl_spdif.c
···14341434 pm_runtime_enable(&pdev->dev);14351435 regcache_cache_only(spdif_priv->regmap, true);1436143614371437+ /*14381438+ * Register platform component before registering cpu dai for there14391439+ * is not defer probe for platform component in snd_soc_add_pcm_runtime().14401440+ */14411441+ ret = imx_pcm_dma_init(pdev, IMX_SPDIF_DMABUF_SIZE);14421442+ if (ret) {14431443+ dev_err_probe(&pdev->dev, ret, "imx_pcm_dma_init failed\n");14441444+ goto err_pm_disable;14451445+ }14461446+14371447 ret = devm_snd_soc_register_component(&pdev->dev, &fsl_spdif_component,14381448 &spdif_priv->cpu_dai_drv, 1);14391449 if (ret) {14401450 dev_err(&pdev->dev, "failed to register DAI: %d\n", ret);14411441- goto err_pm_disable;14421442- }14431443-14441444- ret = imx_pcm_dma_init(pdev, IMX_SPDIF_DMABUF_SIZE);14451445- if (ret) {14461446- dev_err_probe(&pdev->dev, ret, "imx_pcm_dma_init failed\n");14471451 goto err_pm_disable;14481452 }14491453
+10-5
sound/soc/fsl/fsl_xcvr.c
···12151215 pm_runtime_enable(dev);12161216 regcache_cache_only(xcvr->regmap, true);1217121712181218+ /*12191219+ * Register platform component before registering cpu dai for there12201220+ * is not defer probe for platform component in snd_soc_add_pcm_runtime().12211221+ */12221222+ ret = devm_snd_dmaengine_pcm_register(dev, NULL, 0);12231223+ if (ret) {12241224+ dev_err(dev, "failed to pcm register\n");12251225+ return ret;12261226+ }12271227+12181228 ret = devm_snd_soc_register_component(dev, &fsl_xcvr_comp,12191229 &fsl_xcvr_dai, 1);12201230 if (ret) {12211231 dev_err(dev, "failed to register component %s\n",12221232 fsl_xcvr_comp.name);12231223- return ret;12241233 }12251225-12261226- ret = devm_snd_dmaengine_pcm_register(dev, NULL, 0);12271227- if (ret)12281228- dev_err(dev, "failed to pcm register\n");1229123412301235 return ret;12311236}
+5
sound/soc/intel/boards/sof_sdw.c
···929929 cpus + *cpu_id, cpu_dai_num,930930 codecs, codec_num,931931 NULL, &sdw_ops);932932+ /*933933+ * SoundWire DAILINKs use 'stream' functions and Bank Switch operations934934+ * based on wait_for_completion(), tag them as 'nonatomic'.935935+ */936936+ dai_links[*be_index].nonatomic = true;932937933938 ret = set_codec_init_func(card, link, dai_links + (*be_index)++,934939 playback, group_id);
+3
sound/soc/mediatek/Kconfig
···11# SPDX-License-Identifier: GPL-2.0-only22config SND_SOC_MEDIATEK33 tristate44+ select REGMAP_MMIO4556config SND_SOC_MT270167 tristate "ASoC support for Mediatek MT2701 chip"···189188config SND_SOC_MT8195190189 tristate "ASoC support for Mediatek MT8195 chip"191190 depends on ARCH_MEDIATEK || COMPILE_TEST191191+ depends on COMMON_CLK192192 select SND_SOC_MEDIATEK193193+ select MFD_SYSCON if SND_SOC_MT6359193194 help194195 This adds ASoC platform driver support for Mediatek MT8195 chip195196 that can be used with other codecs.
+11-8
sound/soc/mediatek/common/mtk-afe-fe-dai.c
···334334 devm_kcalloc(dev, afe->reg_back_up_list_num,335335 sizeof(unsigned int), GFP_KERNEL);336336337337- for (i = 0; i < afe->reg_back_up_list_num; i++)338338- regmap_read(regmap, afe->reg_back_up_list[i],339339- &afe->reg_back_up[i]);337337+ if (afe->reg_back_up) {338338+ for (i = 0; i < afe->reg_back_up_list_num; i++)339339+ regmap_read(regmap, afe->reg_back_up_list[i],340340+ &afe->reg_back_up[i]);341341+ }340342341343 afe->suspended = true;342344 afe->runtime_suspend(dev);···358356359357 afe->runtime_resume(dev);360358361361- if (!afe->reg_back_up)359359+ if (!afe->reg_back_up) {362360 dev_dbg(dev, "%s no reg_backup\n", __func__);363363-364364- for (i = 0; i < afe->reg_back_up_list_num; i++)365365- mtk_regmap_write(regmap, afe->reg_back_up_list[i],366366- afe->reg_back_up[i]);361361+ } else {362362+ for (i = 0; i < afe->reg_back_up_list_num; i++)363363+ mtk_regmap_write(regmap, afe->reg_back_up_list[i],364364+ afe->reg_back_up[i]);365365+ }367366368367 afe->suspended = false;369368 return 0;
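sound/soc/sof/imx/imx8.c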
···365365/* on i.MX8 there is 1 to 1 match between type and BAR idx */366366static int imx8_get_bar_index(struct snd_sof_dev *sdev, u32 type)367367{368368- return type;368368+ /* Only IRAM and SRAM bars are valid */369369+ switch (type) {370370+ case SOF_FW_BLK_TYPE_IRAM:371371+ case SOF_FW_BLK_TYPE_SRAM:372372+ return type;373373+ default:374374+ return -EINVAL;375375+ }369376}370377371378static void imx8_ipc_msg_data(struct snd_sof_dev *sdev,
+8-1
sound/soc/sof/imx/imx8m.c
···228228/* on i.MX8 there is 1 to 1 match between type and BAR idx */229229static int imx8m_get_bar_index(struct snd_sof_dev *sdev, u32 type)230230{231231- return type;231231+ /* Only IRAM and SRAM bars are valid */232232+ switch (type) {233233+ case SOF_FW_BLK_TYPE_IRAM:234234+ case SOF_FW_BLK_TYPE_SRAM:235235+ return type;236236+ default:237237+ return -EINVAL;238238+ }232239}233240234241static void imx8m_ipc_msg_data(struct snd_sof_dev *sdev,
+5-3
sound/soc/sof/loader.c
···729729 ret = request_firmware(&plat_data->fw, fw_filename, sdev->dev);730730731731 if (ret < 0) {732732- dev_err(sdev->dev, "error: request firmware %s failed err: %d\n",733733- fw_filename, ret);734732 dev_err(sdev->dev,735735- "you may need to download the firmware from https://github.com/thesofproject/sof-bin/\n");733733+ "error: sof firmware file is missing, you might need to\n");734734+ dev_err(sdev->dev,735735+ " download it from https://github.com/thesofproject/sof-bin/\n");736736 goto err;737737 } else {738738 dev_dbg(sdev->dev, "request_firmware %s successful\n",···880880void snd_sof_fw_unload(struct snd_sof_dev *sdev)881881{882882 /* TODO: support module unloading at runtime */883883+ release_firmware(sdev->pdata->fw);884884+ sdev->pdata->fw = NULL;883885}884886EXPORT_SYMBOL(snd_sof_fw_unload);
-1
sound/soc/sof/trace.c
···530530 return;531531532532 if (sdev->dtrace_is_enabled) {533533- dev_err(sdev->dev, "error: waking up any trace sleepers\n");534533 sdev->dtrace_error = true;535534 wake_up(&sdev->trace_sleep);536535 }
+2-2
sound/soc/sof/xtensa/core.c
···122122 * 0x0049fbb0: 8000f2d0 0049fc00 6f6c6c61 00632e63123123 */124124 for (i = 0; i < stack_words; i += 4) {125125- hex_dump_to_buffer(stack + i * 4, 16, 16, 4,125125+ hex_dump_to_buffer(stack + i, 16, 16, 4,126126 buf, sizeof(buf), false);127127- dev_err(sdev->dev, "0x%08x: %s\n", stack_ptr + i, buf);127127+ dev_err(sdev->dev, "0x%08x: %s\n", stack_ptr + i * 4, buf);128128 }129129}130130
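The xtensa oops-dump fix is a pointer-scaling slip: stack is a u32 *, so stack + i already advances by i elements (4 bytes each) and the old stack + i * 4 skipped ahead 16 bytes per iteration, while the printed address, being a plain integer, is the operand that really needs the explicit * 4. A standalone illustration over a made-up array:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t stack[16] = { 0 };
            int i = 4;

            /* Pointer arithmetic scales by the element size: both
             * expressions name stack[4], 16 bytes past the base. */
            printf("%d\n", (stack + i) == &stack[i]);   /* prints 1 */

            /* stack + i * 4 would name stack[16] instead: one past the
             * end here, and far outside the window being hex-dumped in
             * the SOF case. */
            return 0;
    }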
+4-14
sound/usb/card.c
···10541054 return 0;10551055}1056105610571057-static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume)10571057+static int usb_audio_resume(struct usb_interface *intf)10581058{10591059 struct snd_usb_audio *chip = usb_get_intfdata(intf);10601060 struct snd_usb_stream *as;···10801080 * we just notify and restart the mixers10811081 */10821082 list_for_each_entry(mixer, &chip->mixer_list, list) {10831083- err = snd_usb_mixer_resume(mixer, reset_resume);10831083+ err = snd_usb_mixer_resume(mixer);10841084 if (err < 0)10851085 goto err_out;10861086 }···11001100 atomic_dec(&chip->active); /* allow autopm after this point */11011101 return err;11021102}11031103-11041104-static int usb_audio_resume(struct usb_interface *intf)11051105-{11061106- return __usb_audio_resume(intf, false);11071107-}11081108-11091109-static int usb_audio_reset_resume(struct usb_interface *intf)11101110-{11111111- return __usb_audio_resume(intf, true);11121112-}11131103#else11141104#define usb_audio_suspend NULL11151105#define usb_audio_resume NULL11161116-#define usb_audio_reset_resume NULL11061106+#define usb_audio_resume NULL11171107#endif /* CONFIG_PM */1118110811191109static const struct usb_device_id usb_audio_ids [] = {···11251135 .disconnect = usb_audio_disconnect,11261136 .suspend = usb_audio_suspend,11271137 .resume = usb_audio_resume,11281128- .reset_resume = usb_audio_reset_resume,11381138+ .reset_resume = usb_audio_resume,11291139 .id_table = usb_audio_ids,11301140 .supports_autosuspend = 1,11311141};
+4-22
sound/usb/mixer.c
···36533653 return 0;36543654}3655365536563656-static int default_mixer_reset_resume(struct usb_mixer_elem_list *list)36573657-{36583658- int err;36593659-36603660- if (list->resume) {36613661- err = list->resume(list);36623662- if (err < 0)36633663- return err;36643664- }36653665- return restore_mixer_value(list);36663666-}36673667-36683668-int snd_usb_mixer_resume(struct usb_mixer_interface *mixer, bool reset_resume)36563656+int snd_usb_mixer_resume(struct usb_mixer_interface *mixer)36693657{36703658 struct usb_mixer_elem_list *list;36713671- usb_mixer_elem_resume_func_t f;36723659 int id, err;3673366036743661 /* restore cached mixer values */36753662 for (id = 0; id < MAX_ID_ELEMS; id++) {36763663 for_each_mixer_elem(list, mixer, id) {36773677- if (reset_resume)36783678- f = list->reset_resume;36793679- else36803680- f = list->resume;36813681- if (f) {36823682- err = f(list);36643664+ if (list->resume) {36653665+ err = list->resume(list);36833666 if (err < 0)36843667 return err;36853668 }···36833700 list->id = unitid;36843701 list->dump = snd_usb_mixer_dump_cval;36853702#ifdef CONFIG_PM36863686- list->resume = NULL;36873687- list->reset_resume = default_mixer_reset_resume;37033703+ list->resume = restore_mixer_value;36883704#endif36893705}
+1-2
sound/usb/mixer.h
···7070 bool is_std_info;7171 usb_mixer_elem_dump_func_t dump;7272 usb_mixer_elem_resume_func_t resume;7373- usb_mixer_elem_resume_func_t reset_resume;7473};75747675/* iterate over mixer element list of the given unit id */···120121121122#ifdef CONFIG_PM122123int snd_usb_mixer_suspend(struct usb_mixer_interface *mixer);123123-int snd_usb_mixer_resume(struct usb_mixer_interface *mixer, bool reset_resume);124124+int snd_usb_mixer_resume(struct usb_mixer_interface *mixer);124125#endif125126126127int snd_usb_set_cur_mix_value(struct usb_mixer_elem_info *cval, int channel,
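tools/lib/bpf/libbpf.c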
···6894689468956895 if (obj->gen_loader) {68966896 /* reset FDs */68976897- btf__set_fd(obj->btf, -1);68976897+ if (obj->btf)68986898+ btf__set_fd(obj->btf, -1);68986899 for (i = 0; i < obj->nr_maps; i++)68996900 obj->maps[i].fd = -1;69006901 if (!err)
+7-1
tools/lib/bpf/linker.c
···16491649static int find_glob_sym_btf(struct src_obj *obj, Elf64_Sym *sym, const char *sym_name,16501650 int *out_btf_sec_id, int *out_btf_id)16511651{16521652- int i, j, n = btf__get_nr_types(obj->btf), m, btf_id = 0;16521652+ int i, j, n, m, btf_id = 0;16531653 const struct btf_type *t;16541654 const struct btf_var_secinfo *vi;16551655 const char *name;1656165616571657+ if (!obj->btf) {16581658+ pr_warn("failed to find BTF info for object '%s'\n", obj->filename);16591659+ return -EINVAL;16601660+ }16611661+16621662+ n = btf__get_nr_types(obj->btf);16571663 for (i = 1; i <= n; i++) {16581664 t = btf__type_by_id(obj->btf, i);16591665
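tools/perf/Documentation/jitdump-specification.txt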
···164164The EH Frame header follows the Linux Standard Base (LSB) specification as described in the document at https://refspecs.linuxfoundation.org/LSB_1.3.0/gLSB/gLSB/ehframehdr.html165165166166167167-The EH Frame follows the LSB specicfication as described in the document at https://refspecs.linuxbase.org/LSB_3.0.0/LSB-PDA/LSB-PDA/ehframechpt.html167167+The EH Frame follows the LSB specification as described in the document at https://refspecs.linuxbase.org/LSB_3.0.0/LSB-PDA/LSB-PDA/ehframechpt.html168168169169170170NOTE: The mapped_size is generally either the same as unwind_data_size (if the unwinding data was mapped in memory by the running process) or zero (if the unwinding data is not mapped by the process). If the unwinding data was not mapped, then only the EH Frame Header will be read, which can be used to specify FP based unwinding for a function which does not have unwinding information.
+1-1
tools/perf/Documentation/perf-c2c.txt
···261261User can specify how to sort offsets for cacheline.262262263263Following fields are available and governs the final264264-output fields set for caheline offsets output:264264+output fields set for cacheline offsets output:265265266266 tid - coalesced by process TIDs267267 pid - coalesced by process PIDs
+1-1
tools/perf/Documentation/perf-intel-pt.txt
···883883884884"Transactions" events correspond to the start or end of transactions. The885885'flags' field can be used in perf script to determine whether the event is a886886-tranasaction start, commit or abort.886886+transaction start, commit or abort.887887888888Note that "instructions", "branches" and "transactions" events depend on code889889flow packets which can be disabled by using the config term "branch=0". Refer
+1-1
tools/perf/Documentation/perf-lock.txt
···44444545-f::4646--force::4747- Don't complan, do it.4747+ Don't complain, do it.48484949REPORT OPTIONS5050--------------
+1-1
tools/perf/Documentation/perf-script-perl.txt
···5454Traces meant to be processed using a script should be recorded with5555the above option: -a to enable system-wide collection.56565757-The format file for the sched_wakep event defines the following fields5757+The format file for the sched_wakeup event defines the following fields5858(see /sys/kernel/debug/tracing/events/sched/sched_wakeup/format):59596060----
+1-1
tools/perf/Documentation/perf-script-python.txt
···448448Traces meant to be processed using a script should be recorded with449449the above option: -a to enable system-wide collection.450450451451-The format file for the sched_wakep event defines the following fields451451+The format file for the sched_wakeup event defines the following fields452452(see /sys/kernel/debug/tracing/events/sched/sched_wakeup/format):453453454454----
+1-1
tools/perf/Documentation/perf-stat.txt
···385385Print metrics or metricgroups specified in a comma separated list.386386For a group all metrics from the group are added.387387The events from the metrics are automatically measured.388388-See perf list output for the possble metrics and metricgroups.388388+See perf list output for the possible metrics and metricgroups.389389390390-A::391391--no-aggr::
+1-1
tools/perf/Documentation/topdown.txt
···22-----------------------------------3344Intel CPUs (since Sandy Bridge and Silvermont) support a TopDown55-methology to break down CPU pipeline execution into 4 bottlenecks:55+methodology to break down CPU pipeline execution into 4 bottlenecks:66frontend bound, backend bound, bad speculation, retiring.7788For more details on Topdown see [1][5]
···10461046 {10471047 "EventCode": "0x4e010",10481048 "EventName": "PM_GCT_NOSLOT_IC_L3MISS",10491049- "BriefDescription": "Gct empty for this thread due to icach l3 miss",10491049+ "BriefDescription": "Gct empty for this thread due to icache l3 miss",10501050 "PublicDescription": ""10511051 },10521052 {
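tools/perf/tests/dwarf-unwind.c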
···2020/* For bsearch. We try to unwind functions in shared object. */2121#include <stdlib.h>22222323+/*2424+ * The test will assert frames are on the stack but tail call optimizations lose2525+ * the frame of the caller. Clang can disable this optimization on a called2626+ * function but GCC currently (11/2020) lacks this attribute. The barrier is2727+ * used to inhibit tail calls in these cases.2828+ */2929+#ifdef __has_attribute3030+#if __has_attribute(disable_tail_calls)3131+#define NO_TAIL_CALL_ATTRIBUTE __attribute__((disable_tail_calls))3232+#define NO_TAIL_CALL_BARRIER3333+#endif3434+#endif3535+#ifndef NO_TAIL_CALL_ATTRIBUTE3636+#define NO_TAIL_CALL_ATTRIBUTE3737+#define NO_TAIL_CALL_BARRIER __asm__ __volatile__("" : : : "memory");3838+#endif3939+2340static int mmap_handler(struct perf_tool *tool __maybe_unused,2441 union perf_event *event,2542 struct perf_sample *sample,···10891 return strcmp((const char *) symbol, funcs[idx]);10992}11093111111-noinline int test_dwarf_unwind__thread(struct thread *thread)9494+NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__thread(struct thread *thread)11295{11396 struct perf_sample sample;11497 unsigned long cnt = 0;···139122140123static int global_unwind_retval = -INT_MAX;141124142142-noinline int test_dwarf_unwind__compare(void *p1, void *p2)125125+NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__compare(void *p1, void *p2)143126{144127 /* Any possible value should be 'thread' */145128 struct thread *thread = *(struct thread **)p1;···158141 return p1 - p2;159142}160143161161-noinline int test_dwarf_unwind__krava_3(struct thread *thread)144144+NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_3(struct thread *thread)162145{163146 struct thread *array[2] = {thread, thread};164147 void *fp = &bsearch;···177160 return global_unwind_retval;178161}179162180180-noinline int test_dwarf_unwind__krava_2(struct thread *thread)163163+NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_2(struct thread *thread)181164{182182- return test_dwarf_unwind__krava_3(thread);165165+ int ret;166166+167167+ ret = test_dwarf_unwind__krava_3(thread);168168+ NO_TAIL_CALL_BARRIER;169169+ return ret;183170}184171185185-noinline int test_dwarf_unwind__krava_1(struct thread *thread)172172+NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_1(struct thread *thread)186173{187187- return test_dwarf_unwind__krava_2(thread);174174+ int ret;175175+176176+ ret = test_dwarf_unwind__krava_2(thread);177177+ NO_TAIL_CALL_BARRIER;178178+ return ret;188179}189180190181int test__dwarf_unwind(struct test *test __maybe_unused, int subtest __maybe_unused)
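The dwarf-unwind test asserts that each test_dwarf_unwind__krava_* frame is visible to the unwinder, but an optimizing compiler compiles return f(x); as a jump that reuses the caller's frame, removing it from the stack. Clang can forbid that per function with __attribute__((disable_tail_calls)); GCC lacks the attribute, so the patch keeps the call out of tail position by storing the result and placing an empty asm memory clobber between the call and the return. Reduced to a hypothetical minimum:

    int work(int x) { return x + 1; }

    int caller(int x)
    {
            int ret = work(x);      /* must stay a real call */

            /* The volatile asm with a "memory" clobber cannot be moved
             * across the call, so the call is no longer the function's
             * final action and cannot become a tail call. */
            __asm__ __volatile__("" : : : "memory");
            return ret;
    }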
+1-1
tools/perf/util/config.c
···801801 section->name, item->name);802802 ret = fn(key, value, data);803803 if (ret < 0) {804804- pr_err("Error: wrong config key-value pair %s=%s\n",804804+ pr_err("Error in the given config file: wrong config key-value pair %s=%s\n",805805 key, value);806806 /*807807 * Can't be just a 'break', as perf_config_set__for_each_entry()
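tools/testing/selftests/bpf/test_lwt_ip_encap.sh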
···112112 ip netns add "${NS2}"113113 ip netns add "${NS3}"114114115115+ # rp_filter gets confused by what these tests are doing, so disable it116116+ ip netns exec ${NS1} sysctl -wq net.ipv4.conf.all.rp_filter=0117117+ ip netns exec ${NS2} sysctl -wq net.ipv4.conf.all.rp_filter=0118118+ ip netns exec ${NS3} sysctl -wq net.ipv4.conf.all.rp_filter=0119119+ ip netns exec ${NS1} sysctl -wq net.ipv4.conf.default.rp_filter=0120120+ ip netns exec ${NS2} sysctl -wq net.ipv4.conf.default.rp_filter=0121121+ ip netns exec ${NS3} sysctl -wq net.ipv4.conf.default.rp_filter=0122122+115123 ip link add veth1 type veth peer name veth2116124 ip link add veth3 type veth peer name veth4117125 ip link add veth5 type veth peer name veth6···243235 ip -netns ${NS3} -6 addr add ${IPv6_GRE} nodad dev gre6_dev244236 ip -netns ${NS1} -6 route add ${IPv6_GRE}/128 dev veth5 via ${IPv6_6} ${VRF}245237 ip -netns ${NS2} -6 route add ${IPv6_GRE}/128 dev veth7 via ${IPv6_8} ${VRF}246246-247247- # rp_filter gets confused by what these tests are doing, so disable it248248- ip netns exec ${NS1} sysctl -wq net.ipv4.conf.all.rp_filter=0249249- ip netns exec ${NS2} sysctl -wq net.ipv4.conf.all.rp_filter=0250250- ip netns exec ${NS3} sysctl -wq net.ipv4.conf.all.rp_filter=0251238252239 TMPFILE=$(mktemp /tmp/test_lwt_ip_encap.XXXXXX)253240
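tools/testing/selftests/kvm/access_tracking_perf_test.c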
···371371 printf(" -v: specify the number of vCPUs to run.\n");372372 printf(" -o: Overlap guest memory accesses instead of partitioning\n"373373 " them into a separate region of memory for each vCPU.\n");374374- printf(" -s: specify the type of memory that should be used to\n"375375- " back the guest data region.\n\n");376376- backing_src_help();374374+ backing_src_help("-s");377375 puts("");378376 exit(0);379377}···379381int main(int argc, char *argv[])380382{381383 struct test_params params = {382382- .backing_src = VM_MEM_SRC_ANONYMOUS,384384+ .backing_src = DEFAULT_VM_MEM_SRC,383385 .vcpu_memory_bytes = DEFAULT_PER_VCPU_MEM_SIZE,384386 .vcpus = 1,385387 };
+7-8
tools/testing/selftests/kvm/demand_paging_test.c
···179179 return NULL;180180 }181181182182- if (!pollfd[0].revents & POLLIN)182182+ if (!(pollfd[0].revents & POLLIN))183183 continue;184184185185 r = read(uffd, &msg, sizeof(msg));···416416{417417 puts("");418418 printf("usage: %s [-h] [-m vm_mode] [-u uffd_mode] [-d uffd_delay_usec]\n"419419- " [-b memory] [-t type] [-v vcpus] [-o]\n", name);419419+ " [-b memory] [-s type] [-v vcpus] [-o]\n", name);420420 guest_modes_help();421421 printf(" -u: use userfaultfd to handle vCPU page faults. Mode is a\n"422422 " UFFD registration mode: 'MISSING' or 'MINOR'.\n");···426426 printf(" -b: specify the size of the memory region which should be\n"427427 " demand paged by each vCPU. e.g. 10M or 3G.\n"428428 " Default: 1G\n");429429- printf(" -t: The type of backing memory to use. Default: anonymous\n");430430- backing_src_help();429429+ backing_src_help("-s");431430 printf(" -v: specify the number of vCPUs to run.\n");432431 printf(" -o: Overlap guest memory accesses instead of partitioning\n"433432 " them into a separate region of memory for each vCPU.\n");···438439{439440 int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);440441 struct test_params p = {441441- .src_type = VM_MEM_SRC_ANONYMOUS,442442+ .src_type = DEFAULT_VM_MEM_SRC,442443 .partition_vcpu_memory_access = true,443444 };444445 int opt;445446446447 guest_modes_append_default();447448448448- while ((opt = getopt(argc, argv, "hm:u:d:b:t:v:o")) != -1) {449449+ while ((opt = getopt(argc, argv, "hm:u:d:b:s:v:o")) != -1) {449450 switch (opt) {450451 case 'm':451452 guest_modes_cmdline(optarg);···464465 case 'b':465466 guest_percpu_mem_size = parse_size(optarg);466467 break;467467- case 't':468468+ case 's':468469 p.src_type = parse_backing_src_type(optarg);469470 break;470471 case 'v':···484485485486 if (p.uffd_mode == UFFDIO_REGISTER_MODE_MINOR &&486487 !backing_src_is_shared(p.src_type)) {487487- TEST_FAIL("userfaultfd MINOR mode requires shared memory; pick a different -t");488488+ TEST_FAIL("userfaultfd MINOR mode requires shared memory; pick a different -s");488489 }489490490491 for_each_guest_mode(run_test, &p);
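The pollfd fix in the uffd handler thread is pure operator precedence: logical ! binds tighter than bitwise &, so !pollfd[0].revents & POLLIN parses as (!revents) & POLLIN, which is 0 whenever any event bit at all is set; the loop would then go on to read() on, say, a bare POLLERR. A standalone demonstration:

    #include <poll.h>
    #include <stdio.h>

    int main(void)
    {
            short revents = POLLERR;    /* an event, but not POLLIN */

            printf("%d\n", !revents & POLLIN);      /* 0: buggy test falls through */
            printf("%d\n", !(revents & POLLIN));    /* 1: fixed test skips the read */
            return 0;
    }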
+42-20
tools/testing/selftests/kvm/dirty_log_perf_test.c
···118118 toggle_dirty_logging(vm, slots, false);119119}120120121121-static void get_dirty_log(struct kvm_vm *vm, int slots, unsigned long *bitmap,122122- uint64_t nr_pages)121121+static void get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int slots)123122{124124- uint64_t slot_pages = nr_pages / slots;125123 int i;126124127125 for (i = 0; i < slots; i++) {128126 int slot = PERF_TEST_MEM_SLOT_INDEX + i;129129- unsigned long *slot_bitmap = bitmap + i * slot_pages;130127131131- kvm_vm_get_dirty_log(vm, slot, slot_bitmap);128128+ kvm_vm_get_dirty_log(vm, slot, bitmaps[i]);132129 }133130}134131135135-static void clear_dirty_log(struct kvm_vm *vm, int slots, unsigned long *bitmap,136136- uint64_t nr_pages)132132+static void clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[],133133+ int slots, uint64_t pages_per_slot)137134{138138- uint64_t slot_pages = nr_pages / slots;139135 int i;140136141137 for (i = 0; i < slots; i++) {142138 int slot = PERF_TEST_MEM_SLOT_INDEX + i;143143- unsigned long *slot_bitmap = bitmap + i * slot_pages;144139145145- kvm_vm_clear_dirty_log(vm, slot, slot_bitmap, 0, slot_pages);140140+ kvm_vm_clear_dirty_log(vm, slot, bitmaps[i], 0, pages_per_slot);146141 }142142+}143143+144144+static unsigned long **alloc_bitmaps(int slots, uint64_t pages_per_slot)145145+{146146+ unsigned long **bitmaps;147147+ int i;148148+149149+ bitmaps = malloc(slots * sizeof(bitmaps[0]));150150+ TEST_ASSERT(bitmaps, "Failed to allocate bitmaps array.");151151+152152+ for (i = 0; i < slots; i++) {153153+ bitmaps[i] = bitmap_zalloc(pages_per_slot);154154+ TEST_ASSERT(bitmaps[i], "Failed to allocate slot bitmap.");155155+ }156156+157157+ return bitmaps;158158+}159159+160160+static void free_bitmaps(unsigned long *bitmaps[], int slots)161161+{162162+ int i;163163+164164+ for (i = 0; i < slots; i++)165165+ free(bitmaps[i]);166166+167167+ free(bitmaps);147168}148169149170static void run_test(enum vm_guest_mode mode, void *arg)···172151 struct test_params *p = arg;173152 pthread_t *vcpu_threads;174153 struct kvm_vm *vm;175175- unsigned long *bmap;154154+ unsigned long **bitmaps;176155 uint64_t guest_num_pages;177156 uint64_t host_num_pages;157157+ uint64_t pages_per_slot;178158 int vcpu_id;179159 struct timespec start;180160 struct timespec ts_diff;···193171 guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm_get_page_shift(vm);194172 guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);195173 host_num_pages = vm_num_host_pages(mode, guest_num_pages);196196- bmap = bitmap_zalloc(host_num_pages);174174+ pages_per_slot = host_num_pages / p->slots;175175+176176+ bitmaps = alloc_bitmaps(p->slots, pages_per_slot);197177198178 if (dirty_log_manual_caps) {199179 cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2;···263239 iteration, ts_diff.tv_sec, ts_diff.tv_nsec);264240265241 clock_gettime(CLOCK_MONOTONIC, &start);266266- get_dirty_log(vm, p->slots, bmap, host_num_pages);242242+ get_dirty_log(vm, bitmaps, p->slots);267243 ts_diff = timespec_elapsed(start);268244 get_dirty_log_total = timespec_add(get_dirty_log_total,269245 ts_diff);···272248273249 if (dirty_log_manual_caps) {274250 clock_gettime(CLOCK_MONOTONIC, &start);275275- clear_dirty_log(vm, p->slots, bmap, host_num_pages);251251+ clear_dirty_log(vm, bitmaps, p->slots, pages_per_slot);276252 ts_diff = timespec_elapsed(start);277253 clear_dirty_log_total = timespec_add(clear_dirty_log_total,278254 ts_diff);···305281 clear_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);306282 }307283308308- free(bmap);284284+ 
free_bitmaps(bitmaps, p->slots);309285 free(vcpu_threads);310286 perf_test_destroy_vm(vm);311287}···332308 printf(" -v: specify the number of vCPUs to run.\n");333309 printf(" -o: Overlap guest memory accesses instead of partitioning\n"334310 " them into a separate region of memory for each vCPU.\n");335335- printf(" -s: specify the type of memory that should be used to\n"336336- " back the guest data region.\n\n");311311+ backing_src_help("-s");337312 printf(" -x: Split the memory region into this number of memslots.\n"338338- " (default: 1)");339339- backing_src_help();313313+ " (default: 1)\n");340314 puts("");341315 exit(0);342316}···346324 .iterations = TEST_HOST_LOOP_N,347325 .wr_fract = 1,348326 .partition_vcpu_memory_access = true,349349- .backing_src = VM_MEM_SRC_ANONYMOUS,327327+ .backing_src = DEFAULT_VM_MEM_SRC,350328 .slots = 1,351329 };352330 int opt;
+6-1
tools/testing/selftests/kvm/include/test_util.h
···9090 NUM_SRC_TYPES,9191};92929393+#define DEFAULT_VM_MEM_SRC VM_MEM_SRC_ANONYMOUS9494+9395struct vm_mem_backing_src_alias {9496 const char *name;9597 uint32_t flag;9698};9999+100100+#define MIN_RUN_DELAY_NS 200000UL9710198102bool thp_configured(void);99103size_t get_trans_hugepagesz(void);100104size_t get_def_hugetlb_pagesz(void);101105const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i);102106size_t get_backing_src_pagesz(uint32_t i);103103-void backing_src_help(void);107107+void backing_src_help(const char *flag);104108enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name);109109+long get_run_delay(void);105110106111/*107112 * Whether or not the given source type is shared memory (as opposed to
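tools/testing/selftests/kvm/kvm_page_table_test.c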
···456456 " (default: 1G)\n");457457 printf(" -v: specify the number of vCPUs to run\n"458458 " (default: 1)\n");459459- printf(" -s: specify the type of memory that should be used to\n"460460- " back the guest data region.\n"461461- " (default: anonymous)\n\n");462462- backing_src_help();459459+ backing_src_help("-s");463460 puts("");464461}465462···465468 int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);466469 struct test_params p = {467470 .test_mem_size = DEFAULT_TEST_MEM_SIZE,468468- .src_type = VM_MEM_SRC_ANONYMOUS,471471+ .src_type = DEFAULT_VM_MEM_SRC,469472 };470473 int opt;471474
+34-5
tools/testing/selftests/kvm/lib/test_util.c
···1111#include <stdlib.h>1212#include <time.h>1313#include <sys/stat.h>1414+#include <sys/syscall.h>1415#include <linux/mman.h>1516#include "linux/kernel.h"1617···130129{131130 size_t size;132131 FILE *f;132132+ int ret;133133134134 TEST_ASSERT(thp_configured(), "THP is not configured in host kernel");135135136136 f = fopen("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size", "r");137137 TEST_ASSERT(f != NULL, "Error in opening transparent_hugepage/hpage_pmd_size");138138139139- fscanf(f, "%ld", &size);139139+ ret = fscanf(f, "%ld", &size);140140+ ret = fscanf(f, "%ld", &size);141141+ TEST_ASSERT(ret < 1, "Error reading transparent_hugepage/hpage_pmd_size");140142 fclose(f);141143142144 return size;···283279 }284280}285281286286-void backing_src_help(void)282282+static void print_available_backing_src_types(const char *prefix)287283{288284 int i;289285290290- printf("Available backing src types:\n");286286+ printf("%sAvailable backing src types:\n", prefix);287287+291288 for (i = 0; i < NUM_SRC_TYPES; i++)292292- printf("\t%s\n", vm_mem_backing_src_alias(i)->name);289289+ printf("%s %s\n", prefix, vm_mem_backing_src_alias(i)->name);290290+}291291+292292+void backing_src_help(const char *flag)293293+{294294+ printf(" %s: specify the type of memory that should be used to\n"295295+ " back the guest data region. (default: %s)\n",296296+ flag, vm_mem_backing_src_alias(DEFAULT_VM_MEM_SRC)->name);297297+ print_available_backing_src_types(" ");293298}294299295300enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name)···309296 if (!strcmp(type_name, vm_mem_backing_src_alias(i)->name))310297 return i;311298312312- backing_src_help();299299+ print_available_backing_src_types("");313300 TEST_FAIL("Unknown backing src type: %s", type_name);314301 return -1;302302+}303303+304304+long get_run_delay(void)305305+{306306+ char path[64];307307+ long val[2];308308+ FILE *fp;309309+310310+ sprintf(path, "/proc/%ld/schedstat", syscall(SYS_gettid));311311+ fp = fopen(path, "r");312312+ /* Return MIN_RUN_DELAY_NS upon failure just to be safe */313313+ if (fscanf(fp, "%ld %ld ", &val[0], &val[1]) < 2)314314+ val[1] = MIN_RUN_DELAY_NS;315315+ fclose(fp);316316+317317+ return val[1];315318}
+286
tools/testing/selftests/kvm/rseq_test.c
···
+// SPDX-License-Identifier: GPL-2.0-only
+#define _GNU_SOURCE /* for program_invocation_short_name */
+#include <errno.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <sched.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <signal.h>
+#include <syscall.h>
+#include <sys/ioctl.h>
+#include <sys/sysinfo.h>
+#include <asm/barrier.h>
+#include <linux/atomic.h>
+#include <linux/rseq.h>
+#include <linux/unistd.h>
+
+#include "kvm_util.h"
+#include "processor.h"
+#include "test_util.h"
+
+#define VCPU_ID 0
+
+static __thread volatile struct rseq __rseq = {
+	.cpu_id = RSEQ_CPU_ID_UNINITIALIZED,
+};
+
+/*
+ * Use an arbitrary, bogus signature for configuring rseq, this test does not
+ * actually enter an rseq critical section.
+ */
+#define RSEQ_SIG 0xdeadbeef
+
+/*
+ * Any bug related to task migration is likely to be timing-dependent; perform
+ * a large number of migrations to reduce the odds of a false negative.
+ */
+#define NR_TASK_MIGRATIONS 100000
+
+static pthread_t migration_thread;
+static cpu_set_t possible_mask;
+static int min_cpu, max_cpu;
+static bool done;
+
+static atomic_t seq_cnt;
+
+static void guest_code(void)
+{
+	for (;;)
+		GUEST_SYNC(0);
+}
+
+static void sys_rseq(int flags)
+{
+	int r;
+
+	r = syscall(__NR_rseq, &__rseq, sizeof(__rseq), flags, RSEQ_SIG);
+	TEST_ASSERT(!r, "rseq failed, errno = %d (%s)", errno, strerror(errno));
+}
+
+static int next_cpu(int cpu)
+{
+	/*
+	 * Advance to the next CPU, skipping those that weren't in the original
+	 * affinity set.  Sadly, there is no CPU_SET_FOR_EACH, and cpu_set_t's
+	 * data storage is considered opaque.  Note, if this task is pinned
+	 * to a small set of discontiguous CPUs, e.g. 2 and 1023, this loop
+	 * will burn a lot of cycles and the test will take longer than normal
+	 * to complete.
+	 */
+	do {
+		cpu++;
+		if (cpu > max_cpu) {
+			cpu = min_cpu;
+			TEST_ASSERT(CPU_ISSET(cpu, &possible_mask),
+				    "Min CPU = %d must always be usable", cpu);
+			break;
+		}
+	} while (!CPU_ISSET(cpu, &possible_mask));
+
+	return cpu;
+}
+
+static void *migration_worker(void *ign)
+{
+	cpu_set_t allowed_mask;
+	int r, i, cpu;
+
+	CPU_ZERO(&allowed_mask);
+
+	for (i = 0, cpu = min_cpu; i < NR_TASK_MIGRATIONS; i++, cpu = next_cpu(cpu)) {
+		CPU_SET(cpu, &allowed_mask);
+
+		/*
+		 * Bump the sequence count twice to allow the reader to detect
+		 * that a migration may have occurred in between rseq and sched
+		 * CPU ID reads.  An odd sequence count indicates a migration
+		 * is in-progress, while a completely different count indicates
+		 * a migration occurred since the count was last read.
+		 */
+		atomic_inc(&seq_cnt);
+
+		/*
+		 * Ensure the odd count is visible while sched_getcpu() isn't
+		 * stable, i.e. while changing affinity is in-progress.
+		 */
+		smp_wmb();
+		r = sched_setaffinity(0, sizeof(allowed_mask), &allowed_mask);
+		TEST_ASSERT(!r, "sched_setaffinity failed, errno = %d (%s)",
+			    errno, strerror(errno));
+		smp_wmb();
+		atomic_inc(&seq_cnt);
+
+		CPU_CLR(cpu, &allowed_mask);
+
+		/*
+		 * Wait 1-10us before proceeding to the next iteration and more
+		 * specifically, before bumping seq_cnt again.  A delay is
+		 * needed on three fronts:
+		 *
+		 *  1. To allow sched_setaffinity() to prompt migration before
+		 *     ioctl(KVM_RUN) enters the guest so that TIF_NOTIFY_RESUME
+		 *     (or TIF_NEED_RESCHED, which indirectly leads to handling
+		 *     NOTIFY_RESUME) is handled in KVM context.
+		 *
+		 *     If NOTIFY_RESUME/NEED_RESCHED is set after KVM enters
+		 *     the guest, the guest will trigger an IO/MMIO exit all
+		 *     the way to userspace and the TIF flags will be handled
+		 *     by the generic "exit to userspace" logic, not by KVM.
+		 *     The exit to userspace is necessary to give the test a
+		 *     chance to check the rseq CPU ID (see #2).
+		 *
+		 *     Alternatively, guest_code() could include an instruction
+		 *     to trigger an exit that is handled by KVM, but any such
+		 *     exit requires architecture specific code.
+		 *
+		 *  2. To let ioctl(KVM_RUN) make its way back to the test
+		 *     before the next round of migration.  The test's check on
+		 *     the rseq CPU ID must wait for migration to complete in
+		 *     order to avoid false positives, thus any kernel rseq bug
+		 *     will be missed if the next migration starts before the
+		 *     check completes.
+		 *
+		 *  3. To ensure the read-side makes efficient forward progress,
+		 *     e.g. if sched_getcpu() involves a syscall.  Stalling the
+		 *     read-side means the test will spend more time waiting
+		 *     for sched_getcpu() to stabilize and less time trying to
+		 *     hit the timing-dependent bug.
+		 *
+		 * Because any bug in this area is likely to be timing-dependent,
+		 * run with a range of delays at 1us intervals from 1us to 10us
+		 * as a best effort to avoid tuning the test to the point where
+		 * it can hit _only_ the original bug and not detect future
+		 * regressions.
+		 *
+		 * The original bug can reproduce with a delay up to ~500us on
+		 * x86-64, but starts to require more iterations to reproduce
+		 * as the delay creeps above ~10us, and the average runtime of
+		 * each iteration obviously increases as well.  Cap the delay
+		 * at 10us to keep test runtime reasonable while minimizing
+		 * potential coverage loss.
+		 *
+		 * The lower bound for reproducing the bug is likely below 1us,
+		 * e.g. failures occur on x86-64 with nanosleep(0), but at that
+		 * point the overhead of the syscall likely dominates the delay.
+		 * Use usleep() for simplicity and to avoid unnecessary kernel
+		 * dependencies.
+		 */
+		usleep((i % 10) + 1);
+	}
+	done = true;
+	return NULL;
+}
+
+static int calc_min_max_cpu(void)
+{
+	int i, cnt, nproc;
+
+	if (CPU_COUNT(&possible_mask) < 2)
+		return -EINVAL;
+
+	/*
+	 * CPU_SET doesn't provide a FOR_EACH helper, get the min/max CPU that
+	 * this task is affined to in order to reduce the time spent querying
+	 * unusable CPUs, e.g. if this task is pinned to a small percentage of
+	 * total CPUs.
+	 */
+	nproc = get_nprocs_conf();
+	min_cpu = -1;
+	max_cpu = -1;
+	cnt = 0;
+
+	for (i = 0; i < nproc; i++) {
+		if (!CPU_ISSET(i, &possible_mask))
+			continue;
+		if (min_cpu == -1)
+			min_cpu = i;
+		max_cpu = i;
+		cnt++;
+	}
+
+	return (cnt < 2) ? -EINVAL : 0;
+}
+
+int main(int argc, char *argv[])
+{
+	int r, i, snapshot;
+	struct kvm_vm *vm;
+	u32 cpu, rseq_cpu;
+
+	/* Tell stdout not to buffer its content */
+	setbuf(stdout, NULL);
+
+	r = sched_getaffinity(0, sizeof(possible_mask), &possible_mask);
+	TEST_ASSERT(!r, "sched_getaffinity failed, errno = %d (%s)", errno,
+		    strerror(errno));
+
+	if (calc_min_max_cpu()) {
+		print_skip("Only one usable CPU, task migration not possible");
+		exit(KSFT_SKIP);
+	}
+
+	sys_rseq(0);
+
+	/*
+	 * Create and run a dummy VM that immediately exits to userspace via
+	 * GUEST_SYNC, while concurrently migrating the process by setting its
+	 * CPU affinity.
+	 */
+	vm = vm_create_default(VCPU_ID, 0, guest_code);
+	ucall_init(vm, NULL);
+
+	pthread_create(&migration_thread, NULL, migration_worker, 0);
+
+	for (i = 0; !done; i++) {
+		vcpu_run(vm, VCPU_ID);
+		TEST_ASSERT(get_ucall(vm, VCPU_ID, NULL) == UCALL_SYNC,
+			    "Guest failed?");
+
+		/*
+		 * Verify rseq's CPU matches sched's CPU.  Ensure migration
+		 * doesn't occur between sched_getcpu() and reading the rseq
+		 * cpu_id by rereading both if the sequence count changes, or
+		 * if the count is odd (migration in-progress).
+		 */
+		do {
+			/*
+			 * Drop bit 0 to force a mismatch if the count is odd,
+			 * i.e. if a migration is in-progress.
+			 */
+			snapshot = atomic_read(&seq_cnt) & ~1;
+
+			/*
+			 * Ensure reading sched_getcpu() and rseq.cpu_id
+			 * complete in a single "no migration" window, i.e. are
+			 * not reordered across the seq_cnt reads.
+			 */
+			smp_rmb();
+			cpu = sched_getcpu();
+			rseq_cpu = READ_ONCE(__rseq.cpu_id);
+			smp_rmb();
+		} while (snapshot != atomic_read(&seq_cnt));
+
+		TEST_ASSERT(rseq_cpu == cpu,
+			    "rseq CPU = %d, sched CPU = %d\n", rseq_cpu, cpu);
+	}
+
+	/*
+	 * Sanity check that the test was able to enter the guest a reasonable
+	 * number of times, e.g. didn't get stalled too often/long waiting for
+	 * sched_getcpu() to stabilize.  A 2:1 migration:KVM_RUN ratio is a
+	 * fairly conservative ratio on x86-64, which can do _more_ KVM_RUNs
+	 * than migrations given the 1us+ delay in the migration task.
+	 */
+	TEST_ASSERT(i > (NR_TASK_MIGRATIONS / 2),
+		    "Only performed %d KVM_RUNs, task stalled too much?\n", i);
+
+	pthread_join(migration_thread, NULL);
+
+	kvm_vm_free(vm);
+
+	sys_rseq(RSEQ_FLAG_UNREGISTER);
+
+	return 0;
+}
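
The even/odd seq_cnt handshake above is essentially a userspace seqlock: the migration thread bumps the count to an odd value before rebinding affinity and back to an even value afterwards, and the reader retries whenever the count was odd or changed across its reads. The standalone sketch below distills just that protocol using C11 atomics; it is not part of the patch, all names are illustrative, and it builds with e.g. cc -pthread.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int seq;
static atomic_int val_a, val_b;	/* stand-ins for the rseq/sched CPU IDs */

static void *writer(void *ign)
{
	int i;

	for (i = 0; i < 100000; i++) {
		atomic_fetch_add(&seq, 1);	/* odd: update in progress */
		atomic_store(&val_a, i);
		atomic_store(&val_b, i);
		atomic_fetch_add(&seq, 1);	/* even: update complete */
	}
	return NULL;
}

int main(void)
{
	pthread_t thread;
	int i, snap, a, b;

	pthread_create(&thread, NULL, writer, NULL);
	for (i = 0; i < 100000; i++) {
		do {
			/* Drop bit 0 so an odd (in-flight) count never matches. */
			snap = atomic_load(&seq) & ~1;
			a = atomic_load(&val_a);
			b = atomic_load(&val_b);
		} while (snap != atomic_load(&seq));

		/* The retry loop guarantees a consistent pair. */
		if (a != b)
			printf("torn read: %d != %d\n", a, b);
	}
	pthread_join(thread, NULL);
	return 0;
}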
···
 # When local build is done, headers are installed in the default
 # INSTALL_HDR_PATH usr/include.
 .PHONY: khdr
+.NOTPARALLEL:
 khdr:
 ifndef KSFT_KHDR_INSTALL_DONE
 ifeq (1,$(DEFAULT_INSTALL_HDR_PATH))
···
+#!/bin/bash
+
+# Test insertion speed for packets with identical addresses/ports
+# that are all placed in distinct conntrack zones.
+
+sfx=$(mktemp -u "XXXXXXXX")
+ns="ns-$sfx"
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+zones=20000
+have_ct_tool=0
+ret=0
+
+cleanup()
+{
+	ip netns del $ns
+}
+
+ip netns add $ns
+if [ $? -ne 0 ];then
+	echo "SKIP: Could not create net namespace $ns"
+	exit $ksft_skip
+fi
+
+trap cleanup EXIT
+
+conntrack -V > /dev/null 2>&1
+if [ $? -eq 0 ];then
+	have_ct_tool=1
+fi
+
+ip -net "$ns" link set lo up
+
+test_zones() {
+	local max_zones=$1
+
+ip netns exec $ns sysctl -q net.netfilter.nf_conntrack_udp_timeout=3600
+ip netns exec $ns nft -f /dev/stdin<<EOF
+flush ruleset
+table inet raw {
+	map rndzone {
+		typeof numgen inc mod $max_zones : ct zone
+	}
+
+	chain output {
+		type filter hook output priority -64000; policy accept;
+		udp dport 12345 ct zone set numgen inc mod 65536 map @rndzone
+	}
+}
+EOF
+	(
+	echo "add element inet raw rndzone {"
+	for i in $(seq 1 $max_zones);do
+		echo -n "$i : $i"
+		if [ $i -lt $max_zones ]; then
+			echo ","
+		else
+			echo "}"
+		fi
+	done
+	) | ip netns exec $ns nft -f /dev/stdin
+
+	local i=0
+	local j=0
+	local outerstart=$(date +%s%3N)
+	local stop=$outerstart
+
+	while [ $i -lt $max_zones ]; do
+		local start=$(date +%s%3N)
+		i=$((i + 10000))
+		j=$((j + 1))
+		dd if=/dev/zero of=/dev/stdout bs=8k count=10000 2>/dev/null | ip netns exec "$ns" nc -w 1 -q 1 -u -p 12345 127.0.0.1 12345 > /dev/null
+		if [ $? -ne 0 ];then
+			ret=1
+			break
+		fi
+
+		stop=$(date +%s%3N)
+		local duration=$((stop-start))
+		echo "PASS: added 10000 entries in $duration ms (now $i total, loop $j)"
+	done
+
+	if [ $have_ct_tool -eq 1 ]; then
+		local count=$(ip netns exec "$ns" conntrack -C)
+		local duration=$((stop-outerstart))
+
+		if [ $count -eq $max_zones ]; then
+			echo "PASS: inserted $count entries from packet path in $duration ms total"
+		else
+			ip netns exec $ns conntrack -S 1>&2
+			echo "FAIL: inserted $count entries from packet path in $duration ms total, expected $max_zones entries"
+			ret=1
+		fi
+	fi
+
+	if [ $ret -ne 0 ];then
+		echo "FAIL: insert $max_zones entries from packet path" 1>&2
+	fi
+}
+
+test_conntrack_tool() {
+	local max_zones=$1
+
+	ip netns exec $ns conntrack -F >/dev/null 2>/dev/null
+
+	local outerstart=$(date +%s%3N)
+	local start=$(date +%s%3N)
+	local stop=$start
+	local i=0
+	while [ $i -lt $max_zones ]; do
+		i=$((i + 1))
+		ip netns exec "$ns" conntrack -I -s 1.1.1.1 -d 2.2.2.2 --protonum 6 \
+			 --timeout 3600 --state ESTABLISHED --sport 12345 --dport 1000 --zone $i >/dev/null 2>&1
+		if [ $? -ne 0 ];then
+			ip netns exec "$ns" conntrack -I -s 1.1.1.1 -d 2.2.2.2 --protonum 6 \
+				 --timeout 3600 --state ESTABLISHED --sport 12345 --dport 1000 --zone $i > /dev/null
+			echo "FAIL: conntrack -I returned an error"
+			ret=1
+			break
+		fi
+
+		if [ $((i%10000)) -eq 0 ];then
+			stop=$(date +%s%3N)
+
+			local duration=$((stop-start))
+			echo "PASS: added 10000 entries in $duration ms (now $i total)"
+			start=$stop
+		fi
+	done
+
+	local count=$(ip netns exec "$ns" conntrack -C)
+	local duration=$((stop-outerstart))
+
+	if [ $count -eq $max_zones ]; then
+		echo "PASS: inserted $count entries via ctnetlink in $duration ms"
+	else
+		ip netns exec $ns conntrack -S 1>&2
+		echo "FAIL: inserted $count entries via ctnetlink in $duration ms, expected $max_zones entries"
+		ret=1
+	fi
+}
+
+test_zones $zones
+
+if [ $have_ct_tool -eq 1 ];then
+	test_conntrack_tool $zones
+else
+	echo "SKIP: Could not run ctnetlink insertion test without conntrack tool"
+	if [ $ret -eq 0 ];then
+		exit $ksft_skip
+	fi
+fi
+
+exit $ret
+8-6
tools/usb/testusb.c
···
 	}

 	entry->ifnum = ifnum;
-
-	/* FIXME update USBDEVFS_CONNECTINFO so it tells about high speed etc */
-
-	fprintf(stderr, "%s speed\t%s\t%u\n",
-		speed(entry->speed), entry->name, entry->ifnum);
-
 	entry->next = testdevs;
 	testdevs = entry;
 	return 0;
···
 		perror ("can't open dev file r/w");
 		return 0;
 	}
+
+	status = ioctl(fd, USBDEVFS_GET_SPEED, NULL);
+	if (status < 0)
+		fprintf(stderr, "USBDEVFS_GET_SPEED failed %d\n", status);
+	else
+		dev->speed = status;
+	fprintf(stderr, "%s speed\t%s\t%u\n",
+		speed(dev->speed), dev->name, dev->ifnum);

 restart:
 	for (i = 0; i < TEST_CASES; i++) {
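
The USBDEVFS_GET_SPEED ioctl adopted above can also be exercised outside testusb. Below is a minimal sketch, not part of the patch, assuming kernel headers that provide <linux/usbdevice_fs.h> and a hypothetical usbfs node path (on a real system, /dev/bus/usb/BBB/DDD for some bus/device pair); the raw return value indexes enum usb_device_speed (e.g. 3 is high speed).

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/usbdevice_fs.h>

int main(int argc, char *argv[])
{
	/* Placeholder default; pass a real usbfs node as argv[1]. */
	const char *path = argc > 1 ? argv[1] : "/dev/bus/usb/001/001";
	int fd, speed;

	fd = open(path, O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Returns the negotiated enum usb_device_speed value, or -1. */
	speed = ioctl(fd, USBDEVFS_GET_SPEED, NULL);
	if (speed < 0)
		perror("USBDEVFS_GET_SPEED");
	else
		printf("%s: speed enum = %d\n", path, speed);

	close(fd);
	return speed < 0;
}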
+1-1
tools/vm/page-types.c
···
 	if (opt_list && opt_list_mapcnt)
 		kpagecount_fd = checked_open(PROC_KPAGECOUNT, O_RDONLY);

-	if (opt_mark_idle && opt_file)
+	if (opt_mark_idle)
 		page_idle_fd = checked_open(SYS_KERNEL_MM_PAGE_IDLE, O_RDWR);

 	if (opt_list && opt_pid)
+49-19
virt/kvm/kvm_main.c
···
 {
 }

-static inline bool kvm_kick_many_cpus(const struct cpumask *cpus, bool wait)
+static inline bool kvm_kick_many_cpus(cpumask_var_t tmp, bool wait)
 {
-	if (unlikely(!cpus))
+	const struct cpumask *cpus;
+
+	if (likely(cpumask_available(tmp)))
+		cpus = tmp;
+	else
 		cpus = cpu_online_mask;

 	if (cpumask_empty(cpus))
···
 			continue;

 		kvm_make_request(req, vcpu);
-		cpu = vcpu->cpu;

 		if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
 			continue;

-		if (tmp != NULL && cpu != -1 && cpu != me &&
-		    kvm_request_needs_ipi(vcpu, req))
-			__cpumask_set_cpu(cpu, tmp);
+		/*
+		 * tmp can be "unavailable" if cpumasks are allocated off stack
+		 * as allocation of the mask is deliberately not fatal and is
+		 * handled by falling back to kicking all online CPUs.
+		 */
+		if (!cpumask_available(tmp))
+			continue;
+
+		/*
+		 * Note, the vCPU could get migrated to a different pCPU at any
+		 * point after kvm_request_needs_ipi(), which could result in
+		 * sending an IPI to the previous pCPU.  But, that's ok because
+		 * the purpose of the IPI is to ensure the vCPU returns to
+		 * OUTSIDE_GUEST_MODE, which is satisfied if the vCPU migrates.
+		 * Entering READING_SHADOW_PAGE_TABLES after this point is also
+		 * ok, as the requirement is only that KVM wait for vCPUs that
+		 * were reading SPTEs _before_ any changes were finalized.  See
+		 * kvm_vcpu_kick() for more details on handling requests.
+		 */
+		if (kvm_request_needs_ipi(vcpu, req)) {
+			cpu = READ_ONCE(vcpu->cpu);
+			if (cpu != -1 && cpu != me)
+				__cpumask_set_cpu(cpu, tmp);
+		}
 	}

 	called = kvm_kick_many_cpus(tmp, !!(req & KVM_REQUEST_WAIT));
···
 #ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
 void kvm_flush_remote_tlbs(struct kvm *kvm)
 {
-	/*
-	 * Read tlbs_dirty before setting KVM_REQ_TLB_FLUSH in
-	 * kvm_make_all_cpus_request.
-	 */
-	long dirty_count = smp_load_acquire(&kvm->tlbs_dirty);
-
 	++kvm->stat.generic.remote_tlb_flush_requests;
+
 	/*
 	 * We want to publish modifications to the page tables before reading
 	 * mode.  Pairs with a memory barrier in arch-specific code.
···
 	if (!kvm_arch_flush_remote_tlb(kvm)
 	    || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
 		++kvm->stat.generic.remote_tlb_flush;
-	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
 }
 EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
 #endif
···
 		}
 	}

-	if (range->flush_on_ret && (ret || kvm->tlbs_dirty))
+	if (range->flush_on_ret && ret)
 		kvm_flush_remote_tlbs(kvm);

 	if (locked)
···

 static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
 {
-	unsigned int old, val, shrink;
+	unsigned int old, val, shrink, grow_start;

 	old = val = vcpu->halt_poll_ns;
 	shrink = READ_ONCE(halt_poll_ns_shrink);
+	grow_start = READ_ONCE(halt_poll_ns_grow_start);
 	if (shrink == 0)
 		val = 0;
 	else
 		val /= shrink;
+
+	if (val < grow_start)
+		val = 0;

 	vcpu->halt_poll_ns = val;
 	trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
···
  */
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
 {
-	int me;
-	int cpu = vcpu->cpu;
+	int me, cpu;

 	if (kvm_vcpu_wake_up(vcpu))
 		return;

+	/*
+	 * Note, the vCPU could get migrated to a different pCPU at any point
+	 * after kvm_arch_vcpu_should_kick(), which could result in sending an
+	 * IPI to the previous pCPU.  But, that's ok because the purpose of
+	 * the IPI is to force the vCPU to leave IN_GUEST_MODE, and migrating
+	 * the vCPU also requires it to leave IN_GUEST_MODE.
+	 */
 	me = get_cpu();
-	if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
-		if (kvm_arch_vcpu_should_kick(vcpu))
+	if (kvm_arch_vcpu_should_kick(vcpu)) {
+		cpu = READ_ONCE(vcpu->cpu);
+		if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
 			smp_send_reschedule(cpu);
+	}
 	put_cpu();
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
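
The shrink_halt_poll_ns() hunk above makes shrinking snap the polling window to zero once it drops below halt_poll_ns_grow_start, rather than leaving a residual value too small to be useful. A userspace model of just that arithmetic, with assumed parameter values (the real ones are kvm module parameters) and illustrative names, behaves as follows:

#include <stdio.h>

/* Mirrors the patched shrink logic; a sketch, not the kernel code. */
static unsigned int shrink(unsigned int val, unsigned int shrink_div,
			   unsigned int grow_start)
{
	if (shrink_div == 0)
		val = 0;
	else
		val /= shrink_div;

	/* New behaviour: snap to zero instead of stalling below grow_start. */
	if (val < grow_start)
		val = 0;

	return val;
}

int main(void)
{
	/* 15000 / 2 = 7500, which is below grow_start 10000, so -> 0. */
	printf("%u\n", shrink(15000, 2, 10000));
	/* 40000 / 2 = 20000 stays above grow_start, so -> 20000. */
	printf("%u\n", shrink(40000, 2, 10000));
	return 0;
}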