Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'v5.8-rc7' into i2c/for-5.9

+10234 -6716
+3
.gitignore
··· 143 143 /allrandom.config 144 144 /allyes.config 145 145 146 + # Kconfig savedefconfig output 147 + /defconfig 148 + 146 149 # Kdevelop4 147 150 *.kdev4 148 151
+8
.mailmap
··· 90 90 Frank Zago <fzago@systemfabricworks.com> 91 91 Gao Xiang <xiang@kernel.org> <gaoxiang25@huawei.com> 92 92 Gao Xiang <xiang@kernel.org> <hsiangkao@aol.com> 93 + Gerald Schaefer <gerald.schaefer@linux.ibm.com> <gerald.schaefer@de.ibm.com> 94 + Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@de.ibm.com> 95 + Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@linux.vnet.ibm.com> 93 96 Greg Kroah-Hartman <greg@echidna.(none)> 94 97 Greg Kroah-Hartman <gregkh@suse.de> 95 98 Greg Kroah-Hartman <greg@kroah.com> 96 99 Gregory CLEMENT <gregory.clement@bootlin.com> <gregory.clement@free-electrons.com> 97 100 Hanjun Guo <guohanjun@huawei.com> <hanjun.guo@linaro.org> 101 + Heiko Carstens <hca@linux.ibm.com> <h.carstens@de.ibm.com> 102 + Heiko Carstens <hca@linux.ibm.com> <heiko.carstens@de.ibm.com> 98 103 Henk Vergonet <Henk.Vergonet@gmail.com> 99 104 Henrik Kretzschmar <henne@nachtwindheim.de> 100 105 Henrik Rydberg <rydberg@bitmath.org> ··· 198 193 Mayuresh Janorkar <mayur@ti.com> 199 194 Michael Buesch <m@bues.ch> 200 195 Michel Dänzer <michel@tungstengraphics.com> 196 + Mike Rapoport <rppt@kernel.org> <mike@compulab.co.il> 197 + Mike Rapoport <rppt@kernel.org> <mike.rapoport@gmail.com> 198 + Mike Rapoport <rppt@kernel.org> <rppt@linux.ibm.com> 201 199 Miodrag Dinic <miodrag.dinic@mips.com> <miodrag.dinic@imgtec.com> 202 200 Miquel Raynal <miquel.raynal@bootlin.com> <miquel.raynal@free-electrons.com> 203 201 Mitesh shah <mshah@teja.com>
+10 -1
Documentation/ABI/testing/debugfs-driver-habanalabs
··· 16 16 gating mechanism in Gaudi. Due to how Gaudi is built, the 17 17 clock gating needs to be disabled in order to access the 18 18 registers of the TPC and MME engines. This is sometimes needed 19 - during debug by the user and hence the user needs this option 19 + during debug by the user and hence the user needs this option. 20 + The user can supply a bitmask value, each bit represents 21 + a different engine to disable/enable its clock gating feature. 22 + The bitmask is composed of 20 bits: 23 + 0 - 7 : DMA channels 24 + 8 - 11 : MME engines 25 + 12 - 19 : TPC engines 26 + The bit's location of a specific engine can be determined 27 + using (1 << GAUDI_ENGINE_ID_*). GAUDI_ENGINE_ID_* values 28 + are defined in uapi habanalabs.h file in enum gaudi_engine_id 20 29 21 30 What: /sys/kernel/debug/habanalabs/hl<n>/command_buffers 22 31 Date: Jan 2019
+1 -1
Documentation/admin-guide/README.rst
··· 258 258 Compiling the kernel 259 259 -------------------- 260 260 261 - - Make sure you have at least gcc 4.6 available. 261 + - Make sure you have at least gcc 4.9 available. 262 262 For more information, refer to :ref:`Documentation/process/changes.rst <changes>`. 263 263 264 264 Please note that you can still run a.out user programs with this kernel.
+2
Documentation/arm64/cpu-feature-registers.rst
··· 171 171 172 172 173 173 3) ID_AA64PFR1_EL1 - Processor Feature Register 1 174 + 174 175 +------------------------------+---------+---------+ 175 176 | Name | bits | visible | 176 177 +------------------------------+---------+---------+ ··· 182 181 183 182 184 183 4) MIDR_EL1 - Main ID Register 184 + 185 185 +------------------------------+---------+---------+ 186 186 | Name | bits | visible | 187 187 +------------------------------+---------+---------+
+8
Documentation/arm64/silicon-errata.rst
··· 147 147 +----------------+-----------------+-----------------+-----------------------------+ 148 148 | Qualcomm Tech. | Falkor v{1,2} | E1041 | QCOM_FALKOR_ERRATUM_1041 | 149 149 +----------------+-----------------+-----------------+-----------------------------+ 150 + | Qualcomm Tech. | Kryo4xx Gold | N/A | ARM64_ERRATUM_1463225 | 151 + +----------------+-----------------+-----------------+-----------------------------+ 152 + | Qualcomm Tech. | Kryo4xx Gold | N/A | ARM64_ERRATUM_1418040 | 153 + +----------------+-----------------+-----------------+-----------------------------+ 154 + | Qualcomm Tech. | Kryo4xx Silver | N/A | ARM64_ERRATUM_1530923 | 155 + +----------------+-----------------+-----------------+-----------------------------+ 156 + | Qualcomm Tech. | Kryo4xx Silver | N/A | ARM64_ERRATUM_1024718 | 157 + +----------------+-----------------+-----------------+-----------------------------+ 150 158 +----------------+-----------------+-----------------+-----------------------------+ 151 159 | Fujitsu | A64FX | E#010001 | FUJITSU_ERRATUM_010001 | 152 160 +----------------+-----------------+-----------------+-----------------------------+
+1 -8
Documentation/block/bfq-iosched.rst
··· 492 492 it with auto-tuning. An alternative way to achieve this goal is to 493 493 just increase the value of timeout_sync, leaving max_budget equal to 0. 494 494 495 - weights 496 - ------- 497 - 498 - Read-only parameter, used to show the weights of the currently active 499 - BFQ queues. 500 - 501 - 502 495 4. Group scheduling with BFQ 503 496 ============================ 504 497 ··· 559 566 For each group, there is only the following parameter to set. 560 567 561 568 weight (namely blkio.bfq.weight or io.bfq-weight): the weight of the 562 - group inside its parent. Available values: 1..10000 (default 100). The 569 + group inside its parent. Available values: 1..1000 (default 100). The 563 570 linear mapping between ioprio and weights, described at the beginning 564 571 of the tunable section, is still valid, but all weights higher than 565 572 IOPRIO_BE_NR*10 are mapped to ioprio 0.
+8
Documentation/core-api/dma-api.rst
··· 206 206 207 207 :: 208 208 209 + bool 210 + dma_need_sync(struct device *dev, dma_addr_t dma_addr); 211 + 212 + Returns %true if dma_sync_single_for_{device,cpu} calls are required to 213 + transfer memory ownership. Returns %false if those calls can be skipped. 214 + 215 + :: 216 + 209 217 unsigned long 210 218 dma_get_merge_boundary(struct device *dev); 211 219
+40
Documentation/dev-tools/kunit/faq.rst
··· 61 61 kernel by installing a production configuration of the kernel on production 62 62 hardware with a production userspace and then trying to exercise some behavior 63 63 that depends on interactions between the hardware, the kernel, and userspace. 64 + 65 + KUnit isn't working, what should I do? 66 + ====================================== 67 + 68 + Unfortunately, there are a number of things which can break, but here are some 69 + things to try. 70 + 71 + 1. Try running ``./tools/testing/kunit/kunit.py run`` with the ``--raw_output`` 72 + parameter. This might show details or error messages hidden by the kunit_tool 73 + parser. 74 + 2. Instead of running ``kunit.py run``, try running ``kunit.py config``, 75 + ``kunit.py build``, and ``kunit.py exec`` independently. This can help track 76 + down where an issue is occurring. (If you think the parser is at fault, you 77 + can run it manually against stdin or a file with ``kunit.py parse``.) 78 + 3. Running the UML kernel directly can often reveal issues or error messages 79 + kunit_tool ignores. This should be as simple as running ``./vmlinux`` after 80 + building the UML kernel (e.g., by using ``kunit.py build``). Note that UML 81 + has some unusual requirements (such as the host having a tmpfs filesystem 82 + mounted), and has had issues in the past when built statically and the host 83 + has KASLR enabled. (On older host kernels, you may need to run ``setarch 84 + `uname -m` -R ./vmlinux`` to disable KASLR.) 85 + 4. Make sure the kernel .config has ``CONFIG_KUNIT=y`` and at least one test 86 + (e.g. ``CONFIG_KUNIT_EXAMPLE_TEST=y``). kunit_tool will keep its .config 87 + around, so you can see what config was used after running ``kunit.py run``. 88 + It also preserves any config changes you might make, so you can 89 + enable/disable things with ``make ARCH=um menuconfig`` or similar, and then 90 + re-run kunit_tool. 91 + 5. Try to run ``make ARCH=um defconfig`` before running ``kunit.py run``. 
This 92 + may help clean up any residual config items which could be causing problems. 93 + 6. Finally, try running KUnit outside UML. KUnit and KUnit tests can run be 94 + built into any kernel, or can be built as a module and loaded at runtime. 95 + Doing so should allow you to determine if UML is causing the issue you're 96 + seeing. When tests are built-in, they will execute when the kernel boots, and 97 + modules will automatically execute associated tests when loaded. Test results 98 + can be collected from ``/sys/kernel/debug/kunit/<test suite>/results``, and 99 + can be parsed with ``kunit.py parse``. For more details, see "KUnit on 100 + non-UML architectures" in :doc:`usage`. 101 + 102 + If none of the above tricks help, you are always welcome to email any issues to 103 + kunit-dev@googlegroups.com.
+28 -10
Documentation/devicetree/bindings/Makefile
··· 2 2 DT_DOC_CHECKER ?= dt-doc-validate 3 3 DT_EXTRACT_EX ?= dt-extract-example 4 4 DT_MK_SCHEMA ?= dt-mk-schema 5 - DT_MK_SCHEMA_USERONLY_FLAG := $(if $(DT_SCHEMA_FILES), -u) 6 5 7 6 DT_SCHEMA_MIN_VERSION = 2020.5 8 7 ··· 34 35 35 36 DT_DOCS = $(shell $(find_cmd) | sed -e 's|^$(srctree)/||') 36 37 37 - DT_SCHEMA_FILES ?= $(DT_DOCS) 38 - 39 - extra-$(CHECK_DT_BINDING) += $(patsubst $(src)/%.yaml,%.example.dts, $(DT_SCHEMA_FILES)) 40 - extra-$(CHECK_DT_BINDING) += $(patsubst $(src)/%.yaml,%.example.dt.yaml, $(DT_SCHEMA_FILES)) 41 - extra-$(CHECK_DT_BINDING) += processed-schema-examples.yaml 42 - 43 38 override DTC_FLAGS := \ 44 39 -Wno-avoid_unnecessary_addr_size \ 45 - -Wno-graph_child_address 40 + -Wno-graph_child_address \ 41 + -Wno-interrupt_provider 46 42 47 43 $(obj)/processed-schema-examples.yaml: $(DT_DOCS) check_dtschema_version FORCE 48 44 $(call if_changed,mk_schema) 49 45 50 - $(obj)/processed-schema.yaml: DT_MK_SCHEMA_FLAGS := $(DT_MK_SCHEMA_USERONLY_FLAG) 46 + ifeq ($(DT_SCHEMA_FILES),) 47 + 48 + # Unless DT_SCHEMA_FILES is specified, use the full schema for dtbs_check too. 
49 + # Just copy processed-schema-examples.yaml 50 + 51 + $(obj)/processed-schema.yaml: $(obj)/processed-schema-examples.yaml FORCE 52 + $(call if_changed,copy) 53 + 54 + DT_SCHEMA_FILES = $(DT_DOCS) 55 + 56 + else 57 + 58 + # If DT_SCHEMA_FILES is specified, use it for processed-schema.yaml 59 + 60 + $(obj)/processed-schema.yaml: DT_MK_SCHEMA_FLAGS := -u 51 61 $(obj)/processed-schema.yaml: $(DT_SCHEMA_FILES) check_dtschema_version FORCE 52 62 $(call if_changed,mk_schema) 53 63 54 - extra-y += processed-schema.yaml 64 + endif 65 + 66 + extra-$(CHECK_DT_BINDING) += $(patsubst $(src)/%.yaml,%.example.dts, $(DT_SCHEMA_FILES)) 67 + extra-$(CHECK_DT_BINDING) += $(patsubst $(src)/%.yaml,%.example.dt.yaml, $(DT_SCHEMA_FILES)) 68 + extra-$(CHECK_DT_BINDING) += processed-schema-examples.yaml 69 + extra-$(CHECK_DTBS) += processed-schema.yaml 70 + 71 + # Hack: avoid 'Argument list too long' error for 'make clean'. Remove most of 72 + # build artifacts here before they are processed by scripts/Makefile.clean 73 + clean-files = $(shell find $(obj) \( -name '*.example.dts' -o \ 74 + -name '*.example.dt.yaml' \) -delete 2>/dev/null)
+1 -1
Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt
··· 47 47 &lsio_mu1 1 2 48 48 &lsio_mu1 1 3 49 49 &lsio_mu1 3 3>; 50 - See Documentation/devicetree/bindings/mailbox/fsl,mu.txt 50 + See Documentation/devicetree/bindings/mailbox/fsl,mu.yaml 51 51 for detailed mailbox binding. 52 52 53 53 Note: Each mu which supports general interrupt should have an alias correctly
+2 -2
Documentation/devicetree/bindings/bus/socionext,uniphier-system-bus.yaml
··· 80 80 ranges = <1 0x00000000 0x42000000 0x02000000>, 81 81 <5 0x00000000 0x46000000 0x01000000>; 82 82 83 - ethernet@1,01f00000 { 83 + ethernet@1,1f00000 { 84 84 compatible = "smsc,lan9115"; 85 85 reg = <1 0x01f00000 0x1000>; 86 86 interrupts = <0 48 4>; 87 87 phy-mode = "mii"; 88 88 }; 89 89 90 - uart@5,00200000 { 90 + serial@5,200000 { 91 91 compatible = "ns16550a"; 92 92 reg = <5 0x00200000 0x20>; 93 93 interrupts = <0 49 4>;
+1 -1
Documentation/devicetree/bindings/clock/imx27-clock.yaml
··· 7 7 title: Clock bindings for Freescale i.MX27 8 8 9 9 maintainers: 10 - - Fabio Estevam <fabio.estevam@freescale.com> 10 + - Fabio Estevam <fabio.estevam@nxp.com> 11 11 12 12 description: | 13 13 The clock consumer should specify the desired clock by having the clock
+1 -1
Documentation/devicetree/bindings/clock/imx31-clock.yaml
··· 7 7 title: Clock bindings for Freescale i.MX31 8 8 9 9 maintainers: 10 - - Fabio Estevam <fabio.estevam@freescale.com> 10 + - Fabio Estevam <fabio.estevam@nxp.com> 11 11 12 12 description: | 13 13 The clock consumer should specify the desired clock by having the clock
+1 -1
Documentation/devicetree/bindings/clock/imx5-clock.yaml
··· 7 7 title: Clock bindings for Freescale i.MX5 8 8 9 9 maintainers: 10 - - Fabio Estevam <fabio.estevam@freescale.com> 10 + - Fabio Estevam <fabio.estevam@nxp.com> 11 11 12 12 description: | 13 13 The clock consumer should specify the desired clock by having the clock
+1 -1
Documentation/devicetree/bindings/display/bridge/sii902x.txt
··· 37 37 simple-card or audio-graph-card binding. See their binding 38 38 documents on how to describe the way the sii902x device is 39 39 connected to the rest of the audio system: 40 - Documentation/devicetree/bindings/sound/simple-card.txt 40 + Documentation/devicetree/bindings/sound/simple-card.yaml 41 41 Documentation/devicetree/bindings/sound/audio-graph-card.txt 42 42 Note: In case of the audio-graph-card binding the used port 43 43 index should be 3.
+2 -2
Documentation/devicetree/bindings/display/imx/fsl-imx-drm.txt
··· 68 68 datasheet 69 69 - clocks : phandle to the PRE axi clock input, as described 70 70 in Documentation/devicetree/bindings/clock/clock-bindings.txt and 71 - Documentation/devicetree/bindings/clock/imx6q-clock.txt. 71 + Documentation/devicetree/bindings/clock/imx6q-clock.yaml. 72 72 - clock-names: should be "axi" 73 73 - interrupts: should contain the PRE interrupt 74 74 - fsl,iram: phandle pointing to the mmio-sram device node, that should be ··· 94 94 datasheet 95 95 - clocks : phandles to the PRG ipg and axi clock inputs, as described 96 96 in Documentation/devicetree/bindings/clock/clock-bindings.txt and 97 - Documentation/devicetree/bindings/clock/imx6q-clock.txt. 97 + Documentation/devicetree/bindings/clock/imx6q-clock.yaml. 98 98 - clock-names: should be "ipg" and "axi" 99 99 - fsl,pres: phandles to the PRE units attached to this PRG, with the fixed 100 100 PRE as the first entry and the muxable PREs following.
+2 -2
Documentation/devicetree/bindings/display/imx/ldb.txt
··· 30 30 "di2_sel" - IPU2 DI0 mux 31 31 "di3_sel" - IPU2 DI1 mux 32 32 The needed clock numbers for each are documented in 33 - Documentation/devicetree/bindings/clock/imx5-clock.txt, and in 34 - Documentation/devicetree/bindings/clock/imx6q-clock.txt. 33 + Documentation/devicetree/bindings/clock/imx5-clock.yaml, and in 34 + Documentation/devicetree/bindings/clock/imx6q-clock.yaml. 35 35 36 36 Optional properties: 37 37 - pinctrl-names : should be "default" on i.MX53, not used on i.MX6q
+1 -1
Documentation/devicetree/bindings/display/panel/arm,versatile-tft-panel.yaml
··· 33 33 34 34 examples: 35 35 - | 36 - sysreg { 36 + sysreg@0 { 37 37 compatible = "arm,versatile-sysreg", "syscon", "simple-mfd"; 38 38 reg = <0x00000 0x1000>; 39 39
+1 -1
Documentation/devicetree/bindings/display/rockchip/rockchip-drm.yaml
··· 24 24 description: | 25 25 Should contain a list of phandles pointing to display interface port 26 26 of vop devices. vop definitions as defined in 27 - Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt 27 + Documentation/devicetree/bindings/display/rockchip/rockchip-vop.yaml 28 28 29 29 required: 30 30 - compatible
+1 -1
Documentation/devicetree/bindings/gpio/mediatek,mt7621-gpio.txt
··· 12 12 Only the GPIO_ACTIVE_HIGH and GPIO_ACTIVE_LOW flags are supported. 13 13 - #interrupt-cells : Specifies the number of cells needed to encode an 14 14 interrupt. Should be 2. The first cell defines the interrupt number, 15 - the second encodes the triger flags encoded as described in 15 + the second encodes the trigger flags encoded as described in 16 16 Documentation/devicetree/bindings/interrupt-controller/interrupts.txt 17 17 - compatible: 18 18 - "mediatek,mt7621-gpio" for Mediatek controllers
+1 -1
Documentation/devicetree/bindings/interrupt-controller/csky,mpintc.txt
··· 10 10 16-31 : private irq, and we use 16 as the co-processor timer. 11 11 31-1024: common irq for soc ip. 12 12 13 - Interrupt triger mode: (Defined in dt-bindings/interrupt-controller/irq.h) 13 + Interrupt trigger mode: (Defined in dt-bindings/interrupt-controller/irq.h) 14 14 IRQ_TYPE_LEVEL_HIGH (default) 15 15 IRQ_TYPE_LEVEL_LOW 16 16 IRQ_TYPE_EDGE_RISING
+1 -1
Documentation/devicetree/bindings/mailbox/xlnx,zynqmp-ipi-mailbox.txt
··· 87 87 ranges; 88 88 89 89 /* APU<->RPU0 IPI mailbox controller */ 90 - ipi_mailbox_rpu0: mailbox@ff90400 { 90 + ipi_mailbox_rpu0: mailbox@ff990400 { 91 91 reg = <0xff990400 0x20>, 92 92 <0xff990420 0x20>, 93 93 <0xff990080 0x20>,
+1 -1
Documentation/devicetree/bindings/misc/olpc,xo1.75-ec.txt
··· 8 8 to receive a transfer (that is, when TX FIFO contains the response data) by 9 9 strobing the ACK pin with the ready signal. See the "ready-gpios" property of the 10 10 SSP binding as documented in: 11 - <Documentation/devicetree/bindings/spi/spi-pxa2xx.txt>. 11 + <Documentation/devicetree/bindings/spi/marvell,mmp2-ssp.yaml>. 12 12 13 13 Example: 14 14 &ssp3 {
+1 -1
Documentation/devicetree/bindings/net/mediatek-bluetooth.txt
··· 3 3 4 4 This device is a serial attached device to BTIF device and thus it must be a 5 5 child node of the serial node with BTIF. The dt-bindings details for BTIF 6 - device can be known via Documentation/devicetree/bindings/serial/8250.txt. 6 + device can be known via Documentation/devicetree/bindings/serial/8250.yaml. 7 7 8 8 Required properties: 9 9
+1 -1
Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
··· 114 114 [flags]> 115 115 116 116 On other mach-shmobile platforms GPIO is handled by the gpio-rcar driver. 117 - Please refer to Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt 117 + Please refer to Documentation/devicetree/bindings/gpio/renesas,rcar-gpio.yaml 118 118 for documentation of the GPIO device tree bindings on those platforms. 119 119 120 120
+1 -1
Documentation/devicetree/bindings/sound/audio-graph-card.txt
··· 5 5 see ${LINUX}/Documentation/devicetree/bindings/graph.txt 6 6 7 7 Basically, Audio Graph Card property is same as Simple Card. 8 - see ${LINUX}/Documentation/devicetree/bindings/sound/simple-card.txt 8 + see ${LINUX}/Documentation/devicetree/bindings/sound/simple-card.yaml 9 9 10 10 Below are same as Simple-Card. 11 11
+13 -4
Documentation/devicetree/bindings/sound/simple-card.yaml
··· 378 378 - | 379 379 sound { 380 380 compatible = "simple-audio-card"; 381 + #address-cells = <1>; 382 + #size-cells = <0>; 381 383 382 384 simple-audio-card,name = "rsnd-ak4643"; 383 385 simple-audio-card,format = "left_j"; ··· 393 391 "ak4642 Playback", "DAI1 Playback"; 394 392 395 393 dpcmcpu: simple-audio-card,cpu@0 { 394 + reg = <0>; 396 395 sound-dai = <&rcar_sound 0>; 397 396 }; 398 397 399 398 simple-audio-card,cpu@1 { 399 + reg = <1>; 400 400 sound-dai = <&rcar_sound 1>; 401 401 }; 402 402 ··· 422 418 - | 423 419 sound { 424 420 compatible = "simple-audio-card"; 421 + #address-cells = <1>; 422 + #size-cells = <0>; 425 423 426 424 simple-audio-card,routing = 427 425 "pcm3168a Playback", "DAI1 Playback", ··· 432 426 "pcm3168a Playback", "DAI4 Playback"; 433 427 434 428 simple-audio-card,dai-link@0 { 429 + reg = <0>; 435 430 format = "left_j"; 436 431 bitclock-master = <&sndcpu0>; 437 432 frame-master = <&sndcpu0>; ··· 446 439 }; 447 440 448 441 simple-audio-card,dai-link@1 { 442 + reg = <1>; 449 443 format = "i2s"; 450 444 bitclock-master = <&sndcpu1>; 451 445 frame-master = <&sndcpu1>; 452 446 453 447 convert-channels = <8>; /* TDM Split */ 454 448 455 - sndcpu1: cpu@0 { 449 + sndcpu1: cpu0 { 456 450 sound-dai = <&rcar_sound 1>; 457 451 }; 458 - cpu@1 { 452 + cpu1 { 459 453 sound-dai = <&rcar_sound 2>; 460 454 }; 461 - cpu@2 { 455 + cpu2 { 462 456 sound-dai = <&rcar_sound 3>; 463 457 }; 464 - cpu@3 { 458 + cpu3 { 465 459 sound-dai = <&rcar_sound 4>; 466 460 }; 467 461 codec { ··· 474 466 }; 475 467 476 468 simple-audio-card,dai-link@2 { 469 + reg = <2>; 477 470 format = "i2s"; 478 471 bitclock-master = <&sndcpu2>; 479 472 frame-master = <&sndcpu2>;
+1 -1
Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
··· 5 5 6 6 sti sound drivers allows to expose sti SoC audio interface through the 7 7 generic ASoC simple card. For details about sound card declaration please refer to 8 - Documentation/devicetree/bindings/sound/simple-card.txt. 8 + Documentation/devicetree/bindings/sound/simple-card.yaml. 9 9 10 10 1) sti-uniperiph-dai: audio dai device. 11 11 ---------------------------------------
+1 -1
Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt
··· 19 19 20 20 SPI Controller nodes must be child of GENI based Qualcomm Universal 21 21 Peripharal. Please refer GENI based QUP wrapper controller node bindings 22 - described in Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.txt. 22 + described in Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.yaml. 23 23 24 24 SPI slave nodes must be children of the SPI master node and conform to SPI bus 25 25 binding as described in Documentation/devicetree/bindings/spi/spi-bus.txt.
+1 -1
Documentation/devicetree/bindings/thermal/thermal-sensor.yaml
··· 41 41 #include <dt-bindings/interrupt-controller/arm-gic.h> 42 42 43 43 // Example 1: SDM845 TSENS 44 - soc: soc@0 { 44 + soc: soc { 45 45 #address-cells = <2>; 46 46 #size-cells = <2>; 47 47
+1 -1
Documentation/devicetree/bindings/thermal/thermal-zones.yaml
··· 224 224 #include <dt-bindings/thermal/thermal.h> 225 225 226 226 // Example 1: SDM845 TSENS 227 - soc: soc@0 { 227 + soc { 228 228 #address-cells = <2>; 229 229 #size-cells = <2>; 230 230
+1 -1
Documentation/devicetree/bindings/thermal/ti,am654-thermal.yaml
··· 35 35 #include <dt-bindings/soc/ti,sci_pm_domain.h> 36 36 vtm: thermal@42050000 { 37 37 compatible = "ti,am654-vtm"; 38 - reg = <0x0 0x42050000 0x0 0x25c>; 38 + reg = <0x42050000 0x25c>; 39 39 power-domains = <&k3_pds 80 TI_SCI_PD_EXCLUSIVE>; 40 40 #thermal-sensor-cells = <1>; 41 41 };
+1 -1
Documentation/devicetree/bindings/timer/csky,mptimer.txt
··· 8 8 - PTIM_CTLR "cr<0, 14>" Control reg to start reset timer. 9 9 - PTIM_TSR "cr<1, 14>" Interrupt cleanup status reg. 10 10 - PTIM_CCVR "cr<3, 14>" Current counter value reg. 11 - - PTIM_LVR "cr<6, 14>" Window value reg to triger next event. 11 + - PTIM_LVR "cr<6, 14>" Window value reg to trigger next event. 12 12 13 13 ============================== 14 14 timer node bindings definition
+2 -2
Documentation/devicetree/bindings/usb/aspeed,usb-vhub.yaml
··· 127 127 #address-cells = <1>; 128 128 #size-cells = <0>; 129 129 130 - string@0409 { 131 - reg = <0x0409>; 130 + string@409 { 131 + reg = <0x409>; 132 132 manufacturer = "ASPEED"; 133 133 product = "USB Virtual Hub"; 134 134 serial-number = "0000";
+6 -3
Documentation/devicetree/writing-schema.rst
··· 1 - :orphan: 1 + .. SPDX-License-Identifier: GPL-2.0 2 2 3 3 Writing DeviceTree Bindings in json-schema 4 4 ========================================== ··· 124 124 libyaml and its headers be installed on the host system. For some distributions 125 125 that involves installing the development package, such as: 126 126 127 - Debian: 127 + Debian:: 128 + 128 129 apt-get install libyaml-dev 129 - Fedora: 130 + 131 + Fedora:: 132 + 130 133 dnf -y install libyaml-devel 131 134 132 135 Running checks
+12
Documentation/driver-api/ptp.rst
··· 23 23 + Ancillary clock features 24 24 - Time stamp external events 25 25 - Period output signals configurable from user space 26 + - Low Pass Filter (LPF) access from user space 26 27 - Synchronization of the Linux system time via the PPS subsystem 27 28 28 29 PTP hardware clock kernel API ··· 95 94 96 95 - Auxiliary Slave/Master Mode Snapshot (optional interrupt) 97 96 - Target Time (optional interrupt) 97 + 98 + * Renesas (IDT) ClockMatrix™ 99 + 100 + - Up to 4 independent PHC channels 101 + - Integrated low pass filter (LPF), access via .adjPhase (compliant to ITU-T G.8273.2) 102 + - Programmable output periodic signals 103 + - Programmable inputs can time stamp external triggers 104 + - Driver and/or hardware configuration through firmware (idtcm.bin) 105 + - LPF settings (bandwidth, phase limiting, automatic holdover, physical layer assist (per ITU-T G.8273.2)) 106 + - Programmable output PTP clocks, any frequency up to 1GHz (to other PHY/MAC time stampers, refclk to ASSPs/SoCs/FPGAs) 107 + - Lock to GNSS input, automatic switching between GNSS and user-space PHC control (optional)
+2 -2
Documentation/filesystems/overlayfs.rst
··· 560 560 verified on mount time to check that upper file handles are not stale. 561 561 This verification may cause significant overhead in some cases. 562 562 563 - Note: the mount options index=off,nfs_export=on are conflicting and will 564 - result in an error. 563 + Note: the mount options index=off,nfs_export=on are conflicting for a 564 + read-write mount and will result in an error. 565 565 566 566 567 567 Testsuite
+17 -5
Documentation/i2c/slave-eeprom-backend.rst
··· 1 1 ============================== 2 - Linux I2C slave eeprom backend 2 + Linux I2C slave EEPROM backend 3 3 ============================== 4 4 5 - by Wolfram Sang <wsa@sang-engineering.com> in 2014-15 5 + by Wolfram Sang <wsa@sang-engineering.com> in 2014-20 6 6 7 - This is a proof-of-concept backend which acts like an EEPROM on the connected 8 - I2C bus. The memory contents can be modified from userspace via this file 9 - located in sysfs:: 7 + This backend simulates an EEPROM on the connected I2C bus. Its memory contents 8 + can be accessed from userspace via this file located in sysfs:: 10 9 11 10 /sys/bus/i2c/devices/<device-directory>/slave-eeprom 11 + 12 + The following types are available: 24c02, 24c32, 24c64, and 24c512. Read-only 13 + variants are also supported. The name needed for instantiating has the form 14 + 'slave-<type>[ro]'. Examples follow: 15 + 16 + 24c02, read/write, address 0x64: 17 + # echo slave-24c02 0x1064 > /sys/bus/i2c/devices/i2c-1/new_device 18 + 19 + 24c512, read-only, address 0x42: 20 + # echo slave-24c512ro 0x1042 > /sys/bus/i2c/devices/i2c-1/new_device 21 + 22 + You can also preload data during boot if a device-property named 23 + 'firmware-name' contains a valid filename (DT or ACPI only). 12 24 13 25 As of 2015, Linux doesn't support poll on binary sysfs files, so there is no 14 26 notification when another master changed the content.
+4 -3
Documentation/kbuild/modules.rst
··· 182 182 8123_pci.c 183 183 8123_bin.o_shipped <= Binary blob 184 184 185 - --- 3.1 Shared Makefile 185 + 3.1 Shared Makefile 186 + ------------------- 186 187 187 188 An external module always includes a wrapper makefile that 188 189 supports building the module using "make" with no arguments. ··· 471 470 472 471 The syntax of the Module.symvers file is:: 473 472 474 - <CRC> <Symbol> <Module> <Export Type> <Namespace> 473 + <CRC> <Symbol> <Module> <Export Type> <Namespace> 475 474 476 - 0xe1cc2a05 usb_stor_suspend drivers/usb/storage/usb-storage EXPORT_SYMBOL_GPL USB_STORAGE 475 + 0xe1cc2a05 usb_stor_suspend drivers/usb/storage/usb-storage EXPORT_SYMBOL_GPL USB_STORAGE 477 476 478 477 The fields are separated by tabs and values may be empty (e.g. 479 478 if no namespace is defined for an exported symbol).
+1 -1
Documentation/kbuild/reproducible-builds.rst
··· 101 101 102 102 If you enable ``CONFIG_GCC_PLUGIN_RANDSTRUCT``, you will need to 103 103 pre-generate the random seed in 104 - ``scripts/gcc-plgins/randomize_layout_seed.h`` so the same value 104 + ``scripts/gcc-plugins/randomize_layout_seed.h`` so the same value 105 105 is used in rebuilds. 106 106 107 107 Debug info conflicts
+1 -1
Documentation/mips/ingenic-tcu.rst
··· 68 68 drivers access their registers through the same regmap. 69 69 70 70 For more information regarding the devicetree bindings of the TCU drivers, 71 - have a look at Documentation/devicetree/bindings/timer/ingenic,tcu.txt. 71 + have a look at Documentation/devicetree/bindings/timer/ingenic,tcu.yaml.
+1 -1
Documentation/networking/arcnet.rst
··· 434 434 ifconfig arc0 insight 435 435 route add insight arc0 436 436 route add freedom arc0 /* I would use the subnet here (like I said 437 - to to in "single protocol" above), 437 + to in "single protocol" above), 438 438 but the rest of the subnet 439 439 unfortunately lies across the PPP 440 440 link on freedom, which confuses
+1 -1
Documentation/networking/ax25.rst
··· 6 6 7 7 To use the amateur radio protocols within Linux you will need to get a 8 8 suitable copy of the AX.25 Utilities. More detailed information about 9 - AX.25, NET/ROM and ROSE, associated programs and and utilities can be 9 + AX.25, NET/ROM and ROSE, associated programs and utilities can be 10 10 found on http://www.linux-ax25.org. 11 11 12 12 There is an active mailing list for discussing Linux amateur radio matters
+13 -6
Documentation/networking/bareudp.rst
··· 26 26 27 27 1) Device creation & deletion 28 28 29 - a) ip link add dev bareudp0 type bareudp dstport 6635 ethertype 0x8847. 29 + a) ip link add dev bareudp0 type bareudp dstport 6635 ethertype mpls_uc 30 30 31 31 This creates a bareudp tunnel device which tunnels L3 traffic with ethertype 32 32 0x8847 (MPLS traffic). The destination port of the UDP header will be set to ··· 34 34 35 35 b) ip link delete bareudp0 36 36 37 - 2) Device creation with multiple proto mode enabled 37 + 2) Device creation with multiproto mode enabled 38 38 39 - There are two ways to create a bareudp device for MPLS & IP with multiproto mode 40 - enabled. 39 + The multiproto mode allows bareudp tunnels to handle several protocols of the 40 + same family. It is currently only available for IP and MPLS. This mode has to 41 + be enabled explicitly with the "multiproto" flag. 41 42 42 - a) ip link add dev bareudp0 type bareudp dstport 6635 ethertype 0x8847 multiproto 43 + a) ip link add dev bareudp0 type bareudp dstport 6635 ethertype ipv4 multiproto 43 44 44 - b) ip link add dev bareudp0 type bareudp dstport 6635 ethertype mpls 45 + For an IPv4 tunnel the multiproto mode allows the tunnel to also handle 46 + IPv6. 47 + 48 + b) ip link add dev bareudp0 type bareudp dstport 6635 ethertype mpls_uc multiproto 49 + 50 + For MPLS, the multiproto mode allows the tunnel to handle both unicast 51 + and multicast MPLS packets. 45 52 46 53 3) Device Usage 47 54
+2 -2
Documentation/networking/can_ucan_protocol.rst
··· 144 144 145 145 *Host2Dev; mandatory* 146 146 147 - Setup bittiming by sending the the structure 147 + Setup bittiming by sending the structure 148 148 ``ucan_ctl_payload_t.cmd_set_bittiming`` (see ``struct bittiming`` for 149 149 details) 150 150 ··· 232 232 zero 233 233 234 234 The CAN device has sent a message to the CAN bus. It answers with a 235 - list of of tuples <echo-ids, flags>. 235 + list of tuples <echo-ids, flags>. 236 236 237 237 The echo-id identifies the frame from (echos the id from a previous 238 238 UCAN_OUT_TX message). The flag indicates the result of the
+1 -1
Documentation/networking/dsa/dsa.rst
··· 95 95 Networking stack hooks 96 96 ---------------------- 97 97 98 - When a master netdev is used with DSA, a small hook is placed in in the 98 + When a master netdev is used with DSA, a small hook is placed in the 99 99 networking stack is in order to have the DSA subsystem process the Ethernet 100 100 switch specific tagging protocol. DSA accomplishes this by registering a 101 101 specific (and fake) Ethernet type (later becoming ``skb->protocol``) with the
+1 -1
Documentation/networking/ip-sysctl.rst
··· 741 741 742 742 Default: 0x1 743 743 744 - Note that that additional client or server features are only 744 + Note that additional client or server features are only 745 745 effective if the basic support (0x1 and 0x2) are enabled respectively. 746 746 747 747 tcp_fastopen_blackhole_timeout_sec - INTEGER
+1 -1
Documentation/networking/ipvs-sysctl.rst
··· 114 114 modes (when there is no enough available memory, the strategy 115 115 is enabled and the variable is automatically set to 2, 116 116 otherwise the strategy is disabled and the variable is set to 117 - 1), and 3 means that that the strategy is always enabled. 117 + 1), and 3 means that the strategy is always enabled. 118 118 119 119 drop_packet - INTEGER 120 120 - 0 - disabled (default)
+1 -1
Documentation/networking/rxrpc.rst
··· 186 186 time [tunable] after the last connection using it discarded, in case a new 187 187 connection is made that could use it. 188 188 189 - (#) A client-side connection is only shared between calls if they have have 189 + (#) A client-side connection is only shared between calls if they have 190 190 the same key struct describing their security (and assuming the calls 191 191 would otherwise share the connection). Non-secured calls would also be 192 192 able to share connections with each other.
+1 -1
Documentation/powerpc/vas-api.rst
··· 213 213 updating CSB with the following data: 214 214 215 215 csb.flags = CSB_V; 216 - csb.cc = CSB_CC_TRANSLATION; 216 + csb.cc = CSB_CC_FAULT_ADDRESS; 217 217 csb.ce = CSB_CE_TERMINATION; 218 218 csb.address = fault_address; 219 219
+1 -1
Documentation/process/changes.rst
··· 29 29 ====================== =============== ======================================== 30 30 Program Minimal version Command to check the version 31 31 ====================== =============== ======================================== 32 - GNU C 4.8 gcc --version 32 + GNU C 4.9 gcc --version 33 33 GNU make 3.81 make --version 34 34 binutils 2.23 ld -v 35 35 flex 2.5.35 flex --version
+20
Documentation/process/coding-style.rst
··· 319 319 problem, which is called the function-growth-hormone-imbalance syndrome. 320 320 See chapter 6 (Functions). 321 321 322 + For symbol names and documentation, avoid introducing new usage of 323 + 'master / slave' (or 'slave' independent of 'master') and 'blacklist / 324 + whitelist'. 325 + 326 + Recommended replacements for 'master / slave' are: 327 + '{primary,main} / {secondary,replica,subordinate}' 328 + '{initiator,requester} / {target,responder}' 329 + '{controller,host} / {device,worker,proxy}' 330 + 'leader / follower' 331 + 'director / performer' 332 + 333 + Recommended replacements for 'blacklist/whitelist' are: 334 + 'denylist / allowlist' 335 + 'blocklist / passlist' 336 + 337 + Exceptions for introducing new usage is to maintain a userspace ABI/API, 338 + or when updating code for an existing (as of 2020) hardware or protocol 339 + specification that mandates those terms. For new specifications 340 + translate specification usage of the terminology to the kernel coding 341 + standard where possible. 322 342 323 343 5) Typedefs 324 344 -----------
+3 -2
Documentation/virt/kvm/api.rst
··· 4339 4339 #define KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE 0x00000001 4340 4340 4341 4341 struct kvm_vmx_nested_state_hdr { 4342 - __u32 flags; 4343 4342 __u64 vmxon_pa; 4344 4343 __u64 vmcs12_pa; 4345 - __u64 preemption_timer_deadline; 4346 4344 4347 4345 struct { 4348 4346 __u16 flags; 4349 4347 } smm; 4348 + 4349 + __u32 flags; 4350 + __u64 preemption_timer_deadline; 4350 4351 }; 4351 4352 4352 4353 struct kvm_vmx_nested_state_data {
+29 -16
MAINTAINERS
··· 2929 2929 2930 2930 ATMEL MACB ETHERNET DRIVER 2931 2931 M: Nicolas Ferre <nicolas.ferre@microchip.com> 2932 + M: Claudiu Beznea <claudiu.beznea@microchip.com> 2932 2933 S: Supported 2933 2934 F: drivers/net/ethernet/cadence/ 2934 2935 ··· 3307 3306 3308 3307 BPF JIT for S390 3309 3308 M: Ilya Leoshkevich <iii@linux.ibm.com> 3310 - M: Heiko Carstens <heiko.carstens@de.ibm.com> 3309 + M: Heiko Carstens <hca@linux.ibm.com> 3311 3310 M: Vasily Gorbik <gor@linux.ibm.com> 3312 3311 L: netdev@vger.kernel.org 3313 3312 L: bpf@vger.kernel.org ··· 3947 3946 S: Supported 3948 3947 F: drivers/char/hw_random/cctrng.c 3949 3948 F: drivers/char/hw_random/cctrng.h 3950 - F: Documentation/devicetree/bindings/rng/arm-cctrng.txt 3949 + F: Documentation/devicetree/bindings/rng/arm-cctrng.yaml 3951 3950 W: https://developer.arm.com/products/system-ip/trustzone-cryptocell/cryptocell-700-family 3952 3951 3953 3952 CEC FRAMEWORK ··· 5022 5021 F: drivers/pinctrl/pinctrl-da90??.c 5023 5022 F: drivers/power/supply/da9052-battery.c 5024 5023 F: drivers/power/supply/da91??-*.c 5025 - F: drivers/regulator/da903x.c 5026 5024 F: drivers/regulator/da9???-regulator.[ch] 5027 5025 F: drivers/regulator/slg51000-regulator.[ch] 5028 5026 F: drivers/rtc/rtc-da90??.c ··· 5111 5111 L: dmaengine@vger.kernel.org 5112 5112 S: Maintained 5113 5113 Q: https://patchwork.kernel.org/project/linux-dmaengine/list/ 5114 - T: git git://git.infradead.org/users/vkoul/slave-dma.git 5114 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine.git 5115 5115 F: Documentation/devicetree/bindings/dma/ 5116 5116 F: Documentation/driver-api/dmaengine/ 5117 5117 F: drivers/dma/ ··· 5490 5490 DRM DRIVER FOR RAYDIUM RM67191 PANELS 5491 5491 M: Robert Chiras <robert.chiras@nxp.com> 5492 5492 S: Maintained 5493 - F: Documentation/devicetree/bindings/display/panel/raydium,rm67191.txt 5493 + F: Documentation/devicetree/bindings/display/panel/raydium,rm67191.yaml 5494 5494 F: 
drivers/gpu/drm/panel/panel-raydium-rm67191.c 5495 5495 5496 5496 DRM DRIVER FOR ROCKTECH JH057N00900 PANELS ··· 6956 6956 M: Nicolin Chen <nicoleotsuka@gmail.com> 6957 6957 M: Xiubo Li <Xiubo.Lee@gmail.com> 6958 6958 R: Fabio Estevam <festevam@gmail.com> 6959 + R: Shengjiu Wang <shengjiu.wang@gmail.com> 6959 6960 L: alsa-devel@alsa-project.org (moderated for non-subscribers) 6960 6961 L: linuxppc-dev@lists.ozlabs.org 6961 6962 S: Maintained ··· 9306 9305 F: scripts/Kconfig.include 9307 9306 F: scripts/kconfig/ 9308 9307 9308 + KCOV 9309 + R: Dmitry Vyukov <dvyukov@google.com> 9310 + R: Andrey Konovalov <andreyknvl@google.com> 9311 + L: kasan-dev@googlegroups.com 9312 + S: Maintained 9313 + F: Documentation/dev-tools/kcov.rst 9314 + F: include/linux/kcov.h 9315 + F: include/uapi/linux/kcov.h 9316 + F: kernel/kcov.c 9317 + F: scripts/Makefile.kcov 9318 + 9309 9319 KCSAN 9310 9320 M: Marco Elver <elver@google.com> 9311 9321 R: Dmitry Vyukov <dvyukov@google.com> ··· 11252 11240 F: drivers/crypto/atmel-ecc.* 11253 11241 11254 11242 MICROCHIP I2C DRIVER 11255 - M: Ludovic Desroches <ludovic.desroches@microchip.com> 11243 + M: Codrin Ciubotariu <codrin.ciubotariu@microchip.com> 11256 11244 L: linux-i2c@vger.kernel.org 11257 11245 S: Supported 11258 11246 F: drivers/i2c/busses/i2c-at91-*.c ··· 11345 11333 F: include/dt-bindings/iio/adc/at91-sama5d2_adc.h 11346 11334 11347 11335 MICROCHIP SAMA5D2-COMPATIBLE SHUTDOWN CONTROLLER 11348 - M: Nicolas Ferre <nicolas.ferre@microchip.com> 11336 + M: Claudiu Beznea <claudiu.beznea@microchip.com> 11349 11337 S: Supported 11350 11338 F: drivers/power/reset/at91-sama5d2_shdwc.c 11351 11339 11352 11340 MICROCHIP SPI DRIVER 11353 - M: Nicolas Ferre <nicolas.ferre@microchip.com> 11341 + M: Tudor Ambarus <tudor.ambarus@microchip.com> 11354 11342 S: Supported 11355 11343 F: drivers/spi/spi-atmel.* 11356 11344 11357 11345 MICROCHIP SSC DRIVER 11358 - M: Nicolas Ferre <nicolas.ferre@microchip.com> 11346 + M: Codrin Ciubotariu 
<codrin.ciubotariu@microchip.com> 11359 11347 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 11360 11348 S: Supported 11361 11349 F: drivers/misc/atmel-ssc.c ··· 14586 14574 M: Niklas Söderlund <niklas.soderlund@ragnatech.se> 14587 14575 L: linux-renesas-soc@vger.kernel.org 14588 14576 S: Supported 14589 - F: Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.txt 14590 - F: Documentation/devicetree/bindings/thermal/rcar-thermal.txt 14577 + F: Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml 14578 + F: Documentation/devicetree/bindings/thermal/rcar-thermal.yaml 14591 14579 F: drivers/thermal/rcar_gen3_thermal.c 14592 14580 F: drivers/thermal/rcar_thermal.c 14593 14581 ··· 14843 14831 F: drivers/video/fbdev/savage/ 14844 14832 14845 14833 S390 14846 - M: Heiko Carstens <heiko.carstens@de.ibm.com> 14834 + M: Heiko Carstens <hca@linux.ibm.com> 14847 14835 M: Vasily Gorbik <gor@linux.ibm.com> 14848 14836 M: Christian Borntraeger <borntraeger@de.ibm.com> 14849 14837 L: linux-s390@vger.kernel.org ··· 14874 14862 F: include/linux/dasd_mod.h 14875 14863 14876 14864 S390 IOMMU (PCI) 14877 - M: Gerald Schaefer <gerald.schaefer@de.ibm.com> 14865 + M: Matthew Rosato <mjrosato@linux.ibm.com> 14866 + M: Gerald Schaefer <gerald.schaefer@linux.ibm.com> 14878 14867 L: linux-s390@vger.kernel.org 14879 14868 S: Supported 14880 14869 W: http://www.ibm.com/developerworks/linux/linux390/ ··· 14903 14890 14904 14891 S390 PCI SUBSYSTEM 14905 14892 M: Niklas Schnelle <schnelle@linux.ibm.com> 14906 - M: Gerald Schaefer <gerald.schaefer@de.ibm.com> 14893 + M: Gerald Schaefer <gerald.schaefer@linux.ibm.com> 14907 14894 L: linux-s390@vger.kernel.org 14908 14895 S: Supported 14909 14896 W: http://www.ibm.com/developerworks/linux/linux390/ ··· 17526 17513 F: fs/ufs/ 17527 17514 17528 17515 UHID USERSPACE HID IO DRIVER 17529 - M: David Herrmann <dh.herrmann@googlemail.com> 17516 + M: David Rheinsberg <david.rheinsberg@gmail.com> 17530 17517 L: 
linux-input@vger.kernel.org 17531 17518 S: Maintained 17532 17519 F: drivers/hid/uhid.c ··· 18485 18472 F: drivers/rtc/rtc-sd3078.c 18486 18473 18487 18474 WIIMOTE HID DRIVER 18488 - M: David Herrmann <dh.herrmann@googlemail.com> 18475 + M: David Rheinsberg <david.rheinsberg@gmail.com> 18489 18476 L: linux-input@vger.kernel.org 18490 18477 S: Maintained 18491 18478 F: drivers/hid/hid-wiimote*
+5 -5
Makefile
··· 2 2 VERSION = 5 3 3 PATCHLEVEL = 8 4 4 SUBLEVEL = 0 5 - EXTRAVERSION = -rc3 5 + EXTRAVERSION = -rc7 6 6 NAME = Kleptomaniac Octopus 7 7 8 8 # *DOCUMENTATION* ··· 567 567 ifneq ($(CROSS_COMPILE),) 568 568 CLANG_FLAGS += --target=$(notdir $(CROSS_COMPILE:%-=%)) 569 569 GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit)) 570 - CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR) 570 + CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE)) 571 571 GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..) 572 572 endif 573 573 ifneq ($(GCC_TOOLCHAIN),) ··· 970 970 endif 971 971 972 972 # Align the bit size of userspace programs with the kernel 973 - KBUILD_USERCFLAGS += $(filter -m32 -m64, $(KBUILD_CFLAGS)) 974 - KBUILD_USERLDFLAGS += $(filter -m32 -m64, $(KBUILD_CFLAGS)) 973 + KBUILD_USERCFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CFLAGS)) 974 + KBUILD_USERLDFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CFLAGS)) 975 975 976 976 # make the checker run with the right architecture 977 977 CHECKFLAGS += --arch=$(ARCH) ··· 1754 1754 descend: $(build-dirs) 1755 1755 $(build-dirs): prepare 1756 1756 $(Q)$(MAKE) $(build)=$@ \ 1757 - single-build=$(if $(filter-out $@/, $(filter $@/%, $(single-no-ko))),1) \ 1757 + single-build=$(if $(filter-out $@/, $(filter $@/%, $(KBUILD_SINGLE_TARGETS))),1) \ 1758 1758 need-builtin=1 need-modorder=1 1759 1759 1760 1760 clean-dirs := $(addprefix _clean_, $(clean-dirs))
+15
arch/arc/Kconfig
··· 170 170 171 171 endchoice 172 172 173 + config ARC_TUNE_MCPU 174 + string "Override default -mcpu compiler flag" 175 + default "" 176 + help 177 + Override default -mcpu=xxx compiler flag (which is set depending on 178 + the ISA version) with the specified value. 179 + NOTE: If specified flag isn't supported by current compiler the 180 + ISA default value will be used as a fallback. 181 + 173 182 config CPU_BIG_ENDIAN 174 183 bool "Enable Big Endian Mode" 175 184 help ··· 473 464 On HS cores, taken interrupt auto saves the regfile on stack. 474 465 This is programmable and can be optionally disabled in which case 475 466 software INTERRUPT_PROLOGUE/EPILGUE do the needed work 467 + 468 + config ARC_LPB_DISABLE 469 + bool "Disable loop buffer (LPB)" 470 + help 471 + On HS cores, loop buffer (LPB) is programmable in runtime and can 472 + be optionally disabled. 476 473 477 474 endif # ISA_ARCV2 478 475
+19 -2
arch/arc/Makefile
··· 10 10 endif 11 11 12 12 cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__ 13 - cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7 14 - cflags-$(CONFIG_ISA_ARCV2) += -mcpu=hs38 13 + 14 + tune-mcpu-def-$(CONFIG_ISA_ARCOMPACT) := -mcpu=arc700 15 + tune-mcpu-def-$(CONFIG_ISA_ARCV2) := -mcpu=hs38 16 + 17 + ifeq ($(CONFIG_ARC_TUNE_MCPU),"") 18 + cflags-y += $(tune-mcpu-def-y) 19 + else 20 + tune-mcpu := $(shell echo $(CONFIG_ARC_TUNE_MCPU)) 21 + tune-mcpu-ok := $(call cc-option-yn, $(tune-mcpu)) 22 + ifeq ($(tune-mcpu-ok),y) 23 + cflags-y += $(tune-mcpu) 24 + else 25 + # The flag provided by 'CONFIG_ARC_TUNE_MCPU' option isn't known by this compiler 26 + # (probably the compiler is too old). Use ISA default mcpu flag instead as a safe option. 27 + $(warning ** WARNING ** CONFIG_ARC_TUNE_MCPU flag '$(tune-mcpu)' is unknown, fallback to '$(tune-mcpu-def-y)') 28 + cflags-y += $(tune-mcpu-def-y) 29 + endif 30 + endif 31 + 15 32 16 33 ifdef CONFIG_ARC_CURR_IN_REG 17 34 # For a global register defintion, make sure it gets passed to every file
+1 -1
arch/arc/include/asm/elf.h
··· 19 19 #define R_ARC_32_PCREL 0x31 20 20 21 21 /*to set parameters in the core dumps */ 22 - #define ELF_ARCH EM_ARCOMPACT 22 + #define ELF_ARCH EM_ARC_INUSE 23 23 #define ELF_CLASS ELFCLASS32 24 24 25 25 #ifdef CONFIG_CPU_BIG_ENDIAN
+4 -1
arch/arc/include/asm/irqflags-compact.h
··· 90 90 /* 91 91 * Unconditionally Enable IRQs 92 92 */ 93 + #ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS 94 + extern void arch_local_irq_enable(void); 95 + #else 93 96 static inline void arch_local_irq_enable(void) 94 97 { 95 98 unsigned long temp; ··· 105 102 : "n"((STATUS_E1_MASK | STATUS_E2_MASK)) 106 103 : "cc", "memory"); 107 104 } 108 - 105 + #endif 109 106 110 107 /* 111 108 * Unconditionally Disable IRQs
+5 -11
arch/arc/kernel/entry.S
··· 165 165 tracesys: 166 166 ; save EFA in case tracer wants the PC of traced task 167 167 ; using ERET won't work since next-PC has already committed 168 - lr r12, [efa] 169 168 GET_CURR_TASK_FIELD_PTR TASK_THREAD, r11 170 169 st r12, [r11, THREAD_FAULT_ADDR] ; thread.fault_address 171 170 ··· 207 208 ; Breakpoint TRAP 208 209 ; --------------------------------------------- 209 210 trap_with_param: 210 - 211 - ; stop_pc info by gdb needs this info 212 - lr r0, [efa] 211 + mov r0, r12 ; EFA in case ptracer/gdb wants stop_pc 213 212 mov r1, sp 214 - 215 - ; Now that we have read EFA, it is safe to do "fake" rtie 216 - ; and get out of CPU exception mode 217 - FAKE_RET_FROM_EXCPN 218 213 219 214 ; Save callee regs in case gdb wants to have a look 220 215 ; SP will grow up by size of CALLEE Reg-File ··· 236 243 237 244 EXCEPTION_PROLOGUE 238 245 246 + lr r12, [efa] 247 + 248 + FAKE_RET_FROM_EXCPN 249 + 239 250 ;============ TRAP 1 :breakpoints 240 251 ; Check ECR for trap with arg (PROLOGUE ensures r10 has ECR) 241 252 bmsk.f 0, r10, 7 242 253 bnz trap_with_param 243 254 244 255 ;============ TRAP (no param): syscall top level 245 - 246 - ; First return from Exception to pure K mode (Exception/IRQs renabled) 247 - FAKE_RET_FROM_EXCPN 248 256 249 257 ; If syscall tracing ongoing, invoke pre-post-hooks 250 258 GET_CURR_THR_INFO_FLAGS r10
+8
arch/arc/kernel/head.S
··· 59 59 bclr r5, r5, STATUS_AD_BIT 60 60 #endif 61 61 kflag r5 62 + 63 + #ifdef CONFIG_ARC_LPB_DISABLE 64 + lr r5, [ARC_REG_LPB_BUILD] 65 + breq r5, 0, 1f ; LPB doesn't exist 66 + mov r5, 1 67 + sr r5, [ARC_REG_LPB_CTRL] 68 + 1: 69 + #endif /* CONFIG_ARC_LPB_DISABLE */ 62 70 #endif 63 71 ; Config DSP_CTRL properly, so kernel may use integer multiply, 64 72 ; multiply-accumulate, and divide operations
+7 -12
arch/arc/kernel/setup.c
··· 58 58 { 0x00, NULL } 59 59 }; 60 60 61 - static const struct id_to_str arc_cpu_rel[] = { 61 + static const struct id_to_str arc_hs_ver54_rel[] = { 62 62 /* UARCH.MAJOR, Release */ 63 63 { 0, "R3.10a"}, 64 64 { 1, "R3.50a"}, 65 + { 2, "R3.60a"}, 66 + { 3, "R4.00a"}, 65 67 { 0xFF, NULL } 66 68 }; 67 69 ··· 119 117 struct bcr_uarch_build_arcv2 uarch; 120 118 const struct id_to_str *tbl; 121 119 122 - /* 123 - * Up until (including) the first core4 release (0x54) things were 124 - * simple: AUX IDENTITY.ARCVER was sufficient to identify arc family 125 - * and release: 0x50 to 0x53 was HS38, 0x54 was HS48 (dual issue) 126 - */ 127 - 128 120 if (cpu->core.family < 0x54) { /* includes arc700 */ 129 121 130 122 for (tbl = &arc_legacy_rel[0]; tbl->id != 0; tbl++) { ··· 139 143 } 140 144 141 145 /* 142 - * However the subsequent HS release (same 0x54) allow HS38 or HS48 143 - * configurations and encode this info in a different BCR. 144 - * The BCR was introduced in 0x54 so can't be read unconditionally. 146 + * Initial HS cores bumped AUX IDENTITY.ARCVER for each release until 147 + * ARCVER 0x54 which introduced AUX MICRO_ARCH_BUILD and subsequent 148 + * releases only update it. 145 149 */ 146 - 147 150 READ_BCR(ARC_REG_MICRO_ARCH_BCR, uarch); 148 151 149 152 if (uarch.prod == 4) { ··· 153 158 cpu->name = "HS38"; 154 159 } 155 160 156 - for (tbl = &arc_cpu_rel[0]; tbl->id != 0xFF; tbl++) { 161 + for (tbl = &arc_hs_ver54_rel[0]; tbl->id != 0xFF; tbl++) { 157 162 if (uarch.maj == tbl->id) { 158 163 cpu->release = tbl->str; 159 164 break;
+10 -4
arch/arm/boot/dts/am437x-l4.dtsi
··· 1540 1540 reg = <0xcc020 0x4>; 1541 1541 reg-names = "rev"; 1542 1542 /* Domains (P, C): per_pwrdm, l4ls_clkdm */ 1543 - clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN0_CLKCTRL 0>; 1544 - clock-names = "fck"; 1543 + clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN0_CLKCTRL 0>, 1544 + <&dcan0_fck>; 1545 + clock-names = "fck", "osc"; 1545 1546 #address-cells = <1>; 1546 1547 #size-cells = <1>; 1547 1548 ranges = <0x0 0xcc000 0x2000>; ··· 1550 1549 dcan0: can@0 { 1551 1550 compatible = "ti,am4372-d_can", "ti,am3352-d_can"; 1552 1551 reg = <0x0 0x2000>; 1552 + clocks = <&dcan0_fck>; 1553 + clock-names = "fck"; 1553 1554 syscon-raminit = <&scm_conf 0x644 0>; 1554 1555 interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>; 1555 1556 status = "disabled"; ··· 1563 1560 reg = <0xd0020 0x4>; 1564 1561 reg-names = "rev"; 1565 1562 /* Domains (P, C): per_pwrdm, l4ls_clkdm */ 1566 - clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN1_CLKCTRL 0>; 1567 - clock-names = "fck"; 1563 + clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN1_CLKCTRL 0>, 1564 + <&dcan1_fck>; 1565 + clock-names = "fck", "osc"; 1568 1566 #address-cells = <1>; 1569 1567 #size-cells = <1>; 1570 1568 ranges = <0x0 0xd0000 0x2000>; ··· 1573 1569 dcan1: can@0 { 1574 1570 compatible = "ti,am4372-d_can", "ti,am3352-d_can"; 1575 1571 reg = <0x0 0x2000>; 1572 + clocks = <&dcan1_fck>; 1573 + clock-name = "fck"; 1576 1574 syscon-raminit = <&scm_conf 0x644 1>; 1577 1575 interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>; 1578 1576 status = "disabled";
+1 -1
arch/arm/boot/dts/imx6qdl-gw551x.dtsi
··· 110 110 simple-audio-card,frame-master = <&sound_codec>; 111 111 112 112 sound_cpu: simple-audio-card,cpu { 113 - sound-dai = <&ssi2>; 113 + sound-dai = <&ssi1>; 114 114 }; 115 115 116 116 sound_codec: simple-audio-card,codec {
+1 -1
arch/arm/boot/dts/meson.dtsi
··· 11 11 #size-cells = <1>; 12 12 interrupt-parent = <&gic>; 13 13 14 - L2: l2-cache-controller@c4200000 { 14 + L2: cache-controller@c4200000 { 15 15 compatible = "arm,pl310-cache"; 16 16 reg = <0xc4200000 0x1000>; 17 17 cache-unified;
+8 -4
arch/arm/boot/dts/omap3-n900.dts
··· 105 105 linux,code = <SW_FRONT_PROXIMITY>; 106 106 linux,can-disable; 107 107 }; 108 + 109 + machine_cover { 110 + label = "Machine Cover"; 111 + gpios = <&gpio6 0 GPIO_ACTIVE_LOW>; /* 160 */ 112 + linux,input-type = <EV_SW>; 113 + linux,code = <SW_MACHINE_COVER>; 114 + linux,can-disable; 115 + }; 108 116 }; 109 117 110 118 isp1707: isp1707 { ··· 827 819 pinctrl-0 = <&mmc1_pins>; 828 820 vmmc-supply = <&vmmc1>; 829 821 bus-width = <4>; 830 - /* For debugging, it is often good idea to remove this GPIO. 831 - It means you can remove back cover (to reboot by removing 832 - battery) and still use the MMC card. */ 833 - cd-gpios = <&gpio6 0 GPIO_ACTIVE_LOW>; /* 160 */ 834 822 }; 835 823 836 824 /* most boards use vaux3, only some old versions use vmmc2 instead */
+1 -1
arch/arm/boot/dts/socfpga.dtsi
··· 726 726 }; 727 727 }; 728 728 729 - L2: l2-cache@fffef000 { 729 + L2: cache-controller@fffef000 { 730 730 compatible = "arm,pl310-cache"; 731 731 reg = <0xfffef000 0x1000>; 732 732 interrupts = <0 38 0x04>;
+1 -1
arch/arm/boot/dts/socfpga_arria10.dtsi
··· 636 636 reg = <0xffcfb100 0x80>; 637 637 }; 638 638 639 - L2: l2-cache@fffff000 { 639 + L2: cache-controller@fffff000 { 640 640 compatible = "arm,pl310-cache"; 641 641 reg = <0xfffff000 0x1000>; 642 642 interrupts = <0 18 IRQ_TYPE_LEVEL_HIGH>;
-9
arch/arm/kernel/asm-offsets.c
··· 31 31 #if defined(__APCS_26__) 32 32 #error Sorry, your compiler targets APCS-26 but this kernel requires APCS-32 33 33 #endif 34 - /* 35 - * GCC 4.8.0-4.8.2: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58854 36 - * miscompiles find_get_entry(), and can result in EXT3 and EXT4 37 - * filesystem corruption (possibly other FS too). 38 - */ 39 - #if defined(GCC_VERSION) && GCC_VERSION >= 40800 && GCC_VERSION < 40803 40 - #error Your compiler is too buggy; it is known to miscompile kernels 41 - #error and result in filesystem corruption and oopses. 42 - #endif 43 34 44 35 int main(void) 45 36 {
+1 -1
arch/arm/mach-imx/devices/devices-common.h
··· 289 289 const struct spi_imx_master *pdata); 290 290 291 291 struct platform_device *imx_add_imx_dma(char *name, resource_size_t iobase, 292 - int irq, int irq_err); 292 + int irq); 293 293 struct platform_device *imx_add_imx_sdma(char *name, 294 294 resource_size_t iobase, int irq, struct sdma_platform_data *pdata);
+3 -2
arch/arm/mach-imx/devices/platform-gpio-mxc.c
··· 24 24 .flags = IORESOURCE_IRQ, 25 25 }, 26 26 }; 27 + unsigned int nres; 27 28 28 - return platform_device_register_resndata(&mxc_aips_bus, 29 - name, id, res, ARRAY_SIZE(res), NULL, 0); 29 + nres = irq_high ? ARRAY_SIZE(res) : ARRAY_SIZE(res) - 1; 30 + return platform_device_register_resndata(&mxc_aips_bus, name, id, res, nres, NULL, 0); 30 31 }
+1 -5
arch/arm/mach-imx/devices/platform-imx-dma.c
··· 6 6 #include "devices-common.h" 7 7 8 8 struct platform_device __init __maybe_unused *imx_add_imx_dma(char *name, 9 - resource_size_t iobase, int irq, int irq_err) 9 + resource_size_t iobase, int irq) 10 10 { 11 11 struct resource res[] = { 12 12 { ··· 16 16 }, { 17 17 .start = irq, 18 18 .end = irq, 19 - .flags = IORESOURCE_IRQ, 20 - }, { 21 - .start = irq_err, 22 - .end = irq_err, 23 19 .flags = IORESOURCE_IRQ, 24 20 }, 25 21 };
+1 -2
arch/arm/mach-imx/mm-imx21.c
··· 78 78 mxc_register_gpio("imx21-gpio", 5, MX21_GPIO6_BASE_ADDR, SZ_256, MX21_INT_GPIO, 0); 79 79 80 80 pinctrl_provide_dummies(); 81 - imx_add_imx_dma("imx21-dma", MX21_DMA_BASE_ADDR, 82 - MX21_INT_DMACH0, 0); /* No ERR irq */ 81 + imx_add_imx_dma("imx21-dma", MX21_DMA_BASE_ADDR, MX21_INT_DMACH0); 83 82 platform_device_register_simple("imx21-audmux", 0, imx21_audmux_res, 84 83 ARRAY_SIZE(imx21_audmux_res)); 85 84 }
+1 -2
arch/arm/mach-imx/mm-imx27.c
··· 79 79 mxc_register_gpio("imx21-gpio", 5, MX27_GPIO6_BASE_ADDR, SZ_256, MX27_INT_GPIO, 0); 80 80 81 81 pinctrl_provide_dummies(); 82 - imx_add_imx_dma("imx27-dma", MX27_DMA_BASE_ADDR, 83 - MX27_INT_DMACH0, 0); /* No ERR irq */ 82 + imx_add_imx_dma("imx27-dma", MX27_DMA_BASE_ADDR, MX27_INT_DMACH0); 84 83 /* imx27 has the imx21 type audmux */ 85 84 platform_device_register_simple("imx21-audmux", 0, imx27_audmux_res, 86 85 ARRAY_SIZE(imx27_audmux_res));
+11 -3
arch/arm/mach-omap2/omap_hwmod.c
··· 3435 3435 regs = ioremap(data->module_pa, 3436 3436 data->module_size); 3437 3437 if (!regs) 3438 - return -ENOMEM; 3438 + goto out_free_sysc; 3439 3439 } 3440 3440 3441 3441 /* ··· 3445 3445 if (oh->class->name && strcmp(oh->class->name, data->name)) { 3446 3446 class = kmemdup(oh->class, sizeof(*oh->class), GFP_KERNEL); 3447 3447 if (!class) 3448 - return -ENOMEM; 3448 + goto out_unmap; 3449 3449 } 3450 3450 3451 3451 if (list_empty(&oh->slave_ports)) { 3452 3452 oi = kcalloc(1, sizeof(*oi), GFP_KERNEL); 3453 3453 if (!oi) 3454 - return -ENOMEM; 3454 + goto out_free_class; 3455 3455 3456 3456 /* 3457 3457 * Note that we assume interconnect interface clocks will be ··· 3478 3478 spin_unlock_irqrestore(&oh->_lock, flags); 3479 3479 3480 3480 return 0; 3481 + 3482 + out_free_class: 3483 + kfree(class); 3484 + out_unmap: 3485 + iounmap(regs); 3486 + out_free_sysc: 3487 + kfree(sysc); 3488 + return -ENOMEM; 3481 3489 } 3482 3490 3483 3491 static const struct omap_hwmod_reset omap24xx_reset_quirks[] = {
-1
arch/arm/xen/enlighten.c
··· 241 241 * see Documentation/devicetree/bindings/arm/xen.txt for the 242 242 * documentation of the Xen Device Tree format. 243 243 */ 244 - #define GRANT_TABLE_PHYSADDR 0 245 244 void __init xen_early_init(void) 246 245 { 247 246 of_scan_flat_dt(fdt_find_hyper_node, NULL);
+1 -1
arch/arm64/Makefile
··· 137 137 138 138 core-y += arch/arm64/ 139 139 libs-y := arch/arm64/lib/ $(libs-y) 140 - core-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a 140 + libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a 141 141 142 142 # Default target when executing plain make 143 143 boot := arch/arm64/boot
+4 -4
arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
··· 77 77 method = "smc"; 78 78 }; 79 79 80 - intc: intc@fffc1000 { 80 + intc: interrupt-controller@fffc1000 { 81 81 compatible = "arm,gic-400", "arm,cortex-a15-gic"; 82 82 #interrupt-cells = <3>; 83 83 interrupt-controller; ··· 302 302 status = "disabled"; 303 303 }; 304 304 305 - nand: nand@ffb90000 { 305 + nand: nand-controller@ffb90000 { 306 306 #address-cells = <1>; 307 307 #size-cells = <0>; 308 308 compatible = "altr,socfpga-denali-nand"; ··· 445 445 clock-names = "timer"; 446 446 }; 447 447 448 - uart0: serial0@ffc02000 { 448 + uart0: serial@ffc02000 { 449 449 compatible = "snps,dw-apb-uart"; 450 450 reg = <0xffc02000 0x100>; 451 451 interrupts = <0 108 4>; ··· 456 456 status = "disabled"; 457 457 }; 458 458 459 - uart1: serial1@ffc02100 { 459 + uart1: serial@ffc02100 { 460 460 compatible = "snps,dw-apb-uart"; 461 461 reg = <0xffc02100 0x100>; 462 462 interrupts = <0 109 4>;
+1
arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts
··· 155 155 }; 156 156 157 157 &qspi { 158 + status = "okay"; 158 159 flash@0 { 159 160 #address-cells = <1>; 160 161 #size-cells = <1>;
+4 -3
arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts
··· 188 188 }; 189 189 190 190 &qspi { 191 + status = "okay"; 191 192 flash@0 { 192 193 #address-cells = <1>; 193 194 #size-cells = <1>; ··· 212 211 213 212 qspi_boot: partition@0 { 214 213 label = "Boot and fpga data"; 215 - reg = <0x0 0x034B0000>; 214 + reg = <0x0 0x03FE0000>; 216 215 }; 217 216 218 - qspi_rootfs: partition@4000000 { 217 + qspi_rootfs: partition@3FE0000 { 219 218 label = "Root Filesystem - JFFS2"; 220 - reg = <0x034B0000 0x0EB50000>; 219 + reg = <0x03FE0000 0x0C020000>; 221 220 }; 222 221 }; 223 222 };
+1 -1
arch/arm64/boot/dts/amlogic/meson-gxl-s805x-libretech-ac.dts
··· 10 10 #include <dt-bindings/input/input.h> 11 11 #include <dt-bindings/sound/meson-aiu.h> 12 12 13 - #include "meson-gxl-s905x.dtsi" 13 + #include "meson-gxl-s805x.dtsi" 14 14 15 15 / { 16 16 compatible = "libretech,aml-s805x-ac", "amlogic,s805x",
+1 -1
arch/arm64/boot/dts/amlogic/meson-gxl-s805x-p241.dts
··· 9 9 10 10 #include <dt-bindings/input/input.h> 11 11 12 - #include "meson-gxl-s905x.dtsi" 12 + #include "meson-gxl-s805x.dtsi" 13 13 14 14 / { 15 15 compatible = "amlogic,p241", "amlogic,s805x", "amlogic,meson-gxl";
+24
arch/arm64/boot/dts/amlogic/meson-gxl-s805x.dtsi
··· 1 + // SPDX-License-Identifier: (GPL-2.0+ OR MIT) 2 + /* 3 + * Copyright (c) 2020 BayLibre SAS 4 + * Author: Neil Armstrong <narmstrong@baylibre.com> 5 + */ 6 + 7 + #include "meson-gxl-s905x.dtsi" 8 + 9 + / { 10 + compatible = "amlogic,s805x", "amlogic,meson-gxl"; 11 + }; 12 + 13 + /* The S805X Package doesn't seem to handle the 744MHz OPP correctly */ 14 + &mali { 15 + assigned-clocks = <&clkc CLKID_MALI_0_SEL>, 16 + <&clkc CLKID_MALI_0>, 17 + <&clkc CLKID_MALI>; /* Glitch free mux */ 18 + assigned-clock-parents = <&clkc CLKID_FCLK_DIV3>, 19 + <0>, /* Do Nothing */ 20 + <&clkc CLKID_MALI_0>; 21 + assigned-clock-rates = <0>, /* Do Nothing */ 22 + <666666666>, 23 + <0>; /* Do Nothing */ 24 + };
+5
arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
··· 337 337 }; 338 338 }; 339 339 340 + &hwrng { 341 + clocks = <&clkc CLKID_RNG0>; 342 + clock-names = "core"; 343 + }; 344 + 340 345 &i2c_A { 341 346 clocks = <&clkc CLKID_I2C>; 342 347 };
+1
arch/arm64/boot/dts/intel/socfpga_agilex_socdk.dts
··· 98 98 }; 99 99 100 100 &qspi { 101 + status = "okay"; 101 102 flash@0 { 102 103 #address-cells = <1>; 103 104 #size-cells = <1>;
+1 -4
arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
··· 454 454 status = "okay"; 455 455 phy-mode = "2500base-x"; 456 456 phys = <&cp1_comphy5 2>; 457 - fixed-link { 458 - speed = <2500>; 459 - full-duplex; 460 - }; 457 + managed = "in-band-status"; 461 458 }; 462 459 463 460 &cp1_spi1 {
+1 -1
arch/arm64/configs/defconfig
··· 194 194 CONFIG_HOTPLUG_PCI_ACPI=y 195 195 CONFIG_PCI_AARDVARK=y 196 196 CONFIG_PCI_TEGRA=y 197 - CONFIG_PCIE_RCAR=y 197 + CONFIG_PCIE_RCAR_HOST=y 198 198 CONFIG_PCI_HOST_GENERIC=y 199 199 CONFIG_PCI_XGENE=y 200 200 CONFIG_PCIE_ALTERA=y
+8 -8
arch/arm64/include/asm/alternative.h
··· 73 73 ".pushsection .altinstructions,\"a\"\n" \ 74 74 ALTINSTR_ENTRY(feature) \ 75 75 ".popsection\n" \ 76 - ".pushsection .altinstr_replacement, \"a\"\n" \ 76 + ".subsection 1\n" \ 77 77 "663:\n\t" \ 78 78 newinstr "\n" \ 79 79 "664:\n\t" \ 80 - ".popsection\n\t" \ 80 + ".previous\n\t" \ 81 81 ".org . - (664b-663b) + (662b-661b)\n\t" \ 82 82 ".org . - (662b-661b) + (664b-663b)\n" \ 83 83 ".endif\n" ··· 117 117 662: .pushsection .altinstructions, "a" 118 118 altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f 119 119 .popsection 120 - .pushsection .altinstr_replacement, "ax" 120 + .subsection 1 121 121 663: \insn2 122 - 664: .popsection 122 + 664: .previous 123 123 .org . - (664b-663b) + (662b-661b) 124 124 .org . - (662b-661b) + (664b-663b) 125 125 .endif ··· 160 160 .pushsection .altinstructions, "a" 161 161 altinstruction_entry 663f, 661f, \cap, 664f-663f, 662f-661f 162 162 .popsection 163 - .pushsection .altinstr_replacement, "ax" 163 + .subsection 1 164 164 .align 2 /* So GAS knows label 661 is suitably aligned */ 165 165 661: 166 166 .endm ··· 179 179 .macro alternative_else 180 180 662: 181 181 .if .Lasm_alt_mode==0 182 - .pushsection .altinstr_replacement, "ax" 182 + .subsection 1 183 183 .else 184 - .popsection 184 + .previous 185 185 .endif 186 186 663: 187 187 .endm ··· 192 192 .macro alternative_endif 193 193 664: 194 194 .if .Lasm_alt_mode==0 195 - .popsection 195 + .previous 196 196 .endif 197 197 .org . - (664b-663b) + (662b-661b) 198 198 .org . - (662b-661b) + (664b-663b)
+1 -1
arch/arm64/include/asm/arch_gicv3.h
··· 109 109 return read_sysreg_s(SYS_ICC_PMR_EL1); 110 110 } 111 111 112 - static inline void gic_write_pmr(u32 val) 112 + static __always_inline void gic_write_pmr(u32 val) 113 113 { 114 114 write_sysreg_s(val, SYS_ICC_PMR_EL1); 115 115 }
+1
arch/arm64/include/asm/arch_timer.h
··· 58 58 u64 (*read_cntvct_el0)(void); 59 59 int (*set_next_event_phys)(unsigned long, struct clock_event_device *); 60 60 int (*set_next_event_virt)(unsigned long, struct clock_event_device *); 61 + bool disable_compat_vdso; 61 62 }; 62 63 63 64 DECLARE_PER_CPU(const struct arch_timer_erratum_workaround *,
+1 -1
arch/arm64/include/asm/cpufeature.h
··· 675 675 cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH); 676 676 } 677 677 678 - static inline bool system_uses_irq_prio_masking(void) 678 + static __always_inline bool system_uses_irq_prio_masking(void) 679 679 { 680 680 return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && 681 681 cpus_have_const_cap(ARM64_HAS_IRQ_PRIO_MASKING);
+2
arch/arm64/include/asm/cputype.h
··· 86 86 #define QCOM_CPU_PART_FALKOR 0xC00 87 87 #define QCOM_CPU_PART_KRYO 0x200 88 88 #define QCOM_CPU_PART_KRYO_3XX_SILVER 0x803 89 + #define QCOM_CPU_PART_KRYO_4XX_GOLD 0x804 89 90 #define QCOM_CPU_PART_KRYO_4XX_SILVER 0x805 90 91 91 92 #define NVIDIA_CPU_PART_DENVER 0x003 ··· 115 114 #define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR) 116 115 #define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO) 117 116 #define MIDR_QCOM_KRYO_3XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_SILVER) 117 + #define MIDR_QCOM_KRYO_4XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_GOLD) 118 118 #define MIDR_QCOM_KRYO_4XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_SILVER) 119 119 #define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER) 120 120 #define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
+2
arch/arm64/include/asm/debug-monitors.h
··· 109 109 110 110 void user_rewind_single_step(struct task_struct *task); 111 111 void user_fastforward_single_step(struct task_struct *task); 112 + void user_regs_reset_single_step(struct user_pt_regs *regs, 113 + struct task_struct *task); 112 114 113 115 void kernel_enable_single_step(struct pt_regs *regs); 114 116 void kernel_disable_single_step(void);
+1 -1
arch/arm64/include/asm/pgtable-prot.h
··· 67 67 #define PAGE_HYP __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN) 68 68 #define PAGE_HYP_EXEC __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY) 69 69 #define PAGE_HYP_RO __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN) 70 - #define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP) 70 + #define PAGE_HYP_DEVICE __pgprot(_PROT_DEFAULT | PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_HYP | PTE_HYP_XN) 71 71 72 72 #define PAGE_S2_MEMATTR(attr) \ 73 73 ({ \
+11 -1
arch/arm64/include/asm/syscall.h
··· 34 34 struct pt_regs *regs) 35 35 { 36 36 unsigned long error = regs->regs[0]; 37 + 38 + if (is_compat_thread(task_thread_info(task))) 39 + error = sign_extend64(error, 31); 40 + 37 41 return IS_ERR_VALUE(error) ? error : 0; 38 42 } 39 43 ··· 51 47 struct pt_regs *regs, 52 48 int error, long val) 53 49 { 54 - regs->regs[0] = (long) error ? error : val; 50 + if (error) 51 + val = error; 52 + 53 + if (is_compat_thread(task_thread_info(task))) 54 + val = lower_32_bits(val); 55 + 56 + regs->regs[0] = val; 55 57 } 56 58 57 59 #define SYSCALL_MAX_ARGS 6
+1
arch/arm64/include/asm/thread_info.h
··· 93 93 #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) 94 94 #define _TIF_UPROBE (1 << TIF_UPROBE) 95 95 #define _TIF_FSCHECK (1 << TIF_FSCHECK) 96 + #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) 96 97 #define _TIF_32BIT (1 << TIF_32BIT) 97 98 #define _TIF_SVE (1 << TIF_SVE) 98 99
+5 -2
arch/arm64/include/asm/vdso/clocksource.h
··· 2 2 #ifndef __ASM_VDSOCLOCKSOURCE_H 3 3 #define __ASM_VDSOCLOCKSOURCE_H 4 4 5 - #define VDSO_ARCH_CLOCKMODES \ 6 - VDSO_CLOCKMODE_ARCHTIMER 5 + #define VDSO_ARCH_CLOCKMODES \ 6 + /* vdso clocksource for both 32 and 64bit tasks */ \ 7 + VDSO_CLOCKMODE_ARCHTIMER, \ 8 + /* vdso clocksource for 64bit tasks only */ \ 9 + VDSO_CLOCKMODE_ARCHTIMER_NOCOMPAT 7 10 8 11 #endif
+7 -1
arch/arm64/include/asm/vdso/compat_gettimeofday.h
··· 111 111 * update. Return something. Core will do another round and then 112 112 * see the mode change and fallback to the syscall. 113 113 */ 114 - if (clock_mode == VDSO_CLOCKMODE_NONE) 114 + if (clock_mode != VDSO_CLOCKMODE_ARCHTIMER) 115 115 return 0; 116 116 117 117 /* ··· 151 151 152 152 return ret; 153 153 } 154 + 155 + static inline bool vdso_clocksource_ok(const struct vdso_data *vd) 156 + { 157 + return vd->clock_mode == VDSO_CLOCKMODE_ARCHTIMER; 158 + } 159 + #define vdso_clocksource_ok vdso_clocksource_ok 154 160 155 161 #endif /* !__ASSEMBLY__ */ 156 162
+2 -14
arch/arm64/kernel/alternative.c
··· 43 43 */ 44 44 static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc) 45 45 { 46 - unsigned long replptr; 47 - 48 - if (kernel_text_address(pc)) 49 - return true; 50 - 51 - replptr = (unsigned long)ALT_REPL_PTR(alt); 52 - if (pc >= replptr && pc <= (replptr + alt->alt_len)) 53 - return false; 54 - 55 - /* 56 - * Branching into *another* alternate sequence is doomed, and 57 - * we're not even trying to fix it up. 58 - */ 59 - BUG(); 46 + unsigned long replptr = (unsigned long)ALT_REPL_PTR(alt); 47 + return !(pc >= replptr && pc <= (replptr + alt->alt_len)); 60 48 } 61 49 62 50 #define align_down(x, a) ((unsigned long)(x) & ~(((unsigned long)(a)) - 1))
+16 -6
arch/arm64/kernel/cpu_errata.c
··· 472 472 has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry, 473 473 int scope) 474 474 { 475 - u32 midr = read_cpuid_id(); 476 - /* Cortex-A76 r0p0 - r3p1 */ 477 - struct midr_range range = MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1); 478 - 479 - WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); 480 - return is_midr_in_range(midr, &range) && is_kernel_in_hyp_mode(); 475 + return is_affected_midr_range_list(entry, scope) && is_kernel_in_hyp_mode(); 481 476 } 482 477 #endif 483 478 ··· 723 728 MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1), 724 729 /* Neoverse-N1 r0p0 to r3p1 */ 725 730 MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1), 731 + /* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */ 732 + MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf), 726 733 {}, 727 734 }; 728 735 #endif ··· 769 772 #ifdef CONFIG_ARM64_ERRATUM_1530923 770 773 /* Cortex A55 r0p0 to r2p0 */ 771 774 MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0), 775 + /* Kryo4xx Silver (rdpe => r1p0) */ 776 + MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe), 772 777 #endif 778 + {}, 779 + }; 780 + #endif 781 + 782 + #ifdef CONFIG_ARM64_ERRATUM_1463225 783 + static const struct midr_range erratum_1463225[] = { 784 + /* Cortex-A76 r0p0 - r3p1 */ 785 + MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1), 786 + /* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */ 787 + MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf), 773 788 {}, 774 789 }; 775 790 #endif ··· 925 916 .capability = ARM64_WORKAROUND_1463225, 926 917 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, 927 918 .matches = has_cortex_a76_erratum_1463225, 919 + .midr_range_list = erratum_1463225, 928 920 }, 929 921 #endif 930 922 #ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
+2
arch/arm64/kernel/cpufeature.c
··· 1408 1408 static const struct midr_range cpus[] = { 1409 1409 #ifdef CONFIG_ARM64_ERRATUM_1024718 1410 1410 MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 1, 0), // A55 r0p0 -r1p0 1411 + /* Kryo4xx Silver (rdpe => r1p0) */ 1412 + MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe), 1411 1413 #endif 1412 1414 {}, 1413 1415 };
+18 -6
arch/arm64/kernel/debug-monitors.c
··· 141 141 /* 142 142 * Single step API and exception handling. 143 143 */ 144 - static void set_regs_spsr_ss(struct pt_regs *regs) 144 + static void set_user_regs_spsr_ss(struct user_pt_regs *regs) 145 145 { 146 146 regs->pstate |= DBG_SPSR_SS; 147 147 } 148 - NOKPROBE_SYMBOL(set_regs_spsr_ss); 148 + NOKPROBE_SYMBOL(set_user_regs_spsr_ss); 149 149 150 - static void clear_regs_spsr_ss(struct pt_regs *regs) 150 + static void clear_user_regs_spsr_ss(struct user_pt_regs *regs) 151 151 { 152 152 regs->pstate &= ~DBG_SPSR_SS; 153 153 } 154 - NOKPROBE_SYMBOL(clear_regs_spsr_ss); 154 + NOKPROBE_SYMBOL(clear_user_regs_spsr_ss); 155 + 156 + #define set_regs_spsr_ss(r) set_user_regs_spsr_ss(&(r)->user_regs) 157 + #define clear_regs_spsr_ss(r) clear_user_regs_spsr_ss(&(r)->user_regs) 155 158 156 159 static DEFINE_SPINLOCK(debug_hook_lock); 157 160 static LIST_HEAD(user_step_hook); ··· 394 391 * If single step is active for this thread, then set SPSR.SS 395 392 * to 1 to avoid returning to the active-pending state. 396 393 */ 397 - if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP)) 394 + if (test_tsk_thread_flag(task, TIF_SINGLESTEP)) 398 395 set_regs_spsr_ss(task_pt_regs(task)); 399 396 } 400 397 NOKPROBE_SYMBOL(user_rewind_single_step); 401 398 402 399 void user_fastforward_single_step(struct task_struct *task) 403 400 { 404 - if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP)) 401 + if (test_tsk_thread_flag(task, TIF_SINGLESTEP)) 405 402 clear_regs_spsr_ss(task_pt_regs(task)); 403 + } 404 + 405 + void user_regs_reset_single_step(struct user_pt_regs *regs, 406 + struct task_struct *task) 407 + { 408 + if (test_tsk_thread_flag(task, TIF_SINGLESTEP)) 409 + set_user_regs_spsr_ss(regs); 410 + else 411 + clear_user_regs_spsr_ss(regs); 406 412 } 407 413 408 414 /* Kernel API */
+1 -1
arch/arm64/kernel/entry-common.c
··· 57 57 /* 58 58 * The CPU masked interrupts, and we are leaving them masked during 59 59 * do_debug_exception(). Update PMR as if we had called 60 - * local_mask_daif(). 60 + * local_daif_mask(). 61 61 */ 62 62 if (system_uses_irq_prio_masking()) 63 63 gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
+31 -21
arch/arm64/kernel/entry.S
··· 126 126 add \dst, \dst, #(\sym - .entry.tramp.text) 127 127 .endm 128 128 129 - // This macro corrupts x0-x3. It is the caller's duty 130 - // to save/restore them if required. 129 + /* 130 + * This macro corrupts x0-x3. It is the caller's duty to save/restore 131 + * them if required. 132 + */ 131 133 .macro apply_ssbd, state, tmp1, tmp2 132 134 #ifdef CONFIG_ARM64_SSBD 133 135 alternative_cb arm64_enable_wa2_handling ··· 169 167 stp x28, x29, [sp, #16 * 14] 170 168 171 169 .if \el == 0 170 + .if \regsize == 32 171 + /* 172 + * If we're returning from a 32-bit task on a system affected by 173 + * 1418040 then re-enable userspace access to the virtual counter. 174 + */ 175 + #ifdef CONFIG_ARM64_ERRATUM_1418040 176 + alternative_if ARM64_WORKAROUND_1418040 177 + mrs x0, cntkctl_el1 178 + orr x0, x0, #2 // ARCH_TIMER_USR_VCT_ACCESS_EN 179 + msr cntkctl_el1, x0 180 + alternative_else_nop_endif 181 + #endif 182 + .endif 172 183 clear_gp_regs 173 184 mrs x21, sp_el0 174 185 ldr_this_cpu tsk, __entry_task, x20 175 186 msr sp_el0, tsk 176 187 177 - // Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions 178 - // when scheduling. 188 + /* 189 + * Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions 190 + * when scheduling. 191 + */ 179 192 ldr x19, [tsk, #TSK_TI_FLAGS] 180 193 disable_step_tsk x19, x20 181 194 ··· 337 320 tst x22, #PSR_MODE32_BIT // native task? 
338 321 b.eq 3f 339 322 323 + #ifdef CONFIG_ARM64_ERRATUM_1418040 324 + alternative_if ARM64_WORKAROUND_1418040 325 + mrs x0, cntkctl_el1 326 + bic x0, x0, #2 // ARCH_TIMER_USR_VCT_ACCESS_EN 327 + msr cntkctl_el1, x0 328 + alternative_else_nop_endif 329 + #endif 330 + 340 331 #ifdef CONFIG_ARM64_ERRATUM_845719 341 332 alternative_if ARM64_WORKAROUND_845719 342 333 #ifdef CONFIG_PID_IN_CONTEXTIDR ··· 356 331 alternative_else_nop_endif 357 332 #endif 358 333 3: 359 - #ifdef CONFIG_ARM64_ERRATUM_1418040 360 - alternative_if_not ARM64_WORKAROUND_1418040 361 - b 4f 362 - alternative_else_nop_endif 363 - /* 364 - * if (x22.mode32 == cntkctl_el1.el0vcten) 365 - * cntkctl_el1.el0vcten = ~cntkctl_el1.el0vcten 366 - */ 367 - mrs x1, cntkctl_el1 368 - eon x0, x1, x22, lsr #3 369 - tbz x0, #1, 4f 370 - eor x1, x1, #2 // ARCH_TIMER_USR_VCT_ACCESS_EN 371 - msr cntkctl_el1, x1 372 - 4: 373 - #endif 374 334 scs_save tsk, x0 375 335 376 336 /* No kernel C function calls after this as user keys are set. */ ··· 387 377 .if \el == 0 388 378 alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0 389 379 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 390 - bne 5f 380 + bne 4f 391 381 msr far_el1, x30 392 382 tramp_alias x30, tramp_exit_native 393 383 br x30 394 - 5: 384 + 4: 395 385 tramp_alias x30, tramp_exit_compat 396 386 br x30 397 387 #endif
+1 -1
arch/arm64/kernel/kgdb.c
··· 252 252 if (!kgdb_single_step) 253 253 return DBG_HOOK_ERROR; 254 254 255 - kgdb_handle_exception(1, SIGTRAP, 0, regs); 255 + kgdb_handle_exception(0, SIGTRAP, 0, regs); 256 256 return DBG_HOOK_HANDLED; 257 257 } 258 258 NOKPROBE_SYMBOL(kgdb_step_brk_fn);
+1 -1
arch/arm64/kernel/probes/kprobes.c
··· 122 122 { 123 123 return __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, VMALLOC_END, 124 124 GFP_KERNEL, PAGE_KERNEL_ROX, VM_FLUSH_RESET_PERMS, 125 - NUMA_NO_NODE, __func__); 125 + NUMA_NO_NODE, __builtin_return_address(0)); 126 126 } 127 127 128 128 /* arm kprobe: install breakpoint in text */
+37 -12
arch/arm64/kernel/ptrace.c
··· 1811 1811 unsigned long saved_reg; 1812 1812 1813 1813 /* 1814 - * A scratch register (ip(r12) on AArch32, x7 on AArch64) is 1815 - * used to denote syscall entry/exit: 1814 + * We have some ABI weirdness here in the way that we handle syscall 1815 + * exit stops because we indicate whether or not the stop has been 1816 + * signalled from syscall entry or syscall exit by clobbering a general 1817 + * purpose register (ip/r12 for AArch32, x7 for AArch64) in the tracee 1818 + * and restoring its old value after the stop. This means that: 1819 + * 1820 + * - Any writes by the tracer to this register during the stop are 1821 + * ignored/discarded. 1822 + * 1823 + * - The actual value of the register is not available during the stop, 1824 + * so the tracer cannot save it and restore it later. 1825 + * 1826 + * - Syscall stops behave differently to seccomp and pseudo-step traps 1827 + * (the latter do not nobble any registers). 1816 1828 */ 1817 1829 regno = (is_compat_task() ? 12 : 7); 1818 1830 saved_reg = regs->regs[regno]; 1819 1831 regs->regs[regno] = dir; 1820 1832 1821 - if (dir == PTRACE_SYSCALL_EXIT) 1833 + if (dir == PTRACE_SYSCALL_ENTER) { 1834 + if (tracehook_report_syscall_entry(regs)) 1835 + forget_syscall(regs); 1836 + regs->regs[regno] = saved_reg; 1837 + } else if (!test_thread_flag(TIF_SINGLESTEP)) { 1822 1838 tracehook_report_syscall_exit(regs, 0); 1823 - else if (tracehook_report_syscall_entry(regs)) 1824 - forget_syscall(regs); 1839 + regs->regs[regno] = saved_reg; 1840 + } else { 1841 + regs->regs[regno] = saved_reg; 1825 1842 1826 - regs->regs[regno] = saved_reg; 1843 + /* 1844 + * Signal a pseudo-step exception since we are stepping but 1845 + * tracer modifications to the registers may have rewound the 1846 + * state machine. 
1847 + */ 1848 + tracehook_report_syscall_exit(regs, 1); 1849 + } 1827 1850 } 1828 1851 1829 1852 int syscall_trace_enter(struct pt_regs *regs) ··· 1856 1833 if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) { 1857 1834 tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER); 1858 1835 if (!in_syscall(regs) || (flags & _TIF_SYSCALL_EMU)) 1859 - return -1; 1836 + return NO_SYSCALL; 1860 1837 } 1861 1838 1862 1839 /* Do the secure computing after ptrace; failures should be fast. */ 1863 1840 if (secure_computing() == -1) 1864 - return -1; 1841 + return NO_SYSCALL; 1865 1842 1866 1843 if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) 1867 1844 trace_sys_enter(regs, regs->syscallno); ··· 1874 1851 1875 1852 void syscall_trace_exit(struct pt_regs *regs) 1876 1853 { 1854 + unsigned long flags = READ_ONCE(current_thread_info()->flags); 1855 + 1877 1856 audit_syscall_exit(regs); 1878 1857 1879 - if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) 1858 + if (flags & _TIF_SYSCALL_TRACEPOINT) 1880 1859 trace_sys_exit(regs, regs_return_value(regs)); 1881 1860 1882 - if (test_thread_flag(TIF_SYSCALL_TRACE)) 1861 + if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP)) 1883 1862 tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT); 1884 1863 1885 1864 rseq_syscall(regs); ··· 1959 1934 */ 1960 1935 int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task) 1961 1936 { 1962 - if (!test_tsk_thread_flag(task, TIF_SINGLESTEP)) 1963 - regs->pstate &= ~DBG_SPSR_SS; 1937 + /* https://lore.kernel.org/lkml/20191118131525.GA4180@willie-the-truck */ 1938 + user_regs_reset_single_step(regs, task); 1964 1939 1965 1940 if (is_compat_thread(task_thread_info(task))) 1966 1941 return valid_compat_regs(regs);
+2 -9
arch/arm64/kernel/signal.c
··· 800 800 */ 801 801 static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) 802 802 { 803 - struct task_struct *tsk = current; 804 803 sigset_t *oldset = sigmask_to_save(); 805 804 int usig = ksig->sig; 806 805 int ret; ··· 823 824 */ 824 825 ret |= !valid_user_regs(&regs->user_regs, current); 825 826 826 - /* 827 - * Fast forward the stepping logic so we step into the signal 828 - * handler. 829 - */ 830 - if (!ret) 831 - user_fastforward_single_step(tsk); 832 - 833 - signal_setup_done(ret, ksig, 0); 827 + /* Step into the signal handler if we are stepping */ 828 + signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP)); 834 829 } 835 830 836 831 /*
+19 -2
arch/arm64/kernel/syscall.c
··· 50 50 ret = do_ni_syscall(regs, scno); 51 51 } 52 52 53 + if (is_compat_task()) 54 + ret = lower_32_bits(ret); 55 + 53 56 regs->regs[0] = ret; 54 57 } 55 58 ··· 124 121 user_exit(); 125 122 126 123 if (has_syscall_work(flags)) { 127 - /* set default errno for user-issued syscall(-1) */ 124 + /* 125 + * The de-facto standard way to skip a system call using ptrace 126 + * is to set the system call to -1 (NO_SYSCALL) and set x0 to a 127 + * suitable error code for consumption by userspace. However, 128 + * this cannot be distinguished from a user-issued syscall(-1) 129 + * and so we must set x0 to -ENOSYS here in case the tracer doesn't 130 + * issue the skip and we fall into trace_exit with x0 preserved. 131 + * 132 + * This is slightly odd because it also means that if a tracer 133 + * sets the system call number to -1 but does not initialise x0, 134 + * then x0 will be preserved for all system calls apart from a 135 + * user-issued syscall(-1). However, requesting a skip and not 136 + * setting the return value is unlikely to do anything sensible 137 + * anyway. 138 + */ 128 139 if (scno == NO_SYSCALL) 129 140 regs->regs[0] = -ENOSYS; 130 141 scno = syscall_trace_enter(regs); ··· 156 139 if (!has_syscall_work(flags) && !IS_ENABLED(CONFIG_DEBUG_RSEQ)) { 157 140 local_daif_mask(); 158 141 flags = current_thread_info()->flags; 159 - if (!has_syscall_work(flags)) { 142 + if (!has_syscall_work(flags) && !(flags & _TIF_SINGLESTEP)) { 160 143 /* 161 144 * We're off to userspace, where interrupts are 162 145 * always enabled after we restore the flags from
+1 -1
arch/arm64/kernel/vdso32/Makefile
··· 14 14 COMPAT_GCC_TOOLCHAIN := $(realpath $(COMPAT_GCC_TOOLCHAIN_DIR)/..) 15 15 16 16 CC_COMPAT_CLANG_FLAGS := --target=$(notdir $(CROSS_COMPILE_COMPAT:%-=%)) 17 - CC_COMPAT_CLANG_FLAGS += --prefix=$(COMPAT_GCC_TOOLCHAIN_DIR) 17 + CC_COMPAT_CLANG_FLAGS += --prefix=$(COMPAT_GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE_COMPAT)) 18 18 CC_COMPAT_CLANG_FLAGS += -no-integrated-as -Qunused-arguments 19 19 ifneq ($(COMPAT_GCC_TOOLCHAIN),) 20 20 CC_COMPAT_CLANG_FLAGS += --gcc-toolchain=$(COMPAT_GCC_TOOLCHAIN)
-3
arch/arm64/kernel/vmlinux.lds.S
··· 165 165 *(.altinstructions) 166 166 __alt_instructions_end = .; 167 167 } 168 - .altinstr_replacement : { 169 - *(.altinstr_replacement) 170 - } 171 168 172 169 . = ALIGN(SEGMENT_ALIGN); 173 170 __inittext_end = .;
+7 -4
arch/arm64/kvm/hyp-init.S
··· 136 136 137 137 1: cmp x0, #HVC_RESET_VECTORS 138 138 b.ne 1f 139 - reset: 139 + 140 140 /* 141 - * Reset kvm back to the hyp stub. Do not clobber x0-x4 in 142 - * case we coming via HVC_SOFT_RESTART. 141 + * Set the HVC_RESET_VECTORS return code before entering the common 142 + * path so that we do not clobber x0-x2 in case we are coming via 143 + * HVC_SOFT_RESTART. 143 144 */ 145 + mov x0, xzr 146 + reset: 147 + /* Reset kvm back to the hyp stub. */ 144 148 mrs x5, sctlr_el2 145 149 mov_q x6, SCTLR_ELx_FLAGS 146 150 bic x5, x5, x6 // Clear SCTL_M and etc ··· 155 151 /* Install stub vectors */ 156 152 adr_l x5, __hyp_stub_vectors 157 153 msr vbar_el2, x5 158 - mov x0, xzr 159 154 eret 160 155 161 156 1: /* Bad stub call */
+6 -1
arch/arm64/kvm/pmu.c
··· 159 159 } 160 160 161 161 /* 162 - * On VHE ensure that only guest events have EL0 counting enabled 162 + * On VHE ensure that only guest events have EL0 counting enabled. 163 + * This is called from both vcpu_{load,put} and the sysreg handling. 164 + * Since the latter is preemptible, special care must be taken to 165 + * disable preemption. 163 166 */ 164 167 void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) 165 168 { ··· 172 169 if (!has_vhe()) 173 170 return; 174 171 172 + preempt_disable(); 175 173 host = this_cpu_ptr(&kvm_host_data); 176 174 events_guest = host->pmu_events.events_guest; 177 175 events_host = host->pmu_events.events_host; 178 176 179 177 kvm_vcpu_pmu_enable_el0(events_guest); 180 178 kvm_vcpu_pmu_disable_el0(events_host); 179 + preempt_enable(); 181 180 } 182 181 183 182 /*
+12 -3
arch/arm64/kvm/pvtime.c
··· 3 3 4 4 #include <linux/arm-smccc.h> 5 5 #include <linux/kvm_host.h> 6 + #include <linux/sched/stat.h> 6 7 7 8 #include <asm/kvm_mmu.h> 8 9 #include <asm/pvclock-abi.h> ··· 74 73 return base; 75 74 } 76 75 76 + static bool kvm_arm_pvtime_supported(void) 77 + { 78 + return !!sched_info_on(); 79 + } 80 + 77 81 int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu, 78 82 struct kvm_device_attr *attr) 79 83 { ··· 88 82 int ret = 0; 89 83 int idx; 90 84 91 - if (attr->attr != KVM_ARM_VCPU_PVTIME_IPA) 85 + if (!kvm_arm_pvtime_supported() || 86 + attr->attr != KVM_ARM_VCPU_PVTIME_IPA) 92 87 return -ENXIO; 93 88 94 89 if (get_user(ipa, user)) ··· 117 110 u64 __user *user = (u64 __user *)attr->addr; 118 111 u64 ipa; 119 112 120 - if (attr->attr != KVM_ARM_VCPU_PVTIME_IPA) 113 + if (!kvm_arm_pvtime_supported() || 114 + attr->attr != KVM_ARM_VCPU_PVTIME_IPA) 121 115 return -ENXIO; 122 116 123 117 ipa = vcpu->arch.steal.base; ··· 133 125 { 134 126 switch (attr->attr) { 135 127 case KVM_ARM_VCPU_PVTIME_IPA: 136 - return 0; 128 + if (kvm_arm_pvtime_supported()) 129 + return 0; 137 130 } 138 131 return -ENXIO; 139 132 }
+7 -3
arch/arm64/kvm/reset.c
··· 245 245 */ 246 246 int kvm_reset_vcpu(struct kvm_vcpu *vcpu) 247 247 { 248 - int ret = -EINVAL; 248 + int ret; 249 249 bool loaded; 250 250 u32 pstate; 251 251 ··· 269 269 270 270 if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) || 271 271 test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) { 272 - if (kvm_vcpu_enable_ptrauth(vcpu)) 272 + if (kvm_vcpu_enable_ptrauth(vcpu)) { 273 + ret = -EINVAL; 273 274 goto out; 275 + } 274 276 } 275 277 276 278 switch (vcpu->arch.target) { 277 279 default: 278 280 if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) { 279 - if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1)) 281 + if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1)) { 282 + ret = -EINVAL; 280 283 goto out; 284 + } 281 285 pstate = VCPU_RESET_PSTATE_SVC; 282 286 } else { 283 287 pstate = VCPU_RESET_PSTATE_EL1;
+8
arch/arm64/kvm/vgic/vgic-v4.c
··· 90 90 !irqd_irq_disabled(&irq_to_desc(irq)->irq_data)) 91 91 disable_irq_nosync(irq); 92 92 93 + /* 94 + * The v4.1 doorbell can fire concurrently with the vPE being 95 + * made non-resident. Ensure we only update pending_last 96 + * *after* the non-residency sequence has completed. 97 + */ 98 + raw_spin_lock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock); 93 99 vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true; 100 + raw_spin_unlock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock); 101 + 94 102 kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu); 95 103 kvm_vcpu_kick(vcpu); 96 104
+2 -1
arch/m68k/kernel/setup_no.c
··· 138 138 pr_debug("MEMORY -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx\n ", 139 139 __bss_stop, memory_start, memory_start, memory_end); 140 140 141 - memblock_add(memory_start, memory_end - memory_start); 141 + memblock_add(_rambase, memory_end - _rambase); 142 + memblock_reserve(_rambase, memory_start - _rambase); 142 143 143 144 /* Keep a copy of command line */ 144 145 *cmdline_p = &command_line[0];
+1 -1
arch/m68k/mm/mcfmmu.c
··· 174 174 m68k_memory[0].addr = _rambase; 175 175 m68k_memory[0].size = _ramend - _rambase; 176 176 177 - memblock_add(m68k_memory[0].addr, m68k_memory[0].size); 177 + memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0); 178 178 179 179 /* compute total pages in system */ 180 180 num_pages = PFN_DOWN(_ramend - _rambase);
+1 -1
arch/mips/boot/dts/ingenic/gcw0.dts
··· 92 92 "MIC1N", "Built-in Mic"; 93 93 simple-audio-card,pin-switches = "Speaker", "Headphones"; 94 94 95 - simple-audio-card,hp-det-gpio = <&gpf 21 GPIO_ACTIVE_HIGH>; 95 + simple-audio-card,hp-det-gpio = <&gpf 21 GPIO_ACTIVE_LOW>; 96 96 simple-audio-card,aux-devs = <&speaker_amp>, <&headphones_amp>; 97 97 98 98 simple-audio-card,bitclock-master = <&dai_codec>;
+3 -5
arch/mips/include/asm/unroll.h
··· 19 19 \ 20 20 /* \ 21 21 * We can't unroll if the number of iterations isn't \ 22 - * compile-time constant. Unfortunately GCC versions \ 23 - * up until 4.6 tend to miss obvious constants & cause \ 22 + * compile-time constant. Unfortunately clang versions \ 23 + * up until 8.0 tend to miss obvious constants & cause \ 24 24 * this check to fail, even though they go on to \ 25 25 * generate reasonable code for the switch statement, \ 26 26 * so we skip the sanity check for those compilers. \ 27 27 */ \ 28 - BUILD_BUG_ON((CONFIG_GCC_VERSION >= 40700 || \ 29 - CONFIG_CLANG_VERSION >= 80000) && \ 30 - !__builtin_constant_p(times)); \ 28 + BUILD_BUG_ON(!__builtin_constant_p(times)); \ 31 29 \ 32 30 switch (times) { \ 33 31 case 32: fn(__VA_ARGS__); /* fall through */ \
+6 -3
arch/mips/kernel/traps.c
··· 723 723 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); 724 724 725 725 /* Do not emulate on unsupported core models. */ 726 - if (!loongson3_cpucfg_emulation_enabled(&current_cpu_data)) 726 + preempt_disable(); 727 + if (!loongson3_cpucfg_emulation_enabled(&current_cpu_data)) { 728 + preempt_enable(); 727 729 return -1; 728 - 730 + } 729 731 regs->regs[rd] = loongson3_cpucfg_read_synthesized( 730 732 &current_cpu_data, sel); 731 - 733 + preempt_enable(); 732 734 return 0; 733 735 } 734 736 ··· 2171 2169 2172 2170 change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX, 2173 2171 status_set); 2172 + back_to_back_c0_hazard(); 2174 2173 } 2175 2174 2176 2175 unsigned int hwrena;
+4
arch/mips/kvm/emulate.c
··· 1722 1722 vcpu->arch.gprs[rt], *(u32 *)data); 1723 1723 break; 1724 1724 1725 + #if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ) 1725 1726 case sdl_op: 1726 1727 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( 1727 1728 vcpu->arch.host_cp0_badvaddr) & (~0x7); ··· 1816 1815 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, 1817 1816 vcpu->arch.gprs[rt], *(u64 *)data); 1818 1817 break; 1818 + #endif 1819 1819 1820 1820 #ifdef CONFIG_CPU_LOONGSON64 1821 1821 case sdc2_op: ··· 2004 2002 } 2005 2003 break; 2006 2004 2005 + #if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ) 2007 2006 case ldl_op: 2008 2007 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( 2009 2008 vcpu->arch.host_cp0_badvaddr) & (~0x7); ··· 2076 2073 break; 2077 2074 } 2078 2075 break; 2076 + #endif 2079 2077 2080 2078 #ifdef CONFIG_CPU_LOONGSON64 2081 2079 case ldc2_op:
+4 -4
arch/mips/lantiq/xway/sysctrl.c
··· 514 514 clkdev_add_pmu("1e10b308.eth", NULL, 0, 0, PMU_SWITCH | 515 515 PMU_PPE_DP | PMU_PPE_TC); 516 516 clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF); 517 - clkdev_add_pmu("1e108000.gswip", "gphy0", 0, 0, PMU_GPHY); 518 - clkdev_add_pmu("1e108000.gswip", "gphy1", 0, 0, PMU_GPHY); 517 + clkdev_add_pmu("1e108000.switch", "gphy0", 0, 0, PMU_GPHY); 518 + clkdev_add_pmu("1e108000.switch", "gphy1", 0, 0, PMU_GPHY); 519 519 clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU); 520 520 clkdev_add_pmu("1e116000.mei", "afe", 1, 2, PMU_ANALOG_DSL_AFE); 521 521 clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE); ··· 538 538 PMU_SWITCH | PMU_PPE_DPLUS | PMU_PPE_DPLUM | 539 539 PMU_PPE_EMA | PMU_PPE_TC | PMU_PPE_SLL01 | 540 540 PMU_PPE_QSB | PMU_PPE_TOP); 541 - clkdev_add_pmu("1e108000.gswip", "gphy0", 0, 0, PMU_GPHY); 542 - clkdev_add_pmu("1e108000.gswip", "gphy1", 0, 0, PMU_GPHY); 541 + clkdev_add_pmu("1e108000.switch", "gphy0", 0, 0, PMU_GPHY); 542 + clkdev_add_pmu("1e108000.switch", "gphy1", 0, 0, PMU_GPHY); 543 543 clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO); 544 544 clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU); 545 545 clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
+3 -2
arch/mips/pci/pci-xtalk-bridge.c
··· 627 627 return -ENOMEM; 628 628 domain = irq_domain_create_hierarchy(parent, 0, 8, fn, 629 629 &bridge_domain_ops, NULL); 630 - irq_domain_free_fwnode(fn); 631 - if (!domain) 630 + if (!domain) { 631 + irq_domain_free_fwnode(fn); 632 632 return -ENOMEM; 633 + } 633 634 634 635 pci_set_flags(PCI_PROBE_ONLY); 635 636
+2
arch/parisc/include/asm/atomic.h
··· 212 212 _atomic_spin_unlock_irqrestore(v, flags); 213 213 } 214 214 215 + #define atomic64_set_release(v, i) atomic64_set((v), (i)) 216 + 215 217 static __inline__ s64 216 218 atomic64_read(const atomic64_t *v) 217 219 {
+2
arch/parisc/include/asm/cmpxchg.h
··· 60 60 extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old, 61 61 unsigned int new_); 62 62 extern u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new_); 63 + extern u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new_); 63 64 64 65 /* don't worry...optimizer will get rid of most of this */ 65 66 static inline unsigned long ··· 72 71 #endif 73 72 case 4: return __cmpxchg_u32((unsigned int *)ptr, 74 73 (unsigned int)old, (unsigned int)new_); 74 + case 1: return __cmpxchg_u8((u8 *)ptr, (u8)old, (u8)new_); 75 75 } 76 76 __cmpxchg_called_with_bad_pointer(); 77 77 return old;
+12
arch/parisc/lib/bitops.c
··· 79 79 _atomic_spin_unlock_irqrestore(ptr, flags); 80 80 return (unsigned long)prev; 81 81 } 82 + 83 + u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new) 84 + { 85 + unsigned long flags; 86 + u8 prev; 87 + 88 + _atomic_spin_lock_irqsave(ptr, flags); 89 + if ((prev = *ptr) == old) 90 + *ptr = new; 91 + _atomic_spin_unlock_irqrestore(ptr, flags); 92 + return prev; 93 + }
+2
arch/powerpc/include/asm/icswx.h
··· 77 77 #define CSB_CC_CHAIN (37) 78 78 #define CSB_CC_SEQUENCE (38) 79 79 #define CSB_CC_HW (39) 80 + /* P9 DD2 NX Workbook 3.2 (Table 4-36): Address translation fault */ 81 + #define CSB_CC_FAULT_ADDRESS (250) 80 82 81 83 #define CSB_SIZE (0x10) 82 84 #define CSB_ALIGN CSB_SIZE
+1 -1
arch/powerpc/kernel/exceptions-64s.S
··· 2551 2551 INT_DEFINE_BEGIN(denorm_exception) 2552 2552 IVEC=0x1500 2553 2553 IHSRR=1 2554 - IBRANCH_COMMON=0 2554 + IBRANCH_TO_COMMON=0 2555 2555 IKVM_REAL=1 2556 2556 INT_DEFINE_END(denorm_exception) 2557 2557
+1 -1
arch/powerpc/kernel/paca.c
··· 87 87 * This is very early in boot, so no harm done if the kernel crashes at 88 88 * this point. 89 89 */ 90 - BUG_ON(shared_lppaca_size >= shared_lppaca_total_size); 90 + BUG_ON(shared_lppaca_size > shared_lppaca_total_size); 91 91 92 92 return ptr; 93 93 }
+7 -8
arch/powerpc/mm/book3s64/pkeys.c
··· 353 353 int pkey_shift; 354 354 u64 amr; 355 355 356 - if (!is_pkey_enabled(pkey)) 357 - return true; 358 - 359 356 pkey_shift = pkeyshift(pkey); 360 - if (execute && !(read_iamr() & (IAMR_EX_BIT << pkey_shift))) 361 - return true; 357 + if (execute) 358 + return !(read_iamr() & (IAMR_EX_BIT << pkey_shift)); 362 359 363 - amr = read_amr(); /* Delay reading amr until absolutely needed */ 364 - return ((!write && !(amr & (AMR_RD_BIT << pkey_shift))) || 365 - (write && !(amr & (AMR_WR_BIT << pkey_shift)))); 360 + amr = read_amr(); 361 + if (write) 362 + return !(amr & (AMR_WR_BIT << pkey_shift)); 363 + 364 + return !(amr & (AMR_RD_BIT << pkey_shift)); 366 365 } 367 366 368 367 bool arch_pte_access_permitted(u64 pte, bool write, bool execute)
+1 -1
arch/powerpc/platforms/powernv/vas-fault.c
··· 79 79 csb_addr = (void __user *)be64_to_cpu(crb->csb_addr); 80 80 81 81 memset(&csb, 0, sizeof(csb)); 82 - csb.cc = CSB_CC_TRANSLATION; 82 + csb.cc = CSB_CC_FAULT_ADDRESS; 83 83 csb.ce = CSB_CE_TERMINATION; 84 84 csb.cs = 0; 85 85 csb.count = 0;
+2
arch/riscv/Kconfig
··· 23 23 select ARCH_HAS_SET_DIRECT_MAP 24 24 select ARCH_HAS_SET_MEMORY 25 25 select ARCH_HAS_STRICT_KERNEL_RWX if MMU 26 + select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX 27 + select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT 26 28 select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU 27 29 select ARCH_WANT_FRAME_POINTERS 28 30 select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
+9 -1
arch/riscv/include/asm/barrier.h
··· 58 58 * The AQ/RL pair provides a RCpc critical section, but there's not really any 59 59 * way we can take advantage of that here because the ordering is only enforced 60 60 * on that one lock. Thus, we're just doing a full fence. 61 + * 62 + * Since we allow writeX to be called from preemptive regions we need at least 63 + * an "o" in the predecessor set to ensure device writes are visible before the 64 + * task is marked as available for scheduling on a new hart. While I don't see 65 + * any concrete reason we need a full IO fence, it seems safer to just upgrade 66 + * this in order to avoid any IO crossing a scheduling boundary. In both 67 + * instances the scheduler pairs this with an mb(), so nothing is necessary on 68 + * the new hart. 61 69 */ 62 - #define smp_mb__after_spinlock() RISCV_FENCE(rw,rw) 70 + #define smp_mb__after_spinlock() RISCV_FENCE(iorw,iorw) 63 71 64 72 #include <asm-generic/barrier.h> 65 73
+1 -2
arch/riscv/include/asm/gdb_xml.h
··· 3 3 #ifndef __ASM_GDB_XML_H_ 4 4 #define __ASM_GDB_XML_H_ 5 5 6 - #define kgdb_arch_gdb_stub_feature riscv_gdb_stub_feature 7 - static const char riscv_gdb_stub_feature[64] = 6 + const char riscv_gdb_stub_feature[64] = 8 7 "PacketSize=800;qXfer:features:read+;"; 9 8 10 9 static const char gdb_xfer_read_target[31] = "qXfer:features:read:target.xml:";
+3 -2
arch/riscv/include/asm/kgdb.h
··· 19 19 20 20 #ifndef __ASSEMBLY__ 21 21 22 - extern int kgdb_has_hit_break(unsigned long addr); 23 22 extern unsigned long kgdb_compiled_break; 24 23 25 24 static inline void arch_kgdb_breakpoint(void) ··· 105 106 #define DBG_REG_BADADDR_OFF 34 106 107 #define DBG_REG_CAUSE_OFF 35 107 108 108 - #include <asm/gdb_xml.h> 109 + extern const char riscv_gdb_stub_feature[64]; 110 + 111 + #define kgdb_arch_gdb_stub_feature riscv_gdb_stub_feature 109 112 110 113 #endif 111 114 #endif
+4
arch/riscv/include/asm/thread_info.h
··· 12 12 #include <linux/const.h> 13 13 14 14 /* thread information allocation */ 15 + #ifdef CONFIG_64BIT 16 + #define THREAD_SIZE_ORDER (2) 17 + #else 15 18 #define THREAD_SIZE_ORDER (1) 19 + #endif 16 20 #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) 17 21 18 22 #ifndef __ASSEMBLY__
+5 -5
arch/riscv/kernel/kgdb.c
··· 44 44 DECLARE_INSN(c_bnez, MATCH_C_BNEZ, MASK_C_BNEZ) 45 45 DECLARE_INSN(sret, MATCH_SRET, MASK_SRET) 46 46 47 - int decode_register_index(unsigned long opcode, int offset) 47 + static int decode_register_index(unsigned long opcode, int offset) 48 48 { 49 49 return (opcode >> offset) & 0x1F; 50 50 } 51 51 52 - int decode_register_index_short(unsigned long opcode, int offset) 52 + static int decode_register_index_short(unsigned long opcode, int offset) 53 53 { 54 54 return ((opcode >> offset) & 0x7) + 8; 55 55 } 56 56 57 57 /* Calculate the new address for after a step */ 58 - int get_step_address(struct pt_regs *regs, unsigned long *next_addr) 58 + static int get_step_address(struct pt_regs *regs, unsigned long *next_addr) 59 59 { 60 60 unsigned long pc = regs->epc; 61 61 unsigned long *regs_ptr = (unsigned long *)regs; ··· 136 136 return 0; 137 137 } 138 138 139 - int do_single_step(struct pt_regs *regs) 139 + static int do_single_step(struct pt_regs *regs) 140 140 { 141 141 /* Determine where the target instruction will send us to */ 142 142 unsigned long addr = 0; ··· 320 320 return err; 321 321 } 322 322 323 - int kgdb_riscv_kgdbbreak(unsigned long addr) 323 + static int kgdb_riscv_kgdbbreak(unsigned long addr) 324 324 { 325 325 if (stepped_address == addr) 326 326 return KGDB_SW_SINGLE_STEP;
+47 -23
arch/riscv/mm/init.c
··· 95 95 #ifdef CONFIG_BLK_DEV_INITRD 96 96 static void __init setup_initrd(void) 97 97 { 98 + phys_addr_t start; 98 99 unsigned long size; 99 100 100 - if (initrd_start >= initrd_end) { 101 - pr_info("initrd not found or empty"); 102 - goto disable; 103 - } 104 - if (__pa_symbol(initrd_end) > PFN_PHYS(max_low_pfn)) { 105 - pr_err("initrd extends beyond end of memory"); 101 + /* Ignore the virtul address computed during device tree parsing */ 102 + initrd_start = initrd_end = 0; 103 + 104 + if (!phys_initrd_size) 105 + return; 106 + /* 107 + * Round the memory region to page boundaries as per free_initrd_mem() 108 + * This allows us to detect whether the pages overlapping the initrd 109 + * are in use, but more importantly, reserves the entire set of pages 110 + * as we don't want these pages allocated for other purposes. 111 + */ 112 + start = round_down(phys_initrd_start, PAGE_SIZE); 113 + size = phys_initrd_size + (phys_initrd_start - start); 114 + size = round_up(size, PAGE_SIZE); 115 + 116 + if (!memblock_is_region_memory(start, size)) { 117 + pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region", 118 + (u64)start, size); 106 119 goto disable; 107 120 } 108 121 109 - size = initrd_end - initrd_start; 110 - memblock_reserve(__pa_symbol(initrd_start), size); 122 + if (memblock_is_region_reserved(start, size)) { 123 + pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region\n", 124 + (u64)start, size); 125 + goto disable; 126 + } 127 + 128 + memblock_reserve(start, size); 129 + /* Now convert initrd to virtual addresses */ 130 + initrd_start = (unsigned long)__va(phys_initrd_start); 131 + initrd_end = initrd_start + phys_initrd_size; 111 132 initrd_below_start_ok = 1; 112 133 113 134 pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n", ··· 147 126 { 148 127 struct memblock_region *reg; 149 128 phys_addr_t mem_size = 0; 129 + phys_addr_t total_mem = 0; 130 + phys_addr_t mem_start, end = 0; 150 131 phys_addr_t vmlinux_end = __pa_symbol(&_end); 151 132 
phys_addr_t vmlinux_start = __pa_symbol(&_start); 152 133 153 134 /* Find the memory region containing the kernel */ 154 135 for_each_memblock(memory, reg) { 155 - phys_addr_t end = reg->base + reg->size; 156 - 157 - if (reg->base <= vmlinux_start && vmlinux_end <= end) { 158 - mem_size = min(reg->size, (phys_addr_t)-PAGE_OFFSET); 159 - 160 - /* 161 - * Remove memblock from the end of usable area to the 162 - * end of region 163 - */ 164 - if (reg->base + mem_size < end) 165 - memblock_remove(reg->base + mem_size, 166 - end - reg->base - mem_size); 167 - } 136 + end = reg->base + reg->size; 137 + if (!total_mem) 138 + mem_start = reg->base; 139 + if (reg->base <= vmlinux_start && vmlinux_end <= end) 140 + BUG_ON(reg->size == 0); 141 + total_mem = total_mem + reg->size; 168 142 } 169 - BUG_ON(mem_size == 0); 143 + 144 + /* 145 + * Remove memblock from the end of usable area to the 146 + * end of region 147 + */ 148 + mem_size = min(total_mem, (phys_addr_t)-PAGE_OFFSET); 149 + if (mem_start + mem_size < end) 150 + memblock_remove(mem_start + mem_size, 151 + end - mem_start - mem_size); 170 152 171 153 /* Reserve from the start of the kernel to the end of the kernel */ 172 154 memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start); 173 155 174 - set_max_mapnr(PFN_DOWN(mem_size)); 175 156 max_pfn = PFN_DOWN(memblock_end_of_DRAM()); 176 157 max_low_pfn = max_pfn; 158 + set_max_mapnr(max_low_pfn); 177 159 178 160 #ifdef CONFIG_BLK_DEV_INITRD 179 161 setup_initrd();
+2 -2
arch/riscv/mm/kasan_init.c
··· 44 44 (__pa(((uintptr_t) kasan_early_shadow_pmd))), 45 45 __pgprot(_PAGE_TABLE))); 46 46 47 - flush_tlb_all(); 47 + local_flush_tlb_all(); 48 48 } 49 49 50 50 static void __init populate(void *start, void *end) ··· 79 79 pfn_pgd(PFN_DOWN(__pa(&pmd[offset])), 80 80 __pgprot(_PAGE_TABLE))); 81 81 82 - flush_tlb_all(); 82 + local_flush_tlb_all(); 83 83 memset(start, 0, end - start); 84 84 } 85 85
+35 -9
arch/s390/configs/debug_defconfig
··· 1 1 CONFIG_SYSVIPC=y 2 2 CONFIG_POSIX_MQUEUE=y 3 + CONFIG_WATCH_QUEUE=y 3 4 CONFIG_AUDIT=y 4 5 CONFIG_NO_HZ_IDLE=y 5 6 CONFIG_HIGH_RES_TIMERS=y ··· 15 14 CONFIG_IKCONFIG_PROC=y 16 15 CONFIG_NUMA_BALANCING=y 17 16 CONFIG_MEMCG=y 18 - CONFIG_MEMCG_SWAP=y 19 17 CONFIG_BLK_CGROUP=y 20 18 CONFIG_CFS_BANDWIDTH=y 21 19 CONFIG_RT_GROUP_SCHED=y ··· 31 31 CONFIG_USER_NS=y 32 32 CONFIG_CHECKPOINT_RESTORE=y 33 33 CONFIG_SCHED_AUTOGROUP=y 34 - CONFIG_BLK_DEV_INITRD=y 35 34 CONFIG_EXPERT=y 36 35 # CONFIG_SYSFS_SYSCALL is not set 36 + CONFIG_BPF_LSM=y 37 37 CONFIG_BPF_SYSCALL=y 38 38 CONFIG_USERFAULTFD=y 39 39 # CONFIG_COMPAT_BRK is not set ··· 51 51 CONFIG_VFIO_CCW=m 52 52 CONFIG_VFIO_AP=m 53 53 CONFIG_CRASH_DUMP=y 54 - CONFIG_HIBERNATION=y 55 - CONFIG_PM_DEBUG=y 56 54 CONFIG_PROTECTED_VIRTUALIZATION_GUEST=y 57 55 CONFIG_CMM=m 58 56 CONFIG_APPLDATA_BASE=y 59 57 CONFIG_KVM=m 60 - CONFIG_VHOST_NET=m 61 - CONFIG_VHOST_VSOCK=m 58 + CONFIG_S390_UNWIND_SELFTEST=y 62 59 CONFIG_OPROFILE=m 63 60 CONFIG_KPROBES=y 64 61 CONFIG_JUMP_LABEL=y ··· 74 77 CONFIG_BLK_WBT=y 75 78 CONFIG_BLK_CGROUP_IOLATENCY=y 76 79 CONFIG_BLK_CGROUP_IOCOST=y 80 + CONFIG_BLK_INLINE_ENCRYPTION=y 81 + CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y 77 82 CONFIG_PARTITION_ADVANCED=y 78 83 CONFIG_IBM_PARTITION=y 79 84 CONFIG_BSD_DISKLABEL=y ··· 95 96 CONFIG_CMA_DEBUGFS=y 96 97 CONFIG_MEM_SOFT_DIRTY=y 97 98 CONFIG_ZSWAP=y 98 - CONFIG_ZBUD=m 99 99 CONFIG_ZSMALLOC=m 100 100 CONFIG_ZSMALLOC_STAT=y 101 101 CONFIG_DEFERRED_STRUCT_PAGE_INIT=y ··· 128 130 CONFIG_NET_IPVTI=m 129 131 CONFIG_INET_AH=m 130 132 CONFIG_INET_ESP=m 133 + CONFIG_INET_ESPINTCP=y 131 134 CONFIG_INET_IPCOMP=m 132 135 CONFIG_INET_DIAG=m 133 136 CONFIG_INET_UDP_DIAG=m ··· 143 144 CONFIG_IPV6_ROUTER_PREF=y 144 145 CONFIG_INET6_AH=m 145 146 CONFIG_INET6_ESP=m 147 + CONFIG_INET6_ESPINTCP=y 146 148 CONFIG_INET6_IPCOMP=m 147 149 CONFIG_IPV6_MIP6=m 148 150 CONFIG_IPV6_VTI=m ··· 151 151 CONFIG_IPV6_GRE=m 152 152 CONFIG_IPV6_MULTIPLE_TABLES=y 153 153 
CONFIG_IPV6_SUBTREES=y 154 + CONFIG_IPV6_RPL_LWTUNNEL=y 155 + CONFIG_MPTCP=y 154 156 CONFIG_NETFILTER=y 157 + CONFIG_BRIDGE_NETFILTER=m 155 158 CONFIG_NF_CONNTRACK=m 156 159 CONFIG_NF_CONNTRACK_SECMARK=y 157 160 CONFIG_NF_CONNTRACK_EVENTS=y ··· 320 317 CONFIG_L2TP_IP=m 321 318 CONFIG_L2TP_ETH=m 322 319 CONFIG_BRIDGE=m 320 + CONFIG_BRIDGE_MRP=y 323 321 CONFIG_VLAN_8021Q=m 324 322 CONFIG_VLAN_8021Q_GVRP=y 325 323 CONFIG_NET_SCHED=y ··· 345 341 CONFIG_NET_SCH_FQ_CODEL=m 346 342 CONFIG_NET_SCH_INGRESS=m 347 343 CONFIG_NET_SCH_PLUG=m 344 + CONFIG_NET_SCH_ETS=m 348 345 CONFIG_NET_CLS_BASIC=m 349 346 CONFIG_NET_CLS_TCINDEX=m 350 347 CONFIG_NET_CLS_ROUTE4=m ··· 369 364 CONFIG_NET_ACT_SIMP=m 370 365 CONFIG_NET_ACT_SKBEDIT=m 371 366 CONFIG_NET_ACT_CSUM=m 367 + CONFIG_NET_ACT_GATE=m 372 368 CONFIG_DNS_RESOLVER=y 373 369 CONFIG_OPENVSWITCH=m 374 370 CONFIG_VSOCKETS=m ··· 380 374 CONFIG_NET_PKTGEN=m 381 375 # CONFIG_NET_DROP_MONITOR is not set 382 376 CONFIG_PCI=y 377 + # CONFIG_PCIEASPM is not set 383 378 CONFIG_PCI_DEBUG=y 384 379 CONFIG_HOTPLUG_PCI=y 385 380 CONFIG_HOTPLUG_PCI_S390=y ··· 442 435 CONFIG_DM_MULTIPATH=m 443 436 CONFIG_DM_MULTIPATH_QL=m 444 437 CONFIG_DM_MULTIPATH_ST=m 438 + CONFIG_DM_MULTIPATH_HST=m 445 439 CONFIG_DM_DELAY=m 446 440 CONFIG_DM_UEVENT=y 447 441 CONFIG_DM_FLAKEY=m ··· 456 448 CONFIG_IFB=m 457 449 CONFIG_MACVLAN=m 458 450 CONFIG_MACVTAP=m 451 + CONFIG_VXLAN=m 452 + CONFIG_BAREUDP=m 459 453 CONFIG_TUN=m 460 454 CONFIG_VETH=m 461 455 CONFIG_VIRTIO_NET=m ··· 491 481 CONFIG_MLX4_EN=m 492 482 CONFIG_MLX5_CORE=m 493 483 CONFIG_MLX5_CORE_EN=y 494 - # CONFIG_MLXFW is not set 495 484 # CONFIG_NET_VENDOR_MICREL is not set 496 485 # CONFIG_NET_VENDOR_MICROCHIP is not set 497 486 # CONFIG_NET_VENDOR_MICROSEMI is not set ··· 523 514 # CONFIG_NET_VENDOR_TI is not set 524 515 # CONFIG_NET_VENDOR_VIA is not set 525 516 # CONFIG_NET_VENDOR_WIZNET is not set 517 + # CONFIG_NET_VENDOR_XILINX is not set 526 518 CONFIG_PPP=m 527 519 CONFIG_PPP_BSDCOMP=m 528 520 
CONFIG_PPP_DEFLATE=m ··· 571 561 CONFIG_VIRTIO_PCI=m 572 562 CONFIG_VIRTIO_BALLOON=m 573 563 CONFIG_VIRTIO_INPUT=y 564 + CONFIG_VHOST_NET=m 565 + CONFIG_VHOST_VSOCK=m 574 566 CONFIG_S390_CCW_IOMMU=y 575 567 CONFIG_S390_AP_IOMMU=y 576 568 CONFIG_EXT4_FS=y ··· 620 608 CONFIG_UDF_FS=m 621 609 CONFIG_MSDOS_FS=m 622 610 CONFIG_VFAT_FS=m 611 + CONFIG_EXFAT_FS=m 623 612 CONFIG_NTFS_FS=m 624 613 CONFIG_NTFS_RW=y 625 614 CONFIG_PROC_KCORE=y ··· 663 650 CONFIG_DLM=m 664 651 CONFIG_UNICODE=y 665 652 CONFIG_PERSISTENT_KEYRINGS=y 666 - CONFIG_BIG_KEYS=y 667 653 CONFIG_ENCRYPTED_KEYS=m 654 + CONFIG_KEY_NOTIFICATIONS=y 668 655 CONFIG_SECURITY=y 669 656 CONFIG_SECURITY_NETWORK=y 670 657 CONFIG_FORTIFY_SOURCE=y ··· 688 675 CONFIG_CRYPTO_DH=m 689 676 CONFIG_CRYPTO_ECDH=m 690 677 CONFIG_CRYPTO_ECRDSA=m 678 + CONFIG_CRYPTO_CURVE25519=m 679 + CONFIG_CRYPTO_GCM=y 691 680 CONFIG_CRYPTO_CHACHA20POLY1305=m 692 681 CONFIG_CRYPTO_AEGIS128=m 682 + CONFIG_CRYPTO_SEQIV=y 693 683 CONFIG_CRYPTO_CFB=m 694 684 CONFIG_CRYPTO_LRW=m 695 685 CONFIG_CRYPTO_PCBC=m ··· 701 685 CONFIG_CRYPTO_XCBC=m 702 686 CONFIG_CRYPTO_VMAC=m 703 687 CONFIG_CRYPTO_CRC32=m 688 + CONFIG_CRYPTO_BLAKE2S=m 704 689 CONFIG_CRYPTO_MICHAEL_MIC=m 705 690 CONFIG_CRYPTO_RMD128=m 706 691 CONFIG_CRYPTO_RMD160=m ··· 718 701 CONFIG_CRYPTO_CAMELLIA=m 719 702 CONFIG_CRYPTO_CAST5=m 720 703 CONFIG_CRYPTO_CAST6=m 704 + CONFIG_CRYPTO_DES=m 721 705 CONFIG_CRYPTO_FCRYPT=m 722 706 CONFIG_CRYPTO_KHAZAD=m 723 707 CONFIG_CRYPTO_SALSA20=m ··· 737 719 CONFIG_CRYPTO_USER_API_RNG=m 738 720 CONFIG_CRYPTO_USER_API_AEAD=m 739 721 CONFIG_CRYPTO_STATS=y 722 + CONFIG_CRYPTO_LIB_BLAKE2S=m 723 + CONFIG_CRYPTO_LIB_CURVE25519=m 724 + CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m 740 725 CONFIG_ZCRYPT=m 741 726 CONFIG_PKEY=m 742 727 CONFIG_CRYPTO_PAES_S390=m ··· 795 774 CONFIG_PANIC_ON_OOPS=y 796 775 CONFIG_DETECT_HUNG_TASK=y 797 776 CONFIG_WQ_WATCHDOG=y 777 + CONFIG_TEST_LOCKUP=m 798 778 CONFIG_DEBUG_TIMEKEEPING=y 799 779 CONFIG_PROVE_LOCKING=y 800 780 CONFIG_LOCK_STAT=y 
··· 808 786 CONFIG_DEBUG_CREDENTIALS=y 809 787 CONFIG_RCU_TORTURE_TEST=m 810 788 CONFIG_RCU_CPU_STALL_TIMEOUT=300 789 + # CONFIG_RCU_TRACE is not set 811 790 CONFIG_LATENCYTOP=y 791 + CONFIG_BOOTTIME_TRACING=y 812 792 CONFIG_FUNCTION_PROFILER=y 813 793 CONFIG_STACK_TRACER=y 814 794 CONFIG_IRQSOFF_TRACER=y ··· 832 808 CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y 833 809 CONFIG_LKDTM=m 834 810 CONFIG_TEST_LIST_SORT=y 811 + CONFIG_TEST_MIN_HEAP=y 835 812 CONFIG_TEST_SORT=y 836 813 CONFIG_KPROBES_SANITY_TEST=y 837 814 CONFIG_RBTREE_TEST=y 838 815 CONFIG_INTERVAL_TREE_TEST=m 839 816 CONFIG_PERCPU_TEST=m 840 817 CONFIG_ATOMIC64_SELFTEST=y 818 + CONFIG_TEST_BITOPS=m 841 819 CONFIG_TEST_BPF=m
+33 -10
arch/s390/configs/defconfig
··· 1 1 CONFIG_SYSVIPC=y 2 2 CONFIG_POSIX_MQUEUE=y 3 + CONFIG_WATCH_QUEUE=y 3 4 CONFIG_AUDIT=y 4 5 CONFIG_NO_HZ_IDLE=y 5 6 CONFIG_HIGH_RES_TIMERS=y ··· 14 13 CONFIG_IKCONFIG_PROC=y 15 14 CONFIG_NUMA_BALANCING=y 16 15 CONFIG_MEMCG=y 17 - CONFIG_MEMCG_SWAP=y 18 16 CONFIG_BLK_CGROUP=y 19 17 CONFIG_CFS_BANDWIDTH=y 20 18 CONFIG_RT_GROUP_SCHED=y ··· 30 30 CONFIG_USER_NS=y 31 31 CONFIG_CHECKPOINT_RESTORE=y 32 32 CONFIG_SCHED_AUTOGROUP=y 33 - CONFIG_BLK_DEV_INITRD=y 34 33 CONFIG_EXPERT=y 35 34 # CONFIG_SYSFS_SYSCALL is not set 35 + CONFIG_BPF_LSM=y 36 36 CONFIG_BPF_SYSCALL=y 37 37 CONFIG_USERFAULTFD=y 38 38 # CONFIG_COMPAT_BRK is not set ··· 41 41 CONFIG_TUNE_ZEC12=y 42 42 CONFIG_NR_CPUS=512 43 43 CONFIG_NUMA=y 44 - # CONFIG_NUMA_EMU is not set 45 44 CONFIG_HZ_100=y 46 45 CONFIG_KEXEC_FILE=y 47 46 CONFIG_KEXEC_SIG=y ··· 50 51 CONFIG_VFIO_CCW=m 51 52 CONFIG_VFIO_AP=m 52 53 CONFIG_CRASH_DUMP=y 53 - CONFIG_HIBERNATION=y 54 - CONFIG_PM_DEBUG=y 55 54 CONFIG_PROTECTED_VIRTUALIZATION_GUEST=y 56 55 CONFIG_CMM=m 57 56 CONFIG_APPLDATA_BASE=y 58 57 CONFIG_KVM=m 59 - CONFIG_VHOST_NET=m 60 - CONFIG_VHOST_VSOCK=m 58 + CONFIG_S390_UNWIND_SELFTEST=m 61 59 CONFIG_OPROFILE=m 62 60 CONFIG_KPROBES=y 63 61 CONFIG_JUMP_LABEL=y ··· 70 74 CONFIG_BLK_WBT=y 71 75 CONFIG_BLK_CGROUP_IOLATENCY=y 72 76 CONFIG_BLK_CGROUP_IOCOST=y 77 + CONFIG_BLK_INLINE_ENCRYPTION=y 78 + CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y 73 79 CONFIG_PARTITION_ADVANCED=y 74 80 CONFIG_IBM_PARTITION=y 75 81 CONFIG_BSD_DISKLABEL=y ··· 89 91 CONFIG_FRONTSWAP=y 90 92 CONFIG_MEM_SOFT_DIRTY=y 91 93 CONFIG_ZSWAP=y 92 - CONFIG_ZBUD=m 93 94 CONFIG_ZSMALLOC=m 94 95 CONFIG_ZSMALLOC_STAT=y 95 96 CONFIG_DEFERRED_STRUCT_PAGE_INIT=y ··· 122 125 CONFIG_NET_IPVTI=m 123 126 CONFIG_INET_AH=m 124 127 CONFIG_INET_ESP=m 128 + CONFIG_INET_ESPINTCP=y 125 129 CONFIG_INET_IPCOMP=m 126 130 CONFIG_INET_DIAG=m 127 131 CONFIG_INET_UDP_DIAG=m ··· 137 139 CONFIG_IPV6_ROUTER_PREF=y 138 140 CONFIG_INET6_AH=m 139 141 CONFIG_INET6_ESP=m 142 + CONFIG_INET6_ESPINTCP=y 
140 143 CONFIG_INET6_IPCOMP=m 141 144 CONFIG_IPV6_MIP6=m 142 145 CONFIG_IPV6_VTI=m ··· 145 146 CONFIG_IPV6_GRE=m 146 147 CONFIG_IPV6_MULTIPLE_TABLES=y 147 148 CONFIG_IPV6_SUBTREES=y 149 + CONFIG_IPV6_RPL_LWTUNNEL=y 150 + CONFIG_MPTCP=y 148 151 CONFIG_NETFILTER=y 152 + CONFIG_BRIDGE_NETFILTER=m 149 153 CONFIG_NF_CONNTRACK=m 150 154 CONFIG_NF_CONNTRACK_SECMARK=y 151 155 CONFIG_NF_CONNTRACK_EVENTS=y ··· 313 311 CONFIG_L2TP_IP=m 314 312 CONFIG_L2TP_ETH=m 315 313 CONFIG_BRIDGE=m 314 + CONFIG_BRIDGE_MRP=y 316 315 CONFIG_VLAN_8021Q=m 317 316 CONFIG_VLAN_8021Q_GVRP=y 318 317 CONFIG_NET_SCHED=y ··· 338 335 CONFIG_NET_SCH_FQ_CODEL=m 339 336 CONFIG_NET_SCH_INGRESS=m 340 337 CONFIG_NET_SCH_PLUG=m 338 + CONFIG_NET_SCH_ETS=m 341 339 CONFIG_NET_CLS_BASIC=m 342 340 CONFIG_NET_CLS_TCINDEX=m 343 341 CONFIG_NET_CLS_ROUTE4=m ··· 362 358 CONFIG_NET_ACT_SIMP=m 363 359 CONFIG_NET_ACT_SKBEDIT=m 364 360 CONFIG_NET_ACT_CSUM=m 361 + CONFIG_NET_ACT_GATE=m 365 362 CONFIG_DNS_RESOLVER=y 366 363 CONFIG_OPENVSWITCH=m 367 364 CONFIG_VSOCKETS=m ··· 373 368 CONFIG_NET_PKTGEN=m 374 369 # CONFIG_NET_DROP_MONITOR is not set 375 370 CONFIG_PCI=y 371 + # CONFIG_PCIEASPM is not set 376 372 CONFIG_HOTPLUG_PCI=y 377 373 CONFIG_HOTPLUG_PCI_S390=y 378 374 CONFIG_UEVENT_HELPER=y ··· 436 430 CONFIG_DM_MULTIPATH=m 437 431 CONFIG_DM_MULTIPATH_QL=m 438 432 CONFIG_DM_MULTIPATH_ST=m 433 + CONFIG_DM_MULTIPATH_HST=m 439 434 CONFIG_DM_DELAY=m 440 435 CONFIG_DM_UEVENT=y 441 436 CONFIG_DM_FLAKEY=m ··· 451 444 CONFIG_IFB=m 452 445 CONFIG_MACVLAN=m 453 446 CONFIG_MACVTAP=m 447 + CONFIG_VXLAN=m 448 + CONFIG_BAREUDP=m 454 449 CONFIG_TUN=m 455 450 CONFIG_VETH=m 456 451 CONFIG_VIRTIO_NET=m ··· 486 477 CONFIG_MLX4_EN=m 487 478 CONFIG_MLX5_CORE=m 488 479 CONFIG_MLX5_CORE_EN=y 489 - # CONFIG_MLXFW is not set 490 480 # CONFIG_NET_VENDOR_MICREL is not set 491 481 # CONFIG_NET_VENDOR_MICROCHIP is not set 492 482 # CONFIG_NET_VENDOR_MICROSEMI is not set ··· 518 510 # CONFIG_NET_VENDOR_TI is not set 519 511 # CONFIG_NET_VENDOR_VIA is 
not set 520 512 # CONFIG_NET_VENDOR_WIZNET is not set 513 + # CONFIG_NET_VENDOR_XILINX is not set 521 514 CONFIG_PPP=m 522 515 CONFIG_PPP_BSDCOMP=m 523 516 CONFIG_PPP_DEFLATE=m ··· 566 557 CONFIG_VIRTIO_PCI=m 567 558 CONFIG_VIRTIO_BALLOON=m 568 559 CONFIG_VIRTIO_INPUT=y 560 + CONFIG_VHOST_NET=m 561 + CONFIG_VHOST_VSOCK=m 569 562 CONFIG_S390_CCW_IOMMU=y 570 563 CONFIG_S390_AP_IOMMU=y 571 564 CONFIG_EXT4_FS=y ··· 611 600 CONFIG_UDF_FS=m 612 601 CONFIG_MSDOS_FS=m 613 602 CONFIG_VFAT_FS=m 603 + CONFIG_EXFAT_FS=m 614 604 CONFIG_NTFS_FS=m 615 605 CONFIG_NTFS_RW=y 616 606 CONFIG_PROC_KCORE=y ··· 654 642 CONFIG_DLM=m 655 643 CONFIG_UNICODE=y 656 644 CONFIG_PERSISTENT_KEYRINGS=y 657 - CONFIG_BIG_KEYS=y 658 645 CONFIG_ENCRYPTED_KEYS=m 646 + CONFIG_KEY_NOTIFICATIONS=y 659 647 CONFIG_SECURITY=y 660 648 CONFIG_SECURITY_NETWORK=y 661 649 CONFIG_SECURITY_SELINUX=y ··· 679 667 CONFIG_CRYPTO_DH=m 680 668 CONFIG_CRYPTO_ECDH=m 681 669 CONFIG_CRYPTO_ECRDSA=m 670 + CONFIG_CRYPTO_CURVE25519=m 671 + CONFIG_CRYPTO_GCM=y 682 672 CONFIG_CRYPTO_CHACHA20POLY1305=m 683 673 CONFIG_CRYPTO_AEGIS128=m 674 + CONFIG_CRYPTO_SEQIV=y 684 675 CONFIG_CRYPTO_CFB=m 685 676 CONFIG_CRYPTO_LRW=m 686 677 CONFIG_CRYPTO_OFB=m ··· 693 678 CONFIG_CRYPTO_XCBC=m 694 679 CONFIG_CRYPTO_VMAC=m 695 680 CONFIG_CRYPTO_CRC32=m 681 + CONFIG_CRYPTO_BLAKE2S=m 696 682 CONFIG_CRYPTO_MICHAEL_MIC=m 697 683 CONFIG_CRYPTO_RMD128=m 698 684 CONFIG_CRYPTO_RMD160=m ··· 710 694 CONFIG_CRYPTO_CAMELLIA=m 711 695 CONFIG_CRYPTO_CAST5=m 712 696 CONFIG_CRYPTO_CAST6=m 697 + CONFIG_CRYPTO_DES=m 713 698 CONFIG_CRYPTO_FCRYPT=m 714 699 CONFIG_CRYPTO_KHAZAD=m 715 700 CONFIG_CRYPTO_SALSA20=m ··· 729 712 CONFIG_CRYPTO_USER_API_RNG=m 730 713 CONFIG_CRYPTO_USER_API_AEAD=m 731 714 CONFIG_CRYPTO_STATS=y 715 + CONFIG_CRYPTO_LIB_BLAKE2S=m 716 + CONFIG_CRYPTO_LIB_CURVE25519=m 717 + CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m 732 718 CONFIG_ZCRYPT=m 733 719 CONFIG_PKEY=m 734 720 CONFIG_CRYPTO_PAES_S390=m ··· 745 725 CONFIG_CRYPTO_GHASH_S390=m 746 726 
CONFIG_CRYPTO_CRC32_S390=y 747 727 CONFIG_CORDIC=m 728 + CONFIG_PRIME_NUMBERS=m 748 729 CONFIG_CRC4=m 749 730 CONFIG_CRC7=m 750 731 CONFIG_CRC8=m ··· 760 739 CONFIG_MAGIC_SYSRQ=y 761 740 CONFIG_DEBUG_MEMORY_INIT=y 762 741 CONFIG_PANIC_ON_OOPS=y 742 + CONFIG_TEST_LOCKUP=m 763 743 CONFIG_BUG_ON_DATA_CORRUPTION=y 764 744 CONFIG_RCU_TORTURE_TEST=m 765 745 CONFIG_RCU_CPU_STALL_TIMEOUT=60 766 746 CONFIG_LATENCYTOP=y 747 + CONFIG_BOOTTIME_TRACING=y 767 748 CONFIG_FUNCTION_PROFILER=y 768 749 CONFIG_STACK_TRACER=y 769 750 CONFIG_SCHED_TRACER=y
+5
arch/s390/configs/zfcpdump_defconfig
··· 30 30 # CONFIG_BOUNCE is not set 31 31 CONFIG_NET=y 32 32 # CONFIG_IUCV is not set 33 + # CONFIG_ETHTOOL_NETLINK is not set 33 34 CONFIG_DEVTMPFS=y 34 35 CONFIG_BLK_DEV_RAM=y 35 36 # CONFIG_BLK_DEV_XPRAM is not set ··· 56 55 # CONFIG_MONWRITER is not set 57 56 # CONFIG_S390_VMUR is not set 58 57 # CONFIG_HID is not set 58 + # CONFIG_VIRTIO_MENU is not set 59 + # CONFIG_VHOST_MENU is not set 59 60 # CONFIG_IOMMU_SUPPORT is not set 60 61 # CONFIG_DNOTIFY is not set 61 62 # CONFIG_INOTIFY_USER is not set ··· 65 62 # CONFIG_MISC_FILESYSTEMS is not set 66 63 # CONFIG_NETWORK_FILESYSTEMS is not set 67 64 CONFIG_LSM="yama,loadpin,safesetid,integrity" 65 + # CONFIG_ZLIB_DFLTCC is not set 68 66 CONFIG_PRINTK_TIME=y 67 + # CONFIG_SYMBOLIC_ERRNAME is not set 69 68 CONFIG_DEBUG_INFO=y 70 69 CONFIG_DEBUG_FS=y 71 70 CONFIG_DEBUG_KERNEL=y
+4 -4
arch/s390/include/asm/kvm_host.h
··· 31 31 #define KVM_USER_MEM_SLOTS 32 32 32 33 33 /* 34 - * These seem to be used for allocating ->chip in the routing table, 35 - * which we don't use. 4096 is an out-of-thin-air value. If we need 36 - * to look at ->chip later on, we'll need to revisit this. 34 + * These seem to be used for allocating ->chip in the routing table, which we 35 + * don't use. 1 is as small as we can get to reduce the needed memory. If we 36 + * need to look at ->chip later on, we'll need to revisit this. 37 37 */ 38 38 #define KVM_NR_IRQCHIPS 1 39 - #define KVM_IRQCHIP_NUM_PINS 4096 39 + #define KVM_IRQCHIP_NUM_PINS 1 40 40 #define KVM_HALT_POLL_NS_DEFAULT 50000 41 41 42 42 /* s390-specific vcpu->requests bit members */
+2 -2
arch/s390/kernel/perf_cpum_cf_events.c
··· 292 292 CPUMF_EVENT_ATTR(cf_z15, DFLT_ACCESS, 0x00f7); 293 293 CPUMF_EVENT_ATTR(cf_z15, DFLT_CYCLES, 0x00fc); 294 294 CPUMF_EVENT_ATTR(cf_z15, DFLT_CC, 0x00108); 295 - CPUMF_EVENT_ATTR(cf_z15, DFLT_CCERROR, 0x00109); 295 + CPUMF_EVENT_ATTR(cf_z15, DFLT_CCFINISH, 0x00109); 296 296 CPUMF_EVENT_ATTR(cf_z15, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0); 297 297 CPUMF_EVENT_ATTR(cf_z15, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1); 298 298 ··· 629 629 CPUMF_EVENT_PTR(cf_z15, DFLT_ACCESS), 630 630 CPUMF_EVENT_PTR(cf_z15, DFLT_CYCLES), 631 631 CPUMF_EVENT_PTR(cf_z15, DFLT_CC), 632 - CPUMF_EVENT_PTR(cf_z15, DFLT_CCERROR), 632 + CPUMF_EVENT_PTR(cf_z15, DFLT_CCFINISH), 633 633 CPUMF_EVENT_PTR(cf_z15, MT_DIAG_CYCLES_ONE_THR_ACTIVE), 634 634 CPUMF_EVENT_PTR(cf_z15, MT_DIAG_CYCLES_TWO_THR_ACTIVE), 635 635 NULL,
+10 -1
arch/s390/kernel/perf_cpum_sf.c
··· 881 881 return err; 882 882 } 883 883 884 + static bool is_callchain_event(struct perf_event *event) 885 + { 886 + u64 sample_type = event->attr.sample_type; 887 + 888 + return sample_type & (PERF_SAMPLE_CALLCHAIN | PERF_SAMPLE_REGS_USER | 889 + PERF_SAMPLE_STACK_USER); 890 + } 891 + 884 892 static int cpumsf_pmu_event_init(struct perf_event *event) 885 893 { 886 894 int err; 887 895 888 896 /* No support for taken branch sampling */ 889 - if (has_branch_stack(event)) 897 + /* No support for callchain, stacks and registers */ 898 + if (has_branch_stack(event) || is_callchain_event(event)) 890 899 return -EOPNOTSUPP; 891 900 892 901 switch (event->attr.type) {
+1
arch/s390/kernel/setup.c
··· 1100 1100 if (IS_ENABLED(CONFIG_EXPOLINE_AUTO)) 1101 1101 nospec_auto_detect(); 1102 1102 1103 + jump_label_init(); 1103 1104 parse_early_param(); 1104 1105 #ifdef CONFIG_CRASH_DUMP 1105 1106 /* Deactivate elfcorehdr= kernel parameter */
+1 -1
arch/s390/mm/hugetlbpage.c
··· 117 117 _PAGE_YOUNG); 118 118 #ifdef CONFIG_MEM_SOFT_DIRTY 119 119 pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_SOFT_DIRTY, 120 - _PAGE_DIRTY); 120 + _PAGE_SOFT_DIRTY); 121 121 #endif 122 122 pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_NOEXEC, 123 123 _PAGE_NOEXEC);
+9 -5
arch/s390/mm/maccess.c
··· 62 62 long copied; 63 63 64 64 spin_lock_irqsave(&s390_kernel_write_lock, flags); 65 - while (size) { 66 - copied = s390_kernel_write_odd(tmp, src, size); 67 - tmp += copied; 68 - src += copied; 69 - size -= copied; 65 + if (!(flags & PSW_MASK_DAT)) { 66 + memcpy(dst, src, size); 67 + } else { 68 + while (size) { 69 + copied = s390_kernel_write_odd(tmp, src, size); 70 + tmp += copied; 71 + src += copied; 72 + size -= copied; 73 + } 70 74 } 71 75 spin_unlock_irqrestore(&s390_kernel_write_lock, flags); 72 76
+12 -1
arch/s390/pci/pci_event.c
··· 94 94 } 95 95 zdev->fh = ccdf->fh; 96 96 zdev->state = ZPCI_FN_STATE_CONFIGURED; 97 - zpci_create_device(zdev); 97 + ret = zpci_enable_device(zdev); 98 + if (ret) 99 + break; 100 + 101 + pdev = pci_scan_single_device(zdev->zbus->bus, zdev->devfn); 102 + if (!pdev) 103 + break; 104 + 105 + pci_bus_add_device(pdev); 106 + pci_lock_rescan_remove(); 107 + pci_bus_add_devices(zdev->zbus->bus); 108 + pci_unlock_rescan_remove(); 98 109 break; 99 110 case 0x0302: /* Reserved -> Standby */ 100 111 if (!zdev) {
+2 -2
arch/x86/boot/compressed/Makefile
··· 90 90 91 91 vmlinux-objs-$(CONFIG_ACPI) += $(obj)/acpi.o 92 92 93 - vmlinux-objs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a 94 93 vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o 94 + efi-obj-$(CONFIG_EFI_STUB) = $(objtree)/drivers/firmware/efi/libstub/lib.a 95 95 96 96 # The compressed kernel is built with -fPIC/-fPIE so that a boot loader 97 97 # can place it anywhere in memory and it will still run. However, since ··· 115 115 quiet_cmd_check-and-link-vmlinux = LD $@ 116 116 cmd_check-and-link-vmlinux = $(cmd_check_data_rel); $(cmd_ld) 117 117 118 - $(obj)/vmlinux: $(vmlinux-objs-y) FORCE 118 + $(obj)/vmlinux: $(vmlinux-objs-y) $(efi-obj-y) FORCE 119 119 $(call if_changed,check-and-link-vmlinux) 120 120 121 121 OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
+11 -3
arch/x86/entry/Makefile
··· 7 7 UBSAN_SANITIZE := n 8 8 KCOV_INSTRUMENT := n 9 9 10 - CFLAGS_REMOVE_common.o = $(CC_FLAGS_FTRACE) -fstack-protector -fstack-protector-strong 11 - CFLAGS_REMOVE_syscall_32.o = $(CC_FLAGS_FTRACE) -fstack-protector -fstack-protector-strong 12 - CFLAGS_REMOVE_syscall_64.o = $(CC_FLAGS_FTRACE) -fstack-protector -fstack-protector-strong 10 + CFLAGS_REMOVE_common.o = $(CC_FLAGS_FTRACE) 11 + CFLAGS_REMOVE_syscall_64.o = $(CC_FLAGS_FTRACE) 12 + CFLAGS_REMOVE_syscall_32.o = $(CC_FLAGS_FTRACE) 13 + CFLAGS_REMOVE_syscall_x32.o = $(CC_FLAGS_FTRACE) 14 + 15 + CFLAGS_common.o += -fno-stack-protector 16 + CFLAGS_syscall_64.o += -fno-stack-protector 17 + CFLAGS_syscall_32.o += -fno-stack-protector 18 + CFLAGS_syscall_x32.o += -fno-stack-protector 13 19 14 20 CFLAGS_syscall_64.o += $(call cc-option,-Wno-override-init,) 15 21 CFLAGS_syscall_32.o += $(call cc-option,-Wno-override-init,) 22 + CFLAGS_syscall_x32.o += $(call cc-option,-Wno-override-init,) 23 + 16 24 obj-y := entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o 17 25 obj-y += common.o 18 26
+47 -4
arch/x86/entry/common.c
··· 45 45 #define CREATE_TRACE_POINTS 46 46 #include <trace/events/syscalls.h> 47 47 48 + /* Check that the stack and regs on entry from user mode are sane. */ 49 + static noinstr void check_user_regs(struct pt_regs *regs) 50 + { 51 + if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) { 52 + /* 53 + * Make sure that the entry code gave us a sensible EFLAGS 54 + * register. Native because we want to check the actual CPU 55 + * state, not the interrupt state as imagined by Xen. 56 + */ 57 + unsigned long flags = native_save_fl(); 58 + WARN_ON_ONCE(flags & (X86_EFLAGS_AC | X86_EFLAGS_DF | 59 + X86_EFLAGS_NT)); 60 + 61 + /* We think we came from user mode. Make sure pt_regs agrees. */ 62 + WARN_ON_ONCE(!user_mode(regs)); 63 + 64 + /* 65 + * All entries from user mode (except #DF) should be on the 66 + * normal thread stack and should have user pt_regs in the 67 + * correct location. 68 + */ 69 + WARN_ON_ONCE(!on_thread_stack()); 70 + WARN_ON_ONCE(regs != task_pt_regs(current)); 71 + } 72 + } 73 + 48 74 #ifdef CONFIG_CONTEXT_TRACKING 49 75 /** 50 76 * enter_from_user_mode - Establish state when coming from user mode ··· 152 126 struct thread_info *ti = current_thread_info(); 153 127 unsigned long ret = 0; 154 128 u32 work; 155 - 156 - if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) 157 - BUG_ON(regs != task_pt_regs(current)); 158 129 159 130 work = READ_ONCE(ti->flags); 160 131 ··· 294 271 #endif 295 272 } 296 273 297 - __visible noinstr void prepare_exit_to_usermode(struct pt_regs *regs) 274 + static noinstr void prepare_exit_to_usermode(struct pt_regs *regs) 298 275 { 299 276 instrumentation_begin(); 300 277 __prepare_exit_to_usermode(regs); ··· 369 346 { 370 347 struct thread_info *ti; 371 348 349 + check_user_regs(regs); 350 + 372 351 enter_from_user_mode(); 373 352 instrumentation_begin(); 374 353 ··· 434 409 /* Handles int $0x80 */ 435 410 __visible noinstr void do_int80_syscall_32(struct pt_regs *regs) 436 411 { 412 + check_user_regs(regs); 413 + 437 414 enter_from_user_mode(); 438 415 
instrumentation_begin(); 439 416 ··· 487 460 vdso_image_32.sym_int80_landing_pad; 488 461 bool success; 489 462 463 + check_user_regs(regs); 464 + 490 465 /* 491 466 * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward 492 467 * so that 'regs->ip -= 2' lands back on an int $0x80 instruction. ··· 539 510 (regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF | X86_EFLAGS_VM)) == 0; 540 511 #endif 541 512 } 513 + 514 + /* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */ 515 + __visible noinstr long do_SYSENTER_32(struct pt_regs *regs) 516 + { 517 + /* SYSENTER loses RSP, but the vDSO saved it in RBP. */ 518 + regs->sp = regs->bp; 519 + 520 + /* SYSENTER clobbers EFLAGS.IF. Assume it was set in usermode. */ 521 + regs->flags |= X86_EFLAGS_IF; 522 + 523 + return do_fast_syscall_32(regs); 524 + } 542 525 #endif 543 526 544 527 SYSCALL_DEFINE0(ni_syscall) ··· 594 553 bool noinstr idtentry_enter_cond_rcu(struct pt_regs *regs) 595 554 { 596 555 if (user_mode(regs)) { 556 + check_user_regs(regs); 597 557 enter_from_user_mode(); 598 558 return false; 599 559 } ··· 728 686 */ 729 687 void noinstr idtentry_enter_user(struct pt_regs *regs) 730 688 { 689 + check_user_regs(regs); 731 690 enter_from_user_mode(); 732 691 } 733 692
+2 -3
arch/x86/entry/entry_32.S
··· 933 933 934 934 .Lsysenter_past_esp: 935 935 pushl $__USER_DS /* pt_regs->ss */ 936 - pushl %ebp /* pt_regs->sp (stashed in bp) */ 936 + pushl $0 /* pt_regs->sp (placeholder) */ 937 937 pushfl /* pt_regs->flags (except IF = 0) */ 938 - orl $X86_EFLAGS_IF, (%esp) /* Fix IF */ 939 938 pushl $__USER_CS /* pt_regs->cs */ 940 939 pushl $0 /* pt_regs->ip = 0 (placeholder) */ 941 940 pushl %eax /* pt_regs->orig_ax */ ··· 964 965 .Lsysenter_flags_fixed: 965 966 966 967 movl %esp, %eax 967 - call do_fast_syscall_32 968 + call do_SYSENTER_32 968 969 /* XEN PV guests always use IRET path */ 969 970 ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \ 970 971 "jmp .Lsyscall_32_done", X86_FEATURE_XENPV
+16 -15
arch/x86/entry/entry_64_compat.S
··· 57 57 58 58 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp 59 59 60 + /* Construct struct pt_regs on stack */ 61 + pushq $__USER32_DS /* pt_regs->ss */ 62 + pushq $0 /* pt_regs->sp = 0 (placeholder) */ 63 + 64 + /* 65 + * Push flags. This is nasty. First, interrupts are currently 66 + * off, but we need pt_regs->flags to have IF set. Second, if TS 67 + * was set in usermode, it's still set, and we're singlestepping 68 + * through this code. do_SYSENTER_32() will fix up IF. 69 + */ 70 + pushfq /* pt_regs->flags (except IF = 0) */ 71 + pushq $__USER32_CS /* pt_regs->cs */ 72 + pushq $0 /* pt_regs->ip = 0 (placeholder) */ 73 + SYM_INNER_LABEL(entry_SYSENTER_compat_after_hwframe, SYM_L_GLOBAL) 74 + 60 75 /* 61 76 * User tracing code (ptrace or signal handlers) might assume that 62 77 * the saved RAX contains a 32-bit number when we're invoking a 32-bit ··· 81 66 */ 82 67 movl %eax, %eax 83 68 84 - /* Construct struct pt_regs on stack */ 85 - pushq $__USER32_DS /* pt_regs->ss */ 86 - pushq %rbp /* pt_regs->sp (stashed in bp) */ 87 - 88 - /* 89 - * Push flags. This is nasty. First, interrupts are currently 90 - * off, but we need pt_regs->flags to have IF set. Second, even 91 - * if TF was set when SYSENTER started, it's clear by now. We fix 92 - * that later using TIF_SINGLESTEP. 93 - */ 94 - pushfq /* pt_regs->flags (except IF = 0) */ 95 - orl $X86_EFLAGS_IF, (%rsp) /* Fix saved flags */ 96 - pushq $__USER32_CS /* pt_regs->cs */ 97 - pushq $0 /* pt_regs->ip = 0 (placeholder) */ 98 69 pushq %rax /* pt_regs->orig_ax */ 99 70 pushq %rdi /* pt_regs->di */ 100 71 pushq %rsi /* pt_regs->si */ ··· 136 135 .Lsysenter_flags_fixed: 137 136 138 137 movq %rsp, %rdi 139 - call do_fast_syscall_32 138 + call do_SYSENTER_32 140 139 /* XEN PV guests always use IRET path */ 141 140 ALTERNATIVE "testl %eax, %eax; jz swapgs_restore_regs_and_return_to_usermode", \ 142 141 "jmp swapgs_restore_regs_and_return_to_usermode", X86_FEATURE_XENPV
+2 -1
arch/x86/hyperv/hv_init.c
··· 377 377 378 378 hv_hypercall_pg = __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, 379 379 VMALLOC_END, GFP_KERNEL, PAGE_KERNEL_ROX, 380 - VM_FLUSH_RESET_PERMS, NUMA_NO_NODE, __func__); 380 + VM_FLUSH_RESET_PERMS, NUMA_NO_NODE, 381 + __builtin_return_address(0)); 381 382 if (hv_hypercall_pg == NULL) { 382 383 wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0); 383 384 goto remove_cpuhp_state;
+5
arch/x86/include/asm/fpu/internal.h
··· 623 623 * MXCSR and XCR definitions: 624 624 */ 625 625 626 + static inline void ldmxcsr(u32 mxcsr) 627 + { 628 + asm volatile("ldmxcsr %0" :: "m" (mxcsr)); 629 + } 630 + 626 631 extern unsigned int mxcsr_feature_mask; 627 632 628 633 #define XCR_XFEATURE_ENABLED_MASK 0x00000000
+27 -38
arch/x86/include/asm/idtentry.h
··· 353 353 354 354 #else /* CONFIG_X86_64 */ 355 355 356 - /* Maps to a regular IDTENTRY on 32bit for now */ 357 - # define DECLARE_IDTENTRY_IST DECLARE_IDTENTRY 358 - # define DEFINE_IDTENTRY_IST DEFINE_IDTENTRY 359 - 360 356 /** 361 357 * DECLARE_IDTENTRY_DF - Declare functions for double fault 32bit variant 362 358 * @vector: Vector number (ignored for C) ··· 383 387 #endif /* !CONFIG_X86_64 */ 384 388 385 389 /* C-Code mapping */ 390 + #define DECLARE_IDTENTRY_NMI DECLARE_IDTENTRY_RAW 391 + #define DEFINE_IDTENTRY_NMI DEFINE_IDTENTRY_RAW 392 + 393 + #ifdef CONFIG_X86_64 386 394 #define DECLARE_IDTENTRY_MCE DECLARE_IDTENTRY_IST 387 395 #define DEFINE_IDTENTRY_MCE DEFINE_IDTENTRY_IST 388 396 #define DEFINE_IDTENTRY_MCE_USER DEFINE_IDTENTRY_NOIST 389 397 390 - #define DECLARE_IDTENTRY_NMI DECLARE_IDTENTRY_RAW 391 - #define DEFINE_IDTENTRY_NMI DEFINE_IDTENTRY_RAW 392 - 393 398 #define DECLARE_IDTENTRY_DEBUG DECLARE_IDTENTRY_IST 394 399 #define DEFINE_IDTENTRY_DEBUG DEFINE_IDTENTRY_IST 395 400 #define DEFINE_IDTENTRY_DEBUG_USER DEFINE_IDTENTRY_NOIST 396 - 397 - /** 398 - * DECLARE_IDTENTRY_XEN - Declare functions for XEN redirect IDT entry points 399 - * @vector: Vector number (ignored for C) 400 - * @func: Function name of the entry point 401 - * 402 - * Used for xennmi and xendebug redirections. No DEFINE as this is all ASM 403 - * indirection magic. 
404 - */ 405 - #define DECLARE_IDTENTRY_XEN(vector, func) \ 406 - asmlinkage void xen_asm_exc_xen##func(void); \ 407 - asmlinkage void asm_exc_xen##func(void) 401 + #endif 408 402 409 403 #else /* !__ASSEMBLY__ */ 410 404 ··· 441 455 # define DECLARE_IDTENTRY_MCE(vector, func) \ 442 456 DECLARE_IDTENTRY(vector, func) 443 457 444 - # define DECLARE_IDTENTRY_DEBUG(vector, func) \ 445 - DECLARE_IDTENTRY(vector, func) 446 - 447 458 /* No ASM emitted for DF as this goes through a C shim */ 448 459 # define DECLARE_IDTENTRY_DF(vector, func) 449 460 ··· 451 468 452 469 /* No ASM code emitted for NMI */ 453 470 #define DECLARE_IDTENTRY_NMI(vector, func) 454 - 455 - /* XEN NMI and DB wrapper */ 456 - #define DECLARE_IDTENTRY_XEN(vector, func) \ 457 - idtentry vector asm_exc_xen##func exc_##func has_error_code=0 458 471 459 472 /* 460 473 * ASM code to emit the common vector entry stubs where each stub is ··· 469 490 .align 8 470 491 SYM_CODE_START(irq_entries_start) 471 492 vector=FIRST_EXTERNAL_VECTOR 472 - pos = . 473 493 .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR) 474 494 UNWIND_HINT_IRET_REGS 495 + 0 : 475 496 .byte 0x6a, vector 476 497 jmp asm_common_interrupt 477 498 nop 478 499 /* Ensure that the above is 8 bytes max */ 479 - . = pos + 8 480 - pos=pos+8 481 - vector=vector+1 500 + . = 0b + 8 501 + vector = vector+1 482 502 .endr 483 503 SYM_CODE_END(irq_entries_start) 484 504 ··· 485 507 .align 8 486 508 SYM_CODE_START(spurious_entries_start) 487 509 vector=FIRST_SYSTEM_VECTOR 488 - pos = . 489 510 .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR) 490 511 UNWIND_HINT_IRET_REGS 512 + 0 : 491 513 .byte 0x6a, vector 492 514 jmp asm_spurious_interrupt 493 515 nop 494 516 /* Ensure that the above is 8 bytes max */ 495 - . = pos + 8 496 - pos=pos+8 497 - vector=vector+1 517 + . 
= 0b + 8 518 + vector = vector+1 498 519 .endr 499 520 SYM_CODE_END(spurious_entries_start) 500 521 #endif ··· 542 565 DECLARE_IDTENTRY_RAW_ERRORCODE(X86_TRAP_PF, exc_page_fault); 543 566 544 567 #ifdef CONFIG_X86_MCE 568 + #ifdef CONFIG_X86_64 545 569 DECLARE_IDTENTRY_MCE(X86_TRAP_MC, exc_machine_check); 570 + #else 571 + DECLARE_IDTENTRY_RAW(X86_TRAP_MC, exc_machine_check); 572 + #endif 546 573 #endif 547 574 548 575 /* NMI */ 549 576 DECLARE_IDTENTRY_NMI(X86_TRAP_NMI, exc_nmi); 550 - DECLARE_IDTENTRY_XEN(X86_TRAP_NMI, nmi); 577 + #if defined(CONFIG_XEN_PV) && defined(CONFIG_X86_64) 578 + DECLARE_IDTENTRY_RAW(X86_TRAP_NMI, xenpv_exc_nmi); 579 + #endif 551 580 552 581 /* #DB */ 582 + #ifdef CONFIG_X86_64 553 583 DECLARE_IDTENTRY_DEBUG(X86_TRAP_DB, exc_debug); 554 - DECLARE_IDTENTRY_XEN(X86_TRAP_DB, debug); 584 + #else 585 + DECLARE_IDTENTRY_RAW(X86_TRAP_DB, exc_debug); 586 + #endif 587 + #if defined(CONFIG_XEN_PV) && defined(CONFIG_X86_64) 588 + DECLARE_IDTENTRY_RAW(X86_TRAP_DB, xenpv_exc_debug); 589 + #endif 555 590 556 591 /* #DF */ 557 592 DECLARE_IDTENTRY_DF(X86_TRAP_DF, exc_double_fault); ··· 624 635 625 636 #if IS_ENABLED(CONFIG_HYPERV) 626 637 DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_CALLBACK_VECTOR, sysvec_hyperv_callback); 627 - DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_REENLIGHTENMENT_VECTOR, sysvec_hyperv_reenlightenment); 628 - DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_STIMER0_VECTOR, sysvec_hyperv_stimer0); 638 + DECLARE_IDTENTRY_SYSVEC(HYPERV_REENLIGHTENMENT_VECTOR, sysvec_hyperv_reenlightenment); 639 + DECLARE_IDTENTRY_SYSVEC(HYPERV_STIMER0_VECTOR, sysvec_hyperv_stimer0); 629 640 #endif 630 641 631 642 #if IS_ENABLED(CONFIG_ACRN_GUEST)
+16
arch/x86/include/asm/io_bitmap.h
··· 19 19 void io_bitmap_share(struct task_struct *tsk); 20 20 void io_bitmap_exit(struct task_struct *tsk); 21 21 22 + static inline void native_tss_invalidate_io_bitmap(void) 23 + { 24 + /* 25 + * Invalidate the I/O bitmap by moving io_bitmap_base outside the 26 + * TSS limit so any subsequent I/O access from user space will 27 + * trigger a #GP. 28 + * 29 + * This is correct even when VMEXIT rewrites the TSS limit 30 + * to 0x67 as the only requirement is that the base points 31 + * outside the limit. 32 + */ 33 + this_cpu_write(cpu_tss_rw.x86_tss.io_bitmap_base, 34 + IO_BITMAP_OFFSET_INVALID); 35 + } 36 + 22 37 void native_tss_update_io_bitmap(void); 23 38 24 39 #ifdef CONFIG_PARAVIRT_XXL 25 40 #include <asm/paravirt.h> 26 41 #else 27 42 #define tss_update_io_bitmap native_tss_update_io_bitmap 43 + #define tss_invalidate_io_bitmap native_tss_invalidate_io_bitmap 28 44 #endif 29 45 30 46 #else
+1
arch/x86/include/asm/iosf_mbi.h
··· 39 39 #define BT_MBI_UNIT_PMC 0x04 40 40 #define BT_MBI_UNIT_GFX 0x06 41 41 #define BT_MBI_UNIT_SMI 0x0C 42 + #define BT_MBI_UNIT_CCK 0x14 42 43 #define BT_MBI_UNIT_USB 0x43 43 44 #define BT_MBI_UNIT_SATA 0xA3 44 45 #define BT_MBI_UNIT_PCIE 0xA6
+5
arch/x86/include/asm/paravirt.h
··· 302 302 } 303 303 304 304 #ifdef CONFIG_X86_IOPL_IOPERM 305 + static inline void tss_invalidate_io_bitmap(void) 306 + { 307 + PVOP_VCALL0(cpu.invalidate_io_bitmap); 308 + } 309 + 305 310 static inline void tss_update_io_bitmap(void) 306 311 { 307 312 PVOP_VCALL0(cpu.update_io_bitmap);
+1
arch/x86/include/asm/paravirt_types.h
··· 141 141 void (*load_sp0)(unsigned long sp0); 142 142 143 143 #ifdef CONFIG_X86_IOPL_IOPERM 144 + void (*invalidate_io_bitmap)(void); 144 145 void (*update_io_bitmap)(void); 145 146 #endif 146 147
+3 -2
arch/x86/include/uapi/asm/kvm.h
··· 408 408 }; 409 409 410 410 struct kvm_vmx_nested_state_hdr { 411 - __u32 flags; 412 411 __u64 vmxon_pa; 413 412 __u64 vmcs12_pa; 414 - __u64 preemption_timer_deadline; 415 413 416 414 struct { 417 415 __u16 flags; 418 416 } smm; 417 + 418 + __u32 flags; 419 + __u64 preemption_timer_deadline; 419 420 }; 420 421 421 422 struct kvm_svm_nested_state_data {
+5 -5
arch/x86/kernel/apic/io_apic.c
··· 2316 2316 ip->irqdomain = irq_domain_create_linear(fn, hwirqs, cfg->ops, 2317 2317 (void *)(long)ioapic); 2318 2318 2319 - /* Release fw handle if it was allocated above */ 2320 - if (!cfg->dev) 2321 - irq_domain_free_fwnode(fn); 2322 - 2323 - if (!ip->irqdomain) 2319 + if (!ip->irqdomain) { 2320 + /* Release fw handle if it was allocated above */ 2321 + if (!cfg->dev) 2322 + irq_domain_free_fwnode(fn); 2324 2323 return -ENOMEM; 2324 + } 2325 2325 2326 2326 ip->irqdomain->parent = parent; 2327 2327
+12 -6
arch/x86/kernel/apic/msi.c
··· 263 263 msi_default_domain = 264 264 pci_msi_create_irq_domain(fn, &pci_msi_domain_info, 265 265 parent); 266 - irq_domain_free_fwnode(fn); 267 266 } 268 - if (!msi_default_domain) 267 + if (!msi_default_domain) { 268 + irq_domain_free_fwnode(fn); 269 269 pr_warn("failed to initialize irqdomain for MSI/MSI-x.\n"); 270 - else 270 + } else { 271 271 msi_default_domain->flags |= IRQ_DOMAIN_MSI_NOMASK_QUIRK; 272 + } 272 273 } 273 274 274 275 #ifdef CONFIG_IRQ_REMAP ··· 302 301 if (!fn) 303 302 return NULL; 304 303 d = pci_msi_create_irq_domain(fn, &pci_msi_ir_domain_info, parent); 305 - irq_domain_free_fwnode(fn); 304 + if (!d) 305 + irq_domain_free_fwnode(fn); 306 306 return d; 307 307 } 308 308 #endif ··· 366 364 if (fn) { 367 365 dmar_domain = msi_create_irq_domain(fn, &dmar_msi_domain_info, 368 366 x86_vector_domain); 369 - irq_domain_free_fwnode(fn); 367 + if (!dmar_domain) 368 + irq_domain_free_fwnode(fn); 370 369 } 371 370 out: 372 371 mutex_unlock(&dmar_lock); ··· 492 489 } 493 490 494 491 d = msi_create_irq_domain(fn, domain_info, parent); 495 - irq_domain_free_fwnode(fn); 492 + if (!d) { 493 + irq_domain_free_fwnode(fn); 494 + kfree(domain_info); 495 + } 496 496 return d; 497 497 } 498 498
+5 -18
arch/x86/kernel/apic/vector.c
··· 446 446 trace_vector_activate(irqd->irq, apicd->is_managed, 447 447 apicd->can_reserve, reserve); 448 448 449 - /* Nothing to do for fixed assigned vectors */ 450 - if (!apicd->can_reserve && !apicd->is_managed) 451 - return 0; 452 - 453 449 raw_spin_lock_irqsave(&vector_lock, flags); 454 - if (reserve || irqd_is_managed_and_shutdown(irqd)) 450 + if (!apicd->can_reserve && !apicd->is_managed) 451 + assign_irq_vector_any_locked(irqd); 452 + else if (reserve || irqd_is_managed_and_shutdown(irqd)) 455 453 vector_assign_managed_shutdown(irqd); 456 454 else if (apicd->is_managed) 457 455 ret = activate_managed(irqd); ··· 707 709 x86_vector_domain = irq_domain_create_tree(fn, &x86_vector_domain_ops, 708 710 NULL); 709 711 BUG_ON(x86_vector_domain == NULL); 710 - irq_domain_free_fwnode(fn); 711 712 irq_set_default_host(x86_vector_domain); 712 713 713 714 arch_init_msi_domain(x86_vector_domain); ··· 772 775 static int apic_set_affinity(struct irq_data *irqd, 773 776 const struct cpumask *dest, bool force) 774 777 { 775 - struct apic_chip_data *apicd = apic_chip_data(irqd); 776 778 int err; 777 779 778 - /* 779 - * Core code can call here for inactive interrupts. For inactive 780 - * interrupts which use managed or reservation mode there is no 781 - * point in going through the vector assignment right now as the 782 - * activation will assign a vector which fits the destination 783 - * cpumask. Let the core code store the destination mask and be 784 - * done with it. 785 - */ 786 - if (!irqd_is_activated(irqd) && 787 - (apicd->is_managed || apicd->can_reserve)) 788 - return IRQ_SET_MASK_OK; 780 + if (WARN_ON_ONCE(!irqd_is_activated(irqd))) 781 + return -EIO; 789 782 790 783 raw_spin_lock(&vector_lock); 791 784 cpumask_and(vector_searchmask, dest, cpu_online_mask);
+10 -1
arch/x86/kernel/cpu/intel.c
··· 50 50 static u64 msr_test_ctrl_cache __ro_after_init; 51 51 52 52 /* 53 + * With a name like MSR_TEST_CTL it should go without saying, but don't touch 54 + * MSR_TEST_CTL unless the CPU is one of the whitelisted models. Writing it 55 + * on CPUs that do not support SLD can cause fireworks, even when writing '0'. 56 + */ 57 + static bool cpu_model_supports_sld __ro_after_init; 58 + 59 + /* 53 60 * Processors which have self-snooping capability can handle conflicting 54 61 * memory type across CPUs by snooping its own cache. However, there exists 55 62 * CPU models in which having conflicting memory types still leads to ··· 1078 1071 1079 1072 static void split_lock_init(void) 1080 1073 { 1081 - split_lock_verify_msr(sld_state != sld_off); 1074 + if (cpu_model_supports_sld) 1075 + split_lock_verify_msr(sld_state != sld_off); 1082 1076 } 1083 1077 1084 1078 static void split_lock_warn(unsigned long ip) ··· 1185 1177 return; 1186 1178 } 1187 1179 1180 + cpu_model_supports_sld = true; 1188 1181 split_lock_setup(); 1189 1182 }
+3 -1
arch/x86/kernel/cpu/mce/core.c
··· 1901 1901 1902 1902 static __always_inline void exc_machine_check_kernel(struct pt_regs *regs) 1903 1903 { 1904 + WARN_ON_ONCE(user_mode(regs)); 1905 + 1904 1906 /* 1905 1907 * Only required when from kernel mode. See 1906 1908 * mce_check_crashing_cpu() for details. ··· 1956 1954 } 1957 1955 #else 1958 1956 /* 32bit unified entry point */ 1959 - DEFINE_IDTENTRY_MCE(exc_machine_check) 1957 + DEFINE_IDTENTRY_RAW(exc_machine_check) 1960 1958 { 1961 1959 unsigned long dr7; 1962 1960
+17 -10
arch/x86/kernel/dumpstack.c
··· 71 71 printk("%s %s%pB\n", log_lvl, reliable ? "" : "? ", (void *)address); 72 72 } 73 73 74 + static int copy_code(struct pt_regs *regs, u8 *buf, unsigned long src, 75 + unsigned int nbytes) 76 + { 77 + if (!user_mode(regs)) 78 + return copy_from_kernel_nofault(buf, (u8 *)src, nbytes); 79 + 80 + /* 81 + * Make sure userspace isn't trying to trick us into dumping kernel 82 + * memory by pointing the userspace instruction pointer at it. 83 + */ 84 + if (__chk_range_not_ok(src, nbytes, TASK_SIZE_MAX)) 85 + return -EINVAL; 86 + 87 + return copy_from_user_nmi(buf, (void __user *)src, nbytes); 88 + } 89 + 74 90 /* 75 91 * There are a couple of reasons for the 2/3rd prologue, courtesy of Linus: 76 92 * ··· 113 97 #define OPCODE_BUFSIZE (PROLOGUE_SIZE + 1 + EPILOGUE_SIZE) 114 98 u8 opcodes[OPCODE_BUFSIZE]; 115 99 unsigned long prologue = regs->ip - PROLOGUE_SIZE; 116 - bool bad_ip; 117 100 118 - /* 119 - * Make sure userspace isn't trying to trick us into dumping kernel 120 - * memory by pointing the userspace instruction pointer at it. 121 - */ 122 - bad_ip = user_mode(regs) && 123 - __chk_range_not_ok(prologue, OPCODE_BUFSIZE, TASK_SIZE_MAX); 124 - 125 - if (bad_ip || copy_from_kernel_nofault(opcodes, (u8 *)prologue, 126 - OPCODE_BUFSIZE)) { 101 + if (copy_code(regs, opcodes, prologue, sizeof(opcodes))) { 127 102 printk("%sCode: Bad RIP value.\n", loglvl); 128 103 } else { 129 104 printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %"
+6
arch/x86/kernel/fpu/core.c
··· 101 101 copy_fpregs_to_fpstate(&current->thread.fpu); 102 102 } 103 103 __cpu_invalidate_fpregs_state(); 104 + 105 + if (boot_cpu_has(X86_FEATURE_XMM)) 106 + ldmxcsr(MXCSR_DEFAULT); 107 + 108 + if (boot_cpu_has(X86_FEATURE_FPU)) 109 + asm volatile ("fninit"); 104 110 } 105 111 EXPORT_SYMBOL_GPL(kernel_fpu_begin); 106 112
+1 -1
arch/x86/kernel/fpu/xstate.c
··· 1074 1074 copy_part(offsetof(struct fxregs_state, st_space), 128, 1075 1075 &xsave->i387.st_space, &kbuf, &offset_start, &count); 1076 1076 if (header.xfeatures & XFEATURE_MASK_SSE) 1077 - copy_part(xstate_offsets[XFEATURE_MASK_SSE], 256, 1077 + copy_part(xstate_offsets[XFEATURE_SSE], 256, 1078 1078 &xsave->i387.xmm_space, &kbuf, &offset_start, &count); 1079 1079 /* 1080 1080 * Fill xsave->i387.sw_reserved value for ptrace frame:
+25 -1
arch/x86/kernel/ldt.c
··· 29 29 #include <asm/mmu_context.h> 30 30 #include <asm/pgtable_areas.h> 31 31 32 + #include <xen/xen.h> 33 + 32 34 /* This is a multiple of PAGE_SIZE. */ 33 35 #define LDT_SLOT_STRIDE (LDT_ENTRIES * LDT_ENTRY_SIZE) 34 36 ··· 545 543 return bytecount; 546 544 } 547 545 546 + static bool allow_16bit_segments(void) 547 + { 548 + if (!IS_ENABLED(CONFIG_X86_16BIT)) 549 + return false; 550 + 551 + #ifdef CONFIG_XEN_PV 552 + /* 553 + * Xen PV does not implement ESPFIX64, which means that 16-bit 554 + * segments will not work correctly. Until either Xen PV implements 555 + * ESPFIX64 and can signal this fact to the guest or unless someone 556 + * provides compelling evidence that allowing broken 16-bit segments 557 + * is worthwhile, disallow 16-bit segments under Xen PV. 558 + */ 559 + if (xen_pv_domain()) { 560 + pr_info_once("Warning: 16-bit segments do not work correctly in a Xen PV guest\n"); 561 + return false; 562 + } 563 + #endif 564 + 565 + return true; 566 + } 567 + 548 568 static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode) 549 569 { 550 570 struct mm_struct *mm = current->mm; ··· 598 574 /* The user wants to clear the entry. */ 599 575 memset(&ldt, 0, sizeof(ldt)); 600 576 } else { 601 - if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) { 577 + if (!ldt_info.seg_32bit && !allow_16bit_segments()) { 602 578 error = -EINVAL; 603 579 goto out; 604 580 }
+2 -1
arch/x86/kernel/paravirt.c
··· 324 324 .cpu.swapgs = native_swapgs, 325 325 326 326 #ifdef CONFIG_X86_IOPL_IOPERM 327 - .cpu.update_io_bitmap = native_tss_update_io_bitmap, 327 + .cpu.invalidate_io_bitmap = native_tss_invalidate_io_bitmap, 328 + .cpu.update_io_bitmap = native_tss_update_io_bitmap, 328 329 #endif 329 330 330 331 .cpu.start_context_switch = paravirt_nop,
+2 -16
arch/x86/kernel/process.c
··· 322 322 } 323 323 324 324 #ifdef CONFIG_X86_IOPL_IOPERM 325 - static inline void tss_invalidate_io_bitmap(struct tss_struct *tss) 326 - { 327 - /* 328 - * Invalidate the I/O bitmap by moving io_bitmap_base outside the 329 - * TSS limit so any subsequent I/O access from user space will 330 - * trigger a #GP. 331 - * 332 - * This is correct even when VMEXIT rewrites the TSS limit 333 - * to 0x67 as the only requirement is that the base points 334 - * outside the limit. 335 - */ 336 - tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET_INVALID; 337 - } 338 - 339 325 static inline void switch_to_bitmap(unsigned long tifp) 340 326 { 341 327 /* ··· 332 346 * user mode. 333 347 */ 334 348 if (tifp & _TIF_IO_BITMAP) 335 - tss_invalidate_io_bitmap(this_cpu_ptr(&cpu_tss_rw)); 349 + tss_invalidate_io_bitmap(); 336 350 } 337 351 338 352 static void tss_copy_io_bitmap(struct tss_struct *tss, struct io_bitmap *iobm) ··· 366 380 u16 *base = &tss->x86_tss.io_bitmap_base; 367 381 368 382 if (!test_thread_flag(TIF_IO_BITMAP)) { 369 - tss_invalidate_io_bitmap(tss); 383 + native_tss_invalidate_io_bitmap(); 370 384 return; 371 385 } 372 386
-5
arch/x86/kernel/stacktrace.c
··· 58 58 * or a page fault), which can make frame pointers 59 59 * unreliable. 60 60 */ 61 - 62 61 if (IS_ENABLED(CONFIG_FRAME_POINTER)) 63 62 return -EINVAL; 64 63 } ··· 78 79 79 80 /* Check for stack corruption */ 80 81 if (unwind_error(&state)) 81 - return -EINVAL; 82 - 83 - /* Success path for non-user tasks, i.e. kthreads and idle tasks */ 84 - if (!(task->flags & (PF_KTHREAD | PF_IDLE))) 85 82 return -EINVAL; 86 83 87 84 return 0;
+15 -1
arch/x86/kernel/traps.c
··· 303 303 304 304 do_trap(X86_TRAP_AC, SIGBUS, "alignment check", regs, 305 305 error_code, BUS_ADRALN, NULL); 306 + 307 + local_irq_disable(); 306 308 } 307 309 308 310 #ifdef CONFIG_VMAP_STACK ··· 872 870 trace_hardirqs_off_finish(); 873 871 874 872 /* 873 + * If something gets miswired and we end up here for a user mode 874 + * #DB, we will malfunction. 875 + */ 876 + WARN_ON_ONCE(user_mode(regs)); 877 + 878 + /* 875 879 * Catch SYSENTER with TF set and clear DR_STEP. If this hit a 876 880 * watchpoint at the same time then that will still be handled. 877 881 */ ··· 895 887 static __always_inline void exc_debug_user(struct pt_regs *regs, 896 888 unsigned long dr6) 897 889 { 890 + /* 891 + * If something gets miswired and we end up here for a kernel mode 892 + * #DB, we will malfunction. 893 + */ 894 + WARN_ON_ONCE(!user_mode(regs)); 895 + 898 896 idtentry_enter_user(regs); 899 897 instrumentation_begin(); 900 898 ··· 931 917 } 932 918 #else 933 919 /* 32 bit does not have separate entry points. */ 934 - DEFINE_IDTENTRY_DEBUG(exc_debug) 920 + DEFINE_IDTENTRY_RAW(exc_debug) 935 921 { 936 922 unsigned long dr6, dr7; 937 923
+6 -2
arch/x86/kernel/unwind_orc.c
··· 440 440 /* 441 441 * Find the orc_entry associated with the text address. 442 442 * 443 - * Decrement call return addresses by one so they work for sibling 444 - * calls and calls to noreturn functions. 443 + * For a call frame (as opposed to a signal frame), state->ip points to 444 + * the instruction after the call. That instruction's stack layout 445 + * could be different from the call instruction's layout, for example 446 + * if the call was to a noreturn function. So get the ORC data for the 447 + * call instruction itself. 445 448 */ 446 449 orc = orc_find(state->signal ? state->ip : state->ip - 1); 447 450 if (!orc) { ··· 665 662 state->sp = task->thread.sp; 666 663 state->bp = READ_ONCE_NOCHECK(frame->bp); 667 664 state->ip = READ_ONCE_NOCHECK(frame->ret_addr); 665 + state->signal = (void *)state->ip == ret_from_fork; 668 666 } 669 667 670 668 if (get_stack_info((unsigned long *)state->sp, state->task,
+1
arch/x86/kernel/vmlinux.lds.S
··· 358 358 .bss : AT(ADDR(.bss) - LOAD_OFFSET) { 359 359 __bss_start = .; 360 360 *(.bss..page_aligned) 361 + . = ALIGN(PAGE_SIZE); 361 362 *(BSS_MAIN) 362 363 BSS_DECRYPTED 363 364 . = ALIGN(PAGE_SIZE);
+1 -1
arch/x86/kvm/kvm_cache_regs.h
··· 7 7 #define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS 8 8 #define KVM_POSSIBLE_CR4_GUEST_BITS \ 9 9 (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \ 10 - | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE) 10 + | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE | X86_CR4_TSD) 11 11 12 12 #define BUILD_KVM_GPR_ACCESSORS(lname, uname) \ 13 13 static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
+1 -1
arch/x86/kvm/mmu/mmu.c
··· 4449 4449 nonleaf_bit8_rsvd | rsvd_bits(7, 7) | 4450 4450 rsvd_bits(maxphyaddr, 51); 4451 4451 rsvd_check->rsvd_bits_mask[0][2] = exb_bit_rsvd | 4452 - nonleaf_bit8_rsvd | gbpages_bit_rsvd | 4452 + gbpages_bit_rsvd | 4453 4453 rsvd_bits(maxphyaddr, 51); 4454 4454 rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd | 4455 4455 rsvd_bits(maxphyaddr, 51);
+3 -2
arch/x86/kvm/vmx/nested.c
··· 4109 4109 * CR0_GUEST_HOST_MASK is already set in the original vmcs01 4110 4110 * (KVM doesn't change it); 4111 4111 */ 4112 - vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS; 4112 + vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS; 4113 4113 vmx_set_cr0(vcpu, vmcs12->host_cr0); 4114 4114 4115 4115 /* Same as above - no reason to call set_cr4_guest_host_mask(). */ ··· 4259 4259 */ 4260 4260 vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx)); 4261 4261 4262 - vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS; 4262 + vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS; 4263 4263 vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW)); 4264 4264 4265 4265 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); ··· 6176 6176 goto error_guest_mode; 6177 6177 } 6178 6178 6179 + vmx->nested.has_preemption_timer_deadline = false; 6179 6180 if (kvm_state->hdr.vmx.flags & KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) { 6180 6181 vmx->nested.has_preemption_timer_deadline = true; 6181 6182 vmx->nested.preemption_timer_deadline =
+5 -8
arch/x86/kvm/vmx/vmx.c
··· 133 133 #define KVM_VM_CR0_ALWAYS_ON \ 134 134 (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | \ 135 135 X86_CR0_WP | X86_CR0_PG | X86_CR0_PE) 136 - #define KVM_CR4_GUEST_OWNED_BITS \ 137 - (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \ 138 - | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_TSD) 139 136 140 137 #define KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR4_VMXE 141 138 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE) ··· 4031 4034 4032 4035 void set_cr4_guest_host_mask(struct vcpu_vmx *vmx) 4033 4036 { 4034 - vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS; 4035 - if (enable_ept) 4036 - vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE; 4037 + vmx->vcpu.arch.cr4_guest_owned_bits = KVM_POSSIBLE_CR4_GUEST_BITS; 4038 + if (!enable_ept) 4039 + vmx->vcpu.arch.cr4_guest_owned_bits &= ~X86_CR4_PGE; 4037 4040 if (is_guest_mode(&vmx->vcpu)) 4038 4041 vmx->vcpu.arch.cr4_guest_owned_bits &= 4039 4042 ~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask; ··· 4330 4333 /* 22.2.1, 20.8.1 */ 4331 4334 vm_entry_controls_set(vmx, vmx_vmentry_ctrl()); 4332 4335 4333 - vmx->vcpu.arch.cr0_guest_owned_bits = X86_CR0_TS; 4334 - vmcs_writel(CR0_GUEST_HOST_MASK, ~X86_CR0_TS); 4336 + vmx->vcpu.arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS; 4337 + vmcs_writel(CR0_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr0_guest_owned_bits); 4335 4338 4336 4339 set_cr4_guest_host_mask(vmx); 4337 4340
+5
arch/x86/kvm/x86.c
··· 975 975 if (is_long_mode(vcpu)) { 976 976 if (!(cr4 & X86_CR4_PAE)) 977 977 return 1; 978 + if ((cr4 ^ old_cr4) & X86_CR4_LA57) 979 + return 1; 978 980 } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE) 979 981 && ((cr4 ^ old_cr4) & pdptr_bits) 980 982 && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, ··· 2693 2691 2694 2692 /* Bits 4:5 are reserved, Should be zero */ 2695 2693 if (data & 0x30) 2694 + return 1; 2695 + 2696 + if (!lapic_in_kernel(vcpu)) 2696 2697 return 1; 2697 2698 2698 2699 vcpu->arch.apf.msr_en_val = data;
+1 -1
arch/x86/math-emu/wm_sqrt.S
··· 209 209 210 210 #ifdef PARANOID 211 211 /* It should be possible to get here only if the arg is ffff....ffff */ 212 - cmp $0xffffffff,FPU_fsqrt_arg_1 212 + cmpl $0xffffffff,FPU_fsqrt_arg_1 213 213 jnz sqrt_stage_2_error 214 214 #endif /* PARANOID */ 215 215
+2 -1
arch/x86/platform/uv/uv_irq.c
··· 167 167 goto out; 168 168 169 169 uv_domain = irq_domain_create_tree(fn, &uv_domain_ops, NULL); 170 - irq_domain_free_fwnode(fn); 171 170 if (uv_domain) 172 171 uv_domain->parent = x86_vector_domain; 172 + else 173 + irq_domain_free_fwnode(fn); 173 174 out: 174 175 mutex_unlock(&uv_lock); 175 176
+36 -4
arch/x86/xen/enlighten_pv.c
··· 598 598 } 599 599 600 600 #ifdef CONFIG_X86_64 601 + void noist_exc_debug(struct pt_regs *regs); 602 + 603 + DEFINE_IDTENTRY_RAW(xenpv_exc_nmi) 604 + { 605 + /* On Xen PV, NMI doesn't use IST. The C part is the sane as native. */ 606 + exc_nmi(regs); 607 + } 608 + 609 + DEFINE_IDTENTRY_RAW(xenpv_exc_debug) 610 + { 611 + /* 612 + * There's no IST on Xen PV, but we still need to dispatch 613 + * to the correct handler. 614 + */ 615 + if (user_mode(regs)) 616 + noist_exc_debug(regs); 617 + else 618 + exc_debug(regs); 619 + } 620 + 601 621 struct trap_array_entry { 602 622 void (*orig)(void); 603 623 void (*xen)(void); ··· 629 609 .xen = xen_asm_##func, \ 630 610 .ist_okay = ist_ok } 631 611 632 - #define TRAP_ENTRY_REDIR(func, xenfunc, ist_ok) { \ 612 + #define TRAP_ENTRY_REDIR(func, ist_ok) { \ 633 613 .orig = asm_##func, \ 634 - .xen = xen_asm_##xenfunc, \ 614 + .xen = xen_asm_xenpv_##func, \ 635 615 .ist_okay = ist_ok } 636 616 637 617 static struct trap_array_entry trap_array[] = { 638 - TRAP_ENTRY_REDIR(exc_debug, exc_xendebug, true ), 618 + TRAP_ENTRY_REDIR(exc_debug, true ), 639 619 TRAP_ENTRY(exc_double_fault, true ), 640 620 #ifdef CONFIG_X86_MCE 641 621 TRAP_ENTRY(exc_machine_check, true ), 642 622 #endif 643 - TRAP_ENTRY_REDIR(exc_nmi, exc_xennmi, true ), 623 + TRAP_ENTRY_REDIR(exc_nmi, true ), 644 624 TRAP_ENTRY(exc_int3, false ), 645 625 TRAP_ENTRY(exc_overflow, false ), 646 626 #ifdef CONFIG_IA32_EMULATION ··· 870 850 } 871 851 872 852 #ifdef CONFIG_X86_IOPL_IOPERM 853 + static void xen_invalidate_io_bitmap(void) 854 + { 855 + struct physdev_set_iobitmap iobitmap = { 856 + .bitmap = 0, 857 + .nr_ports = 0, 858 + }; 859 + 860 + native_tss_invalidate_io_bitmap(); 861 + HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &iobitmap); 862 + } 863 + 873 864 static void xen_update_io_bitmap(void) 874 865 { 875 866 struct physdev_set_iobitmap iobitmap; ··· 1110 1079 .load_sp0 = xen_load_sp0, 1111 1080 1112 1081 #ifdef CONFIG_X86_IOPL_IOPERM 1082 + 
.invalidate_io_bitmap = xen_invalidate_io_bitmap, 1113 1083 .update_io_bitmap = xen_update_io_bitmap, 1114 1084 #endif 1115 1085 .io_delay = xen_io_delay,
+18 -7
arch/x86/xen/xen-asm_64.S
··· 29 29 .endm 30 30 31 31 xen_pv_trap asm_exc_divide_error 32 - xen_pv_trap asm_exc_debug 33 - xen_pv_trap asm_exc_xendebug 32 + xen_pv_trap asm_xenpv_exc_debug 34 33 xen_pv_trap asm_exc_int3 35 - xen_pv_trap asm_exc_xennmi 34 + xen_pv_trap asm_xenpv_exc_nmi 36 35 xen_pv_trap asm_exc_overflow 37 36 xen_pv_trap asm_exc_bounds 38 37 xen_pv_trap asm_exc_invalid_op ··· 160 161 161 162 /* 32-bit compat sysenter target */ 162 163 SYM_FUNC_START(xen_sysenter_target) 163 - mov 0*8(%rsp), %rcx 164 - mov 1*8(%rsp), %r11 165 - mov 5*8(%rsp), %rsp 166 - jmp entry_SYSENTER_compat 164 + /* 165 + * NB: Xen is polite and clears TF from EFLAGS for us. This means 166 + * that we don't need to guard against single step exceptions here. 167 + */ 168 + popq %rcx 169 + popq %r11 170 + 171 + /* 172 + * Neither Xen nor the kernel really knows what the old SS and 173 + * CS were. The kernel expects __USER32_DS and __USER32_CS, so 174 + * report those values even though Xen will guess its own values. 175 + */ 176 + movq $__USER32_DS, 4*8(%rsp) 177 + movq $__USER32_CS, 1*8(%rsp) 178 + 179 + jmp entry_SYSENTER_compat_after_hwframe 167 180 SYM_FUNC_END(xen_sysenter_target) 168 181 169 182 #else /* !CONFIG_IA32_EMULATION */
+1 -1
arch/xtensa/include/asm/checksum.h
··· 57 57 __wsum csum_and_copy_from_user(const void __user *src, void *dst, 58 58 int len, __wsum sum, int *err_ptr) 59 59 { 60 - if (access_ok(dst, len)) 60 + if (access_ok(src, len)) 61 61 return csum_partial_copy_generic((__force const void *)src, dst, 62 62 len, sum, err_ptr, NULL); 63 63 if (len)
+1 -3
arch/xtensa/kernel/perf_event.c
··· 362 362 struct xtensa_pmu_events *ev = this_cpu_ptr(&xtensa_pmu_events); 363 363 unsigned i; 364 364 365 - for (i = find_first_bit(ev->used_mask, XCHAL_NUM_PERF_COUNTERS); 366 - i < XCHAL_NUM_PERF_COUNTERS; 367 - i = find_next_bit(ev->used_mask, XCHAL_NUM_PERF_COUNTERS, i + 1)) { 365 + for_each_set_bit(i, ev->used_mask, XCHAL_NUM_PERF_COUNTERS) { 368 366 uint32_t v = get_er(XTENSA_PMU_PMSTAT(i)); 369 367 struct perf_event *event = ev->event[i]; 370 368 struct hw_perf_event *hwc = &event->hw;
+2 -1
arch/xtensa/kernel/setup.c
··· 724 724 static void * 725 725 c_next(struct seq_file *f, void *v, loff_t *pos) 726 726 { 727 - return NULL; 727 + ++*pos; 728 + return c_start(f, pos); 728 729 } 729 730 730 731 static void
+2 -2
arch/xtensa/kernel/xtensa_ksyms.c
··· 87 87 } 88 88 EXPORT_SYMBOL(__xtensa_libgcc_window_spill); 89 89 90 - unsigned long __sync_fetch_and_and_4(unsigned long *p, unsigned long v) 90 + unsigned int __sync_fetch_and_and_4(volatile void *p, unsigned int v) 91 91 { 92 92 BUG(); 93 93 } 94 94 EXPORT_SYMBOL(__sync_fetch_and_and_4); 95 95 96 - unsigned long __sync_fetch_and_or_4(unsigned long *p, unsigned long v) 96 + unsigned int __sync_fetch_and_or_4(volatile void *p, unsigned int v) 97 97 { 98 98 BUG(); 99 99 }
+2 -1
block/bio-integrity.c
··· 24 24 flush_workqueue(kintegrityd_wq); 25 25 } 26 26 27 - void __bio_integrity_free(struct bio_set *bs, struct bio_integrity_payload *bip) 27 + static void __bio_integrity_free(struct bio_set *bs, 28 + struct bio_integrity_payload *bip) 28 29 { 29 30 if (bs && mempool_initialized(&bs->bio_integrity_pool)) { 30 31 if (bip->bip_vec)
+3
block/blk-mq-debugfs.c
··· 125 125 QUEUE_FLAG_NAME(REGISTERED), 126 126 QUEUE_FLAG_NAME(SCSI_PASSTHROUGH), 127 127 QUEUE_FLAG_NAME(QUIESCED), 128 + QUEUE_FLAG_NAME(PCI_P2PDMA), 129 + QUEUE_FLAG_NAME(ZONE_RESETALL), 130 + QUEUE_FLAG_NAME(RQ_ALLOC_TIME), 128 131 }; 129 132 #undef QUEUE_FLAG_NAME 130 133
+2 -2
block/blk-mq.c
··· 828 828 void *priv, bool reserved) 829 829 { 830 830 /* 831 - * If we find a request that is inflight and the queue matches, 831 + * If we find a request that isn't idle and the queue matches, 832 832 * we know the queue is busy. Return false to stop the iteration. 833 833 */ 834 - if (rq->state == MQ_RQ_IN_FLIGHT && rq->q == hctx->queue) { 834 + if (blk_mq_request_started(rq) && rq->q == hctx->queue) { 835 835 bool *busy = priv; 836 836 837 837 *busy = true;
+1 -2
block/keyslot-manager.c
··· 374 374 if (!ksm) 375 375 return; 376 376 kvfree(ksm->slot_hashtable); 377 - memzero_explicit(ksm->slots, sizeof(ksm->slots[0]) * ksm->num_slots); 378 - kvfree(ksm->slots); 377 + kvfree_sensitive(ksm->slots, sizeof(ksm->slots[0]) * ksm->num_slots); 379 378 memzero_explicit(ksm, sizeof(*ksm)); 380 379 } 381 380 EXPORT_SYMBOL_GPL(blk_ksm_destroy);
+11 -15
crypto/af_alg.c
··· 128 128 void af_alg_release_parent(struct sock *sk) 129 129 { 130 130 struct alg_sock *ask = alg_sk(sk); 131 - unsigned int nokey = ask->nokey_refcnt; 132 - bool last = nokey && !ask->refcnt; 131 + unsigned int nokey = atomic_read(&ask->nokey_refcnt); 133 132 134 133 sk = ask->parent; 135 134 ask = alg_sk(sk); 136 135 137 - local_bh_disable(); 138 - bh_lock_sock(sk); 139 - ask->nokey_refcnt -= nokey; 140 - if (!last) 141 - last = !--ask->refcnt; 142 - bh_unlock_sock(sk); 143 - local_bh_enable(); 136 + if (nokey) 137 + atomic_dec(&ask->nokey_refcnt); 144 138 145 - if (last) 139 + if (atomic_dec_and_test(&ask->refcnt)) 146 140 sock_put(sk); 147 141 } 148 142 EXPORT_SYMBOL_GPL(af_alg_release_parent); ··· 181 187 182 188 err = -EBUSY; 183 189 lock_sock(sk); 184 - if (ask->refcnt | ask->nokey_refcnt) 190 + if (atomic_read(&ask->refcnt)) 185 191 goto unlock; 186 192 187 193 swap(ask->type, type); ··· 230 236 int err = -EBUSY; 231 237 232 238 lock_sock(sk); 233 - if (ask->refcnt) 239 + if (atomic_read(&ask->refcnt) != atomic_read(&ask->nokey_refcnt)) 234 240 goto unlock; 235 241 236 242 type = ask->type; ··· 295 301 if (err) 296 302 goto unlock; 297 303 298 - if (nokey || !ask->refcnt++) 304 + if (atomic_inc_return_relaxed(&ask->refcnt) == 1) 299 305 sock_hold(sk); 300 - ask->nokey_refcnt += nokey; 306 + if (nokey) { 307 + atomic_inc(&ask->nokey_refcnt); 308 + atomic_set(&alg_sk(sk2)->nokey_refcnt, 1); 309 + } 301 310 alg_sk(sk2)->parent = sk; 302 311 alg_sk(sk2)->type = type; 303 - alg_sk(sk2)->nokey_refcnt = nokey; 304 312 305 313 newsock->ops = type->ops; 306 314 newsock->state = SS_CONNECTED;
+3 -6
crypto/algif_aead.c
··· 384 384 struct alg_sock *ask = alg_sk(sk); 385 385 386 386 lock_sock(sk); 387 - if (ask->refcnt) 387 + if (!atomic_read(&ask->nokey_refcnt)) 388 388 goto unlock_child; 389 389 390 390 psk = ask->parent; ··· 396 396 if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY) 397 397 goto unlock; 398 398 399 - if (!pask->refcnt++) 400 - sock_hold(psk); 401 - 402 - ask->refcnt = 1; 403 - sock_put(psk); 399 + atomic_dec(&pask->nokey_refcnt); 400 + atomic_set(&ask->nokey_refcnt, 0); 404 401 405 402 err = 0; 406 403
+3 -6
crypto/algif_hash.c
··· 301 301 struct alg_sock *ask = alg_sk(sk); 302 302 303 303 lock_sock(sk); 304 - if (ask->refcnt) 304 + if (!atomic_read(&ask->nokey_refcnt)) 305 305 goto unlock_child; 306 306 307 307 psk = ask->parent; ··· 313 313 if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) 314 314 goto unlock; 315 315 316 - if (!pask->refcnt++) 317 - sock_hold(psk); 318 - 319 - ask->refcnt = 1; 320 - sock_put(psk); 316 + atomic_dec(&pask->nokey_refcnt); 317 + atomic_set(&ask->nokey_refcnt, 0); 321 318 322 319 err = 0; 323 320
+3 -6
crypto/algif_skcipher.c
··· 211 211 struct alg_sock *ask = alg_sk(sk); 212 212 213 213 lock_sock(sk); 214 - if (ask->refcnt) 214 + if (!atomic_read(&ask->nokey_refcnt)) 215 215 goto unlock_child; 216 216 217 217 psk = ask->parent; ··· 223 223 if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) 224 224 goto unlock; 225 225 226 - if (!pask->refcnt++) 227 - sock_hold(psk); 228 - 229 - ask->refcnt = 1; 230 - sock_put(psk); 226 + atomic_dec(&pask->nokey_refcnt); 227 + atomic_set(&ask->nokey_refcnt, 0); 231 228 232 229 err = 0; 233 230
+1
crypto/asymmetric_keys/public_key.c
··· 119 119 if (IS_ERR(tfm)) 120 120 return PTR_ERR(tfm); 121 121 122 + ret = -ENOMEM; 122 123 key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen, 123 124 GFP_KERNEL); 124 125 if (!key)
+1
drivers/acpi/dptf/dptf_power.c
··· 228 228 {"INT3407", 0}, 229 229 {"INT3532", 0}, 230 230 {"INTC1047", 0}, 231 + {"INTC1050", 0}, 231 232 {"", 0}, 232 233 }; 233 234 MODULE_DEVICE_TABLE(acpi, int3407_device_ids);
+1 -1
drivers/acpi/fan.c
··· 25 25 26 26 static const struct acpi_device_id fan_device_ids[] = { 27 27 {"PNP0C0B", 0}, 28 - {"INT1044", 0}, 29 28 {"INT3404", 0}, 29 + {"INTC1044", 0}, 30 30 {"", 0}, 31 31 }; 32 32 MODULE_DEVICE_TABLE(acpi, fan_device_ids);
+1 -1
drivers/android/binder_alloc.c
··· 947 947 trace_binder_unmap_user_end(alloc, index); 948 948 } 949 949 mmap_read_unlock(mm); 950 - mmput(mm); 950 + mmput_async(mm); 951 951 952 952 trace_binder_unmap_kernel_start(alloc, index); 953 953
-1
drivers/base/base.h
··· 153 153 extern int devres_release_all(struct device *dev); 154 154 extern void device_block_probing(void); 155 155 extern void device_unblock_probing(void); 156 - extern void driver_deferred_probe_force_trigger(void); 157 156 158 157 /* /sys/devices directory */ 159 158 extern struct kset *devices_kset;
+32 -12
drivers/base/core.c
··· 50 50 static LIST_HEAD(deferred_sync); 51 51 static unsigned int defer_sync_state_count = 1; 52 52 static unsigned int defer_fw_devlink_count; 53 + static LIST_HEAD(deferred_fw_devlink); 53 54 static DEFINE_MUTEX(defer_fw_devlink_lock); 54 55 static bool fw_devlink_is_permissive(void); 55 56 ··· 755 754 */ 756 755 dev->state_synced = true; 757 756 758 - if (WARN_ON(!list_empty(&dev->links.defer_sync))) 757 + if (WARN_ON(!list_empty(&dev->links.defer_hook))) 759 758 return; 760 759 761 760 get_device(dev); 762 - list_add_tail(&dev->links.defer_sync, list); 761 + list_add_tail(&dev->links.defer_hook, list); 763 762 } 764 763 765 764 /** ··· 777 776 { 778 777 struct device *dev, *tmp; 779 778 780 - list_for_each_entry_safe(dev, tmp, list, links.defer_sync) { 781 - list_del_init(&dev->links.defer_sync); 779 + list_for_each_entry_safe(dev, tmp, list, links.defer_hook) { 780 + list_del_init(&dev->links.defer_hook); 782 781 783 782 if (dev != dont_lock_dev) 784 783 device_lock(dev); ··· 816 815 if (defer_sync_state_count) 817 816 goto out; 818 817 819 - list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_sync) { 818 + list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_hook) { 820 819 /* 821 820 * Delete from deferred_sync list before queuing it to 822 - * sync_list because defer_sync is used for both lists. 821 + * sync_list because defer_hook is used for both lists. 
823 822 */ 824 - list_del_init(&dev->links.defer_sync); 823 + list_del_init(&dev->links.defer_hook); 825 824 __device_links_queue_sync_state(dev, &sync_list); 826 825 } 827 826 out: ··· 839 838 840 839 static void __device_links_supplier_defer_sync(struct device *sup) 841 840 { 842 - if (list_empty(&sup->links.defer_sync) && dev_has_sync_state(sup)) 843 - list_add_tail(&sup->links.defer_sync, &deferred_sync); 841 + if (list_empty(&sup->links.defer_hook) && dev_has_sync_state(sup)) 842 + list_add_tail(&sup->links.defer_hook, &deferred_sync); 844 843 } 845 844 846 845 static void device_link_drop_managed(struct device_link *link) ··· 1053 1052 WRITE_ONCE(link->status, DL_STATE_DORMANT); 1054 1053 } 1055 1054 1056 - list_del_init(&dev->links.defer_sync); 1055 + list_del_init(&dev->links.defer_hook); 1057 1056 __device_links_no_driver(dev); 1058 1057 1059 1058 device_links_write_unlock(); ··· 1245 1244 fw_ret = -EAGAIN; 1246 1245 } else { 1247 1246 fw_ret = -ENODEV; 1247 + /* 1248 + * defer_hook is not used to add device to deferred_sync list 1249 + * until device is bound. Since deferred fw devlink also blocks 1250 + * probing, same list hook can be used for deferred_fw_devlink. 
1251 + */ 1252 + list_add_tail(&dev->links.defer_hook, &deferred_fw_devlink); 1248 1253 } 1249 1254 1250 1255 if (fw_ret == -ENODEV) ··· 1319 1312 */ 1320 1313 void fw_devlink_resume(void) 1321 1314 { 1315 + struct device *dev, *tmp; 1316 + LIST_HEAD(probe_list); 1317 + 1322 1318 mutex_lock(&defer_fw_devlink_lock); 1323 1319 if (!defer_fw_devlink_count) { 1324 1320 WARN(true, "Unmatched fw_devlink pause/resume!"); ··· 1333 1323 goto out; 1334 1324 1335 1325 device_link_add_missing_supplier_links(); 1336 - driver_deferred_probe_force_trigger(); 1326 + list_splice_tail_init(&deferred_fw_devlink, &probe_list); 1337 1327 out: 1338 1328 mutex_unlock(&defer_fw_devlink_lock); 1329 + 1330 + /* 1331 + * bus_probe_device() can cause new devices to get added and they'll 1332 + * try to grab defer_fw_devlink_lock. So, this needs to be done outside 1333 + * the defer_fw_devlink_lock. 1334 + */ 1335 + list_for_each_entry_safe(dev, tmp, &probe_list, links.defer_hook) { 1336 + list_del_init(&dev->links.defer_hook); 1337 + bus_probe_device(dev); 1338 + } 1339 1339 } 1340 1340 /* Device links support end. */ 1341 1341 ··· 2192 2172 INIT_LIST_HEAD(&dev->links.consumers); 2193 2173 INIT_LIST_HEAD(&dev->links.suppliers); 2194 2174 INIT_LIST_HEAD(&dev->links.needs_suppliers); 2195 - INIT_LIST_HEAD(&dev->links.defer_sync); 2175 + INIT_LIST_HEAD(&dev->links.defer_hook); 2196 2176 dev->links.status = DL_DEV_NO_DRIVER; 2197 2177 } 2198 2178 EXPORT_SYMBOL_GPL(device_initialize);
-5
drivers/base/dd.c
··· 164 164 if (!driver_deferred_probe_enable) 165 165 return; 166 166 167 - driver_deferred_probe_force_trigger(); 168 - } 169 - 170 - void driver_deferred_probe_force_trigger(void) 171 - { 172 167 /* 173 168 * A successful probe means that all the devices in the pending list 174 169 * should be triggered to be reprobed. Move all the deferred devices
+1 -1
drivers/base/property.c
··· 721 721 return next; 722 722 723 723 /* When no more children in primary, continue with secondary */ 724 - if (!IS_ERR_OR_NULL(fwnode->secondary)) 724 + if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary)) 725 725 next = fwnode_get_next_child_node(fwnode->secondary, child); 726 726 727 727 return next;
+1 -1
drivers/base/regmap/Kconfig
··· 4 4 # subsystems should select the appropriate symbols. 5 5 6 6 config REGMAP 7 - default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SCCB || REGMAP_I3C) 7 + default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SOUNDWIRE || REGMAP_SCCB || REGMAP_I3C) 8 8 select IRQ_DOMAIN if REGMAP_IRQ 9 9 bool 10 10
+31 -25
drivers/base/regmap/regmap-debugfs.c
··· 463 463 { 464 464 struct regmap *map = container_of(file->private_data, 465 465 struct regmap, cache_only); 466 - ssize_t result; 467 - bool was_enabled, require_sync = false; 466 + bool new_val, require_sync = false; 468 467 int err; 468 + 469 + err = kstrtobool_from_user(user_buf, count, &new_val); 470 + /* Ignore malformed data like debugfs_write_file_bool() */ 471 + if (err) 472 + return count; 473 + 474 + err = debugfs_file_get(file->f_path.dentry); 475 + if (err) 476 + return err; 469 477 470 478 map->lock(map->lock_arg); 471 479 472 480 was_enabled = map->cache_only; 473 - 474 - result = debugfs_write_file_bool(file, user_buf, count, ppos); 475 - if (result < 0) { 476 - map->unlock(map->lock_arg); 477 - return result; 478 - } 479 - 480 - if (map->cache_only && !was_enabled) { 480 + if (new_val && !map->cache_only) { 481 481 dev_warn(map->dev, "debugfs cache_only=Y forced\n"); 482 482 add_taint(TAINT_USER, LOCKDEP_STILL_OK); 483 - } else if (!map->cache_only && was_enabled) { 483 + } else if (!new_val && map->cache_only) { 484 484 dev_warn(map->dev, "debugfs cache_only=N forced: syncing cache\n"); 485 485 require_sync = true; 486 486 } 487 + map->cache_only = new_val; 487 488 488 489 map->unlock(map->lock_arg); 490 + debugfs_file_put(file->f_path.dentry); 489 491 490 492 if (require_sync) { 491 493 err = regcache_sync(map); ··· 495 493 dev_err(map->dev, "Failed to sync cache %d\n", err); 496 494 497 495 498 - return result; 496 + return count; 499 497 } 500 498 501 499 static const struct file_operations regmap_cache_only_fops = { ··· 510 508 { 511 509 struct regmap *map = container_of(file->private_data, 512 510 struct regmap, cache_bypass); 513 - ssize_t result; 514 - bool was_enabled; 511 + bool new_val; 512 + int err; 513 + 514 + err = kstrtobool_from_user(user_buf, count, &new_val); 515 + /* Ignore malformed data like debugfs_write_file_bool() */ 516 + if (err) 517 + return count; 518 + 519 + err = debugfs_file_get(file->f_path.dentry); 520 + if 
(err) 521 + return err; 515 522 516 523 map->lock(map->lock_arg); 517 524 518 - was_enabled = map->cache_bypass; 519 - 520 - result = debugfs_write_file_bool(file, user_buf, count, ppos); 521 - if (result < 0) 522 - goto out; 523 - 524 - if (map->cache_bypass && !was_enabled) { 525 + if (new_val && !map->cache_bypass) { 525 526 dev_warn(map->dev, "debugfs cache_bypass=Y forced\n"); 526 527 add_taint(TAINT_USER, LOCKDEP_STILL_OK); 527 - } else if (!map->cache_bypass && was_enabled) { 528 + } else if (!new_val && map->cache_bypass) { 528 529 dev_warn(map->dev, "debugfs cache_bypass=N forced\n"); 529 530 } 531 + map->cache_bypass = new_val; 530 532 531 - out: 532 533 map->unlock(map->lock_arg); 534 + debugfs_file_put(file->f_path.dentry); 533 535 534 - return result; 536 + return count; 535 537 } 536 538 537 539 static const struct file_operations regmap_cache_bypass_fops = {
+1 -1
drivers/base/regmap/regmap.c
··· 1364 1364 1365 1365 /* If the user didn't specify a name match any */ 1366 1366 if (data) 1367 - return (*r)->name == data; 1367 + return !strcmp((*r)->name, data); 1368 1368 else 1369 1369 return 1; 1370 1370 }
+15 -10
drivers/block/nbd.c
··· 1033 1033 test_bit(NBD_RT_BOUND, &config->runtime_flags))) { 1034 1034 dev_err(disk_to_dev(nbd->disk), 1035 1035 "Device being setup by another task"); 1036 - sockfd_put(sock); 1037 - return -EBUSY; 1036 + err = -EBUSY; 1037 + goto put_socket; 1038 + } 1039 + 1040 + nsock = kzalloc(sizeof(*nsock), GFP_KERNEL); 1041 + if (!nsock) { 1042 + err = -ENOMEM; 1043 + goto put_socket; 1038 1044 } 1039 1045 1040 1046 socks = krealloc(config->socks, (config->num_connections + 1) * 1041 1047 sizeof(struct nbd_sock *), GFP_KERNEL); 1042 1048 if (!socks) { 1043 - sockfd_put(sock); 1044 - return -ENOMEM; 1049 + kfree(nsock); 1050 + err = -ENOMEM; 1051 + goto put_socket; 1045 1052 } 1046 1053 1047 1054 config->socks = socks; 1048 - 1049 - nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL); 1050 - if (!nsock) { 1051 - sockfd_put(sock); 1052 - return -ENOMEM; 1053 - } 1054 1055 1055 1056 nsock->fallback_index = -1; 1056 1057 nsock->dead = false; ··· 1064 1063 atomic_inc(&config->live_connections); 1065 1064 1066 1065 return 0; 1066 + 1067 + put_socket: 1068 + sockfd_put(sock); 1069 + return err; 1067 1070 } 1068 1071 1069 1072 static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
+1
drivers/block/virtio_blk.c
··· 878 878 put_disk(vblk->disk); 879 879 out_free_vq: 880 880 vdev->config->del_vqs(vdev); 881 + kfree(vblk->vqs); 881 882 out_free_vblk: 882 883 kfree(vblk); 883 884 out_free_index:
+2 -1
drivers/block/zram/zram_drv.c
··· 2021 2021 return ret; 2022 2022 return scnprintf(buf, PAGE_SIZE, "%d\n", ret); 2023 2023 } 2024 - static CLASS_ATTR_RO(hot_add); 2024 + static struct class_attribute class_attr_hot_add = 2025 + __ATTR(hot_add, 0400, hot_add_show, NULL); 2025 2026 2026 2027 static ssize_t hot_remove_store(struct class *class, 2027 2028 struct class_attribute *attr,
+34 -11
drivers/bus/ti-sysc.c
··· 236 236 syss_done = ddata->cfg.syss_mask; 237 237 238 238 if (syss_offset >= 0) { 239 - error = readx_poll_timeout(sysc_read_sysstatus, ddata, rstval, 240 - (rstval & ddata->cfg.syss_mask) == 241 - syss_done, 242 - 100, MAX_MODULE_SOFTRESET_WAIT); 239 + error = readx_poll_timeout_atomic(sysc_read_sysstatus, ddata, 240 + rstval, (rstval & ddata->cfg.syss_mask) == 241 + syss_done, 100, MAX_MODULE_SOFTRESET_WAIT); 243 242 244 243 } else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS) { 245 - error = readx_poll_timeout(sysc_read_sysconfig, ddata, rstval, 246 - !(rstval & sysc_mask), 247 - 100, MAX_MODULE_SOFTRESET_WAIT); 244 + error = readx_poll_timeout_atomic(sysc_read_sysconfig, ddata, 245 + rstval, !(rstval & sysc_mask), 246 + 100, MAX_MODULE_SOFTRESET_WAIT); 248 247 } 249 248 250 249 return error; ··· 1278 1279 1279 1280 ddata = dev_get_drvdata(dev); 1280 1281 1281 - if (ddata->cfg.quirks & SYSC_QUIRK_LEGACY_IDLE) 1282 + if (ddata->cfg.quirks & 1283 + (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE)) 1282 1284 return 0; 1283 1285 1284 1286 return pm_runtime_force_suspend(dev); ··· 1291 1291 1292 1292 ddata = dev_get_drvdata(dev); 1293 1293 1294 - if (ddata->cfg.quirks & SYSC_QUIRK_LEGACY_IDLE) 1294 + if (ddata->cfg.quirks & 1295 + (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE)) 1295 1296 return 0; 1296 1297 1297 1298 return pm_runtime_force_resume(dev); ··· 1729 1728 1730 1729 local_irq_save(flags); 1731 1730 /* RTC_STATUS BUSY bit may stay active for 1/32768 seconds (~30 usec) */ 1732 - error = readl_poll_timeout(ddata->module_va + 0x44, val, 1733 - !(val & BIT(0)), 100, 50); 1731 + error = readl_poll_timeout_atomic(ddata->module_va + 0x44, val, 1732 + !(val & BIT(0)), 100, 50); 1734 1733 if (error) 1735 1734 dev_warn(ddata->dev, "rtc busy timeout\n"); 1736 1735 /* Now we have ~15 microseconds to read/write various registers */ ··· 2865 2864 return error; 2866 2865 } 2867 2866 2867 + /* 2868 + * Ignore timers tagged with no-reset and no-idle. 
These are likely in use, 2869 + * for example by drivers/clocksource/timer-ti-dm-systimer.c. If more checks 2870 + * are needed, we could also look at the timer register configuration. 2871 + */ 2872 + static int sysc_check_active_timer(struct sysc *ddata) 2873 + { 2874 + if (ddata->cap->type != TI_SYSC_OMAP2_TIMER && 2875 + ddata->cap->type != TI_SYSC_OMAP4_TIMER) 2876 + return 0; 2877 + 2878 + if ((ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) && 2879 + (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE)) 2880 + return -EBUSY; 2881 + 2882 + return 0; 2883 + } 2884 + 2868 2885 static const struct of_device_id sysc_match_table[] = { 2869 2886 { .compatible = "simple-bus", }, 2870 2887 { /* sentinel */ }, ··· 2936 2917 sysc_init_early_quirks(ddata); 2937 2918 2938 2919 error = sysc_check_disabled_devices(ddata); 2920 + if (error) 2921 + return error; 2922 + 2923 + error = sysc_check_active_timer(ddata); 2939 2924 if (error) 2940 2925 return error; 2941 2926
+7 -3
drivers/char/mem.c
··· 814 814 #ifdef CONFIG_IO_STRICT_DEVMEM 815 815 void revoke_devmem(struct resource *res) 816 816 { 817 - struct inode *inode = READ_ONCE(devmem_inode); 817 + /* pairs with smp_store_release() in devmem_init_inode() */ 818 + struct inode *inode = smp_load_acquire(&devmem_inode); 818 819 819 820 /* 820 821 * Check that the initialization has completed. Losing the race ··· 1029 1028 return rc; 1030 1029 } 1031 1030 1032 - /* publish /dev/mem initialized */ 1033 - WRITE_ONCE(devmem_inode, inode); 1031 + /* 1032 + * Publish /dev/mem initialized. 1033 + * Pairs with smp_load_acquire() in revoke_devmem(). 1034 + */ 1035 + smp_store_release(&devmem_inode, inode); 1034 1036 1035 1037 return 0; 1036 1038 }
+1 -1
drivers/char/tpm/st33zp24/i2c.c
··· 210 210 211 211 /* 212 212 * st33zp24_i2c_probe initialize the TPM device 213 - * @param: client, the i2c_client drescription (TPM I2C description). 213 + * @param: client, the i2c_client description (TPM I2C description). 214 214 * @param: id, the i2c_device_id struct. 215 215 * @return: 0 in case of success. 216 216 * -1 in other case.
+2 -2
drivers/char/tpm/st33zp24/spi.c
··· 329 329 330 330 /* 331 331 * st33zp24_spi_probe initialize the TPM device 332 - * @param: dev, the spi_device drescription (TPM SPI description). 332 + * @param: dev, the spi_device description (TPM SPI description). 333 333 * @return: 0 in case of success. 334 334 * or a negative value describing the error. 335 335 */ ··· 378 378 379 379 /* 380 380 * st33zp24_spi_remove remove the TPM device 381 - * @param: client, the spi_device drescription (TPM SPI description). 381 + * @param: client, the spi_device description (TPM SPI description). 382 382 * @return: 0 in case of success. 383 383 */ 384 384 static int st33zp24_spi_remove(struct spi_device *dev)
+1 -1
drivers/char/tpm/st33zp24/st33zp24.c
··· 502 502 503 503 /* 504 504 * st33zp24_probe initialize the TPM device 505 - * @param: client, the i2c_client drescription (TPM I2C description). 505 + * @param: client, the i2c_client description (TPM I2C description). 506 506 * @param: id, the i2c_device_id struct. 507 507 * @return: 0 in case of success. 508 508 * -1 in other case.
+9 -10
drivers/char/tpm/tpm-dev-common.c
··· 189 189 goto out; 190 190 } 191 191 192 - /* atomic tpm command send and result receive. We only hold the ops 193 - * lock during this period so that the tpm can be unregistered even if 194 - * the char dev is held open. 195 - */ 196 - if (tpm_try_get_ops(priv->chip)) { 197 - ret = -EPIPE; 198 - goto out; 199 - } 200 - 201 192 priv->response_length = 0; 202 193 priv->response_read = false; 203 194 *off = 0; ··· 202 211 if (file->f_flags & O_NONBLOCK) { 203 212 priv->command_enqueued = true; 204 213 queue_work(tpm_dev_wq, &priv->async_work); 205 - tpm_put_ops(priv->chip); 206 214 mutex_unlock(&priv->buffer_mutex); 207 215 return size; 216 + } 217 + 218 + /* atomic tpm command send and result receive. We only hold the ops 219 + * lock during this period so that the tpm can be unregistered even if 220 + * the char dev is held open. 221 + */ 222 + if (tpm_try_get_ops(priv->chip)) { 223 + ret = -EPIPE; 224 + goto out; 208 225 } 209 226 210 227 ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer,
+7 -7
drivers/char/tpm/tpm_ibmvtpm.c
··· 683 683 if (rc) 684 684 goto init_irq_cleanup; 685 685 686 - if (!strcmp(id->compat, "IBM,vtpm20")) { 687 - chip->flags |= TPM_CHIP_FLAG_TPM2; 688 - rc = tpm2_get_cc_attrs_tbl(chip); 689 - if (rc) 690 - goto init_irq_cleanup; 691 - } 692 - 693 686 if (!wait_event_timeout(ibmvtpm->crq_queue.wq, 694 687 ibmvtpm->rtce_buf != NULL, 695 688 HZ)) { 696 689 dev_err(dev, "CRQ response timed out\n"); 697 690 goto init_irq_cleanup; 691 + } 692 + 693 + if (!strcmp(id->compat, "IBM,vtpm20")) { 694 + chip->flags |= TPM_CHIP_FLAG_TPM2; 695 + rc = tpm2_get_cc_attrs_tbl(chip); 696 + if (rc) 697 + goto init_irq_cleanup; 698 698 } 699 699 700 700 return tpm_chip_register(chip);
+7
drivers/char/tpm/tpm_tis.c
··· 235 235 return tpm_tis_init(&pnp_dev->dev, &tpm_info); 236 236 } 237 237 238 + /* 239 + * There is a known bug caused by 93e1b7d42e1e ("[PATCH] tpm: add HID module 240 + * parameter"). This commit added IFX0102 device ID, which is also used by 241 + * tpm_infineon but ignored to add quirks to probe which driver ought to be 242 + * used. 243 + */ 244 + 238 245 static struct pnp_device_id tpm_pnp_tbl[] = { 239 246 {"PNP0C31", 0}, /* TPM */ 240 247 {"ATM1200", 0}, /* Atmel */
+1 -1
drivers/char/tpm/tpm_tis_core.c
··· 1085 1085 1086 1086 return 0; 1087 1087 out_err: 1088 - if ((chip->ops != NULL) && (chip->ops->clk_enable != NULL)) 1088 + if (chip->ops->clk_enable != NULL) 1089 1089 chip->ops->clk_enable(chip, false); 1090 1090 1091 1091 tpm_tis_remove(chip);
+5 -5
drivers/char/tpm/tpm_tis_spi_main.c
··· 53 53 54 54 if ((phy->iobuf[3] & 0x01) == 0) { 55 55 // handle SPI wait states 56 - phy->iobuf[0] = 0; 57 - 58 56 for (i = 0; i < TPM_RETRY; i++) { 59 57 spi_xfer->len = 1; 60 58 spi_message_init(&m); ··· 102 104 if (ret < 0) 103 105 goto exit; 104 106 107 + /* Flow control transfers are receive only */ 108 + spi_xfer.tx_buf = NULL; 105 109 ret = phy->flow_control(phy, &spi_xfer); 106 110 if (ret < 0) 107 111 goto exit; ··· 113 113 spi_xfer.delay.value = 5; 114 114 spi_xfer.delay.unit = SPI_DELAY_UNIT_USECS; 115 115 116 - if (in) { 117 - spi_xfer.tx_buf = NULL; 118 - } else if (out) { 116 + if (out) { 117 + spi_xfer.tx_buf = phy->iobuf; 119 118 spi_xfer.rx_buf = NULL; 120 119 memcpy(phy->iobuf, out, transfer_len); 121 120 out += transfer_len; ··· 287 288 .pm = &tpm_tis_pm, 288 289 .of_match_table = of_match_ptr(of_tis_spi_match), 289 290 .acpi_match_table = ACPI_PTR(acpi_tis_spi_match), 291 + .probe_type = PROBE_PREFER_ASYNCHRONOUS, 290 292 }, 291 293 .probe = tpm_tis_spi_driver_probe, 292 294 .remove = tpm_tis_spi_remove,
+2 -1
drivers/char/virtio_console.c
··· 2116 2116 { VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID }, 2117 2117 { 0 }, 2118 2118 }; 2119 + MODULE_DEVICE_TABLE(virtio, id_table); 2119 2120 2120 2121 static unsigned int features[] = { 2121 2122 VIRTIO_CONSOLE_F_SIZE, ··· 2129 2128 #endif 2130 2129 { 0 }, 2131 2130 }; 2131 + MODULE_DEVICE_TABLE(virtio, rproc_serial_id_table); 2132 2132 2133 2133 static unsigned int rproc_serial_features[] = { 2134 2134 }; ··· 2282 2280 module_init(init); 2283 2281 module_exit(fini); 2284 2282 2285 - MODULE_DEVICE_TABLE(virtio, id_table); 2286 2283 MODULE_DESCRIPTION("Virtio console driver"); 2287 2284 MODULE_LICENSE("GPL");
+1
drivers/clk/Kconfig
··· 50 50 config CLK_HSDK 51 51 bool "PLL Driver for HSDK platform" 52 52 depends on OF || COMPILE_TEST 53 + depends on IOMEM 53 54 help 54 55 This driver supports the HSDK core, system, ddr, tunnel and hdmi PLLs 55 56 control.
+41 -8
drivers/clk/clk-ast2600.c
··· 131 131 { 0 } 132 132 }; 133 133 134 + static const struct clk_div_table ast2600_emmc_extclk_div_table[] = { 135 + { 0x0, 2 }, 136 + { 0x1, 4 }, 137 + { 0x2, 6 }, 138 + { 0x3, 8 }, 139 + { 0x4, 10 }, 140 + { 0x5, 12 }, 141 + { 0x6, 14 }, 142 + { 0x7, 16 }, 143 + { 0 } 144 + }; 145 + 134 146 static const struct clk_div_table ast2600_mac_div_table[] = { 135 147 { 0x0, 4 }, 136 148 { 0x1, 4 }, ··· 402 390 return hw; 403 391 } 404 392 393 + static const char *const emmc_extclk_parent_names[] = { 394 + "emmc_extclk_hpll_in", 395 + "mpll", 396 + }; 397 + 405 398 static const char * const vclk_parent_names[] = { 406 399 "dpll", 407 400 "d1pll", ··· 476 459 return PTR_ERR(hw); 477 460 aspeed_g6_clk_data->hws[ASPEED_CLK_UARTX] = hw; 478 461 479 - /* EMMC ext clock divider */ 480 - hw = clk_hw_register_gate(dev, "emmc_extclk_gate", "hpll", 0, 481 - scu_g6_base + ASPEED_G6_CLK_SELECTION1, 15, 0, 482 - &aspeed_g6_clk_lock); 462 + /* EMMC ext clock */ 463 + hw = clk_hw_register_fixed_factor(dev, "emmc_extclk_hpll_in", "hpll", 464 + 0, 1, 2); 483 465 if (IS_ERR(hw)) 484 466 return PTR_ERR(hw); 485 - hw = clk_hw_register_divider_table(dev, "emmc_extclk", "emmc_extclk_gate", 0, 486 - scu_g6_base + ASPEED_G6_CLK_SELECTION1, 12, 3, 0, 487 - ast2600_div_table, 488 - &aspeed_g6_clk_lock); 467 + 468 + hw = clk_hw_register_mux(dev, "emmc_extclk_mux", 469 + emmc_extclk_parent_names, 470 + ARRAY_SIZE(emmc_extclk_parent_names), 0, 471 + scu_g6_base + ASPEED_G6_CLK_SELECTION1, 11, 1, 472 + 0, &aspeed_g6_clk_lock); 473 + if (IS_ERR(hw)) 474 + return PTR_ERR(hw); 475 + 476 + hw = clk_hw_register_gate(dev, "emmc_extclk_gate", "emmc_extclk_mux", 477 + 0, scu_g6_base + ASPEED_G6_CLK_SELECTION1, 478 + 15, 0, &aspeed_g6_clk_lock); 479 + if (IS_ERR(hw)) 480 + return PTR_ERR(hw); 481 + 482 + hw = clk_hw_register_divider_table(dev, "emmc_extclk", 483 + "emmc_extclk_gate", 0, 484 + scu_g6_base + 485 + ASPEED_G6_CLK_SELECTION1, 12, 486 + 3, 0, ast2600_emmc_extclk_div_table, 487 + 
&aspeed_g6_clk_lock); 489 488 if (IS_ERR(hw)) 490 489 return PTR_ERR(hw); 491 490 aspeed_g6_clk_data->hws[ASPEED_CLK_EMMC] = hw;
+1
drivers/clk/mvebu/Kconfig
··· 42 42 43 43 config ARMADA_AP_CPU_CLK 44 44 bool 45 + select ARMADA_AP_CP_HELPER 45 46 46 47 config ARMADA_CP110_SYSCON 47 48 bool
+11
drivers/clocksource/arm_arch_timer.c
··· 480 480 .set_next_event_virt = erratum_set_next_event_tval_virt, 481 481 }, 482 482 #endif 483 + #ifdef CONFIG_ARM64_ERRATUM_1418040 484 + { 485 + .match_type = ate_match_local_cap_id, 486 + .id = (void *)ARM64_WORKAROUND_1418040, 487 + .desc = "ARM erratum 1418040", 488 + .disable_compat_vdso = true, 489 + }, 490 + #endif 483 491 }; 484 492 485 493 typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *, ··· 574 566 if (wa->read_cntvct_el0) { 575 567 clocksource_counter.vdso_clock_mode = VDSO_CLOCKMODE_NONE; 576 568 vdso_default = VDSO_CLOCKMODE_NONE; 569 + } else if (wa->disable_compat_vdso && vdso_default != VDSO_CLOCKMODE_NONE) { 570 + vdso_default = VDSO_CLOCKMODE_ARCHTIMER_NOCOMPAT; 571 + clocksource_counter.vdso_clock_mode = vdso_default; 577 572 } 578 573 } 579 574
+36 -10
drivers/clocksource/timer-ti-dm-systimer.c
··· 19 19 /* For type1, set SYSC_OMAP2_CLOCKACTIVITY for fck off on idle, l4 clock on */ 20 20 #define DMTIMER_TYPE1_ENABLE ((1 << 9) | (SYSC_IDLE_SMART << 3) | \ 21 21 SYSC_OMAP2_ENAWAKEUP | SYSC_OMAP2_AUTOIDLE) 22 - 22 + #define DMTIMER_TYPE1_DISABLE (SYSC_OMAP2_SOFTRESET | SYSC_OMAP2_AUTOIDLE) 23 23 #define DMTIMER_TYPE2_ENABLE (SYSC_IDLE_SMART_WKUP << 2) 24 24 #define DMTIMER_RESET_WAIT 100000 25 25 ··· 44 44 u8 ctrl; 45 45 u8 wakeup; 46 46 u8 ifctrl; 47 + struct clk *fck; 48 + struct clk *ick; 47 49 unsigned long rate; 48 50 }; 49 51 ··· 300 298 } 301 299 302 300 /* Interface clocks are only available on some SoCs variants */ 303 - static int __init dmtimer_systimer_init_clock(struct device_node *np, 301 + static int __init dmtimer_systimer_init_clock(struct dmtimer_systimer *t, 302 + struct device_node *np, 304 303 const char *name, 305 304 unsigned long *rate) 306 305 { 307 306 struct clk *clock; 308 307 unsigned long r; 308 + bool is_ick = false; 309 309 int error; 310 310 311 + is_ick = !strncmp(name, "ick", 3); 312 + 311 313 clock = of_clk_get_by_name(np, name); 312 - if ((PTR_ERR(clock) == -EINVAL) && !strncmp(name, "ick", 3)) 314 + if ((PTR_ERR(clock) == -EINVAL) && is_ick) 313 315 return 0; 314 316 else if (IS_ERR(clock)) 315 317 return PTR_ERR(clock); ··· 325 319 r = clk_get_rate(clock); 326 320 if (!r) 327 321 return -ENODEV; 322 + 323 + if (is_ick) 324 + t->ick = clock; 325 + else 326 + t->fck = clock; 328 327 329 328 *rate = r; 330 329 ··· 350 339 351 340 static void dmtimer_systimer_disable(struct dmtimer_systimer *t) 352 341 { 353 - writel_relaxed(0, t->base + t->sysc); 342 + if (!dmtimer_systimer_revision1(t)) 343 + return; 344 + 345 + writel_relaxed(DMTIMER_TYPE1_DISABLE, t->base + t->sysc); 354 346 } 355 347 356 348 static int __init dmtimer_systimer_setup(struct device_node *np, ··· 380 366 pr_err("%s: clock source init failed: %i\n", __func__, error); 381 367 382 368 /* For ti-sysc, we have timer clocks at the parent module level */ 383 - 
error = dmtimer_systimer_init_clock(np->parent, "fck", &rate); 369 + error = dmtimer_systimer_init_clock(t, np->parent, "fck", &rate); 384 370 if (error) 385 371 goto err_unmap; 386 372 387 373 t->rate = rate; 388 374 389 - error = dmtimer_systimer_init_clock(np->parent, "ick", &rate); 375 + error = dmtimer_systimer_init_clock(t, np->parent, "ick", &rate); 390 376 if (error) 391 377 goto err_unmap; 392 378 ··· 510 496 struct dmtimer_systimer *t = &clkevt->t; 511 497 512 498 dmtimer_systimer_disable(t); 499 + clk_disable(t->fck); 513 500 } 514 501 515 502 static void omap_clockevent_unidle(struct clock_event_device *evt) 516 503 { 517 504 struct dmtimer_clockevent *clkevt = to_dmtimer_clockevent(evt); 518 505 struct dmtimer_systimer *t = &clkevt->t; 506 + int error; 507 + 508 + error = clk_enable(t->fck); 509 + if (error) 510 + pr_err("could not enable timer fck on resume: %i\n", error); 519 511 520 512 dmtimer_systimer_enable(t); 521 513 writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_ena); ··· 590 570 3, /* Timer internal resynch latency */ 591 571 0xffffffff); 592 572 593 - if (of_device_is_compatible(np, "ti,am33xx") || 594 - of_device_is_compatible(np, "ti,am43")) { 573 + if (of_machine_is_compatible("ti,am33xx") || 574 + of_machine_is_compatible("ti,am43")) { 595 575 dev->suspend = omap_clockevent_idle; 596 576 dev->resume = omap_clockevent_unidle; 597 577 } ··· 636 616 637 617 clksrc->loadval = readl_relaxed(t->base + t->counter); 638 618 dmtimer_systimer_disable(t); 619 + clk_disable(t->fck); 639 620 } 640 621 641 622 static void dmtimer_clocksource_resume(struct clocksource *cs) 642 623 { 643 624 struct dmtimer_clocksource *clksrc = to_dmtimer_clocksource(cs); 644 625 struct dmtimer_systimer *t = &clksrc->t; 626 + int error; 627 + 628 + error = clk_enable(t->fck); 629 + if (error) 630 + pr_err("could not enable timer fck on resume: %i\n", error); 645 631 646 632 dmtimer_systimer_enable(t); 647 633 writel_relaxed(clksrc->loadval, t->base + 
t->counter); ··· 679 653 dev->mask = CLOCKSOURCE_MASK(32); 680 654 dev->flags = CLOCK_SOURCE_IS_CONTINUOUS; 681 655 682 - if (of_device_is_compatible(np, "ti,am33xx") || 683 - of_device_is_compatible(np, "ti,am43")) { 656 + /* Unlike for clockevent, legacy code sets suspend only for am4 */ 657 + if (of_machine_is_compatible("ti,am43")) { 684 658 dev->suspend = dmtimer_clocksource_suspend; 685 659 dev->resume = dmtimer_clocksource_resume; 686 660 }
+19 -3
drivers/counter/104-quad-8.c
··· 1274 1274 struct counter_signal *signal, 1275 1275 void *private, char *buf) 1276 1276 { 1277 - const struct quad8_iio *const priv = counter->priv; 1277 + struct quad8_iio *const priv = counter->priv; 1278 1278 const size_t channel_id = signal->id / 2; 1279 - const bool disabled = !(priv->cable_fault_enable & BIT(channel_id)); 1279 + bool disabled; 1280 1280 unsigned int status; 1281 1281 unsigned int fault; 1282 1282 1283 - if (disabled) 1283 + mutex_lock(&priv->lock); 1284 + 1285 + disabled = !(priv->cable_fault_enable & BIT(channel_id)); 1286 + 1287 + if (disabled) { 1288 + mutex_unlock(&priv->lock); 1284 1289 return -EINVAL; 1290 + } 1285 1291 1286 1292 /* Logic 0 = cable fault */ 1287 1293 status = inb(priv->base + QUAD8_DIFF_ENCODER_CABLE_STATUS); 1294 + 1295 + mutex_unlock(&priv->lock); 1288 1296 1289 1297 /* Mask respective channel and invert logic */ 1290 1298 fault = !(status & BIT(channel_id)); ··· 1325 1317 if (ret) 1326 1318 return ret; 1327 1319 1320 + mutex_lock(&priv->lock); 1321 + 1328 1322 if (enable) 1329 1323 priv->cable_fault_enable |= BIT(channel_id); 1330 1324 else ··· 1336 1326 cable_fault_enable = ~priv->cable_fault_enable; 1337 1327 1338 1328 outb(cable_fault_enable, priv->base + QUAD8_DIFF_ENCODER_CABLE_STATUS); 1329 + 1330 + mutex_unlock(&priv->lock); 1339 1331 1340 1332 return len; 1341 1333 } ··· 1365 1353 if (ret) 1366 1354 return ret; 1367 1355 1356 + mutex_lock(&priv->lock); 1357 + 1368 1358 priv->fck_prescaler[channel_id] = prescaler; 1369 1359 1370 1360 /* Reset Byte Pointer */ ··· 1376 1362 outb(prescaler, base_offset); 1377 1363 outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_PRESET_PSC, 1378 1364 base_offset + 1); 1365 + 1366 + mutex_unlock(&priv->lock); 1379 1367 1380 1368 return len; 1381 1369 }
+6 -2
drivers/cpufreq/intel_pstate.c
··· 2464 2464 .name = "intel_cpufreq", 2465 2465 }; 2466 2466 2467 - static struct cpufreq_driver *default_driver = &intel_pstate; 2467 + static struct cpufreq_driver *default_driver; 2468 2468 2469 2469 static void intel_pstate_driver_cleanup(void) 2470 2470 { ··· 2758 2758 hwp_active++; 2759 2759 hwp_mode_bdw = id->driver_data; 2760 2760 intel_pstate.attr = hwp_cpufreq_attrs; 2761 + default_driver = &intel_pstate; 2761 2762 goto hwp_cpu_matched; 2762 2763 } 2763 2764 } else { ··· 2776 2775 return -ENODEV; 2777 2776 } 2778 2777 /* Without HWP start in the passive mode. */ 2779 - default_driver = &intel_cpufreq; 2778 + if (!default_driver) 2779 + default_driver = &intel_cpufreq; 2780 2780 2781 2781 hwp_cpu_matched: 2782 2782 /* ··· 2822 2820 2823 2821 if (!strcmp(str, "disable")) { 2824 2822 no_load = 1; 2823 + } else if (!strcmp(str, "active")) { 2824 + default_driver = &intel_pstate; 2825 2825 } else if (!strcmp(str, "passive")) { 2826 2826 default_driver = &intel_cpufreq; 2827 2827 no_hwp = 1;
+1 -1
drivers/crypto/chelsio/chtls/chtls_cm.c
··· 102 102 case PF_INET: 103 103 if (likely(!inet_sk(sk)->inet_rcv_saddr)) 104 104 return ndev; 105 - ndev = ip_dev_find(&init_net, inet_sk(sk)->inet_rcv_saddr); 105 + ndev = __ip_dev_find(&init_net, inet_sk(sk)->inet_rcv_saddr, false); 106 106 break; 107 107 #if IS_ENABLED(CONFIG_IPV6) 108 108 case PF_INET6:
+4 -3
drivers/crypto/chelsio/chtls/chtls_io.c
··· 1052 1052 &record_type); 1053 1053 if (err) 1054 1054 goto out_err; 1055 + 1056 + /* Avoid appending tls handshake, alert to tls data */ 1057 + if (skb) 1058 + tx_skb_finalize(skb); 1055 1059 } 1056 1060 1057 1061 recordsz = size; 1058 1062 csk->tlshws.txleft = recordsz; 1059 1063 csk->tlshws.type = record_type; 1060 - 1061 - if (skb) 1062 - ULP_SKB_CB(skb)->ulp.tls.type = record_type; 1063 1064 } 1064 1065 1065 1066 if (!skb || (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) ||
+32 -33
drivers/dma-buf/dma-buf.c
··· 45 45 size_t ret = 0; 46 46 47 47 dmabuf = dentry->d_fsdata; 48 - dma_resv_lock(dmabuf->resv, NULL); 48 + spin_lock(&dmabuf->name_lock); 49 49 if (dmabuf->name) 50 50 ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN); 51 - dma_resv_unlock(dmabuf->resv); 51 + spin_unlock(&dmabuf->name_lock); 52 52 53 53 return dynamic_dname(dentry, buffer, buflen, "/%s:%s", 54 54 dentry->d_name.name, ret > 0 ? name : ""); 55 55 } 56 56 57 - static const struct dentry_operations dma_buf_dentry_ops = { 58 - .d_dname = dmabuffs_dname, 59 - }; 60 - 61 - static struct vfsmount *dma_buf_mnt; 62 - 63 - static int dma_buf_fs_init_context(struct fs_context *fc) 64 - { 65 - struct pseudo_fs_context *ctx; 66 - 67 - ctx = init_pseudo(fc, DMA_BUF_MAGIC); 68 - if (!ctx) 69 - return -ENOMEM; 70 - ctx->dops = &dma_buf_dentry_ops; 71 - return 0; 72 - } 73 - 74 - static struct file_system_type dma_buf_fs_type = { 75 - .name = "dmabuf", 76 - .init_fs_context = dma_buf_fs_init_context, 77 - .kill_sb = kill_anon_super, 78 - }; 79 - 80 - static int dma_buf_release(struct inode *inode, struct file *file) 57 + static void dma_buf_release(struct dentry *dentry) 81 58 { 82 59 struct dma_buf *dmabuf; 83 60 84 - if (!is_dma_buf_file(file)) 85 - return -EINVAL; 86 - 87 - dmabuf = file->private_data; 61 + dmabuf = dentry->d_fsdata; 88 62 89 63 BUG_ON(dmabuf->vmapping_counter); 90 64 ··· 84 110 module_put(dmabuf->owner); 85 111 kfree(dmabuf->name); 86 112 kfree(dmabuf); 113 + } 114 + 115 + static const struct dentry_operations dma_buf_dentry_ops = { 116 + .d_dname = dmabuffs_dname, 117 + .d_release = dma_buf_release, 118 + }; 119 + 120 + static struct vfsmount *dma_buf_mnt; 121 + 122 + static int dma_buf_fs_init_context(struct fs_context *fc) 123 + { 124 + struct pseudo_fs_context *ctx; 125 + 126 + ctx = init_pseudo(fc, DMA_BUF_MAGIC); 127 + if (!ctx) 128 + return -ENOMEM; 129 + ctx->dops = &dma_buf_dentry_ops; 87 130 return 0; 88 131 } 132 + 133 + static struct file_system_type dma_buf_fs_type = { 134 + 
.name = "dmabuf", 135 + .init_fs_context = dma_buf_fs_init_context, 136 + .kill_sb = kill_anon_super, 137 + }; 89 138 90 139 static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma) 91 140 { ··· 338 341 kfree(name); 339 342 goto out_unlock; 340 343 } 344 + spin_lock(&dmabuf->name_lock); 341 345 kfree(dmabuf->name); 342 346 dmabuf->name = name; 347 + spin_unlock(&dmabuf->name_lock); 343 348 344 349 out_unlock: 345 350 dma_resv_unlock(dmabuf->resv); ··· 404 405 /* Don't count the temporary reference taken inside procfs seq_show */ 405 406 seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1); 406 407 seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name); 407 - dma_resv_lock(dmabuf->resv, NULL); 408 + spin_lock(&dmabuf->name_lock); 408 409 if (dmabuf->name) 409 410 seq_printf(m, "name:\t%s\n", dmabuf->name); 410 - dma_resv_unlock(dmabuf->resv); 411 + spin_unlock(&dmabuf->name_lock); 411 412 } 412 413 413 414 static const struct file_operations dma_buf_fops = { 414 - .release = dma_buf_release, 415 415 .mmap = dma_buf_mmap_internal, 416 416 .llseek = dma_buf_llseek, 417 417 .poll = dma_buf_poll, ··· 544 546 dmabuf->size = exp_info->size; 545 547 dmabuf->exp_name = exp_info->exp_name; 546 548 dmabuf->owner = exp_info->owner; 549 + spin_lock_init(&dmabuf->name_lock); 547 550 init_waitqueue_head(&dmabuf->poll); 548 551 dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll; 549 552 dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
+2
drivers/dma/dmatest.c
··· 1176 1176 } else if (dmatest_run) { 1177 1177 if (!is_threaded_test_pending(info)) { 1178 1178 pr_info("No channels configured, continue with any\n"); 1179 + if (!is_threaded_test_run(info)) 1180 + stop_threaded_test(info); 1179 1181 add_threaded_test(info); 1180 1182 } 1181 1183 start_threaded_tests(info);
-12
drivers/dma/dw/core.c
··· 118 118 { 119 119 struct dw_dma *dw = to_dw_dma(dwc->chan.device); 120 120 121 - if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags)) 122 - return; 123 - 124 121 dw->initialize_chan(dwc); 125 122 126 123 /* Enable interrupts */ 127 124 channel_set_bit(dw, MASK.XFER, dwc->mask); 128 125 channel_set_bit(dw, MASK.ERROR, dwc->mask); 129 - 130 - set_bit(DW_DMA_IS_INITIALIZED, &dwc->flags); 131 126 } 132 127 133 128 /*----------------------------------------------------------------------*/ ··· 949 954 950 955 void do_dw_dma_off(struct dw_dma *dw) 951 956 { 952 - unsigned int i; 953 - 954 957 dma_writel(dw, CFG, 0); 955 958 956 959 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); ··· 959 966 960 967 while (dma_readl(dw, CFG) & DW_CFG_DMA_EN) 961 968 cpu_relax(); 962 - 963 - for (i = 0; i < dw->dma.chancnt; i++) 964 - clear_bit(DW_DMA_IS_INITIALIZED, &dw->chan[i].flags); 965 969 } 966 970 967 971 void do_dw_dma_on(struct dw_dma *dw) ··· 1021 1031 1022 1032 /* Clear custom channel configuration */ 1023 1033 memset(&dwc->dws, 0, sizeof(struct dw_dma_slave)); 1024 - 1025 - clear_bit(DW_DMA_IS_INITIALIZED, &dwc->flags); 1026 1034 1027 1035 /* Disable interrupts */ 1028 1036 channel_clear_bit(dw, MASK.XFER, dwc->mask);
+16 -12
drivers/dma/fsl-edma-common.c
··· 352 352 /* 353 353 * TCD parameters are stored in struct fsl_edma_hw_tcd in little 354 354 * endian format. However, we need to load the TCD registers in 355 - * big- or little-endian obeying the eDMA engine model endian. 355 + * big- or little-endian obeying the eDMA engine model endian, 356 + * and this is performed from specific edma_write functions 356 357 */ 357 358 edma_writew(edma, 0, &regs->tcd[ch].csr); 358 - edma_writel(edma, le32_to_cpu(tcd->saddr), &regs->tcd[ch].saddr); 359 - edma_writel(edma, le32_to_cpu(tcd->daddr), &regs->tcd[ch].daddr); 360 359 361 - edma_writew(edma, le16_to_cpu(tcd->attr), &regs->tcd[ch].attr); 362 - edma_writew(edma, le16_to_cpu(tcd->soff), &regs->tcd[ch].soff); 360 + edma_writel(edma, (s32)tcd->saddr, &regs->tcd[ch].saddr); 361 + edma_writel(edma, (s32)tcd->daddr, &regs->tcd[ch].daddr); 363 362 364 - edma_writel(edma, le32_to_cpu(tcd->nbytes), &regs->tcd[ch].nbytes); 365 - edma_writel(edma, le32_to_cpu(tcd->slast), &regs->tcd[ch].slast); 363 + edma_writew(edma, (s16)tcd->attr, &regs->tcd[ch].attr); 364 + edma_writew(edma, tcd->soff, &regs->tcd[ch].soff); 366 365 367 - edma_writew(edma, le16_to_cpu(tcd->citer), &regs->tcd[ch].citer); 368 - edma_writew(edma, le16_to_cpu(tcd->biter), &regs->tcd[ch].biter); 369 - edma_writew(edma, le16_to_cpu(tcd->doff), &regs->tcd[ch].doff); 366 + edma_writel(edma, (s32)tcd->nbytes, &regs->tcd[ch].nbytes); 367 + edma_writel(edma, (s32)tcd->slast, &regs->tcd[ch].slast); 370 368 371 - edma_writel(edma, le32_to_cpu(tcd->dlast_sga), 369 + edma_writew(edma, (s16)tcd->citer, &regs->tcd[ch].citer); 370 + edma_writew(edma, (s16)tcd->biter, &regs->tcd[ch].biter); 371 + edma_writew(edma, (s16)tcd->doff, &regs->tcd[ch].doff); 372 + 373 + edma_writel(edma, (s32)tcd->dlast_sga, 372 374 &regs->tcd[ch].dlast_sga); 373 375 374 - edma_writew(edma, le16_to_cpu(tcd->csr), &regs->tcd[ch].csr); 376 + edma_writew(edma, (s16)tcd->csr, &regs->tcd[ch].csr); 375 377 } 376 378 377 379 static inline ··· 590 588 void 
fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan) 591 589 { 592 590 struct virt_dma_desc *vdesc; 591 + 592 + lockdep_assert_held(&fsl_chan->vchan.lock); 593 593 594 594 vdesc = vchan_next_desc(&fsl_chan->vchan); 595 595 if (!vdesc)
+1 -1
drivers/dma/fsl-edma-common.h
··· 33 33 #define EDMA_TCD_ATTR_DSIZE_16BIT BIT(0) 34 34 #define EDMA_TCD_ATTR_DSIZE_32BIT BIT(1) 35 35 #define EDMA_TCD_ATTR_DSIZE_64BIT (BIT(0) | BIT(1)) 36 - #define EDMA_TCD_ATTR_DSIZE_32BYTE (BIT(3) | BIT(0)) 36 + #define EDMA_TCD_ATTR_DSIZE_32BYTE (BIT(2) | BIT(0)) 37 37 #define EDMA_TCD_ATTR_SSIZE_8BIT 0 38 38 #define EDMA_TCD_ATTR_SSIZE_16BIT (EDMA_TCD_ATTR_DSIZE_16BIT << 8) 39 39 #define EDMA_TCD_ATTR_SSIZE_32BIT (EDMA_TCD_ATTR_DSIZE_32BIT << 8)
+7
drivers/dma/fsl-edma.c
··· 45 45 fsl_chan = &fsl_edma->chans[ch]; 46 46 47 47 spin_lock(&fsl_chan->vchan.lock); 48 + 49 + if (!fsl_chan->edesc) { 50 + /* terminate_all called before */ 51 + spin_unlock(&fsl_chan->vchan.lock); 52 + continue; 53 + } 54 + 48 55 if (!fsl_chan->edesc->iscyclic) { 49 56 list_del(&fsl_chan->edesc->vdesc.node); 50 57 vchan_cookie_complete(&fsl_chan->edesc->vdesc);
+16 -3
drivers/dma/idxd/cdev.c
··· 74 74 struct idxd_device *idxd; 75 75 struct idxd_wq *wq; 76 76 struct device *dev; 77 + int rc = 0; 77 78 78 79 wq = inode_wq(inode); 79 80 idxd = wq->idxd; ··· 82 81 83 82 dev_dbg(dev, "%s called: %d\n", __func__, idxd_wq_refcount(wq)); 84 83 85 - if (idxd_wq_refcount(wq) > 0 && wq_dedicated(wq)) 86 - return -EBUSY; 87 - 88 84 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 89 85 if (!ctx) 90 86 return -ENOMEM; 91 87 88 + mutex_lock(&wq->wq_lock); 89 + 90 + if (idxd_wq_refcount(wq) > 0 && wq_dedicated(wq)) { 91 + rc = -EBUSY; 92 + goto failed; 93 + } 94 + 92 95 ctx->wq = wq; 93 96 filp->private_data = ctx; 94 97 idxd_wq_get(wq); 98 + mutex_unlock(&wq->wq_lock); 95 99 return 0; 100 + 101 + failed: 102 + mutex_unlock(&wq->wq_lock); 103 + kfree(ctx); 104 + return rc; 96 105 } 97 106 98 107 static int idxd_cdev_release(struct inode *node, struct file *filep) ··· 116 105 filep->private_data = NULL; 117 106 118 107 kfree(ctx); 108 + mutex_lock(&wq->wq_lock); 119 109 idxd_wq_put(wq); 110 + mutex_unlock(&wq->wq_lock); 120 111 return 0; 121 112 } 122 113
+25
drivers/dma/idxd/device.c
··· 320 320 devm_iounmap(dev, wq->dportal); 321 321 } 322 322 323 + void idxd_wq_disable_cleanup(struct idxd_wq *wq) 324 + { 325 + struct idxd_device *idxd = wq->idxd; 326 + struct device *dev = &idxd->pdev->dev; 327 + int i, wq_offset; 328 + 329 + lockdep_assert_held(&idxd->dev_lock); 330 + memset(&wq->wqcfg, 0, sizeof(wq->wqcfg)); 331 + wq->type = IDXD_WQT_NONE; 332 + wq->size = 0; 333 + wq->group = NULL; 334 + wq->threshold = 0; 335 + wq->priority = 0; 336 + clear_bit(WQ_FLAG_DEDICATED, &wq->flags); 337 + memset(wq->name, 0, WQ_NAME_SIZE); 338 + 339 + for (i = 0; i < 8; i++) { 340 + wq_offset = idxd->wqcfg_offset + wq->id * 32 + i * sizeof(u32); 341 + iowrite32(0, idxd->reg_base + wq_offset); 342 + dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n", 343 + wq->id, i, wq_offset, 344 + ioread32(idxd->reg_base + wq_offset)); 345 + } 346 + } 347 + 323 348 /* Device control bits */ 324 349 static inline bool idxd_is_enabled(struct idxd_device *idxd) 325 350 {
+1
drivers/dma/idxd/idxd.h
··· 290 290 int idxd_wq_disable(struct idxd_wq *wq); 291 291 int idxd_wq_map_portal(struct idxd_wq *wq); 292 292 void idxd_wq_unmap_portal(struct idxd_wq *wq); 293 + void idxd_wq_disable_cleanup(struct idxd_wq *wq); 293 294 294 295 /* submission */ 295 296 int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
+2 -1
drivers/dma/idxd/irq.c
··· 141 141 142 142 iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET); 143 143 if (!err) 144 - return IRQ_HANDLED; 144 + goto out; 145 145 146 146 gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET); 147 147 if (gensts.state == IDXD_DEVICE_STATE_HALT) { ··· 162 162 spin_unlock_bh(&idxd->dev_lock); 163 163 } 164 164 165 + out: 165 166 idxd_unmask_msix_vector(idxd, irq_entry->id); 166 167 return IRQ_HANDLED; 167 168 }
+5
drivers/dma/idxd/sysfs.c
··· 315 315 idxd_unregister_dma_device(idxd); 316 316 spin_lock_irqsave(&idxd->dev_lock, flags); 317 317 rc = idxd_device_disable(idxd); 318 + for (i = 0; i < idxd->max_wqs; i++) { 319 + struct idxd_wq *wq = &idxd->wqs[i]; 320 + 321 + idxd_wq_disable_cleanup(wq); 322 + } 318 323 spin_unlock_irqrestore(&idxd->dev_lock, flags); 319 324 module_put(THIS_MODULE); 320 325 if (rc < 0)
+4 -7
drivers/dma/imx-sdma.c
··· 1331 1331 1332 1332 sdma_channel_synchronize(chan); 1333 1333 1334 - if (sdmac->event_id0 >= 0) 1335 - sdma_event_disable(sdmac, sdmac->event_id0); 1334 + sdma_event_disable(sdmac, sdmac->event_id0); 1336 1335 if (sdmac->event_id1) 1337 1336 sdma_event_disable(sdmac, sdmac->event_id1); 1338 1337 ··· 1631 1632 memcpy(&sdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg)); 1632 1633 1633 1634 /* Set ENBLn earlier to make sure dma request triggered after that */ 1634 - if (sdmac->event_id0 >= 0) { 1635 - if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events) 1636 - return -EINVAL; 1637 - sdma_event_enable(sdmac, sdmac->event_id0); 1638 - } 1635 + if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events) 1636 + return -EINVAL; 1637 + sdma_event_enable(sdmac, sdmac->event_id0); 1639 1638 1640 1639 if (sdmac->event_id1) { 1641 1640 if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events)
+12
drivers/dma/ioat/dma.c
··· 26 26 27 27 #include "../dmaengine.h" 28 28 29 + int completion_timeout = 200; 30 + module_param(completion_timeout, int, 0644); 31 + MODULE_PARM_DESC(completion_timeout, 32 + "set ioat completion timeout [msec] (default 200 [msec])"); 33 + int idle_timeout = 2000; 34 + module_param(idle_timeout, int, 0644); 35 + MODULE_PARM_DESC(idle_timeout, 36 + "set ioat idel timeout [msec] (default 2000 [msec])"); 37 + 38 + #define IDLE_TIMEOUT msecs_to_jiffies(idle_timeout) 39 + #define COMPLETION_TIMEOUT msecs_to_jiffies(completion_timeout) 40 + 29 41 static char *chanerr_str[] = { 30 42 "DMA Transfer Source Address Error", 31 43 "DMA Transfer Destination Address Error",
-2
drivers/dma/ioat/dma.h
··· 104 104 #define IOAT_RUN 5 105 105 #define IOAT_CHAN_ACTIVE 6 106 106 struct timer_list timer; 107 - #define COMPLETION_TIMEOUT msecs_to_jiffies(100) 108 - #define IDLE_TIMEOUT msecs_to_jiffies(2000) 109 107 #define RESET_DELAY msecs_to_jiffies(100) 110 108 struct ioatdma_device *ioat_dma; 111 109 dma_addr_t completion_dma;
+7
drivers/dma/mcf-edma.c
··· 35 35 mcf_chan = &mcf_edma->chans[ch]; 36 36 37 37 spin_lock(&mcf_chan->vchan.lock); 38 + 39 + if (!mcf_chan->edesc) { 40 + /* terminate_all called before */ 41 + spin_unlock(&mcf_chan->vchan.lock); 42 + continue; 43 + } 44 + 38 45 if (!mcf_chan->edesc->iscyclic) { 39 46 list_del(&mcf_chan->edesc->vdesc.node); 40 47 vchan_cookie_complete(&mcf_chan->edesc->vdesc);
+2
drivers/dma/sh/usb-dmac.c
··· 586 586 desc->residue = usb_dmac_get_current_residue(chan, desc, 587 587 desc->sg_index - 1); 588 588 desc->done_cookie = desc->vd.tx.cookie; 589 + desc->vd.tx_result.result = DMA_TRANS_NOERROR; 590 + desc->vd.tx_result.residue = desc->residue; 589 591 vchan_cookie_complete(&desc->vd); 590 592 591 593 /* Restart the next transfer if this driver has a next desc */
+4 -1
drivers/dma/tegra210-adma.c
··· 658 658 659 659 ret = pm_runtime_get_sync(tdc2dev(tdc)); 660 660 if (ret < 0) { 661 + pm_runtime_put_noidle(tdc2dev(tdc)); 661 662 free_irq(tdc->irq, tdc); 662 663 return ret; 663 664 } ··· 870 869 pm_runtime_enable(&pdev->dev); 871 870 872 871 ret = pm_runtime_get_sync(&pdev->dev); 873 - if (ret < 0) 872 + if (ret < 0) { 873 + pm_runtime_put_noidle(&pdev->dev); 874 874 goto rpm_disable; 875 + } 875 876 876 877 ret = tegra_adma_init(tdma); 877 878 if (ret)
+1
drivers/dma/ti/k3-udma-private.c
··· 42 42 ud = platform_get_drvdata(pdev); 43 43 if (!ud) { 44 44 pr_debug("UDMA has not been probed\n"); 45 + put_device(&pdev->dev); 45 46 return ERR_PTR(-EPROBE_DEFER); 46 47 } 47 48
+19 -20
drivers/dma/ti/k3-udma.c
··· 1753 1753 dev_err(ud->ddev.dev, 1754 1754 "Descriptor pool allocation failed\n"); 1755 1755 uc->use_dma_pool = false; 1756 - return -ENOMEM; 1756 + ret = -ENOMEM; 1757 + goto err_cleanup; 1757 1758 } 1758 1759 } 1759 1760 ··· 1774 1773 1775 1774 ret = udma_get_chan_pair(uc); 1776 1775 if (ret) 1777 - return ret; 1776 + goto err_cleanup; 1778 1777 1779 1778 ret = udma_alloc_tx_resources(uc); 1780 - if (ret) 1781 - return ret; 1779 + if (ret) { 1780 + udma_put_rchan(uc); 1781 + goto err_cleanup; 1782 + } 1782 1783 1783 1784 ret = udma_alloc_rx_resources(uc); 1784 1785 if (ret) { 1785 1786 udma_free_tx_resources(uc); 1786 - return ret; 1787 + goto err_cleanup; 1787 1788 } 1788 1789 1789 1790 uc->config.src_thread = ud->psil_base + uc->tchan->id; ··· 1803 1800 uc->id); 1804 1801 1805 1802 ret = udma_alloc_tx_resources(uc); 1806 - if (ret) { 1807 - uc->config.remote_thread_id = -1; 1808 - return ret; 1809 - } 1803 + if (ret) 1804 + goto err_cleanup; 1810 1805 1811 1806 uc->config.src_thread = ud->psil_base + uc->tchan->id; 1812 1807 uc->config.dst_thread = uc->config.remote_thread_id; ··· 1821 1820 uc->id); 1822 1821 1823 1822 ret = udma_alloc_rx_resources(uc); 1824 - if (ret) { 1825 - uc->config.remote_thread_id = -1; 1826 - return ret; 1827 - } 1823 + if (ret) 1824 + goto err_cleanup; 1828 1825 1829 1826 uc->config.src_thread = uc->config.remote_thread_id; 1830 1827 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | ··· 1837 1838 /* Can not happen */ 1838 1839 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n", 1839 1840 __func__, uc->id, uc->config.dir); 1840 - return -EINVAL; 1841 + ret = -EINVAL; 1842 + goto err_cleanup; 1843 + 1841 1844 } 1842 1845 1843 1846 /* check if the channel configuration was successful */ ··· 1848 1847 1849 1848 if (udma_is_chan_running(uc)) { 1850 1849 dev_warn(ud->dev, "chan%d: is running!\n", uc->id); 1851 - udma_stop(uc); 1850 + udma_reset_chan(uc, false); 1852 1851 if (udma_is_chan_running(uc)) { 1853 1852 
dev_err(ud->dev, "chan%d: won't stop!\n", uc->id); 1854 1853 ret = -EBUSY; ··· 1907 1906 1908 1907 udma_reset_rings(uc); 1909 1908 1910 - INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work, 1911 - udma_check_tx_completion); 1912 1909 return 0; 1913 1910 1914 1911 err_irq_free: ··· 1918 1919 err_res_free: 1919 1920 udma_free_tx_resources(uc); 1920 1921 udma_free_rx_resources(uc); 1921 - 1922 + err_cleanup: 1922 1923 udma_reset_uchan(uc); 1923 1924 1924 1925 if (uc->use_dma_pool) { ··· 3018 3019 } 3019 3020 3020 3021 cancel_delayed_work_sync(&uc->tx_drain.work); 3021 - destroy_delayed_work_on_stack(&uc->tx_drain.work); 3022 3022 3023 3023 if (uc->irq_num_ring > 0) { 3024 3024 free_irq(uc->irq_num_ring, uc); ··· 3591 3593 return ret; 3592 3594 } 3593 3595 3594 - ret = of_property_read_u32(navss_node, "ti,udma-atype", &ud->atype); 3596 + ret = of_property_read_u32(dev->of_node, "ti,udma-atype", &ud->atype); 3595 3597 if (!ret && ud->atype > 2) { 3596 3598 dev_err(dev, "Invalid atype: %u\n", ud->atype); 3597 3599 return -EINVAL; ··· 3709 3711 tasklet_init(&uc->vc.task, udma_vchan_complete, 3710 3712 (unsigned long)&uc->vc); 3711 3713 init_completion(&uc->teardown_completed); 3714 + INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion); 3712 3715 } 3713 3716 3714 3717 ret = dma_async_device_register(&ud->ddev);
+1 -4
drivers/firmware/efi/efi-pstore.c
··· 356 356 357 357 static __init int efivars_pstore_init(void) 358 358 { 359 - if (!efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES)) 360 - return 0; 361 - 362 - if (!efivars_kobject()) 359 + if (!efivars_kobject() || !efivar_supports_writes()) 363 360 return 0; 364 361 365 362 if (efivars_pstore_disable)
+8 -4
drivers/firmware/efi/efi.c
··· 176 176 static int generic_ops_register(void) 177 177 { 178 178 generic_ops.get_variable = efi.get_variable; 179 - generic_ops.set_variable = efi.set_variable; 180 - generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking; 181 179 generic_ops.get_next_variable = efi.get_next_variable; 182 180 generic_ops.query_variable_store = efi_query_variable_store; 183 181 182 + if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) { 183 + generic_ops.set_variable = efi.set_variable; 184 + generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking; 185 + } 184 186 return efivars_register(&generic_efivars, &generic_ops, efi_kobj); 185 187 } 186 188 ··· 384 382 return -ENOMEM; 385 383 } 386 384 387 - if (efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES)) { 385 + if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE | 386 + EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) { 388 387 efivar_ssdt_load(); 389 388 error = generic_ops_register(); 390 389 if (error) ··· 419 416 err_remove_group: 420 417 sysfs_remove_group(efi_kobj, &efi_subsys_attr_group); 421 418 err_unregister: 422 - if (efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES)) 419 + if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE | 420 + EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) 423 421 generic_ops_unregister(); 424 422 err_put: 425 423 kobject_put(efi_kobj);
+1 -4
drivers/firmware/efi/efivars.c
··· 680 680 struct kobject *parent_kobj = efivars_kobject(); 681 681 int error = 0; 682 682 683 - if (!efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES)) 684 - return -ENODEV; 685 - 686 683 /* No efivars has been registered yet */ 687 - if (!parent_kobj) 684 + if (!parent_kobj || !efivar_supports_writes()) 688 685 return 0; 689 686 690 687 printk(KERN_INFO "EFI Variables Facility v%s %s\n", EFIVARS_VERSION,
+1 -2
drivers/firmware/efi/libstub/Makefile
··· 6 6 # enabled, even if doing so doesn't break the build. 7 7 # 8 8 cflags-$(CONFIG_X86_32) := -march=i386 9 - cflags-$(CONFIG_X86_64) := -mcmodel=small \ 10 - $(call cc-option,-maccumulate-outgoing-args) 9 + cflags-$(CONFIG_X86_64) := -mcmodel=small 11 10 cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ \ 12 11 -fPIC -fno-strict-aliasing -mno-red-zone \ 13 12 -mno-mmx -mno-sse -fshort-wchar \
+1 -1
drivers/firmware/efi/libstub/alignedmem.c
··· 44 44 *addr = ALIGN((unsigned long)alloc_addr, align); 45 45 46 46 if (slack > 0) { 47 - int l = (alloc_addr % align) / EFI_PAGE_SIZE; 47 + int l = (alloc_addr & (align - 1)) / EFI_PAGE_SIZE; 48 48 49 49 if (l) { 50 50 efi_bs_call(free_pages, alloc_addr, slack - l + 1);
+14 -11
drivers/firmware/efi/libstub/arm64-stub.c
··· 35 35 } 36 36 37 37 /* 38 - * Relocatable kernels can fix up the misalignment with respect to 39 - * MIN_KIMG_ALIGN, so they only require a minimum alignment of EFI_KIMG_ALIGN 40 - * (which accounts for the alignment of statically allocated objects such as 41 - * the swapper stack.) 38 + * Although relocatable kernels can fix up the misalignment with respect to 39 + * MIN_KIMG_ALIGN, the resulting virtual text addresses are subtly out of 40 + * sync with those recorded in the vmlinux when kaslr is disabled but the 41 + * image required relocation anyway. Therefore retain 2M alignment unless 42 + * KASLR is in use. 42 43 */ 43 - static const u64 min_kimg_align = IS_ENABLED(CONFIG_RELOCATABLE) ? EFI_KIMG_ALIGN 44 - : MIN_KIMG_ALIGN; 44 + static u64 min_kimg_align(void) 45 + { 46 + return efi_nokaslr ? MIN_KIMG_ALIGN : EFI_KIMG_ALIGN; 47 + } 45 48 46 49 efi_status_t handle_kernel_image(unsigned long *image_addr, 47 50 unsigned long *image_size, ··· 77 74 78 75 kernel_size = _edata - _text; 79 76 kernel_memsize = kernel_size + (_end - _edata); 80 - *reserve_size = kernel_memsize + TEXT_OFFSET % min_kimg_align; 77 + *reserve_size = kernel_memsize + TEXT_OFFSET % min_kimg_align(); 81 78 82 79 if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && phys_seed != 0) { 83 80 /* 84 81 * If KASLR is enabled, and we have some randomness available, 85 82 * locate the kernel at a randomized offset in physical memory. 86 83 */ 87 - status = efi_random_alloc(*reserve_size, min_kimg_align, 84 + status = efi_random_alloc(*reserve_size, min_kimg_align(), 88 85 reserve_addr, phys_seed); 89 86 } else { 90 87 status = EFI_OUT_OF_RESOURCES; 91 88 } 92 89 93 90 if (status != EFI_SUCCESS) { 94 - if (IS_ALIGNED((u64)_text - TEXT_OFFSET, min_kimg_align)) { 91 + if (IS_ALIGNED((u64)_text - TEXT_OFFSET, min_kimg_align())) { 95 92 /* 96 93 * Just execute from wherever we were loaded by the 97 94 * UEFI PE/COFF loader if the alignment is suitable. 
··· 102 99 } 103 100 104 101 status = efi_allocate_pages_aligned(*reserve_size, reserve_addr, 105 - ULONG_MAX, min_kimg_align); 102 + ULONG_MAX, min_kimg_align()); 106 103 107 104 if (status != EFI_SUCCESS) { 108 105 efi_err("Failed to relocate kernel\n"); ··· 111 108 } 112 109 } 113 110 114 - *image_addr = *reserve_addr + TEXT_OFFSET % min_kimg_align; 111 + *image_addr = *reserve_addr + TEXT_OFFSET % min_kimg_align(); 115 112 memcpy((void *)*image_addr, _text, kernel_size); 116 113 117 114 return EFI_SUCCESS;
+1 -1
drivers/firmware/efi/libstub/efi-stub-helper.c
··· 19 19 #include "efistub.h" 20 20 21 21 bool efi_nochunk; 22 - bool efi_nokaslr; 22 + bool efi_nokaslr = !IS_ENABLED(CONFIG_RANDOMIZE_BASE); 23 23 bool efi_noinitrd; 24 24 int efi_loglevel = CONSOLE_LOGLEVEL_DEFAULT; 25 25 bool efi_novamap;
-17
drivers/firmware/efi/libstub/efi-stub.c
··· 122 122 } 123 123 124 124 /* 125 - * This function handles the architcture specific differences between arm and 126 - * arm64 regarding where the kernel image must be loaded and any memory that 127 - * must be reserved. On failure it is required to free all 128 - * all allocations it has made. 129 - */ 130 - efi_status_t handle_kernel_image(unsigned long *image_addr, 131 - unsigned long *image_size, 132 - unsigned long *reserve_addr, 133 - unsigned long *reserve_size, 134 - unsigned long dram_base, 135 - efi_loaded_image_t *image); 136 - 137 - asmlinkage void __noreturn efi_enter_kernel(unsigned long entrypoint, 138 - unsigned long fdt_addr, 139 - unsigned long fdt_size); 140 - 141 - /* 142 125 * EFI entry point for the arm/arm64 EFI stubs. This is the entrypoint 143 126 * that is described in the PE/COFF header. Most of the code is the same 144 127 * for both archictectures, with the arch-specific code provided in the
+16
drivers/firmware/efi/libstub/efistub.h
··· 776 776 unsigned long *load_size, 777 777 unsigned long soft_limit, 778 778 unsigned long hard_limit); 779 + /* 780 + * This function handles the architcture specific differences between arm and 781 + * arm64 regarding where the kernel image must be loaded and any memory that 782 + * must be reserved. On failure it is required to free all 783 + * all allocations it has made. 784 + */ 785 + efi_status_t handle_kernel_image(unsigned long *image_addr, 786 + unsigned long *image_size, 787 + unsigned long *reserve_addr, 788 + unsigned long *reserve_size, 789 + unsigned long dram_base, 790 + efi_loaded_image_t *image); 791 + 792 + asmlinkage void __noreturn efi_enter_kernel(unsigned long entrypoint, 793 + unsigned long fdt_addr, 794 + unsigned long fdt_size); 779 795 780 796 void efi_handle_post_ebs_state(void); 781 797
+4 -4
drivers/firmware/efi/libstub/x86-stub.c
··· 8 8 9 9 #include <linux/efi.h> 10 10 #include <linux/pci.h> 11 + #include <linux/stddef.h> 11 12 12 13 #include <asm/efi.h> 13 14 #include <asm/e820/types.h> ··· 362 361 int options_size = 0; 363 362 efi_status_t status; 364 363 char *cmdline_ptr; 365 - unsigned long ramdisk_addr; 366 - unsigned long ramdisk_size; 367 364 368 365 efi_system_table = sys_table_arg; 369 366 ··· 389 390 390 391 hdr = &boot_params->hdr; 391 392 392 - /* Copy the second sector to boot_params */ 393 - memcpy(&hdr->jump, image_base + 512, 512); 393 + /* Copy the setup header from the second sector to boot_params */ 394 + memcpy(&hdr->jump, image_base + 512, 395 + sizeof(struct setup_header) - offsetof(struct setup_header, jump)); 394 396 395 397 /* 396 398 * Fill out some of the header fields ourselves because the
+6
drivers/firmware/efi/vars.c
··· 1229 1229 return rv; 1230 1230 } 1231 1231 EXPORT_SYMBOL_GPL(efivars_unregister); 1232 + 1233 + int efivar_supports_writes(void) 1234 + { 1235 + return __efivars && __efivars->ops->set_variable; 1236 + } 1237 + EXPORT_SYMBOL_GPL(efivar_supports_writes);
+5 -3
drivers/firmware/psci/psci_checker.c
··· 157 157 158 158 cpu_groups = kcalloc(nb_available_cpus, sizeof(cpu_groups), 159 159 GFP_KERNEL); 160 - if (!cpu_groups) 160 + if (!cpu_groups) { 161 + free_cpumask_var(tmp); 161 162 return -ENOMEM; 163 + } 162 164 163 165 cpumask_copy(tmp, cpu_online_mask); 164 166 ··· 169 167 topology_core_cpumask(cpumask_any(tmp)); 170 168 171 169 if (!alloc_cpumask_var(&cpu_groups[num_groups], GFP_KERNEL)) { 170 + free_cpumask_var(tmp); 172 171 free_cpu_groups(num_groups, &cpu_groups); 173 172 return -ENOMEM; 174 173 } ··· 199 196 if (!page_buf) 200 197 goto out_free_cpu_groups; 201 198 202 - err = 0; 203 199 /* 204 200 * Of course the last CPU cannot be powered down and cpu_down() should 205 201 * refuse doing that. 206 202 */ 207 203 pr_info("Trying to turn off and on again all CPUs\n"); 208 - err += down_and_up_cpus(cpu_online_mask, offlined_cpus); 204 + err = down_and_up_cpus(cpu_online_mask, offlined_cpus); 209 205 210 206 /* 211 207 * Take down CPUs by cpu group this time. When the last CPU is turned
+2 -1
drivers/fpga/dfl-afu-main.c
··· 83 83 * on this port and minimum soft reset pulse width has elapsed. 84 84 * Driver polls port_soft_reset_ack to determine if reset done by HW. 85 85 */ 86 - if (readq_poll_timeout(base + PORT_HDR_CTRL, v, v & PORT_CTRL_SFTRST, 86 + if (readq_poll_timeout(base + PORT_HDR_CTRL, v, 87 + v & PORT_CTRL_SFTRST_ACK, 87 88 RST_POLL_INVL, RST_POLL_TIMEOUT)) { 88 89 dev_err(&pdev->dev, "timeout, fail to reset device\n"); 89 90 return -ETIMEDOUT;
+2 -1
drivers/fpga/dfl-pci.c
··· 227 227 { 228 228 struct cci_drvdata *drvdata = pci_get_drvdata(pcidev); 229 229 struct dfl_fpga_cdev *cdev = drvdata->cdev; 230 - int ret = 0; 231 230 232 231 if (!num_vfs) { 233 232 /* ··· 238 239 dfl_fpga_cdev_config_ports_pf(cdev); 239 240 240 241 } else { 242 + int ret; 243 + 241 244 /* 242 245 * before enable SRIOV, put released ports into VF access mode 243 246 * first of all.
+6 -1
drivers/gpio/gpio-arizona.c
··· 64 64 ret = pm_runtime_get_sync(chip->parent); 65 65 if (ret < 0) { 66 66 dev_err(chip->parent, "Failed to resume: %d\n", ret); 67 + pm_runtime_put_autosuspend(chip->parent); 67 68 return ret; 68 69 } 69 70 ··· 73 72 if (ret < 0) { 74 73 dev_err(chip->parent, "Failed to drop cache: %d\n", 75 74 ret); 75 + pm_runtime_put_autosuspend(chip->parent); 76 76 return ret; 77 77 } 78 78 79 79 ret = regmap_read(arizona->regmap, reg, &val); 80 - if (ret < 0) 80 + if (ret < 0) { 81 + pm_runtime_put_autosuspend(chip->parent); 81 82 return ret; 83 + } 82 84 83 85 pm_runtime_mark_last_busy(chip->parent); 84 86 pm_runtime_put_autosuspend(chip->parent); ··· 110 106 ret = pm_runtime_get_sync(chip->parent); 111 107 if (ret < 0) { 112 108 dev_err(chip->parent, "Failed to resume: %d\n", ret); 109 + pm_runtime_put(chip->parent); 113 110 return ret; 114 111 } 115 112 }
+94 -5
drivers/gpio/gpio-pca953x.c
··· 107 107 }; 108 108 MODULE_DEVICE_TABLE(i2c, pca953x_id); 109 109 110 + #ifdef CONFIG_GPIO_PCA953X_IRQ 111 + 112 + #include <linux/dmi.h> 113 + #include <linux/gpio.h> 114 + #include <linux/list.h> 115 + 116 + static const struct dmi_system_id pca953x_dmi_acpi_irq_info[] = { 117 + { 118 + /* 119 + * On Intel Galileo Gen 2 board the IRQ pin of one of 120 + * the I²C GPIO expanders, which has GpioInt() resource, 121 + * is provided as an absolute number instead of being 122 + * relative. Since first controller (gpio-sch.c) and 123 + * second (gpio-dwapb.c) are at the fixed bases, we may 124 + * safely refer to the number in the global space to get 125 + * an IRQ out of it. 126 + */ 127 + .matches = { 128 + DMI_EXACT_MATCH(DMI_BOARD_NAME, "GalileoGen2"), 129 + }, 130 + }, 131 + {} 132 + }; 133 + 134 + #ifdef CONFIG_ACPI 135 + static int pca953x_acpi_get_pin(struct acpi_resource *ares, void *data) 136 + { 137 + struct acpi_resource_gpio *agpio; 138 + int *pin = data; 139 + 140 + if (acpi_gpio_get_irq_resource(ares, &agpio)) 141 + *pin = agpio->pin_table[0]; 142 + return 1; 143 + } 144 + 145 + static int pca953x_acpi_find_pin(struct device *dev) 146 + { 147 + struct acpi_device *adev = ACPI_COMPANION(dev); 148 + int pin = -ENOENT, ret; 149 + LIST_HEAD(r); 150 + 151 + ret = acpi_dev_get_resources(adev, &r, pca953x_acpi_get_pin, &pin); 152 + acpi_dev_free_resource_list(&r); 153 + if (ret < 0) 154 + return ret; 155 + 156 + return pin; 157 + } 158 + #else 159 + static inline int pca953x_acpi_find_pin(struct device *dev) { return -ENXIO; } 160 + #endif 161 + 162 + static int pca953x_acpi_get_irq(struct device *dev) 163 + { 164 + int pin, ret; 165 + 166 + pin = pca953x_acpi_find_pin(dev); 167 + if (pin < 0) 168 + return pin; 169 + 170 + dev_info(dev, "Applying ACPI interrupt quirk (GPIO %d)\n", pin); 171 + 172 + if (!gpio_is_valid(pin)) 173 + return -EINVAL; 174 + 175 + ret = gpio_request(pin, "pca953x interrupt"); 176 + if (ret) 177 + return ret; 178 + 179 + ret = 
gpio_to_irq(pin); 180 + 181 + /* When pin is used as an IRQ, no need to keep it requested */ 182 + gpio_free(pin); 183 + 184 + return ret; 185 + } 186 + #endif 187 + 110 188 static const struct acpi_device_id pca953x_acpi_ids[] = { 111 189 { "INT3491", 16 | PCA953X_TYPE | PCA_LATCH_INT, }, 112 190 { } ··· 400 322 .writeable_reg = pca953x_writeable_register, 401 323 .volatile_reg = pca953x_volatile_register, 402 324 325 + .disable_locking = true, 403 326 .cache_type = REGCACHE_RBTREE, 404 327 .max_register = 0x7f, 405 328 }; ··· 702 623 DECLARE_BITMAP(reg_direction, MAX_LINE); 703 624 int level; 704 625 705 - pca953x_read_regs(chip, chip->regs->direction, reg_direction); 706 - 707 626 if (chip->driver_data & PCA_PCAL) { 708 627 /* Enable latch on interrupt-enabled inputs */ 709 628 pca953x_write_regs(chip, PCAL953X_IN_LATCH, chip->irq_mask); ··· 712 635 pca953x_write_regs(chip, PCAL953X_INT_MASK, irq_mask); 713 636 } 714 637 638 + /* Switch direction to input if needed */ 639 + pca953x_read_regs(chip, chip->regs->direction, reg_direction); 640 + 715 641 bitmap_or(irq_mask, chip->irq_trig_fall, chip->irq_trig_raise, gc->ngpio); 642 + bitmap_complement(reg_direction, reg_direction, gc->ngpio); 716 643 bitmap_and(irq_mask, irq_mask, reg_direction, gc->ngpio); 717 644 718 645 /* Look for any newly setup interrupt */ ··· 815 734 struct gpio_chip *gc = &chip->gpio_chip; 816 735 DECLARE_BITMAP(pending, MAX_LINE); 817 736 int level; 737 + bool ret; 818 738 819 - if (!pca953x_irq_pending(chip, pending)) 820 - return IRQ_NONE; 739 + mutex_lock(&chip->i2c_lock); 740 + ret = pca953x_irq_pending(chip, pending); 741 + mutex_unlock(&chip->i2c_lock); 821 742 822 743 for_each_set_bit(level, pending, gc->ngpio) 823 744 handle_nested_irq(irq_find_mapping(gc->irq.domain, level)); 824 745 825 - return IRQ_HANDLED; 746 + return IRQ_RETVAL(ret); 826 747 } 827 748 828 749 static int pca953x_irq_setup(struct pca953x_chip *chip, int irq_base) ··· 834 751 DECLARE_BITMAP(reg_direction, 
MAX_LINE); 835 752 DECLARE_BITMAP(irq_stat, MAX_LINE); 836 753 int ret; 754 + 755 + if (dmi_first_match(pca953x_dmi_acpi_irq_info)) { 756 + ret = pca953x_acpi_get_irq(&client->dev); 757 + if (ret > 0) 758 + client->irq = ret; 759 + } 837 760 838 761 if (!client->irq) 839 762 return 0;
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
··· 204 204 (mode_info->atom_context->bios + data_offset); 205 205 switch (crev) { 206 206 case 11: 207 + case 12: 207 208 mem_channel_number = igp_info->v11.umachannelnumber; 208 209 /* channel width is 64 */ 209 210 if (vram_width)
+15 -5
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
··· 1295 1295 static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring) 1296 1296 { 1297 1297 struct amdgpu_job *job; 1298 - struct drm_sched_job *s_job; 1298 + struct drm_sched_job *s_job, *tmp; 1299 1299 uint32_t preempt_seq; 1300 1300 struct dma_fence *fence, **ptr; 1301 1301 struct amdgpu_fence_driver *drv = &ring->fence_drv; 1302 1302 struct drm_gpu_scheduler *sched = &ring->sched; 1303 + bool preempted = true; 1303 1304 1304 1305 if (ring->funcs->type != AMDGPU_RING_TYPE_GFX) 1305 1306 return; 1306 1307 1307 1308 preempt_seq = le32_to_cpu(*(drv->cpu_addr + 2)); 1308 - if (preempt_seq <= atomic_read(&drv->last_seq)) 1309 - return; 1309 + if (preempt_seq <= atomic_read(&drv->last_seq)) { 1310 + preempted = false; 1311 + goto no_preempt; 1312 + } 1310 1313 1311 1314 preempt_seq &= drv->num_fences_mask; 1312 1315 ptr = &drv->fences[preempt_seq]; 1313 1316 fence = rcu_dereference_protected(*ptr, 1); 1314 1317 1318 + no_preempt: 1315 1319 spin_lock(&sched->job_list_lock); 1316 - list_for_each_entry(s_job, &sched->ring_mirror_list, node) { 1320 + list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) { 1321 + if (dma_fence_is_signaled(&s_job->s_fence->finished)) { 1322 + /* remove job from ring_mirror_list */ 1323 + list_del_init(&s_job->node); 1324 + sched->ops->free_job(s_job); 1325 + continue; 1326 + } 1317 1327 job = to_amdgpu_job(s_job); 1318 - if (job->fence == fence) 1328 + if (preempted && job->fence == fence) 1319 1329 /* mark the job as preempted */ 1320 1330 job->preemption_status |= AMDGPU_IB_PREEMPTED; 1321 1331 }
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
··· 37 37 38 38 memset(&ti, 0, sizeof(struct amdgpu_task_info)); 39 39 40 - if (amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) { 40 + if (amdgpu_gpu_recovery && 41 + amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) { 41 42 DRM_ERROR("ring %s timeout, but soft recovered\n", 42 43 s_job->sched->name); 43 44 return;
+5 -8
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
··· 778 778 tmp_str++; 779 779 while (isspace(*++tmp_str)); 780 780 781 - while (tmp_str[0]) { 782 - sub_str = strsep(&tmp_str, delimiter); 781 + while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) { 783 782 ret = kstrtol(sub_str, 0, &parameter[parameter_size]); 784 783 if (ret) 785 784 return -EINVAL; ··· 1038 1039 memcpy(buf_cpy, buf, bytes); 1039 1040 buf_cpy[bytes] = '\0'; 1040 1041 tmp = buf_cpy; 1041 - while (tmp[0]) { 1042 - sub_str = strsep(&tmp, delimiter); 1042 + while ((sub_str = strsep(&tmp, delimiter)) != NULL) { 1043 1043 if (strlen(sub_str)) { 1044 1044 ret = kstrtol(sub_str, 0, &level); 1045 1045 if (ret) ··· 1635 1637 i++; 1636 1638 memcpy(buf_cpy, buf, count-i); 1637 1639 tmp_str = buf_cpy; 1638 - while (tmp_str[0]) { 1639 - sub_str = strsep(&tmp_str, delimiter); 1640 + while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) { 1640 1641 ret = kstrtol(sub_str, 0, &parameter[parameter_size]); 1641 1642 if (ret) 1642 1643 return -EINVAL; ··· 2781 2784 if (r) 2782 2785 return r; 2783 2786 2784 - return snprintf(buf, PAGE_SIZE, "%d\n", sclk * 10 * 1000); 2787 + return snprintf(buf, PAGE_SIZE, "%u\n", sclk * 10 * 1000); 2785 2788 } 2786 2789 2787 2790 static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev, ··· 2816 2819 if (r) 2817 2820 return r; 2818 2821 2819 - return snprintf(buf, PAGE_SIZE, "%d\n", mclk * 10 * 1000); 2822 + return snprintf(buf, PAGE_SIZE, "%u\n", mclk * 10 * 1000); 2820 2823 } 2821 2824 2822 2825 static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
+59 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
··· 372 372 return ret; 373 373 } 374 374 375 + static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp, 376 + struct psp_gfx_cmd_resp *cmd) 377 + { 378 + if (amdgpu_sriov_vf(psp->adev)) 379 + cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR; 380 + else 381 + cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR; 382 + } 383 + 384 + static int psp_tmr_unload(struct psp_context *psp) 385 + { 386 + int ret; 387 + struct psp_gfx_cmd_resp *cmd; 388 + 389 + cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 390 + if (!cmd) 391 + return -ENOMEM; 392 + 393 + psp_prep_tmr_unload_cmd_buf(psp, cmd); 394 + DRM_INFO("free PSP TMR buffer\n"); 395 + 396 + ret = psp_cmd_submit_buf(psp, NULL, cmd, 397 + psp->fence_buf_mc_addr); 398 + 399 + kfree(cmd); 400 + 401 + return ret; 402 + } 403 + 404 + static int psp_tmr_terminate(struct psp_context *psp) 405 + { 406 + int ret; 407 + void *tmr_buf; 408 + void **pptr; 409 + 410 + ret = psp_tmr_unload(psp); 411 + if (ret) 412 + return ret; 413 + 414 + /* free TMR memory buffer */ 415 + pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL; 416 + amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr); 417 + 418 + return 0; 419 + } 420 + 375 421 static void psp_prep_asd_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, 376 422 uint64_t asd_mc, uint32_t size) 377 423 { ··· 1825 1779 { 1826 1780 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1827 1781 struct psp_context *psp = &adev->psp; 1828 - void *tmr_buf; 1829 - void **pptr; 1830 1782 1831 1783 if (psp->adev->psp.ta_fw) { 1832 1784 psp_ras_terminate(psp); ··· 1834 1790 1835 1791 psp_asd_unload(psp); 1836 1792 1793 + psp_tmr_terminate(psp); 1837 1794 psp_ring_destroy(psp, PSP_RING_TYPE__KM); 1838 1795 1839 - pptr = amdgpu_sriov_vf(psp->adev) ? 
&tmr_buf : NULL; 1840 - amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr); 1841 1796 amdgpu_bo_free_kernel(&psp->fw_pri_bo, 1842 1797 &psp->fw_pri_mc_addr, &psp->fw_pri_buf); 1843 1798 amdgpu_bo_free_kernel(&psp->fence_buf_bo, ··· 1881 1838 DRM_ERROR("Failed to terminate dtm ta\n"); 1882 1839 return ret; 1883 1840 } 1841 + } 1842 + 1843 + ret = psp_asd_unload(psp); 1844 + if (ret) { 1845 + DRM_ERROR("Failed to unload asd\n"); 1846 + return ret; 1847 + } 1848 + 1849 + ret = psp_tmr_terminate(psp); 1850 + if (ret) { 1851 + DRM_ERROR("Falied to terminate tmr\n"); 1852 + return ret; 1884 1853 } 1885 1854 1886 1855 ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
+8 -1
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
··· 7513 7513 struct amdgpu_device *adev = ring->adev; 7514 7514 struct amdgpu_kiq *kiq = &adev->gfx.kiq; 7515 7515 struct amdgpu_ring *kiq_ring = &kiq->ring; 7516 + unsigned long flags; 7516 7517 7517 7518 if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) 7518 7519 return -EINVAL; 7519 7520 7520 - if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) 7521 + spin_lock_irqsave(&kiq->ring_lock, flags); 7522 + 7523 + if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) { 7524 + spin_unlock_irqrestore(&kiq->ring_lock, flags); 7521 7525 return -ENOMEM; 7526 + } 7522 7527 7523 7528 /* assert preemption condition */ 7524 7529 amdgpu_ring_set_preempt_cond_exec(ring, false); ··· 7533 7528 ring->trail_fence_gpu_addr, 7534 7529 ++ring->trail_seq); 7535 7530 amdgpu_ring_commit(kiq_ring); 7531 + 7532 + spin_unlock_irqrestore(&kiq->ring_lock, flags); 7536 7533 7537 7534 /* poll the trailing fence */ 7538 7535 for (i = 0; i < adev->usec_timeout; i++) {
+8 -18
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
··· 314 314 static uint64_t sdma_v5_0_ring_get_wptr(struct amdgpu_ring *ring) 315 315 { 316 316 struct amdgpu_device *adev = ring->adev; 317 - u64 *wptr = NULL; 318 - uint64_t local_wptr = 0; 317 + u64 wptr; 319 318 320 319 if (ring->use_doorbell) { 321 320 /* XXX check if swapping is necessary on BE */ 322 - wptr = ((u64 *)&adev->wb.wb[ring->wptr_offs]); 323 - DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", *wptr); 324 - *wptr = (*wptr) >> 2; 325 - DRM_DEBUG("wptr/doorbell after shift == 0x%016llx\n", *wptr); 321 + wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs])); 322 + DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr); 326 323 } else { 327 - u32 lowbit, highbit; 328 - 329 - wptr = &local_wptr; 330 - lowbit = RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR)) >> 2; 331 - highbit = RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2; 332 - 333 - DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n", 334 - ring->me, highbit, lowbit); 335 - *wptr = highbit; 336 - *wptr = (*wptr) << 32; 337 - *wptr |= lowbit; 324 + wptr = RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI)); 325 + wptr = wptr << 32; 326 + wptr |= RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR)); 327 + DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n", ring->me, wptr); 338 328 } 339 329 340 - return *wptr; 330 + return wptr >> 2; 341 331 } 342 332 343 333 /**
+15 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 974 974 /* Update the actual used number of crtc */ 975 975 adev->mode_info.num_crtc = adev->dm.display_indexes_num; 976 976 977 + /* create fake encoders for MST */ 978 + dm_dp_create_fake_mst_encoders(adev); 979 + 977 980 /* TODO: Add_display_info? */ 978 981 979 982 /* TODO use dynamic cursor width */ ··· 1000 997 1001 998 static void amdgpu_dm_fini(struct amdgpu_device *adev) 1002 999 { 1000 + int i; 1001 + 1002 + for (i = 0; i < adev->dm.display_indexes_num; i++) { 1003 + drm_encoder_cleanup(&adev->dm.mst_encoders[i].base); 1004 + } 1005 + 1003 1006 amdgpu_dm_audio_fini(adev); 1004 1007 1005 1008 amdgpu_dm_destroy_drm_device(&adev->dm); ··· 1367 1358 struct dmcu *dmcu = NULL; 1368 1359 bool ret; 1369 1360 1370 - if (!adev->dm.fw_dmcu) 1361 + if (!adev->dm.fw_dmcu && !adev->dm.dmub_fw) 1371 1362 return detect_mst_link_for_all_connectors(adev->ddev); 1372 1363 1373 1364 dmcu = adev->dm.dc->res_pool->dmcu; ··· 2019 2010 struct amdgpu_display_manager *dm; 2020 2011 struct drm_connector *conn_base; 2021 2012 struct amdgpu_device *adev; 2013 + struct dc_link *link = NULL; 2022 2014 static const u8 pre_computed_values[] = { 2023 2015 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69, 2024 2016 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98}; 2025 2017 2026 2018 if (!aconnector || !aconnector->dc_link) 2019 + return; 2020 + 2021 + link = aconnector->dc_link; 2022 + if (link->connector_signal != SIGNAL_TYPE_EDP) 2027 2023 return; 2028 2024 2029 2025 conn_base = &aconnector->base;
+10 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
··· 43 43 */ 44 44 45 45 #define AMDGPU_DM_MAX_DISPLAY_INDEX 31 46 + 47 + #define AMDGPU_DM_MAX_CRTC 6 48 + 46 49 /* 47 50 #include "include/amdgpu_dal_power_if.h" 48 51 #include "amdgpu_dm_irq.h" ··· 331 328 * available in FW 332 329 */ 333 330 const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box; 331 + 332 + /** 333 + * @mst_encoders: 334 + * 335 + * fake encoders used for DP MST. 336 + */ 337 + struct amdgpu_encoder mst_encoders[AMDGPU_DM_MAX_CRTC]; 334 338 }; 335 339 336 340 struct amdgpu_dm_connector { ··· 366 356 struct amdgpu_dm_dp_aux dm_dp_aux; 367 357 struct drm_dp_mst_port *port; 368 358 struct amdgpu_dm_connector *mst_port; 369 - struct amdgpu_encoder *mst_encoder; 370 359 struct drm_dp_aux *dsc_aux; 371 360 372 361 /* TODO see if we can merge with ddc_bus or make a dm_connector */
+26 -27
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
··· 95 95 { 96 96 struct amdgpu_dm_connector *aconnector = 97 97 to_amdgpu_dm_connector(connector); 98 - struct amdgpu_encoder *amdgpu_encoder = aconnector->mst_encoder; 99 98 100 99 if (aconnector->dc_sink) { 101 100 dc_link_remove_remote_sink(aconnector->dc_link, ··· 104 105 105 106 kfree(aconnector->edid); 106 107 107 - drm_encoder_cleanup(&amdgpu_encoder->base); 108 - kfree(amdgpu_encoder); 109 108 drm_connector_cleanup(connector); 110 109 drm_dp_mst_put_port_malloc(aconnector->port); 111 110 kfree(aconnector); ··· 240 243 dm_mst_atomic_best_encoder(struct drm_connector *connector, 241 244 struct drm_connector_state *connector_state) 242 245 { 243 - return &to_amdgpu_dm_connector(connector)->mst_encoder->base; 246 + struct drm_device *dev = connector->dev; 247 + struct amdgpu_device *adev = dev->dev_private; 248 + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(connector_state->crtc); 249 + 250 + return &adev->dm.mst_encoders[acrtc->crtc_id].base; 244 251 } 245 252 246 253 static int ··· 307 306 .destroy = amdgpu_dm_encoder_destroy, 308 307 }; 309 308 310 - static struct amdgpu_encoder * 311 - dm_dp_create_fake_mst_encoder(struct amdgpu_dm_connector *connector) 309 + void 310 + dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev) 312 311 { 313 - struct drm_device *dev = connector->base.dev; 314 - struct amdgpu_device *adev = dev->dev_private; 315 - struct amdgpu_encoder *amdgpu_encoder; 316 - struct drm_encoder *encoder; 312 + struct drm_device *dev = adev->ddev; 313 + int i; 317 314 318 - amdgpu_encoder = kzalloc(sizeof(*amdgpu_encoder), GFP_KERNEL); 319 - if (!amdgpu_encoder) 320 - return NULL; 315 + for (i = 0; i < adev->dm.display_indexes_num; i++) { 316 + struct amdgpu_encoder *amdgpu_encoder = &adev->dm.mst_encoders[i]; 317 + struct drm_encoder *encoder = &amdgpu_encoder->base; 321 318 322 - encoder = &amdgpu_encoder->base; 323 - encoder->possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev); 319 + encoder->possible_crtcs = 
amdgpu_dm_get_encoder_crtc_mask(adev); 324 320 325 - drm_encoder_init( 326 - dev, 327 - &amdgpu_encoder->base, 328 - &amdgpu_dm_encoder_funcs, 329 - DRM_MODE_ENCODER_DPMST, 330 - NULL); 321 + drm_encoder_init( 322 + dev, 323 + &amdgpu_encoder->base, 324 + &amdgpu_dm_encoder_funcs, 325 + DRM_MODE_ENCODER_DPMST, 326 + NULL); 331 327 332 - drm_encoder_helper_add(encoder, &amdgpu_dm_encoder_helper_funcs); 333 - 334 - return amdgpu_encoder; 328 + drm_encoder_helper_add(encoder, &amdgpu_dm_encoder_helper_funcs); 329 + } 335 330 } 336 331 337 332 static struct drm_connector * ··· 340 343 struct amdgpu_device *adev = dev->dev_private; 341 344 struct amdgpu_dm_connector *aconnector; 342 345 struct drm_connector *connector; 346 + int i; 343 347 344 348 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL); 345 349 if (!aconnector) ··· 367 369 master->dc_link, 368 370 master->connector_id); 369 371 370 - aconnector->mst_encoder = dm_dp_create_fake_mst_encoder(master); 371 - drm_connector_attach_encoder(&aconnector->base, 372 - &aconnector->mst_encoder->base); 372 + for (i = 0; i < adev->dm.display_indexes_num; i++) { 373 + drm_connector_attach_encoder(&aconnector->base, 374 + &adev->dm.mst_encoders[i].base); 375 + } 373 376 374 377 connector->max_bpc_property = master->base.max_bpc_property; 375 378 if (connector->max_bpc_property)
+3
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
··· 35 35 struct amdgpu_dm_connector *aconnector, 36 36 int link_index); 37 37 38 + void 39 + dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev); 40 + 38 41 #if defined(CONFIG_DRM_AMD_DC_DCN) 39 42 bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, 40 43 struct dc_state *dc_state);
+6 -4
drivers/gpu/drm/amd/display/dc/core/dc.c
··· 2538 2538 2539 2539 copy_stream_update_to_stream(dc, context, stream, stream_update); 2540 2540 2541 - if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) { 2542 - DC_ERROR("Mode validation failed for stream update!\n"); 2543 - dc_release_state(context); 2544 - return; 2541 + if (update_type > UPDATE_TYPE_FAST) { 2542 + if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) { 2543 + DC_ERROR("Mode validation failed for stream update!\n"); 2544 + dc_release_state(context); 2545 + return; 2546 + } 2545 2547 } 2546 2548 2547 2549 commit_planes_for_stream(
+16 -3
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
··· 56 56 } 57 57 } 58 58 59 - static void dc_stream_construct(struct dc_stream_state *stream, 59 + static bool dc_stream_construct(struct dc_stream_state *stream, 60 60 struct dc_sink *dc_sink_data) 61 61 { 62 62 uint32_t i = 0; ··· 118 118 update_stream_signal(stream, dc_sink_data); 119 119 120 120 stream->out_transfer_func = dc_create_transfer_func(); 121 + if (stream->out_transfer_func == NULL) { 122 + dc_sink_release(dc_sink_data); 123 + return false; 124 + } 121 125 stream->out_transfer_func->type = TF_TYPE_BYPASS; 122 126 stream->out_transfer_func->ctx = stream->ctx; 123 127 124 128 stream->stream_id = stream->ctx->dc_stream_id_count; 125 129 stream->ctx->dc_stream_id_count++; 130 + 131 + return true; 126 132 } 127 133 128 134 static void dc_stream_destruct(struct dc_stream_state *stream) ··· 170 164 171 165 stream = kzalloc(sizeof(struct dc_stream_state), GFP_KERNEL); 172 166 if (stream == NULL) 173 - return NULL; 167 + goto alloc_fail; 174 168 175 - dc_stream_construct(stream, sink); 169 + if (dc_stream_construct(stream, sink) == false) 170 + goto construct_fail; 176 171 177 172 kref_init(&stream->refcount); 178 173 179 174 return stream; 175 + 176 + construct_fail: 177 + kfree(stream); 178 + 179 + alloc_fail: 180 + return NULL; 180 181 } 181 182 182 183 struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream)
+1 -1
drivers/gpu/drm/amd/powerplay/renoir_ppt.c
··· 689 689 return -EINVAL; 690 690 } 691 691 692 - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, 692 + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify, 693 693 1 << workload_type, 694 694 NULL); 695 695 if (ret) {
+7 -4
drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
··· 522 522 priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].version = 0x01; 523 523 priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size = sizeof(DpmActivityMonitorCoeffInt_t); 524 524 525 - ret = smu_v11_0_i2c_eeprom_control_init(&adev->pm.smu_i2c); 526 - if (ret) 527 - goto err4; 525 + if (adev->psp.ras.ras) { 526 + ret = smu_v11_0_i2c_eeprom_control_init(&adev->pm.smu_i2c); 527 + if (ret) 528 + goto err4; 529 + } 528 530 529 531 return 0; 530 532 ··· 562 560 (struct vega20_smumgr *)(hwmgr->smu_backend); 563 561 struct amdgpu_device *adev = hwmgr->adev; 564 562 565 - smu_v11_0_i2c_eeprom_control_fini(&adev->pm.smu_i2c); 563 + if (adev->psp.ras.ras) 564 + smu_v11_0_i2c_eeprom_control_fini(&adev->pm.smu_i2c); 566 565 567 566 if (priv) { 568 567 amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PPTABLE].handle,
+6 -4
drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
··· 644 644 645 645 /* sclk is bigger than max sclk in the dependence table */ 646 646 *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; 647 - vddci = phm_find_closest_vddci(&(data->vddci_voltage_table), 648 - (dep_table->entries[i - 1].vddc - 649 - (uint16_t)VDDC_VDDCI_DELTA)); 650 647 651 648 if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control) 652 649 *voltage |= (data->vbios_boot_state.vddci_bootup_value * ··· 651 654 else if (dep_table->entries[i - 1].vddci) 652 655 *voltage |= (dep_table->entries[i - 1].vddci * 653 656 VOLTAGE_SCALE) << VDDC_SHIFT; 654 - else 657 + else { 658 + vddci = phm_find_closest_vddci(&(data->vddci_voltage_table), 659 + (dep_table->entries[i - 1].vddc - 660 + (uint16_t)VDDC_VDDCI_DELTA)); 661 + 655 662 *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; 663 + } 656 664 657 665 if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) 658 666 *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
+1 -2
drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
··· 173 173 174 174 drm_mode_config_reset(drm); 175 175 176 - drm_fbdev_generic_setup(drm, 32); 177 - 178 176 return 0; 179 177 } 180 178 ··· 223 225 if (ret) 224 226 goto err_unload; 225 227 228 + drm_fbdev_generic_setup(&priv->drm, 32); 226 229 return 0; 227 230 228 231 err_unload:
+2 -2
drivers/gpu/drm/exynos/exynos_drm_dma.c
··· 61 61 struct device *subdrv_dev, void **dma_priv) 62 62 { 63 63 struct exynos_drm_private *priv = drm_dev->dev_private; 64 - int ret; 64 + int ret = 0; 65 65 66 66 if (get_dma_ops(priv->dma_dev) != get_dma_ops(subdrv_dev)) { 67 67 DRM_DEV_ERROR(subdrv_dev, "Device %s lacks support for IOMMU\n", ··· 92 92 if (ret) 93 93 clear_dma_max_seg_size(subdrv_dev); 94 94 95 - return 0; 95 + return ret; 96 96 } 97 97 98 98 /*
-1
drivers/gpu/drm/exynos/exynos_drm_g2d.c
··· 1498 1498 1499 1499 g2d->irq = platform_get_irq(pdev, 0); 1500 1500 if (g2d->irq < 0) { 1501 - dev_err(dev, "failed to get irq\n"); 1502 1501 ret = g2d->irq; 1503 1502 goto err_put_clk; 1504 1503 }
+3 -1
drivers/gpu/drm/exynos/exynos_drm_mic.c
··· 269 269 goto unlock; 270 270 271 271 ret = pm_runtime_get_sync(mic->dev); 272 - if (ret < 0) 272 + if (ret < 0) { 273 + pm_runtime_put_noidle(mic->dev); 273 274 goto unlock; 275 + } 274 276 275 277 mic_set_path(mic, 1); 276 278
+3 -2
drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
··· 307 307 /* reset all the states of crtc/plane/encoder/connector */ 308 308 drm_mode_config_reset(dev); 309 309 310 - drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth); 311 - 312 310 return 0; 313 311 314 312 err: ··· 353 355 ret); 354 356 goto err_unload; 355 357 } 358 + 359 + drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth); 360 + 356 361 return 0; 357 362 358 363 err_unload:
+11
drivers/gpu/drm/i915/display/intel_display.c
··· 3822 3822 return true; 3823 3823 } 3824 3824 3825 + unsigned int 3826 + intel_plane_fence_y_offset(const struct intel_plane_state *plane_state) 3827 + { 3828 + int x = 0, y = 0; 3829 + 3830 + intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0, 3831 + plane_state->color_plane[0].offset, 0); 3832 + 3833 + return y; 3834 + } 3835 + 3825 3836 static int skl_check_main_surface(struct intel_plane_state *plane_state) 3826 3837 { 3827 3838 struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
+1
drivers/gpu/drm/i915/display/intel_display.h
··· 608 608 u32 pixel_format, u64 modifier, 609 609 unsigned int rotation); 610 610 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc); 611 + unsigned int intel_plane_fence_y_offset(const struct intel_plane_state *plane_state); 611 612 612 613 struct intel_display_error_state * 613 614 intel_display_capture_error_state(struct drm_i915_private *dev_priv);
+36 -29
drivers/gpu/drm/i915/display/intel_fbc.c
··· 48 48 #include "intel_frontbuffer.h" 49 49 50 50 /* 51 - * In some platforms where the CRTC's x:0/y:0 coordinates doesn't match the 52 - * frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's 53 - * origin so the x and y offsets can actually fit the registers. As a 54 - * consequence, the fence doesn't really start exactly at the display plane 55 - * address we program because it starts at the real start of the buffer, so we 56 - * have to take this into consideration here. 57 - */ 58 - static unsigned int get_crtc_fence_y_offset(struct intel_fbc *fbc) 59 - { 60 - return fbc->state_cache.plane.y - fbc->state_cache.plane.adjusted_y; 61 - } 62 - 63 - /* 64 51 * For SKL+, the plane source size used by the hardware is based on the value we 65 52 * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value 66 53 * we wrote to PIPESRC. ··· 128 141 fbc_ctl2 |= FBC_CTL_CPU_FENCE; 129 142 intel_de_write(dev_priv, FBC_CONTROL2, fbc_ctl2); 130 143 intel_de_write(dev_priv, FBC_FENCE_OFF, 131 - params->crtc.fence_y_offset); 144 + params->fence_y_offset); 132 145 } 133 146 134 147 /* enable it... */ ··· 162 175 if (params->fence_id >= 0) { 163 176 dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fence_id; 164 177 intel_de_write(dev_priv, DPFC_FENCE_YOFF, 165 - params->crtc.fence_y_offset); 178 + params->fence_y_offset); 166 179 } else { 167 180 intel_de_write(dev_priv, DPFC_FENCE_YOFF, 0); 168 181 } ··· 230 243 intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 231 244 SNB_CPU_FENCE_ENABLE | params->fence_id); 232 245 intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 233 - params->crtc.fence_y_offset); 246 + params->fence_y_offset); 234 247 } 235 248 } else { 236 249 if (IS_GEN(dev_priv, 6)) { ··· 240 253 } 241 254 242 255 intel_de_write(dev_priv, ILK_DPFC_FENCE_YOFF, 243 - params->crtc.fence_y_offset); 256 + params->fence_y_offset); 244 257 /* enable it... 
*/ 245 258 intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); 246 259 ··· 307 320 intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 308 321 SNB_CPU_FENCE_ENABLE | params->fence_id); 309 322 intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 310 - params->crtc.fence_y_offset); 323 + params->fence_y_offset); 311 324 } else if (dev_priv->ggtt.num_fences) { 312 325 intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0); 313 326 intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0); ··· 618 631 /* 619 632 * For some reason, the hardware tracking starts looking at whatever we 620 633 * programmed as the display plane base address register. It does not look at 621 - * the X and Y offset registers. That's why we look at the crtc->adjusted{x,y} 622 - * variables instead of just looking at the pipe/plane size. 634 + * the X and Y offset registers. That's why we include the src x/y offsets 635 + * instead of just looking at the plane size. 623 636 */ 624 637 static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc) 625 638 { ··· 692 705 cache->plane.src_h = drm_rect_height(&plane_state->uapi.src) >> 16; 693 706 cache->plane.adjusted_x = plane_state->color_plane[0].x; 694 707 cache->plane.adjusted_y = plane_state->color_plane[0].y; 695 - cache->plane.y = plane_state->uapi.src.y1 >> 16; 696 708 697 709 cache->plane.pixel_blend_mode = plane_state->hw.pixel_blend_mode; 698 710 699 711 cache->fb.format = fb->format; 700 712 cache->fb.stride = fb->pitches[0]; 701 713 cache->fb.modifier = fb->modifier; 714 + 715 + cache->fence_y_offset = intel_plane_fence_y_offset(plane_state); 702 716 703 717 drm_WARN_ON(&dev_priv->drm, plane_state->flags & PLANE_HAS_FENCE && 704 718 !plane_state->vma->fence); ··· 717 729 718 730 return intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) > 719 731 fbc->compressed_fb.size * fbc->threshold; 732 + } 733 + 734 + static u16 intel_fbc_gen9_wa_cfb_stride(struct drm_i915_private *dev_priv) 735 + { 736 + struct intel_fbc *fbc = &dev_priv->fbc; 
737 + struct intel_fbc_state_cache *cache = &fbc->state_cache; 738 + 739 + if ((IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) && 740 + cache->fb.modifier != I915_FORMAT_MOD_X_TILED) 741 + return DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->threshold) * 8; 742 + else 743 + return 0; 744 + } 745 + 746 + static bool intel_fbc_gen9_wa_cfb_stride_changed(struct drm_i915_private *dev_priv) 747 + { 748 + struct intel_fbc *fbc = &dev_priv->fbc; 749 + 750 + return fbc->params.gen9_wa_cfb_stride != intel_fbc_gen9_wa_cfb_stride(dev_priv); 720 751 } 721 752 722 753 static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv) ··· 890 883 memset(params, 0, sizeof(*params)); 891 884 892 885 params->fence_id = cache->fence_id; 886 + params->fence_y_offset = cache->fence_y_offset; 893 887 894 888 params->crtc.pipe = crtc->pipe; 895 889 params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane; 896 - params->crtc.fence_y_offset = get_crtc_fence_y_offset(fbc); 897 890 898 891 params->fb.format = cache->fb.format; 892 + params->fb.modifier = cache->fb.modifier; 899 893 params->fb.stride = cache->fb.stride; 900 894 901 895 params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache); ··· 924 916 return false; 925 917 926 918 if (params->fb.format != cache->fb.format) 919 + return false; 920 + 921 + if (params->fb.modifier != cache->fb.modifier) 927 922 return false; 928 923 929 924 if (params->fb.stride != cache->fb.stride) ··· 1208 1197 1209 1198 if (fbc->crtc) { 1210 1199 if (fbc->crtc != crtc || 1211 - !intel_fbc_cfb_size_changed(dev_priv)) 1200 + (!intel_fbc_cfb_size_changed(dev_priv) && 1201 + !intel_fbc_gen9_wa_cfb_stride_changed(dev_priv))) 1212 1202 goto out; 1213 1203 1214 1204 __intel_fbc_disable(dev_priv); ··· 1231 1219 goto out; 1232 1220 } 1233 1221 1234 - if ((IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) && 1235 - plane_state->hw.fb->modifier != I915_FORMAT_MOD_X_TILED) 1236 - cache->gen9_wa_cfb_stride = 1237 - DIV_ROUND_UP(cache->plane.src_w, 32 
* fbc->threshold) * 8; 1238 - else 1239 - cache->gen9_wa_cfb_stride = 0; 1222 + cache->gen9_wa_cfb_stride = intel_fbc_gen9_wa_cfb_stride(dev_priv); 1240 1223 1241 1224 drm_dbg_kms(&dev_priv->drm, "Enabling FBC on pipe %c\n", 1242 1225 pipe_name(crtc->pipe));
+2 -8
drivers/gpu/drm/i915/display/intel_hdmi.c
··· 2867 2867 return ret; 2868 2868 } 2869 2869 2870 - static void intel_hdmi_destroy(struct drm_connector *connector) 2870 + static void intel_hdmi_connector_unregister(struct drm_connector *connector) 2871 2871 { 2872 2872 struct cec_notifier *n = intel_attached_hdmi(to_intel_connector(connector))->cec_notifier; 2873 2873 2874 2874 cec_notifier_conn_unregister(n); 2875 2875 2876 - intel_connector_destroy(connector); 2877 - } 2878 - 2879 - static void intel_hdmi_connector_unregister(struct drm_connector *connector) 2880 - { 2881 2876 intel_hdmi_remove_i2c_symlink(connector); 2882 - 2883 2877 intel_connector_unregister(connector); 2884 2878 } 2885 2879 ··· 2885 2891 .atomic_set_property = intel_digital_connector_atomic_set_property, 2886 2892 .late_register = intel_hdmi_connector_register, 2887 2893 .early_unregister = intel_hdmi_connector_unregister, 2888 - .destroy = intel_hdmi_destroy, 2894 + .destroy = intel_connector_destroy, 2889 2895 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 2890 2896 .atomic_duplicate_state = intel_digital_connector_duplicate_state, 2891 2897 };
+6 -6
drivers/gpu/drm/i915/gt/intel_context.c
··· 204 204 { 205 205 int err; 206 206 207 - err = i915_active_acquire(&ring->vma->active); 207 + err = intel_ring_pin(ring); 208 208 if (err) 209 209 return err; 210 210 211 - err = intel_ring_pin(ring); 211 + err = i915_active_acquire(&ring->vma->active); 212 212 if (err) 213 - goto err_active; 213 + goto err_pin; 214 214 215 215 return 0; 216 216 217 - err_active: 218 - i915_active_release(&ring->vma->active); 217 + err_pin: 218 + intel_ring_unpin(ring); 219 219 return err; 220 220 } 221 221 222 222 static void __ring_retire(struct intel_ring *ring) 223 223 { 224 - intel_ring_unpin(ring); 225 224 i915_active_release(&ring->vma->active); 225 + intel_ring_unpin(ring); 226 226 } 227 227 228 228 __i915_active_call
+5 -14
drivers/gpu/drm/i915/gt/intel_lrc.c
··· 5396 5396 * typically be the first we inspect for submission. 5397 5397 */ 5398 5398 swp = prandom_u32_max(ve->num_siblings); 5399 - if (!swp) 5400 - return; 5401 - 5402 - swap(ve->siblings[swp], ve->siblings[0]); 5403 - if (!intel_engine_has_relative_mmio(ve->siblings[0])) 5404 - virtual_update_register_offsets(ve->context.lrc_reg_state, 5405 - ve->siblings[0]); 5399 + if (swp) 5400 + swap(ve->siblings[swp], ve->siblings[0]); 5406 5401 } 5407 5402 5408 5403 static int virtual_context_alloc(struct intel_context *ce) ··· 5410 5415 static int virtual_context_pin(struct intel_context *ce) 5411 5416 { 5412 5417 struct virtual_engine *ve = container_of(ce, typeof(*ve), context); 5413 - int err; 5414 5418 5415 5419 /* Note: we must use a real engine class for setting up reg state */ 5416 - err = __execlists_context_pin(ce, ve->siblings[0]); 5417 - if (err) 5418 - return err; 5419 - 5420 - virtual_engine_initial_hint(ve); 5421 - return 0; 5420 + return __execlists_context_pin(ce, ve->siblings[0]); 5422 5421 } 5423 5422 5424 5423 static void virtual_context_enter(struct intel_context *ce) ··· 5677 5688 intel_engine_init_active(&ve->base, ENGINE_VIRTUAL); 5678 5689 intel_engine_init_breadcrumbs(&ve->base); 5679 5690 intel_engine_init_execlists(&ve->base); 5691 + ve->base.breadcrumbs.irq_armed = true; /* fake HW, used for irq_work */ 5680 5692 5681 5693 ve->base.cops = &virtual_context_ops; 5682 5694 ve->base.request_alloc = execlists_request_alloc; ··· 5759 5769 5760 5770 ve->base.flags |= I915_ENGINE_IS_VIRTUAL; 5761 5771 5772 + virtual_engine_initial_hint(ve); 5762 5773 return &ve->context; 5763 5774 5764 5775 err_put:
+4 -4
drivers/gpu/drm/i915/gt/selftest_rps.c
··· 44 44 { 45 45 const u64 *a = A, *b = B; 46 46 47 - if (a < b) 47 + if (*a < *b) 48 48 return -1; 49 - else if (a > b) 49 + else if (*a > *b) 50 50 return 1; 51 51 else 52 52 return 0; ··· 56 56 { 57 57 const u32 *a = A, *b = B; 58 58 59 - if (a < b) 59 + if (*a < *b) 60 60 return -1; 61 - else if (a > b) 61 + else if (*a > *b) 62 62 return 1; 63 63 else 64 64 return 0;
+46
drivers/gpu/drm/i915/gt/shaders/README
··· 1 + ASM sources for auto generated shaders 2 + ====================================== 3 + 4 + The i915/gt/hsw_clear_kernel.c and i915/gt/ivb_clear_kernel.c files contain 5 + pre-compiled batch chunks that will clear any residual render cache during 6 + context switch. 7 + 8 + They are generated from their respective platform ASM files present on 9 + i915/gt/shaders/clear_kernel directory. 10 + 11 + The generated .c files should never be modified directly. Instead, any modification 12 + needs to be done on the on their respective ASM files and build instructions below 13 + needes to be followed. 14 + 15 + Building 16 + ======== 17 + 18 + Environment 19 + ----------- 20 + 21 + IGT GPU tool scripts and the Mesa's i965 instruction assembler tool are used 22 + on building. 23 + 24 + Please make sure your Mesa tool is compiled with "-Dtools=intel" and 25 + "-Ddri-drivers=i965", and run this script from IGT source root directory" 26 + 27 + The instructions bellow assume: 28 + * IGT gpu tools source code is located on your home directory (~) as ~/igt 29 + * Mesa source code is located on your home directory (~) as ~/mesa 30 + and built under the ~/mesa/build directory 31 + * Linux kernel source code is under your home directory (~) as ~/linux 32 + 33 + Instructions 34 + ------------ 35 + 36 + ~ $ cp ~/linux/drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm \ 37 + ~/igt/lib/i915/shaders/clear_kernel/ivb.asm 38 + ~ $ cd ~/igt 39 + igt $ ./scripts/generate_clear_kernel.sh -g ivb \ 40 + -m ~/mesa/build/src/intel/tools/i965_asm 41 + 42 + ~ $ cp ~/linux/drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm \ 43 + ~/igt/lib/i915/shaders/clear_kernel/hsw.asm 44 + ~ $ cd ~/igt 45 + igt $ ./scripts/generate_clear_kernel.sh -g hsw \ 46 + -m ~/mesa/build/src/intel/tools/i965_asm
+119
drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm
··· 1 + // SPDX-License-Identifier: MIT 2 + /* 3 + * Copyright © 2020 Intel Corporation 4 + */ 5 + 6 + /* 7 + * Kernel for PAVP buffer clear. 8 + * 9 + * 1. Clear all 64 GRF registers assigned to the kernel with designated value; 10 + * 2. Write 32x16 block of all "0" to render target buffer which indirectly clears 11 + * 512 bytes of Render Cache. 12 + */ 13 + 14 + /* Store designated "clear GRF" value */ 15 + mov(1) f0.1<1>UW g1.2<0,1,0>UW { align1 1N }; 16 + 17 + /** 18 + * Curbe Format 19 + * 20 + * DW 1.0 - Block Offset to write Render Cache 21 + * DW 1.1 [15:0] - Clear Word 22 + * DW 1.2 - Delay iterations 23 + * DW 1.3 - Enable Instrumentation (only for debug) 24 + * DW 1.4 - Rsvd (intended for context ID) 25 + * DW 1.5 - [31:16]:SliceCount, [15:0]:SubSlicePerSliceCount 26 + * DW 1.6 - Rsvd MBZ (intended for Enable Wait on Total Thread Count) 27 + * DW 1.7 - Rsvd MBZ (inteded for Total Thread Count) 28 + * 29 + * Binding Table 30 + * 31 + * BTI 0: 2D Surface to help clear L3 (Render/Data Cache) 32 + * BTI 1: Wait/Instrumentation Buffer 33 + * Size : (SliceCount * SubSliceCount * 16 EUs/SubSlice) rows * (16 threads/EU) cols (Format R32_UINT) 34 + * Expected to be initialized to 0 by driver/another kernel 35 + * Layout: 36 + * RowN: Histogram for EU-N: (SliceID*SubSlicePerSliceCount + SSID)*16 + EUID [assume max 16 EUs / SS] 37 + * Col-k[DW-k]: Threads Executed on ThreadID-k for EU-N 38 + */ 39 + add(1) g1.2<1>UD g1.2<0,1,0>UD 0x00000001UD { align1 1N }; /* Loop count to delay kernel: Init to (g1.2 + 1) */ 40 + cmp.z.f0.0(1) null<1>UD g1.3<0,1,0>UD 0x00000000UD { align1 1N }; 41 + (+f0.0) jmpi(1) 352D { align1 WE_all 1N }; 42 + 43 + /** 44 + * State Register has info on where this thread is running 45 + * IVB: sr0.0 :: [15:13]: MBZ, 12: HSID (Half-Slice ID), [11:8]EUID, [2:0] ThreadSlotID 46 + * HSW: sr0.0 :: 15: MBZ, [14:13]: SliceID, 12: HSID (Half-Slice ID), [11:8]EUID, [2:0] ThreadSlotID 47 + */ 48 + mov(8) g3<1>UD 0x00000000UD { align1 1Q }; 49 + shr(1) 
g3<1>D sr0<0,1,0>D 12D { align1 1N }; 50 + and(1) g3<1>D g3<0,1,0>D 1D { align1 1N }; /* g3 has HSID */ 51 + shr(1) g3.1<1>D sr0<0,1,0>D 13D { align1 1N }; 52 + and(1) g3.1<1>D g3.1<0,1,0>D 3D { align1 1N }; /* g3.1 has sliceID */ 53 + mul(1) g3.5<1>D g3.1<0,1,0>D g1.10<0,1,0>UW { align1 1N }; 54 + add(1) g3<1>D g3<0,1,0>D g3.5<0,1,0>D { align1 1N }; /* g3 = sliceID * SubSlicePerSliceCount + HSID */ 55 + shr(1) g3.2<1>D sr0<0,1,0>D 8D { align1 1N }; 56 + and(1) g3.2<1>D g3.2<0,1,0>D 15D { align1 1N }; /* g3.2 = EUID */ 57 + mul(1) g3.4<1>D g3<0,1,0>D 16D { align1 1N }; 58 + add(1) g3.2<1>D g3.2<0,1,0>D g3.4<0,1,0>D { align1 1N }; /* g3.2 now points to EU row number (Y-pixel = V address ) in instrumentation surf */ 59 + 60 + mov(8) g5<1>UD 0x00000000UD { align1 1Q }; 61 + and(1) g3.3<1>D sr0<0,1,0>D 7D { align1 1N }; 62 + mul(1) g3.3<1>D g3.3<0,1,0>D 4D { align1 1N }; 63 + 64 + mov(8) g4<1>UD g0<8,8,1>UD { align1 1Q }; /* Initialize message header with g0 */ 65 + mov(1) g4<1>UD g3.3<0,1,0>UD { align1 1N }; /* Block offset */ 66 + mov(1) g4.1<1>UD g3.2<0,1,0>UD { align1 1N }; /* Block offset */ 67 + mov(1) g4.2<1>UD 0x00000003UD { align1 1N }; /* Block size (1 row x 4 bytes) */ 68 + and(1) g4.3<1>UD g4.3<0,1,0>UW 0xffffffffUD { align1 1N }; 69 + 70 + /* Media block read to fetch current value at specified location in instrumentation buffer */ 71 + sendc(8) g5<1>UD g4<8,8,1>F 0x02190001 72 + 73 + render MsgDesc: media block read MsgCtrl = 0x0 Surface = 1 mlen 1 rlen 1 { align1 1Q }; 74 + add(1) g5<1>D g5<0,1,0>D 1D { align1 1N }; 75 + 76 + /* Media block write for updated value at specified location in instrumentation buffer */ 77 + sendc(8) g5<1>UD g4<8,8,1>F 0x040a8001 78 + render MsgDesc: media block write MsgCtrl = 0x0 Surface = 1 mlen 2 rlen 0 { align1 1Q }; 79 + 80 + /* Delay thread for specified parameter */ 81 + add.nz.f0.0(1) g1.2<1>UD g1.2<0,1,0>UD -1D { align1 1N }; 82 + (+f0.0) jmpi(1) -32D { align1 WE_all 1N }; 83 + 84 + /* Store designated "clear GRF" 
value */ 85 + mov(1) f0.1<1>UW g1.2<0,1,0>UW { align1 1N }; 86 + 87 + /* Initialize looping parameters */ 88 + mov(1) a0<1>D 0D { align1 1N }; /* Initialize a0.0:w=0 */ 89 + mov(1) a0.4<1>W 127W { align1 1N }; /* Loop count. Each loop contains 16 GRF's */ 90 + 91 + /* Write 32x16 all "0" block */ 92 + mov(8) g2<1>UD g0<8,8,1>UD { align1 1Q }; 93 + mov(8) g127<1>UD g0<8,8,1>UD { align1 1Q }; 94 + mov(2) g2<1>UD g1<2,2,1>UW { align1 1N }; 95 + mov(1) g2.2<1>UD 0x000f000fUD { align1 1N }; /* Block size (16x16) */ 96 + and(1) g2.3<1>UD g2.3<0,1,0>UW 0xffffffefUD { align1 1N }; 97 + mov(16) g3<1>UD 0x00000000UD { align1 1H }; 98 + mov(16) g4<1>UD 0x00000000UD { align1 1H }; 99 + mov(16) g5<1>UD 0x00000000UD { align1 1H }; 100 + mov(16) g6<1>UD 0x00000000UD { align1 1H }; 101 + mov(16) g7<1>UD 0x00000000UD { align1 1H }; 102 + mov(16) g8<1>UD 0x00000000UD { align1 1H }; 103 + mov(16) g9<1>UD 0x00000000UD { align1 1H }; 104 + mov(16) g10<1>UD 0x00000000UD { align1 1H }; 105 + sendc(8) null<1>UD g2<8,8,1>F 0x120a8000 106 + render MsgDesc: media block write MsgCtrl = 0x0 Surface = 0 mlen 9 rlen 0 { align1 1Q }; 107 + add(1) g2<1>UD g1<0,1,0>UW 0x0010UW { align1 1N }; 108 + sendc(8) null<1>UD g2<8,8,1>F 0x120a8000 109 + render MsgDesc: media block write MsgCtrl = 0x0 Surface = 0 mlen 9 rlen 0 { align1 1Q }; 110 + 111 + /* Now, clear all GRF registers */ 112 + add.nz.f0.0(1) a0.4<1>W a0.4<0,1,0>W -1W { align1 1N }; 113 + mov(16) g[a0]<1>UW f0.1<0,1,0>UW { align1 1H }; 114 + add(1) a0<1>D a0<0,1,0>D 32D { align1 1N }; 115 + (+f0.0) jmpi(1) -64D { align1 WE_all 1N }; 116 + 117 + /* Terminante the thread */ 118 + sendc(8) null<1>UD g127<8,8,1>F 0x82000010 119 + thread_spawner MsgDesc: mlen 1 rlen 0 { align1 1Q EOT };
+117
drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm
··· 1 + // SPDX-License-Identifier: MIT 2 + /* 3 + * Copyright © 2020 Intel Corporation 4 + */ 5 + 6 + /* 7 + * Kernel for PAVP buffer clear. 8 + * 9 + * 1. Clear all 64 GRF registers assigned to the kernel with designated value; 10 + * 2. Write 32x16 block of all "0" to render target buffer which indirectly clears 11 + * 512 bytes of Render Cache. 12 + */ 13 + 14 + /* Store designated "clear GRF" value */ 15 + mov(1) f0.1<1>UW g1.2<0,1,0>UW { align1 1N }; 16 + 17 + /** 18 + * Curbe Format 19 + * 20 + * DW 1.0 - Block Offset to write Render Cache 21 + * DW 1.1 [15:0] - Clear Word 22 + * DW 1.2 - Delay iterations 23 + * DW 1.3 - Enable Instrumentation (only for debug) 24 + * DW 1.4 - Rsvd (intended for context ID) 25 + * DW 1.5 - [31:16]:SliceCount, [15:0]:SubSlicePerSliceCount 26 + * DW 1.6 - Rsvd MBZ (intended for Enable Wait on Total Thread Count) 27 + * DW 1.7 - Rsvd MBZ (inteded for Total Thread Count) 28 + * 29 + * Binding Table 30 + * 31 + * BTI 0: 2D Surface to help clear L3 (Render/Data Cache) 32 + * BTI 1: Wait/Instrumentation Buffer 33 + * Size : (SliceCount * SubSliceCount * 16 EUs/SubSlice) rows * (16 threads/EU) cols (Format R32_UINT) 34 + * Expected to be initialized to 0 by driver/another kernel 35 + * Layout : 36 + * RowN: Histogram for EU-N: (SliceID*SubSlicePerSliceCount + SSID)*16 + EUID [assume max 16 EUs / SS] 37 + * Col-k[DW-k]: Threads Executed on ThreadID-k for EU-N 38 + */ 39 + add(1) g1.2<1>UD g1.2<0,1,0>UD 0x00000001UD { align1 1N }; /* Loop count to delay kernel: Init to (g1.2 + 1) */ 40 + cmp.z.f0.0(1) null<1>UD g1.3<0,1,0>UD 0x00000000UD { align1 1N }; 41 + (+f0.0) jmpi(1) 44D { align1 WE_all 1N }; 42 + 43 + /** 44 + * State Register has info on where this thread is running 45 + * IVB: sr0.0 :: [15:13]: MBZ, 12: HSID (Half-Slice ID), [11:8]EUID, [2:0] ThreadSlotID 46 + * HSW: sr0.0 :: 15: MBZ, [14:13]: SliceID, 12: HSID (Half-Slice ID), [11:8]EUID, [2:0] ThreadSlotID 47 + */ 48 + mov(8) g3<1>UD 0x00000000UD { align1 1Q }; 49 + shr(1) 
g3<1>D sr0<0,1,0>D 12D { align1 1N }; 50 + and(1) g3<1>D g3<0,1,0>D 1D { align1 1N }; /* g3 has HSID */ 51 + shr(1) g3.1<1>D sr0<0,1,0>D 13D { align1 1N }; 52 + and(1) g3.1<1>D g3.1<0,1,0>D 3D { align1 1N }; /* g3.1 has sliceID */ 53 + mul(1) g3.5<1>D g3.1<0,1,0>D g1.10<0,1,0>UW { align1 1N }; 54 + add(1) g3<1>D g3<0,1,0>D g3.5<0,1,0>D { align1 1N }; /* g3 = sliceID * SubSlicePerSliceCount + HSID */ 55 + shr(1) g3.2<1>D sr0<0,1,0>D 8D { align1 1N }; 56 + and(1) g3.2<1>D g3.2<0,1,0>D 15D { align1 1N }; /* g3.2 = EUID */ 57 + mul(1) g3.4<1>D g3<0,1,0>D 16D { align1 1N }; 58 + add(1) g3.2<1>D g3.2<0,1,0>D g3.4<0,1,0>D { align1 1N }; /* g3.2 now points to EU row number (Y-pixel = V address ) in instrumentation surf */ 59 + 60 + mov(8) g5<1>UD 0x00000000UD { align1 1Q }; 61 + and(1) g3.3<1>D sr0<0,1,0>D 7D { align1 1N }; 62 + mul(1) g3.3<1>D g3.3<0,1,0>D 4D { align1 1N }; 63 + 64 + mov(8) g4<1>UD g0<8,8,1>UD { align1 1Q }; /* Initialize message header with g0 */ 65 + mov(1) g4<1>UD g3.3<0,1,0>UD { align1 1N }; /* Block offset */ 66 + mov(1) g4.1<1>UD g3.2<0,1,0>UD { align1 1N }; /* Block offset */ 67 + mov(1) g4.2<1>UD 0x00000003UD { align1 1N }; /* Block size (1 row x 4 bytes) */ 68 + and(1) g4.3<1>UD g4.3<0,1,0>UW 0xffffffffUD { align1 1N }; 69 + 70 + /* Media block read to fetch current value at specified location in instrumentation buffer */ 71 + sendc(8) g5<1>UD g4<8,8,1>F 0x02190001 72 + render MsgDesc: media block read MsgCtrl = 0x0 Surface = 1 mlen 1 rlen 1 { align1 1Q }; 73 + add(1) g5<1>D g5<0,1,0>D 1D { align1 1N }; 74 + 75 + /* Media block write for updated value at specified location in instrumentation buffer */ 76 + sendc(8) g5<1>UD g4<8,8,1>F 0x040a8001 77 + render MsgDesc: media block write MsgCtrl = 0x0 Surface = 1 mlen 2 rlen 0 { align1 1Q }; 78 + /* Delay thread for specified parameter */ 79 + add.nz.f0.0(1) g1.2<1>UD g1.2<0,1,0>UD -1D { align1 1N }; 80 + (+f0.0) jmpi(1) -4D { align1 WE_all 1N }; 81 + 82 + /* Store designated "clear GRF" value */ 83 + 
mov(1) f0.1<1>UW g1.2<0,1,0>UW { align1 1N }; 84 + 85 + /* Initialize looping parameters */ 86 + mov(1) a0<1>D 0D { align1 1N }; /* Initialize a0.0:w=0 */ 87 + mov(1) a0.4<1>W 127W { align1 1N }; /* Loop count. Each loop contains 16 GRF's */ 88 + 89 + /* Write 32x16 all "0" block */ 90 + mov(8) g2<1>UD g0<8,8,1>UD { align1 1Q }; 91 + mov(8) g127<1>UD g0<8,8,1>UD { align1 1Q }; 92 + mov(2) g2<1>UD g1<2,2,1>UW { align1 1N }; 93 + mov(1) g2.2<1>UD 0x000f000fUD { align1 1N }; /* Block size (16x16) */ 94 + and(1) g2.3<1>UD g2.3<0,1,0>UW 0xffffffefUD { align1 1N }; 95 + mov(16) g3<1>UD 0x00000000UD { align1 1H }; 96 + mov(16) g4<1>UD 0x00000000UD { align1 1H }; 97 + mov(16) g5<1>UD 0x00000000UD { align1 1H }; 98 + mov(16) g6<1>UD 0x00000000UD { align1 1H }; 99 + mov(16) g7<1>UD 0x00000000UD { align1 1H }; 100 + mov(16) g8<1>UD 0x00000000UD { align1 1H }; 101 + mov(16) g9<1>UD 0x00000000UD { align1 1H }; 102 + mov(16) g10<1>UD 0x00000000UD { align1 1H }; 103 + sendc(8) null<1>UD g2<8,8,1>F 0x120a8000 104 + render MsgDesc: media block write MsgCtrl = 0x0 Surface = 0 mlen 9 rlen 0 { align1 1Q }; 105 + add(1) g2<1>UD g1<0,1,0>UW 0x0010UW { align1 1N }; 106 + sendc(8) null<1>UD g2<8,8,1>F 0x120a8000 107 + render MsgDesc: media block write MsgCtrl = 0x0 Surface = 0 mlen 9 rlen 0 { align1 1Q }; 108 + 109 + /* Now, clear all GRF registers */ 110 + add.nz.f0.0(1) a0.4<1>W a0.4<0,1,0>W -1W { align1 1N }; 111 + mov(16) g[a0]<1>UW f0.1<0,1,0>UW { align1 1H }; 112 + add(1) a0<1>D a0<0,1,0>D 32D { align1 1N }; 113 + (+f0.0) jmpi(1) -8D { align1 WE_all 1N }; 114 + 115 + /* Terminante the thread */ 116 + sendc(8) null<1>UD g127<8,8,1>F 0x82000010 117 + thread_spawner MsgDesc: mlen 1 rlen 0 { align1 1Q EOT };
+1 -1
drivers/gpu/drm/i915/gvt/debugfs.c
··· 66 66 vreg = vgpu_vreg(param->vgpu, offset); 67 67 68 68 if (preg != vreg) { 69 - node = kmalloc(sizeof(*node), GFP_KERNEL); 69 + node = kmalloc(sizeof(*node), GFP_ATOMIC); 70 70 if (!node) 71 71 return -ENOMEM; 72 72
+13 -11
drivers/gpu/drm/i915/gvt/handlers.c
··· 1726 1726 (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2); 1727 1727 write_vreg(vgpu, offset, p_data, bytes); 1728 1728 1729 - if (data & _MASKED_BIT_ENABLE(1)) { 1729 + if (IS_MASKED_BITS_ENABLED(data, 1)) { 1730 1730 enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST); 1731 1731 return 0; 1732 1732 } 1733 1733 1734 1734 if (IS_COFFEELAKE(vgpu->gvt->gt->i915) && 1735 - data & _MASKED_BIT_ENABLE(2)) { 1735 + IS_MASKED_BITS_ENABLED(data, 2)) { 1736 1736 enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST); 1737 1737 return 0; 1738 1738 } ··· 1741 1741 * pvinfo, if not, we will treat this guest as non-gvtg-aware 1742 1742 * guest, and stop emulating its cfg space, mmio, gtt, etc. 1743 1743 */ 1744 - if (((data & _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)) || 1745 - (data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE))) 1746 - && !vgpu->pv_notified) { 1744 + if ((IS_MASKED_BITS_ENABLED(data, GFX_PPGTT_ENABLE) || 1745 + IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE)) && 1746 + !vgpu->pv_notified) { 1747 1747 enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST); 1748 1748 return 0; 1749 1749 } 1750 - if ((data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)) 1751 - || (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) { 1750 + if (IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE) || 1751 + IS_MASKED_BITS_DISABLED(data, GFX_RUN_LIST_ENABLE)) { 1752 1752 enable_execlist = !!(data & GFX_RUN_LIST_ENABLE); 1753 1753 1754 1754 gvt_dbg_core("EXECLIST %s on ring %s\n", ··· 1809 1809 write_vreg(vgpu, offset, p_data, bytes); 1810 1810 data = vgpu_vreg(vgpu, offset); 1811 1811 1812 - if (data & _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET)) 1812 + if (IS_MASKED_BITS_ENABLED(data, RESET_CTL_REQUEST_RESET)) 1813 1813 data |= RESET_CTL_READY_TO_RESET; 1814 1814 else if (data & _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET)) 1815 1815 data &= ~RESET_CTL_READY_TO_RESET; ··· 1827 1827 (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(0x18); 1828 1828 write_vreg(vgpu, offset, p_data, bytes); 1829 
1829 1830 - if (data & _MASKED_BIT_ENABLE(0x10) || data & _MASKED_BIT_ENABLE(0x8)) 1830 + if (IS_MASKED_BITS_ENABLED(data, 0x10) || 1831 + IS_MASKED_BITS_ENABLED(data, 0x8)) 1831 1832 enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST); 1832 1833 1833 1834 return 0; ··· 3056 3055 MMIO_D(_MMIO(0x72380), D_SKL_PLUS); 3057 3056 MMIO_D(_MMIO(0x7239c), D_SKL_PLUS); 3058 3057 MMIO_D(_MMIO(_PLANE_SURF_3_A), D_SKL_PLUS); 3058 + MMIO_D(_MMIO(_PLANE_SURF_3_B), D_SKL_PLUS); 3059 3059 3060 3060 MMIO_D(CSR_SSP_BASE, D_SKL_PLUS); 3061 3061 MMIO_D(CSR_HTP_SKL, D_SKL_PLUS); ··· 3133 3131 MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, 3134 3132 NULL, NULL); 3135 3133 3136 - MMIO_D(GAMT_CHKN_BIT_REG, D_KBL); 3137 - MMIO_D(GEN9_CTX_PREEMPT_REG, D_KBL | D_SKL); 3134 + MMIO_D(GAMT_CHKN_BIT_REG, D_KBL | D_CFL); 3135 + MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS); 3138 3136 3139 3137 return 0; 3140 3138 }
+3 -3
drivers/gpu/drm/i915/gvt/mmio_context.h
··· 54 54 55 55 int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu, 56 56 struct i915_request *req); 57 - #define IS_RESTORE_INHIBIT(a) \ 58 - (_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) == \ 59 - ((a) & _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT))) 57 + 58 + #define IS_RESTORE_INHIBIT(a) \ 59 + IS_MASKED_BITS_ENABLED(a, CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) 60 60 61 61 #endif
+5
drivers/gpu/drm/i915/gvt/reg.h
··· 94 94 #define GFX_MODE_BIT_SET_IN_MASK(val, bit) \ 95 95 ((((bit) & 0xffff0000) == 0) && !!((val) & (((bit) << 16)))) 96 96 97 + #define IS_MASKED_BITS_ENABLED(_val, _b) \ 98 + (((_val) & _MASKED_BIT_ENABLE(_b)) == _MASKED_BIT_ENABLE(_b)) 99 + #define IS_MASKED_BITS_DISABLED(_val, _b) \ 100 + ((_val) & _MASKED_BIT_DISABLE(_b)) 101 + 97 102 #define FORCEWAKE_RENDER_GEN9_REG 0xa278 98 103 #define FORCEWAKE_ACK_RENDER_GEN9_REG 0x0D84 99 104 #define FORCEWAKE_BLITTER_GEN9_REG 0xa188
+1 -1
drivers/gpu/drm/i915/i915_debugfs.c
··· 230 230 struct file_stats *stats = data; 231 231 struct i915_vma *vma; 232 232 233 - if (!kref_get_unless_zero(&obj->base.refcount)) 233 + if (IS_ERR_OR_NULL(obj) || !kref_get_unless_zero(&obj->base.refcount)) 234 234 return 0; 235 235 236 236 stats->count++;
+4 -3
drivers/gpu/drm/i915/i915_drv.h
··· 410 410 int adjusted_x; 411 411 int adjusted_y; 412 412 413 - int y; 414 - 415 413 u16 pixel_blend_mode; 416 414 } plane; 417 415 ··· 418 420 unsigned int stride; 419 421 u64 modifier; 420 422 } fb; 423 + 424 + unsigned int fence_y_offset; 421 425 u16 gen9_wa_cfb_stride; 422 426 s8 fence_id; 423 427 } state_cache; ··· 435 435 struct { 436 436 enum pipe pipe; 437 437 enum i9xx_plane_id i9xx_plane; 438 - unsigned int fence_y_offset; 439 438 } crtc; 440 439 441 440 struct { 442 441 const struct drm_format_info *format; 443 442 unsigned int stride; 443 + u64 modifier; 444 444 } fb; 445 445 446 446 int cfb_size; 447 + unsigned int fence_y_offset; 447 448 u16 gen9_wa_cfb_stride; 448 449 s8 fence_id; 449 450 bool plane_visible;
+1
drivers/gpu/drm/i915/i915_perf.c
··· 1592 1592 u32 d; 1593 1593 1594 1594 cmd = save ? MI_STORE_REGISTER_MEM : MI_LOAD_REGISTER_MEM; 1595 + cmd |= MI_SRM_LRM_GLOBAL_GTT; 1595 1596 if (INTEL_GEN(stream->perf->i915) >= 8) 1596 1597 cmd++; 1597 1598
+6 -9
drivers/gpu/drm/i915/i915_vma.c
··· 104 104 struct i915_address_space *vm, 105 105 const struct i915_ggtt_view *view) 106 106 { 107 + struct i915_vma *pos = ERR_PTR(-E2BIG); 107 108 struct i915_vma *vma; 108 109 struct rb_node *rb, **p; 109 110 ··· 185 184 rb = NULL; 186 185 p = &obj->vma.tree.rb_node; 187 186 while (*p) { 188 - struct i915_vma *pos; 189 187 long cmp; 190 188 191 189 rb = *p; ··· 196 196 * and dispose of ours. 197 197 */ 198 198 cmp = i915_vma_compare(pos, vm, view); 199 - if (cmp == 0) { 200 - spin_unlock(&obj->vma.lock); 201 - i915_vma_free(vma); 202 - return pos; 203 - } 204 - 205 199 if (cmp < 0) 206 200 p = &rb->rb_right; 207 - else 201 + else if (cmp > 0) 208 202 p = &rb->rb_left; 203 + else 204 + goto err_unlock; 209 205 } 210 206 rb_link_node(&vma->obj_node, rb, p); 211 207 rb_insert_color(&vma->obj_node, &obj->vma.tree); ··· 224 228 err_unlock: 225 229 spin_unlock(&obj->vma.lock); 226 230 err_vma: 231 + i915_vm_put(vm); 227 232 i915_vma_free(vma); 228 - return ERR_PTR(-E2BIG); 233 + return pos; 229 234 } 230 235 231 236 static struct i915_vma *
+2
drivers/gpu/drm/lima/lima_pp.c
··· 271 271 272 272 int lima_pp_bcast_resume(struct lima_ip *ip) 273 273 { 274 + /* PP has been reset by individual PP resume */ 275 + ip->data.async_reset = false; 274 276 return 0; 275 277 } 276 278
+1 -1
drivers/gpu/drm/mediatek/Kconfig
··· 6 6 depends on COMMON_CLK 7 7 depends on HAVE_ARM_SMCCC 8 8 depends on OF 9 + depends on MTK_MMSYS 9 10 select DRM_GEM_CMA_HELPER 10 11 select DRM_KMS_HELPER 11 12 select DRM_MIPI_DSI 12 13 select DRM_PANEL 13 14 select MEMORY 14 - select MTK_MMSYS 15 15 select MTK_SMI 16 16 select VIDEOMODE_HELPERS 17 17 help
+2 -6
drivers/gpu/drm/mediatek/mtk_drm_crtc.c
··· 193 193 int ret; 194 194 int i; 195 195 196 - DRM_DEBUG_DRIVER("%s\n", __func__); 197 196 for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { 198 197 ret = clk_prepare_enable(mtk_crtc->ddp_comp[i]->clk); 199 198 if (ret) { ··· 212 213 { 213 214 int i; 214 215 215 - DRM_DEBUG_DRIVER("%s\n", __func__); 216 216 for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) 217 217 clk_disable_unprepare(mtk_crtc->ddp_comp[i]->clk); 218 218 } ··· 256 258 int ret; 257 259 int i; 258 260 259 - DRM_DEBUG_DRIVER("%s\n", __func__); 260 261 if (WARN_ON(!crtc->state)) 261 262 return -EINVAL; 262 263 ··· 296 299 goto err_mutex_unprepare; 297 300 } 298 301 299 - DRM_DEBUG_DRIVER("mediatek_ddp_ddp_path_setup\n"); 300 302 for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) { 301 303 mtk_mmsys_ddp_connect(mtk_crtc->mmsys_dev, 302 304 mtk_crtc->ddp_comp[i]->id, ··· 345 349 struct drm_crtc *crtc = &mtk_crtc->base; 346 350 int i; 347 351 348 - DRM_DEBUG_DRIVER("%s\n", __func__); 349 352 for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { 350 353 mtk_ddp_comp_stop(mtk_crtc->ddp_comp[i]); 351 354 if (i == 1) ··· 826 831 827 832 #if IS_REACHABLE(CONFIG_MTK_CMDQ) 828 833 mtk_crtc->cmdq_client = 829 - cmdq_mbox_create(dev, drm_crtc_index(&mtk_crtc->base), 834 + cmdq_mbox_create(mtk_crtc->mmsys_dev, 835 + drm_crtc_index(&mtk_crtc->base), 830 836 2000); 831 837 if (IS_ERR(mtk_crtc->cmdq_client)) { 832 838 dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n",
+2 -4
drivers/gpu/drm/mediatek/mtk_drm_drv.c
··· 444 444 if (!private) 445 445 return -ENOMEM; 446 446 447 - private->data = of_device_get_match_data(dev); 448 447 private->mmsys_dev = dev->parent; 449 448 if (!private->mmsys_dev) { 450 449 dev_err(dev, "Failed to get MMSYS device\n"); ··· 513 514 goto err_node; 514 515 } 515 516 516 - ret = mtk_ddp_comp_init(dev, node, comp, comp_id, NULL); 517 + ret = mtk_ddp_comp_init(dev->parent, node, comp, 518 + comp_id, NULL); 517 519 if (ret) { 518 520 of_node_put(node); 519 521 goto err_node; ··· 571 571 int ret; 572 572 573 573 ret = drm_mode_config_helper_suspend(drm); 574 - DRM_DEBUG_DRIVER("mtk_drm_sys_suspend\n"); 575 574 576 575 return ret; 577 576 } ··· 582 583 int ret; 583 584 584 585 ret = drm_mode_config_helper_resume(drm); 585 - DRM_DEBUG_DRIVER("mtk_drm_sys_resume\n"); 586 586 587 587 return ret; 588 588 }
+15 -10
drivers/gpu/drm/mediatek/mtk_drm_plane.c
··· 164 164 true, true); 165 165 } 166 166 167 + static void mtk_plane_atomic_disable(struct drm_plane *plane, 168 + struct drm_plane_state *old_state) 169 + { 170 + struct mtk_plane_state *state = to_mtk_plane_state(plane->state); 171 + 172 + state->pending.enable = false; 173 + wmb(); /* Make sure the above parameter is set before update */ 174 + state->pending.dirty = true; 175 + } 176 + 167 177 static void mtk_plane_atomic_update(struct drm_plane *plane, 168 178 struct drm_plane_state *old_state) 169 179 { ··· 187 177 188 178 if (!crtc || WARN_ON(!fb)) 189 179 return; 180 + 181 + if (!plane->state->visible) { 182 + mtk_plane_atomic_disable(plane, old_state); 183 + return; 184 + } 190 185 191 186 gem = fb->obj[0]; 192 187 mtk_gem = to_mtk_gem_obj(gem); ··· 212 197 state->pending.height = drm_rect_height(&plane->state->dst); 213 198 state->pending.rotation = plane->state->rotation; 214 199 wmb(); /* Make sure the above parameters are set before update */ 215 - state->pending.dirty = true; 216 - } 217 - 218 - static void mtk_plane_atomic_disable(struct drm_plane *plane, 219 - struct drm_plane_state *old_state) 220 - { 221 - struct mtk_plane_state *state = to_mtk_plane_state(plane->state); 222 - 223 - state->pending.enable = false; 224 - wmb(); /* Make sure the above parameter is set before update */ 225 200 state->pending.dirty = true; 226 201 } 227 202
+1 -4
drivers/gpu/drm/mediatek/mtk_dsi.c
··· 316 316 317 317 static bool mtk_dsi_clk_hs_state(struct mtk_dsi *dsi) 318 318 { 319 - u32 tmp_reg1; 320 - 321 - tmp_reg1 = readl(dsi->regs + DSI_PHY_LCCON); 322 - return ((tmp_reg1 & LC_HS_TX_EN) == 1) ? true : false; 319 + return readl(dsi->regs + DSI_PHY_LCCON) & LC_HS_TX_EN; 323 320 } 324 321 325 322 static void mtk_dsi_clk_hs_mode(struct mtk_dsi *dsi, bool enter)
+1 -11
drivers/gpu/drm/mediatek/mtk_hdmi.c
··· 1630 1630 { 1631 1631 struct mtk_hdmi *hdmi = dev_get_drvdata(dev); 1632 1632 1633 - dev_dbg(dev, "%s\n", __func__); 1634 - 1635 1633 mtk_hdmi_audio_enable(hdmi); 1636 1634 1637 1635 return 0; ··· 1639 1641 { 1640 1642 struct mtk_hdmi *hdmi = dev_get_drvdata(dev); 1641 1643 1642 - dev_dbg(dev, "%s\n", __func__); 1643 - 1644 1644 mtk_hdmi_audio_disable(hdmi); 1645 1645 } 1646 1646 ··· 1646 1650 mtk_hdmi_audio_digital_mute(struct device *dev, void *data, bool enable) 1647 1651 { 1648 1652 struct mtk_hdmi *hdmi = dev_get_drvdata(dev); 1649 - 1650 - dev_dbg(dev, "%s(%d)\n", __func__, enable); 1651 1653 1652 1654 if (enable) 1653 1655 mtk_hdmi_hw_aud_mute(hdmi); ··· 1658 1664 static int mtk_hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf, size_t len) 1659 1665 { 1660 1666 struct mtk_hdmi *hdmi = dev_get_drvdata(dev); 1661 - 1662 - dev_dbg(dev, "%s\n", __func__); 1663 1667 1664 1668 memcpy(buf, hdmi->conn.eld, min(sizeof(hdmi->conn.eld), len)); 1665 1669 ··· 1758 1766 goto err_bridge_remove; 1759 1767 } 1760 1768 1761 - dev_dbg(dev, "mediatek hdmi probe success\n"); 1762 1769 return 0; 1763 1770 1764 1771 err_bridge_remove: ··· 1780 1789 struct mtk_hdmi *hdmi = dev_get_drvdata(dev); 1781 1790 1782 1791 mtk_hdmi_clk_disable_audio(hdmi); 1783 - dev_dbg(dev, "hdmi suspend success!\n"); 1792 + 1784 1793 return 0; 1785 1794 } 1786 1795 ··· 1795 1804 return ret; 1796 1805 } 1797 1806 1798 - dev_dbg(dev, "hdmi resume success!\n"); 1799 1807 return 0; 1800 1808 } 1801 1809 #endif
-52
drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
··· 107 107 #define RGS_HDMITX_5T1_EDG (0xf << 4) 108 108 #define RGS_HDMITX_PLUG_TST BIT(0) 109 109 110 - static const u8 PREDIV[3][4] = { 111 - {0x0, 0x0, 0x0, 0x0}, /* 27Mhz */ 112 - {0x1, 0x1, 0x1, 0x1}, /* 74Mhz */ 113 - {0x1, 0x1, 0x1, 0x1} /* 148Mhz */ 114 - }; 115 - 116 - static const u8 TXDIV[3][4] = { 117 - {0x3, 0x3, 0x3, 0x2}, /* 27Mhz */ 118 - {0x2, 0x1, 0x1, 0x1}, /* 74Mhz */ 119 - {0x1, 0x0, 0x0, 0x0} /* 148Mhz */ 120 - }; 121 - 122 - static const u8 FBKSEL[3][4] = { 123 - {0x1, 0x1, 0x1, 0x1}, /* 27Mhz */ 124 - {0x1, 0x0, 0x1, 0x1}, /* 74Mhz */ 125 - {0x1, 0x0, 0x1, 0x1} /* 148Mhz */ 126 - }; 127 - 128 - static const u8 FBKDIV[3][4] = { 129 - {19, 24, 29, 19}, /* 27Mhz */ 130 - {19, 24, 14, 19}, /* 74Mhz */ 131 - {19, 24, 14, 19} /* 148Mhz */ 132 - }; 133 - 134 - static const u8 DIVEN[3][4] = { 135 - {0x2, 0x1, 0x1, 0x2}, /* 27Mhz */ 136 - {0x2, 0x2, 0x2, 0x2}, /* 74Mhz */ 137 - {0x2, 0x2, 0x2, 0x2} /* 148Mhz */ 138 - }; 139 - 140 - static const u8 HTPLLBP[3][4] = { 141 - {0xc, 0xc, 0x8, 0xc}, /* 27Mhz */ 142 - {0xc, 0xf, 0xf, 0xc}, /* 74Mhz */ 143 - {0xc, 0xf, 0xf, 0xc} /* 148Mhz */ 144 - }; 145 - 146 - static const u8 HTPLLBC[3][4] = { 147 - {0x2, 0x3, 0x3, 0x2}, /* 27Mhz */ 148 - {0x2, 0x3, 0x3, 0x2}, /* 74Mhz */ 149 - {0x2, 0x3, 0x3, 0x2} /* 148Mhz */ 150 - }; 151 - 152 - static const u8 HTPLLBR[3][4] = { 153 - {0x1, 0x1, 0x0, 0x1}, /* 27Mhz */ 154 - {0x1, 0x2, 0x2, 0x1}, /* 74Mhz */ 155 - {0x1, 0x2, 0x2, 0x1} /* 148Mhz */ 156 - }; 157 - 158 110 static int mtk_hdmi_pll_prepare(struct clk_hw *hw) 159 111 { 160 112 struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw); 161 - 162 - dev_dbg(hdmi_phy->dev, "%s\n", __func__); 163 113 164 114 mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_AUTOK_EN); 165 115 mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_POSDIV); ··· 127 177 static void mtk_hdmi_pll_unprepare(struct clk_hw *hw) 128 178 { 129 179 struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw); 130 - 131 - dev_dbg(hdmi_phy->dev, 
"%s\n", __func__); 132 180 133 181 mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_TXDIV_EN); 134 182 mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_LPF_EN);
+6
drivers/gpu/drm/meson/meson_registers.h
··· 261 261 #define VIU_OSD_FIFO_DEPTH_VAL(val) ((val & 0x7f) << 12) 262 262 #define VIU_OSD_WORDS_PER_BURST(words) (((words & 0x4) >> 1) << 22) 263 263 #define VIU_OSD_FIFO_LIMITS(size) ((size & 0xf) << 24) 264 + #define VIU_OSD_BURST_LENGTH_24 (0x0 << 31 | 0x0 << 10) 265 + #define VIU_OSD_BURST_LENGTH_32 (0x0 << 31 | 0x1 << 10) 266 + #define VIU_OSD_BURST_LENGTH_48 (0x0 << 31 | 0x2 << 10) 267 + #define VIU_OSD_BURST_LENGTH_64 (0x0 << 31 | 0x3 << 10) 268 + #define VIU_OSD_BURST_LENGTH_96 (0x1 << 31 | 0x0 << 10) 269 + #define VIU_OSD_BURST_LENGTH_128 (0x1 << 31 | 0x1 << 10) 264 270 265 271 #define VD1_IF0_GEN_REG 0x1a50 266 272 #define VD1_IF0_CANVAS0 0x1a51
+2 -9
drivers/gpu/drm/meson/meson_viu.c
··· 411 411 priv->io_base + _REG(VIU_MISC_CTRL1)); 412 412 } 413 413 414 - static inline uint32_t meson_viu_osd_burst_length_reg(uint32_t length) 415 - { 416 - uint32_t val = (((length & 0x80) % 24) / 12); 417 - 418 - return (((val & 0x3) << 10) | (((val & 0x4) >> 2) << 31)); 419 - } 420 - 421 414 void meson_viu_init(struct meson_drm *priv) 422 415 { 423 416 uint32_t reg; ··· 437 444 VIU_OSD_FIFO_LIMITS(2); /* fifo_lim: 2*16=32 */ 438 445 439 446 if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) 440 - reg |= meson_viu_osd_burst_length_reg(32); 447 + reg |= VIU_OSD_BURST_LENGTH_32; 441 448 else 442 - reg |= meson_viu_osd_burst_length_reg(64); 449 + reg |= VIU_OSD_BURST_LENGTH_64; 443 450 444 451 writel_relaxed(reg, priv->io_base + _REG(VIU_OSD1_FIFO_CTRL_STAT)); 445 452 writel_relaxed(reg, priv->io_base + _REG(VIU_OSD2_FIFO_CTRL_STAT));
+1 -1
drivers/gpu/drm/msm/adreno/a2xx_gpu.c
··· 408 408 struct msm_gem_address_space *aspace; 409 409 410 410 aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M, 411 - SZ_16M + 0xfff * SZ_64K); 411 + 0xfff * SZ_64K); 412 412 413 413 if (IS_ERR(aspace) && !IS_ERR(mmu)) 414 414 mmu->funcs->destroy(mmu);
+1 -1
drivers/gpu/drm/msm/adreno/a6xx_gmu.c
··· 1121 1121 return -ENODEV; 1122 1122 1123 1123 mmu = msm_iommu_new(gmu->dev, domain); 1124 - gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x7fffffff); 1124 + gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000); 1125 1125 if (IS_ERR(gmu->aspace)) { 1126 1126 iommu_domain_free(domain); 1127 1127 return PTR_ERR(gmu->aspace);
+1 -1
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
··· 893 893 #if defined(CONFIG_DRM_MSM_GPU_STATE) 894 894 .gpu_state_get = a6xx_gpu_state_get, 895 895 .gpu_state_put = a6xx_gpu_state_put, 896 - .create_address_space = adreno_iommu_create_address_space, 897 896 #endif 897 + .create_address_space = adreno_iommu_create_address_space, 898 898 }, 899 899 .get_timestamp = a6xx_get_timestamp, 900 900 };
+1 -1
drivers/gpu/drm/msm/adreno/adreno_gpu.c
··· 194 194 struct msm_gem_address_space *aspace; 195 195 196 196 aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M, 197 - 0xfffffff); 197 + 0xffffffff - SZ_16M); 198 198 199 199 if (IS_ERR(aspace) && !IS_ERR(mmu)) 200 200 mmu->funcs->destroy(mmu);
+11 -7
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
··· 521 521 struct dpu_kms *dpu_kms, 522 522 struct drm_display_mode *mode) 523 523 { 524 - struct msm_display_topology topology; 524 + struct msm_display_topology topology = {0}; 525 525 int i, intf_count = 0; 526 526 527 527 for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++) ··· 537 537 * 1 LM, 1 INTF 538 538 * 2 LM, 1 INTF (stream merge to support high resolution interfaces) 539 539 * 540 - * Adding color blocks only to primary interface 540 + * Adding color blocks only to primary interface if available in 541 + * sufficient number 541 542 */ 542 543 if (intf_count == 2) 543 544 topology.num_lm = 2; ··· 547 546 else 548 547 topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1; 549 548 550 - if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI) 551 - topology.num_dspp = topology.num_lm; 549 + if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI) { 550 + if (dpu_kms->catalog->dspp && 551 + (dpu_kms->catalog->dspp_count >= topology.num_lm)) 552 + topology.num_dspp = topology.num_lm; 553 + } 552 554 553 555 topology.num_enc = 0; 554 556 topology.num_intf = intf_count; ··· 2140 2136 2141 2137 dpu_enc = to_dpu_encoder_virt(enc); 2142 2138 2143 - mutex_init(&dpu_enc->enc_lock); 2144 2139 ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info); 2145 2140 if (ret) 2146 2141 goto fail; ··· 2154 2151 0); 2155 2152 2156 2153 2157 - mutex_init(&dpu_enc->rc_lock); 2158 2154 INIT_DELAYED_WORK(&dpu_enc->delayed_off_work, 2159 2155 dpu_encoder_off_work); 2160 2156 dpu_enc->idle_timeout = IDLE_TIMEOUT; ··· 2185 2183 2186 2184 dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL); 2187 2185 if (!dpu_enc) 2188 - return ERR_PTR(ENOMEM); 2186 + return ERR_PTR(-ENOMEM); 2189 2187 2190 2188 rc = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs, 2191 2189 drm_enc_mode, NULL); ··· 2198 2196 2199 2197 spin_lock_init(&dpu_enc->enc_spinlock); 2200 2198 dpu_enc->enabled = false; 2199 + mutex_init(&dpu_enc->enc_lock); 2200 + 
mutex_init(&dpu_enc->rc_lock); 2201 2201 2202 2202 return &dpu_enc->base; 2203 2203 }
+1 -1
drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
··· 780 780 781 781 mmu = msm_iommu_new(dpu_kms->dev->dev, domain); 782 782 aspace = msm_gem_address_space_create(mmu, "dpu1", 783 - 0x1000, 0xfffffff); 783 + 0x1000, 0x100000000 - 0x1000); 784 784 785 785 if (IS_ERR(aspace)) { 786 786 mmu->funcs->destroy(mmu);
+1 -1
drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
··· 514 514 config->iommu); 515 515 516 516 aspace = msm_gem_address_space_create(mmu, 517 - "mdp4", 0x1000, 0xffffffff); 517 + "mdp4", 0x1000, 0x100000000 - 0x1000); 518 518 519 519 if (IS_ERR(aspace)) { 520 520 if (!IS_ERR(mmu))
+1 -1
drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
··· 633 633 mmu = msm_iommu_new(iommu_dev, config->platform.iommu); 634 634 635 635 aspace = msm_gem_address_space_create(mmu, "mdp5", 636 - 0x1000, 0xffffffff); 636 + 0x1000, 0x100000000 - 0x1000); 637 637 638 638 if (IS_ERR(aspace)) { 639 639 if (!IS_ERR(mmu))
+3 -1
drivers/gpu/drm/msm/msm_submitqueue.c
··· 71 71 queue->flags = flags; 72 72 73 73 if (priv->gpu) { 74 - if (prio >= priv->gpu->nr_rings) 74 + if (prio >= priv->gpu->nr_rings) { 75 + kfree(queue); 75 76 return -EINVAL; 77 + } 76 78 77 79 queue->prio = prio; 78 80 }
+3
drivers/gpu/drm/nouveau/dispnv50/disp.c
··· 601 601 (0x0100 << nv_crtc->index), 602 602 }; 603 603 604 + if (!nv_encoder->audio) 605 + return; 606 + 604 607 nv_encoder->audio = false; 605 608 nvif_mthd(&disp->disp->object, 0, &args, sizeof(args)); 606 609
+1 -1
drivers/gpu/drm/nouveau/nouveau_dmem.c
··· 550 550 DMA_BIDIRECTIONAL); 551 551 if (dma_mapping_error(dev, *dma_addr)) 552 552 goto out_free_page; 553 - if (drm->dmem->migrate.copy_func(drm, page_size(spage), 553 + if (drm->dmem->migrate.copy_func(drm, 1, 554 554 NOUVEAU_APER_VRAM, paddr, NOUVEAU_APER_HOST, *dma_addr)) 555 555 goto out_dma_unmap; 556 556 } else {
+1
drivers/gpu/drm/nouveau/nouveau_svm.c
··· 562 562 .end = notifier->notifier.interval_tree.last + 1, 563 563 .pfn_flags_mask = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE, 564 564 .hmm_pfns = hmm_pfns, 565 + .dev_private_owner = drm->dev, 565 566 }; 566 567 struct mm_struct *mm = notifier->notifier.mm; 567 568 int ret;
+2 -2
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
··· 118 118 if (retries) 119 119 udelay(400); 120 120 121 - /* transaction request, wait up to 1ms for it to complete */ 121 + /* transaction request, wait up to 2ms for it to complete */ 122 122 nvkm_wr32(device, 0x00e4e4 + base, 0x00010000 | ctrl); 123 123 124 - timeout = 1000; 124 + timeout = 2000; 125 125 do { 126 126 ctrl = nvkm_rd32(device, 0x00e4e4 + base); 127 127 udelay(1);
+2 -2
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
··· 118 118 if (retries) 119 119 udelay(400); 120 120 121 - /* transaction request, wait up to 1ms for it to complete */ 121 + /* transaction request, wait up to 2ms for it to complete */ 122 122 nvkm_wr32(device, 0x00d954 + base, 0x00010000 | ctrl); 123 123 124 - timeout = 1000; 124 + timeout = 2000; 125 125 do { 126 126 ctrl = nvkm_rd32(device, 0x00d954 + base); 127 127 udelay(1);
+3 -4
drivers/gpu/drm/radeon/ci_dpm.c
··· 5563 5563 if (!rdev->pm.dpm.ps) 5564 5564 return -ENOMEM; 5565 5565 power_state_offset = (u8 *)state_array->states; 5566 + rdev->pm.dpm.num_ps = 0; 5566 5567 for (i = 0; i < state_array->ucNumEntries; i++) { 5567 5568 u8 *idx; 5568 5569 power_state = (union pplib_power_state *)power_state_offset; ··· 5573 5572 if (!rdev->pm.power_state[i].clock_info) 5574 5573 return -EINVAL; 5575 5574 ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL); 5576 - if (ps == NULL) { 5577 - kfree(rdev->pm.dpm.ps); 5575 + if (ps == NULL) 5578 5576 return -ENOMEM; 5579 - } 5580 5577 rdev->pm.dpm.ps[i].ps_priv = ps; 5581 5578 ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i], 5582 5579 non_clock_info, ··· 5596 5597 k++; 5597 5598 } 5598 5599 power_state_offset += 2 + power_state->v2.ucNumDPMLevels; 5600 + rdev->pm.dpm.num_ps = i + 1; 5599 5601 } 5600 - rdev->pm.dpm.num_ps = state_array->ucNumEntries; 5601 5602 5602 5603 /* fill in the vce power states */ 5603 5604 for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
+2 -3
drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
··· 259 259 struct sun4i_hdmi *hdmi = drm_connector_to_sun4i_hdmi(connector); 260 260 unsigned long reg; 261 261 262 - if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_HPD_REG, reg, 263 - reg & SUN4I_HDMI_HPD_HIGH, 264 - 0, 500000)) { 262 + reg = readl(hdmi->base + SUN4I_HDMI_HPD_REG); 263 + if (!(reg & SUN4I_HDMI_HPD_HIGH)) { 265 264 cec_phys_addr_invalidate(hdmi->cec_adap); 266 265 return connector_status_disconnected; 267 266 }
+4 -4
drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
··· 1069 1069 if (new_content_type != SAME_AS_DISPLAY) { 1070 1070 struct vmw_surface_metadata metadata = {0}; 1071 1071 1072 - metadata.base_size.width = hdisplay; 1073 - metadata.base_size.height = vdisplay; 1074 - metadata.base_size.depth = 1; 1075 - 1076 1072 /* 1077 1073 * If content buffer is a buffer object, then we have to 1078 1074 * construct surface info ··· 1099 1103 } else { 1100 1104 metadata = new_vfbs->surface->metadata; 1101 1105 } 1106 + 1107 + metadata.base_size.width = hdisplay; 1108 + metadata.base_size.height = vdisplay; 1109 + metadata.base_size.depth = 1; 1102 1110 1103 1111 if (vps->surf) { 1104 1112 struct drm_vmw_size cur_base_size =
+2
drivers/hid/hid-alps.c
··· 25 25 26 26 #define U1_MOUSE_REPORT_ID 0x01 /* Mouse data ReportID */ 27 27 #define U1_ABSOLUTE_REPORT_ID 0x03 /* Absolute data ReportID */ 28 + #define U1_ABSOLUTE_REPORT_ID_SECD 0x02 /* FW-PTP Absolute data ReportID */ 28 29 #define U1_FEATURE_REPORT_ID 0x05 /* Feature ReportID */ 29 30 #define U1_SP_ABSOLUTE_REPORT_ID 0x06 /* Feature ReportID */ 30 31 ··· 369 368 case U1_FEATURE_REPORT_ID: 370 369 break; 371 370 case U1_ABSOLUTE_REPORT_ID: 371 + case U1_ABSOLUTE_REPORT_ID_SECD: 372 372 for (i = 0; i < hdata->max_fingers; i++) { 373 373 u8 *contact = &data[i * 5]; 374 374
+18
drivers/hid/hid-apple.c
··· 60 60 struct apple_sc { 61 61 unsigned long quirks; 62 62 unsigned int fn_on; 63 + unsigned int fn_found; 63 64 DECLARE_BITMAP(pressed_numlock, KEY_CNT); 64 65 }; 65 66 ··· 366 365 struct hid_field *field, struct hid_usage *usage, 367 366 unsigned long **bit, int *max) 368 367 { 368 + struct apple_sc *asc = hid_get_drvdata(hdev); 369 + 369 370 if (usage->hid == (HID_UP_CUSTOM | 0x0003) || 370 371 usage->hid == (HID_UP_MSVENDOR | 0x0003) || 371 372 usage->hid == (HID_UP_HPVENDOR2 | 0x0003)) { 372 373 /* The fn key on Apple USB keyboards */ 373 374 set_bit(EV_REP, hi->input->evbit); 374 375 hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN); 376 + asc->fn_found = true; 375 377 apple_setup_input(hi->input); 376 378 return 1; 377 379 } ··· 396 392 hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_2); 397 393 else if (usage->code == BTN_2) 398 394 hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_1); 395 + } 396 + 397 + return 0; 398 + } 399 + 400 + static int apple_input_configured(struct hid_device *hdev, 401 + struct hid_input *hidinput) 402 + { 403 + struct apple_sc *asc = hid_get_drvdata(hdev); 404 + 405 + if ((asc->quirks & APPLE_HAS_FN) && !asc->fn_found) { 406 + hid_info(hdev, "Fn key not found (Apple Wireless Keyboard clone?), disabling Fn key handling\n"); 407 + asc->quirks = 0; 399 408 } 400 409 401 410 return 0; ··· 628 611 .event = apple_event, 629 612 .input_mapping = apple_input_mapping, 630 613 .input_mapped = apple_input_mapped, 614 + .input_configured = apple_input_configured, 631 615 }; 632 616 module_hid_driver(apple_driver); 633 617
+3
drivers/hid/hid-ids.h
··· 618 618 #define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081 0xa081 619 619 #define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2 0xa0c2 620 620 #define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A096 0xa096 621 + #define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A293 0xa293 621 622 622 623 #define USB_VENDOR_ID_IMATION 0x0718 623 624 #define USB_DEVICE_ID_DISC_STAKKA 0xd000 ··· 998 997 #define USB_DEVICE_ID_ROCCAT_RYOS_MK_GLOW 0x31ce 999 998 #define USB_DEVICE_ID_ROCCAT_RYOS_MK_PRO 0x3232 1000 999 #define USB_DEVICE_ID_ROCCAT_SAVU 0x2d5a 1000 + 1001 + #define USB_VENDOR_ID_SAI 0x17dd 1001 1002 1002 1003 #define USB_VENDOR_ID_SAITEK 0x06a3 1003 1004 #define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17
+3 -3
drivers/hid/hid-logitech-dj.c
··· 1153 1153 if (!dj_report) 1154 1154 return -ENOMEM; 1155 1155 dj_report->report_id = REPORT_ID_DJ_SHORT; 1156 - dj_report->device_index = 0xFF; 1156 + dj_report->device_index = HIDPP_RECEIVER_INDEX; 1157 1157 dj_report->report_type = REPORT_TYPE_CMD_GET_PAIRED_DEVICES; 1158 1158 retval = logi_dj_recv_send_report(djrcv_dev, dj_report); 1159 1159 kfree(dj_report); ··· 1175 1175 1176 1176 if (djrcv_dev->type == recvr_type_dj) { 1177 1177 dj_report->report_id = REPORT_ID_DJ_SHORT; 1178 - dj_report->device_index = 0xFF; 1178 + dj_report->device_index = HIDPP_RECEIVER_INDEX; 1179 1179 dj_report->report_type = REPORT_TYPE_CMD_SWITCH; 1180 1180 dj_report->report_params[CMD_SWITCH_PARAM_DEVBITFIELD] = 0x3F; 1181 1181 dj_report->report_params[CMD_SWITCH_PARAM_TIMEOUT_SECONDS] = ··· 1204 1204 memset(buf, 0, HIDPP_REPORT_SHORT_LENGTH); 1205 1205 1206 1206 buf[0] = REPORT_ID_HIDPP_SHORT; 1207 - buf[1] = 0xFF; 1207 + buf[1] = HIDPP_RECEIVER_INDEX; 1208 1208 buf[2] = 0x80; 1209 1209 buf[3] = 0x00; 1210 1210 buf[4] = 0x00;
+1 -1
drivers/hid/hid-logitech-hidpp.c
··· 3146 3146 multiplier = 1; 3147 3147 3148 3148 hidpp->vertical_wheel_counter.wheel_multiplier = multiplier; 3149 - hid_info(hidpp->hid_dev, "multiplier = %d\n", multiplier); 3149 + hid_dbg(hidpp->hid_dev, "wheel multiplier = %d\n", multiplier); 3150 3150 return 0; 3151 3151 } 3152 3152
+6
drivers/hid/hid-magicmouse.c
··· 535 535 __set_bit(MSC_RAW, input->mscbit); 536 536 } 537 537 538 + /* 539 + * hid-input may mark device as using autorepeat, but neither 540 + * the trackpad, nor the mouse actually want it. 541 + */ 542 + __clear_bit(EV_REP, input->evbit); 543 + 538 544 return 0; 539 545 } 540 546
+2
drivers/hid/hid-quirks.c
··· 88 88 { HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, 89 89 { HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, 90 90 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A096), HID_QUIRK_NO_INIT_REPORTS }, 91 + { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A293), HID_QUIRK_ALWAYS_POLL }, 91 92 { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A), HID_QUIRK_ALWAYS_POLL }, 92 93 { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A), HID_QUIRK_ALWAYS_POLL }, 93 94 { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL }, ··· 833 832 { HID_USB_DEVICE(USB_VENDOR_ID_PETZL, USB_DEVICE_ID_PETZL_HEADLAMP) }, 834 833 { HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE) }, 835 834 { HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) }, 835 + { HID_USB_DEVICE(USB_VENDOR_ID_SAI, USB_DEVICE_ID_CYPRESS_HIDCOM) }, 836 836 #if IS_ENABLED(CONFIG_MOUSE_SYNAPTICS_USB) 837 837 { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP) }, 838 838 { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_INT_TP) },
+4 -2
drivers/hid/hid-steam.c
··· 526 526 steam_battery_register(steam); 527 527 528 528 mutex_lock(&steam_devices_lock); 529 - list_add(&steam->list, &steam_devices); 529 + if (list_empty(&steam->list)) 530 + list_add(&steam->list, &steam_devices); 530 531 mutex_unlock(&steam_devices_lock); 531 532 } 532 533 ··· 553 552 hid_info(steam->hdev, "Steam Controller '%s' disconnected", 554 553 steam->serial_no); 555 554 mutex_lock(&steam_devices_lock); 556 - list_del(&steam->list); 555 + list_del_init(&steam->list); 557 556 mutex_unlock(&steam_devices_lock); 558 557 steam->serial_no[0] = 0; 559 558 } ··· 739 738 mutex_init(&steam->mutex); 740 739 steam->quirks = id->driver_data; 741 740 INIT_WORK(&steam->work_connect, steam_work_connect_cb); 741 + INIT_LIST_HEAD(&steam->list); 742 742 743 743 steam->client_hdev = steam_create_client_hid(hdev); 744 744 if (IS_ERR(steam->client_hdev)) {
+8
drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
··· 374 374 .driver_data = (void *)&sipodev_desc 375 375 }, 376 376 { 377 + .ident = "Mediacom FlexBook edge 13", 378 + .matches = { 379 + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "MEDIACOM"), 380 + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "FlexBook_edge13-M-FBE13"), 381 + }, 382 + .driver_data = (void *)&sipodev_desc 383 + }, 384 + { 377 385 .ident = "Odys Winbook 13", 378 386 .matches = { 379 387 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AXDIA International GmbH"),
+1 -1
drivers/hv/vmbus_drv.c
··· 1368 1368 * Write dump contents to the page. No need to synchronize; panic should 1369 1369 * be single-threaded. 1370 1370 */ 1371 - kmsg_dump_get_buffer(dumper, true, hv_panic_page, HV_HYP_PAGE_SIZE, 1371 + kmsg_dump_get_buffer(dumper, false, hv_panic_page, HV_HYP_PAGE_SIZE, 1372 1372 &bytes_written); 1373 1373 if (bytes_written) 1374 1374 hyperv_report_panic_msg(panic_pa, bytes_written);
+3 -1
drivers/hwmon/acpi_power_meter.c
··· 883 883 884 884 res = setup_attrs(resource); 885 885 if (res) 886 - goto exit_free; 886 + goto exit_free_capability; 887 887 888 888 resource->hwmon_dev = hwmon_device_register(&device->dev); 889 889 if (IS_ERR(resource->hwmon_dev)) { ··· 896 896 897 897 exit_remove: 898 898 remove_attrs(resource); 899 + exit_free_capability: 900 + free_capabilities(resource); 899 901 exit_free: 900 902 kfree(resource); 901 903 exit:
+1 -1
drivers/hwmon/amd_energy.c
··· 362 362 static struct platform_device *amd_energy_platdev; 363 363 364 364 static const struct x86_cpu_id cpu_ids[] __initconst = { 365 - X86_MATCH_VENDOR_FAM(AMD, 0x17, NULL), 365 + X86_MATCH_VENDOR_FAM_MODEL(AMD, 0x17, 0x31, NULL), 366 366 {} 367 367 }; 368 368 MODULE_DEVICE_TABLE(x86cpu, cpu_ids);
+2
drivers/hwmon/aspeed-pwm-tacho.c
··· 851 851 ret = of_property_read_u32(child, "reg", &pwm_port); 852 852 if (ret) 853 853 return ret; 854 + if (pwm_port >= ARRAY_SIZE(pwm_port_params)) 855 + return -EINVAL; 854 856 aspeed_create_pwm_port(priv, (u8)pwm_port); 855 857 856 858 ret = of_property_count_u8_elems(child, "cooling-levels");
+6 -6
drivers/hwmon/bt1-pvt.c
··· 64 64 * 48380, 65 65 * where T = [-48380, 147438] mC and N = [0, 1023]. 66 66 */ 67 - static const struct pvt_poly poly_temp_to_N = { 67 + static const struct pvt_poly __maybe_unused poly_temp_to_N = { 68 68 .total_divider = 10000, 69 69 .terms = { 70 70 {4, 18322, 10000, 10000}, ··· 96 96 * N = (18658e-3*V - 11572) / 10, 97 97 * V = N * 10^5 / 18658 + 11572 * 10^4 / 18658. 98 98 */ 99 - static const struct pvt_poly poly_volt_to_N = { 99 + static const struct pvt_poly __maybe_unused poly_volt_to_N = { 100 100 .total_divider = 10, 101 101 .terms = { 102 102 {1, 18658, 1000, 1}, ··· 300 300 return IRQ_HANDLED; 301 301 } 302 302 303 - inline umode_t pvt_limit_is_visible(enum pvt_sensor_type type) 303 + static inline umode_t pvt_limit_is_visible(enum pvt_sensor_type type) 304 304 { 305 305 return 0644; 306 306 } 307 307 308 - inline umode_t pvt_alarm_is_visible(enum pvt_sensor_type type) 308 + static inline umode_t pvt_alarm_is_visible(enum pvt_sensor_type type) 309 309 { 310 310 return 0444; 311 311 } ··· 462 462 463 463 #define pvt_soft_isr NULL 464 464 465 - inline umode_t pvt_limit_is_visible(enum pvt_sensor_type type) 465 + static inline umode_t pvt_limit_is_visible(enum pvt_sensor_type type) 466 466 { 467 467 return 0; 468 468 } 469 469 470 - inline umode_t pvt_alarm_is_visible(enum pvt_sensor_type type) 470 + static inline umode_t pvt_alarm_is_visible(enum pvt_sensor_type type) 471 471 { 472 472 return 0; 473 473 }
+43
drivers/hwmon/drivetemp.c
··· 285 285 return err; 286 286 } 287 287 288 + static const char * const sct_avoid_models[] = { 289 + /* 290 + * These drives will have WRITE FPDMA QUEUED command timeouts and sometimes just 291 + * freeze until power-cycled under heavy write loads when their temperature is 292 + * getting polled in SCT mode. The SMART mode seems to be fine, though. 293 + * 294 + * While only the 3 TB model (DT01ACA3) was actually caught exhibiting the 295 + * problem let's play safe here to avoid data corruption and ban the whole 296 + * DT01ACAx family. 297 + 298 + * The models from this array are prefix-matched. 299 + */ 300 + "TOSHIBA DT01ACA", 301 + }; 302 + 303 + static bool drivetemp_sct_avoid(struct drivetemp_data *st) 304 + { 305 + struct scsi_device *sdev = st->sdev; 306 + unsigned int ctr; 307 + 308 + if (!sdev->model) 309 + return false; 310 + 311 + /* 312 + * The "model" field contains just the raw SCSI INQUIRY response 313 + * "product identification" field, which has a width of 16 bytes. 314 + * This field is space-filled, but is NOT NULL-terminated. 315 + */ 316 + for (ctr = 0; ctr < ARRAY_SIZE(sct_avoid_models); ctr++) 317 + if (!strncmp(sdev->model, sct_avoid_models[ctr], 318 + strlen(sct_avoid_models[ctr]))) 319 + return true; 320 + 321 + return false; 322 + } 323 + 288 324 static int drivetemp_identify_sata(struct drivetemp_data *st) 289 325 { 290 326 struct scsi_device *sdev = st->sdev; ··· 362 326 /* bail out if this is not a SATA device */ 363 327 if (!is_ata || !is_sata) 364 328 return -ENODEV; 329 + 330 + if (have_sct && drivetemp_sct_avoid(st)) { 331 + dev_notice(&sdev->sdev_gendev, 332 + "will avoid using SCT for temperature monitoring\n"); 333 + have_sct = false; 334 + } 335 + 365 336 if (!have_sct) 366 337 goto skip_sct; 367 338
+1 -1
drivers/hwmon/emc2103.c
··· 443 443 } 444 444 445 445 result = read_u8_from_i2c(client, REG_FAN_CONF1, &conf_reg); 446 - if (result) { 446 + if (result < 0) { 447 447 count = result; 448 448 goto err; 449 449 }
+4 -3
drivers/hwmon/max6697.c
··· 38 38 * Map device tree / platform data register bit map to chip bit map. 39 39 * Applies to alert register and over-temperature register. 40 40 */ 41 - #define MAX6697_MAP_BITS(reg) ((((reg) & 0x7e) >> 1) | \ 41 + #define MAX6697_ALERT_MAP_BITS(reg) ((((reg) & 0x7e) >> 1) | \ 42 42 (((reg) & 0x01) << 6) | ((reg) & 0x80)) 43 + #define MAX6697_OVERT_MAP_BITS(reg) (((reg) >> 1) | (((reg) & 0x01) << 7)) 43 44 44 45 #define MAX6697_REG_STAT(n) (0x44 + (n)) 45 46 ··· 563 562 return ret; 564 563 565 564 ret = i2c_smbus_write_byte_data(client, MAX6697_REG_ALERT_MASK, 566 - MAX6697_MAP_BITS(pdata->alert_mask)); 565 + MAX6697_ALERT_MAP_BITS(pdata->alert_mask)); 567 566 if (ret < 0) 568 567 return ret; 569 568 570 569 ret = i2c_smbus_write_byte_data(client, MAX6697_REG_OVERT_MASK, 571 - MAX6697_MAP_BITS(pdata->over_temperature_mask)); 570 + MAX6697_OVERT_MAP_BITS(pdata->over_temperature_mask)); 572 571 if (ret < 0) 573 572 return ret; 574 573
+3 -3
drivers/hwmon/nct6775.c
··· 786 786 "Agent1 Dimm1", 787 787 "BYTE_TEMP0", 788 788 "BYTE_TEMP1", 789 - "", 790 - "", 789 + "PECI Agent 0 Calibration", /* undocumented */ 790 + "PECI Agent 1 Calibration", /* undocumented */ 791 791 "", 792 792 "Virtual_TEMP" 793 793 }; 794 794 795 - #define NCT6798_TEMP_MASK 0x8fff0ffe 795 + #define NCT6798_TEMP_MASK 0xbfff0ffe 796 796 #define NCT6798_VIRT_TEMP_MASK 0x80000c00 797 797 798 798 /* NCT6102D/NCT6106D specific data */
+1 -1
drivers/hwmon/pmbus/Kconfig
··· 71 71 Infineon IR35221 controller. 72 72 73 73 This driver can also be built as a module. If so, the module will 74 - be called ir35521. 74 + be called ir35221. 75 75 76 76 config SENSORS_IR38064 77 77 tristate "Infineon IR38064"
+8 -2
drivers/hwmon/pmbus/adm1275.c
··· 465 465 static int adm1275_probe(struct i2c_client *client, 466 466 const struct i2c_device_id *id) 467 467 { 468 + s32 (*config_read_fn)(const struct i2c_client *client, u8 reg); 468 469 u8 block_buffer[I2C_SMBUS_BLOCK_MAX + 1]; 469 470 int config, device_config; 470 471 int ret; ··· 511 510 "Device mismatch: Configured %s, detected %s\n", 512 511 id->name, mid->name); 513 512 514 - config = i2c_smbus_read_byte_data(client, ADM1275_PMON_CONFIG); 513 + if (mid->driver_data == adm1272 || mid->driver_data == adm1278 || 514 + mid->driver_data == adm1293 || mid->driver_data == adm1294) 515 + config_read_fn = i2c_smbus_read_word_data; 516 + else 517 + config_read_fn = i2c_smbus_read_byte_data; 518 + config = config_read_fn(client, ADM1275_PMON_CONFIG); 515 519 if (config < 0) 516 520 return config; 517 521 518 - device_config = i2c_smbus_read_byte_data(client, ADM1275_DEVICE_CONFIG); 522 + device_config = config_read_fn(client, ADM1275_DEVICE_CONFIG); 519 523 if (device_config < 0) 520 524 return device_config; 521 525
+4 -4
drivers/hwmon/pmbus/pmbus_core.c
··· 1869 1869 struct pmbus_sensor *sensor; 1870 1870 1871 1871 sensor = pmbus_add_sensor(data, "fan", "target", index, page, 1872 - PMBUS_VIRT_FAN_TARGET_1 + id, 0xff, PSC_FAN, 1872 + 0xff, PMBUS_VIRT_FAN_TARGET_1 + id, PSC_FAN, 1873 1873 false, false, true); 1874 1874 1875 1875 if (!sensor) ··· 1880 1880 return 0; 1881 1881 1882 1882 sensor = pmbus_add_sensor(data, "pwm", NULL, index, page, 1883 - PMBUS_VIRT_PWM_1 + id, 0xff, PSC_PWM, 1883 + 0xff, PMBUS_VIRT_PWM_1 + id, PSC_PWM, 1884 1884 false, false, true); 1885 1885 1886 1886 if (!sensor) 1887 1887 return -ENOMEM; 1888 1888 1889 1889 sensor = pmbus_add_sensor(data, "pwm", "enable", index, page, 1890 - PMBUS_VIRT_PWM_ENABLE_1 + id, 0xff, PSC_PWM, 1890 + 0xff, PMBUS_VIRT_PWM_ENABLE_1 + id, PSC_PWM, 1891 1891 true, false, false); 1892 1892 1893 1893 if (!sensor) ··· 1929 1929 continue; 1930 1930 1931 1931 if (pmbus_add_sensor(data, "fan", "input", index, 1932 - page, pmbus_fan_registers[f], 0xff, 1932 + page, 0xff, pmbus_fan_registers[f], 1933 1933 PSC_FAN, true, true, true) == NULL) 1934 1934 return -ENOMEM; 1935 1935
+1 -1
drivers/hwmon/scmi-hwmon.c
··· 147 147 [ENERGY] = hwmon_energy, 148 148 }; 149 149 150 - static u32 hwmon_attributes[] = { 150 + static u32 hwmon_attributes[hwmon_max] = { 151 151 [hwmon_chip] = HWMON_C_REGISTER_TZ, 152 152 [hwmon_temp] = HWMON_T_INPUT | HWMON_T_LABEL, 153 153 [hwmon_in] = HWMON_I_INPUT | HWMON_I_LABEL,
+54 -42
drivers/hwtracing/coresight/coresight-cti.c
··· 747 747 return 0; 748 748 } 749 749 750 + static int cti_pm_setup(struct cti_drvdata *drvdata) 751 + { 752 + int ret; 753 + 754 + if (drvdata->ctidev.cpu == -1) 755 + return 0; 756 + 757 + if (nr_cti_cpu) 758 + goto done; 759 + 760 + cpus_read_lock(); 761 + ret = cpuhp_setup_state_nocalls_cpuslocked( 762 + CPUHP_AP_ARM_CORESIGHT_CTI_STARTING, 763 + "arm/coresight_cti:starting", 764 + cti_starting_cpu, cti_dying_cpu); 765 + if (ret) { 766 + cpus_read_unlock(); 767 + return ret; 768 + } 769 + 770 + ret = cpu_pm_register_notifier(&cti_cpu_pm_nb); 771 + cpus_read_unlock(); 772 + if (ret) { 773 + cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_CTI_STARTING); 774 + return ret; 775 + } 776 + 777 + done: 778 + nr_cti_cpu++; 779 + cti_cpu_drvdata[drvdata->ctidev.cpu] = drvdata; 780 + 781 + return 0; 782 + } 783 + 750 784 /* release PM registrations */ 751 785 static void cti_pm_release(struct cti_drvdata *drvdata) 752 786 { 753 - if (drvdata->ctidev.cpu >= 0) { 754 - if (--nr_cti_cpu == 0) { 755 - cpu_pm_unregister_notifier(&cti_cpu_pm_nb); 787 + if (drvdata->ctidev.cpu == -1) 788 + return; 756 789 757 - cpuhp_remove_state_nocalls( 758 - CPUHP_AP_ARM_CORESIGHT_CTI_STARTING); 759 - } 760 - cti_cpu_drvdata[drvdata->ctidev.cpu] = NULL; 790 + cti_cpu_drvdata[drvdata->ctidev.cpu] = NULL; 791 + if (--nr_cti_cpu == 0) { 792 + cpu_pm_unregister_notifier(&cti_cpu_pm_nb); 793 + cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_CTI_STARTING); 761 794 } 762 795 } 763 796 ··· 856 823 857 824 /* driver data*/ 858 825 drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL); 859 - if (!drvdata) { 860 - ret = -ENOMEM; 861 - dev_info(dev, "%s, mem err\n", __func__); 862 - goto err_out; 863 - } 826 + if (!drvdata) 827 + return -ENOMEM; 864 828 865 829 /* Validity for the resource is already checked by the AMBA core */ 866 830 base = devm_ioremap_resource(dev, res); 867 - if (IS_ERR(base)) { 868 - ret = PTR_ERR(base); 869 - dev_err(dev, "%s, remap err\n", __func__); 870 - goto err_out; 
871 - } 831 + if (IS_ERR(base)) 832 + return PTR_ERR(base); 833 + 872 834 drvdata->base = base; 873 835 874 836 dev_set_drvdata(dev, drvdata); ··· 882 854 pdata = coresight_cti_get_platform_data(dev); 883 855 if (IS_ERR(pdata)) { 884 856 dev_err(dev, "coresight_cti_get_platform_data err\n"); 885 - ret = PTR_ERR(pdata); 886 - goto err_out; 857 + return PTR_ERR(pdata); 887 858 } 888 859 889 860 /* default to powered - could change on PM notifications */ ··· 894 867 drvdata->ctidev.cpu); 895 868 else 896 869 cti_desc.name = coresight_alloc_device_name(&cti_sys_devs, dev); 897 - if (!cti_desc.name) { 898 - ret = -ENOMEM; 899 - goto err_out; 900 - } 870 + if (!cti_desc.name) 871 + return -ENOMEM; 901 872 902 873 /* setup CPU power management handling for CPU bound CTI devices. */ 903 - if (drvdata->ctidev.cpu >= 0) { 904 - cti_cpu_drvdata[drvdata->ctidev.cpu] = drvdata; 905 - if (!nr_cti_cpu++) { 906 - cpus_read_lock(); 907 - ret = cpuhp_setup_state_nocalls_cpuslocked( 908 - CPUHP_AP_ARM_CORESIGHT_CTI_STARTING, 909 - "arm/coresight_cti:starting", 910 - cti_starting_cpu, cti_dying_cpu); 911 - 912 - if (!ret) 913 - ret = cpu_pm_register_notifier(&cti_cpu_pm_nb); 914 - cpus_read_unlock(); 915 - if (ret) 916 - goto err_out; 917 - } 918 - } 874 + ret = cti_pm_setup(drvdata); 875 + if (ret) 876 + return ret; 919 877 920 878 /* create dynamic attributes for connections */ 921 879 ret = cti_create_cons_sysfs(dev, drvdata); 922 880 if (ret) { 923 881 dev_err(dev, "%s: create dynamic sysfs entries failed\n", 924 882 cti_desc.name); 925 - goto err_out; 883 + goto pm_release; 926 884 } 927 885 928 886 /* set up coresight component description */ ··· 920 908 drvdata->csdev = coresight_register(&cti_desc); 921 909 if (IS_ERR(drvdata->csdev)) { 922 910 ret = PTR_ERR(drvdata->csdev); 923 - goto err_out; 911 + goto pm_release; 924 912 } 925 913 926 914 /* add to list of CTI devices */ ··· 939 927 dev_info(&drvdata->csdev->dev, "CTI initialized\n"); 940 928 return 0; 941 929 942 - 
err_out: 930 + pm_release: 943 931 cti_pm_release(drvdata); 944 932 return ret; 945 933 }
+54 -30
drivers/hwtracing/coresight/coresight-etm4x.c
··· 1388 1388 .notifier_call = etm4_cpu_pm_notify, 1389 1389 }; 1390 1390 1391 - static int etm4_cpu_pm_register(void) 1391 + /* Setup PM. Called with cpus locked. Deals with error conditions and counts */ 1392 + static int etm4_pm_setup_cpuslocked(void) 1392 1393 { 1393 - if (IS_ENABLED(CONFIG_CPU_PM)) 1394 - return cpu_pm_register_notifier(&etm4_cpu_pm_nb); 1394 + int ret; 1395 1395 1396 - return 0; 1396 + if (etm4_count++) 1397 + return 0; 1398 + 1399 + ret = cpu_pm_register_notifier(&etm4_cpu_pm_nb); 1400 + if (ret) 1401 + goto reduce_count; 1402 + 1403 + ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING, 1404 + "arm/coresight4:starting", 1405 + etm4_starting_cpu, etm4_dying_cpu); 1406 + 1407 + if (ret) 1408 + goto unregister_notifier; 1409 + 1410 + ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN, 1411 + "arm/coresight4:online", 1412 + etm4_online_cpu, NULL); 1413 + 1414 + /* HP dyn state ID returned in ret on success */ 1415 + if (ret > 0) { 1416 + hp_online = ret; 1417 + return 0; 1418 + } 1419 + 1420 + /* failed dyn state - remove others */ 1421 + cpuhp_remove_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING); 1422 + 1423 + unregister_notifier: 1424 + cpu_pm_unregister_notifier(&etm4_cpu_pm_nb); 1425 + 1426 + reduce_count: 1427 + --etm4_count; 1428 + return ret; 1397 1429 } 1398 1430 1399 - static void etm4_cpu_pm_unregister(void) 1431 + static void etm4_pm_clear(void) 1400 1432 { 1401 - if (IS_ENABLED(CONFIG_CPU_PM)) 1402 - cpu_pm_unregister_notifier(&etm4_cpu_pm_nb); 1433 + if (--etm4_count != 0) 1434 + return; 1435 + 1436 + cpu_pm_unregister_notifier(&etm4_cpu_pm_nb); 1437 + cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING); 1438 + if (hp_online) { 1439 + cpuhp_remove_state_nocalls(hp_online); 1440 + hp_online = 0; 1441 + } 1403 1442 } 1404 1443 1405 1444 static int etm4_probe(struct amba_device *adev, const struct amba_id *id) ··· 1492 1453 etm4_init_arch_data, drvdata, 1)) 1493 1454 dev_err(dev, 
"ETM arch init failed\n"); 1494 1455 1495 - if (!etm4_count++) { 1496 - cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING, 1497 - "arm/coresight4:starting", 1498 - etm4_starting_cpu, etm4_dying_cpu); 1499 - ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN, 1500 - "arm/coresight4:online", 1501 - etm4_online_cpu, NULL); 1502 - if (ret < 0) 1503 - goto err_arch_supported; 1504 - hp_online = ret; 1505 - 1506 - ret = etm4_cpu_pm_register(); 1507 - if (ret) 1508 - goto err_arch_supported; 1509 - } 1510 - 1456 + ret = etm4_pm_setup_cpuslocked(); 1511 1457 cpus_read_unlock(); 1458 + 1459 + /* etm4_pm_setup_cpuslocked() does its own cleanup - exit on error */ 1460 + if (ret) { 1461 + etmdrvdata[drvdata->cpu] = NULL; 1462 + return ret; 1463 + } 1512 1464 1513 1465 if (etm4_arch_supported(drvdata->arch) == false) { 1514 1466 ret = -EINVAL; ··· 1547 1517 1548 1518 err_arch_supported: 1549 1519 etmdrvdata[drvdata->cpu] = NULL; 1550 - if (--etm4_count == 0) { 1551 - etm4_cpu_pm_unregister(); 1552 - 1553 - cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING); 1554 - if (hp_online) 1555 - cpuhp_remove_state_nocalls(hp_online); 1556 - } 1520 + etm4_pm_clear(); 1557 1521 return ret; 1558 1522 } 1559 1523
+18 -3
drivers/hwtracing/intel_th/core.c
··· 1021 1021 { 1022 1022 struct intel_th_device *hub = to_intel_th_hub(thdev); 1023 1023 struct intel_th_driver *hubdrv = to_intel_th_driver(hub->dev.driver); 1024 + int ret; 1024 1025 1025 1026 /* In host mode, this is up to the external debugger, do nothing. */ 1026 1027 if (hub->host_mode) 1027 1028 return 0; 1028 1029 1029 - if (!hubdrv->set_output) 1030 - return -ENOTSUPP; 1030 + /* 1031 + * hub is instantiated together with the source device that 1032 + * calls here, so guaranteed to be present. 1033 + */ 1034 + hubdrv = to_intel_th_driver(hub->dev.driver); 1035 + if (!hubdrv || !try_module_get(hubdrv->driver.owner)) 1036 + return -EINVAL; 1031 1037 1032 - return hubdrv->set_output(hub, master); 1038 + if (!hubdrv->set_output) { 1039 + ret = -ENOTSUPP; 1040 + goto out; 1041 + } 1042 + 1043 + ret = hubdrv->set_output(hub, master); 1044 + 1045 + out: 1046 + module_put(hubdrv->driver.owner); 1047 + return ret; 1033 1048 } 1034 1049 EXPORT_SYMBOL_GPL(intel_th_set_output); 1035 1050
+15
drivers/hwtracing/intel_th/pci.c
··· 234 234 .driver_data = (kernel_ulong_t)&intel_th_2x, 235 235 }, 236 236 { 237 + /* Tiger Lake PCH-H */ 238 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x43a6), 239 + .driver_data = (kernel_ulong_t)&intel_th_2x, 240 + }, 241 + { 237 242 /* Jasper Lake PCH */ 238 243 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4da6), 244 + .driver_data = (kernel_ulong_t)&intel_th_2x, 245 + }, 246 + { 247 + /* Jasper Lake CPU */ 248 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4e29), 239 249 .driver_data = (kernel_ulong_t)&intel_th_2x, 240 250 }, 241 251 { ··· 256 246 { 257 247 /* Elkhart Lake */ 258 248 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4b26), 249 + .driver_data = (kernel_ulong_t)&intel_th_2x, 250 + }, 251 + { 252 + /* Emmitsburg PCH */ 253 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1bcc), 259 254 .driver_data = (kernel_ulong_t)&intel_th_2x, 260 255 }, 261 256 { 0 },
+1 -3
drivers/hwtracing/intel_th/sth.c
··· 161 161 { 162 162 struct sth_device *sth = container_of(stm_data, struct sth_device, stm); 163 163 164 - intel_th_set_output(to_intel_th_device(sth->dev), master); 165 - 166 - return 0; 164 + return intel_th_set_output(to_intel_th_device(sth->dev), master); 167 165 } 168 166 169 167 static int intel_th_sw_init(struct sth_device *sth)
+7
drivers/i2c/Kconfig
··· 113 113 114 114 config I2C_SLAVE 115 115 bool "I2C slave support" 116 + help 117 + This enables Linux to act as an I2C slave device. Note that your I2C 118 + bus master driver also needs to support this functionality. Please 119 + read Documentation/i2c/slave-interface.rst for further details. 116 120 117 121 if I2C_SLAVE 118 122 119 123 config I2C_SLAVE_EEPROM 120 124 tristate "I2C eeprom slave driver" 125 + help 126 + This backend makes Linux behave like an I2C EEPROM. Please read 127 + Documentation/i2c/slave-eeprom-backend.rst for further details. 121 128 122 129 endif 123 130
+2 -1
drivers/i2c/algos/i2c-algo-pca.c
··· 314 314 DEB2("BUS ERROR - SDA Stuck low\n"); 315 315 pca_reset(adap); 316 316 goto out; 317 - case 0x90: /* Bus error - SCL stuck low */ 317 + case 0x78: /* Bus error - SCL stuck low (PCA9665) */ 318 + case 0x90: /* Bus error - SCL stuck low (PCA9564) */ 318 319 DEB2("BUS ERROR - SCL Stuck low\n"); 319 320 pca_reset(adap); 320 321 goto out;
+12 -16
drivers/i2c/busses/i2c-cadence.c
··· 421 421 /* Read data if receive data valid is set */ 422 422 while (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) & 423 423 CDNS_I2C_SR_RXDV) { 424 - /* 425 - * Clear hold bit that was set for FIFO control if 426 - * RX data left is less than FIFO depth, unless 427 - * repeated start is selected. 428 - */ 429 - if ((id->recv_count < CDNS_I2C_FIFO_DEPTH) && 430 - !id->bus_hold_flag) 431 - cdns_i2c_clear_bus_hold(id); 432 - 433 424 if (id->recv_count > 0) { 434 425 *(id->p_recv_buf)++ = 435 426 cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET); 436 427 id->recv_count--; 437 428 id->curr_recv_count--; 429 + 430 + /* 431 + * Clear hold bit that was set for FIFO control 432 + * if RX data left is less than or equal to 433 + * FIFO DEPTH unless repeated start is selected 434 + */ 435 + if (id->recv_count <= CDNS_I2C_FIFO_DEPTH && 436 + !id->bus_hold_flag) 437 + cdns_i2c_clear_bus_hold(id); 438 + 438 439 } else { 439 440 dev_err(id->adap.dev.parent, 440 441 "xfer_size reg rollover. xfer aborted!\n"); ··· 595 594 * Check for the message size against FIFO depth and set the 596 595 * 'hold bus' bit if it is greater than FIFO depth. 597 596 */ 598 - if ((id->recv_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag) 597 + if (id->recv_count > CDNS_I2C_FIFO_DEPTH) 599 598 ctrl_reg |= CDNS_I2C_CR_HOLD; 600 - else 601 - ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD; 602 599 603 600 cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); 604 601 ··· 653 654 * Check for the message size against FIFO depth and set the 654 655 * 'hold bus' bit if it is greater than FIFO depth. 655 656 */ 656 - if ((id->send_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag) 657 + if (id->send_count > CDNS_I2C_FIFO_DEPTH) 657 658 ctrl_reg |= CDNS_I2C_CR_HOLD; 658 - else 659 - ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD; 660 - 661 659 cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); 662 660 663 661 /* Clear the interrupts in interrupt status register. */
+14 -1
drivers/i2c/busses/i2c-designware-platdrv.c
··· 12 12 #include <linux/clk-provider.h> 13 13 #include <linux/clk.h> 14 14 #include <linux/delay.h> 15 + #include <linux/dmi.h> 15 16 #include <linux/err.h> 16 17 #include <linux/errno.h> 17 18 #include <linux/i2c.h> ··· 192 191 return ret; 193 192 } 194 193 194 + static const struct dmi_system_id dw_i2c_hwmon_class_dmi[] = { 195 + { 196 + .ident = "Qtechnology QT5222", 197 + .matches = { 198 + DMI_MATCH(DMI_SYS_VENDOR, "Qtechnology"), 199 + DMI_MATCH(DMI_PRODUCT_NAME, "QT5222"), 200 + }, 201 + }, 202 + { } /* terminate list */ 203 + }; 204 + 195 205 static int dw_i2c_plat_probe(struct platform_device *pdev) 196 206 { 197 207 struct dw_i2c_platform_data *pdata = dev_get_platdata(&pdev->dev); ··· 279 267 280 268 adap = &dev->adapter; 281 269 adap->owner = THIS_MODULE; 282 - adap->class = I2C_CLASS_DEPRECATED; 270 + adap->class = dmi_check_system(dw_i2c_hwmon_class_dmi) ? 271 + I2C_CLASS_HWMON : I2C_CLASS_DEPRECATED; 283 272 ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev)); 284 273 adap->dev.of_node = pdev->dev.of_node; 285 274 adap->nr = -1;
+1
drivers/i2c/busses/i2c-eg20t.c
··· 180 180 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_I2C), 1, }, 181 181 {0,} 182 182 }; 183 + MODULE_DEVICE_TABLE(pci, pch_pcidev_id); 183 184 184 185 static irqreturn_t pch_i2c_handler(int irq, void *pData); 185 186
+2 -2
drivers/i2c/busses/i2c-mlxcpld.c
··· 337 337 if (priv->smbus_block && (val & MLXCPLD_I2C_SMBUS_BLK_BIT)) { 338 338 mlxcpld_i2c_read_comm(priv, MLXCPLD_LPCI2C_NUM_DAT_REG, 339 339 &datalen, 1); 340 - if (unlikely(datalen > (I2C_SMBUS_BLOCK_MAX + 1))) { 340 + if (unlikely(datalen > I2C_SMBUS_BLOCK_MAX)) { 341 341 dev_err(priv->dev, "Incorrect smbus block read message len\n"); 342 - return -E2BIG; 342 + return -EPROTO; 343 343 } 344 344 } else { 345 345 datalen = priv->xfer.data_len;
+4 -2
drivers/i2c/busses/i2c-qcom-geni.c
··· 367 367 geni_se_select_mode(se, GENI_SE_FIFO); 368 368 369 369 writel_relaxed(len, se->base + SE_I2C_RX_TRANS_LEN); 370 - geni_se_setup_m_cmd(se, I2C_READ, m_param); 371 370 372 371 if (dma_buf && geni_se_rx_dma_prep(se, dma_buf, len, &rx_dma)) { 373 372 geni_se_select_mode(se, GENI_SE_FIFO); 374 373 i2c_put_dma_safe_msg_buf(dma_buf, msg, false); 375 374 dma_buf = NULL; 376 375 } 376 + 377 + geni_se_setup_m_cmd(se, I2C_READ, m_param); 377 378 378 379 time_left = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT); 379 380 if (!time_left) ··· 409 408 geni_se_select_mode(se, GENI_SE_FIFO); 410 409 411 410 writel_relaxed(len, se->base + SE_I2C_TX_TRANS_LEN); 412 - geni_se_setup_m_cmd(se, I2C_WRITE, m_param); 413 411 414 412 if (dma_buf && geni_se_tx_dma_prep(se, dma_buf, len, &tx_dma)) { 415 413 geni_se_select_mode(se, GENI_SE_FIFO); 416 414 i2c_put_dma_safe_msg_buf(dma_buf, msg, false); 417 415 dma_buf = NULL; 418 416 } 417 + 418 + geni_se_setup_m_cmd(se, I2C_WRITE, m_param); 419 419 420 420 if (!dma_buf) /* Get FIFO IRQ */ 421 421 writel_relaxed(1, se->base + SE_GENI_TX_WATERMARK_REG);
+3
drivers/i2c/busses/i2c-rcar.c
··· 869 869 /* disable irqs and ensure none is running before clearing ptr */ 870 870 rcar_i2c_write(priv, ICSIER, 0); 871 871 rcar_i2c_write(priv, ICSCR, 0); 872 + rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */ 872 873 873 874 synchronize_irq(priv->irq); 874 875 priv->slave = NULL; ··· 970 969 ret = rcar_i2c_clock_calculate(priv); 971 970 if (ret < 0) 972 971 goto out_pm_put; 972 + 973 + rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */ 973 974 974 975 if (priv->devtype == I2C_RCAR_GEN3) { 975 976 priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+4 -1
drivers/iio/accel/mma8452.c
··· 1685 1685 1686 1686 ret = mma8452_set_freefall_mode(data, false); 1687 1687 if (ret < 0) 1688 - goto buffer_cleanup; 1688 + goto unregister_device; 1689 1689 1690 1690 return 0; 1691 + 1692 + unregister_device: 1693 + iio_device_unregister(indio_dev); 1691 1694 1692 1695 buffer_cleanup: 1693 1696 iio_triggered_buffer_cleanup(indio_dev);
+1 -1
drivers/iio/adc/ad7780.c
··· 329 329 330 330 ret = ad7780_init_gpios(&spi->dev, st); 331 331 if (ret) 332 - goto error_cleanup_buffer_and_trigger; 332 + return ret; 333 333 334 334 st->reg = devm_regulator_get(&spi->dev, "avdd"); 335 335 if (IS_ERR(st->reg))
+2 -2
drivers/iio/adc/adi-axi-adc.c
··· 332 332 if (cl->dev->of_node != cln) 333 333 continue; 334 334 335 - if (!try_module_get(dev->driver->owner)) { 335 + if (!try_module_get(cl->dev->driver->owner)) { 336 336 mutex_unlock(&registered_clients_lock); 337 337 return ERR_PTR(-ENODEV); 338 338 } 339 339 340 - get_device(dev); 340 + get_device(cl->dev); 341 341 cl->info = info; 342 342 mutex_unlock(&registered_clients_lock); 343 343 return cl;
+6 -3
drivers/iio/health/afe4403.c
··· 65 65 * @regulator: Pointer to the regulator for the IC 66 66 * @trig: IIO trigger for this device 67 67 * @irq: ADC_RDY line interrupt number 68 + * @buffer: Used to construct data layout to push into IIO buffer. 68 69 */ 69 70 struct afe4403_data { 70 71 struct device *dev; ··· 75 74 struct regulator *regulator; 76 75 struct iio_trigger *trig; 77 76 int irq; 77 + /* Ensure suitable alignment for timestamp */ 78 + s32 buffer[8] __aligned(8); 78 79 }; 79 80 80 81 enum afe4403_chan_id { ··· 312 309 struct iio_dev *indio_dev = pf->indio_dev; 313 310 struct afe4403_data *afe = iio_priv(indio_dev); 314 311 int ret, bit, i = 0; 315 - s32 buffer[8]; 316 312 u8 tx[4] = {AFE440X_CONTROL0, 0x0, 0x0, AFE440X_CONTROL0_READ}; 317 313 u8 rx[3]; 318 314 ··· 328 326 if (ret) 329 327 goto err; 330 328 331 - buffer[i++] = get_unaligned_be24(&rx[0]); 329 + afe->buffer[i++] = get_unaligned_be24(&rx[0]); 332 330 } 333 331 334 332 /* Disable reading from the device */ ··· 337 335 if (ret) 338 336 goto err; 339 337 340 - iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp); 338 + iio_push_to_buffers_with_timestamp(indio_dev, afe->buffer, 339 + pf->timestamp); 341 340 err: 342 341 iio_trigger_notify_done(indio_dev->trig); 343 342
+5 -3
drivers/iio/health/afe4404.c
··· 83 83 * @regulator: Pointer to the regulator for the IC 84 84 * @trig: IIO trigger for this device 85 85 * @irq: ADC_RDY line interrupt number 86 + * @buffer: Used to construct a scan to push to the iio buffer. 86 87 */ 87 88 struct afe4404_data { 88 89 struct device *dev; ··· 92 91 struct regulator *regulator; 93 92 struct iio_trigger *trig; 94 93 int irq; 94 + s32 buffer[10] __aligned(8); 95 95 }; 96 96 97 97 enum afe4404_chan_id { ··· 330 328 struct iio_dev *indio_dev = pf->indio_dev; 331 329 struct afe4404_data *afe = iio_priv(indio_dev); 332 330 int ret, bit, i = 0; 333 - s32 buffer[10]; 334 331 335 332 for_each_set_bit(bit, indio_dev->active_scan_mask, 336 333 indio_dev->masklength) { 337 334 ret = regmap_read(afe->regmap, afe4404_channel_values[bit], 338 - &buffer[i++]); 335 + &afe->buffer[i++]); 339 336 if (ret) 340 337 goto err; 341 338 } 342 339 343 - iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp); 340 + iio_push_to_buffers_with_timestamp(indio_dev, afe->buffer, 341 + pf->timestamp); 344 342 err: 345 343 iio_trigger_notify_done(indio_dev->trig); 346 344
+7 -3
drivers/iio/humidity/hdc100x.c
··· 38 38 39 39 /* integration time of the sensor */ 40 40 int adc_int_us[2]; 41 + /* Ensure natural alignment of timestamp */ 42 + struct { 43 + __be16 channels[2]; 44 + s64 ts __aligned(8); 45 + } scan; 41 46 }; 42 47 43 48 /* integration time in us */ ··· 327 322 struct i2c_client *client = data->client; 328 323 int delay = data->adc_int_us[0] + data->adc_int_us[1]; 329 324 int ret; 330 - s16 buf[8]; /* 2x s16 + padding + 8 byte timestamp */ 331 325 332 326 /* dual read starts at temp register */ 333 327 mutex_lock(&data->lock); ··· 337 333 } 338 334 usleep_range(delay, delay + 1000); 339 335 340 - ret = i2c_master_recv(client, (u8 *)buf, 4); 336 + ret = i2c_master_recv(client, (u8 *)data->scan.channels, 4); 341 337 if (ret < 0) { 342 338 dev_err(&client->dev, "cannot read sensor data\n"); 343 339 goto err; 344 340 } 345 341 346 - iio_push_to_buffers_with_timestamp(indio_dev, buf, 342 + iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, 347 343 iio_get_time_ns(indio_dev)); 348 344 err: 349 345 mutex_unlock(&data->lock);
+5 -2
drivers/iio/humidity/hts221.h
··· 14 14 15 15 #include <linux/iio/iio.h> 16 16 17 - #define HTS221_DATA_SIZE 2 18 - 19 17 enum hts221_sensor_type { 20 18 HTS221_SENSOR_H, 21 19 HTS221_SENSOR_T, ··· 37 39 38 40 bool enabled; 39 41 u8 odr; 42 + /* Ensure natural alignment of timestamp */ 43 + struct { 44 + __le16 channels[2]; 45 + s64 ts __aligned(8); 46 + } scan; 40 47 }; 41 48 42 49 extern const struct dev_pm_ops hts221_pm_ops;
+5 -4
drivers/iio/humidity/hts221_buffer.c
··· 160 160 161 161 static irqreturn_t hts221_buffer_handler_thread(int irq, void *p) 162 162 { 163 - u8 buffer[ALIGN(2 * HTS221_DATA_SIZE, sizeof(s64)) + sizeof(s64)]; 164 163 struct iio_poll_func *pf = p; 165 164 struct iio_dev *iio_dev = pf->indio_dev; 166 165 struct hts221_hw *hw = iio_priv(iio_dev); ··· 169 170 /* humidity data */ 170 171 ch = &iio_dev->channels[HTS221_SENSOR_H]; 171 172 err = regmap_bulk_read(hw->regmap, ch->address, 172 - buffer, HTS221_DATA_SIZE); 173 + &hw->scan.channels[0], 174 + sizeof(hw->scan.channels[0])); 173 175 if (err < 0) 174 176 goto out; 175 177 176 178 /* temperature data */ 177 179 ch = &iio_dev->channels[HTS221_SENSOR_T]; 178 180 err = regmap_bulk_read(hw->regmap, ch->address, 179 - buffer + HTS221_DATA_SIZE, HTS221_DATA_SIZE); 181 + &hw->scan.channels[1], 182 + sizeof(hw->scan.channels[1])); 180 183 if (err < 0) 181 184 goto out; 182 185 183 - iio_push_to_buffers_with_timestamp(iio_dev, buffer, 186 + iio_push_to_buffers_with_timestamp(iio_dev, &hw->scan, 184 187 iio_get_time_ns(iio_dev)); 185 188 186 189 out:
+2
drivers/iio/industrialio-core.c
··· 130 130 [IIO_MOD_PM2P5] = "pm2p5", 131 131 [IIO_MOD_PM4] = "pm4", 132 132 [IIO_MOD_PM10] = "pm10", 133 + [IIO_MOD_ETHANOL] = "ethanol", 134 + [IIO_MOD_H2] = "h2", 133 135 }; 134 136 135 137 /* relies on pairs of these shared then separate */
+17 -12
drivers/iio/magnetometer/ak8974.c
··· 192 192 bool drdy_irq; 193 193 struct completion drdy_complete; 194 194 bool drdy_active_low; 195 + /* Ensure timestamp is naturally aligned */ 196 + struct { 197 + __le16 channels[3]; 198 + s64 ts __aligned(8); 199 + } scan; 195 200 }; 196 201 197 202 static const char ak8974_reg_avdd[] = "avdd"; ··· 662 657 { 663 658 struct ak8974 *ak8974 = iio_priv(indio_dev); 664 659 int ret; 665 - __le16 hw_values[8]; /* Three axes + 64bit padding */ 666 660 667 661 pm_runtime_get_sync(&ak8974->i2c->dev); 668 662 mutex_lock(&ak8974->lock); ··· 671 667 dev_err(&ak8974->i2c->dev, "error triggering measure\n"); 672 668 goto out_unlock; 673 669 } 674 - ret = ak8974_getresult(ak8974, hw_values); 670 + ret = ak8974_getresult(ak8974, ak8974->scan.channels); 675 671 if (ret) { 676 672 dev_err(&ak8974->i2c->dev, "error getting measures\n"); 677 673 goto out_unlock; 678 674 } 679 675 680 - iio_push_to_buffers_with_timestamp(indio_dev, hw_values, 676 + iio_push_to_buffers_with_timestamp(indio_dev, &ak8974->scan, 681 677 iio_get_time_ns(indio_dev)); 682 678 683 679 out_unlock: ··· 866 862 ak8974->map = devm_regmap_init_i2c(i2c, &ak8974_regmap_config); 867 863 if (IS_ERR(ak8974->map)) { 868 864 dev_err(&i2c->dev, "failed to allocate register map\n"); 865 + pm_runtime_put_noidle(&i2c->dev); 866 + pm_runtime_disable(&i2c->dev); 869 867 return PTR_ERR(ak8974->map); 870 868 } 871 869 872 870 ret = ak8974_set_power(ak8974, AK8974_PWR_ON); 873 871 if (ret) { 874 872 dev_err(&i2c->dev, "could not power on\n"); 875 - goto power_off; 873 + goto disable_pm; 876 874 } 877 875 878 876 ret = ak8974_detect(ak8974); 879 877 if (ret) { 880 878 dev_err(&i2c->dev, "neither AK8974 nor AMI30x found\n"); 881 - goto power_off; 879 + goto disable_pm; 882 880 } 883 881 884 882 ret = ak8974_selftest(ak8974); ··· 890 884 ret = ak8974_reset(ak8974); 891 885 if (ret) { 892 886 dev_err(&i2c->dev, "AK8974 reset failed\n"); 893 - goto power_off; 887 + goto disable_pm; 894 888 } 895 - 896 - 
pm_runtime_set_autosuspend_delay(&i2c->dev, 897 - AK8974_AUTOSUSPEND_DELAY); 898 - pm_runtime_use_autosuspend(&i2c->dev); 899 - pm_runtime_put(&i2c->dev); 900 889 901 890 indio_dev->dev.parent = &i2c->dev; 902 891 switch (ak8974->variant) { ··· 958 957 goto cleanup_buffer; 959 958 } 960 959 960 + pm_runtime_set_autosuspend_delay(&i2c->dev, 961 + AK8974_AUTOSUSPEND_DELAY); 962 + pm_runtime_use_autosuspend(&i2c->dev); 963 + pm_runtime_put(&i2c->dev); 964 + 961 965 return 0; 962 966 963 967 cleanup_buffer: ··· 971 965 pm_runtime_put_noidle(&i2c->dev); 972 966 pm_runtime_disable(&i2c->dev); 973 967 ak8974_set_power(ak8974, AK8974_PWR_OFF); 974 - power_off: 975 968 regulator_bulk_disable(ARRAY_SIZE(ak8974->regs), ak8974->regs); 976 969 977 970 return ret;
+8 -3
drivers/iio/pressure/ms5611_core.c
··· 212 212 struct iio_poll_func *pf = p; 213 213 struct iio_dev *indio_dev = pf->indio_dev; 214 214 struct ms5611_state *st = iio_priv(indio_dev); 215 - s32 buf[4]; /* s32 (pressure) + s32 (temp) + 2 * s32 (timestamp) */ 215 + /* Ensure buffer elements are naturally aligned */ 216 + struct { 217 + s32 channels[2]; 218 + s64 ts __aligned(8); 219 + } scan; 216 220 int ret; 217 221 218 222 mutex_lock(&st->lock); 219 - ret = ms5611_read_temp_and_pressure(indio_dev, &buf[1], &buf[0]); 223 + ret = ms5611_read_temp_and_pressure(indio_dev, &scan.channels[1], 224 + &scan.channels[0]); 220 225 mutex_unlock(&st->lock); 221 226 if (ret < 0) 222 227 goto err; 223 228 224 - iio_push_to_buffers_with_timestamp(indio_dev, buf, 229 + iio_push_to_buffers_with_timestamp(indio_dev, &scan, 225 230 iio_get_time_ns(indio_dev)); 226 231 227 232 err:
+3 -1
drivers/iio/pressure/zpa2326.c
··· 665 665 int err; 666 666 667 667 err = pm_runtime_get_sync(indio_dev->dev.parent); 668 - if (err < 0) 668 + if (err < 0) { 669 + pm_runtime_put(indio_dev->dev.parent); 669 670 return err; 671 + } 670 672 671 673 if (err > 0) { 672 674 /*
+2
drivers/infiniband/core/cm.c
··· 3676 3676 return ret; 3677 3677 } 3678 3678 cm_id_priv->id.state = IB_CM_IDLE; 3679 + spin_lock_irq(&cm.lock); 3679 3680 if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) { 3680 3681 rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table); 3681 3682 RB_CLEAR_NODE(&cm_id_priv->sidr_id_node); 3682 3683 } 3684 + spin_unlock_irq(&cm.lock); 3683 3685 return 0; 3684 3686 } 3685 3687
+3 -3
drivers/infiniband/core/rdma_core.c
··· 649 649 { 650 650 struct ib_uverbs_file *ufile = attrs->ufile; 651 651 652 - /* alloc_commit consumes the uobj kref */ 653 - uobj->uapi_object->type_class->alloc_commit(uobj); 654 - 655 652 /* kref is held so long as the uobj is on the uobj list. */ 656 653 uverbs_uobject_get(uobj); 657 654 spin_lock_irq(&ufile->uobjects_lock); ··· 657 660 658 661 /* matches atomic_set(-1) in alloc_uobj */ 659 662 atomic_set(&uobj->usecnt, 0); 663 + 664 + /* alloc_commit consumes the uobj kref */ 665 + uobj->uapi_object->type_class->alloc_commit(uobj); 660 666 661 667 /* Matches the down_read in rdma_alloc_begin_uobject */ 662 668 up_read(&ufile->hw_destroy_rwsem);
+18 -22
drivers/infiniband/core/sa_query.c
··· 829 829 return len; 830 830 } 831 831 832 - static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask) 832 + static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask) 833 833 { 834 834 struct sk_buff *skb = NULL; 835 835 struct nlmsghdr *nlh; 836 836 void *data; 837 837 struct ib_sa_mad *mad; 838 838 int len; 839 + unsigned long flags; 840 + unsigned long delay; 841 + gfp_t gfp_flag; 842 + int ret; 843 + 844 + INIT_LIST_HEAD(&query->list); 845 + query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq); 839 846 840 847 mad = query->mad_buf->mad; 841 848 len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask); ··· 867 860 /* Repair the nlmsg header length */ 868 861 nlmsg_end(skb, nlh); 869 862 870 - return rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_mask); 871 - } 863 + gfp_flag = ((gfp_mask & GFP_ATOMIC) == GFP_ATOMIC) ? GFP_ATOMIC : 864 + GFP_NOWAIT; 872 865 873 - static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask) 874 - { 875 - unsigned long flags; 876 - unsigned long delay; 877 - int ret; 878 - 879 - INIT_LIST_HEAD(&query->list); 880 - query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq); 881 - 882 - /* Put the request on the list first.*/ 883 866 spin_lock_irqsave(&ib_nl_request_lock, flags); 867 + ret = rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_flag); 868 + 869 + if (ret) 870 + goto out; 871 + 872 + /* Put the request on the list.*/ 884 873 delay = msecs_to_jiffies(sa_local_svc_timeout_ms); 885 874 query->timeout = delay + jiffies; 886 875 list_add_tail(&query->list, &ib_nl_request_list); 887 876 /* Start the timeout if this is the only request */ 888 877 if (ib_nl_request_list.next == &query->list) 889 878 queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay); 890 - spin_unlock_irqrestore(&ib_nl_request_lock, flags); 891 879 892 - ret = ib_nl_send_msg(query, gfp_mask); 893 - if (ret) { 894 - ret = -EIO; 895 - /* Remove the request */ 896 - 
spin_lock_irqsave(&ib_nl_request_lock, flags); 897 - list_del(&query->list); 898 - spin_unlock_irqrestore(&ib_nl_request_lock, flags); 899 - } 880 + out: 881 + spin_unlock_irqrestore(&ib_nl_request_lock, flags); 900 882 901 883 return ret; 902 884 }
+28 -9
drivers/infiniband/hw/hfi1/init.c
··· 831 831 } 832 832 833 833 /** 834 + * destroy_workqueues - destroy per port workqueues 835 + * @dd: the hfi1_ib device 836 + */ 837 + static void destroy_workqueues(struct hfi1_devdata *dd) 838 + { 839 + int pidx; 840 + struct hfi1_pportdata *ppd; 841 + 842 + for (pidx = 0; pidx < dd->num_pports; ++pidx) { 843 + ppd = dd->pport + pidx; 844 + 845 + if (ppd->hfi1_wq) { 846 + destroy_workqueue(ppd->hfi1_wq); 847 + ppd->hfi1_wq = NULL; 848 + } 849 + if (ppd->link_wq) { 850 + destroy_workqueue(ppd->link_wq); 851 + ppd->link_wq = NULL; 852 + } 853 + } 854 + } 855 + 856 + /** 834 857 * enable_general_intr() - Enable the IRQs that will be handled by the 835 858 * general interrupt handler. 836 859 * @dd: valid devdata ··· 1126 1103 * We can't count on interrupts since we are stopping. 1127 1104 */ 1128 1105 hfi1_quiet_serdes(ppd); 1129 - 1130 - if (ppd->hfi1_wq) { 1131 - destroy_workqueue(ppd->hfi1_wq); 1132 - ppd->hfi1_wq = NULL; 1133 - } 1134 - if (ppd->link_wq) { 1135 - destroy_workqueue(ppd->link_wq); 1136 - ppd->link_wq = NULL; 1137 - } 1106 + if (ppd->hfi1_wq) 1107 + flush_workqueue(ppd->hfi1_wq); 1108 + if (ppd->link_wq) 1109 + flush_workqueue(ppd->link_wq); 1138 1110 } 1139 1111 sdma_exit(dd); 1140 1112 } ··· 1774 1756 * clear dma engines, etc. 1775 1757 */ 1776 1758 shutdown_device(dd); 1759 + destroy_workqueues(dd); 1777 1760 1778 1761 stop_timers(dd); 1779 1762
+5 -2
drivers/infiniband/hw/hfi1/qp.c
··· 195 195 { 196 196 /* Constraining 10KB packets to 8KB packets */ 197 197 if (mtu == (enum ib_mtu)OPA_MTU_10240) 198 - mtu = OPA_MTU_8192; 198 + mtu = (enum ib_mtu)OPA_MTU_8192; 199 199 return opa_mtu_enum_to_int((enum opa_mtu)mtu); 200 200 } 201 201 ··· 367 367 struct hfi1_ibport *ibp = 368 368 to_iport(qp->ibqp.device, qp->port_num); 369 369 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); 370 - struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); 370 + struct hfi1_devdata *dd = ppd->dd; 371 + 372 + if (dd->flags & HFI1_SHUTDOWN) 373 + return true; 371 374 372 375 return iowait_schedule(&priv->s_iowait, ppd->hfi1_wq, 373 376 priv->s_sde ?
+4 -1
drivers/infiniband/hw/hfi1/tid_rdma.c
··· 5406 5406 struct hfi1_ibport *ibp = 5407 5407 to_iport(qp->ibqp.device, qp->port_num); 5408 5408 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); 5409 - struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); 5409 + struct hfi1_devdata *dd = ppd->dd; 5410 + 5411 + if ((dd->flags & HFI1_SHUTDOWN)) 5412 + return true; 5410 5413 5411 5414 return iowait_tid_schedule(&priv->s_iowait, ppd->hfi1_wq, 5412 5415 priv->s_sde ?
+22 -12
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
··· 3954 3954 return 0; 3955 3955 } 3956 3956 3957 + static inline enum ib_mtu get_mtu(struct ib_qp *ibqp, 3958 + const struct ib_qp_attr *attr) 3959 + { 3960 + if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD) 3961 + return IB_MTU_4096; 3962 + 3963 + return attr->path_mtu; 3964 + } 3965 + 3957 3966 static int modify_qp_init_to_rtr(struct ib_qp *ibqp, 3958 3967 const struct ib_qp_attr *attr, int attr_mask, 3959 3968 struct hns_roce_v2_qp_context *context, ··· 3974 3965 struct ib_device *ibdev = &hr_dev->ib_dev; 3975 3966 dma_addr_t trrl_ba; 3976 3967 dma_addr_t irrl_ba; 3968 + enum ib_mtu mtu; 3977 3969 u8 port_num; 3978 3970 u64 *mtts; 3979 3971 u8 *dmac; ··· 4072 4062 roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M, 4073 4063 V2_QPC_BYTE_52_DMAC_S, 0); 4074 4064 4075 - /* mtu*(2^LP_PKTN_INI) should not bigger than 1 message length 64kb */ 4065 + mtu = get_mtu(ibqp, attr); 4066 + 4067 + if (attr_mask & IB_QP_PATH_MTU) { 4068 + roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M, 4069 + V2_QPC_BYTE_24_MTU_S, mtu); 4070 + roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M, 4071 + V2_QPC_BYTE_24_MTU_S, 0); 4072 + } 4073 + 4074 + #define MAX_LP_MSG_LEN 65536 4075 + /* MTU*(2^LP_PKTN_INI) shouldn't be bigger than 64kb */ 4076 4076 roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M, 4077 4077 V2_QPC_BYTE_56_LP_PKTN_INI_S, 4078 - ilog2(hr_dev->caps.max_sq_inline / IB_MTU_4096)); 4078 + ilog2(MAX_LP_MSG_LEN / ib_mtu_enum_to_int(mtu))); 4079 4079 roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M, 4080 4080 V2_QPC_BYTE_56_LP_PKTN_INI_S, 0); 4081 - 4082 - if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD) 4083 - roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M, 4084 - V2_QPC_BYTE_24_MTU_S, IB_MTU_4096); 4085 - else if (attr_mask & IB_QP_PATH_MTU) 4086 - roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M, 4087 - V2_QPC_BYTE_24_MTU_S, 
attr->path_mtu); 4088 - 4089 - roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M, 4090 - V2_QPC_BYTE_24_MTU_S, 0); 4091 4081 4092 4082 roce_set_bit(qpc_mask->byte_108_rx_reqepsn, 4093 4083 V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
+1 -1
drivers/infiniband/hw/hns/hns_roce_mr.c
··· 120 120 121 121 mr->pbl_hop_num = is_fast ? 1 : hr_dev->caps.pbl_hop_num; 122 122 buf_attr.page_shift = is_fast ? PAGE_SHIFT : 123 - hr_dev->caps.pbl_buf_pg_sz + HNS_HW_PAGE_SHIFT; 123 + hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT; 124 124 buf_attr.region[0].size = length; 125 125 buf_attr.region[0].hopnum = mr->pbl_hop_num; 126 126 buf_attr.region_count = 1;
+1 -1
drivers/infiniband/hw/mlx5/main.c
··· 511 511 mdev_port_num); 512 512 if (err) 513 513 goto out; 514 - ext = MLX5_CAP_PCAM_FEATURE(dev->mdev, ptys_extended_ethernet); 514 + ext = !!MLX5_GET_ETH_PROTO(ptys_reg, out, true, eth_proto_capability); 515 515 eth_prot_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper); 516 516 517 517 props->active_width = IB_WIDTH_4X;
+19 -3
drivers/infiniband/hw/mlx5/odp.c
··· 601 601 */ 602 602 synchronize_srcu(&dev->odp_srcu); 603 603 604 + /* 605 + * All work on the prefetch list must be completed, xa_erase() prevented 606 + * new work from being created. 607 + */ 608 + wait_event(imr->q_deferred_work, !atomic_read(&imr->num_deferred_work)); 609 + 610 + /* 611 + * At this point it is forbidden for any other thread to enter 612 + * pagefault_mr() on this imr. It is already forbidden to call 613 + * pagefault_mr() on an implicit child. Due to this additions to 614 + * implicit_children are prevented. 615 + */ 616 + 617 + /* 618 + * Block destroy_unused_implicit_child_mr() from incrementing 619 + * num_deferred_work. 620 + */ 604 621 xa_lock(&imr->implicit_children); 605 622 xa_for_each (&imr->implicit_children, idx, mtt) { 606 623 __xa_erase(&imr->implicit_children, idx); ··· 626 609 xa_unlock(&imr->implicit_children); 627 610 628 611 /* 629 - * num_deferred_work can only be incremented inside the odp_srcu, or 630 - * under xa_lock while the child is in the xarray. Thus at this point 631 - * it is only decreasing, and all work holding it is now on the wq. 612 + * Wait for any concurrent destroy_unused_implicit_child_mr() to 613 + * complete. 632 614 */ 633 615 wait_event(imr->q_deferred_work, !atomic_read(&imr->num_deferred_work)); 634 616
+6 -1
drivers/infiniband/hw/mlx5/qp.c
··· 2668 2668 if (qp_type == IB_QPT_RAW_PACKET && attr->rwq_ind_tbl) 2669 2669 return (create_flags) ? -EINVAL : 0; 2670 2670 2671 + process_create_flag(dev, &create_flags, IB_QP_CREATE_NETIF_QP, 2672 + mlx5_get_flow_namespace(dev->mdev, 2673 + MLX5_FLOW_NAMESPACE_BYPASS), 2674 + qp); 2671 2675 process_create_flag(dev, &create_flags, 2672 2676 IB_QP_CREATE_INTEGRITY_EN, 2673 2677 MLX5_CAP_GEN(mdev, sho), qp); ··· 3005 3001 mlx5_ib_destroy_dct(qp); 3006 3002 } else { 3007 3003 /* 3008 - * The two lines below are temp solution till QP allocation 3004 + * These lines below are temp solution till QP allocation 3009 3005 * will be moved to be under IB/core responsiblity. 3010 3006 */ 3011 3007 qp->ibqp.send_cq = attr->send_cq; 3012 3008 qp->ibqp.recv_cq = attr->recv_cq; 3009 + qp->ibqp.pd = pd; 3013 3010 destroy_qp_common(dev, qp, udata); 3014 3011 } 3015 3012
+2 -2
drivers/infiniband/hw/mlx5/srq_cmd.c
··· 83 83 struct mlx5_srq_table *table = &dev->srq_table; 84 84 struct mlx5_core_srq *srq; 85 85 86 - xa_lock(&table->array); 86 + xa_lock_irq(&table->array); 87 87 srq = xa_load(&table->array, srqn); 88 88 if (srq) 89 89 refcount_inc(&srq->common.refcount); 90 - xa_unlock(&table->array); 90 + xa_unlock_irq(&table->array); 91 91 92 92 return srq; 93 93 }
+2 -1
drivers/infiniband/sw/siw/siw_main.c
··· 67 67 static int dev_id = 1; 68 68 int rv; 69 69 70 + sdev->vendor_part_id = dev_id++; 71 + 70 72 rv = ib_register_device(base_dev, name); 71 73 if (rv) { 72 74 pr_warn("siw: device registration error %d\n", rv); 73 75 return rv; 74 76 } 75 - sdev->vendor_part_id = dev_id++; 76 77 77 78 siw_dbg(base_dev, "HWaddr=%pM\n", sdev->netdev->dev_addr); 78 79
+5 -4
drivers/input/mouse/elan_i2c_core.c
··· 951 951 u8 hover_info = packet[ETP_HOVER_INFO_OFFSET]; 952 952 bool contact_valid, hover_event; 953 953 954 + pm_wakeup_event(&data->client->dev, 0); 955 + 954 956 hover_event = hover_info & 0x40; 955 957 for (i = 0; i < ETP_MAX_FINGERS; i++) { 956 958 contact_valid = tp_info & (1U << (3 + i)); ··· 975 973 struct input_dev *input = data->tp_input; 976 974 u8 *packet = &report[ETP_REPORT_ID_OFFSET + 1]; 977 975 int x, y; 976 + 977 + pm_wakeup_event(&data->client->dev, 0); 978 978 979 979 if (!data->tp_input) { 980 980 dev_warn_once(&data->client->dev, ··· 1002 998 static irqreturn_t elan_isr(int irq, void *dev_id) 1003 999 { 1004 1000 struct elan_tp_data *data = dev_id; 1005 - struct device *dev = &data->client->dev; 1006 1001 int error; 1007 1002 u8 report[ETP_MAX_REPORT_LEN]; 1008 1003 ··· 1019 1016 if (error) 1020 1017 goto out; 1021 1018 1022 - pm_wakeup_event(dev, 0); 1023 - 1024 1019 switch (report[ETP_REPORT_ID_OFFSET]) { 1025 1020 case ETP_REPORT_ID: 1026 1021 elan_report_absolute(data, report); ··· 1027 1026 elan_report_trackpoint(data, report); 1028 1027 break; 1029 1028 default: 1030 - dev_err(dev, "invalid report id data (%x)\n", 1029 + dev_err(&data->client->dev, "invalid report id data (%x)\n", 1031 1030 report[ETP_REPORT_ID_OFFSET]); 1032 1031 } 1033 1032
+1
drivers/input/mouse/synaptics.c
··· 179 179 "LEN0093", /* T480 */ 180 180 "LEN0096", /* X280 */ 181 181 "LEN0097", /* X280 -> ALPS trackpoint */ 182 + "LEN0099", /* X1 Extreme 1st */ 182 183 "LEN009b", /* T580 */ 183 184 "LEN200f", /* T450s */ 184 185 "LEN2044", /* L470 */
+7
drivers/input/serio/i8042-x86ia64io.h
··· 426 426 }, 427 427 }, 428 428 { 429 + /* Lenovo XiaoXin Air 12 */ 430 + .matches = { 431 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 432 + DMI_MATCH(DMI_PRODUCT_NAME, "80UN"), 433 + }, 434 + }, 435 + { 429 436 .matches = { 430 437 DMI_MATCH(DMI_SYS_VENDOR, "Acer"), 431 438 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1360"),
-1
drivers/input/touchscreen/elants_i2c.c
··· 1325 1325 0, MT_TOOL_PALM, 0, 0); 1326 1326 input_abs_set_res(ts->input, ABS_MT_POSITION_X, ts->x_res); 1327 1327 input_abs_set_res(ts->input, ABS_MT_POSITION_Y, ts->y_res); 1328 - input_abs_set_res(ts->input, ABS_MT_TOUCH_MAJOR, 1); 1329 1328 1330 1329 touchscreen_parse_properties(ts->input, true, &ts->prop); 1331 1330
+9 -3
drivers/interconnect/core.c
··· 243 243 { 244 244 struct icc_provider *p = node->provider; 245 245 struct icc_req *r; 246 + u32 avg_bw, peak_bw; 246 247 247 248 node->avg_bw = 0; 248 249 node->peak_bw = 0; ··· 252 251 p->pre_aggregate(node); 253 252 254 253 hlist_for_each_entry(r, &node->req_list, req_node) { 255 - if (!r->enabled) 256 - continue; 257 - p->aggregate(node, r->tag, r->avg_bw, r->peak_bw, 254 + if (r->enabled) { 255 + avg_bw = r->avg_bw; 256 + peak_bw = r->peak_bw; 257 + } else { 258 + avg_bw = 0; 259 + peak_bw = 0; 260 + } 261 + p->aggregate(node, r->tag, avg_bw, peak_bw, 258 262 &node->avg_bw, &node->peak_bw); 259 263 } 260 264
+7 -7
drivers/interconnect/qcom/msm8916.c
··· 197 197 DEFINE_QNODE(pcnoc_int_1, MSM8916_PNOC_INT_1, 8, -1, -1, MSM8916_PNOC_SNOC_MAS); 198 198 DEFINE_QNODE(pcnoc_m_0, MSM8916_PNOC_MAS_0, 8, -1, -1, MSM8916_PNOC_INT_0); 199 199 DEFINE_QNODE(pcnoc_m_1, MSM8916_PNOC_MAS_1, 8, -1, -1, MSM8916_PNOC_SNOC_MAS); 200 - DEFINE_QNODE(pcnoc_s_0, MSM8916_PNOC_SLV_0, 8, -1, -1, MSM8916_SLAVE_CLK_CTL, MSM8916_SLAVE_TLMM, MSM8916_SLAVE_TCSR, MSM8916_SLAVE_SECURITY, MSM8916_SLAVE_MSS); 201 - DEFINE_QNODE(pcnoc_s_1, MSM8916_PNOC_SLV_1, 8, -1, -1, MSM8916_SLAVE_IMEM_CFG, MSM8916_SLAVE_CRYPTO_0_CFG, MSM8916_SLAVE_MSG_RAM, MSM8916_SLAVE_PDM, MSM8916_SLAVE_PRNG); 202 - DEFINE_QNODE(pcnoc_s_2, MSM8916_PNOC_SLV_2, 8, -1, -1, MSM8916_SLAVE_SPDM, MSM8916_SLAVE_BOOT_ROM, MSM8916_SLAVE_BIMC_CFG, MSM8916_SLAVE_PNOC_CFG, MSM8916_SLAVE_PMIC_ARB); 203 - DEFINE_QNODE(pcnoc_s_3, MSM8916_PNOC_SLV_3, 8, -1, -1, MSM8916_SLAVE_MPM, MSM8916_SLAVE_SNOC_CFG, MSM8916_SLAVE_RBCPR_CFG, MSM8916_SLAVE_QDSS_CFG, MSM8916_SLAVE_DEHR_CFG); 204 - DEFINE_QNODE(pcnoc_s_4, MSM8916_PNOC_SLV_4, 8, -1, -1, MSM8916_SLAVE_VENUS_CFG, MSM8916_SLAVE_CAMERA_CFG, MSM8916_SLAVE_DISPLAY_CFG); 205 - DEFINE_QNODE(pcnoc_s_8, MSM8916_PNOC_SLV_8, 8, -1, -1, MSM8916_SLAVE_USB_HS, MSM8916_SLAVE_SDCC_1, MSM8916_SLAVE_BLSP_1); 206 - DEFINE_QNODE(pcnoc_s_9, MSM8916_PNOC_SLV_9, 8, -1, -1, MSM8916_SLAVE_SDCC_2, MSM8916_SLAVE_LPASS, MSM8916_SLAVE_GRAPHICS_3D_CFG); 200 + DEFINE_QNODE(pcnoc_s_0, MSM8916_PNOC_SLV_0, 4, -1, -1, MSM8916_SLAVE_CLK_CTL, MSM8916_SLAVE_TLMM, MSM8916_SLAVE_TCSR, MSM8916_SLAVE_SECURITY, MSM8916_SLAVE_MSS); 201 + DEFINE_QNODE(pcnoc_s_1, MSM8916_PNOC_SLV_1, 4, -1, -1, MSM8916_SLAVE_IMEM_CFG, MSM8916_SLAVE_CRYPTO_0_CFG, MSM8916_SLAVE_MSG_RAM, MSM8916_SLAVE_PDM, MSM8916_SLAVE_PRNG); 202 + DEFINE_QNODE(pcnoc_s_2, MSM8916_PNOC_SLV_2, 4, -1, -1, MSM8916_SLAVE_SPDM, MSM8916_SLAVE_BOOT_ROM, MSM8916_SLAVE_BIMC_CFG, MSM8916_SLAVE_PNOC_CFG, MSM8916_SLAVE_PMIC_ARB); 203 + DEFINE_QNODE(pcnoc_s_3, MSM8916_PNOC_SLV_3, 4, -1, -1, MSM8916_SLAVE_MPM, MSM8916_SLAVE_SNOC_CFG, 
MSM8916_SLAVE_RBCPR_CFG, MSM8916_SLAVE_QDSS_CFG, MSM8916_SLAVE_DEHR_CFG); 204 + DEFINE_QNODE(pcnoc_s_4, MSM8916_PNOC_SLV_4, 4, -1, -1, MSM8916_SLAVE_VENUS_CFG, MSM8916_SLAVE_CAMERA_CFG, MSM8916_SLAVE_DISPLAY_CFG); 205 + DEFINE_QNODE(pcnoc_s_8, MSM8916_PNOC_SLV_8, 4, -1, -1, MSM8916_SLAVE_USB_HS, MSM8916_SLAVE_SDCC_1, MSM8916_SLAVE_BLSP_1); 206 + DEFINE_QNODE(pcnoc_s_9, MSM8916_PNOC_SLV_9, 4, -1, -1, MSM8916_SLAVE_SDCC_2, MSM8916_SLAVE_LPASS, MSM8916_SLAVE_GRAPHICS_3D_CFG); 207 207 DEFINE_QNODE(pcnoc_snoc_mas, MSM8916_PNOC_SNOC_MAS, 8, 29, -1, MSM8916_PNOC_SNOC_SLV); 208 208 DEFINE_QNODE(pcnoc_snoc_slv, MSM8916_PNOC_SNOC_SLV, 8, -1, 45, MSM8916_SNOC_INT_0, MSM8916_SNOC_INT_BIMC, MSM8916_SNOC_INT_1); 209 209 DEFINE_QNODE(qdss_int, MSM8916_SNOC_QDSS_INT, 8, -1, -1, MSM8916_SNOC_INT_0, MSM8916_SNOC_INT_BIMC);
+1
drivers/iommu/Kconfig
··· 305 305 306 306 config SUN50I_IOMMU 307 307 bool "Allwinner H6 IOMMU Support" 308 + depends on HAS_DMA 308 309 depends on ARCH_SUNXI || COMPILE_TEST 309 310 select ARM_DMA_USE_IOMMU 310 311 select IOMMU_API
+1 -1
drivers/iommu/amd/amd_iommu.h
··· 102 102 #ifdef CONFIG_DMI 103 103 void amd_iommu_apply_ivrs_quirks(void); 104 104 #else 105 - static void amd_iommu_apply_ivrs_quirks(void) { } 105 + static inline void amd_iommu_apply_ivrs_quirks(void) { } 106 106 #endif 107 107 108 108 #endif
+3 -2
drivers/iommu/amd/iommu.c
··· 3985 3985 if (!fn) 3986 3986 return -ENOMEM; 3987 3987 iommu->ir_domain = irq_domain_create_tree(fn, &amd_ir_domain_ops, iommu); 3988 - irq_domain_free_fwnode(fn); 3989 - if (!iommu->ir_domain) 3988 + if (!iommu->ir_domain) { 3989 + irq_domain_free_fwnode(fn); 3990 3990 return -ENOMEM; 3991 + } 3991 3992 3992 3993 iommu->ir_domain->parent = arch_get_ir_parent_domain(); 3993 3994 iommu->msi_domain = arch_create_remap_msi_irq_domain(iommu->ir_domain,
+1 -1
drivers/iommu/arm-smmu-qcom.c
··· 12 12 struct arm_smmu_device smmu; 13 13 }; 14 14 15 - static const struct of_device_id qcom_smmu_client_of_match[] = { 15 + static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = { 16 16 { .compatible = "qcom,adreno" }, 17 17 { .compatible = "qcom,mdp4" }, 18 18 { .compatible = "qcom,mdss" },
+4 -1
drivers/iommu/hyperv-iommu.c
··· 155 155 0, IOAPIC_REMAPPING_ENTRY, fn, 156 156 &hyperv_ir_domain_ops, NULL); 157 157 158 - irq_domain_free_fwnode(fn); 158 + if (!ioapic_ir_domain) { 159 + irq_domain_free_fwnode(fn); 160 + return -ENOMEM; 161 + } 159 162 160 163 /* 161 164 * Hyper-V doesn't provide irq remapping function for
+1 -1
drivers/iommu/intel/irq_remapping.c
··· 563 563 0, INTR_REMAP_TABLE_ENTRIES, 564 564 fn, &intel_ir_domain_ops, 565 565 iommu); 566 - irq_domain_free_fwnode(fn); 567 566 if (!iommu->ir_domain) { 567 + irq_domain_free_fwnode(fn); 568 568 pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id); 569 569 goto out_free_bitmap; 570 570 }
+1 -1
drivers/iommu/iommu.c
··· 295 295 return; 296 296 297 297 iommu_device_unlink(dev->iommu->iommu_dev, dev); 298 - iommu_group_remove_device(dev); 299 298 300 299 ops->release_device(dev); 301 300 301 + iommu_group_remove_device(dev); 302 302 module_put(ops->owner); 303 303 dev_iommu_free(dev); 304 304 }
+17 -20
drivers/iommu/qcom_iommu.c
··· 65 65 struct mutex init_mutex; /* Protects iommu pointer */ 66 66 struct iommu_domain domain; 67 67 struct qcom_iommu_dev *iommu; 68 + struct iommu_fwspec *fwspec; 68 69 }; 69 70 70 71 static struct qcom_iommu_domain *to_qcom_iommu_domain(struct iommu_domain *dom) ··· 85 84 return dev_iommu_priv_get(dev); 86 85 } 87 86 88 - static struct qcom_iommu_ctx * to_ctx(struct device *dev, unsigned asid) 87 + static struct qcom_iommu_ctx * to_ctx(struct qcom_iommu_domain *d, unsigned asid) 89 88 { 90 - struct qcom_iommu_dev *qcom_iommu = to_iommu(dev); 89 + struct qcom_iommu_dev *qcom_iommu = d->iommu; 91 90 if (!qcom_iommu) 92 91 return NULL; 93 92 return qcom_iommu->ctxs[asid - 1]; ··· 119 118 120 119 static void qcom_iommu_tlb_sync(void *cookie) 121 120 { 122 - struct iommu_fwspec *fwspec; 123 - struct device *dev = cookie; 121 + struct qcom_iommu_domain *qcom_domain = cookie; 122 + struct iommu_fwspec *fwspec = qcom_domain->fwspec; 124 123 unsigned i; 125 124 126 - fwspec = dev_iommu_fwspec_get(dev); 127 - 128 125 for (i = 0; i < fwspec->num_ids; i++) { 129 - struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]); 126 + struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]); 130 127 unsigned int val, ret; 131 128 132 129 iommu_writel(ctx, ARM_SMMU_CB_TLBSYNC, 0); ··· 138 139 139 140 static void qcom_iommu_tlb_inv_context(void *cookie) 140 141 { 141 - struct device *dev = cookie; 142 - struct iommu_fwspec *fwspec; 142 + struct qcom_iommu_domain *qcom_domain = cookie; 143 + struct iommu_fwspec *fwspec = qcom_domain->fwspec; 143 144 unsigned i; 144 145 145 - fwspec = dev_iommu_fwspec_get(dev); 146 - 147 146 for (i = 0; i < fwspec->num_ids; i++) { 148 - struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]); 147 + struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]); 149 148 iommu_writel(ctx, ARM_SMMU_CB_S1_TLBIASID, ctx->asid); 150 149 } 151 150 ··· 153 156 static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size, 154 157 
size_t granule, bool leaf, void *cookie) 155 158 { 156 - struct device *dev = cookie; 157 - struct iommu_fwspec *fwspec; 159 + struct qcom_iommu_domain *qcom_domain = cookie; 160 + struct iommu_fwspec *fwspec = qcom_domain->fwspec; 158 161 unsigned i, reg; 159 162 160 163 reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA; 161 164 162 - fwspec = dev_iommu_fwspec_get(dev); 163 - 164 165 for (i = 0; i < fwspec->num_ids; i++) { 165 - struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]); 166 + struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]); 166 167 size_t s = size; 167 168 168 169 iova = (iova >> 12) << 12; ··· 251 256 }; 252 257 253 258 qcom_domain->iommu = qcom_iommu; 254 - pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, dev); 259 + qcom_domain->fwspec = fwspec; 260 + 261 + pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, qcom_domain); 255 262 if (!pgtbl_ops) { 256 263 dev_err(qcom_iommu->dev, "failed to allocate pagetable ops\n"); 257 264 ret = -ENOMEM; ··· 266 269 domain->geometry.force_aperture = true; 267 270 268 271 for (i = 0; i < fwspec->num_ids; i++) { 269 - struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]); 272 + struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]); 270 273 271 274 if (!ctx->secure_init) { 272 275 ret = qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, ctx->asid); ··· 416 419 417 420 pm_runtime_get_sync(qcom_iommu->dev); 418 421 for (i = 0; i < fwspec->num_ids; i++) { 419 - struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]); 422 + struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]); 420 423 421 424 /* Disable the context bank: */ 422 425 iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);
+3 -5
drivers/iommu/sun50i-iommu.c
··· 313 313 IOMMU_TLB_FLUSH_MICRO_TLB(1) | 314 314 IOMMU_TLB_FLUSH_MICRO_TLB(0)); 315 315 316 - ret = readl_poll_timeout(iommu->base + IOMMU_TLB_FLUSH_REG, 317 - reg, !reg, 318 - 1, 2000); 316 + ret = readl_poll_timeout_atomic(iommu->base + IOMMU_TLB_FLUSH_REG, 317 + reg, !reg, 318 + 1, 2000); 319 319 if (ret) 320 320 dev_warn(iommu->dev, "TLB Flush timed out!\n"); 321 321 ··· 556 556 { 557 557 struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain); 558 558 phys_addr_t pt_phys; 559 - dma_addr_t pte_dma; 560 559 u32 *pte_addr; 561 560 u32 dte; 562 561 ··· 565 566 566 567 pt_phys = sun50i_dte_get_pt_address(dte); 567 568 pte_addr = (u32 *)phys_to_virt(pt_phys) + sun50i_iova_get_pte_index(iova); 568 - pte_dma = pt_phys + sun50i_iova_get_pte_index(iova) * PT_ENTRY_SIZE; 569 569 570 570 if (!sun50i_pte_is_page_valid(*pte_addr)) 571 571 return 0;
+1 -1
drivers/irqchip/Kconfig
··· 563 563 Support for the Loongson PCH PIC Controller. 564 564 565 565 config LOONGSON_PCH_MSI 566 - bool "Loongson PCH PIC Controller" 566 + bool "Loongson PCH MSI Controller" 567 567 depends on MACH_LOONGSON64 || COMPILE_TEST 568 568 depends on PCI 569 569 default MACH_LOONGSON64
+12 -4
drivers/irqchip/irq-gic-v3-its.c
··· 3797 3797 if (!gic_rdists->has_vpend_valid_dirty) 3798 3798 return; 3799 3799 3800 - WARN_ON_ONCE(readq_relaxed_poll_timeout(vlpi_base + GICR_VPENDBASER, 3801 - val, 3802 - !(val & GICR_VPENDBASER_Dirty), 3803 - 10, 500)); 3800 + WARN_ON_ONCE(readq_relaxed_poll_timeout_atomic(vlpi_base + GICR_VPENDBASER, 3801 + val, 3802 + !(val & GICR_VPENDBASER_Dirty), 3803 + 10, 500)); 3804 3804 } 3805 3805 3806 3806 static void its_vpe_schedule(struct its_vpe *vpe) ··· 4054 4054 u64 val; 4055 4055 4056 4056 if (info->req_db) { 4057 + unsigned long flags; 4058 + 4057 4059 /* 4058 4060 * vPE is going to block: make the vPE non-resident with 4059 4061 * PendingLast clear and DB set. The GIC guarantees that if 4060 4062 * we read-back PendingLast clear, then a doorbell will be 4061 4063 * delivered when an interrupt comes. 4064 + * 4065 + * Note the locking to deal with the concurrent update of 4066 + * pending_last from the doorbell interrupt handler that can 4067 + * run concurrently. 4062 4068 */ 4069 + raw_spin_lock_irqsave(&vpe->vpe_lock, flags); 4063 4070 val = its_clear_vpend_valid(vlpi_base, 4064 4071 GICR_VPENDBASER_PendingLast, 4065 4072 GICR_VPENDBASER_4_1_DB); 4066 4073 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); 4074 + raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags); 4067 4075 } else { 4068 4076 /* 4069 4077 * We're not blocking, so just make the vPE non-resident
+3 -11
drivers/irqchip/irq-gic.c
··· 329 329 static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, 330 330 bool force) 331 331 { 332 - void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3); 333 - unsigned int cpu, shift = (gic_irq(d) % 4) * 8; 334 - u32 val, mask, bit; 335 - unsigned long flags; 332 + void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + gic_irq(d); 333 + unsigned int cpu; 336 334 337 335 if (!force) 338 336 cpu = cpumask_any_and(mask_val, cpu_online_mask); ··· 340 342 if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids) 341 343 return -EINVAL; 342 344 343 - gic_lock_irqsave(flags); 344 - mask = 0xff << shift; 345 - bit = gic_cpu_map[cpu] << shift; 346 - val = readl_relaxed(reg) & ~mask; 347 - writel_relaxed(val | bit, reg); 348 - gic_unlock_irqrestore(flags); 349 - 345 + writeb_relaxed(gic_cpu_map[cpu], reg); 350 346 irq_data_update_effective_affinity(d, cpumask_of(cpu)); 351 347 352 348 return IRQ_SET_MASK_OK_DONE;
+1 -1
drivers/irqchip/irq-riscv-intc.c
··· 99 99 100 100 hartid = riscv_of_parent_hartid(node); 101 101 if (hartid < 0) { 102 - pr_warn("unable to fine hart id for %pOF\n", node); 102 + pr_warn("unable to find hart id for %pOF\n", node); 103 103 return 0; 104 104 } 105 105
+2 -2
drivers/md/dm-integrity.c
··· 2420 2420 unsigned prev_free_sectors; 2421 2421 2422 2422 /* the following test is not needed, but it tests the replay code */ 2423 - if (unlikely(dm_suspended(ic->ti)) && !ic->meta_dev) 2423 + if (unlikely(dm_post_suspending(ic->ti)) && !ic->meta_dev) 2424 2424 return; 2425 2425 2426 2426 spin_lock_irq(&ic->endio_wait.lock); ··· 2481 2481 2482 2482 next_chunk: 2483 2483 2484 - if (unlikely(dm_suspended(ic->ti))) 2484 + if (unlikely(dm_post_suspending(ic->ti))) 2485 2485 goto unlock_ret; 2486 2486 2487 2487 range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
-4
drivers/md/dm-rq.c
··· 146 146 */ 147 147 static void rq_completed(struct mapped_device *md) 148 148 { 149 - /* nudge anyone waiting on suspend queue */ 150 - if (unlikely(wq_has_sleeper(&md->wait))) 151 - wake_up(&md->wait); 152 - 153 149 /* 154 150 * dm_put() must be at the end of this function. See the comment above 155 151 */
+6
drivers/md/dm-writecache.c
··· 2266 2266 } 2267 2267 2268 2268 if (WC_MODE_PMEM(wc)) { 2269 + if (!dax_synchronous(wc->ssd_dev->dax_dev)) { 2270 + r = -EOPNOTSUPP; 2271 + ti->error = "Asynchronous persistent memory not supported as pmem cache"; 2272 + goto bad; 2273 + } 2274 + 2269 2275 r = persistent_memory_claim(wc); 2270 2276 if (r) { 2271 2277 ti->error = "Unable to map persistent memory for cache";
+8 -1
drivers/md/dm-zoned-metadata.c
··· 2217 2217 { 2218 2218 struct list_head *list; 2219 2219 struct dm_zone *zone; 2220 - int i = 0; 2220 + int i; 2221 2221 2222 + /* Schedule reclaim to ensure free zones are available */ 2223 + if (!(flags & DMZ_ALLOC_RECLAIM)) { 2224 + for (i = 0; i < zmd->nr_devs; i++) 2225 + dmz_schedule_reclaim(zmd->dev[i].reclaim); 2226 + } 2227 + 2228 + i = 0; 2222 2229 again: 2223 2230 if (flags & DMZ_ALLOC_CACHE) 2224 2231 list = &zmd->unmap_cache_list;
+3 -4
drivers/md/dm-zoned-reclaim.c
··· 456 456 nr_zones = dmz_nr_rnd_zones(zmd, zrc->dev_idx); 457 457 nr_unmap = dmz_nr_unmap_rnd_zones(zmd, zrc->dev_idx); 458 458 } 459 + if (nr_unmap <= 1) 460 + return 0; 459 461 return nr_unmap * 100 / nr_zones; 460 462 } 461 463 ··· 503 501 { 504 502 struct dmz_reclaim *zrc = container_of(work, struct dmz_reclaim, work.work); 505 503 struct dmz_metadata *zmd = zrc->metadata; 506 - unsigned int p_unmap, nr_unmap_rnd = 0, nr_rnd = 0; 504 + unsigned int p_unmap; 507 505 int ret; 508 506 509 507 if (dmz_dev_is_dying(zmd)) ··· 528 526 /* Busy but we still have some random zone: throttle */ 529 527 zrc->kc_throttle.throttle = min(75U, 100U - p_unmap / 2); 530 528 } 531 - 532 - nr_unmap_rnd = dmz_nr_unmap_rnd_zones(zmd, zrc->dev_idx); 533 - nr_rnd = dmz_nr_rnd_zones(zmd, zrc->dev_idx); 534 529 535 530 DMDEBUG("(%s/%u): Reclaim (%u): %s, %u%% free zones (%u/%u cache %u/%u random)", 536 531 dmz_metadata_label(zmd), zrc->dev_idx,
+1 -9
drivers/md/dm-zoned-target.c
··· 400 400 dm_per_bio_data(bio, sizeof(struct dmz_bioctx)); 401 401 struct dmz_metadata *zmd = dmz->metadata; 402 402 struct dm_zone *zone; 403 - int i, ret; 404 - 405 - /* 406 - * Write may trigger a zone allocation. So make sure the 407 - * allocation can succeed. 408 - */ 409 - if (bio_op(bio) == REQ_OP_WRITE) 410 - for (i = 0; i < dmz->nr_ddevs; i++) 411 - dmz_schedule_reclaim(dmz->dev[i].reclaim); 403 + int ret; 412 404 413 405 dmz_lock_metadata(zmd); 414 406
+70 -31
drivers/md/dm.c
··· 12 12 #include <linux/init.h> 13 13 #include <linux/module.h> 14 14 #include <linux/mutex.h> 15 + #include <linux/sched/mm.h> 15 16 #include <linux/sched/signal.h> 16 17 #include <linux/blkpg.h> 17 18 #include <linux/bio.h> ··· 143 142 #define DMF_NOFLUSH_SUSPENDING 5 144 143 #define DMF_DEFERRED_REMOVE 6 145 144 #define DMF_SUSPENDED_INTERNALLY 7 145 + #define DMF_POST_SUSPENDING 8 146 146 147 147 #define DM_NUMA_NODE NUMA_NO_NODE 148 148 static int dm_numa_node = DM_NUMA_NODE; ··· 654 652 if (tio->inside_dm_io) 655 653 return; 656 654 bio_put(&tio->clone); 657 - } 658 - 659 - static bool md_in_flight_bios(struct mapped_device *md) 660 - { 661 - int cpu; 662 - struct hd_struct *part = &dm_disk(md)->part0; 663 - long sum = 0; 664 - 665 - for_each_possible_cpu(cpu) { 666 - sum += part_stat_local_read_cpu(part, in_flight[0], cpu); 667 - sum += part_stat_local_read_cpu(part, in_flight[1], cpu); 668 - } 669 - 670 - return sum != 0; 671 - } 672 - 673 - static bool md_in_flight(struct mapped_device *md) 674 - { 675 - if (queue_is_mq(md->queue)) 676 - return blk_mq_queue_inflight(md->queue); 677 - else 678 - return md_in_flight_bios(md); 679 655 } 680 656 681 657 u64 dm_start_time_ns_from_clone(struct bio *bio) ··· 1445 1465 BUG_ON(bio_has_data(ci->bio)); 1446 1466 while ((ti = dm_table_get_target(ci->map, target_nr++))) 1447 1467 __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); 1448 - 1449 - bio_disassociate_blkg(ci->bio); 1450 - 1451 1468 return 0; 1452 1469 } 1453 1470 ··· 1632 1655 ci.bio = &flush_bio; 1633 1656 ci.sector_count = 0; 1634 1657 error = __send_empty_flush(&ci); 1658 + bio_uninit(ci.bio); 1635 1659 /* dec_pending submits any data associated with flush */ 1636 1660 } else if (op_is_zone_mgmt(bio_op(bio))) { 1637 1661 ci.bio = bio; ··· 1707 1729 ci.bio = &flush_bio; 1708 1730 ci.sector_count = 0; 1709 1731 error = __send_empty_flush(&ci); 1732 + bio_uninit(ci.bio); 1710 1733 /* dec_pending submits any data associated with flush */ 1711 1734 } 
else { 1712 1735 struct dm_target_io *tio; ··· 2409 2430 if (!dm_suspended_md(md)) { 2410 2431 dm_table_presuspend_targets(map); 2411 2432 set_bit(DMF_SUSPENDED, &md->flags); 2433 + set_bit(DMF_POST_SUSPENDING, &md->flags); 2412 2434 dm_table_postsuspend_targets(map); 2413 2435 } 2414 2436 /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ ··· 2450 2470 } 2451 2471 EXPORT_SYMBOL_GPL(dm_put); 2452 2472 2453 - static int dm_wait_for_completion(struct mapped_device *md, long task_state) 2473 + static bool md_in_flight_bios(struct mapped_device *md) 2474 + { 2475 + int cpu; 2476 + struct hd_struct *part = &dm_disk(md)->part0; 2477 + long sum = 0; 2478 + 2479 + for_each_possible_cpu(cpu) { 2480 + sum += part_stat_local_read_cpu(part, in_flight[0], cpu); 2481 + sum += part_stat_local_read_cpu(part, in_flight[1], cpu); 2482 + } 2483 + 2484 + return sum != 0; 2485 + } 2486 + 2487 + static int dm_wait_for_bios_completion(struct mapped_device *md, long task_state) 2454 2488 { 2455 2489 int r = 0; 2456 2490 DEFINE_WAIT(wait); 2457 2491 2458 - while (1) { 2492 + while (true) { 2459 2493 prepare_to_wait(&md->wait, &wait, task_state); 2460 2494 2461 - if (!md_in_flight(md)) 2495 + if (!md_in_flight_bios(md)) 2462 2496 break; 2463 2497 2464 2498 if (signal_pending_state(task_state, current)) { ··· 2483 2489 io_schedule(); 2484 2490 } 2485 2491 finish_wait(&md->wait, &wait); 2492 + 2493 + return r; 2494 + } 2495 + 2496 + static int dm_wait_for_completion(struct mapped_device *md, long task_state) 2497 + { 2498 + int r = 0; 2499 + 2500 + if (!queue_is_mq(md->queue)) 2501 + return dm_wait_for_bios_completion(md, task_state); 2502 + 2503 + while (true) { 2504 + if (!blk_mq_queue_inflight(md->queue)) 2505 + break; 2506 + 2507 + if (signal_pending_state(task_state, current)) { 2508 + r = -EINTR; 2509 + break; 2510 + } 2511 + 2512 + msleep(5); 2513 + } 2486 2514 2487 2515 return r; 2488 2516 } ··· 2768 2752 if (r) 2769 2753 goto out_unlock; 2770 2754 2755 + 
set_bit(DMF_POST_SUSPENDING, &md->flags); 2771 2756 dm_table_postsuspend_targets(map); 2757 + clear_bit(DMF_POST_SUSPENDING, &md->flags); 2772 2758 2773 2759 out_unlock: 2774 2760 mutex_unlock(&md->suspend_lock); ··· 2867 2849 (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE, 2868 2850 DMF_SUSPENDED_INTERNALLY); 2869 2851 2852 + set_bit(DMF_POST_SUSPENDING, &md->flags); 2870 2853 dm_table_postsuspend_targets(map); 2854 + clear_bit(DMF_POST_SUSPENDING, &md->flags); 2871 2855 } 2872 2856 2873 2857 static void __dm_internal_resume(struct mapped_device *md) ··· 2946 2926 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, 2947 2927 unsigned cookie) 2948 2928 { 2929 + int r; 2930 + unsigned noio_flag; 2949 2931 char udev_cookie[DM_COOKIE_LENGTH]; 2950 2932 char *envp[] = { udev_cookie, NULL }; 2951 2933 2934 + noio_flag = memalloc_noio_save(); 2935 + 2952 2936 if (!cookie) 2953 - return kobject_uevent(&disk_to_dev(md->disk)->kobj, action); 2937 + r = kobject_uevent(&disk_to_dev(md->disk)->kobj, action); 2954 2938 else { 2955 2939 snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u", 2956 2940 DM_COOKIE_ENV_VAR_NAME, cookie); 2957 - return kobject_uevent_env(&disk_to_dev(md->disk)->kobj, 2958 - action, envp); 2941 + r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj, 2942 + action, envp); 2959 2943 } 2944 + 2945 + memalloc_noio_restore(noio_flag); 2946 + 2947 + return r; 2960 2948 } 2961 2949 2962 2950 uint32_t dm_next_uevent_seq(struct mapped_device *md) ··· 3030 3002 return test_bit(DMF_SUSPENDED, &md->flags); 3031 3003 } 3032 3004 3005 + static int dm_post_suspending_md(struct mapped_device *md) 3006 + { 3007 + return test_bit(DMF_POST_SUSPENDING, &md->flags); 3008 + } 3009 + 3033 3010 int dm_suspended_internally_md(struct mapped_device *md) 3034 3011 { 3035 3012 return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); ··· 3050 3017 return dm_suspended_md(dm_table_get_md(ti->table)); 3051 3018 } 3052 3019 
EXPORT_SYMBOL_GPL(dm_suspended); 3020 + 3021 + int dm_post_suspending(struct dm_target *ti) 3022 + { 3023 + return dm_post_suspending_md(dm_table_get_md(ti->table)); 3024 + } 3025 + EXPORT_SYMBOL_GPL(dm_post_suspending); 3053 3026 3054 3027 int dm_noflush_suspending(struct dm_target *ti) 3055 3028 {
+20 -21
drivers/message/fusion/mptbase.c
··· 1324 1324 return 0; /* fw doesn't need any host buffers */ 1325 1325 1326 1326 /* spin till we get enough memory */ 1327 - while(host_page_buffer_sz > 0) { 1328 - 1329 - if((ioc->HostPageBuffer = pci_alloc_consistent( 1330 - ioc->pcidev, 1331 - host_page_buffer_sz, 1332 - &ioc->HostPageBuffer_dma)) != NULL) { 1333 - 1327 + while (host_page_buffer_sz > 0) { 1328 + ioc->HostPageBuffer = 1329 + dma_alloc_coherent(&ioc->pcidev->dev, 1330 + host_page_buffer_sz, 1331 + &ioc->HostPageBuffer_dma, 1332 + GFP_KERNEL); 1333 + if (ioc->HostPageBuffer) { 1334 1334 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT 1335 1335 "host_page_buffer @ %p, dma @ %x, sz=%d bytes\n", 1336 1336 ioc->name, ioc->HostPageBuffer, ··· 2741 2741 sz = ioc->alloc_sz; 2742 2742 dexitprintk(ioc, printk(MYIOC_s_INFO_FMT "free @ %p, sz=%d bytes\n", 2743 2743 ioc->name, ioc->alloc, ioc->alloc_sz)); 2744 - pci_free_consistent(ioc->pcidev, sz, 2745 - ioc->alloc, ioc->alloc_dma); 2744 + dma_free_coherent(&ioc->pcidev->dev, sz, ioc->alloc, 2745 + ioc->alloc_dma); 2746 2746 ioc->reply_frames = NULL; 2747 2747 ioc->req_frames = NULL; 2748 2748 ioc->alloc = NULL; ··· 2751 2751 2752 2752 if (ioc->sense_buf_pool != NULL) { 2753 2753 sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC); 2754 - pci_free_consistent(ioc->pcidev, sz, 2755 - ioc->sense_buf_pool, ioc->sense_buf_pool_dma); 2754 + dma_free_coherent(&ioc->pcidev->dev, sz, ioc->sense_buf_pool, 2755 + ioc->sense_buf_pool_dma); 2756 2756 ioc->sense_buf_pool = NULL; 2757 2757 ioc->alloc_total -= sz; 2758 2758 } ··· 2802 2802 "HostPageBuffer free @ %p, sz=%d bytes\n", 2803 2803 ioc->name, ioc->HostPageBuffer, 2804 2804 ioc->HostPageBuffer_sz)); 2805 - pci_free_consistent(ioc->pcidev, ioc->HostPageBuffer_sz, 2805 + dma_free_coherent(&ioc->pcidev->dev, ioc->HostPageBuffer_sz, 2806 2806 ioc->HostPageBuffer, ioc->HostPageBuffer_dma); 2807 2807 ioc->HostPageBuffer = NULL; 2808 2808 ioc->HostPageBuffer_sz = 0; ··· 4497 4497 ioc->name, sz, sz, num_chain)); 4498 4498 4499 4499 
total_size += sz; 4500 - mem = pci_alloc_consistent(ioc->pcidev, total_size, &alloc_dma); 4500 + mem = dma_alloc_coherent(&ioc->pcidev->dev, total_size, 4501 + &alloc_dma, GFP_KERNEL); 4501 4502 if (mem == NULL) { 4502 4503 printk(MYIOC_s_ERR_FMT "Unable to allocate Reply, Request, Chain Buffers!\n", 4503 4504 ioc->name); ··· 4575 4574 spin_unlock_irqrestore(&ioc->FreeQlock, flags); 4576 4575 4577 4576 sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC); 4578 - ioc->sense_buf_pool = 4579 - pci_alloc_consistent(ioc->pcidev, sz, &ioc->sense_buf_pool_dma); 4577 + ioc->sense_buf_pool = dma_alloc_coherent(&ioc->pcidev->dev, sz, 4578 + &ioc->sense_buf_pool_dma, GFP_KERNEL); 4580 4579 if (ioc->sense_buf_pool == NULL) { 4581 4580 printk(MYIOC_s_ERR_FMT "Unable to allocate Sense Buffers!\n", 4582 4581 ioc->name); ··· 4614 4613 4615 4614 if (ioc->alloc != NULL) { 4616 4615 sz = ioc->alloc_sz; 4617 - pci_free_consistent(ioc->pcidev, 4618 - sz, 4619 - ioc->alloc, ioc->alloc_dma); 4616 + dma_free_coherent(&ioc->pcidev->dev, sz, ioc->alloc, 4617 + ioc->alloc_dma); 4620 4618 ioc->reply_frames = NULL; 4621 4619 ioc->req_frames = NULL; 4622 4620 ioc->alloc_total -= sz; 4623 4621 } 4624 4622 if (ioc->sense_buf_pool != NULL) { 4625 4623 sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC); 4626 - pci_free_consistent(ioc->pcidev, 4627 - sz, 4628 - ioc->sense_buf_pool, ioc->sense_buf_pool_dma); 4624 + dma_free_coherent(&ioc->pcidev->dev, sz, ioc->sense_buf_pool, 4625 + ioc->sense_buf_pool_dma); 4629 4626 ioc->sense_buf_pool = NULL; 4630 4627 } 4631 4628
+3 -2
drivers/mfd/ioc3.c
··· 142 142 goto err; 143 143 144 144 domain = irq_domain_create_linear(fn, 24, &ioc3_irq_domain_ops, ipd); 145 - if (!domain) 145 + if (!domain) { 146 + irq_domain_free_fwnode(fn); 146 147 goto err; 148 + } 147 149 148 - irq_domain_free_fwnode(fn); 149 150 ipd->domain = domain; 150 151 151 152 irq_set_chained_handler_and_data(irq, ioc3_irq_handler, domain);
+12 -12
drivers/misc/atmel-ssc.c
··· 10 10 #include <linux/clk.h> 11 11 #include <linux/err.h> 12 12 #include <linux/io.h> 13 - #include <linux/spinlock.h> 13 + #include <linux/mutex.h> 14 14 #include <linux/atmel-ssc.h> 15 15 #include <linux/slab.h> 16 16 #include <linux/module.h> ··· 20 20 #include "../../sound/soc/atmel/atmel_ssc_dai.h" 21 21 22 22 /* Serialize access to ssc_list and user count */ 23 - static DEFINE_SPINLOCK(user_lock); 23 + static DEFINE_MUTEX(user_lock); 24 24 static LIST_HEAD(ssc_list); 25 25 26 26 struct ssc_device *ssc_request(unsigned int ssc_num) ··· 28 28 int ssc_valid = 0; 29 29 struct ssc_device *ssc; 30 30 31 - spin_lock(&user_lock); 31 + mutex_lock(&user_lock); 32 32 list_for_each_entry(ssc, &ssc_list, list) { 33 33 if (ssc->pdev->dev.of_node) { 34 34 if (of_alias_get_id(ssc->pdev->dev.of_node, "ssc") ··· 44 44 } 45 45 46 46 if (!ssc_valid) { 47 - spin_unlock(&user_lock); 47 + mutex_unlock(&user_lock); 48 48 pr_err("ssc: ssc%d platform device is missing\n", ssc_num); 49 49 return ERR_PTR(-ENODEV); 50 50 } 51 51 52 52 if (ssc->user) { 53 - spin_unlock(&user_lock); 53 + mutex_unlock(&user_lock); 54 54 dev_dbg(&ssc->pdev->dev, "module busy\n"); 55 55 return ERR_PTR(-EBUSY); 56 56 } 57 57 ssc->user++; 58 - spin_unlock(&user_lock); 58 + mutex_unlock(&user_lock); 59 59 60 60 clk_prepare(ssc->clk); 61 61 ··· 67 67 { 68 68 bool disable_clk = true; 69 69 70 - spin_lock(&user_lock); 70 + mutex_lock(&user_lock); 71 71 if (ssc->user) 72 72 ssc->user--; 73 73 else { 74 74 disable_clk = false; 75 75 dev_dbg(&ssc->pdev->dev, "device already free\n"); 76 76 } 77 - spin_unlock(&user_lock); 77 + mutex_unlock(&user_lock); 78 78 79 79 if (disable_clk) 80 80 clk_unprepare(ssc->clk); ··· 237 237 return -ENXIO; 238 238 } 239 239 240 - spin_lock(&user_lock); 240 + mutex_lock(&user_lock); 241 241 list_add_tail(&ssc->list, &ssc_list); 242 - spin_unlock(&user_lock); 242 + mutex_unlock(&user_lock); 243 243 244 244 platform_set_drvdata(pdev, ssc); 245 245 ··· 258 258 259 259 
ssc_sound_dai_remove(ssc); 260 260 261 - spin_lock(&user_lock); 261 + mutex_lock(&user_lock); 262 262 list_del(&ssc->list); 263 - spin_unlock(&user_lock); 263 + mutex_unlock(&user_lock); 264 264 265 265 return 0; 266 266 }
+11 -3
drivers/misc/habanalabs/command_submission.c
··· 499 499 struct asic_fixed_properties *asic = &hdev->asic_prop; 500 500 struct hw_queue_properties *hw_queue_prop; 501 501 502 + /* This must be checked here to prevent out-of-bounds access to 503 + * hw_queues_props array 504 + */ 505 + if (chunk->queue_index >= HL_MAX_QUEUES) { 506 + dev_err(hdev->dev, "Queue index %d is invalid\n", 507 + chunk->queue_index); 508 + return -EINVAL; 509 + } 510 + 502 511 hw_queue_prop = &asic->hw_queues_props[chunk->queue_index]; 503 512 504 - if ((chunk->queue_index >= HL_MAX_QUEUES) || 505 - (hw_queue_prop->type == QUEUE_TYPE_NA)) { 506 - dev_err(hdev->dev, "Queue index %d is invalid\n", 513 + if (hw_queue_prop->type == QUEUE_TYPE_NA) { 514 + dev_err(hdev->dev, "Queue index %d is not applicable\n", 507 515 chunk->queue_index); 508 516 return -EINVAL; 509 517 }
+8 -15
drivers/misc/habanalabs/debugfs.c
··· 36 36 pkt.i2c_reg = i2c_reg; 37 37 38 38 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 39 - HL_DEVICE_TIMEOUT_USEC, (long *) val); 39 + 0, (long *) val); 40 40 41 41 if (rc) 42 42 dev_err(hdev->dev, "Failed to read from I2C, error %d\n", rc); ··· 63 63 pkt.value = cpu_to_le64(val); 64 64 65 65 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 66 - HL_DEVICE_TIMEOUT_USEC, NULL); 66 + 0, NULL); 67 67 68 68 if (rc) 69 69 dev_err(hdev->dev, "Failed to write to I2C, error %d\n", rc); ··· 87 87 pkt.value = cpu_to_le64(state); 88 88 89 89 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 90 - HL_DEVICE_TIMEOUT_USEC, NULL); 90 + 0, NULL); 91 91 92 92 if (rc) 93 93 dev_err(hdev->dev, "Failed to set LED %d, error %d\n", led, rc); ··· 981 981 if (*ppos) 982 982 return 0; 983 983 984 - sprintf(tmp_buf, "%d\n", hdev->clock_gating); 984 + sprintf(tmp_buf, "0x%llx\n", hdev->clock_gating_mask); 985 985 rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf, 986 986 strlen(tmp_buf) + 1); 987 987 ··· 993 993 { 994 994 struct hl_dbg_device_entry *entry = file_inode(f)->i_private; 995 995 struct hl_device *hdev = entry->hdev; 996 - u32 value; 996 + u64 value; 997 997 ssize_t rc; 998 998 999 999 if (atomic_read(&hdev->in_reset)) { ··· 1002 1002 return 0; 1003 1003 } 1004 1004 1005 - rc = kstrtouint_from_user(buf, count, 10, &value); 1005 + rc = kstrtoull_from_user(buf, count, 16, &value); 1006 1006 if (rc) 1007 1007 return rc; 1008 1008 1009 - if (value) { 1010 - hdev->clock_gating = 1; 1011 - if (hdev->asic_funcs->enable_clock_gating) 1012 - hdev->asic_funcs->enable_clock_gating(hdev); 1013 - } else { 1014 - if (hdev->asic_funcs->disable_clock_gating) 1015 - hdev->asic_funcs->disable_clock_gating(hdev); 1016 - hdev->clock_gating = 0; 1017 - } 1009 + hdev->clock_gating_mask = value; 1010 + hdev->asic_funcs->set_clock_gating(hdev); 1018 1011 1019 1012 return count; 1020 1013 }
+1 -1
drivers/misc/habanalabs/device.c
··· 608 608 hdev->in_debug = 0; 609 609 610 610 if (!hdev->hard_reset_pending) 611 - hdev->asic_funcs->enable_clock_gating(hdev); 611 + hdev->asic_funcs->set_clock_gating(hdev); 612 612 613 613 goto out; 614 614 }
+5 -5
drivers/misc/habanalabs/firmware_if.c
··· 61 61 pkt.ctl = cpu_to_le32(opcode << ARMCP_PKT_CTL_OPCODE_SHIFT); 62 62 63 63 return hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, 64 - sizeof(pkt), HL_DEVICE_TIMEOUT_USEC, NULL); 64 + sizeof(pkt), 0, NULL); 65 65 } 66 66 67 67 int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg, ··· 144 144 pkt.value = cpu_to_le64(event_type); 145 145 146 146 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 147 - HL_DEVICE_TIMEOUT_USEC, &result); 147 + 0, &result); 148 148 149 149 if (rc) 150 150 dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type); ··· 183 183 ARMCP_PKT_CTL_OPCODE_SHIFT); 184 184 185 185 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt, 186 - total_pkt_size, HL_DEVICE_TIMEOUT_USEC, &result); 186 + total_pkt_size, 0, &result); 187 187 188 188 if (rc) 189 189 dev_err(hdev->dev, "failed to unmask IRQ array\n"); ··· 204 204 test_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL); 205 205 206 206 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &test_pkt, 207 - sizeof(test_pkt), HL_DEVICE_TIMEOUT_USEC, &result); 207 + sizeof(test_pkt), 0, &result); 208 208 209 209 if (!rc) { 210 210 if (result != ARMCP_PACKET_FENCE_VAL) ··· 248 248 hb_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL); 249 249 250 250 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &hb_pkt, 251 - sizeof(hb_pkt), HL_DEVICE_TIMEOUT_USEC, &result); 251 + sizeof(hb_pkt), 0, &result); 252 252 253 253 if ((rc) || (result != ARMCP_PACKET_FENCE_VAL)) 254 254 rc = -EIO;
+84 -39
drivers/misc/habanalabs/gaudi/gaudi.c
··· 80 80 #define GAUDI_PLDM_QMAN0_TIMEOUT_USEC (HL_DEVICE_TIMEOUT_USEC * 30) 81 81 #define GAUDI_PLDM_TPC_KERNEL_WAIT_USEC (HL_DEVICE_TIMEOUT_USEC * 30) 82 82 #define GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC 1000000 /* 1s */ 83 + #define GAUDI_MSG_TO_CPU_TIMEOUT_USEC 4000000 /* 4s */ 83 84 84 85 #define GAUDI_QMAN0_FENCE_VAL 0x72E91AB9 85 86 ··· 99 98 100 99 #define GAUDI_ARB_WDT_TIMEOUT 0x1000000 101 100 101 + #define GAUDI_CLK_GATE_DEBUGFS_MASK (\ 102 + BIT(GAUDI_ENGINE_ID_MME_0) |\ 103 + BIT(GAUDI_ENGINE_ID_MME_2) |\ 104 + GENMASK_ULL(GAUDI_ENGINE_ID_TPC_7, GAUDI_ENGINE_ID_TPC_0)) 105 + 102 106 static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = { 103 107 "gaudi cq 0_0", "gaudi cq 0_1", "gaudi cq 0_2", "gaudi cq 0_3", 104 108 "gaudi cq 1_0", "gaudi cq 1_1", "gaudi cq 1_2", "gaudi cq 1_3", ··· 112 106 }; 113 107 114 108 static const u8 gaudi_dma_assignment[GAUDI_DMA_MAX] = { 115 - [GAUDI_PCI_DMA_1] = 0, 116 - [GAUDI_PCI_DMA_2] = 1, 117 - [GAUDI_PCI_DMA_3] = 5, 118 - [GAUDI_HBM_DMA_1] = 2, 119 - [GAUDI_HBM_DMA_2] = 3, 120 - [GAUDI_HBM_DMA_3] = 4, 121 - [GAUDI_HBM_DMA_4] = 6, 122 - [GAUDI_HBM_DMA_5] = 7 109 + [GAUDI_PCI_DMA_1] = GAUDI_ENGINE_ID_DMA_0, 110 + [GAUDI_PCI_DMA_2] = GAUDI_ENGINE_ID_DMA_1, 111 + [GAUDI_PCI_DMA_3] = GAUDI_ENGINE_ID_DMA_5, 112 + [GAUDI_HBM_DMA_1] = GAUDI_ENGINE_ID_DMA_2, 113 + [GAUDI_HBM_DMA_2] = GAUDI_ENGINE_ID_DMA_3, 114 + [GAUDI_HBM_DMA_3] = GAUDI_ENGINE_ID_DMA_4, 115 + [GAUDI_HBM_DMA_4] = GAUDI_ENGINE_ID_DMA_6, 116 + [GAUDI_HBM_DMA_5] = GAUDI_ENGINE_ID_DMA_7 123 117 }; 124 118 125 119 static const u8 gaudi_cq_assignment[NUMBER_OF_CMPLT_QUEUES] = { ··· 1825 1819 1826 1820 gaudi_init_rate_limiter(hdev); 1827 1821 1828 - gaudi_disable_clock_gating(hdev); 1822 + hdev->asic_funcs->disable_clock_gating(hdev); 1829 1823 1830 1824 for (tpc_id = 0, tpc_offset = 0; 1831 1825 tpc_id < TPC_NUMBER_OF_ENGINES; ··· 2537 2531 WREG32(mmTPC7_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT); 2538 2532 } 2539 2533 2540 - static void 
gaudi_enable_clock_gating(struct hl_device *hdev) 2534 + static void gaudi_set_clock_gating(struct hl_device *hdev) 2541 2535 { 2542 2536 struct gaudi_device *gaudi = hdev->asic_specific; 2543 2537 u32 qman_offset; 2544 2538 int i; 2545 - 2546 - if (!hdev->clock_gating) 2547 - return; 2548 - 2549 - if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) 2550 - return; 2551 2539 2552 2540 /* In case we are during debug session, don't enable the clock gate 2553 2541 * as it may interfere ··· 2549 2549 if (hdev->in_debug) 2550 2550 return; 2551 2551 2552 - for (i = 0, qman_offset = 0 ; i < PCI_DMA_NUMBER_OF_CHNLS ; i++) { 2552 + for (i = GAUDI_PCI_DMA_1, qman_offset = 0 ; i < GAUDI_HBM_DMA_1 ; i++) { 2553 + if (!(hdev->clock_gating_mask & 2554 + (BIT_ULL(gaudi_dma_assignment[i])))) 2555 + continue; 2556 + 2553 2557 qman_offset = gaudi_dma_assignment[i] * DMA_QMAN_OFFSET; 2554 2558 WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset, QMAN_CGM1_PWR_GATE_EN); 2555 2559 WREG32(mmDMA0_QM_CGM_CFG + qman_offset, 2556 2560 QMAN_UPPER_CP_CGM_PWR_GATE_EN); 2557 2561 } 2558 2562 2559 - for (; i < HBM_DMA_NUMBER_OF_CHNLS ; i++) { 2563 + for (i = GAUDI_HBM_DMA_1 ; i < GAUDI_DMA_MAX ; i++) { 2564 + if (!(hdev->clock_gating_mask & 2565 + (BIT_ULL(gaudi_dma_assignment[i])))) 2566 + continue; 2567 + 2560 2568 qman_offset = gaudi_dma_assignment[i] * DMA_QMAN_OFFSET; 2561 2569 WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset, QMAN_CGM1_PWR_GATE_EN); 2562 2570 WREG32(mmDMA0_QM_CGM_CFG + qman_offset, 2563 2571 QMAN_COMMON_CP_CGM_PWR_GATE_EN); 2564 2572 } 2565 2573 2566 - WREG32(mmMME0_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN); 2567 - WREG32(mmMME0_QM_CGM_CFG, 2568 - QMAN_COMMON_CP_CGM_PWR_GATE_EN); 2569 - WREG32(mmMME2_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN); 2570 - WREG32(mmMME2_QM_CGM_CFG, 2571 - QMAN_COMMON_CP_CGM_PWR_GATE_EN); 2574 + if (hdev->clock_gating_mask & (BIT_ULL(GAUDI_ENGINE_ID_MME_0))) { 2575 + WREG32(mmMME0_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN); 2576 + WREG32(mmMME0_QM_CGM_CFG, QMAN_COMMON_CP_CGM_PWR_GATE_EN); 
2577 + } 2578 + 2579 + if (hdev->clock_gating_mask & (BIT_ULL(GAUDI_ENGINE_ID_MME_2))) { 2580 + WREG32(mmMME2_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN); 2581 + WREG32(mmMME2_QM_CGM_CFG, QMAN_COMMON_CP_CGM_PWR_GATE_EN); 2582 + } 2572 2583 2573 2584 for (i = 0, qman_offset = 0 ; i < TPC_NUMBER_OF_ENGINES ; i++) { 2585 + if (!(hdev->clock_gating_mask & 2586 + (BIT_ULL(GAUDI_ENGINE_ID_TPC_0 + i)))) 2587 + continue; 2588 + 2574 2589 WREG32(mmTPC0_QM_CGM_CFG1 + qman_offset, 2575 2590 QMAN_CGM1_PWR_GATE_EN); 2576 2591 WREG32(mmTPC0_QM_CGM_CFG + qman_offset, ··· 2678 2663 gaudi_stop_hbm_dma_qmans(hdev); 2679 2664 gaudi_stop_pci_dma_qmans(hdev); 2680 2665 2681 - gaudi_disable_clock_gating(hdev); 2666 + hdev->asic_funcs->disable_clock_gating(hdev); 2682 2667 2683 2668 msleep(wait_timeout_ms); 2684 2669 ··· 3018 3003 3019 3004 gaudi_init_tpc_qmans(hdev); 3020 3005 3021 - gaudi_enable_clock_gating(hdev); 3006 + hdev->asic_funcs->set_clock_gating(hdev); 3022 3007 3023 3008 gaudi_enable_timestamp(hdev); 3024 3009 ··· 3127 3112 HW_CAP_HBM_DMA | HW_CAP_PLL | 3128 3113 HW_CAP_MMU | 3129 3114 HW_CAP_SRAM_SCRAMBLER | 3130 - HW_CAP_HBM_SCRAMBLER); 3115 + HW_CAP_HBM_SCRAMBLER | 3116 + HW_CAP_CLK_GATE); 3117 + 3131 3118 memset(gaudi->events_stat, 0, sizeof(gaudi->events_stat)); 3132 3119 } 3133 3120 ··· 3479 3462 *result = 0; 3480 3463 return 0; 3481 3464 } 3465 + 3466 + if (!timeout) 3467 + timeout = GAUDI_MSG_TO_CPU_TIMEOUT_USEC; 3482 3468 3483 3469 return hl_fw_send_cpu_message(hdev, GAUDI_QUEUE_ID_CPU_PQ, msg, len, 3484 3470 timeout, result); ··· 3885 3865 rc = -EPERM; 3886 3866 break; 3887 3867 3868 + case PACKET_WREG_BULK: 3869 + dev_err(hdev->dev, 3870 + "User not allowed to use WREG_BULK\n"); 3871 + rc = -EPERM; 3872 + break; 3873 + 3888 3874 case PACKET_LOAD_AND_EXE: 3889 3875 rc = gaudi_validate_load_and_exe_pkt(hdev, parser, 3890 3876 (struct packet_load_and_exe *) user_pkt); ··· 3906 3880 break; 3907 3881 3908 3882 case PACKET_WREG_32: 3909 - case PACKET_WREG_BULK: 3910 3883 case 
PACKET_MSG_LONG: 3911 3884 case PACKET_MSG_SHORT: 3912 3885 case PACKET_REPEAT: ··· 4546 4521 int rc = 0; 4547 4522 4548 4523 if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) { 4549 - if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) { 4524 + 4525 + if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) && 4526 + (hdev->clock_gating_mask & 4527 + GAUDI_CLK_GATE_DEBUGFS_MASK)) { 4528 + 4550 4529 dev_err_ratelimited(hdev->dev, 4551 4530 "Can't read register - clock gating is enabled!\n"); 4552 4531 rc = -EFAULT; 4553 4532 } else { 4554 4533 *val = RREG32(addr - CFG_BASE); 4555 4534 } 4535 + 4556 4536 } else if ((addr >= SRAM_BASE_ADDR) && 4557 4537 (addr < SRAM_BASE_ADDR + SRAM_BAR_SIZE)) { 4558 4538 *val = readl(hdev->pcie_bar[SRAM_BAR_ID] + ··· 4593 4563 int rc = 0; 4594 4564 4595 4565 if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) { 4596 - if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) { 4566 + 4567 + if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) && 4568 + (hdev->clock_gating_mask & 4569 + GAUDI_CLK_GATE_DEBUGFS_MASK)) { 4570 + 4597 4571 dev_err_ratelimited(hdev->dev, 4598 4572 "Can't write register - clock gating is enabled!\n"); 4599 4573 rc = -EFAULT; 4600 4574 } else { 4601 4575 WREG32(addr - CFG_BASE, val); 4602 4576 } 4577 + 4603 4578 } else if ((addr >= SRAM_BASE_ADDR) && 4604 4579 (addr < SRAM_BASE_ADDR + SRAM_BAR_SIZE)) { 4605 4580 writel(val, hdev->pcie_bar[SRAM_BAR_ID] + ··· 4640 4605 int rc = 0; 4641 4606 4642 4607 if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) { 4643 - if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) { 4608 + 4609 + if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) && 4610 + (hdev->clock_gating_mask & 4611 + GAUDI_CLK_GATE_DEBUGFS_MASK)) { 4612 + 4644 4613 dev_err_ratelimited(hdev->dev, 4645 4614 "Can't read register - clock gating is enabled!\n"); 4646 4615 rc = -EFAULT; ··· 4654 4615 4655 4616 *val = (((u64) val_h) << 32) | val_l; 4656 4617 } 4618 + 4657 4619 } else if ((addr >= 
SRAM_BASE_ADDR) && 4658 4620 (addr <= SRAM_BASE_ADDR + SRAM_BAR_SIZE - sizeof(u64))) { 4659 4621 *val = readq(hdev->pcie_bar[SRAM_BAR_ID] + ··· 4691 4651 int rc = 0; 4692 4652 4693 4653 if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) { 4694 - if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) { 4654 + 4655 + if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) && 4656 + (hdev->clock_gating_mask & 4657 + GAUDI_CLK_GATE_DEBUGFS_MASK)) { 4658 + 4695 4659 dev_err_ratelimited(hdev->dev, 4696 4660 "Can't write register - clock gating is enabled!\n"); 4697 4661 rc = -EFAULT; ··· 4704 4660 WREG32(addr + sizeof(u32) - CFG_BASE, 4705 4661 upper_32_bits(val)); 4706 4662 } 4663 + 4707 4664 } else if ((addr >= SRAM_BASE_ADDR) && 4708 4665 (addr <= SRAM_BASE_ADDR + SRAM_BAR_SIZE - sizeof(u64))) { 4709 4666 writeq(val, hdev->pcie_bar[SRAM_BAR_ID] + ··· 4926 4881 gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_ARUSER, asid); 4927 4882 gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_AWUSER, asid); 4928 4883 4929 - hdev->asic_funcs->enable_clock_gating(hdev); 4884 + hdev->asic_funcs->set_clock_gating(hdev); 4930 4885 4931 4886 mutex_unlock(&gaudi->clk_gate_mutex); 4932 4887 } ··· 5307 5262 } 5308 5263 5309 5264 if (disable_clock_gating) { 5310 - hdev->asic_funcs->enable_clock_gating(hdev); 5265 + hdev->asic_funcs->set_clock_gating(hdev); 5311 5266 mutex_unlock(&gaudi->clk_gate_mutex); 5312 5267 } 5313 5268 } ··· 5794 5749 /* Clear interrupts */ 5795 5750 WREG32(mmTPC0_CFG_TPC_INTR_CAUSE + tpc_offset, 0); 5796 5751 5797 - hdev->asic_funcs->enable_clock_gating(hdev); 5752 + hdev->asic_funcs->set_clock_gating(hdev); 5798 5753 5799 5754 mutex_unlock(&gaudi->clk_gate_mutex); 5800 5755 ··· 6310 6265 if (s) 6311 6266 seq_puts(s, "\n"); 6312 6267 6313 - hdev->asic_funcs->enable_clock_gating(hdev); 6268 + hdev->asic_funcs->set_clock_gating(hdev); 6314 6269 6315 6270 mutex_unlock(&gaudi->clk_gate_mutex); 6316 6271 ··· 6411 6366 dev_err(hdev->dev, 6412 6367 "Timeout 
while waiting for TPC%d icache prefetch\n", 6413 6368 tpc_id); 6414 - hdev->asic_funcs->enable_clock_gating(hdev); 6369 + hdev->asic_funcs->set_clock_gating(hdev); 6415 6370 mutex_unlock(&gaudi->clk_gate_mutex); 6416 6371 return -EIO; 6417 6372 } ··· 6440 6395 1000, 6441 6396 kernel_timeout); 6442 6397 6443 - hdev->asic_funcs->enable_clock_gating(hdev); 6398 + hdev->asic_funcs->set_clock_gating(hdev); 6444 6399 mutex_unlock(&gaudi->clk_gate_mutex); 6445 6400 6446 6401 if (rc) { ··· 6781 6736 .mmu_invalidate_cache = gaudi_mmu_invalidate_cache, 6782 6737 .mmu_invalidate_cache_range = gaudi_mmu_invalidate_cache_range, 6783 6738 .send_heartbeat = gaudi_send_heartbeat, 6784 - .enable_clock_gating = gaudi_enable_clock_gating, 6739 + .set_clock_gating = gaudi_set_clock_gating, 6785 6740 .disable_clock_gating = gaudi_disable_clock_gating, 6786 6741 .debug_coresight = gaudi_debug_coresight, 6787 6742 .is_device_idle = gaudi_is_device_idle,
+12 -8
drivers/misc/habanalabs/goya/goya.c
··· 88 88 #define GOYA_PLDM_MMU_TIMEOUT_USEC (MMU_CONFIG_TIMEOUT_USEC * 100) 89 89 #define GOYA_PLDM_QMAN0_TIMEOUT_USEC (HL_DEVICE_TIMEOUT_USEC * 30) 90 90 #define GOYA_BOOT_FIT_REQ_TIMEOUT_USEC 1000000 /* 1s */ 91 + #define GOYA_MSG_TO_CPU_TIMEOUT_USEC 4000000 /* 4s */ 91 92 92 93 #define GOYA_QMAN0_FENCE_VAL 0xD169B243 93 94 ··· 2831 2830 return 0; 2832 2831 } 2833 2832 2833 + if (!timeout) 2834 + timeout = GOYA_MSG_TO_CPU_TIMEOUT_USEC; 2835 + 2834 2836 return hl_fw_send_cpu_message(hdev, GOYA_QUEUE_ID_CPU_PQ, msg, len, 2835 2837 timeout, result); 2836 2838 } ··· 4435 4431 pkt->armcp_pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY << 4436 4432 ARMCP_PKT_CTL_OPCODE_SHIFT); 4437 4433 4438 - rc = goya_send_cpu_message(hdev, (u32 *) pkt, total_pkt_size, 4439 - HL_DEVICE_TIMEOUT_USEC, &result); 4434 + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt, 4435 + total_pkt_size, 0, &result); 4440 4436 4441 4437 if (rc) 4442 4438 dev_err(hdev->dev, "failed to unmask IRQ array\n"); ··· 4468 4464 ARMCP_PKT_CTL_OPCODE_SHIFT); 4469 4465 pkt.value = cpu_to_le64(event_type); 4470 4466 4471 - rc = goya_send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 4472 - HL_DEVICE_TIMEOUT_USEC, &result); 4467 + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 4468 + 0, &result); 4473 4469 4474 4470 if (rc) 4475 4471 dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type); ··· 5032 5028 return 0; 5033 5029 } 5034 5030 5035 - static void goya_enable_clock_gating(struct hl_device *hdev) 5031 + static void goya_set_clock_gating(struct hl_device *hdev) 5036 5032 { 5037 - 5033 + /* clock gating not supported in Goya */ 5038 5034 } 5039 5035 5040 5036 static void goya_disable_clock_gating(struct hl_device *hdev) 5041 5037 { 5042 - 5038 + /* clock gating not supported in Goya */ 5043 5039 } 5044 5040 5045 5041 static bool goya_is_device_idle(struct hl_device *hdev, u32 *mask, ··· 5263 5259 .mmu_invalidate_cache = goya_mmu_invalidate_cache, 5264 5260 
.mmu_invalidate_cache_range = goya_mmu_invalidate_cache_range, 5265 5261 .send_heartbeat = goya_send_heartbeat, 5266 - .enable_clock_gating = goya_enable_clock_gating, 5262 + .set_clock_gating = goya_set_clock_gating, 5267 5263 .disable_clock_gating = goya_disable_clock_gating, 5268 5264 .debug_coresight = goya_debug_coresight, 5269 5265 .is_device_idle = goya_is_device_idle,
+13 -6
drivers/misc/habanalabs/habanalabs.h
··· 578 578 * @mmu_invalidate_cache_range: flush specific MMU STLB cache lines with 579 579 * ASID-VA-size mask. 580 580 * @send_heartbeat: send is-alive packet to ArmCP and verify response. 581 - * @enable_clock_gating: enable clock gating for reducing power consumption. 582 - * @disable_clock_gating: disable clock for accessing registers on HBW. 581 + * @set_clock_gating: enable/disable clock gating per engine according to 582 + * clock gating mask in hdev 583 + * @disable_clock_gating: disable clock gating completely 583 584 * @debug_coresight: perform certain actions on Coresight for debugging. 584 585 * @is_device_idle: return true if device is idle, false otherwise. 585 586 * @soft_reset_late_init: perform certain actions needed after soft reset. ··· 588 587 * @hw_queues_unlock: release H/W queues lock. 589 588 * @get_pci_id: retrieve PCI ID. 590 589 * @get_eeprom_data: retrieve EEPROM data from F/W. 591 - * @send_cpu_message: send buffer to ArmCP. 590 + * @send_cpu_message: send message to F/W. If the message is timedout, the 591 + * driver will eventually reset the device. The timeout can 592 + * be determined by the calling function or it can be 0 and 593 + * then the timeout is the default timeout for the specific 594 + * ASIC 592 595 * @get_hw_state: retrieve the H/W state 593 596 * @pci_bars_map: Map PCI BARs. 594 597 * @set_dram_bar_base: Set DRAM BAR to map specific device address. 
Returns ··· 685 680 int (*mmu_invalidate_cache_range)(struct hl_device *hdev, bool is_hard, 686 681 u32 asid, u64 va, u64 size); 687 682 int (*send_heartbeat)(struct hl_device *hdev); 688 - void (*enable_clock_gating)(struct hl_device *hdev); 683 + void (*set_clock_gating)(struct hl_device *hdev); 689 684 void (*disable_clock_gating)(struct hl_device *hdev); 690 685 int (*debug_coresight)(struct hl_device *hdev, void *data); 691 686 bool (*is_device_idle)(struct hl_device *hdev, u32 *mask, ··· 1403 1398 * @max_power: the max power of the device, as configured by the sysadmin. This 1404 1399 * value is saved so in case of hard-reset, the driver will restore 1405 1400 * this value and update the F/W after the re-initialization 1401 + * @clock_gating_mask: is clock gating enabled. bitmask that represents the 1402 + * different engines. See debugfs-driver-habanalabs for 1403 + * details. 1406 1404 * @in_reset: is device in reset flow. 1407 1405 * @curr_pll_profile: current PLL profile. 1408 1406 * @cs_active_cnt: number of active command submissions on this device (active ··· 1433 1425 * @init_done: is the initialization of the device done. 1434 1426 * @mmu_enable: is MMU enabled. 1435 1427 * @mmu_huge_page_opt: is MMU huge pages optimization enabled. 1436 - * @clock_gating: is clock gating enabled. 1437 1428 * @device_cpu_disabled: is the device CPU disabled (due to timeouts) 1438 1429 * @dma_mask: the dma mask that was set for this device 1439 1430 * @in_debug: is device under debug. This, together with fpriv_list, enforces ··· 1500 1493 atomic64_t dram_used_mem; 1501 1494 u64 timeout_jiffies; 1502 1495 u64 max_power; 1496 + u64 clock_gating_mask; 1503 1497 atomic_t in_reset; 1504 1498 enum hl_pll_frequency curr_pll_profile; 1505 1499 int cs_active_cnt; ··· 1522 1514 u8 dram_default_page_mapping; 1523 1515 u8 pmmu_huge_range; 1524 1516 u8 init_done; 1525 - u8 clock_gating; 1526 1517 u8 device_cpu_disabled; 1527 1518 u8 dma_mask; 1528 1519 u8 in_debug;
+1 -1
drivers/misc/habanalabs/habanalabs_drv.c
··· 232 232 hdev->fw_loading = 1; 233 233 hdev->cpu_queues_enable = 1; 234 234 hdev->heartbeat = 1; 235 - hdev->clock_gating = 1; 235 + hdev->clock_gating_mask = ULONG_MAX; 236 236 237 237 hdev->reset_pcilink = 0; 238 238 hdev->axi_drain = 0;
+9 -10
drivers/misc/habanalabs/hwmon.c
··· 10 10 #include <linux/pci.h> 11 11 #include <linux/hwmon.h> 12 12 13 - #define SENSORS_PKT_TIMEOUT 1000000 /* 1s */ 14 13 #define HWMON_NR_SENSOR_TYPES (hwmon_pwm + 1) 15 14 16 15 int hl_build_hwmon_channel_info(struct hl_device *hdev, ··· 322 323 pkt.type = __cpu_to_le16(attr); 323 324 324 325 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 325 - SENSORS_PKT_TIMEOUT, value); 326 + 0, value); 326 327 327 328 if (rc) { 328 329 dev_err(hdev->dev, ··· 349 350 pkt.value = __cpu_to_le64(value); 350 351 351 352 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 352 - SENSORS_PKT_TIMEOUT, NULL); 353 + 0, NULL); 353 354 354 355 if (rc) 355 356 dev_err(hdev->dev, ··· 373 374 pkt.type = __cpu_to_le16(attr); 374 375 375 376 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 376 - SENSORS_PKT_TIMEOUT, value); 377 + 0, value); 377 378 378 379 if (rc) { 379 380 dev_err(hdev->dev, ··· 399 400 pkt.type = __cpu_to_le16(attr); 400 401 401 402 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 402 - SENSORS_PKT_TIMEOUT, value); 403 + 0, value); 403 404 404 405 if (rc) { 405 406 dev_err(hdev->dev, ··· 425 426 pkt.type = __cpu_to_le16(attr); 426 427 427 428 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 428 - SENSORS_PKT_TIMEOUT, value); 429 + 0, value); 429 430 430 431 if (rc) { 431 432 dev_err(hdev->dev, ··· 451 452 pkt.type = __cpu_to_le16(attr); 452 453 453 454 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 454 - SENSORS_PKT_TIMEOUT, value); 455 + 0, value); 455 456 456 457 if (rc) { 457 458 dev_err(hdev->dev, ··· 478 479 pkt.value = cpu_to_le64(value); 479 480 480 481 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 481 - SENSORS_PKT_TIMEOUT, NULL); 482 + 0, NULL); 482 483 483 484 if (rc) 484 485 dev_err(hdev->dev, ··· 501 502 pkt.value = __cpu_to_le64(value); 502 503 503 504 rc = 
hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 504 - SENSORS_PKT_TIMEOUT, NULL); 505 + 0, NULL); 505 506 506 507 if (rc) 507 508 dev_err(hdev->dev, ··· 526 527 pkt.value = __cpu_to_le64(value); 527 528 528 529 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 529 - SENSORS_PKT_TIMEOUT, NULL); 530 + 0, NULL); 530 531 531 532 if (rc) 532 533 dev_err(hdev->dev,
+4 -7
drivers/misc/habanalabs/sysfs.c
··· 9 9 10 10 #include <linux/pci.h> 11 11 12 - #define SET_CLK_PKT_TIMEOUT 1000000 /* 1s */ 13 - #define SET_PWR_PKT_TIMEOUT 1000000 /* 1s */ 14 - 15 12 long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr) 16 13 { 17 14 struct armcp_packet pkt; ··· 26 29 pkt.pll_index = cpu_to_le32(pll_index); 27 30 28 31 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 29 - SET_CLK_PKT_TIMEOUT, &result); 32 + 0, &result); 30 33 31 34 if (rc) { 32 35 dev_err(hdev->dev, ··· 51 54 pkt.value = cpu_to_le64(freq); 52 55 53 56 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 54 - SET_CLK_PKT_TIMEOUT, NULL); 57 + 0, NULL); 55 58 56 59 if (rc) 57 60 dev_err(hdev->dev, ··· 71 74 ARMCP_PKT_CTL_OPCODE_SHIFT); 72 75 73 76 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 74 - SET_PWR_PKT_TIMEOUT, &result); 77 + 0, &result); 75 78 76 79 if (rc) { 77 80 dev_err(hdev->dev, "Failed to get max power, error %d\n", rc); ··· 93 96 pkt.value = cpu_to_le64(value); 94 97 95 98 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 96 - SET_PWR_PKT_TIMEOUT, NULL); 99 + 0, NULL); 97 100 98 101 if (rc) 99 102 dev_err(hdev->dev, "Failed to set max power, error %d\n", rc);
+1 -2
drivers/misc/mei/bus.c
··· 745 745 746 746 mei_cl_bus_module_put(cldev); 747 747 module_put(THIS_MODULE); 748 - dev->driver = NULL; 749 - return ret; 750 748 749 + return ret; 751 750 } 752 751 753 752 static ssize_t name_show(struct device *dev, struct device_attribute *a,
+4 -2
drivers/mmc/host/meson-gx-mmc.c
··· 1146 1146 1147 1147 mmc->caps |= MMC_CAP_CMD23; 1148 1148 if (host->dram_access_quirk) { 1149 + /* Limit segments to 1 due to low available sram memory */ 1150 + mmc->max_segs = 1; 1149 1151 /* Limit to the available sram memory */ 1150 - mmc->max_segs = SD_EMMC_SRAM_DATA_BUF_LEN / mmc->max_blk_size; 1151 - mmc->max_blk_count = mmc->max_segs; 1152 + mmc->max_blk_count = SD_EMMC_SRAM_DATA_BUF_LEN / 1153 + mmc->max_blk_size; 1152 1154 } else { 1153 1155 mmc->max_blk_count = CMD_CFG_LENGTH_MASK; 1154 1156 mmc->max_segs = SD_EMMC_DESC_BUF_LEN /
+1 -1
drivers/mmc/host/owl-mmc.c
··· 689 689 static struct platform_driver owl_mmc_driver = { 690 690 .driver = { 691 691 .name = "owl_mmc", 692 - .of_match_table = of_match_ptr(owl_mmc_of_match), 692 + .of_match_table = owl_mmc_of_match, 693 693 }, 694 694 .probe = owl_mmc_probe, 695 695 .remove = owl_mmc_remove,
+3 -2
drivers/mmc/host/sdhci-msm.c
··· 618 618 config &= ~CORE_CLK_PWRSAVE; 619 619 writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec); 620 620 621 - config = msm_host->dll_config; 622 - writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config); 621 + if (msm_host->dll_config) 622 + writel_relaxed(msm_host->dll_config, 623 + host->ioaddr + msm_offset->core_dll_config); 623 624 624 625 if (msm_host->use_14lpp_dll_reset) { 625 626 config = readl_relaxed(host->ioaddr +
+1 -1
drivers/mmc/host/sdhci-of-aspeed.c
··· 68 68 if (WARN_ON(clock > host->max_clk)) 69 69 clock = host->max_clk; 70 70 71 - for (div = 1; div < 256; div *= 2) { 71 + for (div = 2; div < 256; div *= 2) { 72 72 if ((parent / div) <= clock) 73 73 break; 74 74 }
+2 -2
drivers/mtd/mtdcore.c
··· 1273 1273 return -EROFS; 1274 1274 if (!len) 1275 1275 return 0; 1276 - if (!mtd->oops_panic_write) 1277 - mtd->oops_panic_write = true; 1276 + if (!master->oops_panic_write) 1277 + master->oops_panic_write = true; 1278 1278 1279 1279 return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len, 1280 1280 retlen, buf);
+1 -1
drivers/mtd/nand/raw/nandsim.c
··· 1761 1761 1762 1762 NS_DBG("switch_state: operation is unknown, try to find it\n"); 1763 1763 1764 - if (!ns_find_operation(ns, 0)) 1764 + if (ns_find_operation(ns, 0)) 1765 1765 return; 1766 1766 1767 1767 if ((ns->state & ACTION_MASK) &&
+1 -1
drivers/mtd/nand/raw/xway_nand.c
··· 224 224 struct nand_chip *chip = &data->chip; 225 225 int ret; 226 226 227 - ret = mtd_device_unregister(mtd); 227 + ret = mtd_device_unregister(nand_to_mtd(chip)); 228 228 WARN_ON(ret); 229 229 nand_cleanup(chip); 230 230
+7 -3
drivers/net/bonding/bond_main.c
··· 5053 5053 bond_dev->rtnl_link_ops = &bond_link_ops; 5054 5054 5055 5055 res = register_netdevice(bond_dev); 5056 + if (res < 0) { 5057 + free_netdev(bond_dev); 5058 + rtnl_unlock(); 5059 + 5060 + return res; 5061 + } 5056 5062 5057 5063 netif_carrier_off(bond_dev); 5058 5064 5059 5065 bond_work_init_all(bond); 5060 5066 5061 5067 rtnl_unlock(); 5062 - if (res < 0) 5063 - free_netdev(bond_dev); 5064 - return res; 5068 + return 0; 5065 5069 } 5066 5070 5067 5071 static int __net_init bond_net_init(struct net *net)
+1 -2
drivers/net/bonding/bond_netlink.c
··· 456 456 return err; 457 457 458 458 err = register_netdevice(bond_dev); 459 - 460 - netif_carrier_off(bond_dev); 461 459 if (!err) { 462 460 struct bonding *bond = netdev_priv(bond_dev); 463 461 462 + netif_carrier_off(bond_dev); 464 463 bond_work_init_all(bond); 465 464 } 466 465
+3
drivers/net/dsa/microchip/ksz8795.c
··· 1268 1268 return -ENOMEM; 1269 1269 } 1270 1270 1271 + /* set the real number of ports */ 1272 + dev->ds->num_ports = dev->port_cnt; 1273 + 1271 1274 return 0; 1272 1275 } 1273 1276
+26 -19
drivers/net/dsa/microchip/ksz9477.c
··· 974 974 PORT_MIRROR_SNIFFER, false); 975 975 } 976 976 977 - static void ksz9477_phy_setup(struct ksz_device *dev, int port, 978 - struct phy_device *phy) 979 - { 980 - /* Only apply to port with PHY. */ 981 - if (port >= dev->phy_port_cnt) 982 - return; 983 - 984 - /* The MAC actually cannot run in 1000 half-duplex mode. */ 985 - phy_remove_link_mode(phy, 986 - ETHTOOL_LINK_MODE_1000baseT_Half_BIT); 987 - 988 - /* PHY does not support gigabit. */ 989 - if (!(dev->features & GBIT_SUPPORT)) 990 - phy_remove_link_mode(phy, 991 - ETHTOOL_LINK_MODE_1000baseT_Full_BIT); 992 - } 993 - 994 977 static bool ksz9477_get_gbit(struct ksz_device *dev, u8 data) 995 978 { 996 979 bool gbit; ··· 1571 1588 return -ENOMEM; 1572 1589 } 1573 1590 1591 + /* set the real number of ports */ 1592 + dev->ds->num_ports = dev->port_cnt; 1593 + 1574 1594 return 0; 1575 1595 } 1576 1596 ··· 1586 1600 .get_port_addr = ksz9477_get_port_addr, 1587 1601 .cfg_port_member = ksz9477_cfg_port_member, 1588 1602 .flush_dyn_mac_table = ksz9477_flush_dyn_mac_table, 1589 - .phy_setup = ksz9477_phy_setup, 1590 1603 .port_setup = ksz9477_port_setup, 1591 1604 .r_mib_cnt = ksz9477_r_mib_cnt, 1592 1605 .r_mib_pkt = ksz9477_r_mib_pkt, ··· 1599 1614 1600 1615 int ksz9477_switch_register(struct ksz_device *dev) 1601 1616 { 1602 - return ksz_switch_register(dev, &ksz9477_dev_ops); 1617 + int ret, i; 1618 + struct phy_device *phydev; 1619 + 1620 + ret = ksz_switch_register(dev, &ksz9477_dev_ops); 1621 + if (ret) 1622 + return ret; 1623 + 1624 + for (i = 0; i < dev->phy_port_cnt; ++i) { 1625 + if (!dsa_is_user_port(dev->ds, i)) 1626 + continue; 1627 + 1628 + phydev = dsa_to_port(dev->ds, i)->slave->phydev; 1629 + 1630 + /* The MAC actually cannot run in 1000 half-duplex mode. */ 1631 + phy_remove_link_mode(phydev, 1632 + ETHTOOL_LINK_MODE_1000baseT_Half_BIT); 1633 + 1634 + /* PHY does not support gigabit. 
*/ 1635 + if (!(dev->features & GBIT_SUPPORT)) 1636 + phy_remove_link_mode(phydev, 1637 + ETHTOOL_LINK_MODE_1000baseT_Full_BIT); 1638 + } 1639 + return ret; 1603 1640 } 1604 1641 EXPORT_SYMBOL(ksz9477_switch_register); 1605 1642
+1
drivers/net/dsa/microchip/ksz9477_i2c.c
··· 79 79 static const struct of_device_id ksz9477_dt_ids[] = { 80 80 { .compatible = "microchip,ksz9477" }, 81 81 { .compatible = "microchip,ksz9897" }, 82 + { .compatible = "microchip,ksz9893" }, 82 83 { .compatible = "microchip,ksz9567" }, 83 84 {}, 84 85 };
-2
drivers/net/dsa/microchip/ksz_common.c
··· 358 358 359 359 /* setup slave port */ 360 360 dev->dev_ops->port_setup(dev, port, false); 361 - if (dev->dev_ops->phy_setup) 362 - dev->dev_ops->phy_setup(dev, port, phy); 363 361 364 362 /* port_stp_state_set() will be called after to enable the port so 365 363 * there is no need to do anything.
-2
drivers/net/dsa/microchip/ksz_common.h
··· 119 119 u32 (*get_port_addr)(int port, int offset); 120 120 void (*cfg_port_member)(struct ksz_device *dev, int port, u8 member); 121 121 void (*flush_dyn_mac_table)(struct ksz_device *dev, int port); 122 - void (*phy_setup)(struct ksz_device *dev, int port, 123 - struct phy_device *phy); 124 122 void (*port_cleanup)(struct ksz_device *dev, int port); 125 123 void (*port_setup)(struct ksz_device *dev, int port, bool cpu_port); 126 124 void (*r_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
+19 -3
drivers/net/dsa/mv88e6xxx/chip.c
··· 664 664 const struct phylink_link_state *state) 665 665 { 666 666 struct mv88e6xxx_chip *chip = ds->priv; 667 + struct mv88e6xxx_port *p; 667 668 int err; 669 + 670 + p = &chip->ports[port]; 668 671 669 672 /* FIXME: is this the correct test? If we're in fixed mode on an 670 673 * internal port, why should we process this any different from ··· 678 675 return; 679 676 680 677 mv88e6xxx_reg_lock(chip); 681 - /* FIXME: should we force the link down here - but if we do, how 682 - * do we restore the link force/unforce state? The driver layering 683 - * gets in the way. 678 + /* In inband mode, the link may come up at any time while the link 679 + * is not forced down. Force the link down while we reconfigure the 680 + * interface mode. 684 681 */ 682 + if (mode == MLO_AN_INBAND && p->interface != state->interface && 683 + chip->info->ops->port_set_link) 684 + chip->info->ops->port_set_link(chip, port, LINK_FORCED_DOWN); 685 + 685 686 err = mv88e6xxx_port_config_interface(chip, port, state->interface); 686 687 if (err && err != -EOPNOTSUPP) 687 688 goto err_unlock; ··· 697 690 */ 698 691 if (err > 0) 699 692 err = 0; 693 + 694 + /* Undo the forced down state above after completing configuration 695 + * irrespective of its state on entry, which allows the link to come up. 696 + */ 697 + if (mode == MLO_AN_INBAND && p->interface != state->interface && 698 + chip->info->ops->port_set_link) 699 + chip->info->ops->port_set_link(chip, port, LINK_UNFORCED); 700 + 701 + p->interface = state->interface; 700 702 701 703 err_unlock: 702 704 mv88e6xxx_reg_unlock(chip);
+1
drivers/net/dsa/mv88e6xxx/chip.h
··· 232 232 u64 atu_full_violation; 233 233 u64 vtu_member_violation; 234 234 u64 vtu_miss_violation; 235 + phy_interface_t interface; 235 236 u8 cmode; 236 237 bool mirror_ingress; 237 238 bool mirror_egress;
+1
drivers/net/ethernet/aquantia/atlantic/aq_hw.h
··· 64 64 u8 rx_rings; 65 65 bool flow_control; 66 66 bool is_64_dma; 67 + u32 quirks; 67 68 u32 priv_data_len; 68 69 }; 69 70
+9
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
··· 415 415 self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_TP) { 416 416 self->aq_hw->phy_id = HW_ATL_PHY_ID_MAX; 417 417 err = aq_phy_init(self->aq_hw); 418 + 419 + /* Disable the PTP on NICs where it's known to cause datapath 420 + * problems. 421 + * Ideally this should have been done by PHY provisioning, but 422 + * many units have been shipped with enabled PTP block already. 423 + */ 424 + if (self->aq_nic_cfg.aq_hw_caps->quirks & AQ_NIC_QUIRK_BAD_PTP) 425 + if (self->aq_hw->phy_id != HW_ATL_PHY_ID_MAX) 426 + aq_phy_disable_ptp(self->aq_hw); 418 427 } 419 428 420 429 for (i = 0U; i < self->aq_vecs; i++) {
+2
drivers/net/ethernet/aquantia/atlantic/aq_nic.h
··· 81 81 #define AQ_NIC_FLAG_ERR_UNPLUG 0x40000000U 82 82 #define AQ_NIC_FLAG_ERR_HW 0x80000000U 83 83 84 + #define AQ_NIC_QUIRK_BAD_PTP BIT(0) 85 + 84 86 #define AQ_NIC_WOL_MODES (WAKE_MAGIC |\ 85 87 WAKE_PHY) 86 88
+27 -2
drivers/net/ethernet/aquantia/atlantic/aq_phy.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 - /* aQuantia Corporation Network Driver 3 - * Copyright (C) 2018-2019 aQuantia Corporation. All rights reserved 2 + /* Atlantic Network Driver 3 + * 4 + * Copyright (C) 2018-2019 aQuantia Corporation 5 + * Copyright (C) 2019-2020 Marvell International Ltd. 4 6 */ 5 7 6 8 #include "aq_phy.h" 9 + 10 + #define HW_ATL_PTP_DISABLE_MSK BIT(10) 7 11 8 12 bool aq_mdio_busy_wait(struct aq_hw_s *aq_hw) 9 13 { ··· 148 144 } 149 145 150 146 return true; 147 + } 148 + 149 + void aq_phy_disable_ptp(struct aq_hw_s *aq_hw) 150 + { 151 + static const u16 ptp_registers[] = { 152 + 0x031e, 153 + 0x031d, 154 + 0x031c, 155 + 0x031b, 156 + }; 157 + u16 val; 158 + int i; 159 + 160 + for (i = 0; i < ARRAY_SIZE(ptp_registers); i++) { 161 + val = aq_phy_read_reg(aq_hw, MDIO_MMD_VEND1, 162 + ptp_registers[i]); 163 + 164 + aq_phy_write_reg(aq_hw, MDIO_MMD_VEND1, 165 + ptp_registers[i], 166 + val & ~HW_ATL_PTP_DISABLE_MSK); 167 + } 151 168 }
+6 -2
drivers/net/ethernet/aquantia/atlantic/aq_phy.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0-only */ 2 - /* aQuantia Corporation Network Driver 3 - * Copyright (C) 2018-2019 aQuantia Corporation. All rights reserved 2 + /* Atlantic Network Driver 3 + * 4 + * Copyright (C) 2018-2019 aQuantia Corporation 5 + * Copyright (C) 2019-2020 Marvell International Ltd. 4 6 */ 5 7 6 8 #ifndef AQ_PHY_H ··· 30 28 bool aq_phy_init_phy_id(struct aq_hw_s *aq_hw); 31 29 32 30 bool aq_phy_init(struct aq_hw_s *aq_hw); 31 + 32 + void aq_phy_disable_ptp(struct aq_hw_s *aq_hw); 33 33 34 34 #endif /* AQ_PHY_H */
+25 -1
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
··· 93 93 AQ_NIC_RATE_100M, 94 94 }; 95 95 96 + const struct aq_hw_caps_s hw_atl_b0_caps_aqc111 = { 97 + DEFAULT_B0_BOARD_BASIC_CAPABILITIES, 98 + .media_type = AQ_HW_MEDIA_TYPE_TP, 99 + .link_speed_msk = AQ_NIC_RATE_5G | 100 + AQ_NIC_RATE_2G5 | 101 + AQ_NIC_RATE_1G | 102 + AQ_NIC_RATE_100M, 103 + .quirks = AQ_NIC_QUIRK_BAD_PTP, 104 + }; 105 + 106 + const struct aq_hw_caps_s hw_atl_b0_caps_aqc112 = { 107 + DEFAULT_B0_BOARD_BASIC_CAPABILITIES, 108 + .media_type = AQ_HW_MEDIA_TYPE_TP, 109 + .link_speed_msk = AQ_NIC_RATE_2G5 | 110 + AQ_NIC_RATE_1G | 111 + AQ_NIC_RATE_100M, 112 + .quirks = AQ_NIC_QUIRK_BAD_PTP, 113 + }; 114 + 96 115 static int hw_atl_b0_hw_reset(struct aq_hw_s *self) 97 116 { 98 117 int err = 0; ··· 373 354 374 355 /* WSP, if min_rate is set for at least one TC. 375 356 * RR otherwise. 357 + * 358 + * NB! MAC FW sets arb mode itself if PTP is enabled. We shouldn't 359 + * overwrite it here in that case. 376 360 */ 377 - hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, min_rate_msk ? 1U : 0U); 361 + if (!nic_cfg->is_ptp) 362 + hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, min_rate_msk ? 1U : 0U); 363 + 378 364 /* Data TC Arbiter takes precedence over Descriptor TC Arbiter, 379 365 * leave Descriptor TC Arbiter as RR. 380 366 */
+4 -6
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
··· 18 18 extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc107; 19 19 extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc108; 20 20 extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc109; 21 - 22 - #define hw_atl_b0_caps_aqc111 hw_atl_b0_caps_aqc108 23 - #define hw_atl_b0_caps_aqc112 hw_atl_b0_caps_aqc109 21 + extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc111; 22 + extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc112; 24 23 25 24 #define hw_atl_b0_caps_aqc100s hw_atl_b0_caps_aqc100 26 25 #define hw_atl_b0_caps_aqc107s hw_atl_b0_caps_aqc107 27 26 #define hw_atl_b0_caps_aqc108s hw_atl_b0_caps_aqc108 28 27 #define hw_atl_b0_caps_aqc109s hw_atl_b0_caps_aqc109 29 - 30 - #define hw_atl_b0_caps_aqc111s hw_atl_b0_caps_aqc108 31 - #define hw_atl_b0_caps_aqc112s hw_atl_b0_caps_aqc109 28 + #define hw_atl_b0_caps_aqc111s hw_atl_b0_caps_aqc111 29 + #define hw_atl_b0_caps_aqc112s hw_atl_b0_caps_aqc112 32 30 33 31 extern const struct aq_hw_ops hw_atl_ops_b0; 34 32
+2 -2
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
··· 1700 1700 for (i = 0; i < 4; ++i) 1701 1701 aq_hw_write_reg(aq_hw, 1702 1702 HW_ATL_RPF_L3_SRCA_ADR(location + i), 1703 - ipv6_src[i]); 1703 + ipv6_src[3 - i]); 1704 1704 } 1705 1705 1706 1706 void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location, ··· 1711 1711 for (i = 0; i < 4; ++i) 1712 1712 aq_hw_write_reg(aq_hw, 1713 1713 HW_ATL_RPF_L3_DSTA_ADR(location + i), 1714 - ipv6_dest[i]); 1714 + ipv6_dest[3 - i]); 1715 1715 } 1716 1716 1717 1717 u32 hw_atl_sem_ram_get(struct aq_hw_s *self)
+1 -1
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
··· 1360 1360 */ 1361 1361 1362 1362 /* Register address for bitfield pif_rpf_l3_da0_i[31:0] */ 1363 - #define HW_ATL_RPF_L3_DSTA_ADR(filter) (0x000053B0 + (filter) * 0x4) 1363 + #define HW_ATL_RPF_L3_DSTA_ADR(filter) (0x000053D0 + (filter) * 0x4) 1364 1364 /* Bitmask for bitfield l3_da0[1F:0] */ 1365 1365 #define HW_ATL_RPF_L3_DSTA_MSK 0xFFFFFFFFu 1366 1366 /* Inverted bitmask for bitfield l3_da0[1F:0] */
+2 -1
drivers/net/ethernet/atheros/ag71xx.c
··· 556 556 ag->mdio_reset = of_reset_control_get_exclusive(np, "mdio"); 557 557 if (IS_ERR(ag->mdio_reset)) { 558 558 netif_err(ag, probe, ndev, "Failed to get reset mdio.\n"); 559 - return PTR_ERR(ag->mdio_reset); 559 + err = PTR_ERR(ag->mdio_reset); 560 + goto mdio_err_put_clk; 560 561 } 561 562 562 563 mii_bus->name = "ag71xx_mdio";
+15 -7
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 3418 3418 */ 3419 3419 void bnxt_set_ring_params(struct bnxt *bp) 3420 3420 { 3421 - u32 ring_size, rx_size, rx_space; 3421 + u32 ring_size, rx_size, rx_space, max_rx_cmpl; 3422 3422 u32 agg_factor = 0, agg_ring_size = 0; 3423 3423 3424 3424 /* 8 for CRC and VLAN */ ··· 3474 3474 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT); 3475 3475 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1; 3476 3476 3477 - ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size; 3477 + max_rx_cmpl = bp->rx_ring_size; 3478 + /* MAX TPA needs to be added because TPA_START completions are 3479 + * immediately recycled, so the TPA completions are not bound by 3480 + * the RX ring size. 3481 + */ 3482 + if (bp->flags & BNXT_FLAG_TPA) 3483 + max_rx_cmpl += bp->max_tpa; 3484 + /* RX and TPA completions are 32-byte, all others are 16-byte */ 3485 + ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size; 3478 3486 bp->cp_ring_size = ring_size; 3479 3487 3480 3488 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT); ··· 10393 10385 &bp->sp_event)) 10394 10386 bnxt_hwrm_phy_qcaps(bp); 10395 10387 10396 - if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, 10397 - &bp->sp_event)) 10398 - bnxt_init_ethtool_link_settings(bp); 10399 - 10400 10388 rc = bnxt_update_link(bp, true); 10401 - mutex_unlock(&bp->link_lock); 10402 10389 if (rc) 10403 10390 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", 10404 10391 rc); 10392 + 10393 + if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, 10394 + &bp->sp_event)) 10395 + bnxt_init_ethtool_link_settings(bp); 10396 + mutex_unlock(&bp->link_lock); 10405 10397 } 10406 10398 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) { 10407 10399 int rc;
+4 -1
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
··· 1765 1765 if (epause->tx_pause) 1766 1766 link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX; 1767 1767 1768 - if (netif_running(dev)) 1768 + if (netif_running(dev)) { 1769 + mutex_lock(&bp->link_lock); 1769 1770 rc = bnxt_hwrm_set_pause(bp); 1771 + mutex_unlock(&bp->link_lock); 1772 + } 1770 1773 return rc; 1771 1774 } 1772 1775
+1 -1
drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
··· 396 396 } 397 397 } 398 398 399 + bp->pf.active_vfs = 0; 399 400 kfree(bp->pf.vf); 400 401 bp->pf.vf = NULL; 401 402 } ··· 836 835 837 836 bnxt_free_vf_resources(bp); 838 837 839 - bp->pf.active_vfs = 0; 840 838 /* Reclaim all resources for the PF. */ 841 839 rtnl_lock(); 842 840 bnxt_restore_pf_fw_resources(bp);
+65 -79
drivers/net/ethernet/broadcom/genet/bcmgenet.c
··· 543 543 #define VALIDATE_MASK(x) \ 544 544 bcmgenet_hfb_validate_mask(&(x), sizeof(x)) 545 545 546 - static int bcmgenet_hfb_insert_data(u32 *f, int offset, 547 - void *val, void *mask, size_t size) 546 + static int bcmgenet_hfb_insert_data(struct bcmgenet_priv *priv, u32 f_index, 547 + u32 offset, void *val, void *mask, 548 + size_t size) 548 549 { 549 - int index; 550 - u32 tmp; 550 + u32 index, tmp; 551 551 552 - index = offset / 2; 553 - tmp = f[index]; 552 + index = f_index * priv->hw_params->hfb_filter_size + offset / 2; 553 + tmp = bcmgenet_hfb_readl(priv, index * sizeof(u32)); 554 554 555 555 while (size--) { 556 556 if (offset++ & 1) { ··· 567 567 tmp |= 0x10000; 568 568 break; 569 569 } 570 - f[index++] = tmp; 570 + bcmgenet_hfb_writel(priv, tmp, index++ * sizeof(u32)); 571 571 if (size) 572 - tmp = f[index]; 572 + tmp = bcmgenet_hfb_readl(priv, 573 + index * sizeof(u32)); 573 574 } else { 574 575 tmp &= ~0xCFF00; 575 576 tmp |= (*(unsigned char *)val++) << 8; ··· 586 585 break; 587 586 } 588 587 if (!size) 589 - f[index] = tmp; 588 + bcmgenet_hfb_writel(priv, tmp, index * sizeof(u32)); 590 589 } 591 590 } 592 591 593 592 return 0; 594 593 } 595 594 596 - static void bcmgenet_hfb_set_filter(struct bcmgenet_priv *priv, u32 *f_data, 597 - u32 f_length, u32 rx_queue, int f_index) 598 - { 599 - u32 base = f_index * priv->hw_params->hfb_filter_size; 600 - int i; 601 - 602 - for (i = 0; i < f_length; i++) 603 - bcmgenet_hfb_writel(priv, f_data[i], (base + i) * sizeof(u32)); 604 - 605 - bcmgenet_hfb_set_filter_length(priv, f_index, 2 * f_length); 606 - bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f_index, rx_queue); 607 - } 608 - 609 - static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv, 610 - struct bcmgenet_rxnfc_rule *rule) 595 + static void bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv, 596 + struct bcmgenet_rxnfc_rule *rule) 611 597 { 612 598 struct ethtool_rx_flow_spec *fs = &rule->fs; 613 - int err = 0, offset = 0, 
f_length = 0; 599 + u32 offset = 0, f_length = 0, f; 614 600 u8 val_8, mask_8; 615 601 __be16 val_16; 616 602 u16 mask_16; 617 603 size_t size; 618 - u32 *f_data; 619 604 620 - f_data = kcalloc(priv->hw_params->hfb_filter_size, sizeof(u32), 621 - GFP_KERNEL); 622 - if (!f_data) 623 - return -ENOMEM; 624 - 605 + f = fs->location; 625 606 if (fs->flow_type & FLOW_MAC_EXT) { 626 - bcmgenet_hfb_insert_data(f_data, 0, 607 + bcmgenet_hfb_insert_data(priv, f, 0, 627 608 &fs->h_ext.h_dest, &fs->m_ext.h_dest, 628 609 sizeof(fs->h_ext.h_dest)); 629 610 } ··· 613 630 if (fs->flow_type & FLOW_EXT) { 614 631 if (fs->m_ext.vlan_etype || 615 632 fs->m_ext.vlan_tci) { 616 - bcmgenet_hfb_insert_data(f_data, 12, 633 + bcmgenet_hfb_insert_data(priv, f, 12, 617 634 &fs->h_ext.vlan_etype, 618 635 &fs->m_ext.vlan_etype, 619 636 sizeof(fs->h_ext.vlan_etype)); 620 - bcmgenet_hfb_insert_data(f_data, 14, 637 + bcmgenet_hfb_insert_data(priv, f, 14, 621 638 &fs->h_ext.vlan_tci, 622 639 &fs->m_ext.vlan_tci, 623 640 sizeof(fs->h_ext.vlan_tci)); ··· 629 646 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { 630 647 case ETHER_FLOW: 631 648 f_length += DIV_ROUND_UP(ETH_HLEN, 2); 632 - bcmgenet_hfb_insert_data(f_data, 0, 649 + bcmgenet_hfb_insert_data(priv, f, 0, 633 650 &fs->h_u.ether_spec.h_dest, 634 651 &fs->m_u.ether_spec.h_dest, 635 652 sizeof(fs->h_u.ether_spec.h_dest)); 636 - bcmgenet_hfb_insert_data(f_data, ETH_ALEN, 653 + bcmgenet_hfb_insert_data(priv, f, ETH_ALEN, 637 654 &fs->h_u.ether_spec.h_source, 638 655 &fs->m_u.ether_spec.h_source, 639 656 sizeof(fs->h_u.ether_spec.h_source)); 640 - bcmgenet_hfb_insert_data(f_data, (2 * ETH_ALEN) + offset, 657 + bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset, 641 658 &fs->h_u.ether_spec.h_proto, 642 659 &fs->m_u.ether_spec.h_proto, 643 660 sizeof(fs->h_u.ether_spec.h_proto)); ··· 647 664 /* Specify IP Ether Type */ 648 665 val_16 = htons(ETH_P_IP); 649 666 mask_16 = 0xFFFF; 650 - bcmgenet_hfb_insert_data(f_data, (2 * ETH_ALEN) + 
offset, 667 + bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset, 651 668 &val_16, &mask_16, sizeof(val_16)); 652 - bcmgenet_hfb_insert_data(f_data, 15 + offset, 669 + bcmgenet_hfb_insert_data(priv, f, 15 + offset, 653 670 &fs->h_u.usr_ip4_spec.tos, 654 671 &fs->m_u.usr_ip4_spec.tos, 655 672 sizeof(fs->h_u.usr_ip4_spec.tos)); 656 - bcmgenet_hfb_insert_data(f_data, 23 + offset, 673 + bcmgenet_hfb_insert_data(priv, f, 23 + offset, 657 674 &fs->h_u.usr_ip4_spec.proto, 658 675 &fs->m_u.usr_ip4_spec.proto, 659 676 sizeof(fs->h_u.usr_ip4_spec.proto)); 660 - bcmgenet_hfb_insert_data(f_data, 26 + offset, 677 + bcmgenet_hfb_insert_data(priv, f, 26 + offset, 661 678 &fs->h_u.usr_ip4_spec.ip4src, 662 679 &fs->m_u.usr_ip4_spec.ip4src, 663 680 sizeof(fs->h_u.usr_ip4_spec.ip4src)); 664 - bcmgenet_hfb_insert_data(f_data, 30 + offset, 681 + bcmgenet_hfb_insert_data(priv, f, 30 + offset, 665 682 &fs->h_u.usr_ip4_spec.ip4dst, 666 683 &fs->m_u.usr_ip4_spec.ip4dst, 667 684 sizeof(fs->h_u.usr_ip4_spec.ip4dst)); ··· 671 688 /* Only supports 20 byte IPv4 header */ 672 689 val_8 = 0x45; 673 690 mask_8 = 0xFF; 674 - bcmgenet_hfb_insert_data(f_data, ETH_HLEN + offset, 691 + bcmgenet_hfb_insert_data(priv, f, ETH_HLEN + offset, 675 692 &val_8, &mask_8, 676 693 sizeof(val_8)); 677 694 size = sizeof(fs->h_u.usr_ip4_spec.l4_4_bytes); 678 - bcmgenet_hfb_insert_data(f_data, 695 + bcmgenet_hfb_insert_data(priv, f, 679 696 ETH_HLEN + 20 + offset, 680 697 &fs->h_u.usr_ip4_spec.l4_4_bytes, 681 698 &fs->m_u.usr_ip4_spec.l4_4_bytes, ··· 684 701 break; 685 702 } 686 703 704 + bcmgenet_hfb_set_filter_length(priv, f, 2 * f_length); 687 705 if (!fs->ring_cookie || fs->ring_cookie == RX_CLS_FLOW_WAKE) { 688 706 /* Ring 0 flows can be handled by the default Descriptor Ring 689 707 * We'll map them to ring 0, but don't enable the filter 690 708 */ 691 - bcmgenet_hfb_set_filter(priv, f_data, f_length, 0, 692 - fs->location); 709 + bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, 0); 693 710 rule->state 
= BCMGENET_RXNFC_STATE_DISABLED; 694 711 } else { 695 712 /* Other Rx rings are direct mapped here */ 696 - bcmgenet_hfb_set_filter(priv, f_data, f_length, 697 - fs->ring_cookie, fs->location); 698 - bcmgenet_hfb_enable_filter(priv, fs->location); 713 + bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, 714 + fs->ring_cookie); 715 + bcmgenet_hfb_enable_filter(priv, f); 699 716 rule->state = BCMGENET_RXNFC_STATE_ENABLED; 700 717 } 701 - 702 - kfree(f_data); 703 - 704 - return err; 705 718 } 706 719 707 720 /* bcmgenet_hfb_clear 708 721 * 709 722 * Clear Hardware Filter Block and disable all filtering. 710 723 */ 724 + static void bcmgenet_hfb_clear_filter(struct bcmgenet_priv *priv, u32 f_index) 725 + { 726 + u32 base, i; 727 + 728 + base = f_index * priv->hw_params->hfb_filter_size; 729 + for (i = 0; i < priv->hw_params->hfb_filter_size; i++) 730 + bcmgenet_hfb_writel(priv, 0x0, (base + i) * sizeof(u32)); 731 + } 732 + 711 733 static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv) 712 734 { 713 735 u32 i; 736 + 737 + if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) 738 + return; 714 739 715 740 bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL); 716 741 bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS); ··· 731 740 bcmgenet_hfb_reg_writel(priv, 0x0, 732 741 HFB_FLT_LEN_V3PLUS + i * sizeof(u32)); 733 742 734 - for (i = 0; i < priv->hw_params->hfb_filter_cnt * 735 - priv->hw_params->hfb_filter_size; i++) 736 - bcmgenet_hfb_writel(priv, 0x0, i * sizeof(u32)); 743 + for (i = 0; i < priv->hw_params->hfb_filter_cnt; i++) 744 + bcmgenet_hfb_clear_filter(priv, i); 737 745 } 738 746 739 747 static void bcmgenet_hfb_init(struct bcmgenet_priv *priv) 740 748 { 741 749 int i; 742 750 751 + INIT_LIST_HEAD(&priv->rxnfc_list); 743 752 if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) 744 753 return; 745 754 746 - INIT_LIST_HEAD(&priv->rxnfc_list); 747 755 for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) { 748 756 INIT_LIST_HEAD(&priv->rxnfc_rules[i].list); 749 757 priv->rxnfc_rules[i].state = 
BCMGENET_RXNFC_STATE_UNUSED; ··· 1427 1437 loc_rule = &priv->rxnfc_rules[cmd->fs.location]; 1428 1438 if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED) 1429 1439 bcmgenet_hfb_disable_filter(priv, cmd->fs.location); 1430 - if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) 1440 + if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) { 1431 1441 list_del(&loc_rule->list); 1442 + bcmgenet_hfb_clear_filter(priv, cmd->fs.location); 1443 + } 1432 1444 loc_rule->state = BCMGENET_RXNFC_STATE_UNUSED; 1433 1445 memcpy(&loc_rule->fs, &cmd->fs, 1434 1446 sizeof(struct ethtool_rx_flow_spec)); 1435 1447 1436 - err = bcmgenet_hfb_create_rxnfc_filter(priv, loc_rule); 1437 - if (err) { 1438 - netdev_err(dev, "rxnfc: Could not install rule (%d)\n", 1439 - err); 1440 - return err; 1441 - } 1448 + bcmgenet_hfb_create_rxnfc_filter(priv, loc_rule); 1442 1449 1443 1450 list_add_tail(&loc_rule->list, &priv->rxnfc_list); 1444 1451 ··· 1460 1473 1461 1474 if (rule->state == BCMGENET_RXNFC_STATE_ENABLED) 1462 1475 bcmgenet_hfb_disable_filter(priv, cmd->fs.location); 1463 - if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) 1476 + if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) { 1464 1477 list_del(&rule->list); 1478 + bcmgenet_hfb_clear_filter(priv, cmd->fs.location); 1479 + } 1465 1480 rule->state = BCMGENET_RXNFC_STATE_UNUSED; 1466 1481 memset(&rule->fs, 0, sizeof(struct ethtool_rx_flow_spec)); 1467 1482 ··· 3988 3999 if (err) 3989 4000 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 3990 4001 if (err) 3991 - goto err; 4002 + goto err_clk_disable; 3992 4003 3993 4004 /* Mii wait queue */ 3994 4005 init_waitqueue_head(&priv->wq); ··· 4000 4011 if (IS_ERR(priv->clk_wol)) { 4001 4012 dev_dbg(&priv->pdev->dev, "failed to get enet-wol clock\n"); 4002 4013 err = PTR_ERR(priv->clk_wol); 4003 - goto err; 4014 + goto err_clk_disable; 4004 4015 } 4005 4016 4006 4017 priv->clk_eee = devm_clk_get_optional(&priv->pdev->dev, "enet-eee"); 4007 4018 if (IS_ERR(priv->clk_eee)) { 4008 4019 
dev_dbg(&priv->pdev->dev, "failed to get enet-eee clock\n"); 4009 4020 err = PTR_ERR(priv->clk_eee); 4010 - goto err; 4021 + goto err_clk_disable; 4011 4022 } 4012 4023 4013 4024 /* If this is an internal GPHY, power it on now, before UniMAC is ··· 4118 4129 { 4119 4130 struct net_device *dev = dev_get_drvdata(d); 4120 4131 struct bcmgenet_priv *priv = netdev_priv(dev); 4132 + struct bcmgenet_rxnfc_rule *rule; 4121 4133 unsigned long dma_ctrl; 4122 - u32 offset, reg; 4134 + u32 reg; 4123 4135 int ret; 4124 4136 4125 4137 if (!netif_running(dev)) ··· 4151 4161 4152 4162 bcmgenet_set_hw_addr(priv, dev->dev_addr); 4153 4163 4154 - offset = HFB_FLT_ENABLE_V3PLUS; 4155 - bcmgenet_hfb_reg_writel(priv, priv->hfb_en[1], offset); 4156 - bcmgenet_hfb_reg_writel(priv, priv->hfb_en[2], offset + sizeof(u32)); 4157 - bcmgenet_hfb_reg_writel(priv, priv->hfb_en[0], HFB_CTRL); 4164 + /* Restore hardware filters */ 4165 + bcmgenet_hfb_clear(priv); 4166 + list_for_each_entry(rule, &priv->rxnfc_list, list) 4167 + if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) 4168 + bcmgenet_hfb_create_rxnfc_filter(priv, rule); 4158 4169 4159 4170 if (priv->internal_phy) { 4160 4171 reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); ··· 4199 4208 { 4200 4209 struct net_device *dev = dev_get_drvdata(d); 4201 4210 struct bcmgenet_priv *priv = netdev_priv(dev); 4202 - u32 offset; 4203 4211 4204 4212 if (!netif_running(dev)) 4205 4213 return 0; ··· 4210 4220 if (!device_may_wakeup(d)) 4211 4221 phy_suspend(dev->phydev); 4212 4222 4213 - /* Preserve filter state and disable filtering */ 4214 - priv->hfb_en[0] = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); 4215 - offset = HFB_FLT_ENABLE_V3PLUS; 4216 - priv->hfb_en[1] = bcmgenet_hfb_reg_readl(priv, offset); 4217 - priv->hfb_en[2] = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32)); 4223 + /* Disable filtering */ 4218 4224 bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL); 4219 4225 4220 4226 return 0;
-1
drivers/net/ethernet/broadcom/genet/bcmgenet.h
··· 696 696 u32 wolopts; 697 697 u8 sopass[SOPASS_MAX]; 698 698 bool wol_active; 699 - u32 hfb_en[3]; 700 699 701 700 struct bcmgenet_mib_counters mib; 702 701
+15 -7
drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
··· 217 217 218 218 priv->wol_active = 0; 219 219 clk_disable_unprepare(priv->clk_wol); 220 + priv->crc_fwd_en = 0; 220 221 221 222 /* Disable Magic Packet Detection */ 222 - reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); 223 - reg &= ~(MPD_EN | MPD_PW_EN); 224 - bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); 223 + if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) { 224 + reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); 225 + if (!(reg & MPD_EN)) 226 + return; /* already reset so skip the rest */ 227 + reg &= ~(MPD_EN | MPD_PW_EN); 228 + bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); 229 + } 225 230 226 231 /* Disable WAKE_FILTER Detection */ 227 - reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); 228 - reg &= ~(RBUF_HFB_EN | RBUF_ACPI_EN); 229 - bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); 232 + if (priv->wolopts & WAKE_FILTER) { 233 + reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); 234 + if (!(reg & RBUF_ACPI_EN)) 235 + return; /* already reset so skip the rest */ 236 + reg &= ~(RBUF_HFB_EN | RBUF_ACPI_EN); 237 + bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); 238 + } 230 239 231 240 /* Disable CRC Forward */ 232 241 reg = bcmgenet_umac_readl(priv, UMAC_CMD); 233 242 reg &= ~CMD_CRC_FWD; 234 243 bcmgenet_umac_writel(priv, reg, UMAC_CMD); 235 - priv->crc_fwd_en = 0; 236 244 }
+20 -13
drivers/net/ethernet/cadence/macb_main.c
··· 2821 2821 { 2822 2822 struct macb *bp = netdev_priv(netdev); 2823 2823 2824 - wol->supported = 0; 2825 - wol->wolopts = 0; 2826 - 2827 - if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) 2824 + if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) { 2828 2825 phylink_ethtool_get_wol(bp->phylink, wol); 2826 + wol->supported |= WAKE_MAGIC; 2827 + 2828 + if (bp->wol & MACB_WOL_ENABLED) 2829 + wol->wolopts |= WAKE_MAGIC; 2830 + } 2829 2831 } 2830 2832 2831 2833 static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) ··· 2835 2833 struct macb *bp = netdev_priv(netdev); 2836 2834 int ret; 2837 2835 2836 + /* Pass the order to phylink layer */ 2838 2837 ret = phylink_ethtool_set_wol(bp->phylink, wol); 2839 - if (!ret) 2840 - return 0; 2838 + /* Don't manage WoL on MAC if handled by the PHY 2839 + * or if there's a failure in talking to the PHY 2840 + */ 2841 + if (!ret || ret != -EOPNOTSUPP) 2842 + return ret; 2841 2843 2842 2844 if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) || 2843 2845 (wol->wolopts & ~WAKE_MAGIC)) ··· 3736 3730 3737 3731 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) { 3738 3732 val = 0; 3739 - if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII) 3733 + if (phy_interface_mode_is_rgmii(bp->phy_interface)) 3740 3734 val = GEM_BIT(RGMII); 3741 3735 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII && 3742 3736 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) ··· 4428 4422 bp->wol = 0; 4429 4423 if (of_get_property(np, "magic-packet", NULL)) 4430 4424 bp->wol |= MACB_WOL_HAS_MAGIC_PACKET; 4431 - device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); 4425 + device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); 4432 4426 4433 4427 spin_lock_init(&bp->lock); 4434 4428 ··· 4604 4598 bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT); 4605 4599 } 4606 4600 4607 - netif_carrier_off(netdev); 4608 4601 if (bp->ptp_info) 4609 4602 bp->ptp_info->ptp_remove(netdev); 4610 - pm_runtime_force_suspend(dev); 4603 + if 
(!device_may_wakeup(dev)) 4604 + pm_runtime_force_suspend(dev); 4611 4605 4612 4606 return 0; 4613 4607 } ··· 4622 4616 if (!netif_running(netdev)) 4623 4617 return 0; 4624 4618 4625 - pm_runtime_force_resume(dev); 4619 + if (!device_may_wakeup(dev)) 4620 + pm_runtime_force_resume(dev); 4626 4621 4627 4622 if (bp->wol & MACB_WOL_ENABLED) { 4628 4623 macb_writel(bp, IDR, MACB_BIT(WOL)); ··· 4661 4654 struct net_device *netdev = dev_get_drvdata(dev); 4662 4655 struct macb *bp = netdev_priv(netdev); 4663 4656 4664 - if (!(device_may_wakeup(&bp->dev->dev))) { 4657 + if (!(device_may_wakeup(dev))) { 4665 4658 clk_disable_unprepare(bp->tx_clk); 4666 4659 clk_disable_unprepare(bp->hclk); 4667 4660 clk_disable_unprepare(bp->pclk); ··· 4677 4670 struct net_device *netdev = dev_get_drvdata(dev); 4678 4671 struct macb *bp = netdev_priv(netdev); 4679 4672 4680 - if (!(device_may_wakeup(&bp->dev->dev))) { 4673 + if (!(device_may_wakeup(dev))) { 4681 4674 clk_prepare_enable(bp->pclk); 4682 4675 clk_prepare_enable(bp->hclk); 4683 4676 clk_prepare_enable(bp->tx_clk);
+5 -5
drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
··· 1112 1112 struct in_addr *addr; 1113 1113 1114 1114 addr = (struct in_addr *)ipmask; 1115 - if (ntohl(addr->s_addr) == 0xffffffff) 1115 + if (addr->s_addr == htonl(0xffffffff)) 1116 1116 return true; 1117 1117 } else if (family == AF_INET6) { 1118 1118 struct in6_addr *addr6; 1119 1119 1120 1120 addr6 = (struct in6_addr *)ipmask; 1121 - if (ntohl(addr6->s6_addr32[0]) == 0xffffffff && 1122 - ntohl(addr6->s6_addr32[1]) == 0xffffffff && 1123 - ntohl(addr6->s6_addr32[2]) == 0xffffffff && 1124 - ntohl(addr6->s6_addr32[3]) == 0xffffffff) 1121 + if (addr6->s6_addr32[0] == htonl(0xffffffff) && 1122 + addr6->s6_addr32[1] == htonl(0xffffffff) && 1123 + addr6->s6_addr32[2] == htonl(0xffffffff) && 1124 + addr6->s6_addr32[3] == htonl(0xffffffff)) 1125 1125 return true; 1126 1126 } 1127 1127 return false;
+1
drivers/net/ethernet/chelsio/cxgb4/sge.c
··· 2938 2938 txq_info = adap->sge.uld_txq_info[tx_uld_type]; 2939 2939 if (unlikely(!txq_info)) { 2940 2940 WARN_ON(true); 2941 + kfree_skb(skb); 2941 2942 return NET_XMIT_DROP; 2942 2943 } 2943 2944
+4 -4
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
··· 3493 3493 drv_fw = &fw_info->fw_hdr; 3494 3494 3495 3495 /* Read the header of the firmware on the card */ 3496 - ret = -t4_read_flash(adap, FLASH_FW_START, 3496 + ret = t4_read_flash(adap, FLASH_FW_START, 3497 3497 sizeof(*card_fw) / sizeof(uint32_t), 3498 3498 (uint32_t *)card_fw, 1); 3499 3499 if (ret == 0) { ··· 3522 3522 should_install_fs_fw(adap, card_fw_usable, 3523 3523 be32_to_cpu(fs_fw->fw_ver), 3524 3524 be32_to_cpu(card_fw->fw_ver))) { 3525 - ret = -t4_fw_upgrade(adap, adap->mbox, fw_data, 3526 - fw_size, 0); 3525 + ret = t4_fw_upgrade(adap, adap->mbox, fw_data, 3526 + fw_size, 0); 3527 3527 if (ret != 0) { 3528 3528 dev_err(adap->pdev_dev, 3529 3529 "failed to install firmware: %d\n", ret); ··· 3554 3554 FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), 3555 3555 FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k), 3556 3556 FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k)); 3557 - ret = EINVAL; 3557 + ret = -EINVAL; 3558 3558 goto bye; 3559 3559 } 3560 3560
+1 -1
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
··· 2938 2938 DMA_BIT_MASK(40)); 2939 2939 if (err) { 2940 2940 netdev_err(net_dev, "dma_coerce_mask_and_coherent() failed\n"); 2941 - return err; 2941 + goto free_netdev; 2942 2942 } 2943 2943 2944 2944 /* If fsl_fm_max_frm is set to a higher value than the all-common 1500,
+1 -1
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
··· 3632 3632 3633 3633 dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent); 3634 3634 dpmac_dev = fsl_mc_get_endpoint(dpni_dev); 3635 - if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) 3635 + if (IS_ERR_OR_NULL(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) 3636 3636 return 0; 3637 3637 3638 3638 if (dpaa2_mac_is_type_fixed(dpmac_dev, priv->mc_io))
+2 -2
drivers/net/ethernet/freescale/enetc/enetc.c
··· 266 266 /* disable interrupts */ 267 267 enetc_wr_reg(v->rbier, 0); 268 268 269 - for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings) 269 + for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS) 270 270 enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), 0); 271 271 272 272 napi_schedule_irqoff(&v->napi); ··· 302 302 /* enable interrupts */ 303 303 enetc_wr_reg(v->rbier, ENETC_RBIER_RXTIE); 304 304 305 - for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings) 305 + for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS) 306 306 enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), 307 307 ENETC_TBIER_TXTIE); 308 308
+1
drivers/net/ethernet/freescale/enetc/enetc_pf.c
··· 906 906 return 0; 907 907 908 908 err_reg_netdev: 909 + enetc_mdio_remove(pf); 909 910 enetc_of_put_phy(priv); 910 911 enetc_free_msix(priv); 911 912 err_alloc_msix:
+1 -5
drivers/net/ethernet/freescale/fec.h
··· 525 525 unsigned int total_tx_ring_size; 526 526 unsigned int total_rx_ring_size; 527 527 528 - unsigned long work_tx; 529 - unsigned long work_rx; 530 - unsigned long work_ts; 531 - unsigned long work_mdio; 532 - 533 528 struct platform_device *pdev; 534 529 535 530 int dev_id; ··· 590 595 void fec_ptp_init(struct platform_device *pdev, int irq_idx); 591 596 void fec_ptp_stop(struct platform_device *pdev); 592 597 void fec_ptp_start_cyclecounter(struct net_device *ndev); 598 + void fec_ptp_disable_hwts(struct net_device *ndev); 593 599 int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr); 594 600 int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr); 595 601
+51 -72
drivers/net/ethernet/freescale/fec_main.c
··· 75 75 76 76 #define DRIVER_NAME "fec" 77 77 78 - #define FEC_ENET_GET_QUQUE(_x) ((_x == 0) ? 1 : ((_x == 1) ? 2 : 0)) 79 - 80 78 /* Pause frame feild and FIFO threshold */ 81 79 #define FEC_ENET_FCE (1 << 5) 82 80 #define FEC_ENET_RSEM_V 0x84 ··· 1246 1248 1247 1249 fep = netdev_priv(ndev); 1248 1250 1249 - queue_id = FEC_ENET_GET_QUQUE(queue_id); 1250 - 1251 1251 txq = fep->tx_queue[queue_id]; 1252 1252 /* get next bdp of dirty_tx */ 1253 1253 nq = netdev_get_tx_queue(ndev, queue_id); ··· 1294 1298 ndev->stats.tx_bytes += skb->len; 1295 1299 } 1296 1300 1297 - if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) && 1298 - fep->bufdesc_ex) { 1301 + /* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who 1302 + * are to time stamp the packet, so we still need to check time 1303 + * stamping enabled flag. 1304 + */ 1305 + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS && 1306 + fep->hwts_tx_en) && 1307 + fep->bufdesc_ex) { 1299 1308 struct skb_shared_hwtstamps shhwtstamps; 1300 1309 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 1301 1310 ··· 1341 1340 writel(0, txq->bd.reg_desc_active); 1342 1341 } 1343 1342 1344 - static void 1345 - fec_enet_tx(struct net_device *ndev) 1343 + static void fec_enet_tx(struct net_device *ndev) 1346 1344 { 1347 1345 struct fec_enet_private *fep = netdev_priv(ndev); 1348 - u16 queue_id; 1349 - /* First process class A queue, then Class B and Best Effort queue */ 1350 - for_each_set_bit(queue_id, &fep->work_tx, FEC_ENET_MAX_TX_QS) { 1351 - clear_bit(queue_id, &fep->work_tx); 1352 - fec_enet_tx_queue(ndev, queue_id); 1353 - } 1354 - return; 1346 + int i; 1347 + 1348 + /* Make sure that AVB queues are processed first. 
*/ 1349 + for (i = fep->num_tx_queues - 1; i >= 0; i--) 1350 + fec_enet_tx_queue(ndev, i); 1355 1351 } 1356 1352 1357 1353 static int ··· 1424 1426 #ifdef CONFIG_M532x 1425 1427 flush_cache_all(); 1426 1428 #endif 1427 - queue_id = FEC_ENET_GET_QUQUE(queue_id); 1428 1429 rxq = fep->rx_queue[queue_id]; 1429 1430 1430 1431 /* First, grab all of the stats for the incoming packet. ··· 1547 1550 htons(ETH_P_8021Q), 1548 1551 vlan_tag); 1549 1552 1553 + skb_record_rx_queue(skb, queue_id); 1550 1554 napi_gro_receive(&fep->napi, skb); 1551 1555 1552 1556 if (is_copybreak) { ··· 1593 1595 return pkt_received; 1594 1596 } 1595 1597 1596 - static int 1597 - fec_enet_rx(struct net_device *ndev, int budget) 1598 + static int fec_enet_rx(struct net_device *ndev, int budget) 1598 1599 { 1599 - int pkt_received = 0; 1600 - u16 queue_id; 1601 1600 struct fec_enet_private *fep = netdev_priv(ndev); 1601 + int i, done = 0; 1602 1602 1603 - for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) { 1604 - int ret; 1603 + /* Make sure that AVB queues are processed first. 
*/ 1604 + for (i = fep->num_rx_queues - 1; i >= 0; i--) 1605 + done += fec_enet_rx_queue(ndev, budget - done, i); 1605 1606 1606 - ret = fec_enet_rx_queue(ndev, 1607 - budget - pkt_received, queue_id); 1608 - 1609 - if (ret < budget - pkt_received) 1610 - clear_bit(queue_id, &fep->work_rx); 1611 - 1612 - pkt_received += ret; 1613 - } 1614 - return pkt_received; 1607 + return done; 1615 1608 } 1616 1609 1617 - static bool 1618 - fec_enet_collect_events(struct fec_enet_private *fep, uint int_events) 1610 + static bool fec_enet_collect_events(struct fec_enet_private *fep) 1619 1611 { 1620 - if (int_events == 0) 1621 - return false; 1622 - 1623 - if (int_events & FEC_ENET_RXF_0) 1624 - fep->work_rx |= (1 << 2); 1625 - if (int_events & FEC_ENET_RXF_1) 1626 - fep->work_rx |= (1 << 0); 1627 - if (int_events & FEC_ENET_RXF_2) 1628 - fep->work_rx |= (1 << 1); 1629 - 1630 - if (int_events & FEC_ENET_TXF_0) 1631 - fep->work_tx |= (1 << 2); 1632 - if (int_events & FEC_ENET_TXF_1) 1633 - fep->work_tx |= (1 << 0); 1634 - if (int_events & FEC_ENET_TXF_2) 1635 - fep->work_tx |= (1 << 1); 1636 - 1637 - return true; 1638 - } 1639 - 1640 - static irqreturn_t 1641 - fec_enet_interrupt(int irq, void *dev_id) 1642 - { 1643 - struct net_device *ndev = dev_id; 1644 - struct fec_enet_private *fep = netdev_priv(ndev); 1645 1612 uint int_events; 1646 - irqreturn_t ret = IRQ_NONE; 1647 1613 1648 1614 int_events = readl(fep->hwp + FEC_IEVENT); 1649 1615 ··· 1615 1653 int_events &= ~FEC_ENET_MII; 1616 1654 1617 1655 writel(int_events, fep->hwp + FEC_IEVENT); 1618 - fec_enet_collect_events(fep, int_events); 1619 1656 1620 - if ((fep->work_tx || fep->work_rx) && fep->link) { 1657 + return int_events != 0; 1658 + } 1659 + 1660 + static irqreturn_t 1661 + fec_enet_interrupt(int irq, void *dev_id) 1662 + { 1663 + struct net_device *ndev = dev_id; 1664 + struct fec_enet_private *fep = netdev_priv(ndev); 1665 + irqreturn_t ret = IRQ_NONE; 1666 + 1667 + if (fec_enet_collect_events(fep) && fep->link) { 
1621 1668 ret = IRQ_HANDLED; 1622 1669 1623 1670 if (napi_schedule_prep(&fep->napi)) { ··· 1643 1672 { 1644 1673 struct net_device *ndev = napi->dev; 1645 1674 struct fec_enet_private *fep = netdev_priv(ndev); 1646 - int pkts; 1675 + int done = 0; 1647 1676 1648 - pkts = fec_enet_rx(ndev, budget); 1677 + do { 1678 + done += fec_enet_rx(ndev, budget - done); 1679 + fec_enet_tx(ndev); 1680 + } while ((done < budget) && fec_enet_collect_events(fep)); 1649 1681 1650 - fec_enet_tx(ndev); 1651 - 1652 - if (pkts < budget) { 1653 - napi_complete_done(napi, pkts); 1682 + if (done < budget) { 1683 + napi_complete_done(napi, done); 1654 1684 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); 1655 1685 } 1656 - return pkts; 1686 + 1687 + return done; 1657 1688 } 1658 1689 1659 1690 /* ------------------------------------------------------------------------- */ ··· 2728 2755 return -ENODEV; 2729 2756 2730 2757 if (fep->bufdesc_ex) { 2731 - if (cmd == SIOCSHWTSTAMP) 2732 - return fec_ptp_set(ndev, rq); 2733 - if (cmd == SIOCGHWTSTAMP) 2734 - return fec_ptp_get(ndev, rq); 2758 + bool use_fec_hwts = !phy_has_hwtstamp(phydev); 2759 + 2760 + if (cmd == SIOCSHWTSTAMP) { 2761 + if (use_fec_hwts) 2762 + return fec_ptp_set(ndev, rq); 2763 + fec_ptp_disable_hwts(ndev); 2764 + } else if (cmd == SIOCGHWTSTAMP) { 2765 + if (use_fec_hwts) 2766 + return fec_ptp_get(ndev, rq); 2767 + } 2735 2768 } 2736 2769 2737 2770 return phy_mii_ioctl(phydev, rq, cmd);
+12
drivers/net/ethernet/freescale/fec_ptp.c
··· 452 452 return -EOPNOTSUPP; 453 453 } 454 454 455 + /** 456 + * fec_ptp_disable_hwts - disable hardware time stamping 457 + * @ndev: pointer to net_device 458 + */ 459 + void fec_ptp_disable_hwts(struct net_device *ndev) 460 + { 461 + struct fec_enet_private *fep = netdev_priv(ndev); 462 + 463 + fep->hwts_tx_en = 0; 464 + fep->hwts_rx_en = 0; 465 + } 466 + 455 467 int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr) 456 468 { 457 469 struct fec_enet_private *fep = netdev_priv(ndev);
+5 -1
drivers/net/ethernet/freescale/gianfar.c
··· 779 779 780 780 mac_addr = of_get_mac_address(np); 781 781 782 - if (!IS_ERR(mac_addr)) 782 + if (!IS_ERR(mac_addr)) { 783 783 ether_addr_copy(dev->dev_addr, mac_addr); 784 + } else { 785 + eth_hw_addr_random(dev); 786 + dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr); 787 + } 784 788 785 789 if (model && !strcasecmp(model, "TSEC")) 786 790 priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
+1
drivers/net/ethernet/hisilicon/hns3/hnae3.h
··· 77 77 ((ring)->p = ((ring)->p - 1 + (ring)->desc_num) % (ring)->desc_num) 78 78 79 79 enum hns_desc_type { 80 + DESC_TYPE_UNKNOWN, 80 81 DESC_TYPE_SKB, 81 82 DESC_TYPE_FRAGLIST_SKB, 82 83 DESC_TYPE_PAGE,
+13 -14
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
··· 1118 1118 return -ENOMEM; 1119 1119 } 1120 1120 1121 + desc_cb->priv = priv; 1121 1122 desc_cb->length = size; 1123 + desc_cb->dma = dma; 1124 + desc_cb->type = type; 1122 1125 1123 1126 if (likely(size <= HNS3_MAX_BD_SIZE)) { 1124 - desc_cb->priv = priv; 1125 - desc_cb->dma = dma; 1126 - desc_cb->type = type; 1127 1127 desc->addr = cpu_to_le64(dma); 1128 1128 desc->tx.send_size = cpu_to_le16(size); 1129 1129 desc->tx.bdtp_fe_sc_vld_ra_ri = ··· 1135 1135 } 1136 1136 1137 1137 frag_buf_num = hns3_tx_bd_count(size); 1138 - sizeoflast = size & HNS3_TX_LAST_SIZE_M; 1138 + sizeoflast = size % HNS3_MAX_BD_SIZE; 1139 1139 sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE; 1140 1140 1141 1141 /* When frag size is bigger than hardware limit, split this frag */ 1142 1142 for (k = 0; k < frag_buf_num; k++) { 1143 - /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */ 1144 - desc_cb->priv = priv; 1145 - desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k; 1146 - desc_cb->type = ((type == DESC_TYPE_FRAGLIST_SKB || 1147 - type == DESC_TYPE_SKB) && !k) ? 1148 - type : DESC_TYPE_PAGE; 1149 - 1150 1143 /* now, fill the descriptor */ 1151 1144 desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k); 1152 1145 desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ? 
··· 1151 1158 /* move ring pointer to next */ 1152 1159 ring_ptr_move_fw(ring, next_to_use); 1153 1160 1154 - desc_cb = &ring->desc_cb[ring->next_to_use]; 1155 1161 desc = &ring->desc[ring->next_to_use]; 1156 1162 } 1157 1163 ··· 1338 1346 unsigned int i; 1339 1347 1340 1348 for (i = 0; i < ring->desc_num; i++) { 1349 + struct hns3_desc *desc = &ring->desc[ring->next_to_use]; 1350 + 1351 + memset(desc, 0, sizeof(*desc)); 1352 + 1341 1353 /* check if this is where we started */ 1342 1354 if (ring->next_to_use == next_to_use_orig) 1343 1355 break; 1344 1356 1345 1357 /* rollback one */ 1346 1358 ring_ptr_move_bw(ring, next_to_use); 1359 + 1360 + if (!ring->desc_cb[ring->next_to_use].dma) 1361 + continue; 1347 1362 1348 1363 /* unmap the descriptor dma address */ 1349 1364 if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB || ··· 1368 1369 1369 1370 ring->desc_cb[ring->next_to_use].length = 0; 1370 1371 ring->desc_cb[ring->next_to_use].dma = 0; 1372 + ring->desc_cb[ring->next_to_use].type = DESC_TYPE_UNKNOWN; 1371 1373 } 1372 1374 } 1373 1375 ··· 4127 4127 4128 4128 hns3_put_ring_config(priv); 4129 4129 4130 - hns3_dbg_uninit(handle); 4131 - 4132 4130 out_netdev_free: 4131 + hns3_dbg_uninit(handle); 4133 4132 free_netdev(netdev); 4134 4133 } 4135 4134
-2
drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
··· 165 165 #define HNS3_TXD_MSS_S 0 166 166 #define HNS3_TXD_MSS_M (0x3fff << HNS3_TXD_MSS_S) 167 167 168 - #define HNS3_TX_LAST_SIZE_M 0xffff 169 - 170 168 #define HNS3_VECTOR_TX_IRQ BIT_ULL(0) 171 169 #define HNS3_VECTOR_RX_IRQ BIT_ULL(1) 172 170
+6 -3
drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
··· 180 180 { 181 181 struct hns3_enet_tqp_vector *tqp_vector = ring->tqp_vector; 182 182 unsigned char *packet = skb->data; 183 + u32 len = skb_headlen(skb); 183 184 u32 i; 184 185 185 - for (i = 0; i < skb->len; i++) 186 + len = min_t(u32, len, HNS3_NIC_LB_TEST_PACKET_SIZE); 187 + 188 + for (i = 0; i < len; i++) 186 189 if (packet[i] != (unsigned char)(i & 0xff)) 187 190 break; 188 191 189 192 /* The packet is correctly received */ 190 - if (i == skb->len) 193 + if (i == HNS3_NIC_LB_TEST_PACKET_SIZE) 191 194 tqp_vector->rx_group.total_packets++; 192 195 else 193 196 print_hex_dump(KERN_ERR, "selftest:", DUMP_PREFIX_OFFSET, 16, 1, 194 - skb->data, skb->len, true); 197 + skb->data, len, true); 195 198 196 199 dev_kfree_skb_any(skb); 197 200 }
+23 -28
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
··· 2673 2673 delay_time); 2674 2674 } 2675 2675 2676 - static int hclge_get_mac_link_status(struct hclge_dev *hdev) 2676 + static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status) 2677 2677 { 2678 2678 struct hclge_link_status_cmd *req; 2679 2679 struct hclge_desc desc; 2680 - int link_status; 2681 2680 int ret; 2682 2681 2683 2682 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true); ··· 2688 2689 } 2689 2690 2690 2691 req = (struct hclge_link_status_cmd *)desc.data; 2691 - link_status = req->status & HCLGE_LINK_STATUS_UP_M; 2692 + *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ? 2693 + HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN; 2692 2694 2693 - return !!link_status; 2695 + return 0; 2694 2696 } 2695 2697 2696 - static int hclge_get_mac_phy_link(struct hclge_dev *hdev) 2698 + static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status) 2697 2699 { 2698 - unsigned int mac_state; 2699 - int link_stat; 2700 + struct phy_device *phydev = hdev->hw.mac.phydev; 2701 + 2702 + *link_status = HCLGE_LINK_STATUS_DOWN; 2700 2703 2701 2704 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) 2702 2705 return 0; 2703 2706 2704 - mac_state = hclge_get_mac_link_status(hdev); 2707 + if (phydev && (phydev->state != PHY_RUNNING || !phydev->link)) 2708 + return 0; 2705 2709 2706 - if (hdev->hw.mac.phydev) { 2707 - if (hdev->hw.mac.phydev->state == PHY_RUNNING) 2708 - link_stat = mac_state & 2709 - hdev->hw.mac.phydev->link; 2710 - else 2711 - link_stat = 0; 2712 - 2713 - } else { 2714 - link_stat = mac_state; 2715 - } 2716 - 2717 - return !!link_stat; 2710 + return hclge_get_mac_link_status(hdev, link_status); 2718 2711 } 2719 2712 2720 2713 static void hclge_update_link_status(struct hclge_dev *hdev) ··· 2716 2725 struct hnae3_handle *rhandle; 2717 2726 struct hnae3_handle *handle; 2718 2727 int state; 2728 + int ret; 2719 2729 int i; 2720 2730 2721 2731 if (!client) ··· 2725 2733 if 
(test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state)) 2726 2734 return; 2727 2735 2728 - state = hclge_get_mac_phy_link(hdev); 2736 + ret = hclge_get_mac_phy_link(hdev, &state); 2737 + if (ret) { 2738 + clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state); 2739 + return; 2740 + } 2741 + 2729 2742 if (state != hdev->hw.mac.link) { 2730 2743 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 2731 2744 handle = &hdev->vport[i].nic; ··· 6521 6524 { 6522 6525 #define HCLGE_MAC_LINK_STATUS_NUM 100 6523 6526 6527 + int link_status; 6524 6528 int i = 0; 6525 6529 int ret; 6526 6530 6527 6531 do { 6528 - ret = hclge_get_mac_link_status(hdev); 6529 - if (ret < 0) 6532 + ret = hclge_get_mac_link_status(hdev, &link_status); 6533 + if (ret) 6530 6534 return ret; 6531 - else if (ret == link_ret) 6535 + if (link_status == link_ret) 6532 6536 return 0; 6533 6537 6534 6538 msleep(HCLGE_LINK_STATUS_MS); ··· 6540 6542 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en, 6541 6543 bool is_phy) 6542 6544 { 6543 - #define HCLGE_LINK_STATUS_DOWN 0 6544 - #define HCLGE_LINK_STATUS_UP 1 6545 - 6546 6545 int link_ret; 6547 6546 6548 6547 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN; ··· 9854 9859 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 9855 9860 hdev->reset_type = HNAE3_FLR_RESET; 9856 9861 ret = hclge_reset_prepare(hdev); 9857 - if (ret) { 9862 + if (ret || hdev->reset_pending) { 9858 9863 dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n", 9859 9864 ret); 9860 9865 if (hdev->reset_pending ||
+3
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
··· 317 317 HCLGE_LF_XSFP_ABSENT, 318 318 }; 319 319 320 + #define HCLGE_LINK_STATUS_DOWN 0 321 + #define HCLGE_LINK_STATUS_UP 1 322 + 320 323 #define HCLGE_PG_NUM 4 321 324 #define HCLGE_SCH_MODE_SP 0 322 325 #define HCLGE_SCH_MODE_DWRR 1
+5
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
··· 1793 1793 if (hdev->reset_type == HNAE3_VF_FUNC_RESET) { 1794 1794 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0); 1795 1795 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1796 + if (ret) { 1797 + dev_err(&hdev->pdev->dev, 1798 + "failed to assert VF reset, ret = %d\n", ret); 1799 + return ret; 1800 + } 1796 1801 hdev->rst_stats.vf_func_rst_cnt++; 1797 1802 } 1798 1803
+2
drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
··· 814 814 err_init_msix: 815 815 err_pfhwdev_alloc: 816 816 hinic_free_hwif(hwif); 817 + if (err > 0) 818 + err = -EIO; 817 819 return ERR_PTR(err); 818 820 } 819 821
+81 -28
drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
··· 370 370 MSG_NOT_RESP, timeout); 371 371 } 372 372 373 + static void recv_mgmt_msg_work_handler(struct work_struct *work) 374 + { 375 + struct hinic_mgmt_msg_handle_work *mgmt_work = 376 + container_of(work, struct hinic_mgmt_msg_handle_work, work); 377 + struct hinic_pf_to_mgmt *pf_to_mgmt = mgmt_work->pf_to_mgmt; 378 + struct pci_dev *pdev = pf_to_mgmt->hwif->pdev; 379 + u8 *buf_out = pf_to_mgmt->mgmt_ack_buf; 380 + struct hinic_mgmt_cb *mgmt_cb; 381 + unsigned long cb_state; 382 + u16 out_size = 0; 383 + 384 + memset(buf_out, 0, MAX_PF_MGMT_BUF_SIZE); 385 + 386 + if (mgmt_work->mod >= HINIC_MOD_MAX) { 387 + dev_err(&pdev->dev, "Unknown MGMT MSG module = %d\n", 388 + mgmt_work->mod); 389 + kfree(mgmt_work->msg); 390 + kfree(mgmt_work); 391 + return; 392 + } 393 + 394 + mgmt_cb = &pf_to_mgmt->mgmt_cb[mgmt_work->mod]; 395 + 396 + cb_state = cmpxchg(&mgmt_cb->state, 397 + HINIC_MGMT_CB_ENABLED, 398 + HINIC_MGMT_CB_ENABLED | HINIC_MGMT_CB_RUNNING); 399 + 400 + if ((cb_state == HINIC_MGMT_CB_ENABLED) && (mgmt_cb->cb)) 401 + mgmt_cb->cb(mgmt_cb->handle, mgmt_work->cmd, 402 + mgmt_work->msg, mgmt_work->msg_len, 403 + buf_out, &out_size); 404 + else 405 + dev_err(&pdev->dev, "No MGMT msg handler, mod: %d, cmd: %d\n", 406 + mgmt_work->mod, mgmt_work->cmd); 407 + 408 + mgmt_cb->state &= ~HINIC_MGMT_CB_RUNNING; 409 + 410 + if (!mgmt_work->async_mgmt_to_pf) 411 + /* MGMT sent sync msg, send the response */ 412 + msg_to_mgmt_async(pf_to_mgmt, mgmt_work->mod, mgmt_work->cmd, 413 + buf_out, out_size, MGMT_RESP, 414 + mgmt_work->msg_id); 415 + 416 + kfree(mgmt_work->msg); 417 + kfree(mgmt_work); 418 + } 419 + 373 420 /** 374 421 * mgmt_recv_msg_handler - handler for message from mgmt cpu 375 422 * @pf_to_mgmt: PF to MGMT channel ··· 425 378 static void mgmt_recv_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt, 426 379 struct hinic_recv_msg *recv_msg) 427 380 { 428 - struct hinic_hwif *hwif = pf_to_mgmt->hwif; 429 - struct pci_dev *pdev = hwif->pdev; 430 - u8 *buf_out = 
recv_msg->buf_out; 431 - struct hinic_mgmt_cb *mgmt_cb; 432 - unsigned long cb_state; 433 - u16 out_size = 0; 381 + struct hinic_mgmt_msg_handle_work *mgmt_work = NULL; 382 + struct pci_dev *pdev = pf_to_mgmt->hwif->pdev; 434 383 435 - if (recv_msg->mod >= HINIC_MOD_MAX) { 436 - dev_err(&pdev->dev, "Unknown MGMT MSG module = %d\n", 437 - recv_msg->mod); 384 + mgmt_work = kzalloc(sizeof(*mgmt_work), GFP_KERNEL); 385 + if (!mgmt_work) { 386 + dev_err(&pdev->dev, "Allocate mgmt work memory failed\n"); 438 387 return; 439 388 } 440 389 441 - mgmt_cb = &pf_to_mgmt->mgmt_cb[recv_msg->mod]; 390 + if (recv_msg->msg_len) { 391 + mgmt_work->msg = kzalloc(recv_msg->msg_len, GFP_KERNEL); 392 + if (!mgmt_work->msg) { 393 + dev_err(&pdev->dev, "Allocate mgmt msg memory failed\n"); 394 + kfree(mgmt_work); 395 + return; 396 + } 397 + } 442 398 443 - cb_state = cmpxchg(&mgmt_cb->state, 444 - HINIC_MGMT_CB_ENABLED, 445 - HINIC_MGMT_CB_ENABLED | HINIC_MGMT_CB_RUNNING); 399 + mgmt_work->pf_to_mgmt = pf_to_mgmt; 400 + mgmt_work->msg_len = recv_msg->msg_len; 401 + memcpy(mgmt_work->msg, recv_msg->msg, recv_msg->msg_len); 402 + mgmt_work->msg_id = recv_msg->msg_id; 403 + mgmt_work->mod = recv_msg->mod; 404 + mgmt_work->cmd = recv_msg->cmd; 405 + mgmt_work->async_mgmt_to_pf = recv_msg->async_mgmt_to_pf; 446 406 447 - if ((cb_state == HINIC_MGMT_CB_ENABLED) && (mgmt_cb->cb)) 448 - mgmt_cb->cb(mgmt_cb->handle, recv_msg->cmd, 449 - recv_msg->msg, recv_msg->msg_len, 450 - buf_out, &out_size); 451 - else 452 - dev_err(&pdev->dev, "No MGMT msg handler, mod: %d, cmd: %d\n", 453 - recv_msg->mod, recv_msg->cmd); 454 - 455 - mgmt_cb->state &= ~HINIC_MGMT_CB_RUNNING; 456 - 457 - if (!recv_msg->async_mgmt_to_pf) 458 - /* MGMT sent sync msg, send the response */ 459 - msg_to_mgmt_async(pf_to_mgmt, recv_msg->mod, recv_msg->cmd, 460 - buf_out, out_size, MGMT_RESP, 461 - recv_msg->msg_id); 407 + INIT_WORK(&mgmt_work->work, recv_mgmt_msg_work_handler); 408 + queue_work(pf_to_mgmt->workq, 
&mgmt_work->work); 462 409 } 463 410 464 411 /** ··· 587 546 if (!pf_to_mgmt->sync_msg_buf) 588 547 return -ENOMEM; 589 548 549 + pf_to_mgmt->mgmt_ack_buf = devm_kzalloc(&pdev->dev, 550 + MAX_PF_MGMT_BUF_SIZE, 551 + GFP_KERNEL); 552 + if (!pf_to_mgmt->mgmt_ack_buf) 553 + return -ENOMEM; 554 + 590 555 return 0; 591 556 } 592 557 ··· 618 571 return 0; 619 572 620 573 sema_init(&pf_to_mgmt->sync_msg_lock, 1); 574 + pf_to_mgmt->workq = create_singlethread_workqueue("hinic_mgmt"); 575 + if (!pf_to_mgmt->workq) { 576 + dev_err(&pdev->dev, "Failed to initialize MGMT workqueue\n"); 577 + return -ENOMEM; 578 + } 621 579 pf_to_mgmt->sync_msg_id = 0; 622 580 623 581 err = alloc_msg_buf(pf_to_mgmt); ··· 657 605 658 606 hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MSG_FROM_MGMT_CPU); 659 607 hinic_api_cmd_free(pf_to_mgmt->cmd_chain); 608 + destroy_workqueue(pf_to_mgmt->workq); 660 609 }
+16
drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h
··· 119 119 struct semaphore sync_msg_lock; 120 120 u16 sync_msg_id; 121 121 u8 *sync_msg_buf; 122 + void *mgmt_ack_buf; 122 123 123 124 struct hinic_recv_msg recv_resp_msg_from_mgmt; 124 125 struct hinic_recv_msg recv_msg_from_mgmt; ··· 127 126 struct hinic_api_cmd_chain *cmd_chain[HINIC_API_CMD_MAX]; 128 127 129 128 struct hinic_mgmt_cb mgmt_cb[HINIC_MOD_MAX]; 129 + 130 + struct workqueue_struct *workq; 131 + }; 132 + 133 + struct hinic_mgmt_msg_handle_work { 134 + struct work_struct work; 135 + struct hinic_pf_to_mgmt *pf_to_mgmt; 136 + 137 + void *msg; 138 + u16 msg_len; 139 + 140 + enum hinic_mod_type mod; 141 + u8 cmd; 142 + u16 msg_id; 143 + int async_mgmt_to_pf; 130 144 }; 131 145 132 146 void hinic_register_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt,
+1 -1
drivers/net/ethernet/marvell/mvneta.c
··· 3959 3959 /* When at 2.5G, the link partner can send frames with shortened 3960 3960 * preambles. 3961 3961 */ 3962 - if (state->speed == SPEED_2500) 3962 + if (state->interface == PHY_INTERFACE_MODE_2500BASEX) 3963 3963 new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE; 3964 3964 3965 3965 if (pp->phy_interface != state->interface) {
+1 -1
drivers/net/ethernet/marvell/sky2.c
··· 203 203 204 204 static inline u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg) 205 205 { 206 - u16 v; 206 + u16 v = 0; 207 207 __gm_phy_read(hw, port, reg, &v); 208 208 return v; 209 209 }
+1
drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
··· 29 29 bool manual_buffer; 30 30 u32 cable_len; 31 31 u32 xoff; 32 + u16 port_buff_cell_sz; 32 33 }; 33 34 34 35 #define MLX5E_MAX_DSCP (64)
+18 -3
drivers/net/ethernet/mellanox/mlx5/core/en/port.c
··· 78 78 [MLX5E_400GAUI_8] = 400000, 79 79 }; 80 80 81 + bool mlx5e_ptys_ext_supported(struct mlx5_core_dev *mdev) 82 + { 83 + struct mlx5e_port_eth_proto eproto; 84 + int err; 85 + 86 + if (MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet)) 87 + return true; 88 + 89 + err = mlx5_port_query_eth_proto(mdev, 1, true, &eproto); 90 + if (err) 91 + return false; 92 + 93 + return !!eproto.cap; 94 + } 95 + 81 96 static void mlx5e_port_get_speed_arr(struct mlx5_core_dev *mdev, 82 97 const u32 **arr, u32 *size, 83 98 bool force_legacy) 84 99 { 85 - bool ext = force_legacy ? false : MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); 100 + bool ext = force_legacy ? false : mlx5e_ptys_ext_supported(mdev); 86 101 87 102 *size = ext ? ARRAY_SIZE(mlx5e_ext_link_speed) : 88 103 ARRAY_SIZE(mlx5e_link_speed); ··· 192 177 bool ext; 193 178 int err; 194 179 195 - ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); 180 + ext = mlx5e_ptys_ext_supported(mdev); 196 181 err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto); 197 182 if (err) 198 183 goto out; ··· 220 205 int err; 221 206 int i; 222 207 223 - ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); 208 + ext = mlx5e_ptys_ext_supported(mdev); 224 209 err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto); 225 210 if (err) 226 211 return err;
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en/port.h
··· 54 54 int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed); 55 55 u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed, 56 56 bool force_legacy); 57 - 57 + bool mlx5e_ptys_ext_supported(struct mlx5_core_dev *mdev); 58 58 int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out); 59 59 int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in); 60 60 int mlx5e_port_query_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer);
+28 -23
drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
··· 34 34 int mlx5e_port_query_buffer(struct mlx5e_priv *priv, 35 35 struct mlx5e_port_buffer *port_buffer) 36 36 { 37 + u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz; 37 38 struct mlx5_core_dev *mdev = priv->mdev; 38 39 int sz = MLX5_ST_SZ_BYTES(pbmc_reg); 39 40 u32 total_used = 0; ··· 58 57 port_buffer->buffer[i].epsb = 59 58 MLX5_GET(bufferx_reg, buffer, epsb); 60 59 port_buffer->buffer[i].size = 61 - MLX5_GET(bufferx_reg, buffer, size) << MLX5E_BUFFER_CELL_SHIFT; 60 + MLX5_GET(bufferx_reg, buffer, size) * port_buff_cell_sz; 62 61 port_buffer->buffer[i].xon = 63 - MLX5_GET(bufferx_reg, buffer, xon_threshold) << MLX5E_BUFFER_CELL_SHIFT; 62 + MLX5_GET(bufferx_reg, buffer, xon_threshold) * port_buff_cell_sz; 64 63 port_buffer->buffer[i].xoff = 65 - MLX5_GET(bufferx_reg, buffer, xoff_threshold) << MLX5E_BUFFER_CELL_SHIFT; 64 + MLX5_GET(bufferx_reg, buffer, xoff_threshold) * port_buff_cell_sz; 66 65 total_used += port_buffer->buffer[i].size; 67 66 68 67 mlx5e_dbg(HW, priv, "buffer %d: size=%d, xon=%d, xoff=%d, epsb=%d, lossy=%d\n", i, ··· 74 73 } 75 74 76 75 port_buffer->port_buffer_size = 77 - MLX5_GET(pbmc_reg, out, port_buffer_size) << MLX5E_BUFFER_CELL_SHIFT; 76 + MLX5_GET(pbmc_reg, out, port_buffer_size) * port_buff_cell_sz; 78 77 port_buffer->spare_buffer_size = 79 78 port_buffer->port_buffer_size - total_used; 80 79 ··· 89 88 static int port_set_buffer(struct mlx5e_priv *priv, 90 89 struct mlx5e_port_buffer *port_buffer) 91 90 { 91 + u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz; 92 92 struct mlx5_core_dev *mdev = priv->mdev; 93 93 int sz = MLX5_ST_SZ_BYTES(pbmc_reg); 94 - void *buffer; 95 94 void *in; 96 95 int err; 97 96 int i; ··· 105 104 goto out; 106 105 107 106 for (i = 0; i < MLX5E_MAX_BUFFER; i++) { 108 - buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]); 107 + void *buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]); 108 + u64 size = port_buffer->buffer[i].size; 109 + u64 xoff = port_buffer->buffer[i].xoff; 110 + u64 xon = 
port_buffer->buffer[i].xon; 109 111 110 - MLX5_SET(bufferx_reg, buffer, size, 111 - port_buffer->buffer[i].size >> MLX5E_BUFFER_CELL_SHIFT); 112 - MLX5_SET(bufferx_reg, buffer, lossy, 113 - port_buffer->buffer[i].lossy); 114 - MLX5_SET(bufferx_reg, buffer, xoff_threshold, 115 - port_buffer->buffer[i].xoff >> MLX5E_BUFFER_CELL_SHIFT); 116 - MLX5_SET(bufferx_reg, buffer, xon_threshold, 117 - port_buffer->buffer[i].xon >> MLX5E_BUFFER_CELL_SHIFT); 112 + do_div(size, port_buff_cell_sz); 113 + do_div(xoff, port_buff_cell_sz); 114 + do_div(xon, port_buff_cell_sz); 115 + MLX5_SET(bufferx_reg, buffer, size, size); 116 + MLX5_SET(bufferx_reg, buffer, lossy, port_buffer->buffer[i].lossy); 117 + MLX5_SET(bufferx_reg, buffer, xoff_threshold, xoff); 118 + MLX5_SET(bufferx_reg, buffer, xon_threshold, xon); 118 119 } 119 120 120 121 err = mlx5e_port_set_pbmc(mdev, in); ··· 146 143 } 147 144 148 145 static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, 149 - u32 xoff, unsigned int max_mtu) 146 + u32 xoff, unsigned int max_mtu, u16 port_buff_cell_sz) 150 147 { 151 148 int i; 152 149 ··· 158 155 } 159 156 160 157 if (port_buffer->buffer[i].size < 161 - (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT))) { 158 + (xoff + max_mtu + port_buff_cell_sz)) { 162 159 pr_err("buffer_size[%d]=%d is not enough for lossless buffer\n", 163 160 i, port_buffer->buffer[i].size); 164 161 return -ENOMEM; ··· 178 175 * @pfc_en: <input> current pfc configuration 179 176 * @buffer: <input> current prio to buffer mapping 180 177 * @xoff: <input> xoff value 178 + * @port_buff_cell_sz: <input> port buffer cell_size 181 179 * @port_buffer: <output> port receive buffer configuration 182 180 * @change: <output> 183 181 * ··· 193 189 * sets change to true if buffer configuration was modified. 
194 190 */ 195 191 static int update_buffer_lossy(unsigned int max_mtu, 196 - u8 pfc_en, u8 *buffer, u32 xoff, 192 + u8 pfc_en, u8 *buffer, u32 xoff, u16 port_buff_cell_sz, 197 193 struct mlx5e_port_buffer *port_buffer, 198 194 bool *change) 199 195 { ··· 229 225 } 230 226 231 227 if (changed) { 232 - err = update_xoff_threshold(port_buffer, xoff, max_mtu); 228 + err = update_xoff_threshold(port_buffer, xoff, max_mtu, port_buff_cell_sz); 233 229 if (err) 234 230 return err; 235 231 ··· 266 262 u32 *buffer_size, 267 263 u8 *prio2buffer) 268 264 { 265 + u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz; 269 266 struct mlx5e_port_buffer port_buffer; 270 267 u32 xoff = calculate_xoff(priv, mtu); 271 268 bool update_prio2buffer = false; ··· 287 282 288 283 if (change & MLX5E_PORT_BUFFER_CABLE_LEN) { 289 284 update_buffer = true; 290 - err = update_xoff_threshold(&port_buffer, xoff, max_mtu); 285 + err = update_xoff_threshold(&port_buffer, xoff, max_mtu, port_buff_cell_sz); 291 286 if (err) 292 287 return err; 293 288 } ··· 297 292 if (err) 298 293 return err; 299 294 300 - err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff, 295 + err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff, port_buff_cell_sz, 301 296 &port_buffer, &update_buffer); 302 297 if (err) 303 298 return err; ··· 309 304 if (err) 310 305 return err; 311 306 312 - err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, 307 + err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, port_buff_cell_sz, 313 308 xoff, &port_buffer, &update_buffer); 314 309 if (err) 315 310 return err; ··· 334 329 return -EINVAL; 335 330 336 331 update_buffer = true; 337 - err = update_xoff_threshold(&port_buffer, xoff, max_mtu); 332 + err = update_xoff_threshold(&port_buffer, xoff, max_mtu, port_buff_cell_sz); 338 333 if (err) 339 334 return err; 340 335 } ··· 342 337 /* Need to update buffer configuration if xoff value is changed */ 343 338 if (!update_buffer && xoff != priv->dcbx.xoff) { 344 
339 update_buffer = true; 345 - err = update_xoff_threshold(&port_buffer, xoff, max_mtu); 340 + err = update_xoff_threshold(&port_buffer, xoff, max_mtu, port_buff_cell_sz); 346 341 if (err) 347 342 return err; 348 343 }
-1
drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h
··· 36 36 #include "port.h" 37 37 38 38 #define MLX5E_MAX_BUFFER 8 39 - #define MLX5E_BUFFER_CELL_SHIFT 7 40 39 #define MLX5E_DEFAULT_CABLE_LEN 7 /* 7 meters */ 41 40 42 41 #define MLX5_BUFFER_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, pcam_reg) && \
-1
drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
··· 6 6 #include <linux/rculist.h> 7 7 #include <linux/rtnetlink.h> 8 8 #include <linux/workqueue.h> 9 - #include <linux/rwlock.h> 10 9 #include <linux/spinlock.h> 11 10 #include <linux/notifier.h> 12 11 #include <net/netevent.h>
+1
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
··· 1097 1097 struct mlx5_ct_entry *entry = ptr; 1098 1098 1099 1099 mlx5_tc_ct_entry_del_rules(ct_priv, entry); 1100 + kfree(entry); 1100 1101 } 1101 1102 1102 1103 static void
+19
drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
··· 1217 1217 return 0; 1218 1218 } 1219 1219 1220 + #define MLX5E_BUFFER_CELL_SHIFT 7 1221 + 1222 + static u16 mlx5e_query_port_buffers_cell_size(struct mlx5e_priv *priv) 1223 + { 1224 + struct mlx5_core_dev *mdev = priv->mdev; 1225 + u32 out[MLX5_ST_SZ_DW(sbcam_reg)] = {}; 1226 + u32 in[MLX5_ST_SZ_DW(sbcam_reg)] = {}; 1227 + 1228 + if (!MLX5_CAP_GEN(mdev, sbcam_reg)) 1229 + return (1 << MLX5E_BUFFER_CELL_SHIFT); 1230 + 1231 + if (mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out), 1232 + MLX5_REG_SBCAM, 0, 0)) 1233 + return (1 << MLX5E_BUFFER_CELL_SHIFT); 1234 + 1235 + return MLX5_GET(sbcam_reg, out, cap_cell_size); 1236 + } 1237 + 1220 1238 void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv) 1221 1239 { 1222 1240 struct mlx5e_dcbx *dcbx = &priv->dcbx; ··· 1252 1234 if (priv->dcbx.mode == MLX5E_DCBX_PARAM_VER_OPER_HOST) 1253 1235 priv->dcbx.cap |= DCB_CAP_DCBX_HOST; 1254 1236 1237 + priv->dcbx.port_buff_cell_sz = mlx5e_query_port_buffers_cell_size(priv); 1255 1238 priv->dcbx.manual_buffer = false; 1256 1239 priv->dcbx.cable_len = MLX5E_DEFAULT_CABLE_LEN; 1257 1240
+4 -4
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
··· 200 200 struct ptys2ethtool_config **arr, 201 201 u32 *size) 202 202 { 203 - bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); 203 + bool ext = mlx5e_ptys_ext_supported(mdev); 204 204 205 205 *arr = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table; 206 206 *size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) : ··· 883 883 struct ethtool_link_ksettings *link_ksettings) 884 884 { 885 885 unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising; 886 - bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); 886 + bool ext = mlx5e_ptys_ext_supported(mdev); 887 887 888 888 ptys2ethtool_adver_link(lp_advertising, eth_proto_lp, ext); 889 889 } ··· 913 913 __func__, err); 914 914 goto err_query_regs; 915 915 } 916 - ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); 916 + ext = !!MLX5_GET_ETH_PROTO(ptys_reg, out, true, eth_proto_capability); 917 917 eth_proto_cap = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, 918 918 eth_proto_capability); 919 919 eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, ··· 1066 1066 autoneg = link_ksettings->base.autoneg; 1067 1067 speed = link_ksettings->base.speed; 1068 1068 1069 - ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); 1069 + ext_supported = mlx5e_ptys_ext_supported(mdev); 1070 1070 ext = ext_requested(autoneg, adver, ext_supported); 1071 1071 if (!ext_supported && ext) 1072 1072 return -EOPNOTSUPP;
+8 -7
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 3104 3104 mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP); 3105 3105 mutex_unlock(&priv->state_lock); 3106 3106 3107 - if (mlx5_vxlan_allowed(priv->mdev->vxlan)) 3108 - udp_tunnel_get_rx_info(netdev); 3109 - 3110 3107 return err; 3111 3108 } 3112 3109 ··· 5118 5121 if (err) 5119 5122 goto err_destroy_flow_steering; 5120 5123 5124 + #ifdef CONFIG_MLX5_EN_ARFS 5125 + priv->netdev->rx_cpu_rmap = mlx5_eq_table_get_rmap(priv->mdev); 5126 + #endif 5127 + 5121 5128 return 0; 5122 5129 5123 5130 err_destroy_flow_steering: ··· 5203 5202 rtnl_lock(); 5204 5203 if (netif_running(netdev)) 5205 5204 mlx5e_open(netdev); 5205 + if (mlx5_vxlan_allowed(priv->mdev->vxlan)) 5206 + udp_tunnel_get_rx_info(netdev); 5206 5207 netif_device_attach(netdev); 5207 5208 rtnl_unlock(); 5208 5209 } ··· 5219 5216 rtnl_lock(); 5220 5217 if (netif_running(priv->netdev)) 5221 5218 mlx5e_close(priv->netdev); 5219 + if (mlx5_vxlan_allowed(priv->mdev->vxlan)) 5220 + udp_tunnel_drop_rx_info(priv->netdev); 5222 5221 netif_device_detach(priv->netdev); 5223 5222 rtnl_unlock(); 5224 5223 ··· 5292 5287 5293 5288 /* netdev init */ 5294 5289 netif_carrier_off(netdev); 5295 - 5296 - #ifdef CONFIG_MLX5_EN_ARFS 5297 - netdev->rx_cpu_rmap = mlx5_eq_table_get_rmap(mdev); 5298 - #endif 5299 5290 5300 5291 return 0; 5301 5292
+9 -5
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 4670 4670 struct mlx5e_rep_priv *rpriv) 4671 4671 { 4672 4672 /* Offloaded flow rule is allowed to duplicate on non-uplink representor 4673 - * sharing tc block with other slaves of a lag device. 4673 + * sharing tc block with other slaves of a lag device. Rpriv can be NULL if this 4674 + * function is called from NIC mode. 4674 4675 */ 4675 - return netif_is_lag_port(dev) && rpriv->rep->vport != MLX5_VPORT_UPLINK; 4676 + return netif_is_lag_port(dev) && rpriv && rpriv->rep->vport != MLX5_VPORT_UPLINK; 4676 4677 } 4677 4678 4678 4679 int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv, ··· 4687 4686 4688 4687 rcu_read_lock(); 4689 4688 flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params); 4690 - rcu_read_unlock(); 4691 4689 if (flow) { 4692 4690 /* Same flow rule offloaded to non-uplink representor sharing tc block, 4693 4691 * just return 0. 4694 4692 */ 4695 4693 if (is_flow_rule_duplicate_allowed(dev, rpriv) && flow->orig_dev != dev) 4696 - goto out; 4694 + goto rcu_unlock; 4697 4695 4698 4696 NL_SET_ERR_MSG_MOD(extack, 4699 4697 "flow cookie already exists, ignoring"); ··· 4700 4700 "flow cookie %lx already exists, ignoring\n", 4701 4701 f->cookie); 4702 4702 err = -EEXIST; 4703 - goto out; 4703 + goto rcu_unlock; 4704 4704 } 4705 + rcu_unlock: 4706 + rcu_read_unlock(); 4707 + if (flow) 4708 + goto out; 4705 4709 4706 4710 trace_mlx5e_configure_flower(f); 4707 4711 err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
-1
drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
··· 217 217 } 218 218 219 219 /* Create ingress allow rule */ 220 - memset(spec, 0, sizeof(*spec)); 221 220 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; 222 221 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW; 223 222 vport->ingress.allow_rule = mlx5_add_flow_rules(vport->ingress.acl, spec,
+77 -16
drivers/net/ethernet/mellanox/mlx5/core/port.c
··· 293 293 return 0; 294 294 } 295 295 296 - static int mlx5_eeprom_page(int offset) 296 + static int mlx5_query_module_id(struct mlx5_core_dev *dev, int module_num, 297 + u8 *module_id) 298 + { 299 + u32 in[MLX5_ST_SZ_DW(mcia_reg)] = {}; 300 + u32 out[MLX5_ST_SZ_DW(mcia_reg)]; 301 + int err, status; 302 + u8 *ptr; 303 + 304 + MLX5_SET(mcia_reg, in, i2c_device_address, MLX5_I2C_ADDR_LOW); 305 + MLX5_SET(mcia_reg, in, module, module_num); 306 + MLX5_SET(mcia_reg, in, device_address, 0); 307 + MLX5_SET(mcia_reg, in, page_number, 0); 308 + MLX5_SET(mcia_reg, in, size, 1); 309 + MLX5_SET(mcia_reg, in, l, 0); 310 + 311 + err = mlx5_core_access_reg(dev, in, sizeof(in), out, 312 + sizeof(out), MLX5_REG_MCIA, 0, 0); 313 + if (err) 314 + return err; 315 + 316 + status = MLX5_GET(mcia_reg, out, status); 317 + if (status) { 318 + mlx5_core_err(dev, "query_mcia_reg failed: status: 0x%x\n", 319 + status); 320 + return -EIO; 321 + } 322 + ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0); 323 + 324 + *module_id = ptr[0]; 325 + 326 + return 0; 327 + } 328 + 329 + static int mlx5_qsfp_eeprom_page(u16 offset) 297 330 { 298 331 if (offset < MLX5_EEPROM_PAGE_LENGTH) 299 332 /* Addresses between 0-255 - page 00 */ ··· 340 307 MLX5_EEPROM_HIGH_PAGE_LENGTH); 341 308 } 342 309 343 - static int mlx5_eeprom_high_page_offset(int page_num) 310 + static int mlx5_qsfp_eeprom_high_page_offset(int page_num) 344 311 { 345 312 if (!page_num) /* Page 0 always start from low page */ 346 313 return 0; ··· 349 316 return page_num * MLX5_EEPROM_HIGH_PAGE_LENGTH; 350 317 } 351 318 319 + static void mlx5_qsfp_eeprom_params_set(u16 *i2c_addr, int *page_num, u16 *offset) 320 + { 321 + *i2c_addr = MLX5_I2C_ADDR_LOW; 322 + *page_num = mlx5_qsfp_eeprom_page(*offset); 323 + *offset -= mlx5_qsfp_eeprom_high_page_offset(*page_num); 324 + } 325 + 326 + static void mlx5_sfp_eeprom_params_set(u16 *i2c_addr, int *page_num, u16 *offset) 327 + { 328 + *i2c_addr = MLX5_I2C_ADDR_LOW; 329 + *page_num = 0; 330 + 331 + if 
(*offset < MLX5_EEPROM_PAGE_LENGTH) 332 + return; 333 + 334 + *i2c_addr = MLX5_I2C_ADDR_HIGH; 335 + *offset -= MLX5_EEPROM_PAGE_LENGTH; 336 + } 337 + 352 338 int mlx5_query_module_eeprom(struct mlx5_core_dev *dev, 353 339 u16 offset, u16 size, u8 *data) 354 340 { 355 - int module_num, page_num, status, err; 341 + int module_num, status, err, page_num = 0; 342 + u32 in[MLX5_ST_SZ_DW(mcia_reg)] = {}; 356 343 u32 out[MLX5_ST_SZ_DW(mcia_reg)]; 357 - u32 in[MLX5_ST_SZ_DW(mcia_reg)]; 358 - u16 i2c_addr; 359 - void *ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0); 344 + u16 i2c_addr = 0; 345 + u8 module_id; 346 + void *ptr; 360 347 361 348 err = mlx5_query_module_num(dev, &module_num); 362 349 if (err) 363 350 return err; 364 351 365 - memset(in, 0, sizeof(in)); 366 - size = min_t(int, size, MLX5_EEPROM_MAX_BYTES); 352 + err = mlx5_query_module_id(dev, module_num, &module_id); 353 + if (err) 354 + return err; 367 355 368 - /* Get the page number related to the given offset */ 369 - page_num = mlx5_eeprom_page(offset); 370 - 371 - /* Set the right offset according to the page number, 372 - * For page_num > 0, relative offset is always >= 128 (high page). 
373 - */ 374 - offset -= mlx5_eeprom_high_page_offset(page_num); 356 + switch (module_id) { 357 + case MLX5_MODULE_ID_SFP: 358 + mlx5_sfp_eeprom_params_set(&i2c_addr, &page_num, &offset); 359 + break; 360 + case MLX5_MODULE_ID_QSFP: 361 + case MLX5_MODULE_ID_QSFP_PLUS: 362 + case MLX5_MODULE_ID_QSFP28: 363 + mlx5_qsfp_eeprom_params_set(&i2c_addr, &page_num, &offset); 364 + break; 365 + default: 366 + mlx5_core_err(dev, "Module ID not recognized: 0x%x\n", module_id); 367 + return -EINVAL; 368 + } 375 369 376 370 if (offset + size > MLX5_EEPROM_PAGE_LENGTH) 377 371 /* Cross pages read, read until offset 256 in low page */ 378 372 size -= offset + size - MLX5_EEPROM_PAGE_LENGTH; 379 373 380 - i2c_addr = MLX5_I2C_ADDR_LOW; 374 + size = min_t(int, size, MLX5_EEPROM_MAX_BYTES); 381 375 382 376 MLX5_SET(mcia_reg, in, l, 0); 383 377 MLX5_SET(mcia_reg, in, module, module_num); ··· 425 365 return -EIO; 426 366 } 427 367 368 + ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0); 428 369 memcpy(data, ptr, size); 429 370 430 371 return size;
+2 -1
drivers/net/ethernet/mellanox/mlxsw/core.c
··· 710 710 err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_emad_rx_listener, 711 711 mlxsw_core); 712 712 if (err) 713 - return err; 713 + goto err_trap_register; 714 714 715 715 err = mlxsw_core->driver->basic_trap_groups_set(mlxsw_core); 716 716 if (err) ··· 722 722 err_emad_trap_set: 723 723 mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener, 724 724 mlxsw_core); 725 + err_trap_register: 725 726 destroy_workqueue(mlxsw_core->emad_wq); 726 727 return err; 727 728 }
+32 -16
drivers/net/ethernet/mellanox/mlxsw/core_env.c
··· 45 45 static int 46 46 mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module, 47 47 u16 offset, u16 size, void *data, 48 - unsigned int *p_read_size) 48 + bool qsfp, unsigned int *p_read_size) 49 49 { 50 50 char eeprom_tmp[MLXSW_REG_MCIA_EEPROM_SIZE]; 51 51 char mcia_pl[MLXSW_REG_MCIA_LEN]; ··· 54 54 int status; 55 55 int err; 56 56 57 + /* MCIA register accepts buffer size <= 48. Page of size 128 should be 58 + * read by chunks of size 48, 48, 32. Align the size of the last chunk 59 + * to avoid reading after the end of the page. 60 + */ 57 61 size = min_t(u16, size, MLXSW_REG_MCIA_EEPROM_SIZE); 58 62 59 63 if (offset < MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH && ··· 67 63 68 64 i2c_addr = MLXSW_REG_MCIA_I2C_ADDR_LOW; 69 65 if (offset >= MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH) { 70 - page = MLXSW_REG_MCIA_PAGE_GET(offset); 71 - offset -= MLXSW_REG_MCIA_EEPROM_UP_PAGE_LENGTH * page; 72 - /* When reading upper pages 1, 2 and 3 the offset starts at 73 - * 128. Please refer to "QSFP+ Memory Map" figure in SFF-8436 74 - * specification for graphical depiction. 75 - * MCIA register accepts buffer size <= 48. Page of size 128 76 - * should be read by chunks of size 48, 48, 32. Align the size 77 - * of the last chunk to avoid reading after the end of the 78 - * page. 79 - */ 80 - if (offset + size > MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH) 81 - size = MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH - offset; 66 + if (qsfp) { 67 + /* When reading upper pages 1, 2 and 3 the offset 68 + * starts at 128. Please refer to "QSFP+ Memory Map" 69 + * figure in SFF-8436 specification for graphical 70 + * depiction. 71 + */ 72 + page = MLXSW_REG_MCIA_PAGE_GET(offset); 73 + offset -= MLXSW_REG_MCIA_EEPROM_UP_PAGE_LENGTH * page; 74 + if (offset + size > MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH) 75 + size = MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH - offset; 76 + } else { 77 + /* When reading upper pages 1, 2 and 3 the offset 78 + * starts at 0 and I2C high address is used. 
Please refer 79 + * refer to "Memory Organization" figure in SFF-8472 80 + * specification for graphical depiction. 81 + */ 82 + i2c_addr = MLXSW_REG_MCIA_I2C_ADDR_HIGH; 83 + offset -= MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH; 84 + } 82 85 } 83 86 84 87 mlxsw_reg_mcia_pack(mcia_pl, module, 0, page, offset, size, i2c_addr); ··· 177 166 int err; 178 167 179 168 err = mlxsw_env_query_module_eeprom(mlxsw_core, module, 0, offset, 180 - module_info, &read_size); 169 + module_info, false, &read_size); 181 170 if (err) 182 171 return err; 183 172 ··· 208 197 /* Verify if transceiver provides diagnostic monitoring page */ 209 198 err = mlxsw_env_query_module_eeprom(mlxsw_core, module, 210 199 SFP_DIAGMON, 1, &diag_mon, 211 - &read_size); 200 + false, &read_size); 212 201 if (err) 213 202 return err; 214 203 ··· 236 225 int offset = ee->offset; 237 226 unsigned int read_size; 238 227 int i = 0; 228 + bool qsfp; 239 229 int err; 240 230 241 231 if (!ee->len) 242 232 return -EINVAL; 243 233 244 234 memset(data, 0, ee->len); 235 + /* Validate module identifier value. */ 236 + err = mlxsw_env_validate_cable_ident(mlxsw_core, module, &qsfp); 237 + if (err) 238 + return err; 245 239 246 240 while (i < ee->len) { 247 241 err = mlxsw_env_query_module_eeprom(mlxsw_core, module, offset, 248 242 ee->len - i, data + i, 249 - &read_size); 243 + qsfp, &read_size); 250 244 if (err) { 251 245 netdev_err(netdev, "Eeprom query failed\n"); 252 246 return err;
+38 -16
drivers/net/ethernet/mellanox/mlxsw/pci.c
··· 1414 1414 u16 num_pages; 1415 1415 int err; 1416 1416 1417 - mutex_init(&mlxsw_pci->cmd.lock); 1418 - init_waitqueue_head(&mlxsw_pci->cmd.wait); 1419 - 1420 1417 mlxsw_pci->core = mlxsw_core; 1421 1418 1422 1419 mbox = mlxsw_cmd_mbox_alloc(); 1423 1420 if (!mbox) 1424 1421 return -ENOMEM; 1425 - 1426 - err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); 1427 - if (err) 1428 - goto mbox_put; 1429 - 1430 - err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox); 1431 - if (err) 1432 - goto err_out_mbox_alloc; 1433 1422 1434 1423 err = mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id); 1435 1424 if (err) ··· 1526 1537 mlxsw_pci_free_irq_vectors(mlxsw_pci); 1527 1538 err_alloc_irq: 1528 1539 err_sw_reset: 1529 - mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox); 1530 - err_out_mbox_alloc: 1531 - mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); 1532 1540 mbox_put: 1533 1541 mlxsw_cmd_mbox_free(mbox); 1534 1542 return err; ··· 1539 1553 mlxsw_pci_aqs_fini(mlxsw_pci); 1540 1554 mlxsw_pci_fw_area_fini(mlxsw_pci); 1541 1555 mlxsw_pci_free_irq_vectors(mlxsw_pci); 1542 - mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox); 1543 - mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); 1544 1556 } 1545 1557 1546 1558 static struct mlxsw_pci_queue * ··· 1760 1776 .features = MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET, 1761 1777 }; 1762 1778 1779 + static int mlxsw_pci_cmd_init(struct mlxsw_pci *mlxsw_pci) 1780 + { 1781 + int err; 1782 + 1783 + mutex_init(&mlxsw_pci->cmd.lock); 1784 + init_waitqueue_head(&mlxsw_pci->cmd.wait); 1785 + 1786 + err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); 1787 + if (err) 1788 + goto err_in_mbox_alloc; 1789 + 1790 + err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox); 1791 + if (err) 1792 + goto err_out_mbox_alloc; 1793 + 1794 + return 0; 1795 + 1796 + err_out_mbox_alloc: 1797 + mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); 1798 + err_in_mbox_alloc: 1799 + 
mutex_destroy(&mlxsw_pci->cmd.lock); 1800 + return err; 1801 + } 1802 + 1803 + static void mlxsw_pci_cmd_fini(struct mlxsw_pci *mlxsw_pci) 1804 + { 1805 + mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox); 1806 + mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); 1807 + mutex_destroy(&mlxsw_pci->cmd.lock); 1808 + } 1809 + 1763 1810 static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 1764 1811 { 1765 1812 const char *driver_name = pdev->driver->name; ··· 1846 1831 mlxsw_pci->pdev = pdev; 1847 1832 pci_set_drvdata(pdev, mlxsw_pci); 1848 1833 1834 + err = mlxsw_pci_cmd_init(mlxsw_pci); 1835 + if (err) 1836 + goto err_pci_cmd_init; 1837 + 1849 1838 mlxsw_pci->bus_info.device_kind = driver_name; 1850 1839 mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev); 1851 1840 mlxsw_pci->bus_info.dev = &pdev->dev; ··· 1867 1848 return 0; 1868 1849 1869 1850 err_bus_device_register: 1851 + mlxsw_pci_cmd_fini(mlxsw_pci); 1852 + err_pci_cmd_init: 1870 1853 iounmap(mlxsw_pci->hw_addr); 1871 1854 err_ioremap: 1872 1855 err_pci_resource_len_check: ··· 1886 1865 struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev); 1887 1866 1888 1867 mlxsw_core_bus_device_unregister(mlxsw_pci->core, false); 1868 + mlxsw_pci_cmd_fini(mlxsw_pci); 1889 1869 iounmap(mlxsw_pci->hw_addr); 1890 1870 pci_release_regions(mlxsw_pci->pdev); 1891 1871 pci_disable_device(mlxsw_pci->pdev);
+1 -1
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
··· 6262 6262 } 6263 6263 6264 6264 fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC); 6265 - if (WARN_ON(!fib_work)) 6265 + if (!fib_work) 6266 6266 return NOTIFY_BAD; 6267 6267 6268 6268 fib_work->mlxsw_sp = router->mlxsw_sp;
+1 -1
drivers/net/ethernet/neterion/vxge/vxge-main.c
··· 98 98 { 99 99 struct sk_buff **skb_ptr = NULL; 100 100 struct sk_buff **temp; 101 - #define NR_SKB_COMPLETED 128 101 + #define NR_SKB_COMPLETED 16 102 102 struct sk_buff *completed[NR_SKB_COMPLETED]; 103 103 int more; 104 104
+22 -37
drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
··· 103 103 void *p) 104 104 { 105 105 struct ionic_lif *lif = netdev_priv(netdev); 106 + unsigned int offset; 106 107 unsigned int size; 107 108 108 109 regs->version = IONIC_DEV_CMD_REG_VERSION; 109 110 111 + offset = 0; 110 112 size = IONIC_DEV_INFO_REG_COUNT * sizeof(u32); 111 - memcpy_fromio(p, lif->ionic->idev.dev_info_regs->words, size); 113 + memcpy_fromio(p + offset, lif->ionic->idev.dev_info_regs->words, size); 112 114 115 + offset += size; 113 116 size = IONIC_DEV_CMD_REG_COUNT * sizeof(u32); 114 - memcpy_fromio(p, lif->ionic->idev.dev_cmd_regs->words, size); 117 + memcpy_fromio(p + offset, lif->ionic->idev.dev_cmd_regs->words, size); 115 118 } 116 119 117 120 static int ionic_get_link_ksettings(struct net_device *netdev, ··· 471 468 ring->rx_pending = lif->nrxq_descs; 472 469 } 473 470 471 + static void ionic_set_ringsize(struct ionic_lif *lif, void *arg) 472 + { 473 + struct ethtool_ringparam *ring = arg; 474 + 475 + lif->ntxq_descs = ring->tx_pending; 476 + lif->nrxq_descs = ring->rx_pending; 477 + } 478 + 474 479 static int ionic_set_ringparam(struct net_device *netdev, 475 480 struct ethtool_ringparam *ring) 476 481 { 477 482 struct ionic_lif *lif = netdev_priv(netdev); 478 - bool running; 479 - int err; 480 483 481 484 if (ring->rx_mini_pending || ring->rx_jumbo_pending) { 482 485 netdev_info(netdev, "Changing jumbo or mini descriptors not supported\n"); ··· 500 491 ring->rx_pending == lif->nrxq_descs) 501 492 return 0; 502 493 503 - err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET); 504 - if (err) 505 - return err; 506 - 507 - running = test_bit(IONIC_LIF_F_UP, lif->state); 508 - if (running) 509 - ionic_stop(netdev); 510 - 511 - lif->ntxq_descs = ring->tx_pending; 512 - lif->nrxq_descs = ring->rx_pending; 513 - 514 - if (running) 515 - ionic_open(netdev); 516 - clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state); 517 - 518 - return 0; 494 + return ionic_reset_queues(lif, ionic_set_ringsize, ring); 519 495 } 520 496 521 497 static void 
ionic_get_channels(struct net_device *netdev, ··· 515 521 ch->combined_count = lif->nxqs; 516 522 } 517 523 524 + static void ionic_set_queuecount(struct ionic_lif *lif, void *arg) 525 + { 526 + struct ethtool_channels *ch = arg; 527 + 528 + lif->nxqs = ch->combined_count; 529 + } 530 + 518 531 static int ionic_set_channels(struct net_device *netdev, 519 532 struct ethtool_channels *ch) 520 533 { 521 534 struct ionic_lif *lif = netdev_priv(netdev); 522 - bool running; 523 - int err; 524 535 525 536 if (!ch->combined_count || ch->other_count || 526 537 ch->rx_count || ch->tx_count) ··· 534 535 if (ch->combined_count == lif->nxqs) 535 536 return 0; 536 537 537 - err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET); 538 - if (err) 539 - return err; 540 - 541 - running = test_bit(IONIC_LIF_F_UP, lif->state); 542 - if (running) 543 - ionic_stop(netdev); 544 - 545 - lif->nxqs = ch->combined_count; 546 - 547 - if (running) 548 - ionic_open(netdev); 549 - clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state); 550 - 551 - return 0; 538 + return ionic_reset_queues(lif, ionic_set_queuecount, ch); 552 539 } 553 540 554 541 static u32 ionic_get_priv_flags(struct net_device *netdev)
+44 -29
drivers/net/ethernet/pensando/ionic/ionic_lif.c
··· 96 96 u16 link_status; 97 97 bool link_up; 98 98 99 - if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state) || 100 - test_bit(IONIC_LIF_F_QUEUE_RESET, lif->state)) 99 + if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state)) 101 100 return; 102 101 103 102 link_status = le16_to_cpu(lif->info->status.link_status); ··· 113 114 netif_carrier_on(netdev); 114 115 } 115 116 116 - if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) 117 + if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) { 118 + mutex_lock(&lif->queue_lock); 117 119 ionic_start_queues(lif); 120 + mutex_unlock(&lif->queue_lock); 121 + } 118 122 } else { 119 123 if (netif_carrier_ok(netdev)) { 120 124 netdev_info(netdev, "Link down\n"); 121 125 netif_carrier_off(netdev); 122 126 } 123 127 124 - if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) 128 + if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) { 129 + mutex_lock(&lif->queue_lock); 125 130 ionic_stop_queues(lif); 131 + mutex_unlock(&lif->queue_lock); 132 + } 126 133 } 127 134 128 135 clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state); ··· 868 863 if (f) 869 864 return 0; 870 865 871 - netdev_dbg(lif->netdev, "rx_filter add ADDR %pM (id %d)\n", addr, 872 - ctx.comp.rx_filter_add.filter_id); 866 + netdev_dbg(lif->netdev, "rx_filter add ADDR %pM\n", addr); 873 867 874 868 memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN); 875 869 err = ionic_adminq_post_wait(lif, &ctx); ··· 897 893 return -ENOENT; 898 894 } 899 895 896 + netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n", 897 + addr, f->filter_id); 898 + 900 899 ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id); 901 900 ionic_rx_filter_free(lif, f); 902 901 spin_unlock_bh(&lif->rx_filters.lock); ··· 907 900 err = ionic_adminq_post_wait(lif, &ctx); 908 901 if (err && err != -EEXIST) 909 902 return err; 910 - 911 - netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n", addr, 912 - 
ctx.cmd.rx_filter_del.filter_id); 913 903 914 904 return 0; 915 905 } ··· 1317 1313 return err; 1318 1314 1319 1315 netdev->mtu = new_mtu; 1320 - err = ionic_reset_queues(lif); 1316 + err = ionic_reset_queues(lif, NULL, NULL); 1321 1317 1322 1318 return err; 1323 1319 } ··· 1329 1325 netdev_info(lif->netdev, "Tx Timeout recovery\n"); 1330 1326 1331 1327 rtnl_lock(); 1332 - ionic_reset_queues(lif); 1328 + ionic_reset_queues(lif, NULL, NULL); 1333 1329 rtnl_unlock(); 1334 1330 } 1335 1331 ··· 1355 1351 }; 1356 1352 int err; 1357 1353 1354 + netdev_dbg(netdev, "rx_filter add VLAN %d\n", vid); 1358 1355 err = ionic_adminq_post_wait(lif, &ctx); 1359 1356 if (err) 1360 1357 return err; 1361 - 1362 - netdev_dbg(netdev, "rx_filter add VLAN %d (id %d)\n", vid, 1363 - ctx.comp.rx_filter_add.filter_id); 1364 1358 1365 1359 return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx); 1366 1360 } ··· 1384 1382 return -ENOENT; 1385 1383 } 1386 1384 1387 - netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n", vid, 1388 - le32_to_cpu(ctx.cmd.rx_filter_del.filter_id)); 1385 + netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n", 1386 + vid, f->filter_id); 1389 1387 1390 1388 ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id); 1391 1389 ionic_rx_filter_free(lif, f); ··· 1674 1672 err = ionic_txrx_init(lif); 1675 1673 if (err) 1676 1674 goto err_out; 1675 + 1676 + err = netif_set_real_num_tx_queues(netdev, lif->nxqs); 1677 + if (err) 1678 + goto err_txrx_deinit; 1679 + 1680 + err = netif_set_real_num_rx_queues(netdev, lif->nxqs); 1681 + if (err) 1682 + goto err_txrx_deinit; 1677 1683 1678 1684 /* don't start the queues until we have link */ 1679 1685 if (netif_carrier_ok(netdev)) { ··· 1990 1980 .ndo_get_vf_stats = ionic_get_vf_stats, 1991 1981 }; 1992 1982 1993 - int ionic_reset_queues(struct ionic_lif *lif) 1983 + int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg) 1994 1984 { 1995 1985 bool running; 1996 1986 int err = 0; 1997 1987 1998 - err 
= ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET); 1999 - if (err) 2000 - return err; 2001 - 1988 + mutex_lock(&lif->queue_lock); 2002 1989 running = netif_running(lif->netdev); 2003 1990 if (running) { 2004 1991 netif_device_detach(lif->netdev); 2005 1992 err = ionic_stop(lif->netdev); 2006 - } 2007 - if (!err && running) { 2008 - ionic_open(lif->netdev); 2009 - netif_device_attach(lif->netdev); 1993 + if (err) 1994 + return err; 2010 1995 } 2011 1996 2012 - clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state); 1997 + if (cb) 1998 + cb(lif, arg); 1999 + 2000 + if (running) { 2001 + err = ionic_open(lif->netdev); 2002 + netif_device_attach(lif->netdev); 2003 + } 2004 + mutex_unlock(&lif->queue_lock); 2013 2005 2014 2006 return err; 2015 2007 } ··· 2158 2146 2159 2147 if (test_bit(IONIC_LIF_F_UP, lif->state)) { 2160 2148 dev_info(ionic->dev, "Surprise FW stop, stopping queues\n"); 2149 + mutex_lock(&lif->queue_lock); 2161 2150 ionic_stop_queues(lif); 2151 + mutex_unlock(&lif->queue_lock); 2162 2152 } 2163 2153 2164 2154 if (netif_running(lif->netdev)) { ··· 2279 2265 cancel_work_sync(&lif->deferred.work); 2280 2266 cancel_work_sync(&lif->tx_timeout_work); 2281 2267 ionic_rx_filters_deinit(lif); 2268 + if (lif->netdev->features & NETIF_F_RXHASH) 2269 + ionic_lif_rss_deinit(lif); 2282 2270 } 2283 - 2284 - if (lif->netdev->features & NETIF_F_RXHASH) 2285 - ionic_lif_rss_deinit(lif); 2286 2271 2287 2272 napi_disable(&lif->adminqcq->napi); 2288 2273 ionic_lif_qcq_deinit(lif, lif->notifyqcq); 2289 2274 ionic_lif_qcq_deinit(lif, lif->adminqcq); 2290 2275 2276 + mutex_destroy(&lif->queue_lock); 2291 2277 ionic_lif_reset(lif); 2292 2278 } 2293 2279 ··· 2464 2450 return err; 2465 2451 2466 2452 lif->hw_index = le16_to_cpu(comp.hw_index); 2453 + mutex_init(&lif->queue_lock); 2467 2454 2468 2455 /* now that we have the hw_index we can figure out our doorbell page */ 2469 2456 lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
+4 -8
drivers/net/ethernet/pensando/ionic/ionic_lif.h
··· 135 135 IONIC_LIF_F_SW_DEBUG_STATS, 136 136 IONIC_LIF_F_UP, 137 137 IONIC_LIF_F_LINK_CHECK_REQUESTED, 138 - IONIC_LIF_F_QUEUE_RESET, 139 138 IONIC_LIF_F_FW_RESET, 140 139 141 140 /* leave this as last */ ··· 164 165 unsigned int hw_index; 165 166 unsigned int kern_pid; 166 167 u64 __iomem *kern_dbpage; 168 + struct mutex queue_lock; /* lock for queue structures */ 167 169 spinlock_t adminq_lock; /* lock for AdminQ operations */ 168 170 struct ionic_qcq *adminqcq; 169 171 struct ionic_qcq *notifyqcq; ··· 213 213 #define lif_to_txq(lif, i) (&lif_to_txqcq((lif), i)->q) 214 214 #define lif_to_rxq(lif, i) (&lif_to_txqcq((lif), i)->q) 215 215 216 - /* return 0 if successfully set the bit, else non-zero */ 217 - static inline int ionic_wait_for_bit(struct ionic_lif *lif, int bitname) 218 - { 219 - return wait_on_bit_lock(lif->state, bitname, TASK_INTERRUPTIBLE); 220 - } 221 - 222 216 static inline u32 ionic_coal_usec_to_hw(struct ionic *ionic, u32 usecs) 223 217 { 224 218 u32 mult = le32_to_cpu(ionic->ident.dev.intr_coal_mult); ··· 242 248 return (units * div) / mult; 243 249 } 244 250 251 + typedef void (*ionic_reset_cb)(struct ionic_lif *lif, void *arg); 252 + 245 253 void ionic_link_status_check_request(struct ionic_lif *lif); 246 254 void ionic_get_stats64(struct net_device *netdev, 247 255 struct rtnl_link_stats64 *ns); ··· 263 267 264 268 int ionic_open(struct net_device *netdev); 265 269 int ionic_stop(struct net_device *netdev); 266 - int ionic_reset_queues(struct ionic_lif *lif); 270 + int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg); 267 271 268 272 static inline void debug_stats_txq_post(struct ionic_qcq *qcq, 269 273 struct ionic_txq_desc *desc, bool dbell)
+29
drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
··· 21 21 void ionic_rx_filter_replay(struct ionic_lif *lif) 22 22 { 23 23 struct ionic_rx_filter_add_cmd *ac; 24 + struct hlist_head new_id_list; 24 25 struct ionic_admin_ctx ctx; 25 26 struct ionic_rx_filter *f; 26 27 struct hlist_head *head; 27 28 struct hlist_node *tmp; 29 + unsigned int key; 28 30 unsigned int i; 29 31 int err; 30 32 33 + INIT_HLIST_HEAD(&new_id_list); 31 34 ac = &ctx.cmd.rx_filter_add; 32 35 33 36 for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) { ··· 61 58 ac->mac.addr); 62 59 break; 63 60 } 61 + spin_lock_bh(&lif->rx_filters.lock); 62 + ionic_rx_filter_free(lif, f); 63 + spin_unlock_bh(&lif->rx_filters.lock); 64 + 65 + continue; 64 66 } 67 + 68 + /* remove from old id list, save new id in tmp list */ 69 + spin_lock_bh(&lif->rx_filters.lock); 70 + hlist_del(&f->by_id); 71 + spin_unlock_bh(&lif->rx_filters.lock); 72 + f->filter_id = le32_to_cpu(ctx.comp.rx_filter_add.filter_id); 73 + hlist_add_head(&f->by_id, &new_id_list); 65 74 } 66 75 } 76 + 77 + /* rebuild the by_id hash lists with the new filter ids */ 78 + spin_lock_bh(&lif->rx_filters.lock); 79 + hlist_for_each_entry_safe(f, tmp, &new_id_list, by_id) { 80 + key = f->filter_id & IONIC_RX_FILTER_HLISTS_MASK; 81 + head = &lif->rx_filters.by_id[key]; 82 + hlist_add_head(&f->by_id, head); 83 + } 84 + spin_unlock_bh(&lif->rx_filters.lock); 67 85 } 68 86 69 87 int ionic_rx_filters_init(struct ionic_lif *lif) ··· 93 69 94 70 spin_lock_init(&lif->rx_filters.lock); 95 71 72 + spin_lock_bh(&lif->rx_filters.lock); 96 73 for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) { 97 74 INIT_HLIST_HEAD(&lif->rx_filters.by_hash[i]); 98 75 INIT_HLIST_HEAD(&lif->rx_filters.by_id[i]); 99 76 } 77 + spin_unlock_bh(&lif->rx_filters.lock); 100 78 101 79 return 0; 102 80 } ··· 110 84 struct hlist_node *tmp; 111 85 unsigned int i; 112 86 87 + spin_lock_bh(&lif->rx_filters.lock); 113 88 for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) { 114 89 head = &lif->rx_filters.by_id[i]; 115 90 hlist_for_each_entry_safe(f, tmp, head, by_id) 
116 91 ionic_rx_filter_free(lif, f); 117 92 } 93 + spin_unlock_bh(&lif->rx_filters.lock); 118 94 } 119 95 120 96 int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index, ··· 152 124 f->filter_id = le32_to_cpu(ctx->comp.rx_filter_add.filter_id); 153 125 f->rxq_index = rxq_index; 154 126 memcpy(&f->cmd, ac, sizeof(f->cmd)); 127 + netdev_dbg(lif->netdev, "rx_filter add filter_id %d\n", f->filter_id); 155 128 156 129 INIT_HLIST_NODE(&f->by_hash); 157 130 INIT_HLIST_NODE(&f->by_id);
-6
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
··· 161 161 return; 162 162 } 163 163 164 - /* no packet processing while resetting */ 165 - if (unlikely(test_bit(IONIC_LIF_F_QUEUE_RESET, q->lif->state))) { 166 - stats->dropped++; 167 - return; 168 - } 169 - 170 164 stats->pkts++; 171 165 stats->bytes += le16_to_cpu(comp->len); 172 166
+2
drivers/net/ethernet/qlogic/qed/qed.h
··· 876 876 struct qed_dbg_feature dbg_features[DBG_FEATURE_NUM]; 877 877 u8 engine_for_debug; 878 878 bool disable_ilt_dump; 879 + bool dbg_bin_dump; 880 + 879 881 DECLARE_HASHTABLE(connections, 10); 880 882 const struct firmware *firmware; 881 883
+2 -2
drivers/net/ethernet/qlogic/qed/qed_cxt.c
··· 2008 2008 enum protocol_type proto; 2009 2009 2010 2010 if (p_hwfn->mcp_info->func_info.protocol == QED_PCI_ETH_RDMA) { 2011 - DP_NOTICE(p_hwfn, 2012 - "Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n"); 2011 + DP_VERBOSE(p_hwfn, QED_MSG_SP, 2012 + "Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n"); 2013 2013 p_hwfn->hw_info.personality = QED_PCI_ETH_ROCE; 2014 2014 } 2015 2015
+16 -1
drivers/net/ethernet/qlogic/qed/qed_debug.c
··· 7506 7506 if (p_hwfn->cdev->print_dbg_data) 7507 7507 qed_dbg_print_feature(text_buf, text_size_bytes); 7508 7508 7509 + /* Just return the original binary buffer if requested */ 7510 + if (p_hwfn->cdev->dbg_bin_dump) { 7511 + vfree(text_buf); 7512 + return DBG_STATUS_OK; 7513 + } 7514 + 7509 7515 /* Free the old dump_buf and point the dump_buf to the newly allocagted 7510 7516 * and formatted text buffer. 7511 7517 */ ··· 7739 7733 #define REGDUMP_HEADER_SIZE_SHIFT 0 7740 7734 #define REGDUMP_HEADER_SIZE_MASK 0xffffff 7741 7735 #define REGDUMP_HEADER_FEATURE_SHIFT 24 7742 - #define REGDUMP_HEADER_FEATURE_MASK 0x3f 7736 + #define REGDUMP_HEADER_FEATURE_MASK 0x1f 7737 + #define REGDUMP_HEADER_BIN_DUMP_SHIFT 29 7738 + #define REGDUMP_HEADER_BIN_DUMP_MASK 0x1 7743 7739 #define REGDUMP_HEADER_OMIT_ENGINE_SHIFT 30 7744 7740 #define REGDUMP_HEADER_OMIT_ENGINE_MASK 0x1 7745 7741 #define REGDUMP_HEADER_ENGINE_SHIFT 31 ··· 7779 7771 feature, feature_size); 7780 7772 7781 7773 SET_FIELD(res, REGDUMP_HEADER_FEATURE, feature); 7774 + SET_FIELD(res, REGDUMP_HEADER_BIN_DUMP, 1); 7782 7775 SET_FIELD(res, REGDUMP_HEADER_OMIT_ENGINE, omit_engine); 7783 7776 SET_FIELD(res, REGDUMP_HEADER_ENGINE, engine); 7784 7777 ··· 7803 7794 omit_engine = 1; 7804 7795 7805 7796 mutex_lock(&qed_dbg_lock); 7797 + cdev->dbg_bin_dump = true; 7806 7798 7807 7799 org_engine = qed_get_debug_engine(cdev); 7808 7800 for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) { ··· 7941 7931 DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc); 7942 7932 } 7943 7933 7934 + /* Re-populate nvm attribute info */ 7935 + qed_mcp_nvm_info_free(p_hwfn); 7936 + qed_mcp_nvm_info_populate(p_hwfn); 7937 + 7944 7938 /* nvm cfg1 */ 7945 7939 rc = qed_dbg_nvm_image(cdev, 7946 7940 (u8 *)buffer + offset + ··· 8007 7993 QED_NVM_IMAGE_MDUMP, "QED_NVM_IMAGE_MDUMP", rc); 8008 7994 } 8009 7995 7996 + cdev->dbg_bin_dump = false; 8010 7997 mutex_unlock(&qed_dbg_lock); 8011 7998 8012 7999 return 0;
+4 -10
drivers/net/ethernet/qlogic/qed/qed_dev.c
··· 3102 3102 } 3103 3103 3104 3104 /* Log and clear previous pglue_b errors if such exist */ 3105 - qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt); 3105 + qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt, true); 3106 3106 3107 3107 /* Enable the PF's internal FID_enable in the PXP */ 3108 3108 rc = qed_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt, ··· 4472 4472 return 0; 4473 4473 } 4474 4474 4475 - static void qed_nvm_info_free(struct qed_hwfn *p_hwfn) 4476 - { 4477 - kfree(p_hwfn->nvm_info.image_att); 4478 - p_hwfn->nvm_info.image_att = NULL; 4479 - } 4480 - 4481 4475 static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, 4482 4476 void __iomem *p_regview, 4483 4477 void __iomem *p_doorbells, ··· 4556 4562 return rc; 4557 4563 err3: 4558 4564 if (IS_LEAD_HWFN(p_hwfn)) 4559 - qed_nvm_info_free(p_hwfn); 4565 + qed_mcp_nvm_info_free(p_hwfn); 4560 4566 err2: 4561 4567 if (IS_LEAD_HWFN(p_hwfn)) 4562 4568 qed_iov_free_hw_info(p_hwfn->cdev); ··· 4617 4623 if (rc) { 4618 4624 if (IS_PF(cdev)) { 4619 4625 qed_init_free(p_hwfn); 4620 - qed_nvm_info_free(p_hwfn); 4626 + qed_mcp_nvm_info_free(p_hwfn); 4621 4627 qed_mcp_free(p_hwfn); 4622 4628 qed_hw_hwfn_free(p_hwfn); 4623 4629 } ··· 4651 4657 4652 4658 qed_iov_free_hw_info(cdev); 4653 4659 4654 - qed_nvm_info_free(p_hwfn); 4660 + qed_mcp_nvm_info_free(p_hwfn); 4655 4661 } 4656 4662 4657 4663 static void qed_chain_free_next_ptr(struct qed_dev *cdev,
+31 -22
drivers/net/ethernet/qlogic/qed/qed_int.c
··· 257 257 #define PGLUE_ATTENTION_ZLR_VALID (1 << 25) 258 258 #define PGLUE_ATTENTION_ILT_VALID (1 << 23) 259 259 260 - int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, 261 - struct qed_ptt *p_ptt) 260 + int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, 261 + bool hw_init) 262 262 { 263 + char msg[256]; 263 264 u32 tmp; 264 265 265 266 tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2); ··· 274 273 details = qed_rd(p_hwfn, p_ptt, 275 274 PGLUE_B_REG_TX_ERR_WR_DETAILS); 276 275 277 - DP_NOTICE(p_hwfn, 278 - "Illegal write by chip to [%08x:%08x] blocked.\n" 279 - "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" 280 - "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", 281 - addr_hi, addr_lo, details, 282 - (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), 283 - (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), 284 - GET_FIELD(details, 285 - PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0, 286 - tmp, 287 - GET_FIELD(tmp, 288 - PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0, 289 - GET_FIELD(tmp, 290 - PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0, 291 - GET_FIELD(tmp, 292 - PGLUE_ATTENTION_DETAILS2_FID_EN) ? 
1 : 0); 276 + snprintf(msg, sizeof(msg), 277 + "Illegal write by chip to [%08x:%08x] blocked.\n" 278 + "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" 279 + "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]", 280 + addr_hi, addr_lo, details, 281 + (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), 282 + (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), 283 + !!GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VF_VALID), 284 + tmp, 285 + !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_WAS_ERR), 286 + !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_BME), 287 + !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_FID_EN)); 288 + 289 + if (hw_init) 290 + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "%s\n", msg); 291 + else 292 + DP_NOTICE(p_hwfn, "%s\n", msg); 293 293 } 294 294 295 295 tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2); ··· 323 321 } 324 322 325 323 tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL); 326 - if (tmp & PGLUE_ATTENTION_ICPL_VALID) 327 - DP_NOTICE(p_hwfn, "ICPL error - %08x\n", tmp); 324 + if (tmp & PGLUE_ATTENTION_ICPL_VALID) { 325 + snprintf(msg, sizeof(msg), "ICPL error - %08x", tmp); 326 + 327 + if (hw_init) 328 + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "%s\n", msg); 329 + else 330 + DP_NOTICE(p_hwfn, "%s\n", msg); 331 + } 328 332 329 333 tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS); 330 334 if (tmp & PGLUE_ATTENTION_ZLR_VALID) { ··· 369 361 370 362 static int qed_pglueb_rbc_attn_cb(struct qed_hwfn *p_hwfn) 371 363 { 372 - return qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt); 364 + return qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt, false); 373 365 } 374 366 375 367 static int qed_fw_assertion(struct qed_hwfn *p_hwfn) ··· 1201 1193 index, attn_bits, attn_acks, asserted_bits, 1202 1194 deasserted_bits, p_sb_attn_sw->known_attn); 1203 1195 } else if (asserted_bits == 0x100) { 1204 - DP_INFO(p_hwfn, "MFW indication via attention\n"); 1196 + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 1197 + 
"MFW indication via attention\n"); 1205 1198 } else { 1206 1199 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 1207 1200 "MFW indication [deassertion]\n");
+2 -2
drivers/net/ethernet/qlogic/qed/qed_int.h
··· 442 442 443 443 #define QED_MAPPING_MEMORY_SIZE(dev) (NUM_OF_SBS(dev)) 444 444 445 - int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, 446 - struct qed_ptt *p_ptt); 445 + int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, 446 + bool hw_init); 447 447 448 448 #endif
+7
drivers/net/ethernet/qlogic/qed/qed_mcp.c
··· 3280 3280 return rc; 3281 3281 } 3282 3282 3283 + void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn) 3284 + { 3285 + kfree(p_hwfn->nvm_info.image_att); 3286 + p_hwfn->nvm_info.image_att = NULL; 3287 + p_hwfn->nvm_info.valid = false; 3288 + } 3289 + 3283 3290 int 3284 3291 qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, 3285 3292 enum qed_nvm_images image_id,
+7
drivers/net/ethernet/qlogic/qed/qed_mcp.h
··· 1221 1221 int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn); 1222 1222 1223 1223 /** 1224 + * @brief Delete nvm info shadow in the given hardware function 1225 + * 1226 + * @param p_hwfn 1227 + */ 1228 + void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn); 1229 + 1230 + /** 1224 1231 * @brief Get the engine affinity configuration. 1225 1232 * 1226 1233 * @param p_hwfn
+13 -5
drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
··· 47 47 return 0; 48 48 } 49 49 50 - static int rmnet_register_real_device(struct net_device *real_dev) 50 + static int rmnet_register_real_device(struct net_device *real_dev, 51 + struct netlink_ext_ack *extack) 51 52 { 52 53 struct rmnet_port *port; 53 54 int rc, entry; 54 55 55 56 ASSERT_RTNL(); 56 57 57 - if (rmnet_is_real_dev_registered(real_dev)) 58 + if (rmnet_is_real_dev_registered(real_dev)) { 59 + port = rmnet_get_port_rtnl(real_dev); 60 + if (port->rmnet_mode != RMNET_EPMODE_VND) { 61 + NL_SET_ERR_MSG_MOD(extack, "bridge device already exists"); 62 + return -EINVAL; 63 + } 64 + 58 65 return 0; 66 + } 59 67 60 68 port = kzalloc(sizeof(*port), GFP_KERNEL); 61 69 if (!port) ··· 141 133 142 134 mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]); 143 135 144 - err = rmnet_register_real_device(real_dev); 136 + err = rmnet_register_real_device(real_dev, extack); 145 137 if (err) 146 138 goto err0; 147 139 ··· 430 422 } 431 423 432 424 if (port->rmnet_mode != RMNET_EPMODE_VND) { 433 - NL_SET_ERR_MSG_MOD(extack, "bridge device already exists"); 425 + NL_SET_ERR_MSG_MOD(extack, "more than one bridge dev attached"); 434 426 return -EINVAL; 435 427 } 436 428 ··· 441 433 return -EBUSY; 442 434 } 443 435 444 - err = rmnet_register_real_device(slave_dev); 436 + err = rmnet_register_real_device(slave_dev, extack); 445 437 if (err) 446 438 return -EBUSY; 447 439
+24 -2
drivers/net/ethernet/renesas/ravb_main.c
··· 1450 1450 struct ravb_private *priv = container_of(work, struct ravb_private, 1451 1451 work); 1452 1452 struct net_device *ndev = priv->ndev; 1453 + int error; 1453 1454 1454 1455 netif_tx_stop_all_queues(ndev); 1455 1456 ··· 1459 1458 ravb_ptp_stop(ndev); 1460 1459 1461 1460 /* Wait for DMA stopping */ 1462 - ravb_stop_dma(ndev); 1461 + if (ravb_stop_dma(ndev)) { 1462 + /* If ravb_stop_dma() fails, the hardware is still operating 1463 + * for TX and/or RX. So, this should not call the following 1464 + * functions because ravb_dmac_init() is possible to fail too. 1465 + * Also, this should not retry ravb_stop_dma() again and again 1466 + * here because it's possible to wait forever. So, this just 1467 + * re-enables the TX and RX and skip the following 1468 + * re-initialization procedure. 1469 + */ 1470 + ravb_rcv_snd_enable(ndev); 1471 + goto out; 1472 + } 1463 1473 1464 1474 ravb_ring_free(ndev, RAVB_BE); 1465 1475 ravb_ring_free(ndev, RAVB_NC); 1466 1476 1467 1477 /* Device init */ 1468 - ravb_dmac_init(ndev); 1478 + error = ravb_dmac_init(ndev); 1479 + if (error) { 1480 + /* If ravb_dmac_init() fails, descriptors are freed. So, this 1481 + * should return here to avoid re-enabling the TX and RX in 1482 + * ravb_emac_init(). 1483 + */ 1484 + netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n", 1485 + __func__, error); 1486 + return; 1487 + } 1469 1488 ravb_emac_init(ndev); 1470 1489 1490 + out: 1471 1491 /* Initialise PTP Clock driver */ 1472 1492 if (priv->chip_id == RCAR_GEN2) 1473 1493 ravb_ptp_init(ndev, priv->pdev);
+2 -2
drivers/net/ethernet/smsc/smc91x.c
··· 2274 2274 ret = try_toggle_control_gpio(&pdev->dev, &lp->power_gpio, 2275 2275 "power", 0, 0, 100); 2276 2276 if (ret) 2277 - return ret; 2277 + goto out_free_netdev; 2278 2278 2279 2279 /* 2280 2280 * Optional reset GPIO configured? Minimum 100 ns reset needed ··· 2283 2283 ret = try_toggle_control_gpio(&pdev->dev, &lp->reset_gpio, 2284 2284 "reset", 0, 0, 100); 2285 2285 if (ret) 2286 - return ret; 2286 + goto out_free_netdev; 2287 2287 2288 2288 /* 2289 2289 * Need to wait for optional EEPROM to load, max 750 us according
+1 -1
drivers/net/ethernet/socionext/sni_ave.c
··· 1191 1191 ret = regmap_update_bits(priv->regmap, SG_ETPINMODE, 1192 1192 priv->pinmode_mask, priv->pinmode_val); 1193 1193 if (ret) 1194 - return ret; 1194 + goto out_reset_assert; 1195 1195 1196 1196 ave_global_reset(ndev); 1197 1197
+2 -1
drivers/net/ethernet/ti/am65-cpsw-nuss.c
··· 1850 1850 port->ndev->max_mtu = AM65_CPSW_MAX_PACKET_SIZE; 1851 1851 port->ndev->hw_features = NETIF_F_SG | 1852 1852 NETIF_F_RXCSUM | 1853 - NETIF_F_HW_CSUM; 1853 + NETIF_F_HW_CSUM | 1854 + NETIF_F_HW_TC; 1854 1855 port->ndev->features = port->ndev->hw_features | 1855 1856 NETIF_F_HW_VLAN_CTAG_FILTER; 1856 1857 port->ndev->vlan_features |= NETIF_F_SG;
+1 -1
drivers/net/geneve.c
··· 1615 1615 struct netlink_ext_ack *extack) 1616 1616 { 1617 1617 struct geneve_dev *geneve = netdev_priv(dev); 1618 + enum ifla_geneve_df df = geneve->df; 1618 1619 struct geneve_sock *gs4, *gs6; 1619 1620 struct ip_tunnel_info info; 1620 1621 bool metadata; 1621 1622 bool use_udp6_rx_checksums; 1622 - enum ifla_geneve_df df; 1623 1623 bool ttl_inherit; 1624 1624 int err; 1625 1625
+1 -1
drivers/net/hippi/rrunner.c
··· 1242 1242 rrpriv->info = NULL; 1243 1243 } 1244 1244 if (rrpriv->rx_ctrl) { 1245 - pci_free_consistent(pdev, sizeof(struct ring_ctrl), 1245 + pci_free_consistent(pdev, 256 * sizeof(struct ring_ctrl), 1246 1246 rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma); 1247 1247 rrpriv->rx_ctrl = NULL; 1248 1248 }
+4 -2
drivers/net/ieee802154/adf7242.c
··· 4 4 * 5 5 * Copyright 2009-2017 Analog Devices Inc. 6 6 * 7 - * http://www.analog.com/ADF7242 7 + * https://www.analog.com/ADF7242 8 8 */ 9 9 10 10 #include <linux/kernel.h> ··· 1262 1262 WQ_MEM_RECLAIM); 1263 1263 if (unlikely(!lp->wqueue)) { 1264 1264 ret = -ENOMEM; 1265 - goto err_hw_init; 1265 + goto err_alloc_wq; 1266 1266 } 1267 1267 1268 1268 ret = adf7242_hw_init(lp); ··· 1294 1294 return ret; 1295 1295 1296 1296 err_hw_init: 1297 + destroy_workqueue(lp->wqueue); 1298 + err_alloc_wq: 1297 1299 mutex_destroy(&lp->bmux); 1298 1300 ieee802154_free_hw(lp->hw); 1299 1301
+7 -9
drivers/net/ipa/gsi.c
··· 500 500 int ret; 501 501 502 502 state = gsi_channel_state(channel); 503 + 504 + /* Channel could have entered STOPPED state since last call 505 + * if it timed out. If so, we're done. 506 + */ 507 + if (state == GSI_CHANNEL_STATE_STOPPED) 508 + return 0; 509 + 503 510 if (state != GSI_CHANNEL_STATE_STARTED && 504 511 state != GSI_CHANNEL_STATE_STOP_IN_PROC) 505 512 return -EINVAL; ··· 796 789 int gsi_channel_stop(struct gsi *gsi, u32 channel_id) 797 790 { 798 791 struct gsi_channel *channel = &gsi->channel[channel_id]; 799 - enum gsi_channel_state state; 800 792 u32 retries; 801 793 int ret; 802 794 803 795 gsi_channel_freeze(channel); 804 - 805 - /* Channel could have entered STOPPED state since last call if the 806 - * STOP command timed out. We won't stop a channel if stopping it 807 - * was successful previously (so we still want the freeze above). 808 - */ 809 - state = gsi_channel_state(channel); 810 - if (state == GSI_CHANNEL_STATE_STOPPED) 811 - return 0; 812 796 813 797 /* RX channels might require a little time to enter STOPPED state */ 814 798 retries = channel->toward_ipa ? 0 : GSI_CHANNEL_STOP_RX_RETRIES;
+15
drivers/net/ipa/ipa_cmd.c
··· 586 586 return 4; 587 587 } 588 588 589 + void ipa_cmd_tag_process(struct ipa *ipa) 590 + { 591 + u32 count = ipa_cmd_tag_process_count(); 592 + struct gsi_trans *trans; 593 + 594 + trans = ipa_cmd_trans_alloc(ipa, count); 595 + if (trans) { 596 + ipa_cmd_tag_process_add(trans); 597 + gsi_trans_commit_wait(trans); 598 + } else { 599 + dev_err(&ipa->pdev->dev, 600 + "error allocating %u entry tag transaction\n", count); 601 + } 602 + } 603 + 589 604 static struct ipa_cmd_info * 590 605 ipa_cmd_info_alloc(struct ipa_endpoint *endpoint, u32 tre_count) 591 606 {
+8
drivers/net/ipa/ipa_cmd.h
··· 172 172 u32 ipa_cmd_tag_process_count(void); 173 173 174 174 /** 175 + * ipa_cmd_tag_process() - Perform a tag process 176 + * 177 + * @Return: The number of elements to allocate in a transaction 178 + * to hold tag process commands 179 + */ 180 + void ipa_cmd_tag_process(struct ipa *ipa); 181 + 182 + /** 175 183 * ipa_cmd_trans_alloc() - Allocate a transaction for the command TX endpoint 176 184 * @ipa: IPA pointer 177 185 * @tre_count: Number of elements in the transaction
-1
drivers/net/ipa/ipa_data-sdm845.c
··· 44 44 .endpoint = { 45 45 .seq_type = IPA_SEQ_INVALID, 46 46 .config = { 47 - .checksum = true, 48 47 .aggregation = true, 49 48 .status_enable = true, 50 49 .rx = {
+2
drivers/net/ipa/ipa_endpoint.c
··· 1450 1450 if (ipa->modem_netdev) 1451 1451 ipa_modem_suspend(ipa->modem_netdev); 1452 1452 1453 + ipa_cmd_tag_process(ipa); 1454 + 1453 1455 ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]); 1454 1456 ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]); 1455 1457 }
+1
drivers/net/ipa/ipa_gsi.c
··· 6 6 7 7 #include <linux/types.h> 8 8 9 + #include "ipa_gsi.h" 9 10 #include "gsi_trans.h" 10 11 #include "ipa.h" 11 12 #include "ipa_endpoint.h"
+2
drivers/net/ipa/ipa_gsi.h
··· 8 8 9 9 #include <linux/types.h> 10 10 11 + struct gsi; 11 12 struct gsi_trans; 13 + struct ipa_gsi_endpoint_data; 12 14 13 15 /** 14 16 * ipa_gsi_trans_complete() - GSI transaction completion callback
+3 -3
drivers/net/ipa/ipa_qmi_msg.c
··· 119 119 sizeof_field(struct ipa_driver_init_complete_rsp, 120 120 rsp), 121 121 .tlv_type = 0x02, 122 - .elem_size = offsetof(struct ipa_driver_init_complete_rsp, 122 + .offset = offsetof(struct ipa_driver_init_complete_rsp, 123 123 rsp), 124 124 .ei_array = qmi_response_type_v01_ei, 125 125 }, ··· 137 137 sizeof_field(struct ipa_init_complete_ind, 138 138 status), 139 139 .tlv_type = 0x02, 140 - .elem_size = offsetof(struct ipa_init_complete_ind, 140 + .offset = offsetof(struct ipa_init_complete_ind, 141 141 status), 142 142 .ei_array = qmi_response_type_v01_ei, 143 143 }, ··· 218 218 sizeof_field(struct ipa_init_modem_driver_req, 219 219 platform_type_valid), 220 220 .tlv_type = 0x10, 221 - .elem_size = offsetof(struct ipa_init_modem_driver_req, 221 + .offset = offsetof(struct ipa_init_modem_driver_req, 222 222 platform_type_valid), 223 223 }, 224 224 {
+2 -3
drivers/net/macsec.c
··· 4052 4052 return err; 4053 4053 4054 4054 netdev_lockdep_set_classes(dev); 4055 - lockdep_set_class_and_subclass(&dev->addr_list_lock, 4056 - &macsec_netdev_addr_lock_key, 4057 - dev->lower_level); 4055 + lockdep_set_class(&dev->addr_list_lock, 4056 + &macsec_netdev_addr_lock_key); 4058 4057 4059 4058 err = netdev_upper_dev_link(real_dev, dev, extack); 4060 4059 if (err < 0)
+2 -3
drivers/net/macvlan.c
··· 880 880 static void macvlan_set_lockdep_class(struct net_device *dev) 881 881 { 882 882 netdev_lockdep_set_classes(dev); 883 - lockdep_set_class_and_subclass(&dev->addr_list_lock, 884 - &macvlan_netdev_addr_lock_key, 885 - dev->lower_level); 883 + lockdep_set_class(&dev->addr_list_lock, 884 + &macvlan_netdev_addr_lock_key); 886 885 } 887 886 888 887 static int macvlan_init(struct net_device *dev)
+2 -2
drivers/net/netdevsim/netdev.c
··· 302 302 rtnl_lock(); 303 303 err = nsim_bpf_init(ns); 304 304 if (err) 305 - goto err_free_netdev; 305 + goto err_rtnl_unlock; 306 306 307 307 nsim_ipsec_init(ns); 308 308 ··· 316 316 err_ipsec_teardown: 317 317 nsim_ipsec_teardown(ns); 318 318 nsim_bpf_uninit(ns); 319 + err_rtnl_unlock: 319 320 rtnl_unlock(); 320 - err_free_netdev: 321 321 free_netdev(dev); 322 322 return ERR_PTR(err); 323 323 }
+4
drivers/net/phy/dp83640.c
··· 1260 1260 dp83640->hwts_rx_en = 1; 1261 1261 dp83640->layer = PTP_CLASS_L4; 1262 1262 dp83640->version = PTP_CLASS_V1; 1263 + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; 1263 1264 break; 1264 1265 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 1265 1266 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: ··· 1268 1267 dp83640->hwts_rx_en = 1; 1269 1268 dp83640->layer = PTP_CLASS_L4; 1270 1269 dp83640->version = PTP_CLASS_V2; 1270 + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; 1271 1271 break; 1272 1272 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 1273 1273 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: ··· 1276 1274 dp83640->hwts_rx_en = 1; 1277 1275 dp83640->layer = PTP_CLASS_L2; 1278 1276 dp83640->version = PTP_CLASS_V2; 1277 + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; 1279 1278 break; 1280 1279 case HWTSTAMP_FILTER_PTP_V2_EVENT: 1281 1280 case HWTSTAMP_FILTER_PTP_V2_SYNC: ··· 1284 1281 dp83640->hwts_rx_en = 1; 1285 1282 dp83640->layer = PTP_CLASS_L4 | PTP_CLASS_L2; 1286 1283 dp83640->version = PTP_CLASS_V2; 1284 + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; 1287 1285 break; 1288 1286 default: 1289 1287 return -ERANGE;
+2
drivers/net/tun.c
··· 62 62 #include <net/rtnetlink.h> 63 63 #include <net/sock.h> 64 64 #include <net/xdp.h> 65 + #include <net/ip_tunnels.h> 65 66 #include <linux/seq_file.h> 66 67 #include <linux/uio.h> 67 68 #include <linux/skb_array.h> ··· 1352 1351 switch (tun->flags & TUN_TYPE_MASK) { 1353 1352 case IFF_TUN: 1354 1353 dev->netdev_ops = &tun_netdev_ops; 1354 + dev->header_ops = &ip_tunnel_header_ops; 1355 1355 1356 1356 /* Point-to-Point TUN Device */ 1357 1357 dev->hard_header_len = 0;
+1
drivers/net/usb/ax88172a.c
··· 187 187 ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf, 0); 188 188 if (ret < ETH_ALEN) { 189 189 netdev_err(dev->net, "Failed to read MAC address: %d\n", ret); 190 + ret = -EIO; 190 191 goto free; 191 192 } 192 193 memcpy(dev->net->dev_addr, buf, ETH_ALEN);
+3 -2
drivers/net/usb/hso.c
··· 1390 1390 unsigned long flags; 1391 1391 1392 1392 if (old) 1393 - hso_dbg(0x16, "Termios called with: cflags new[%d] - old[%d]\n", 1394 - tty->termios.c_cflag, old->c_cflag); 1393 + hso_dbg(0x16, "Termios called with: cflags new[%u] - old[%u]\n", 1394 + (unsigned int)tty->termios.c_cflag, 1395 + (unsigned int)old->c_cflag); 1395 1396 1396 1397 /* the actual setup */ 1397 1398 spin_lock_irqsave(&serial->serial_lock, flags);
+1
drivers/net/usb/qmi_wwan.c
··· 1370 1370 {QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */ 1371 1371 {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */ 1372 1372 {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */ 1373 + {QMI_QUIRK_SET_DTR(0x2c7c, 0x0195, 4)}, /* Quectel EG95 */ 1373 1374 {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */ 1374 1375 {QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */ 1375 1376 {QMI_FIXED_INTF(0x0489, 0xe0b4, 0)}, /* Foxconn T77W968 LTE */
+8 -1
drivers/net/usb/smsc95xx.c
··· 1287 1287 1288 1288 /* Init all registers */ 1289 1289 ret = smsc95xx_reset(dev); 1290 + if (ret) 1291 + goto free_pdata; 1290 1292 1291 1293 /* detect device revision as different features may be available */ 1292 1294 ret = smsc95xx_read_reg(dev, ID_REV, &val); 1293 1295 if (ret < 0) 1294 - return ret; 1296 + goto free_pdata; 1297 + 1295 1298 val >>= 16; 1296 1299 pdata->chip_id = val; 1297 1300 pdata->mdix_ctrl = get_mdix_status(dev->net); ··· 1320 1317 schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY); 1321 1318 1322 1319 return 0; 1320 + 1321 + free_pdata: 1322 + kfree(pdata); 1323 + return ret; 1323 1324 } 1324 1325 1325 1326 static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
+3 -1
drivers/net/wan/hdlc_x25.c
··· 71 71 { 72 72 unsigned char *ptr; 73 73 74 - if (skb_cow(skb, 1)) 74 + if (skb_cow(skb, 1)) { 75 + kfree_skb(skb); 75 76 return NET_RX_DROP; 77 + } 76 78 77 79 skb_push(skb, 1); 78 80 skb_reset_network_header(skb);
+13 -4
drivers/net/wan/lapbether.c
··· 128 128 { 129 129 unsigned char *ptr; 130 130 131 - skb_push(skb, 1); 132 - 133 - if (skb_cow(skb, 1)) 131 + if (skb_cow(skb, 1)) { 132 + kfree_skb(skb); 134 133 return NET_RX_DROP; 134 + } 135 + 136 + skb_push(skb, 1); 135 137 136 138 ptr = skb->data; 137 139 *ptr = X25_IFACE_DATA; ··· 305 303 dev->netdev_ops = &lapbeth_netdev_ops; 306 304 dev->needs_free_netdev = true; 307 305 dev->type = ARPHRD_X25; 308 - dev->hard_header_len = 3; 309 306 dev->mtu = 1000; 310 307 dev->addr_len = 0; 311 308 } ··· 324 323 lapbeth_setup); 325 324 if (!ndev) 326 325 goto out; 326 + 327 + /* When transmitting data: 328 + * first this driver removes a pseudo header of 1 byte, 329 + * then the lapb module prepends an LAPB header of at most 3 bytes, 330 + * then this driver prepends a length field of 2 bytes, 331 + * then the underlying Ethernet device prepends its own header. 332 + */ 333 + ndev->hard_header_len = -1 + 3 + 2 + dev->hard_header_len; 327 334 328 335 lapbeth = netdev_priv(ndev); 329 336 lapbeth->axdev = ndev;
+14 -7
drivers/net/wan/x25_asy.c
··· 183 183 netif_wake_queue(sl->dev); 184 184 } 185 185 186 - /* Send one completely decapsulated IP datagram to the IP layer. */ 186 + /* Send an LAPB frame to the LAPB module to process. */ 187 187 188 188 static void x25_asy_bump(struct x25_asy *sl) 189 189 { ··· 195 195 count = sl->rcount; 196 196 dev->stats.rx_bytes += count; 197 197 198 - skb = dev_alloc_skb(count+1); 198 + skb = dev_alloc_skb(count); 199 199 if (skb == NULL) { 200 200 netdev_warn(sl->dev, "memory squeeze, dropping packet\n"); 201 201 dev->stats.rx_dropped++; 202 202 return; 203 203 } 204 - skb_push(skb, 1); /* LAPB internal control */ 205 204 skb_put_data(skb, sl->rbuff, count); 206 205 skb->protocol = x25_type_trans(skb, sl->dev); 207 206 err = lapb_data_received(skb->dev, skb); ··· 208 209 kfree_skb(skb); 209 210 printk(KERN_DEBUG "x25_asy: data received err - %d\n", err); 210 211 } else { 211 - netif_rx(skb); 212 212 dev->stats.rx_packets++; 213 213 } 214 214 } ··· 354 356 */ 355 357 356 358 /* 357 - * Called when I frame data arrives. We did the work above - throw it 358 - * at the net layer. 359 + * Called when I frame data arrive. We add a pseudo header for upper 360 + * layers and pass it to upper layers. 359 361 */ 360 362 361 363 static int x25_asy_data_indication(struct net_device *dev, struct sk_buff *skb) 362 364 { 365 + if (skb_cow(skb, 1)) { 366 + kfree_skb(skb); 367 + return NET_RX_DROP; 368 + } 369 + skb_push(skb, 1); 370 + skb->data[0] = X25_IFACE_DATA; 371 + 372 + skb->protocol = x25_type_trans(skb, dev); 373 + 363 374 return netif_rx(skb); 364 375 } 365 376 ··· 664 657 switch (s) { 665 658 case X25_END: 666 659 if (!test_and_clear_bit(SLF_ERROR, &sl->flags) && 667 - sl->rcount > 2) 660 + sl->rcount >= 2) 668 661 x25_asy_bump(sl); 669 662 clear_bit(SLF_ESCAPE, &sl->flags); 670 663 sl->rcount = 0;
+1
drivers/net/wireguard/device.c
··· 262 262 max(sizeof(struct ipv6hdr), sizeof(struct iphdr)); 263 263 264 264 dev->netdev_ops = &netdev_ops; 265 + dev->header_ops = &ip_tunnel_header_ops; 265 266 dev->hard_header_len = 0; 266 267 dev->addr_len = 0; 267 268 dev->needed_headroom = DATA_PACKET_HEAD_ROOM;
+2 -17
drivers/net/wireguard/queueing.h
··· 11 11 #include <linux/skbuff.h> 12 12 #include <linux/ip.h> 13 13 #include <linux/ipv6.h> 14 + #include <net/ip_tunnels.h> 14 15 15 16 struct wg_device; 16 17 struct wg_peer; ··· 66 65 #define PACKET_CB(skb) ((struct packet_cb *)((skb)->cb)) 67 66 #define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer) 68 67 69 - /* Returns either the correct skb->protocol value, or 0 if invalid. */ 70 - static inline __be16 wg_examine_packet_protocol(struct sk_buff *skb) 71 - { 72 - if (skb_network_header(skb) >= skb->head && 73 - (skb_network_header(skb) + sizeof(struct iphdr)) <= 74 - skb_tail_pointer(skb) && 75 - ip_hdr(skb)->version == 4) 76 - return htons(ETH_P_IP); 77 - if (skb_network_header(skb) >= skb->head && 78 - (skb_network_header(skb) + sizeof(struct ipv6hdr)) <= 79 - skb_tail_pointer(skb) && 80 - ipv6_hdr(skb)->version == 6) 81 - return htons(ETH_P_IPV6); 82 - return 0; 83 - } 84 - 85 68 static inline bool wg_check_packet_protocol(struct sk_buff *skb) 86 69 { 87 - __be16 real_protocol = wg_examine_packet_protocol(skb); 70 + __be16 real_protocol = ip_tunnel_parse_protocol(skb); 88 71 return real_protocol && skb->protocol == real_protocol; 89 72 } 90 73
+1 -1
drivers/net/wireguard/receive.c
··· 387 387 */ 388 388 skb->ip_summed = CHECKSUM_UNNECESSARY; 389 389 skb->csum_level = ~0; /* All levels */ 390 - skb->protocol = wg_examine_packet_protocol(skb); 390 + skb->protocol = ip_tunnel_parse_protocol(skb); 391 391 if (skb->protocol == htons(ETH_P_IP)) { 392 392 len = ntohs(ip_hdr(skb)->tot_len); 393 393 if (unlikely(len < sizeof(struct iphdr)))
+1 -1
drivers/net/wireless/ath/ath10k/ahb.c
··· 820 820 ath10k_ahb_release_irq_legacy(ar); 821 821 822 822 err_free_pipes: 823 - ath10k_pci_free_pipes(ar); 823 + ath10k_pci_release_resource(ar); 824 824 825 825 err_resource_deinit: 826 826 ath10k_ahb_resource_deinit(ar);
+37 -41
drivers/net/wireless/ath/ath10k/pci.c
··· 3473 3473 3474 3474 timer_setup(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 0); 3475 3475 3476 + ar_pci->attr = kmemdup(pci_host_ce_config_wlan, 3477 + sizeof(pci_host_ce_config_wlan), 3478 + GFP_KERNEL); 3479 + if (!ar_pci->attr) 3480 + return -ENOMEM; 3481 + 3482 + ar_pci->pipe_config = kmemdup(pci_target_ce_config_wlan, 3483 + sizeof(pci_target_ce_config_wlan), 3484 + GFP_KERNEL); 3485 + if (!ar_pci->pipe_config) { 3486 + ret = -ENOMEM; 3487 + goto err_free_attr; 3488 + } 3489 + 3490 + ar_pci->serv_to_pipe = kmemdup(pci_target_service_to_ce_map_wlan, 3491 + sizeof(pci_target_service_to_ce_map_wlan), 3492 + GFP_KERNEL); 3493 + if (!ar_pci->serv_to_pipe) { 3494 + ret = -ENOMEM; 3495 + goto err_free_pipe_config; 3496 + } 3497 + 3476 3498 if (QCA_REV_6174(ar) || QCA_REV_9377(ar)) 3477 3499 ath10k_pci_override_ce_config(ar); 3478 3500 ··· 3502 3480 if (ret) { 3503 3481 ath10k_err(ar, "failed to allocate copy engine pipes: %d\n", 3504 3482 ret); 3505 - return ret; 3483 + goto err_free_serv_to_pipe; 3506 3484 } 3507 3485 3508 3486 return 0; 3487 + 3488 + err_free_serv_to_pipe: 3489 + kfree(ar_pci->serv_to_pipe); 3490 + err_free_pipe_config: 3491 + kfree(ar_pci->pipe_config); 3492 + err_free_attr: 3493 + kfree(ar_pci->attr); 3494 + return ret; 3509 3495 } 3510 3496 3511 3497 void ath10k_pci_release_resource(struct ath10k *ar) 3512 3498 { 3499 + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 3500 + 3513 3501 ath10k_pci_rx_retry_sync(ar); 3514 3502 netif_napi_del(&ar->napi); 3515 3503 ath10k_pci_ce_deinit(ar); 3516 3504 ath10k_pci_free_pipes(ar); 3505 + kfree(ar_pci->attr); 3506 + kfree(ar_pci->pipe_config); 3507 + kfree(ar_pci->serv_to_pipe); 3517 3508 } 3518 3509 3519 3510 static const struct ath10k_bus_ops ath10k_pci_bus_ops = { ··· 3636 3601 3637 3602 timer_setup(&ar_pci->ps_timer, ath10k_pci_ps_timer, 0); 3638 3603 3639 - ar_pci->attr = kmemdup(pci_host_ce_config_wlan, 3640 - sizeof(pci_host_ce_config_wlan), 3641 - GFP_KERNEL); 3642 - if 
(!ar_pci->attr) { 3643 - ret = -ENOMEM; 3644 - goto err_free; 3645 - } 3646 - 3647 - ar_pci->pipe_config = kmemdup(pci_target_ce_config_wlan, 3648 - sizeof(pci_target_ce_config_wlan), 3649 - GFP_KERNEL); 3650 - if (!ar_pci->pipe_config) { 3651 - ret = -ENOMEM; 3652 - goto err_free; 3653 - } 3654 - 3655 - ar_pci->serv_to_pipe = kmemdup(pci_target_service_to_ce_map_wlan, 3656 - sizeof(pci_target_service_to_ce_map_wlan), 3657 - GFP_KERNEL); 3658 - if (!ar_pci->serv_to_pipe) { 3659 - ret = -ENOMEM; 3660 - goto err_free; 3661 - } 3662 - 3663 3604 ret = ath10k_pci_setup_resource(ar); 3664 3605 if (ret) { 3665 3606 ath10k_err(ar, "failed to setup resource: %d\n", ret); ··· 3716 3705 3717 3706 err_free_irq: 3718 3707 ath10k_pci_free_irq(ar); 3719 - ath10k_pci_rx_retry_sync(ar); 3720 3708 3721 3709 err_deinit_irq: 3722 - ath10k_pci_deinit_irq(ar); 3710 + ath10k_pci_release_resource(ar); 3723 3711 3724 3712 err_sleep: 3725 3713 ath10k_pci_sleep_sync(ar); ··· 3730 3720 err_core_destroy: 3731 3721 ath10k_core_destroy(ar); 3732 3722 3733 - err_free: 3734 - kfree(ar_pci->attr); 3735 - kfree(ar_pci->pipe_config); 3736 - kfree(ar_pci->serv_to_pipe); 3737 - 3738 3723 return ret; 3739 3724 } 3740 3725 3741 3726 static void ath10k_pci_remove(struct pci_dev *pdev) 3742 3727 { 3743 3728 struct ath10k *ar = pci_get_drvdata(pdev); 3744 - struct ath10k_pci *ar_pci; 3745 3729 3746 3730 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n"); 3747 3731 3748 3732 if (!ar) 3749 - return; 3750 - 3751 - ar_pci = ath10k_pci_priv(ar); 3752 - 3753 - if (!ar_pci) 3754 3733 return; 3755 3734 3756 3735 ath10k_core_unregister(ar); ··· 3749 3750 ath10k_pci_sleep_sync(ar); 3750 3751 ath10k_pci_release(ar); 3751 3752 ath10k_core_destroy(ar); 3752 - kfree(ar_pci->attr); 3753 - kfree(ar_pci->pipe_config); 3754 - kfree(ar_pci->serv_to_pipe); 3755 3753 } 3756 3754 3757 3755 MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
+3 -1
drivers/net/wireless/ath/ath9k/hif_usb.c
··· 733 733 return; 734 734 } 735 735 736 + rx_buf->skb = nskb; 737 + 736 738 usb_fill_int_urb(urb, hif_dev->udev, 737 739 usb_rcvintpipe(hif_dev->udev, 738 740 USB_REG_IN_PIPE), 739 741 nskb->data, MAX_REG_IN_BUF_SIZE, 740 - ath9k_hif_usb_reg_in_cb, nskb, 1); 742 + ath9k_hif_usb_reg_in_cb, rx_buf, 1); 741 743 } 742 744 743 745 resubmit:
+14 -2
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
··· 271 271 { 272 272 struct iwl_fw_ini_trigger_tlv *trig = (void *)tlv->data; 273 273 u32 tp = le32_to_cpu(trig->time_point); 274 + struct iwl_ucode_tlv *dup = NULL; 275 + int ret; 274 276 275 277 if (le32_to_cpu(tlv->length) < sizeof(*trig)) 276 278 return -EINVAL; ··· 285 283 return -EINVAL; 286 284 } 287 285 288 - if (!le32_to_cpu(trig->occurrences)) 286 + if (!le32_to_cpu(trig->occurrences)) { 287 + dup = kmemdup(tlv, sizeof(*tlv) + le32_to_cpu(tlv->length), 288 + GFP_KERNEL); 289 + if (!dup) 290 + return -ENOMEM; 291 + trig = (void *)dup->data; 289 292 trig->occurrences = cpu_to_le32(-1); 293 + tlv = dup; 294 + } 290 295 291 - return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list); 296 + ret = iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list); 297 + kfree(dup); 298 + 299 + return ret; 292 300 } 293 301 294 302 static int (*dbg_tlv_alloc[])(struct iwl_trans *trans,
+3 -5
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
··· 1189 1189 for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES) 1190 1190 iwl_mvm_change_queue_tid(mvm, i); 1191 1191 1192 + rcu_read_unlock(); 1193 + 1192 1194 if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) { 1193 1195 ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner, 1194 1196 alloc_for_sta); 1195 - if (ret) { 1196 - rcu_read_unlock(); 1197 + if (ret) 1197 1198 return ret; 1198 - } 1199 1199 } 1200 - 1201 - rcu_read_unlock(); 1202 1200 1203 1201 return free_queue; 1204 1202 }
+2
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
··· 582 582 IWL_DEV_INFO(0x30DC, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name), 583 583 IWL_DEV_INFO(0x31DC, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name), 584 584 IWL_DEV_INFO(0x31DC, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name), 585 + IWL_DEV_INFO(0xA370, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name), 586 + IWL_DEV_INFO(0xA370, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name), 585 587 586 588 IWL_DEV_INFO(0x271C, 0x0214, iwl9260_2ac_cfg, iwl9260_1_name), 587 589
+1
drivers/net/wireless/mediatek/mt76/mt76.h
··· 301 301 #define MT_DRV_TX_ALIGNED4_SKBS BIT(1) 302 302 #define MT_DRV_SW_RX_AIRTIME BIT(2) 303 303 #define MT_DRV_RX_DMA_HDR BIT(3) 304 + #define MT_DRV_HW_MGMT_TXQ BIT(4) 304 305 305 306 struct mt76_driver_ops { 306 307 u32 drv_flags;
+2
drivers/net/wireless/mediatek/mt76/mt7603/main.c
··· 642 642 { 643 643 struct mt7603_dev *dev = hw->priv; 644 644 645 + mutex_lock(&dev->mt76.mutex); 645 646 dev->coverage_class = max_t(s16, coverage_class, 0); 646 647 mt7603_mac_set_timing(dev); 648 + mutex_unlock(&dev->mt76.mutex); 647 649 } 648 650 649 651 static void mt7603_tx(struct ieee80211_hw *hw,
+5 -4
drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
··· 234 234 int i; 235 235 236 236 for (i = 0; i < 16; i++) { 237 - int j, acs = i / 4, index = i % 4; 237 + int j, wmm_idx = i % MT7615_MAX_WMM_SETS; 238 + int acs = i / MT7615_MAX_WMM_SETS; 238 239 u32 ctrl, val, qlen = 0; 239 240 240 - val = mt76_rr(dev, MT_PLE_AC_QEMPTY(acs, index)); 241 + val = mt76_rr(dev, MT_PLE_AC_QEMPTY(acs, wmm_idx)); 241 242 ctrl = BIT(31) | BIT(15) | (acs << 8); 242 243 243 244 for (j = 0; j < 32; j++) { ··· 246 245 continue; 247 246 248 247 mt76_wr(dev, MT_PLE_FL_Q0_CTRL, 249 - ctrl | (j + (index << 5))); 248 + ctrl | (j + (wmm_idx << 5))); 250 249 qlen += mt76_get_field(dev, MT_PLE_FL_Q3_CTRL, 251 250 GENMASK(11, 0)); 252 251 } 253 - seq_printf(s, "AC%d%d: queued=%d\n", acs, index, qlen); 252 + seq_printf(s, "AC%d%d: queued=%d\n", wmm_idx, acs, qlen); 254 253 } 255 254 256 255 return 0;
+5 -4
drivers/net/wireless/mediatek/mt76/mt7615/dma.c
··· 36 36 mt7622_init_tx_queues_multi(struct mt7615_dev *dev) 37 37 { 38 38 static const u8 wmm_queue_map[] = { 39 - MT7622_TXQ_AC0, 40 - MT7622_TXQ_AC1, 41 - MT7622_TXQ_AC2, 42 - MT7622_TXQ_AC3, 39 + [IEEE80211_AC_BK] = MT7622_TXQ_AC0, 40 + [IEEE80211_AC_BE] = MT7622_TXQ_AC1, 41 + [IEEE80211_AC_VI] = MT7622_TXQ_AC2, 42 + [IEEE80211_AC_VO] = MT7622_TXQ_AC3, 43 43 }; 44 44 int ret; 45 45 int i; ··· 100 100 int i; 101 101 102 102 mt76_queue_tx_cleanup(dev, MT_TXQ_MCU, false); 103 + mt76_queue_tx_cleanup(dev, MT_TXQ_PSD, false); 103 104 if (is_mt7615(&dev->mt76)) { 104 105 mt76_queue_tx_cleanup(dev, MT_TXQ_BE, false); 105 106 } else {
+1 -2
drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
··· 72 72 { 73 73 int ret; 74 74 75 - ret = mt76_eeprom_init(&dev->mt76, MT7615_EEPROM_SIZE + 76 - MT7615_EEPROM_EXTRA_DATA); 75 + ret = mt76_eeprom_init(&dev->mt76, MT7615_EEPROM_FULL_SIZE); 77 76 if (ret < 0) 78 77 return ret; 79 78
+1 -1
drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h
··· 17 17 #define MT7615_EEPROM_TXDPD_SIZE 216 18 18 #define MT7615_EEPROM_TXDPD_COUNT (44 + 3) 19 19 20 - #define MT7615_EEPROM_EXTRA_DATA (MT7615_EEPROM_TXDPD_OFFSET + \ 20 + #define MT7615_EEPROM_FULL_SIZE (MT7615_EEPROM_TXDPD_OFFSET + \ 21 21 MT7615_EEPROM_TXDPD_COUNT * \ 22 22 MT7615_EEPROM_TXDPD_SIZE) 23 23
+8 -14
drivers/net/wireless/mediatek/mt76/mt7615/mac.c
··· 526 526 fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2; 527 527 fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4; 528 528 529 - if (ieee80211_is_data(fc) || ieee80211_is_bufferable_mmpdu(fc)) { 530 - q_idx = wmm_idx * MT7615_MAX_WMM_SETS + 531 - skb_get_queue_mapping(skb); 532 - p_fmt = is_usb ? MT_TX_TYPE_SF : MT_TX_TYPE_CT; 533 - } else if (beacon) { 534 - if (ext_phy) 535 - q_idx = MT_LMAC_BCN1; 536 - else 537 - q_idx = MT_LMAC_BCN0; 529 + if (beacon) { 538 530 p_fmt = MT_TX_TYPE_FW; 539 - } else { 540 - if (ext_phy) 541 - q_idx = MT_LMAC_ALTX1; 542 - else 543 - q_idx = MT_LMAC_ALTX0; 531 + q_idx = ext_phy ? MT_LMAC_BCN1 : MT_LMAC_BCN0; 532 + } else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) { 544 533 p_fmt = is_usb ? MT_TX_TYPE_SF : MT_TX_TYPE_CT; 534 + q_idx = ext_phy ? MT_LMAC_ALTX1 : MT_LMAC_ALTX0; 535 + } else { 536 + p_fmt = is_usb ? MT_TX_TYPE_SF : MT_TX_TYPE_CT; 537 + q_idx = wmm_idx * MT7615_MAX_WMM_SETS + 538 + mt7615_lmac_mapping(dev, skb_get_queue_mapping(skb)); 545 539 } 546 540 547 541 val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + sz_txd) |
-15
drivers/net/wireless/mediatek/mt76/mt7615/mac.h
··· 124 124 MT_TX_TYPE_FW, 125 125 }; 126 126 127 - enum tx_pkt_queue_idx { 128 - MT_LMAC_AC00, 129 - MT_LMAC_AC01, 130 - MT_LMAC_AC02, 131 - MT_LMAC_AC03, 132 - MT_LMAC_ALTX0 = 0x10, 133 - MT_LMAC_BMC0, 134 - MT_LMAC_BCN0, 135 - MT_LMAC_PSMP0, 136 - MT_LMAC_ALTX1, 137 - MT_LMAC_BMC1, 138 - MT_LMAC_BCN1, 139 - MT_LMAC_PSMP1, 140 - }; 141 - 142 127 enum tx_port_idx { 143 128 MT_TX_PORT_IDX_LMAC, 144 129 MT_TX_PORT_IDX_MCU
+4
drivers/net/wireless/mediatek/mt76/mt7615/main.c
··· 397 397 struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; 398 398 struct mt7615_dev *dev = mt7615_hw_dev(hw); 399 399 400 + queue = mt7615_lmac_mapping(dev, queue); 400 401 queue += mvif->wmm_idx * MT7615_MAX_WMM_SETS; 401 402 402 403 return mt7615_mcu_set_wmm(dev, queue, params); ··· 736 735 mt7615_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class) 737 736 { 738 737 struct mt7615_phy *phy = mt7615_hw_phy(hw); 738 + struct mt7615_dev *dev = phy->dev; 739 739 740 + mutex_lock(&dev->mt76.mutex); 740 741 phy->coverage_class = max_t(s16, coverage_class, 0); 741 742 mt7615_mac_set_timing(phy); 743 + mutex_unlock(&dev->mt76.mutex); 742 744 } 743 745 744 746 static int
+1 -1
drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
··· 146 146 static const struct mt76_driver_ops drv_ops = { 147 147 /* txwi_size = txd size + txp size */ 148 148 .txwi_size = MT_TXD_SIZE + sizeof(struct mt7615_txp_common), 149 - .drv_flags = MT_DRV_TXWI_NO_FREE, 149 + .drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ, 150 150 .survey_flags = SURVEY_INFO_TIME_TX | 151 151 SURVEY_INFO_TIME_RX | 152 152 SURVEY_INFO_TIME_BSS_RX,
+30
drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
··· 282 282 struct list_head wd_head; 283 283 }; 284 284 285 + enum tx_pkt_queue_idx { 286 + MT_LMAC_AC00, 287 + MT_LMAC_AC01, 288 + MT_LMAC_AC02, 289 + MT_LMAC_AC03, 290 + MT_LMAC_ALTX0 = 0x10, 291 + MT_LMAC_BMC0, 292 + MT_LMAC_BCN0, 293 + MT_LMAC_PSMP0, 294 + MT_LMAC_ALTX1, 295 + MT_LMAC_BMC1, 296 + MT_LMAC_BCN1, 297 + MT_LMAC_PSMP1, 298 + }; 299 + 285 300 enum { 286 301 HW_BSSID_0 = 0x0, 287 302 HW_BSSID_1, ··· 460 445 return MT7663_WTBL_SIZE; 461 446 else 462 447 return MT7615_WTBL_SIZE; 448 + } 449 + 450 + static inline u8 mt7615_lmac_mapping(struct mt7615_dev *dev, u8 ac) 451 + { 452 + static const u8 lmac_queue_map[] = { 453 + [IEEE80211_AC_BK] = MT_LMAC_AC00, 454 + [IEEE80211_AC_BE] = MT_LMAC_AC01, 455 + [IEEE80211_AC_VI] = MT_LMAC_AC02, 456 + [IEEE80211_AC_VO] = MT_LMAC_AC03, 457 + }; 458 + 459 + if (WARN_ON_ONCE(ac >= ARRAY_SIZE(lmac_queue_map))) 460 + return MT_LMAC_AC01; /* BE */ 461 + 462 + return lmac_queue_map[ac]; 463 463 } 464 464 465 465 void mt7615_dma_reset(struct mt7615_dev *dev);
+7 -6
drivers/net/wireless/mediatek/mt76/mt7615/usb.c
··· 270 270 { 271 271 static const struct mt76_driver_ops drv_ops = { 272 272 .txwi_size = MT_USB_TXD_SIZE, 273 - .drv_flags = MT_DRV_RX_DMA_HDR, 273 + .drv_flags = MT_DRV_RX_DMA_HDR | MT_DRV_HW_MGMT_TXQ, 274 274 .tx_prepare_skb = mt7663u_tx_prepare_skb, 275 275 .tx_complete_skb = mt7663u_tx_complete_skb, 276 276 .tx_status_data = mt7663u_tx_status_data, ··· 329 329 if (!mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_PWR_ON, 330 330 FW_STATE_PWR_ON << 1, 500)) { 331 331 dev_err(dev->mt76.dev, "Timeout for power on\n"); 332 - return -EIO; 332 + ret = -EIO; 333 + goto error; 333 334 } 334 335 335 336 alloc_queues: 336 337 ret = mt76u_alloc_mcu_queue(&dev->mt76); 337 338 if (ret) 338 - goto error; 339 + goto error_free_q; 339 340 340 341 ret = mt76u_alloc_queues(&dev->mt76); 341 342 if (ret) 342 - goto error; 343 + goto error_free_q; 343 344 344 345 ret = mt7663u_register_device(dev); 345 346 if (ret) 346 - goto error_freeq; 347 + goto error_free_q; 347 348 348 349 return 0; 349 350 350 - error_freeq: 351 + error_free_q: 351 352 mt76u_queues_deinit(&dev->mt76); 352 353 error: 353 354 mt76u_deinit(&dev->mt76);
+3 -2
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
··· 456 456 tasklet_disable(&dev->mt76.tx_tasklet); 457 457 napi_disable(&dev->mt76.tx_napi); 458 458 459 - for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++) 459 + mt76_for_each_q_rx(&dev->mt76, i) { 460 460 napi_disable(&dev->mt76.napi[i]); 461 + } 461 462 462 463 mutex_lock(&dev->mt76.mutex); 463 464 ··· 516 515 517 516 tasklet_enable(&dev->mt76.pre_tbtt_tasklet); 518 517 519 - for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++) { 518 + mt76_for_each_q_rx(&dev->mt76, i) { 520 519 napi_enable(&dev->mt76.napi[i]); 521 520 napi_schedule(&dev->mt76.napi[i]); 522 521 }
+3
drivers/net/wireless/mediatek/mt76/mt7915/main.c
··· 716 716 mt7915_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class) 717 717 { 718 718 struct mt7915_phy *phy = mt7915_hw_phy(hw); 719 + struct mt7915_dev *dev = phy->dev; 719 720 721 + mutex_lock(&dev->mt76.mutex); 720 722 phy->coverage_class = max_t(s16, coverage_class, 0); 721 723 mt7915_mac_set_timing(phy); 724 + mutex_unlock(&dev->mt76.mutex); 722 725 } 723 726 724 727 static int
+7
drivers/net/wireless/mediatek/mt76/tx.c
··· 264 264 skb_set_queue_mapping(skb, qid); 265 265 } 266 266 267 + if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) && 268 + !ieee80211_is_data(hdr->frame_control) && 269 + !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) { 270 + qid = MT_TXQ_PSD; 271 + skb_set_queue_mapping(skb, qid); 272 + } 273 + 267 274 if (!(wcid->tx_info & MT_WCID_TX_INFO_SET)) 268 275 ieee80211_get_tx_rates(info->control.vif, sta, skb, 269 276 info->control.rates, 1);
+26 -13
drivers/net/wireless/mediatek/mt76/usb.c
··· 1010 1010 static u8 mt76u_ac_to_hwq(struct mt76_dev *dev, u8 ac) 1011 1011 { 1012 1012 if (mt76_chip(dev) == 0x7663) { 1013 - static const u8 wmm_queue_map[] = { 1014 - [IEEE80211_AC_VO] = 0, 1015 - [IEEE80211_AC_VI] = 1, 1016 - [IEEE80211_AC_BE] = 2, 1017 - [IEEE80211_AC_BK] = 4, 1013 + static const u8 lmac_queue_map[] = { 1014 + /* ac to lmac mapping */ 1015 + [IEEE80211_AC_BK] = 0, 1016 + [IEEE80211_AC_BE] = 1, 1017 + [IEEE80211_AC_VI] = 2, 1018 + [IEEE80211_AC_VO] = 4, 1018 1019 }; 1019 1020 1020 - if (WARN_ON(ac >= ARRAY_SIZE(wmm_queue_map))) 1021 - return 2; /* BE */ 1021 + if (WARN_ON(ac >= ARRAY_SIZE(lmac_queue_map))) 1022 + return 1; /* BE */ 1022 1023 1023 - return wmm_queue_map[ac]; 1024 + return lmac_queue_map[ac]; 1024 1025 } 1025 1026 1026 1027 return mt76_ac_to_hwq(ac); ··· 1067 1066 1068 1067 static void mt76u_free_tx(struct mt76_dev *dev) 1069 1068 { 1070 - struct mt76_queue *q; 1071 - int i, j; 1069 + int i; 1072 1070 1073 1071 for (i = 0; i < IEEE80211_NUM_ACS; i++) { 1072 + struct mt76_queue *q; 1073 + int j; 1074 + 1074 1075 q = dev->q_tx[i].q; 1076 + if (!q) 1077 + continue; 1078 + 1075 1079 for (j = 0; j < q->ndesc; j++) 1076 1080 usb_free_urb(q->entry[j].urb); 1077 1081 } ··· 1084 1078 1085 1079 void mt76u_stop_tx(struct mt76_dev *dev) 1086 1080 { 1087 - struct mt76_queue_entry entry; 1088 - struct mt76_queue *q; 1089 - int i, j, ret; 1081 + int ret; 1090 1082 1091 1083 ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(&dev->phy), 1092 1084 HZ / 5); 1093 1085 if (!ret) { 1086 + struct mt76_queue_entry entry; 1087 + struct mt76_queue *q; 1088 + int i, j; 1089 + 1094 1090 dev_err(dev->dev, "timed out waiting for pending tx\n"); 1095 1091 1096 1092 for (i = 0; i < IEEE80211_NUM_ACS; i++) { 1097 1093 q = dev->q_tx[i].q; 1094 + if (!q) 1095 + continue; 1096 + 1098 1097 for (j = 0; j < q->ndesc; j++) 1099 1098 usb_kill_urb(q->entry[j].urb); 1100 1099 } ··· 1111 1100 */ 1112 1101 for (i = 0; i < IEEE80211_NUM_ACS; i++) { 1113 1102 q = 
dev->q_tx[i].q; 1103 + if (!q) 1104 + continue; 1114 1105 1115 1106 /* Assure we are in sync with killed tasklet. */ 1116 1107 spin_lock_bh(&q->lock);
+44 -24
drivers/net/xen-netfront.c
··· 63 63 MODULE_PARM_DESC(max_queues, 64 64 "Maximum number of queues per virtual interface"); 65 65 66 + #define XENNET_TIMEOUT (5 * HZ) 67 + 66 68 static const struct ethtool_ops xennet_ethtool_ops; 67 69 68 70 struct netfront_cb { ··· 1336 1334 1337 1335 netif_carrier_off(netdev); 1338 1336 1339 - xenbus_switch_state(dev, XenbusStateInitialising); 1340 - wait_event(module_wq, 1341 - xenbus_read_driver_state(dev->otherend) != 1342 - XenbusStateClosed && 1343 - xenbus_read_driver_state(dev->otherend) != 1344 - XenbusStateUnknown); 1337 + do { 1338 + xenbus_switch_state(dev, XenbusStateInitialising); 1339 + err = wait_event_timeout(module_wq, 1340 + xenbus_read_driver_state(dev->otherend) != 1341 + XenbusStateClosed && 1342 + xenbus_read_driver_state(dev->otherend) != 1343 + XenbusStateUnknown, XENNET_TIMEOUT); 1344 + } while (!err); 1345 + 1345 1346 return netdev; 1346 1347 1347 1348 exit: ··· 2144 2139 }; 2145 2140 #endif /* CONFIG_SYSFS */ 2146 2141 2142 + static void xennet_bus_close(struct xenbus_device *dev) 2143 + { 2144 + int ret; 2145 + 2146 + if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed) 2147 + return; 2148 + do { 2149 + xenbus_switch_state(dev, XenbusStateClosing); 2150 + ret = wait_event_timeout(module_wq, 2151 + xenbus_read_driver_state(dev->otherend) == 2152 + XenbusStateClosing || 2153 + xenbus_read_driver_state(dev->otherend) == 2154 + XenbusStateClosed || 2155 + xenbus_read_driver_state(dev->otherend) == 2156 + XenbusStateUnknown, 2157 + XENNET_TIMEOUT); 2158 + } while (!ret); 2159 + 2160 + if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed) 2161 + return; 2162 + 2163 + do { 2164 + xenbus_switch_state(dev, XenbusStateClosed); 2165 + ret = wait_event_timeout(module_wq, 2166 + xenbus_read_driver_state(dev->otherend) == 2167 + XenbusStateClosed || 2168 + xenbus_read_driver_state(dev->otherend) == 2169 + XenbusStateUnknown, 2170 + XENNET_TIMEOUT); 2171 + } while (!ret); 2172 + } 2173 + 2147 2174 static int 
xennet_remove(struct xenbus_device *dev) 2148 2175 { 2149 2176 struct netfront_info *info = dev_get_drvdata(&dev->dev); 2150 2177 2151 - dev_dbg(&dev->dev, "%s\n", dev->nodename); 2152 - 2153 - if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) { 2154 - xenbus_switch_state(dev, XenbusStateClosing); 2155 - wait_event(module_wq, 2156 - xenbus_read_driver_state(dev->otherend) == 2157 - XenbusStateClosing || 2158 - xenbus_read_driver_state(dev->otherend) == 2159 - XenbusStateUnknown); 2160 - 2161 - xenbus_switch_state(dev, XenbusStateClosed); 2162 - wait_event(module_wq, 2163 - xenbus_read_driver_state(dev->otherend) == 2164 - XenbusStateClosed || 2165 - xenbus_read_driver_state(dev->otherend) == 2166 - XenbusStateUnknown); 2167 - } 2168 - 2178 + xennet_bus_close(dev); 2169 2179 xennet_disconnect_backend(info); 2170 2180 2171 2181 if (info->netdev->reg_state == NETREG_REGISTERED)
+1
drivers/nfc/s3fwrn5/core.c
··· 198 198 case S3FWRN5_MODE_FW: 199 199 return s3fwrn5_fw_recv_frame(ndev, skb); 200 200 default: 201 + kfree_skb(skb); 201 202 return -ENODEV; 202 203 } 203 204 }
+1 -1
drivers/nvdimm/security.c
··· 95 95 struct encrypted_key_payload *epayload; 96 96 struct device *dev = &nvdimm->dev; 97 97 98 - keyref = lookup_user_key(id, 0, 0); 98 + keyref = lookup_user_key(id, 0, KEY_NEED_SEARCH); 99 99 if (IS_ERR(keyref)) 100 100 return NULL; 101 101
+10 -3
drivers/nvme/host/core.c
··· 1116 1116 dev_warn(ctrl->device, 1117 1117 "Identify Descriptors failed (%d)\n", status); 1118 1118 /* 1119 - * Don't treat an error as fatal, as we potentially already 1120 - * have a NGUID or EUI-64. 1119 + * Don't treat non-retryable errors as fatal, as we potentially 1120 + * already have a NGUID or EUI-64. If we failed with DNR set, 1121 + * we want to silently ignore the error as we can still 1122 + * identify the device, but if the status has DNR set, we want 1123 + * to propagate the error back specifically for the disk 1124 + * revalidation flow to make sure we don't abandon the 1125 + * device just because of a temporal retry-able error (such 1126 + * as path of transport errors). 1121 1127 */ 1122 - if (status > 0 && !(status & NVME_SC_DNR)) 1128 + if (status > 0 && (status & NVME_SC_DNR)) 1123 1129 status = 0; 1124 1130 goto free_data; 1125 1131 } ··· 1980 1974 if (ns->head->disk) { 1981 1975 nvme_update_disk_info(ns->head->disk, ns, id); 1982 1976 blk_queue_stack_limits(ns->head->disk->queue, ns->queue); 1977 + nvme_mpath_update_disk_size(ns->head->disk); 1983 1978 } 1984 1979 #endif 1985 1980 return 0;
+4 -3
drivers/nvme/host/multipath.c
··· 672 672 } 673 673 674 674 if (bdi_cap_stable_pages_required(ns->queue->backing_dev_info)) { 675 - struct backing_dev_info *info = 676 - ns->head->disk->queue->backing_dev_info; 675 + struct gendisk *disk = ns->head->disk; 677 676 678 - info->capabilities |= BDI_CAP_STABLE_WRITES; 677 + if (disk) 678 + disk->queue->backing_dev_info->capabilities |= 679 + BDI_CAP_STABLE_WRITES; 679 680 } 680 681 } 681 682
+13
drivers/nvme/host/nvme.h
··· 604 604 trace_block_bio_complete(ns->head->disk->queue, req->bio); 605 605 } 606 606 607 + static inline void nvme_mpath_update_disk_size(struct gendisk *disk) 608 + { 609 + struct block_device *bdev = bdget_disk(disk, 0); 610 + 611 + if (bdev) { 612 + bd_set_size(bdev, get_capacity(disk) << SECTOR_SHIFT); 613 + bdput(bdev); 614 + } 615 + } 616 + 607 617 extern struct device_attribute dev_attr_ana_grpid; 608 618 extern struct device_attribute dev_attr_ana_state; 609 619 extern struct device_attribute subsys_attr_iopolicy; ··· 687 677 { 688 678 } 689 679 static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys) 680 + { 681 + } 682 + static inline void nvme_mpath_update_disk_size(struct gendisk *disk) 690 683 { 691 684 } 692 685 #endif /* CONFIG_NVME_MULTIPATH */
+4
drivers/opp/of.c
··· 902 902 return -EINVAL; 903 903 } 904 904 905 + mutex_lock(&opp_table->lock); 906 + opp_table->parsed_static_opps = 1; 907 + mutex_unlock(&opp_table->lock); 908 + 905 909 val = prop->value; 906 910 while (nr) { 907 911 unsigned long freq = be32_to_cpup(val++) * 1000;
+3 -2
drivers/pci/controller/vmd.c
··· 546 546 547 547 vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info, 548 548 x86_vector_domain); 549 - irq_domain_free_fwnode(fn); 550 - if (!vmd->irq_domain) 549 + if (!vmd->irq_domain) { 550 + irq_domain_free_fwnode(fn); 551 551 return -ENODEV; 552 + } 552 553 553 554 pci_add_resource(&resources, &vmd->resources[0]); 554 555 pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]);
+9 -21
drivers/pci/pci.c
··· 4638 4638 * pcie_wait_for_link_delay - Wait until link is active or inactive 4639 4639 * @pdev: Bridge device 4640 4640 * @active: waiting for active or inactive? 4641 - * @delay: Delay to wait after link has become active (in ms). Specify %0 4642 - * for no delay. 4641 + * @delay: Delay to wait after link has become active (in ms) 4643 4642 * 4644 4643 * Use this to wait till link becomes active or inactive. 4645 4644 */ ··· 4679 4680 msleep(10); 4680 4681 timeout -= 10; 4681 4682 } 4682 - if (active && ret && delay) 4683 + if (active && ret) 4683 4684 msleep(delay); 4684 4685 else if (ret != active) 4685 4686 pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n", ··· 4800 4801 if (!pcie_downstream_port(dev)) 4801 4802 return; 4802 4803 4803 - /* 4804 - * Per PCIe r5.0, sec 6.6.1, for downstream ports that support 4805 - * speeds > 5 GT/s, we must wait for link training to complete 4806 - * before the mandatory delay. 4807 - * 4808 - * We can only tell when link training completes via DLL Link 4809 - * Active, which is required for downstream ports that support 4810 - * speeds > 5 GT/s (sec 7.5.3.6). Unfortunately some common 4811 - * devices do not implement Link Active reporting even when it's 4812 - * required, so we'll check for that directly instead of checking 4813 - * the supported link speed. We assume devices without Link Active 4814 - * reporting can train in 100 ms regardless of speed. 
4815 - */ 4816 - if (dev->link_active_reporting) { 4817 - pci_dbg(dev, "waiting for link to train\n"); 4818 - if (!pcie_wait_for_link_delay(dev, true, 0)) { 4804 + if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) { 4805 + pci_dbg(dev, "waiting %d ms for downstream link\n", delay); 4806 + msleep(delay); 4807 + } else { 4808 + pci_dbg(dev, "waiting %d ms for downstream link, after activation\n", 4809 + delay); 4810 + if (!pcie_wait_for_link_delay(dev, true, delay)) { 4819 4811 /* Did not train, no need to wait any further */ 4820 4812 return; 4821 4813 } 4822 4814 } 4823 - pci_dbg(child, "waiting %d ms to become accessible\n", delay); 4824 - msleep(delay); 4825 4815 4826 4816 if (!pci_device_is_present(child)) { 4827 4817 pci_dbg(child, "waiting additional %d ms to become accessible\n", delay);
+1
drivers/perf/arm-cci.c
··· 1718 1718 .driver = { 1719 1719 .name = DRIVER_NAME, 1720 1720 .of_match_table = arm_cci_pmu_matches, 1721 + .suppress_bind_attrs = true, 1721 1722 }, 1722 1723 .probe = cci_pmu_probe, 1723 1724 .remove = cci_pmu_remove,
+1
drivers/perf/arm-ccn.c
··· 1545 1545 .driver = { 1546 1546 .name = "arm-ccn", 1547 1547 .of_match_table = arm_ccn_match, 1548 + .suppress_bind_attrs = true, 1548 1549 }, 1549 1550 .probe = arm_ccn_probe, 1550 1551 .remove = arm_ccn_remove,
+1
drivers/perf/arm_dsu_pmu.c
··· 757 757 .driver = { 758 758 .name = DRVNAME, 759 759 .of_match_table = of_match_ptr(dsu_pmu_of_match), 760 + .suppress_bind_attrs = true, 760 761 }, 761 762 .probe = dsu_pmu_device_probe, 762 763 .remove = dsu_pmu_device_remove,
+2
drivers/perf/arm_smmuv3_pmu.c
··· 742 742 platform_set_drvdata(pdev, smmu_pmu); 743 743 744 744 smmu_pmu->pmu = (struct pmu) { 745 + .module = THIS_MODULE, 745 746 .task_ctx_nr = perf_invalid_context, 746 747 .pmu_enable = smmu_pmu_enable, 747 748 .pmu_disable = smmu_pmu_disable, ··· 860 859 static struct platform_driver smmu_pmu_driver = { 861 860 .driver = { 862 861 .name = "arm-smmu-v3-pmcg", 862 + .suppress_bind_attrs = true, 863 863 }, 864 864 .probe = smmu_pmu_probe, 865 865 .remove = smmu_pmu_remove,
+1
drivers/perf/arm_spe_pmu.c
··· 1226 1226 .driver = { 1227 1227 .name = DRVNAME, 1228 1228 .of_match_table = of_match_ptr(arm_spe_pmu_of_match), 1229 + .suppress_bind_attrs = true, 1229 1230 }, 1230 1231 .probe = arm_spe_pmu_device_probe, 1231 1232 .remove = arm_spe_pmu_device_remove,
+2
drivers/perf/fsl_imx8_ddr_perf.c
··· 512 512 { 513 513 *pmu = (struct ddr_pmu) { 514 514 .pmu = (struct pmu) { 515 + .module = THIS_MODULE, 515 516 .capabilities = PERF_PMU_CAP_NO_EXCLUDE, 516 517 .task_ctx_nr = perf_invalid_context, 517 518 .attr_groups = attr_groups, ··· 707 706 .driver = { 708 707 .name = "imx-ddr-pmu", 709 708 .of_match_table = imx_ddr_pmu_dt_ids, 709 + .suppress_bind_attrs = true, 710 710 }, 711 711 .probe = ddr_perf_probe, 712 712 .remove = ddr_perf_remove,
+2
drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
··· 378 378 ddrc_pmu->sccl_id, ddrc_pmu->index_id); 379 379 ddrc_pmu->pmu = (struct pmu) { 380 380 .name = name, 381 + .module = THIS_MODULE, 381 382 .task_ctx_nr = perf_invalid_context, 382 383 .event_init = hisi_uncore_pmu_event_init, 383 384 .pmu_enable = hisi_uncore_pmu_enable, ··· 419 418 .driver = { 420 419 .name = "hisi_ddrc_pmu", 421 420 .acpi_match_table = ACPI_PTR(hisi_ddrc_pmu_acpi_match), 421 + .suppress_bind_attrs = true, 422 422 }, 423 423 .probe = hisi_ddrc_pmu_probe, 424 424 .remove = hisi_ddrc_pmu_remove,
+2
drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
··· 390 390 hha_pmu->sccl_id, hha_pmu->index_id); 391 391 hha_pmu->pmu = (struct pmu) { 392 392 .name = name, 393 + .module = THIS_MODULE, 393 394 .task_ctx_nr = perf_invalid_context, 394 395 .event_init = hisi_uncore_pmu_event_init, 395 396 .pmu_enable = hisi_uncore_pmu_enable, ··· 431 430 .driver = { 432 431 .name = "hisi_hha_pmu", 433 432 .acpi_match_table = ACPI_PTR(hisi_hha_pmu_acpi_match), 433 + .suppress_bind_attrs = true, 434 434 }, 435 435 .probe = hisi_hha_pmu_probe, 436 436 .remove = hisi_hha_pmu_remove,
+2
drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
··· 380 380 l3c_pmu->sccl_id, l3c_pmu->index_id); 381 381 l3c_pmu->pmu = (struct pmu) { 382 382 .name = name, 383 + .module = THIS_MODULE, 383 384 .task_ctx_nr = perf_invalid_context, 384 385 .event_init = hisi_uncore_pmu_event_init, 385 386 .pmu_enable = hisi_uncore_pmu_enable, ··· 421 420 .driver = { 422 421 .name = "hisi_l3c_pmu", 423 422 .acpi_match_table = ACPI_PTR(hisi_l3c_pmu_acpi_match), 423 + .suppress_bind_attrs = true, 424 424 }, 425 425 .probe = hisi_l3c_pmu_probe, 426 426 .remove = hisi_l3c_pmu_remove,
+1
drivers/perf/qcom_l2_pmu.c
··· 1028 1028 .driver = { 1029 1029 .name = "qcom-l2cache-pmu", 1030 1030 .acpi_match_table = ACPI_PTR(l2_cache_pmu_acpi_match), 1031 + .suppress_bind_attrs = true, 1031 1032 }, 1032 1033 .probe = l2_cache_pmu_probe, 1033 1034 .remove = l2_cache_pmu_remove,
+1
drivers/perf/qcom_l3_pmu.c
··· 814 814 .driver = { 815 815 .name = "qcom-l3cache-pmu", 816 816 .acpi_match_table = ACPI_PTR(qcom_l3_cache_pmu_acpi_match), 817 + .suppress_bind_attrs = true, 817 818 }, 818 819 .probe = qcom_l3_cache_pmu_probe, 819 820 };
+1
drivers/perf/thunderx2_pmu.c
··· 1017 1017 .driver = { 1018 1018 .name = "tx2-uncore-pmu", 1019 1019 .acpi_match_table = ACPI_PTR(tx2_uncore_acpi_match), 1020 + .suppress_bind_attrs = true, 1020 1021 }, 1021 1022 .probe = tx2_uncore_probe, 1022 1023 .remove = tx2_uncore_remove,
+1
drivers/perf/xgene_pmu.c
··· 1975 1975 .name = "xgene-pmu", 1976 1976 .of_match_table = xgene_pmu_of_match, 1977 1977 .acpi_match_table = ACPI_PTR(xgene_pmu_acpi_match), 1978 + .suppress_bind_attrs = true, 1978 1979 }, 1979 1980 }; 1980 1981
+3 -2
drivers/phy/allwinner/phy-sun4i-usb.c
··· 545 545 struct sun4i_usb_phy_data *data = 546 546 container_of(work, struct sun4i_usb_phy_data, detect.work); 547 547 struct phy *phy0 = data->phys[0].phy; 548 - struct sun4i_usb_phy *phy = phy_get_drvdata(phy0); 548 + struct sun4i_usb_phy *phy; 549 549 bool force_session_end, id_notify = false, vbus_notify = false; 550 550 int id_det, vbus_det; 551 551 552 - if (phy0 == NULL) 552 + if (!phy0) 553 553 return; 554 554 555 + phy = phy_get_drvdata(phy0); 555 556 id_det = sun4i_usb_phy0_get_id_det(data); 556 557 vbus_det = sun4i_usb_phy0_get_vbus_det(data); 557 558
+8 -6
drivers/phy/intel/phy-intel-combo.c
··· 134 134 135 135 reg_val = readl(base + reg); 136 136 reg_val &= ~mask; 137 - reg_val |= FIELD_PREP(mask, val); 137 + reg_val |= val; 138 138 writel(reg_val, base + reg); 139 139 } 140 140 ··· 169 169 return 0; 170 170 171 171 combo_phy_w32_off_mask(cbphy->app_base, PCIE_PHY_GEN_CTRL, 172 - PCIE_PHY_CLK_PAD, 0); 172 + PCIE_PHY_CLK_PAD, FIELD_PREP(PCIE_PHY_CLK_PAD, 0)); 173 173 174 174 /* Delay for stable clock PLL */ 175 175 usleep_range(50, 100); ··· 192 192 return 0; 193 193 194 194 combo_phy_w32_off_mask(cbphy->app_base, PCIE_PHY_GEN_CTRL, 195 - PCIE_PHY_CLK_PAD, 1); 195 + PCIE_PHY_CLK_PAD, FIELD_PREP(PCIE_PHY_CLK_PAD, 1)); 196 196 197 197 return 0; 198 198 } 199 199 200 200 static int intel_cbphy_set_mode(struct intel_combo_phy *cbphy) 201 201 { 202 - enum intel_combo_mode cb_mode = PHY_PCIE_MODE; 202 + enum intel_combo_mode cb_mode; 203 203 enum aggregated_mode aggr = cbphy->aggr_mode; 204 204 struct device *dev = cbphy->dev; 205 205 enum intel_phy_mode mode; ··· 224 224 225 225 cb_mode = SATA0_SATA1_MODE; 226 226 break; 227 + default: 228 + return -EINVAL; 227 229 } 228 230 229 231 ret = regmap_write(cbphy->hsiocfg, REG_COMBO_MODE(cbphy->bid), cb_mode); ··· 387 385 388 386 /* trigger auto RX adaptation */ 389 387 combo_phy_w32_off_mask(cr_base, CR_ADDR(PCS_XF_ATE_OVRD_IN_2, id), 390 - ADAPT_REQ_MSK, 3); 388 + ADAPT_REQ_MSK, FIELD_PREP(ADAPT_REQ_MSK, 3)); 391 389 /* Wait RX adaptation to finish */ 392 390 ret = readl_poll_timeout(cr_base + CR_ADDR(PCS_XF_RX_ADAPT_ACK, id), 393 391 val, val & RX_ADAPT_ACK_BIT, 10, 5000); ··· 398 396 399 397 /* Stop RX adaptation */ 400 398 combo_phy_w32_off_mask(cr_base, CR_ADDR(PCS_XF_ATE_OVRD_IN_2, id), 401 - ADAPT_REQ_MSK, 0); 399 + ADAPT_REQ_MSK, FIELD_PREP(ADAPT_REQ_MSK, 0)); 402 400 403 401 return ret; 404 402 }
+2 -2
drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
··· 607 607 platform_set_drvdata(pdev, inno); 608 608 609 609 inno->phy_base = devm_platform_ioremap_resource(pdev, 0); 610 - if (!inno->phy_base) 611 - return -ENOMEM; 610 + if (IS_ERR(inno->phy_base)) 611 + return PTR_ERR(inno->phy_base); 612 612 613 613 inno->ref_clk = devm_clk_get(dev, "ref"); 614 614 if (IS_ERR(inno->ref_clk)) {
+1 -1
drivers/phy/ti/phy-am654-serdes.c
··· 72 72 #define to_serdes_am654_clk_mux(_hw) \ 73 73 container_of(_hw, struct serdes_am654_clk_mux, hw) 74 74 75 - static struct regmap_config serdes_am654_regmap_config = { 75 + static const struct regmap_config serdes_am654_regmap_config = { 76 76 .reg_bits = 32, 77 77 .val_bits = 32, 78 78 .reg_stride = 4,
+5 -5
drivers/phy/ti/phy-j721e-wiz.c
··· 117 117 struct wiz_clk_divider { 118 118 struct clk_hw hw; 119 119 struct regmap_field *field; 120 - struct clk_div_table *table; 120 + const struct clk_div_table *table; 121 121 struct clk_init_data clk_data; 122 122 }; 123 123 ··· 131 131 132 132 struct wiz_clk_div_sel { 133 133 struct regmap_field *field; 134 - struct clk_div_table *table; 134 + const struct clk_div_table *table; 135 135 const char *node_name; 136 136 }; 137 137 ··· 173 173 }, 174 174 }; 175 175 176 - static struct clk_div_table clk_div_table[] = { 176 + static const struct clk_div_table clk_div_table[] = { 177 177 { .val = 0, .div = 1, }, 178 178 { .val = 1, .div = 2, }, 179 179 { .val = 2, .div = 4, }, ··· 559 559 560 560 static int wiz_div_clk_register(struct wiz *wiz, struct device_node *node, 561 561 struct regmap_field *field, 562 - struct clk_div_table *table) 562 + const struct clk_div_table *table) 563 563 { 564 564 struct device *dev = wiz->dev; 565 565 struct wiz_clk_divider *div; ··· 756 756 .deassert = wiz_phy_reset_deassert, 757 757 }; 758 758 759 - static struct regmap_config wiz_regmap_config = { 759 + static const struct regmap_config wiz_regmap_config = { 760 760 .reg_bits = 32, 761 761 .val_bits = 32, 762 762 .reg_stride = 4,
+53 -14
drivers/pinctrl/intel/pinctrl-baytrail.c
··· 800 800 pm_runtime_put(vg->dev); 801 801 } 802 802 803 + static void byt_gpio_direct_irq_check(struct intel_pinctrl *vg, 804 + unsigned int offset) 805 + { 806 + void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG); 807 + 808 + /* 809 + * Before making any direction modifications, do a check if gpio is set 810 + * for direct IRQ. On Bay Trail, setting GPIO to output does not make 811 + * sense, so let's at least inform the caller before they shoot 812 + * themselves in the foot. 813 + */ 814 + if (readl(conf_reg) & BYT_DIRECT_IRQ_EN) 815 + dev_info_once(vg->dev, "Potential Error: Setting GPIO with direct_irq_en to output"); 816 + } 817 + 803 818 static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev, 804 819 struct pinctrl_gpio_range *range, 805 820 unsigned int offset, ··· 822 807 { 823 808 struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctl_dev); 824 809 void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG); 825 - void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG); 826 810 unsigned long flags; 827 811 u32 value; 828 812 ··· 831 817 value &= ~BYT_DIR_MASK; 832 818 if (input) 833 819 value |= BYT_OUTPUT_EN; 834 - else if (readl(conf_reg) & BYT_DIRECT_IRQ_EN) 835 - /* 836 - * Before making any direction modifications, do a check if gpio 837 - * is set for direct IRQ. On baytrail, setting GPIO to output 838 - * does not make sense, so let's at least inform the caller before 839 - * they shoot themselves in the foot. 
840 - */ 841 - dev_info_once(vg->dev, "Potential Error: Setting GPIO with direct_irq_en to output"); 820 + else 821 + byt_gpio_direct_irq_check(vg, offset); 842 822 843 823 writel(value, val_reg); 844 824 ··· 1173 1165 1174 1166 static int byt_gpio_direction_input(struct gpio_chip *chip, unsigned int offset) 1175 1167 { 1176 - return pinctrl_gpio_direction_input(chip->base + offset); 1168 + struct intel_pinctrl *vg = gpiochip_get_data(chip); 1169 + void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG); 1170 + unsigned long flags; 1171 + u32 reg; 1172 + 1173 + raw_spin_lock_irqsave(&byt_lock, flags); 1174 + 1175 + reg = readl(val_reg); 1176 + reg &= ~BYT_DIR_MASK; 1177 + reg |= BYT_OUTPUT_EN; 1178 + writel(reg, val_reg); 1179 + 1180 + raw_spin_unlock_irqrestore(&byt_lock, flags); 1181 + return 0; 1177 1182 } 1178 1183 1184 + /* 1185 + * Note despite the temptation this MUST NOT be converted into a call to 1186 + * pinctrl_gpio_direction_output() + byt_gpio_set() that does not work this 1187 + * MUST be done as a single BYT_VAL_REG register write. 1188 + * See the commit message of the commit adding this comment for details. 
1189 + */ 1179 1190 static int byt_gpio_direction_output(struct gpio_chip *chip, 1180 1191 unsigned int offset, int value) 1181 1192 { 1182 - int ret = pinctrl_gpio_direction_output(chip->base + offset); 1193 + struct intel_pinctrl *vg = gpiochip_get_data(chip); 1194 + void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG); 1195 + unsigned long flags; 1196 + u32 reg; 1183 1197 1184 - if (ret) 1185 - return ret; 1198 + raw_spin_lock_irqsave(&byt_lock, flags); 1186 1199 1187 - byt_gpio_set(chip, offset, value); 1200 + byt_gpio_direct_irq_check(vg, offset); 1188 1201 1202 + reg = readl(val_reg); 1203 + reg &= ~BYT_DIR_MASK; 1204 + if (value) 1205 + reg |= BYT_LEVEL; 1206 + else 1207 + reg &= ~BYT_LEVEL; 1208 + 1209 + writel(reg, val_reg); 1210 + 1211 + raw_spin_unlock_irqrestore(&byt_lock, flags); 1189 1212 return 0; 1190 1213 } 1191 1214
+1 -1
drivers/pinctrl/pinctrl-amd.h
··· 252 252 { 253 253 .name = "uart0", 254 254 .pins = uart0_pins, 255 - .npins = 9, 255 + .npins = 5, 256 256 }, 257 257 { 258 258 .name = "uart1",
+1
drivers/platform/x86/asus-wmi.c
··· 441 441 * battery is named BATT. 442 442 */ 443 443 if (strcmp(battery->desc->name, "BAT0") != 0 && 444 + strcmp(battery->desc->name, "BAT1") != 0 && 444 445 strcmp(battery->desc->name, "BATT") != 0) 445 446 return -ENODEV; 446 447
+3
drivers/platform/x86/intel_speed_select_if/isst_if_common.h
··· 13 13 #define INTEL_RAPL_PRIO_DEVID_0 0x3451 14 14 #define INTEL_CFG_MBOX_DEVID_0 0x3459 15 15 16 + #define INTEL_RAPL_PRIO_DEVID_1 0x3251 17 + #define INTEL_CFG_MBOX_DEVID_1 0x3259 18 + 16 19 /* 17 20 * Validate maximum commands in a single request. 18 21 * This is enough to handle command to every core in one ioctl, or all
+1
drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c
··· 147 147 148 148 static const struct pci_device_id isst_if_mbox_ids[] = { 149 149 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_CFG_MBOX_DEVID_0)}, 150 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_CFG_MBOX_DEVID_1)}, 150 151 { 0 }, 151 152 }; 152 153 MODULE_DEVICE_TABLE(pci, isst_if_mbox_ids);
+1
drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c
··· 72 72 73 73 static const struct pci_device_id isst_if_ids[] = { 74 74 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_RAPL_PRIO_DEVID_0)}, 75 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_RAPL_PRIO_DEVID_1)}, 75 76 { 0 }, 76 77 }; 77 78 MODULE_DEVICE_TABLE(pci, isst_if_ids);
+11 -3
drivers/platform/x86/thinkpad_acpi.c
··· 885 885 886 886 if (!ibm || !ibm->write) 887 887 return -EINVAL; 888 + if (count > PAGE_SIZE - 1) 889 + return -EINVAL; 888 890 889 - kernbuf = strndup_user(userbuf, PAGE_SIZE); 890 - if (IS_ERR(kernbuf)) 891 - return PTR_ERR(kernbuf); 891 + kernbuf = kmalloc(count + 1, GFP_KERNEL); 892 + if (!kernbuf) 893 + return -ENOMEM; 892 894 895 + if (copy_from_user(kernbuf, userbuf, count)) { 896 + kfree(kernbuf); 897 + return -EFAULT; 898 + } 899 + 900 + kernbuf[count] = 0; 893 901 ret = ibm->write(kernbuf); 894 902 if (ret == 0) 895 903 ret = count;
+1 -1
drivers/regulator/Makefile
··· 31 31 obj-$(CONFIG_REGULATOR_BD71828) += bd71828-regulator.o 32 32 obj-$(CONFIG_REGULATOR_BD718XX) += bd718x7-regulator.o 33 33 obj-$(CONFIG_REGULATOR_BD9571MWV) += bd9571mwv-regulator.o 34 - obj-$(CONFIG_REGULATOR_DA903X) += da903x.o 34 + obj-$(CONFIG_REGULATOR_DA903X) += da903x-regulator.o 35 35 obj-$(CONFIG_REGULATOR_DA9052) += da9052-regulator.o 36 36 obj-$(CONFIG_REGULATOR_DA9055) += da9055-regulator.o 37 37 obj-$(CONFIG_REGULATOR_DA9062) += da9062-regulator.o
drivers/regulator/da903x.c drivers/regulator/da903x-regulator.c
+1 -1
drivers/regulator/qcom_smd-regulator.c
··· 821 821 static const struct rpm_regulator_data rpm_pmi8994_regulators[] = { 822 822 { "s1", QCOM_SMD_RPM_SMPB, 1, &pmi8994_ftsmps, "vdd_s1" }, 823 823 { "s2", QCOM_SMD_RPM_SMPB, 2, &pmi8994_hfsmps, "vdd_s2" }, 824 - { "s2", QCOM_SMD_RPM_SMPB, 3, &pmi8994_hfsmps, "vdd_s3" }, 824 + { "s3", QCOM_SMD_RPM_SMPB, 3, &pmi8994_hfsmps, "vdd_s3" }, 825 825 { "boost-bypass", QCOM_SMD_RPM_BBYB, 1, &pmi8994_bby, "vdd_bst_byp" }, 826 826 {} 827 827 };
+1
drivers/s390/cio/vfio_ccw_chp.c
··· 8 8 * Eric Farman <farman@linux.ibm.com> 9 9 */ 10 10 11 + #include <linux/slab.h> 11 12 #include <linux/vfio.h> 12 13 #include "vfio_ccw_private.h" 13 14
+8 -5
drivers/scsi/libfc/fc_rport.c
··· 133 133 lockdep_assert_held(&lport->disc.disc_mutex); 134 134 135 135 rdata = fc_rport_lookup(lport, port_id); 136 - if (rdata) 136 + if (rdata) { 137 + kref_put(&rdata->kref, fc_rport_destroy); 137 138 return rdata; 139 + } 138 140 139 141 if (lport->rport_priv_size > 0) 140 142 rport_priv_size = lport->rport_priv_size; ··· 483 481 484 482 fc_rport_state_enter(rdata, RPORT_ST_DELETE); 485 483 486 - kref_get(&rdata->kref); 487 - if (rdata->event == RPORT_EV_NONE && 488 - !queue_work(rport_event_queue, &rdata->event_work)) 489 - kref_put(&rdata->kref, fc_rport_destroy); 484 + if (rdata->event == RPORT_EV_NONE) { 485 + kref_get(&rdata->kref); 486 + if (!queue_work(rport_event_queue, &rdata->event_work)) 487 + kref_put(&rdata->kref, fc_rport_destroy); 488 + } 490 489 491 490 rdata->event = event; 492 491 }
+1 -1
drivers/scsi/libiscsi.c
··· 2629 2629 "iscsi_q_%d", shost->host_no); 2630 2630 ihost->workq = alloc_workqueue("%s", 2631 2631 WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND, 2632 - 2, ihost->workq_name); 2632 + 1, ihost->workq_name); 2633 2633 if (!ihost->workq) 2634 2634 goto free_host; 2635 2635 }
-2
drivers/scsi/megaraid/megaraid_sas_fusion.c
··· 3739 3739 if (instance->mask_interrupts) 3740 3740 return IRQ_NONE; 3741 3741 3742 - #if defined(ENABLE_IRQ_POLL) 3743 3742 if (irq_context->irq_poll_scheduled) 3744 3743 return IRQ_HANDLED; 3745 - #endif 3746 3744 3747 3745 if (!instance->msix_vectors) { 3748 3746 mfiStatus = instance->instancet->clear_intr(instance);
+7 -5
drivers/scsi/mpt3sas/mpt3sas_ctl.c
··· 3145 3145 if (!ioc->is_warpdrive) { 3146 3146 ioc_err(ioc, "%s: BRM attribute is only for warpdrive\n", 3147 3147 __func__); 3148 - goto out; 3148 + return 0; 3149 3149 } 3150 3150 /* pci_access_mutex lock acquired by sysfs show path */ 3151 3151 mutex_lock(&ioc->pci_access_mutex); 3152 - if (ioc->pci_error_recovery || ioc->remove_host) { 3153 - mutex_unlock(&ioc->pci_access_mutex); 3154 - return 0; 3155 - } 3152 + if (ioc->pci_error_recovery || ioc->remove_host) 3153 + goto out; 3156 3154 3157 3155 /* allocate upto GPIOVal 36 entries */ 3158 3156 sz = offsetof(Mpi2IOUnitPage3_t, GPIOVal) + (sizeof(u16) * 36); 3159 3157 io_unit_pg3 = kzalloc(sz, GFP_KERNEL); 3160 3158 if (!io_unit_pg3) { 3159 + rc = -ENOMEM; 3161 3160 ioc_err(ioc, "%s: failed allocating memory for iounit_pg3: (%d) bytes\n", 3162 3161 __func__, sz); 3163 3162 goto out; ··· 3166 3167 0) { 3167 3168 ioc_err(ioc, "%s: failed reading iounit_pg3\n", 3168 3169 __func__); 3170 + rc = -EINVAL; 3169 3171 goto out; 3170 3172 } 3171 3173 ··· 3174 3174 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 3175 3175 ioc_err(ioc, "%s: iounit_pg3 failed with ioc_status(0x%04x)\n", 3176 3176 __func__, ioc_status); 3177 + rc = -EINVAL; 3177 3178 goto out; 3178 3179 } 3179 3180 3180 3181 if (io_unit_pg3->GPIOCount < 25) { 3181 3182 ioc_err(ioc, "%s: iounit_pg3->GPIOCount less than 25 entries, detected (%d) entries\n", 3182 3183 __func__, io_unit_pg3->GPIOCount); 3184 + rc = -EINVAL; 3183 3185 goto out; 3184 3186 } 3185 3187
+1 -1
drivers/scsi/qla2xxx/qla_init.c
··· 5944 5944 break; 5945 5945 } 5946 5946 5947 - if (NVME_TARGET(vha->hw, fcport)) { 5947 + if (found && NVME_TARGET(vha->hw, fcport)) { 5948 5948 if (fcport->disc_state == DSC_DELETE_PEND) { 5949 5949 qla2x00_set_fcport_disc_state(fcport, DSC_GNL); 5950 5950 vha->fcport_count--;
+1
drivers/scsi/scsi_devinfo.c
··· 239 239 {"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, 240 240 {"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, 241 241 {"LENOVO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, 242 + {"FUJITSU", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, 242 243 {"SanDisk", "Cruzer Blade", NULL, BLIST_TRY_VPD_PAGES | 243 244 BLIST_INQUIRY_36}, 244 245 {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
+1
drivers/scsi/scsi_dh.c
··· 63 63 {"LSI", "INF-01-00", "rdac", }, 64 64 {"ENGENIO", "INF-01-00", "rdac", }, 65 65 {"LENOVO", "DE_Series", "rdac", }, 66 + {"FUJITSU", "ETERNUS_AHB", "rdac", }, 66 67 {NULL, NULL, NULL }, 67 68 }; 68 69
+11 -5
drivers/scsi/scsi_lib.c
··· 547 547 scsi_uninit_cmd(cmd); 548 548 } 549 549 550 + static void scsi_run_queue_async(struct scsi_device *sdev) 551 + { 552 + if (scsi_target(sdev)->single_lun || 553 + !list_empty(&sdev->host->starved_list)) 554 + kblockd_schedule_work(&sdev->requeue_work); 555 + else 556 + blk_mq_run_hw_queues(sdev->request_queue, true); 557 + } 558 + 550 559 /* Returns false when no more bytes to process, true if there are more */ 551 560 static bool scsi_end_request(struct request *req, blk_status_t error, 552 561 unsigned int bytes) ··· 600 591 601 592 __blk_mq_end_request(req, error); 602 593 603 - if (scsi_target(sdev)->single_lun || 604 - !list_empty(&sdev->host->starved_list)) 605 - kblockd_schedule_work(&sdev->requeue_work); 606 - else 607 - blk_mq_run_hw_queues(q, true); 594 + scsi_run_queue_async(sdev); 608 595 609 596 percpu_ref_put(&q->q_usage_counter); 610 597 return false; ··· 1707 1702 */ 1708 1703 if (req->rq_flags & RQF_DONTPREP) 1709 1704 scsi_mq_uninit_cmd(cmd); 1705 + scsi_run_queue_async(sdev); 1710 1706 break; 1711 1707 } 1712 1708 return ret;
+1 -1
drivers/scsi/scsi_transport_iscsi.c
··· 4760 4760 4761 4761 iscsi_eh_timer_workq = alloc_workqueue("%s", 4762 4762 WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND, 4763 - 2, "iscsi_eh"); 4763 + 1, "iscsi_eh"); 4764 4764 if (!iscsi_eh_timer_workq) { 4765 4765 err = -ENOMEM; 4766 4766 goto release_nls;
+1 -1
drivers/scsi/scsi_transport_spi.c
··· 339 339 struct spi_transport_attrs *tp \ 340 340 = (struct spi_transport_attrs *)&starget->starget_data; \ 341 341 \ 342 - if (i->f->set_##field) \ 342 + if (!i->f->set_##field) \ 343 343 return -EINVAL; \ 344 344 val = simple_strtoul(buf, NULL, 0); \ 345 345 if (val > tp->max_##field) \
+5 -3
drivers/soc/amlogic/meson-gx-socinfo.c
··· 66 66 { "A113D", 0x25, 0x22, 0xff }, 67 67 { "S905D2", 0x28, 0x10, 0xf0 }, 68 68 { "S905X2", 0x28, 0x40, 0xf0 }, 69 - { "S922X", 0x29, 0x40, 0xf0 }, 70 69 { "A311D", 0x29, 0x10, 0xf0 }, 71 - { "S905X3", 0x2b, 0x5, 0xf }, 72 - { "S905D3", 0x2b, 0xb0, 0xf0 }, 70 + { "S922X", 0x29, 0x40, 0xf0 }, 71 + { "S905D3", 0x2b, 0x4, 0xf5 }, 72 + { "S905X3", 0x2b, 0x5, 0xf5 }, 73 + { "S905X3", 0x2b, 0x10, 0x3f }, 74 + { "S905D3", 0x2b, 0x30, 0x3f }, 73 75 { "A113L", 0x2c, 0x0, 0xf8 }, 74 76 }; 75 77
+3
drivers/soc/imx/soc-imx.c
··· 33 33 u32 val; 34 34 int ret; 35 35 36 + if (of_machine_is_compatible("fsl,ls1021a")) 37 + return 0; 38 + 36 39 soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL); 37 40 if (!soc_dev_attr) 38 41 return -ENOMEM;
+3 -2
drivers/soundwire/intel.c
··· 930 930 931 931 /* TODO: Read supported rates/formats from hardware */ 932 932 for (i = off; i < (off + num); i++) { 933 - dais[i].name = kasprintf(GFP_KERNEL, "SDW%d Pin%d", 934 - cdns->instance, i); 933 + dais[i].name = devm_kasprintf(cdns->dev, GFP_KERNEL, 934 + "SDW%d Pin%d", 935 + cdns->instance, i); 935 936 if (!dais[i].name) 936 937 return -ENOMEM; 937 938
+22 -17
drivers/spi/spi-fsl-dspi.c
··· 1109 1109 struct spi_controller *ctlr = dev_get_drvdata(dev); 1110 1110 struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr); 1111 1111 1112 + if (dspi->irq) 1113 + disable_irq(dspi->irq); 1112 1114 spi_controller_suspend(ctlr); 1113 1115 clk_disable_unprepare(dspi->clk); 1114 1116 ··· 1131 1129 if (ret) 1132 1130 return ret; 1133 1131 spi_controller_resume(ctlr); 1132 + if (dspi->irq) 1133 + enable_irq(dspi->irq); 1134 1134 1135 1135 return 0; 1136 1136 } ··· 1389 1385 goto poll_mode; 1390 1386 } 1391 1387 1392 - ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt, 1393 - IRQF_SHARED, pdev->name, dspi); 1388 + init_completion(&dspi->xfer_done); 1389 + 1390 + ret = request_threaded_irq(dspi->irq, dspi_interrupt, NULL, 1391 + IRQF_SHARED, pdev->name, dspi); 1394 1392 if (ret < 0) { 1395 1393 dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n"); 1396 1394 goto out_clk_put; 1397 1395 } 1398 - 1399 - init_completion(&dspi->xfer_done); 1400 1396 1401 1397 poll_mode: 1402 1398 ··· 1404 1400 ret = dspi_request_dma(dspi, res->start); 1405 1401 if (ret < 0) { 1406 1402 dev_err(&pdev->dev, "can't get dma channels\n"); 1407 - goto out_clk_put; 1403 + goto out_free_irq; 1408 1404 } 1409 1405 } 1410 1406 ··· 1419 1415 ret = spi_register_controller(ctlr); 1420 1416 if (ret != 0) { 1421 1417 dev_err(&pdev->dev, "Problem registering DSPI ctlr\n"); 1422 - goto out_clk_put; 1418 + goto out_free_irq; 1423 1419 } 1424 1420 1425 1421 return ret; 1426 1422 1423 + out_free_irq: 1424 + if (dspi->irq) 1425 + free_irq(dspi->irq, dspi); 1427 1426 out_clk_put: 1428 1427 clk_disable_unprepare(dspi->clk); 1429 1428 out_ctlr_put: ··· 1441 1434 struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr); 1442 1435 1443 1436 /* Disconnect from the SPI framework */ 1444 - dspi_release_dma(dspi); 1445 - clk_disable_unprepare(dspi->clk); 1446 1437 spi_unregister_controller(dspi->ctlr); 1447 - 1448 - return 0; 1449 - } 1450 - 1451 - static void dspi_shutdown(struct platform_device 
*pdev) 1452 - { 1453 - struct spi_controller *ctlr = platform_get_drvdata(pdev); 1454 - struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr); 1455 1438 1456 1439 /* Disable RX and TX */ 1457 1440 regmap_update_bits(dspi->regmap, SPI_MCR, ··· 1452 1455 regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, SPI_MCR_HALT); 1453 1456 1454 1457 dspi_release_dma(dspi); 1458 + if (dspi->irq) 1459 + free_irq(dspi->irq, dspi); 1455 1460 clk_disable_unprepare(dspi->clk); 1456 - spi_unregister_controller(dspi->ctlr); 1461 + 1462 + return 0; 1463 + } 1464 + 1465 + static void dspi_shutdown(struct platform_device *pdev) 1466 + { 1467 + dspi_remove(pdev); 1457 1468 } 1458 1469 1459 1470 static struct platform_driver fsl_dspi_driver = {
+8 -7
drivers/spi/spi-mt65xx.c
··· 36 36 #define SPI_CFG0_SCK_LOW_OFFSET 8 37 37 #define SPI_CFG0_CS_HOLD_OFFSET 16 38 38 #define SPI_CFG0_CS_SETUP_OFFSET 24 39 - #define SPI_ADJUST_CFG0_SCK_LOW_OFFSET 16 40 39 #define SPI_ADJUST_CFG0_CS_HOLD_OFFSET 0 41 40 #define SPI_ADJUST_CFG0_CS_SETUP_OFFSET 16 42 41 ··· 47 48 #define SPI_CFG1_CS_IDLE_MASK 0xff 48 49 #define SPI_CFG1_PACKET_LOOP_MASK 0xff00 49 50 #define SPI_CFG1_PACKET_LENGTH_MASK 0x3ff0000 51 + #define SPI_CFG2_SCK_HIGH_OFFSET 0 52 + #define SPI_CFG2_SCK_LOW_OFFSET 16 50 53 51 54 #define SPI_CMD_ACT BIT(0) 52 55 #define SPI_CMD_RESUME BIT(1) ··· 284 283 static void mtk_spi_prepare_transfer(struct spi_master *master, 285 284 struct spi_transfer *xfer) 286 285 { 287 - u32 spi_clk_hz, div, sck_time, cs_time, reg_val = 0; 286 + u32 spi_clk_hz, div, sck_time, cs_time, reg_val; 288 287 struct mtk_spi *mdata = spi_master_get_devdata(master); 289 288 290 289 spi_clk_hz = clk_get_rate(mdata->spi_clk); ··· 297 296 cs_time = sck_time * 2; 298 297 299 298 if (mdata->dev_comp->enhance_timing) { 299 + reg_val = (((sck_time - 1) & 0xffff) 300 + << SPI_CFG2_SCK_HIGH_OFFSET); 300 301 reg_val |= (((sck_time - 1) & 0xffff) 301 - << SPI_CFG0_SCK_HIGH_OFFSET); 302 - reg_val |= (((sck_time - 1) & 0xffff) 303 - << SPI_ADJUST_CFG0_SCK_LOW_OFFSET); 302 + << SPI_CFG2_SCK_LOW_OFFSET); 304 303 writel(reg_val, mdata->base + SPI_CFG2_REG); 305 - reg_val |= (((cs_time - 1) & 0xffff) 304 + reg_val = (((cs_time - 1) & 0xffff) 306 305 << SPI_ADJUST_CFG0_CS_HOLD_OFFSET); 307 306 reg_val |= (((cs_time - 1) & 0xffff) 308 307 << SPI_ADJUST_CFG0_CS_SETUP_OFFSET); 309 308 writel(reg_val, mdata->base + SPI_CFG0_REG); 310 309 } else { 311 - reg_val |= (((sck_time - 1) & 0xff) 310 + reg_val = (((sck_time - 1) & 0xff) 312 311 << SPI_CFG0_SCK_HIGH_OFFSET); 313 312 reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET); 314 313 reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
+5
drivers/spi/spi-pxa2xx.c
··· 1485 1485 { PCI_VDEVICE(INTEL, 0x4daa), LPSS_CNL_SSP }, 1486 1486 { PCI_VDEVICE(INTEL, 0x4dab), LPSS_CNL_SSP }, 1487 1487 { PCI_VDEVICE(INTEL, 0x4dfb), LPSS_CNL_SSP }, 1488 + /* TGL-H */ 1489 + { PCI_VDEVICE(INTEL, 0x43aa), LPSS_CNL_SSP }, 1490 + { PCI_VDEVICE(INTEL, 0x43ab), LPSS_CNL_SSP }, 1491 + { PCI_VDEVICE(INTEL, 0x43fb), LPSS_CNL_SSP }, 1492 + { PCI_VDEVICE(INTEL, 0x43fd), LPSS_CNL_SSP }, 1488 1493 /* APL */ 1489 1494 { PCI_VDEVICE(INTEL, 0x5ac2), LPSS_BXT_SSP }, 1490 1495 { PCI_VDEVICE(INTEL, 0x5ac4), LPSS_BXT_SSP },
+6 -8
drivers/spi/spi-sun6i.c
··· 198 198 struct spi_transfer *tfr) 199 199 { 200 200 struct sun6i_spi *sspi = spi_master_get_devdata(master); 201 - unsigned int mclk_rate, div, timeout; 201 + unsigned int mclk_rate, div, div_cdr1, div_cdr2, timeout; 202 202 unsigned int start, end, tx_time; 203 203 unsigned int trig_level; 204 204 unsigned int tx_len = 0; ··· 287 287 * First try CDR2, and if we can't reach the expected 288 288 * frequency, fall back to CDR1. 289 289 */ 290 - div = mclk_rate / (2 * tfr->speed_hz); 291 - if (div <= (SUN6I_CLK_CTL_CDR2_MASK + 1)) { 292 - if (div > 0) 293 - div--; 294 - 295 - reg = SUN6I_CLK_CTL_CDR2(div) | SUN6I_CLK_CTL_DRS; 290 + div_cdr1 = DIV_ROUND_UP(mclk_rate, tfr->speed_hz); 291 + div_cdr2 = DIV_ROUND_UP(div_cdr1, 2); 292 + if (div_cdr2 <= (SUN6I_CLK_CTL_CDR2_MASK + 1)) { 293 + reg = SUN6I_CLK_CTL_CDR2(div_cdr2 - 1) | SUN6I_CLK_CTL_DRS; 296 294 } else { 297 - div = ilog2(mclk_rate) - ilog2(tfr->speed_hz); 295 + div = min(SUN6I_CLK_CTL_CDR1_MASK, order_base_2(div_cdr1)); 298 296 reg = SUN6I_CLK_CTL_CDR1(div); 299 297 } 300 298
+14 -6
drivers/staging/comedi/drivers/addi_apci_1032.c
··· 106 106 unsigned int *data) 107 107 { 108 108 struct apci1032_private *devpriv = dev->private; 109 - unsigned int shift, oldmask; 109 + unsigned int shift, oldmask, himask, lomask; 110 110 111 111 switch (data[0]) { 112 112 case INSN_CONFIG_DIGITAL_TRIG: 113 113 if (data[1] != 0) 114 114 return -EINVAL; 115 115 shift = data[3]; 116 - oldmask = (1U << shift) - 1; 116 + if (shift < 32) { 117 + oldmask = (1U << shift) - 1; 118 + himask = data[4] << shift; 119 + lomask = data[5] << shift; 120 + } else { 121 + oldmask = 0xffffffffu; 122 + himask = 0; 123 + lomask = 0; 124 + } 117 125 switch (data[2]) { 118 126 case COMEDI_DIGITAL_TRIG_DISABLE: 119 127 devpriv->ctrl = 0; ··· 144 136 devpriv->mode2 &= oldmask; 145 137 } 146 138 /* configure specified channels */ 147 - devpriv->mode1 |= data[4] << shift; 148 - devpriv->mode2 |= data[5] << shift; 139 + devpriv->mode1 |= himask; 140 + devpriv->mode2 |= lomask; 149 141 break; 150 142 case COMEDI_DIGITAL_TRIG_ENABLE_LEVELS: 151 143 if (devpriv->ctrl != (APCI1032_CTRL_INT_ENA | ··· 162 154 devpriv->mode2 &= oldmask; 163 155 } 164 156 /* configure specified channels */ 165 - devpriv->mode1 |= data[4] << shift; 166 - devpriv->mode2 |= data[5] << shift; 157 + devpriv->mode1 |= himask; 158 + devpriv->mode2 |= lomask; 167 159 break; 168 160 default: 169 161 return -EINVAL;
+26 -8
drivers/staging/comedi/drivers/addi_apci_1500.c
··· 452 452 struct apci1500_private *devpriv = dev->private; 453 453 unsigned int trig = data[1]; 454 454 unsigned int shift = data[3]; 455 - unsigned int hi_mask = data[4] << shift; 456 - unsigned int lo_mask = data[5] << shift; 457 - unsigned int chan_mask = hi_mask | lo_mask; 458 - unsigned int old_mask = (1 << shift) - 1; 459 - unsigned int pm = devpriv->pm[trig] & old_mask; 460 - unsigned int pt = devpriv->pt[trig] & old_mask; 461 - unsigned int pp = devpriv->pp[trig] & old_mask; 455 + unsigned int hi_mask; 456 + unsigned int lo_mask; 457 + unsigned int chan_mask; 458 + unsigned int old_mask; 459 + unsigned int pm; 460 + unsigned int pt; 461 + unsigned int pp; 462 + unsigned int invalid_chan; 462 463 463 464 if (trig > 1) { 464 465 dev_dbg(dev->class_dev, ··· 467 466 return -EINVAL; 468 467 } 469 468 470 - if (chan_mask > 0xffff) { 469 + if (shift <= 16) { 470 + hi_mask = data[4] << shift; 471 + lo_mask = data[5] << shift; 472 + old_mask = (1U << shift) - 1; 473 + invalid_chan = (data[4] | data[5]) >> (16 - shift); 474 + } else { 475 + hi_mask = 0; 476 + lo_mask = 0; 477 + old_mask = 0xffff; 478 + invalid_chan = data[4] | data[5]; 479 + } 480 + chan_mask = hi_mask | lo_mask; 481 + 482 + if (invalid_chan) { 471 483 dev_dbg(dev->class_dev, "invalid digital trigger channel\n"); 472 484 return -EINVAL; 473 485 } 486 + 487 + pm = devpriv->pm[trig] & old_mask; 488 + pt = devpriv->pt[trig] & old_mask; 489 + pp = devpriv->pp[trig] & old_mask; 474 490 475 491 switch (data[2]) { 476 492 case COMEDI_DIGITAL_TRIG_DISABLE:
+14 -6
drivers/staging/comedi/drivers/addi_apci_1564.c
··· 331 331 unsigned int *data) 332 332 { 333 333 struct apci1564_private *devpriv = dev->private; 334 - unsigned int shift, oldmask; 334 + unsigned int shift, oldmask, himask, lomask; 335 335 336 336 switch (data[0]) { 337 337 case INSN_CONFIG_DIGITAL_TRIG: 338 338 if (data[1] != 0) 339 339 return -EINVAL; 340 340 shift = data[3]; 341 - oldmask = (1U << shift) - 1; 341 + if (shift < 32) { 342 + oldmask = (1U << shift) - 1; 343 + himask = data[4] << shift; 344 + lomask = data[5] << shift; 345 + } else { 346 + oldmask = 0xffffffffu; 347 + himask = 0; 348 + lomask = 0; 349 + } 342 350 switch (data[2]) { 343 351 case COMEDI_DIGITAL_TRIG_DISABLE: 344 352 devpriv->ctrl = 0; ··· 370 362 devpriv->mode2 &= oldmask; 371 363 } 372 364 /* configure specified channels */ 373 - devpriv->mode1 |= data[4] << shift; 374 - devpriv->mode2 |= data[5] << shift; 365 + devpriv->mode1 |= himask; 366 + devpriv->mode2 |= lomask; 375 367 break; 376 368 case COMEDI_DIGITAL_TRIG_ENABLE_LEVELS: 377 369 if (devpriv->ctrl != (APCI1564_DI_IRQ_ENA | ··· 388 380 devpriv->mode2 &= oldmask; 389 381 } 390 382 /* configure specified channels */ 391 - devpriv->mode1 |= data[4] << shift; 392 - devpriv->mode2 |= data[5] << shift; 383 + devpriv->mode1 |= himask; 384 + devpriv->mode2 |= lomask; 393 385 break; 394 386 default: 395 387 return -EINVAL;
+1 -1
drivers/staging/comedi/drivers/ni_6527.c
··· 332 332 case COMEDI_DIGITAL_TRIG_ENABLE_EDGES: 333 333 /* check shift amount */ 334 334 shift = data[3]; 335 - if (shift >= s->n_chan) { 335 + if (shift >= 32) { 336 336 mask = 0; 337 337 rising = 0; 338 338 falling = 0;
+1 -1
drivers/staging/media/atomisp/Kconfig
··· 22 22 module will be called atomisp 23 23 24 24 config VIDEO_ATOMISP_ISP2401 25 - bool "VIDEO_ATOMISP_ISP2401" 25 + bool "Use Intel Atom ISP on Cherrytail/Anniedale (ISP2401)" 26 26 depends on VIDEO_ATOMISP 27 27 help 28 28 Enable support for Atom ISP2401-based boards.
+1 -5
drivers/staging/media/atomisp/Makefile
··· 156 156 pci/hive_isp_css_common/host/timed_ctrl.o \ 157 157 pci/hive_isp_css_common/host/vmem.o \ 158 158 pci/hive_isp_css_shared/host/tag.o \ 159 + pci/system_local.o \ 159 160 160 161 obj-byt = \ 161 162 pci/css_2400_system/hive/ia_css_isp_configs.o \ ··· 183 182 -I$(atomisp)/include/hmm/ \ 184 183 -I$(atomisp)/include/mmu/ \ 185 184 -I$(atomisp)/pci/ \ 186 - -I$(atomisp)/pci/hrt/ \ 187 185 -I$(atomisp)/pci/base/circbuf/interface/ \ 188 186 -I$(atomisp)/pci/base/refcount/interface/ \ 189 187 -I$(atomisp)/pci/camera/pipe/interface/ \ ··· 192 192 -I$(atomisp)/pci/hive_isp_css_include/ \ 193 193 -I$(atomisp)/pci/hive_isp_css_include/device_access/ \ 194 194 -I$(atomisp)/pci/hive_isp_css_include/host/ \ 195 - -I$(atomisp)/pci/hive_isp_css_include/memory_access/ \ 196 195 -I$(atomisp)/pci/hive_isp_css_shared/ \ 197 196 -I$(atomisp)/pci/hive_isp_css_shared/host/ \ 198 197 -I$(atomisp)/pci/isp/kernels/ \ ··· 310 311 -I$(atomisp)/pci/runtime/tagger/interface/ 311 312 312 313 INCLUDES_byt += \ 313 - -I$(atomisp)/pci/css_2400_system/ \ 314 314 -I$(atomisp)/pci/css_2400_system/hive/ \ 315 - -I$(atomisp)/pci/css_2400_system/hrt/ \ 316 315 317 316 INCLUDES_cht += \ 318 317 -I$(atomisp)/pci/css_2401_system/ \ ··· 318 321 -I$(atomisp)/pci/css_2401_system/hive/ \ 319 322 -I$(atomisp)/pci/css_2401_system/hrt/ \ 320 323 321 - # -I$(atomisp)/pci/css_2401_system/hrt/ \ 322 324 # -I$(atomisp)/pci/css_2401_system/hive_isp_css_2401_system_generated/ \ 323 325 324 326 DEFINES := -DHRT_HW -DHRT_ISP_CSS_CUSTOM_HOST -DHRT_USE_VIR_ADDRS -D__HOST__
+3 -3
drivers/staging/media/atomisp/i2c/atomisp-ov2680.c
··· 495 495 ret = ov2680_read_reg(client, 1, OV2680_MIRROR_REG, &val); 496 496 if (ret) 497 497 return ret; 498 - if (value) { 498 + if (value) 499 499 val |= OV2680_FLIP_MIRROR_BIT_ENABLE; 500 - } else { 500 + else 501 501 val &= ~OV2680_FLIP_MIRROR_BIT_ENABLE; 502 - } 502 + 503 503 ret = ov2680_write_reg(client, 1, 504 504 OV2680_MIRROR_REG, val); 505 505 if (ret)
+4 -2
drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c
··· 1899 1899 { 1900 1900 struct ov5693_device *dev; 1901 1901 int i2c; 1902 - int ret = 0; 1902 + int ret; 1903 1903 void *pdata; 1904 1904 unsigned int i; 1905 1905 ··· 1929 1929 pdata = gmin_camera_platform_data(&dev->sd, 1930 1930 ATOMISP_INPUT_FORMAT_RAW_10, 1931 1931 atomisp_bayer_order_bggr); 1932 - if (!pdata) 1932 + if (!pdata) { 1933 + ret = -EINVAL; 1933 1934 goto out_free; 1935 + } 1934 1936 1935 1937 ret = ov5693_s_config(&dev->sd, client->irq, pdata); 1936 1938 if (ret)
+1
drivers/staging/media/atomisp/include/linux/atomisp_platform.h
··· 250 250 #define IS_MFLD __IS_SOC(INTEL_FAM6_ATOM_SALTWELL_MID) 251 251 #define IS_BYT __IS_SOC(INTEL_FAM6_ATOM_SILVERMONT) 252 252 #define IS_CHT __IS_SOC(INTEL_FAM6_ATOM_AIRMONT) 253 + #define IS_MRFD __IS_SOC(INTEL_FAM6_ATOM_SILVERMONT_MID) 253 254 #define IS_MOFD __IS_SOC(INTEL_FAM6_ATOM_AIRMONT_MID) 254 255 255 256 /* Both CHT and MOFD come with ISP2401 */
-3
drivers/staging/media/atomisp/pci/atomisp-regs.h
··· 20 20 #define ATOMISP_REGS_H 21 21 22 22 /* common register definitions */ 23 - #define PUNIT_PORT 0x04 24 - #define CCK_PORT 0x14 25 - 26 23 #define PCICMDSTS 0x01 27 24 #define INTR 0x0f 28 25 #define MSI_CAPID 0x24
+2 -2
drivers/staging/media/atomisp/pci/atomisp_acc.c
··· 355 355 356 356 pgnr = DIV_ROUND_UP(map->length, PAGE_SIZE); 357 357 if (pgnr < ((PAGE_ALIGN(map->length)) >> PAGE_SHIFT)) { 358 - dev_err(atomisp_dev, 358 + dev_err(asd->isp->dev, 359 359 "user space memory size is less than the expected size..\n"); 360 360 return -ENOMEM; 361 361 } else if (pgnr > ((PAGE_ALIGN(map->length)) >> PAGE_SHIFT)) { 362 - dev_err(atomisp_dev, 362 + dev_err(asd->isp->dev, 363 363 "user space memory size is large than the expected size..\n"); 364 364 return -ENOMEM; 365 365 }
+32 -27
drivers/staging/media/atomisp/pci/atomisp_cmd.c
··· 21 21 #include <linux/firmware.h> 22 22 #include <linux/pci.h> 23 23 #include <linux/interrupt.h> 24 + #include <linux/io.h> 24 25 #include <linux/kernel.h> 25 26 #include <linux/kfifo.h> 26 27 #include <linux/pm_runtime.h> ··· 110 109 111 110 static unsigned short atomisp_get_sensor_fps(struct atomisp_sub_device *asd) 112 111 { 113 - struct v4l2_subdev_frame_interval fi; 112 + struct v4l2_subdev_frame_interval fi = { 0 }; 114 113 struct atomisp_device *isp = asd->isp; 115 114 116 115 unsigned short fps = 0; ··· 207 206 enum atomisp_dfs_mode mode, 208 207 bool force) 209 208 { 209 + struct pci_dev *pdev = to_pci_dev(isp->dev); 210 210 /* FIXME! Only use subdev[0] status yet */ 211 211 struct atomisp_sub_device *asd = &isp->asd[0]; 212 212 const struct atomisp_dfs_config *dfs; ··· 221 219 return -EINVAL; 222 220 } 223 221 224 - if ((isp->pdev->device & ATOMISP_PCI_DEVICE_SOC_MASK) == 222 + if ((pdev->device & ATOMISP_PCI_DEVICE_SOC_MASK) == 225 223 ATOMISP_PCI_DEVICE_SOC_CHT && ATOMISP_USE_YUVPP(asd)) 226 224 isp->dfs = &dfs_config_cht_soc; 227 225 ··· 359 357 irq_clear_all(IRQ0_ID); 360 358 } 361 359 362 - void atomisp_msi_irq_init(struct atomisp_device *isp, struct pci_dev *dev) 360 + void atomisp_msi_irq_init(struct atomisp_device *isp) 363 361 { 362 + struct pci_dev *pdev = to_pci_dev(isp->dev); 364 363 u32 msg32; 365 364 u16 msg16; 366 365 367 - pci_read_config_dword(dev, PCI_MSI_CAPID, &msg32); 366 + pci_read_config_dword(pdev, PCI_MSI_CAPID, &msg32); 368 367 msg32 |= 1 << MSI_ENABLE_BIT; 369 - pci_write_config_dword(dev, PCI_MSI_CAPID, msg32); 368 + pci_write_config_dword(pdev, PCI_MSI_CAPID, msg32); 370 369 371 370 msg32 = (1 << INTR_IER) | (1 << INTR_IIR); 372 - pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, msg32); 371 + pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, msg32); 373 372 374 - pci_read_config_word(dev, PCI_COMMAND, &msg16); 373 + pci_read_config_word(pdev, PCI_COMMAND, &msg16); 375 374 msg16 |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | 
376 375 PCI_COMMAND_INTX_DISABLE); 377 - pci_write_config_word(dev, PCI_COMMAND, msg16); 376 + pci_write_config_word(pdev, PCI_COMMAND, msg16); 378 377 } 379 378 380 - void atomisp_msi_irq_uninit(struct atomisp_device *isp, struct pci_dev *dev) 379 + void atomisp_msi_irq_uninit(struct atomisp_device *isp) 381 380 { 381 + struct pci_dev *pdev = to_pci_dev(isp->dev); 382 382 u32 msg32; 383 383 u16 msg16; 384 384 385 - pci_read_config_dword(dev, PCI_MSI_CAPID, &msg32); 385 + pci_read_config_dword(pdev, PCI_MSI_CAPID, &msg32); 386 386 msg32 &= ~(1 << MSI_ENABLE_BIT); 387 - pci_write_config_dword(dev, PCI_MSI_CAPID, msg32); 387 + pci_write_config_dword(pdev, PCI_MSI_CAPID, msg32); 388 388 389 389 msg32 = 0x0; 390 - pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, msg32); 390 + pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, msg32); 391 391 392 - pci_read_config_word(dev, PCI_COMMAND, &msg16); 392 + pci_read_config_word(pdev, PCI_COMMAND, &msg16); 393 393 msg16 &= ~(PCI_COMMAND_MASTER); 394 - pci_write_config_word(dev, PCI_COMMAND, msg16); 394 + pci_write_config_word(pdev, PCI_COMMAND, msg16); 395 395 } 396 396 397 397 static void atomisp_sof_event(struct atomisp_sub_device *asd) ··· 484 480 /* Clear irq reg */ 485 481 static void clear_irq_reg(struct atomisp_device *isp) 486 482 { 483 + struct pci_dev *pdev = to_pci_dev(isp->dev); 487 484 u32 msg_ret; 488 485 489 - pci_read_config_dword(isp->pdev, PCI_INTERRUPT_CTRL, &msg_ret); 486 + pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &msg_ret); 490 487 msg_ret |= 1 << INTR_IIR; 491 - pci_write_config_dword(isp->pdev, PCI_INTERRUPT_CTRL, msg_ret); 488 + pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, msg_ret); 492 489 } 493 490 494 491 static struct atomisp_sub_device * ··· 670 665 void dump_sp_dmem(struct atomisp_device *isp, unsigned int addr, 671 666 unsigned int size) 672 667 { 673 - u32 __iomem *io_virt_addr; 674 668 unsigned int data = 0; 675 669 unsigned int size32 = DIV_ROUND_UP(size, sizeof(u32)); 676 670 677 - 
dev_dbg(isp->dev, "atomisp_io_base:%p\n", atomisp_io_base); 671 + dev_dbg(isp->dev, "atomisp mmio base: %p\n", isp->base); 678 672 dev_dbg(isp->dev, "%s, addr:0x%x, size: %d, size32: %d\n", __func__, 679 673 addr, size, size32); 680 674 if (size32 * 4 + addr > 0x4000) { ··· 682 678 return; 683 679 } 684 680 addr += SP_DMEM_BASE; 685 - io_virt_addr = atomisp_io_base + (addr & 0x003FFFFF); 681 + addr &= 0x003FFFFF; 686 682 do { 687 - data = *io_virt_addr; 683 + data = readl(isp->base + addr); 688 684 dev_dbg(isp->dev, "%s, \t [0x%x]:0x%x\n", __func__, addr, data); 689 - io_virt_addr += sizeof(u32); 690 - size32 -= 1; 691 - } while (size32 > 0); 685 + addr += sizeof(u32); 686 + } while (--size32); 692 687 } 693 688 694 689 static struct videobuf_buffer *atomisp_css_frame_to_vbuf( ··· 1292 1289 1293 1290 static void __atomisp_css_recover(struct atomisp_device *isp, bool isp_timeout) 1294 1291 { 1292 + struct pci_dev *pdev = to_pci_dev(isp->dev); 1295 1293 enum ia_css_pipe_id css_pipe_id; 1296 1294 bool stream_restart[MAX_STREAM_NUM] = {0}; 1297 1295 bool depth_mode = false; ··· 1376 1372 clear_isp_irq(hrt_isp_css_irq_sp); 1377 1373 1378 1374 /* Set the SRSE to 3 before resetting */ 1379 - pci_write_config_dword(isp->pdev, PCI_I_CONTROL, isp->saved_regs.i_control | 1380 - MRFLD_PCI_I_CONTROL_SRSE_RESET_MASK); 1375 + pci_write_config_dword(pdev, PCI_I_CONTROL, 1376 + isp->saved_regs.i_control | MRFLD_PCI_I_CONTROL_SRSE_RESET_MASK); 1381 1377 1382 1378 /* reset ISP and restore its state */ 1383 1379 isp->isp_timeout = true; ··· 6162 6158 /*Turn off ISP dphy */ 6163 6159 int atomisp_ospm_dphy_down(struct atomisp_device *isp) 6164 6160 { 6161 + struct pci_dev *pdev = to_pci_dev(isp->dev); 6165 6162 unsigned long flags; 6166 6163 u32 reg; 6167 6164 ··· 6184 6179 * MRFLD HW design need all CSI ports are disabled before 6185 6180 * powering down the IUNIT. 
6186 6181 */ 6187 - pci_read_config_dword(isp->pdev, MRFLD_PCI_CSI_CONTROL, &reg); 6182 + pci_read_config_dword(pdev, MRFLD_PCI_CSI_CONTROL, &reg); 6188 6183 reg |= MRFLD_ALL_CSI_PORTS_OFF_MASK; 6189 - pci_write_config_dword(isp->pdev, MRFLD_PCI_CSI_CONTROL, reg); 6184 + pci_write_config_dword(pdev, MRFLD_PCI_CSI_CONTROL, reg); 6190 6185 return 0; 6191 6186 } 6192 6187
+2 -2
drivers/staging/media/atomisp/pci/atomisp_cmd.h
··· 68 68 /* 69 69 * Interrupt functions 70 70 */ 71 - void atomisp_msi_irq_init(struct atomisp_device *isp, struct pci_dev *dev); 72 - void atomisp_msi_irq_uninit(struct atomisp_device *isp, struct pci_dev *dev); 71 + void atomisp_msi_irq_init(struct atomisp_device *isp); 72 + void atomisp_msi_irq_uninit(struct atomisp_device *isp); 73 73 void atomisp_wdt_work(struct work_struct *work); 74 74 void atomisp_wdt(struct timer_list *t); 75 75 void atomisp_setup_flash(struct atomisp_sub_device *asd);
-2
drivers/staging/media/atomisp/pci/atomisp_compat.h
··· 29 29 struct video_device; 30 30 enum atomisp_input_stream_id; 31 31 32 - extern void __iomem *atomisp_io_base; 33 - 34 32 struct atomisp_metadata_buf { 35 33 struct ia_css_metadata *metadata; 36 34 void *md_vptr;
+34 -36
drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
··· 33 33 #include "atomisp_ioctl.h" 34 34 #include "atomisp_acc.h" 35 35 36 - #include <asm/intel-mid.h> 37 - 38 36 #include "ia_css_debug.h" 39 37 #include "ia_css_isp_param.h" 40 38 #include "sh_css_hrt.h" 41 39 #include "ia_css_isys.h" 42 40 41 + #include <linux/io.h> 43 42 #include <linux/pm_runtime.h> 44 43 45 44 /* Assume max number of ACC stages */ ··· 68 69 69 70 static void atomisp_css2_hw_store_8(hrt_address addr, uint8_t data) 70 71 { 71 - s8 __iomem *io_virt_addr = atomisp_io_base + (addr & 0x003FFFFF); 72 + struct atomisp_device *isp = dev_get_drvdata(atomisp_dev); 72 73 unsigned long flags; 73 74 74 75 spin_lock_irqsave(&mmio_lock, flags); 75 - *io_virt_addr = data; 76 + writeb(data, isp->base + (addr & 0x003FFFFF)); 76 77 spin_unlock_irqrestore(&mmio_lock, flags); 77 78 } 78 79 79 80 static void atomisp_css2_hw_store_16(hrt_address addr, uint16_t data) 80 81 { 81 - s16 __iomem *io_virt_addr = atomisp_io_base + (addr & 0x003FFFFF); 82 + struct atomisp_device *isp = dev_get_drvdata(atomisp_dev); 82 83 unsigned long flags; 83 84 84 85 spin_lock_irqsave(&mmio_lock, flags); 85 - *io_virt_addr = data; 86 + writew(data, isp->base + (addr & 0x003FFFFF)); 86 87 spin_unlock_irqrestore(&mmio_lock, flags); 87 88 } 88 89 89 90 void atomisp_css2_hw_store_32(hrt_address addr, uint32_t data) 90 91 { 91 - s32 __iomem *io_virt_addr = atomisp_io_base + (addr & 0x003FFFFF); 92 + struct atomisp_device *isp = dev_get_drvdata(atomisp_dev); 92 93 unsigned long flags; 93 94 94 95 spin_lock_irqsave(&mmio_lock, flags); 95 - *io_virt_addr = data; 96 + writel(data, isp->base + (addr & 0x003FFFFF)); 96 97 spin_unlock_irqrestore(&mmio_lock, flags); 97 98 } 98 99 99 100 static uint8_t atomisp_css2_hw_load_8(hrt_address addr) 100 101 { 101 - s8 __iomem *io_virt_addr = atomisp_io_base + (addr & 0x003FFFFF); 102 + struct atomisp_device *isp = dev_get_drvdata(atomisp_dev); 102 103 unsigned long flags; 103 104 u8 ret; 104 105 105 106 spin_lock_irqsave(&mmio_lock, flags); 106 - ret = 
*io_virt_addr; 107 + ret = readb(isp->base + (addr & 0x003FFFFF)); 107 108 spin_unlock_irqrestore(&mmio_lock, flags); 108 109 return ret; 109 110 } 110 111 111 112 static uint16_t atomisp_css2_hw_load_16(hrt_address addr) 112 113 { 113 - s16 __iomem *io_virt_addr = atomisp_io_base + (addr & 0x003FFFFF); 114 + struct atomisp_device *isp = dev_get_drvdata(atomisp_dev); 114 115 unsigned long flags; 115 116 u16 ret; 116 117 117 118 spin_lock_irqsave(&mmio_lock, flags); 118 - ret = *io_virt_addr; 119 + ret = readw(isp->base + (addr & 0x003FFFFF)); 119 120 spin_unlock_irqrestore(&mmio_lock, flags); 120 121 return ret; 121 122 } 122 123 123 124 static uint32_t atomisp_css2_hw_load_32(hrt_address addr) 124 125 { 125 - s32 __iomem *io_virt_addr = atomisp_io_base + (addr & 0x003FFFFF); 126 + struct atomisp_device *isp = dev_get_drvdata(atomisp_dev); 126 127 unsigned long flags; 127 128 u32 ret; 128 129 129 130 spin_lock_irqsave(&mmio_lock, flags); 130 - ret = *io_virt_addr; 131 + ret = readl(isp->base + (addr & 0x003FFFFF)); 131 132 spin_unlock_irqrestore(&mmio_lock, flags); 132 133 return ret; 133 134 } 134 135 135 - static void atomisp_css2_hw_store(hrt_address addr, 136 - const void *from, uint32_t n) 136 + static void atomisp_css2_hw_store(hrt_address addr, const void *from, uint32_t n) 137 137 { 138 - s8 __iomem *io_virt_addr = atomisp_io_base + (addr & 0x003FFFFF); 138 + struct atomisp_device *isp = dev_get_drvdata(atomisp_dev); 139 139 unsigned long flags; 140 140 unsigned int i; 141 141 142 + addr &= 0x003FFFFF; 142 143 spin_lock_irqsave(&mmio_lock, flags); 143 - for (i = 0; i < n; i++, io_virt_addr++, from++) 144 - *io_virt_addr = *(s8 *)from; 144 + for (i = 0; i < n; i++, from++) 145 + writeb(*(s8 *)from, isp->base + addr + i); 146 + 145 147 spin_unlock_irqrestore(&mmio_lock, flags); 146 148 } 147 149 148 150 static void atomisp_css2_hw_load(hrt_address addr, void *to, uint32_t n) 149 151 { 150 - s8 __iomem *io_virt_addr = atomisp_io_base + (addr & 0x003FFFFF); 152 
+ struct atomisp_device *isp = dev_get_drvdata(atomisp_dev); 151 153 unsigned long flags; 152 154 unsigned int i; 153 155 156 + addr &= 0x003FFFFF; 154 157 spin_lock_irqsave(&mmio_lock, flags); 155 - for (i = 0; i < n; i++, to++, io_virt_addr++) 156 - *(s8 *)to = *io_virt_addr; 158 + for (i = 0; i < n; i++, to++) 159 + *(s8 *)to = readb(isp->base + addr + i); 157 160 spin_unlock_irqrestore(&mmio_lock, flags); 158 161 } 159 162 ··· 182 181 *data = atomisp_css2_hw_load_32(addr); 183 182 } 184 183 185 - static int hmm_get_mmu_base_addr(unsigned int *mmu_base_addr) 184 + static int hmm_get_mmu_base_addr(struct device *dev, unsigned int *mmu_base_addr) 186 185 { 187 186 if (!sh_mmu_mrfld.get_pd_base) { 188 - dev_err(atomisp_dev, "get mmu base address failed.\n"); 187 + dev_err(dev, "get mmu base address failed.\n"); 189 188 return -EINVAL; 190 189 } 191 190 ··· 840 839 int ret; 841 840 int err; 842 841 843 - ret = hmm_get_mmu_base_addr(&mmu_base_addr); 842 + ret = hmm_get_mmu_base_addr(isp->dev, &mmu_base_addr); 844 843 if (ret) 845 844 return ret; 846 845 ··· 942 941 unsigned int mmu_base_addr; 943 942 int ret; 944 943 945 - ret = hmm_get_mmu_base_addr(&mmu_base_addr); 944 + ret = hmm_get_mmu_base_addr(isp->dev, &mmu_base_addr); 946 945 if (ret) { 947 946 dev_err(isp->dev, "get base address error.\n"); 948 947 return -EINVAL; ··· 1967 1966 true, 1968 1967 0x13000, 1969 1968 &size_mem_words) != 0) { 1970 - if (intel_mid_identify_cpu() == 1971 - INTEL_MID_CPU_CHIP_TANGIER) 1969 + if (IS_MRFD) 1972 1970 size_mem_words = CSS_MIPI_FRAME_BUFFER_SIZE_2; 1973 1971 else 1974 1972 size_mem_words = CSS_MIPI_FRAME_BUFFER_SIZE_1; ··· 2414 2414 struct ia_css_resolution *effective_res = 2415 2415 &stream_config->input_config.effective_res; 2416 2416 2417 - const struct bayer_ds_factor bds_fct[] = {{2, 1}, {3, 2}, {5, 4} }; 2417 + static const struct bayer_ds_factor bds_fct[] = {{2, 1}, {3, 2}, {5, 4} }; 2418 2418 /* 2419 2419 * BZ201033: YUV decimation factor of 4 causes couple of 
rightmost 2420 2420 * columns to be shaded. Remove this factor to work around the CSS bug. 2421 2421 * const unsigned int yuv_dec_fct[] = {4, 2}; 2422 2422 */ 2423 - const unsigned int yuv_dec_fct[] = { 2 }; 2423 + static const unsigned int yuv_dec_fct[] = { 2 }; 2424 2424 unsigned int i; 2425 2425 2426 2426 if (width == 0 && height == 0) ··· 2540 2540 struct ia_css_resolution *effective_res = 2541 2541 &stream_config->input_config.effective_res; 2542 2542 2543 - const struct bayer_ds_factor bds_factors[] = { 2543 + static const struct bayer_ds_factor bds_factors[] = { 2544 2544 {8, 1}, {6, 1}, {4, 1}, {3, 1}, {2, 1}, {3, 2} 2545 2545 }; 2546 2546 unsigned int i; ··· 4337 4337 [IA_CSS_ACC_STANDALONE] = "Stand-alone acceleration", 4338 4338 }; 4339 4339 4340 - int atomisp_css_dump_blob_infor(void) 4340 + int atomisp_css_dump_blob_infor(struct atomisp_device *isp) 4341 4341 { 4342 4342 struct ia_css_blob_descr *bd = sh_css_blob_info; 4343 4343 unsigned int i, nm = sh_css_num_binaries; ··· 4354 4354 for (i = 0; i < sh_css_num_binaries - NUM_OF_SPS; i++) { 4355 4355 switch (bd[i].header.type) { 4356 4356 case ia_css_isp_firmware: 4357 - dev_dbg(atomisp_dev, 4358 - "Num%2d type %s (%s), binary id is %2d, name is %s\n", 4357 + dev_dbg(isp->dev, "Num%2d type %s (%s), binary id is %2d, name is %s\n", 4359 4358 i + NUM_OF_SPS, 4360 4359 fw_type_name[bd[i].header.type], 4361 4360 fw_acc_type_name[bd[i].header.info.isp.type], ··· 4362 4363 bd[i].name); 4363 4364 break; 4364 4365 default: 4365 - dev_dbg(atomisp_dev, 4366 - "Num%2d type %s, name is %s\n", 4366 + dev_dbg(isp->dev, "Num%2d type %s, name is %s\n", 4367 4367 i + NUM_OF_SPS, fw_type_name[bd[i].header.type], 4368 4368 bd[i].name); 4369 4369 }
+1 -1
drivers/staging/media/atomisp/pci/atomisp_compat_css20.h
··· 153 153 154 154 int atomisp_css_dump_sp_raw_copy_linecount(bool reduced); 155 155 156 - int atomisp_css_dump_blob_infor(void); 156 + int atomisp_css_dump_blob_infor(struct atomisp_device *isp); 157 157 158 158 void atomisp_css_set_isp_config_id(struct atomisp_sub_device *asd, 159 159 uint32_t isp_config_id);
+7 -7
drivers/staging/media/atomisp/pci/atomisp_drvfs.c
··· 62 62 63 63 if (opt & OPTION_VALID) { 64 64 if (opt & OPTION_BIN_LIST) { 65 - ret = atomisp_css_dump_blob_infor(); 65 + ret = atomisp_css_dump_blob_infor(isp); 66 66 if (ret) { 67 - dev_err(atomisp_dev, "%s dump blob infor err[ret:%d]\n", 67 + dev_err(isp->dev, "%s dump blob infor err[ret:%d]\n", 68 68 __func__, ret); 69 69 goto opt_err; 70 70 } ··· 76 76 atomisp_css_debug_dump_isp_binary(); 77 77 } else { 78 78 ret = -EPERM; 79 - dev_err(atomisp_dev, "%s dump running bin err[ret:%d]\n", 79 + dev_err(isp->dev, "%s dump running bin err[ret:%d]\n", 80 80 __func__, ret); 81 81 goto opt_err; 82 82 } ··· 86 86 hmm_show_mem_stat(__func__, __LINE__); 87 87 } else { 88 88 ret = -EINVAL; 89 - dev_err(atomisp_dev, "%s dump nothing[ret=%d]\n", __func__, 90 - ret); 89 + dev_err(isp->dev, "%s dump nothing[ret=%d]\n", __func__, ret); 91 90 } 92 91 93 92 opt_err: ··· 184 185 driver_remove_file(drv, &iunit_drvfs_attrs[i]); 185 186 } 186 187 187 - int atomisp_drvfs_init(struct device_driver *drv, struct atomisp_device *isp) 188 + int atomisp_drvfs_init(struct atomisp_device *isp) 188 189 { 190 + struct device_driver *drv = isp->dev->driver; 189 191 int ret; 190 192 191 193 iunit_debug.isp = isp; ··· 194 194 195 195 ret = iunit_drvfs_create_files(iunit_debug.drv); 196 196 if (ret) { 197 - dev_err(atomisp_dev, "drvfs_create_files error: %d\n", ret); 197 + dev_err(isp->dev, "drvfs_create_files error: %d\n", ret); 198 198 iunit_drvfs_remove_files(iunit_debug.drv); 199 199 } 200 200
+1 -1
drivers/staging/media/atomisp/pci/atomisp_drvfs.h
··· 19 19 #ifndef __ATOMISP_DRVFS_H__ 20 20 #define __ATOMISP_DRVFS_H__ 21 21 22 - int atomisp_drvfs_init(struct device_driver *drv, struct atomisp_device *isp); 22 + int atomisp_drvfs_init(struct atomisp_device *isp); 23 23 void atomisp_drvfs_exit(void); 24 24 25 25 #endif /* __ATOMISP_DRVFS_H__ */
+352 -187
drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
··· 26 26 #define CLK_RATE_19_2MHZ 19200000 27 27 #define CLK_RATE_25_0MHZ 25000000 28 28 29 + /* Valid clock number range from 0 to 5 */ 30 + #define MAX_CLK_COUNT 5 31 + 29 32 /* X-Powers AXP288 register set */ 30 33 #define ALDO1_SEL_REG 0x28 31 34 #define ALDO1_CTRL3_REG 0x13 ··· 64 61 65 62 struct gmin_subdev { 66 63 struct v4l2_subdev *subdev; 67 - int clock_num; 68 64 enum clock_rate clock_src; 69 - bool clock_on; 70 65 struct clk *pmc_clk; 71 66 struct gpio_desc *gpio0; 72 67 struct gpio_desc *gpio1; ··· 76 75 unsigned int csi_lanes; 77 76 enum atomisp_input_format csi_fmt; 78 77 enum atomisp_bayer_order csi_bayer; 78 + 79 + bool clock_on; 79 80 bool v1p8_on; 80 81 bool v2p8_on; 81 82 bool v1p2_on; 82 83 bool v2p8_vcm_on; 84 + 85 + int v1p8_gpio; 86 + int v2p8_gpio; 83 87 84 88 u8 pwm_i2c_addr; 85 89 ··· 96 90 static struct gmin_subdev gmin_subdevs[MAX_SUBDEVS]; 97 91 98 92 /* ACPI HIDs for the PMICs that could be used by this driver */ 99 - #define PMIC_ACPI_AXP "INT33F4:00" /* XPower AXP288 PMIC */ 100 - #define PMIC_ACPI_TI "INT33F5:00" /* Dollar Cove TI PMIC */ 101 - #define PMIC_ACPI_CRYSTALCOVE "INT33FD:00" /* Crystal Cove PMIC */ 93 + #define PMIC_ACPI_AXP "INT33F4" /* XPower AXP288 PMIC */ 94 + #define PMIC_ACPI_TI "INT33F5" /* Dollar Cove TI PMIC */ 95 + #define PMIC_ACPI_CRYSTALCOVE "INT33FD" /* Crystal Cove PMIC */ 102 96 103 97 #define PMIC_PLATFORM_TI "intel_soc_pmic_chtdc_ti" 104 98 ··· 111 105 } pmic_id; 112 106 113 107 static const char *pmic_name[] = { 114 - [PMIC_UNSET] = "unset", 108 + [PMIC_UNSET] = "ACPI device PM", 115 109 [PMIC_REGULATOR] = "regulator driver", 116 110 [PMIC_AXP] = "XPower AXP288 PMIC", 117 111 [PMIC_TI] = "Dollar Cove TI PMIC", ··· 124 118 static const struct atomisp_platform_data pdata = { 125 119 .subdevs = pdata_subdevs, 126 120 }; 127 - 128 - /* 129 - * Something of a hack. The ECS E7 board drives camera 2.8v from an 130 - * external regulator instead of the PMIC. 
There's a gmin_CamV2P8 131 - * config variable that specifies the GPIO to handle this particular 132 - * case, but this needs a broader architecture for handling camera 133 - * power. 134 - */ 135 - enum { V2P8_GPIO_UNSET = -2, V2P8_GPIO_NONE = -1 }; 136 - static int v2p8_gpio = V2P8_GPIO_UNSET; 137 - 138 - /* 139 - * Something of a hack. The CHT RVP board drives camera 1.8v from an 140 - * external regulator instead of the PMIC just like ECS E7 board, see the 141 - * comments above. 142 - */ 143 - enum { V1P8_GPIO_UNSET = -2, V1P8_GPIO_NONE = -1 }; 144 - static int v1p8_gpio = V1P8_GPIO_UNSET; 145 121 146 122 static LIST_HEAD(vcm_devices); 147 123 static DEFINE_MUTEX(vcm_lock); ··· 187 199 * gmin_subdev struct is already initialized for us. 188 200 */ 189 201 gs = find_gmin_subdev(subdev); 202 + if (!gs) 203 + return -ENODEV; 190 204 191 205 pdata.subdevs[i].type = type; 192 206 pdata.subdevs[i].port = gs->csi_port; ··· 284 294 {"INT33F8:00_CsiFmt", "13"}, 285 295 {"INT33F8:00_CsiBayer", "0"}, 286 296 {"INT33F8:00_CamClk", "0"}, 297 + 287 298 {"INT33F9:00_CamType", "1"}, 288 299 {"INT33F9:00_CsiPort", "0"}, 289 300 {"INT33F9:00_CsiLanes", "1"}, ··· 300 309 {"INT33BE:00_CsiFmt", "13"}, 301 310 {"INT33BE:00_CsiBayer", "2"}, 302 311 {"INT33BE:00_CamClk", "0"}, 312 + 303 313 {"INT33F0:00_CsiPort", "0"}, 304 314 {"INT33F0:00_CsiLanes", "1"}, 305 315 {"INT33F0:00_CsiFmt", "13"}, ··· 314 322 {"XXOV2680:00_CsiPort", "1"}, 315 323 {"XXOV2680:00_CsiLanes", "1"}, 316 324 {"XXOV2680:00_CamClk", "0"}, 325 + 317 326 {"XXGC0310:00_CsiPort", "0"}, 318 327 {"XXGC0310:00_CsiLanes", "1"}, 319 328 {"XXGC0310:00_CamClk", "1"}, ··· 374 381 #define GMIN_PMC_CLK_NAME 14 /* "pmc_plt_clk_[0..5]" */ 375 382 static char gmin_pmc_clk_name[GMIN_PMC_CLK_NAME]; 376 383 377 - static int gmin_i2c_match_one(struct device *dev, const void *data) 378 - { 379 - const char *name = data; 380 - struct i2c_client *client; 381 - 382 - if (dev->type != &i2c_client_type) 383 - return 0; 384 - 385 - client = 
to_i2c_client(dev); 386 - 387 - return (!strcmp(name, client->name)); 388 - } 389 - 390 384 static struct i2c_client *gmin_i2c_dev_exists(struct device *dev, char *name, 391 385 struct i2c_client **client) 392 386 { 387 + struct acpi_device *adev; 393 388 struct device *d; 394 389 395 - while ((d = bus_find_device(&i2c_bus_type, NULL, name, 396 - gmin_i2c_match_one))) { 397 - *client = to_i2c_client(d); 398 - dev_dbg(dev, "found '%s' at address 0x%02x, adapter %d\n", 399 - (*client)->name, (*client)->addr, 400 - (*client)->adapter->nr); 401 - return *client; 402 - } 390 + adev = acpi_dev_get_first_match_dev(name, NULL, -1); 391 + if (!adev) 392 + return NULL; 403 393 404 - return NULL; 394 + d = bus_find_device_by_acpi_dev(&i2c_bus_type, adev); 395 + acpi_dev_put(adev); 396 + if (!d) 397 + return NULL; 398 + 399 + *client = i2c_verify_client(d); 400 + put_device(d); 401 + 402 + dev_dbg(dev, "found '%s' at address 0x%02x, adapter %d\n", 403 + (*client)->name, (*client)->addr, (*client)->adapter->nr); 404 + return *client; 405 405 } 406 406 407 407 static int gmin_i2c_write(struct device *dev, u16 i2c_addr, u8 reg, ··· 413 427 "I2C write, addr: 0x%02x, reg: 0x%02x, value: 0x%02x, mask: 0x%02x\n", 414 428 i2c_addr, reg, value, mask); 415 429 416 - ret = intel_soc_pmic_exec_mipi_pmic_seq_element(i2c_addr, reg, 417 - value, mask); 418 - 419 - if (ret == -EOPNOTSUPP) { 430 + ret = intel_soc_pmic_exec_mipi_pmic_seq_element(i2c_addr, reg, value, mask); 431 + if (ret == -EOPNOTSUPP) 420 432 dev_err(dev, 421 433 "ACPI didn't mapped the OpRegion needed to access I2C address 0x%02x.\n" 422 - "Need to compile the Kernel using CONFIG_*_PMIC_OPREGION settings\n", 434 + "Need to compile the kernel using CONFIG_*_PMIC_OPREGION settings\n", 423 435 i2c_addr); 424 - return ret; 425 - } 426 436 427 437 return ret; 428 438 } 429 439 430 - static struct gmin_subdev *gmin_subdev_add(struct v4l2_subdev *subdev) 440 + static int atomisp_get_acpi_power(struct device *dev) 431 441 { 432 - 
struct i2c_client *power = NULL, *client = v4l2_get_subdevdata(subdev); 433 - struct acpi_device *adev; 434 - acpi_handle handle; 435 - struct device *dev; 436 - int i, ret; 442 + char name[5]; 443 + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 444 + struct acpi_buffer b_name = { sizeof(name), name }; 445 + union acpi_object *package, *element; 446 + acpi_handle handle = ACPI_HANDLE(dev); 447 + acpi_handle rhandle; 448 + acpi_status status; 449 + int clock_num = -1; 450 + int i; 437 451 438 - if (!client) 439 - return NULL; 452 + status = acpi_evaluate_object(handle, "_PR0", NULL, &buffer); 453 + if (!ACPI_SUCCESS(status)) 454 + return -1; 440 455 441 - dev = &client->dev; 456 + package = buffer.pointer; 442 457 443 - handle = ACPI_HANDLE(dev); 458 + if (!buffer.length || !package 459 + || package->type != ACPI_TYPE_PACKAGE 460 + || !package->package.count) 461 + goto fail; 444 462 445 - // FIXME: may need to release resources allocated by acpi_bus_get_device() 446 - if (!handle || acpi_bus_get_device(handle, &adev)) { 447 - dev_err(dev, "Error could not get ACPI device\n"); 448 - return NULL; 463 + for (i = 0; i < package->package.count; i++) { 464 + element = &package->package.elements[i]; 465 + 466 + if (element->type != ACPI_TYPE_LOCAL_REFERENCE) 467 + continue; 468 + 469 + rhandle = element->reference.handle; 470 + if (!rhandle) 471 + goto fail; 472 + 473 + acpi_get_name(rhandle, ACPI_SINGLE_NAME, &b_name); 474 + 475 + dev_dbg(dev, "Found PM resource '%s'\n", name); 476 + if (strlen(name) == 4 && !strncmp(name, "CLK", 3)) { 477 + if (name[3] >= '0' && name[3] <= '4') 478 + clock_num = name[3] - '0'; 479 + #if 0 480 + /* 481 + * We could abort here, but let's parse all resources, 482 + * as this is helpful for debugging purposes 483 + */ 484 + if (clock_num >= 0) 485 + break; 486 + #endif 487 + } 449 488 } 450 489 451 - dev_info(&client->dev, "%s: ACPI detected it on bus ID=%s, HID=%s\n", 452 - __func__, acpi_device_bid(adev), 
acpi_device_hid(adev)); 490 + fail: 491 + ACPI_FREE(buffer.pointer); 453 492 454 - if (!pmic_id) { 455 - if (gmin_i2c_dev_exists(dev, PMIC_ACPI_TI, &power)) 456 - pmic_id = PMIC_TI; 457 - else if (gmin_i2c_dev_exists(dev, PMIC_ACPI_AXP, &power)) 458 - pmic_id = PMIC_AXP; 459 - else if (gmin_i2c_dev_exists(dev, PMIC_ACPI_CRYSTALCOVE, &power)) 460 - pmic_id = PMIC_CRYSTALCOVE; 461 - else 462 - pmic_id = PMIC_REGULATOR; 463 - } 493 + return clock_num; 494 + } 464 495 465 - for (i = 0; i < MAX_SUBDEVS && gmin_subdevs[i].subdev; i++) 466 - ; 467 - if (i >= MAX_SUBDEVS) 468 - return NULL; 496 + static u8 gmin_get_pmic_id_and_addr(struct device *dev) 497 + { 498 + struct i2c_client *power; 499 + static u8 pmic_i2c_addr; 469 500 470 - if (power) { 471 - gmin_subdevs[i].pwm_i2c_addr = power->addr; 472 - dev_info(dev, 473 - "gmin: power management provided via %s (i2c addr 0x%02x)\n", 474 - pmic_name[pmic_id], power->addr); 475 - } else { 476 - dev_info(dev, "gmin: power management provided via %s\n", 477 - pmic_name[pmic_id]); 478 - } 501 + if (pmic_id) 502 + return pmic_i2c_addr; 479 503 480 - gmin_subdevs[i].subdev = subdev; 481 - gmin_subdevs[i].clock_num = gmin_get_var_int(dev, false, "CamClk", 0); 504 + if (gmin_i2c_dev_exists(dev, PMIC_ACPI_TI, &power)) 505 + pmic_id = PMIC_TI; 506 + else if (gmin_i2c_dev_exists(dev, PMIC_ACPI_AXP, &power)) 507 + pmic_id = PMIC_AXP; 508 + else if (gmin_i2c_dev_exists(dev, PMIC_ACPI_CRYSTALCOVE, &power)) 509 + pmic_id = PMIC_CRYSTALCOVE; 510 + else 511 + pmic_id = PMIC_REGULATOR; 512 + 513 + pmic_i2c_addr = power ? 
power->addr : 0; 514 + return pmic_i2c_addr; 515 + } 516 + 517 + static int gmin_detect_pmic(struct v4l2_subdev *subdev) 518 + { 519 + struct i2c_client *client = v4l2_get_subdevdata(subdev); 520 + struct device *dev = &client->dev; 521 + u8 pmic_i2c_addr; 522 + 523 + pmic_i2c_addr = gmin_get_pmic_id_and_addr(dev); 524 + dev_info(dev, "gmin: power management provided via %s (i2c addr 0x%02x)\n", 525 + pmic_name[pmic_id], pmic_i2c_addr); 526 + return pmic_i2c_addr; 527 + } 528 + 529 + static int gmin_subdev_add(struct gmin_subdev *gs) 530 + { 531 + struct i2c_client *client = v4l2_get_subdevdata(gs->subdev); 532 + struct device *dev = &client->dev; 533 + struct acpi_device *adev = ACPI_COMPANION(dev); 534 + int ret, clock_num = -1; 535 + 536 + dev_info(dev, "%s: ACPI path is %pfw\n", __func__, dev_fwnode(dev)); 537 + 482 538 /*WA:CHT requires XTAL clock as PLL is not stable.*/ 483 - gmin_subdevs[i].clock_src = gmin_get_var_int(dev, false, "ClkSrc", 484 - VLV2_CLK_PLL_19P2MHZ); 485 - gmin_subdevs[i].csi_port = gmin_get_var_int(dev, false, "CsiPort", 0); 486 - gmin_subdevs[i].csi_lanes = gmin_get_var_int(dev, false, "CsiLanes", 1); 539 + gs->clock_src = gmin_get_var_int(dev, false, "ClkSrc", 540 + VLV2_CLK_PLL_19P2MHZ); 487 541 488 - /* get PMC clock with clock framework */ 489 - snprintf(gmin_pmc_clk_name, 490 - sizeof(gmin_pmc_clk_name), 491 - "%s_%d", "pmc_plt_clk", gmin_subdevs[i].clock_num); 542 + gs->csi_port = gmin_get_var_int(dev, false, "CsiPort", 0); 543 + gs->csi_lanes = gmin_get_var_int(dev, false, "CsiLanes", 1); 492 544 493 - gmin_subdevs[i].pmc_clk = devm_clk_get(dev, gmin_pmc_clk_name); 494 - if (IS_ERR(gmin_subdevs[i].pmc_clk)) { 495 - ret = PTR_ERR(gmin_subdevs[i].pmc_clk); 545 + gs->gpio0 = gpiod_get_index(dev, NULL, 0, GPIOD_OUT_LOW); 546 + if (IS_ERR(gs->gpio0)) 547 + gs->gpio0 = NULL; 548 + else 549 + dev_info(dev, "will handle gpio0 via ACPI\n"); 496 550 497 - dev_err(dev, 498 - "Failed to get clk from %s : %d\n", 499 - gmin_pmc_clk_name, 500 - 
ret); 551 + gs->gpio1 = gpiod_get_index(dev, NULL, 1, GPIOD_OUT_LOW); 552 + if (IS_ERR(gs->gpio1)) 553 + gs->gpio1 = NULL; 554 + else 555 + dev_info(dev, "will handle gpio1 via ACPI\n"); 501 556 502 - return NULL; 557 + /* 558 + * Those are used only when there is an external regulator apart 559 + * from the PMIC that would be providing power supply, like on the 560 + * two cases below: 561 + * 562 + * The ECS E7 board drives camera 2.8v from an external regulator 563 + * instead of the PMIC. There's a gmin_CamV2P8 config variable 564 + * that specifies the GPIO to handle this particular case, 565 + * but this needs a broader architecture for handling camera power. 566 + * 567 + * The CHT RVP board drives camera 1.8v from an* external regulator 568 + * instead of the PMIC just like ECS E7 board. 569 + */ 570 + 571 + gs->v1p8_gpio = gmin_get_var_int(dev, true, "V1P8GPIO", -1); 572 + gs->v2p8_gpio = gmin_get_var_int(dev, true, "V2P8GPIO", -1); 573 + 574 + /* 575 + * FIXME: 576 + * 577 + * The ACPI handling code checks for the _PR? tables in order to 578 + * know what is required to switch the device from power state 579 + * D0 (_PR0) up to D3COLD (_PR3). 580 + * 581 + * The adev->flags.power_manageable is set to true if the device 582 + * has a _PR0 table, which can be checked by calling 583 + * acpi_device_power_manageable(adev). 584 + * 585 + * However, this only says that the device can be set to power off 586 + * mode. 587 + * 588 + * At least on the DSDT tables we've seen so far, there's no _PR3, 589 + * nor _PS3 (which would have a somewhat similar effect). 590 + * So, using ACPI for power management won't work, except if adding 591 + * an ACPI override logic somewhere. 592 + * 593 + * So, at least for the existing devices we know, the check below 594 + * will always be false. 
595 + */ 596 + if (acpi_device_can_wakeup(adev) && 597 + acpi_device_can_poweroff(adev)) { 598 + dev_info(dev, 599 + "gmin: power management provided via device PM\n"); 600 + return 0; 503 601 } 602 + 603 + /* 604 + * The code below is here due to backward compatibility with devices 605 + * whose ACPI BIOS may not contain everything that would be needed 606 + * in order to set clocks and do power management. 607 + */ 608 + 609 + /* 610 + * According with : 611 + * https://github.com/projectceladon/hardware-intel-kernelflinger/blob/master/doc/fastboot.md 612 + * 613 + * The "CamClk" EFI var is set via fastboot on some Android devices, 614 + * and seems to contain the number of the clock used to feed the 615 + * sensor. 616 + * 617 + * On systems with a proper ACPI table, this is given via the _PR0 618 + * power resource table. The logic below should first check if there 619 + * is a power resource already, falling back to the EFI vars detection 620 + * otherwise. 621 + */ 622 + 623 + /* Try first to use ACPI to get the clock resource */ 624 + if (acpi_device_power_manageable(adev)) 625 + clock_num = atomisp_get_acpi_power(dev); 626 + 627 + /* Fall-back use EFI and/or DMI match */ 628 + if (clock_num < 0) 629 + clock_num = gmin_get_var_int(dev, false, "CamClk", 0); 630 + 631 + if (clock_num < 0 || clock_num > MAX_CLK_COUNT) { 632 + dev_err(dev, "Invalid clock number\n"); 633 + return -EINVAL; 634 + } 635 + 636 + snprintf(gmin_pmc_clk_name, sizeof(gmin_pmc_clk_name), 637 + "%s_%d", "pmc_plt_clk", clock_num); 638 + 639 + gs->pmc_clk = devm_clk_get(dev, gmin_pmc_clk_name); 640 + if (IS_ERR(gs->pmc_clk)) { 641 + ret = PTR_ERR(gs->pmc_clk); 642 + dev_err(dev, "Failed to get clk from %s: %d\n", gmin_pmc_clk_name, ret); 643 + return ret; 644 + } 645 + dev_info(dev, "Will use CLK%d (%s)\n", clock_num, gmin_pmc_clk_name); 504 646 505 647 /* 506 648 * The firmware might enable the clock at ··· 640 526 * to disable a clock that has not been enabled, 641 527 * we need to enable 
the clock first. 642 528 */ 643 - ret = clk_prepare_enable(gmin_subdevs[i].pmc_clk); 529 + ret = clk_prepare_enable(gs->pmc_clk); 644 530 if (!ret) 645 - clk_disable_unprepare(gmin_subdevs[i].pmc_clk); 646 - 647 - gmin_subdevs[i].gpio0 = gpiod_get_index(dev, NULL, 0, GPIOD_OUT_LOW); 648 - if (IS_ERR(gmin_subdevs[i].gpio0)) 649 - gmin_subdevs[i].gpio0 = NULL; 650 - 651 - gmin_subdevs[i].gpio1 = gpiod_get_index(dev, NULL, 1, GPIOD_OUT_LOW); 652 - if (IS_ERR(gmin_subdevs[i].gpio1)) 653 - gmin_subdevs[i].gpio1 = NULL; 531 + clk_disable_unprepare(gs->pmc_clk); 654 532 655 533 switch (pmic_id) { 656 534 case PMIC_REGULATOR: 657 - gmin_subdevs[i].v1p8_reg = regulator_get(dev, "V1P8SX"); 658 - gmin_subdevs[i].v2p8_reg = regulator_get(dev, "V2P8SX"); 535 + gs->v1p8_reg = regulator_get(dev, "V1P8SX"); 536 + gs->v2p8_reg = regulator_get(dev, "V2P8SX"); 659 537 660 - gmin_subdevs[i].v1p2_reg = regulator_get(dev, "V1P2A"); 661 - gmin_subdevs[i].v2p8_vcm_reg = regulator_get(dev, "VPROG4B"); 538 + gs->v1p2_reg = regulator_get(dev, "V1P2A"); 539 + gs->v2p8_vcm_reg = regulator_get(dev, "VPROG4B"); 662 540 663 541 /* Note: ideally we would initialize v[12]p8_on to the 664 542 * output of regulator_is_enabled(), but sadly that ··· 662 556 break; 663 557 664 558 case PMIC_AXP: 665 - gmin_subdevs[i].eldo1_1p8v = gmin_get_var_int(dev, false, 666 - "eldo1_1p8v", 667 - ELDO1_1P8V); 668 - gmin_subdevs[i].eldo1_sel_reg = gmin_get_var_int(dev, false, 669 - "eldo1_sel_reg", 670 - ELDO1_SEL_REG); 671 - gmin_subdevs[i].eldo1_ctrl_shift = gmin_get_var_int(dev, false, 672 - "eldo1_ctrl_shift", 673 - ELDO1_CTRL_SHIFT); 674 - gmin_subdevs[i].eldo2_1p8v = gmin_get_var_int(dev, false, 675 - "eldo2_1p8v", 676 - ELDO2_1P8V); 677 - gmin_subdevs[i].eldo2_sel_reg = gmin_get_var_int(dev, false, 678 - "eldo2_sel_reg", 679 - ELDO2_SEL_REG); 680 - gmin_subdevs[i].eldo2_ctrl_shift = gmin_get_var_int(dev, false, 681 - "eldo2_ctrl_shift", 682 - ELDO2_CTRL_SHIFT); 683 - gmin_subdevs[i].pwm_i2c_addr = power->addr; 
559 + gs->eldo1_1p8v = gmin_get_var_int(dev, false, 560 + "eldo1_1p8v", 561 + ELDO1_1P8V); 562 + gs->eldo1_sel_reg = gmin_get_var_int(dev, false, 563 + "eldo1_sel_reg", 564 + ELDO1_SEL_REG); 565 + gs->eldo1_ctrl_shift = gmin_get_var_int(dev, false, 566 + "eldo1_ctrl_shift", 567 + ELDO1_CTRL_SHIFT); 568 + gs->eldo2_1p8v = gmin_get_var_int(dev, false, 569 + "eldo2_1p8v", 570 + ELDO2_1P8V); 571 + gs->eldo2_sel_reg = gmin_get_var_int(dev, false, 572 + "eldo2_sel_reg", 573 + ELDO2_SEL_REG); 574 + gs->eldo2_ctrl_shift = gmin_get_var_int(dev, false, 575 + "eldo2_ctrl_shift", 576 + ELDO2_CTRL_SHIFT); 684 577 break; 685 578 686 579 default: 687 580 break; 688 581 } 689 582 690 - return &gmin_subdevs[i]; 583 + return 0; 691 584 } 692 585 693 586 static struct gmin_subdev *find_gmin_subdev(struct v4l2_subdev *subdev) ··· 696 591 for (i = 0; i < MAX_SUBDEVS; i++) 697 592 if (gmin_subdevs[i].subdev == subdev) 698 593 return &gmin_subdevs[i]; 699 - return gmin_subdev_add(subdev); 594 + return NULL; 595 + } 596 + 597 + static struct gmin_subdev *find_free_gmin_subdev_slot(void) 598 + { 599 + unsigned int i; 600 + 601 + for (i = 0; i < MAX_SUBDEVS; i++) 602 + if (gmin_subdevs[i].subdev == NULL) 603 + return &gmin_subdevs[i]; 604 + return NULL; 700 605 } 701 606 702 607 static int axp_regulator_set(struct device *dev, struct gmin_subdev *gs, ··· 815 700 { 816 701 struct gmin_subdev *gs = find_gmin_subdev(subdev); 817 702 int ret; 818 - struct device *dev; 819 - struct i2c_client *client = v4l2_get_subdevdata(subdev); 820 703 int value; 821 704 822 - dev = &client->dev; 823 - 824 - if (v1p8_gpio == V1P8_GPIO_UNSET) { 825 - v1p8_gpio = gmin_get_var_int(dev, true, 826 - "V1P8GPIO", V1P8_GPIO_NONE); 827 - if (v1p8_gpio != V1P8_GPIO_NONE) { 828 - pr_info("atomisp_gmin_platform: 1.8v power on GPIO %d\n", 829 - v1p8_gpio); 830 - ret = gpio_request(v1p8_gpio, "camera_v1p8_en"); 831 - if (!ret) 832 - ret = gpio_direction_output(v1p8_gpio, 0); 833 - if (ret) 834 - pr_err("V1P8 GPIO 
initialization failed\n"); 835 - } 705 + if (gs->v1p8_gpio >= 0) { 706 + pr_info("atomisp_gmin_platform: 1.8v power on GPIO %d\n", 707 + gs->v1p8_gpio); 708 + ret = gpio_request(gs->v1p8_gpio, "camera_v1p8_en"); 709 + if (!ret) 710 + ret = gpio_direction_output(gs->v1p8_gpio, 0); 711 + if (ret) 712 + pr_err("V1P8 GPIO initialization failed\n"); 836 713 } 837 714 838 715 if (!gs || gs->v1p8_on == on) 839 716 return 0; 840 717 gs->v1p8_on = on; 841 718 842 - if (v1p8_gpio >= 0) 843 - gpio_set_value(v1p8_gpio, on); 719 + if (gs->v1p8_gpio >= 0) 720 + gpio_set_value(gs->v1p8_gpio, on); 844 721 845 722 if (gs->v1p8_reg) { 846 723 regulator_set_voltage(gs->v1p8_reg, 1800000, 1800000); ··· 869 762 { 870 763 struct gmin_subdev *gs = find_gmin_subdev(subdev); 871 764 int ret; 872 - struct device *dev; 873 - struct i2c_client *client = v4l2_get_subdevdata(subdev); 874 765 int value; 875 766 876 - dev = &client->dev; 877 - 878 - if (v2p8_gpio == V2P8_GPIO_UNSET) { 879 - v2p8_gpio = gmin_get_var_int(dev, true, 880 - "V2P8GPIO", V2P8_GPIO_NONE); 881 - if (v2p8_gpio != V2P8_GPIO_NONE) { 882 - pr_info("atomisp_gmin_platform: 2.8v power on GPIO %d\n", 883 - v2p8_gpio); 884 - ret = gpio_request(v2p8_gpio, "camera_v2p8"); 885 - if (!ret) 886 - ret = gpio_direction_output(v2p8_gpio, 0); 887 - if (ret) 888 - pr_err("V2P8 GPIO initialization failed\n"); 889 - } 767 + if (gs->v2p8_gpio >= 0) { 768 + pr_info("atomisp_gmin_platform: 2.8v power on GPIO %d\n", 769 + gs->v2p8_gpio); 770 + ret = gpio_request(gs->v2p8_gpio, "camera_v2p8"); 771 + if (!ret) 772 + ret = gpio_direction_output(gs->v2p8_gpio, 0); 773 + if (ret) 774 + pr_err("V2P8 GPIO initialization failed\n"); 890 775 } 891 776 892 777 if (!gs || gs->v2p8_on == on) 893 778 return 0; 894 779 gs->v2p8_on = on; 895 780 896 - if (v2p8_gpio >= 0) 897 - gpio_set_value(v2p8_gpio, on); 781 + if (gs->v2p8_gpio >= 0) 782 + gpio_set_value(gs->v2p8_gpio, on); 898 783 899 784 if (gs->v2p8_reg) { 900 785 regulator_set_voltage(gs->v2p8_reg, 
2900000, 2900000); ··· 916 817 } 917 818 918 819 return -EINVAL; 820 + } 821 + 822 + static int gmin_acpi_pm_ctrl(struct v4l2_subdev *subdev, int on) 823 + { 824 + int ret = 0; 825 + struct gmin_subdev *gs = find_gmin_subdev(subdev); 826 + struct i2c_client *client = v4l2_get_subdevdata(subdev); 827 + struct acpi_device *adev = ACPI_COMPANION(&client->dev); 828 + 829 + /* Use the ACPI power management to control it */ 830 + on = !!on; 831 + if (gs->clock_on == on) 832 + return 0; 833 + 834 + dev_dbg(subdev->dev, "Setting power state to %s\n", 835 + on ? "on" : "off"); 836 + 837 + if (on) 838 + ret = acpi_device_set_power(adev, 839 + ACPI_STATE_D0); 840 + else 841 + ret = acpi_device_set_power(adev, 842 + ACPI_STATE_D3_COLD); 843 + 844 + if (!ret) 845 + gs->clock_on = on; 846 + else 847 + dev_err(subdev->dev, "Couldn't set power state to %s\n", 848 + on ? "on" : "off"); 849 + 850 + return ret; 919 851 } 920 852 921 853 static int gmin_flisclk_ctrl(struct v4l2_subdev *subdev, int on) ··· 1014 884 return NULL; 1015 885 } 1016 886 1017 - static struct camera_sensor_platform_data gmin_plat = { 887 + static struct camera_sensor_platform_data pmic_gmin_plat = { 1018 888 .gpio0_ctrl = gmin_gpio0_ctrl, 1019 889 .gpio1_ctrl = gmin_gpio1_ctrl, 1020 890 .v1p8_ctrl = gmin_v1p8_ctrl, ··· 1025 895 .get_vcm_ctrl = gmin_get_vcm_ctrl, 1026 896 }; 1027 897 898 + static struct camera_sensor_platform_data acpi_gmin_plat = { 899 + .gpio0_ctrl = gmin_gpio0_ctrl, 900 + .gpio1_ctrl = gmin_gpio1_ctrl, 901 + .v1p8_ctrl = gmin_acpi_pm_ctrl, 902 + .v2p8_ctrl = gmin_acpi_pm_ctrl, 903 + .v1p2_ctrl = gmin_acpi_pm_ctrl, 904 + .flisclk_ctrl = gmin_acpi_pm_ctrl, 905 + .csi_cfg = gmin_csi_cfg, 906 + .get_vcm_ctrl = gmin_get_vcm_ctrl, 907 + }; 908 + 1028 909 struct camera_sensor_platform_data *gmin_camera_platform_data( 1029 910 struct v4l2_subdev *subdev, 1030 911 enum atomisp_input_format csi_format, 1031 912 enum atomisp_bayer_order csi_bayer) 1032 913 { 1033 - struct gmin_subdev *gs = 
find_gmin_subdev(subdev); 914 + u8 pmic_i2c_addr = gmin_detect_pmic(subdev); 915 + struct gmin_subdev *gs; 1034 916 917 + gs = find_free_gmin_subdev_slot(); 918 + gs->subdev = subdev; 1035 919 gs->csi_fmt = csi_format; 1036 920 gs->csi_bayer = csi_bayer; 921 + gs->pwm_i2c_addr = pmic_i2c_addr; 1037 922 1038 - return &gmin_plat; 923 + gmin_subdev_add(gs); 924 + if (gs->pmc_clk) 925 + return &pmic_gmin_plat; 926 + else 927 + return &acpi_gmin_plat; 1039 928 } 1040 929 EXPORT_SYMBOL_GPL(gmin_camera_platform_data); 1041 930 ··· 1106 957 union acpi_object *obj, *cur = NULL; 1107 958 int i; 1108 959 960 + /* 961 + * The data reported by "CamClk" seems to be either 0 or 1 at the 962 + * _DSM table. 963 + * 964 + * At the ACPI tables we looked so far, this is not related to the 965 + * actual clock source for the sensor, which is given by the 966 + * _PR0 ACPI table. So, ignore it, as otherwise this will be 967 + * set to a wrong value. 968 + */ 969 + if (!strcmp(var, "CamClk")) 970 + return -EINVAL; 971 + 1109 972 obj = acpi_evaluate_dsm(handle, &atomisp_dsm_guid, 0, 0, NULL); 1110 973 if (!obj) { 1111 974 dev_info_once(dev, "Didn't find ACPI _DSM table.\n"); 1112 975 return -EINVAL; 1113 976 } 977 + 978 + /* Return on unexpected object type */ 979 + if (obj->type != ACPI_TYPE_PACKAGE) 980 + return -EINVAL; 1114 981 1115 982 #if 0 /* Just for debugging purposes */ 1116 983 for (i = 0; i < obj->package.count; i++) { ··· 1320 1155 * trying. The driver itself does direct calls to the PUNIT to manage 1321 1156 * ISP power. 1322 1157 */ 1323 - static void isp_pm_cap_fixup(struct pci_dev *dev) 1158 + static void isp_pm_cap_fixup(struct pci_dev *pdev) 1324 1159 { 1325 - dev_info(&dev->dev, "Disabling PCI power management on camera ISP\n"); 1326 - dev->pm_cap = 0; 1160 + dev_info(&pdev->dev, "Disabling PCI power management on camera ISP\n"); 1161 + pdev->pm_cap = 0; 1327 1162 } 1328 1163 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0f38, isp_pm_cap_fixup); 1329 1164
+1 -1
drivers/staging/media/atomisp/pci/atomisp_internal.h
··· 216 216 * ci device struct 217 217 */ 218 218 struct atomisp_device { 219 - struct pci_dev *pdev; 220 219 struct device *dev; 221 220 struct v4l2_device v4l2_dev; 222 221 struct media_device media_dev; 223 222 struct atomisp_platform_data *pdata; 224 223 void *mmu_l1_base; 224 + void __iomem *base; 225 225 const struct firmware *firmware; 226 226 227 227 struct pm_qos_request pm_qos;
+9 -10
drivers/staging/media/atomisp/pci/atomisp_ioctl.c
··· 549 549 550 550 strscpy(cap->driver, DRIVER, sizeof(cap->driver)); 551 551 strscpy(cap->card, CARD, sizeof(cap->card)); 552 - snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s", 553 - pci_name(isp->pdev)); 552 + snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s", dev_name(isp->dev)); 554 553 555 554 return 0; 556 555 } ··· 1634 1635 struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); 1635 1636 struct atomisp_sub_device *asd = pipe->asd; 1636 1637 struct atomisp_device *isp = video_get_drvdata(vdev); 1638 + struct pci_dev *pdev = to_pci_dev(isp->dev); 1637 1639 enum ia_css_pipe_id css_pipe_id; 1638 1640 unsigned int sensor_start_stream; 1639 1641 unsigned int wdt_duration = ATOMISP_ISP_TIMEOUT_DURATION; ··· 1844 1844 /* Enable the CSI interface on ANN B0/K0 */ 1845 1845 if (isp->media_dev.hw_revision >= ((ATOMISP_HW_REVISION_ISP2401 << 1846 1846 ATOMISP_HW_REVISION_SHIFT) | ATOMISP_HW_STEPPING_B0)) { 1847 - pci_write_config_word(isp->pdev, MRFLD_PCI_CSI_CONTROL, 1848 - isp->saved_regs.csi_control | 1849 - MRFLD_PCI_CSI_CONTROL_CSI_READY); 1847 + pci_write_config_word(pdev, MRFLD_PCI_CSI_CONTROL, 1848 + isp->saved_regs.csi_control | MRFLD_PCI_CSI_CONTROL_CSI_READY); 1850 1849 } 1851 1850 1852 1851 /* stream on the sensor */ ··· 1890 1891 { 1891 1892 struct video_device *vdev = video_devdata(file); 1892 1893 struct atomisp_device *isp = video_get_drvdata(vdev); 1894 + struct pci_dev *pdev = to_pci_dev(isp->dev); 1893 1895 struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); 1894 1896 struct atomisp_sub_device *asd = pipe->asd; 1895 1897 struct atomisp_video_pipe *capture_pipe = NULL; ··· 2076 2076 /* Disable the CSI interface on ANN B0/K0 */ 2077 2077 if (isp->media_dev.hw_revision >= ((ATOMISP_HW_REVISION_ISP2401 << 2078 2078 ATOMISP_HW_REVISION_SHIFT) | ATOMISP_HW_STEPPING_B0)) { 2079 - pci_write_config_word(isp->pdev, MRFLD_PCI_CSI_CONTROL, 2080 - isp->saved_regs.csi_control & 2081 - ~MRFLD_PCI_CSI_CONTROL_CSI_READY); 2079 + 
pci_write_config_word(pdev, MRFLD_PCI_CSI_CONTROL, 2080 + isp->saved_regs.csi_control & ~MRFLD_PCI_CSI_CONTROL_CSI_READY); 2082 2081 } 2083 2082 2084 2083 if (atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_LOW, false)) ··· 2110 2111 } 2111 2112 2112 2113 /* disable PUNIT/ISP acknowlede/handshake - SRSE=3 */ 2113 - pci_write_config_dword(isp->pdev, PCI_I_CONTROL, isp->saved_regs.i_control | 2114 - MRFLD_PCI_I_CONTROL_SRSE_RESET_MASK); 2114 + pci_write_config_dword(pdev, PCI_I_CONTROL, 2115 + isp->saved_regs.i_control | MRFLD_PCI_I_CONTROL_SRSE_RESET_MASK); 2115 2116 dev_err(isp->dev, "atomisp_reset"); 2116 2117 atomisp_reset(isp); 2117 2118 for (i = 0; i < isp->num_of_streams; i++) {
+125 -156
drivers/staging/media/atomisp/pci/atomisp_v4l2.c
··· 127 127 128 128 struct device *atomisp_dev; 129 129 130 - void __iomem *atomisp_io_base; 131 - 132 130 static const struct atomisp_freq_scaling_rule dfs_rules_merr[] = { 133 131 { 134 132 .width = ISP_FREQ_RULE_ANY, ··· 510 512 511 513 static int atomisp_save_iunit_reg(struct atomisp_device *isp) 512 514 { 513 - struct pci_dev *dev = isp->pdev; 515 + struct pci_dev *pdev = to_pci_dev(isp->dev); 514 516 515 517 dev_dbg(isp->dev, "%s\n", __func__); 516 518 517 - pci_read_config_word(dev, PCI_COMMAND, &isp->saved_regs.pcicmdsts); 519 + pci_read_config_word(pdev, PCI_COMMAND, &isp->saved_regs.pcicmdsts); 518 520 /* isp->saved_regs.ispmmadr is set from the atomisp_pci_probe() */ 519 - pci_read_config_dword(dev, PCI_MSI_CAPID, &isp->saved_regs.msicap); 520 - pci_read_config_dword(dev, PCI_MSI_ADDR, &isp->saved_regs.msi_addr); 521 - pci_read_config_word(dev, PCI_MSI_DATA, &isp->saved_regs.msi_data); 522 - pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &isp->saved_regs.intr); 523 - pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, 524 - &isp->saved_regs.interrupt_control); 521 + pci_read_config_dword(pdev, PCI_MSI_CAPID, &isp->saved_regs.msicap); 522 + pci_read_config_dword(pdev, PCI_MSI_ADDR, &isp->saved_regs.msi_addr); 523 + pci_read_config_word(pdev, PCI_MSI_DATA, &isp->saved_regs.msi_data); 524 + pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &isp->saved_regs.intr); 525 + pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &isp->saved_regs.interrupt_control); 525 526 526 - pci_read_config_dword(dev, MRFLD_PCI_PMCS, 527 - &isp->saved_regs.pmcs); 527 + pci_read_config_dword(pdev, MRFLD_PCI_PMCS, &isp->saved_regs.pmcs); 528 528 /* Ensure read/write combining is enabled. 
*/ 529 - pci_read_config_dword(dev, PCI_I_CONTROL, 530 - &isp->saved_regs.i_control); 529 + pci_read_config_dword(pdev, PCI_I_CONTROL, &isp->saved_regs.i_control); 531 530 isp->saved_regs.i_control |= 532 531 MRFLD_PCI_I_CONTROL_ENABLE_READ_COMBINING | 533 532 MRFLD_PCI_I_CONTROL_ENABLE_WRITE_COMBINING; 534 - pci_read_config_dword(dev, MRFLD_PCI_CSI_ACCESS_CTRL_VIOL, 533 + pci_read_config_dword(pdev, MRFLD_PCI_CSI_ACCESS_CTRL_VIOL, 535 534 &isp->saved_regs.csi_access_viol); 536 - pci_read_config_dword(dev, MRFLD_PCI_CSI_RCOMP_CONTROL, 535 + pci_read_config_dword(pdev, MRFLD_PCI_CSI_RCOMP_CONTROL, 537 536 &isp->saved_regs.csi_rcomp_config); 538 537 /* 539 538 * Hardware bugs require setting CSI_HS_OVR_CLK_GATE_ON_UPDATE. ··· 540 545 * is missed, and IUNIT can hang. 541 546 * For both issues, setting this bit is a workaround. 542 547 */ 543 - isp->saved_regs.csi_rcomp_config |= 544 - MRFLD_PCI_CSI_HS_OVR_CLK_GATE_ON_UPDATE; 545 - pci_read_config_dword(dev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL, 548 + isp->saved_regs.csi_rcomp_config |= MRFLD_PCI_CSI_HS_OVR_CLK_GATE_ON_UPDATE; 549 + pci_read_config_dword(pdev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL, 546 550 &isp->saved_regs.csi_afe_dly); 547 - pci_read_config_dword(dev, MRFLD_PCI_CSI_CONTROL, 551 + pci_read_config_dword(pdev, MRFLD_PCI_CSI_CONTROL, 548 552 &isp->saved_regs.csi_control); 549 553 if (isp->media_dev.hw_revision >= 550 554 (ATOMISP_HW_REVISION_ISP2401 << ATOMISP_HW_REVISION_SHIFT)) 551 - isp->saved_regs.csi_control |= 552 - MRFLD_PCI_CSI_CONTROL_PARPATHEN; 555 + isp->saved_regs.csi_control |= MRFLD_PCI_CSI_CONTROL_PARPATHEN; 553 556 /* 554 557 * On CHT CSI_READY bit should be enabled before stream on 555 558 */ 556 559 if (IS_CHT && (isp->media_dev.hw_revision >= ((ATOMISP_HW_REVISION_ISP2401 << 557 560 ATOMISP_HW_REVISION_SHIFT) | ATOMISP_HW_STEPPING_B0))) 558 - isp->saved_regs.csi_control |= 559 - MRFLD_PCI_CSI_CONTROL_CSI_READY; 560 - pci_read_config_dword(dev, MRFLD_PCI_CSI_AFE_RCOMP_CONTROL, 561 + 
isp->saved_regs.csi_control |= MRFLD_PCI_CSI_CONTROL_CSI_READY; 562 + pci_read_config_dword(pdev, MRFLD_PCI_CSI_AFE_RCOMP_CONTROL, 561 563 &isp->saved_regs.csi_afe_rcomp_config); 562 - pci_read_config_dword(dev, MRFLD_PCI_CSI_AFE_HS_CONTROL, 564 + pci_read_config_dword(pdev, MRFLD_PCI_CSI_AFE_HS_CONTROL, 563 565 &isp->saved_regs.csi_afe_hs_control); 564 - pci_read_config_dword(dev, MRFLD_PCI_CSI_DEADLINE_CONTROL, 566 + pci_read_config_dword(pdev, MRFLD_PCI_CSI_DEADLINE_CONTROL, 565 567 &isp->saved_regs.csi_deadline_control); 566 568 return 0; 567 569 } 568 570 569 571 static int __maybe_unused atomisp_restore_iunit_reg(struct atomisp_device *isp) 570 572 { 571 - struct pci_dev *dev = isp->pdev; 573 + struct pci_dev *pdev = to_pci_dev(isp->dev); 572 574 573 575 dev_dbg(isp->dev, "%s\n", __func__); 574 576 575 - pci_write_config_word(dev, PCI_COMMAND, isp->saved_regs.pcicmdsts); 576 - pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, 577 - isp->saved_regs.ispmmadr); 578 - pci_write_config_dword(dev, PCI_MSI_CAPID, isp->saved_regs.msicap); 579 - pci_write_config_dword(dev, PCI_MSI_ADDR, isp->saved_regs.msi_addr); 580 - pci_write_config_word(dev, PCI_MSI_DATA, isp->saved_regs.msi_data); 581 - pci_write_config_byte(dev, PCI_INTERRUPT_LINE, isp->saved_regs.intr); 582 - pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, 583 - isp->saved_regs.interrupt_control); 584 - pci_write_config_dword(dev, PCI_I_CONTROL, 585 - isp->saved_regs.i_control); 577 + pci_write_config_word(pdev, PCI_COMMAND, isp->saved_regs.pcicmdsts); 578 + pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, isp->saved_regs.ispmmadr); 579 + pci_write_config_dword(pdev, PCI_MSI_CAPID, isp->saved_regs.msicap); 580 + pci_write_config_dword(pdev, PCI_MSI_ADDR, isp->saved_regs.msi_addr); 581 + pci_write_config_word(pdev, PCI_MSI_DATA, isp->saved_regs.msi_data); 582 + pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, isp->saved_regs.intr); 583 + pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, 
isp->saved_regs.interrupt_control); 584 + pci_write_config_dword(pdev, PCI_I_CONTROL, isp->saved_regs.i_control); 586 585 587 - pci_write_config_dword(dev, MRFLD_PCI_PMCS, 588 - isp->saved_regs.pmcs); 589 - pci_write_config_dword(dev, MRFLD_PCI_CSI_ACCESS_CTRL_VIOL, 586 + pci_write_config_dword(pdev, MRFLD_PCI_PMCS, isp->saved_regs.pmcs); 587 + pci_write_config_dword(pdev, MRFLD_PCI_CSI_ACCESS_CTRL_VIOL, 590 588 isp->saved_regs.csi_access_viol); 591 - pci_write_config_dword(dev, MRFLD_PCI_CSI_RCOMP_CONTROL, 589 + pci_write_config_dword(pdev, MRFLD_PCI_CSI_RCOMP_CONTROL, 592 590 isp->saved_regs.csi_rcomp_config); 593 - pci_write_config_dword(dev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL, 591 + pci_write_config_dword(pdev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL, 594 592 isp->saved_regs.csi_afe_dly); 595 - pci_write_config_dword(dev, MRFLD_PCI_CSI_CONTROL, 593 + pci_write_config_dword(pdev, MRFLD_PCI_CSI_CONTROL, 596 594 isp->saved_regs.csi_control); 597 - pci_write_config_dword(dev, MRFLD_PCI_CSI_AFE_RCOMP_CONTROL, 595 + pci_write_config_dword(pdev, MRFLD_PCI_CSI_AFE_RCOMP_CONTROL, 598 596 isp->saved_regs.csi_afe_rcomp_config); 599 - pci_write_config_dword(dev, MRFLD_PCI_CSI_AFE_HS_CONTROL, 597 + pci_write_config_dword(pdev, MRFLD_PCI_CSI_AFE_HS_CONTROL, 600 598 isp->saved_regs.csi_afe_hs_control); 601 - pci_write_config_dword(dev, MRFLD_PCI_CSI_DEADLINE_CONTROL, 599 + pci_write_config_dword(pdev, MRFLD_PCI_CSI_DEADLINE_CONTROL, 602 600 isp->saved_regs.csi_deadline_control); 603 601 604 602 /* ··· 607 619 608 620 static int atomisp_mrfld_pre_power_down(struct atomisp_device *isp) 609 621 { 610 - struct pci_dev *dev = isp->pdev; 622 + struct pci_dev *pdev = to_pci_dev(isp->dev); 611 623 u32 irq; 612 624 unsigned long flags; 613 625 ··· 623 635 * So, here we need to check if there is any pending 624 636 * IRQ, if so, waiting for it to be served 625 637 */ 626 - pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq); 638 + pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq); 627 639 irq 
= irq & 1 << INTR_IIR; 628 - pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, irq); 640 + pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, irq); 629 641 630 - pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq); 642 + pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq); 631 643 if (!(irq & (1 << INTR_IIR))) 632 644 goto done; 633 645 ··· 640 652 spin_unlock_irqrestore(&isp->lock, flags); 641 653 return -EAGAIN; 642 654 } else { 643 - pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq); 655 + pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq); 644 656 irq = irq & 1 << INTR_IIR; 645 - pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, irq); 657 + pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, irq); 646 658 647 - pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq); 659 + pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq); 648 660 if (!(irq & (1 << INTR_IIR))) { 649 661 atomisp_css2_hw_store_32(MRFLD_INTR_ENABLE_REG, 0x0); 650 662 goto done; ··· 663 675 * to IIR. It could block subsequent interrupt messages. 664 676 * HW sighting:4568410. 
665 677 */ 666 - pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq); 678 + pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq); 667 679 irq &= ~(1 << INTR_IER); 668 - pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, irq); 680 + pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, irq); 669 681 670 - atomisp_msi_irq_uninit(isp, dev); 682 + atomisp_msi_irq_uninit(isp); 671 683 atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_LOW, true); 672 684 spin_unlock_irqrestore(&isp->lock, flags); 673 685 ··· 743 755 744 756 /* Wait until ISPSSPM0 bit[25:24] shows the right value */ 745 757 iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, MRFLD_ISPSSPM0, &tmp); 746 - tmp = (tmp & MRFLD_ISPSSPM0_ISPSSC_MASK) >> MRFLD_ISPSSPM0_ISPSSS_OFFSET; 758 + tmp = (tmp >> MRFLD_ISPSSPM0_ISPSSS_OFFSET) & MRFLD_ISPSSPM0_ISPSSC_MASK; 747 759 if (tmp == val) { 748 760 trace_ipu_cstate(enable); 749 761 return 0; ··· 766 778 /* Workaround for pmu_nc_set_power_state not ready in MRFLD */ 767 779 int atomisp_mrfld_power_down(struct atomisp_device *isp) 768 780 { 769 - // FIXME: at least with ISP2401, enabling this code causes the driver to break 770 - return 0 && atomisp_mrfld_power(isp, false); 781 + return atomisp_mrfld_power(isp, false); 771 782 } 772 783 773 784 /* Workaround for pmu_nc_set_power_state not ready in MRFLD */ 774 785 int atomisp_mrfld_power_up(struct atomisp_device *isp) 775 786 { 776 - // FIXME: at least with ISP2401, enabling this code causes the driver to break 777 - return 0 && atomisp_mrfld_power(isp, true); 787 + return atomisp_mrfld_power(isp, true); 778 788 } 779 789 780 790 int atomisp_runtime_suspend(struct device *dev) ··· 888 902 889 903 int atomisp_csi_lane_config(struct atomisp_device *isp) 890 904 { 905 + struct pci_dev *pdev = to_pci_dev(isp->dev); 891 906 static const struct { 892 907 u8 code; 893 908 u8 lanes[MRFLD_PORT_NUM]; ··· 990 1003 return -EINVAL; 991 1004 } 992 1005 993 - pci_read_config_dword(isp->pdev, MRFLD_PCI_CSI_CONTROL, &csi_control); 1006 + 
pci_read_config_dword(pdev, MRFLD_PCI_CSI_CONTROL, &csi_control); 994 1007 csi_control &= ~port_config_mask; 995 1008 csi_control |= (portconfigs[i].code << MRFLD_PORT_CONFIGCODE_SHIFT) 996 1009 | (portconfigs[i].lanes[0] ? 0 : (1 << MRFLD_PORT1_ENABLE_SHIFT)) ··· 1000 1013 | (((1 << portconfigs[i].lanes[1]) - 1) << MRFLD_PORT2_LANES_SHIFT) 1001 1014 | (((1 << portconfigs[i].lanes[2]) - 1) << port3_lanes_shift); 1002 1015 1003 - pci_write_config_dword(isp->pdev, MRFLD_PCI_CSI_CONTROL, csi_control); 1016 + pci_write_config_dword(pdev, MRFLD_PCI_CSI_CONTROL, csi_control); 1004 1017 1005 1018 dev_dbg(isp->dev, 1006 1019 "%s: the portconfig is %d-%d-%d, CSI_CONTROL is 0x%08X\n", ··· 1427 1440 * Check for flags the driver was compiled with against the PCI 1428 1441 * device. Always returns true on other than ISP 2400. 1429 1442 */ 1430 - static bool is_valid_device(struct pci_dev *dev, 1431 - const struct pci_device_id *id) 1443 + static bool is_valid_device(struct pci_dev *pdev, const struct pci_device_id *id) 1432 1444 { 1433 1445 unsigned int a0_max_id = 0; 1434 1446 const char *name; ··· 1451 1465 name = "Cherrytrail"; 1452 1466 break; 1453 1467 default: 1454 - dev_err(&dev->dev, "%s: unknown device ID %x04:%x04\n", 1468 + dev_err(&pdev->dev, "%s: unknown device ID %x04:%x04\n", 1455 1469 product, id->vendor, id->device); 1456 1470 return false; 1457 1471 } 1458 1472 1459 - if (dev->revision <= ATOMISP_PCI_REV_BYT_A0_MAX) { 1460 - dev_err(&dev->dev, "%s revision %d is not unsupported\n", 1461 - name, dev->revision); 1473 + if (pdev->revision <= ATOMISP_PCI_REV_BYT_A0_MAX) { 1474 + dev_err(&pdev->dev, "%s revision %d is not unsupported\n", 1475 + name, pdev->revision); 1462 1476 return false; 1463 1477 } 1464 1478 ··· 1469 1483 1470 1484 #if defined(ISP2400) 1471 1485 if (IS_ISP2401) { 1472 - dev_err(&dev->dev, "Support for %s (ISP2401) was disabled at compile time\n", 1486 + dev_err(&pdev->dev, "Support for %s (ISP2401) was disabled at compile time\n", 1473 1487 
name); 1474 1488 return false; 1475 1489 } 1476 1490 #else 1477 1491 if (!IS_ISP2401) { 1478 - dev_err(&dev->dev, "Support for %s (ISP2400) was disabled at compile time\n", 1492 + dev_err(&pdev->dev, "Support for %s (ISP2400) was disabled at compile time\n", 1479 1493 name); 1480 1494 return false; 1481 1495 } 1482 1496 #endif 1483 1497 1484 - dev_info(&dev->dev, "Detected %s version %d (ISP240%c) on %s\n", 1485 - name, dev->revision, 1486 - IS_ISP2401 ? '1' : '0', 1487 - product); 1498 + dev_info(&pdev->dev, "Detected %s version %d (ISP240%c) on %s\n", 1499 + name, pdev->revision, IS_ISP2401 ? '1' : '0', product); 1488 1500 1489 1501 return true; 1490 1502 } ··· 1522 1538 1523 1539 #define ATOM_ISP_PCI_BAR 0 1524 1540 1525 - static int atomisp_pci_probe(struct pci_dev *dev, 1526 - const struct pci_device_id *id) 1541 + static int atomisp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 1527 1542 { 1528 1543 const struct atomisp_platform_data *pdata; 1529 1544 struct atomisp_device *isp; 1530 1545 unsigned int start; 1531 - void __iomem *base; 1532 1546 int err, val; 1533 1547 u32 irq; 1534 1548 1535 - if (!is_valid_device(dev, id)) 1549 + if (!is_valid_device(pdev, id)) 1536 1550 return -ENODEV; 1537 1551 1538 1552 /* Pointer to struct device. 
*/ 1539 - atomisp_dev = &dev->dev; 1553 + atomisp_dev = &pdev->dev; 1540 1554 1541 1555 pdata = atomisp_get_platform_data(); 1542 1556 if (!pdata) 1543 - dev_warn(&dev->dev, "no platform data available\n"); 1557 + dev_warn(&pdev->dev, "no platform data available\n"); 1544 1558 1545 - err = pcim_enable_device(dev); 1559 + err = pcim_enable_device(pdev); 1546 1560 if (err) { 1547 - dev_err(&dev->dev, "Failed to enable CI ISP device (%d)\n", 1548 - err); 1561 + dev_err(&pdev->dev, "Failed to enable CI ISP device (%d)\n", err); 1549 1562 return err; 1550 1563 } 1551 1564 1552 - start = pci_resource_start(dev, ATOM_ISP_PCI_BAR); 1553 - dev_dbg(&dev->dev, "start: 0x%x\n", start); 1565 + start = pci_resource_start(pdev, ATOM_ISP_PCI_BAR); 1566 + dev_dbg(&pdev->dev, "start: 0x%x\n", start); 1554 1567 1555 - err = pcim_iomap_regions(dev, 1 << ATOM_ISP_PCI_BAR, pci_name(dev)); 1568 + err = pcim_iomap_regions(pdev, 1 << ATOM_ISP_PCI_BAR, pci_name(pdev)); 1556 1569 if (err) { 1557 - dev_err(&dev->dev, "Failed to I/O memory remapping (%d)\n", 1558 - err); 1570 + dev_err(&pdev->dev, "Failed to I/O memory remapping (%d)\n", err); 1559 1571 goto ioremap_fail; 1560 1572 } 1561 1573 1562 - base = pcim_iomap_table(dev)[ATOM_ISP_PCI_BAR]; 1563 - dev_dbg(&dev->dev, "base: %p\n", base); 1564 - 1565 - atomisp_io_base = base; 1566 - 1567 - dev_dbg(&dev->dev, "atomisp_io_base: %p\n", atomisp_io_base); 1568 - 1569 - isp = devm_kzalloc(&dev->dev, sizeof(struct atomisp_device), GFP_KERNEL); 1574 + isp = devm_kzalloc(&pdev->dev, sizeof(*isp), GFP_KERNEL); 1570 1575 if (!isp) { 1571 1576 err = -ENOMEM; 1572 1577 goto atomisp_dev_alloc_fail; 1573 1578 } 1574 - isp->pdev = dev; 1575 - isp->dev = &dev->dev; 1579 + 1580 + isp->dev = &pdev->dev; 1581 + isp->base = pcim_iomap_table(pdev)[ATOM_ISP_PCI_BAR]; 1576 1582 isp->sw_contex.power_state = ATOM_ISP_POWER_UP; 1577 1583 isp->saved_regs.ispmmadr = start; 1584 + 1585 + dev_dbg(&pdev->dev, "atomisp mmio base: %p\n", isp->base); 1578 1586 1579 1587 
rt_mutex_init(&isp->mutex); 1580 1588 mutex_init(&isp->streamoff_mutex); 1581 1589 spin_lock_init(&isp->lock); 1582 1590 1583 1591 /* This is not a true PCI device on SoC, so the delay is not needed. */ 1584 - isp->pdev->d3_delay = 0; 1592 + pdev->d3_delay = 0; 1593 + 1594 + pci_set_drvdata(pdev, isp); 1585 1595 1586 1596 switch (id->device & ATOMISP_PCI_DEVICE_SOC_MASK) { 1587 1597 case ATOMISP_PCI_DEVICE_SOC_MRFLD: ··· 1626 1648 * have specs yet for exactly how it varies. Default to 1627 1649 * BYT-CR but let provisioning set it via EFI variable 1628 1650 */ 1629 - isp->hpll_freq = gmin_get_var_int(&dev->dev, false, "HpllFreq", 1630 - HPLL_FREQ_2000MHZ); 1651 + isp->hpll_freq = gmin_get_var_int(&pdev->dev, false, "HpllFreq", HPLL_FREQ_2000MHZ); 1631 1652 1632 1653 /* 1633 1654 * for BYT/CHT we are put isp into D3cold to avoid pci registers access 1634 1655 * in power off. Set d3cold_delay to 0 since default 100ms is not 1635 1656 * necessary. 1636 1657 */ 1637 - isp->pdev->d3cold_delay = 0; 1658 + pdev->d3cold_delay = 0; 1638 1659 break; 1639 1660 case ATOMISP_PCI_DEVICE_SOC_ANN: 1640 1661 isp->media_dev.hw_revision = ( ··· 1643 1666 ATOMISP_HW_REVISION_ISP2401_LEGACY 1644 1667 #endif 1645 1668 << ATOMISP_HW_REVISION_SHIFT); 1646 - isp->media_dev.hw_revision |= isp->pdev->revision < 2 ? 1669 + isp->media_dev.hw_revision |= pdev->revision < 2 ? 1647 1670 ATOMISP_HW_STEPPING_A0 : ATOMISP_HW_STEPPING_B0; 1648 1671 isp->dfs = &dfs_config_merr; 1649 1672 isp->hpll_freq = HPLL_FREQ_1600MHZ; ··· 1656 1679 ATOMISP_HW_REVISION_ISP2401_LEGACY 1657 1680 #endif 1658 1681 << ATOMISP_HW_REVISION_SHIFT); 1659 - isp->media_dev.hw_revision |= isp->pdev->revision < 2 ? 1682 + isp->media_dev.hw_revision |= pdev->revision < 2 ? 
1660 1683 ATOMISP_HW_STEPPING_A0 : ATOMISP_HW_STEPPING_B0; 1661 1684 1662 1685 isp->dfs = &dfs_config_cht; 1663 - isp->pdev->d3cold_delay = 0; 1686 + pdev->d3cold_delay = 0; 1664 1687 1665 - iosf_mbi_read(CCK_PORT, MBI_REG_READ, CCK_FUSE_REG_0, &val); 1688 + iosf_mbi_read(BT_MBI_UNIT_CCK, MBI_REG_READ, CCK_FUSE_REG_0, &val); 1666 1689 switch (val & CCK_FUSE_HPLL_FREQ_MASK) { 1667 1690 case 0x00: 1668 1691 isp->hpll_freq = HPLL_FREQ_800MHZ; ··· 1675 1698 break; 1676 1699 default: 1677 1700 isp->hpll_freq = HPLL_FREQ_1600MHZ; 1678 - dev_warn(isp->dev, 1679 - "read HPLL from cck failed. Default to 1600 MHz.\n"); 1701 + dev_warn(&pdev->dev, "read HPLL from cck failed. Default to 1600 MHz.\n"); 1680 1702 } 1681 1703 break; 1682 1704 default: 1683 - dev_err(&dev->dev, "un-supported IUNIT device\n"); 1705 + dev_err(&pdev->dev, "un-supported IUNIT device\n"); 1684 1706 err = -ENODEV; 1685 1707 goto atomisp_dev_alloc_fail; 1686 1708 } 1687 1709 1688 - dev_info(&dev->dev, "ISP HPLL frequency base = %d MHz\n", 1689 - isp->hpll_freq); 1710 + dev_info(&pdev->dev, "ISP HPLL frequency base = %d MHz\n", isp->hpll_freq); 1690 1711 1691 1712 isp->max_isr_latency = ATOMISP_MAX_ISR_LATENCY; 1692 1713 ··· 1693 1718 isp->firmware = atomisp_load_firmware(isp); 1694 1719 if (!isp->firmware) { 1695 1720 err = -ENOENT; 1696 - dev_dbg(&dev->dev, "Firmware load failed\n"); 1721 + dev_dbg(&pdev->dev, "Firmware load failed\n"); 1697 1722 goto load_fw_fail; 1698 1723 } 1699 1724 1700 - err = sh_css_check_firmware_version(isp->dev, 1701 - isp->firmware->data); 1725 + err = sh_css_check_firmware_version(isp->dev, isp->firmware->data); 1702 1726 if (err) { 1703 - dev_dbg(&dev->dev, "Firmware version check failed\n"); 1727 + dev_dbg(&pdev->dev, "Firmware version check failed\n"); 1704 1728 goto fw_validation_fail; 1705 1729 } 1706 1730 } else { 1707 - dev_info(&dev->dev, "Firmware load will be deferred\n"); 1731 + dev_info(&pdev->dev, "Firmware load will be deferred\n"); 1708 1732 } 1709 1733 1710 - 
pci_set_master(dev); 1711 - pci_set_drvdata(dev, isp); 1734 + pci_set_master(pdev); 1712 1735 1713 - err = pci_enable_msi(dev); 1736 + err = pci_enable_msi(pdev); 1714 1737 if (err) { 1715 - dev_err(&dev->dev, "Failed to enable msi (%d)\n", err); 1738 + dev_err(&pdev->dev, "Failed to enable msi (%d)\n", err); 1716 1739 goto enable_msi_fail; 1717 1740 } 1718 1741 1719 - atomisp_msi_irq_init(isp, dev); 1742 + atomisp_msi_irq_init(isp); 1720 1743 1721 1744 cpu_latency_qos_add_request(&isp->pm_qos, PM_QOS_DEFAULT_VALUE); 1722 1745 ··· 1735 1762 * Workaround for imbalance data eye issue which is observed 1736 1763 * on TNG B0. 1737 1764 */ 1738 - pci_read_config_dword(dev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL, 1739 - &csi_afe_trim); 1765 + pci_read_config_dword(pdev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL, &csi_afe_trim); 1740 1766 csi_afe_trim &= ~((MRFLD_PCI_CSI_HSRXCLKTRIM_MASK << 1741 1767 MRFLD_PCI_CSI1_HSRXCLKTRIM_SHIFT) | 1742 1768 (MRFLD_PCI_CSI_HSRXCLKTRIM_MASK << ··· 1748 1776 MRFLD_PCI_CSI2_HSRXCLKTRIM_SHIFT) | 1749 1777 (MRFLD_PCI_CSI3_HSRXCLKTRIM << 1750 1778 MRFLD_PCI_CSI3_HSRXCLKTRIM_SHIFT); 1751 - pci_write_config_dword(dev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL, 1752 - csi_afe_trim); 1779 + pci_write_config_dword(pdev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL, csi_afe_trim); 1753 1780 } 1754 1781 1755 1782 err = atomisp_initialize_modules(isp); 1756 1783 if (err < 0) { 1757 - dev_err(&dev->dev, "atomisp_initialize_modules (%d)\n", err); 1784 + dev_err(&pdev->dev, "atomisp_initialize_modules (%d)\n", err); 1758 1785 goto initialize_modules_fail; 1759 1786 } 1760 1787 1761 1788 err = atomisp_register_entities(isp); 1762 1789 if (err < 0) { 1763 - dev_err(&dev->dev, "atomisp_register_entities failed (%d)\n", 1764 - err); 1790 + dev_err(&pdev->dev, "atomisp_register_entities failed (%d)\n", err); 1765 1791 goto register_entities_fail; 1766 1792 } 1767 1793 err = atomisp_create_pads_links(isp); ··· 1772 1802 /* save the iunit context only once after all the values are init'ed. 
*/ 1773 1803 atomisp_save_iunit_reg(isp); 1774 1804 1775 - pm_runtime_put_noidle(&dev->dev); 1776 - pm_runtime_allow(&dev->dev); 1805 + pm_runtime_put_noidle(&pdev->dev); 1806 + pm_runtime_allow(&pdev->dev); 1777 1807 1778 1808 hmm_init_mem_stat(repool_pgnr, dypool_enable, dypool_pgnr); 1779 1809 err = hmm_pool_register(repool_pgnr, HMM_POOL_TYPE_RESERVED); 1780 1810 if (err) { 1781 - dev_err(&dev->dev, "Failed to register reserved memory pool.\n"); 1811 + dev_err(&pdev->dev, "Failed to register reserved memory pool.\n"); 1782 1812 goto hmm_pool_fail; 1783 1813 } 1784 1814 1785 1815 /* Init ISP memory management */ 1786 1816 hmm_init(); 1787 1817 1788 - err = devm_request_threaded_irq(&dev->dev, dev->irq, 1818 + err = devm_request_threaded_irq(&pdev->dev, pdev->irq, 1789 1819 atomisp_isr, atomisp_isr_thread, 1790 1820 IRQF_SHARED, "isp_irq", isp); 1791 1821 if (err) { 1792 - dev_err(&dev->dev, "Failed to request irq (%d)\n", err); 1822 + dev_err(&pdev->dev, "Failed to request irq (%d)\n", err); 1793 1823 goto request_irq_fail; 1794 1824 } 1795 1825 ··· 1797 1827 if (!defer_fw_load) { 1798 1828 err = atomisp_css_load_firmware(isp); 1799 1829 if (err) { 1800 - dev_err(&dev->dev, "Failed to init css.\n"); 1830 + dev_err(&pdev->dev, "Failed to init css.\n"); 1801 1831 goto css_init_fail; 1802 1832 } 1803 1833 } else { 1804 - dev_dbg(&dev->dev, "Skip css init.\n"); 1834 + dev_dbg(&pdev->dev, "Skip css init.\n"); 1805 1835 } 1806 1836 /* Clear FW image from memory */ 1807 1837 release_firmware(isp->firmware); 1808 1838 isp->firmware = NULL; 1809 1839 isp->css_env.isp_css_fw.data = NULL; 1810 1840 1811 - atomisp_drvfs_init(&dev->driver->driver, isp); 1841 + atomisp_drvfs_init(isp); 1812 1842 1813 1843 return 0; 1814 1844 1815 1845 css_init_fail: 1816 - devm_free_irq(&dev->dev, dev->irq, isp); 1846 + devm_free_irq(&pdev->dev, pdev->irq, isp); 1817 1847 request_irq_fail: 1818 1848 hmm_cleanup(); 1819 1849 hmm_pool_unregister(HMM_POOL_TYPE_RESERVED); ··· 1826 1856 
atomisp_uninitialize_modules(isp); 1827 1857 initialize_modules_fail: 1828 1858 cpu_latency_qos_remove_request(&isp->pm_qos); 1829 - atomisp_msi_irq_uninit(isp, dev); 1830 - pci_disable_msi(dev); 1859 + atomisp_msi_irq_uninit(isp); 1860 + pci_disable_msi(pdev); 1831 1861 enable_msi_fail: 1832 1862 fw_validation_fail: 1833 1863 release_firmware(isp->firmware); ··· 1839 1869 * The following lines have been copied from atomisp suspend path 1840 1870 */ 1841 1871 1842 - pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq); 1872 + pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq); 1843 1873 irq = irq & 1 << INTR_IIR; 1844 - pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, irq); 1874 + pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, irq); 1845 1875 1846 - pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq); 1876 + pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq); 1847 1877 irq &= ~(1 << INTR_IER); 1848 - pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, irq); 1878 + pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, irq); 1849 1879 1850 - atomisp_msi_irq_uninit(isp, dev); 1880 + atomisp_msi_irq_uninit(isp); 1851 1881 1852 1882 atomisp_ospm_dphy_down(isp); 1853 1883 1854 1884 /* Address later when we worry about the ...field chips */ 1855 1885 if (IS_ENABLED(CONFIG_PM) && atomisp_mrfld_power_down(isp)) 1856 - dev_err(&dev->dev, "Failed to switch off ISP\n"); 1886 + dev_err(&pdev->dev, "Failed to switch off ISP\n"); 1857 1887 1858 1888 atomisp_dev_alloc_fail: 1859 - pcim_iounmap_regions(dev, 1 << ATOM_ISP_PCI_BAR); 1889 + pcim_iounmap_regions(pdev, 1 << ATOM_ISP_PCI_BAR); 1860 1890 1861 1891 ioremap_fail: 1862 1892 return err; 1863 1893 } 1864 1894 1865 - static void atomisp_pci_remove(struct pci_dev *dev) 1895 + static void atomisp_pci_remove(struct pci_dev *pdev) 1866 1896 { 1867 - struct atomisp_device *isp = (struct atomisp_device *) 1868 - pci_get_drvdata(dev); 1897 + struct atomisp_device *isp = pci_get_drvdata(pdev); 1869 1898 1870 - dev_info(&dev->dev, 
"Removing atomisp driver\n"); 1899 + dev_info(&pdev->dev, "Removing atomisp driver\n"); 1871 1900 1872 1901 atomisp_drvfs_exit(); 1873 1902 ··· 1875 1906 ia_css_unload_firmware(); 1876 1907 hmm_cleanup(); 1877 1908 1878 - pm_runtime_forbid(&dev->dev); 1879 - pm_runtime_get_noresume(&dev->dev); 1909 + pm_runtime_forbid(&pdev->dev); 1910 + pm_runtime_get_noresume(&pdev->dev); 1880 1911 cpu_latency_qos_remove_request(&isp->pm_qos); 1881 1912 1882 - atomisp_msi_irq_uninit(isp, dev); 1913 + atomisp_msi_irq_uninit(isp); 1883 1914 atomisp_unregister_entities(isp); 1884 1915 1885 1916 destroy_workqueue(isp->wdt_work_queue);
+14 -14
drivers/staging/media/atomisp/pci/base/refcount/src/refcount.c
··· 48 48 return NULL; 49 49 if (!myrefcount.items) { 50 50 ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR, 51 - "refcount_find_entry(): Ref count not initialized!\n"); 51 + "%s(): Ref count not initialized!\n", __func__); 52 52 return NULL; 53 53 } 54 54 ··· 73 73 74 74 if (size == 0) { 75 75 ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, 76 - "ia_css_refcount_init(): Size of 0 for Ref count init!\n"); 76 + "%s(): Size of 0 for Ref count init!\n", __func__); 77 77 return -EINVAL; 78 78 } 79 79 if (myrefcount.items) { 80 80 ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, 81 - "ia_css_refcount_init(): Ref count is already initialized\n"); 81 + "%s(): Ref count is already initialized\n", __func__); 82 82 return -EINVAL; 83 83 } 84 84 myrefcount.items = ··· 99 99 u32 i; 100 100 101 101 ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, 102 - "ia_css_refcount_uninit() entry\n"); 102 + "%s() entry\n", __func__); 103 103 for (i = 0; i < myrefcount.size; i++) { 104 104 /* driver verifier tool has issues with &arr[i] 105 105 and prefers arr + i; as these are actually equivalent ··· 120 120 myrefcount.items = NULL; 121 121 myrefcount.size = 0; 122 122 ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, 123 - "ia_css_refcount_uninit() leave\n"); 123 + "%s() leave\n", __func__); 124 124 } 125 125 126 126 ia_css_ptr ia_css_refcount_increment(s32 id, ia_css_ptr ptr) ··· 133 133 entry = refcount_find_entry(ptr, false); 134 134 135 135 ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, 136 - "ia_css_refcount_increment(%x) 0x%x\n", id, ptr); 136 + "%s(%x) 0x%x\n", __func__, id, ptr); 137 137 138 138 if (!entry) { 139 139 entry = refcount_find_entry(ptr, true); ··· 145 145 146 146 if (entry->id != id) { 147 147 ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR, 148 - "ia_css_refcount_increment(): Ref count IDS do not match!\n"); 148 + "%s(): Ref count IDS do not match!\n", __func__); 149 149 return mmgr_NULL; 150 150 } 151 151 ··· 165 165 struct ia_css_refcount_entry *entry; 166 166 167 167 ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, 168 - 
"ia_css_refcount_decrement(%x) 0x%x\n", id, ptr); 168 + "%s(%x) 0x%x\n", __func__, id, ptr); 169 169 170 170 if (ptr == mmgr_NULL) 171 171 return false; ··· 175 175 if (entry) { 176 176 if (entry->id != id) { 177 177 ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR, 178 - "ia_css_refcount_decrement(): Ref count IDS do not match!\n"); 178 + "%s(): Ref count IDS do not match!\n", __func__); 179 179 return false; 180 180 } 181 181 if (entry->count > 0) { ··· 225 225 u32 count = 0; 226 226 227 227 assert(clear_func_ptr); 228 - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_refcount_clear(%x)\n", 229 - id); 228 + ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s(%x)\n", 229 + __func__, id); 230 230 231 231 for (i = 0; i < myrefcount.size; i++) { 232 232 /* driver verifier tool has issues with &arr[i] ··· 236 236 entry = myrefcount.items + i; 237 237 if ((entry->data != mmgr_NULL) && (entry->id == id)) { 238 238 ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, 239 - "ia_css_refcount_clear: %x: 0x%x\n", 239 + "%s: %x: 0x%x\n", __func__, 240 240 id, entry->data); 241 241 if (clear_func_ptr) { 242 242 /* clear using provided function */ 243 243 clear_func_ptr(entry->data); 244 244 } else { 245 245 ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, 246 - "ia_css_refcount_clear: using hmm_free: no clear_func\n"); 246 + "%s: using hmm_free: no clear_func\n", __func__); 247 247 hmm_free(entry->data); 248 248 } 249 249 ··· 260 260 } 261 261 } 262 262 ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, 263 - "ia_css_refcount_clear(%x): cleared %d\n", id, 263 + "%s(%x): cleared %d\n", __func__, id, 264 264 count); 265 265 } 266 266
+3 -21
drivers/staging/media/atomisp/pci/hive_types.h
··· 52 52 typedef unsigned int hive_uint32; 53 53 typedef unsigned long long hive_uint64; 54 54 55 - /* by default assume 32 bit master port (both data and address) */ 56 - #ifndef HRT_DATA_WIDTH 57 - #define HRT_DATA_WIDTH 32 58 - #endif 59 - #ifndef HRT_ADDRESS_WIDTH 60 - #define HRT_ADDRESS_WIDTH 32 61 - #endif 62 - 55 + #define HRT_DATA_WIDTH 32 56 + #define HRT_ADDRESS_WIDTH 64 63 57 #define HRT_DATA_BYTES (HRT_DATA_WIDTH / 8) 64 58 #define HRT_ADDRESS_BYTES (HRT_ADDRESS_WIDTH / 8) 59 + #define SIZEOF_HRT_REG (HRT_DATA_WIDTH >> 3) 65 60 66 - #if HRT_DATA_WIDTH == 64 67 - typedef hive_uint64 hrt_data; 68 - #elif HRT_DATA_WIDTH == 32 69 61 typedef hive_uint32 hrt_data; 70 - #else 71 - #error data width not supported 72 - #endif 73 - 74 - #if HRT_ADDRESS_WIDTH == 64 75 62 typedef hive_uint64 hrt_address; 76 - #elif HRT_ADDRESS_WIDTH == 32 77 - typedef hive_uint32 hrt_address; 78 - #else 79 - #error adddres width not supported 80 - #endif 81 63 82 64 /* use 64 bit addresses in simulation, where possible */ 83 65 typedef hive_uint64 hive_sim_address;
+5 -5
drivers/staging/media/atomisp/pci/hmm/hmm.c
··· 735 735 736 736 void hmm_show_mem_stat(const char *func, const int line) 737 737 { 738 - trace_printk("tol_cnt=%d usr_size=%d res_size=%d res_cnt=%d sys_size=%d dyc_thr=%d dyc_size=%d.\n", 739 - hmm_mem_stat.tol_cnt, 740 - hmm_mem_stat.usr_size, hmm_mem_stat.res_size, 741 - hmm_mem_stat.res_cnt, hmm_mem_stat.sys_size, 742 - hmm_mem_stat.dyc_thr, hmm_mem_stat.dyc_size); 738 + pr_info("tol_cnt=%d usr_size=%d res_size=%d res_cnt=%d sys_size=%d dyc_thr=%d dyc_size=%d.\n", 739 + hmm_mem_stat.tol_cnt, 740 + hmm_mem_stat.usr_size, hmm_mem_stat.res_size, 741 + hmm_mem_stat.res_cnt, hmm_mem_stat.sys_size, 742 + hmm_mem_stat.dyc_thr, hmm_mem_stat.dyc_size); 743 743 } 744 744 745 745 void hmm_init_mem_stat(int res_pgnr, int dyc_en, int dyc_pgnr)
-302
drivers/staging/media/atomisp/pci/isp2400_system_global.h
··· 13 13 * more details. 14 14 */ 15 15 16 - #ifndef __SYSTEM_GLOBAL_H_INCLUDED__ 17 - #define __SYSTEM_GLOBAL_H_INCLUDED__ 18 - 19 - #include <hive_isp_css_defs.h> 20 - #include <type_support.h> 21 - 22 - /* 23 - * The longest allowed (uninteruptible) bus transfer, does not 24 - * take stalling into account 25 - */ 26 - #define HIVE_ISP_MAX_BURST_LENGTH 1024 27 - 28 - /* 29 - * Maximum allowed burst length in words for the ISP DMA 30 - */ 31 - #define ISP_DMA_MAX_BURST_LENGTH 128 32 - 33 - /* 34 - * Create a list of HAS and IS properties that defines the system 35 - * 36 - * The configuration assumes the following 37 - * - The system is hetereogeneous; Multiple cells and devices classes 38 - * - The cell and device instances are homogeneous, each device type 39 - * belongs to the same class 40 - * - Device instances supporting a subset of the class capabilities are 41 - * allowed 42 - * 43 - * We could manage different device classes through the enumerated 44 - * lists (C) or the use of classes (C++), but that is presently not 45 - * fully supported 46 - * 47 - * N.B. the 3 input formatters are of 2 different classess 48 - */ 49 - 50 16 #define USE_INPUT_SYSTEM_VERSION_2 51 - 52 - #define HAS_MMU_VERSION_2 53 - #define HAS_DMA_VERSION_2 54 - #define HAS_GDC_VERSION_2 55 - #define HAS_VAMEM_VERSION_2 56 - #define HAS_HMEM_VERSION_1 57 - #define HAS_BAMEM_VERSION_2 58 - #define HAS_IRQ_VERSION_2 59 - #define HAS_IRQ_MAP_VERSION_2 60 - #define HAS_INPUT_FORMATTER_VERSION_2 61 - /* 2401: HAS_INPUT_SYSTEM_VERSION_2401 */ 62 - #define HAS_INPUT_SYSTEM_VERSION_2 63 - #define HAS_BUFFERED_SENSOR 64 - #define HAS_FIFO_MONITORS_VERSION_2 65 - /* #define HAS_GP_REGS_VERSION_2 */ 66 - #define HAS_GP_DEVICE_VERSION_2 67 - #define HAS_GPIO_VERSION_1 68 - #define HAS_TIMED_CTRL_VERSION_1 69 - #define HAS_RX_VERSION_2 70 - 71 - #define DMA_DDR_TO_VAMEM_WORKAROUND 72 - #define DMA_DDR_TO_HMEM_WORKAROUND 73 - 74 - /* 75 - * Semi global. 
"HRT" is accessible from SP, but the HRT types do not fully apply 76 - */ 77 - #define HRT_VADDRESS_WIDTH 32 78 - //#define HRT_ADDRESS_WIDTH 64 /* Surprise, this is a local property*/ 79 - #define HRT_DATA_WIDTH 32 80 - 81 - #define SIZEOF_HRT_REG (HRT_DATA_WIDTH >> 3) 82 - #define HIVE_ISP_CTRL_DATA_BYTES (HIVE_ISP_CTRL_DATA_WIDTH / 8) 83 - 84 - /* The main bus connecting all devices */ 85 - #define HRT_BUS_WIDTH HIVE_ISP_CTRL_DATA_WIDTH 86 - #define HRT_BUS_BYTES HIVE_ISP_CTRL_DATA_BYTES 87 - 88 - /* per-frame parameter handling support */ 89 - #define SH_CSS_ENABLE_PER_FRAME_PARAMS 90 - 91 - typedef u32 hrt_bus_align_t; 92 - 93 - /* 94 - * Enumerate the devices, device access through the API is by ID, through the DLI by address 95 - * The enumerator terminators are used to size the wiring arrays and as an exception value. 96 - */ 97 - typedef enum { 98 - DDR0_ID = 0, 99 - N_DDR_ID 100 - } ddr_ID_t; 101 - 102 - typedef enum { 103 - ISP0_ID = 0, 104 - N_ISP_ID 105 - } isp_ID_t; 106 - 107 - typedef enum { 108 - SP0_ID = 0, 109 - N_SP_ID 110 - } sp_ID_t; 111 - 112 - typedef enum { 113 - MMU0_ID = 0, 114 - MMU1_ID, 115 - N_MMU_ID 116 - } mmu_ID_t; 117 - 118 - typedef enum { 119 - DMA0_ID = 0, 120 - N_DMA_ID 121 - } dma_ID_t; 122 - 123 - typedef enum { 124 - GDC0_ID = 0, 125 - GDC1_ID, 126 - N_GDC_ID 127 - } gdc_ID_t; 128 - 129 - #define N_GDC_ID_CPP 2 // this extra define is needed because we want to use it also in the preprocessor, and that doesn't work with enums. 
130 - 131 - typedef enum { 132 - VAMEM0_ID = 0, 133 - VAMEM1_ID, 134 - VAMEM2_ID, 135 - N_VAMEM_ID 136 - } vamem_ID_t; 137 - 138 - typedef enum { 139 - BAMEM0_ID = 0, 140 - N_BAMEM_ID 141 - } bamem_ID_t; 142 - 143 - typedef enum { 144 - HMEM0_ID = 0, 145 - N_HMEM_ID 146 - } hmem_ID_t; 147 - 148 - /* 149 - typedef enum { 150 - IRQ0_ID = 0, 151 - N_IRQ_ID 152 - } irq_ID_t; 153 - */ 154 - 155 - typedef enum { 156 - IRQ0_ID = 0, // GP IRQ block 157 - IRQ1_ID, // Input formatter 158 - IRQ2_ID, // input system 159 - IRQ3_ID, // input selector 160 - N_IRQ_ID 161 - } irq_ID_t; 162 - 163 - typedef enum { 164 - FIFO_MONITOR0_ID = 0, 165 - N_FIFO_MONITOR_ID 166 - } fifo_monitor_ID_t; 167 - 168 - /* 169 - * Deprecated: Since all gp_reg instances are different 170 - * and put in the address maps of other devices we cannot 171 - * enumerate them as that assumes the instrances are the 172 - * same. 173 - * 174 - * We define a single GP_DEVICE containing all gp_regs 175 - * w.r.t. a single base address 176 - * 177 - typedef enum { 178 - GP_REGS0_ID = 0, 179 - N_GP_REGS_ID 180 - } gp_regs_ID_t; 181 - */ 182 - typedef enum { 183 - GP_DEVICE0_ID = 0, 184 - N_GP_DEVICE_ID 185 - } gp_device_ID_t; 186 - 187 - typedef enum { 188 - GP_TIMER0_ID = 0, 189 - GP_TIMER1_ID, 190 - GP_TIMER2_ID, 191 - GP_TIMER3_ID, 192 - GP_TIMER4_ID, 193 - GP_TIMER5_ID, 194 - GP_TIMER6_ID, 195 - GP_TIMER7_ID, 196 - N_GP_TIMER_ID 197 - } gp_timer_ID_t; 198 - 199 - typedef enum { 200 - GPIO0_ID = 0, 201 - N_GPIO_ID 202 - } gpio_ID_t; 203 - 204 - typedef enum { 205 - TIMED_CTRL0_ID = 0, 206 - N_TIMED_CTRL_ID 207 - } timed_ctrl_ID_t; 208 - 209 - typedef enum { 210 - INPUT_FORMATTER0_ID = 0, 211 - INPUT_FORMATTER1_ID, 212 - INPUT_FORMATTER2_ID, 213 - INPUT_FORMATTER3_ID, 214 - N_INPUT_FORMATTER_ID 215 - } input_formatter_ID_t; 216 - 217 - /* The IF RST is outside the IF */ 218 - #define INPUT_FORMATTER0_SRST_OFFSET 0x0824 219 - #define INPUT_FORMATTER1_SRST_OFFSET 0x0624 220 - #define INPUT_FORMATTER2_SRST_OFFSET 
0x0424 221 - #define INPUT_FORMATTER3_SRST_OFFSET 0x0224 222 - 223 - #define INPUT_FORMATTER0_SRST_MASK 0x0001 224 - #define INPUT_FORMATTER1_SRST_MASK 0x0002 225 - #define INPUT_FORMATTER2_SRST_MASK 0x0004 226 - #define INPUT_FORMATTER3_SRST_MASK 0x0008 227 - 228 - typedef enum { 229 - INPUT_SYSTEM0_ID = 0, 230 - N_INPUT_SYSTEM_ID 231 - } input_system_ID_t; 232 - 233 - typedef enum { 234 - RX0_ID = 0, 235 - N_RX_ID 236 - } rx_ID_t; 237 - 238 - enum mipi_port_id { 239 - MIPI_PORT0_ID = 0, 240 - MIPI_PORT1_ID, 241 - MIPI_PORT2_ID, 242 - N_MIPI_PORT_ID 243 - }; 244 - 245 - #define N_RX_CHANNEL_ID 4 246 - 247 - /* Generic port enumeration with an internal port type ID */ 248 - typedef enum { 249 - CSI_PORT0_ID = 0, 250 - CSI_PORT1_ID, 251 - CSI_PORT2_ID, 252 - TPG_PORT0_ID, 253 - PRBS_PORT0_ID, 254 - FIFO_PORT0_ID, 255 - MEMORY_PORT0_ID, 256 - N_INPUT_PORT_ID 257 - } input_port_ID_t; 258 - 259 - typedef enum { 260 - CAPTURE_UNIT0_ID = 0, 261 - CAPTURE_UNIT1_ID, 262 - CAPTURE_UNIT2_ID, 263 - ACQUISITION_UNIT0_ID, 264 - DMA_UNIT0_ID, 265 - CTRL_UNIT0_ID, 266 - GPREGS_UNIT0_ID, 267 - FIFO_UNIT0_ID, 268 - IRQ_UNIT0_ID, 269 - N_SUB_SYSTEM_ID 270 - } sub_system_ID_t; 271 - 272 - #define N_CAPTURE_UNIT_ID 3 273 - #define N_ACQUISITION_UNIT_ID 1 274 - #define N_CTRL_UNIT_ID 1 275 - 276 - enum ia_css_isp_memories { 277 - IA_CSS_ISP_PMEM0 = 0, 278 - IA_CSS_ISP_DMEM0, 279 - IA_CSS_ISP_VMEM0, 280 - IA_CSS_ISP_VAMEM0, 281 - IA_CSS_ISP_VAMEM1, 282 - IA_CSS_ISP_VAMEM2, 283 - IA_CSS_ISP_HMEM0, 284 - IA_CSS_SP_DMEM0, 285 - IA_CSS_DDR, 286 - N_IA_CSS_MEMORIES 287 - }; 288 - 289 - #define IA_CSS_NUM_MEMORIES 9 290 - /* For driver compatibility */ 291 - #define N_IA_CSS_ISP_MEMORIES IA_CSS_NUM_MEMORIES 292 - #define IA_CSS_NUM_ISP_MEMORIES IA_CSS_NUM_MEMORIES 293 - 294 - #if 0 295 - typedef enum { 296 - dev_chn, /* device channels, external resource */ 297 - ext_mem, /* external memories */ 298 - int_mem, /* internal memories */ 299 - int_chn /* internal channels, user defined */ 300 - } 
resource_type_t; 301 - 302 - /* if this enum is extended with other memory resources, pls also extend the function resource_to_memptr() */ 303 - typedef enum { 304 - vied_nci_dev_chn_dma_ext0, 305 - int_mem_vmem0, 306 - int_mem_dmem0 307 - } resource_id_t; 308 - 309 - /* enum listing the different memories within a program group. 310 - This enum is used in the mem_ptr_t type */ 311 - typedef enum { 312 - buf_mem_invalid = 0, 313 - buf_mem_vmem_prog0, 314 - buf_mem_dmem_prog0 315 - } buf_mem_t; 316 - 317 - #endif 318 - #endif /* __SYSTEM_GLOBAL_H_INCLUDED__ */
-321
drivers/staging/media/atomisp/pci/isp2400_system_local.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - /* 3 - * Support for Intel Camera Imaging ISP subsystem. 4 - * Copyright (c) 2010-2015, Intel Corporation. 5 - * 6 - * This program is free software; you can redistribute it and/or modify it 7 - * under the terms and conditions of the GNU General Public License, 8 - * version 2, as published by the Free Software Foundation. 9 - * 10 - * This program is distributed in the hope it will be useful, but WITHOUT 11 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 - * more details. 14 - */ 15 - 16 - #ifndef __SYSTEM_LOCAL_H_INCLUDED__ 17 - #define __SYSTEM_LOCAL_H_INCLUDED__ 18 - 19 - #ifdef HRT_ISP_CSS_CUSTOM_HOST 20 - #ifndef HRT_USE_VIR_ADDRS 21 - #define HRT_USE_VIR_ADDRS 22 - #endif 23 - #endif 24 - 25 - #include "system_global.h" 26 - 27 - /* HRT assumes 32 by default (see Linux/include/hive_types.h), overrule it in case it is different */ 28 - #undef HRT_ADDRESS_WIDTH 29 - #define HRT_ADDRESS_WIDTH 64 /* Surprise, this is a local property */ 30 - 31 - /* This interface is deprecated */ 32 - #include "hive_types.h" 33 - 34 - /* 35 - * Cell specific address maps 36 - */ 37 - #if HRT_ADDRESS_WIDTH == 64 38 - 39 - #define GP_FIFO_BASE ((hrt_address)0x0000000000090104) /* This is NOT a base address */ 40 - 41 - /* DDR */ 42 - static const hrt_address DDR_BASE[N_DDR_ID] = { 43 - (hrt_address)0x0000000120000000ULL 44 - }; 45 - 46 - /* ISP */ 47 - static const hrt_address ISP_CTRL_BASE[N_ISP_ID] = { 48 - (hrt_address)0x0000000000020000ULL 49 - }; 50 - 51 - static const hrt_address ISP_DMEM_BASE[N_ISP_ID] = { 52 - (hrt_address)0x0000000000200000ULL 53 - }; 54 - 55 - static const hrt_address ISP_BAMEM_BASE[N_BAMEM_ID] = { 56 - (hrt_address)0x0000000000100000ULL 57 - }; 58 - 59 - static const hrt_address ISP_VAMEM_BASE[N_VAMEM_ID] = { 60 - (hrt_address)0x00000000001C0000ULL, 61 - (hrt_address)0x00000000001D0000ULL, 62 - 
(hrt_address)0x00000000001E0000ULL 63 - }; 64 - 65 - static const hrt_address ISP_HMEM_BASE[N_HMEM_ID] = { 66 - (hrt_address)0x00000000001F0000ULL 67 - }; 68 - 69 - /* SP */ 70 - static const hrt_address SP_CTRL_BASE[N_SP_ID] = { 71 - (hrt_address)0x0000000000010000ULL 72 - }; 73 - 74 - static const hrt_address SP_DMEM_BASE[N_SP_ID] = { 75 - (hrt_address)0x0000000000300000ULL 76 - }; 77 - 78 - static const hrt_address SP_PMEM_BASE[N_SP_ID] = { 79 - (hrt_address)0x00000000000B0000ULL 80 - }; 81 - 82 - /* MMU */ 83 - /* 84 - * MMU0_ID: The data MMU 85 - * MMU1_ID: The icache MMU 86 - */ 87 - static const hrt_address MMU_BASE[N_MMU_ID] = { 88 - (hrt_address)0x0000000000070000ULL, 89 - (hrt_address)0x00000000000A0000ULL 90 - }; 91 - 92 - /* DMA */ 93 - static const hrt_address DMA_BASE[N_DMA_ID] = { 94 - (hrt_address)0x0000000000040000ULL 95 - }; 96 - 97 - /* IRQ */ 98 - static const hrt_address IRQ_BASE[N_IRQ_ID] = { 99 - (hrt_address)0x0000000000000500ULL, 100 - (hrt_address)0x0000000000030A00ULL, 101 - (hrt_address)0x000000000008C000ULL, 102 - (hrt_address)0x0000000000090200ULL 103 - }; 104 - 105 - /* 106 - (hrt_address)0x0000000000000500ULL}; 107 - */ 108 - 109 - /* GDC */ 110 - static const hrt_address GDC_BASE[N_GDC_ID] = { 111 - (hrt_address)0x0000000000050000ULL, 112 - (hrt_address)0x0000000000060000ULL 113 - }; 114 - 115 - /* FIFO_MONITOR (not a subset of GP_DEVICE) */ 116 - static const hrt_address FIFO_MONITOR_BASE[N_FIFO_MONITOR_ID] = { 117 - (hrt_address)0x0000000000000000ULL 118 - }; 119 - 120 - /* 121 - static const hrt_address GP_REGS_BASE[N_GP_REGS_ID] = { 122 - (hrt_address)0x0000000000000000ULL}; 123 - 124 - static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = { 125 - (hrt_address)0x0000000000090000ULL}; 126 - */ 127 - 128 - /* GP_DEVICE (single base for all separate GP_REG instances) */ 129 - static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = { 130 - (hrt_address)0x0000000000000000ULL 131 - }; 132 - 133 - /*GP TIMER , all timer registers 
are inter-twined, 134 - * so, having multiple base addresses for 135 - * different timers does not help*/ 136 - static const hrt_address GP_TIMER_BASE = 137 - (hrt_address)0x0000000000000600ULL; 138 - /* GPIO */ 139 - static const hrt_address GPIO_BASE[N_GPIO_ID] = { 140 - (hrt_address)0x0000000000000400ULL 141 - }; 142 - 143 - /* TIMED_CTRL */ 144 - static const hrt_address TIMED_CTRL_BASE[N_TIMED_CTRL_ID] = { 145 - (hrt_address)0x0000000000000100ULL 146 - }; 147 - 148 - /* INPUT_FORMATTER */ 149 - static const hrt_address INPUT_FORMATTER_BASE[N_INPUT_FORMATTER_ID] = { 150 - (hrt_address)0x0000000000030000ULL, 151 - (hrt_address)0x0000000000030200ULL, 152 - (hrt_address)0x0000000000030400ULL, 153 - (hrt_address)0x0000000000030600ULL 154 - }; /* memcpy() */ 155 - 156 - /* INPUT_SYSTEM */ 157 - static const hrt_address INPUT_SYSTEM_BASE[N_INPUT_SYSTEM_ID] = { 158 - (hrt_address)0x0000000000080000ULL 159 - }; 160 - 161 - /* (hrt_address)0x0000000000081000ULL, */ /* capture A */ 162 - /* (hrt_address)0x0000000000082000ULL, */ /* capture B */ 163 - /* (hrt_address)0x0000000000083000ULL, */ /* capture C */ 164 - /* (hrt_address)0x0000000000084000ULL, */ /* Acquisition */ 165 - /* (hrt_address)0x0000000000085000ULL, */ /* DMA */ 166 - /* (hrt_address)0x0000000000089000ULL, */ /* ctrl */ 167 - /* (hrt_address)0x000000000008A000ULL, */ /* GP regs */ 168 - /* (hrt_address)0x000000000008B000ULL, */ /* FIFO */ 169 - /* (hrt_address)0x000000000008C000ULL, */ /* IRQ */ 170 - 171 - /* RX, the MIPI lane control regs start at offset 0 */ 172 - static const hrt_address RX_BASE[N_RX_ID] = { 173 - (hrt_address)0x0000000000080100ULL 174 - }; 175 - 176 - #elif HRT_ADDRESS_WIDTH == 32 177 - 178 - #define GP_FIFO_BASE ((hrt_address)0x00090104) /* This is NOT a base address */ 179 - 180 - /* DDR : Attention, this value not defined in 32-bit */ 181 - static const hrt_address DDR_BASE[N_DDR_ID] = { 182 - (hrt_address)0x00000000UL 183 - }; 184 - 185 - /* ISP */ 186 - static const hrt_address 
ISP_CTRL_BASE[N_ISP_ID] = { 187 - (hrt_address)0x00020000UL 188 - }; 189 - 190 - static const hrt_address ISP_DMEM_BASE[N_ISP_ID] = { 191 - (hrt_address)0x00200000UL 192 - }; 193 - 194 - static const hrt_address ISP_BAMEM_BASE[N_BAMEM_ID] = { 195 - (hrt_address)0x100000UL 196 - }; 197 - 198 - static const hrt_address ISP_VAMEM_BASE[N_VAMEM_ID] = { 199 - (hrt_address)0xffffffffUL, 200 - (hrt_address)0xffffffffUL, 201 - (hrt_address)0xffffffffUL 202 - }; 203 - 204 - static const hrt_address ISP_HMEM_BASE[N_HMEM_ID] = { 205 - (hrt_address)0xffffffffUL 206 - }; 207 - 208 - /* SP */ 209 - static const hrt_address SP_CTRL_BASE[N_SP_ID] = { 210 - (hrt_address)0x00010000UL 211 - }; 212 - 213 - static const hrt_address SP_DMEM_BASE[N_SP_ID] = { 214 - (hrt_address)0x00300000UL 215 - }; 216 - 217 - static const hrt_address SP_PMEM_BASE[N_SP_ID] = { 218 - (hrt_address)0x000B0000UL 219 - }; 220 - 221 - /* MMU */ 222 - /* 223 - * MMU0_ID: The data MMU 224 - * MMU1_ID: The icache MMU 225 - */ 226 - static const hrt_address MMU_BASE[N_MMU_ID] = { 227 - (hrt_address)0x00070000UL, 228 - (hrt_address)0x000A0000UL 229 - }; 230 - 231 - /* DMA */ 232 - static const hrt_address DMA_BASE[N_DMA_ID] = { 233 - (hrt_address)0x00040000UL 234 - }; 235 - 236 - /* IRQ */ 237 - static const hrt_address IRQ_BASE[N_IRQ_ID] = { 238 - (hrt_address)0x00000500UL, 239 - (hrt_address)0x00030A00UL, 240 - (hrt_address)0x0008C000UL, 241 - (hrt_address)0x00090200UL 242 - }; 243 - 244 - /* 245 - (hrt_address)0x00000500UL}; 246 - */ 247 - 248 - /* GDC */ 249 - static const hrt_address GDC_BASE[N_GDC_ID] = { 250 - (hrt_address)0x00050000UL, 251 - (hrt_address)0x00060000UL 252 - }; 253 - 254 - /* FIFO_MONITOR (not a subset of GP_DEVICE) */ 255 - static const hrt_address FIFO_MONITOR_BASE[N_FIFO_MONITOR_ID] = { 256 - (hrt_address)0x00000000UL 257 - }; 258 - 259 - /* 260 - static const hrt_address GP_REGS_BASE[N_GP_REGS_ID] = { 261 - (hrt_address)0x00000000UL}; 262 - 263 - static const hrt_address 
GP_DEVICE_BASE[N_GP_DEVICE_ID] = { 264 - (hrt_address)0x00090000UL}; 265 - */ 266 - 267 - /* GP_DEVICE (single base for all separate GP_REG instances) */ 268 - static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = { 269 - (hrt_address)0x00000000UL 270 - }; 271 - 272 - /*GP TIMER , all timer registers are inter-twined, 273 - * so, having multiple base addresses for 274 - * different timers does not help*/ 275 - static const hrt_address GP_TIMER_BASE = 276 - (hrt_address)0x00000600UL; 277 - 278 - /* GPIO */ 279 - static const hrt_address GPIO_BASE[N_GPIO_ID] = { 280 - (hrt_address)0x00000400UL 281 - }; 282 - 283 - /* TIMED_CTRL */ 284 - static const hrt_address TIMED_CTRL_BASE[N_TIMED_CTRL_ID] = { 285 - (hrt_address)0x00000100UL 286 - }; 287 - 288 - /* INPUT_FORMATTER */ 289 - static const hrt_address INPUT_FORMATTER_BASE[N_INPUT_FORMATTER_ID] = { 290 - (hrt_address)0x00030000UL, 291 - (hrt_address)0x00030200UL, 292 - (hrt_address)0x00030400UL 293 - }; 294 - 295 - /* (hrt_address)0x00030600UL, */ /* memcpy() */ 296 - 297 - /* INPUT_SYSTEM */ 298 - static const hrt_address INPUT_SYSTEM_BASE[N_INPUT_SYSTEM_ID] = { 299 - (hrt_address)0x00080000UL 300 - }; 301 - 302 - /* (hrt_address)0x00081000UL, */ /* capture A */ 303 - /* (hrt_address)0x00082000UL, */ /* capture B */ 304 - /* (hrt_address)0x00083000UL, */ /* capture C */ 305 - /* (hrt_address)0x00084000UL, */ /* Acquisition */ 306 - /* (hrt_address)0x00085000UL, */ /* DMA */ 307 - /* (hrt_address)0x00089000UL, */ /* ctrl */ 308 - /* (hrt_address)0x0008A000UL, */ /* GP regs */ 309 - /* (hrt_address)0x0008B000UL, */ /* FIFO */ 310 - /* (hrt_address)0x0008C000UL, */ /* IRQ */ 311 - 312 - /* RX, the MIPI lane control regs start at offset 0 */ 313 - static const hrt_address RX_BASE[N_RX_ID] = { 314 - (hrt_address)0x00080100UL 315 - }; 316 - 317 - #else 318 - #error "system_local.h: HRT_ADDRESS_WIDTH must be one of {32,64}" 319 - #endif 320 - 321 - #endif /* __SYSTEM_LOCAL_H_INCLUDED__ */
+2 -410
drivers/staging/media/atomisp/pci/isp2401_system_global.h
··· 13 13 * more details. 14 14 */ 15 15 16 - #ifndef __SYSTEM_GLOBAL_H_INCLUDED__ 17 - #define __SYSTEM_GLOBAL_H_INCLUDED__ 18 - 19 - #include <hive_isp_css_defs.h> 20 - #include <type_support.h> 21 - 22 - /* 23 - * The longest allowed (uninteruptible) bus transfer, does not 24 - * take stalling into account 25 - */ 26 - #define HIVE_ISP_MAX_BURST_LENGTH 1024 27 - 28 - /* 29 - * Maximum allowed burst length in words for the ISP DMA 30 - * This value is set to 2 to prevent the ISP DMA from blocking 31 - * the bus for too long; as the input system can only buffer 32 - * 2 lines on Moorefield and Cherrytrail, the input system buffers 33 - * may overflow if blocked for too long (BZ 2726). 34 - */ 35 - #define ISP_DMA_MAX_BURST_LENGTH 2 36 - 37 - /* 38 - * Create a list of HAS and IS properties that defines the system 39 - * 40 - * The configuration assumes the following 41 - * - The system is hetereogeneous; Multiple cells and devices classes 42 - * - The cell and device instances are homogeneous, each device type 43 - * belongs to the same class 44 - * - Device instances supporting a subset of the class capabilities are 45 - * allowed 46 - * 47 - * We could manage different device classes through the enumerated 48 - * lists (C) or the use of classes (C++), but that is presently not 49 - * fully supported 50 - * 51 - * N.B. 
the 3 input formatters are of 2 different classess 52 - */ 53 - 54 - #define USE_INPUT_SYSTEM_VERSION_2401 55 - 56 - #define HAS_MMU_VERSION_2 57 - #define HAS_DMA_VERSION_2 58 - #define HAS_GDC_VERSION_2 59 - #define HAS_VAMEM_VERSION_2 60 - #define HAS_HMEM_VERSION_1 61 - #define HAS_BAMEM_VERSION_2 62 - #define HAS_IRQ_VERSION_2 63 - #define HAS_IRQ_MAP_VERSION_2 64 - #define HAS_INPUT_FORMATTER_VERSION_2 65 - /* 2401: HAS_INPUT_SYSTEM_VERSION_3 */ 66 - /* 2400: HAS_INPUT_SYSTEM_VERSION_2 */ 67 - #define HAS_INPUT_SYSTEM_VERSION_2 68 - #define HAS_INPUT_SYSTEM_VERSION_2401 69 - #define HAS_BUFFERED_SENSOR 70 - #define HAS_FIFO_MONITORS_VERSION_2 71 - /* #define HAS_GP_REGS_VERSION_2 */ 72 - #define HAS_GP_DEVICE_VERSION_2 73 - #define HAS_GPIO_VERSION_1 74 - #define HAS_TIMED_CTRL_VERSION_1 75 - #define HAS_RX_VERSION_2 76 16 #define HAS_NO_INPUT_FORMATTER 77 - /*#define HAS_NO_PACKED_RAW_PIXELS*/ 78 - /*#define HAS_NO_DVS_6AXIS_CONFIG_UPDATE*/ 79 - 80 - #define DMA_DDR_TO_VAMEM_WORKAROUND 81 - #define DMA_DDR_TO_HMEM_WORKAROUND 82 - 83 - /* 84 - * Semi global. "HRT" is accessible from SP, but 85 - * the HRT types do not fully apply 86 - */ 87 - #define HRT_VADDRESS_WIDTH 32 88 - /* Surprise, this is a local property*/ 89 - /*#define HRT_ADDRESS_WIDTH 64 */ 90 - #define HRT_DATA_WIDTH 32 91 - 92 - #define SIZEOF_HRT_REG (HRT_DATA_WIDTH >> 3) 93 - #define HIVE_ISP_CTRL_DATA_BYTES (HIVE_ISP_CTRL_DATA_WIDTH / 8) 94 - 95 - /* The main bus connecting all devices */ 96 - #define HRT_BUS_WIDTH HIVE_ISP_CTRL_DATA_WIDTH 97 - #define HRT_BUS_BYTES HIVE_ISP_CTRL_DATA_BYTES 98 - 17 + #define USE_INPUT_SYSTEM_VERSION_2401 18 + #define HAS_INPUT_SYSTEM_VERSION_2401 99 19 #define CSI2P_DISABLE_ISYS2401_ONLINE_MODE 100 - 101 - /* per-frame parameter handling support */ 102 - #define SH_CSS_ENABLE_PER_FRAME_PARAMS 103 - 104 - typedef u32 hrt_bus_align_t; 105 - 106 - /* 107 - * Enumerate the devices, device access through the API is by ID, 108 - * through the DLI by address. 
The enumerator terminators are used 109 - * to size the wiring arrays and as an exception value. 110 - */ 111 - typedef enum { 112 - DDR0_ID = 0, 113 - N_DDR_ID 114 - } ddr_ID_t; 115 - 116 - typedef enum { 117 - ISP0_ID = 0, 118 - N_ISP_ID 119 - } isp_ID_t; 120 - 121 - typedef enum { 122 - SP0_ID = 0, 123 - N_SP_ID 124 - } sp_ID_t; 125 - 126 - typedef enum { 127 - MMU0_ID = 0, 128 - MMU1_ID, 129 - N_MMU_ID 130 - } mmu_ID_t; 131 - 132 - typedef enum { 133 - DMA0_ID = 0, 134 - N_DMA_ID 135 - } dma_ID_t; 136 - 137 - typedef enum { 138 - GDC0_ID = 0, 139 - GDC1_ID, 140 - N_GDC_ID 141 - } gdc_ID_t; 142 - 143 - /* this extra define is needed because we want to use it also 144 - in the preprocessor, and that doesn't work with enums. 145 - */ 146 - #define N_GDC_ID_CPP 2 147 - 148 - typedef enum { 149 - VAMEM0_ID = 0, 150 - VAMEM1_ID, 151 - VAMEM2_ID, 152 - N_VAMEM_ID 153 - } vamem_ID_t; 154 - 155 - typedef enum { 156 - BAMEM0_ID = 0, 157 - N_BAMEM_ID 158 - } bamem_ID_t; 159 - 160 - typedef enum { 161 - HMEM0_ID = 0, 162 - N_HMEM_ID 163 - } hmem_ID_t; 164 - 165 - typedef enum { 166 - ISYS_IRQ0_ID = 0, /* port a */ 167 - ISYS_IRQ1_ID, /* port b */ 168 - ISYS_IRQ2_ID, /* port c */ 169 - N_ISYS_IRQ_ID 170 - } isys_irq_ID_t; 171 - 172 - typedef enum { 173 - IRQ0_ID = 0, /* GP IRQ block */ 174 - IRQ1_ID, /* Input formatter */ 175 - IRQ2_ID, /* input system */ 176 - IRQ3_ID, /* input selector */ 177 - N_IRQ_ID 178 - } irq_ID_t; 179 - 180 - typedef enum { 181 - FIFO_MONITOR0_ID = 0, 182 - N_FIFO_MONITOR_ID 183 - } fifo_monitor_ID_t; 184 - 185 - /* 186 - * Deprecated: Since all gp_reg instances are different 187 - * and put in the address maps of other devices we cannot 188 - * enumerate them as that assumes the instrances are the 189 - * same. 190 - * 191 - * We define a single GP_DEVICE containing all gp_regs 192 - * w.r.t. 
a single base address 193 - * 194 - typedef enum { 195 - GP_REGS0_ID = 0, 196 - N_GP_REGS_ID 197 - } gp_regs_ID_t; 198 - */ 199 - typedef enum { 200 - GP_DEVICE0_ID = 0, 201 - N_GP_DEVICE_ID 202 - } gp_device_ID_t; 203 - 204 - typedef enum { 205 - GP_TIMER0_ID = 0, 206 - GP_TIMER1_ID, 207 - GP_TIMER2_ID, 208 - GP_TIMER3_ID, 209 - GP_TIMER4_ID, 210 - GP_TIMER5_ID, 211 - GP_TIMER6_ID, 212 - GP_TIMER7_ID, 213 - N_GP_TIMER_ID 214 - } gp_timer_ID_t; 215 - 216 - typedef enum { 217 - GPIO0_ID = 0, 218 - N_GPIO_ID 219 - } gpio_ID_t; 220 - 221 - typedef enum { 222 - TIMED_CTRL0_ID = 0, 223 - N_TIMED_CTRL_ID 224 - } timed_ctrl_ID_t; 225 - 226 - typedef enum { 227 - INPUT_FORMATTER0_ID = 0, 228 - INPUT_FORMATTER1_ID, 229 - INPUT_FORMATTER2_ID, 230 - INPUT_FORMATTER3_ID, 231 - N_INPUT_FORMATTER_ID 232 - } input_formatter_ID_t; 233 - 234 - /* The IF RST is outside the IF */ 235 - #define INPUT_FORMATTER0_SRST_OFFSET 0x0824 236 - #define INPUT_FORMATTER1_SRST_OFFSET 0x0624 237 - #define INPUT_FORMATTER2_SRST_OFFSET 0x0424 238 - #define INPUT_FORMATTER3_SRST_OFFSET 0x0224 239 - 240 - #define INPUT_FORMATTER0_SRST_MASK 0x0001 241 - #define INPUT_FORMATTER1_SRST_MASK 0x0002 242 - #define INPUT_FORMATTER2_SRST_MASK 0x0004 243 - #define INPUT_FORMATTER3_SRST_MASK 0x0008 244 - 245 - typedef enum { 246 - INPUT_SYSTEM0_ID = 0, 247 - N_INPUT_SYSTEM_ID 248 - } input_system_ID_t; 249 - 250 - typedef enum { 251 - RX0_ID = 0, 252 - N_RX_ID 253 - } rx_ID_t; 254 - 255 - enum mipi_port_id { 256 - MIPI_PORT0_ID = 0, 257 - MIPI_PORT1_ID, 258 - MIPI_PORT2_ID, 259 - N_MIPI_PORT_ID 260 - }; 261 - 262 - #define N_RX_CHANNEL_ID 4 263 - 264 - /* Generic port enumeration with an internal port type ID */ 265 - typedef enum { 266 - CSI_PORT0_ID = 0, 267 - CSI_PORT1_ID, 268 - CSI_PORT2_ID, 269 - TPG_PORT0_ID, 270 - PRBS_PORT0_ID, 271 - FIFO_PORT0_ID, 272 - MEMORY_PORT0_ID, 273 - N_INPUT_PORT_ID 274 - } input_port_ID_t; 275 - 276 - typedef enum { 277 - CAPTURE_UNIT0_ID = 0, 278 - CAPTURE_UNIT1_ID, 279 - 
CAPTURE_UNIT2_ID, 280 - ACQUISITION_UNIT0_ID, 281 - DMA_UNIT0_ID, 282 - CTRL_UNIT0_ID, 283 - GPREGS_UNIT0_ID, 284 - FIFO_UNIT0_ID, 285 - IRQ_UNIT0_ID, 286 - N_SUB_SYSTEM_ID 287 - } sub_system_ID_t; 288 - 289 - #define N_CAPTURE_UNIT_ID 3 290 - #define N_ACQUISITION_UNIT_ID 1 291 - #define N_CTRL_UNIT_ID 1 292 - 293 - /* 294 - * Input-buffer Controller. 295 - */ 296 - typedef enum { 297 - IBUF_CTRL0_ID = 0, /* map to ISYS2401_IBUF_CNTRL_A */ 298 - IBUF_CTRL1_ID, /* map to ISYS2401_IBUF_CNTRL_B */ 299 - IBUF_CTRL2_ID, /* map ISYS2401_IBUF_CNTRL_C */ 300 - N_IBUF_CTRL_ID 301 - } ibuf_ctrl_ID_t; 302 - /* end of Input-buffer Controller */ 303 - 304 - /* 305 - * Stream2MMIO. 306 - */ 307 - typedef enum { 308 - STREAM2MMIO0_ID = 0, /* map to ISYS2401_S2M_A */ 309 - STREAM2MMIO1_ID, /* map to ISYS2401_S2M_B */ 310 - STREAM2MMIO2_ID, /* map to ISYS2401_S2M_C */ 311 - N_STREAM2MMIO_ID 312 - } stream2mmio_ID_t; 313 - 314 - typedef enum { 315 - /* 316 - * Stream2MMIO 0 has 8 SIDs that are indexed by 317 - * [STREAM2MMIO_SID0_ID...STREAM2MMIO_SID7_ID]. 318 - * 319 - * Stream2MMIO 1 has 4 SIDs that are indexed by 320 - * [STREAM2MMIO_SID0_ID...TREAM2MMIO_SID3_ID]. 321 - * 322 - * Stream2MMIO 2 has 4 SIDs that are indexed by 323 - * [STREAM2MMIO_SID0_ID...STREAM2MMIO_SID3_ID]. 324 - */ 325 - STREAM2MMIO_SID0_ID = 0, 326 - STREAM2MMIO_SID1_ID, 327 - STREAM2MMIO_SID2_ID, 328 - STREAM2MMIO_SID3_ID, 329 - STREAM2MMIO_SID4_ID, 330 - STREAM2MMIO_SID5_ID, 331 - STREAM2MMIO_SID6_ID, 332 - STREAM2MMIO_SID7_ID, 333 - N_STREAM2MMIO_SID_ID 334 - } stream2mmio_sid_ID_t; 335 - /* end of Stream2MMIO */ 336 - 337 - /** 338 - * Input System 2401: CSI-MIPI recevier. 
339 - */ 340 - typedef enum { 341 - CSI_RX_BACKEND0_ID = 0, /* map to ISYS2401_MIPI_BE_A */ 342 - CSI_RX_BACKEND1_ID, /* map to ISYS2401_MIPI_BE_B */ 343 - CSI_RX_BACKEND2_ID, /* map to ISYS2401_MIPI_BE_C */ 344 - N_CSI_RX_BACKEND_ID 345 - } csi_rx_backend_ID_t; 346 - 347 - typedef enum { 348 - CSI_RX_FRONTEND0_ID = 0, /* map to ISYS2401_CSI_RX_A */ 349 - CSI_RX_FRONTEND1_ID, /* map to ISYS2401_CSI_RX_B */ 350 - CSI_RX_FRONTEND2_ID, /* map to ISYS2401_CSI_RX_C */ 351 - #define N_CSI_RX_FRONTEND_ID (CSI_RX_FRONTEND2_ID + 1) 352 - } csi_rx_frontend_ID_t; 353 - 354 - typedef enum { 355 - CSI_RX_DLANE0_ID = 0, /* map to DLANE0 in CSI RX */ 356 - CSI_RX_DLANE1_ID, /* map to DLANE1 in CSI RX */ 357 - CSI_RX_DLANE2_ID, /* map to DLANE2 in CSI RX */ 358 - CSI_RX_DLANE3_ID, /* map to DLANE3 in CSI RX */ 359 - N_CSI_RX_DLANE_ID 360 - } csi_rx_fe_dlane_ID_t; 361 - /* end of CSI-MIPI receiver */ 362 - 363 - typedef enum { 364 - ISYS2401_DMA0_ID = 0, 365 - N_ISYS2401_DMA_ID 366 - } isys2401_dma_ID_t; 367 - 368 - /** 369 - * Pixel-generator. ("system_global.h") 370 - */ 371 - typedef enum { 372 - PIXELGEN0_ID = 0, 373 - PIXELGEN1_ID, 374 - PIXELGEN2_ID, 375 - N_PIXELGEN_ID 376 - } pixelgen_ID_t; 377 - /* end of pixel-generator. 
("system_global.h") */ 378 - 379 - typedef enum { 380 - INPUT_SYSTEM_CSI_PORT0_ID = 0, 381 - INPUT_SYSTEM_CSI_PORT1_ID, 382 - INPUT_SYSTEM_CSI_PORT2_ID, 383 - 384 - INPUT_SYSTEM_PIXELGEN_PORT0_ID, 385 - INPUT_SYSTEM_PIXELGEN_PORT1_ID, 386 - INPUT_SYSTEM_PIXELGEN_PORT2_ID, 387 - 388 - N_INPUT_SYSTEM_INPUT_PORT_ID 389 - } input_system_input_port_ID_t; 390 - 391 - #define N_INPUT_SYSTEM_CSI_PORT 3 392 - 393 - typedef enum { 394 - ISYS2401_DMA_CHANNEL_0 = 0, 395 - ISYS2401_DMA_CHANNEL_1, 396 - ISYS2401_DMA_CHANNEL_2, 397 - ISYS2401_DMA_CHANNEL_3, 398 - ISYS2401_DMA_CHANNEL_4, 399 - ISYS2401_DMA_CHANNEL_5, 400 - ISYS2401_DMA_CHANNEL_6, 401 - ISYS2401_DMA_CHANNEL_7, 402 - ISYS2401_DMA_CHANNEL_8, 403 - ISYS2401_DMA_CHANNEL_9, 404 - ISYS2401_DMA_CHANNEL_10, 405 - ISYS2401_DMA_CHANNEL_11, 406 - N_ISYS2401_DMA_CHANNEL 407 - } isys2401_dma_channel; 408 - 409 - enum ia_css_isp_memories { 410 - IA_CSS_ISP_PMEM0 = 0, 411 - IA_CSS_ISP_DMEM0, 412 - IA_CSS_ISP_VMEM0, 413 - IA_CSS_ISP_VAMEM0, 414 - IA_CSS_ISP_VAMEM1, 415 - IA_CSS_ISP_VAMEM2, 416 - IA_CSS_ISP_HMEM0, 417 - IA_CSS_SP_DMEM0, 418 - IA_CSS_DDR, 419 - N_IA_CSS_MEMORIES 420 - }; 421 - 422 - #define IA_CSS_NUM_MEMORIES 9 423 - /* For driver compatibility */ 424 - #define N_IA_CSS_ISP_MEMORIES IA_CSS_NUM_MEMORIES 425 - #define IA_CSS_NUM_ISP_MEMORIES IA_CSS_NUM_MEMORIES 426 - 427 - #endif /* __SYSTEM_GLOBAL_H_INCLUDED__ */
-402
drivers/staging/media/atomisp/pci/isp2401_system_local.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - /* 3 - * Support for Intel Camera Imaging ISP subsystem. 4 - * Copyright (c) 2015, Intel Corporation. 5 - * 6 - * This program is free software; you can redistribute it and/or modify it 7 - * under the terms and conditions of the GNU General Public License, 8 - * version 2, as published by the Free Software Foundation. 9 - * 10 - * This program is distributed in the hope it will be useful, but WITHOUT 11 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 - * more details. 14 - */ 15 - 16 - #ifndef __SYSTEM_LOCAL_H_INCLUDED__ 17 - #define __SYSTEM_LOCAL_H_INCLUDED__ 18 - 19 - #ifdef HRT_ISP_CSS_CUSTOM_HOST 20 - #ifndef HRT_USE_VIR_ADDRS 21 - #define HRT_USE_VIR_ADDRS 22 - #endif 23 - #endif 24 - 25 - #include "system_global.h" 26 - 27 - #define HRT_ADDRESS_WIDTH 64 /* Surprise, this is a local property */ 28 - 29 - /* This interface is deprecated */ 30 - #include "hive_types.h" 31 - 32 - /* 33 - * Cell specific address maps 34 - */ 35 - #if HRT_ADDRESS_WIDTH == 64 36 - 37 - #define GP_FIFO_BASE ((hrt_address)0x0000000000090104) /* This is NOT a base address */ 38 - 39 - /* DDR */ 40 - static const hrt_address DDR_BASE[N_DDR_ID] = { 41 - 0x0000000120000000ULL 42 - }; 43 - 44 - /* ISP */ 45 - static const hrt_address ISP_CTRL_BASE[N_ISP_ID] = { 46 - 0x0000000000020000ULL 47 - }; 48 - 49 - static const hrt_address ISP_DMEM_BASE[N_ISP_ID] = { 50 - 0x0000000000200000ULL 51 - }; 52 - 53 - static const hrt_address ISP_BAMEM_BASE[N_BAMEM_ID] = { 54 - 0x0000000000100000ULL 55 - }; 56 - 57 - static const hrt_address ISP_VAMEM_BASE[N_VAMEM_ID] = { 58 - 0x00000000001C0000ULL, 59 - 0x00000000001D0000ULL, 60 - 0x00000000001E0000ULL 61 - }; 62 - 63 - static const hrt_address ISP_HMEM_BASE[N_HMEM_ID] = { 64 - 0x00000000001F0000ULL 65 - }; 66 - 67 - /* SP */ 68 - static const hrt_address SP_CTRL_BASE[N_SP_ID] = { 69 - 
0x0000000000010000ULL 70 - }; 71 - 72 - static const hrt_address SP_DMEM_BASE[N_SP_ID] = { 73 - 0x0000000000300000ULL 74 - }; 75 - 76 - /* MMU */ 77 - /* 78 - * MMU0_ID: The data MMU 79 - * MMU1_ID: The icache MMU 80 - */ 81 - static const hrt_address MMU_BASE[N_MMU_ID] = { 82 - 0x0000000000070000ULL, 83 - 0x00000000000A0000ULL 84 - }; 85 - 86 - /* DMA */ 87 - static const hrt_address DMA_BASE[N_DMA_ID] = { 88 - 0x0000000000040000ULL 89 - }; 90 - 91 - static const hrt_address ISYS2401_DMA_BASE[N_ISYS2401_DMA_ID] = { 92 - 0x00000000000CA000ULL 93 - }; 94 - 95 - /* IRQ */ 96 - static const hrt_address IRQ_BASE[N_IRQ_ID] = { 97 - 0x0000000000000500ULL, 98 - 0x0000000000030A00ULL, 99 - 0x000000000008C000ULL, 100 - 0x0000000000090200ULL 101 - }; 102 - 103 - /* 104 - 0x0000000000000500ULL}; 105 - */ 106 - 107 - /* GDC */ 108 - static const hrt_address GDC_BASE[N_GDC_ID] = { 109 - 0x0000000000050000ULL, 110 - 0x0000000000060000ULL 111 - }; 112 - 113 - /* FIFO_MONITOR (not a subset of GP_DEVICE) */ 114 - static const hrt_address FIFO_MONITOR_BASE[N_FIFO_MONITOR_ID] = { 115 - 0x0000000000000000ULL 116 - }; 117 - 118 - /* 119 - static const hrt_address GP_REGS_BASE[N_GP_REGS_ID] = { 120 - 0x0000000000000000ULL}; 121 - 122 - static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = { 123 - 0x0000000000090000ULL}; 124 - */ 125 - 126 - /* GP_DEVICE (single base for all separate GP_REG instances) */ 127 - static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = { 128 - 0x0000000000000000ULL 129 - }; 130 - 131 - /*GP TIMER , all timer registers are inter-twined, 132 - * so, having multiple base addresses for 133 - * different timers does not help*/ 134 - static const hrt_address GP_TIMER_BASE = 135 - (hrt_address)0x0000000000000600ULL; 136 - 137 - /* GPIO */ 138 - static const hrt_address GPIO_BASE[N_GPIO_ID] = { 139 - 0x0000000000000400ULL 140 - }; 141 - 142 - /* TIMED_CTRL */ 143 - static const hrt_address TIMED_CTRL_BASE[N_TIMED_CTRL_ID] = { 144 - 0x0000000000000100ULL 145 - 
}; 146 - 147 - /* INPUT_FORMATTER */ 148 - static const hrt_address INPUT_FORMATTER_BASE[N_INPUT_FORMATTER_ID] = { 149 - 0x0000000000030000ULL, 150 - 0x0000000000030200ULL, 151 - 0x0000000000030400ULL, 152 - 0x0000000000030600ULL 153 - }; /* memcpy() */ 154 - 155 - /* INPUT_SYSTEM */ 156 - static const hrt_address INPUT_SYSTEM_BASE[N_INPUT_SYSTEM_ID] = { 157 - 0x0000000000080000ULL 158 - }; 159 - 160 - /* 0x0000000000081000ULL, */ /* capture A */ 161 - /* 0x0000000000082000ULL, */ /* capture B */ 162 - /* 0x0000000000083000ULL, */ /* capture C */ 163 - /* 0x0000000000084000ULL, */ /* Acquisition */ 164 - /* 0x0000000000085000ULL, */ /* DMA */ 165 - /* 0x0000000000089000ULL, */ /* ctrl */ 166 - /* 0x000000000008A000ULL, */ /* GP regs */ 167 - /* 0x000000000008B000ULL, */ /* FIFO */ 168 - /* 0x000000000008C000ULL, */ /* IRQ */ 169 - 170 - /* RX, the MIPI lane control regs start at offset 0 */ 171 - static const hrt_address RX_BASE[N_RX_ID] = { 172 - 0x0000000000080100ULL 173 - }; 174 - 175 - /* IBUF_CTRL, part of the Input System 2401 */ 176 - static const hrt_address IBUF_CTRL_BASE[N_IBUF_CTRL_ID] = { 177 - 0x00000000000C1800ULL, /* ibuf controller A */ 178 - 0x00000000000C3800ULL, /* ibuf controller B */ 179 - 0x00000000000C5800ULL /* ibuf controller C */ 180 - }; 181 - 182 - /* ISYS IRQ Controllers, part of the Input System 2401 */ 183 - static const hrt_address ISYS_IRQ_BASE[N_ISYS_IRQ_ID] = { 184 - 0x00000000000C1400ULL, /* port a */ 185 - 0x00000000000C3400ULL, /* port b */ 186 - 0x00000000000C5400ULL /* port c */ 187 - }; 188 - 189 - /* CSI FE, part of the Input System 2401 */ 190 - static const hrt_address CSI_RX_FE_CTRL_BASE[N_CSI_RX_FRONTEND_ID] = { 191 - 0x00000000000C0400ULL, /* csi fe controller A */ 192 - 0x00000000000C2400ULL, /* csi fe controller B */ 193 - 0x00000000000C4400ULL /* csi fe controller C */ 194 - }; 195 - 196 - /* CSI BE, part of the Input System 2401 */ 197 - static const hrt_address CSI_RX_BE_CTRL_BASE[N_CSI_RX_BACKEND_ID] = { 198 - 
0x00000000000C0800ULL, /* csi be controller A */ 199 - 0x00000000000C2800ULL, /* csi be controller B */ 200 - 0x00000000000C4800ULL /* csi be controller C */ 201 - }; 202 - 203 - /* PIXEL Generator, part of the Input System 2401 */ 204 - static const hrt_address PIXELGEN_CTRL_BASE[N_PIXELGEN_ID] = { 205 - 0x00000000000C1000ULL, /* pixel gen controller A */ 206 - 0x00000000000C3000ULL, /* pixel gen controller B */ 207 - 0x00000000000C5000ULL /* pixel gen controller C */ 208 - }; 209 - 210 - /* Stream2MMIO, part of the Input System 2401 */ 211 - static const hrt_address STREAM2MMIO_CTRL_BASE[N_STREAM2MMIO_ID] = { 212 - 0x00000000000C0C00ULL, /* stream2mmio controller A */ 213 - 0x00000000000C2C00ULL, /* stream2mmio controller B */ 214 - 0x00000000000C4C00ULL /* stream2mmio controller C */ 215 - }; 216 - #elif HRT_ADDRESS_WIDTH == 32 217 - 218 - #define GP_FIFO_BASE ((hrt_address)0x00090104) /* This is NOT a base address */ 219 - 220 - /* DDR : Attention, this value not defined in 32-bit */ 221 - static const hrt_address DDR_BASE[N_DDR_ID] = { 222 - 0x00000000UL 223 - }; 224 - 225 - /* ISP */ 226 - static const hrt_address ISP_CTRL_BASE[N_ISP_ID] = { 227 - 0x00020000UL 228 - }; 229 - 230 - static const hrt_address ISP_DMEM_BASE[N_ISP_ID] = { 231 - 0xffffffffUL 232 - }; 233 - 234 - static const hrt_address ISP_BAMEM_BASE[N_BAMEM_ID] = { 235 - 0xffffffffUL 236 - }; 237 - 238 - static const hrt_address ISP_VAMEM_BASE[N_VAMEM_ID] = { 239 - 0xffffffffUL, 240 - 0xffffffffUL, 241 - 0xffffffffUL 242 - }; 243 - 244 - static const hrt_address ISP_HMEM_BASE[N_HMEM_ID] = { 245 - 0xffffffffUL 246 - }; 247 - 248 - /* SP */ 249 - static const hrt_address SP_CTRL_BASE[N_SP_ID] = { 250 - 0x00010000UL 251 - }; 252 - 253 - static const hrt_address SP_DMEM_BASE[N_SP_ID] = { 254 - 0x00300000UL 255 - }; 256 - 257 - /* MMU */ 258 - /* 259 - * MMU0_ID: The data MMU 260 - * MMU1_ID: The icache MMU 261 - */ 262 - static const hrt_address MMU_BASE[N_MMU_ID] = { 263 - 0x00070000UL, 264 - 
0x000A0000UL 265 - }; 266 - 267 - /* DMA */ 268 - static const hrt_address DMA_BASE[N_DMA_ID] = { 269 - 0x00040000UL 270 - }; 271 - 272 - static const hrt_address ISYS2401_DMA_BASE[N_ISYS2401_DMA_ID] = { 273 - 0x000CA000UL 274 - }; 275 - 276 - /* IRQ */ 277 - static const hrt_address IRQ_BASE[N_IRQ_ID] = { 278 - 0x00000500UL, 279 - 0x00030A00UL, 280 - 0x0008C000UL, 281 - 0x00090200UL 282 - }; 283 - 284 - /* 285 - 0x00000500UL}; 286 - */ 287 - 288 - /* GDC */ 289 - static const hrt_address GDC_BASE[N_GDC_ID] = { 290 - 0x00050000UL, 291 - 0x00060000UL 292 - }; 293 - 294 - /* FIFO_MONITOR (not a subset of GP_DEVICE) */ 295 - static const hrt_address FIFO_MONITOR_BASE[N_FIFO_MONITOR_ID] = { 296 - 0x00000000UL 297 - }; 298 - 299 - /* 300 - static const hrt_address GP_REGS_BASE[N_GP_REGS_ID] = { 301 - 0x00000000UL}; 302 - 303 - static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = { 304 - 0x00090000UL}; 305 - */ 306 - 307 - /* GP_DEVICE (single base for all separate GP_REG instances) */ 308 - static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = { 309 - 0x00000000UL 310 - }; 311 - 312 - /*GP TIMER , all timer registers are inter-twined, 313 - * so, having multiple base addresses for 314 - * different timers does not help*/ 315 - static const hrt_address GP_TIMER_BASE = 316 - (hrt_address)0x00000600UL; 317 - /* GPIO */ 318 - static const hrt_address GPIO_BASE[N_GPIO_ID] = { 319 - 0x00000400UL 320 - }; 321 - 322 - /* TIMED_CTRL */ 323 - static const hrt_address TIMED_CTRL_BASE[N_TIMED_CTRL_ID] = { 324 - 0x00000100UL 325 - }; 326 - 327 - /* INPUT_FORMATTER */ 328 - static const hrt_address INPUT_FORMATTER_BASE[N_INPUT_FORMATTER_ID] = { 329 - 0x00030000UL, 330 - 0x00030200UL, 331 - 0x00030400UL 332 - }; 333 - 334 - /* 0x00030600UL, */ /* memcpy() */ 335 - 336 - /* INPUT_SYSTEM */ 337 - static const hrt_address INPUT_SYSTEM_BASE[N_INPUT_SYSTEM_ID] = { 338 - 0x00080000UL 339 - }; 340 - 341 - /* 0x00081000UL, */ /* capture A */ 342 - /* 0x00082000UL, */ /* capture B */ 
343 - /* 0x00083000UL, */ /* capture C */ 344 - /* 0x00084000UL, */ /* Acquisition */ 345 - /* 0x00085000UL, */ /* DMA */ 346 - /* 0x00089000UL, */ /* ctrl */ 347 - /* 0x0008A000UL, */ /* GP regs */ 348 - /* 0x0008B000UL, */ /* FIFO */ 349 - /* 0x0008C000UL, */ /* IRQ */ 350 - 351 - /* RX, the MIPI lane control regs start at offset 0 */ 352 - static const hrt_address RX_BASE[N_RX_ID] = { 353 - 0x00080100UL 354 - }; 355 - 356 - /* IBUF_CTRL, part of the Input System 2401 */ 357 - static const hrt_address IBUF_CTRL_BASE[N_IBUF_CTRL_ID] = { 358 - 0x000C1800UL, /* ibuf controller A */ 359 - 0x000C3800UL, /* ibuf controller B */ 360 - 0x000C5800UL /* ibuf controller C */ 361 - }; 362 - 363 - /* ISYS IRQ Controllers, part of the Input System 2401 */ 364 - static const hrt_address ISYS_IRQ_BASE[N_ISYS_IRQ_ID] = { 365 - 0x000C1400ULL, /* port a */ 366 - 0x000C3400ULL, /* port b */ 367 - 0x000C5400ULL /* port c */ 368 - }; 369 - 370 - /* CSI FE, part of the Input System 2401 */ 371 - static const hrt_address CSI_RX_FE_CTRL_BASE[N_CSI_RX_FRONTEND_ID] = { 372 - 0x000C0400UL, /* csi fe controller A */ 373 - 0x000C2400UL, /* csi fe controller B */ 374 - 0x000C4400UL /* csi fe controller C */ 375 - }; 376 - 377 - /* CSI BE, part of the Input System 2401 */ 378 - static const hrt_address CSI_RX_FE_CTRL_BASE[N_CSI_RX_BACKEND_ID] = { 379 - 0x000C0800UL, /* csi be controller A */ 380 - 0x000C2800UL, /* csi be controller B */ 381 - 0x000C4800UL /* csi be controller C */ 382 - }; 383 - 384 - /* PIXEL Generator, part of the Input System 2401 */ 385 - static const hrt_address PIXELGEN_CTRL_BASE[N_PIXELGEN_ID] = { 386 - 0x000C1000UL, /* pixel gen controller A */ 387 - 0x000C3000UL, /* pixel gen controller B */ 388 - 0x000C5000UL /* pixel gen controller C */ 389 - }; 390 - 391 - /* Stream2MMIO, part of the Input System 2401 */ 392 - static const hrt_address STREAM2MMIO_CTRL_BASE[N_STREAM2MMIO_ID] = { 393 - 0x000C0C00UL, /* stream2mmio controller A */ 394 - 0x000C2C00UL, /* stream2mmio 
controller B */ 395 - 0x000C4C00UL /* stream2mmio controller C */ 396 - }; 397 - 398 - #else 399 - #error "system_local.h: HRT_ADDRESS_WIDTH must be one of {32,64}" 400 - #endif 401 - 402 - #endif /* __SYSTEM_LOCAL_H_INCLUDED__ */
+7 -2
drivers/staging/media/atomisp/pci/sh_css.c
··· 1841 1841 #endif 1842 1842 1843 1843 #if !defined(HAS_NO_INPUT_SYSTEM) 1844 - dma_set_max_burst_size(DMA0_ID, HIVE_DMA_BUS_DDR_CONN, 1845 - ISP_DMA_MAX_BURST_LENGTH); 1844 + 1845 + if (!IS_ISP2401) 1846 + dma_set_max_burst_size(DMA0_ID, HIVE_DMA_BUS_DDR_CONN, 1847 + ISP2400_DMA_MAX_BURST_LENGTH); 1848 + else 1849 + dma_set_max_burst_size(DMA0_ID, HIVE_DMA_BUS_DDR_CONN, 1850 + ISP2401_DMA_MAX_BURST_LENGTH); 1846 1851 1847 1852 if (ia_css_isys_init() != INPUT_SYSTEM_ERR_NO_ERROR) 1848 1853 err = -EINVAL;
+395
drivers/staging/media/atomisp/pci/system_global.h
··· 4 4 * (c) 2020 Mauro Carvalho Chehab <mchehab+huawei@kernel.org> 5 5 */ 6 6 7 + #ifndef __SYSTEM_GLOBAL_H_INCLUDED__ 8 + #define __SYSTEM_GLOBAL_H_INCLUDED__ 9 + 10 + /* 11 + * Create a list of HAS and IS properties that defines the system 12 + * Those are common for both ISP2400 and ISP2401 13 + * 14 + * The configuration assumes the following 15 + * - The system is hetereogeneous; Multiple cells and devices classes 16 + * - The cell and device instances are homogeneous, each device type 17 + * belongs to the same class 18 + * - Device instances supporting a subset of the class capabilities are 19 + * allowed 20 + * 21 + * We could manage different device classes through the enumerated 22 + * lists (C) or the use of classes (C++), but that is presently not 23 + * fully supported 24 + * 25 + * N.B. the 3 input formatters are of 2 different classess 26 + */ 27 + 28 + #define HAS_MMU_VERSION_2 29 + #define HAS_DMA_VERSION_2 30 + #define HAS_GDC_VERSION_2 31 + #define HAS_VAMEM_VERSION_2 32 + #define HAS_HMEM_VERSION_1 33 + #define HAS_BAMEM_VERSION_2 34 + #define HAS_IRQ_VERSION_2 35 + #define HAS_IRQ_MAP_VERSION_2 36 + #define HAS_INPUT_FORMATTER_VERSION_2 37 + #define HAS_INPUT_SYSTEM_VERSION_2 38 + #define HAS_BUFFERED_SENSOR 39 + #define HAS_FIFO_MONITORS_VERSION_2 40 + #define HAS_GP_DEVICE_VERSION_2 41 + #define HAS_GPIO_VERSION_1 42 + #define HAS_TIMED_CTRL_VERSION_1 43 + #define HAS_RX_VERSION_2 44 + 45 + /* per-frame parameter handling support */ 46 + #define SH_CSS_ENABLE_PER_FRAME_PARAMS 47 + 48 + #define DMA_DDR_TO_VAMEM_WORKAROUND 49 + #define DMA_DDR_TO_HMEM_WORKAROUND 50 + 51 + /* 52 + * The longest allowed (uninteruptible) bus transfer, does not 53 + * take stalling into account 54 + */ 55 + #define HIVE_ISP_MAX_BURST_LENGTH 1024 56 + 57 + /* 58 + * Maximum allowed burst length in words for the ISP DMA 59 + * This value is set to 2 to prevent the ISP DMA from blocking 60 + * the bus for too long; as the input system can only buffer 61 + * 2 lines 
on Moorefield and Cherrytrail, the input system buffers 62 + * may overflow if blocked for too long (BZ 2726). 63 + */ 64 + #define ISP2400_DMA_MAX_BURST_LENGTH 128 65 + #define ISP2401_DMA_MAX_BURST_LENGTH 2 66 + 7 67 #ifdef ISP2401 8 68 # include "isp2401_system_global.h" 9 69 #else 10 70 # include "isp2400_system_global.h" 11 71 #endif 72 + 73 + #include <hive_isp_css_defs.h> 74 + #include <type_support.h> 75 + 76 + /* This interface is deprecated */ 77 + #include "hive_types.h" 78 + 79 + /* 80 + * Semi global. "HRT" is accessible from SP, but the HRT types do not fully apply 81 + */ 82 + #define HRT_VADDRESS_WIDTH 32 83 + 84 + #define SIZEOF_HRT_REG (HRT_DATA_WIDTH >> 3) 85 + #define HIVE_ISP_CTRL_DATA_BYTES (HIVE_ISP_CTRL_DATA_WIDTH / 8) 86 + 87 + /* The main bus connecting all devices */ 88 + #define HRT_BUS_WIDTH HIVE_ISP_CTRL_DATA_WIDTH 89 + #define HRT_BUS_BYTES HIVE_ISP_CTRL_DATA_BYTES 90 + 91 + typedef u32 hrt_bus_align_t; 92 + 93 + /* 94 + * Enumerate the devices, device access through the API is by ID, 95 + * through the DLI by address. The enumerator terminators are used 96 + * to size the wiring arrays and as an exception value. 97 + */ 98 + typedef enum { 99 + DDR0_ID = 0, 100 + N_DDR_ID 101 + } ddr_ID_t; 102 + 103 + typedef enum { 104 + ISP0_ID = 0, 105 + N_ISP_ID 106 + } isp_ID_t; 107 + 108 + typedef enum { 109 + SP0_ID = 0, 110 + N_SP_ID 111 + } sp_ID_t; 112 + 113 + typedef enum { 114 + MMU0_ID = 0, 115 + MMU1_ID, 116 + N_MMU_ID 117 + } mmu_ID_t; 118 + 119 + typedef enum { 120 + DMA0_ID = 0, 121 + N_DMA_ID 122 + } dma_ID_t; 123 + 124 + typedef enum { 125 + GDC0_ID = 0, 126 + GDC1_ID, 127 + N_GDC_ID 128 + } gdc_ID_t; 129 + 130 + /* this extra define is needed because we want to use it also 131 + in the preprocessor, and that doesn't work with enums. 
132 + */ 133 + #define N_GDC_ID_CPP 2 134 + 135 + typedef enum { 136 + VAMEM0_ID = 0, 137 + VAMEM1_ID, 138 + VAMEM2_ID, 139 + N_VAMEM_ID 140 + } vamem_ID_t; 141 + 142 + typedef enum { 143 + BAMEM0_ID = 0, 144 + N_BAMEM_ID 145 + } bamem_ID_t; 146 + 147 + typedef enum { 148 + HMEM0_ID = 0, 149 + N_HMEM_ID 150 + } hmem_ID_t; 151 + 152 + typedef enum { 153 + IRQ0_ID = 0, /* GP IRQ block */ 154 + IRQ1_ID, /* Input formatter */ 155 + IRQ2_ID, /* input system */ 156 + IRQ3_ID, /* input selector */ 157 + N_IRQ_ID 158 + } irq_ID_t; 159 + 160 + typedef enum { 161 + FIFO_MONITOR0_ID = 0, 162 + N_FIFO_MONITOR_ID 163 + } fifo_monitor_ID_t; 164 + 165 + typedef enum { 166 + GP_DEVICE0_ID = 0, 167 + N_GP_DEVICE_ID 168 + } gp_device_ID_t; 169 + 170 + typedef enum { 171 + GP_TIMER0_ID = 0, 172 + GP_TIMER1_ID, 173 + GP_TIMER2_ID, 174 + GP_TIMER3_ID, 175 + GP_TIMER4_ID, 176 + GP_TIMER5_ID, 177 + GP_TIMER6_ID, 178 + GP_TIMER7_ID, 179 + N_GP_TIMER_ID 180 + } gp_timer_ID_t; 181 + 182 + typedef enum { 183 + GPIO0_ID = 0, 184 + N_GPIO_ID 185 + } gpio_ID_t; 186 + 187 + typedef enum { 188 + TIMED_CTRL0_ID = 0, 189 + N_TIMED_CTRL_ID 190 + } timed_ctrl_ID_t; 191 + 192 + typedef enum { 193 + INPUT_FORMATTER0_ID = 0, 194 + INPUT_FORMATTER1_ID, 195 + INPUT_FORMATTER2_ID, 196 + INPUT_FORMATTER3_ID, 197 + N_INPUT_FORMATTER_ID 198 + } input_formatter_ID_t; 199 + 200 + /* The IF RST is outside the IF */ 201 + #define INPUT_FORMATTER0_SRST_OFFSET 0x0824 202 + #define INPUT_FORMATTER1_SRST_OFFSET 0x0624 203 + #define INPUT_FORMATTER2_SRST_OFFSET 0x0424 204 + #define INPUT_FORMATTER3_SRST_OFFSET 0x0224 205 + 206 + #define INPUT_FORMATTER0_SRST_MASK 0x0001 207 + #define INPUT_FORMATTER1_SRST_MASK 0x0002 208 + #define INPUT_FORMATTER2_SRST_MASK 0x0004 209 + #define INPUT_FORMATTER3_SRST_MASK 0x0008 210 + 211 + typedef enum { 212 + INPUT_SYSTEM0_ID = 0, 213 + N_INPUT_SYSTEM_ID 214 + } input_system_ID_t; 215 + 216 + typedef enum { 217 + RX0_ID = 0, 218 + N_RX_ID 219 + } rx_ID_t; 220 + 221 + enum 
mipi_port_id { 222 + MIPI_PORT0_ID = 0, 223 + MIPI_PORT1_ID, 224 + MIPI_PORT2_ID, 225 + N_MIPI_PORT_ID 226 + }; 227 + 228 + #define N_RX_CHANNEL_ID 4 229 + 230 + /* Generic port enumeration with an internal port type ID */ 231 + typedef enum { 232 + CSI_PORT0_ID = 0, 233 + CSI_PORT1_ID, 234 + CSI_PORT2_ID, 235 + TPG_PORT0_ID, 236 + PRBS_PORT0_ID, 237 + FIFO_PORT0_ID, 238 + MEMORY_PORT0_ID, 239 + N_INPUT_PORT_ID 240 + } input_port_ID_t; 241 + 242 + typedef enum { 243 + CAPTURE_UNIT0_ID = 0, 244 + CAPTURE_UNIT1_ID, 245 + CAPTURE_UNIT2_ID, 246 + ACQUISITION_UNIT0_ID, 247 + DMA_UNIT0_ID, 248 + CTRL_UNIT0_ID, 249 + GPREGS_UNIT0_ID, 250 + FIFO_UNIT0_ID, 251 + IRQ_UNIT0_ID, 252 + N_SUB_SYSTEM_ID 253 + } sub_system_ID_t; 254 + 255 + #define N_CAPTURE_UNIT_ID 3 256 + #define N_ACQUISITION_UNIT_ID 1 257 + #define N_CTRL_UNIT_ID 1 258 + 259 + 260 + enum ia_css_isp_memories { 261 + IA_CSS_ISP_PMEM0 = 0, 262 + IA_CSS_ISP_DMEM0, 263 + IA_CSS_ISP_VMEM0, 264 + IA_CSS_ISP_VAMEM0, 265 + IA_CSS_ISP_VAMEM1, 266 + IA_CSS_ISP_VAMEM2, 267 + IA_CSS_ISP_HMEM0, 268 + IA_CSS_SP_DMEM0, 269 + IA_CSS_DDR, 270 + N_IA_CSS_MEMORIES 271 + }; 272 + 273 + #define IA_CSS_NUM_MEMORIES 9 274 + /* For driver compatibility */ 275 + #define N_IA_CSS_ISP_MEMORIES IA_CSS_NUM_MEMORIES 276 + #define IA_CSS_NUM_ISP_MEMORIES IA_CSS_NUM_MEMORIES 277 + 278 + /* 279 + * ISP2401 specific enums 280 + */ 281 + 282 + typedef enum { 283 + ISYS_IRQ0_ID = 0, /* port a */ 284 + ISYS_IRQ1_ID, /* port b */ 285 + ISYS_IRQ2_ID, /* port c */ 286 + N_ISYS_IRQ_ID 287 + } isys_irq_ID_t; 288 + 289 + 290 + /* 291 + * Input-buffer Controller. 292 + */ 293 + typedef enum { 294 + IBUF_CTRL0_ID = 0, /* map to ISYS2401_IBUF_CNTRL_A */ 295 + IBUF_CTRL1_ID, /* map to ISYS2401_IBUF_CNTRL_B */ 296 + IBUF_CTRL2_ID, /* map ISYS2401_IBUF_CNTRL_C */ 297 + N_IBUF_CTRL_ID 298 + } ibuf_ctrl_ID_t; 299 + /* end of Input-buffer Controller */ 300 + 301 + /* 302 + * Stream2MMIO. 
303 + */ 304 + typedef enum { 305 + STREAM2MMIO0_ID = 0, /* map to ISYS2401_S2M_A */ 306 + STREAM2MMIO1_ID, /* map to ISYS2401_S2M_B */ 307 + STREAM2MMIO2_ID, /* map to ISYS2401_S2M_C */ 308 + N_STREAM2MMIO_ID 309 + } stream2mmio_ID_t; 310 + 311 + typedef enum { 312 + /* 313 + * Stream2MMIO 0 has 8 SIDs that are indexed by 314 + * [STREAM2MMIO_SID0_ID...STREAM2MMIO_SID7_ID]. 315 + * 316 + * Stream2MMIO 1 has 4 SIDs that are indexed by 317 + * [STREAM2MMIO_SID0_ID...TREAM2MMIO_SID3_ID]. 318 + * 319 + * Stream2MMIO 2 has 4 SIDs that are indexed by 320 + * [STREAM2MMIO_SID0_ID...STREAM2MMIO_SID3_ID]. 321 + */ 322 + STREAM2MMIO_SID0_ID = 0, 323 + STREAM2MMIO_SID1_ID, 324 + STREAM2MMIO_SID2_ID, 325 + STREAM2MMIO_SID3_ID, 326 + STREAM2MMIO_SID4_ID, 327 + STREAM2MMIO_SID5_ID, 328 + STREAM2MMIO_SID6_ID, 329 + STREAM2MMIO_SID7_ID, 330 + N_STREAM2MMIO_SID_ID 331 + } stream2mmio_sid_ID_t; 332 + /* end of Stream2MMIO */ 333 + 334 + /** 335 + * Input System 2401: CSI-MIPI recevier. 336 + */ 337 + typedef enum { 338 + CSI_RX_BACKEND0_ID = 0, /* map to ISYS2401_MIPI_BE_A */ 339 + CSI_RX_BACKEND1_ID, /* map to ISYS2401_MIPI_BE_B */ 340 + CSI_RX_BACKEND2_ID, /* map to ISYS2401_MIPI_BE_C */ 341 + N_CSI_RX_BACKEND_ID 342 + } csi_rx_backend_ID_t; 343 + 344 + typedef enum { 345 + CSI_RX_FRONTEND0_ID = 0, /* map to ISYS2401_CSI_RX_A */ 346 + CSI_RX_FRONTEND1_ID, /* map to ISYS2401_CSI_RX_B */ 347 + CSI_RX_FRONTEND2_ID, /* map to ISYS2401_CSI_RX_C */ 348 + #define N_CSI_RX_FRONTEND_ID (CSI_RX_FRONTEND2_ID + 1) 349 + } csi_rx_frontend_ID_t; 350 + 351 + typedef enum { 352 + CSI_RX_DLANE0_ID = 0, /* map to DLANE0 in CSI RX */ 353 + CSI_RX_DLANE1_ID, /* map to DLANE1 in CSI RX */ 354 + CSI_RX_DLANE2_ID, /* map to DLANE2 in CSI RX */ 355 + CSI_RX_DLANE3_ID, /* map to DLANE3 in CSI RX */ 356 + N_CSI_RX_DLANE_ID 357 + } csi_rx_fe_dlane_ID_t; 358 + /* end of CSI-MIPI receiver */ 359 + 360 + typedef enum { 361 + ISYS2401_DMA0_ID = 0, 362 + N_ISYS2401_DMA_ID 363 + } isys2401_dma_ID_t; 364 + 365 + 
/** 366 + * Pixel-generator. ("system_global.h") 367 + */ 368 + typedef enum { 369 + PIXELGEN0_ID = 0, 370 + PIXELGEN1_ID, 371 + PIXELGEN2_ID, 372 + N_PIXELGEN_ID 373 + } pixelgen_ID_t; 374 + /* end of pixel-generator. ("system_global.h") */ 375 + 376 + typedef enum { 377 + INPUT_SYSTEM_CSI_PORT0_ID = 0, 378 + INPUT_SYSTEM_CSI_PORT1_ID, 379 + INPUT_SYSTEM_CSI_PORT2_ID, 380 + 381 + INPUT_SYSTEM_PIXELGEN_PORT0_ID, 382 + INPUT_SYSTEM_PIXELGEN_PORT1_ID, 383 + INPUT_SYSTEM_PIXELGEN_PORT2_ID, 384 + 385 + N_INPUT_SYSTEM_INPUT_PORT_ID 386 + } input_system_input_port_ID_t; 387 + 388 + #define N_INPUT_SYSTEM_CSI_PORT 3 389 + 390 + typedef enum { 391 + ISYS2401_DMA_CHANNEL_0 = 0, 392 + ISYS2401_DMA_CHANNEL_1, 393 + ISYS2401_DMA_CHANNEL_2, 394 + ISYS2401_DMA_CHANNEL_3, 395 + ISYS2401_DMA_CHANNEL_4, 396 + ISYS2401_DMA_CHANNEL_5, 397 + ISYS2401_DMA_CHANNEL_6, 398 + ISYS2401_DMA_CHANNEL_7, 399 + ISYS2401_DMA_CHANNEL_8, 400 + ISYS2401_DMA_CHANNEL_9, 401 + ISYS2401_DMA_CHANNEL_10, 402 + ISYS2401_DMA_CHANNEL_11, 403 + N_ISYS2401_DMA_CHANNEL 404 + } isys2401_dma_channel; 405 + 406 + #endif /* __SYSTEM_GLOBAL_H_INCLUDED__ */
+179
drivers/staging/media/atomisp/pci/system_local.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Support for Intel Camera Imaging ISP subsystem. 4 + * Copyright (c) 2015, Intel Corporation. 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms and conditions of the GNU General Public License, 8 + * version 2, as published by the Free Software Foundation. 9 + * 10 + * This program is distributed in the hope it will be useful, but WITHOUT 11 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 + * more details. 14 + */ 15 + 16 + #include "system_local.h" 17 + 18 + /* ISP */ 19 + const hrt_address ISP_CTRL_BASE[N_ISP_ID] = { 20 + 0x0000000000020000ULL 21 + }; 22 + 23 + const hrt_address ISP_DMEM_BASE[N_ISP_ID] = { 24 + 0x0000000000200000ULL 25 + }; 26 + 27 + const hrt_address ISP_BAMEM_BASE[N_BAMEM_ID] = { 28 + 0x0000000000100000ULL 29 + }; 30 + 31 + /* SP */ 32 + const hrt_address SP_CTRL_BASE[N_SP_ID] = { 33 + 0x0000000000010000ULL 34 + }; 35 + 36 + const hrt_address SP_DMEM_BASE[N_SP_ID] = { 37 + 0x0000000000300000ULL 38 + }; 39 + 40 + /* MMU */ 41 + /* 42 + * MMU0_ID: The data MMU 43 + * MMU1_ID: The icache MMU 44 + */ 45 + const hrt_address MMU_BASE[N_MMU_ID] = { 46 + 0x0000000000070000ULL, 47 + 0x00000000000A0000ULL 48 + }; 49 + 50 + /* DMA */ 51 + const hrt_address DMA_BASE[N_DMA_ID] = { 52 + 0x0000000000040000ULL 53 + }; 54 + 55 + const hrt_address ISYS2401_DMA_BASE[N_ISYS2401_DMA_ID] = { 56 + 0x00000000000CA000ULL 57 + }; 58 + 59 + /* IRQ */ 60 + const hrt_address IRQ_BASE[N_IRQ_ID] = { 61 + 0x0000000000000500ULL, 62 + 0x0000000000030A00ULL, 63 + 0x000000000008C000ULL, 64 + 0x0000000000090200ULL 65 + }; 66 + 67 + /* 68 + 0x0000000000000500ULL}; 69 + */ 70 + 71 + /* GDC */ 72 + const hrt_address GDC_BASE[N_GDC_ID] = { 73 + 0x0000000000050000ULL, 74 + 0x0000000000060000ULL 75 + }; 76 + 77 + /* FIFO_MONITOR (not a subset of GP_DEVICE) */ 78 + const 
hrt_address FIFO_MONITOR_BASE[N_FIFO_MONITOR_ID] = { 79 + 0x0000000000000000ULL 80 + }; 81 + 82 + /* 83 + const hrt_address GP_REGS_BASE[N_GP_REGS_ID] = { 84 + 0x0000000000000000ULL}; 85 + 86 + const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = { 87 + 0x0000000000090000ULL}; 88 + */ 89 + 90 + /* GP_DEVICE (single base for all separate GP_REG instances) */ 91 + const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = { 92 + 0x0000000000000000ULL 93 + }; 94 + 95 + /*GP TIMER , all timer registers are inter-twined, 96 + * so, having multiple base addresses for 97 + * different timers does not help*/ 98 + const hrt_address GP_TIMER_BASE = 99 + (hrt_address)0x0000000000000600ULL; 100 + 101 + /* GPIO */ 102 + const hrt_address GPIO_BASE[N_GPIO_ID] = { 103 + 0x0000000000000400ULL 104 + }; 105 + 106 + /* TIMED_CTRL */ 107 + const hrt_address TIMED_CTRL_BASE[N_TIMED_CTRL_ID] = { 108 + 0x0000000000000100ULL 109 + }; 110 + 111 + /* INPUT_FORMATTER */ 112 + const hrt_address INPUT_FORMATTER_BASE[N_INPUT_FORMATTER_ID] = { 113 + 0x0000000000030000ULL, 114 + 0x0000000000030200ULL, 115 + 0x0000000000030400ULL, 116 + 0x0000000000030600ULL 117 + }; /* memcpy() */ 118 + 119 + /* INPUT_SYSTEM */ 120 + const hrt_address INPUT_SYSTEM_BASE[N_INPUT_SYSTEM_ID] = { 121 + 0x0000000000080000ULL 122 + }; 123 + 124 + /* 0x0000000000081000ULL, */ /* capture A */ 125 + /* 0x0000000000082000ULL, */ /* capture B */ 126 + /* 0x0000000000083000ULL, */ /* capture C */ 127 + /* 0x0000000000084000ULL, */ /* Acquisition */ 128 + /* 0x0000000000085000ULL, */ /* DMA */ 129 + /* 0x0000000000089000ULL, */ /* ctrl */ 130 + /* 0x000000000008A000ULL, */ /* GP regs */ 131 + /* 0x000000000008B000ULL, */ /* FIFO */ 132 + /* 0x000000000008C000ULL, */ /* IRQ */ 133 + 134 + /* RX, the MIPI lane control regs start at offset 0 */ 135 + const hrt_address RX_BASE[N_RX_ID] = { 136 + 0x0000000000080100ULL 137 + }; 138 + 139 + /* IBUF_CTRL, part of the Input System 2401 */ 140 + const hrt_address IBUF_CTRL_BASE[N_IBUF_CTRL_ID] = 
{ 141 + 0x00000000000C1800ULL, /* ibuf controller A */ 142 + 0x00000000000C3800ULL, /* ibuf controller B */ 143 + 0x00000000000C5800ULL /* ibuf controller C */ 144 + }; 145 + 146 + /* ISYS IRQ Controllers, part of the Input System 2401 */ 147 + const hrt_address ISYS_IRQ_BASE[N_ISYS_IRQ_ID] = { 148 + 0x00000000000C1400ULL, /* port a */ 149 + 0x00000000000C3400ULL, /* port b */ 150 + 0x00000000000C5400ULL /* port c */ 151 + }; 152 + 153 + /* CSI FE, part of the Input System 2401 */ 154 + const hrt_address CSI_RX_FE_CTRL_BASE[N_CSI_RX_FRONTEND_ID] = { 155 + 0x00000000000C0400ULL, /* csi fe controller A */ 156 + 0x00000000000C2400ULL, /* csi fe controller B */ 157 + 0x00000000000C4400ULL /* csi fe controller C */ 158 + }; 159 + 160 + /* CSI BE, part of the Input System 2401 */ 161 + const hrt_address CSI_RX_BE_CTRL_BASE[N_CSI_RX_BACKEND_ID] = { 162 + 0x00000000000C0800ULL, /* csi be controller A */ 163 + 0x00000000000C2800ULL, /* csi be controller B */ 164 + 0x00000000000C4800ULL /* csi be controller C */ 165 + }; 166 + 167 + /* PIXEL Generator, part of the Input System 2401 */ 168 + const hrt_address PIXELGEN_CTRL_BASE[N_PIXELGEN_ID] = { 169 + 0x00000000000C1000ULL, /* pixel gen controller A */ 170 + 0x00000000000C3000ULL, /* pixel gen controller B */ 171 + 0x00000000000C5000ULL /* pixel gen controller C */ 172 + }; 173 + 174 + /* Stream2MMIO, part of the Input System 2401 */ 175 + const hrt_address STREAM2MMIO_CTRL_BASE[N_STREAM2MMIO_ID] = { 176 + 0x00000000000C0C00ULL, /* stream2mmio controller A */ 177 + 0x00000000000C2C00ULL, /* stream2mmio controller B */ 178 + 0x00000000000C4C00ULL /* stream2mmio controller C */ 179 + };
+98 -6
drivers/staging/media/atomisp/pci/system_local.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 - // SPDX-License-Identifier: GPL-2.0-or-later 3 2 /* 4 - * (c) 2020 Mauro Carvalho Chehab <mchehab+huawei@kernel.org> 3 + * Support for Intel Camera Imaging ISP subsystem. 4 + * Copyright (c) 2015, Intel Corporation. 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms and conditions of the GNU General Public License, 8 + * version 2, as published by the Free Software Foundation. 9 + * 10 + * This program is distributed in the hope it will be useful, but WITHOUT 11 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 + * more details. 5 14 */ 6 15 7 - #ifdef ISP2401 8 - # include "isp2401_system_local.h" 9 - #else 10 - # include "isp2400_system_local.h" 16 + #ifndef __SYSTEM_LOCAL_H_INCLUDED__ 17 + #define __SYSTEM_LOCAL_H_INCLUDED__ 18 + 19 + #ifdef HRT_ISP_CSS_CUSTOM_HOST 20 + #ifndef HRT_USE_VIR_ADDRS 21 + #define HRT_USE_VIR_ADDRS 11 22 #endif 23 + #endif 24 + 25 + #include "system_global.h" 26 + 27 + /* This interface is deprecated */ 28 + #include "hive_types.h" 29 + 30 + /* 31 + * Cell specific address maps 32 + */ 33 + 34 + #define GP_FIFO_BASE ((hrt_address)0x0000000000090104) /* This is NOT a base address */ 35 + 36 + /* ISP */ 37 + extern const hrt_address ISP_CTRL_BASE[N_ISP_ID]; 38 + extern const hrt_address ISP_DMEM_BASE[N_ISP_ID]; 39 + extern const hrt_address ISP_BAMEM_BASE[N_BAMEM_ID]; 40 + 41 + /* SP */ 42 + extern const hrt_address SP_CTRL_BASE[N_SP_ID]; 43 + extern const hrt_address SP_DMEM_BASE[N_SP_ID]; 44 + 45 + /* MMU */ 46 + 47 + extern const hrt_address MMU_BASE[N_MMU_ID]; 48 + 49 + /* DMA */ 50 + extern const hrt_address DMA_BASE[N_DMA_ID]; 51 + extern const hrt_address ISYS2401_DMA_BASE[N_ISYS2401_DMA_ID]; 52 + 53 + /* IRQ */ 54 + extern const hrt_address IRQ_BASE[N_IRQ_ID]; 55 + 56 + /* GDC */ 57 + extern const hrt_address 
GDC_BASE[N_GDC_ID]; 58 + 59 + /* FIFO_MONITOR (not a subset of GP_DEVICE) */ 60 + extern const hrt_address FIFO_MONITOR_BASE[N_FIFO_MONITOR_ID]; 61 + 62 + /* GP_DEVICE (single base for all separate GP_REG instances) */ 63 + extern const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID]; 64 + 65 + /*GP TIMER , all timer registers are inter-twined, 66 + * so, having multiple base addresses for 67 + * different timers does not help*/ 68 + extern const hrt_address GP_TIMER_BASE; 69 + 70 + /* GPIO */ 71 + extern const hrt_address GPIO_BASE[N_GPIO_ID]; 72 + 73 + /* TIMED_CTRL */ 74 + extern const hrt_address TIMED_CTRL_BASE[N_TIMED_CTRL_ID]; 75 + 76 + /* INPUT_FORMATTER */ 77 + extern const hrt_address INPUT_FORMATTER_BASE[N_INPUT_FORMATTER_ID]; 78 + 79 + /* INPUT_SYSTEM */ 80 + extern const hrt_address INPUT_SYSTEM_BASE[N_INPUT_SYSTEM_ID]; 81 + 82 + /* RX, the MIPI lane control regs start at offset 0 */ 83 + extern const hrt_address RX_BASE[N_RX_ID]; 84 + 85 + /* IBUF_CTRL, part of the Input System 2401 */ 86 + extern const hrt_address IBUF_CTRL_BASE[N_IBUF_CTRL_ID]; 87 + 88 + /* ISYS IRQ Controllers, part of the Input System 2401 */ 89 + extern const hrt_address ISYS_IRQ_BASE[N_ISYS_IRQ_ID]; 90 + 91 + /* CSI FE, part of the Input System 2401 */ 92 + extern const hrt_address CSI_RX_FE_CTRL_BASE[N_CSI_RX_FRONTEND_ID]; 93 + 94 + /* CSI BE, part of the Input System 2401 */ 95 + extern const hrt_address CSI_RX_BE_CTRL_BASE[N_CSI_RX_BACKEND_ID]; 96 + 97 + /* PIXEL Generator, part of the Input System 2401 */ 98 + extern const hrt_address PIXELGEN_CTRL_BASE[N_PIXELGEN_ID]; 99 + 100 + /* Stream2MMIO, part of the Input System 2401 */ 101 + extern const hrt_address STREAM2MMIO_CTRL_BASE[N_STREAM2MMIO_ID]; 102 + 103 + #endif /* __SYSTEM_LOCAL_H_INCLUDED__ */
+15 -1
drivers/staging/wlan-ng/prism2usb.c
··· 61 61 const struct usb_device_id *id) 62 62 { 63 63 struct usb_device *dev; 64 - 64 + const struct usb_endpoint_descriptor *epd; 65 + const struct usb_host_interface *iface_desc = interface->cur_altsetting; 65 66 struct wlandevice *wlandev = NULL; 66 67 struct hfa384x *hw = NULL; 67 68 int result = 0; 69 + 70 + if (iface_desc->desc.bNumEndpoints != 2) { 71 + result = -ENODEV; 72 + goto failed; 73 + } 74 + 75 + result = -EINVAL; 76 + epd = &iface_desc->endpoint[1].desc; 77 + if (!usb_endpoint_is_bulk_in(epd)) 78 + goto failed; 79 + epd = &iface_desc->endpoint[2].desc; 80 + if (!usb_endpoint_is_bulk_out(epd)) 81 + goto failed; 68 82 69 83 dev = interface_to_usbdev(interface); 70 84 wlandev = create_wlan();
+3 -3
drivers/thermal/cpufreq_cooling.c
··· 123 123 { 124 124 int i; 125 125 126 - for (i = cpufreq_cdev->max_level - 1; i >= 0; i--) { 127 - if (power > cpufreq_cdev->em->table[i].power) 126 + for (i = cpufreq_cdev->max_level; i >= 0; i--) { 127 + if (power >= cpufreq_cdev->em->table[i].power) 128 128 break; 129 129 } 130 130 131 - return cpufreq_cdev->em->table[i + 1].frequency; 131 + return cpufreq_cdev->em->table[i].frequency; 132 132 } 133 133 134 134 /**
+4 -3
drivers/thermal/imx_thermal.c
··· 649 649 static int imx_thermal_register_legacy_cooling(struct imx_thermal_data *data) 650 650 { 651 651 struct device_node *np; 652 - int ret; 652 + int ret = 0; 653 653 654 654 data->policy = cpufreq_cpu_get(0); 655 655 if (!data->policy) { ··· 664 664 if (IS_ERR(data->cdev)) { 665 665 ret = PTR_ERR(data->cdev); 666 666 cpufreq_cpu_put(data->policy); 667 - return ret; 668 667 } 669 668 } 670 669 671 - return 0; 670 + of_node_put(np); 671 + 672 + return ret; 672 673 } 673 674 674 675 static void imx_thermal_unregister_legacy_cooling(struct imx_thermal_data *data)
+6 -1
drivers/thermal/intel/int340x_thermal/int3400_thermal.c
··· 216 216 acpi_status status; 217 217 int result = 0; 218 218 struct acpi_osc_context context = { 219 - .uuid_str = int3400_thermal_uuids[uuid], 219 + .uuid_str = NULL, 220 220 .rev = 1, 221 221 .cap.length = 8, 222 222 }; 223 + 224 + if (uuid < 0 || uuid >= INT3400_THERMAL_MAXIMUM_UUID) 225 + return -EINVAL; 226 + 227 + context.uuid_str = int3400_thermal_uuids[uuid]; 223 228 224 229 buf[OSC_QUERY_DWORD] = 0; 225 230 buf[OSC_SUPPORT_DWORD] = enable;
+1 -1
drivers/thermal/intel/int340x_thermal/int3403_thermal.c
··· 74 74 THERMAL_TRIP_CHANGED); 75 75 break; 76 76 default: 77 - dev_err(&priv->pdev->dev, "Unsupported event [0x%x]\n", event); 77 + dev_dbg(&priv->pdev->dev, "Unsupported event [0x%x]\n", event); 78 78 break; 79 79 } 80 80 }
+6 -5
drivers/thermal/mtk_thermal.c
··· 211 211 /* The total number of temperature sensors in the MT8183 */ 212 212 #define MT8183_NUM_SENSORS 6 213 213 214 + /* The number of banks in the MT8183 */ 215 + #define MT8183_NUM_ZONES 1 216 + 214 217 /* The number of sensing points per bank */ 215 218 #define MT8183_NUM_SENSORS_PER_ZONE 6 216 219 ··· 500 497 */ 501 498 static const struct mtk_thermal_data mt8183_thermal_data = { 502 499 .auxadc_channel = MT8183_TEMP_AUXADC_CHANNEL, 503 - .num_banks = MT8183_NUM_SENSORS_PER_ZONE, 500 + .num_banks = MT8183_NUM_ZONES, 504 501 .num_sensors = MT8183_NUM_SENSORS, 505 502 .vts_index = mt8183_vts_index, 506 503 .cali_val = MT8183_CALIBRATION, ··· 594 591 u32 raw; 595 592 596 593 for (i = 0; i < conf->bank_data[bank->id].num_sensors; i++) { 597 - raw = readl(mt->thermal_base + 598 - conf->msr[conf->bank_data[bank->id].sensors[i]]); 594 + raw = readl(mt->thermal_base + conf->msr[i]); 599 595 600 596 temp = raw_to_mcelsius(mt, 601 597 conf->bank_data[bank->id].sensors[i], ··· 735 733 736 734 for (i = 0; i < conf->bank_data[num].num_sensors; i++) 737 735 writel(conf->sensor_mux_values[conf->bank_data[num].sensors[i]], 738 - mt->thermal_base + 739 - conf->adcpnp[conf->bank_data[num].sensors[i]]); 736 + mt->thermal_base + conf->adcpnp[i]); 740 737 741 738 writel((1 << conf->bank_data[num].num_sensors) - 1, 742 739 controller_base + TEMP_MONCTL0);
+5 -5
drivers/thermal/qcom/tsens.c
··· 382 382 * 383 383 * Return: IRQ_HANDLED 384 384 */ 385 - irqreturn_t tsens_critical_irq_thread(int irq, void *data) 385 + static irqreturn_t tsens_critical_irq_thread(int irq, void *data) 386 386 { 387 387 struct tsens_priv *priv = data; 388 388 struct tsens_irq_data d; ··· 452 452 * 453 453 * Return: IRQ_HANDLED 454 454 */ 455 - irqreturn_t tsens_irq_thread(int irq, void *data) 455 + static irqreturn_t tsens_irq_thread(int irq, void *data) 456 456 { 457 457 struct tsens_priv *priv = data; 458 458 struct tsens_irq_data d; ··· 520 520 return IRQ_HANDLED; 521 521 } 522 522 523 - int tsens_set_trips(void *_sensor, int low, int high) 523 + static int tsens_set_trips(void *_sensor, int low, int high) 524 524 { 525 525 struct tsens_sensor *s = _sensor; 526 526 struct tsens_priv *priv = s->priv; ··· 557 557 return 0; 558 558 } 559 559 560 - int tsens_enable_irq(struct tsens_priv *priv) 560 + static int tsens_enable_irq(struct tsens_priv *priv) 561 561 { 562 562 int ret; 563 563 int val = tsens_version(priv) > VER_1_X ? 7 : 1; ··· 570 570 return ret; 571 571 } 572 572 573 - void tsens_disable_irq(struct tsens_priv *priv) 573 + static void tsens_disable_irq(struct tsens_priv *priv) 574 574 { 575 575 regmap_field_write(priv->rf[INT_EN], 0); 576 576 }
+1 -1
drivers/thermal/rcar_gen3_thermal.c
··· 167 167 { 168 168 struct rcar_gen3_thermal_tsc *tsc = devdata; 169 169 int mcelsius, val; 170 - u32 reg; 170 + int reg; 171 171 172 172 /* Read register and convert to mili Celsius */ 173 173 reg = rcar_gen3_thermal_read(tsc, REG_GEN3_TEMP) & CTEMP_MASK;
+2 -2
drivers/thermal/sprd_thermal.c
··· 348 348 349 349 thm->var_data = pdata; 350 350 thm->base = devm_platform_ioremap_resource(pdev, 0); 351 - if (!thm->base) 352 - return -ENOMEM; 351 + if (IS_ERR(thm->base)) 352 + return PTR_ERR(thm->base); 353 353 354 354 thm->nr_sensors = of_get_child_count(np); 355 355 if (thm->nr_sensors == 0 || thm->nr_sensors > SPRD_THM_MAX_SENSOR) {
+8 -8
drivers/thunderbolt/tunnel.c
··· 913 913 * case. 914 914 */ 915 915 path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1, 916 - &tunnel->dst_port, "USB3 Up"); 916 + &tunnel->dst_port, "USB3 Down"); 917 917 if (!path) { 918 918 /* Just disable the downstream port */ 919 919 tb_usb3_port_enable(down, false); 920 920 goto err_free; 921 921 } 922 - tunnel->paths[TB_USB3_PATH_UP] = path; 923 - tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]); 924 - 925 - path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL, 926 - "USB3 Down"); 927 - if (!path) 928 - goto err_deactivate; 929 922 tunnel->paths[TB_USB3_PATH_DOWN] = path; 930 923 tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]); 924 + 925 + path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL, 926 + "USB3 Up"); 927 + if (!path) 928 + goto err_deactivate; 929 + tunnel->paths[TB_USB3_PATH_UP] = path; 930 + tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]); 931 931 932 932 /* Validate that the tunnel is complete */ 933 933 if (!tb_port_is_usb3_up(tunnel->dst_port)) {
+1 -1
drivers/tty/serial/8250/8250_core.c
··· 524 524 */ 525 525 up->mcr_mask = ~ALPHA_KLUDGE_MCR; 526 526 up->mcr_force = ALPHA_KLUDGE_MCR; 527 + serial8250_set_defaults(up); 527 528 } 528 529 529 530 /* chain base port ops to support Remote Supervisor Adapter */ ··· 548 547 port->membase = old_serial_port[i].iomem_base; 549 548 port->iotype = old_serial_port[i].io_type; 550 549 port->regshift = old_serial_port[i].iomem_reg_shift; 551 - serial8250_set_defaults(up); 552 550 553 551 port->irqflags |= irqflag; 554 552 if (serial8250_isa_config != NULL)
+11 -1
drivers/tty/serial/8250/8250_exar.c
··· 326 326 * devices will export them as GPIOs, so we pre-configure them safely 327 327 * as inputs. 328 328 */ 329 - u8 dir = pcidev->vendor == PCI_VENDOR_ID_EXAR ? 0xff : 0x00; 329 + 330 + u8 dir = 0x00; 331 + 332 + if ((pcidev->vendor == PCI_VENDOR_ID_EXAR) && 333 + (pcidev->subsystem_vendor != PCI_VENDOR_ID_SEALEVEL)) { 334 + // Configure GPIO as inputs for Commtech adapters 335 + dir = 0xff; 336 + } else { 337 + // Configure GPIO as outputs for SeaLevel adapters 338 + dir = 0x00; 339 + } 330 340 331 341 writeb(0x00, p + UART_EXAR_MPIOINT_7_0); 332 342 writeb(0x00, p + UART_EXAR_MPIOLVL_7_0);
+18
drivers/tty/serial/8250/8250_mtk.c
··· 306 306 } 307 307 #endif 308 308 309 + /* 310 + * Store the requested baud rate before calling the generic 8250 311 + * set_termios method. Standard 8250 port expects bauds to be 312 + * no higher than (uartclk / 16) so the baud will be clamped if it 313 + * gets out of that bound. Mediatek 8250 port supports speed 314 + * higher than that, therefore we'll get original baud rate back 315 + * after calling the generic set_termios method and recalculate 316 + * the speed later in this method. 317 + */ 318 + baud = tty_termios_baud_rate(termios); 319 + 309 320 serial8250_do_set_termios(port, termios, old); 321 + 322 + tty_termios_encode_baud_rate(termios, baud, baud); 310 323 311 324 /* 312 325 * Mediatek UARTs use an extra highspeed register (MTK_UART_HIGHS) ··· 351 338 * interrupts disabled. 352 339 */ 353 340 spin_lock_irqsave(&port->lock, flags); 341 + 342 + /* 343 + * Update the per-port timeout. 344 + */ 345 + uart_update_timeout(port, termios->c_cflag, baud); 354 346 355 347 /* set DLAB we have cval saved in up->lcr from the call to the core */ 356 348 serial_port_out(port, UART_LCR, up->lcr | UART_LCR_DLAB);
+8 -1
drivers/tty/serial/cpm_uart/cpm_uart_core.c
··· 1215 1215 1216 1216 pinfo->gpios[i] = NULL; 1217 1217 1218 - gpiod = devm_gpiod_get_index(dev, NULL, i, GPIOD_ASIS); 1218 + gpiod = devm_gpiod_get_index_optional(dev, NULL, i, GPIOD_ASIS); 1219 + 1220 + if (IS_ERR(gpiod)) { 1221 + ret = PTR_ERR(gpiod); 1222 + goto out_irq; 1223 + } 1219 1224 1220 1225 if (gpiod) { 1221 1226 if (i == GPIO_RTS || i == GPIO_DTR) ··· 1242 1237 1243 1238 return cpm_uart_request_port(&pinfo->port); 1244 1239 1240 + out_irq: 1241 + irq_dispose_mapping(pinfo->port.irq); 1245 1242 out_pram: 1246 1243 cpm_uart_unmap_pram(pinfo, pram); 1247 1244 out_mem:
+8 -4
drivers/tty/serial/mxs-auart.c
··· 1698 1698 irq = platform_get_irq(pdev, 0); 1699 1699 if (irq < 0) { 1700 1700 ret = irq; 1701 - goto out_disable_clks; 1701 + goto out_iounmap; 1702 1702 } 1703 1703 1704 1704 s->port.irq = irq; 1705 1705 ret = devm_request_irq(&pdev->dev, irq, mxs_auart_irq_handle, 0, 1706 1706 dev_name(&pdev->dev), s); 1707 1707 if (ret) 1708 - goto out_disable_clks; 1708 + goto out_iounmap; 1709 1709 1710 1710 platform_set_drvdata(pdev, s); 1711 1711 1712 1712 ret = mxs_auart_init_gpios(s, &pdev->dev); 1713 1713 if (ret) { 1714 1714 dev_err(&pdev->dev, "Failed to initialize GPIOs.\n"); 1715 - goto out_disable_clks; 1715 + goto out_iounmap; 1716 1716 } 1717 1717 1718 1718 /* ··· 1720 1720 */ 1721 1721 ret = mxs_auart_request_gpio_irq(s); 1722 1722 if (ret) 1723 - goto out_disable_clks; 1723 + goto out_iounmap; 1724 1724 1725 1725 auart_port[s->port.line] = s; 1726 1726 ··· 1746 1746 mxs_auart_free_gpio_irq(s); 1747 1747 auart_port[pdev->id] = NULL; 1748 1748 1749 + out_iounmap: 1750 + iounmap(s->port.membase); 1751 + 1749 1752 out_disable_clks: 1750 1753 if (is_asm9260_auart(s)) { 1751 1754 clk_disable_unprepare(s->clk); ··· 1764 1761 uart_remove_one_port(&auart_driver, &s->port); 1765 1762 auart_port[pdev->id] = NULL; 1766 1763 mxs_auart_free_gpio_irq(s); 1764 + iounmap(s->port.membase); 1767 1765 if (is_asm9260_auart(s)) { 1768 1766 clk_disable_unprepare(s->clk); 1769 1767 clk_disable_unprepare(s->clk_ahb);
+7 -9
drivers/tty/serial/serial-tegra.c
··· 635 635 } 636 636 637 637 static void tegra_uart_handle_rx_pio(struct tegra_uart_port *tup, 638 - struct tty_port *tty) 638 + struct tty_port *port) 639 639 { 640 640 do { 641 641 char flag = TTY_NORMAL; ··· 653 653 ch = (unsigned char) tegra_uart_read(tup, UART_RX); 654 654 tup->uport.icount.rx++; 655 655 656 - if (!uart_handle_sysrq_char(&tup->uport, ch) && tty) 657 - tty_insert_flip_char(tty, ch, flag); 656 + if (uart_handle_sysrq_char(&tup->uport, ch)) 657 + continue; 658 658 659 659 if (tup->uport.ignore_status_mask & UART_LSR_DR) 660 660 continue; 661 + 662 + tty_insert_flip_char(port, ch, flag); 661 663 } while (1); 662 664 } 663 665 664 666 static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup, 665 - struct tty_port *tty, 667 + struct tty_port *port, 666 668 unsigned int count) 667 669 { 668 670 int copied; ··· 674 672 return; 675 673 676 674 tup->uport.icount.rx += count; 677 - if (!tty) { 678 - dev_err(tup->uport.dev, "No tty port\n"); 679 - return; 680 - } 681 675 682 676 if (tup->uport.ignore_status_mask & UART_LSR_DR) 683 677 return; 684 678 685 679 dma_sync_single_for_cpu(tup->uport.dev, tup->rx_dma_buf_phys, 686 680 count, DMA_FROM_DEVICE); 687 - copied = tty_insert_flip_string(tty, 681 + copied = tty_insert_flip_string(port, 688 682 ((unsigned char *)(tup->rx_dma_buf_virt)), count); 689 683 if (copied != count) { 690 684 WARN_ON(1);
+17 -98
drivers/tty/serial/serial_core.c
··· 41 41 42 42 #define HIGH_BITS_OFFSET ((sizeof(long)-sizeof(int))*8) 43 43 44 - #define SYSRQ_TIMEOUT (HZ * 5) 45 - 46 44 static void uart_change_speed(struct tty_struct *tty, struct uart_state *state, 47 45 struct ktermios *old_termios); 48 46 static void uart_wait_until_sent(struct tty_struct *tty, int timeout); ··· 1914 1916 return uart_console(port) && (port->cons->flags & CON_ENABLED); 1915 1917 } 1916 1918 1919 + static void __uart_port_spin_lock_init(struct uart_port *port) 1920 + { 1921 + spin_lock_init(&port->lock); 1922 + lockdep_set_class(&port->lock, &port_lock_key); 1923 + } 1924 + 1917 1925 /* 1918 1926 * Ensure that the serial console lock is initialised early. 1919 1927 * If this port is a console, then the spinlock is already initialised. ··· 1929 1925 if (uart_console(port)) 1930 1926 return; 1931 1927 1932 - spin_lock_init(&port->lock); 1933 - lockdep_set_class(&port->lock, &port_lock_key); 1928 + __uart_port_spin_lock_init(port); 1934 1929 } 1935 1930 1936 1931 #if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(CONFIG_CONSOLE_POLL) ··· 2374 2371 2375 2372 /* Power up port for set_mctrl() */ 2376 2373 uart_change_pm(state, UART_PM_STATE_ON); 2374 + 2375 + /* 2376 + * If this driver supports console, and it hasn't been 2377 + * successfully registered yet, initialise spin lock for it. 2378 + */ 2379 + if (port->cons && !(port->cons->flags & CON_ENABLED)) 2380 + __uart_port_spin_lock_init(port); 2377 2381 2378 2382 /* 2379 2383 * Ensure that the modem control lines are de-activated. ··· 3173 3163 * Returns false if @ch is out of enabling sequence and should be 3174 3164 * handled some other way, true if @ch was consumed. 
3175 3165 */ 3176 - static bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch) 3166 + bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch) 3177 3167 { 3178 3168 int sysrq_toggle_seq_len = strlen(sysrq_toggle_seq); 3179 3169 ··· 3196 3186 port->sysrq = 0; 3197 3187 return true; 3198 3188 } 3199 - #else 3200 - static inline bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch) 3201 - { 3202 - return false; 3203 - } 3189 + EXPORT_SYMBOL_GPL(uart_try_toggle_sysrq); 3204 3190 #endif 3205 - 3206 - int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch) 3207 - { 3208 - if (!IS_ENABLED(CONFIG_MAGIC_SYSRQ_SERIAL)) 3209 - return 0; 3210 - 3211 - if (!port->has_sysrq || !port->sysrq) 3212 - return 0; 3213 - 3214 - if (ch && time_before(jiffies, port->sysrq)) { 3215 - if (sysrq_mask()) { 3216 - handle_sysrq(ch); 3217 - port->sysrq = 0; 3218 - return 1; 3219 - } 3220 - if (uart_try_toggle_sysrq(port, ch)) 3221 - return 1; 3222 - } 3223 - port->sysrq = 0; 3224 - 3225 - return 0; 3226 - } 3227 - EXPORT_SYMBOL_GPL(uart_handle_sysrq_char); 3228 - 3229 - int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch) 3230 - { 3231 - if (!IS_ENABLED(CONFIG_MAGIC_SYSRQ_SERIAL)) 3232 - return 0; 3233 - 3234 - if (!port->has_sysrq || !port->sysrq) 3235 - return 0; 3236 - 3237 - if (ch && time_before(jiffies, port->sysrq)) { 3238 - if (sysrq_mask()) { 3239 - port->sysrq_ch = ch; 3240 - port->sysrq = 0; 3241 - return 1; 3242 - } 3243 - if (uart_try_toggle_sysrq(port, ch)) 3244 - return 1; 3245 - } 3246 - port->sysrq = 0; 3247 - 3248 - return 0; 3249 - } 3250 - EXPORT_SYMBOL_GPL(uart_prepare_sysrq_char); 3251 - 3252 - void uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long flags) 3253 - __releases(&port->lock) 3254 - { 3255 - if (port->has_sysrq) { 3256 - int sysrq_ch = port->sysrq_ch; 3257 - 3258 - port->sysrq_ch = 0; 3259 - spin_unlock_irqrestore(&port->lock, flags); 3260 - if (sysrq_ch) 3261 - 
handle_sysrq(sysrq_ch); 3262 - } else { 3263 - spin_unlock_irqrestore(&port->lock, flags); 3264 - } 3265 - } 3266 - EXPORT_SYMBOL_GPL(uart_unlock_and_check_sysrq); 3267 - 3268 - /* 3269 - * We do the SysRQ and SAK checking like this... 3270 - */ 3271 - int uart_handle_break(struct uart_port *port) 3272 - { 3273 - struct uart_state *state = port->state; 3274 - 3275 - if (port->handle_break) 3276 - port->handle_break(port); 3277 - 3278 - if (port->has_sysrq && uart_console(port)) { 3279 - if (!port->sysrq) { 3280 - port->sysrq = jiffies + SYSRQ_TIMEOUT; 3281 - return 1; 3282 - } 3283 - port->sysrq = 0; 3284 - } 3285 - 3286 - if (port->flags & UPF_SAK) 3287 - do_SAK(state->port.tty); 3288 - return 0; 3289 - } 3290 - EXPORT_SYMBOL_GPL(uart_handle_break); 3291 3191 3292 3192 EXPORT_SYMBOL(uart_write_wakeup); 3293 3193 EXPORT_SYMBOL(uart_register_driver); ··· 3209 3289 3210 3290 /** 3211 3291 * uart_get_rs485_mode() - retrieve rs485 properties for given uart 3212 - * @dev: uart device 3213 - * @rs485conf: output parameter 3292 + * @port: uart device's target port 3214 3293 * 3215 3294 * This function implements the device tree binding described in 3216 3295 * Documentation/devicetree/bindings/serial/rs485.txt.
+3
drivers/tty/serial/sh-sci.c
··· 3301 3301 sciport->port.flags |= UPF_HARD_FLOW; 3302 3302 } 3303 3303 3304 + if (sci_uart_driver.cons->index == sciport->port.line) 3305 + spin_lock_init(&sciport->port.lock); 3306 + 3304 3307 ret = uart_add_one_port(&sci_uart_driver, &sciport->port); 3305 3308 if (ret) { 3306 3309 sci_cleanup_single(sciport);
+6 -3
drivers/tty/serial/xilinx_uartps.c
··· 1465 1465 cdns_uart_uart_driver.nr = CDNS_UART_NR_PORTS; 1466 1466 #ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE 1467 1467 cdns_uart_uart_driver.cons = &cdns_uart_console; 1468 - cdns_uart_console.index = id; 1469 1468 #endif 1470 1469 1471 1470 rc = uart_register_driver(&cdns_uart_uart_driver); ··· 1580 1581 * If register_console() don't assign value, then console_port pointer 1581 1582 * is cleanup. 1582 1583 */ 1583 - if (!console_port) 1584 + if (!console_port) { 1585 + cdns_uart_console.index = id; 1584 1586 console_port = port; 1587 + } 1585 1588 #endif 1586 1589 1587 1590 rc = uart_add_one_port(&cdns_uart_uart_driver, port); ··· 1596 1595 #ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE 1597 1596 /* This is not port which is used for console that's why clean it up */ 1598 1597 if (console_port == port && 1599 - !(cdns_uart_uart_driver.cons->flags & CON_ENABLED)) 1598 + !(cdns_uart_uart_driver.cons->flags & CON_ENABLED)) { 1600 1599 console_port = NULL; 1600 + cdns_uart_console.index = -1; 1601 + } 1601 1602 #endif 1602 1603 1603 1604 cdns_uart_data->cts_override = of_property_read_bool(pdev->dev.of_node,
+18 -11
drivers/tty/vt/vt.c
··· 1092 1092 .destruct = vc_port_destruct, 1093 1093 }; 1094 1094 1095 + /* 1096 + * Change # of rows and columns (0 means unchanged/the size of fg_console) 1097 + * [this is to be used together with some user program 1098 + * like resize that changes the hardware videomode] 1099 + */ 1100 + #define VC_MAXCOL (32767) 1101 + #define VC_MAXROW (32767) 1102 + 1095 1103 int vc_allocate(unsigned int currcons) /* return 0 on success */ 1096 1104 { 1097 1105 struct vt_notifier_param param; 1098 1106 struct vc_data *vc; 1107 + int err; 1099 1108 1100 1109 WARN_CONSOLE_UNLOCKED(); 1101 1110 ··· 1134 1125 if (!*vc->vc_uni_pagedir_loc) 1135 1126 con_set_default_unimap(vc); 1136 1127 1128 + err = -EINVAL; 1129 + if (vc->vc_cols > VC_MAXCOL || vc->vc_rows > VC_MAXROW || 1130 + vc->vc_screenbuf_size > KMALLOC_MAX_SIZE || !vc->vc_screenbuf_size) 1131 + goto err_free; 1132 + err = -ENOMEM; 1137 1133 vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_KERNEL); 1138 1134 if (!vc->vc_screenbuf) 1139 1135 goto err_free; ··· 1157 1143 visual_deinit(vc); 1158 1144 kfree(vc); 1159 1145 vc_cons[currcons].d = NULL; 1160 - return -ENOMEM; 1146 + return err; 1161 1147 } 1162 1148 1163 1149 static inline int resize_screen(struct vc_data *vc, int width, int height, ··· 1171 1157 1172 1158 return err; 1173 1159 } 1174 - 1175 - /* 1176 - * Change # of rows and columns (0 means unchanged/the size of fg_console) 1177 - * [this is to be used together with some user program 1178 - * like resize that changes the hardware videomode] 1179 - */ 1180 - #define VC_RESIZE_MAXCOL (32767) 1181 - #define VC_RESIZE_MAXROW (32767) 1182 1160 1183 1161 /** 1184 1162 * vc_do_resize - resizing method for the tty ··· 1207 1201 user = vc->vc_resize_user; 1208 1202 vc->vc_resize_user = 0; 1209 1203 1210 - if (cols > VC_RESIZE_MAXCOL || lines > VC_RESIZE_MAXROW) 1204 + if (cols > VC_MAXCOL || lines > VC_MAXROW) 1211 1205 return -EINVAL; 1212 1206 1213 1207 new_cols = (cols ? 
cols : vc->vc_cols); ··· 1218 1212 if (new_cols == vc->vc_cols && new_rows == vc->vc_rows) 1219 1213 return 0; 1220 1214 1221 - if (new_screen_size > KMALLOC_MAX_SIZE) 1215 + if (new_screen_size > KMALLOC_MAX_SIZE || !new_screen_size) 1222 1216 return -EINVAL; 1223 1217 newscreen = kzalloc(new_screen_size, GFP_USER); 1224 1218 if (!newscreen) ··· 3399 3393 INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK); 3400 3394 tty_port_init(&vc->port); 3401 3395 visual_init(vc, currcons, 1); 3396 + /* Assuming vc->vc_{cols,rows,screenbuf_size} are sane here. */ 3402 3397 vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_NOWAIT); 3403 3398 vc_init(vc, vc->vc_rows, vc->vc_cols, 3404 3399 currcons || !vc->vc_sw->con_save_screen);
+2 -2
drivers/uio/uio_pdrv_genirq.c
··· 159 159 priv->pdev = pdev; 160 160 161 161 if (!uioinfo->irq) { 162 - ret = platform_get_irq(pdev, 0); 162 + ret = platform_get_irq_optional(pdev, 0); 163 163 uioinfo->irq = ret; 164 - if (ret == -ENXIO && pdev->dev.of_node) 164 + if (ret == -ENXIO) 165 165 uioinfo->irq = UIO_IRQ_NONE; 166 166 else if (ret == -EPROBE_DEFER) 167 167 return ret;
+1 -1
drivers/usb/c67x00/c67x00-sched.c
··· 486 486 c67x00_release_urb(c67x00, urb); 487 487 usb_hcd_unlink_urb_from_ep(c67x00_hcd_to_hcd(c67x00), urb); 488 488 spin_unlock(&c67x00->lock); 489 - usb_hcd_giveback_urb(c67x00_hcd_to_hcd(c67x00), urb, urbp->status); 489 + usb_hcd_giveback_urb(c67x00_hcd_to_hcd(c67x00), urb, status); 490 490 spin_lock(&c67x00->lock); 491 491 } 492 492
+15 -15
drivers/usb/cdns3/ep0.c
··· 37 37 struct cdns3_usb_regs __iomem *regs = priv_dev->regs; 38 38 struct cdns3_endpoint *priv_ep = priv_dev->eps[0]; 39 39 40 - priv_ep->trb_pool[0].buffer = TRB_BUFFER(dma_addr); 41 - priv_ep->trb_pool[0].length = TRB_LEN(length); 40 + priv_ep->trb_pool[0].buffer = cpu_to_le32(TRB_BUFFER(dma_addr)); 41 + priv_ep->trb_pool[0].length = cpu_to_le32(TRB_LEN(length)); 42 42 43 43 if (zlp) { 44 - priv_ep->trb_pool[0].control = TRB_CYCLE | TRB_TYPE(TRB_NORMAL); 45 - priv_ep->trb_pool[1].buffer = TRB_BUFFER(dma_addr); 46 - priv_ep->trb_pool[1].length = TRB_LEN(0); 47 - priv_ep->trb_pool[1].control = TRB_CYCLE | TRB_IOC | 48 - TRB_TYPE(TRB_NORMAL); 44 + priv_ep->trb_pool[0].control = cpu_to_le32(TRB_CYCLE | TRB_TYPE(TRB_NORMAL)); 45 + priv_ep->trb_pool[1].buffer = cpu_to_le32(TRB_BUFFER(dma_addr)); 46 + priv_ep->trb_pool[1].length = cpu_to_le32(TRB_LEN(0)); 47 + priv_ep->trb_pool[1].control = cpu_to_le32(TRB_CYCLE | TRB_IOC | 48 + TRB_TYPE(TRB_NORMAL)); 49 49 } else { 50 - priv_ep->trb_pool[0].control = TRB_CYCLE | TRB_IOC | 51 - TRB_TYPE(TRB_NORMAL); 50 + priv_ep->trb_pool[0].control = cpu_to_le32(TRB_CYCLE | TRB_IOC | 51 + TRB_TYPE(TRB_NORMAL)); 52 52 priv_ep->trb_pool[1].control = 0; 53 53 } 54 54 ··· 264 264 case USB_RECIP_INTERFACE: 265 265 return cdns3_ep0_delegate_req(priv_dev, ctrl); 266 266 case USB_RECIP_ENDPOINT: 267 - index = cdns3_ep_addr_to_index(ctrl->wIndex); 267 + index = cdns3_ep_addr_to_index(le16_to_cpu(ctrl->wIndex)); 268 268 priv_ep = priv_dev->eps[index]; 269 269 270 270 /* check if endpoint is stalled or stall is pending */ 271 - cdns3_select_ep(priv_dev, ctrl->wIndex); 271 + cdns3_select_ep(priv_dev, le16_to_cpu(ctrl->wIndex)); 272 272 if (EP_STS_STALL(readl(&priv_dev->regs->ep_sts)) || 273 273 (priv_ep->flags & EP_STALL_PENDING)) 274 274 usb_status = BIT(USB_ENDPOINT_HALT); ··· 381 381 if (!(ctrl->wIndex & ~USB_DIR_IN)) 382 382 return 0; 383 383 384 - index = cdns3_ep_addr_to_index(ctrl->wIndex); 384 + index = 
cdns3_ep_addr_to_index(le16_to_cpu(ctrl->wIndex)); 385 385 priv_ep = priv_dev->eps[index]; 386 386 387 - cdns3_select_ep(priv_dev, ctrl->wIndex); 387 + cdns3_select_ep(priv_dev, le16_to_cpu(ctrl->wIndex)); 388 388 389 389 if (set) 390 390 __cdns3_gadget_ep_set_halt(priv_ep); ··· 445 445 if (priv_dev->gadget.state < USB_STATE_ADDRESS) 446 446 return -EINVAL; 447 447 448 - if (ctrl_req->wLength != 6) { 448 + if (le16_to_cpu(ctrl_req->wLength) != 6) { 449 449 dev_err(priv_dev->dev, "Set SEL should be 6 bytes, got %d\n", 450 450 ctrl_req->wLength); 451 451 return -EINVAL; ··· 469 469 if (ctrl_req->wIndex || ctrl_req->wLength) 470 470 return -EINVAL; 471 471 472 - priv_dev->isoch_delay = ctrl_req->wValue; 472 + priv_dev->isoch_delay = le16_to_cpu(ctrl_req->wValue); 473 473 474 474 return 0; 475 475 }
+3 -3
drivers/usb/cdns3/trace.h
··· 404 404 TP_fast_assign( 405 405 __assign_str(name, priv_ep->name); 406 406 __entry->trb = trb; 407 - __entry->buffer = trb->buffer; 408 - __entry->length = trb->length; 409 - __entry->control = trb->control; 407 + __entry->buffer = le32_to_cpu(trb->buffer); 408 + __entry->length = le32_to_cpu(trb->length); 409 + __entry->control = le32_to_cpu(trb->control); 410 410 __entry->type = usb_endpoint_type(priv_ep->endpoint.desc); 411 411 __entry->last_stream_id = priv_ep->last_stream_id; 412 412 ),
+24
drivers/usb/chipidea/core.c
··· 1243 1243 enable_irq(ci->irq); 1244 1244 } 1245 1245 1246 + /* 1247 + * Handle the wakeup interrupt triggered by extcon connector 1248 + * We need to call ci_irq again for extcon since the first 1249 + * interrupt (wakeup int) only let the controller be out of 1250 + * low power mode, but not handle any interrupts. 1251 + */ 1252 + static void ci_extcon_wakeup_int(struct ci_hdrc *ci) 1253 + { 1254 + struct ci_hdrc_cable *cable_id, *cable_vbus; 1255 + u32 otgsc = hw_read_otgsc(ci, ~0); 1256 + 1257 + cable_id = &ci->platdata->id_extcon; 1258 + cable_vbus = &ci->platdata->vbus_extcon; 1259 + 1260 + if (!IS_ERR(cable_id->edev) && ci->is_otg && 1261 + (otgsc & OTGSC_IDIE) && (otgsc & OTGSC_IDIS)) 1262 + ci_irq(ci->irq, ci); 1263 + 1264 + if (!IS_ERR(cable_vbus->edev) && ci->is_otg && 1265 + (otgsc & OTGSC_BSVIE) && (otgsc & OTGSC_BSVIS)) 1266 + ci_irq(ci->irq, ci); 1267 + } 1268 + 1246 1269 static int ci_controller_resume(struct device *dev) 1247 1270 { 1248 1271 struct ci_hdrc *ci = dev_get_drvdata(dev); ··· 1298 1275 enable_irq(ci->irq); 1299 1276 if (ci_otg_is_fsm_mode(ci)) 1300 1277 ci_otg_fsm_wakeup_by_srp(ci); 1278 + ci_extcon_wakeup_int(ci); 1301 1279 } 1302 1280 1303 1281 return 0;
+2 -1
drivers/usb/dwc2/platform.c
··· 342 342 { 343 343 struct dwc2_hsotg *hsotg = platform_get_drvdata(dev); 344 344 345 - disable_irq(hsotg->irq); 345 + dwc2_disable_global_interrupts(hsotg); 346 + synchronize_irq(hsotg->irq); 346 347 } 347 348 348 349 /**
+8
drivers/usb/dwc3/dwc3-pci.c
··· 38 38 #define PCI_DEVICE_ID_INTEL_ICLLP 0x34ee 39 39 #define PCI_DEVICE_ID_INTEL_EHLLP 0x4b7e 40 40 #define PCI_DEVICE_ID_INTEL_TGPLP 0xa0ee 41 + #define PCI_DEVICE_ID_INTEL_TGPH 0x43ee 42 + #define PCI_DEVICE_ID_INTEL_JSP 0x4dee 41 43 42 44 #define PCI_INTEL_BXT_DSM_GUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511" 43 45 #define PCI_INTEL_BXT_FUNC_PMU_PWR 4 ··· 358 356 (kernel_ulong_t) &dwc3_pci_intel_properties, }, 359 357 360 358 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGPLP), 359 + (kernel_ulong_t) &dwc3_pci_intel_properties, }, 360 + 361 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGPH), 362 + (kernel_ulong_t) &dwc3_pci_intel_properties, }, 363 + 364 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_JSP), 361 365 (kernel_ulong_t) &dwc3_pci_intel_properties, }, 362 366 363 367 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_NL_USB),
+2
drivers/usb/gadget/function/f_uac1_legacy.c
··· 336 336 337 337 /* Copy buffer is full, add it to the play_queue */ 338 338 if (audio_buf_size - copy_buf->actual < req->actual) { 339 + spin_lock_irq(&audio->lock); 339 340 list_add_tail(&copy_buf->list, &audio->play_queue); 341 + spin_unlock_irq(&audio->lock); 340 342 schedule_work(&audio->playback_work); 341 343 copy_buf = f_audio_buffer_alloc(audio_buf_size); 342 344 if (IS_ERR(copy_buf))
+2 -8
drivers/usb/gadget/udc/atmel_usba_udc.c
··· 676 676 677 677 if (!ep->ep.desc) { 678 678 spin_unlock_irqrestore(&udc->lock, flags); 679 - /* REVISIT because this driver disables endpoints in 680 - * reset_all_endpoints() before calling disconnect(), 681 - * most gadget drivers would trigger this non-error ... 682 - */ 683 - if (udc->gadget.speed != USB_SPEED_UNKNOWN) 684 - DBG(DBG_ERR, "ep_disable: %s not enabled\n", 685 - ep->ep.name); 679 + DBG(DBG_ERR, "ep_disable: %s not enabled\n", ep->ep.name); 686 680 return -EINVAL; 687 681 } 688 682 ep->ep.desc = NULL; ··· 865 871 u32 status; 866 872 867 873 DBG(DBG_GADGET | DBG_QUEUE, "ep_dequeue: %s, req %p\n", 868 - ep->ep.name, req); 874 + ep->ep.name, _req); 869 875 870 876 spin_lock_irqsave(&udc->lock, flags); 871 877
+5 -2
drivers/usb/gadget/udc/gr_udc.c
··· 1980 1980 1981 1981 if (num == 0) { 1982 1982 _req = gr_alloc_request(&ep->ep, GFP_ATOMIC); 1983 + if (!_req) 1984 + return -ENOMEM; 1985 + 1983 1986 buf = devm_kzalloc(dev->dev, PAGE_SIZE, GFP_DMA | GFP_ATOMIC); 1984 - if (!_req || !buf) { 1985 - /* possible _req freed by gr_probe via gr_remove */ 1987 + if (!buf) { 1988 + gr_free_request(&ep->ep, _req); 1986 1989 return -ENOMEM; 1987 1990 } 1988 1991
+1 -1
drivers/usb/gadget/usbstring.c
··· 68 68 69 69 /** 70 70 * usb_validate_langid - validate usb language identifiers 71 - * @lang: usb language identifier 71 + * @langid: usb language identifier 72 72 * 73 73 * Returns true for valid language identifier, otherwise false. 74 74 */
+4
drivers/usb/host/xhci-mtk-sch.c
··· 557 557 if (is_fs_or_ls(speed) && !has_tt) 558 558 return false; 559 559 560 + /* skip endpoint with zero maxpkt */ 561 + if (usb_endpoint_maxp(&ep->desc) == 0) 562 + return false; 563 + 560 564 return true; 561 565 } 562 566
+3
drivers/usb/host/xhci-pci.c
··· 265 265 if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && 266 266 pdev->device == 0x1142) 267 267 xhci->quirks |= XHCI_TRUST_TX_LENGTH; 268 + if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && 269 + pdev->device == 0x2142) 270 + xhci->quirks |= XHCI_NO_64BIT_SUPPORT; 268 271 269 272 if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && 270 273 pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI)
+1 -1
drivers/usb/host/xhci-tegra.c
··· 856 856 if (!tegra->context.ipfs) 857 857 return -ENOMEM; 858 858 859 - tegra->context.fpci = devm_kcalloc(tegra->dev, soc->ipfs.num_offsets, 859 + tegra->context.fpci = devm_kcalloc(tegra->dev, soc->fpci.num_offsets, 860 860 sizeof(u32), GFP_KERNEL); 861 861 if (!tegra->context.fpci) 862 862 return -ENOMEM;
+1
drivers/usb/serial/ch341.c
··· 77 77 78 78 static const struct usb_device_id id_table[] = { 79 79 { USB_DEVICE(0x4348, 0x5523) }, 80 + { USB_DEVICE(0x1a86, 0x7522) }, 80 81 { USB_DEVICE(0x1a86, 0x7523) }, 81 82 { USB_DEVICE(0x1a86, 0x5523) }, 82 83 { },
+2
drivers/usb/serial/cypress_m8.c
··· 59 59 60 60 static const struct usb_device_id id_table_cyphidcomrs232[] = { 61 61 { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) }, 62 + { USB_DEVICE(VENDOR_ID_SAI, PRODUCT_ID_CYPHIDCOM) }, 62 63 { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) }, 63 64 { USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) }, 64 65 { } /* Terminating entry */ ··· 74 73 { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB) }, 75 74 { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) }, 76 75 { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) }, 76 + { USB_DEVICE(VENDOR_ID_SAI, PRODUCT_ID_CYPHIDCOM) }, 77 77 { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) }, 78 78 { USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) }, 79 79 { USB_DEVICE(VENDOR_ID_DAZZLE, PRODUCT_ID_CA42) },
+3
drivers/usb/serial/cypress_m8.h
··· 25 25 #define VENDOR_ID_CYPRESS 0x04b4 26 26 #define PRODUCT_ID_CYPHIDCOM 0x5500 27 27 28 + /* Simply Automated HID->COM UPB PIM (using Cypress PID 0x5500) */ 29 + #define VENDOR_ID_SAI 0x17dd 30 + 28 31 /* FRWD Dongle - a GPS sports watch */ 29 32 #define VENDOR_ID_FRWD 0x6737 30 33 #define PRODUCT_ID_CYPHIDCOM_FRWD 0x0001
+5 -3
drivers/usb/serial/iuu_phoenix.c
··· 697 697 struct iuu_private *priv = usb_get_serial_port_data(port); 698 698 unsigned long flags; 699 699 700 - if (count > 256) 701 - return -ENOMEM; 702 - 703 700 spin_lock_irqsave(&priv->lock, flags); 701 + 702 + count = min(count, 256 - priv->writelen); 703 + if (count == 0) 704 + goto out; 704 705 705 706 /* fill the buffer */ 706 707 memcpy(priv->writebuf + priv->writelen, buf, count); 707 708 priv->writelen += count; 709 + out: 708 710 spin_unlock_irqrestore(&priv->lock, flags); 709 711 710 712 return count;
+6
drivers/usb/serial/option.c
··· 245 245 /* These Quectel products use Quectel's vendor ID */ 246 246 #define QUECTEL_PRODUCT_EC21 0x0121 247 247 #define QUECTEL_PRODUCT_EC25 0x0125 248 + #define QUECTEL_PRODUCT_EG95 0x0195 248 249 #define QUECTEL_PRODUCT_BG96 0x0296 249 250 #define QUECTEL_PRODUCT_EP06 0x0306 250 251 #define QUECTEL_PRODUCT_EM12 0x0512 ··· 1097 1096 { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21), 1098 1097 .driver_info = RSVD(4) }, 1099 1098 { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25), 1099 + .driver_info = RSVD(4) }, 1100 + { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95), 1100 1101 .driver_info = RSVD(4) }, 1101 1102 { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96), 1102 1103 .driver_info = RSVD(4) }, ··· 2031 2028 .driver_info = RSVD(4) | RSVD(5) }, 2032 2029 { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */ 2033 2030 .driver_info = RSVD(6) }, 2031 + { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */ 2032 + { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */ 2033 + { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */ 2034 2034 { } /* Terminating entry */ 2035 2035 }; 2036 2036 MODULE_DEVICE_TABLE(usb, option_ids);
+5
drivers/vfio/pci/vfio_pci.c
··· 521 521 vfio_pci_vf_token_user_add(vdev, -1); 522 522 vfio_spapr_pci_eeh_release(vdev->pdev); 523 523 vfio_pci_disable(vdev); 524 + mutex_lock(&vdev->igate); 524 525 if (vdev->err_trigger) { 525 526 eventfd_ctx_put(vdev->err_trigger); 526 527 vdev->err_trigger = NULL; 527 528 } 529 + mutex_unlock(&vdev->igate); 530 + 531 + mutex_lock(&vdev->igate); 528 532 if (vdev->req_trigger) { 529 533 eventfd_ctx_put(vdev->req_trigger); 530 534 vdev->req_trigger = NULL; 531 535 } 536 + mutex_unlock(&vdev->igate); 532 537 } 533 538 534 539 mutex_unlock(&vdev->reflck->lock);
+2 -2
drivers/video/fbdev/core/bitblit.c
··· 216 216 region.color = color; 217 217 region.rop = ROP_COPY; 218 218 219 - if (rw && !bottom_only) { 219 + if ((int) rw > 0 && !bottom_only) { 220 220 region.dx = info->var.xoffset + rs; 221 221 region.dy = 0; 222 222 region.width = rw; ··· 224 224 info->fbops->fb_fillrect(info, &region); 225 225 } 226 226 227 - if (bh) { 227 + if ((int) bh > 0) { 228 228 region.dx = info->var.xoffset; 229 229 region.dy = info->var.yoffset + bs; 230 230 region.width = rs;
+2 -2
drivers/video/fbdev/core/fbcon_ccw.c
··· 201 201 region.color = color; 202 202 region.rop = ROP_COPY; 203 203 204 - if (rw && !bottom_only) { 204 + if ((int) rw > 0 && !bottom_only) { 205 205 region.dx = 0; 206 206 region.dy = info->var.yoffset; 207 207 region.height = rw; ··· 209 209 info->fbops->fb_fillrect(info, &region); 210 210 } 211 211 212 - if (bh) { 212 + if ((int) bh > 0) { 213 213 region.dx = info->var.xoffset + bs; 214 214 region.dy = 0; 215 215 region.height = info->var.yres_virtual;
+2 -2
drivers/video/fbdev/core/fbcon_cw.c
··· 184 184 region.color = color; 185 185 region.rop = ROP_COPY; 186 186 187 - if (rw && !bottom_only) { 187 + if ((int) rw > 0 && !bottom_only) { 188 188 region.dx = 0; 189 189 region.dy = info->var.yoffset + rs; 190 190 region.height = rw; ··· 192 192 info->fbops->fb_fillrect(info, &region); 193 193 } 194 194 195 - if (bh) { 195 + if ((int) bh > 0) { 196 196 region.dx = info->var.xoffset; 197 197 region.dy = info->var.yoffset; 198 198 region.height = info->var.yres;
+2 -2
drivers/video/fbdev/core/fbcon_ud.c
··· 231 231 region.color = color; 232 232 region.rop = ROP_COPY; 233 233 234 - if (rw && !bottom_only) { 234 + if ((int) rw > 0 && !bottom_only) { 235 235 region.dy = 0; 236 236 region.dx = info->var.xoffset; 237 237 region.width = rw; ··· 239 239 info->fbops->fb_fillrect(info, &region); 240 240 } 241 241 242 - if (bh) { 242 + if ((int) bh > 0) { 243 243 region.dy = info->var.yoffset; 244 244 region.dx = info->var.xoffset; 245 245 region.height = bh;
+4 -2
drivers/virt/vboxguest/vboxguest_core.c
··· 1444 1444 or_mask = caps->u.in.or_mask; 1445 1445 not_mask = caps->u.in.not_mask; 1446 1446 1447 - if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK) 1447 + if ((or_mask | not_mask) & ~VMMDEV_GUEST_CAPABILITIES_MASK) 1448 1448 return -EINVAL; 1449 1449 1450 1450 ret = vbg_set_session_capabilities(gdev, session, or_mask, not_mask, ··· 1520 1520 1521 1521 /* For VMMDEV_REQUEST hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT */ 1522 1522 if (req_no_size == VBG_IOCTL_VMMDEV_REQUEST(0) || 1523 - req == VBG_IOCTL_VMMDEV_REQUEST_BIG) 1523 + req == VBG_IOCTL_VMMDEV_REQUEST_BIG || 1524 + req == VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT) 1524 1525 return vbg_ioctl_vmmrequest(gdev, session, data); 1525 1526 1526 1527 if (hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT) ··· 1559 1558 case VBG_IOCTL_HGCM_CALL(0): 1560 1559 return vbg_ioctl_hgcm_call(gdev, session, f32bit, data); 1561 1560 case VBG_IOCTL_LOG(0): 1561 + case VBG_IOCTL_LOG_ALT(0): 1562 1562 return vbg_ioctl_log(data); 1563 1563 } 1564 1564
+15
drivers/virt/vboxguest/vboxguest_core.h
··· 15 15 #include <linux/vboxguest.h> 16 16 #include "vmmdev.h" 17 17 18 + /* 19 + * The mainline kernel version (this version) of the vboxguest module 20 + * contained a bug where it defined VBGL_IOCTL_VMMDEV_REQUEST_BIG and 21 + * VBGL_IOCTL_LOG using _IOC(_IOC_READ | _IOC_WRITE, 'V', ...) instead 22 + * of _IO(V, ...) as the out of tree VirtualBox upstream version does. 23 + * 24 + * These _ALT definitions keep compatibility with the wrong defines the 25 + * mainline kernel version used for a while. 26 + * Note the VirtualBox userspace bits have always been built against 27 + * VirtualBox upstream's headers, so this is likely not necessary. But 28 + * we must never break our ABI so we keep these around to be 100% sure. 29 + */ 30 + #define VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT _IOC(_IOC_READ | _IOC_WRITE, 'V', 3, 0) 31 + #define VBG_IOCTL_LOG_ALT(s) _IOC(_IOC_READ | _IOC_WRITE, 'V', 9, s) 32 + 18 33 struct vbg_session; 19 34 20 35 /** VBox guest memory balloon. */
+2 -1
drivers/virt/vboxguest/vboxguest_linux.c
··· 131 131 * the need for a bounce-buffer and another copy later on. 132 132 */ 133 133 is_vmmdev_req = (req & ~IOCSIZE_MASK) == VBG_IOCTL_VMMDEV_REQUEST(0) || 134 - req == VBG_IOCTL_VMMDEV_REQUEST_BIG; 134 + req == VBG_IOCTL_VMMDEV_REQUEST_BIG || 135 + req == VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT; 135 136 136 137 if (is_vmmdev_req) 137 138 buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT,
+2
drivers/virt/vboxguest/vmmdev.h
··· 206 206 * not. 207 207 */ 208 208 #define VMMDEV_GUEST_SUPPORTS_GRAPHICS BIT(2) 209 + /* The mask of valid capabilities, for sanity checking. */ 210 + #define VMMDEV_GUEST_CAPABILITIES_MASK 0x00000007U 209 211 210 212 /** struct vmmdev_hypervisorinfo - Hypervisor info structure. */ 211 213 struct vmmdev_hypervisorinfo {
+2 -2
drivers/virtio/virtio_mmio.c
··· 641 641 &vm_cmdline_id, &consumed); 642 642 643 643 /* 644 - * sscanf() must processes at least 2 chunks; also there 644 + * sscanf() must process at least 2 chunks; also there 645 645 * must be no extra characters after the last chunk, so 646 646 * str[consumed] must be '\0' 647 647 */ 648 - if (processed < 2 || str[consumed]) 648 + if (processed < 2 || str[consumed] || irq == 0) 649 649 return -EINVAL; 650 650 651 651 resources[0].flags = IORESOURCE_MEM;
+82 -89
drivers/xen/xenbus/xenbus_client.c
··· 69 69 unsigned int nr_handles; 70 70 }; 71 71 72 + struct map_ring_valloc { 73 + struct xenbus_map_node *node; 74 + 75 + /* Why do we need two arrays? See comment of __xenbus_map_ring */ 76 + union { 77 + unsigned long addrs[XENBUS_MAX_RING_GRANTS]; 78 + pte_t *ptes[XENBUS_MAX_RING_GRANTS]; 79 + }; 80 + phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS]; 81 + 82 + struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS]; 83 + struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS]; 84 + 85 + unsigned int idx; /* HVM only. */ 86 + }; 87 + 72 88 static DEFINE_SPINLOCK(xenbus_valloc_lock); 73 89 static LIST_HEAD(xenbus_valloc_pages); 74 90 75 91 struct xenbus_ring_ops { 76 - int (*map)(struct xenbus_device *dev, 92 + int (*map)(struct xenbus_device *dev, struct map_ring_valloc *info, 77 93 grant_ref_t *gnt_refs, unsigned int nr_grefs, 78 94 void **vaddr); 79 95 int (*unmap)(struct xenbus_device *dev, void *vaddr); ··· 456 440 * Map @nr_grefs pages of memory into this domain from another 457 441 * domain's grant table. xenbus_map_ring_valloc allocates @nr_grefs 458 442 * pages of virtual address space, maps the pages to that address, and 459 - * sets *vaddr to that address. Returns 0 on success, and GNTST_* 460 - * (see xen/include/interface/grant_table.h) or -ENOMEM / -EINVAL on 443 + * sets *vaddr to that address. Returns 0 on success, and -errno on 461 444 * error. If an error is returned, device will switch to 462 445 * XenbusStateClosing and the error message will be saved in XenStore. 463 446 */ ··· 464 449 unsigned int nr_grefs, void **vaddr) 465 450 { 466 451 int err; 452 + struct map_ring_valloc *info; 467 453 468 - err = ring_ops->map(dev, gnt_refs, nr_grefs, vaddr); 469 - /* Some hypervisors are buggy and can return 1. 
*/ 470 - if (err > 0) 471 - err = GNTST_general_error; 454 + *vaddr = NULL; 472 455 456 + if (nr_grefs > XENBUS_MAX_RING_GRANTS) 457 + return -EINVAL; 458 + 459 + info = kzalloc(sizeof(*info), GFP_KERNEL); 460 + if (!info) 461 + return -ENOMEM; 462 + 463 + info->node = kzalloc(sizeof(*info->node), GFP_KERNEL); 464 + if (!info->node) 465 + err = -ENOMEM; 466 + else 467 + err = ring_ops->map(dev, info, gnt_refs, nr_grefs, vaddr); 468 + 469 + kfree(info->node); 470 + kfree(info); 473 471 return err; 474 472 } 475 473 EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc); ··· 494 466 grant_ref_t *gnt_refs, 495 467 unsigned int nr_grefs, 496 468 grant_handle_t *handles, 497 - phys_addr_t *addrs, 469 + struct map_ring_valloc *info, 498 470 unsigned int flags, 499 471 bool *leaked) 500 472 { 501 - struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS]; 502 - struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS]; 503 473 int i, j; 504 - int err = GNTST_okay; 505 474 506 475 if (nr_grefs > XENBUS_MAX_RING_GRANTS) 507 476 return -EINVAL; 508 477 509 478 for (i = 0; i < nr_grefs; i++) { 510 - memset(&map[i], 0, sizeof(map[i])); 511 - gnttab_set_map_op(&map[i], addrs[i], flags, gnt_refs[i], 512 - dev->otherend_id); 479 + gnttab_set_map_op(&info->map[i], info->phys_addrs[i], flags, 480 + gnt_refs[i], dev->otherend_id); 513 481 handles[i] = INVALID_GRANT_HANDLE; 514 482 } 515 483 516 - gnttab_batch_map(map, i); 484 + gnttab_batch_map(info->map, i); 517 485 518 486 for (i = 0; i < nr_grefs; i++) { 519 - if (map[i].status != GNTST_okay) { 520 - err = map[i].status; 521 - xenbus_dev_fatal(dev, map[i].status, 487 + if (info->map[i].status != GNTST_okay) { 488 + xenbus_dev_fatal(dev, info->map[i].status, 522 489 "mapping in shared page %d from domain %d", 523 490 gnt_refs[i], dev->otherend_id); 524 491 goto fail; 525 492 } else 526 - handles[i] = map[i].handle; 493 + handles[i] = info->map[i].handle; 527 494 } 528 495 529 - return GNTST_okay; 496 + return 0; 530 497 531 498 fail: 532 499 
for (i = j = 0; i < nr_grefs; i++) { 533 500 if (handles[i] != INVALID_GRANT_HANDLE) { 534 - memset(&unmap[j], 0, sizeof(unmap[j])); 535 - gnttab_set_unmap_op(&unmap[j], (phys_addr_t)addrs[i], 501 + gnttab_set_unmap_op(&info->unmap[j], 502 + info->phys_addrs[i], 536 503 GNTMAP_host_map, handles[i]); 537 504 j++; 538 505 } 539 506 } 540 507 541 - if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, j)) 508 + if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, info->unmap, j)) 542 509 BUG(); 543 510 544 511 *leaked = false; 545 512 for (i = 0; i < j; i++) { 546 - if (unmap[i].status != GNTST_okay) { 513 + if (info->unmap[i].status != GNTST_okay) { 547 514 *leaked = true; 548 515 break; 549 516 } 550 517 } 551 518 552 - return err; 519 + return -ENOENT; 553 520 } 554 521 555 522 /** ··· 589 566 return err; 590 567 } 591 568 592 - struct map_ring_valloc_hvm 593 - { 594 - unsigned int idx; 595 - 596 - /* Why do we need two arrays? See comment of __xenbus_map_ring */ 597 - phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS]; 598 - unsigned long addrs[XENBUS_MAX_RING_GRANTS]; 599 - }; 600 - 601 569 static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn, 602 570 unsigned int goffset, 603 571 unsigned int len, 604 572 void *data) 605 573 { 606 - struct map_ring_valloc_hvm *info = data; 574 + struct map_ring_valloc *info = data; 607 575 unsigned long vaddr = (unsigned long)gfn_to_virt(gfn); 608 576 609 577 info->phys_addrs[info->idx] = vaddr; ··· 603 589 info->idx++; 604 590 } 605 591 606 - static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev, 607 - grant_ref_t *gnt_ref, 608 - unsigned int nr_grefs, 609 - void **vaddr) 592 + static int xenbus_map_ring_hvm(struct xenbus_device *dev, 593 + struct map_ring_valloc *info, 594 + grant_ref_t *gnt_ref, 595 + unsigned int nr_grefs, 596 + void **vaddr) 610 597 { 611 - struct xenbus_map_node *node; 598 + struct xenbus_map_node *node = info->node; 612 599 int err; 613 600 void *addr; 614 601 bool leaked = 
false; 615 - struct map_ring_valloc_hvm info = { 616 - .idx = 0, 617 - }; 618 602 unsigned int nr_pages = XENBUS_PAGES(nr_grefs); 619 - 620 - if (nr_grefs > XENBUS_MAX_RING_GRANTS) 621 - return -EINVAL; 622 - 623 - *vaddr = NULL; 624 - 625 - node = kzalloc(sizeof(*node), GFP_KERNEL); 626 - if (!node) 627 - return -ENOMEM; 628 603 629 604 err = alloc_xenballooned_pages(nr_pages, node->hvm.pages); 630 605 if (err) ··· 621 618 622 619 gnttab_foreach_grant(node->hvm.pages, nr_grefs, 623 620 xenbus_map_ring_setup_grant_hvm, 624 - &info); 621 + info); 625 622 626 623 err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles, 627 - info.phys_addrs, GNTMAP_host_map, &leaked); 624 + info, GNTMAP_host_map, &leaked); 628 625 node->nr_handles = nr_grefs; 629 626 630 627 if (err) ··· 644 641 spin_unlock(&xenbus_valloc_lock); 645 642 646 643 *vaddr = addr; 644 + info->node = NULL; 645 + 647 646 return 0; 648 647 649 648 out_xenbus_unmap_ring: 650 649 if (!leaked) 651 - xenbus_unmap_ring(dev, node->handles, nr_grefs, info.addrs); 650 + xenbus_unmap_ring(dev, node->handles, nr_grefs, info->addrs); 652 651 else 653 652 pr_alert("leaking %p size %u page(s)", 654 653 addr, nr_pages); ··· 658 653 if (!leaked) 659 654 free_xenballooned_pages(nr_pages, node->hvm.pages); 660 655 out_err: 661 - kfree(node); 662 656 return err; 663 657 } 664 658 ··· 680 676 EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree); 681 677 682 678 #ifdef CONFIG_XEN_PV 683 - static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev, 684 - grant_ref_t *gnt_refs, 685 - unsigned int nr_grefs, 686 - void **vaddr) 679 + static int xenbus_map_ring_pv(struct xenbus_device *dev, 680 + struct map_ring_valloc *info, 681 + grant_ref_t *gnt_refs, 682 + unsigned int nr_grefs, 683 + void **vaddr) 687 684 { 688 - struct xenbus_map_node *node; 685 + struct xenbus_map_node *node = info->node; 689 686 struct vm_struct *area; 690 - pte_t *ptes[XENBUS_MAX_RING_GRANTS]; 691 - phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS]; 692 687 
int err = GNTST_okay; 693 688 int i; 694 689 bool leaked; 695 690 696 - *vaddr = NULL; 697 - 698 - if (nr_grefs > XENBUS_MAX_RING_GRANTS) 699 - return -EINVAL; 700 - 701 - node = kzalloc(sizeof(*node), GFP_KERNEL); 702 - if (!node) 691 + area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, info->ptes); 692 + if (!area) 703 693 return -ENOMEM; 704 - 705 - area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, ptes); 706 - if (!area) { 707 - kfree(node); 708 - return -ENOMEM; 709 - } 710 694 711 695 for (i = 0; i < nr_grefs; i++) 712 - phys_addrs[i] = arbitrary_virt_to_machine(ptes[i]).maddr; 696 + info->phys_addrs[i] = 697 + arbitrary_virt_to_machine(info->ptes[i]).maddr; 713 698 714 699 err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles, 715 - phys_addrs, 716 - GNTMAP_host_map | GNTMAP_contains_pte, 700 + info, GNTMAP_host_map | GNTMAP_contains_pte, 717 701 &leaked); 718 702 if (err) 719 703 goto failed; ··· 714 722 spin_unlock(&xenbus_valloc_lock); 715 723 716 724 *vaddr = area->addr; 725 + info->node = NULL; 726 + 717 727 return 0; 718 728 719 729 failed: ··· 724 730 else 725 731 pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs); 726 732 727 - kfree(node); 728 733 return err; 729 734 } 730 735 731 - static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr) 736 + static int xenbus_unmap_ring_pv(struct xenbus_device *dev, void *vaddr) 732 737 { 733 738 struct xenbus_map_node *node; 734 739 struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS]; ··· 791 798 } 792 799 793 800 static const struct xenbus_ring_ops ring_ops_pv = { 794 - .map = xenbus_map_ring_valloc_pv, 795 - .unmap = xenbus_unmap_ring_vfree_pv, 801 + .map = xenbus_map_ring_pv, 802 + .unmap = xenbus_unmap_ring_pv, 796 803 }; 797 804 #endif 798 805 799 - struct unmap_ring_vfree_hvm 806 + struct unmap_ring_hvm 800 807 { 801 808 unsigned int idx; 802 809 unsigned long addrs[XENBUS_MAX_RING_GRANTS]; ··· 807 814 unsigned int len, 808 815 void *data) 809 816 { 810 - struct 
unmap_ring_vfree_hvm *info = data; 817 + struct unmap_ring_hvm *info = data; 811 818 812 819 info->addrs[info->idx] = (unsigned long)gfn_to_virt(gfn); 813 820 814 821 info->idx++; 815 822 } 816 823 817 - static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr) 824 + static int xenbus_unmap_ring_hvm(struct xenbus_device *dev, void *vaddr) 818 825 { 819 826 int rv; 820 827 struct xenbus_map_node *node; 821 828 void *addr; 822 - struct unmap_ring_vfree_hvm info = { 829 + struct unmap_ring_hvm info = { 823 830 .idx = 0, 824 831 }; 825 832 unsigned int nr_pages; ··· 880 887 EXPORT_SYMBOL_GPL(xenbus_read_driver_state); 881 888 882 889 static const struct xenbus_ring_ops ring_ops_hvm = { 883 - .map = xenbus_map_ring_valloc_hvm, 884 - .unmap = xenbus_unmap_ring_vfree_hvm, 890 + .map = xenbus_map_ring_hvm, 891 + .unmap = xenbus_unmap_ring_hvm, 885 892 }; 886 893 887 894 void __init xenbus_ring_ops_init(void)
+2 -2
fs/afs/fs_operation.c
··· 71 71 swap(vnode, vnode2); 72 72 73 73 if (mutex_lock_interruptible(&vnode->io_lock) < 0) { 74 - op->error = -EINTR; 74 + op->error = -ERESTARTSYS; 75 75 op->flags |= AFS_OPERATION_STOP; 76 76 _leave(" = f [I 0]"); 77 77 return false; ··· 80 80 81 81 if (vnode2) { 82 82 if (mutex_lock_interruptible_nested(&vnode2->io_lock, 1) < 0) { 83 - op->error = -EINTR; 83 + op->error = -ERESTARTSYS; 84 84 op->flags |= AFS_OPERATION_STOP; 85 85 mutex_unlock(&vnode->io_lock); 86 86 op->flags &= ~AFS_OPERATION_LOCK_0;
+1
fs/afs/write.c
··· 449 449 op->store.first_offset = offset; 450 450 op->store.last_to = to; 451 451 op->mtime = vnode->vfs_inode.i_mtime; 452 + op->flags |= AFS_OPERATION_UNINTR; 452 453 op->ops = &afs_store_data_operation; 453 454 454 455 try_next_key:
+1 -1
fs/autofs/waitq.c
··· 53 53 54 54 mutex_lock(&sbi->pipe_mutex); 55 55 while (bytes) { 56 - wr = __kernel_write(file, data, bytes, &file->f_pos); 56 + wr = kernel_write(file, data, bytes, &file->f_pos); 57 57 if (wr <= 0) 58 58 break; 59 59 data += wr;
+1
fs/btrfs/backref.c
··· 1461 1461 if (ret < 0 && ret != -ENOENT) { 1462 1462 ulist_free(tmp); 1463 1463 ulist_free(*roots); 1464 + *roots = NULL; 1464 1465 return ret; 1465 1466 } 1466 1467 node = ulist_next(tmp, &uiter);
+1 -1
fs/btrfs/ctree.c
··· 1196 1196 switch (tm->op) { 1197 1197 case MOD_LOG_KEY_REMOVE_WHILE_FREEING: 1198 1198 BUG_ON(tm->slot < n); 1199 - /* Fallthrough */ 1199 + fallthrough; 1200 1200 case MOD_LOG_KEY_REMOVE_WHILE_MOVING: 1201 1201 case MOD_LOG_KEY_REMOVE: 1202 1202 btrfs_set_node_key(eb, &tm->key, tm->slot);
+1
fs/btrfs/discard.c
··· 619 619 list_for_each_entry_safe(block_group, next, &fs_info->unused_bgs, 620 620 bg_list) { 621 621 list_del_init(&block_group->bg_list); 622 + btrfs_put_block_group(block_group); 622 623 btrfs_discard_queue_work(&fs_info->discard_ctl, block_group); 623 624 } 624 625 spin_unlock(&fs_info->unused_bgs_lock);
+4 -2
fs/btrfs/disk-io.c
··· 2593 2593 !extent_buffer_uptodate(tree_root->node)) { 2594 2594 handle_error = true; 2595 2595 2596 - if (IS_ERR(tree_root->node)) 2596 + if (IS_ERR(tree_root->node)) { 2597 2597 ret = PTR_ERR(tree_root->node); 2598 - else if (!extent_buffer_uptodate(tree_root->node)) 2598 + tree_root->node = NULL; 2599 + } else if (!extent_buffer_uptodate(tree_root->node)) { 2599 2600 ret = -EUCLEAN; 2601 + } 2600 2602 2601 2603 btrfs_warn(fs_info, "failed to read tree root"); 2602 2604 continue;
+26 -17
fs/btrfs/extent_io.c
··· 1999 1999 if (!PageDirty(pages[i]) || 2000 2000 pages[i]->mapping != mapping) { 2001 2001 unlock_page(pages[i]); 2002 - put_page(pages[i]); 2002 + for (; i < ret; i++) 2003 + put_page(pages[i]); 2003 2004 err = -EAGAIN; 2004 2005 goto out; 2005 2006 } ··· 5059 5058 static void check_buffer_tree_ref(struct extent_buffer *eb) 5060 5059 { 5061 5060 int refs; 5062 - /* the ref bit is tricky. We have to make sure it is set 5063 - * if we have the buffer dirty. Otherwise the 5064 - * code to free a buffer can end up dropping a dirty 5065 - * page 5061 + /* 5062 + * The TREE_REF bit is first set when the extent_buffer is added 5063 + * to the radix tree. It is also reset, if unset, when a new reference 5064 + * is created by find_extent_buffer. 5066 5065 * 5067 - * Once the ref bit is set, it won't go away while the 5068 - * buffer is dirty or in writeback, and it also won't 5069 - * go away while we have the reference count on the 5070 - * eb bumped. 5066 + * It is only cleared in two cases: freeing the last non-tree 5067 + * reference to the extent_buffer when its STALE bit is set or 5068 + * calling releasepage when the tree reference is the only reference. 5071 5069 * 5072 - * We can't just set the ref bit without bumping the 5073 - * ref on the eb because free_extent_buffer might 5074 - * see the ref bit and try to clear it. If this happens 5075 - * free_extent_buffer might end up dropping our original 5076 - * ref by mistake and freeing the page before we are able 5077 - * to add one more ref. 5070 + * In both cases, care is taken to ensure that the extent_buffer's 5071 + * pages are not under io. However, releasepage can be concurrently 5072 + * called with creating new references, which is prone to race 5073 + * conditions between the calls to check_buffer_tree_ref in those 5074 + * codepaths and clearing TREE_REF in try_release_extent_buffer. 5078 5075 * 5079 - * So bump the ref count first, then set the bit. 
If someone 5080 - * beat us to it, drop the ref we added. 5076 + * The actual lifetime of the extent_buffer in the radix tree is 5077 + * adequately protected by the refcount, but the TREE_REF bit and 5078 + * its corresponding reference are not. To protect against this 5079 + * class of races, we call check_buffer_tree_ref from the codepaths 5080 + * which trigger io after they set eb->io_pages. Note that once io is 5081 + * initiated, TREE_REF can no longer be cleared, so that is the 5082 + * moment at which any such race is best fixed. 5081 5083 */ 5082 5084 refs = atomic_read(&eb->refs); 5083 5085 if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) ··· 5531 5527 clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); 5532 5528 eb->read_mirror = 0; 5533 5529 atomic_set(&eb->io_pages, num_reads); 5530 + /* 5531 + * It is possible for releasepage to clear the TREE_REF bit before we 5532 + * set io_pages. See check_buffer_tree_ref for a more detailed comment. 5533 + */ 5534 + check_buffer_tree_ref(eb); 5534 5535 for (i = 0; i < num_pages; i++) { 5535 5536 page = eb->pages[i]; 5536 5537
+1
fs/btrfs/file.c
··· 3509 3509 .read_iter = generic_file_read_iter, 3510 3510 .splice_read = generic_file_splice_read, 3511 3511 .write_iter = btrfs_file_write_iter, 3512 + .splice_write = iter_file_splice_write, 3512 3513 .mmap = btrfs_file_mmap, 3513 3514 .open = btrfs_file_open, 3514 3515 .release = btrfs_release_file,
+11 -21
fs/btrfs/inode.c
··· 1690 1690 ret = fallback_to_cow(inode, locked_page, cow_start, 1691 1691 found_key.offset - 1, 1692 1692 page_started, nr_written); 1693 - if (ret) { 1694 - if (nocow) 1695 - btrfs_dec_nocow_writers(fs_info, 1696 - disk_bytenr); 1693 + if (ret) 1697 1694 goto error; 1698 - } 1699 1695 cow_start = (u64)-1; 1700 1696 } 1701 1697 ··· 1707 1711 ram_bytes, BTRFS_COMPRESS_NONE, 1708 1712 BTRFS_ORDERED_PREALLOC); 1709 1713 if (IS_ERR(em)) { 1710 - if (nocow) 1711 - btrfs_dec_nocow_writers(fs_info, 1712 - disk_bytenr); 1713 1714 ret = PTR_ERR(em); 1714 1715 goto error; 1715 1716 } ··· 8123 8130 /* 8124 8131 * Qgroup reserved space handler 8125 8132 * Page here will be either 8126 - * 1) Already written to disk 8127 - * In this case, its reserved space is released from data rsv map 8128 - * and will be freed by delayed_ref handler finally. 8129 - * So even we call qgroup_free_data(), it won't decrease reserved 8130 - * space. 8131 - * 2) Not written to disk 8132 - * This means the reserved space should be freed here. However, 8133 - * if a truncate invalidates the page (by clearing PageDirty) 8134 - * and the page is accounted for while allocating extent 8135 - * in btrfs_check_data_free_space() we let delayed_ref to 8136 - * free the entire extent. 8133 + * 1) Already written to disk or ordered extent already submitted 8134 + * Then its QGROUP_RESERVED bit in io_tree is already cleaned. 8135 + * Qgroup will be handled by its qgroup_record then. 8136 + * btrfs_qgroup_free_data() call will do nothing here. 8137 + * 8138 + * 2) Not written to disk yet 8139 + * Then btrfs_qgroup_free_data() call will clear the QGROUP_RESERVED 8140 + * bit of its io_tree, and free the qgroup reserved data space. 8141 + * Since the IO will never happen for this page. 
8137 8142 */ 8138 - if (PageDirty(page)) 8139 - btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE); 8143 + btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE); 8140 8144 if (!inode_evicting) { 8141 8145 clear_extent_bit(tree, page_start, page_end, EXTENT_LOCKED | 8142 8146 EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
+1 -1
fs/btrfs/ref-verify.c
··· 509 509 switch (key.type) { 510 510 case BTRFS_EXTENT_ITEM_KEY: 511 511 *num_bytes = key.offset; 512 - /* fall through */ 512 + fallthrough; 513 513 case BTRFS_METADATA_ITEM_KEY: 514 514 *bytenr = key.objectid; 515 515 ret = process_extent_item(fs_info, path, &key, i,
+1 -1
fs/btrfs/space-info.c
··· 879 879 return false; 880 880 } 881 881 global_rsv->reserved -= ticket->bytes; 882 + remove_ticket(space_info, ticket); 882 883 ticket->bytes = 0; 883 - list_del_init(&ticket->list); 884 884 wake_up(&ticket->wait); 885 885 space_info->tickets_id++; 886 886 if (global_rsv->reserved < global_rsv->size)
+3 -3
fs/btrfs/super.c
··· 523 523 case Opt_compress_force: 524 524 case Opt_compress_force_type: 525 525 compress_force = true; 526 - /* Fallthrough */ 526 + fallthrough; 527 527 case Opt_compress: 528 528 case Opt_compress_type: 529 529 saved_compress_type = btrfs_test_opt(info, ··· 622 622 btrfs_set_opt(info->mount_opt, NOSSD); 623 623 btrfs_clear_and_info(info, SSD, 624 624 "not using ssd optimizations"); 625 - /* Fallthrough */ 625 + fallthrough; 626 626 case Opt_nossd_spread: 627 627 btrfs_clear_and_info(info, SSD_SPREAD, 628 628 "not using spread ssd allocation scheme"); ··· 793 793 case Opt_recovery: 794 794 btrfs_warn(info, 795 795 "'recovery' is deprecated, use 'usebackuproot' instead"); 796 - /* fall through */ 796 + fallthrough; 797 797 case Opt_usebackuproot: 798 798 btrfs_info(info, 799 799 "trying to use backup root at mount time");
+8
fs/btrfs/volumes.c
··· 7052 7052 mutex_lock(&fs_info->chunk_mutex); 7053 7053 7054 7054 /* 7055 + * It is possible for mount and umount to race in such a way that 7056 + * we execute this code path, but open_fs_devices failed to clear 7057 + * total_rw_bytes. We certainly want it cleared before reading the 7058 + * device items, so clear it here. 7059 + */ 7060 + fs_info->fs_devices->total_rw_bytes = 0; 7061 + 7062 + /* 7055 7063 * Read all device items, and then all the chunk items. All 7056 7064 * device items are found before any chunk item (their object id 7057 7065 * is smaller than the lowest possible object id for a chunk
+1 -1
fs/btrfs/volumes.h
··· 408 408 return BTRFS_MAP_WRITE; 409 409 default: 410 410 WARN_ON_ONCE(1); 411 - /* fall through */ 411 + fallthrough; 412 412 case REQ_OP_READ: 413 413 return BTRFS_MAP_READ; 414 414 }
+1 -1
fs/cachefiles/rdwr.c
··· 937 937 } 938 938 939 939 data = kmap(page); 940 - ret = __kernel_write(file, data, len, &pos); 940 + ret = kernel_write(file, data, len, &pos); 941 941 kunmap(page); 942 942 fput(file); 943 943 if (ret != len)
+5 -1
fs/cifs/cifs_debug.c
··· 399 399 if (ses->sign) 400 400 seq_puts(m, " signed"); 401 401 402 + seq_printf(m, "\n\tUser: %d Cred User: %d", 403 + from_kuid(&init_user_ns, ses->linux_uid), 404 + from_kuid(&init_user_ns, ses->cred_uid)); 405 + 402 406 if (ses->chan_count > 1) { 403 407 seq_printf(m, "\n\n\tExtra Channels: %zu\n", 404 408 ses->chan_count-1); ··· 410 406 cifs_dump_channel(m, j, &ses->chans[j]); 411 407 } 412 408 413 - seq_puts(m, "\n\tShares:"); 409 + seq_puts(m, "\n\n\tShares:"); 414 410 j = 0; 415 411 416 412 seq_printf(m, "\n\t%d) IPC: ", j);
+1 -1
fs/cifs/cifsfs.h
··· 156 156 extern const struct export_operations cifs_export_ops; 157 157 #endif /* CONFIG_CIFS_NFSD_EXPORT */ 158 158 159 - #define CIFS_VERSION "2.27" 159 + #define CIFS_VERSION "2.28" 160 160 #endif /* _CIFSFS_H */
+6 -4
fs/cifs/connect.c
··· 5306 5306 vol_info->nocase = master_tcon->nocase; 5307 5307 vol_info->nohandlecache = master_tcon->nohandlecache; 5308 5308 vol_info->local_lease = master_tcon->local_lease; 5309 + vol_info->no_lease = master_tcon->no_lease; 5310 + vol_info->resilient = master_tcon->use_resilient; 5311 + vol_info->persistent = master_tcon->use_persistent; 5312 + vol_info->handle_timeout = master_tcon->handle_timeout; 5309 5313 vol_info->no_linux_ext = !master_tcon->unix_ext; 5314 + vol_info->linux_ext = master_tcon->posix_extensions; 5310 5315 vol_info->sectype = master_tcon->ses->sectype; 5311 5316 vol_info->sign = master_tcon->ses->sign; 5317 + vol_info->seal = master_tcon->seal; 5312 5318 5313 5319 rc = cifs_set_vol_auth(vol_info, master_tcon->ses); 5314 5320 if (rc) { ··· 5339 5333 cifs_put_smb_ses(ses); 5340 5334 goto out; 5341 5335 } 5342 - 5343 - /* if new SMB3.11 POSIX extensions are supported do not remap / and \ */ 5344 - if (tcon->posix_extensions) 5345 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS; 5346 5336 5347 5337 if (cap_unix(ses)) 5348 5338 reset_cifs_unix_caps(0, tcon, NULL, vol_info);
+6 -13
fs/cifs/file.c
··· 1149 1149 1150 1150 /* 1151 1151 * Set the byte-range lock (posix style). Returns: 1152 - * 1) 0, if we set the lock and don't need to request to the server; 1153 - * 2) 1, if we need to request to the server; 1154 - * 3) <0, if the error occurs while setting the lock. 1152 + * 1) <0, if the error occurs while setting the lock; 1153 + * 2) 0, if we set the lock and don't need to request to the server; 1154 + * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock; 1155 + * 4) FILE_LOCK_DEFERRED + 1, if we need to request to the server. 1155 1156 */ 1156 1157 static int 1157 1158 cifs_posix_lock_set(struct file *file, struct file_lock *flock) 1158 1159 { 1159 1160 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file)); 1160 - int rc = 1; 1161 + int rc = FILE_LOCK_DEFERRED + 1; 1161 1162 1162 1163 if ((flock->fl_flags & FL_POSIX) == 0) 1163 1164 return rc; 1164 1165 1165 - try_again: 1166 1166 cifs_down_write(&cinode->lock_sem); 1167 1167 if (!cinode->can_cache_brlcks) { 1168 1168 up_write(&cinode->lock_sem); ··· 1171 1171 1172 1172 rc = posix_lock_file(file, flock, NULL); 1173 1173 up_write(&cinode->lock_sem); 1174 - if (rc == FILE_LOCK_DEFERRED) { 1175 - rc = wait_event_interruptible(flock->fl_wait, 1176 - list_empty(&flock->fl_blocked_member)); 1177 - if (!rc) 1178 - goto try_again; 1179 - locks_delete_block(flock); 1180 - } 1181 1174 return rc; 1182 1175 } 1183 1176 ··· 1645 1652 int posix_lock_type; 1646 1653 1647 1654 rc = cifs_posix_lock_set(file, flock); 1648 - if (!rc || rc < 0) 1655 + if (rc <= FILE_LOCK_DEFERRED) 1649 1656 return rc; 1650 1657 1651 1658 if (type & server->vals->shared_lock_type)
+8 -1
fs/cifs/ioctl.c
··· 169 169 unsigned int xid; 170 170 struct cifsFileInfo *pSMBFile = filep->private_data; 171 171 struct cifs_tcon *tcon; 172 + struct tcon_link *tlink; 172 173 struct cifs_sb_info *cifs_sb; 173 174 __u64 ExtAttrBits = 0; 174 175 __u64 caps; ··· 308 307 break; 309 308 } 310 309 cifs_sb = CIFS_SB(inode->i_sb); 311 - tcon = tlink_tcon(cifs_sb_tlink(cifs_sb)); 310 + tlink = cifs_sb_tlink(cifs_sb); 311 + if (IS_ERR(tlink)) { 312 + rc = PTR_ERR(tlink); 313 + break; 314 + } 315 + tcon = tlink_tcon(tlink); 312 316 if (tcon && tcon->ses->server->ops->notify) { 313 317 rc = tcon->ses->server->ops->notify(xid, 314 318 filep, (void __user *)arg); 315 319 cifs_dbg(FYI, "ioctl notify rc %d\n", rc); 316 320 } else 317 321 rc = -EOPNOTSUPP; 322 + cifs_put_tlink(tlink); 318 323 break; 319 324 default: 320 325 cifs_dbg(FYI, "unsupported ioctl\n");
+6 -2
fs/cifs/smb2misc.c
··· 354 354 ((struct smb2_ioctl_rsp *)shdr)->OutputCount); 355 355 break; 356 356 case SMB2_CHANGE_NOTIFY: 357 + *off = le16_to_cpu( 358 + ((struct smb2_change_notify_rsp *)shdr)->OutputBufferOffset); 359 + *len = le32_to_cpu( 360 + ((struct smb2_change_notify_rsp *)shdr)->OutputBufferLength); 361 + break; 357 362 default: 358 - /* BB FIXME for unimplemented cases above */ 359 - cifs_dbg(VFS, "no length check for command\n"); 363 + cifs_dbg(VFS, "no length check for command %d\n", le16_to_cpu(shdr->Command)); 360 364 break; 361 365 } 362 366
+1 -1
fs/cifs/smb2ops.c
··· 2148 2148 2149 2149 tcon = cifs_sb_master_tcon(cifs_sb); 2150 2150 oparms.tcon = tcon; 2151 - oparms.desired_access = FILE_READ_ATTRIBUTES; 2151 + oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA; 2152 2152 oparms.disposition = FILE_OPEN; 2153 2153 oparms.create_options = cifs_create_options(cifs_sb, 0); 2154 2154 oparms.fid = &fid;
+1 -1
fs/cifs/transport.c
··· 523 523 const int timeout, const int flags, 524 524 unsigned int *instance) 525 525 { 526 - int rc; 526 + long rc; 527 527 int *credits; 528 528 int optype; 529 529 long int t;
+3 -3
fs/efivarfs/super.c
··· 201 201 sb->s_d_op = &efivarfs_d_ops; 202 202 sb->s_time_gran = 1; 203 203 204 + if (!efivar_supports_writes()) 205 + sb->s_flags |= SB_RDONLY; 206 + 204 207 inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0, true); 205 208 if (!inode) 206 209 return -ENOMEM; ··· 255 252 256 253 static __init int efivarfs_init(void) 257 254 { 258 - if (!efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES)) 259 - return -ENODEV; 260 - 261 255 if (!efivars_kobject()) 262 256 return -ENODEV; 263 257
+8 -6
fs/exfat/dir.c
··· 309 309 .llseek = generic_file_llseek, 310 310 .read = generic_read_dir, 311 311 .iterate = exfat_iterate, 312 - .fsync = generic_file_fsync, 312 + .fsync = exfat_file_fsync, 313 313 }; 314 314 315 315 int exfat_alloc_new_dir(struct inode *inode, struct exfat_chain *clu) ··· 425 425 ep->dentry.name.flags = 0x0; 426 426 427 427 for (i = 0; i < EXFAT_FILE_NAME_LEN; i++) { 428 - ep->dentry.name.unicode_0_14[i] = cpu_to_le16(*uniname); 429 - if (*uniname == 0x0) 430 - break; 431 - uniname++; 428 + if (*uniname != 0x0) { 429 + ep->dentry.name.unicode_0_14[i] = cpu_to_le16(*uniname); 430 + uniname++; 431 + } else { 432 + ep->dentry.name.unicode_0_14[i] = 0x0; 433 + } 432 434 } 433 435 } 434 436 ··· 1112 1110 ret = exfat_get_next_cluster(sb, &clu.dir); 1113 1111 } 1114 1112 1115 - if (ret || clu.dir != EXFAT_EOF_CLUSTER) { 1113 + if (ret || clu.dir == EXFAT_EOF_CLUSTER) { 1116 1114 /* just initialized hint_stat */ 1117 1115 hint_stat->clu = p_dir->dir; 1118 1116 hint_stat->eidx = 0;
+2 -1
fs/exfat/exfat_fs.h
··· 371 371 static inline sector_t exfat_cluster_to_sector(struct exfat_sb_info *sbi, 372 372 unsigned int clus) 373 373 { 374 - return ((clus - EXFAT_RESERVED_CLUSTERS) << sbi->sect_per_clus_bits) + 374 + return ((sector_t)(clus - EXFAT_RESERVED_CLUSTERS) << sbi->sect_per_clus_bits) + 375 375 sbi->data_start_sector; 376 376 } 377 377 ··· 420 420 int exfat_setattr(struct dentry *dentry, struct iattr *attr); 421 421 int exfat_getattr(const struct path *path, struct kstat *stat, 422 422 unsigned int request_mask, unsigned int query_flags); 423 + int exfat_file_fsync(struct file *file, loff_t start, loff_t end, int datasync); 423 424 424 425 /* namei.c */ 425 426 extern const struct dentry_operations exfat_dentry_ops;
+19 -2
fs/exfat/file.c
··· 6 6 #include <linux/slab.h> 7 7 #include <linux/cred.h> 8 8 #include <linux/buffer_head.h> 9 + #include <linux/blkdev.h> 9 10 10 11 #include "exfat_raw.h" 11 12 #include "exfat_fs.h" ··· 176 175 ep2->dentry.stream.size = 0; 177 176 } else { 178 177 ep2->dentry.stream.valid_size = cpu_to_le64(new_size); 179 - ep2->dentry.stream.size = ep->dentry.stream.valid_size; 178 + ep2->dentry.stream.size = ep2->dentry.stream.valid_size; 180 179 } 181 180 182 181 if (new_size == 0) { ··· 347 346 return error; 348 347 } 349 348 349 + int exfat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync) 350 + { 351 + struct inode *inode = filp->f_mapping->host; 352 + int err; 353 + 354 + err = __generic_file_fsync(filp, start, end, datasync); 355 + if (err) 356 + return err; 357 + 358 + err = sync_blockdev(inode->i_sb->s_bdev); 359 + if (err) 360 + return err; 361 + 362 + return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL); 363 + } 364 + 350 365 const struct file_operations exfat_file_operations = { 351 366 .llseek = generic_file_llseek, 352 367 .read_iter = generic_file_read_iter, 353 368 .write_iter = generic_file_write_iter, 354 369 .mmap = generic_file_mmap, 355 - .fsync = generic_file_fsync, 370 + .fsync = exfat_file_fsync, 356 371 .splice_read = generic_file_splice_read, 357 372 .splice_write = iter_file_splice_write, 358 373 };
+11 -3
fs/exfat/namei.c
··· 975 975 goto unlock; 976 976 } 977 977 978 - exfat_set_vol_flags(sb, VOL_DIRTY); 979 978 exfat_chain_set(&clu_to_free, ei->start_clu, 980 979 EXFAT_B_TO_CLU_ROUND_UP(i_size_read(inode), sbi), ei->flags); 981 980 ··· 1001 1002 num_entries++; 1002 1003 brelse(bh); 1003 1004 1005 + exfat_set_vol_flags(sb, VOL_DIRTY); 1004 1006 err = exfat_remove_entries(dir, &cdir, entry, 0, num_entries); 1005 1007 if (err) { 1006 1008 exfat_err(sb, "failed to exfat_remove_entries : err(%d)", err); ··· 1077 1077 1078 1078 epold = exfat_get_dentry(sb, p_dir, oldentry + 1, &old_bh, 1079 1079 &sector_old); 1080 + if (!epold) 1081 + return -EIO; 1080 1082 epnew = exfat_get_dentry(sb, p_dir, newentry + 1, &new_bh, 1081 1083 &sector_new); 1082 - if (!epold || !epnew) 1084 + if (!epnew) { 1085 + brelse(old_bh); 1083 1086 return -EIO; 1087 + } 1084 1088 1085 1089 memcpy(epnew, epold, DENTRY_SIZE); 1086 1090 exfat_update_bh(sb, new_bh, sync); ··· 1165 1161 1166 1162 epmov = exfat_get_dentry(sb, p_olddir, oldentry + 1, &mov_bh, 1167 1163 &sector_mov); 1164 + if (!epmov) 1165 + return -EIO; 1168 1166 epnew = exfat_get_dentry(sb, p_newdir, newentry + 1, &new_bh, 1169 1167 &sector_new); 1170 - if (!epmov || !epnew) 1168 + if (!epnew) { 1169 + brelse(mov_bh); 1171 1170 return -EIO; 1171 + } 1172 1172 1173 1173 memcpy(epnew, epmov, DENTRY_SIZE); 1174 1174 exfat_update_bh(sb, new_bh, IS_DIRSYNC(inode));
+4 -4
fs/exfat/nls.c
··· 495 495 struct exfat_uni_name *p_uniname, int *p_lossy) 496 496 { 497 497 int i, unilen, lossy = NLS_NAME_NO_LOSSY; 498 - unsigned short upname[MAX_NAME_LENGTH + 1]; 498 + __le16 upname[MAX_NAME_LENGTH + 1]; 499 499 unsigned short *uniname = p_uniname->name; 500 500 501 501 WARN_ON(!len); ··· 519 519 exfat_wstrchr(bad_uni_chars, *uniname)) 520 520 lossy |= NLS_NAME_LOSSY; 521 521 522 - upname[i] = exfat_toupper(sb, *uniname); 522 + upname[i] = cpu_to_le16(exfat_toupper(sb, *uniname)); 523 523 uniname++; 524 524 } 525 525 ··· 597 597 struct exfat_uni_name *p_uniname, int *p_lossy) 598 598 { 599 599 int i = 0, unilen = 0, lossy = NLS_NAME_NO_LOSSY; 600 - unsigned short upname[MAX_NAME_LENGTH + 1]; 600 + __le16 upname[MAX_NAME_LENGTH + 1]; 601 601 unsigned short *uniname = p_uniname->name; 602 602 struct nls_table *nls = EXFAT_SB(sb)->nls_io; 603 603 ··· 611 611 exfat_wstrchr(bad_uni_chars, *uniname)) 612 612 lossy |= NLS_NAME_LOSSY; 613 613 614 - upname[unilen] = exfat_toupper(sb, *uniname); 614 + upname[unilen] = cpu_to_le16(exfat_toupper(sb, *uniname)); 615 615 uniname++; 616 616 unilen++; 617 617 }
+10
fs/exfat/super.c
··· 693 693 } 694 694 } 695 695 696 + static int exfat_reconfigure(struct fs_context *fc) 697 + { 698 + fc->sb_flags |= SB_NODIRATIME; 699 + 700 + /* volume flag will be updated in exfat_sync_fs */ 701 + sync_filesystem(fc->root->d_sb); 702 + return 0; 703 + } 704 + 696 705 static const struct fs_context_operations exfat_context_ops = { 697 706 .parse_param = exfat_parse_param, 698 707 .get_tree = exfat_get_tree, 699 708 .free = exfat_free, 709 + .reconfigure = exfat_reconfigure, 700 710 }; 701 711 702 712 static int exfat_init_fs_context(struct fs_context *fc)
+1 -1
fs/file_table.c
··· 230 230 d_set_d_op(path.dentry, &anon_ops); 231 231 path.mnt = mntget(mnt); 232 232 d_instantiate(path.dentry, inode); 233 - file = alloc_file(&path, flags | FMODE_NONOTIFY, fops); 233 + file = alloc_file(&path, flags, fops); 234 234 if (IS_ERR(file)) { 235 235 ihold(inode); 236 236 path_put(&path);
+78 -54
fs/fuse/file.c
··· 18 18 #include <linux/swap.h> 19 19 #include <linux/falloc.h> 20 20 #include <linux/uio.h> 21 + #include <linux/fs.h> 21 22 22 23 static struct page **fuse_pages_alloc(unsigned int npages, gfp_t flags, 23 24 struct fuse_page_desc **desc) ··· 1587 1586 struct backing_dev_info *bdi = inode_to_bdi(inode); 1588 1587 int i; 1589 1588 1590 - rb_erase(&wpa->writepages_entry, &fi->writepages); 1591 1589 for (i = 0; i < ap->num_pages; i++) { 1592 1590 dec_wb_stat(&bdi->wb, WB_WRITEBACK); 1593 1591 dec_node_page_state(ap->pages[i], NR_WRITEBACK_TEMP); ··· 1637 1637 1638 1638 out_free: 1639 1639 fi->writectr--; 1640 + rb_erase(&wpa->writepages_entry, &fi->writepages); 1640 1641 fuse_writepage_finish(fc, wpa); 1641 1642 spin_unlock(&fi->lock); 1642 1643 ··· 1675 1674 } 1676 1675 } 1677 1676 1678 - static void tree_insert(struct rb_root *root, struct fuse_writepage_args *wpa) 1677 + static struct fuse_writepage_args *fuse_insert_writeback(struct rb_root *root, 1678 + struct fuse_writepage_args *wpa) 1679 1679 { 1680 1680 pgoff_t idx_from = wpa->ia.write.in.offset >> PAGE_SHIFT; 1681 1681 pgoff_t idx_to = idx_from + wpa->ia.ap.num_pages - 1; ··· 1699 1697 else if (idx_to < curr_index) 1700 1698 p = &(*p)->rb_left; 1701 1699 else 1702 - return (void) WARN_ON(true); 1700 + return curr; 1703 1701 } 1704 1702 1705 1703 rb_link_node(&wpa->writepages_entry, parent, p); 1706 1704 rb_insert_color(&wpa->writepages_entry, root); 1705 + return NULL; 1706 + } 1707 + 1708 + static void tree_insert(struct rb_root *root, struct fuse_writepage_args *wpa) 1709 + { 1710 + WARN_ON(fuse_insert_writeback(root, wpa)); 1707 1711 } 1708 1712 1709 1713 static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_args *args, ··· 1722 1714 1723 1715 mapping_set_error(inode->i_mapping, error); 1724 1716 spin_lock(&fi->lock); 1717 + rb_erase(&wpa->writepages_entry, &fi->writepages); 1725 1718 while (wpa->next) { 1726 1719 struct fuse_conn *fc = get_fuse_conn(inode); 1727 1720 struct fuse_write_in 
*inarg = &wpa->ia.write.in; ··· 1961 1952 } 1962 1953 1963 1954 /* 1964 - * First recheck under fi->lock if the offending offset is still under 1965 - * writeback. If yes, then iterate auxiliary write requests, to see if there's 1955 + * Check under fi->lock if the page is under writeback, and insert it onto the 1956 + * rb_tree if not. Otherwise iterate auxiliary write requests, to see if there's 1966 1957 * one already added for a page at this offset. If there's none, then insert 1967 1958 * this new request onto the auxiliary list, otherwise reuse the existing one by 1968 - * copying the new page contents over to the old temporary page. 1959 + * swapping the new temp page with the old one. 1969 1960 */ 1970 - static bool fuse_writepage_in_flight(struct fuse_writepage_args *new_wpa, 1971 - struct page *page) 1961 + static bool fuse_writepage_add(struct fuse_writepage_args *new_wpa, 1962 + struct page *page) 1972 1963 { 1973 1964 struct fuse_inode *fi = get_fuse_inode(new_wpa->inode); 1974 1965 struct fuse_writepage_args *tmp; ··· 1976 1967 struct fuse_args_pages *new_ap = &new_wpa->ia.ap; 1977 1968 1978 1969 WARN_ON(new_ap->num_pages != 0); 1970 + new_ap->num_pages = 1; 1979 1971 1980 1972 spin_lock(&fi->lock); 1981 - rb_erase(&new_wpa->writepages_entry, &fi->writepages); 1982 - old_wpa = fuse_find_writeback(fi, page->index, page->index); 1973 + old_wpa = fuse_insert_writeback(&fi->writepages, new_wpa); 1983 1974 if (!old_wpa) { 1984 - tree_insert(&fi->writepages, new_wpa); 1985 1975 spin_unlock(&fi->lock); 1986 - return false; 1976 + return true; 1987 1977 } 1988 1978 1989 - new_ap->num_pages = 1; 1990 1979 for (tmp = old_wpa->next; tmp; tmp = tmp->next) { 1991 1980 pgoff_t curr_index; 1992 1981 ··· 2013 2006 fuse_writepage_free(new_wpa); 2014 2007 } 2015 2008 2016 - return true; 2009 + return false; 2010 + } 2011 + 2012 + static bool fuse_writepage_need_send(struct fuse_conn *fc, struct page *page, 2013 + struct fuse_args_pages *ap, 2014 + struct 
fuse_fill_wb_data *data) 2015 + { 2016 + WARN_ON(!ap->num_pages); 2017 + 2018 + /* 2019 + * Being under writeback is unlikely but possible. For example direct 2020 + * read to an mmaped fuse file will set the page dirty twice; once when 2021 + * the pages are faulted with get_user_pages(), and then after the read 2022 + * completed. 2023 + */ 2024 + if (fuse_page_is_writeback(data->inode, page->index)) 2025 + return true; 2026 + 2027 + /* Reached max pages */ 2028 + if (ap->num_pages == fc->max_pages) 2029 + return true; 2030 + 2031 + /* Reached max write bytes */ 2032 + if ((ap->num_pages + 1) * PAGE_SIZE > fc->max_write) 2033 + return true; 2034 + 2035 + /* Discontinuity */ 2036 + if (data->orig_pages[ap->num_pages - 1]->index + 1 != page->index) 2037 + return true; 2038 + 2039 + /* Need to grow the pages array? If so, did the expansion fail? */ 2040 + if (ap->num_pages == data->max_pages && !fuse_pages_realloc(data)) 2041 + return true; 2042 + 2043 + return false; 2017 2044 } 2018 2045 2019 2046 static int fuse_writepages_fill(struct page *page, ··· 2060 2019 struct fuse_inode *fi = get_fuse_inode(inode); 2061 2020 struct fuse_conn *fc = get_fuse_conn(inode); 2062 2021 struct page *tmp_page; 2063 - bool is_writeback; 2064 2022 int err; 2065 2023 2066 2024 if (!data->ff) { ··· 2069 2029 goto out_unlock; 2070 2030 } 2071 2031 2072 - /* 2073 - * Being under writeback is unlikely but possible. For example direct 2074 - * read to an mmaped fuse file will set the page dirty twice; once when 2075 - * the pages are faulted with get_user_pages(), and then after the read 2076 - * completed. 
2077 - */ 2078 - is_writeback = fuse_page_is_writeback(inode, page->index); 2079 - 2080 - if (wpa && ap->num_pages && 2081 - (is_writeback || ap->num_pages == fc->max_pages || 2082 - (ap->num_pages + 1) * PAGE_SIZE > fc->max_write || 2083 - data->orig_pages[ap->num_pages - 1]->index + 1 != page->index)) { 2032 + if (wpa && fuse_writepage_need_send(fc, page, ap, data)) { 2084 2033 fuse_writepages_send(data); 2085 2034 data->wpa = NULL; 2086 - } else if (wpa && ap->num_pages == data->max_pages) { 2087 - if (!fuse_pages_realloc(data)) { 2088 - fuse_writepages_send(data); 2089 - data->wpa = NULL; 2090 - } 2091 2035 } 2092 2036 2093 2037 err = -ENOMEM; ··· 2109 2085 ap->args.end = fuse_writepage_end; 2110 2086 ap->num_pages = 0; 2111 2087 wpa->inode = inode; 2112 - 2113 - spin_lock(&fi->lock); 2114 - tree_insert(&fi->writepages, wpa); 2115 - spin_unlock(&fi->lock); 2116 - 2117 - data->wpa = wpa; 2118 2088 } 2119 2089 set_page_writeback(page); 2120 2090 ··· 2116 2098 ap->pages[ap->num_pages] = tmp_page; 2117 2099 ap->descs[ap->num_pages].offset = 0; 2118 2100 ap->descs[ap->num_pages].length = PAGE_SIZE; 2101 + data->orig_pages[ap->num_pages] = page; 2119 2102 2120 2103 inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK); 2121 2104 inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP); 2122 2105 2123 2106 err = 0; 2124 - if (is_writeback && fuse_writepage_in_flight(wpa, page)) { 2107 + if (data->wpa) { 2108 + /* 2109 + * Protected by fi->lock against concurrent access by 2110 + * fuse_page_is_writeback(). 2111 + */ 2112 + spin_lock(&fi->lock); 2113 + ap->num_pages++; 2114 + spin_unlock(&fi->lock); 2115 + } else if (fuse_writepage_add(wpa, page)) { 2116 + data->wpa = wpa; 2117 + } else { 2125 2118 end_page_writeback(page); 2126 - data->wpa = NULL; 2127 - goto out_unlock; 2128 2119 } 2129 - data->orig_pages[ap->num_pages] = page; 2130 - 2131 - /* 2132 - * Protected by fi->lock against concurrent access by 2133 - * fuse_page_is_writeback(). 
2134 - */ 2135 - spin_lock(&fi->lock); 2136 - ap->num_pages++; 2137 - spin_unlock(&fi->lock); 2138 - 2139 2120 out_unlock: 2140 2121 unlock_page(page); 2141 2122 ··· 2166 2149 2167 2150 err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data); 2168 2151 if (data.wpa) { 2169 - /* Ignore errors if we can write at least one page */ 2170 2152 WARN_ON(!data.wpa->ia.ap.num_pages); 2171 2153 fuse_writepages_send(&data); 2172 - err = 0; 2173 2154 } 2174 2155 if (data.ff) 2175 2156 fuse_file_put(data.ff, false, false); ··· 2776 2761 struct iovec *iov = iov_page; 2777 2762 2778 2763 iov->iov_base = (void __user *)arg; 2779 - iov->iov_len = _IOC_SIZE(cmd); 2764 + 2765 + switch (cmd) { 2766 + case FS_IOC_GETFLAGS: 2767 + case FS_IOC_SETFLAGS: 2768 + iov->iov_len = sizeof(int); 2769 + break; 2770 + default: 2771 + iov->iov_len = _IOC_SIZE(cmd); 2772 + break; 2773 + } 2780 2774 2781 2775 if (_IOC_DIR(cmd) & _IOC_WRITE) { 2782 2776 in_iov = iov;
+16 -3
fs/fuse/inode.c
··· 121 121 } 122 122 } 123 123 124 - static int fuse_remount_fs(struct super_block *sb, int *flags, char *data) 124 + static int fuse_reconfigure(struct fs_context *fc) 125 125 { 126 + struct super_block *sb = fc->root->d_sb; 127 + 126 128 sync_filesystem(sb); 127 - if (*flags & SB_MANDLOCK) 129 + if (fc->sb_flags & SB_MANDLOCK) 128 130 return -EINVAL; 129 131 130 132 return 0; ··· 477 475 struct fuse_fs_context *ctx = fc->fs_private; 478 476 int opt; 479 477 478 + if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) { 479 + /* 480 + * Ignore options coming from mount(MS_REMOUNT) for backward 481 + * compatibility. 482 + */ 483 + if (fc->oldapi) 484 + return 0; 485 + 486 + return invalfc(fc, "No changes allowed in reconfigure"); 487 + } 488 + 480 489 opt = fs_parse(fc, fuse_fs_parameters, param, &result); 481 490 if (opt < 0) 482 491 return opt; ··· 830 817 .evict_inode = fuse_evict_inode, 831 818 .write_inode = fuse_write_inode, 832 819 .drop_inode = generic_delete_inode, 833 - .remount_fs = fuse_remount_fs, 834 820 .put_super = fuse_put_super, 835 821 .umount_begin = fuse_umount_begin, 836 822 .statfs = fuse_statfs, ··· 1308 1296 static const struct fs_context_operations fuse_context_ops = { 1309 1297 .free = fuse_free_fc, 1310 1298 .parse_param = fuse_parse_param, 1299 + .reconfigure = fuse_reconfigure, 1311 1300 .get_tree = fuse_get_tree, 1312 1301 }; 1313 1302
+1 -44
fs/gfs2/aops.c
··· 468 468 } 469 469 470 470 471 - /** 472 - * __gfs2_readpage - readpage 473 - * @file: The file to read a page for 474 - * @page: The page to read 475 - * 476 - * This is the core of gfs2's readpage. It's used by the internal file 477 - * reading code as in that case we already hold the glock. Also it's 478 - * called by gfs2_readpage() once the required lock has been granted. 479 - */ 480 - 481 471 static int __gfs2_readpage(void *file, struct page *page) 482 472 { 483 473 struct gfs2_inode *ip = GFS2_I(page->mapping->host); 484 474 struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host); 485 - 486 475 int error; 487 476 488 477 if (i_blocksize(page->mapping->host) == PAGE_SIZE && ··· 494 505 * gfs2_readpage - read a page of a file 495 506 * @file: The file to read 496 507 * @page: The page of the file 497 - * 498 - * This deals with the locking required. We have to unlock and 499 - * relock the page in order to get the locking in the right 500 - * order. 501 508 */ 502 509 503 510 static int gfs2_readpage(struct file *file, struct page *page) 504 511 { 505 - struct address_space *mapping = page->mapping; 506 - struct gfs2_inode *ip = GFS2_I(mapping->host); 507 - struct gfs2_holder gh; 508 - int error; 509 - 510 - unlock_page(page); 511 - gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh); 512 - error = gfs2_glock_nq(&gh); 513 - if (unlikely(error)) 514 - goto out; 515 - error = AOP_TRUNCATED_PAGE; 516 - lock_page(page); 517 - if (page->mapping == mapping && !PageUptodate(page)) 518 - error = __gfs2_readpage(file, page); 519 - else 520 - unlock_page(page); 521 - gfs2_glock_dq(&gh); 522 - out: 523 - gfs2_holder_uninit(&gh); 524 - if (error && error != AOP_TRUNCATED_PAGE) 525 - lock_page(page); 526 - return error; 512 + return __gfs2_readpage(file, page); 527 513 } 528 514 529 515 /** ··· 562 598 { 563 599 struct inode *inode = rac->mapping->host; 564 600 struct gfs2_inode *ip = GFS2_I(inode); 565 - struct gfs2_holder gh; 566 601 567 - gfs2_holder_init(ip->i_gl, 
LM_ST_SHARED, 0, &gh); 568 - if (gfs2_glock_nq(&gh)) 569 - goto out_uninit; 570 602 if (!gfs2_is_stuffed(ip)) 571 603 mpage_readahead(rac, gfs2_block_map); 572 - gfs2_glock_dq(&gh); 573 - out_uninit: 574 - gfs2_holder_uninit(&gh); 575 604 } 576 605 577 606 /**
+50 -2
fs/gfs2/file.c
··· 558 558 return block_page_mkwrite_return(ret); 559 559 } 560 560 561 + static vm_fault_t gfs2_fault(struct vm_fault *vmf) 562 + { 563 + struct inode *inode = file_inode(vmf->vma->vm_file); 564 + struct gfs2_inode *ip = GFS2_I(inode); 565 + struct gfs2_holder gh; 566 + vm_fault_t ret; 567 + int err; 568 + 569 + gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh); 570 + err = gfs2_glock_nq(&gh); 571 + if (err) { 572 + ret = block_page_mkwrite_return(err); 573 + goto out_uninit; 574 + } 575 + ret = filemap_fault(vmf); 576 + gfs2_glock_dq(&gh); 577 + out_uninit: 578 + gfs2_holder_uninit(&gh); 579 + return ret; 580 + } 581 + 561 582 static const struct vm_operations_struct gfs2_vm_ops = { 562 - .fault = filemap_fault, 583 + .fault = gfs2_fault, 563 584 .map_pages = filemap_map_pages, 564 585 .page_mkwrite = gfs2_page_mkwrite, 565 586 }; ··· 845 824 846 825 static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to) 847 826 { 827 + struct gfs2_inode *ip; 828 + struct gfs2_holder gh; 829 + size_t written = 0; 848 830 ssize_t ret; 849 831 850 832 if (iocb->ki_flags & IOCB_DIRECT) { ··· 856 832 return ret; 857 833 iocb->ki_flags &= ~IOCB_DIRECT; 858 834 } 859 - return generic_file_read_iter(iocb, to); 835 + iocb->ki_flags |= IOCB_NOIO; 836 + ret = generic_file_read_iter(iocb, to); 837 + iocb->ki_flags &= ~IOCB_NOIO; 838 + if (ret >= 0) { 839 + if (!iov_iter_count(to)) 840 + return ret; 841 + written = ret; 842 + } else { 843 + if (ret != -EAGAIN) 844 + return ret; 845 + if (iocb->ki_flags & IOCB_NOWAIT) 846 + return ret; 847 + } 848 + ip = GFS2_I(iocb->ki_filp->f_mapping->host); 849 + gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh); 850 + ret = gfs2_glock_nq(&gh); 851 + if (ret) 852 + goto out_uninit; 853 + ret = generic_file_read_iter(iocb, to); 854 + if (ret > 0) 855 + written += ret; 856 + gfs2_glock_dq(&gh); 857 + out_uninit: 858 + gfs2_holder_uninit(&gh); 859 + return written ? written : ret; 860 860 } 861 861 862 862 /**
+4 -1
fs/gfs2/glock.c
··· 1899 1899 1900 1900 static void flush_delete_work(struct gfs2_glock *gl) 1901 1901 { 1902 - flush_delayed_work(&gl->gl_delete); 1902 + if (cancel_delayed_work(&gl->gl_delete)) { 1903 + queue_delayed_work(gfs2_delete_workqueue, 1904 + &gl->gl_delete, 0); 1905 + } 1903 1906 gfs2_glock_queue_work(gl, 0); 1904 1907 } 1905 1908
+6 -4
fs/gfs2/glops.c
··· 531 531 int error = 0; 532 532 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 533 533 534 - if (gl->gl_state == LM_ST_SHARED && !gfs2_withdrawn(sdp) && 535 - test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) { 534 + if (gl->gl_req == LM_ST_EXCLUSIVE && !gfs2_withdrawn(sdp)) { 536 535 atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE); 537 536 error = freeze_super(sdp->sd_vfs); 538 537 if (error) { ··· 544 545 gfs2_assert_withdraw(sdp, 0); 545 546 } 546 547 queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work); 547 - gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE | 548 - GFS2_LFC_FREEZE_GO_SYNC); 548 + if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) 549 + gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE | 550 + GFS2_LFC_FREEZE_GO_SYNC); 551 + else /* read-only mounts */ 552 + atomic_set(&sdp->sd_freeze_state, SFS_FROZEN); 549 553 } 550 554 return 0; 551 555 }
-1
fs/gfs2/incore.h
··· 399 399 GIF_QD_LOCKED = 1, 400 400 GIF_ALLOC_FAILED = 2, 401 401 GIF_SW_PAGED = 3, 402 - GIF_ORDERED = 4, 403 402 GIF_FREE_VFS_INODE = 5, 404 403 GIF_GLOP_PENDING = 6, 405 404 GIF_DEFERRED_DELETE = 7,
+2 -1
fs/gfs2/inode.c
··· 207 207 208 208 if (no_formal_ino && ip->i_no_formal_ino && 209 209 no_formal_ino != ip->i_no_formal_ino) { 210 + error = -ESTALE; 210 211 if (inode->i_state & I_NEW) 211 212 goto fail; 212 213 iput(inode); 213 - return ERR_PTR(-ESTALE); 214 + return ERR_PTR(error); 214 215 } 215 216 216 217 if (inode->i_state & I_NEW)
+19 -6
fs/gfs2/log.c
··· 613 613 return 0; 614 614 } 615 615 616 + static void __ordered_del_inode(struct gfs2_inode *ip) 617 + { 618 + if (!list_empty(&ip->i_ordered)) 619 + list_del_init(&ip->i_ordered); 620 + } 621 + 616 622 static void gfs2_ordered_write(struct gfs2_sbd *sdp) 617 623 { 618 624 struct gfs2_inode *ip; ··· 629 623 while (!list_empty(&sdp->sd_log_ordered)) { 630 624 ip = list_first_entry(&sdp->sd_log_ordered, struct gfs2_inode, i_ordered); 631 625 if (ip->i_inode.i_mapping->nrpages == 0) { 632 - test_and_clear_bit(GIF_ORDERED, &ip->i_flags); 633 - list_del(&ip->i_ordered); 626 + __ordered_del_inode(ip); 634 627 continue; 635 628 } 636 629 list_move(&ip->i_ordered, &written); ··· 648 643 spin_lock(&sdp->sd_ordered_lock); 649 644 while (!list_empty(&sdp->sd_log_ordered)) { 650 645 ip = list_first_entry(&sdp->sd_log_ordered, struct gfs2_inode, i_ordered); 651 - list_del(&ip->i_ordered); 652 - WARN_ON(!test_and_clear_bit(GIF_ORDERED, &ip->i_flags)); 646 + __ordered_del_inode(ip); 653 647 if (ip->i_inode.i_mapping->nrpages == 0) 654 648 continue; 655 649 spin_unlock(&sdp->sd_ordered_lock); ··· 663 659 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 664 660 665 661 spin_lock(&sdp->sd_ordered_lock); 666 - if (test_and_clear_bit(GIF_ORDERED, &ip->i_flags)) 667 - list_del(&ip->i_ordered); 662 + __ordered_del_inode(ip); 668 663 spin_unlock(&sdp->sd_ordered_lock); 669 664 } 670 665 ··· 1005 1002 1006 1003 out: 1007 1004 if (gfs2_withdrawn(sdp)) { 1005 + /** 1006 + * If the tr_list is empty, we're withdrawing during a log 1007 + * flush that targets a transaction, but the transaction was 1008 + * never queued onto any of the ail lists. Here we add it to 1009 + * ail1 just so that ail_drain() will find and free it. 1010 + */ 1011 + spin_lock(&sdp->sd_ail_lock); 1012 + if (tr && list_empty(&tr->tr_list)) 1013 + list_add(&tr->tr_list, &sdp->sd_ail1_list); 1014 + spin_unlock(&sdp->sd_ail_lock); 1008 1015 ail_drain(sdp); /* frees all transactions */ 1009 1016 tr = NULL; 1010 1017 }
+2 -2
fs/gfs2/log.h
··· 53 53 if (gfs2_is_jdata(ip) || !gfs2_is_ordered(sdp)) 54 54 return; 55 55 56 - if (!test_bit(GIF_ORDERED, &ip->i_flags)) { 56 + if (list_empty(&ip->i_ordered)) { 57 57 spin_lock(&sdp->sd_ordered_lock); 58 - if (!test_and_set_bit(GIF_ORDERED, &ip->i_flags)) 58 + if (list_empty(&ip->i_ordered)) 59 59 list_add(&ip->i_ordered, &sdp->sd_log_ordered); 60 60 spin_unlock(&sdp->sd_ordered_lock); 61 61 }
+1
fs/gfs2/main.c
··· 39 39 atomic_set(&ip->i_sizehint, 0); 40 40 init_rwsem(&ip->i_rw_mutex); 41 41 INIT_LIST_HEAD(&ip->i_trunc_list); 42 + INIT_LIST_HEAD(&ip->i_ordered); 42 43 ip->i_qadata = NULL; 43 44 gfs2_holder_mark_uninitialized(&ip->i_rgd_gh); 44 45 memset(&ip->i_res, 0, sizeof(ip->i_res));
+12 -1
fs/gfs2/ops_fstype.c
··· 1136 1136 goto fail_per_node; 1137 1137 } 1138 1138 1139 - if (!sb_rdonly(sb)) { 1139 + if (sb_rdonly(sb)) { 1140 + struct gfs2_holder freeze_gh; 1141 + 1142 + error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, 1143 + LM_FLAG_NOEXP | GL_EXACT, 1144 + &freeze_gh); 1145 + if (error) { 1146 + fs_err(sdp, "can't make FS RO: %d\n", error); 1147 + goto fail_per_node; 1148 + } 1149 + gfs2_glock_dq_uninit(&freeze_gh); 1150 + } else { 1140 1151 error = gfs2_make_fs_rw(sdp); 1141 1152 if (error) { 1142 1153 fs_err(sdp, "can't make FS RW: %d\n", error);
+2 -2
fs/gfs2/recovery.c
··· 364 364 /* Acquire a shared hold on the freeze lock */ 365 365 366 366 error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, 367 - LM_FLAG_NOEXP | LM_FLAG_PRIORITY, 368 - &thaw_gh); 367 + LM_FLAG_NOEXP | LM_FLAG_PRIORITY | 368 + GL_EXACT, &thaw_gh); 369 369 if (error) 370 370 goto fail_gunlock_ji; 371 371
+10 -10
fs/gfs2/super.c
··· 167 167 if (error) 168 168 return error; 169 169 170 - error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, 0, 170 + error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, 171 + LM_FLAG_NOEXP | GL_EXACT, 171 172 &freeze_gh); 172 173 if (error) 173 174 goto fail_threads; ··· 204 203 return 0; 205 204 206 205 fail: 207 - freeze_gh.gh_flags |= GL_NOCACHE; 208 206 gfs2_glock_dq_uninit(&freeze_gh); 209 207 fail_threads: 210 208 if (sdp->sd_quotad_process) ··· 430 430 } 431 431 432 432 error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_EXCLUSIVE, 433 - GL_NOCACHE, &sdp->sd_freeze_gh); 433 + LM_FLAG_NOEXP, &sdp->sd_freeze_gh); 434 434 if (error) 435 435 goto out; 436 436 ··· 613 613 !gfs2_glock_is_locked_by_me(sdp->sd_freeze_gl)) { 614 614 if (!log_write_allowed) { 615 615 error = gfs2_glock_nq_init(sdp->sd_freeze_gl, 616 - LM_ST_SHARED, GL_NOCACHE | 617 - LM_FLAG_TRY, &freeze_gh); 616 + LM_ST_SHARED, LM_FLAG_TRY | 617 + LM_FLAG_NOEXP | GL_EXACT, 618 + &freeze_gh); 618 619 if (error == GLR_TRYFAILED) 619 620 error = 0; 620 621 } else { 621 622 error = gfs2_glock_nq_init(sdp->sd_freeze_gl, 622 - LM_ST_SHARED, GL_NOCACHE, 623 + LM_ST_SHARED, 624 + LM_FLAG_NOEXP | GL_EXACT, 623 625 &freeze_gh); 624 626 if (error && !gfs2_withdrawn(sdp)) 625 627 return error; ··· 763 761 struct super_block *sb = sdp->sd_vfs; 764 762 765 763 atomic_inc(&sb->s_active); 766 - error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, 0, 767 - &freeze_gh); 764 + error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, 765 + LM_FLAG_NOEXP | GL_EXACT, &freeze_gh); 768 766 if (error) { 769 767 fs_info(sdp, "GFS2: couldn't get freeze lock : %d\n", error); 770 768 gfs2_assert_withdraw(sdp, 0); ··· 776 774 error); 777 775 gfs2_assert_withdraw(sdp, 0); 778 776 } 779 - if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) 780 - freeze_gh.gh_flags |= GL_NOCACHE; 781 777 gfs2_glock_dq_uninit(&freeze_gh); 782 778 } 783 779 deactivate_super(sb);
+108 -40
fs/io_uring.c
··· 605 605 606 606 struct async_poll { 607 607 struct io_poll_iocb poll; 608 + struct io_poll_iocb *double_poll; 608 609 struct io_wq_work work; 609 610 }; 610 611 ··· 1097 1096 { 1098 1097 const struct io_op_def *def = &io_op_defs[req->opcode]; 1099 1098 1099 + io_req_init_async(req); 1100 + 1100 1101 if (req->flags & REQ_F_ISREG) { 1101 1102 if (def->hash_reg_file) 1102 1103 io_wq_hash_work(&req->work, file_inode(req->file)); ··· 1107 1104 req->work.flags |= IO_WQ_WORK_UNBOUND; 1108 1105 } 1109 1106 1110 - io_req_init_async(req); 1111 1107 io_req_work_grab_env(req, def); 1112 1108 1113 1109 *link = io_prep_linked_timeout(req); ··· 1276 1274 if (cqe) { 1277 1275 clear_bit(0, &ctx->sq_check_overflow); 1278 1276 clear_bit(0, &ctx->cq_check_overflow); 1277 + ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW; 1279 1278 } 1280 1279 spin_unlock_irqrestore(&ctx->completion_lock, flags); 1281 1280 io_cqring_ev_posted(ctx); ··· 1314 1311 if (list_empty(&ctx->cq_overflow_list)) { 1315 1312 set_bit(0, &ctx->sq_check_overflow); 1316 1313 set_bit(0, &ctx->cq_check_overflow); 1314 + ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW; 1317 1315 } 1318 1316 req->flags |= REQ_F_OVERFLOW; 1319 1317 refcount_inc(&req->refs); ··· 3555 3551 if (req->flags & REQ_F_NEED_CLEANUP) 3556 3552 return 0; 3557 3553 3554 + io->msg.msg.msg_name = &io->msg.addr; 3558 3555 io->msg.iov = io->msg.fast_iov; 3559 3556 ret = sendmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags, 3560 3557 &io->msg.iov); ··· 3737 3732 3738 3733 static int io_recvmsg_copy_hdr(struct io_kiocb *req, struct io_async_ctx *io) 3739 3734 { 3735 + io->msg.msg.msg_name = &io->msg.addr; 3740 3736 io->msg.iov = io->msg.fast_iov; 3741 3737 3742 3738 #ifdef CONFIG_COMPAT ··· 3846 3840 3847 3841 ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.msg, 3848 3842 kmsg->uaddr, flags); 3849 - if (force_nonblock && ret == -EAGAIN) 3850 - return io_setup_async_msg(req, kmsg); 3843 + if (force_nonblock && ret == -EAGAIN) { 3844 + ret = 
io_setup_async_msg(req, kmsg); 3845 + if (ret != -EAGAIN) 3846 + kfree(kbuf); 3847 + return ret; 3848 + } 3851 3849 if (ret == -ERESTARTSYS) 3852 3850 ret = -EINTR; 3851 + if (kbuf) 3852 + kfree(kbuf); 3853 3853 } 3854 3854 3855 3855 if (kmsg && kmsg->iov != kmsg->fast_iov) ··· 4084 4072 int error; 4085 4073 }; 4086 4074 4075 + static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb) 4076 + { 4077 + struct task_struct *tsk = req->task; 4078 + struct io_ring_ctx *ctx = req->ctx; 4079 + int ret, notify = TWA_RESUME; 4080 + 4081 + /* 4082 + * SQPOLL kernel thread doesn't need notification, just a wakeup. 4083 + * If we're not using an eventfd, then TWA_RESUME is always fine, 4084 + * as we won't have dependencies between request completions for 4085 + * other kernel wait conditions. 4086 + */ 4087 + if (ctx->flags & IORING_SETUP_SQPOLL) 4088 + notify = 0; 4089 + else if (ctx->cq_ev_fd) 4090 + notify = TWA_SIGNAL; 4091 + 4092 + ret = task_work_add(tsk, cb, notify); 4093 + if (!ret) 4094 + wake_up_process(tsk); 4095 + return ret; 4096 + } 4097 + 4087 4098 static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll, 4088 4099 __poll_t mask, task_work_func_t func) 4089 4100 { ··· 4130 4095 * of executing it. We can't safely execute it anyway, as we may not 4131 4096 * have the needed state needed for it anyway. 
4132 4097 */ 4133 - ret = task_work_add(tsk, &req->task_work, true); 4098 + ret = io_req_task_work_add(req, &req->task_work); 4134 4099 if (unlikely(ret)) { 4135 4100 WRITE_ONCE(poll->canceled, true); 4136 4101 tsk = io_wq_get_task(req->ctx->io_wq); 4137 - task_work_add(tsk, &req->task_work, true); 4102 + task_work_add(tsk, &req->task_work, 0); 4103 + wake_up_process(tsk); 4138 4104 } 4139 - wake_up_process(tsk); 4140 4105 return 1; 4141 4106 } 4142 4107 ··· 4160 4125 return false; 4161 4126 } 4162 4127 4163 - static void io_poll_remove_double(struct io_kiocb *req) 4128 + static void io_poll_remove_double(struct io_kiocb *req, void *data) 4164 4129 { 4165 - struct io_poll_iocb *poll = (struct io_poll_iocb *) req->io; 4130 + struct io_poll_iocb *poll = data; 4166 4131 4167 4132 lockdep_assert_held(&req->ctx->completion_lock); 4168 4133 ··· 4182 4147 { 4183 4148 struct io_ring_ctx *ctx = req->ctx; 4184 4149 4185 - io_poll_remove_double(req); 4150 + io_poll_remove_double(req, req->io); 4186 4151 req->poll.done = true; 4187 4152 io_cqring_fill_event(req, error ? 
error : mangle_poll(mask)); 4188 4153 io_commit_cqring(ctx); ··· 4225 4190 int sync, void *key) 4226 4191 { 4227 4192 struct io_kiocb *req = wait->private; 4228 - struct io_poll_iocb *poll = (struct io_poll_iocb *) req->io; 4193 + struct io_poll_iocb *poll = req->apoll->double_poll; 4229 4194 __poll_t mask = key_to_poll(key); 4230 4195 4231 4196 /* for instances that support it check for an event match first: */ 4232 4197 if (mask && !(mask & poll->events)) 4233 4198 return 0; 4234 4199 4235 - if (req->poll.head) { 4200 + if (poll && poll->head) { 4236 4201 bool done; 4237 4202 4238 - spin_lock(&req->poll.head->lock); 4239 - done = list_empty(&req->poll.wait.entry); 4203 + spin_lock(&poll->head->lock); 4204 + done = list_empty(&poll->wait.entry); 4240 4205 if (!done) 4241 - list_del_init(&req->poll.wait.entry); 4242 - spin_unlock(&req->poll.head->lock); 4206 + list_del_init(&poll->wait.entry); 4207 + spin_unlock(&poll->head->lock); 4243 4208 if (!done) 4244 4209 __io_async_wake(req, poll, mask, io_poll_task_func); 4245 4210 } ··· 4259 4224 } 4260 4225 4261 4226 static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt, 4262 - struct wait_queue_head *head) 4227 + struct wait_queue_head *head, 4228 + struct io_poll_iocb **poll_ptr) 4263 4229 { 4264 4230 struct io_kiocb *req = pt->req; 4265 4231 ··· 4271 4235 */ 4272 4236 if (unlikely(poll->head)) { 4273 4237 /* already have a 2nd entry, fail a third attempt */ 4274 - if (req->io) { 4238 + if (*poll_ptr) { 4275 4239 pt->error = -EINVAL; 4276 4240 return; 4277 4241 } ··· 4283 4247 io_init_poll_iocb(poll, req->poll.events, io_poll_double_wake); 4284 4248 refcount_inc(&req->refs); 4285 4249 poll->wait.private = req; 4286 - req->io = (void *) poll; 4250 + *poll_ptr = poll; 4287 4251 } 4288 4252 4289 4253 pt->error = 0; ··· 4295 4259 struct poll_table_struct *p) 4296 4260 { 4297 4261 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt); 4262 + struct async_poll *apoll = pt->req->apoll; 
4298 4263 4299 - __io_queue_proc(&pt->req->apoll->poll, pt, head); 4264 + __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll); 4300 4265 } 4301 4266 4302 4267 static void io_sq_thread_drop_mm(struct io_ring_ctx *ctx) ··· 4347 4310 } 4348 4311 } 4349 4312 4313 + io_poll_remove_double(req, apoll->double_poll); 4350 4314 spin_unlock_irq(&ctx->completion_lock); 4351 4315 4352 4316 /* restore ->work in case we need to retry again */ 4353 4317 if (req->flags & REQ_F_WORK_INITIALIZED) 4354 4318 memcpy(&req->work, &apoll->work, sizeof(req->work)); 4319 + kfree(apoll->double_poll); 4355 4320 kfree(apoll); 4356 4321 4357 4322 if (!canceled) { ··· 4441 4402 struct async_poll *apoll; 4442 4403 struct io_poll_table ipt; 4443 4404 __poll_t mask, ret; 4444 - bool had_io; 4445 4405 4446 4406 if (!req->file || !file_can_poll(req->file)) 4447 4407 return false; ··· 4452 4414 apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC); 4453 4415 if (unlikely(!apoll)) 4454 4416 return false; 4417 + apoll->double_poll = NULL; 4455 4418 4456 4419 req->flags |= REQ_F_POLLED; 4457 4420 if (req->flags & REQ_F_WORK_INITIALIZED) 4458 4421 memcpy(&apoll->work, &req->work, sizeof(req->work)); 4459 - had_io = req->io != NULL; 4460 4422 4461 4423 io_get_req_task(req); 4462 4424 req->apoll = apoll; ··· 4474 4436 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, 4475 4437 io_async_wake); 4476 4438 if (ret) { 4477 - ipt.error = 0; 4478 - /* only remove double add if we did it here */ 4479 - if (!had_io) 4480 - io_poll_remove_double(req); 4439 + io_poll_remove_double(req, apoll->double_poll); 4481 4440 spin_unlock_irq(&ctx->completion_lock); 4482 4441 if (req->flags & REQ_F_WORK_INITIALIZED) 4483 4442 memcpy(&req->work, &apoll->work, sizeof(req->work)); 4443 + kfree(apoll->double_poll); 4484 4444 kfree(apoll); 4485 4445 return false; 4486 4446 } ··· 4509 4473 bool do_complete; 4510 4474 4511 4475 if (req->opcode == IORING_OP_POLL_ADD) { 4512 - io_poll_remove_double(req); 4476 + 
io_poll_remove_double(req, req->io); 4513 4477 do_complete = __io_poll_remove_one(req, &req->poll); 4514 4478 } else { 4515 4479 struct async_poll *apoll = req->apoll; 4480 + 4481 + io_poll_remove_double(req, apoll->double_poll); 4516 4482 4517 4483 /* non-poll requests have submit ref still */ 4518 4484 do_complete = __io_poll_remove_one(req, &apoll->poll); ··· 4528 4490 if (req->flags & REQ_F_WORK_INITIALIZED) 4529 4491 memcpy(&req->work, &apoll->work, 4530 4492 sizeof(req->work)); 4493 + kfree(apoll->double_poll); 4531 4494 kfree(apoll); 4532 4495 } 4533 4496 } ··· 4629 4590 { 4630 4591 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt); 4631 4592 4632 - __io_queue_proc(&pt->req->poll, pt, head); 4593 + __io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->io); 4633 4594 } 4634 4595 4635 4596 static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) ··· 4737 4698 { 4738 4699 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) 4739 4700 return -EINVAL; 4740 - if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len) 4701 + if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) 4702 + return -EINVAL; 4703 + if (sqe->ioprio || sqe->buf_index || sqe->len) 4741 4704 return -EINVAL; 4742 4705 4743 4706 req->timeout.addr = READ_ONCE(sqe->addr); ··· 4917 4876 { 4918 4877 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) 4919 4878 return -EINVAL; 4920 - if (sqe->flags || sqe->ioprio || sqe->off || sqe->len || 4921 - sqe->cancel_flags) 4879 + if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) 4880 + return -EINVAL; 4881 + if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags) 4922 4882 return -EINVAL; 4923 4883 4924 4884 req->cancel.addr = READ_ONCE(sqe->addr); ··· 4937 4895 static int io_files_update_prep(struct io_kiocb *req, 4938 4896 const struct io_uring_sqe *sqe) 4939 4897 { 4940 - if (sqe->flags || sqe->ioprio || sqe->rw_flags) 4898 + if 
(unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) 4899 + return -EINVAL; 4900 + if (sqe->ioprio || sqe->rw_flags) 4941 4901 return -EINVAL; 4942 4902 4943 4903 req->files_update.offset = READ_ONCE(sqe->off); ··· 5730 5686 * Never try inline submit of IOSQE_ASYNC is set, go straight 5731 5687 * to async execution. 5732 5688 */ 5689 + io_req_init_async(req); 5733 5690 req->work.flags |= IO_WQ_WORK_CONCURRENT; 5734 5691 io_queue_async_work(req); 5735 5692 } else { ··· 6102 6057 } 6103 6058 6104 6059 /* Tell userspace we may need a wakeup call */ 6060 + spin_lock_irq(&ctx->completion_lock); 6105 6061 ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP; 6106 - /* make sure to read SQ tail after writing flags */ 6107 - smp_mb(); 6062 + spin_unlock_irq(&ctx->completion_lock); 6108 6063 6109 6064 to_submit = io_sqring_entries(ctx); 6110 6065 if (!to_submit || ret == -EBUSY) { ··· 6122 6077 schedule(); 6123 6078 finish_wait(&ctx->sqo_wait, &wait); 6124 6079 6080 + spin_lock_irq(&ctx->completion_lock); 6125 6081 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP; 6082 + spin_unlock_irq(&ctx->completion_lock); 6126 6083 ret = 0; 6127 6084 continue; 6128 6085 } 6129 6086 finish_wait(&ctx->sqo_wait, &wait); 6130 6087 6088 + spin_lock_irq(&ctx->completion_lock); 6131 6089 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP; 6090 + spin_unlock_irq(&ctx->completion_lock); 6132 6091 } 6133 6092 6134 6093 mutex_lock(&ctx->uring_lock); ··· 6231 6182 do { 6232 6183 prepare_to_wait_exclusive(&ctx->wait, &iowq.wq, 6233 6184 TASK_INTERRUPTIBLE); 6185 + /* make sure we run task_work before checking for signals */ 6234 6186 if (current->task_works) 6235 6187 task_work_run(); 6236 - if (io_should_wake(&iowq, false)) 6237 - break; 6238 - schedule(); 6239 6188 if (signal_pending(current)) { 6189 + if (current->jobctl & JOBCTL_TASK_WORK) { 6190 + spin_lock_irq(&current->sighand->siglock); 6191 + current->jobctl &= ~JOBCTL_TASK_WORK; 6192 + recalc_sigpending(); 6193 + 
spin_unlock_irq(&current->sighand->siglock); 6194 + continue; 6195 + } 6240 6196 ret = -EINTR; 6241 6197 break; 6242 6198 } 6199 + if (io_should_wake(&iowq, false)) 6200 + break; 6201 + schedule(); 6243 6202 } while (1); 6244 6203 finish_wait(&ctx->wait, &iowq.wq); 6245 6204 ··· 6719 6662 for (i = 0; i < nr_tables; i++) 6720 6663 kfree(ctx->file_data->table[i].files); 6721 6664 6665 + percpu_ref_exit(&ctx->file_data->refs); 6722 6666 kfree(ctx->file_data->table); 6723 6667 kfree(ctx->file_data); 6724 6668 ctx->file_data = NULL; ··· 6872 6814 } 6873 6815 table->files[index] = file; 6874 6816 err = io_sqe_file_register(ctx, file, i); 6875 - if (err) 6817 + if (err) { 6818 + fput(file); 6876 6819 break; 6820 + } 6877 6821 } 6878 6822 nr_args--; 6879 6823 done++; ··· 7371 7311 io_mem_free(ctx->sq_sqes); 7372 7312 7373 7313 percpu_ref_exit(&ctx->refs); 7374 - if (ctx->account_mem) 7375 - io_unaccount_mem(ctx->user, 7376 - ring_pages(ctx->sq_entries, ctx->cq_entries)); 7377 7314 free_uid(ctx->user); 7378 7315 put_cred(ctx->creds); 7379 7316 kfree(ctx->cancel_hash); ··· 7455 7398 if (ctx->rings) 7456 7399 io_cqring_overflow_flush(ctx, true); 7457 7400 idr_for_each(&ctx->personality_idr, io_remove_personalities, ctx); 7401 + 7402 + /* 7403 + * Do this upfront, so we won't have a grace period where the ring 7404 + * is closed but resources aren't reaped yet. This can cause 7405 + * spurious failure in setting up a new ring. 7406 + */ 7407 + if (ctx->account_mem) 7408 + io_unaccount_mem(ctx->user, 7409 + ring_pages(ctx->sq_entries, ctx->cq_entries)); 7410 + 7458 7411 INIT_WORK(&ctx->exit_work, io_ring_exit_work); 7459 7412 queue_work(system_wq, &ctx->exit_work); 7460 7413 } ··· 7524 7457 if (list_empty(&ctx->cq_overflow_list)) { 7525 7458 clear_bit(0, &ctx->sq_check_overflow); 7526 7459 clear_bit(0, &ctx->cq_check_overflow); 7460 + ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW; 7527 7461 } 7528 7462 spin_unlock_irq(&ctx->completion_lock); 7529 7463
+1
fs/namespace.c
··· 2603 2603 if (IS_ERR(fc)) 2604 2604 return PTR_ERR(fc); 2605 2605 2606 + fc->oldapi = true; 2606 2607 err = parse_monolithic_mount_data(fc, data); 2607 2608 if (!err) { 2608 2609 down_write(&sb->s_umount);
+4 -9
fs/nfs/direct.c
··· 267 267 { 268 268 struct inode *inode = dreq->inode; 269 269 270 + inode_dio_end(inode); 271 + 270 272 if (dreq->iocb) { 271 273 long res = (long) dreq->error; 272 274 if (dreq->count != 0) { ··· 280 278 281 279 complete(&dreq->completion); 282 280 283 - igrab(inode); 284 281 nfs_direct_req_release(dreq); 285 - inode_dio_end(inode); 286 - iput(inode); 287 282 } 288 283 289 284 static void nfs_direct_read_completion(struct nfs_pgio_header *hdr) ··· 410 411 * generic layer handle the completion. 411 412 */ 412 413 if (requested_bytes == 0) { 413 - igrab(inode); 414 - nfs_direct_req_release(dreq); 415 414 inode_dio_end(inode); 416 - iput(inode); 415 + nfs_direct_req_release(dreq); 417 416 return result < 0 ? result : -EIO; 418 417 } 419 418 ··· 864 867 * generic layer handle the completion. 865 868 */ 866 869 if (requested_bytes == 0) { 867 - igrab(inode); 868 - nfs_direct_req_release(dreq); 869 870 inode_dio_end(inode); 870 - iput(inode); 871 + nfs_direct_req_release(dreq); 871 872 return result < 0 ? result : -EIO; 872 873 } 873 874
-1
fs/nfs/file.c
··· 83 83 dprintk("NFS: release(%pD2)\n", filp); 84 84 85 85 nfs_inc_stats(inode, NFSIOS_VFSRELEASE); 86 - inode_dio_wait(inode); 87 86 nfs_file_clear_open_context(filp); 88 87 return 0; 89 88 }
+18 -2
fs/nfs/nfs4proc.c
··· 774 774 slot->seq_nr_last_acked = seqnr; 775 775 } 776 776 777 + static void nfs4_probe_sequence(struct nfs_client *client, const struct cred *cred, 778 + struct nfs4_slot *slot) 779 + { 780 + struct rpc_task *task = _nfs41_proc_sequence(client, cred, slot, true); 781 + if (!IS_ERR(task)) 782 + rpc_put_task_async(task); 783 + } 784 + 777 785 static int nfs41_sequence_process(struct rpc_task *task, 778 786 struct nfs4_sequence_res *res) 779 787 { ··· 798 790 goto out; 799 791 800 792 session = slot->table->session; 793 + clp = session->clp; 801 794 802 795 trace_nfs4_sequence_done(session, res); 803 796 ··· 813 804 nfs4_slot_sequence_acked(slot, slot->seq_nr); 814 805 /* Update the slot's sequence and clientid lease timer */ 815 806 slot->seq_done = 1; 816 - clp = session->clp; 817 807 do_renew_lease(clp, res->sr_timestamp); 818 808 /* Check sequence flags */ 819 809 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags, ··· 860 852 /* 861 853 * Were one or more calls using this slot interrupted? 862 854 * If the server never received the request, then our 863 - * transmitted slot sequence number may be too high. 855 + * transmitted slot sequence number may be too high. However, 856 + * if the server did receive the request then it might 857 + * accidentally give us a reply with a mismatched operation. 858 + * We can sort this out by sending a lone sequence operation 859 + * to the server on the same slot. 864 860 */ 865 861 if ((s32)(slot->seq_nr - slot->seq_nr_last_acked) > 1) { 866 862 slot->seq_nr--; 863 + if (task->tk_msg.rpc_proc != &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE]) { 864 + nfs4_probe_sequence(clp, task->tk_msg.rpc_cred, slot); 865 + res->sr_slot = NULL; 866 + } 867 867 goto retry_nowait; 868 868 } 869 869 /*
+26 -2
fs/nfsd/nfs4state.c
··· 507 507 return ret; 508 508 } 509 509 510 + static struct nfsd_file *find_deleg_file(struct nfs4_file *f) 511 + { 512 + struct nfsd_file *ret = NULL; 513 + 514 + spin_lock(&f->fi_lock); 515 + if (f->fi_deleg_file) 516 + ret = nfsd_file_get(f->fi_deleg_file); 517 + spin_unlock(&f->fi_lock); 518 + return ret; 519 + } 520 + 510 521 static atomic_long_t num_delegations; 511 522 unsigned long max_delegations; 512 523 ··· 2455 2444 oo = ols->st_stateowner; 2456 2445 nf = st->sc_file; 2457 2446 file = find_any_file(nf); 2447 + if (!file) 2448 + return 0; 2458 2449 2459 2450 seq_printf(s, "- "); 2460 2451 nfs4_show_stateid(s, &st->sc_stateid); ··· 2494 2481 oo = ols->st_stateowner; 2495 2482 nf = st->sc_file; 2496 2483 file = find_any_file(nf); 2484 + if (!file) 2485 + return 0; 2497 2486 2498 2487 seq_printf(s, "- "); 2499 2488 nfs4_show_stateid(s, &st->sc_stateid); ··· 2528 2513 2529 2514 ds = delegstateid(st); 2530 2515 nf = st->sc_file; 2531 - file = nf->fi_deleg_file; 2516 + file = find_deleg_file(nf); 2517 + if (!file) 2518 + return 0; 2532 2519 2533 2520 seq_printf(s, "- "); 2534 2521 nfs4_show_stateid(s, &st->sc_stateid); ··· 2546 2529 seq_printf(s, ", "); 2547 2530 nfs4_show_fname(s, file); 2548 2531 seq_printf(s, " }\n"); 2532 + nfsd_file_put(file); 2549 2533 2550 2534 return 0; 2551 2535 } ··· 7930 7912 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 7931 7913 int ret; 7932 7914 7933 - ret = nfs4_state_create_net(net); 7915 + ret = get_nfsdfs(net); 7934 7916 if (ret) 7935 7917 return ret; 7918 + ret = nfs4_state_create_net(net); 7919 + if (ret) { 7920 + mntput(nn->nfsd_mnt); 7921 + return ret; 7922 + } 7936 7923 locks_start_grace(net, &nn->nfsd4_manager); 7937 7924 nfsd4_client_tracking_init(net); 7938 7925 if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0) ··· 8007 7984 8008 7985 nfsd4_client_tracking_exit(net); 8009 7986 nfs4_state_destroy_net(net); 7987 + mntput(nn->nfsd_mnt); 8010 7988 } 8011 7989 8012 7990 void
+13 -10
fs/nfsd/nfsctl.c
··· 1335 1335 WARN_ON_ONCE(ret); 1336 1336 fsnotify_rmdir(dir, dentry); 1337 1337 d_delete(dentry); 1338 + dput(dentry); 1338 1339 inode_unlock(dir); 1339 1340 } 1340 1341 ··· 1425 1424 }; 1426 1425 MODULE_ALIAS_FS("nfsd"); 1427 1426 1427 + int get_nfsdfs(struct net *net) 1428 + { 1429 + struct nfsd_net *nn = net_generic(net, nfsd_net_id); 1430 + struct vfsmount *mnt; 1431 + 1432 + mnt = vfs_kern_mount(&nfsd_fs_type, SB_KERNMOUNT, "nfsd", NULL); 1433 + if (IS_ERR(mnt)) 1434 + return PTR_ERR(mnt); 1435 + nn->nfsd_mnt = mnt; 1436 + return 0; 1437 + } 1438 + 1428 1439 #ifdef CONFIG_PROC_FS 1429 1440 static int create_proc_exports_entry(void) 1430 1441 { ··· 1464 1451 static __net_init int nfsd_init_net(struct net *net) 1465 1452 { 1466 1453 int retval; 1467 - struct vfsmount *mnt; 1468 1454 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 1469 1455 1470 1456 retval = nfsd_export_init(net); ··· 1490 1478 init_waitqueue_head(&nn->ntf_wq); 1491 1479 seqlock_init(&nn->boot_lock); 1492 1480 1493 - mnt = vfs_kern_mount(&nfsd_fs_type, SB_KERNMOUNT, "nfsd", NULL); 1494 - if (IS_ERR(mnt)) { 1495 - retval = PTR_ERR(mnt); 1496 - goto out_mount_err; 1497 - } 1498 - nn->nfsd_mnt = mnt; 1499 1481 return 0; 1500 1482 1501 - out_mount_err: 1502 - nfsd_reply_cache_shutdown(nn); 1503 1483 out_drc_error: 1504 1484 nfsd_idmap_shutdown(net); 1505 1485 out_idmap_error: ··· 1504 1500 { 1505 1501 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 1506 1502 1507 - mntput(nn->nfsd_mnt); 1508 1503 nfsd_reply_cache_shutdown(nn); 1509 1504 nfsd_idmap_shutdown(net); 1510 1505 nfsd_export_shutdown(net);
+3
fs/nfsd/nfsd.h
··· 90 90 91 91 bool i_am_nfsd(void); 92 92 93 + int get_nfsdfs(struct net *); 94 + 93 95 struct nfsdfs_client { 94 96 struct kref cl_ref; 95 97 void (*cl_release)(struct kref *kref); ··· 101 99 struct dentry *nfsd_client_mkdir(struct nfsd_net *nn, 102 100 struct nfsdfs_client *ncl, u32 id, const struct tree_descr *); 103 101 void nfsd_client_rmdir(struct dentry *dentry); 102 + 104 103 105 104 #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) 106 105 #ifdef CONFIG_NFSD_V2_ACL
+6
fs/nfsd/vfs.c
··· 1226 1226 iap->ia_mode = 0; 1227 1227 iap->ia_mode = (iap->ia_mode & S_IALLUGO) | type; 1228 1228 1229 + if (!IS_POSIXACL(dirp)) 1230 + iap->ia_mode &= ~current_umask(); 1231 + 1229 1232 err = 0; 1230 1233 host_err = 0; 1231 1234 switch (type) { ··· 1460 1457 fh_drop_write(fhp); 1461 1458 goto out; 1462 1459 } 1460 + 1461 + if (!IS_POSIXACL(dirp)) 1462 + iap->ia_mode &= ~current_umask(); 1463 1463 1464 1464 host_err = vfs_create(dirp, dchild, iap->ia_mode, true); 1465 1465 if (host_err < 0) {
+1 -1
fs/overlayfs/copy_up.c
··· 895 895 return err; 896 896 } 897 897 898 - int ovl_copy_up_flags(struct dentry *dentry, int flags) 898 + static int ovl_copy_up_flags(struct dentry *dentry, int flags) 899 899 { 900 900 int err = 0; 901 901 const struct cred *old_cred = ovl_override_creds(dentry->d_sb);
+1 -1
fs/overlayfs/export.c
··· 476 476 if (IS_ERR_OR_NULL(this)) 477 477 return this; 478 478 479 - if (WARN_ON(ovl_dentry_real_at(this, layer->idx) != real)) { 479 + if (ovl_dentry_real_at(this, layer->idx) != real) { 480 480 dput(this); 481 481 this = ERR_PTR(-EIO); 482 482 }
+6 -4
fs/overlayfs/file.c
··· 33 33 return 'm'; 34 34 } 35 35 36 + /* No atime modificaton nor notify on underlying */ 37 + #define OVL_OPEN_FLAGS (O_NOATIME | FMODE_NONOTIFY) 38 + 36 39 static struct file *ovl_open_realfile(const struct file *file, 37 40 struct inode *realinode) 38 41 { 39 42 struct inode *inode = file_inode(file); 40 43 struct file *realfile; 41 44 const struct cred *old_cred; 42 - int flags = file->f_flags | O_NOATIME | FMODE_NONOTIFY; 45 + int flags = file->f_flags | OVL_OPEN_FLAGS; 43 46 int acc_mode = ACC_MODE(flags); 44 47 int err; 45 48 ··· 75 72 struct inode *inode = file_inode(file); 76 73 int err; 77 74 78 - /* No atime modificaton on underlying */ 79 - flags |= O_NOATIME | FMODE_NONOTIFY; 75 + flags |= OVL_OPEN_FLAGS; 80 76 81 77 /* If some flag changed that cannot be changed then something's amiss */ 82 78 if (WARN_ON((file->f_flags ^ flags) & ~OVL_SETFL_MASK)) ··· 128 126 } 129 127 130 128 /* Did the flags change since open? */ 131 - if (unlikely((file->f_flags ^ real->file->f_flags) & ~O_NOATIME)) 129 + if (unlikely((file->f_flags ^ real->file->f_flags) & ~OVL_OPEN_FLAGS)) 132 130 return ovl_change_flags(real->file, file->f_flags); 133 131 134 132 return 0;
+6 -9
fs/overlayfs/namei.c
··· 389 389 } 390 390 391 391 static int ovl_check_origin(struct ovl_fs *ofs, struct dentry *upperdentry, 392 - struct ovl_path **stackp, unsigned int *ctrp) 392 + struct ovl_path **stackp) 393 393 { 394 394 struct ovl_fh *fh = ovl_get_fh(upperdentry, OVL_XATTR_ORIGIN); 395 395 int err; ··· 406 406 return err; 407 407 } 408 408 409 - if (WARN_ON(*ctrp)) 410 - return -EIO; 411 - 412 - *ctrp = 1; 413 409 return 0; 414 410 } 415 411 ··· 857 861 goto out; 858 862 } 859 863 if (upperdentry && !d.is_dir) { 860 - unsigned int origin_ctr = 0; 861 - 862 864 /* 863 865 * Lookup copy up origin by decoding origin file handle. 864 866 * We may get a disconnected dentry, which is fine, ··· 867 873 * number - it's the same as if we held a reference 868 874 * to a dentry in lower layer that was moved under us. 869 875 */ 870 - err = ovl_check_origin(ofs, upperdentry, &origin_path, 871 - &origin_ctr); 876 + err = ovl_check_origin(ofs, upperdentry, &origin_path); 872 877 if (err) 873 878 goto out_put_upper; 874 879 ··· 1066 1073 upperredirect = NULL; 1067 1074 goto out_free_oe; 1068 1075 } 1076 + err = ovl_check_metacopy_xattr(upperdentry); 1077 + if (err < 0) 1078 + goto out_free_oe; 1079 + uppermetacopy = err; 1069 1080 } 1070 1081 1071 1082 if (upperdentry || ctr) {
-1
fs/overlayfs/overlayfs.h
··· 483 483 /* copy_up.c */ 484 484 int ovl_copy_up(struct dentry *dentry); 485 485 int ovl_copy_up_with_data(struct dentry *dentry); 486 - int ovl_copy_up_flags(struct dentry *dentry, int flags); 487 486 int ovl_maybe_copy_up(struct dentry *dentry, int flags); 488 487 int ovl_copy_xattr(struct dentry *old, struct dentry *new); 489 488 int ovl_set_attr(struct dentry *upper, struct kstat *stat);
+49 -24
fs/overlayfs/super.c
··· 580 580 } 581 581 } 582 582 583 - /* Workdir is useless in non-upper mount */ 584 - if (!config->upperdir && config->workdir) { 585 - pr_info("option \"workdir=%s\" is useless in a non-upper mount, ignore\n", 586 - config->workdir); 587 - kfree(config->workdir); 588 - config->workdir = NULL; 583 + /* Workdir/index are useless in non-upper mount */ 584 + if (!config->upperdir) { 585 + if (config->workdir) { 586 + pr_info("option \"workdir=%s\" is useless in a non-upper mount, ignore\n", 587 + config->workdir); 588 + kfree(config->workdir); 589 + config->workdir = NULL; 590 + } 591 + if (config->index && index_opt) { 592 + pr_info("option \"index=on\" is useless in a non-upper mount, ignore\n"); 593 + index_opt = false; 594 + } 595 + config->index = false; 589 596 } 590 597 591 598 err = ovl_parse_redirect_mode(config, config->redirect_mode); ··· 629 622 630 623 /* Resolve nfs_export -> index dependency */ 631 624 if (config->nfs_export && !config->index) { 632 - if (nfs_export_opt && index_opt) { 625 + if (!config->upperdir && config->redirect_follow) { 626 + pr_info("NFS export requires \"redirect_dir=nofollow\" on non-upper mount, falling back to nfs_export=off.\n"); 627 + config->nfs_export = false; 628 + } else if (nfs_export_opt && index_opt) { 633 629 pr_err("conflicting options: nfs_export=on,index=off\n"); 634 630 return -EINVAL; 635 - } 636 - if (index_opt) { 631 + } else if (index_opt) { 637 632 /* 638 633 * There was an explicit index=off that resulted 639 634 * in this conflict. 
··· 1361 1352 goto out; 1362 1353 } 1363 1354 1355 + /* index dir will act also as workdir */ 1356 + iput(ofs->workdir_trap); 1357 + ofs->workdir_trap = NULL; 1358 + dput(ofs->workdir); 1359 + ofs->workdir = NULL; 1364 1360 ofs->indexdir = ovl_workdir_create(ofs, OVL_INDEXDIR_NAME, true); 1365 1361 if (ofs->indexdir) { 1362 + ofs->workdir = dget(ofs->indexdir); 1363 + 1366 1364 err = ovl_setup_trap(sb, ofs->indexdir, &ofs->indexdir_trap, 1367 1365 "indexdir"); 1368 1366 if (err) ··· 1411 1395 1412 1396 if (!ofs->config.nfs_export && !ovl_upper_mnt(ofs)) 1413 1397 return true; 1398 + 1399 + /* 1400 + * We allow using single lower with null uuid for index and nfs_export 1401 + * for example to support those features with single lower squashfs. 1402 + * To avoid regressions in setups of overlay with re-formatted lower 1403 + * squashfs, do not allow decoding origin with lower null uuid unless 1404 + * user opted-in to one of the new features that require following the 1405 + * lower inode of non-dir upper. 1406 + */ 1407 + if (!ofs->config.index && !ofs->config.metacopy && !ofs->config.xino && 1408 + uuid_is_null(uuid)) 1409 + return false; 1414 1410 1415 1411 for (i = 0; i < ofs->numfs; i++) { 1416 1412 /* ··· 1521 1493 if (err < 0) 1522 1494 goto out; 1523 1495 1496 + /* 1497 + * Check if lower root conflicts with this overlay layers before 1498 + * checking if it is in-use as upperdir/workdir of "another" 1499 + * mount, because we do not bother to check in ovl_is_inuse() if 1500 + * the upperdir/workdir is in fact in-use by our 1501 + * upperdir/workdir. 
1502 + */ 1524 1503 err = ovl_setup_trap(sb, stack[i].dentry, &trap, "lowerdir"); 1525 1504 if (err) 1526 1505 goto out; 1527 1506 1528 1507 if (ovl_is_inuse(stack[i].dentry)) { 1529 1508 err = ovl_report_in_use(ofs, "lowerdir"); 1530 - if (err) 1509 + if (err) { 1510 + iput(trap); 1531 1511 goto out; 1512 + } 1532 1513 } 1533 1514 1534 1515 mnt = clone_private_mount(&stack[i]); ··· 1612 1575 if (!ofs->config.upperdir && numlower == 1) { 1613 1576 pr_err("at least 2 lowerdir are needed while upperdir nonexistent\n"); 1614 1577 return ERR_PTR(-EINVAL); 1615 - } else if (!ofs->config.upperdir && ofs->config.nfs_export && 1616 - ofs->config.redirect_follow) { 1617 - pr_warn("NFS export requires \"redirect_dir=nofollow\" on non-upper mount, falling back to nfs_export=off.\n"); 1618 - ofs->config.nfs_export = false; 1619 1578 } 1620 1579 1621 1580 stack = kcalloc(numlower, sizeof(struct path), GFP_KERNEL); ··· 1875 1842 if (!ovl_upper_mnt(ofs)) 1876 1843 sb->s_flags |= SB_RDONLY; 1877 1844 1878 - if (!(ovl_force_readonly(ofs)) && ofs->config.index) { 1879 - /* index dir will act also as workdir */ 1880 - dput(ofs->workdir); 1881 - ofs->workdir = NULL; 1882 - iput(ofs->workdir_trap); 1883 - ofs->workdir_trap = NULL; 1884 - 1845 + if (!ovl_force_readonly(ofs) && ofs->config.index) { 1885 1846 err = ovl_get_indexdir(sb, ofs, oe, &upperpath); 1886 1847 if (err) 1887 1848 goto out_free_oe; 1888 1849 1889 1850 /* Force r/o mount with no index dir */ 1890 - if (ofs->indexdir) 1891 - ofs->workdir = dget(ofs->indexdir); 1892 - else 1851 + if (!ofs->indexdir) 1893 1852 sb->s_flags |= SB_RDONLY; 1894 1853 } 1895 1854
+3 -3
fs/proc/proc_sysctl.c
··· 566 566 goto out; 567 567 568 568 /* don't even try if the size is too large */ 569 - if (count > KMALLOC_MAX_SIZE) 570 - return -ENOMEM; 569 + error = -ENOMEM; 570 + if (count >= KMALLOC_MAX_SIZE) 571 + goto out; 571 572 572 573 if (write) { 573 574 kbuf = memdup_user_nul(ubuf, count); ··· 577 576 goto out; 578 577 } 579 578 } else { 580 - error = -ENOMEM; 581 579 kbuf = kzalloc(count, GFP_KERNEL); 582 580 if (!kbuf) 583 581 goto out;
+77 -58
fs/read_write.c
··· 419 419 return ret; 420 420 } 421 421 422 - ssize_t __vfs_read(struct file *file, char __user *buf, size_t count, 423 - loff_t *pos) 422 + ssize_t __kernel_read(struct file *file, void *buf, size_t count, loff_t *pos) 424 423 { 425 - if (file->f_op->read) 426 - return file->f_op->read(file, buf, count, pos); 427 - else if (file->f_op->read_iter) 428 - return new_sync_read(file, buf, count, pos); 429 - else 424 + mm_segment_t old_fs = get_fs(); 425 + ssize_t ret; 426 + 427 + if (WARN_ON_ONCE(!(file->f_mode & FMODE_READ))) 430 428 return -EINVAL; 429 + if (!(file->f_mode & FMODE_CAN_READ)) 430 + return -EINVAL; 431 + 432 + if (count > MAX_RW_COUNT) 433 + count = MAX_RW_COUNT; 434 + set_fs(KERNEL_DS); 435 + if (file->f_op->read) 436 + ret = file->f_op->read(file, (void __user *)buf, count, pos); 437 + else if (file->f_op->read_iter) 438 + ret = new_sync_read(file, (void __user *)buf, count, pos); 439 + else 440 + ret = -EINVAL; 441 + set_fs(old_fs); 442 + if (ret > 0) { 443 + fsnotify_access(file); 444 + add_rchar(current, ret); 445 + } 446 + inc_syscr(current); 447 + return ret; 431 448 } 432 449 433 450 ssize_t kernel_read(struct file *file, void *buf, size_t count, loff_t *pos) 434 451 { 435 - mm_segment_t old_fs; 436 - ssize_t result; 452 + ssize_t ret; 437 453 438 - old_fs = get_fs(); 439 - set_fs(KERNEL_DS); 440 - /* The cast to a user pointer is valid due to the set_fs() */ 441 - result = vfs_read(file, (void __user *)buf, count, pos); 442 - set_fs(old_fs); 443 - return result; 454 + ret = rw_verify_area(READ, file, pos, count); 455 + if (ret) 456 + return ret; 457 + return __kernel_read(file, buf, count, pos); 444 458 } 445 459 EXPORT_SYMBOL(kernel_read); 446 460 ··· 470 456 return -EFAULT; 471 457 472 458 ret = rw_verify_area(READ, file, pos, count); 473 - if (!ret) { 474 - if (count > MAX_RW_COUNT) 475 - count = MAX_RW_COUNT; 476 - ret = __vfs_read(file, buf, count, pos); 477 - if (ret > 0) { 478 - fsnotify_access(file); 479 - add_rchar(current, ret); 
480 - } 481 - inc_syscr(current); 482 - } 459 + if (ret) 460 + return ret; 461 + if (count > MAX_RW_COUNT) 462 + count = MAX_RW_COUNT; 483 463 464 + if (file->f_op->read) 465 + ret = file->f_op->read(file, buf, count, pos); 466 + else if (file->f_op->read_iter) 467 + ret = new_sync_read(file, buf, count, pos); 468 + else 469 + ret = -EINVAL; 470 + if (ret > 0) { 471 + fsnotify_access(file); 472 + add_rchar(current, ret); 473 + } 474 + inc_syscr(current); 484 475 return ret; 485 476 } 486 477 ··· 507 488 return ret; 508 489 } 509 490 510 - static ssize_t __vfs_write(struct file *file, const char __user *p, 511 - size_t count, loff_t *pos) 512 - { 513 - if (file->f_op->write) 514 - return file->f_op->write(file, p, count, pos); 515 - else if (file->f_op->write_iter) 516 - return new_sync_write(file, p, count, pos); 517 - else 518 - return -EINVAL; 519 - } 520 - 491 + /* caller is responsible for file_start_write/file_end_write */ 521 492 ssize_t __kernel_write(struct file *file, const void *buf, size_t count, loff_t *pos) 522 493 { 523 494 mm_segment_t old_fs; 524 495 const char __user *p; 525 496 ssize_t ret; 526 497 498 + if (WARN_ON_ONCE(!(file->f_mode & FMODE_WRITE))) 499 + return -EBADF; 527 500 if (!(file->f_mode & FMODE_CAN_WRITE)) 528 501 return -EINVAL; 529 502 ··· 524 513 p = (__force const char __user *)buf; 525 514 if (count > MAX_RW_COUNT) 526 515 count = MAX_RW_COUNT; 527 - ret = __vfs_write(file, p, count, pos); 516 + if (file->f_op->write) 517 + ret = file->f_op->write(file, p, count, pos); 518 + else if (file->f_op->write_iter) 519 + ret = new_sync_write(file, p, count, pos); 520 + else 521 + ret = -EINVAL; 528 522 set_fs(old_fs); 529 523 if (ret > 0) { 530 524 fsnotify_modify(file); ··· 538 522 inc_syscw(current); 539 523 return ret; 540 524 } 541 - EXPORT_SYMBOL(__kernel_write); 542 525 543 526 ssize_t kernel_write(struct file *file, const void *buf, size_t count, 544 527 loff_t *pos) 545 528 { 546 - mm_segment_t old_fs; 547 - ssize_t res; 529 + 
ssize_t ret; 548 530 549 - old_fs = get_fs(); 550 - set_fs(KERNEL_DS); 551 - /* The cast to a user pointer is valid due to the set_fs() */ 552 - res = vfs_write(file, (__force const char __user *)buf, count, pos); 553 - set_fs(old_fs); 531 + ret = rw_verify_area(WRITE, file, pos, count); 532 + if (ret) 533 + return ret; 554 534 555 - return res; 535 + file_start_write(file); 536 + ret = __kernel_write(file, buf, count, pos); 537 + file_end_write(file); 538 + return ret; 556 539 } 557 540 EXPORT_SYMBOL(kernel_write); 558 541 ··· 567 552 return -EFAULT; 568 553 569 554 ret = rw_verify_area(WRITE, file, pos, count); 570 - if (!ret) { 571 - if (count > MAX_RW_COUNT) 572 - count = MAX_RW_COUNT; 573 - file_start_write(file); 574 - ret = __vfs_write(file, buf, count, pos); 575 - if (ret > 0) { 576 - fsnotify_modify(file); 577 - add_wchar(current, ret); 578 - } 579 - inc_syscw(current); 580 - file_end_write(file); 555 + if (ret) 556 + return ret; 557 + if (count > MAX_RW_COUNT) 558 + count = MAX_RW_COUNT; 559 + file_start_write(file); 560 + if (file->f_op->write) 561 + ret = file->f_op->write(file, buf, count, pos); 562 + else if (file->f_op->write_iter) 563 + ret = new_sync_write(file, buf, count, pos); 564 + else 565 + ret = -EINVAL; 566 + if (ret > 0) { 567 + fsnotify_modify(file); 568 + add_wchar(current, ret); 581 569 } 582 - 570 + inc_syscw(current); 571 + file_end_write(file); 583 572 return ret; 584 573 } 585 574
+1 -1
fs/squashfs/block.c
··· 175 175 /* Extract the length of the metadata block */ 176 176 data = page_address(bvec->bv_page) + bvec->bv_offset; 177 177 length = data[offset]; 178 - if (offset <= bvec->bv_len - 1) { 178 + if (offset < bvec->bv_len - 1) { 179 179 length |= data[offset + 1] << 8; 180 180 } else { 181 181 if (WARN_ON_ONCE(!bio_next_segment(bio, &iter_all))) {
+5 -5
fs/xfs/xfs_log_cil.c
··· 671 671 /* 672 672 * Wake up any background push waiters now this context is being pushed. 673 673 */ 674 - wake_up_all(&ctx->push_wait); 674 + if (ctx->space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log)) 675 + wake_up_all(&cil->xc_push_wait); 675 676 676 677 /* 677 678 * Check if we've anything to push. If there is nothing, then we don't ··· 744 743 745 744 /* 746 745 * initialise the new context and attach it to the CIL. Then attach 747 - * the current context to the CIL committing lsit so it can be found 746 + * the current context to the CIL committing list so it can be found 748 747 * during log forces to extract the commit lsn of the sequence that 749 748 * needs to be forced. 750 749 */ 751 750 INIT_LIST_HEAD(&new_ctx->committing); 752 751 INIT_LIST_HEAD(&new_ctx->busy_extents); 753 - init_waitqueue_head(&new_ctx->push_wait); 754 752 new_ctx->sequence = ctx->sequence + 1; 755 753 new_ctx->cil = cil; 756 754 cil->xc_ctx = new_ctx; ··· 937 937 if (cil->xc_ctx->space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log)) { 938 938 trace_xfs_log_cil_wait(log, cil->xc_ctx->ticket); 939 939 ASSERT(cil->xc_ctx->space_used < log->l_logsize); 940 - xlog_wait(&cil->xc_ctx->push_wait, &cil->xc_push_lock); 940 + xlog_wait(&cil->xc_push_wait, &cil->xc_push_lock); 941 941 return; 942 942 } 943 943 ··· 1216 1216 INIT_LIST_HEAD(&cil->xc_committing); 1217 1217 spin_lock_init(&cil->xc_cil_lock); 1218 1218 spin_lock_init(&cil->xc_push_lock); 1219 + init_waitqueue_head(&cil->xc_push_wait); 1219 1220 init_rwsem(&cil->xc_ctx_lock); 1220 1221 init_waitqueue_head(&cil->xc_commit_wait); 1221 1222 1222 1223 INIT_LIST_HEAD(&ctx->committing); 1223 1224 INIT_LIST_HEAD(&ctx->busy_extents); 1224 - init_waitqueue_head(&ctx->push_wait); 1225 1225 ctx->sequence = 1; 1226 1226 ctx->cil = cil; 1227 1227 cil->xc_ctx = ctx;
+1 -1
fs/xfs/xfs_log_priv.h
··· 240 240 struct xfs_log_vec *lv_chain; /* logvecs being pushed */ 241 241 struct list_head iclog_entry; 242 242 struct list_head committing; /* ctx committing list */ 243 - wait_queue_head_t push_wait; /* background push throttle */ 244 243 struct work_struct discard_endio_work; 245 244 }; 246 245 ··· 273 274 wait_queue_head_t xc_commit_wait; 274 275 xfs_lsn_t xc_current_sequence; 275 276 struct work_struct xc_push_work; 277 + wait_queue_head_t xc_push_wait; /* background push throttle */ 276 278 } ____cacheline_aligned_in_smp; 277 279 278 280 /*
+11 -7
fs/zonefs/super.c
··· 607 607 int nr_pages; 608 608 ssize_t ret; 609 609 610 - nr_pages = iov_iter_npages(from, BIO_MAX_PAGES); 611 - if (!nr_pages) 612 - return 0; 613 - 614 610 max = queue_max_zone_append_sectors(bdev_get_queue(bdev)); 615 611 max = ALIGN_DOWN(max << SECTOR_SHIFT, inode->i_sb->s_blocksize); 616 612 iov_iter_truncate(from, max); 613 + 614 + nr_pages = iov_iter_npages(from, BIO_MAX_PAGES); 615 + if (!nr_pages) 616 + return 0; 617 617 618 618 bio = bio_alloc_bioset(GFP_NOFS, nr_pages, &fs_bio_set); 619 619 if (!bio) ··· 1119 1119 char *file_name; 1120 1120 struct dentry *dir; 1121 1121 unsigned int n = 0; 1122 - int ret = -ENOMEM; 1122 + int ret; 1123 1123 1124 1124 /* If the group is empty, there is nothing to do */ 1125 1125 if (!zd->nr_zones[type]) ··· 1135 1135 zgroup_name = "seq"; 1136 1136 1137 1137 dir = zonefs_create_inode(sb->s_root, zgroup_name, NULL, type); 1138 - if (!dir) 1138 + if (!dir) { 1139 + ret = -ENOMEM; 1139 1140 goto free; 1141 + } 1140 1142 1141 1143 /* 1142 1144 * The first zone contains the super block: skip it. ··· 1176 1174 * Use the file number within its group as file name. 1177 1175 */ 1178 1176 snprintf(file_name, ZONEFS_NAME_MAX - 1, "%u", n); 1179 - if (!zonefs_create_inode(dir, file_name, zone, type)) 1177 + if (!zonefs_create_inode(dir, file_name, zone, type)) { 1178 + ret = -ENOMEM; 1180 1179 goto free; 1180 + } 1181 1181 1182 1182 n++; 1183 1183 }
+4 -2
include/asm-generic/mmiowb.h
··· 27 27 #include <asm/smp.h> 28 28 29 29 DECLARE_PER_CPU(struct mmiowb_state, __mmiowb_state); 30 - #define __mmiowb_state() this_cpu_ptr(&__mmiowb_state) 30 + #define __mmiowb_state() raw_cpu_ptr(&__mmiowb_state) 31 31 #else 32 32 #define __mmiowb_state() arch_mmiowb_state() 33 33 #endif /* arch_mmiowb_state */ ··· 35 35 static inline void mmiowb_set_pending(void) 36 36 { 37 37 struct mmiowb_state *ms = __mmiowb_state(); 38 - ms->mmiowb_pending = ms->nesting_count; 38 + 39 + if (likely(ms->nesting_count)) 40 + ms->mmiowb_pending = ms->nesting_count; 39 41 } 40 42 41 43 static inline void mmiowb_spin_lock(void)
+4 -1
include/asm-generic/vmlinux.lds.h
··· 341 341 342 342 #define PAGE_ALIGNED_DATA(page_align) \ 343 343 . = ALIGN(page_align); \ 344 - *(.data..page_aligned) 344 + *(.data..page_aligned) \ 345 + . = ALIGN(page_align); 345 346 346 347 #define READ_MOSTLY_DATA(align) \ 347 348 . = ALIGN(align); \ ··· 738 737 . = ALIGN(bss_align); \ 739 738 .bss : AT(ADDR(.bss) - LOAD_OFFSET) { \ 740 739 BSS_FIRST_SECTIONS \ 740 + . = ALIGN(PAGE_SIZE); \ 741 741 *(.bss..page_aligned) \ 742 + . = ALIGN(PAGE_SIZE); \ 742 743 *(.dynbss) \ 743 744 *(BSS_MAIN) \ 744 745 *(COMMON) \
+2 -2
include/crypto/if_alg.h
··· 29 29 30 30 struct sock *parent; 31 31 32 - unsigned int refcnt; 33 - unsigned int nokey_refcnt; 32 + atomic_t refcnt; 33 + atomic_t nokey_refcnt; 34 34 35 35 const struct af_alg_type *type; 36 36 void *private;
+1 -2
include/linux/bits.h
··· 18 18 * position @h. For example 19 19 * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000. 20 20 */ 21 - #if !defined(__ASSEMBLY__) && \ 22 - (!defined(CONFIG_CC_IS_GCC) || CONFIG_GCC_VERSION >= 49000) 21 + #if !defined(__ASSEMBLY__) 23 22 #include <linux/build_bug.h> 24 23 #define GENMASK_INPUT_CHECK(h, l) \ 25 24 (BUILD_BUG_ON_ZERO(__builtin_choose_expr( \
+1
include/linux/blkdev.h
··· 590 590 u64 write_hints[BLK_MAX_WRITE_HINTS]; 591 591 }; 592 592 593 + /* Keep blk_queue_flag_name[] in sync with the definitions below */ 593 594 #define QUEUE_FLAG_STOPPED 0 /* queue is stopped */ 594 595 #define QUEUE_FLAG_DYING 1 /* queue being torn down */ 595 596 #define QUEUE_FLAG_NOMERGES 3 /* disable merge attempts */
+3 -2
include/linux/bpf-netns.h
··· 33 33 union bpf_attr __user *uattr); 34 34 int netns_bpf_prog_attach(const union bpf_attr *attr, 35 35 struct bpf_prog *prog); 36 - int netns_bpf_prog_detach(const union bpf_attr *attr); 36 + int netns_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype); 37 37 int netns_bpf_link_create(const union bpf_attr *attr, 38 38 struct bpf_prog *prog); 39 39 #else ··· 49 49 return -EOPNOTSUPP; 50 50 } 51 51 52 - static inline int netns_bpf_prog_detach(const union bpf_attr *attr) 52 + static inline int netns_bpf_prog_detach(const union bpf_attr *attr, 53 + enum bpf_prog_type ptype) 53 54 { 54 55 return -EOPNOTSUPP; 55 56 }
+11 -2
include/linux/bpf.h
··· 1543 1543 #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ 1544 1544 1545 1545 #if defined(CONFIG_BPF_STREAM_PARSER) 1546 - int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, u32 which); 1546 + int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, 1547 + struct bpf_prog *old, u32 which); 1547 1548 int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog); 1549 + int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype); 1548 1550 void sock_map_unhash(struct sock *sk); 1549 1551 void sock_map_close(struct sock *sk, long timeout); 1550 1552 #else 1551 1553 static inline int sock_map_prog_update(struct bpf_map *map, 1552 - struct bpf_prog *prog, u32 which) 1554 + struct bpf_prog *prog, 1555 + struct bpf_prog *old, u32 which) 1553 1556 { 1554 1557 return -EOPNOTSUPP; 1555 1558 } ··· 1561 1558 struct bpf_prog *prog) 1562 1559 { 1563 1560 return -EINVAL; 1561 + } 1562 + 1563 + static inline int sock_map_prog_detach(const union bpf_attr *attr, 1564 + enum bpf_prog_type ptype) 1565 + { 1566 + return -EOPNOTSUPP; 1564 1567 } 1565 1568 #endif /* CONFIG_BPF_STREAM_PARSER */ 1566 1569
+5
include/linux/btf.h
··· 82 82 return BTF_INFO_KIND(t->info) == BTF_KIND_INT; 83 83 } 84 84 85 + static inline bool btf_type_is_small_int(const struct btf_type *t) 86 + { 87 + return btf_type_is_int(t) && t->size <= sizeof(u64); 88 + } 89 + 85 90 static inline bool btf_type_is_enum(const struct btf_type *t) 86 91 { 87 92 return BTF_INFO_KIND(t->info) == BTF_KIND_ENUM;
+6 -2
include/linux/cgroup-defs.h
··· 790 790 union { 791 791 #ifdef __LITTLE_ENDIAN 792 792 struct { 793 - u8 is_data; 793 + u8 is_data : 1; 794 + u8 no_refcnt : 1; 795 + u8 unused : 6; 794 796 u8 padding; 795 797 u16 prioidx; 796 798 u32 classid; ··· 802 800 u32 classid; 803 801 u16 prioidx; 804 802 u8 padding; 805 - u8 is_data; 803 + u8 unused : 6; 804 + u8 no_refcnt : 1; 805 + u8 is_data : 1; 806 806 } __packed; 807 807 #endif 808 808 u64 val;
+3 -1
include/linux/cgroup.h
··· 822 822 823 823 void cgroup_sk_alloc_disable(void); 824 824 void cgroup_sk_alloc(struct sock_cgroup_data *skcd); 825 + void cgroup_sk_clone(struct sock_cgroup_data *skcd); 825 826 void cgroup_sk_free(struct sock_cgroup_data *skcd); 826 827 827 828 static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd) ··· 836 835 */ 837 836 v = READ_ONCE(skcd->val); 838 837 839 - if (v & 1) 838 + if (v & 3) 840 839 return &cgrp_dfl_root.cgrp; 841 840 842 841 return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp; ··· 848 847 #else /* CONFIG_CGROUP_DATA */ 849 848 850 849 static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {} 850 + static inline void cgroup_sk_clone(struct sock_cgroup_data *skcd) {} 851 851 static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {} 852 852 853 853 #endif /* CONFIG_CGROUP_DATA */
+1 -1
include/linux/compiler-gcc.h
··· 11 11 + __GNUC_PATCHLEVEL__) 12 12 13 13 /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 */ 14 - #if GCC_VERSION < 40800 14 + #if GCC_VERSION < 40900 15 15 # error Sorry, your compiler is too old - please upgrade it. 16 16 #endif 17 17
+1 -26
include/linux/compiler_types.h
··· 252 252 * __unqual_scalar_typeof(x) - Declare an unqualified scalar type, leaving 253 253 * non-scalar types unchanged. 254 254 */ 255 - #if (defined(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION < 40900) || defined(__CHECKER__) 256 255 /* 257 - * We build this out of a couple of helper macros in a vain attempt to 258 - * help you keep your lunch down while reading it. 259 - */ 260 - #define __pick_scalar_type(x, type, otherwise) \ 261 - __builtin_choose_expr(__same_type(x, type), (type)0, otherwise) 262 - 263 - /* 264 - * 'char' is not type-compatible with either 'signed char' or 'unsigned char', 265 - * so we include the naked type here as well as the signed/unsigned variants. 266 - */ 267 - #define __pick_integer_type(x, type, otherwise) \ 268 - __pick_scalar_type(x, type, \ 269 - __pick_scalar_type(x, unsigned type, \ 270 - __pick_scalar_type(x, signed type, otherwise))) 271 - 272 - #define __unqual_scalar_typeof(x) typeof( \ 273 - __pick_integer_type(x, char, \ 274 - __pick_integer_type(x, short, \ 275 - __pick_integer_type(x, int, \ 276 - __pick_integer_type(x, long, \ 277 - __pick_integer_type(x, long long, x)))))) 278 - #else 279 - /* 280 - * If supported, prefer C11 _Generic for better compile-times. As above, 'char' 256 + * Prefer C11 _Generic for better compile-times and simpler code. Note: 'char' 281 257 * is not type-compatible with 'signed char', and we define a separate case. 282 258 */ 283 259 #define __scalar_type_to_expr_cases(type) \ ··· 269 293 __scalar_type_to_expr_cases(long), \ 270 294 __scalar_type_to_expr_cases(long long), \ 271 295 default: (x))) 272 - #endif 273 296 274 297 /* Is this type a native word size -- useful for atomic operations */ 275 298 #define __native_word(t) \
+1
include/linux/device-mapper.h
··· 426 426 int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid); 427 427 struct gendisk *dm_disk(struct mapped_device *md); 428 428 int dm_suspended(struct dm_target *ti); 429 + int dm_post_suspending(struct dm_target *ti); 429 430 int dm_noflush_suspending(struct dm_target *ti); 430 431 void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors); 431 432 union map_info *dm_get_rq_mapinfo(struct request *rq);
+3 -2
include/linux/device.h
··· 433 433 * @suppliers: List of links to supplier devices. 434 434 * @consumers: List of links to consumer devices. 435 435 * @needs_suppliers: Hook to global list of devices waiting for suppliers. 436 - * @defer_sync: Hook to global list of devices that have deferred sync_state. 436 + * @defer_hook: Hook to global list of devices that have deferred sync_state or 437 + * deferred fw_devlink. 437 438 * @need_for_probe: If needs_suppliers is on a list, this indicates if the 438 439 * suppliers are needed for probe or not. 439 440 * @status: Driver status information. ··· 443 442 struct list_head suppliers; 444 443 struct list_head consumers; 445 444 struct list_head needs_suppliers; 446 - struct list_head defer_sync; 445 + struct list_head defer_hook; 447 446 bool need_for_probe; 448 447 enum dl_dev_state status; 449 448 };
+1
include/linux/dma-buf.h
··· 311 311 void *vmap_ptr; 312 312 const char *exp_name; 313 313 const char *name; 314 + spinlock_t name_lock; /* spinlock to protect name access */ 314 315 struct module *owner; 315 316 struct list_head list_node; 316 317 void *priv;
+2
include/linux/dma-direct.h
··· 69 69 u64 dma_direct_get_required_mask(struct device *dev); 70 70 gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask, 71 71 u64 *phys_mask); 72 + bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size); 72 73 void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, 73 74 gfp_t gfp, unsigned long attrs); 74 75 void dma_direct_free(struct device *dev, size_t size, void *cpu_addr, ··· 86 85 void *cpu_addr, dma_addr_t dma_addr, size_t size, 87 86 unsigned long attrs); 88 87 int dma_direct_supported(struct device *dev, u64 mask); 88 + bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr); 89 89 #endif /* _LINUX_DMA_DIRECT_H */
+5
include/linux/dma-mapping.h
··· 461 461 int dma_set_coherent_mask(struct device *dev, u64 mask); 462 462 u64 dma_get_required_mask(struct device *dev); 463 463 size_t dma_max_mapping_size(struct device *dev); 464 + bool dma_need_sync(struct device *dev, dma_addr_t dma_addr); 464 465 unsigned long dma_get_merge_boundary(struct device *dev); 465 466 #else /* CONFIG_HAS_DMA */ 466 467 static inline dma_addr_t dma_map_page_attrs(struct device *dev, ··· 571 570 static inline size_t dma_max_mapping_size(struct device *dev) 572 571 { 573 572 return 0; 573 + } 574 + static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr) 575 + { 576 + return false; 574 577 } 575 578 static inline unsigned long dma_get_merge_boundary(struct device *dev) 576 579 {
+1
include/linux/efi.h
··· 994 994 int efivars_unregister(struct efivars *efivars); 995 995 struct kobject *efivars_kobject(void); 996 996 997 + int efivar_supports_writes(void); 997 998 int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *), 998 999 void *data, bool duplicates, struct list_head *head); 999 1000
+2 -2
include/linux/filter.h
··· 884 884 bool bpf_jit_needs_zext(void); 885 885 bool bpf_helper_changes_pkt_data(void *func); 886 886 887 - static inline bool bpf_dump_raw_ok(void) 887 + static inline bool bpf_dump_raw_ok(const struct cred *cred) 888 888 { 889 889 /* Reconstruction of call-sites is dependent on kallsyms, 890 890 * thus make dump the same restriction. 891 891 */ 892 - return kallsyms_show_value() == 1; 892 + return kallsyms_show_value(cred); 893 893 } 894 894 895 895 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
+2 -1
include/linux/fs.h
··· 315 315 #define IOCB_SYNC (1 << 5) 316 316 #define IOCB_WRITE (1 << 6) 317 317 #define IOCB_NOWAIT (1 << 7) 318 + #define IOCB_NOIO (1 << 9) 318 319 319 320 struct kiocb { 320 321 struct file *ki_filp; ··· 1918 1917 struct iovec *fast_pointer, 1919 1918 struct iovec **ret_pointer); 1920 1919 1921 - extern ssize_t __vfs_read(struct file *, char __user *, size_t, loff_t *); 1922 1920 extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *); 1923 1921 extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *); 1924 1922 extern ssize_t vfs_readv(struct file *, const struct iovec __user *, ··· 3033 3033 extern int kernel_read_file_from_fd(int, void **, loff_t *, loff_t, 3034 3034 enum kernel_read_file_id); 3035 3035 extern ssize_t kernel_read(struct file *, void *, size_t, loff_t *); 3036 + ssize_t __kernel_read(struct file *file, void *buf, size_t count, loff_t *pos); 3036 3037 extern ssize_t kernel_write(struct file *, const void *, size_t, loff_t *); 3037 3038 extern ssize_t __kernel_write(struct file *, const void *, size_t, loff_t *); 3038 3039 extern struct file * open_exec(const char *);
+1
include/linux/fs_context.h
··· 109 109 enum fs_context_phase phase:8; /* The phase the context is in */ 110 110 bool need_free:1; /* Need to call ops->free() */ 111 111 bool global:1; /* Goes into &init_user_ns */ 112 + bool oldapi:1; /* Coming from mount(2) */ 112 113 }; 113 114 114 115 struct fs_context_operations {
+1 -1
include/linux/i2c.h
··· 56 56 * on a bus (or read from them). Apart from two basic transfer functions to 57 57 * transmit one message at a time, a more complex version can be used to 58 58 * transmit an arbitrary number of messages without interruption. 59 - * @count must be be less than 64k since msg.len is u16. 59 + * @count must be less than 64k since msg.len is u16. 60 60 */ 61 61 int i2c_transfer_buffer_flags(const struct i2c_client *client, 62 62 char *buf, int count, u16 flags);
+4
include/linux/ieee80211.h
··· 3333 3333 #define WLAN_AKM_SUITE_TDLS SUITE(0x000FAC, 7) 3334 3334 #define WLAN_AKM_SUITE_SAE SUITE(0x000FAC, 8) 3335 3335 #define WLAN_AKM_SUITE_FT_OVER_SAE SUITE(0x000FAC, 9) 3336 + #define WLAN_AKM_SUITE_AP_PEER_KEY SUITE(0x000FAC, 10) 3336 3337 #define WLAN_AKM_SUITE_8021X_SUITE_B SUITE(0x000FAC, 11) 3337 3338 #define WLAN_AKM_SUITE_8021X_SUITE_B_192 SUITE(0x000FAC, 12) 3339 + #define WLAN_AKM_SUITE_FT_8021X_SHA384 SUITE(0x000FAC, 13) 3338 3340 #define WLAN_AKM_SUITE_FILS_SHA256 SUITE(0x000FAC, 14) 3339 3341 #define WLAN_AKM_SUITE_FILS_SHA384 SUITE(0x000FAC, 15) 3340 3342 #define WLAN_AKM_SUITE_FT_FILS_SHA256 SUITE(0x000FAC, 16) 3341 3343 #define WLAN_AKM_SUITE_FT_FILS_SHA384 SUITE(0x000FAC, 17) 3342 3344 #define WLAN_AKM_SUITE_OWE SUITE(0x000FAC, 18) 3345 + #define WLAN_AKM_SUITE_FT_PSK_SHA384 SUITE(0x000FAC, 19) 3346 + #define WLAN_AKM_SUITE_PSK_SHA384 SUITE(0x000FAC, 20) 3343 3347 3344 3348 #define WLAN_MAX_KEY_LEN 32 3345 3349
+22 -7
include/linux/if_vlan.h
··· 25 25 #define VLAN_ETH_DATA_LEN 1500 /* Max. octets in payload */ 26 26 #define VLAN_ETH_FRAME_LEN 1518 /* Max. octets in frame sans FCS */ 27 27 28 + #define VLAN_MAX_DEPTH 8 /* Max. number of nested VLAN tags parsed */ 29 + 28 30 /* 29 31 * struct vlan_hdr - vlan header 30 32 * @h_vlan_TCI: priority and VLAN ID ··· 579 577 * Returns the EtherType of the packet, regardless of whether it is 580 578 * vlan encapsulated (normal or hardware accelerated) or not. 581 579 */ 582 - static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type, 580 + static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type, 583 581 int *depth) 584 582 { 585 - unsigned int vlan_depth = skb->mac_len; 583 + unsigned int vlan_depth = skb->mac_len, parse_depth = VLAN_MAX_DEPTH; 586 584 587 585 /* if type is 802.1Q/AD then the header should already be 588 586 * present at mac_len - VLAN_HLEN (if mac_len > 0), or at ··· 597 595 vlan_depth = ETH_HLEN; 598 596 } 599 597 do { 600 - struct vlan_hdr *vh; 598 + struct vlan_hdr vhdr, *vh; 601 599 602 - if (unlikely(!pskb_may_pull(skb, 603 - vlan_depth + VLAN_HLEN))) 600 + vh = skb_header_pointer(skb, vlan_depth, sizeof(vhdr), &vhdr); 601 + if (unlikely(!vh || !--parse_depth)) 604 602 return 0; 605 603 606 - vh = (struct vlan_hdr *)(skb->data + vlan_depth); 607 604 type = vh->h_vlan_encapsulated_proto; 608 605 vlan_depth += VLAN_HLEN; 609 606 } while (eth_type_vlan(type)); ··· 621 620 * Returns the EtherType of the packet, regardless of whether it is 622 621 * vlan encapsulated (normal or hardware accelerated) or not. 623 622 */ 624 - static inline __be16 vlan_get_protocol(struct sk_buff *skb) 623 + static inline __be16 vlan_get_protocol(const struct sk_buff *skb) 625 624 { 626 625 return __vlan_get_protocol(skb, skb->protocol, NULL); 626 + } 627 + 628 + /* A getter for the SKB protocol field which will handle VLAN tags consistently 629 + * whether VLAN acceleration is enabled or not. 
630 + */ 631 + static inline __be16 skb_protocol(const struct sk_buff *skb, bool skip_vlan) 632 + { 633 + if (!skip_vlan) 634 + /* VLAN acceleration strips the VLAN header from the skb and 635 + * moves it to skb->vlan_proto 636 + */ 637 + return skb_vlan_tag_present(skb) ? skb->vlan_proto : skb->protocol; 638 + 639 + return vlan_get_protocol(skb); 627 640 } 628 641 629 642 static inline void vlan_set_encap_proto(struct sk_buff *skb,
+7
include/linux/input/elan-i2c-ids.h
··· 67 67 { "ELAN062B", 0 }, 68 68 { "ELAN062C", 0 }, 69 69 { "ELAN062D", 0 }, 70 + { "ELAN062E", 0 }, /* Lenovo V340 Whiskey Lake U */ 71 + { "ELAN062F", 0 }, /* Lenovo V340 Comet Lake U */ 70 72 { "ELAN0631", 0 }, 71 73 { "ELAN0632", 0 }, 74 + { "ELAN0633", 0 }, /* Lenovo S145 */ 75 + { "ELAN0634", 0 }, /* Lenovo V340 Ice lake */ 76 + { "ELAN0635", 0 }, /* Lenovo V1415-IIL */ 77 + { "ELAN0636", 0 }, /* Lenovo V1415-Dali */ 78 + { "ELAN0637", 0 }, /* Lenovo V1415-IGLR */ 72 79 { "ELAN1000", 0 }, 73 80 { } 74 81 };
+4 -1
include/linux/io-mapping.h
··· 107 107 resource_size_t base, 108 108 unsigned long size) 109 109 { 110 + iomap->iomem = ioremap_wc(base, size); 111 + if (!iomap->iomem) 112 + return NULL; 113 + 110 114 iomap->base = base; 111 115 iomap->size = size; 112 - iomap->iomem = ioremap_wc(base, size); 113 116 #if defined(pgprot_noncached_wc) /* archs can't agree on a name ... */ 114 117 iomap->prot = pgprot_noncached_wc(PAGE_KERNEL); 115 118 #elif defined(pgprot_writecombine)
+3 -2
include/linux/kallsyms.h
··· 18 18 #define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \ 19 19 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1) 20 20 21 + struct cred; 21 22 struct module; 22 23 23 24 static inline int is_kernel_inittext(unsigned long addr) ··· 99 98 int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name); 100 99 101 100 /* How and when do we show kallsyms values? */ 102 - extern int kallsyms_show_value(void); 101 + extern bool kallsyms_show_value(const struct cred *cred); 103 102 104 103 #else /* !CONFIG_KALLSYMS */ 105 104 ··· 159 158 return -ERANGE; 160 159 } 161 160 162 - static inline int kallsyms_show_value(void) 161 + static inline bool kallsyms_show_value(const struct cred *cred) 163 162 { 164 163 return false; 165 164 }
+12
include/linux/kgdb.h
··· 177 177 struct pt_regs *regs); 178 178 179 179 /** 180 + * kgdb_arch_handle_qxfer_pkt - Handle architecture specific GDB XML 181 + * packets. 182 + * @remcom_in_buffer: The buffer of the packet we have read. 183 + * @remcom_out_buffer: The buffer of %BUFMAX bytes to write a packet into. 184 + */ 185 + 186 + extern void 187 + kgdb_arch_handle_qxfer_pkt(char *remcom_in_buffer, 188 + char *remcom_out_buffer); 189 + 190 + /** 180 191 * kgdb_call_nmi_hook - Call kgdb_nmicallback() on the current CPU 181 192 * @ignored: This parameter is only here to match the prototype. 182 193 * ··· 325 314 326 315 extern int kgdb_isremovedbreak(unsigned long addr); 327 316 extern void kgdb_schedule_breakpoint(void); 317 + extern int kgdb_has_hit_break(unsigned long addr); 328 318 329 319 extern int 330 320 kgdb_handle_exception(int ex_vector, int signo, int err_code,
+2 -2
include/linux/lsm_hook_defs.h
··· 150 150 size_t buffer_size) 151 151 LSM_HOOK(void, LSM_RET_VOID, inode_getsecid, struct inode *inode, u32 *secid) 152 152 LSM_HOOK(int, 0, inode_copy_up, struct dentry *src, struct cred **new) 153 - LSM_HOOK(int, 0, inode_copy_up_xattr, const char *name) 153 + LSM_HOOK(int, -EOPNOTSUPP, inode_copy_up_xattr, const char *name) 154 154 LSM_HOOK(int, 0, kernfs_init_security, struct kernfs_node *kn_dir, 155 155 struct kernfs_node *kn) 156 156 LSM_HOOK(int, 0, file_permission, struct file *file, int mask) ··· 360 360 unsigned long flags) 361 361 LSM_HOOK(void, LSM_RET_VOID, key_free, struct key *key) 362 362 LSM_HOOK(int, 0, key_permission, key_ref_t key_ref, const struct cred *cred, 363 - unsigned perm) 363 + enum key_need_perm need_perm) 364 364 LSM_HOOK(int, 0, key_getsecurity, struct key *key, char **_buffer) 365 365 #endif /* CONFIG_KEYS */ 366 366
+1
include/linux/mlx5/driver.h
··· 147 147 MLX5_REG_MCDA = 0x9063, 148 148 MLX5_REG_MCAM = 0x907f, 149 149 MLX5_REG_MIRC = 0x9162, 150 + MLX5_REG_SBCAM = 0xB01F, 150 151 MLX5_REG_RESOURCE_DUMP = 0xC000, 151 152 }; 152 153
+28
include/linux/mlx5/mlx5_ifc.h
··· 9960 9960 u8 untagged_buff[0x4]; 9961 9961 }; 9962 9962 9963 + struct mlx5_ifc_sbcam_reg_bits { 9964 + u8 reserved_at_0[0x8]; 9965 + u8 feature_group[0x8]; 9966 + u8 reserved_at_10[0x8]; 9967 + u8 access_reg_group[0x8]; 9968 + 9969 + u8 reserved_at_20[0x20]; 9970 + 9971 + u8 sb_access_reg_cap_mask[4][0x20]; 9972 + 9973 + u8 reserved_at_c0[0x80]; 9974 + 9975 + u8 sb_feature_cap_mask[4][0x20]; 9976 + 9977 + u8 reserved_at_1c0[0x40]; 9978 + 9979 + u8 cap_total_buffer_size[0x20]; 9980 + 9981 + u8 cap_cell_size[0x10]; 9982 + u8 cap_max_pg_buffers[0x8]; 9983 + u8 cap_num_pool_supported[0x8]; 9984 + 9985 + u8 reserved_at_240[0x8]; 9986 + u8 cap_sbsr_stat_size[0x8]; 9987 + u8 cap_max_tclass_data[0x8]; 9988 + u8 cap_max_cpu_ingress_tclass_sb[0x8]; 9989 + }; 9990 + 9963 9991 struct mlx5_ifc_pbmc_reg_bits { 9964 9992 u8 reserved_at_0[0x8]; 9965 9993 u8 local_port[0x8];
+1 -1
include/linux/mod_devicetable.h
··· 318 318 #define INPUT_DEVICE_ID_LED_MAX 0x0f 319 319 #define INPUT_DEVICE_ID_SND_MAX 0x07 320 320 #define INPUT_DEVICE_ID_FF_MAX 0x7f 321 - #define INPUT_DEVICE_ID_SW_MAX 0x0f 321 + #define INPUT_DEVICE_ID_SW_MAX 0x10 322 322 #define INPUT_DEVICE_ID_PROP_MAX 0x1f 323 323 324 324 #define INPUT_DEVICE_ID_MATCH_BUS 1
+5 -6
include/linux/pci.h
··· 2169 2169 */ 2170 2170 static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev) 2171 2171 { 2172 - struct pci_dev *bridge = pci_upstream_bridge(dev); 2173 - 2174 - while (bridge) { 2175 - if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT) 2176 - return bridge; 2177 - bridge = pci_upstream_bridge(bridge); 2172 + while (dev) { 2173 + if (pci_is_pcie(dev) && 2174 + pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) 2175 + return dev; 2176 + dev = pci_upstream_bridge(dev); 2178 2177 } 2179 2178 2180 2179 return NULL;
+1 -1
include/linux/rhashtable.h
··· 33 33 * of two or more hash tables when the rhashtable is being resized. 34 34 * The end of the chain is marked with a special nulls marks which has 35 35 * the least significant bit set but otherwise stores the address of 36 - * the hash bucket. This allows us to be be sure we've found the end 36 + * the hash bucket. This allows us to be sure we've found the end 37 37 * of the right list. 38 38 * The value stored in the hash bucket has BIT(0) used as a lock bit. 39 39 * This bit must be atomically set before any changes are made to
+4 -4
include/linux/scatterlist.h
··· 155 155 * Loop over each sg element in the given sg_table object. 156 156 */ 157 157 #define for_each_sgtable_sg(sgt, sg, i) \ 158 - for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) 158 + for_each_sg((sgt)->sgl, sg, (sgt)->orig_nents, i) 159 159 160 160 /* 161 161 * Loop over each sg element in the given *DMA mapped* sg_table object. ··· 163 163 * of the each element. 164 164 */ 165 165 #define for_each_sgtable_dma_sg(sgt, sg, i) \ 166 - for_each_sg(sgt->sgl, sg, sgt->nents, i) 166 + for_each_sg((sgt)->sgl, sg, (sgt)->nents, i) 167 167 168 168 /** 169 169 * sg_chain - Chain two sglists together ··· 451 451 * See also for_each_sg_page(). In each loop it operates on PAGE_SIZE unit. 452 452 */ 453 453 #define for_each_sgtable_page(sgt, piter, pgoffset) \ 454 - for_each_sg_page(sgt->sgl, piter, sgt->orig_nents, pgoffset) 454 + for_each_sg_page((sgt)->sgl, piter, (sgt)->orig_nents, pgoffset) 455 455 456 456 /** 457 457 * for_each_sgtable_dma_page - iterate over the DMA mapped sg_table object ··· 465 465 * unit. 466 466 */ 467 467 #define for_each_sgtable_dma_page(sgt, dma_iter, pgoffset) \ 468 - for_each_sg_dma_page(sgt->sgl, dma_iter, sgt->nents, pgoffset) 468 + for_each_sg_dma_page((sgt)->sgl, dma_iter, (sgt)->nents, pgoffset) 469 469 470 470 471 471 /*
-4
include/linux/sched.h
··· 114 114 115 115 #define task_is_stopped_or_traced(task) ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) 116 116 117 - #define task_contributes_to_load(task) ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ 118 - (task->flags & PF_FROZEN) == 0 && \ 119 - (task->state & TASK_NOLOAD) == 0) 120 - 121 117 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP 122 118 123 119 /*
+3 -1
include/linux/sched/jobctl.h
··· 19 19 #define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */ 20 20 #define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */ 21 21 #define JOBCTL_TRAP_FREEZE_BIT 23 /* trap for cgroup freezer */ 22 + #define JOBCTL_TASK_WORK_BIT 24 /* set by TWA_SIGNAL */ 22 23 23 24 #define JOBCTL_STOP_DEQUEUED (1UL << JOBCTL_STOP_DEQUEUED_BIT) 24 25 #define JOBCTL_STOP_PENDING (1UL << JOBCTL_STOP_PENDING_BIT) ··· 29 28 #define JOBCTL_TRAPPING (1UL << JOBCTL_TRAPPING_BIT) 30 29 #define JOBCTL_LISTENING (1UL << JOBCTL_LISTENING_BIT) 31 30 #define JOBCTL_TRAP_FREEZE (1UL << JOBCTL_TRAP_FREEZE_BIT) 31 + #define JOBCTL_TASK_WORK (1UL << JOBCTL_TASK_WORK_BIT) 32 32 33 33 #define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY) 34 - #define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK) 34 + #define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK | JOBCTL_TASK_WORK) 35 35 36 36 extern bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask); 37 37 extern void task_clear_jobctl_trapping(struct task_struct *task);
+98 -4
include/linux/serial_core.h
··· 462 462 extern void uart_insert_char(struct uart_port *port, unsigned int status, 463 463 unsigned int overrun, unsigned int ch, unsigned int flag); 464 464 465 - extern int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch); 466 - extern int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch); 467 - extern void uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long flags); 468 - extern int uart_handle_break(struct uart_port *port); 465 + #ifdef CONFIG_MAGIC_SYSRQ_SERIAL 466 + #define SYSRQ_TIMEOUT (HZ * 5) 467 + 468 + bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch); 469 + 470 + static inline int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch) 471 + { 472 + if (!port->sysrq) 473 + return 0; 474 + 475 + if (ch && time_before(jiffies, port->sysrq)) { 476 + if (sysrq_mask()) { 477 + handle_sysrq(ch); 478 + port->sysrq = 0; 479 + return 1; 480 + } 481 + if (uart_try_toggle_sysrq(port, ch)) 482 + return 1; 483 + } 484 + port->sysrq = 0; 485 + 486 + return 0; 487 + } 488 + 489 + static inline int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch) 490 + { 491 + if (!port->sysrq) 492 + return 0; 493 + 494 + if (ch && time_before(jiffies, port->sysrq)) { 495 + if (sysrq_mask()) { 496 + port->sysrq_ch = ch; 497 + port->sysrq = 0; 498 + return 1; 499 + } 500 + if (uart_try_toggle_sysrq(port, ch)) 501 + return 1; 502 + } 503 + port->sysrq = 0; 504 + 505 + return 0; 506 + } 507 + 508 + static inline void uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long irqflags) 509 + { 510 + int sysrq_ch; 511 + 512 + if (!port->has_sysrq) { 513 + spin_unlock_irqrestore(&port->lock, irqflags); 514 + return; 515 + } 516 + 517 + sysrq_ch = port->sysrq_ch; 518 + port->sysrq_ch = 0; 519 + 520 + spin_unlock_irqrestore(&port->lock, irqflags); 521 + 522 + if (sysrq_ch) 523 + handle_sysrq(sysrq_ch); 524 + } 525 + #else /* CONFIG_MAGIC_SYSRQ_SERIAL */ 526 + static inline int 
uart_handle_sysrq_char(struct uart_port *port, unsigned int ch) 527 + { 528 + return 0; 529 + } 530 + static inline int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch) 531 + { 532 + return 0; 533 + } 534 + static inline void uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long irqflags) 535 + { 536 + spin_unlock_irqrestore(&port->lock, irqflags); 537 + } 538 + #endif /* CONFIG_MAGIC_SYSRQ_SERIAL */ 539 + 540 + /* 541 + * We do the SysRQ and SAK checking like this... 542 + */ 543 + static inline int uart_handle_break(struct uart_port *port) 544 + { 545 + struct uart_state *state = port->state; 546 + 547 + if (port->handle_break) 548 + port->handle_break(port); 549 + 550 + #ifdef CONFIG_MAGIC_SYSRQ_SERIAL 551 + if (port->has_sysrq && uart_console(port)) { 552 + if (!port->sysrq) { 553 + port->sysrq = jiffies + SYSRQ_TIMEOUT; 554 + return 1; 555 + } 556 + port->sysrq = 0; 557 + } 558 + #endif 559 + if (port->flags & UPF_SAK) 560 + do_SAK(state->port.tty); 561 + return 0; 562 + } 469 563 470 564 /* 471 565 * UART_ENABLE_MS - determine if port should enable modem status irqs
+13
include/linux/skmsg.h
··· 430 430 bpf_prog_put(prog); 431 431 } 432 432 433 + static inline int psock_replace_prog(struct bpf_prog **pprog, 434 + struct bpf_prog *prog, 435 + struct bpf_prog *old) 436 + { 437 + if (cmpxchg(pprog, old, prog) != old) 438 + return -ENOENT; 439 + 440 + if (old) 441 + bpf_prog_put(old); 442 + 443 + return 0; 444 + } 445 + 433 446 static inline void psock_progs_drop(struct sk_psock_progs *progs) 434 447 { 435 448 psock_set_prog(&progs->msg_parser, NULL);
+4 -1
include/linux/task_work.h
··· 13 13 twork->func = func; 14 14 } 15 15 16 - int task_work_add(struct task_struct *task, struct callback_head *twork, bool); 16 + #define TWA_RESUME 1 17 + #define TWA_SIGNAL 2 18 + int task_work_add(struct task_struct *task, struct callback_head *twork, int); 19 + 17 20 struct callback_head *task_work_cancel(struct task_struct *, task_work_func_t); 18 21 void task_work_run(void); 19 22
+4 -2
include/linux/tcp.h
··· 220 220 } rack; 221 221 u16 advmss; /* Advertised MSS */ 222 222 u8 compressed_ack; 223 - u8 dup_ack_counter; 223 + u8 dup_ack_counter:2, 224 + tlp_retrans:1, /* TLP is a retransmission */ 225 + unused:5; 224 226 u32 chrono_start; /* Start time in jiffies of a TCP chrono */ 225 227 u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */ 226 228 u8 chrono_type:2, /* current chronograph type */ ··· 245 243 save_syn:1, /* Save headers of SYN packet */ 246 244 is_cwnd_limited:1,/* forward progress limited by snd_cwnd? */ 247 245 syn_smc:1; /* SYN includes SMC */ 248 - u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */ 246 + u32 tlp_high_seq; /* snd_nxt at the time of TLP */ 249 247 250 248 u32 tcp_tx_delay; /* delay (in usec) added to TX packets */ 251 249 u64 tcp_wstamp_ns; /* departure time for next sent data packet */
+2 -1
include/linux/xattr.h
··· 15 15 #include <linux/slab.h> 16 16 #include <linux/types.h> 17 17 #include <linux/spinlock.h> 18 + #include <linux/mm.h> 18 19 #include <uapi/linux/xattr.h> 19 20 20 21 struct inode; ··· 95 94 96 95 list_for_each_entry_safe(xattr, node, &xattrs->head, list) { 97 96 kfree(xattr->name); 98 - kfree(xattr); 97 + kvfree(xattr); 99 98 } 100 99 } 101 100
+9 -1
include/net/dst.h
··· 400 400 static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst, 401 401 struct sk_buff *skb) 402 402 { 403 - struct neighbour *n = dst->ops->neigh_lookup(dst, skb, NULL); 403 + struct neighbour *n = NULL; 404 + 405 + /* The packets from tunnel devices (eg bareudp) may have only 406 + * metadata in the dst pointer of skb. Hence a pointer check of 407 + * neigh_lookup is needed. 408 + */ 409 + if (dst->ops->neigh_lookup) 410 + n = dst->ops->neigh_lookup(dst, skb, NULL); 411 + 404 412 return IS_ERR(n) ? NULL : n; 405 413 } 406 414
+2 -1
include/net/flow_dissector.h
··· 372 372 } 373 373 374 374 #ifdef CONFIG_BPF_SYSCALL 375 - int flow_dissector_bpf_prog_attach(struct net *net, struct bpf_prog *prog); 375 + int flow_dissector_bpf_prog_attach_check(struct net *net, 376 + struct bpf_prog *prog); 376 377 #endif /* CONFIG_BPF_SYSCALL */ 377 378 378 379 #endif
-1
include/net/flow_offload.h
··· 5 5 #include <linux/list.h> 6 6 #include <linux/netlink.h> 7 7 #include <net/flow_dissector.h> 8 - #include <linux/rhashtable.h> 9 8 10 9 struct flow_match { 11 10 struct flow_dissector *dissector;
-10
include/net/genetlink.h
··· 35 35 * do additional, common, filtering and return an error 36 36 * @post_doit: called after an operation's doit callback, it may 37 37 * undo operations done by pre_doit, for example release locks 38 - * @mcast_bind: a socket bound to the given multicast group (which 39 - * is given as the offset into the groups array) 40 - * @mcast_unbind: a socket was unbound from the given multicast group. 41 - * Note that unbind() will not be called symmetrically if the 42 - * generic netlink family is removed while there are still open 43 - * sockets. 44 - * @attrbuf: buffer to store parsed attributes (private) 45 38 * @mcgrps: multicast groups used by this family 46 39 * @n_mcgrps: number of multicast groups 47 40 * @mcgrp_offset: starting number of multicast group IDs in this family ··· 57 64 void (*post_doit)(const struct genl_ops *ops, 58 65 struct sk_buff *skb, 59 66 struct genl_info *info); 60 - int (*mcast_bind)(struct net *net, int group); 61 - void (*mcast_unbind)(struct net *net, int group); 62 - struct nlattr ** attrbuf; /* private */ 63 67 const struct genl_ops * ops; 64 68 const struct genl_multicast_group *mcgrps; 65 69 unsigned int n_ops;
+17 -8
include/net/inet_ecn.h
··· 4 4 5 5 #include <linux/ip.h> 6 6 #include <linux/skbuff.h> 7 + #include <linux/if_vlan.h> 7 8 8 9 #include <net/inet_sock.h> 9 10 #include <net/dsfield.h> ··· 173 172 174 173 static inline int INET_ECN_set_ce(struct sk_buff *skb) 175 174 { 176 - switch (skb->protocol) { 175 + switch (skb_protocol(skb, true)) { 177 176 case cpu_to_be16(ETH_P_IP): 178 177 if (skb_network_header(skb) + sizeof(struct iphdr) <= 179 178 skb_tail_pointer(skb)) ··· 192 191 193 192 static inline int INET_ECN_set_ect1(struct sk_buff *skb) 194 193 { 195 - switch (skb->protocol) { 194 + switch (skb_protocol(skb, true)) { 196 195 case cpu_to_be16(ETH_P_IP): 197 196 if (skb_network_header(skb) + sizeof(struct iphdr) <= 198 197 skb_tail_pointer(skb)) ··· 273 272 { 274 273 __u8 inner; 275 274 276 - if (skb->protocol == htons(ETH_P_IP)) 275 + switch (skb_protocol(skb, true)) { 276 + case htons(ETH_P_IP): 277 277 inner = ip_hdr(skb)->tos; 278 - else if (skb->protocol == htons(ETH_P_IPV6)) 278 + break; 279 + case htons(ETH_P_IPV6): 279 280 inner = ipv6_get_dsfield(ipv6_hdr(skb)); 280 - else 281 + break; 282 + default: 281 283 return 0; 284 + } 282 285 283 286 return INET_ECN_decapsulate(skb, oiph->tos, inner); 284 287 } ··· 292 287 { 293 288 __u8 inner; 294 289 295 - if (skb->protocol == htons(ETH_P_IP)) 290 + switch (skb_protocol(skb, true)) { 291 + case htons(ETH_P_IP): 296 292 inner = ip_hdr(skb)->tos; 297 - else if (skb->protocol == htons(ETH_P_IPV6)) 293 + break; 294 + case htons(ETH_P_IPV6): 298 295 inner = ipv6_get_dsfield(ipv6_hdr(skb)); 299 - else 296 + break; 297 + default: 300 298 return 0; 299 + } 301 300 302 301 return INET_ECN_decapsulate(skb, ipv6_get_dsfield(oipv6h), inner); 303 302 }
+3
include/net/ip_tunnels.h
··· 290 290 struct ip_tunnel_parm *p, __u32 fwmark); 291 291 void ip_tunnel_setup(struct net_device *dev, unsigned int net_id); 292 292 293 + extern const struct header_ops ip_tunnel_header_ops; 294 + __be16 ip_tunnel_parse_protocol(const struct sk_buff *skb); 295 + 293 296 struct ip_tunnel_encap_ops { 294 297 size_t (*encap_hlen)(struct ip_tunnel_encap *e); 295 298 int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
+5 -2
include/net/netns/bpf.h
··· 9 9 #include <linux/bpf-netns.h> 10 10 11 11 struct bpf_prog; 12 + struct bpf_prog_array; 12 13 13 14 struct netns_bpf { 14 - struct bpf_prog __rcu *progs[MAX_NETNS_BPF_ATTACH_TYPE]; 15 - struct bpf_link *links[MAX_NETNS_BPF_ATTACH_TYPE]; 15 + /* Array of programs to run compiled from progs or links */ 16 + struct bpf_prog_array __rcu *run_array[MAX_NETNS_BPF_ATTACH_TYPE]; 17 + struct bpf_prog *progs[MAX_NETNS_BPF_ATTACH_TYPE]; 18 + struct list_head links[MAX_NETNS_BPF_ATTACH_TYPE]; 16 19 }; 17 20 18 21 #endif /* __NETNS_BPF_H__ */
-11
include/net/pkt_sched.h
··· 136 136 } 137 137 } 138 138 139 - static inline __be16 tc_skb_protocol(const struct sk_buff *skb) 140 - { 141 - /* We need to take extra care in case the skb came via 142 - * vlan accelerated path. In that case, use skb->vlan_proto 143 - * as the original vlan header was already stripped. 144 - */ 145 - if (skb_vlan_tag_present(skb)) 146 - return skb->vlan_proto; 147 - return skb->protocol; 148 - } 149 - 150 139 /* Calculate maximal size of packet seen by hard_start_xmit 151 140 routine of this device. 152 141 */
+2 -1
include/net/sock.h
··· 533 533 * be copied. 534 534 */ 535 535 #define SK_USER_DATA_NOCOPY 1UL 536 - #define SK_USER_DATA_PTRMASK ~(SK_USER_DATA_NOCOPY) 536 + #define SK_USER_DATA_BPF 2UL /* Managed by BPF */ 537 + #define SK_USER_DATA_PTRMASK ~(SK_USER_DATA_NOCOPY | SK_USER_DATA_BPF) 537 538 538 539 /** 539 540 * sk_user_data_is_nocopy - Test if sk_user_data pointer must not be copied
+3 -3
include/net/xsk_buff_pool.h
··· 40 40 u32 headroom; 41 41 u32 chunk_size; 42 42 u32 frame_len; 43 - bool cheap_dma; 43 + bool dma_need_sync; 44 44 bool unaligned; 45 45 void *addrs; 46 46 struct device *dev; ··· 80 80 void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb); 81 81 static inline void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb) 82 82 { 83 - if (xskb->pool->cheap_dma) 83 + if (!xskb->pool->dma_need_sync) 84 84 return; 85 85 86 86 xp_dma_sync_for_cpu_slow(xskb); ··· 91 91 static inline void xp_dma_sync_for_device(struct xsk_buff_pool *pool, 92 92 dma_addr_t dma, size_t size) 93 93 { 94 - if (pool->cheap_dma) 94 + if (!pool->dma_need_sync) 95 95 return; 96 96 97 97 xp_dma_sync_for_device_slow(pool, dma, size);
+9 -1
include/sound/compress_driver.h
··· 66 66 * @direction: stream direction, playback/recording 67 67 * @metadata_set: metadata set flag, true when set 68 68 * @next_track: has userspace signal next track transition, true when set 69 + * @partial_drain: undergoing partial_drain for stream, true when set 69 70 * @private_data: pointer to DSP private data 70 71 * @dma_buffer: allocated buffer if any 71 72 */ ··· 79 78 enum snd_compr_direction direction; 80 79 bool metadata_set; 81 80 bool next_track; 81 + bool partial_drain; 82 82 void *private_data; 83 83 struct snd_dma_buffer dma_buffer; 84 84 }; ··· 184 182 if (snd_BUG_ON(!stream)) 185 183 return; 186 184 187 - stream->runtime->state = SNDRV_PCM_STATE_SETUP; 185 + /* for partial_drain case we are back to running state on success */ 186 + if (stream->partial_drain) { 187 + stream->runtime->state = SNDRV_PCM_STATE_RUNNING; 188 + stream->partial_drain = false; /* clear this flag as well */ 189 + } else { 190 + stream->runtime->state = SNDRV_PCM_STATE_SETUP; 191 + } 188 192 189 193 wake_up(&stream->runtime->sleep); 190 194 }
+1
include/sound/rt5670.h
··· 12 12 int jd_mode; 13 13 bool in2_diff; 14 14 bool dev_gpio; 15 + bool gpio1_is_ext_spk_en; 15 16 16 17 bool dmic_en; 17 18 unsigned int dmic1_data_pin;
+1
include/sound/soc-dai.h
··· 161 161 int snd_soc_dai_compress_new(struct snd_soc_dai *dai, 162 162 struct snd_soc_pcm_runtime *rtd, int num); 163 163 bool snd_soc_dai_stream_valid(struct snd_soc_dai *dai, int stream); 164 + void snd_soc_dai_link_set_capabilities(struct snd_soc_dai_link *dai_link); 164 165 void snd_soc_dai_action(struct snd_soc_dai *dai, 165 166 int stream, int action); 166 167 static inline void snd_soc_dai_activate(struct snd_soc_dai *dai,
+2
include/sound/soc.h
··· 444 444 const struct snd_soc_component_driver *component_driver, 445 445 struct snd_soc_dai_driver *dai_drv, int num_dai); 446 446 void snd_soc_unregister_component(struct device *dev); 447 + void snd_soc_unregister_component_by_driver(struct device *dev, 448 + const struct snd_soc_component_driver *component_driver); 447 449 struct snd_soc_component *snd_soc_lookup_component_nolocked(struct device *dev, 448 450 const char *driver_name); 449 451 struct snd_soc_component *snd_soc_lookup_component(struct device *dev,
+21 -20
include/uapi/linux/bpf.h
··· 3171 3171 * int bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags) 3172 3172 * Description 3173 3173 * Copy *size* bytes from *data* into a ring buffer *ringbuf*. 3174 - * If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of 3175 - * new data availability is sent. 3176 - * IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of 3177 - * new data availability is sent unconditionally. 3174 + * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification 3175 + * of new data availability is sent. 3176 + * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification 3177 + * of new data availability is sent unconditionally. 3178 3178 * Return 3179 - * 0, on success; 3180 - * < 0, on error. 3179 + * 0 on success, or a negative error in case of failure. 3181 3180 * 3182 3181 * void *bpf_ringbuf_reserve(void *ringbuf, u64 size, u64 flags) 3183 3182 * Description ··· 3188 3189 * void bpf_ringbuf_submit(void *data, u64 flags) 3189 3190 * Description 3190 3191 * Submit reserved ring buffer sample, pointed to by *data*. 3191 - * If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of 3192 - * new data availability is sent. 3193 - * IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of 3194 - * new data availability is sent unconditionally. 3192 + * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification 3193 + * of new data availability is sent. 3194 + * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification 3195 + * of new data availability is sent unconditionally. 3195 3196 * Return 3196 3197 * Nothing. Always succeeds. 3197 3198 * 3198 3199 * void bpf_ringbuf_discard(void *data, u64 flags) 3199 3200 * Description 3200 3201 * Discard reserved ring buffer sample, pointed to by *data*. 3201 - * If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of 3202 - * new data availability is sent. 
3203 - * IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of 3204 - * new data availability is sent unconditionally. 3202 + * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification 3203 + * of new data availability is sent. 3204 + * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification 3205 + * of new data availability is sent unconditionally. 3205 3206 * Return 3206 3207 * Nothing. Always succeeds. 3207 3208 * ··· 3209 3210 * Description 3210 3211 * Query various characteristics of provided ring buffer. What 3211 3212 * exactly is queries is determined by *flags*: 3212 - * - BPF_RB_AVAIL_DATA - amount of data not yet consumed; 3213 - * - BPF_RB_RING_SIZE - the size of ring buffer; 3214 - * - BPF_RB_CONS_POS - consumer position (can wrap around); 3215 - * - BPF_RB_PROD_POS - producer(s) position (can wrap around); 3216 - * Data returned is just a momentary snapshots of actual values 3213 + * 3214 + * * **BPF_RB_AVAIL_DATA**: Amount of data not yet consumed. 3215 + * * **BPF_RB_RING_SIZE**: The size of ring buffer. 3216 + * * **BPF_RB_CONS_POS**: Consumer position (can wrap around). 3217 + * * **BPF_RB_PROD_POS**: Producer(s) position (can wrap around). 3218 + * 3219 + * Data returned is just a momentary snapshot of actual values 3217 3220 * and could be inaccurate, so this facility should be used to 3218 3221 * power heuristics and for reporting, not to make 100% correct 3219 3222 * calculation. 3220 3223 * Return 3221 - * Requested value, or 0, if flags are not recognized. 3224 + * Requested value, or 0, if *flags* are not recognized. 3222 3225 * 3223 3226 * int bpf_csum_level(struct sk_buff *skb, u64 level) 3224 3227 * Description
+3
include/uapi/linux/idxd.h
··· 110 110 uint16_t rsvd1; 111 111 union { 112 112 uint8_t expected_res; 113 + /* create delta record */ 113 114 struct { 114 115 uint64_t delta_addr; 115 116 uint32_t max_delta_size; 117 + uint32_t delt_rsvd; 118 + uint8_t expected_res_mask; 116 119 }; 117 120 uint32_t delta_rec_size; 118 121 uint64_t dest2;
+2 -1
include/uapi/linux/input-event-codes.h
··· 888 888 #define SW_LINEIN_INSERT 0x0d /* set = inserted */ 889 889 #define SW_MUTE_DEVICE 0x0e /* set = device disabled */ 890 890 #define SW_PEN_INSERTED 0x0f /* set = pen inserted */ 891 - #define SW_MAX 0x0f 891 + #define SW_MACHINE_COVER 0x10 /* set = cover closed */ 892 + #define SW_MAX 0x10 892 893 #define SW_CNT (SW_MAX+1) 893 894 894 895 /*
+1
include/uapi/linux/io_uring.h
··· 197 197 * sq_ring->flags 198 198 */ 199 199 #define IORING_SQ_NEED_WAKEUP (1U << 0) /* needs io_uring_enter wakeup */ 200 + #define IORING_SQ_CQ_OVERFLOW (1U << 1) /* CQ ring is overflown */ 200 201 201 202 struct io_cqring_offsets { 202 203 __u32 head;
+2 -2
include/uapi/linux/vboxguest.h
··· 103 103 104 104 105 105 /* IOCTL to perform a VMM Device request larger then 1KB. */ 106 - #define VBG_IOCTL_VMMDEV_REQUEST_BIG _IOC(_IOC_READ | _IOC_WRITE, 'V', 3, 0) 106 + #define VBG_IOCTL_VMMDEV_REQUEST_BIG _IO('V', 3) 107 107 108 108 109 109 /** VBG_IOCTL_HGCM_CONNECT data structure. */ ··· 198 198 } u; 199 199 }; 200 200 201 - #define VBG_IOCTL_LOG(s) _IOC(_IOC_READ | _IOC_WRITE, 'V', 9, s) 201 + #define VBG_IOCTL_LOG(s) _IO('V', 9) 202 202 203 203 204 204 /** VBG_IOCTL_WAIT_FOR_EVENTS data structure. */
+4 -4
init/Kconfig
··· 49 49 50 50 config CC_CAN_LINK 51 51 bool 52 - default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(m64-flag)) if 64BIT 53 - default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(m32-flag)) 52 + default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(m64-flag)) if 64BIT 53 + default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(m32-flag)) 54 54 55 55 config CC_CAN_LINK_STATIC 56 56 bool 57 - default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) -static $(m64-flag)) if 64BIT 58 - default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) -static $(m32-flag)) 57 + default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(m64-flag) -static) if 64BIT 58 + default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(m32-flag) -static) 59 59 60 60 config CC_HAS_ASM_GOTO 61 61 def_bool $(success,$(srctree)/scripts/gcc-goto.sh $(CC))
+2 -2
kernel/bpf/btf.c
··· 3746 3746 return false; 3747 3747 3748 3748 t = btf_type_skip_modifiers(btf, t->type, NULL); 3749 - if (!btf_type_is_int(t)) { 3749 + if (!btf_type_is_small_int(t)) { 3750 3750 bpf_log(log, 3751 3751 "ret type %s not allowed for fmod_ret\n", 3752 3752 btf_kind_str[BTF_INFO_KIND(t->info)]); ··· 3768 3768 /* skip modifiers */ 3769 3769 while (btf_type_is_modifier(t)) 3770 3770 t = btf_type_by_id(btf, t->type); 3771 - if (btf_type_is_int(t) || btf_type_is_enum(t)) 3771 + if (btf_type_is_small_int(t) || btf_type_is_enum(t)) 3772 3772 /* accessing a scalar */ 3773 3773 return true; 3774 3774 if (!btf_type_is_ptr(t)) {
+134 -60
kernel/bpf/net_namespace.c
··· 19 19 * with netns_bpf_mutex held. 20 20 */ 21 21 struct net *net; 22 + struct list_head node; /* node in list of links attached to net */ 22 23 }; 23 24 24 25 /* Protects updates to netns_bpf */ 25 26 DEFINE_MUTEX(netns_bpf_mutex); 26 27 27 28 /* Must be called with netns_bpf_mutex held. */ 28 - static void __net_exit bpf_netns_link_auto_detach(struct bpf_link *link) 29 + static void netns_bpf_run_array_detach(struct net *net, 30 + enum netns_bpf_attach_type type) 29 31 { 30 - struct bpf_netns_link *net_link = 31 - container_of(link, struct bpf_netns_link, link); 32 + struct bpf_prog_array *run_array; 32 33 33 - net_link->net = NULL; 34 + run_array = rcu_replace_pointer(net->bpf.run_array[type], NULL, 35 + lockdep_is_held(&netns_bpf_mutex)); 36 + bpf_prog_array_free(run_array); 34 37 } 35 38 36 39 static void bpf_netns_link_release(struct bpf_link *link) ··· 43 40 enum netns_bpf_attach_type type = net_link->netns_type; 44 41 struct net *net; 45 42 46 - /* Link auto-detached by dying netns. */ 47 - if (!net_link->net) 48 - return; 49 - 50 43 mutex_lock(&netns_bpf_mutex); 51 44 52 - /* Recheck after potential sleep. We can race with cleanup_net 53 - * here, but if we see a non-NULL struct net pointer pre_exit 54 - * has not happened yet and will block on netns_bpf_mutex. 45 + /* We can race with cleanup_net, but if we see a non-NULL 46 + * struct net pointer, pre_exit has not run yet and wait for 47 + * netns_bpf_mutex. 
55 48 */ 56 49 net = net_link->net; 57 50 if (!net) 58 51 goto out_unlock; 59 52 60 - net->bpf.links[type] = NULL; 61 - RCU_INIT_POINTER(net->bpf.progs[type], NULL); 53 + netns_bpf_run_array_detach(net, type); 54 + list_del(&net_link->node); 62 55 63 56 out_unlock: 64 57 mutex_unlock(&netns_bpf_mutex); ··· 75 76 struct bpf_netns_link *net_link = 76 77 container_of(link, struct bpf_netns_link, link); 77 78 enum netns_bpf_attach_type type = net_link->netns_type; 79 + struct bpf_prog_array *run_array; 78 80 struct net *net; 79 81 int ret = 0; 80 82 ··· 93 93 goto out_unlock; 94 94 } 95 95 96 + run_array = rcu_dereference_protected(net->bpf.run_array[type], 97 + lockdep_is_held(&netns_bpf_mutex)); 98 + WRITE_ONCE(run_array->items[0].prog, new_prog); 99 + 96 100 old_prog = xchg(&link->prog, new_prog); 97 - rcu_assign_pointer(net->bpf.progs[type], new_prog); 98 101 bpf_prog_put(old_prog); 99 102 100 103 out_unlock: ··· 145 142 .show_fdinfo = bpf_netns_link_show_fdinfo, 146 143 }; 147 144 145 + /* Must be called with netns_bpf_mutex held. 
*/ 146 + static int __netns_bpf_prog_query(const union bpf_attr *attr, 147 + union bpf_attr __user *uattr, 148 + struct net *net, 149 + enum netns_bpf_attach_type type) 150 + { 151 + __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids); 152 + struct bpf_prog_array *run_array; 153 + u32 prog_cnt = 0, flags = 0; 154 + 155 + run_array = rcu_dereference_protected(net->bpf.run_array[type], 156 + lockdep_is_held(&netns_bpf_mutex)); 157 + if (run_array) 158 + prog_cnt = bpf_prog_array_length(run_array); 159 + 160 + if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags))) 161 + return -EFAULT; 162 + if (copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt))) 163 + return -EFAULT; 164 + if (!attr->query.prog_cnt || !prog_ids || !prog_cnt) 165 + return 0; 166 + 167 + return bpf_prog_array_copy_to_user(run_array, prog_ids, 168 + attr->query.prog_cnt); 169 + } 170 + 148 171 int netns_bpf_prog_query(const union bpf_attr *attr, 149 172 union bpf_attr __user *uattr) 150 173 { 151 - __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids); 152 - u32 prog_id, prog_cnt = 0, flags = 0; 153 174 enum netns_bpf_attach_type type; 154 - struct bpf_prog *attached; 155 175 struct net *net; 176 + int ret; 156 177 157 178 if (attr->query.query_flags) 158 179 return -EINVAL; ··· 189 162 if (IS_ERR(net)) 190 163 return PTR_ERR(net); 191 164 192 - rcu_read_lock(); 193 - attached = rcu_dereference(net->bpf.progs[type]); 194 - if (attached) { 195 - prog_cnt = 1; 196 - prog_id = attached->aux->id; 197 - } 198 - rcu_read_unlock(); 165 + mutex_lock(&netns_bpf_mutex); 166 + ret = __netns_bpf_prog_query(attr, uattr, net, type); 167 + mutex_unlock(&netns_bpf_mutex); 199 168 200 169 put_net(net); 201 - 202 - if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags))) 203 - return -EFAULT; 204 - if (copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt))) 205 - return -EFAULT; 206 - 207 - if (!attr->query.prog_cnt || !prog_ids || !prog_cnt) 208 - 
return 0; 209 - 210 - if (copy_to_user(prog_ids, &prog_id, sizeof(u32))) 211 - return -EFAULT; 212 - 213 - return 0; 170 + return ret; 214 171 } 215 172 216 173 int netns_bpf_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog) 217 174 { 175 + struct bpf_prog_array *run_array; 218 176 enum netns_bpf_attach_type type; 177 + struct bpf_prog *attached; 219 178 struct net *net; 220 179 int ret; 180 + 181 + if (attr->target_fd || attr->attach_flags || attr->replace_bpf_fd) 182 + return -EINVAL; 221 183 222 184 type = to_netns_bpf_attach_type(attr->attach_type); 223 185 if (type < 0) ··· 216 200 mutex_lock(&netns_bpf_mutex); 217 201 218 202 /* Attaching prog directly is not compatible with links */ 219 - if (net->bpf.links[type]) { 203 + if (!list_empty(&net->bpf.links[type])) { 220 204 ret = -EEXIST; 221 205 goto out_unlock; 222 206 } 223 207 224 208 switch (type) { 225 209 case NETNS_BPF_FLOW_DISSECTOR: 226 - ret = flow_dissector_bpf_prog_attach(net, prog); 210 + ret = flow_dissector_bpf_prog_attach_check(net, prog); 227 211 break; 228 212 default: 229 213 ret = -EINVAL; 230 214 break; 231 215 } 216 + if (ret) 217 + goto out_unlock; 218 + 219 + attached = net->bpf.progs[type]; 220 + if (attached == prog) { 221 + /* The same program cannot be attached twice */ 222 + ret = -EINVAL; 223 + goto out_unlock; 224 + } 225 + 226 + run_array = rcu_dereference_protected(net->bpf.run_array[type], 227 + lockdep_is_held(&netns_bpf_mutex)); 228 + if (run_array) { 229 + WRITE_ONCE(run_array->items[0].prog, prog); 230 + } else { 231 + run_array = bpf_prog_array_alloc(1, GFP_KERNEL); 232 + if (!run_array) { 233 + ret = -ENOMEM; 234 + goto out_unlock; 235 + } 236 + run_array->items[0].prog = prog; 237 + rcu_assign_pointer(net->bpf.run_array[type], run_array); 238 + } 239 + 240 + net->bpf.progs[type] = prog; 241 + if (attached) 242 + bpf_prog_put(attached); 243 + 232 244 out_unlock: 233 245 mutex_unlock(&netns_bpf_mutex); 234 246 ··· 265 221 266 222 /* Must be called with 
netns_bpf_mutex held. */ 267 223 static int __netns_bpf_prog_detach(struct net *net, 268 - enum netns_bpf_attach_type type) 224 + enum netns_bpf_attach_type type, 225 + struct bpf_prog *old) 269 226 { 270 227 struct bpf_prog *attached; 271 228 272 229 /* Progs attached via links cannot be detached */ 273 - if (net->bpf.links[type]) 230 + if (!list_empty(&net->bpf.links[type])) 274 231 return -EINVAL; 275 232 276 - attached = rcu_dereference_protected(net->bpf.progs[type], 277 - lockdep_is_held(&netns_bpf_mutex)); 278 - if (!attached) 233 + attached = net->bpf.progs[type]; 234 + if (!attached || attached != old) 279 235 return -ENOENT; 280 - RCU_INIT_POINTER(net->bpf.progs[type], NULL); 236 + netns_bpf_run_array_detach(net, type); 237 + net->bpf.progs[type] = NULL; 281 238 bpf_prog_put(attached); 282 239 return 0; 283 240 } 284 241 285 - int netns_bpf_prog_detach(const union bpf_attr *attr) 242 + int netns_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype) 286 243 { 287 244 enum netns_bpf_attach_type type; 245 + struct bpf_prog *prog; 288 246 int ret; 247 + 248 + if (attr->target_fd) 249 + return -EINVAL; 289 250 290 251 type = to_netns_bpf_attach_type(attr->attach_type); 291 252 if (type < 0) 292 253 return -EINVAL; 293 254 255 + prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); 256 + if (IS_ERR(prog)) 257 + return PTR_ERR(prog); 258 + 294 259 mutex_lock(&netns_bpf_mutex); 295 - ret = __netns_bpf_prog_detach(current->nsproxy->net_ns, type); 260 + ret = __netns_bpf_prog_detach(current->nsproxy->net_ns, type, prog); 296 261 mutex_unlock(&netns_bpf_mutex); 262 + 263 + bpf_prog_put(prog); 297 264 298 265 return ret; 299 266 } ··· 312 257 static int netns_bpf_link_attach(struct net *net, struct bpf_link *link, 313 258 enum netns_bpf_attach_type type) 314 259 { 315 - struct bpf_prog *prog; 260 + struct bpf_netns_link *net_link = 261 + container_of(link, struct bpf_netns_link, link); 262 + struct bpf_prog_array *run_array; 316 263 int err; 317 264 
318 265 mutex_lock(&netns_bpf_mutex); 319 266 320 267 /* Allow attaching only one prog or link for now */ 321 - if (net->bpf.links[type]) { 268 + if (!list_empty(&net->bpf.links[type])) { 322 269 err = -E2BIG; 323 270 goto out_unlock; 324 271 } 325 272 /* Links are not compatible with attaching prog directly */ 326 - prog = rcu_dereference_protected(net->bpf.progs[type], 327 - lockdep_is_held(&netns_bpf_mutex)); 328 - if (prog) { 273 + if (net->bpf.progs[type]) { 329 274 err = -EEXIST; 330 275 goto out_unlock; 331 276 } 332 277 333 278 switch (type) { 334 279 case NETNS_BPF_FLOW_DISSECTOR: 335 - err = flow_dissector_bpf_prog_attach(net, link->prog); 280 + err = flow_dissector_bpf_prog_attach_check(net, link->prog); 336 281 break; 337 282 default: 338 283 err = -EINVAL; ··· 341 286 if (err) 342 287 goto out_unlock; 343 288 344 - net->bpf.links[type] = link; 289 + run_array = bpf_prog_array_alloc(1, GFP_KERNEL); 290 + if (!run_array) { 291 + err = -ENOMEM; 292 + goto out_unlock; 293 + } 294 + run_array->items[0].prog = link->prog; 295 + rcu_assign_pointer(net->bpf.run_array[type], run_array); 296 + 297 + list_add_tail(&net_link->node, &net->bpf.links[type]); 345 298 346 299 out_unlock: 347 300 mutex_unlock(&netns_bpf_mutex); ··· 408 345 return err; 409 346 } 410 347 348 + static int __net_init netns_bpf_pernet_init(struct net *net) 349 + { 350 + int type; 351 + 352 + for (type = 0; type < MAX_NETNS_BPF_ATTACH_TYPE; type++) 353 + INIT_LIST_HEAD(&net->bpf.links[type]); 354 + 355 + return 0; 356 + } 357 + 411 358 static void __net_exit netns_bpf_pernet_pre_exit(struct net *net) 412 359 { 413 360 enum netns_bpf_attach_type type; 414 - struct bpf_link *link; 361 + struct bpf_netns_link *net_link; 415 362 416 363 mutex_lock(&netns_bpf_mutex); 417 364 for (type = 0; type < MAX_NETNS_BPF_ATTACH_TYPE; type++) { 418 - link = net->bpf.links[type]; 419 - if (link) 420 - bpf_netns_link_auto_detach(link); 421 - else 422 - __netns_bpf_prog_detach(net, type); 365 + 
netns_bpf_run_array_detach(net, type); 366 + list_for_each_entry(net_link, &net->bpf.links[type], node) 367 + net_link->net = NULL; /* auto-detach link */ 368 + if (net->bpf.progs[type]) 369 + bpf_prog_put(net->bpf.progs[type]); 423 370 } 424 371 mutex_unlock(&netns_bpf_mutex); 425 372 } 426 373 427 374 static struct pernet_operations netns_bpf_pernet_ops __net_initdata = { 375 + .init = netns_bpf_pernet_init, 428 376 .pre_exit = netns_bpf_pernet_pre_exit, 429 377 }; 430 378
+10 -4
kernel/bpf/reuseport_array.c
··· 20 20 /* The caller must hold the reuseport_lock */ 21 21 void bpf_sk_reuseport_detach(struct sock *sk) 22 22 { 23 - struct sock __rcu **socks; 23 + uintptr_t sk_user_data; 24 24 25 25 write_lock_bh(&sk->sk_callback_lock); 26 - socks = sk->sk_user_data; 27 - if (socks) { 26 + sk_user_data = (uintptr_t)sk->sk_user_data; 27 + if (sk_user_data & SK_USER_DATA_BPF) { 28 + struct sock __rcu **socks; 29 + 30 + socks = (void *)(sk_user_data & SK_USER_DATA_PTRMASK); 28 31 WRITE_ONCE(sk->sk_user_data, NULL); 29 32 /* 30 33 * Do not move this NULL assignment outside of ··· 255 252 struct sock *free_osk = NULL, *osk, *nsk; 256 253 struct sock_reuseport *reuse; 257 254 u32 index = *(u32 *)key; 255 + uintptr_t sk_user_data; 258 256 struct socket *socket; 259 257 int err, fd; 260 258 ··· 309 305 if (err) 310 306 goto put_file_unlock; 311 307 312 - WRITE_ONCE(nsk->sk_user_data, &array->ptrs[index]); 308 + sk_user_data = (uintptr_t)&array->ptrs[index] | SK_USER_DATA_NOCOPY | 309 + SK_USER_DATA_BPF; 310 + WRITE_ONCE(nsk->sk_user_data, (void *)sk_user_data); 313 311 rcu_assign_pointer(array->ptrs[index], nsk); 314 312 free_osk = osk; 315 313 err = 0;
+8 -10
kernel/bpf/ringbuf.c
··· 132 132 { 133 133 struct bpf_ringbuf *rb; 134 134 135 - if (!data_sz || !PAGE_ALIGNED(data_sz)) 136 - return ERR_PTR(-EINVAL); 137 - 138 - #ifdef CONFIG_64BIT 139 - /* on 32-bit arch, it's impossible to overflow record's hdr->pgoff */ 140 - if (data_sz > RINGBUF_MAX_DATA_SZ) 141 - return ERR_PTR(-E2BIG); 142 - #endif 143 - 144 135 rb = bpf_ringbuf_area_alloc(data_sz, numa_node); 145 136 if (!rb) 146 137 return ERR_PTR(-ENOMEM); ··· 157 166 return ERR_PTR(-EINVAL); 158 167 159 168 if (attr->key_size || attr->value_size || 160 - attr->max_entries == 0 || !PAGE_ALIGNED(attr->max_entries)) 169 + !is_power_of_2(attr->max_entries) || 170 + !PAGE_ALIGNED(attr->max_entries)) 161 171 return ERR_PTR(-EINVAL); 172 + 173 + #ifdef CONFIG_64BIT 174 + /* on 32-bit arch, it's impossible to overflow record's hdr->pgoff */ 175 + if (attr->max_entries > RINGBUF_MAX_DATA_SZ) 176 + return ERR_PTR(-E2BIG); 177 + #endif 162 178 163 179 rb_map = kzalloc(sizeof(*rb_map), GFP_USER); 164 180 if (!rb_map)
+24 -21
kernel/bpf/syscall.c
··· 2121 2121 !bpf_capable()) 2122 2122 return -EPERM; 2123 2123 2124 - if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN)) 2124 + if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN) && !capable(CAP_SYS_ADMIN)) 2125 2125 return -EPERM; 2126 2126 if (is_perfmon_prog_type(type) && !perfmon_capable()) 2127 2127 return -EPERM; ··· 2893 2893 switch (ptype) { 2894 2894 case BPF_PROG_TYPE_SK_MSG: 2895 2895 case BPF_PROG_TYPE_SK_SKB: 2896 - return sock_map_get_from_fd(attr, NULL); 2896 + return sock_map_prog_detach(attr, ptype); 2897 2897 case BPF_PROG_TYPE_LIRC_MODE2: 2898 2898 return lirc_prog_detach(attr); 2899 2899 case BPF_PROG_TYPE_FLOW_DISSECTOR: 2900 - if (!capable(CAP_NET_ADMIN)) 2901 - return -EPERM; 2902 - return netns_bpf_prog_detach(attr); 2900 + return netns_bpf_prog_detach(attr, ptype); 2903 2901 case BPF_PROG_TYPE_CGROUP_DEVICE: 2904 2902 case BPF_PROG_TYPE_CGROUP_SKB: 2905 2903 case BPF_PROG_TYPE_CGROUP_SOCK: ··· 3137 3139 return NULL; 3138 3140 } 3139 3141 3140 - static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog) 3142 + static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog, 3143 + const struct cred *f_cred) 3141 3144 { 3142 3145 const struct bpf_map *map; 3143 3146 struct bpf_insn *insns; ··· 3164 3165 code == (BPF_JMP | BPF_CALL_ARGS)) { 3165 3166 if (code == (BPF_JMP | BPF_CALL_ARGS)) 3166 3167 insns[i].code = BPF_JMP | BPF_CALL; 3167 - if (!bpf_dump_raw_ok()) 3168 + if (!bpf_dump_raw_ok(f_cred)) 3168 3169 insns[i].imm = 0; 3169 3170 continue; 3170 3171 } ··· 3220 3221 return 0; 3221 3222 } 3222 3223 3223 - static int bpf_prog_get_info_by_fd(struct bpf_prog *prog, 3224 + static int bpf_prog_get_info_by_fd(struct file *file, 3225 + struct bpf_prog *prog, 3224 3226 const union bpf_attr *attr, 3225 3227 union bpf_attr __user *uattr) 3226 3228 { ··· 3290 3290 struct bpf_insn *insns_sanitized; 3291 3291 bool fault; 3292 3292 3293 - if (prog->blinded && !bpf_dump_raw_ok()) { 3293 + if (prog->blinded 
&& !bpf_dump_raw_ok(file->f_cred)) { 3294 3294 info.xlated_prog_insns = 0; 3295 3295 goto done; 3296 3296 } 3297 - insns_sanitized = bpf_insn_prepare_dump(prog); 3297 + insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred); 3298 3298 if (!insns_sanitized) 3299 3299 return -ENOMEM; 3300 3300 uinsns = u64_to_user_ptr(info.xlated_prog_insns); ··· 3328 3328 } 3329 3329 3330 3330 if (info.jited_prog_len && ulen) { 3331 - if (bpf_dump_raw_ok()) { 3331 + if (bpf_dump_raw_ok(file->f_cred)) { 3332 3332 uinsns = u64_to_user_ptr(info.jited_prog_insns); 3333 3333 ulen = min_t(u32, info.jited_prog_len, ulen); 3334 3334 ··· 3363 3363 ulen = info.nr_jited_ksyms; 3364 3364 info.nr_jited_ksyms = prog->aux->func_cnt ? : 1; 3365 3365 if (ulen) { 3366 - if (bpf_dump_raw_ok()) { 3366 + if (bpf_dump_raw_ok(file->f_cred)) { 3367 3367 unsigned long ksym_addr; 3368 3368 u64 __user *user_ksyms; 3369 3369 u32 i; ··· 3394 3394 ulen = info.nr_jited_func_lens; 3395 3395 info.nr_jited_func_lens = prog->aux->func_cnt ? 
: 1; 3396 3396 if (ulen) { 3397 - if (bpf_dump_raw_ok()) { 3397 + if (bpf_dump_raw_ok(file->f_cred)) { 3398 3398 u32 __user *user_lens; 3399 3399 u32 func_len, i; 3400 3400 ··· 3451 3451 else 3452 3452 info.nr_jited_line_info = 0; 3453 3453 if (info.nr_jited_line_info && ulen) { 3454 - if (bpf_dump_raw_ok()) { 3454 + if (bpf_dump_raw_ok(file->f_cred)) { 3455 3455 __u64 __user *user_linfo; 3456 3456 u32 i; 3457 3457 ··· 3497 3497 return 0; 3498 3498 } 3499 3499 3500 - static int bpf_map_get_info_by_fd(struct bpf_map *map, 3500 + static int bpf_map_get_info_by_fd(struct file *file, 3501 + struct bpf_map *map, 3501 3502 const union bpf_attr *attr, 3502 3503 union bpf_attr __user *uattr) 3503 3504 { ··· 3541 3540 return 0; 3542 3541 } 3543 3542 3544 - static int bpf_btf_get_info_by_fd(struct btf *btf, 3543 + static int bpf_btf_get_info_by_fd(struct file *file, 3544 + struct btf *btf, 3545 3545 const union bpf_attr *attr, 3546 3546 union bpf_attr __user *uattr) 3547 3547 { ··· 3557 3555 return btf_get_info_by_fd(btf, attr, uattr); 3558 3556 } 3559 3557 3560 - static int bpf_link_get_info_by_fd(struct bpf_link *link, 3558 + static int bpf_link_get_info_by_fd(struct file *file, 3559 + struct bpf_link *link, 3561 3560 const union bpf_attr *attr, 3562 3561 union bpf_attr __user *uattr) 3563 3562 { ··· 3611 3608 return -EBADFD; 3612 3609 3613 3610 if (f.file->f_op == &bpf_prog_fops) 3614 - err = bpf_prog_get_info_by_fd(f.file->private_data, attr, 3611 + err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr, 3615 3612 uattr); 3616 3613 else if (f.file->f_op == &bpf_map_fops) 3617 - err = bpf_map_get_info_by_fd(f.file->private_data, attr, 3614 + err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr, 3618 3615 uattr); 3619 3616 else if (f.file->f_op == &btf_fops) 3620 - err = bpf_btf_get_info_by_fd(f.file->private_data, attr, uattr); 3617 + err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr); 3621 3618 else if (f.file->f_op == 
&bpf_link_fops) 3622 - err = bpf_link_get_info_by_fd(f.file->private_data, 3619 + err = bpf_link_get_info_by_fd(f.file, f.file->private_data, 3623 3620 attr, uattr); 3624 3621 else 3625 3622 err = -EINVAL;
+10 -3
kernel/bpf/verifier.c
··· 399 399 return type == PTR_TO_SOCKET || 400 400 type == PTR_TO_TCP_SOCK || 401 401 type == PTR_TO_MAP_VALUE || 402 - type == PTR_TO_SOCK_COMMON || 403 - type == PTR_TO_BTF_ID; 402 + type == PTR_TO_SOCK_COMMON; 404 403 } 405 404 406 405 static bool reg_type_may_be_null(enum bpf_reg_type type) ··· 9800 9801 int i, j, subprog_start, subprog_end = 0, len, subprog; 9801 9802 struct bpf_insn *insn; 9802 9803 void *old_bpf_func; 9803 - int err; 9804 + int err, num_exentries; 9804 9805 9805 9806 if (env->subprog_cnt <= 1) 9806 9807 return 0; ··· 9875 9876 func[i]->aux->nr_linfo = prog->aux->nr_linfo; 9876 9877 func[i]->aux->jited_linfo = prog->aux->jited_linfo; 9877 9878 func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx; 9879 + num_exentries = 0; 9880 + insn = func[i]->insnsi; 9881 + for (j = 0; j < func[i]->len; j++, insn++) { 9882 + if (BPF_CLASS(insn->code) == BPF_LDX && 9883 + BPF_MODE(insn->code) == BPF_PROBE_MEM) 9884 + num_exentries++; 9885 + } 9886 + func[i]->aux->num_exentries = num_exentries; 9878 9887 func[i] = bpf_int_jit_compile(func[i]); 9879 9888 if (!func[i]->jited) { 9880 9889 err = -ENOTSUPP;
+19 -12
kernel/cgroup/cgroup.c
··· 6439 6439 6440 6440 void cgroup_sk_alloc(struct sock_cgroup_data *skcd) 6441 6441 { 6442 - if (cgroup_sk_alloc_disabled) 6443 - return; 6444 - 6445 - /* Socket clone path */ 6446 - if (skcd->val) { 6447 - /* 6448 - * We might be cloning a socket which is left in an empty 6449 - * cgroup and the cgroup might have already been rmdir'd. 6450 - * Don't use cgroup_get_live(). 6451 - */ 6452 - cgroup_get(sock_cgroup_ptr(skcd)); 6453 - cgroup_bpf_get(sock_cgroup_ptr(skcd)); 6442 + if (cgroup_sk_alloc_disabled) { 6443 + skcd->no_refcnt = 1; 6454 6444 return; 6455 6445 } 6456 6446 ··· 6465 6475 rcu_read_unlock(); 6466 6476 } 6467 6477 6478 + void cgroup_sk_clone(struct sock_cgroup_data *skcd) 6479 + { 6480 + if (skcd->val) { 6481 + if (skcd->no_refcnt) 6482 + return; 6483 + /* 6484 + * We might be cloning a socket which is left in an empty 6485 + * cgroup and the cgroup might have already been rmdir'd. 6486 + * Don't use cgroup_get_live(). 6487 + */ 6488 + cgroup_get(sock_cgroup_ptr(skcd)); 6489 + cgroup_bpf_get(sock_cgroup_ptr(skcd)); 6490 + } 6491 + } 6492 + 6468 6493 void cgroup_sk_free(struct sock_cgroup_data *skcd) 6469 6494 { 6470 6495 struct cgroup *cgrp = sock_cgroup_ptr(skcd); 6471 6496 6497 + if (skcd->no_refcnt) 6498 + return; 6472 6499 cgroup_bpf_put(cgrp); 6473 6500 cgroup_put(cgrp); 6474 6501 }
+13
kernel/debug/gdbstub.c
··· 792 792 } 793 793 break; 794 794 #endif 795 + #ifdef CONFIG_HAVE_ARCH_KGDB_QXFER_PKT 796 + case 'S': 797 + if (!strncmp(remcom_in_buffer, "qSupported:", 11)) 798 + strcpy(remcom_out_buffer, kgdb_arch_gdb_stub_feature); 799 + break; 800 + case 'X': 801 + if (!strncmp(remcom_in_buffer, "qXfer:", 6)) 802 + kgdb_arch_handle_qxfer_pkt(remcom_in_buffer, 803 + remcom_out_buffer); 804 + break; 805 + #endif 806 + default: 807 + break; 795 808 } 796 809 } 797 810
+7 -1
kernel/dma/direct.c
··· 70 70 return 0; 71 71 } 72 72 73 - static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size) 73 + bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size) 74 74 { 75 75 return phys_to_dma_direct(dev, phys) + size - 1 <= 76 76 min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit); ··· 538 538 (dma_addressing_limited(dev) || swiotlb_force == SWIOTLB_FORCE)) 539 539 return swiotlb_max_mapping_size(dev); 540 540 return SIZE_MAX; 541 + } 542 + 543 + bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr) 544 + { 545 + return !dev_is_dma_coherent(dev) || 546 + is_swiotlb_buffer(dma_to_phys(dev, dma_addr)); 541 547 }
+10
kernel/dma/mapping.c
··· 397 397 } 398 398 EXPORT_SYMBOL_GPL(dma_max_mapping_size); 399 399 400 + bool dma_need_sync(struct device *dev, dma_addr_t dma_addr) 401 + { 402 + const struct dma_map_ops *ops = get_dma_ops(dev); 403 + 404 + if (dma_is_direct(ops)) 405 + return dma_direct_need_sync(dev, dma_addr); 406 + return ops->sync_single_for_cpu || ops->sync_single_for_device; 407 + } 408 + EXPORT_SYMBOL_GPL(dma_need_sync); 409 + 400 410 unsigned long dma_get_merge_boundary(struct device *dev) 401 411 { 402 412 const struct dma_map_ops *ops = get_dma_ops(dev);
+55 -30
kernel/dma/pool.c
··· 6 6 #include <linux/debugfs.h> 7 7 #include <linux/dma-direct.h> 8 8 #include <linux/dma-noncoherent.h> 9 - #include <linux/dma-contiguous.h> 10 9 #include <linux/init.h> 11 10 #include <linux/genalloc.h> 12 11 #include <linux/set_memory.h> ··· 68 69 69 70 do { 70 71 pool_size = 1 << (PAGE_SHIFT + order); 71 - 72 - if (dev_get_cma_area(NULL)) 73 - page = dma_alloc_from_contiguous(NULL, 1 << order, 74 - order, false); 75 - else 76 - page = alloc_pages(gfp, order); 72 + page = alloc_pages(gfp, order); 77 73 } while (!page && order-- > 0); 78 74 if (!page) 79 75 goto out; ··· 112 118 dma_common_free_remap(addr, pool_size); 113 119 #endif 114 120 free_page: __maybe_unused 115 - if (!dma_release_from_contiguous(NULL, page, 1 << order)) 116 - __free_pages(page, order); 121 + __free_pages(page, order); 117 122 out: 118 123 return ret; 119 124 } ··· 196 203 } 197 204 postcore_initcall(dma_atomic_pool_init); 198 205 199 - static inline struct gen_pool *dev_to_pool(struct device *dev) 206 + static inline struct gen_pool *dma_guess_pool_from_device(struct device *dev) 200 207 { 201 208 u64 phys_mask; 202 209 gfp_t gfp; ··· 210 217 return atomic_pool_kernel; 211 218 } 212 219 213 - static bool dma_in_atomic_pool(struct device *dev, void *start, size_t size) 220 + static inline struct gen_pool *dma_get_safer_pool(struct gen_pool *bad_pool) 214 221 { 215 - struct gen_pool *pool = dev_to_pool(dev); 222 + if (bad_pool == atomic_pool_kernel) 223 + return atomic_pool_dma32 ? 
: atomic_pool_dma; 216 224 217 - if (unlikely(!pool)) 218 - return false; 219 - return gen_pool_has_addr(pool, (unsigned long)start, size); 225 + if (bad_pool == atomic_pool_dma32) 226 + return atomic_pool_dma; 227 + 228 + return NULL; 229 + } 230 + 231 + static inline struct gen_pool *dma_guess_pool(struct device *dev, 232 + struct gen_pool *bad_pool) 233 + { 234 + if (bad_pool) 235 + return dma_get_safer_pool(bad_pool); 236 + 237 + return dma_guess_pool_from_device(dev); 220 238 } 221 239 222 240 void *dma_alloc_from_pool(struct device *dev, size_t size, 223 241 struct page **ret_page, gfp_t flags) 224 242 { 225 - struct gen_pool *pool = dev_to_pool(dev); 226 - unsigned long val; 243 + struct gen_pool *pool = NULL; 244 + unsigned long val = 0; 227 245 void *ptr = NULL; 246 + phys_addr_t phys; 228 247 229 - if (!pool) { 230 - WARN(1, "%pGg atomic pool not initialised!\n", &flags); 231 - return NULL; 248 + while (1) { 249 + pool = dma_guess_pool(dev, pool); 250 + if (!pool) { 251 + WARN(1, "Failed to get suitable pool for %s\n", 252 + dev_name(dev)); 253 + break; 254 + } 255 + 256 + val = gen_pool_alloc(pool, size); 257 + if (!val) 258 + continue; 259 + 260 + phys = gen_pool_virt_to_phys(pool, val); 261 + if (dma_coherent_ok(dev, phys, size)) 262 + break; 263 + 264 + gen_pool_free(pool, val, size); 265 + val = 0; 232 266 } 233 267 234 - val = gen_pool_alloc(pool, size); 235 - if (val) { 236 - phys_addr_t phys = gen_pool_virt_to_phys(pool, val); 237 268 269 + if (val) { 238 270 *ret_page = pfn_to_page(__phys_to_pfn(phys)); 239 271 ptr = (void *)val; 240 272 memset(ptr, 0, size); 273 + 274 + if (gen_pool_avail(pool) < atomic_pool_size) 275 + schedule_work(&atomic_pool_work); 241 276 } 242 - if (gen_pool_avail(pool) < atomic_pool_size) 243 - schedule_work(&atomic_pool_work); 244 277 245 278 return ptr; 246 279 } 247 280 248 281 bool dma_free_from_pool(struct device *dev, void *start, size_t size) 249 282 { 250 - struct gen_pool *pool = dev_to_pool(dev); 283 + struct 
gen_pool *pool = NULL; 251 284 252 - if (!dma_in_atomic_pool(dev, start, size)) 253 - return false; 254 - gen_pool_free(pool, (unsigned long)start, size); 255 - return true; 285 + while (1) { 286 + pool = dma_guess_pool(dev, pool); 287 + if (!pool) 288 + return false; 289 + 290 + if (gen_pool_has_addr(pool, (unsigned long)start, size)) { 291 + gen_pool_free(pool, (unsigned long)start, size); 292 + return true; 293 + } 294 + } 256 295 }
+1 -1
kernel/events/uprobes.c
··· 2199 2199 if (!uprobe) { 2200 2200 if (is_swbp > 0) { 2201 2201 /* No matching uprobe; signal SIGTRAP. */ 2202 - send_sig(SIGTRAP, current, 0); 2202 + force_sig(SIGTRAP); 2203 2203 } else { 2204 2204 /* 2205 2205 * Either we raced with uprobe_unregister() or we can't
+1 -1
kernel/fork.c
··· 1977 1977 * to stop root fork bombs. 1978 1978 */ 1979 1979 retval = -EAGAIN; 1980 - if (nr_threads >= max_threads) 1980 + if (data_race(nr_threads >= max_threads)) 1981 1981 goto bad_fork_cleanup_count; 1982 1982 1983 1983 delayacct_tsk_init(p); /* Must remain after dup_task_struct() */
+35 -2
kernel/irq/manage.c
··· 195 195 set_bit(IRQTF_AFFINITY, &action->thread_flags); 196 196 } 197 197 198 + #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK 198 199 static void irq_validate_effective_affinity(struct irq_data *data) 199 200 { 200 - #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK 201 201 const struct cpumask *m = irq_data_get_effective_affinity_mask(data); 202 202 struct irq_chip *chip = irq_data_get_irq_chip(data); 203 203 ··· 205 205 return; 206 206 pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n", 207 207 chip->name, data->irq); 208 - #endif 209 208 } 209 + 210 + static inline void irq_init_effective_affinity(struct irq_data *data, 211 + const struct cpumask *mask) 212 + { 213 + cpumask_copy(irq_data_get_effective_affinity_mask(data), mask); 214 + } 215 + #else 216 + static inline void irq_validate_effective_affinity(struct irq_data *data) { } 217 + static inline void irq_init_effective_affinity(struct irq_data *data, 218 + const struct cpumask *mask) { } 219 + #endif 210 220 211 221 int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, 212 222 bool force) ··· 314 304 return ret; 315 305 } 316 306 307 + static bool irq_set_affinity_deactivated(struct irq_data *data, 308 + const struct cpumask *mask, bool force) 309 + { 310 + struct irq_desc *desc = irq_data_to_desc(data); 311 + 312 + /* 313 + * If the interrupt is not yet activated, just store the affinity 314 + * mask and do not call the chip driver at all. On activation the 315 + * driver has to make sure anyway that the interrupt is in a 316 + * useable state so startup works. 
317 + */ 318 + if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) || irqd_is_activated(data)) 319 + return false; 320 + 321 + cpumask_copy(desc->irq_common_data.affinity, mask); 322 + irq_init_effective_affinity(data, mask); 323 + irqd_set(data, IRQD_AFFINITY_SET); 324 + return true; 325 + } 326 + 317 327 int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask, 318 328 bool force) 319 329 { ··· 343 313 344 314 if (!chip || !chip->irq_set_affinity) 345 315 return -EINVAL; 316 + 317 + if (irq_set_affinity_deactivated(data, mask, force)) 318 + return 0; 346 319 347 320 if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) { 348 321 ret = irq_try_set_affinity(data, mask, force);
+11 -6
kernel/kallsyms.c
··· 644 644 * Otherwise, require CAP_SYSLOG (assuming kptr_restrict isn't set to 645 645 * block even that). 646 646 */ 647 - int kallsyms_show_value(void) 647 + bool kallsyms_show_value(const struct cred *cred) 648 648 { 649 649 switch (kptr_restrict) { 650 650 case 0: 651 651 if (kallsyms_for_perf()) 652 - return 1; 652 + return true; 653 653 /* fallthrough */ 654 654 case 1: 655 - if (has_capability_noaudit(current, CAP_SYSLOG)) 656 - return 1; 655 + if (security_capable(cred, &init_user_ns, CAP_SYSLOG, 656 + CAP_OPT_NOAUDIT) == 0) 657 + return true; 657 658 /* fallthrough */ 658 659 default: 659 - return 0; 660 + return false; 660 661 } 661 662 } 662 663 ··· 674 673 return -ENOMEM; 675 674 reset_iter(iter, 0); 676 675 677 - iter->show_value = kallsyms_show_value(); 676 + /* 677 + * Instead of checking this on every s_show() call, cache 678 + * the result here at open time. 679 + */ 680 + iter->show_value = kallsyms_show_value(file->f_cred); 678 681 return 0; 679 682 } 680 683
+2 -2
kernel/kprobes.c
··· 2448 2448 else 2449 2449 kprobe_type = "k"; 2450 2450 2451 - if (!kallsyms_show_value()) 2451 + if (!kallsyms_show_value(pi->file->f_cred)) 2452 2452 addr = NULL; 2453 2453 2454 2454 if (sym) ··· 2540 2540 * If /proc/kallsyms is not showing kernel address, we won't 2541 2541 * show them here either. 2542 2542 */ 2543 - if (!kallsyms_show_value()) 2543 + if (!kallsyms_show_value(m->file->f_cred)) 2544 2544 seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL, 2545 2545 (void *)ent->start_addr); 2546 2546 else
+28 -25
kernel/module.c
··· 1510 1510 } 1511 1511 1512 1512 struct module_sect_attr { 1513 - struct module_attribute mattr; 1514 - char *name; 1513 + struct bin_attribute battr; 1515 1514 unsigned long address; 1516 1515 }; 1517 1516 ··· 1520 1521 struct module_sect_attr attrs[]; 1521 1522 }; 1522 1523 1523 - static ssize_t module_sect_show(struct module_attribute *mattr, 1524 - struct module_kobject *mk, char *buf) 1524 + static ssize_t module_sect_read(struct file *file, struct kobject *kobj, 1525 + struct bin_attribute *battr, 1526 + char *buf, loff_t pos, size_t count) 1525 1527 { 1526 1528 struct module_sect_attr *sattr = 1527 - container_of(mattr, struct module_sect_attr, mattr); 1528 - return sprintf(buf, "0x%px\n", kptr_restrict < 2 ? 1529 - (void *)sattr->address : NULL); 1529 + container_of(battr, struct module_sect_attr, battr); 1530 + 1531 + if (pos != 0) 1532 + return -EINVAL; 1533 + 1534 + return sprintf(buf, "0x%px\n", 1535 + kallsyms_show_value(file->f_cred) ? (void *)sattr->address : NULL); 1530 1536 } 1531 1537 1532 1538 static void free_sect_attrs(struct module_sect_attrs *sect_attrs) ··· 1539 1535 unsigned int section; 1540 1536 1541 1537 for (section = 0; section < sect_attrs->nsections; section++) 1542 - kfree(sect_attrs->attrs[section].name); 1538 + kfree(sect_attrs->attrs[section].battr.attr.name); 1543 1539 kfree(sect_attrs); 1544 1540 } 1545 1541 ··· 1548 1544 unsigned int nloaded = 0, i, size[2]; 1549 1545 struct module_sect_attrs *sect_attrs; 1550 1546 struct module_sect_attr *sattr; 1551 - struct attribute **gattr; 1547 + struct bin_attribute **gattr; 1552 1548 1553 1549 /* Count loaded sections and allocate structures */ 1554 1550 for (i = 0; i < info->hdr->e_shnum; i++) 1555 1551 if (!sect_empty(&info->sechdrs[i])) 1556 1552 nloaded++; 1557 1553 size[0] = ALIGN(struct_size(sect_attrs, attrs, nloaded), 1558 - sizeof(sect_attrs->grp.attrs[0])); 1559 - size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.attrs[0]); 1554 + sizeof(sect_attrs->grp.bin_attrs[0])); 1555 
+ size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.bin_attrs[0]); 1560 1556 sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL); 1561 1557 if (sect_attrs == NULL) 1562 1558 return; 1563 1559 1564 1560 /* Setup section attributes. */ 1565 1561 sect_attrs->grp.name = "sections"; 1566 - sect_attrs->grp.attrs = (void *)sect_attrs + size[0]; 1562 + sect_attrs->grp.bin_attrs = (void *)sect_attrs + size[0]; 1567 1563 1568 1564 sect_attrs->nsections = 0; 1569 1565 sattr = &sect_attrs->attrs[0]; 1570 - gattr = &sect_attrs->grp.attrs[0]; 1566 + gattr = &sect_attrs->grp.bin_attrs[0]; 1571 1567 for (i = 0; i < info->hdr->e_shnum; i++) { 1572 1568 Elf_Shdr *sec = &info->sechdrs[i]; 1573 1569 if (sect_empty(sec)) 1574 1570 continue; 1571 + sysfs_bin_attr_init(&sattr->battr); 1575 1572 sattr->address = sec->sh_addr; 1576 - sattr->name = kstrdup(info->secstrings + sec->sh_name, 1577 - GFP_KERNEL); 1578 - if (sattr->name == NULL) 1573 + sattr->battr.attr.name = 1574 + kstrdup(info->secstrings + sec->sh_name, GFP_KERNEL); 1575 + if (sattr->battr.attr.name == NULL) 1579 1576 goto out; 1580 1577 sect_attrs->nsections++; 1581 - sysfs_attr_init(&sattr->mattr.attr); 1582 - sattr->mattr.show = module_sect_show; 1583 - sattr->mattr.store = NULL; 1584 - sattr->mattr.attr.name = sattr->name; 1585 - sattr->mattr.attr.mode = S_IRUSR; 1586 - *(gattr++) = &(sattr++)->mattr.attr; 1578 + sattr->battr.read = module_sect_read; 1579 + sattr->battr.size = 3 /* "0x", "\n" */ + (BITS_PER_LONG / 4); 1580 + sattr->battr.attr.mode = 0400; 1581 + *(gattr++) = &(sattr++)->battr; 1587 1582 } 1588 1583 *gattr = NULL; 1589 1584 ··· 1672 1669 continue; 1673 1670 if (info->sechdrs[i].sh_type == SHT_NOTE) { 1674 1671 sysfs_bin_attr_init(nattr); 1675 - nattr->attr.name = mod->sect_attrs->attrs[loaded].name; 1672 + nattr->attr.name = mod->sect_attrs->attrs[loaded].battr.attr.name; 1676 1673 nattr->attr.mode = S_IRUGO; 1677 1674 nattr->size = info->sechdrs[i].sh_size; 1678 1675 nattr->private = (void *) 
info->sechdrs[i].sh_addr; ··· 2788 2785 { 2789 2786 return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END, 2790 2787 GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS, 2791 - NUMA_NO_NODE, __func__); 2788 + NUMA_NO_NODE, __builtin_return_address(0)); 2792 2789 } 2793 2790 2794 2791 bool __weak module_init_section(const char *name) ··· 4382 4379 4383 4380 if (!err) { 4384 4381 struct seq_file *m = file->private_data; 4385 - m->private = kallsyms_show_value() ? NULL : (void *)8ul; 4382 + m->private = kallsyms_show_value(file->f_cred) ? NULL : (void *)8ul; 4386 4383 } 4387 4384 4388 4385 return err;
+2 -2
kernel/padata.c
··· 335 335 * 336 336 * Ensure reorder queue is read after pd->lock is dropped so we see 337 337 * new objects from another task in padata_do_serial. Pairs with 338 - * smp_mb__after_atomic in padata_do_serial. 338 + * smp_mb in padata_do_serial. 339 339 */ 340 340 smp_mb(); 341 341 ··· 418 418 * with the trylock of pd->lock in padata_reorder. Pairs with smp_mb 419 419 * in padata_reorder. 420 420 */ 421 - smp_mb__after_atomic(); 421 + smp_mb(); 422 422 423 423 padata_reorder(pd); 424 424 }
+1 -1
kernel/rcu/rcuperf.c
··· 723 723 schedule_timeout_uninterruptible(1); 724 724 } 725 725 726 - pr_alert("kfree object size=%lu\n", kfree_mult * sizeof(struct kfree_obj)); 726 + pr_alert("kfree object size=%zu\n", kfree_mult * sizeof(struct kfree_obj)); 727 727 728 728 kfree_reader_tasks = kcalloc(kfree_nrealthreads, sizeof(kfree_reader_tasks[0]), 729 729 GFP_KERNEL);
+58 -16
kernel/sched/core.c
··· 1311 1311 1312 1312 void activate_task(struct rq *rq, struct task_struct *p, int flags) 1313 1313 { 1314 - if (task_contributes_to_load(p)) 1315 - rq->nr_uninterruptible--; 1316 - 1317 1314 enqueue_task(rq, p, flags); 1318 1315 1319 1316 p->on_rq = TASK_ON_RQ_QUEUED; ··· 1319 1322 void deactivate_task(struct rq *rq, struct task_struct *p, int flags) 1320 1323 { 1321 1324 p->on_rq = (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING; 1322 - 1323 - if (task_contributes_to_load(p)) 1324 - rq->nr_uninterruptible++; 1325 1325 1326 1326 dequeue_task(rq, p, flags); 1327 1327 } ··· 2230 2236 2231 2237 lockdep_assert_held(&rq->lock); 2232 2238 2233 - #ifdef CONFIG_SMP 2234 2239 if (p->sched_contributes_to_load) 2235 2240 rq->nr_uninterruptible--; 2236 2241 2242 + #ifdef CONFIG_SMP 2237 2243 if (wake_flags & WF_MIGRATED) 2238 2244 en_flags |= ENQUEUE_MIGRATED; 2239 2245 #endif ··· 2577 2583 * A similar smb_rmb() lives in try_invoke_on_locked_down_task(). 2578 2584 */ 2579 2585 smp_rmb(); 2580 - if (p->on_rq && ttwu_remote(p, wake_flags)) 2586 + if (READ_ONCE(p->on_rq) && ttwu_remote(p, wake_flags)) 2581 2587 goto unlock; 2582 2588 2583 2589 if (p->in_iowait) { ··· 2586 2592 } 2587 2593 2588 2594 #ifdef CONFIG_SMP 2589 - p->sched_contributes_to_load = !!task_contributes_to_load(p); 2590 - p->state = TASK_WAKING; 2591 - 2592 2595 /* 2593 2596 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be 2594 2597 * possible to, falsely, observe p->on_cpu == 0. ··· 2604 2613 * 2605 2614 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in 2606 2615 * __schedule(). See the comment for smp_mb__after_spinlock(). 2616 + * 2617 + * Form a control-dep-acquire with p->on_rq == 0 above, to ensure 2618 + * schedule()'s deactivate_task() has 'happened' and p will no longer 2619 + * care about it's own p->state. See the comment in __schedule(). 
2607 2620 */ 2608 - smp_rmb(); 2621 + smp_acquire__after_ctrl_dep(); 2622 + 2623 + /* 2624 + * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq 2625 + * == 0), which means we need to do an enqueue, change p->state to 2626 + * TASK_WAKING such that we can unlock p->pi_lock before doing the 2627 + * enqueue, such as ttwu_queue_wakelist(). 2628 + */ 2629 + p->state = TASK_WAKING; 2609 2630 2610 2631 /* 2611 2632 * If the owning (remote) CPU is still in the middle of schedule() with ··· 2965 2962 * Silence PROVE_RCU. 2966 2963 */ 2967 2964 raw_spin_lock_irqsave(&p->pi_lock, flags); 2965 + rseq_migrate(p); 2968 2966 /* 2969 2967 * We're setting the CPU for the first time, we don't migrate, 2970 2968 * so use __set_task_cpu(). ··· 3030 3026 * as we're not fully set-up yet. 3031 3027 */ 3032 3028 p->recent_used_cpu = task_cpu(p); 3029 + rseq_migrate(p); 3033 3030 __set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0)); 3034 3031 #endif 3035 3032 rq = __task_rq_lock(p, &rf); ··· 4102 4097 { 4103 4098 struct task_struct *prev, *next; 4104 4099 unsigned long *switch_count; 4100 + unsigned long prev_state; 4105 4101 struct rq_flags rf; 4106 4102 struct rq *rq; 4107 4103 int cpu; ··· 4122 4116 /* 4123 4117 * Make sure that signal_pending_state()->signal_pending() below 4124 4118 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE) 4125 - * done by the caller to avoid the race with signal_wake_up(). 
4119 + * done by the caller to avoid the race with signal_wake_up(): 4126 4120 * 4127 - * The membarrier system call requires a full memory barrier 4121 + * __set_current_state(@state) signal_wake_up() 4122 + * schedule() set_tsk_thread_flag(p, TIF_SIGPENDING) 4123 + * wake_up_state(p, state) 4124 + * LOCK rq->lock LOCK p->pi_state 4125 + * smp_mb__after_spinlock() smp_mb__after_spinlock() 4126 + * if (signal_pending_state()) if (p->state & @state) 4127 + * 4128 + * Also, the membarrier system call requires a full memory barrier 4128 4129 * after coming from user-space, before storing to rq->curr. 4129 4130 */ 4130 4131 rq_lock(rq, &rf); ··· 4142 4129 update_rq_clock(rq); 4143 4130 4144 4131 switch_count = &prev->nivcsw; 4145 - if (!preempt && prev->state) { 4146 - if (signal_pending_state(prev->state, prev)) { 4132 + 4133 + /* 4134 + * We must load prev->state once (task_struct::state is volatile), such 4135 + * that: 4136 + * 4137 + * - we form a control dependency vs deactivate_task() below. 4138 + * - ptrace_{,un}freeze_traced() can change ->state underneath us. 4139 + */ 4140 + prev_state = prev->state; 4141 + if (!preempt && prev_state) { 4142 + if (signal_pending_state(prev_state, prev)) { 4147 4143 prev->state = TASK_RUNNING; 4148 4144 } else { 4145 + prev->sched_contributes_to_load = 4146 + (prev_state & TASK_UNINTERRUPTIBLE) && 4147 + !(prev_state & TASK_NOLOAD) && 4148 + !(prev->flags & PF_FROZEN); 4149 + 4150 + if (prev->sched_contributes_to_load) 4151 + rq->nr_uninterruptible++; 4152 + 4153 + /* 4154 + * __schedule() ttwu() 4155 + * prev_state = prev->state; if (p->on_rq && ...) 4156 + * if (prev_state) goto out; 4157 + * p->on_rq = 0; smp_acquire__after_ctrl_dep(); 4158 + * p->state = TASK_WAKING 4159 + * 4160 + * Where __schedule() and ttwu() have matching control dependencies. 4161 + * 4162 + * After this, schedule() must not care about p->state any more. 
4163 + */ 4149 4164 deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK); 4150 4165 4151 4166 if (prev->in_iowait) { ··· 4485 4444 int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags, 4486 4445 void *key) 4487 4446 { 4447 + WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~WF_SYNC); 4488 4448 return try_to_wake_up(curr->private, mode, wake_flags); 4489 4449 } 4490 4450 EXPORT_SYMBOL(default_wake_function);
+13 -2
kernel/sched/fair.c
··· 4039 4039 return; 4040 4040 } 4041 4041 4042 - rq->misfit_task_load = task_h_load(p); 4042 + /* 4043 + * Make sure that misfit_task_load will not be null even if 4044 + * task_h_load() returns 0. 4045 + */ 4046 + rq->misfit_task_load = max_t(unsigned long, task_h_load(p), 1); 4043 4047 } 4044 4048 4045 4049 #else /* CONFIG_SMP */ ··· 7642 7638 7643 7639 switch (env->migration_type) { 7644 7640 case migrate_load: 7645 - load = task_h_load(p); 7641 + /* 7642 + * Depending on the number of CPUs and tasks and the 7643 + * cgroup hierarchy, task_h_load() can return a null 7644 + * value. Make sure that env->imbalance decreases 7645 + * otherwise detach_tasks() will stop only after 7646 + * detaching up to loop_max tasks. 7647 + */ 7648 + load = max_t(unsigned long, task_h_load(p), 1); 7646 7649 7647 7650 if (sched_feat(LB_MIN) && 7648 7651 load < 16 && !env->sd->nr_balance_failed)
+7 -3
kernel/signal.c
··· 2529 2529 struct signal_struct *signal = current->signal; 2530 2530 int signr; 2531 2531 2532 - if (unlikely(current->task_works)) 2533 - task_work_run(); 2534 - 2535 2532 if (unlikely(uprobe_deny_signal())) 2536 2533 return false; 2537 2534 ··· 2541 2544 2542 2545 relock: 2543 2546 spin_lock_irq(&sighand->siglock); 2547 + current->jobctl &= ~JOBCTL_TASK_WORK; 2548 + if (unlikely(current->task_works)) { 2549 + spin_unlock_irq(&sighand->siglock); 2550 + task_work_run(); 2551 + goto relock; 2552 + } 2553 + 2544 2554 /* 2545 2555 * Every stopped thread goes here after wakeup. Check to see if 2546 2556 * we should notify the parent, prepare_signal(SIGCONT) encodes
+14 -2
kernel/task_work.c
··· 25 25 * 0 if succeeds or -ESRCH. 26 26 */ 27 27 int 28 - task_work_add(struct task_struct *task, struct callback_head *work, bool notify) 28 + task_work_add(struct task_struct *task, struct callback_head *work, int notify) 29 29 { 30 30 struct callback_head *head; 31 + unsigned long flags; 31 32 32 33 do { 33 34 head = READ_ONCE(task->task_works); ··· 37 36 work->next = head; 38 37 } while (cmpxchg(&task->task_works, head, work) != head); 39 38 40 - if (notify) 39 + switch (notify) { 40 + case TWA_RESUME: 41 41 set_notify_resume(task); 42 + break; 43 + case TWA_SIGNAL: 44 + if (lock_task_sighand(task, &flags)) { 45 + task->jobctl |= JOBCTL_TASK_WORK; 46 + signal_wake_up(task, 0); 47 + unlock_task_sighand(task, &flags); 48 + } 49 + break; 50 + } 51 + 42 52 return 0; 43 53 } 44 54
+16 -5
kernel/time/timer.c
··· 521 521 * Force expire obscene large timeouts to expire at the 522 522 * capacity limit of the wheel. 523 523 */ 524 - if (expires >= WHEEL_TIMEOUT_CUTOFF) 525 - expires = WHEEL_TIMEOUT_MAX; 524 + if (delta >= WHEEL_TIMEOUT_CUTOFF) 525 + expires = clk + WHEEL_TIMEOUT_MAX; 526 526 527 527 idx = calc_index(expires, LVL_DEPTH - 1); 528 528 } ··· 584 584 * Set the next expiry time and kick the CPU so it can reevaluate the 585 585 * wheel: 586 586 */ 587 - base->next_expiry = timer->expires; 587 + if (time_before(timer->expires, base->clk)) { 588 + /* 589 + * Prevent from forward_timer_base() moving the base->clk 590 + * backward 591 + */ 592 + base->next_expiry = base->clk; 593 + } else { 594 + base->next_expiry = timer->expires; 595 + } 588 596 wake_up_nohz_cpu(base->cpu); 589 597 } 590 598 ··· 904 896 * If the next expiry value is > jiffies, then we fast forward to 905 897 * jiffies otherwise we forward to the next expiry value. 906 898 */ 907 - if (time_after(base->next_expiry, jnow)) 899 + if (time_after(base->next_expiry, jnow)) { 908 900 base->clk = jnow; 909 - else 901 + } else { 902 + if (WARN_ON_ONCE(time_before(base->next_expiry, base->clk))) 903 + return; 910 904 base->clk = base->next_expiry; 905 + } 911 906 #endif 912 907 } 913 908
+5
lib/Kconfig.kgdb
··· 3 3 config HAVE_ARCH_KGDB 4 4 bool 5 5 6 + # set if architecture has its kgdb_arch_handle_qxfer_pkt 7 + # function to enable gdb stub to address XML packet sent from GDB. 8 + config HAVE_ARCH_KGDB_QXFER_PKT 9 + bool 10 + 6 11 menuconfig KGDB 7 12 bool "KGDB: kernel debugger" 8 13 depends on HAVE_ARCH_KGDB
+1
lib/packing.c
··· 73 73 * @endbit: The index (in logical notation, compensated for quirks) where 74 74 * the packed value ends within pbuf. Must be smaller than, or equal 75 75 * to, startbit. 76 + * @pbuflen: The length in bytes of the packed buffer pointed to by @pbuf. 76 77 * @op: If PACK, then uval will be treated as const pointer and copied (packed) 77 78 * into pbuf, between startbit and endbit. 78 79 * If UNPACK, then pbuf will be treated as const pointer and the logical
+2 -2
mm/cma.c
··· 339 339 */ 340 340 if (base < highmem_start && limit > highmem_start) { 341 341 addr = memblock_alloc_range_nid(size, alignment, 342 - highmem_start, limit, nid, false); 342 + highmem_start, limit, nid, true); 343 343 limit = highmem_start; 344 344 } 345 345 346 346 if (!addr) { 347 347 addr = memblock_alloc_range_nid(size, alignment, base, 348 - limit, nid, false); 348 + limit, nid, true); 349 349 if (!addr) { 350 350 ret = -ENOMEM; 351 351 goto err;
+21 -2
mm/filemap.c
··· 2028 2028 2029 2029 page = find_get_page(mapping, index); 2030 2030 if (!page) { 2031 - if (iocb->ki_flags & IOCB_NOWAIT) 2031 + if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO)) 2032 2032 goto would_block; 2033 2033 page_cache_sync_readahead(mapping, 2034 2034 ra, filp, ··· 2038 2038 goto no_cached_page; 2039 2039 } 2040 2040 if (PageReadahead(page)) { 2041 + if (iocb->ki_flags & IOCB_NOIO) { 2042 + put_page(page); 2043 + goto out; 2044 + } 2041 2045 page_cache_async_readahead(mapping, 2042 2046 ra, filp, page, 2043 2047 index, last_index - index); ··· 2164 2160 } 2165 2161 2166 2162 readpage: 2163 + if (iocb->ki_flags & IOCB_NOIO) { 2164 + unlock_page(page); 2165 + put_page(page); 2166 + goto would_block; 2167 + } 2167 2168 /* 2168 2169 * A previous I/O error may have been due to temporary 2169 2170 * failures, eg. multipath errors. ··· 2258 2249 * 2259 2250 * This is the "read_iter()" routine for all filesystems 2260 2251 * that can use the page cache directly. 2252 + * 2253 + * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall 2254 + * be returned when no data can be read without waiting for I/O requests 2255 + * to complete; it doesn't prevent readahead. 2256 + * 2257 + * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O 2258 + * requests shall be made for the read or for readahead. When no data 2259 + * can be read, -EAGAIN shall be returned. When readahead would be 2260 + * triggered, a partial, possibly empty read shall be returned. 2261 + * 2261 2262 * Return: 2262 2263 * * number of bytes copied, even for partial reads 2263 - * * negative error code if nothing was read 2264 + * * negative error code (or 0 if IOCB_NOIO) if nothing was read 2264 2265 */ 2265 2266 ssize_t 2266 2267 generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+11 -6
mm/hugetlb.c
··· 45 45 unsigned int default_hstate_idx; 46 46 struct hstate hstates[HUGE_MAX_HSTATE]; 47 47 48 + #ifdef CONFIG_CMA 48 49 static struct cma *hugetlb_cma[MAX_NUMNODES]; 50 + #endif 51 + static unsigned long hugetlb_cma_size __initdata; 49 52 50 53 /* 51 54 * Minimum page order among possible hugepage sizes, set to a proper value ··· 1238 1235 * If the page isn't allocated using the cma allocator, 1239 1236 * cma_release() returns false. 1240 1237 */ 1241 - if (IS_ENABLED(CONFIG_CMA) && 1242 - cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order)) 1238 + #ifdef CONFIG_CMA 1239 + if (cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order)) 1243 1240 return; 1241 + #endif 1244 1242 1245 1243 free_contig_range(page_to_pfn(page), 1 << order); 1246 1244 } ··· 1252 1248 { 1253 1249 unsigned long nr_pages = 1UL << huge_page_order(h); 1254 1250 1255 - if (IS_ENABLED(CONFIG_CMA)) { 1251 + #ifdef CONFIG_CMA 1252 + { 1256 1253 struct page *page; 1257 1254 int node; 1258 1255 ··· 1267 1262 return page; 1268 1263 } 1269 1264 } 1265 + #endif 1270 1266 1271 1267 return alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask); 1272 1268 } ··· 1599 1593 1600 1594 /* Use first found vma */ 1601 1595 pgoff_start = page_to_pgoff(hpage); 1602 - pgoff_end = pgoff_start + hpage_nr_pages(hpage) - 1; 1596 + pgoff_end = pgoff_start + pages_per_huge_page(page_hstate(hpage)) - 1; 1603 1597 anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, 1604 1598 pgoff_start, pgoff_end) { 1605 1599 struct vm_area_struct *vma = avc->vma; ··· 2577 2571 2578 2572 for (i = 0; i < h->max_huge_pages; ++i) { 2579 2573 if (hstate_is_gigantic(h)) { 2580 - if (IS_ENABLED(CONFIG_CMA) && hugetlb_cma[0]) { 2574 + if (hugetlb_cma_size) { 2581 2575 pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n"); 2582 2576 break; 2583 2577 } ··· 5660 5654 } 5661 5655 5662 5656 #ifdef CONFIG_CMA 5663 - static unsigned long hugetlb_cma_size __initdata; 5664 5657 static bool cma_reserve_called 
__initdata; 5665 5658 5666 5659 static int __init cmdline_parse_hugetlb_cma(char *p)
+3
mm/khugepaged.c
··· 958 958 return SCAN_ADDRESS_RANGE; 959 959 if (!hugepage_vma_check(vma, vma->vm_flags)) 960 960 return SCAN_VMA_CHECK; 961 + /* Anon VMA expected */ 962 + if (!vma->anon_vma || vma->vm_ops) 963 + return SCAN_VMA_CHECK; 961 964 return 0; 962 965 } 963 966
+10 -3
mm/memcontrol.c
··· 5669 5669 if (!mem_cgroup_is_root(mc.to)) 5670 5670 page_counter_uncharge(&mc.to->memory, mc.moved_swap); 5671 5671 5672 - mem_cgroup_id_get_many(mc.to, mc.moved_swap); 5673 5672 css_put_many(&mc.to->css, mc.moved_swap); 5674 5673 5675 5674 mc.moved_swap = 0; ··· 5859 5860 ent = target.ent; 5860 5861 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) { 5861 5862 mc.precharge--; 5862 - /* we fixup refcnts and charges later. */ 5863 + mem_cgroup_id_get_many(mc.to, 1); 5864 + /* we fixup other refcnts and charges later. */ 5863 5865 mc.moved_swap++; 5864 5866 } 5865 5867 break; ··· 7186 7186 { }, /* terminate */ 7187 7187 }; 7188 7188 7189 + /* 7190 + * If mem_cgroup_swap_init() is implemented as a subsys_initcall() 7191 + * instead of a core_initcall(), this could mean cgroup_memory_noswap still 7192 + * remains set to false even when memcg is disabled via "cgroup_disable=memory" 7193 + * boot parameter. This may result in premature OOPS inside 7194 + * mem_cgroup_get_nr_swap_pages() function in corner cases. 7195 + */ 7189 7196 static int __init mem_cgroup_swap_init(void) 7190 7197 { 7191 7198 /* No memory control -> no swap control */ ··· 7207 7200 7208 7201 return 0; 7209 7202 } 7210 - subsys_initcall(mem_cgroup_swap_init); 7203 + core_initcall(mem_cgroup_swap_init); 7211 7204 7212 7205 #endif /* CONFIG_MEMCG_SWAP */
+1 -1
mm/memory.c
··· 1601 1601 return insert_pages(vma, addr, pages, num, vma->vm_page_prot); 1602 1602 #else 1603 1603 unsigned long idx = 0, pgcount = *num; 1604 - int err; 1604 + int err = -EINVAL; 1605 1605 1606 1606 for (; idx < pgcount; ++idx) { 1607 1607 err = vm_insert_page(vma, addr + (PAGE_SIZE * idx), pages[idx]);
+1 -12
mm/migrate.c
··· 1161 1161 } 1162 1162 1163 1163 /* 1164 - * gcc 4.7 and 4.8 on arm get an ICEs when inlining unmap_and_move(). Work 1165 - * around it. 1166 - */ 1167 - #if defined(CONFIG_ARM) && \ 1168 - defined(GCC_VERSION) && GCC_VERSION < 40900 && GCC_VERSION >= 40700 1169 - #define ICE_noinline noinline 1170 - #else 1171 - #define ICE_noinline 1172 - #endif 1173 - 1174 - /* 1175 1164 * Obtain the lock on page, remove all ptes and migrate the page 1176 1165 * to the newly allocated page in newpage. 1177 1166 */ 1178 - static ICE_noinline int unmap_and_move(new_page_t get_new_page, 1167 + static int unmap_and_move(new_page_t get_new_page, 1179 1168 free_page_t put_new_page, 1180 1169 unsigned long private, struct page *page, 1181 1170 int force, enum migrate_mode mode,
+14 -2
mm/mmap.c
··· 2620 2620 * Create a list of vma's touched by the unmap, removing them from the mm's 2621 2621 * vma list as we go.. 2622 2622 */ 2623 - static void 2623 + static bool 2624 2624 detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, 2625 2625 struct vm_area_struct *prev, unsigned long end) 2626 2626 { ··· 2645 2645 2646 2646 /* Kill the cache */ 2647 2647 vmacache_invalidate(mm); 2648 + 2649 + /* 2650 + * Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or 2651 + * VM_GROWSUP VMA. Such VMAs can change their size under 2652 + * down_read(mmap_lock) and collide with the VMA we are about to unmap. 2653 + */ 2654 + if (vma && (vma->vm_flags & VM_GROWSDOWN)) 2655 + return false; 2656 + if (prev && (prev->vm_flags & VM_GROWSUP)) 2657 + return false; 2658 + return true; 2648 2659 } 2649 2660 2650 2661 /* ··· 2836 2825 } 2837 2826 2838 2827 /* Detach vmas from rbtree */ 2839 - detach_vmas_to_be_unmapped(mm, vma, prev, end); 2828 + if (!detach_vmas_to_be_unmapped(mm, vma, prev, end)) 2829 + downgrade = false; 2840 2830 2841 2831 if (downgrade) 2842 2832 mmap_write_downgrade(mm);
+21 -2
mm/mremap.c
··· 206 206 207 207 /* 208 208 * The destination pmd shouldn't be established, free_pgtables() 209 - * should have release it. 209 + * should have released it. 210 + * 211 + * However, there's a case during execve() where we use mremap 212 + * to move the initial stack, and in that case the target area 213 + * may overlap the source area (always moving down). 214 + * 215 + * If everything is PMD-aligned, that works fine, as moving 216 + * each pmd down will clear the source pmd. But if we first 217 + * have a few 4kB-only pages that get moved down, and then 218 + * hit the "now the rest is PMD-aligned, let's do everything 219 + * one pmd at a time", we will still have the old (now empty 220 + * of any 4kB pages, but still there) PMD in the page table 221 + * tree. 222 + * 223 + * Warn on it once - because we really should try to figure 224 + * out how to do this better - but then say "I won't move 225 + * this pmd". 226 + * 227 + * One alternative might be to just unmap the target pmd at 228 + * this point, and verify that it really is empty. We'll see. 210 229 */ 211 - if (WARN_ON(!pmd_none(*new_pmd))) 230 + if (WARN_ON_ONCE(!pmd_none(*new_pmd))) 212 231 return false; 213 232 214 233 /*
+1 -1
mm/page_alloc.c
··· 7832 7832 * Initialise min_free_kbytes. 7833 7833 * 7834 7834 * For small machines we want it small (128k min). For large machines 7835 - * we want it large (64MB max). But it is not linear, because network 7835 + * we want it large (256MB max). But it is not linear, because network 7836 7836 * bandwidth does not increase linearly with machine size. We use 7837 7837 * 7838 7838 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
+1 -1
mm/shmem.c
··· 3178 3178 new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len, 3179 3179 GFP_KERNEL); 3180 3180 if (!new_xattr->name) { 3181 - kfree(new_xattr); 3181 + kvfree(new_xattr); 3182 3182 return -ENOMEM; 3183 3183 } 3184 3184
+28 -7
mm/slab_common.c
··· 326 326 if (s->refcount < 0) 327 327 return 1; 328 328 329 + #ifdef CONFIG_MEMCG_KMEM 330 + /* 331 + * Skip the dying kmem_cache. 332 + */ 333 + if (s->memcg_params.dying) 334 + return 1; 335 + #endif 336 + 329 337 return 0; 330 338 } 331 339 ··· 894 886 return 0; 895 887 } 896 888 897 - static void flush_memcg_workqueue(struct kmem_cache *s) 889 + static void memcg_set_kmem_cache_dying(struct kmem_cache *s) 898 890 { 899 891 spin_lock_irq(&memcg_kmem_wq_lock); 900 892 s->memcg_params.dying = true; 901 893 spin_unlock_irq(&memcg_kmem_wq_lock); 894 + } 902 895 896 + static void flush_memcg_workqueue(struct kmem_cache *s) 897 + { 903 898 /* 904 899 * SLAB and SLUB deactivate the kmem_caches through call_rcu. Make 905 900 * sure all registered rcu callbacks have been invoked. ··· 934 923 { 935 924 return 0; 936 925 } 937 - 938 - static inline void flush_memcg_workqueue(struct kmem_cache *s) 939 - { 940 - } 941 926 #endif /* CONFIG_MEMCG_KMEM */ 942 927 943 928 void slab_kmem_cache_release(struct kmem_cache *s) ··· 951 944 if (unlikely(!s)) 952 945 return; 953 946 954 - flush_memcg_workqueue(s); 955 - 956 947 get_online_cpus(); 957 948 get_online_mems(); 958 949 ··· 959 954 s->refcount--; 960 955 if (s->refcount) 961 956 goto out_unlock; 957 + 958 + #ifdef CONFIG_MEMCG_KMEM 959 + memcg_set_kmem_cache_dying(s); 960 + 961 + mutex_unlock(&slab_mutex); 962 + 963 + put_online_mems(); 964 + put_online_cpus(); 965 + 966 + flush_memcg_workqueue(s); 967 + 968 + get_online_cpus(); 969 + get_online_mems(); 970 + 971 + mutex_lock(&slab_mutex); 972 + #endif 962 973 963 974 err = shutdown_memcg_caches(s); 964 975 if (!err)
+4 -5
net/8021q/vlan_dev.c
··· 503 503 lockdep_set_class(&txq->_xmit_lock, &vlan_netdev_xmit_lock_key); 504 504 } 505 505 506 - static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass) 506 + static void vlan_dev_set_lockdep_class(struct net_device *dev) 507 507 { 508 - lockdep_set_class_and_subclass(&dev->addr_list_lock, 509 - &vlan_netdev_addr_lock_key, 510 - subclass); 508 + lockdep_set_class(&dev->addr_list_lock, 509 + &vlan_netdev_addr_lock_key); 511 510 netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, NULL); 512 511 } 513 512 ··· 600 601 601 602 SET_NETDEV_DEVTYPE(dev, &vlan_type); 602 603 603 - vlan_dev_set_lockdep_class(dev, dev->lower_level); 604 + vlan_dev_set_lockdep_class(dev); 604 605 605 606 vlan->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats); 606 607 if (!vlan->vlan_pcpu_stats)
+8 -2
net/ax25/af_ax25.c
··· 1187 1187 if (addr_len > sizeof(struct sockaddr_ax25) && 1188 1188 fsa->fsa_ax25.sax25_ndigis != 0) { 1189 1189 /* Valid number of digipeaters ? */ 1190 - if (fsa->fsa_ax25.sax25_ndigis < 1 || fsa->fsa_ax25.sax25_ndigis > AX25_MAX_DIGIS) { 1190 + if (fsa->fsa_ax25.sax25_ndigis < 1 || 1191 + fsa->fsa_ax25.sax25_ndigis > AX25_MAX_DIGIS || 1192 + addr_len < sizeof(struct sockaddr_ax25) + 1193 + sizeof(ax25_address) * fsa->fsa_ax25.sax25_ndigis) { 1191 1194 err = -EINVAL; 1192 1195 goto out_release; 1193 1196 } ··· 1510 1507 struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)usax; 1511 1508 1512 1509 /* Valid number of digipeaters ? */ 1513 - if (usax->sax25_ndigis < 1 || usax->sax25_ndigis > AX25_MAX_DIGIS) { 1510 + if (usax->sax25_ndigis < 1 || 1511 + usax->sax25_ndigis > AX25_MAX_DIGIS || 1512 + addr_len < sizeof(struct sockaddr_ax25) + 1513 + sizeof(ax25_address) * usax->sax25_ndigis) { 1514 1514 err = -EINVAL; 1515 1515 goto out; 1516 1516 }
+18 -1
net/bpf/test_run.c
··· 147 147 return a + (long)b + c + d + (long)e + f; 148 148 } 149 149 150 + struct bpf_fentry_test_t { 151 + struct bpf_fentry_test_t *a; 152 + }; 153 + 154 + int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg) 155 + { 156 + return (long)arg; 157 + } 158 + 159 + int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg) 160 + { 161 + return (long)arg->a; 162 + } 163 + 150 164 int noinline bpf_modify_return_test(int a, int *b) 151 165 { 152 166 *b += 1; ··· 199 185 const union bpf_attr *kattr, 200 186 union bpf_attr __user *uattr) 201 187 { 188 + struct bpf_fentry_test_t arg = {}; 202 189 u16 side_effect = 0, ret = 0; 203 190 int b = 2, err = -EFAULT; 204 191 u32 retval = 0; ··· 212 197 bpf_fentry_test3(4, 5, 6) != 15 || 213 198 bpf_fentry_test4((void *)7, 8, 9, 10) != 34 || 214 199 bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 || 215 - bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111) 200 + bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 || 201 + bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 || 202 + bpf_fentry_test8(&arg) != 0) 216 203 goto out; 217 204 break; 218 205 case BPF_MODIFY_RETURN:
+1 -1
net/bpfilter/bpfilter_kern.c
··· 50 50 req.len = optlen; 51 51 if (!bpfilter_ops.info.pid) 52 52 goto out; 53 - n = __kernel_write(bpfilter_ops.info.pipe_to_umh, &req, sizeof(req), 53 + n = kernel_write(bpfilter_ops.info.pipe_to_umh, &req, sizeof(req), 54 54 &pos); 55 55 if (n != sizeof(req)) { 56 56 pr_err("write fail %zd\n", n);
+1 -1
net/bridge/br_mrp.c
··· 86 86 { 87 87 struct ethhdr *eth_hdr; 88 88 struct sk_buff *skb; 89 - u16 *version; 89 + __be16 *version; 90 90 91 91 skb = dev_alloc_skb(MRP_MAX_FRAME_LENGTH); 92 92 if (!skb)
+1 -1
net/bridge/br_multicast.c
··· 1007 1007 nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs); 1008 1008 1009 1009 if (skb_transport_offset(skb) + ipv6_transport_len(skb) < 1010 - nsrcs_offset + sizeof(_nsrcs)) 1010 + nsrcs_offset + sizeof(__nsrcs)) 1011 1011 return -EINVAL; 1012 1012 1013 1013 _nsrcs = skb_header_pointer(skb, nsrcs_offset,
+1 -1
net/bridge/br_private.h
··· 430 430 struct hlist_head fdb_list; 431 431 432 432 #if IS_ENABLED(CONFIG_BRIDGE_MRP) 433 - struct list_head __rcu mrp_list; 433 + struct list_head mrp_list; 434 434 #endif 435 435 }; 436 436
+1 -1
net/bridge/br_private_mrp.h
··· 8 8 9 9 struct br_mrp { 10 10 /* list of mrp instances */ 11 - struct list_head __rcu list; 11 + struct list_head list; 12 12 13 13 struct net_bridge_port __rcu *p_port; 14 14 struct net_bridge_port __rcu *s_port;
+1 -1
net/core/dev.c
··· 5601 5601 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { 5602 5602 if (skb->dev->reg_state == NETREG_UNREGISTERING) { 5603 5603 __skb_unlink(skb, &sd->input_pkt_queue); 5604 - kfree_skb(skb); 5604 + dev_kfree_skb_irq(skb); 5605 5605 input_queue_head_incr(sd); 5606 5606 } 5607 5607 }
+10
net/core/dev_addr_lists.c
··· 690 690 if (to->addr_len != from->addr_len) 691 691 return; 692 692 693 + /* netif_addr_lock_bh() uses lockdep subclass 0, this is okay for two 694 + * reasons: 695 + * 1) This is always called without any addr_list_lock, so as the 696 + * outermost one here, it must be 0. 697 + * 2) This is called by some callers after unlinking the upper device, 698 + * so the dev->lower_level becomes 1 again. 699 + * Therefore, the subclass for 'from' is 0, for 'to' is either 1 or 700 + * larger. 701 + */ 693 702 netif_addr_lock_bh(from); 694 703 netif_addr_lock_nested(to); 695 704 __hw_addr_unsync(&to->uc, &from->uc, to->addr_len); ··· 920 911 if (to->addr_len != from->addr_len) 921 912 return; 922 913 914 + /* See the above comments inside dev_uc_unsync(). */ 923 915 netif_addr_lock_bh(from); 924 916 netif_addr_lock_nested(to); 925 917 __hw_addr_unsync(&to->mc, &from->mc, to->addr_len);
+7 -3
net/core/filter.c
··· 5853 5853 { 5854 5854 unsigned int iphdr_len; 5855 5855 5856 - if (skb->protocol == cpu_to_be16(ETH_P_IP)) 5856 + switch (skb_protocol(skb, true)) { 5857 + case cpu_to_be16(ETH_P_IP): 5857 5858 iphdr_len = sizeof(struct iphdr); 5858 - else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) 5859 + break; 5860 + case cpu_to_be16(ETH_P_IPV6): 5859 5861 iphdr_len = sizeof(struct ipv6hdr); 5860 - else 5862 + break; 5863 + default: 5861 5864 return 0; 5865 + } 5862 5866 5863 5867 if (skb_headlen(skb) < iphdr_len) 5864 5868 return 0;
+12 -20
net/core/flow_dissector.c
··· 70 70 EXPORT_SYMBOL(skb_flow_dissector_init); 71 71 72 72 #ifdef CONFIG_BPF_SYSCALL 73 - int flow_dissector_bpf_prog_attach(struct net *net, struct bpf_prog *prog) 73 + int flow_dissector_bpf_prog_attach_check(struct net *net, 74 + struct bpf_prog *prog) 74 75 { 75 76 enum netns_bpf_attach_type type = NETNS_BPF_FLOW_DISSECTOR; 76 - struct bpf_prog *attached; 77 77 78 78 if (net == &init_net) { 79 79 /* BPF flow dissector in the root namespace overrides ··· 86 86 for_each_net(ns) { 87 87 if (ns == &init_net) 88 88 continue; 89 - if (rcu_access_pointer(ns->bpf.progs[type])) 89 + if (rcu_access_pointer(ns->bpf.run_array[type])) 90 90 return -EEXIST; 91 91 } 92 92 } else { 93 93 /* Make sure root flow dissector is not attached 94 94 * when attaching to the non-root namespace. 95 95 */ 96 - if (rcu_access_pointer(init_net.bpf.progs[type])) 96 + if (rcu_access_pointer(init_net.bpf.run_array[type])) 97 97 return -EEXIST; 98 98 } 99 99 100 - attached = rcu_dereference_protected(net->bpf.progs[type], 101 - lockdep_is_held(&netns_bpf_mutex)); 102 - if (attached == prog) 103 - /* The same program cannot be attached twice */ 104 - return -EINVAL; 105 - 106 - rcu_assign_pointer(net->bpf.progs[type], prog); 107 - if (attached) 108 - bpf_prog_put(attached); 109 100 return 0; 110 101 } 111 102 #endif /* CONFIG_BPF_SYSCALL */ ··· 894 903 struct flow_dissector_key_addrs *key_addrs; 895 904 struct flow_dissector_key_tags *key_tags; 896 905 struct flow_dissector_key_vlan *key_vlan; 897 - struct bpf_prog *attached = NULL; 898 906 enum flow_dissect_ret fdret; 899 907 enum flow_dissector_key_id dissector_vlan = FLOW_DISSECTOR_KEY_MAX; 900 908 bool mpls_el = false; ··· 950 960 WARN_ON_ONCE(!net); 951 961 if (net) { 952 962 enum netns_bpf_attach_type type = NETNS_BPF_FLOW_DISSECTOR; 963 + struct bpf_prog_array *run_array; 953 964 954 965 rcu_read_lock(); 955 - attached = rcu_dereference(init_net.bpf.progs[type]); 966 + run_array = rcu_dereference(init_net.bpf.run_array[type]); 967 + if 
(!run_array) 968 + run_array = rcu_dereference(net->bpf.run_array[type]); 956 969 957 - if (!attached) 958 - attached = rcu_dereference(net->bpf.progs[type]); 959 - 960 - if (attached) { 970 + if (run_array) { 961 971 struct bpf_flow_keys flow_keys; 962 972 struct bpf_flow_dissector ctx = { 963 973 .flow_keys = &flow_keys, ··· 965 975 .data_end = data + hlen, 966 976 }; 967 977 __be16 n_proto = proto; 978 + struct bpf_prog *prog; 968 979 969 980 if (skb) { 970 981 ctx.skb = skb; ··· 976 985 n_proto = skb->protocol; 977 986 } 978 987 979 - ret = bpf_flow_dissect(attached, &ctx, n_proto, nhoff, 988 + prog = READ_ONCE(run_array->items[0].prog); 989 + ret = bpf_flow_dissect(prog, &ctx, n_proto, nhoff, 980 990 hlen, flags); 981 991 __skb_flow_bpf_to_target(&flow_keys, flow_dissector, 982 992 target_container);
+1
net/core/flow_offload.c
··· 4 4 #include <net/flow_offload.h> 5 5 #include <linux/rtnetlink.h> 6 6 #include <linux/mutex.h> 7 + #include <linux/rhashtable.h> 7 8 8 9 struct flow_rule *flow_rule_alloc(unsigned int num_actions) 9 10 {
+1 -1
net/core/net-sysfs.c
··· 1108 1108 trans_timeout = queue->trans_timeout; 1109 1109 spin_unlock_irq(&queue->_xmit_lock); 1110 1110 1111 - return sprintf(buf, "%lu", trans_timeout); 1111 + return sprintf(buf, fmt_ulong, trans_timeout); 1112 1112 } 1113 1113 1114 1114 static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
+2 -1
net/core/rtnetlink.c
··· 3343 3343 */ 3344 3344 if (err < 0) { 3345 3345 /* If device is not registered at all, free it now */ 3346 - if (dev->reg_state == NETREG_UNINITIALIZED) 3346 + if (dev->reg_state == NETREG_UNINITIALIZED || 3347 + dev->reg_state == NETREG_UNREGISTERED) 3347 3348 free_netdev(dev); 3348 3349 goto out; 3349 3350 }
+15 -8
net/core/skmsg.c
··· 683 683 return container_of(parser, struct sk_psock, parser); 684 684 } 685 685 686 - static void sk_psock_skb_redirect(struct sk_psock *psock, struct sk_buff *skb) 686 + static void sk_psock_skb_redirect(struct sk_buff *skb) 687 687 { 688 688 struct sk_psock *psock_other; 689 689 struct sock *sk_other; ··· 715 715 } 716 716 } 717 717 718 - static void sk_psock_tls_verdict_apply(struct sk_psock *psock, 719 - struct sk_buff *skb, int verdict) 718 + static void sk_psock_tls_verdict_apply(struct sk_buff *skb, int verdict) 720 719 { 721 720 switch (verdict) { 722 721 case __SK_REDIRECT: 723 - sk_psock_skb_redirect(psock, skb); 722 + sk_psock_skb_redirect(skb); 724 723 break; 725 724 case __SK_PASS: 726 725 case __SK_DROP: ··· 740 741 ret = sk_psock_bpf_run(psock, prog, skb); 741 742 ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb)); 742 743 } 744 + sk_psock_tls_verdict_apply(skb, ret); 743 745 rcu_read_unlock(); 744 - sk_psock_tls_verdict_apply(psock, skb, ret); 745 746 return ret; 746 747 } 747 748 EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read); ··· 769 770 } 770 771 goto out_free; 771 772 case __SK_REDIRECT: 772 - sk_psock_skb_redirect(psock, skb); 773 + sk_psock_skb_redirect(skb); 773 774 break; 774 775 case __SK_DROP: 775 776 /* fall-through */ ··· 781 782 782 783 static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb) 783 784 { 784 - struct sk_psock *psock = sk_psock_from_strp(strp); 785 + struct sk_psock *psock; 785 786 struct bpf_prog *prog; 786 787 int ret = __SK_DROP; 788 + struct sock *sk; 787 789 788 790 rcu_read_lock(); 791 + sk = strp->sk; 792 + psock = sk_psock(sk); 793 + if (unlikely(!psock)) { 794 + kfree_skb(skb); 795 + goto out; 796 + } 789 797 prog = READ_ONCE(psock->progs.skb_verdict); 790 798 if (likely(prog)) { 791 799 skb_orphan(skb); ··· 800 794 ret = sk_psock_bpf_run(psock, prog, skb); 801 795 ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb)); 802 796 } 803 - rcu_read_unlock(); 804 797 
sk_psock_verdict_apply(psock, skb, ret); 798 + out: 799 + rcu_read_unlock(); 805 800 } 806 801 807 802 static int sk_psock_strp_read_done(struct strparser *strp, int err)
+1 -1
net/core/sock.c
··· 1926 1926 /* sk->sk_memcg will be populated at accept() time */ 1927 1927 newsk->sk_memcg = NULL; 1928 1928 1929 - cgroup_sk_alloc(&newsk->sk_cgrp_data); 1929 + cgroup_sk_clone(&newsk->sk_cgrp_data); 1930 1930 1931 1931 rcu_read_lock(); 1932 1932 filter = rcu_dereference(sk->sk_filter);
+48 -5
net/core/sock_map.c
··· 70 70 struct fd f; 71 71 int ret; 72 72 73 + if (attr->attach_flags || attr->replace_bpf_fd) 74 + return -EINVAL; 75 + 73 76 f = fdget(ufd); 74 77 map = __bpf_map_get(f); 75 78 if (IS_ERR(map)) 76 79 return PTR_ERR(map); 77 - ret = sock_map_prog_update(map, prog, attr->attach_type); 80 + ret = sock_map_prog_update(map, prog, NULL, attr->attach_type); 81 + fdput(f); 82 + return ret; 83 + } 84 + 85 + int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype) 86 + { 87 + u32 ufd = attr->target_fd; 88 + struct bpf_prog *prog; 89 + struct bpf_map *map; 90 + struct fd f; 91 + int ret; 92 + 93 + if (attr->attach_flags || attr->replace_bpf_fd) 94 + return -EINVAL; 95 + 96 + f = fdget(ufd); 97 + map = __bpf_map_get(f); 98 + if (IS_ERR(map)) 99 + return PTR_ERR(map); 100 + 101 + prog = bpf_prog_get(attr->attach_bpf_fd); 102 + if (IS_ERR(prog)) { 103 + ret = PTR_ERR(prog); 104 + goto put_map; 105 + } 106 + 107 + if (prog->type != ptype) { 108 + ret = -EINVAL; 109 + goto put_prog; 110 + } 111 + 112 + ret = sock_map_prog_update(map, NULL, prog, attr->attach_type); 113 + put_prog: 114 + bpf_prog_put(prog); 115 + put_map: 78 116 fdput(f); 79 117 return ret; 80 118 } ··· 1241 1203 } 1242 1204 1243 1205 int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, 1244 - u32 which) 1206 + struct bpf_prog *old, u32 which) 1245 1207 { 1246 1208 struct sk_psock_progs *progs = sock_map_progs(map); 1209 + struct bpf_prog **pprog; 1247 1210 1248 1211 if (!progs) 1249 1212 return -EOPNOTSUPP; 1250 1213 1251 1214 switch (which) { 1252 1215 case BPF_SK_MSG_VERDICT: 1253 - psock_set_prog(&progs->msg_parser, prog); 1216 + pprog = &progs->msg_parser; 1254 1217 break; 1255 1218 case BPF_SK_SKB_STREAM_PARSER: 1256 - psock_set_prog(&progs->skb_parser, prog); 1219 + pprog = &progs->skb_parser; 1257 1220 break; 1258 1221 case BPF_SK_SKB_STREAM_VERDICT: 1259 - psock_set_prog(&progs->skb_verdict, prog); 1222 + pprog = &progs->skb_verdict; 1260 1223 break; 1261 1224 
default: 1262 1225 return -EOPNOTSUPP; 1263 1226 } 1264 1227 1228 + if (old) 1229 + return psock_replace_prog(pprog, prog, old); 1230 + 1231 + psock_set_prog(pprog, prog); 1265 1232 return 0; 1266 1233 } 1267 1234
+1
net/core/sock_reuseport.c
··· 101 101 more_reuse->prog = reuse->prog; 102 102 more_reuse->reuseport_id = reuse->reuseport_id; 103 103 more_reuse->bind_inany = reuse->bind_inany; 104 + more_reuse->has_conns = reuse->has_conns; 104 105 105 106 memcpy(more_reuse->socks, reuse->socks, 106 107 reuse->num_socks * sizeof(struct sock *));
+1 -1
net/core/sysctl_net_core.c
··· 274 274 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); 275 275 if (write && !ret) { 276 276 if (jit_enable < 2 || 277 - (jit_enable == 2 && bpf_dump_raw_ok())) { 277 + (jit_enable == 2 && bpf_dump_raw_ok(current_cred()))) { 278 278 *(int *)table->data = jit_enable; 279 279 if (jit_enable == 2) 280 280 pr_warn("bpf_jit_enable = 2 was set! NEVER use this in production, only for JIT debugging!\n");
+13 -14
net/ethtool/netlink.c
··· 376 376 } 377 377 378 378 static int ethnl_default_dump_one(struct sk_buff *skb, struct net_device *dev, 379 - const struct ethnl_dump_ctx *ctx) 379 + const struct ethnl_dump_ctx *ctx, 380 + struct netlink_callback *cb) 380 381 { 382 + void *ehdr; 381 383 int ret; 384 + 385 + ehdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 386 + &ethtool_genl_family, 0, ctx->ops->reply_cmd); 387 + if (!ehdr) 388 + return -EMSGSIZE; 382 389 383 390 ethnl_init_reply_data(ctx->reply_data, ctx->ops, dev); 384 391 rtnl_lock(); ··· 402 395 if (ctx->ops->cleanup_data) 403 396 ctx->ops->cleanup_data(ctx->reply_data); 404 397 ctx->reply_data->dev = NULL; 398 + if (ret < 0) 399 + genlmsg_cancel(skb, ehdr); 400 + else 401 + genlmsg_end(skb, ehdr); 405 402 return ret; 406 403 } 407 404 ··· 422 411 int s_idx = ctx->pos_idx; 423 412 int h, idx = 0; 424 413 int ret = 0; 425 - void *ehdr; 426 414 427 415 rtnl_lock(); 428 416 for (h = ctx->pos_hash; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { ··· 441 431 dev_hold(dev); 442 432 rtnl_unlock(); 443 433 444 - ehdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, 445 - cb->nlh->nlmsg_seq, 446 - &ethtool_genl_family, 0, 447 - ctx->ops->reply_cmd); 448 - if (!ehdr) { 449 - dev_put(dev); 450 - ret = -EMSGSIZE; 451 - goto out; 452 - } 453 - ret = ethnl_default_dump_one(skb, dev, ctx); 434 + ret = ethnl_default_dump_one(skb, dev, ctx, cb); 454 435 dev_put(dev); 455 436 if (ret < 0) { 456 - genlmsg_cancel(skb, ehdr); 457 437 if (ret == -EOPNOTSUPP) 458 438 goto lock_and_cont; 459 439 if (likely(skb->len)) 460 440 ret = skb->len; 461 441 goto out; 462 442 } 463 - genlmsg_end(skb, ehdr); 464 443 lock_and_cont: 465 444 rtnl_lock(); 466 445 if (net->dev_base_seq != seq) {
+7 -4
net/hsr/hsr_device.c
··· 415 415 unsigned char multicast_spec, u8 protocol_version, 416 416 struct netlink_ext_ack *extack) 417 417 { 418 + bool unregister = false; 418 419 struct hsr_priv *hsr; 419 420 int res; 420 421 ··· 467 466 if (res) 468 467 goto err_unregister; 469 468 469 + unregister = true; 470 + 470 471 res = hsr_add_port(hsr, slave[0], HSR_PT_SLAVE_A, extack); 471 472 if (res) 472 - goto err_add_slaves; 473 + goto err_unregister; 473 474 474 475 res = hsr_add_port(hsr, slave[1], HSR_PT_SLAVE_B, extack); 475 476 if (res) 476 - goto err_add_slaves; 477 + goto err_unregister; 477 478 478 479 hsr_debugfs_init(hsr, hsr_dev); 479 480 mod_timer(&hsr->prune_timer, jiffies + msecs_to_jiffies(PRUNE_PERIOD)); 480 481 481 482 return 0; 482 483 483 - err_add_slaves: 484 - unregister_netdevice(hsr_dev); 485 484 err_unregister: 486 485 hsr_del_ports(hsr); 487 486 err_add_master: 488 487 hsr_del_self_node(hsr); 489 488 489 + if (unregister) 490 + unregister_netdevice(hsr_dev); 490 491 return res; 491 492 }
+13 -5
net/hsr/hsr_forward.c
··· 120 120 return skb_clone(frame->skb_std, GFP_ATOMIC); 121 121 } 122 122 123 - static void hsr_fill_tag(struct sk_buff *skb, struct hsr_frame_info *frame, 124 - struct hsr_port *port, u8 proto_version) 123 + static struct sk_buff *hsr_fill_tag(struct sk_buff *skb, 124 + struct hsr_frame_info *frame, 125 + struct hsr_port *port, u8 proto_version) 125 126 { 126 127 struct hsr_ethhdr *hsr_ethhdr; 127 128 int lane_id; 128 129 int lsdu_size; 130 + 131 + /* pad to minimum packet size which is 60 + 6 (HSR tag) */ 132 + if (skb_put_padto(skb, ETH_ZLEN + HSR_HLEN)) 133 + return NULL; 129 134 130 135 if (port->type == HSR_PT_SLAVE_A) 131 136 lane_id = 0; ··· 149 144 hsr_ethhdr->hsr_tag.encap_proto = hsr_ethhdr->ethhdr.h_proto; 150 145 hsr_ethhdr->ethhdr.h_proto = htons(proto_version ? 151 146 ETH_P_HSR : ETH_P_PRP); 147 + 148 + return skb; 152 149 } 153 150 154 151 static struct sk_buff *create_tagged_skb(struct sk_buff *skb_o, ··· 179 172 memmove(dst, src, movelen); 180 173 skb_reset_mac_header(skb); 181 174 182 - hsr_fill_tag(skb, frame, port, port->hsr->prot_version); 183 - 184 - return skb; 175 + /* skb_put_padto free skb on error and hsr_fill_tag returns NULL in 176 + * that case 177 + */ 178 + return hsr_fill_tag(skb, frame, port, port->hsr->prot_version); 185 179 } 186 180 187 181 /* If the original frame was an HSR tagged frame, just clone it to be sent
+2 -1
net/hsr/hsr_framereg.c
··· 325 325 if (port->type != node_dst->addr_B_port) 326 326 return; 327 327 328 - ether_addr_copy(eth_hdr(skb)->h_dest, node_dst->macaddress_B); 328 + if (is_valid_ether_addr(node_dst->macaddress_B)) 329 + ether_addr_copy(eth_hdr(skb)->h_dest, node_dst->macaddress_B); 329 330 } 330 331 331 332 void hsr_register_frame_in(struct hsr_node *node, struct hsr_port *port,
+2 -2
net/ipv4/icmp.c
··· 427 427 428 428 ipcm_init(&ipc); 429 429 inet->tos = ip_hdr(skb)->tos; 430 - sk->sk_mark = mark; 430 + ipc.sockc.mark = mark; 431 431 daddr = ipc.addr = ip_hdr(skb)->saddr; 432 432 saddr = fib_compute_spec_dst(skb); 433 433 ··· 710 710 icmp_param.skb = skb_in; 711 711 icmp_param.offset = skb_network_offset(skb_in); 712 712 inet_sk(sk)->tos = tos; 713 - sk->sk_mark = mark; 714 713 ipcm_init(&ipc); 715 714 ipc.addr = iph->saddr; 716 715 ipc.opt = &icmp_param.replyopts.opt; 716 + ipc.sockc.mark = mark; 717 717 718 718 rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos, mark, 719 719 type, code, &icmp_param);
+1 -1
net/ipv4/ip_output.c
··· 1702 1702 sk->sk_protocol = ip_hdr(skb)->protocol; 1703 1703 sk->sk_bound_dev_if = arg->bound_dev_if; 1704 1704 sk->sk_sndbuf = sysctl_wmem_default; 1705 - sk->sk_mark = fl4.flowi4_mark; 1705 + ipc.sockc.mark = fl4.flowi4_mark; 1706 1706 err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, 1707 1707 len, 0, &ipc, &rt, MSG_DONTWAIT); 1708 1708 if (unlikely(err)) {
+18
net/ipv4/ip_tunnel_core.c
··· 844 844 static_branch_dec(&ip_tunnel_metadata_cnt); 845 845 } 846 846 EXPORT_SYMBOL_GPL(ip_tunnel_unneed_metadata); 847 + 848 + /* Returns either the correct skb->protocol value, or 0 if invalid. */ 849 + __be16 ip_tunnel_parse_protocol(const struct sk_buff *skb) 850 + { 851 + if (skb_network_header(skb) >= skb->head && 852 + (skb_network_header(skb) + sizeof(struct iphdr)) <= skb_tail_pointer(skb) && 853 + ip_hdr(skb)->version == 4) 854 + return htons(ETH_P_IP); 855 + if (skb_network_header(skb) >= skb->head && 856 + (skb_network_header(skb) + sizeof(struct ipv6hdr)) <= skb_tail_pointer(skb) && 857 + ipv6_hdr(skb)->version == 6) 858 + return htons(ETH_P_IPV6); 859 + return 0; 860 + } 861 + EXPORT_SYMBOL(ip_tunnel_parse_protocol); 862 + 863 + const struct header_ops ip_tunnel_header_ops = { .parse_protocol = ip_tunnel_parse_protocol }; 864 + EXPORT_SYMBOL(ip_tunnel_header_ops);
+1
net/ipv4/ip_vti.c
··· 441 441 static void vti_tunnel_setup(struct net_device *dev) 442 442 { 443 443 dev->netdev_ops = &vti_netdev_ops; 444 + dev->header_ops = &ip_tunnel_header_ops; 444 445 dev->type = ARPHRD_TUNNEL; 445 446 ip_tunnel_setup(dev, vti_net_id); 446 447 }
+1
net/ipv4/ipip.c
··· 361 361 static void ipip_tunnel_setup(struct net_device *dev) 362 362 { 363 363 dev->netdev_ops = &ipip_netdev_ops; 364 + dev->header_ops = &ip_tunnel_header_ops; 364 365 365 366 dev->type = ARPHRD_TUNNEL; 366 367 dev->flags = IFF_NOARP;
+3
net/ipv4/ping.c
··· 786 786 inet_sk_flowi_flags(sk), faddr, saddr, 0, 0, 787 787 sk->sk_uid); 788 788 789 + fl4.fl4_icmp_type = user_icmph.type; 790 + fl4.fl4_icmp_code = user_icmph.code; 791 + 789 792 security_sk_classify_flow(sk, flowi4_to_flowi(&fl4)); 790 793 rt = ip_route_output_flow(net, &fl4, sk); 791 794 if (IS_ERR(rt)) {
+1 -1
net/ipv4/route.c
··· 2027 2027 const struct sk_buff *hint) 2028 2028 { 2029 2029 struct in_device *in_dev = __in_dev_get_rcu(dev); 2030 - struct rtable *rt = (struct rtable *)hint; 2030 + struct rtable *rt = skb_rtable(hint); 2031 2031 struct net *net = dev_net(dev); 2032 2032 int err = -EINVAL; 2033 2033 u32 tag = 0;
+10 -7
net/ipv4/tcp.c
··· 2691 2691 tp->window_clamp = 0; 2692 2692 tp->delivered = 0; 2693 2693 tp->delivered_ce = 0; 2694 + if (icsk->icsk_ca_ops->release) 2695 + icsk->icsk_ca_ops->release(sk); 2696 + memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv)); 2694 2697 tcp_set_ca_state(sk, TCP_CA_Open); 2695 2698 tp->is_sack_reneg = 0; 2696 2699 tcp_clear_retrans(tp); ··· 3249 3246 #ifdef CONFIG_TCP_MD5SIG 3250 3247 case TCP_MD5SIG: 3251 3248 case TCP_MD5SIG_EXT: 3252 - if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) 3253 - err = tp->af_specific->md5_parse(sk, optname, optval, optlen); 3254 - else 3255 - err = -EINVAL; 3249 + err = tp->af_specific->md5_parse(sk, optname, optval, optlen); 3256 3250 break; 3257 3251 #endif 3258 3252 case TCP_USER_TIMEOUT: ··· 4033 4033 4034 4034 int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key) 4035 4035 { 4036 + u8 keylen = READ_ONCE(key->keylen); /* paired with WRITE_ONCE() in tcp_md5_do_add */ 4036 4037 struct scatterlist sg; 4037 4038 4038 - sg_init_one(&sg, key->key, key->keylen); 4039 - ahash_request_set_crypt(hp->md5_req, &sg, NULL, key->keylen); 4040 - return crypto_ahash_update(hp->md5_req); 4039 + sg_init_one(&sg, key->key, keylen); 4040 + ahash_request_set_crypt(hp->md5_req, &sg, NULL, keylen); 4041 + 4042 + /* We use data_race() because tcp_md5_do_add() might change key->key under us */ 4043 + return data_race(crypto_ahash_update(hp->md5_req)); 4041 4044 } 4042 4045 EXPORT_SYMBOL(tcp_md5_hash_key); 4043 4046
+1 -1
net/ipv4/tcp_cong.c
··· 197 197 icsk->icsk_ca_setsockopt = 1; 198 198 memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv)); 199 199 200 - if (sk->sk_state != TCP_CLOSE) 200 + if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) 201 201 tcp_init_congestion_control(sk); 202 202 } 203 203
+8 -5
net/ipv4/tcp_input.c
··· 3488 3488 } 3489 3489 } 3490 3490 3491 - /* This routine deals with acks during a TLP episode. 3492 - * We mark the end of a TLP episode on receiving TLP dupack or when 3493 - * ack is after tlp_high_seq. 3494 - * Ref: loss detection algorithm in draft-dukkipati-tcpm-tcp-loss-probe. 3491 + /* This routine deals with acks during a TLP episode and ends an episode by 3492 + * resetting tlp_high_seq. Ref: TLP algorithm in draft-ietf-tcpm-rack 3495 3493 */ 3496 3494 static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag) 3497 3495 { ··· 3498 3500 if (before(ack, tp->tlp_high_seq)) 3499 3501 return; 3500 3502 3501 - if (flag & FLAG_DSACKING_ACK) { 3503 + if (!tp->tlp_retrans) { 3504 + /* TLP of new data has been acknowledged */ 3505 + tp->tlp_high_seq = 0; 3506 + } else if (flag & FLAG_DSACKING_ACK) { 3502 3507 /* This DSACK means original and TLP probe arrived; no loss */ 3503 3508 tp->tlp_high_seq = 0; 3504 3509 } else if (after(ack, tp->tlp_high_seq)) { ··· 4583 4582 4584 4583 if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) { 4585 4584 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP); 4585 + sk->sk_data_ready(sk); 4586 4586 tcp_drop(sk, skb); 4587 4587 return; 4588 4588 } ··· 4830 4828 sk_forced_mem_schedule(sk, skb->truesize); 4831 4829 else if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) { 4832 4830 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP); 4831 + sk->sk_data_ready(sk); 4833 4832 goto drop; 4834 4833 } 4835 4834
+16 -4
net/ipv4/tcp_ipv4.c
··· 1111 1111 1112 1112 key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index); 1113 1113 if (key) { 1114 - /* Pre-existing entry - just update that one. */ 1115 - memcpy(key->key, newkey, newkeylen); 1116 - key->keylen = newkeylen; 1114 + /* Pre-existing entry - just update that one. 1115 + * Note that the key might be used concurrently. 1116 + * data_race() is telling kcsan that we do not care of 1117 + * key mismatches, since changing MD5 key on live flows 1118 + * can lead to packet drops. 1119 + */ 1120 + data_race(memcpy(key->key, newkey, newkeylen)); 1121 + 1122 + /* Pairs with READ_ONCE() in tcp_md5_hash_key(). 1123 + * Also note that a reader could catch new key->keylen value 1124 + * but old key->key[], this is the reason we use __GFP_ZERO 1125 + * at sock_kmalloc() time below these lines. 1126 + */ 1127 + WRITE_ONCE(key->keylen, newkeylen); 1128 + 1117 1129 return 0; 1118 1130 } 1119 1131 ··· 1141 1129 rcu_assign_pointer(tp->md5sig_info, md5sig); 1142 1130 } 1143 1131 1144 - key = sock_kmalloc(sk, sizeof(*key), gfp); 1132 + key = sock_kmalloc(sk, sizeof(*key), gfp | __GFP_ZERO); 1145 1133 if (!key) 1146 1134 return -ENOMEM; 1147 1135 if (!tcp_alloc_md5sig_pool()) {
+13 -8
net/ipv4/tcp_output.c
··· 700 700 unsigned int mss, struct sk_buff *skb, 701 701 struct tcp_out_options *opts, 702 702 const struct tcp_md5sig_key *md5, 703 - struct tcp_fastopen_cookie *foc) 703 + struct tcp_fastopen_cookie *foc, 704 + enum tcp_synack_type synack_type) 704 705 { 705 706 struct inet_request_sock *ireq = inet_rsk(req); 706 707 unsigned int remaining = MAX_TCP_OPTION_SPACE; ··· 716 715 * rather than TS in order to fit in better with old, 717 716 * buggy kernels, but that was deemed to be unnecessary. 718 717 */ 719 - ireq->tstamp_ok &= !ireq->sack_ok; 718 + if (synack_type != TCP_SYNACK_COOKIE) 719 + ireq->tstamp_ok &= !ireq->sack_ok; 720 720 } 721 721 #endif 722 722 ··· 2624 2622 int pcount; 2625 2623 int mss = tcp_current_mss(sk); 2626 2624 2625 + /* At most one outstanding TLP */ 2626 + if (tp->tlp_high_seq) 2627 + goto rearm_timer; 2628 + 2629 + tp->tlp_retrans = 0; 2627 2630 skb = tcp_send_head(sk); 2628 2631 if (skb && tcp_snd_wnd_test(tp, skb, mss)) { 2629 2632 pcount = tp->packets_out; ··· 2645 2638 inet_csk(sk)->icsk_pending = 0; 2646 2639 return; 2647 2640 } 2648 - 2649 - /* At most one outstanding TLP retransmission. */ 2650 - if (tp->tlp_high_seq) 2651 - goto rearm_timer; 2652 2641 2653 2642 if (skb_still_in_host_queue(sk, skb)) 2654 2643 goto rearm_timer; ··· 2667 2664 if (__tcp_retransmit_skb(sk, skb, 1)) 2668 2665 goto rearm_timer; 2669 2666 2667 + tp->tlp_retrans = 1; 2668 + 2669 + probe_sent: 2670 2670 /* Record snd_nxt for loss detection. */ 2671 2671 tp->tlp_high_seq = tp->snd_nxt; 2672 2672 2673 - probe_sent: 2674 2673 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES); 2675 2674 /* Reset s.t. 
tcp_rearm_rto will restart timer from now */ 2676 2675 inet_csk(sk)->icsk_pending = 0; ··· 3399 3394 #endif 3400 3395 skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4); 3401 3396 tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5, 3402 - foc) + sizeof(*th); 3397 + foc, synack_type) + sizeof(*th); 3403 3398 3404 3399 skb_push(skb, tcp_header_size); 3405 3400 skb_reset_transport_header(skb);
+10 -7
net/ipv4/udp.c
··· 416 416 struct udp_hslot *hslot2, 417 417 struct sk_buff *skb) 418 418 { 419 - struct sock *sk, *result; 419 + struct sock *sk, *result, *reuseport_result; 420 420 int score, badness; 421 421 u32 hash = 0; 422 422 ··· 426 426 score = compute_score(sk, net, saddr, sport, 427 427 daddr, hnum, dif, sdif); 428 428 if (score > badness) { 429 + reuseport_result = NULL; 430 + 429 431 if (sk->sk_reuseport && 430 432 sk->sk_state != TCP_ESTABLISHED) { 431 433 hash = udp_ehashfn(net, daddr, hnum, 432 434 saddr, sport); 433 - result = reuseport_select_sock(sk, hash, skb, 434 - sizeof(struct udphdr)); 435 - if (result && !reuseport_has_conns(sk, false)) 436 - return result; 435 + reuseport_result = reuseport_select_sock(sk, hash, skb, 436 + sizeof(struct udphdr)); 437 + if (reuseport_result && !reuseport_has_conns(sk, false)) 438 + return reuseport_result; 437 439 } 440 + 441 + result = reuseport_result ? : sk; 438 442 badness = score; 439 - result = sk; 440 443 } 441 444 } 442 445 return result; ··· 2054 2051 /* 2055 2052 * UDP-Lite specific tests, ignored on UDP sockets 2056 2053 */ 2057 - if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) { 2054 + if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) { 2058 2055 2059 2056 /* 2060 2057 * MIB statistics other than incrementing the error count are
+2 -2
net/ipv6/icmp.c
··· 566 566 fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, NULL); 567 567 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6)); 568 568 569 - sk->sk_mark = mark; 570 569 np = inet6_sk(sk); 571 570 572 571 if (!icmpv6_xrlim_allow(sk, type, &fl6)) ··· 582 583 fl6.flowi6_oif = np->ucast_oif; 583 584 584 585 ipcm6_init_sk(&ipc6, np); 586 + ipc6.sockc.mark = mark; 585 587 fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel); 586 588 587 589 dst = icmpv6_route_lookup(net, skb, sk, &fl6); ··· 751 751 sk = icmpv6_xmit_lock(net); 752 752 if (!sk) 753 753 goto out_bh_enable; 754 - sk->sk_mark = mark; 755 754 np = inet6_sk(sk); 756 755 757 756 if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) ··· 778 779 ipcm6_init_sk(&ipc6, np); 779 780 ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst); 780 781 ipc6.tclass = ipv6_get_dsfield(ipv6_hdr(skb)); 782 + ipc6.sockc.mark = mark; 781 783 782 784 if (ip6_append_data(sk, icmpv6_getfrag, &msg, 783 785 skb->len + sizeof(struct icmp6hdr),
+6 -5
net/ipv6/ip6_gre.c
··· 1562 1562 static int __net_init ip6gre_init_net(struct net *net) 1563 1563 { 1564 1564 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id); 1565 + struct net_device *ndev; 1565 1566 int err; 1566 1567 1567 1568 if (!net_has_fallback_tunnels(net)) 1568 1569 return 0; 1569 - ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0", 1570 - NET_NAME_UNKNOWN, 1571 - ip6gre_tunnel_setup); 1572 - if (!ign->fb_tunnel_dev) { 1570 + ndev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0", 1571 + NET_NAME_UNKNOWN, ip6gre_tunnel_setup); 1572 + if (!ndev) { 1573 1573 err = -ENOMEM; 1574 1574 goto err_alloc_dev; 1575 1575 } 1576 + ign->fb_tunnel_dev = ndev; 1576 1577 dev_net_set(ign->fb_tunnel_dev, net); 1577 1578 /* FB netdevice is special: we have one, and only one per netns. 1578 1579 * Allowing to move it to another netns is clearly unsafe. ··· 1593 1592 return 0; 1594 1593 1595 1594 err_reg_dev: 1596 - free_netdev(ign->fb_tunnel_dev); 1595 + free_netdev(ndev); 1597 1596 err_alloc_dev: 1598 1597 return err; 1599 1598 }
+1
net/ipv6/ip6_tunnel.c
··· 1846 1846 static void ip6_tnl_dev_setup(struct net_device *dev) 1847 1847 { 1848 1848 dev->netdev_ops = &ip6_tnl_netdev_ops; 1849 + dev->header_ops = &ip_tunnel_header_ops; 1849 1850 dev->needs_free_netdev = true; 1850 1851 dev->priv_destructor = ip6_dev_free; 1851 1852
+1
net/ipv6/ip6_vti.c
··· 905 905 static void vti6_dev_setup(struct net_device *dev) 906 906 { 907 907 dev->netdev_ops = &vti6_netdev_ops; 908 + dev->header_ops = &ip_tunnel_header_ops; 908 909 dev->needs_free_netdev = true; 909 910 dev->priv_destructor = vti6_dev_free; 910 911
+5 -2
net/ipv6/route.c
··· 431 431 struct fib6_info *sibling, *next_sibling; 432 432 struct fib6_info *match = res->f6i; 433 433 434 - if ((!match->fib6_nsiblings && !match->nh) || have_oif_match) 434 + if (!match->nh && (!match->fib6_nsiblings || have_oif_match)) 435 435 goto out; 436 + 437 + if (match->nh && have_oif_match && res->nh) 438 + return; 436 439 437 440 /* We might have already computed the hash for ICMPv6 errors. In such 438 441 * case it will always be non-zero. Otherwise now is the time to do it. ··· 3405 3402 if ((flags & RTF_REJECT) || 3406 3403 (dev && (dev->flags & IFF_LOOPBACK) && 3407 3404 !(addr_type & IPV6_ADDR_LOOPBACK) && 3408 - !(flags & RTF_LOCAL))) 3405 + !(flags & (RTF_ANYCAST | RTF_LOCAL)))) 3409 3406 return true; 3410 3407 3411 3408 return false;
+1
net/ipv6/sit.c
··· 1421 1421 int t_hlen = tunnel->hlen + sizeof(struct iphdr); 1422 1422 1423 1423 dev->netdev_ops = &ipip6_netdev_ops; 1424 + dev->header_ops = &ip_tunnel_header_ops; 1424 1425 dev->needs_free_netdev = true; 1425 1426 dev->priv_destructor = ipip6_dev_free; 1426 1427
+10 -7
net/ipv6/udp.c
··· 148 148 int dif, int sdif, struct udp_hslot *hslot2, 149 149 struct sk_buff *skb) 150 150 { 151 - struct sock *sk, *result; 151 + struct sock *sk, *result, *reuseport_result; 152 152 int score, badness; 153 153 u32 hash = 0; 154 154 ··· 158 158 score = compute_score(sk, net, saddr, sport, 159 159 daddr, hnum, dif, sdif); 160 160 if (score > badness) { 161 + reuseport_result = NULL; 162 + 161 163 if (sk->sk_reuseport && 162 164 sk->sk_state != TCP_ESTABLISHED) { 163 165 hash = udp6_ehashfn(net, daddr, hnum, 164 166 saddr, sport); 165 167 166 - result = reuseport_select_sock(sk, hash, skb, 167 - sizeof(struct udphdr)); 168 - if (result && !reuseport_has_conns(sk, false)) 169 - return result; 168 + reuseport_result = reuseport_select_sock(sk, hash, skb, 169 + sizeof(struct udphdr)); 170 + if (reuseport_result && !reuseport_has_conns(sk, false)) 171 + return reuseport_result; 170 172 } 171 - result = sk; 173 + 174 + result = reuseport_result ? : sk; 172 175 badness = score; 173 176 } 174 177 } ··· 646 643 /* 647 644 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c). 648 645 */ 649 - if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) { 646 + if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) { 650 647 651 648 if (up->pcrlen == 0) { /* full coverage was set */ 652 649 net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
+1 -4
net/l2tp/l2tp_core.c
··· 1028 1028 1029 1029 /* Queue the packet to IP for output */ 1030 1030 skb->ignore_df = 1; 1031 + skb_dst_drop(skb); 1031 1032 #if IS_ENABLED(CONFIG_IPV6) 1032 1033 if (l2tp_sk_is_v6(tunnel->sock)) 1033 1034 error = inet6_csk_xmit(tunnel->sock, skb, NULL); ··· 1099 1098 ret = NET_XMIT_DROP; 1100 1099 goto out_unlock; 1101 1100 } 1102 - 1103 - /* Get routing info from the tunnel socket */ 1104 - skb_dst_drop(skb); 1105 - skb_dst_set(skb, sk_dst_check(sk, 0)); 1106 1101 1107 1102 inet = inet_sk(sk); 1108 1103 fl = &inet->cork.fl;
+7 -3
net/llc/af_llc.c
··· 273 273 274 274 if (!sock_flag(sk, SOCK_ZAPPED)) 275 275 goto out; 276 + if (!addr->sllc_arphrd) 277 + addr->sllc_arphrd = ARPHRD_ETHER; 278 + if (addr->sllc_arphrd != ARPHRD_ETHER) 279 + goto out; 276 280 rc = -ENODEV; 277 281 if (sk->sk_bound_dev_if) { 278 282 llc->dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if); ··· 332 328 if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr))) 333 329 goto out; 334 330 rc = -EAFNOSUPPORT; 335 - if (unlikely(addr->sllc_family != AF_LLC)) 331 + if (!addr->sllc_arphrd) 332 + addr->sllc_arphrd = ARPHRD_ETHER; 333 + if (unlikely(addr->sllc_family != AF_LLC || addr->sllc_arphrd != ARPHRD_ETHER)) 336 334 goto out; 337 335 dprintk("%s: binding %02X\n", __func__, addr->sllc_sap); 338 336 rc = -ENODEV; ··· 342 336 if (sk->sk_bound_dev_if) { 343 337 llc->dev = dev_get_by_index_rcu(&init_net, sk->sk_bound_dev_if); 344 338 if (llc->dev) { 345 - if (!addr->sllc_arphrd) 346 - addr->sllc_arphrd = llc->dev->type; 347 339 if (is_zero_ether_addr(addr->sllc_mac)) 348 340 memcpy(addr->sllc_mac, llc->dev->dev_addr, 349 341 IFHWADDRLEN);
+2 -5
net/mac80211/mesh_hwmp.c
··· 1105 1105 ttl, lifetime, 0, ifmsh->preq_id++, sdata); 1106 1106 1107 1107 spin_lock_bh(&mpath->state_lock); 1108 - if (mpath->flags & MESH_PATH_DELETED) { 1109 - spin_unlock_bh(&mpath->state_lock); 1110 - goto enddiscovery; 1111 - } 1112 - mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout); 1108 + if (!(mpath->flags & MESH_PATH_DELETED)) 1109 + mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout); 1113 1110 spin_unlock_bh(&mpath->state_lock); 1114 1111 1115 1112 enddiscovery:
+26
net/mac80211/rx.c
··· 2396 2396 2397 2397 static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc) 2398 2398 { 2399 + struct ieee80211_hdr *hdr = (void *)rx->skb->data; 2399 2400 struct sk_buff *skb = rx->skb; 2400 2401 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2401 2402 ··· 2407 2406 if (status->flag & RX_FLAG_DECRYPTED) 2408 2407 return 0; 2409 2408 2409 + /* check mesh EAPOL frames first */ 2410 + if (unlikely(rx->sta && ieee80211_vif_is_mesh(&rx->sdata->vif) && 2411 + ieee80211_is_data(fc))) { 2412 + struct ieee80211s_hdr *mesh_hdr; 2413 + u16 hdr_len = ieee80211_hdrlen(fc); 2414 + u16 ethertype_offset; 2415 + __be16 ethertype; 2416 + 2417 + if (!ether_addr_equal(hdr->addr1, rx->sdata->vif.addr)) 2418 + goto drop_check; 2419 + 2420 + /* make sure fixed part of mesh header is there, also checks skb len */ 2421 + if (!pskb_may_pull(rx->skb, hdr_len + 6)) 2422 + goto drop_check; 2423 + 2424 + mesh_hdr = (struct ieee80211s_hdr *)(skb->data + hdr_len); 2425 + ethertype_offset = hdr_len + ieee80211_get_mesh_hdrlen(mesh_hdr) + 2426 + sizeof(rfc1042_header); 2427 + 2428 + if (skb_copy_bits(rx->skb, ethertype_offset, &ethertype, 2) == 0 && 2429 + ethertype == rx->sdata->control_port_protocol) 2430 + return 0; 2431 + } 2432 + 2433 + drop_check: 2410 2434 /* Drop unencrypted frames if key is set. */ 2411 2435 if (unlikely(!ieee80211_has_protected(fc) && 2412 2436 !ieee80211_is_any_nullfunc(fc) &&
+15 -7
net/mac80211/status.c
··· 639 639 u64 cookie = IEEE80211_SKB_CB(skb)->ack.cookie; 640 640 struct ieee80211_sub_if_data *sdata; 641 641 struct ieee80211_hdr *hdr = (void *)skb->data; 642 + __be16 ethertype = 0; 643 + 644 + if (skb->len >= ETH_HLEN && skb->protocol == cpu_to_be16(ETH_P_802_3)) 645 + skb_copy_bits(skb, 2 * ETH_ALEN, &ethertype, ETH_TLEN); 642 646 643 647 rcu_read_lock(); 644 648 sdata = ieee80211_sdata_from_skb(local, skb); 645 649 if (sdata) { 646 - if (ieee80211_is_any_nullfunc(hdr->frame_control)) 650 + if (ethertype == sdata->control_port_protocol || 651 + ethertype == cpu_to_be16(ETH_P_PREAUTH)) 652 + cfg80211_control_port_tx_status(&sdata->wdev, 653 + cookie, 654 + skb->data, 655 + skb->len, 656 + acked, 657 + GFP_ATOMIC); 658 + else if (ieee80211_is_any_nullfunc(hdr->frame_control)) 647 659 cfg80211_probe_status(sdata->dev, hdr->addr1, 648 660 cookie, acked, 649 661 info->status.ack_signal, ··· 666 654 skb->data, skb->len, 667 655 acked, GFP_ATOMIC); 668 656 else 669 - cfg80211_control_port_tx_status(&sdata->wdev, 670 - cookie, 671 - skb->data, 672 - skb->len, 673 - acked, 674 - GFP_ATOMIC); 657 + pr_warn("Unknown status report in ack skb\n"); 658 + 675 659 } 676 660 rcu_read_unlock(); 677 661
+6 -2
net/mac80211/tx.c
··· 3996 3996 skb_list_walk_safe(skb, skb, next) { 3997 3997 skb_mark_not_on_list(skb); 3998 3998 3999 + if (skb->protocol == sdata->control_port_protocol) 4000 + ctrl_flags |= IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP; 4001 + 3999 4002 skb = ieee80211_build_hdr(sdata, skb, info_flags, 4000 4003 sta, ctrl_flags, cookie); 4001 4004 if (IS_ERR(skb)) { ··· 4209 4206 (!sta || !test_sta_flag(sta, WLAN_STA_TDLS_PEER))) 4210 4207 ra = sdata->u.mgd.bssid; 4211 4208 4212 - if (!is_valid_ether_addr(ra)) 4209 + if (is_zero_ether_addr(ra)) 4213 4210 goto out_free; 4214 4211 4215 4212 multicast = is_multicast_ether_addr(ra); ··· 5374 5371 return -EINVAL; 5375 5372 5376 5373 if (proto == sdata->control_port_protocol) 5377 - ctrl_flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO; 5374 + ctrl_flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO | 5375 + IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP; 5378 5376 5379 5377 if (unencrypted) 5380 5378 flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+3 -3
net/mptcp/options.c
··· 449 449 } 450 450 451 451 static void mptcp_write_data_fin(struct mptcp_subflow_context *subflow, 452 - struct mptcp_ext *ext) 452 + struct sk_buff *skb, struct mptcp_ext *ext) 453 453 { 454 - if (!ext->use_map) { 454 + if (!ext->use_map || !skb->len) { 455 455 /* RFC6824 requires a DSS mapping with specific values 456 456 * if DATA_FIN is set but no data payload is mapped 457 457 */ ··· 503 503 opts->ext_copy = *mpext; 504 504 505 505 if (skb && tcp_fin && subflow->data_fin_tx_enable) 506 - mptcp_write_data_fin(subflow, &opts->ext_copy); 506 + mptcp_write_data_fin(subflow, skb, &opts->ext_copy); 507 507 ret = true; 508 508 } 509 509
+1 -1
net/netfilter/ipset/ip_set_bitmap_ip.c
··· 326 326 set->variant = &bitmap_ip; 327 327 if (!init_map_ip(set, map, first_ip, last_ip, 328 328 elements, hosts, netmask)) { 329 - kfree(map); 329 + ip_set_free(map); 330 330 return -ENOMEM; 331 331 } 332 332 if (tb[IPSET_ATTR_TIMEOUT]) {
+1 -1
net/netfilter/ipset/ip_set_bitmap_ipmac.c
··· 363 363 map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long); 364 364 set->variant = &bitmap_ipmac; 365 365 if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) { 366 - kfree(map); 366 + ip_set_free(map); 367 367 return -ENOMEM; 368 368 } 369 369 if (tb[IPSET_ATTR_TIMEOUT]) {
+1 -1
net/netfilter/ipset/ip_set_bitmap_port.c
··· 274 274 map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long); 275 275 set->variant = &bitmap_port; 276 276 if (!init_map_port(set, map, first_port, last_port)) { 277 - kfree(map); 277 + ip_set_free(map); 278 278 return -ENOMEM; 279 279 } 280 280 if (tb[IPSET_ATTR_TIMEOUT]) {
+2 -2
net/netfilter/ipset/ip_set_hash_gen.h
··· 682 682 } 683 683 t->hregion = ip_set_alloc(ahash_sizeof_regions(htable_bits)); 684 684 if (!t->hregion) { 685 - kfree(t); 685 + ip_set_free(t); 686 686 ret = -ENOMEM; 687 687 goto out; 688 688 } ··· 1533 1533 } 1534 1534 t->hregion = ip_set_alloc(ahash_sizeof_regions(hbits)); 1535 1535 if (!t->hregion) { 1536 - kfree(t); 1536 + ip_set_free(t); 1537 1537 kfree(h); 1538 1538 return -ENOMEM; 1539 1539 }
+8 -4
net/netfilter/ipvs/ip_vs_sync.c
··· 1717 1717 { 1718 1718 struct ip_vs_sync_thread_data *tinfo = data; 1719 1719 struct netns_ipvs *ipvs = tinfo->ipvs; 1720 + struct sock *sk = tinfo->sock->sk; 1721 + struct udp_sock *up = udp_sk(sk); 1720 1722 int len; 1721 1723 1722 1724 pr_info("sync thread started: state = BACKUP, mcast_ifn = %s, " ··· 1726 1724 ipvs->bcfg.mcast_ifn, ipvs->bcfg.syncid, tinfo->id); 1727 1725 1728 1726 while (!kthread_should_stop()) { 1729 - wait_event_interruptible(*sk_sleep(tinfo->sock->sk), 1730 - !skb_queue_empty(&tinfo->sock->sk->sk_receive_queue) 1731 - || kthread_should_stop()); 1727 + wait_event_interruptible(*sk_sleep(sk), 1728 + !skb_queue_empty_lockless(&sk->sk_receive_queue) || 1729 + !skb_queue_empty_lockless(&up->reader_queue) || 1730 + kthread_should_stop()); 1732 1731 1733 1732 /* do we have data now? */ 1734 - while (!skb_queue_empty(&(tinfo->sock->sk->sk_receive_queue))) { 1733 + while (!skb_queue_empty_lockless(&sk->sk_receive_queue) || 1734 + !skb_queue_empty_lockless(&up->reader_queue)) { 1735 1735 len = ip_vs_receive(tinfo->sock, tinfo->buf, 1736 1736 ipvs->bcfg.sync_maxlen); 1737 1737 if (len <= 0) {
+2
net/netfilter/nf_conntrack_core.c
··· 2158 2158 err = __nf_conntrack_update(net, skb, ct, ctinfo); 2159 2159 if (err < 0) 2160 2160 return err; 2161 + 2162 + ct = nf_ct_get(skb, &ctinfo); 2161 2163 } 2162 2164 2163 2165 return nf_confirm_cthelper(skb, ct, ctinfo);
+14 -27
net/netfilter/nf_tables_api.c
··· 188 188 nf_unregister_net_hook(net, &hook->ops); 189 189 } 190 190 191 - static int nft_register_basechain_hooks(struct net *net, int family, 192 - struct nft_base_chain *basechain) 193 - { 194 - if (family == NFPROTO_NETDEV) 195 - return nft_netdev_register_hooks(net, &basechain->hook_list); 196 - 197 - return nf_register_net_hook(net, &basechain->ops); 198 - } 199 - 200 - static void nft_unregister_basechain_hooks(struct net *net, int family, 201 - struct nft_base_chain *basechain) 202 - { 203 - if (family == NFPROTO_NETDEV) 204 - nft_netdev_unregister_hooks(net, &basechain->hook_list); 205 - else 206 - nf_unregister_net_hook(net, &basechain->ops); 207 - } 208 - 209 191 static int nf_tables_register_hook(struct net *net, 210 192 const struct nft_table *table, 211 193 struct nft_chain *chain) ··· 205 223 if (basechain->type->ops_register) 206 224 return basechain->type->ops_register(net, ops); 207 225 208 - return nft_register_basechain_hooks(net, table->family, basechain); 226 + if (table->family == NFPROTO_NETDEV) 227 + return nft_netdev_register_hooks(net, &basechain->hook_list); 228 + 229 + return nf_register_net_hook(net, &basechain->ops); 209 230 } 210 231 211 232 static void nf_tables_unregister_hook(struct net *net, ··· 227 242 if (basechain->type->ops_unregister) 228 243 return basechain->type->ops_unregister(net, ops); 229 244 230 - nft_unregister_basechain_hooks(net, table->family, basechain); 245 + if (table->family == NFPROTO_NETDEV) 246 + nft_netdev_unregister_hooks(net, &basechain->hook_list); 247 + else 248 + nf_unregister_net_hook(net, &basechain->ops); 231 249 } 232 250 233 251 static int nft_trans_table_add(struct nft_ctx *ctx, int msg_type) ··· 820 832 if (cnt && i++ == cnt) 821 833 break; 822 834 823 - nft_unregister_basechain_hooks(net, table->family, 824 - nft_base_chain(chain)); 835 + nf_tables_unregister_hook(net, table, chain); 825 836 } 826 837 } 827 838 ··· 835 848 if (!nft_is_base_chain(chain)) 836 849 continue; 837 850 838 - err = 
nft_register_basechain_hooks(net, table->family, 839 - nft_base_chain(chain)); 851 + err = nf_tables_register_hook(net, table, chain); 840 852 if (err < 0) 841 853 goto err_register_hooks; 842 854 ··· 880 894 nft_trans_table_enable(trans) = false; 881 895 } else if (!(flags & NFT_TABLE_F_DORMANT) && 882 896 ctx->table->flags & NFT_TABLE_F_DORMANT) { 897 + ctx->table->flags &= ~NFT_TABLE_F_DORMANT; 883 898 ret = nf_tables_table_enable(ctx->net, ctx->table); 884 - if (ret >= 0) { 885 - ctx->table->flags &= ~NFT_TABLE_F_DORMANT; 899 + if (ret >= 0) 886 900 nft_trans_table_enable(trans) = true; 887 - } 901 + else 902 + ctx->table->flags |= NFT_TABLE_F_DORMANT; 888 903 } 889 904 if (ret < 0) 890 905 goto err;
+13 -84
net/netlink/genetlink.c
··· 351 351 start = end = GENL_ID_VFS_DQUOT; 352 352 } 353 353 354 - if (family->maxattr && !family->parallel_ops) { 355 - family->attrbuf = kmalloc_array(family->maxattr + 1, 356 - sizeof(struct nlattr *), 357 - GFP_KERNEL); 358 - if (family->attrbuf == NULL) { 359 - err = -ENOMEM; 360 - goto errout_locked; 361 - } 362 - } else 363 - family->attrbuf = NULL; 364 - 365 354 family->id = idr_alloc_cyclic(&genl_fam_idr, family, 366 355 start, end + 1, GFP_KERNEL); 367 356 if (family->id < 0) { 368 357 err = family->id; 369 - goto errout_free; 358 + goto errout_locked; 370 359 } 371 360 372 361 err = genl_validate_assign_mc_groups(family); ··· 374 385 375 386 errout_remove: 376 387 idr_remove(&genl_fam_idr, family->id); 377 - errout_free: 378 - kfree(family->attrbuf); 379 388 errout_locked: 380 389 genl_unlock_all(); 381 390 return err; ··· 405 418 wait_event(genl_sk_destructing_waitq, 406 419 atomic_read(&genl_sk_destructing_cnt) == 0); 407 420 genl_unlock(); 408 - 409 - kfree(family->attrbuf); 410 421 411 422 genl_ctrl_event(CTRL_CMD_DELFAMILY, family, NULL, 0); 412 423 ··· 470 485 if (!family->maxattr) 471 486 return NULL; 472 487 473 - if (family->parallel_ops) { 474 - attrbuf = kmalloc_array(family->maxattr + 1, 475 - sizeof(struct nlattr *), GFP_KERNEL); 476 - if (!attrbuf) 477 - return ERR_PTR(-ENOMEM); 478 - } else { 479 - attrbuf = family->attrbuf; 480 - } 488 + attrbuf = kmalloc_array(family->maxattr + 1, 489 + sizeof(struct nlattr *), GFP_KERNEL); 490 + if (!attrbuf) 491 + return ERR_PTR(-ENOMEM); 481 492 482 493 err = __nlmsg_parse(nlh, hdrlen, attrbuf, family->maxattr, 483 494 family->policy, validate, extack); 484 495 if (err) { 485 - if (family->parallel_ops) 486 - kfree(attrbuf); 496 + kfree(attrbuf); 487 497 return ERR_PTR(err); 488 498 } 489 499 return attrbuf; 490 500 } 491 501 492 - static void genl_family_rcv_msg_attrs_free(const struct genl_family *family, 493 - struct nlattr **attrbuf) 502 + static void genl_family_rcv_msg_attrs_free(struct nlattr 
**attrbuf) 494 503 { 495 - if (family->parallel_ops) 496 - kfree(attrbuf); 504 + kfree(attrbuf); 497 505 } 498 506 499 507 struct genl_start_context { ··· 520 542 no_attrs: 521 543 info = genl_dumpit_info_alloc(); 522 544 if (!info) { 523 - genl_family_rcv_msg_attrs_free(ctx->family, attrs); 545 + genl_family_rcv_msg_attrs_free(attrs); 524 546 return -ENOMEM; 525 547 } 526 548 info->family = ctx->family; ··· 537 559 } 538 560 539 561 if (rc) { 540 - genl_family_rcv_msg_attrs_free(info->family, info->attrs); 562 + genl_family_rcv_msg_attrs_free(info->attrs); 541 563 genl_dumpit_info_free(info); 542 564 cb->data = NULL; 543 565 } ··· 566 588 rc = ops->done(cb); 567 589 genl_unlock(); 568 590 } 569 - genl_family_rcv_msg_attrs_free(info->family, info->attrs); 591 + genl_family_rcv_msg_attrs_free(info->attrs); 570 592 genl_dumpit_info_free(info); 571 593 return rc; 572 594 } ··· 579 601 580 602 if (ops->done) 581 603 rc = ops->done(cb); 582 - genl_family_rcv_msg_attrs_free(info->family, info->attrs); 604 + genl_family_rcv_msg_attrs_free(info->attrs); 583 605 genl_dumpit_info_free(info); 584 606 return rc; 585 607 } ··· 672 694 family->post_doit(ops, skb, &info); 673 695 674 696 out: 675 - genl_family_rcv_msg_attrs_free(family, attrbuf); 697 + genl_family_rcv_msg_attrs_free(attrbuf); 676 698 677 699 return err; 678 700 } ··· 1144 1166 .netnsok = true, 1145 1167 }; 1146 1168 1147 - static int genl_bind(struct net *net, int group) 1148 - { 1149 - struct genl_family *f; 1150 - int err = -ENOENT; 1151 - unsigned int id; 1152 - 1153 - down_read(&cb_lock); 1154 - 1155 - idr_for_each_entry(&genl_fam_idr, f, id) { 1156 - if (group >= f->mcgrp_offset && 1157 - group < f->mcgrp_offset + f->n_mcgrps) { 1158 - int fam_grp = group - f->mcgrp_offset; 1159 - 1160 - if (!f->netnsok && net != &init_net) 1161 - err = -ENOENT; 1162 - else if (f->mcast_bind) 1163 - err = f->mcast_bind(net, fam_grp); 1164 - else 1165 - err = 0; 1166 - break; 1167 - } 1168 - } 1169 - up_read(&cb_lock); 1170 - 
1171 - return err; 1172 - } 1173 - 1174 - static void genl_unbind(struct net *net, int group) 1175 - { 1176 - struct genl_family *f; 1177 - unsigned int id; 1178 - 1179 - down_read(&cb_lock); 1180 - 1181 - idr_for_each_entry(&genl_fam_idr, f, id) { 1182 - if (group >= f->mcgrp_offset && 1183 - group < f->mcgrp_offset + f->n_mcgrps) { 1184 - int fam_grp = group - f->mcgrp_offset; 1185 - 1186 - if (f->mcast_unbind) 1187 - f->mcast_unbind(net, fam_grp); 1188 - break; 1189 - } 1190 - } 1191 - up_read(&cb_lock); 1192 - } 1193 - 1194 1169 static int __net_init genl_pernet_init(struct net *net) 1195 1170 { 1196 1171 struct netlink_kernel_cfg cfg = { 1197 1172 .input = genl_rcv, 1198 1173 .flags = NL_CFG_F_NONROOT_RECV, 1199 - .bind = genl_bind, 1200 - .unbind = genl_unbind, 1201 1174 }; 1202 1175 1203 1176 /* we'll bump the group number right afterwards */
+4 -1
net/nfc/nci/core.c
··· 1228 1228 1229 1229 rc = nfc_register_device(ndev->nfc_dev); 1230 1230 if (rc) 1231 - goto destroy_rx_wq_exit; 1231 + goto destroy_tx_wq_exit; 1232 1232 1233 1233 goto exit; 1234 + 1235 + destroy_tx_wq_exit: 1236 + destroy_workqueue(ndev->tx_wq); 1234 1237 1235 1238 destroy_rx_wq_exit: 1236 1239 destroy_workqueue(ndev->rx_wq);
+9 -2
net/qrtr/qrtr.c
··· 166 166 { 167 167 struct qrtr_node *node = container_of(kref, struct qrtr_node, ref); 168 168 struct radix_tree_iter iter; 169 + struct qrtr_tx_flow *flow; 169 170 unsigned long flags; 170 171 void __rcu **slot; 171 172 ··· 182 181 183 182 /* Free tx flow counters */ 184 183 radix_tree_for_each_slot(slot, &node->qrtr_tx_flow, &iter, 0) { 184 + flow = *slot; 185 185 radix_tree_iter_delete(&node->qrtr_tx_flow, &iter, slot); 186 - kfree(*slot); 186 + kfree(flow); 187 187 } 188 188 kfree(node); 189 189 } ··· 429 427 unsigned int ver; 430 428 size_t hdrlen; 431 429 432 - if (len & 3) 430 + if (len == 0 || len & 3) 433 431 return -EINVAL; 434 432 435 433 skb = netdev_alloc_skb(NULL, len); ··· 443 441 444 442 switch (ver) { 445 443 case QRTR_PROTO_VER_1: 444 + if (len < sizeof(*v1)) 445 + goto err; 446 446 v1 = data; 447 447 hdrlen = sizeof(*v1); 448 448 ··· 458 454 size = le32_to_cpu(v1->size); 459 455 break; 460 456 case QRTR_PROTO_VER_2: 457 + if (len < sizeof(*v2)) 458 + goto err; 461 459 v2 = data; 462 460 hdrlen = sizeof(*v2) + v2->optlen; 463 461 ··· 1180 1174 sk->sk_state_change(sk); 1181 1175 1182 1176 sock_set_flag(sk, SOCK_DEAD); 1177 + sock_orphan(sk); 1183 1178 sock->sk = NULL; 1184 1179 1185 1180 if (!sock_flag(sk, SOCK_ZAPPED))
+11
net/rds/connection.c
··· 905 905 } 906 906 EXPORT_SYMBOL_GPL(rds_conn_path_connect_if_down); 907 907 908 + /* Check connectivity of all paths 909 + */ 910 + void rds_check_all_paths(struct rds_connection *conn) 911 + { 912 + int i = 0; 913 + 914 + do { 915 + rds_conn_path_connect_if_down(&conn->c_path[i]); 916 + } while (++i < conn->c_npaths); 917 + } 918 + 908 919 void rds_conn_connect_if_down(struct rds_connection *conn) 909 920 { 910 921 WARN_ON(conn->c_trans->t_mp_capable);
+7
net/rds/rds.h
··· 778 778 void rds_conn_path_drop(struct rds_conn_path *cpath, bool destroy); 779 779 void rds_conn_connect_if_down(struct rds_connection *conn); 780 780 void rds_conn_path_connect_if_down(struct rds_conn_path *cp); 781 + void rds_check_all_paths(struct rds_connection *conn); 781 782 void rds_for_each_conn_info(struct socket *sock, unsigned int len, 782 783 struct rds_info_iterator *iter, 783 784 struct rds_info_lengths *lens, ··· 821 820 rds_conn_path_up(struct rds_conn_path *cp) 822 821 { 823 822 return atomic_read(&cp->cp_state) == RDS_CONN_UP; 823 + } 824 + 825 + static inline int 826 + rds_conn_path_down(struct rds_conn_path *cp) 827 + { 828 + return atomic_read(&cp->cp_state) == RDS_CONN_DOWN; 824 829 } 825 830 826 831 static inline int
+2 -1
net/rds/send.c
··· 1340 1340 goto out; 1341 1341 } 1342 1342 1343 - rds_conn_path_connect_if_down(cpath); 1343 + if (rds_conn_path_down(cpath)) 1344 + rds_check_all_paths(conn); 1344 1345 1345 1346 ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs); 1346 1347 if (ret) {
+1 -1
net/rxrpc/recvmsg.c
··· 543 543 list_empty(&rx->recvmsg_q) && 544 544 rx->sk.sk_state != RXRPC_SERVER_LISTENING) { 545 545 release_sock(&rx->sk); 546 - return -ENODATA; 546 + return -EAGAIN; 547 547 } 548 548 549 549 if (list_empty(&rx->recvmsg_q)) {
+1 -1
net/rxrpc/sendmsg.c
··· 304 304 /* this should be in poll */ 305 305 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); 306 306 307 - if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) 307 + if (sk->sk_shutdown & SEND_SHUTDOWN) 308 308 return -EPIPE; 309 309 310 310 more = msg->msg_flags & MSG_MORE;
+6 -3
net/sched/act_connmark.c
··· 43 43 tcf_lastuse_update(&ca->tcf_tm); 44 44 bstats_update(&ca->tcf_bstats, skb); 45 45 46 - if (skb->protocol == htons(ETH_P_IP)) { 46 + switch (skb_protocol(skb, true)) { 47 + case htons(ETH_P_IP): 47 48 if (skb->len < sizeof(struct iphdr)) 48 49 goto out; 49 50 50 51 proto = NFPROTO_IPV4; 51 - } else if (skb->protocol == htons(ETH_P_IPV6)) { 52 + break; 53 + case htons(ETH_P_IPV6): 52 54 if (skb->len < sizeof(struct ipv6hdr)) 53 55 goto out; 54 56 55 57 proto = NFPROTO_IPV6; 56 - } else { 58 + break; 59 + default: 57 60 goto out; 58 61 } 59 62
+1 -1
net/sched/act_csum.c
··· 587 587 goto drop; 588 588 589 589 update_flags = params->update_flags; 590 - protocol = tc_skb_protocol(skb); 590 + protocol = skb_protocol(skb, false); 591 591 again: 592 592 switch (protocol) { 593 593 case cpu_to_be16(ETH_P_IP):
+20 -7
net/sched/act_ct.c
··· 624 624 { 625 625 u8 family = NFPROTO_UNSPEC; 626 626 627 - switch (skb->protocol) { 627 + switch (skb_protocol(skb, true)) { 628 628 case htons(ETH_P_IP): 629 629 family = NFPROTO_IPV4; 630 630 break; ··· 673 673 } 674 674 675 675 static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb, 676 - u8 family, u16 zone) 676 + u8 family, u16 zone, bool *defrag) 677 677 { 678 678 enum ip_conntrack_info ctinfo; 679 + struct qdisc_skb_cb cb; 679 680 struct nf_conn *ct; 680 681 int err = 0; 681 682 bool frag; ··· 694 693 return err; 695 694 696 695 skb_get(skb); 696 + cb = *qdisc_skb_cb(skb); 697 697 698 698 if (family == NFPROTO_IPV4) { 699 699 enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone; ··· 705 703 local_bh_enable(); 706 704 if (err && err != -EINPROGRESS) 707 705 goto out_free; 706 + 707 + if (!err) 708 + *defrag = true; 708 709 } else { /* NFPROTO_IPV6 */ 709 710 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) 710 711 enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone; ··· 716 711 err = nf_ct_frag6_gather(net, skb, user); 717 712 if (err && err != -EINPROGRESS) 718 713 goto out_free; 714 + 715 + if (!err) 716 + *defrag = true; 719 717 #else 720 718 err = -EOPNOTSUPP; 721 719 goto out_free; 722 720 #endif 723 721 } 724 722 723 + *qdisc_skb_cb(skb) = cb; 725 724 skb_clear_hash(skb); 726 725 skb->ignore_df = 1; 727 726 return err; ··· 757 748 const struct nf_nat_range2 *range, 758 749 enum nf_nat_manip_type maniptype) 759 750 { 751 + __be16 proto = skb_protocol(skb, true); 760 752 int hooknum, err = NF_ACCEPT; 761 753 762 754 /* See HOOK2MANIP(). 
*/ ··· 769 759 switch (ctinfo) { 770 760 case IP_CT_RELATED: 771 761 case IP_CT_RELATED_REPLY: 772 - if (skb->protocol == htons(ETH_P_IP) && 762 + if (proto == htons(ETH_P_IP) && 773 763 ip_hdr(skb)->protocol == IPPROTO_ICMP) { 774 764 if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo, 775 765 hooknum)) 776 766 err = NF_DROP; 777 767 goto out; 778 - } else if (IS_ENABLED(CONFIG_IPV6) && 779 - skb->protocol == htons(ETH_P_IPV6)) { 768 + } else if (IS_ENABLED(CONFIG_IPV6) && proto == htons(ETH_P_IPV6)) { 780 769 __be16 frag_off; 781 770 u8 nexthdr = ipv6_hdr(skb)->nexthdr; 782 771 int hdrlen = ipv6_skip_exthdr(skb, ··· 923 914 int nh_ofs, err, retval; 924 915 struct tcf_ct_params *p; 925 916 bool skip_add = false; 917 + bool defrag = false; 926 918 struct nf_conn *ct; 927 919 u8 family; 928 920 ··· 934 924 clear = p->ct_action & TCA_CT_ACT_CLEAR; 935 925 force = p->ct_action & TCA_CT_ACT_FORCE; 936 926 tmpl = p->tmpl; 927 + 928 + tcf_lastuse_update(&c->tcf_tm); 937 929 938 930 if (clear) { 939 931 ct = nf_ct_get(skb, &ctinfo); ··· 956 944 */ 957 945 nh_ofs = skb_network_offset(skb); 958 946 skb_pull_rcsum(skb, nh_ofs); 959 - err = tcf_ct_handle_fragments(net, skb, family, p->zone); 947 + err = tcf_ct_handle_fragments(net, skb, family, p->zone, &defrag); 960 948 if (err == -EINPROGRESS) { 961 949 retval = TC_ACT_STOLEN; 962 950 goto out; ··· 1024 1012 1025 1013 out: 1026 1014 tcf_action_update_bstats(&c->common, skb); 1015 + if (defrag) 1016 + qdisc_skb_cb(skb)->pkt_len = skb->len; 1027 1017 return retval; 1028 1018 1029 1019 drop: ··· 1564 1550 MODULE_AUTHOR("Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>"); 1565 1551 MODULE_DESCRIPTION("Connection tracking action"); 1566 1552 MODULE_LICENSE("GPL v2"); 1567 -
+6 -3
net/sched/act_ctinfo.c
··· 96 96 action = READ_ONCE(ca->tcf_action); 97 97 98 98 wlen = skb_network_offset(skb); 99 - if (tc_skb_protocol(skb) == htons(ETH_P_IP)) { 99 + switch (skb_protocol(skb, true)) { 100 + case htons(ETH_P_IP): 100 101 wlen += sizeof(struct iphdr); 101 102 if (!pskb_may_pull(skb, wlen)) 102 103 goto out; 103 104 104 105 proto = NFPROTO_IPV4; 105 - } else if (tc_skb_protocol(skb) == htons(ETH_P_IPV6)) { 106 + break; 107 + case htons(ETH_P_IPV6): 106 108 wlen += sizeof(struct ipv6hdr); 107 109 if (!pskb_may_pull(skb, wlen)) 108 110 goto out; 109 111 110 112 proto = NFPROTO_IPV6; 111 - } else { 113 + break; 114 + default: 112 115 goto out; 113 116 } 114 117
+1 -1
net/sched/act_mpls.c
··· 82 82 goto drop; 83 83 break; 84 84 case TCA_MPLS_ACT_PUSH: 85 - new_lse = tcf_mpls_get_lse(NULL, p, !eth_p_mpls(skb->protocol)); 85 + new_lse = tcf_mpls_get_lse(NULL, p, !eth_p_mpls(skb_protocol(skb, true))); 86 86 if (skb_mpls_push(skb, new_lse, p->tcfm_proto, mac_len, 87 87 skb->dev && skb->dev->type == ARPHRD_ETHER)) 88 88 goto drop;
+1 -1
net/sched/act_skbedit.c
··· 41 41 if (params->flags & SKBEDIT_F_INHERITDSFIELD) { 42 42 int wlen = skb_network_offset(skb); 43 43 44 - switch (tc_skb_protocol(skb)) { 44 + switch (skb_protocol(skb, true)) { 45 45 case htons(ETH_P_IP): 46 46 wlen += sizeof(struct iphdr); 47 47 if (!pskb_may_pull(skb, wlen))
+1 -2
net/sched/cls_api.c
··· 20 20 #include <linux/kmod.h> 21 21 #include <linux/slab.h> 22 22 #include <linux/idr.h> 23 - #include <linux/rhashtable.h> 24 23 #include <linux/jhash.h> 25 24 #include <linux/rculist.h> 26 25 #include <net/net_namespace.h> ··· 1537 1538 reclassify: 1538 1539 #endif 1539 1540 for (; tp; tp = rcu_dereference_bh(tp->next)) { 1540 - __be16 protocol = tc_skb_protocol(skb); 1541 + __be16 protocol = skb_protocol(skb, false); 1541 1542 int err; 1542 1543 1543 1544 if (tp->protocol != protocol &&
+4 -4
net/sched/cls_flow.c
··· 80 80 if (dst) 81 81 return ntohl(dst); 82 82 83 - return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb); 83 + return addr_fold(skb_dst(skb)) ^ (__force u16)skb_protocol(skb, true); 84 84 } 85 85 86 86 static u32 flow_get_proto(const struct sk_buff *skb, ··· 104 104 if (flow->ports.ports) 105 105 return ntohs(flow->ports.dst); 106 106 107 - return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb); 107 + return addr_fold(skb_dst(skb)) ^ (__force u16)skb_protocol(skb, true); 108 108 } 109 109 110 110 static u32 flow_get_iif(const struct sk_buff *skb) ··· 151 151 static u32 flow_get_nfct_src(const struct sk_buff *skb, 152 152 const struct flow_keys *flow) 153 153 { 154 - switch (tc_skb_protocol(skb)) { 154 + switch (skb_protocol(skb, true)) { 155 155 case htons(ETH_P_IP): 156 156 return ntohl(CTTUPLE(skb, src.u3.ip)); 157 157 case htons(ETH_P_IPV6): ··· 164 164 static u32 flow_get_nfct_dst(const struct sk_buff *skb, 165 165 const struct flow_keys *flow) 166 166 { 167 - switch (tc_skb_protocol(skb)) { 167 + switch (skb_protocol(skb, true)) { 168 168 case htons(ETH_P_IP): 169 169 return ntohl(CTTUPLE(skb, dst.u3.ip)); 170 170 case htons(ETH_P_IPV6):
+1 -1
net/sched/cls_flower.c
··· 313 313 /* skb_flow_dissect() does not set n_proto in case an unknown 314 314 * protocol, so do it rather here. 315 315 */ 316 - skb_key.basic.n_proto = skb->protocol; 316 + skb_key.basic.n_proto = skb_protocol(skb, false); 317 317 skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key); 318 318 skb_flow_dissect_ct(skb, &mask->dissector, &skb_key, 319 319 fl_ct_info_to_flower_map,
+1 -1
net/sched/em_ipset.c
··· 59 59 }; 60 60 int ret, network_offset; 61 61 62 - switch (tc_skb_protocol(skb)) { 62 + switch (skb_protocol(skb, true)) { 63 63 case htons(ETH_P_IP): 64 64 state.pf = NFPROTO_IPV4; 65 65 if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
+1 -1
net/sched/em_ipt.c
··· 212 212 struct nf_hook_state state; 213 213 int ret; 214 214 215 - switch (tc_skb_protocol(skb)) { 215 + switch (skb_protocol(skb, true)) { 216 216 case htons(ETH_P_IP): 217 217 if (!pskb_network_may_pull(skb, sizeof(struct iphdr))) 218 218 return 0;
+1 -1
net/sched/em_meta.c
··· 195 195 META_COLLECTOR(int_protocol) 196 196 { 197 197 /* Let userspace take care of the byte ordering */ 198 - dst->value = tc_skb_protocol(skb); 198 + dst->value = skb_protocol(skb, false); 199 199 } 200 200 201 201 META_COLLECTOR(int_pkttype)
+4 -4
net/sched/sch_atm.c
··· 553 553 if (!p->link.q) 554 554 p->link.q = &noop_qdisc; 555 555 pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q); 556 + p->link.vcc = NULL; 557 + p->link.sock = NULL; 558 + p->link.common.classid = sch->handle; 559 + p->link.ref = 1; 556 560 557 561 err = tcf_block_get(&p->link.block, &p->link.filter_list, sch, 558 562 extack); 559 563 if (err) 560 564 return err; 561 565 562 - p->link.vcc = NULL; 563 - p->link.sock = NULL; 564 - p->link.common.classid = sch->handle; 565 - p->link.ref = 1; 566 566 tasklet_init(&p->task, sch_atm_dequeue, (unsigned long)sch); 567 567 return 0; 568 568 }
+2 -2
net/sched/sch_cake.c
··· 592 592 bool rev = !skb->_nfct, upd = false; 593 593 __be32 ip; 594 594 595 - if (tc_skb_protocol(skb) != htons(ETH_P_IP)) 595 + if (skb_protocol(skb, true) != htons(ETH_P_IP)) 596 596 return false; 597 597 598 598 if (!nf_ct_get_tuple_skb(&tuple, skb)) ··· 1557 1557 u16 *buf, buf_; 1558 1558 u8 dscp; 1559 1559 1560 - switch (tc_skb_protocol(skb)) { 1560 + switch (skb_protocol(skb, true)) { 1561 1561 case htons(ETH_P_IP): 1562 1562 buf = skb_header_pointer(skb, offset, sizeof(buf_), &buf_); 1563 1563 if (unlikely(!buf))
+3 -3
net/sched/sch_dsmark.c
··· 210 210 if (p->set_tc_index) { 211 211 int wlen = skb_network_offset(skb); 212 212 213 - switch (tc_skb_protocol(skb)) { 213 + switch (skb_protocol(skb, true)) { 214 214 case htons(ETH_P_IP): 215 215 wlen += sizeof(struct iphdr); 216 216 if (!pskb_may_pull(skb, wlen) || ··· 303 303 index = skb->tc_index & (p->indices - 1); 304 304 pr_debug("index %d->%d\n", skb->tc_index, index); 305 305 306 - switch (tc_skb_protocol(skb)) { 306 + switch (skb_protocol(skb, true)) { 307 307 case htons(ETH_P_IP): 308 308 ipv4_change_dsfield(ip_hdr(skb), p->mv[index].mask, 309 309 p->mv[index].value); ··· 320 320 */ 321 321 if (p->mv[index].mask != 0xff || p->mv[index].value) 322 322 pr_warn("%s: unsupported protocol %d\n", 323 - __func__, ntohs(tc_skb_protocol(skb))); 323 + __func__, ntohs(skb_protocol(skb, true))); 324 324 break; 325 325 } 326 326
+1 -1
net/sched/sch_teql.c
··· 239 239 char haddr[MAX_ADDR_LEN]; 240 240 241 241 neigh_ha_snapshot(haddr, n, dev); 242 - err = dev_hard_header(skb, dev, ntohs(tc_skb_protocol(skb)), 242 + err = dev_hard_header(skb, dev, ntohs(skb_protocol(skb, false)), 243 243 haddr, NULL, skb->len); 244 244 245 245 if (err < 0)
+18 -9
net/sctp/stream.c
··· 22 22 #include <net/sctp/sm.h> 23 23 #include <net/sctp/stream_sched.h> 24 24 25 - /* Migrates chunks from stream queues to new stream queues if needed, 26 - * but not across associations. Also, removes those chunks to streams 27 - * higher than the new max. 28 - */ 29 - static void sctp_stream_outq_migrate(struct sctp_stream *stream, 30 - struct sctp_stream *new, __u16 outcnt) 25 + static void sctp_stream_shrink_out(struct sctp_stream *stream, __u16 outcnt) 31 26 { 32 27 struct sctp_association *asoc; 33 28 struct sctp_chunk *ch, *temp; 34 29 struct sctp_outq *outq; 35 - int i; 36 30 37 31 asoc = container_of(stream, struct sctp_association, stream); 38 32 outq = &asoc->outqueue; ··· 50 56 51 57 sctp_chunk_free(ch); 52 58 } 59 + } 60 + 61 + /* Migrates chunks from stream queues to new stream queues if needed, 62 + * but not across associations. Also, removes those chunks to streams 63 + * higher than the new max. 64 + */ 65 + static void sctp_stream_outq_migrate(struct sctp_stream *stream, 66 + struct sctp_stream *new, __u16 outcnt) 67 + { 68 + int i; 69 + 70 + if (stream->outcnt > outcnt) 71 + sctp_stream_shrink_out(stream, outcnt); 53 72 54 73 if (new) { 55 74 /* Here we actually move the old ext stuff into the new ··· 1044 1037 nums = ntohs(addstrm->number_of_streams); 1045 1038 number = stream->outcnt - nums; 1046 1039 1047 - if (result == SCTP_STRRESET_PERFORMED) 1040 + if (result == SCTP_STRRESET_PERFORMED) { 1048 1041 for (i = number; i < stream->outcnt; i++) 1049 1042 SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN; 1050 - else 1043 + } else { 1044 + sctp_stream_shrink_out(stream, number); 1051 1045 stream->outcnt = number; 1046 + } 1052 1047 1053 1048 *evp = sctp_ulpevent_make_stream_change_event(asoc, flags, 1054 1049 0, nums, GFP_ATOMIC);
+8 -4
net/smc/af_smc.c
··· 126 126 127 127 static void smc_restore_fallback_changes(struct smc_sock *smc) 128 128 { 129 - smc->clcsock->file->private_data = smc->sk.sk_socket; 130 - smc->clcsock->file = NULL; 129 + if (smc->clcsock->file) { /* non-accepted sockets have no file yet */ 130 + smc->clcsock->file->private_data = smc->sk.sk_socket; 131 + smc->clcsock->file = NULL; 132 + } 131 133 } 132 134 133 135 static int __smc_release(struct smc_sock *smc) ··· 354 352 */ 355 353 mutex_lock(&lgr->llc_conf_mutex); 356 354 for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { 357 - if (lgr->lnk[i].state != SMC_LNK_ACTIVE) 355 + if (!smc_link_active(&lgr->lnk[i])) 358 356 continue; 359 357 rc = smcr_link_reg_rmb(&lgr->lnk[i], rmb_desc); 360 358 if (rc) ··· 634 632 for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { 635 633 struct smc_link *l = &smc->conn.lgr->lnk[i]; 636 634 637 - if (l->peer_qpn == ntoh24(aclc->qpn)) { 635 + if (l->peer_qpn == ntoh24(aclc->qpn) && 636 + !memcmp(l->peer_gid, &aclc->lcl.gid, SMC_GID_SIZE) && 637 + !memcmp(l->peer_mac, &aclc->lcl.mac, sizeof(l->peer_mac))) { 638 638 link = l; 639 639 break; 640 640 }
+5 -1
net/smc/smc_cdc.c
··· 66 66 rc = smc_wr_tx_get_free_slot(link, smc_cdc_tx_handler, wr_buf, 67 67 wr_rdma_buf, 68 68 (struct smc_wr_tx_pend_priv **)pend); 69 - if (conn->killed) 69 + if (conn->killed) { 70 70 /* abnormal termination */ 71 + if (!rc) 72 + smc_wr_tx_put_slot(link, 73 + (struct smc_wr_tx_pend_priv *)pend); 71 74 rc = -EPIPE; 75 + } 72 76 return rc; 73 77 } 74 78
+32 -13
net/smc/smc_clc.c
··· 27 27 28 28 #define SMCR_CLC_ACCEPT_CONFIRM_LEN 68 29 29 #define SMCD_CLC_ACCEPT_CONFIRM_LEN 48 30 + #define SMC_CLC_RECV_BUF_LEN 100 30 31 31 32 /* eye catcher "SMCR" EBCDIC for CLC messages */ 32 33 static const char SMC_EYECATCHER[4] = {'\xe2', '\xd4', '\xc3', '\xd9'}; ··· 37 36 /* check if received message has a correct header length and contains valid 38 37 * heading and trailing eyecatchers 39 38 */ 40 - static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm) 39 + static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm, bool check_trl) 41 40 { 42 41 struct smc_clc_msg_proposal_prefix *pclc_prfx; 43 42 struct smc_clc_msg_accept_confirm *clc; ··· 50 49 return false; 51 50 switch (clcm->type) { 52 51 case SMC_CLC_PROPOSAL: 53 - if (clcm->path != SMC_TYPE_R && clcm->path != SMC_TYPE_D && 54 - clcm->path != SMC_TYPE_B) 55 - return false; 56 52 pclc = (struct smc_clc_msg_proposal *)clcm; 57 53 pclc_prfx = smc_clc_proposal_get_prefix(pclc); 58 - if (ntohs(pclc->hdr.length) != 54 + if (ntohs(pclc->hdr.length) < 59 55 sizeof(*pclc) + ntohs(pclc->iparea_offset) + 60 56 sizeof(*pclc_prfx) + 61 57 pclc_prfx->ipv6_prefixes_cnt * ··· 84 86 default: 85 87 return false; 86 88 } 87 - if (memcmp(trl->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)) && 89 + if (check_trl && 90 + memcmp(trl->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)) && 88 91 memcmp(trl->eyecatcher, SMCD_EYECATCHER, sizeof(SMCD_EYECATCHER))) 89 92 return false; 90 93 return true; ··· 275 276 struct msghdr msg = {NULL, 0}; 276 277 int reason_code = 0; 277 278 struct kvec vec = {buf, buflen}; 278 - int len, datlen; 279 + int len, datlen, recvlen; 280 + bool check_trl = true; 279 281 int krflags; 280 282 281 283 /* peek the first few bytes to determine length of data to receive ··· 320 320 } 321 321 datlen = ntohs(clcm->length); 322 322 if ((len < sizeof(struct smc_clc_msg_hdr)) || 323 - (datlen > buflen) || 324 - (clcm->version != SMC_CLC_V1) || 325 - (clcm->path != SMC_TYPE_R && 
clcm->path != SMC_TYPE_D && 326 - clcm->path != SMC_TYPE_B) || 323 + (clcm->version < SMC_CLC_V1) || 327 324 ((clcm->type != SMC_CLC_DECLINE) && 328 325 (clcm->type != expected_type))) { 329 326 smc->sk.sk_err = EPROTO; ··· 328 331 goto out; 329 332 } 330 333 334 + if (clcm->type == SMC_CLC_PROPOSAL && clcm->path == SMC_TYPE_N) 335 + reason_code = SMC_CLC_DECL_VERSMISMAT; /* just V2 offered */ 336 + 331 337 /* receive the complete CLC message */ 332 338 memset(&msg, 0, sizeof(struct msghdr)); 333 - iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, datlen); 339 + if (datlen > buflen) { 340 + check_trl = false; 341 + recvlen = buflen; 342 + } else { 343 + recvlen = datlen; 344 + } 345 + iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, recvlen); 334 346 krflags = MSG_WAITALL; 335 347 len = sock_recvmsg(smc->clcsock, &msg, krflags); 336 - if (len < datlen || !smc_clc_msg_hdr_valid(clcm)) { 348 + if (len < recvlen || !smc_clc_msg_hdr_valid(clcm, check_trl)) { 337 349 smc->sk.sk_err = EPROTO; 338 350 reason_code = -EPROTO; 339 351 goto out; 352 + } 353 + datlen -= len; 354 + while (datlen) { 355 + u8 tmp[SMC_CLC_RECV_BUF_LEN]; 356 + 357 + vec.iov_base = &tmp; 358 + vec.iov_len = SMC_CLC_RECV_BUF_LEN; 359 + /* receive remaining proposal message */ 360 + recvlen = datlen > SMC_CLC_RECV_BUF_LEN ? 361 + SMC_CLC_RECV_BUF_LEN : datlen; 362 + iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, recvlen); 363 + len = sock_recvmsg(smc->clcsock, &msg, krflags); 364 + datlen -= len; 340 365 } 341 366 if (clcm->type == SMC_CLC_DECLINE) { 342 367 struct smc_clc_msg_decline *dclc;
+2
net/smc/smc_clc.h
··· 25 25 #define SMC_CLC_V1 0x1 /* SMC version */ 26 26 #define SMC_TYPE_R 0 /* SMC-R only */ 27 27 #define SMC_TYPE_D 1 /* SMC-D only */ 28 + #define SMC_TYPE_N 2 /* neither SMC-R nor SMC-D */ 28 29 #define SMC_TYPE_B 3 /* SMC-R and SMC-D */ 29 30 #define CLC_WAIT_TIME (6 * HZ) /* max. wait time on clcsock */ 30 31 #define CLC_WAIT_TIME_SHORT HZ /* short wait time on clcsock */ ··· 47 46 #define SMC_CLC_DECL_ISMVLANERR 0x03090000 /* err to reg vlan id on ism dev */ 48 47 #define SMC_CLC_DECL_NOACTLINK 0x030a0000 /* no active smc-r link in lgr */ 49 48 #define SMC_CLC_DECL_NOSRVLINK 0x030b0000 /* SMC-R link from srv not found */ 49 + #define SMC_CLC_DECL_VERSMISMAT 0x030c0000 /* SMC version mismatch */ 50 50 #define SMC_CLC_DECL_SYNCERR 0x04000000 /* synchronization error */ 51 51 #define SMC_CLC_DECL_PEERDECL 0x05000000 /* peer declined during handshake */ 52 52 #define SMC_CLC_DECL_INTERR 0x09990000 /* internal error */
+41 -95
net/smc/smc_core.c
··· 15 15 #include <linux/workqueue.h> 16 16 #include <linux/wait.h> 17 17 #include <linux/reboot.h> 18 + #include <linux/mutex.h> 18 19 #include <net/tcp.h> 19 20 #include <net/sock.h> 20 21 #include <rdma/ib_verbs.h> ··· 45 44 static atomic_t lgr_cnt = ATOMIC_INIT(0); /* number of existing link groups */ 46 45 static DECLARE_WAIT_QUEUE_HEAD(lgrs_deleted); 47 46 48 - struct smc_ib_up_work { 49 - struct work_struct work; 50 - struct smc_link_group *lgr; 51 - struct smc_ib_device *smcibdev; 52 - u8 ibport; 53 - }; 54 - 55 47 static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb, 56 48 struct smc_buf_desc *buf_desc); 57 49 static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft); 58 50 59 - static void smc_link_up_work(struct work_struct *work); 60 51 static void smc_link_down_work(struct work_struct *work); 61 52 62 53 /* return head of link group list and its lock for a given link group */ ··· 240 247 if (smc_link_usable(lnk)) 241 248 lnk->state = SMC_LNK_INACTIVE; 242 249 } 243 - wake_up_interruptible_all(&lgr->llc_waiter); 250 + wake_up_all(&lgr->llc_msg_waiter); 251 + wake_up_all(&lgr->llc_flow_waiter); 244 252 } 245 253 246 254 static void smc_lgr_free(struct smc_link_group *lgr); ··· 318 324 319 325 get_device(&ini->ib_dev->ibdev->dev); 320 326 atomic_inc(&ini->ib_dev->lnk_cnt); 321 - lnk->state = SMC_LNK_ACTIVATING; 322 327 lnk->link_id = smcr_next_link_id(lgr); 323 328 lnk->lgr = lgr; 324 329 lnk->link_idx = link_idx; ··· 353 360 rc = smc_wr_create_link(lnk); 354 361 if (rc) 355 362 goto destroy_qp; 363 + lnk->state = SMC_LNK_ACTIVATING; 356 364 return 0; 357 365 358 366 destroy_qp: ··· 444 450 } 445 451 smc->conn.lgr = lgr; 446 452 spin_lock_bh(lgr_lock); 447 - list_add(&lgr->list, lgr_list); 453 + list_add_tail(&lgr->list, lgr_list); 448 454 spin_unlock_bh(lgr_lock); 449 455 return 0; 450 456 ··· 542 548 smc_wr_wakeup_tx_wait(from_lnk); 543 549 544 550 for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { 545 - if (lgr->lnk[i].state != 
SMC_LNK_ACTIVE || 546 - i == from_lnk->link_idx) 551 + if (!smc_link_active(&lgr->lnk[i]) || i == from_lnk->link_idx) 547 552 continue; 548 553 if (is_dev_err && from_lnk->smcibdev == lgr->lnk[i].smcibdev && 549 554 from_lnk->ibport == lgr->lnk[i].ibport) { ··· 1097 1104 sock_put(&smc->sk); /* sock_hold done by schedulers of abort_work */ 1098 1105 } 1099 1106 1100 - /* link is up - establish alternate link if applicable */ 1101 - static void smcr_link_up(struct smc_link_group *lgr, 1102 - struct smc_ib_device *smcibdev, u8 ibport) 1103 - { 1104 - struct smc_link *link = NULL; 1105 - 1106 - if (list_empty(&lgr->list) || 1107 - lgr->type == SMC_LGR_SYMMETRIC || 1108 - lgr->type == SMC_LGR_ASYMMETRIC_PEER) 1109 - return; 1110 - 1111 - if (lgr->role == SMC_SERV) { 1112 - /* trigger local add link processing */ 1113 - link = smc_llc_usable_link(lgr); 1114 - if (!link) 1115 - return; 1116 - smc_llc_srv_add_link_local(link); 1117 - } else { 1118 - /* invite server to start add link processing */ 1119 - u8 gid[SMC_GID_SIZE]; 1120 - 1121 - if (smc_ib_determine_gid(smcibdev, ibport, lgr->vlan_id, gid, 1122 - NULL)) 1123 - return; 1124 - if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) { 1125 - /* some other llc task is ongoing */ 1126 - wait_event_interruptible_timeout(lgr->llc_waiter, 1127 - (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE), 1128 - SMC_LLC_WAIT_TIME); 1129 - } 1130 - if (list_empty(&lgr->list) || 1131 - !smc_ib_port_active(smcibdev, ibport)) 1132 - return; /* lgr or device no longer active */ 1133 - link = smc_llc_usable_link(lgr); 1134 - if (!link) 1135 - return; 1136 - smc_llc_send_add_link(link, smcibdev->mac[ibport - 1], gid, 1137 - NULL, SMC_LLC_REQ); 1138 - } 1139 - } 1140 - 1141 1107 void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport) 1142 1108 { 1143 - struct smc_ib_up_work *ib_work; 1144 1109 struct smc_link_group *lgr, *n; 1145 1110 1146 1111 list_for_each_entry_safe(lgr, n, &smc_lgr_list.list, list) { 1112 + struct smc_link *link; 1113 + 
1147 1114 if (strncmp(smcibdev->pnetid[ibport - 1], lgr->pnet_id, 1148 1115 SMC_MAX_PNETID_LEN) || 1149 1116 lgr->type == SMC_LGR_SYMMETRIC || 1150 1117 lgr->type == SMC_LGR_ASYMMETRIC_PEER) 1151 1118 continue; 1152 - ib_work = kmalloc(sizeof(*ib_work), GFP_KERNEL); 1153 - if (!ib_work) 1154 - continue; 1155 - INIT_WORK(&ib_work->work, smc_link_up_work); 1156 - ib_work->lgr = lgr; 1157 - ib_work->smcibdev = smcibdev; 1158 - ib_work->ibport = ibport; 1159 - schedule_work(&ib_work->work); 1119 + 1120 + /* trigger local add link processing */ 1121 + link = smc_llc_usable_link(lgr); 1122 + if (link) 1123 + smc_llc_add_link_local(link); 1160 1124 } 1161 1125 } 1162 1126 ··· 1145 1195 if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) { 1146 1196 /* another llc task is ongoing */ 1147 1197 mutex_unlock(&lgr->llc_conf_mutex); 1148 - wait_event_interruptible_timeout(lgr->llc_waiter, 1149 - (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE), 1198 + wait_event_timeout(lgr->llc_flow_waiter, 1199 + (list_empty(&lgr->list) || 1200 + lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE), 1150 1201 SMC_LLC_WAIT_TIME); 1151 1202 mutex_lock(&lgr->llc_conf_mutex); 1152 1203 } 1153 - smc_llc_send_delete_link(to_lnk, del_link_id, SMC_LLC_REQ, true, 1154 - SMC_LLC_DEL_LOST_PATH); 1204 + if (!list_empty(&lgr->list)) { 1205 + smc_llc_send_delete_link(to_lnk, del_link_id, 1206 + SMC_LLC_REQ, true, 1207 + SMC_LLC_DEL_LOST_PATH); 1208 + smcr_link_clear(lnk, true); 1209 + } 1210 + wake_up(&lgr->llc_flow_waiter); /* wake up next waiter */ 1155 1211 } 1156 1212 } 1157 1213 ··· 1196 1240 } 1197 1241 } 1198 1242 1199 - static void smc_link_up_work(struct work_struct *work) 1200 - { 1201 - struct smc_ib_up_work *ib_work = container_of(work, 1202 - struct smc_ib_up_work, 1203 - work); 1204 - struct smc_link_group *lgr = ib_work->lgr; 1205 - 1206 - if (list_empty(&lgr->list)) 1207 - goto out; 1208 - smcr_link_up(lgr, ib_work->smcibdev, ib_work->ibport); 1209 - out: 1210 - kfree(ib_work); 1211 - } 1212 - 1213 1243 
static void smc_link_down_work(struct work_struct *work) 1214 1244 { 1215 1245 struct smc_link *link = container_of(work, struct smc_link, ··· 1204 1262 1205 1263 if (list_empty(&lgr->list)) 1206 1264 return; 1207 - wake_up_interruptible_all(&lgr->llc_waiter); 1265 + wake_up_all(&lgr->llc_msg_waiter); 1208 1266 mutex_lock(&lgr->llc_conf_mutex); 1209 1267 smcr_link_down(link); 1210 1268 mutex_unlock(&lgr->llc_conf_mutex); ··· 1268 1326 return false; 1269 1327 1270 1328 for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { 1271 - if (lgr->lnk[i].state != SMC_LNK_ACTIVE) 1329 + if (!smc_link_active(&lgr->lnk[i])) 1272 1330 continue; 1273 1331 if ((lgr->role == SMC_SERV || lgr->lnk[i].peer_qpn == clcqpn) && 1274 1332 !memcmp(lgr->lnk[i].peer_gid, &lcl->gid, SMC_GID_SIZE) && ··· 1311 1369 smcr_lgr_match(lgr, ini->ib_lcl, role, ini->ib_clcqpn)) && 1312 1370 !lgr->sync_err && 1313 1371 lgr->vlan_id == ini->vlan_id && 1314 - (role == SMC_CLNT || 1372 + (role == SMC_CLNT || ini->is_smcd || 1315 1373 lgr->conns_num < SMC_RMBS_PER_LGR_MAX)) { 1316 1374 /* link group found */ 1317 1375 ini->cln_first_contact = SMC_REUSE_CONTACT; ··· 1716 1774 1717 1775 void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn) 1718 1776 { 1719 - if (!conn->lgr || conn->lgr->is_smcd || !smc_link_usable(conn->lnk)) 1777 + if (!conn->lgr || conn->lgr->is_smcd || !smc_link_active(conn->lnk)) 1720 1778 return; 1721 1779 smc_ib_sync_sg_for_cpu(conn->lnk, conn->sndbuf_desc, DMA_TO_DEVICE); 1722 1780 } 1723 1781 1724 1782 void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn) 1725 1783 { 1726 - if (!conn->lgr || conn->lgr->is_smcd || !smc_link_usable(conn->lnk)) 1784 + if (!conn->lgr || conn->lgr->is_smcd || !smc_link_active(conn->lnk)) 1727 1785 return; 1728 1786 smc_ib_sync_sg_for_device(conn->lnk, conn->sndbuf_desc, DMA_TO_DEVICE); 1729 1787 } ··· 1735 1793 if (!conn->lgr || conn->lgr->is_smcd) 1736 1794 return; 1737 1795 for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { 1738 - if 
(!smc_link_usable(&conn->lgr->lnk[i])) 1796 + if (!smc_link_active(&conn->lgr->lnk[i])) 1739 1797 continue; 1740 1798 smc_ib_sync_sg_for_cpu(&conn->lgr->lnk[i], conn->rmb_desc, 1741 1799 DMA_FROM_DEVICE); ··· 1749 1807 if (!conn->lgr || conn->lgr->is_smcd) 1750 1808 return; 1751 1809 for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { 1752 - if (!smc_link_usable(&conn->lgr->lnk[i])) 1810 + if (!smc_link_active(&conn->lgr->lnk[i])) 1753 1811 continue; 1754 1812 smc_ib_sync_sg_for_device(&conn->lgr->lnk[i], conn->rmb_desc, 1755 1813 DMA_FROM_DEVICE); ··· 1772 1830 return rc; 1773 1831 /* create rmb */ 1774 1832 rc = __smc_buf_create(smc, is_smcd, true); 1775 - if (rc) 1833 + if (rc) { 1834 + mutex_lock(&smc->conn.lgr->sndbufs_lock); 1835 + list_del(&smc->conn.sndbuf_desc->list); 1836 + mutex_unlock(&smc->conn.lgr->sndbufs_lock); 1776 1837 smc_buf_free(smc->conn.lgr, false, smc->conn.sndbuf_desc); 1838 + } 1777 1839 return rc; 1778 1840 } 1779 1841 ··· 1901 1955 struct smc_ib_device *smcibdev; 1902 1956 struct smcd_dev *smcd; 1903 1957 1904 - spin_lock(&smc_ib_devices.lock); 1958 + mutex_lock(&smc_ib_devices.mutex); 1905 1959 list_for_each_entry(smcibdev, &smc_ib_devices.list, list) { 1906 1960 int i; 1907 1961 1908 1962 for (i = 0; i < SMC_MAX_PORTS; i++) 1909 1963 set_bit(i, smcibdev->ports_going_away); 1910 1964 } 1911 - spin_unlock(&smc_ib_devices.lock); 1965 + mutex_unlock(&smc_ib_devices.mutex); 1912 1966 1913 - spin_lock(&smcd_dev_list.lock); 1967 + mutex_lock(&smcd_dev_list.mutex); 1914 1968 list_for_each_entry(smcd, &smcd_dev_list.list, list) { 1915 1969 smcd->going_away = 1; 1916 1970 } 1917 - spin_unlock(&smcd_dev_list.lock); 1971 + mutex_unlock(&smcd_dev_list.mutex); 1918 1972 } 1919 1973 1920 1974 /* Clean up all SMC link groups */ ··· 1926 1980 1927 1981 smc_smcr_terminate_all(NULL); 1928 1982 1929 - spin_lock(&smcd_dev_list.lock); 1983 + mutex_lock(&smcd_dev_list.mutex); 1930 1984 list_for_each_entry(smcd, &smcd_dev_list.list, list) 1931 1985 
smc_smcd_terminate_all(smcd); 1932 - spin_unlock(&smcd_dev_list.lock); 1986 + mutex_unlock(&smcd_dev_list.mutex); 1933 1987 } 1934 1988 1935 1989 static int smc_core_reboot_event(struct notifier_block *this,
+8 -1
net/smc/smc_core.h
··· 262 262 struct work_struct llc_del_link_work; 263 263 struct work_struct llc_event_work; 264 264 /* llc event worker */ 265 - wait_queue_head_t llc_waiter; 265 + wait_queue_head_t llc_flow_waiter; 266 266 /* w4 next llc event */ 267 + wait_queue_head_t llc_msg_waiter; 268 + /* w4 next llc msg */ 267 269 struct smc_llc_flow llc_flow_lcl; 268 270 /* llc local control field */ 269 271 struct smc_llc_flow llc_flow_rmt; ··· 347 345 if (lnk->state == SMC_LNK_UNUSED || lnk->state == SMC_LNK_INACTIVE) 348 346 return false; 349 347 return true; 348 + } 349 + 350 + static inline bool smc_link_active(struct smc_link *lnk) 351 + { 352 + return lnk->state == SMC_LNK_ACTIVE; 350 353 } 351 354 352 355 struct smc_sock;
+19 -8
net/smc/smc_ib.c
··· 16 16 #include <linux/workqueue.h> 17 17 #include <linux/scatterlist.h> 18 18 #include <linux/wait.h> 19 + #include <linux/mutex.h> 19 20 #include <rdma/ib_verbs.h> 20 21 #include <rdma/ib_cache.h> 21 22 ··· 34 33 #define SMC_QP_RNR_RETRY 7 /* 7: infinite */ 35 34 36 35 struct smc_ib_devices smc_ib_devices = { /* smc-registered ib devices */ 37 - .lock = __SPIN_LOCK_UNLOCKED(smc_ib_devices.lock), 36 + .mutex = __MUTEX_INITIALIZER(smc_ib_devices.mutex), 38 37 .list = LIST_HEAD_INIT(smc_ib_devices.list), 39 38 }; 40 39 ··· 506 505 int cqe_size_order, smc_order; 507 506 long rc; 508 507 508 + mutex_lock(&smcibdev->mutex); 509 + rc = 0; 510 + if (smcibdev->initialized) 511 + goto out; 509 512 /* the calculated number of cq entries fits to mlx5 cq allocation */ 510 513 cqe_size_order = cache_line_size() == 128 ? 7 : 6; 511 514 smc_order = MAX_ORDER - cqe_size_order - 1; ··· 521 516 rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_send); 522 517 if (IS_ERR(smcibdev->roce_cq_send)) { 523 518 smcibdev->roce_cq_send = NULL; 524 - return rc; 519 + goto out; 525 520 } 526 521 smcibdev->roce_cq_recv = ib_create_cq(smcibdev->ibdev, 527 522 smc_wr_rx_cq_handler, NULL, ··· 533 528 } 534 529 smc_wr_add_dev(smcibdev); 535 530 smcibdev->initialized = 1; 536 - return rc; 531 + goto out; 537 532 538 533 err: 539 534 ib_destroy_cq(smcibdev->roce_cq_send); 535 + out: 536 + mutex_unlock(&smcibdev->mutex); 540 537 return rc; 541 538 } 542 539 543 540 static void smc_ib_cleanup_per_ibdev(struct smc_ib_device *smcibdev) 544 541 { 542 + mutex_lock(&smcibdev->mutex); 545 543 if (!smcibdev->initialized) 546 - return; 544 + goto out; 547 545 smcibdev->initialized = 0; 548 546 ib_destroy_cq(smcibdev->roce_cq_recv); 549 547 ib_destroy_cq(smcibdev->roce_cq_send); 550 548 smc_wr_remove_dev(smcibdev); 549 + out: 550 + mutex_unlock(&smcibdev->mutex); 551 551 } 552 552 553 553 static struct ib_client smc_ib_client; ··· 575 565 INIT_WORK(&smcibdev->port_event_work, smc_ib_port_event_work); 576 566 
atomic_set(&smcibdev->lnk_cnt, 0); 577 567 init_waitqueue_head(&smcibdev->lnks_deleted); 578 - spin_lock(&smc_ib_devices.lock); 568 + mutex_init(&smcibdev->mutex); 569 + mutex_lock(&smc_ib_devices.mutex); 579 570 list_add_tail(&smcibdev->list, &smc_ib_devices.list); 580 - spin_unlock(&smc_ib_devices.lock); 571 + mutex_unlock(&smc_ib_devices.mutex); 581 572 ib_set_client_data(ibdev, &smc_ib_client, smcibdev); 582 573 INIT_IB_EVENT_HANDLER(&smcibdev->event_handler, smcibdev->ibdev, 583 574 smc_ib_global_event_handler); ··· 613 602 { 614 603 struct smc_ib_device *smcibdev = client_data; 615 604 616 - spin_lock(&smc_ib_devices.lock); 605 + mutex_lock(&smc_ib_devices.mutex); 617 606 list_del_init(&smcibdev->list); /* remove from smc_ib_devices */ 618 - spin_unlock(&smc_ib_devices.lock); 607 + mutex_unlock(&smc_ib_devices.mutex); 619 608 pr_warn_ratelimited("smc: removing ib device %s\n", 620 609 smcibdev->ibdev->name); 621 610 smc_smcr_terminate_all(smcibdev);
+3 -1
net/smc/smc_ib.h
··· 14 14 15 15 #include <linux/interrupt.h> 16 16 #include <linux/if_ether.h> 17 + #include <linux/mutex.h> 17 18 #include <linux/wait.h> 18 19 #include <rdma/ib_verbs.h> 19 20 #include <net/smc.h> ··· 26 25 27 26 struct smc_ib_devices { /* list of smc ib devices definition */ 28 27 struct list_head list; 29 - spinlock_t lock; /* protects list of smc ib devices */ 28 + struct mutex mutex; /* protects list of smc ib devices */ 30 29 }; 31 30 32 31 extern struct smc_ib_devices smc_ib_devices; /* list of smc ib devices */ ··· 52 51 DECLARE_BITMAP(ports_going_away, SMC_MAX_PORTS); 53 52 atomic_t lnk_cnt; /* number of links on ibdev */ 54 53 wait_queue_head_t lnks_deleted; /* wait 4 removal of all links*/ 54 + struct mutex mutex; /* protect dev setup+cleanup */ 55 55 }; 56 56 57 57 struct smc_buf_desc;
+6 -5
net/smc/smc_ism.c
··· 7 7 */ 8 8 9 9 #include <linux/spinlock.h> 10 + #include <linux/mutex.h> 10 11 #include <linux/slab.h> 11 12 #include <asm/page.h> 12 13 ··· 18 17 19 18 struct smcd_dev_list smcd_dev_list = { 20 19 .list = LIST_HEAD_INIT(smcd_dev_list.list), 21 - .lock = __SPIN_LOCK_UNLOCKED(smcd_dev_list.lock) 20 + .mutex = __MUTEX_INITIALIZER(smcd_dev_list.mutex) 22 21 }; 23 22 24 23 /* Test if an ISM communication is possible. */ ··· 318 317 319 318 int smcd_register_dev(struct smcd_dev *smcd) 320 319 { 321 - spin_lock(&smcd_dev_list.lock); 320 + mutex_lock(&smcd_dev_list.mutex); 322 321 list_add_tail(&smcd->list, &smcd_dev_list.list); 323 - spin_unlock(&smcd_dev_list.lock); 322 + mutex_unlock(&smcd_dev_list.mutex); 324 323 325 324 pr_warn_ratelimited("smc: adding smcd device %s with pnetid %.16s%s\n", 326 325 dev_name(&smcd->dev), smcd->pnetid, ··· 334 333 { 335 334 pr_warn_ratelimited("smc: removing smcd device %s\n", 336 335 dev_name(&smcd->dev)); 337 - spin_lock(&smcd_dev_list.lock); 336 + mutex_lock(&smcd_dev_list.mutex); 338 337 list_del_init(&smcd->list); 339 - spin_unlock(&smcd_dev_list.lock); 338 + mutex_unlock(&smcd_dev_list.mutex); 340 339 smcd->going_away = 1; 341 340 smc_smcd_terminate_all(smcd); 342 341 flush_workqueue(smcd->event_wq);
+2 -1
net/smc/smc_ism.h
··· 10 10 #define SMCD_ISM_H 11 11 12 12 #include <linux/uio.h> 13 + #include <linux/mutex.h> 13 14 14 15 #include "smc.h" 15 16 16 17 struct smcd_dev_list { /* List of SMCD devices */ 17 18 struct list_head list; 18 - spinlock_t lock; /* Protects list of devices */ 19 + struct mutex mutex; /* Protects list of devices */ 19 20 }; 20 21 21 22 extern struct smcd_dev_list smcd_dev_list; /* list of smcd devices */
+138 -74
net/smc/smc_llc.c
··· 186 186 flow->qentry = qentry; 187 187 } 188 188 189 + static void smc_llc_flow_parallel(struct smc_link_group *lgr, u8 flow_type, 190 + struct smc_llc_qentry *qentry) 191 + { 192 + u8 msg_type = qentry->msg.raw.hdr.common.type; 193 + 194 + if ((msg_type == SMC_LLC_ADD_LINK || msg_type == SMC_LLC_DELETE_LINK) && 195 + flow_type != msg_type && !lgr->delayed_event) { 196 + lgr->delayed_event = qentry; 197 + return; 198 + } 199 + /* drop parallel or already-in-progress llc requests */ 200 + if (flow_type != msg_type) 201 + pr_warn_once("smc: SMC-R lg %*phN dropped parallel " 202 + "LLC msg: msg %d flow %d role %d\n", 203 + SMC_LGR_ID_SIZE, &lgr->id, 204 + qentry->msg.raw.hdr.common.type, 205 + flow_type, lgr->role); 206 + kfree(qentry); 207 + } 208 + 189 209 /* try to start a new llc flow, initiated by an incoming llc msg */ 190 210 static bool smc_llc_flow_start(struct smc_llc_flow *flow, 191 211 struct smc_llc_qentry *qentry) ··· 215 195 spin_lock_bh(&lgr->llc_flow_lock); 216 196 if (flow->type) { 217 197 /* a flow is already active */ 218 - if ((qentry->msg.raw.hdr.common.type == SMC_LLC_ADD_LINK || 219 - qentry->msg.raw.hdr.common.type == SMC_LLC_DELETE_LINK) && 220 - !lgr->delayed_event) { 221 - lgr->delayed_event = qentry; 222 - } else { 223 - /* forget this llc request */ 224 - kfree(qentry); 225 - } 198 + smc_llc_flow_parallel(lgr, flow->type, qentry); 226 199 spin_unlock_bh(&lgr->llc_flow_lock); 227 200 return false; 228 201 } ··· 235 222 } 236 223 if (qentry == lgr->delayed_event) 237 224 lgr->delayed_event = NULL; 238 - spin_unlock_bh(&lgr->llc_flow_lock); 239 225 smc_llc_flow_qentry_set(flow, qentry); 226 + spin_unlock_bh(&lgr->llc_flow_lock); 240 227 return true; 241 228 } 242 229 ··· 264 251 return 0; 265 252 } 266 253 spin_unlock_bh(&lgr->llc_flow_lock); 267 - rc = wait_event_interruptible_timeout(lgr->llc_waiter, 268 - (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE && 269 - (lgr->llc_flow_rmt.type == SMC_LLC_FLOW_NONE || 270 - lgr->llc_flow_rmt.type 
== allowed_remote)), 271 - SMC_LLC_WAIT_TIME); 254 + rc = wait_event_timeout(lgr->llc_flow_waiter, (list_empty(&lgr->list) || 255 + (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE && 256 + (lgr->llc_flow_rmt.type == SMC_LLC_FLOW_NONE || 257 + lgr->llc_flow_rmt.type == allowed_remote))), 258 + SMC_LLC_WAIT_TIME * 10); 272 259 if (!rc) 273 260 return -ETIMEDOUT; 274 261 goto again; ··· 285 272 flow == &lgr->llc_flow_lcl) 286 273 schedule_work(&lgr->llc_event_work); 287 274 else 288 - wake_up_interruptible(&lgr->llc_waiter); 275 + wake_up(&lgr->llc_flow_waiter); 289 276 } 290 277 291 278 /* lnk is optional and used for early wakeup when link goes down, useful in ··· 296 283 int time_out, u8 exp_msg) 297 284 { 298 285 struct smc_llc_flow *flow = &lgr->llc_flow_lcl; 286 + u8 rcv_msg; 299 287 300 - wait_event_interruptible_timeout(lgr->llc_waiter, 301 - (flow->qentry || 302 - (lnk && !smc_link_usable(lnk)) || 303 - list_empty(&lgr->list)), 304 - time_out); 288 + wait_event_timeout(lgr->llc_msg_waiter, 289 + (flow->qentry || 290 + (lnk && !smc_link_usable(lnk)) || 291 + list_empty(&lgr->list)), 292 + time_out); 305 293 if (!flow->qentry || 306 294 (lnk && !smc_link_usable(lnk)) || list_empty(&lgr->list)) { 307 295 smc_llc_flow_qentry_del(flow); 308 296 goto out; 309 297 } 310 - if (exp_msg && flow->qentry->msg.raw.hdr.common.type != exp_msg) { 298 + rcv_msg = flow->qentry->msg.raw.hdr.common.type; 299 + if (exp_msg && rcv_msg != exp_msg) { 311 300 if (exp_msg == SMC_LLC_ADD_LINK && 312 - flow->qentry->msg.raw.hdr.common.type == 313 - SMC_LLC_DELETE_LINK) { 301 + rcv_msg == SMC_LLC_DELETE_LINK) { 314 302 /* flow_start will delay the unexpected msg */ 315 303 smc_llc_flow_start(&lgr->llc_flow_lcl, 316 304 smc_llc_flow_qentry_clr(flow)); 317 305 return NULL; 318 306 } 307 + pr_warn_once("smc: SMC-R lg %*phN dropped unexpected LLC msg: " 308 + "msg %d exp %d flow %d role %d flags %x\n", 309 + SMC_LGR_ID_SIZE, &lgr->id, rcv_msg, exp_msg, 310 + flow->type, lgr->role, 311 + 
flow->qentry->msg.raw.hdr.flags); 319 312 smc_llc_flow_qentry_del(flow); 320 313 } 321 314 out: ··· 428 409 rtok_ix = 1; 429 410 for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { 430 411 link = &send_link->lgr->lnk[i]; 431 - if (link->state == SMC_LNK_ACTIVE && link != send_link) { 412 + if (smc_link_active(link) && link != send_link) { 432 413 rkeyllc->rtoken[rtok_ix].link_id = link->link_id; 433 414 rkeyllc->rtoken[rtok_ix].rmb_key = 434 415 htonl(rmb_desc->mr_rx[link->link_idx]->rkey); ··· 895 876 return rc; 896 877 } 897 878 879 + /* as an SMC client, invite server to start the add_link processing */ 880 + static void smc_llc_cli_add_link_invite(struct smc_link *link, 881 + struct smc_llc_qentry *qentry) 882 + { 883 + struct smc_link_group *lgr = smc_get_lgr(link); 884 + struct smc_init_info ini; 885 + 886 + if (lgr->type == SMC_LGR_SYMMETRIC || 887 + lgr->type == SMC_LGR_ASYMMETRIC_PEER) 888 + goto out; 889 + 890 + ini.vlan_id = lgr->vlan_id; 891 + smc_pnet_find_alt_roce(lgr, &ini, link->smcibdev); 892 + if (!ini.ib_dev) 893 + goto out; 894 + 895 + smc_llc_send_add_link(link, ini.ib_dev->mac[ini.ib_port - 1], 896 + ini.ib_gid, NULL, SMC_LLC_REQ); 897 + out: 898 + kfree(qentry); 899 + } 900 + 901 + static bool smc_llc_is_local_add_link(union smc_llc_msg *llc) 902 + { 903 + if (llc->raw.hdr.common.type == SMC_LLC_ADD_LINK && 904 + !llc->add_link.qp_mtu && !llc->add_link.link_num) 905 + return true; 906 + return false; 907 + } 908 + 898 909 static void smc_llc_process_cli_add_link(struct smc_link_group *lgr) 899 910 { 900 911 struct smc_llc_qentry *qentry; ··· 932 883 qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl); 933 884 934 885 mutex_lock(&lgr->llc_conf_mutex); 935 - smc_llc_cli_add_link(qentry->link, qentry); 886 + if (smc_llc_is_local_add_link(&qentry->msg)) 887 + smc_llc_cli_add_link_invite(qentry->link, qentry); 888 + else 889 + smc_llc_cli_add_link(qentry->link, qentry); 936 890 mutex_unlock(&lgr->llc_conf_mutex); 937 891 } 938 892 ··· 944 892 int i, 
link_count = 0; 945 893 946 894 for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { 947 - if (!smc_link_usable(&lgr->lnk[i])) 895 + if (!smc_link_active(&lgr->lnk[i])) 948 896 continue; 949 897 link_count++; 950 898 } ··· 1084 1032 if (rc) 1085 1033 return -ENOLINK; 1086 1034 /* receive CONFIRM LINK response over the RoCE fabric */ 1087 - qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_FIRST_TIME, 1088 - SMC_LLC_CONFIRM_LINK); 1089 - if (!qentry) { 1035 + qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_FIRST_TIME, 0); 1036 + if (!qentry || 1037 + qentry->msg.raw.hdr.common.type != SMC_LLC_CONFIRM_LINK) { 1090 1038 /* send DELETE LINK */ 1091 1039 smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ, 1092 1040 false, SMC_LLC_DEL_LOST_PATH); 1041 + if (qentry) 1042 + smc_llc_flow_qentry_del(&lgr->llc_flow_lcl); 1093 1043 return -ENOLINK; 1094 1044 } 1095 1045 smc_llc_save_peer_uid(qentry); ··· 1193 1139 mutex_unlock(&lgr->llc_conf_mutex); 1194 1140 } 1195 1141 1196 - /* enqueue a local add_link req to trigger a new add_link flow, only as SERV */ 1197 - void smc_llc_srv_add_link_local(struct smc_link *link) 1142 + /* enqueue a local add_link req to trigger a new add_link flow */ 1143 + void smc_llc_add_link_local(struct smc_link *link) 1198 1144 { 1199 1145 struct smc_llc_msg_add_link add_llc = {0}; 1200 1146 1201 1147 add_llc.hd.length = sizeof(add_llc); 1202 1148 add_llc.hd.common.type = SMC_LLC_ADD_LINK; 1203 - /* no dev and port needed, we as server ignore client data anyway */ 1149 + /* no dev and port needed */ 1204 1150 smc_llc_enqueue(link, (union smc_llc_msg *)&add_llc); 1205 1151 } 1206 1152 ··· 1276 1222 smc_llc_send_message(lnk, &qentry->msg); /* response */ 1277 1223 1278 1224 if (smc_link_downing(&lnk_del->state)) { 1279 - smc_switch_conns(lgr, lnk_del, false); 1280 - smc_wr_tx_wait_no_pending_sends(lnk_del); 1225 + if (smc_switch_conns(lgr, lnk_del, false)) 1226 + smc_wr_tx_wait_no_pending_sends(lnk_del); 1281 1227 } 1282 1228 smcr_link_clear(lnk_del, 
true); 1283 1229 ··· 1351 1297 goto out; /* asymmetric link already deleted */ 1352 1298 1353 1299 if (smc_link_downing(&lnk_del->state)) { 1354 - smc_switch_conns(lgr, lnk_del, false); 1355 - smc_wr_tx_wait_no_pending_sends(lnk_del); 1300 + if (smc_switch_conns(lgr, lnk_del, false)) 1301 + smc_wr_tx_wait_no_pending_sends(lnk_del); 1356 1302 } 1357 1303 if (!list_empty(&lgr->list)) { 1358 1304 /* qentry is either a request from peer (send it back to ··· 1380 1326 1381 1327 if (lgr->type == SMC_LGR_SINGLE && !list_empty(&lgr->list)) { 1382 1328 /* trigger setup of asymm alt link */ 1383 - smc_llc_srv_add_link_local(lnk); 1329 + smc_llc_add_link_local(lnk); 1384 1330 } 1385 1331 out: 1386 1332 mutex_unlock(&lgr->llc_conf_mutex); ··· 1509 1455 if (list_empty(&lgr->list)) 1510 1456 goto out; /* lgr is terminating */ 1511 1457 if (lgr->role == SMC_CLNT) { 1512 - if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK) { 1458 + if (smc_llc_is_local_add_link(llc)) { 1459 + if (lgr->llc_flow_lcl.type == 1460 + SMC_LLC_FLOW_ADD_LINK) 1461 + break; /* add_link in progress */ 1462 + if (smc_llc_flow_start(&lgr->llc_flow_lcl, 1463 + qentry)) { 1464 + schedule_work(&lgr->llc_add_link_work); 1465 + } 1466 + return; 1467 + } 1468 + if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK && 1469 + !lgr->llc_flow_lcl.qentry) { 1513 1470 /* a flow is waiting for this message */ 1514 1471 smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, 1515 1472 qentry); 1516 - wake_up_interruptible(&lgr->llc_waiter); 1473 + wake_up(&lgr->llc_msg_waiter); 1517 1474 } else if (smc_llc_flow_start(&lgr->llc_flow_lcl, 1518 1475 qentry)) { 1519 1476 schedule_work(&lgr->llc_add_link_work); ··· 1539 1474 if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) { 1540 1475 /* a flow is waiting for this message */ 1541 1476 smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, qentry); 1542 - wake_up_interruptible(&lgr->llc_waiter); 1477 + wake_up(&lgr->llc_msg_waiter); 1543 1478 return; 1544 1479 } 1545 1480 break; 1546 1481 case 
SMC_LLC_DELETE_LINK: 1547 - if (lgr->role == SMC_CLNT) { 1548 - /* server requests to delete this link, send response */ 1549 - if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) { 1550 - /* DEL LINK REQ during ADD LINK SEQ */ 1551 - smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, 1552 - qentry); 1553 - wake_up_interruptible(&lgr->llc_waiter); 1554 - } else if (smc_llc_flow_start(&lgr->llc_flow_lcl, 1555 - qentry)) { 1556 - schedule_work(&lgr->llc_del_link_work); 1557 - } 1558 - } else { 1559 - if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK && 1560 - !lgr->llc_flow_lcl.qentry) { 1561 - /* DEL LINK REQ during ADD LINK SEQ */ 1562 - smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, 1563 - qentry); 1564 - wake_up_interruptible(&lgr->llc_waiter); 1565 - } else if (smc_llc_flow_start(&lgr->llc_flow_lcl, 1566 - qentry)) { 1567 - schedule_work(&lgr->llc_del_link_work); 1568 - } 1482 + if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK && 1483 + !lgr->llc_flow_lcl.qentry) { 1484 + /* DEL LINK REQ during ADD LINK SEQ */ 1485 + smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, qentry); 1486 + wake_up(&lgr->llc_msg_waiter); 1487 + } else if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) { 1488 + schedule_work(&lgr->llc_del_link_work); 1569 1489 } 1570 1490 return; 1571 1491 case SMC_LLC_CONFIRM_RKEY: ··· 1616 1566 static void smc_llc_rx_response(struct smc_link *link, 1617 1567 struct smc_llc_qentry *qentry) 1618 1568 { 1569 + enum smc_llc_flowtype flowtype = link->lgr->llc_flow_lcl.type; 1570 + struct smc_llc_flow *flow = &link->lgr->llc_flow_lcl; 1619 1571 u8 llc_type = qentry->msg.raw.hdr.common.type; 1620 1572 1621 1573 switch (llc_type) { 1622 1574 case SMC_LLC_TEST_LINK: 1623 - if (link->state == SMC_LNK_ACTIVE) 1575 + if (smc_link_active(link)) 1624 1576 complete(&link->llc_testlink_resp); 1625 1577 break; 1626 1578 case SMC_LLC_ADD_LINK: 1627 - case SMC_LLC_DELETE_LINK: 1628 - case SMC_LLC_CONFIRM_LINK: 1629 1579 case SMC_LLC_ADD_LINK_CONT: 1580 + case SMC_LLC_CONFIRM_LINK: 
1581 + if (flowtype != SMC_LLC_FLOW_ADD_LINK || flow->qentry) 1582 + break; /* drop out-of-flow response */ 1583 + goto assign; 1584 + case SMC_LLC_DELETE_LINK: 1585 + if (flowtype != SMC_LLC_FLOW_DEL_LINK || flow->qentry) 1586 + break; /* drop out-of-flow response */ 1587 + goto assign; 1630 1588 case SMC_LLC_CONFIRM_RKEY: 1631 1589 case SMC_LLC_DELETE_RKEY: 1632 - /* assign responses to the local flow, we requested them */ 1633 - smc_llc_flow_qentry_set(&link->lgr->llc_flow_lcl, qentry); 1634 - wake_up_interruptible(&link->lgr->llc_waiter); 1635 - return; 1590 + if (flowtype != SMC_LLC_FLOW_RKEY || flow->qentry) 1591 + break; /* drop out-of-flow response */ 1592 + goto assign; 1636 1593 case SMC_LLC_CONFIRM_RKEY_CONT: 1637 1594 /* not used because max links is 3 */ 1638 1595 break; ··· 1648 1591 break; 1649 1592 } 1650 1593 kfree(qentry); 1594 + return; 1595 + assign: 1596 + /* assign responses to the local flow, we requested them */ 1597 + smc_llc_flow_qentry_set(&link->lgr->llc_flow_lcl, qentry); 1598 + wake_up(&link->lgr->llc_msg_waiter); 1651 1599 } 1652 1600 1653 1601 static void smc_llc_enqueue(struct smc_link *link, union smc_llc_msg *llc) ··· 1678 1616 spin_lock_irqsave(&lgr->llc_event_q_lock, flags); 1679 1617 list_add_tail(&qentry->list, &lgr->llc_event_q); 1680 1618 spin_unlock_irqrestore(&lgr->llc_event_q_lock, flags); 1681 - schedule_work(&link->lgr->llc_event_work); 1619 + schedule_work(&lgr->llc_event_work); 1682 1620 } 1683 1621 1684 1622 /* copy received msg and add it to the event queue */ ··· 1706 1644 u8 user_data[16] = { 0 }; 1707 1645 int rc; 1708 1646 1709 - if (link->state != SMC_LNK_ACTIVE) 1647 + if (!smc_link_active(link)) 1710 1648 return; /* don't reschedule worker */ 1711 1649 expire_time = link->wr_rx_tstamp + link->llc_testlink_time; 1712 1650 if (time_is_after_jiffies(expire_time)) { ··· 1718 1656 /* receive TEST LINK response over RoCE fabric */ 1719 1657 rc = wait_for_completion_interruptible_timeout(&link->llc_testlink_resp, 
1720 1658 SMC_LLC_WAIT_TIME); 1721 - if (link->state != SMC_LNK_ACTIVE) 1659 + if (!smc_link_active(link)) 1722 1660 return; /* link state changed */ 1723 1661 if (rc <= 0) { 1724 1662 smcr_link_down_cond_sched(link); ··· 1739 1677 INIT_LIST_HEAD(&lgr->llc_event_q); 1740 1678 spin_lock_init(&lgr->llc_event_q_lock); 1741 1679 spin_lock_init(&lgr->llc_flow_lock); 1742 - init_waitqueue_head(&lgr->llc_waiter); 1680 + init_waitqueue_head(&lgr->llc_flow_waiter); 1681 + init_waitqueue_head(&lgr->llc_msg_waiter); 1743 1682 mutex_init(&lgr->llc_conf_mutex); 1744 1683 lgr->llc_testlink_time = net->ipv4.sysctl_tcp_keepalive_time; 1745 1684 } ··· 1749 1686 void smc_llc_lgr_clear(struct smc_link_group *lgr) 1750 1687 { 1751 1688 smc_llc_event_flush(lgr); 1752 - wake_up_interruptible_all(&lgr->llc_waiter); 1689 + wake_up_all(&lgr->llc_flow_waiter); 1690 + wake_up_all(&lgr->llc_msg_waiter); 1753 1691 cancel_work_sync(&lgr->llc_event_work); 1754 1692 cancel_work_sync(&lgr->llc_add_link_work); 1755 1693 cancel_work_sync(&lgr->llc_del_link_work);
+1 -1
net/smc/smc_llc.h
··· 103 103 u32 rsn); 104 104 int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry); 105 105 int smc_llc_srv_add_link(struct smc_link *link); 106 - void smc_llc_srv_add_link_local(struct smc_link *link); 106 + void smc_llc_add_link_local(struct smc_link *link); 107 107 int smc_llc_init(void) __init; 108 108 109 109 #endif /* SMC_LLC_H */
+19 -18
net/smc/smc_pnet.c
··· 12 12 #include <linux/module.h> 13 13 #include <linux/list.h> 14 14 #include <linux/ctype.h> 15 + #include <linux/mutex.h> 15 16 #include <net/netlink.h> 16 17 #include <net/genetlink.h> 17 18 ··· 130 129 return rc; 131 130 132 131 /* remove ib devices */ 133 - spin_lock(&smc_ib_devices.lock); 132 + mutex_lock(&smc_ib_devices.mutex); 134 133 list_for_each_entry(ibdev, &smc_ib_devices.list, list) { 135 134 for (ibport = 0; ibport < SMC_MAX_PORTS; ibport++) { 136 135 if (ibdev->pnetid_by_user[ibport] && ··· 150 149 } 151 150 } 152 151 } 153 - spin_unlock(&smc_ib_devices.lock); 152 + mutex_unlock(&smc_ib_devices.mutex); 154 153 /* remove smcd devices */ 155 - spin_lock(&smcd_dev_list.lock); 154 + mutex_lock(&smcd_dev_list.mutex); 156 155 list_for_each_entry(smcd_dev, &smcd_dev_list.list, list) { 157 156 if (smcd_dev->pnetid_by_user && 158 157 (!pnet_name || ··· 166 165 rc = 0; 167 166 } 168 167 } 169 - spin_unlock(&smcd_dev_list.lock); 168 + mutex_unlock(&smcd_dev_list.mutex); 170 169 return rc; 171 170 } 172 171 ··· 241 240 u8 pnet_null[SMC_MAX_PNETID_LEN] = {0}; 242 241 bool applied = false; 243 242 244 - spin_lock(&smc_ib_devices.lock); 243 + mutex_lock(&smc_ib_devices.mutex); 245 244 if (smc_pnet_match(ib_dev->pnetid[ib_port - 1], pnet_null)) { 246 245 memcpy(ib_dev->pnetid[ib_port - 1], pnet_name, 247 246 SMC_MAX_PNETID_LEN); 248 247 ib_dev->pnetid_by_user[ib_port - 1] = true; 249 248 applied = true; 250 249 } 251 - spin_unlock(&smc_ib_devices.lock); 250 + mutex_unlock(&smc_ib_devices.mutex); 252 251 return applied; 253 252 } 254 253 ··· 259 258 u8 pnet_null[SMC_MAX_PNETID_LEN] = {0}; 260 259 bool applied = false; 261 260 262 - spin_lock(&smcd_dev_list.lock); 261 + mutex_lock(&smcd_dev_list.mutex); 263 262 if (smc_pnet_match(smcd_dev->pnetid, pnet_null)) { 264 263 memcpy(smcd_dev->pnetid, pnet_name, SMC_MAX_PNETID_LEN); 265 264 smcd_dev->pnetid_by_user = true; 266 265 applied = true; 267 266 } 268 - spin_unlock(&smcd_dev_list.lock); 267 + 
mutex_unlock(&smcd_dev_list.mutex); 269 268 return applied; 270 269 } 271 270 ··· 301 300 { 302 301 struct smc_ib_device *ibdev; 303 302 304 - spin_lock(&smc_ib_devices.lock); 303 + mutex_lock(&smc_ib_devices.mutex); 305 304 list_for_each_entry(ibdev, &smc_ib_devices.list, list) { 306 305 if (!strncmp(ibdev->ibdev->name, ib_name, 307 306 sizeof(ibdev->ibdev->name)) || ··· 312 311 } 313 312 ibdev = NULL; 314 313 out: 315 - spin_unlock(&smc_ib_devices.lock); 314 + mutex_unlock(&smc_ib_devices.mutex); 316 315 return ibdev; 317 316 } 318 317 ··· 321 320 { 322 321 struct smcd_dev *smcd_dev; 323 322 324 - spin_lock(&smcd_dev_list.lock); 323 + mutex_lock(&smcd_dev_list.mutex); 325 324 list_for_each_entry(smcd_dev, &smcd_dev_list.list, list) { 326 325 if (!strncmp(dev_name(&smcd_dev->dev), smcd_name, 327 326 IB_DEVICE_NAME_MAX - 1)) ··· 329 328 } 330 329 smcd_dev = NULL; 331 330 out: 332 - spin_unlock(&smcd_dev_list.lock); 331 + mutex_unlock(&smcd_dev_list.mutex); 333 332 return smcd_dev; 334 333 } 335 334 ··· 826 825 int i; 827 826 828 827 ini->ib_dev = NULL; 829 - spin_lock(&smc_ib_devices.lock); 828 + mutex_lock(&smc_ib_devices.mutex); 830 829 list_for_each_entry(ibdev, &smc_ib_devices.list, list) { 831 830 if (ibdev == known_dev) 832 831 continue; ··· 845 844 } 846 845 } 847 846 out: 848 - spin_unlock(&smc_ib_devices.lock); 847 + mutex_unlock(&smc_ib_devices.mutex); 849 848 } 850 849 851 850 /* find alternate roce device with same pnet_id and vlan_id */ ··· 864 863 { 865 864 struct smc_ib_device *ibdev; 866 865 867 - spin_lock(&smc_ib_devices.lock); 866 + mutex_lock(&smc_ib_devices.mutex); 868 867 list_for_each_entry(ibdev, &smc_ib_devices.list, list) { 869 868 struct net_device *ndev; 870 869 int i; ··· 889 888 } 890 889 } 891 890 } 892 - spin_unlock(&smc_ib_devices.lock); 891 + mutex_unlock(&smc_ib_devices.mutex); 893 892 } 894 893 895 894 /* Determine the corresponding IB device port based on the hardware PNETID. 
··· 925 924 smc_pnet_find_ndev_pnetid_by_table(ndev, ndev_pnetid)) 926 925 return; /* pnetid could not be determined */ 927 926 928 - spin_lock(&smcd_dev_list.lock); 927 + mutex_lock(&smcd_dev_list.mutex); 929 928 list_for_each_entry(ismdev, &smcd_dev_list.list, list) { 930 929 if (smc_pnet_match(ismdev->pnetid, ndev_pnetid) && 931 930 !ismdev->going_away) { ··· 933 932 break; 934 933 } 935 934 } 936 - spin_unlock(&smcd_dev_list.lock); 935 + mutex_unlock(&smcd_dev_list.mutex); 937 936 } 938 937 939 938 /* PNET table analysis for a given sock:
+6 -4
net/smc/smc_wr.c
··· 169 169 static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx) 170 170 { 171 171 *idx = link->wr_tx_cnt; 172 + if (!smc_link_usable(link)) 173 + return -ENOLINK; 172 174 for_each_clear_bit(*idx, link->wr_tx_mask, link->wr_tx_cnt) { 173 175 if (!test_and_set_bit(*idx, link->wr_tx_mask)) 174 176 return 0; ··· 562 560 { 563 561 struct ib_device *ibdev; 564 562 563 + if (!lnk->smcibdev) 564 + return; 565 + ibdev = lnk->smcibdev->ibdev; 566 + 565 567 if (smc_wr_tx_wait_no_pending_sends(lnk)) 566 568 memset(lnk->wr_tx_mask, 0, 567 569 BITS_TO_LONGS(SMC_WR_BUF_CNT) * 568 570 sizeof(*lnk->wr_tx_mask)); 569 - 570 - if (!lnk->smcibdev) 571 - return; 572 - ibdev = lnk->smcibdev->ibdev; 573 571 574 572 if (lnk->wr_rx_dma_addr) { 575 573 ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr,
+1
net/sunrpc/svcsock.c
··· 44 44 #include <net/tcp.h> 45 45 #include <net/tcp_states.h> 46 46 #include <linux/uaccess.h> 47 + #include <linux/highmem.h> 47 48 #include <asm/ioctls.h> 48 49 49 50 #include <linux/sunrpc/types.h>
+2 -2
net/sunrpc/xprtrdma/rpc_rdma.c
··· 71 71 size = RPCRDMA_HDRLEN_MIN; 72 72 73 73 /* Maximum Read list size */ 74 - size = maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32); 74 + size += maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32); 75 75 76 76 /* Minimal Read chunk size */ 77 77 size += sizeof(__be32); /* segment count */ ··· 94 94 size = RPCRDMA_HDRLEN_MIN; 95 95 96 96 /* Maximum Write list size */ 97 - size = sizeof(__be32); /* segment count */ 97 + size += sizeof(__be32); /* segment count */ 98 98 size += maxsegs * rpcrdma_segment_maxsz * sizeof(__be32); 99 99 size += sizeof(__be32); /* list discriminator */ 100 100
+5
net/sunrpc/xprtrdma/transport.c
··· 249 249 xprt->stat.connect_start; 250 250 xprt_set_connected(xprt); 251 251 rc = -EAGAIN; 252 + } else { 253 + /* Force a call to xprt_rdma_close to clean up */ 254 + spin_lock(&xprt->transport_lock); 255 + set_bit(XPRT_CLOSE_WAIT, &xprt->state); 256 + spin_unlock(&xprt->transport_lock); 252 257 } 253 258 xprt_wake_pending_tasks(xprt, rc); 254 259 }
+16 -19
net/sunrpc/xprtrdma/verbs.c
··· 281 281 break; 282 282 case RDMA_CM_EVENT_CONNECT_ERROR: 283 283 ep->re_connect_status = -ENOTCONN; 284 - goto disconnected; 284 + goto wake_connect_worker; 285 285 case RDMA_CM_EVENT_UNREACHABLE: 286 286 ep->re_connect_status = -ENETUNREACH; 287 - goto disconnected; 287 + goto wake_connect_worker; 288 288 case RDMA_CM_EVENT_REJECTED: 289 289 dprintk("rpcrdma: connection to %pISpc rejected: %s\n", 290 290 sap, rdma_reject_msg(id, event->status)); 291 291 ep->re_connect_status = -ECONNREFUSED; 292 292 if (event->status == IB_CM_REJ_STALE_CONN) 293 - ep->re_connect_status = -EAGAIN; 294 - goto disconnected; 293 + ep->re_connect_status = -ENOTCONN; 294 + wake_connect_worker: 295 + wake_up_all(&ep->re_connect_wait); 296 + return 0; 295 297 case RDMA_CM_EVENT_DISCONNECTED: 296 298 ep->re_connect_status = -ECONNABORTED; 297 299 disconnected: ··· 402 400 403 401 ep = kzalloc(sizeof(*ep), GFP_NOFS); 404 402 if (!ep) 405 - return -EAGAIN; 403 + return -ENOTCONN; 406 404 ep->re_xprt = &r_xprt->rx_xprt; 407 405 kref_init(&ep->re_kref); 408 406 409 407 id = rpcrdma_create_id(r_xprt, ep); 410 408 if (IS_ERR(id)) { 411 - rc = PTR_ERR(id); 412 - goto out_free; 409 + kfree(ep); 410 + return PTR_ERR(id); 413 411 } 414 412 __module_get(THIS_MODULE); 415 413 device = id->device; ··· 508 506 out_destroy: 509 507 rpcrdma_ep_put(ep); 510 508 rdma_destroy_id(id); 511 - out_free: 512 - kfree(ep); 513 - r_xprt->rx_ep = NULL; 514 509 return rc; 515 510 } 516 511 ··· 523 524 struct rpcrdma_ep *ep; 524 525 int rc; 525 526 526 - retry: 527 - rpcrdma_xprt_disconnect(r_xprt); 528 527 rc = rpcrdma_ep_create(r_xprt); 529 528 if (rc) 530 529 return rc; ··· 537 540 rpcrdma_ep_get(ep); 538 541 rpcrdma_post_recvs(r_xprt, true); 539 542 540 - rc = rpcrdma_sendctxs_create(r_xprt); 541 - if (rc) 542 - goto out; 543 - 544 543 rc = rdma_connect(ep->re_id, &ep->re_remote_cma); 545 544 if (rc) 546 545 goto out; ··· 546 553 wait_event_interruptible(ep->re_connect_wait, 547 554 ep->re_connect_status != 0); 
548 555 if (ep->re_connect_status <= 0) { 549 - if (ep->re_connect_status == -EAGAIN) 550 - goto retry; 551 556 rc = ep->re_connect_status; 557 + goto out; 558 + } 559 + 560 + rc = rpcrdma_sendctxs_create(r_xprt); 561 + if (rc) { 562 + rc = -ENOTCONN; 552 563 goto out; 553 564 } 554 565 555 566 rc = rpcrdma_reqs_setup(r_xprt); 556 567 if (rc) { 557 - rpcrdma_xprt_disconnect(r_xprt); 568 + rc = -ENOTCONN; 558 569 goto out; 559 570 } 560 571 rpcrdma_mrs_create(r_xprt);
+19 -9
net/tipc/link.c
··· 827 827 state |= l->bc_rcvlink->rcv_unacked; 828 828 state |= l->rcv_unacked; 829 829 state |= !skb_queue_empty(&l->transmq); 830 - state |= !skb_queue_empty(&l->deferdq); 831 830 probe = mstate->probing; 832 831 probe |= l->silent_intv_cnt; 833 832 if (probe || mstate->monitoring) 834 833 l->silent_intv_cnt++; 834 + probe |= !skb_queue_empty(&l->deferdq); 835 835 if (l->snd_nxt == l->checkpoint) { 836 836 tipc_link_update_cwin(l, 0, 0); 837 837 probe = true; ··· 919 919 skb_queue_splice_tail(&tmpq, inputq); 920 920 spin_unlock_bh(&inputq->lock); 921 921 922 + } 923 + 924 + /** 925 + * tipc_link_set_skb_retransmit_time - set the time at which retransmission of 926 + * the given skb should be next attempted 927 + * @skb: skb to set a future retransmission time for 928 + * @l: link the skb will be transmitted on 929 + */ 930 + static void tipc_link_set_skb_retransmit_time(struct sk_buff *skb, 931 + struct tipc_link *l) 932 + { 933 + if (link_is_bc_sndlink(l)) 934 + TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM; 935 + else 936 + TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME; 922 937 } 923 938 924 939 void tipc_link_reset(struct tipc_link *l) ··· 1051 1036 return -ENOBUFS; 1052 1037 } 1053 1038 __skb_queue_tail(transmq, skb); 1054 - /* next retransmit attempt */ 1055 - if (link_is_bc_sndlink(l)) 1056 - TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM; 1039 + tipc_link_set_skb_retransmit_time(skb, l); 1057 1040 __skb_queue_tail(xmitq, _skb); 1058 1041 TIPC_SKB_CB(skb)->ackers = l->ackers; 1059 1042 l->rcv_unacked = 0; ··· 1152 1139 if (unlikely(skb == l->backlog[imp].target_bskb)) 1153 1140 l->backlog[imp].target_bskb = NULL; 1154 1141 __skb_queue_tail(&l->transmq, skb); 1155 - /* next retransmit attempt */ 1156 - if (link_is_bc_sndlink(l)) 1157 - TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM; 1142 + tipc_link_set_skb_retransmit_time(skb, l); 1158 1143 1159 1144 __skb_queue_tail(xmitq, _skb); 1160 1145 TIPC_SKB_CB(skb)->ackers = l->ackers; ··· 1595 1584 /* retransmit 
skb if unrestricted*/ 1596 1585 if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr)) 1597 1586 continue; 1598 - TIPC_SKB_CB(skb)->nxt_retr = (is_uc) ? 1599 - TIPC_UC_RETR_TIME : TIPC_BC_RETR_LIM; 1587 + tipc_link_set_skb_retransmit_time(skb, l); 1600 1588 _skb = pskb_copy(skb, GFP_ATOMIC); 1601 1589 if (!_skb) 1602 1590 continue;
+1 -1
net/vmw_vsock/virtio_transport.c
··· 22 22 #include <net/af_vsock.h> 23 23 24 24 static struct workqueue_struct *virtio_vsock_workqueue; 25 - static struct virtio_vsock *the_virtio_vsock; 25 + static struct virtio_vsock __rcu *the_virtio_vsock; 26 26 static DEFINE_MUTEX(the_virtio_vsock_mutex); /* protects the_virtio_vsock */ 27 27 28 28 struct virtio_vsock {
+3 -2
net/wireless/nl80211.c
··· 5016 5016 err = nl80211_parse_he_obss_pd( 5017 5017 info->attrs[NL80211_ATTR_HE_OBSS_PD], 5018 5018 &params.he_obss_pd); 5019 - goto out; 5019 + if (err) 5020 + goto out; 5020 5021 } 5021 5022 5022 5023 if (info->attrs[NL80211_ATTR_HE_BSS_COLOR]) { ··· 5025 5024 info->attrs[NL80211_ATTR_HE_BSS_COLOR], 5026 5025 &params.he_bss_color); 5027 5026 if (err) 5028 - return err; 5027 + goto out; 5029 5028 } 5030 5029 5031 5030 nl80211_calculate_ap_params(&params);
+4 -50
net/xdp/xsk_buff_pool.c
··· 2 2 3 3 #include <net/xsk_buff_pool.h> 4 4 #include <net/xdp_sock.h> 5 - #include <linux/dma-direct.h> 6 - #include <linux/dma-noncoherent.h> 7 - #include <linux/swiotlb.h> 8 5 9 6 #include "xsk_queue.h" 10 7 ··· 52 55 pool->free_heads_cnt = chunks; 53 56 pool->headroom = headroom; 54 57 pool->chunk_size = chunk_size; 55 - pool->cheap_dma = true; 56 58 pool->unaligned = unaligned; 57 59 pool->frame_len = chunk_size - headroom - XDP_PACKET_HEADROOM; 58 60 INIT_LIST_HEAD(&pool->free_list); ··· 121 125 } 122 126 } 123 127 124 - static bool __maybe_unused xp_check_swiotlb_dma(struct xsk_buff_pool *pool) 125 - { 126 - #if defined(CONFIG_SWIOTLB) 127 - phys_addr_t paddr; 128 - u32 i; 129 - 130 - for (i = 0; i < pool->dma_pages_cnt; i++) { 131 - paddr = dma_to_phys(pool->dev, pool->dma_pages[i]); 132 - if (is_swiotlb_buffer(paddr)) 133 - return false; 134 - } 135 - #endif 136 - return true; 137 - } 138 - 139 - static bool xp_check_cheap_dma(struct xsk_buff_pool *pool) 140 - { 141 - #if defined(CONFIG_HAS_DMA) 142 - const struct dma_map_ops *ops = get_dma_ops(pool->dev); 143 - 144 - if (ops) { 145 - return !ops->sync_single_for_cpu && 146 - !ops->sync_single_for_device; 147 - } 148 - 149 - if (!dma_is_direct(ops)) 150 - return false; 151 - 152 - if (!xp_check_swiotlb_dma(pool)) 153 - return false; 154 - 155 - if (!dev_is_dma_coherent(pool->dev)) { 156 - #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \ 157 - defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \ 158 - defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) 159 - return false; 160 - #endif 161 - } 162 - #endif 163 - return true; 164 - } 165 - 166 128 int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev, 167 129 unsigned long attrs, struct page **pages, u32 nr_pages) 168 130 { ··· 134 180 135 181 pool->dev = dev; 136 182 pool->dma_pages_cnt = nr_pages; 183 + pool->dma_need_sync = false; 137 184 138 185 for (i = 0; i < pool->dma_pages_cnt; i++) { 139 186 dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE, 
··· 143 188 xp_dma_unmap(pool, attrs); 144 189 return -ENOMEM; 145 190 } 191 + if (dma_need_sync(dev, dma)) 192 + pool->dma_need_sync = true; 146 193 pool->dma_pages[i] = dma; 147 194 } 148 195 149 196 if (pool->unaligned) 150 197 xp_check_dma_contiguity(pool); 151 - 152 - pool->dev = dev; 153 - pool->cheap_dma = xp_check_cheap_dma(pool); 154 198 return 0; 155 199 } 156 200 EXPORT_SYMBOL(xp_dma_map); ··· 234 280 xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM; 235 281 xskb->xdp.data_meta = xskb->xdp.data; 236 282 237 - if (!pool->cheap_dma) { 283 + if (pool->dma_need_sync) { 238 284 dma_sync_single_range_for_device(pool->dev, xskb->dma, 0, 239 285 pool->frame_len, 240 286 DMA_BIDIRECTIONAL);
+2
net/xfrm/xfrm_interface.c
··· 37 37 #include <net/ip.h> 38 38 #include <net/ipv6.h> 39 39 #include <net/ip6_route.h> 40 + #include <net/ip_tunnels.h> 40 41 #include <net/addrconf.h> 41 42 #include <net/xfrm.h> 42 43 #include <net/net_namespace.h> ··· 582 581 static void xfrmi_dev_setup(struct net_device *dev) 583 582 { 584 583 dev->netdev_ops = &xfrmi_netdev_ops; 584 + dev->header_ops = &ip_tunnel_header_ops; 585 585 dev->type = ARPHRD_NONE; 586 586 dev->mtu = ETH_DATA_LEN; 587 587 dev->min_mtu = ETH_MIN_MTU;
+2
samples/vfs/test-statx.c
··· 23 23 #include <linux/fcntl.h> 24 24 #define statx foo 25 25 #define statx_timestamp foo_timestamp 26 + struct statx; 27 + struct statx_timestamp; 26 28 #include <sys/stat.h> 27 29 #undef statx 28 30 #undef statx_timestamp
+2
scripts/Makefile.extrawarn
··· 35 35 # The following turn off the warnings enabled by -Wextra 36 36 KBUILD_CFLAGS += -Wno-missing-field-initializers 37 37 KBUILD_CFLAGS += -Wno-sign-compare 38 + KBUILD_CFLAGS += -Wno-type-limits 38 39 39 40 KBUILD_CPPFLAGS += -DKBUILD_EXTRA_WARN1 40 41 ··· 67 66 KBUILD_CFLAGS += $(call cc-option, -Wlogical-op) 68 67 KBUILD_CFLAGS += -Wmissing-field-initializers 69 68 KBUILD_CFLAGS += -Wsign-compare 69 + KBUILD_CFLAGS += -Wtype-limits 70 70 KBUILD_CFLAGS += $(call cc-option, -Wmaybe-uninitialized) 71 71 KBUILD_CFLAGS += $(call cc-option, -Wunused-macros) 72 72
+6 -1
scripts/Makefile.lib
··· 212 212 $(addprefix $(obj)/, $(foreach s, $3, $($(m:%$(strip $2)=%$(s))))))) 213 213 endef 214 214 215 + quiet_cmd_copy = COPY $@ 216 + cmd_copy = cp $< $@ 217 + 215 218 # Shipped files 216 219 # =========================================================================== 217 220 ··· 262 259 # DTC 263 260 # --------------------------------------------------------------------------- 264 261 DTC ?= $(objtree)/scripts/dtc/dtc 262 + DTC_FLAGS += -Wno-interrupt_provider 265 263 266 264 # Disable noisy checks by default 267 265 ifeq ($(findstring 1,$(KBUILD_EXTRA_WARN)),) ··· 278 274 279 275 ifneq ($(findstring 2,$(KBUILD_EXTRA_WARN)),) 280 276 DTC_FLAGS += -Wnode_name_chars_strict \ 281 - -Wproperty_name_chars_strict 277 + -Wproperty_name_chars_strict \ 278 + -Winterrupt_provider 282 279 endif 283 280 284 281 DTC_FLAGS += $(DTC_FLAGS_$(basetarget))
+2 -2
scripts/decode_stacktrace.sh
··· 87 87 return 88 88 fi 89 89 90 - # Strip out the base of the path 91 - code=${code#$basepath/} 90 + # Strip out the base of the path on each line 91 + code=$(while read -r line; do echo "${line#$basepath/}"; done <<< "$code") 92 92 93 93 # In the case of inlines, move everything to same line 94 94 code=${code//$'\n'/' '}
+36 -3
scripts/dtc/checks.c
··· 1022 1022 } 1023 1023 WARNING(i2c_bus_bridge, check_i2c_bus_bridge, NULL, &addr_size_cells); 1024 1024 1025 + #define I2C_OWN_SLAVE_ADDRESS (1U << 30) 1026 + #define I2C_TEN_BIT_ADDRESS (1U << 31) 1027 + 1025 1028 static void check_i2c_bus_reg(struct check *c, struct dt_info *dti, struct node *node) 1026 1029 { 1027 1030 struct property *prop; ··· 1047 1044 } 1048 1045 1049 1046 reg = fdt32_to_cpu(*cells); 1047 + /* Ignore I2C_OWN_SLAVE_ADDRESS */ 1048 + reg &= ~I2C_OWN_SLAVE_ADDRESS; 1050 1049 snprintf(unit_addr, sizeof(unit_addr), "%x", reg); 1051 1050 if (!streq(unitname, unit_addr)) 1052 1051 FAIL(c, dti, node, "I2C bus unit address format error, expected \"%s\"", ··· 1056 1051 1057 1052 for (len = prop->val.len; len > 0; len -= 4) { 1058 1053 reg = fdt32_to_cpu(*(cells++)); 1059 - if (reg > 0x3ff) 1054 + /* Ignore I2C_OWN_SLAVE_ADDRESS */ 1055 + reg &= ~I2C_OWN_SLAVE_ADDRESS; 1056 + 1057 + if ((reg & I2C_TEN_BIT_ADDRESS) && ((reg & ~I2C_TEN_BIT_ADDRESS) > 0x3ff)) 1060 1058 FAIL_PROP(c, dti, node, prop, "I2C address must be less than 10-bits, got \"0x%x\"", 1061 1059 reg); 1062 - 1060 + else if (reg > 0x7f) 1061 + FAIL_PROP(c, dti, node, prop, "I2C address must be less than 7-bits, got \"0x%x\". 
Set I2C_TEN_BIT_ADDRESS for 10 bit addresses or fix the property", 1062 + reg); 1063 1063 } 1064 1064 } 1065 1065 WARNING(i2c_bus_reg, check_i2c_bus_reg, NULL, &reg_format, &i2c_bus_bridge); ··· 1557 1547 1558 1548 return false; 1559 1549 } 1550 + 1551 + static void check_interrupt_provider(struct check *c, 1552 + struct dt_info *dti, 1553 + struct node *node) 1554 + { 1555 + struct property *prop; 1556 + 1557 + if (!node_is_interrupt_provider(node)) 1558 + return; 1559 + 1560 + prop = get_property(node, "#interrupt-cells"); 1561 + if (!prop) 1562 + FAIL(c, dti, node, 1563 + "Missing #interrupt-cells in interrupt provider"); 1564 + 1565 + prop = get_property(node, "#address-cells"); 1566 + if (!prop) 1567 + FAIL(c, dti, node, 1568 + "Missing #address-cells in interrupt provider"); 1569 + } 1570 + WARNING(interrupt_provider, check_interrupt_provider, NULL); 1571 + 1560 1572 static void check_interrupts_property(struct check *c, 1561 1573 struct dt_info *dti, 1562 1574 struct node *node) ··· 1636 1604 1637 1605 prop = get_property(irq_node, "#interrupt-cells"); 1638 1606 if (!prop) { 1639 - FAIL(c, dti, irq_node, "Missing #interrupt-cells in interrupt-parent"); 1607 + /* We warn about that already in another test. */ 1640 1608 return; 1641 1609 } 1642 1610 ··· 1860 1828 &deprecated_gpio_property, 1861 1829 &gpios_property, 1862 1830 &interrupts_property, 1831 + &interrupt_provider, 1863 1832 1864 1833 &alias_paths, 1865 1834
+31
scripts/dtc/dtc.h
··· 51 51 52 52 typedef uint32_t cell_t; 53 53 54 + static inline uint16_t dtb_ld16(const void *p) 55 + { 56 + const uint8_t *bp = (const uint8_t *)p; 57 + 58 + return ((uint16_t)bp[0] << 8) 59 + | bp[1]; 60 + } 61 + 62 + static inline uint32_t dtb_ld32(const void *p) 63 + { 64 + const uint8_t *bp = (const uint8_t *)p; 65 + 66 + return ((uint32_t)bp[0] << 24) 67 + | ((uint32_t)bp[1] << 16) 68 + | ((uint32_t)bp[2] << 8) 69 + | bp[3]; 70 + } 71 + 72 + static inline uint64_t dtb_ld64(const void *p) 73 + { 74 + const uint8_t *bp = (const uint8_t *)p; 75 + 76 + return ((uint64_t)bp[0] << 56) 77 + | ((uint64_t)bp[1] << 48) 78 + | ((uint64_t)bp[2] << 40) 79 + | ((uint64_t)bp[3] << 32) 80 + | ((uint64_t)bp[4] << 24) 81 + | ((uint64_t)bp[5] << 16) 82 + | ((uint64_t)bp[6] << 8) 83 + | bp[7]; 84 + } 54 85 55 86 #define streq(a, b) (strcmp((a), (b)) == 0) 56 87 #define strstarts(s, prefix) (strncmp((s), (prefix), strlen(prefix)) == 0)
+1 -1
scripts/dtc/flattree.c
··· 156 156 emit_offset_label(f, m->ref, m->offset); 157 157 158 158 while ((d.len - off) >= sizeof(uint32_t)) { 159 - asm_emit_cell(e, fdt32_to_cpu(*((fdt32_t *)(d.val+off)))); 159 + asm_emit_cell(e, dtb_ld32(d.val + off)); 160 160 off += sizeof(uint32_t); 161 161 } 162 162
+1 -1
scripts/dtc/libfdt/fdt_rw.c
··· 436 436 return struct_size; 437 437 } 438 438 439 - if (can_assume(LIBFDT_ORDER) | 439 + if (can_assume(LIBFDT_ORDER) || 440 440 !fdt_blocks_misordered_(fdt, mem_rsv_size, struct_size)) { 441 441 /* no further work necessary */ 442 442 err = fdt_move(fdt, buf, bufsize);
+1 -1
scripts/dtc/libfdt/fdt_sw.c
··· 32 32 /* 'memrsv' state: Initial state after fdt_create() 33 33 * 34 34 * Allowed functions: 35 - * fdt_add_reservmap_entry() 35 + * fdt_add_reservemap_entry() 36 36 * fdt_finish_reservemap() [moves to 'struct' state] 37 37 */ 38 38 static int fdt_sw_probe_memrsv_(void *fdt)
+8
scripts/dtc/libfdt/libfdt.h
··· 9 9 #include "libfdt_env.h" 10 10 #include "fdt.h" 11 11 12 + #ifdef __cplusplus 13 + extern "C" { 14 + #endif 15 + 12 16 #define FDT_FIRST_SUPPORTED_VERSION 0x02 13 17 #define FDT_LAST_SUPPORTED_VERSION 0x11 14 18 ··· 2072 2068 /**********************************************************************/ 2073 2069 2074 2070 const char *fdt_strerror(int errval); 2071 + 2072 + #ifdef __cplusplus 2073 + } 2074 + #endif 2075 2075 2076 2076 #endif /* LIBFDT_H */
+4 -4
scripts/dtc/treesource.c
··· 110 110 fprintf(f, "%02"PRIx8, *(const uint8_t*)p); 111 111 break; 112 112 case 2: 113 - fprintf(f, "0x%02"PRIx16, fdt16_to_cpu(*(const fdt16_t*)p)); 113 + fprintf(f, "0x%02"PRIx16, dtb_ld16(p)); 114 114 break; 115 115 case 4: 116 - fprintf(f, "0x%02"PRIx32, fdt32_to_cpu(*(const fdt32_t*)p)); 116 + fprintf(f, "0x%02"PRIx32, dtb_ld32(p)); 117 117 break; 118 118 case 8: 119 - fprintf(f, "0x%02"PRIx64, fdt64_to_cpu(*(const fdt64_t*)p)); 119 + fprintf(f, "0x%02"PRIx64, dtb_ld64(p)); 120 120 break; 121 121 } 122 122 if (p + width < end) ··· 183 183 nnotcelllbl++; 184 184 } 185 185 186 - if ((p[len-1] == '\0') && (nnotstring == 0) && (nnul < (len-nnul)) 186 + if ((p[len-1] == '\0') && (nnotstring == 0) && (nnul <= (len-nnul)) 187 187 && (nnotstringlbl == 0)) { 188 188 return TYPE_STRING; 189 189 } else if (((len % sizeof(cell_t)) == 0) && (nnotcelllbl == 0)) {
+1 -1
scripts/dtc/version_gen.h
··· 1 - #define DTC_VERSION "DTC 1.6.0-g87a656ae" 1 + #define DTC_VERSION "DTC 1.6.0-g9d7888cb"
+3 -3
scripts/dtc/yamltree.c
··· 59 59 sprintf(buf, "0x%"PRIx8, *(uint8_t*)(data + off)); 60 60 break; 61 61 case 2: 62 - sprintf(buf, "0x%"PRIx16, fdt16_to_cpu(*(fdt16_t*)(data + off))); 62 + sprintf(buf, "0x%"PRIx16, dtb_ld16(data + off)); 63 63 break; 64 64 case 4: 65 - sprintf(buf, "0x%"PRIx32, fdt32_to_cpu(*(fdt32_t*)(data + off))); 65 + sprintf(buf, "0x%"PRIx32, dtb_ld32(data + off)); 66 66 m = markers; 67 67 is_phandle = false; 68 68 for_each_marker_of_type(m, REF_PHANDLE) { ··· 73 73 } 74 74 break; 75 75 case 8: 76 - sprintf(buf, "0x%"PRIx64, fdt64_to_cpu(*(fdt64_t*)(data + off))); 76 + sprintf(buf, "0x%"PRIx64, dtb_ld64(data + off)); 77 77 break; 78 78 } 79 79
+1 -1
scripts/gcc-plugins/Kconfig
··· 78 78 source tree isn't cleaned after kernel installation). 79 79 80 80 The seed used for compilation is located at 81 - scripts/gcc-plgins/randomize_layout_seed.h. It remains after 81 + scripts/gcc-plugins/randomize_layout_seed.h. It remains after 82 82 a make clean to allow for external modules to be compiled with 83 83 the existing seed and will be removed by a make mrproper or 84 84 make distclean.
+1 -1
scripts/gdb/linux/symbols.py
··· 96 96 return "" 97 97 attrs = sect_attrs['attrs'] 98 98 section_name_to_address = { 99 - attrs[n]['name'].string(): attrs[n]['address'] 99 + attrs[n]['battr']['attr']['name'].string(): attrs[n]['address'] 100 100 for n in range(int(sect_attrs['nsections']))} 101 101 args = [] 102 102 for section_name in [".data", ".data..read_mostly", ".rodata", ".bss",
+106 -67
scripts/kconfig/qconf.cc
··· 4 4 * Copyright (C) 2015 Boris Barbulovski <bbarbulovski@gmail.com> 5 5 */ 6 6 7 - #include <qglobal.h> 8 - 9 - #include <QMainWindow> 10 - #include <QList> 11 - #include <qtextbrowser.h> 12 7 #include <QAction> 8 + #include <QApplication> 9 + #include <QCloseEvent> 10 + #include <QDebug> 11 + #include <QDesktopWidget> 13 12 #include <QFileDialog> 13 + #include <QLabel> 14 + #include <QLayout> 15 + #include <QList> 14 16 #include <QMenu> 15 - 16 - #include <qapplication.h> 17 - #include <qdesktopwidget.h> 18 - #include <qtoolbar.h> 19 - #include <qlayout.h> 20 - #include <qsplitter.h> 21 - #include <qlineedit.h> 22 - #include <qlabel.h> 23 - #include <qpushbutton.h> 24 - #include <qmenubar.h> 25 - #include <qmessagebox.h> 26 - #include <qregexp.h> 27 - #include <qevent.h> 17 + #include <QMenuBar> 18 + #include <QMessageBox> 19 + #include <QToolBar> 28 20 29 21 #include <stdlib.h> 30 22 ··· 437 445 if (rootEntry != &rootmenu && (mode == singleMode || 438 446 (mode == symbolMode && rootEntry->parent != &rootmenu))) { 439 447 item = (ConfigItem *)topLevelItem(0); 440 - if (!item) 448 + if (!item && mode != symbolMode) { 441 449 item = new ConfigItem(this, 0, true); 442 - last = item; 450 + last = item; 451 + } 443 452 } 444 453 if ((mode == singleMode || (mode == symbolMode && !(rootEntry->flags & MENU_ROOT))) && 445 454 rootEntry->sym && rootEntry->prompt) { ··· 538 545 rootEntry = menu; 539 546 updateListAll(); 540 547 if (currentItem()) { 541 - currentItem()->setSelected(hasFocus()); 548 + setSelected(currentItem(), hasFocus()); 542 549 scrollToItem(currentItem()); 543 550 } 544 551 } ··· 866 873 867 874 ConfigItem* item = (ConfigItem *)currentItem(); 868 875 if (item) { 869 - item->setSelected(true); 876 + setSelected(item, true); 870 877 menu = item->menu; 871 878 } 872 879 emit gotFocus(menu); ··· 1014 1021 : Parent(parent), sym(0), _menu(0) 1015 1022 { 1016 1023 setObjectName(name); 1017 - 1024 + setOpenLinks(false); 1018 1025 1019 1026 if 
(!objectName().isEmpty()) { 1020 1027 configSettings->beginGroup(objectName()); ··· 1087 1094 if (sym->name) { 1088 1095 head += " ("; 1089 1096 if (showDebug()) 1090 - head += QString().sprintf("<a href=\"s%p\">", sym); 1097 + head += QString().sprintf("<a href=\"s%s\">", sym->name); 1091 1098 head += print_filter(sym->name); 1092 1099 if (showDebug()) 1093 1100 head += "</a>"; ··· 1096 1103 } else if (sym->name) { 1097 1104 head += "<big><b>"; 1098 1105 if (showDebug()) 1099 - head += QString().sprintf("<a href=\"s%p\">", sym); 1106 + head += QString().sprintf("<a href=\"s%s\">", sym->name); 1100 1107 head += print_filter(sym->name); 1101 1108 if (showDebug()) 1102 1109 head += "</a>"; ··· 1147 1154 switch (prop->type) { 1148 1155 case P_PROMPT: 1149 1156 case P_MENU: 1150 - debug += QString().sprintf("prompt: <a href=\"m%p\">", prop->menu); 1157 + debug += QString().sprintf("prompt: <a href=\"m%s\">", sym->name); 1151 1158 debug += print_filter(prop->text); 1152 1159 debug += "</a><br>"; 1153 1160 break; 1154 1161 case P_DEFAULT: 1155 1162 case P_SELECT: 1156 1163 case P_RANGE: 1164 + case P_COMMENT: 1165 + case P_IMPLY: 1166 + case P_SYMBOL: 1157 1167 debug += prop_get_type_name(prop->type); 1158 1168 debug += ": "; 1159 1169 expr_print(prop->expr, expr_print_help, &debug, E_NONE); ··· 1222 1226 QString str2 = print_filter(str); 1223 1227 1224 1228 if (sym && sym->name && !(sym->flags & SYMBOL_CONST)) { 1225 - *text += QString().sprintf("<a href=\"s%p\">", sym); 1229 + *text += QString().sprintf("<a href=\"s%s\">", sym->name); 1226 1230 *text += str2; 1227 1231 *text += "</a>"; 1228 1232 } else 1229 1233 *text += str2; 1234 + } 1235 + 1236 + void ConfigInfoView::clicked(const QUrl &url) 1237 + { 1238 + QByteArray str = url.toEncoded(); 1239 + const std::size_t count = str.size(); 1240 + char *data = new char[count + 1]; 1241 + struct symbol **result; 1242 + struct menu *m = NULL; 1243 + 1244 + if (count < 1) { 1245 + qInfo() << "Clicked link is empty"; 1246 + 
delete data; 1247 + return; 1248 + } 1249 + 1250 + memcpy(data, str.constData(), count); 1251 + data[count] = '\0'; 1252 + 1253 + /* Seek for exact match */ 1254 + data[0] = '^'; 1255 + strcat(data, "$"); 1256 + result = sym_re_search(data); 1257 + if (!result) { 1258 + qInfo() << "Clicked symbol is invalid:" << data; 1259 + delete data; 1260 + return; 1261 + } 1262 + 1263 + sym = *result; 1264 + 1265 + /* Seek for the menu which holds the symbol */ 1266 + for (struct property *prop = sym->prop; prop; prop = prop->next) { 1267 + if (prop->type != P_PROMPT && prop->type != P_MENU) 1268 + continue; 1269 + m = prop->menu; 1270 + break; 1271 + } 1272 + 1273 + if (!m) { 1274 + /* Symbol is not visible as a menu */ 1275 + symbolInfo(); 1276 + emit showDebugChanged(true); 1277 + } else { 1278 + emit menuSelected(m); 1279 + } 1280 + 1281 + free(result); 1282 + delete data; 1230 1283 } 1231 1284 1232 1285 QMenu* ConfigInfoView::createStandardContextMenu(const QPoint & pos) ··· 1447 1402 addToolBar(toolBar); 1448 1403 1449 1404 backAction = new QAction(QPixmap(xpm_back), "Back", this); 1450 - connect(backAction, SIGNAL(triggered(bool)), SLOT(goBack())); 1451 - backAction->setEnabled(false); 1405 + connect(backAction, SIGNAL(triggered(bool)), SLOT(goBack())); 1406 + 1452 1407 QAction *quitAction = new QAction("&Quit", this); 1453 1408 quitAction->setShortcut(Qt::CTRL + Qt::Key_Q); 1454 - connect(quitAction, SIGNAL(triggered(bool)), SLOT(close())); 1409 + connect(quitAction, SIGNAL(triggered(bool)), SLOT(close())); 1410 + 1455 1411 QAction *loadAction = new QAction(QPixmap(xpm_load), "&Load", this); 1456 1412 loadAction->setShortcut(Qt::CTRL + Qt::Key_L); 1457 - connect(loadAction, SIGNAL(triggered(bool)), SLOT(loadConfig())); 1413 + connect(loadAction, SIGNAL(triggered(bool)), SLOT(loadConfig())); 1414 + 1458 1415 saveAction = new QAction(QPixmap(xpm_save), "&Save", this); 1459 1416 saveAction->setShortcut(Qt::CTRL + Qt::Key_S); 1460 - connect(saveAction, 
SIGNAL(triggered(bool)), SLOT(saveConfig())); 1417 + connect(saveAction, SIGNAL(triggered(bool)), SLOT(saveConfig())); 1418 + 1461 1419 conf_set_changed_callback(conf_changed); 1420 + 1462 1421 // Set saveAction's initial state 1463 1422 conf_changed(); 1464 1423 configname = xstrdup(conf_get_configname()); ··· 1554 1505 QMenu* helpMenu = menu->addMenu("&Help"); 1555 1506 helpMenu->addAction(showIntroAction); 1556 1507 helpMenu->addAction(showAboutAction); 1508 + 1509 + connect (helpText, SIGNAL (anchorClicked (const QUrl &)), 1510 + helpText, SLOT (clicked (const QUrl &)) ); 1557 1511 1558 1512 connect(configList, SIGNAL(menuChanged(struct menu *)), 1559 1513 helpText, SLOT(setInfo(struct menu *))); ··· 1663 1611 void ConfigMainWindow::changeItens(struct menu *menu) 1664 1612 { 1665 1613 configList->setRootMenu(menu); 1666 - 1667 - if (configList->rootEntry->parent == &rootmenu) 1668 - backAction->setEnabled(false); 1669 - else 1670 - backAction->setEnabled(true); 1671 1614 } 1672 1615 1673 1616 void ConfigMainWindow::changeMenu(struct menu *menu) 1674 1617 { 1675 1618 menuList->setRootMenu(menu); 1676 - 1677 - if (menuList->rootEntry->parent == &rootmenu) 1678 - backAction->setEnabled(false); 1679 - else 1680 - backAction->setEnabled(true); 1681 1619 } 1682 1620 1683 1621 void ConfigMainWindow::setMenuLink(struct menu *menu) ··· 1687 1645 return; 1688 1646 list->setRootMenu(parent); 1689 1647 break; 1690 - case symbolMode: 1648 + case menuMode: 1691 1649 if (menu->flags & MENU_ROOT) { 1692 - configList->setRootMenu(menu); 1650 + menuList->setRootMenu(menu); 1693 1651 configList->clearSelection(); 1694 - list = menuList; 1695 - } else { 1696 1652 list = configList; 1653 + } else { 1697 1654 parent = menu_get_parent_menu(menu->parent); 1698 1655 if (!parent) 1699 1656 return; 1700 - item = menuList->findConfigItem(parent); 1657 + 1658 + /* Select the config view */ 1659 + item = configList->findConfigItem(parent); 1701 1660 if (item) { 1702 - 
item->setSelected(true); 1703 - menuList->scrollToItem(item); 1661 + configList->setSelected(item, true); 1662 + configList->scrollToItem(item); 1704 1663 } 1705 - list->setRootMenu(parent); 1664 + 1665 + menuList->setRootMenu(parent); 1666 + menuList->clearSelection(); 1667 + list = menuList; 1706 1668 } 1707 1669 break; 1708 1670 case fullMode: ··· 1719 1673 if (list) { 1720 1674 item = list->findConfigItem(menu); 1721 1675 if (item) { 1722 - item->setSelected(true); 1676 + list->setSelected(item, true); 1723 1677 list->scrollToItem(item); 1724 1678 list->setFocus(); 1679 + helpText->setInfo(menu); 1725 1680 } 1726 1681 } 1727 1682 } ··· 1735 1688 1736 1689 void ConfigMainWindow::goBack(void) 1737 1690 { 1738 - ConfigItem* item, *oldSelection; 1739 - 1740 - configList->setParentMenu(); 1691 + qInfo() << __FUNCTION__; 1741 1692 if (configList->rootEntry == &rootmenu) 1742 - backAction->setEnabled(false); 1743 - 1744 - if (menuList->selectedItems().count() == 0) 1745 1693 return; 1746 1694 1747 - item = (ConfigItem*)menuList->selectedItems().first(); 1748 - oldSelection = item; 1749 - while (item) { 1750 - if (item->menu == configList->rootEntry) { 1751 - oldSelection->setSelected(false); 1752 - item->setSelected(true); 1753 - break; 1754 - } 1755 - item = (ConfigItem*)item->parent(); 1756 - } 1695 + configList->setParentMenu(); 1757 1696 } 1758 1697 1759 1698 void ConfigMainWindow::showSingleView(void) ··· 1750 1717 splitViewAction->setChecked(false); 1751 1718 fullViewAction->setEnabled(true); 1752 1719 fullViewAction->setChecked(false); 1720 + 1721 + backAction->setEnabled(true); 1753 1722 1754 1723 menuView->hide(); 1755 1724 menuList->setRootMenu(0); ··· 1771 1736 splitViewAction->setChecked(true); 1772 1737 fullViewAction->setEnabled(true); 1773 1738 fullViewAction->setChecked(false); 1739 + 1740 + backAction->setEnabled(false); 1774 1741 1775 1742 configList->mode = menuMode; 1776 1743 if (configList->rootEntry == &rootmenu) ··· 1796 1759 
splitViewAction->setChecked(false); 1797 1760 fullViewAction->setEnabled(false); 1798 1761 fullViewAction->setChecked(true); 1762 + 1763 + backAction->setEnabled(false); 1799 1764 1800 1765 menuView->hide(); 1801 1766 menuList->setRootMenu(0);
+17 -10
scripts/kconfig/qconf.h
··· 3 3 * Copyright (C) 2002 Roman Zippel <zippel@linux-m68k.org> 4 4 */ 5 5 6 - #include <QTextBrowser> 7 - #include <QTreeWidget> 8 - #include <QMainWindow> 9 - #include <QHeaderView> 10 - #include <qsettings.h> 11 - #include <QPushButton> 12 - #include <QSettings> 13 - #include <QLineEdit> 14 - #include <QSplitter> 15 6 #include <QCheckBox> 16 7 #include <QDialog> 8 + #include <QHeaderView> 9 + #include <QLineEdit> 10 + #include <QMainWindow> 11 + #include <QPushButton> 12 + #include <QSettings> 13 + #include <QSplitter> 14 + #include <QTextBrowser> 15 + #include <QTreeWidget> 16 + 17 17 #include "expr.h" 18 18 19 19 class ConfigView; ··· 45 45 public: 46 46 ConfigList(ConfigView* p, const char *name = 0); 47 47 void reinit(void); 48 + ConfigItem* findConfigItem(struct menu *); 48 49 ConfigView* parent(void) const 49 50 { 50 51 return (ConfigView*)Parent::parent(); 51 52 } 52 - ConfigItem* findConfigItem(struct menu *); 53 + void setSelected(QTreeWidgetItem *item, bool enable) { 54 + for (int i = 0; i < selectedItems().size(); i++) 55 + selectedItems().at(i)->setSelected(false); 56 + 57 + item->setSelected(enable); 58 + } 53 59 54 60 protected: 55 61 void keyPressEvent(QKeyEvent *e); ··· 256 250 void setInfo(struct menu *menu); 257 251 void saveSettings(void); 258 252 void setShowDebug(bool); 253 + void clicked (const QUrl &url); 259 254 260 255 signals: 261 256 void showDebugChanged(bool);
+10 -2
scripts/mod/modpost.c
··· 138 138 139 139 char *get_line(char **stringp) 140 140 { 141 + char *orig = *stringp, *next; 142 + 141 143 /* do not return the unwanted extra line at EOF */ 142 - if (*stringp && **stringp == '\0') 144 + if (!orig || *orig == '\0') 143 145 return NULL; 144 146 145 - return strsep(stringp, "\n"); 147 + next = strchr(orig, '\n'); 148 + if (next) 149 + *next++ = '\0'; 150 + 151 + *stringp = next; 152 + 153 + return orig; 146 154 } 147 155 148 156 /* A list of all modules we processed */
+1 -13
security/integrity/iint.c
··· 188 188 int integrity_kernel_read(struct file *file, loff_t offset, 189 189 void *addr, unsigned long count) 190 190 { 191 - mm_segment_t old_fs; 192 - char __user *buf = (char __user *)addr; 193 - ssize_t ret; 194 - 195 - if (!(file->f_mode & FMODE_READ)) 196 - return -EBADF; 197 - 198 - old_fs = get_fs(); 199 - set_fs(KERNEL_DS); 200 - ret = __vfs_read(file, buf, count, &offset); 201 - set_fs(old_fs); 202 - 203 - return ret; 191 + return __kernel_read(file, addr, count, &offset); 204 192 } 205 193 206 194 /*
+1 -1
security/integrity/ima/ima.h
··· 30 30 31 31 enum ima_show_type { IMA_SHOW_BINARY, IMA_SHOW_BINARY_NO_FIELD_LEN, 32 32 IMA_SHOW_BINARY_OLD_STRING_FMT, IMA_SHOW_ASCII }; 33 - enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 }; 33 + enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8, TPM_PCR10 = 10 }; 34 34 35 35 /* digest size for IMA, fits SHA1 or MD5 */ 36 36 #define IMA_DIGEST_SIZE SHA1_DIGEST_SIZE
+14 -1
security/integrity/ima/ima_crypto.c
··· 823 823 if (rc != 0) 824 824 return rc; 825 825 826 - /* cumulative sha1 over tpm registers 0-7 */ 826 + /* cumulative digest over TPM registers 0-7 */ 827 827 for (i = TPM_PCR0; i < TPM_PCR8; i++) { 828 828 ima_pcrread(i, &d); 829 829 /* now accumulate with current aggregate */ 830 830 rc = crypto_shash_update(shash, d.digest, 831 831 crypto_shash_digestsize(tfm)); 832 + } 833 + /* 834 + * Extend cumulative digest over TPM registers 8-9, which contain 835 + * measurement for the kernel command line (reg. 8) and image (reg. 9) 836 + * in a typical PCR allocation. Registers 8-9 are only included in 837 + * non-SHA1 boot_aggregate digests to avoid ambiguity. 838 + */ 839 + if (alg_id != TPM_ALG_SHA1) { 840 + for (i = TPM_PCR8; i < TPM_PCR10; i++) { 841 + ima_pcrread(i, &d); 842 + rc = crypto_shash_update(shash, d.digest, 843 + crypto_shash_digestsize(tfm)); 844 + } 832 845 } 833 846 if (!rc) 834 847 crypto_shash_final(shash, digest);
+16 -1
security/security.c
··· 1414 1414 1415 1415 int security_inode_copy_up_xattr(const char *name) 1416 1416 { 1417 - return call_int_hook(inode_copy_up_xattr, -EOPNOTSUPP, name); 1417 + struct security_hook_list *hp; 1418 + int rc; 1419 + 1420 + /* 1421 + * The implementation can return 0 (accept the xattr), 1 (discard the 1422 + * xattr), -EOPNOTSUPP if it does not know anything about the xattr or 1423 + * any other error code incase of an error. 1424 + */ 1425 + hlist_for_each_entry(hp, 1426 + &security_hook_heads.inode_copy_up_xattr, list) { 1427 + rc = hp->hook.inode_copy_up_xattr(name); 1428 + if (rc != LSM_RET_DEFAULT(inode_copy_up_xattr)) 1429 + return rc; 1430 + } 1431 + 1432 + return LSM_RET_DEFAULT(inode_copy_up_xattr); 1418 1433 } 1419 1434 EXPORT_SYMBOL(security_inode_copy_up_xattr); 1420 1435
+4
sound/core/compress_offload.c
··· 764 764 765 765 retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP); 766 766 if (!retval) { 767 + /* clear flags and stop any drain wait */ 768 + stream->partial_drain = false; 769 + stream->metadata_set = false; 767 770 snd_compr_drain_notify(stream); 768 771 stream->runtime->total_bytes_available = 0; 769 772 stream->runtime->total_bytes_transferred = 0; ··· 924 921 if (stream->next_track == false) 925 922 return -EPERM; 926 923 924 + stream->partial_drain = true; 927 925 retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_PARTIAL_DRAIN); 928 926 if (retval) { 929 927 pr_debug("Partial drain returned failure\n");
+3 -1
sound/core/info.c
··· 606 606 { 607 607 int c; 608 608 609 - if (snd_BUG_ON(!buffer || !buffer->buffer)) 609 + if (snd_BUG_ON(!buffer)) 610 + return 1; 611 + if (!buffer->buffer) 610 612 return 1; 611 613 if (len <= 0 || buffer->stop || buffer->error) 612 614 return 1;
+2
sound/drivers/opl3/opl3_synth.c
··· 91 91 { 92 92 struct snd_dm_fm_info info; 93 93 94 + memset(&info, 0, sizeof(info)); 95 + 94 96 info.fm_mode = opl3->fm_mode; 95 97 info.rhythm = opl3->rhythm; 96 98 if (copy_to_user(argp, &info, sizeof(struct snd_dm_fm_info)))
+6
sound/pci/hda/hda_auto_parser.c
··· 72 72 if (a->type != b->type) 73 73 return (int)(a->type - b->type); 74 74 75 + /* If has both hs_mic and hp_mic, pick the hs_mic ahead of hp_mic. */ 76 + if (a->is_headset_mic && b->is_headphone_mic) 77 + return -1; /* don't swap */ 78 + else if (a->is_headphone_mic && b->is_headset_mic) 79 + return 1; /* swap */ 80 + 75 81 /* In case one has boost and the other one has not, 76 82 pick the one with boost first. */ 77 83 return (int)(b->has_boost_on_pin - a->has_boost_on_pin);
+26 -15
sound/pci/hda/patch_hdmi.c
··· 259 259 if (get_pcm_rec(spec, pcm_idx)->stream == hinfo) 260 260 return pcm_idx; 261 261 262 - codec_warn(codec, "HDMI: hinfo %p not registered\n", hinfo); 262 + codec_warn(codec, "HDMI: hinfo %p not tied to a PCM\n", hinfo); 263 263 return -EINVAL; 264 264 } 265 265 ··· 277 277 return pin_idx; 278 278 } 279 279 280 - codec_dbg(codec, "HDMI: hinfo %p not registered\n", hinfo); 280 + codec_dbg(codec, "HDMI: hinfo %p (pcm %d) not registered\n", hinfo, 281 + hinfo_to_pcm_index(codec, hinfo)); 281 282 return -EINVAL; 282 283 } 283 284 ··· 1805 1804 1806 1805 static int hdmi_parse_codec(struct hda_codec *codec) 1807 1806 { 1808 - hda_nid_t nid; 1807 + hda_nid_t start_nid; 1808 + unsigned int caps; 1809 1809 int i, nodes; 1810 1810 1811 - nodes = snd_hda_get_sub_nodes(codec, codec->core.afg, &nid); 1812 - if (!nid || nodes < 0) { 1811 + nodes = snd_hda_get_sub_nodes(codec, codec->core.afg, &start_nid); 1812 + if (!start_nid || nodes < 0) { 1813 1813 codec_warn(codec, "HDMI: failed to get afg sub nodes\n"); 1814 1814 return -EINVAL; 1815 1815 } 1816 1816 1817 - for (i = 0; i < nodes; i++, nid++) { 1818 - unsigned int caps; 1819 - unsigned int type; 1817 + /* 1818 + * hdmi_add_pin() assumes total amount of converters to 1819 + * be known, so first discover all converters 1820 + */ 1821 + for (i = 0; i < nodes; i++) { 1822 + hda_nid_t nid = start_nid + i; 1820 1823 1821 1824 caps = get_wcaps(codec, nid); 1822 - type = get_wcaps_type(caps); 1823 1825 1824 1826 if (!(caps & AC_WCAP_DIGITAL)) 1825 1827 continue; 1826 1828 1827 - switch (type) { 1828 - case AC_WID_AUD_OUT: 1829 + if (get_wcaps_type(caps) == AC_WID_AUD_OUT) 1829 1830 hdmi_add_cvt(codec, nid); 1830 - break; 1831 - case AC_WID_PIN: 1831 + } 1832 + 1833 + /* discover audio pins */ 1834 + for (i = 0; i < nodes; i++) { 1835 + hda_nid_t nid = start_nid + i; 1836 + 1837 + caps = get_wcaps(codec, nid); 1838 + 1839 + if (!(caps & AC_WCAP_DIGITAL)) 1840 + continue; 1841 + 1842 + if (get_wcaps_type(caps) == AC_WID_PIN) 
1832 1843 hdmi_add_pin(codec, nid); 1833 - break; 1834 - } 1835 1844 } 1836 1845 1837 1846 return 0;
+63 -4
sound/pci/hda/patch_realtek.c
··· 6149 6149 ALC236_FIXUP_HP_MUTE_LED, 6150 6150 ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, 6151 6151 ALC295_FIXUP_ASUS_MIC_NO_PRESENCE, 6152 + ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS, 6153 + ALC269VC_FIXUP_ACER_HEADSET_MIC, 6154 + ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE, 6155 + ALC289_FIXUP_ASUS_G401, 6156 + ALC256_FIXUP_ACER_MIC_NO_PRESENCE, 6152 6157 }; 6153 6158 6154 6159 static const struct hda_fixup alc269_fixups[] = { ··· 7119 7114 { } 7120 7115 }, 7121 7116 .chained = true, 7122 - .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC 7117 + .chain_id = ALC269_FIXUP_HEADSET_MIC 7123 7118 }, 7124 7119 [ALC294_FIXUP_ASUS_HEADSET_MIC] = { 7125 7120 .type = HDA_FIXUP_PINS, ··· 7128 7123 { } 7129 7124 }, 7130 7125 .chained = true, 7131 - .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC 7126 + .chain_id = ALC269_FIXUP_HEADSET_MIC 7132 7127 }, 7133 7128 [ALC294_FIXUP_ASUS_SPK] = { 7134 7129 .type = HDA_FIXUP_VERBS, ··· 7136 7131 /* Set EAPD high */ 7137 7132 { 0x20, AC_VERB_SET_COEF_INDEX, 0x40 }, 7138 7133 { 0x20, AC_VERB_SET_PROC_COEF, 0x8800 }, 7134 + { 0x20, AC_VERB_SET_COEF_INDEX, 0x0f }, 7135 + { 0x20, AC_VERB_SET_PROC_COEF, 0x7774 }, 7139 7136 { } 7140 7137 }, 7141 7138 .chained = true, ··· 7334 7327 .chained = true, 7335 7328 .chain_id = ALC269_FIXUP_HEADSET_MODE 7336 7329 }, 7330 + [ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS] = { 7331 + .type = HDA_FIXUP_PINS, 7332 + .v.pins = (const struct hda_pintbl[]) { 7333 + { 0x14, 0x90100120 }, /* use as internal speaker */ 7334 + { 0x18, 0x02a111f0 }, /* use as headset mic, without its own jack detect */ 7335 + { 0x1a, 0x01011020 }, /* use as line out */ 7336 + { }, 7337 + }, 7338 + .chained = true, 7339 + .chain_id = ALC269_FIXUP_HEADSET_MIC 7340 + }, 7341 + [ALC269VC_FIXUP_ACER_HEADSET_MIC] = { 7342 + .type = HDA_FIXUP_PINS, 7343 + .v.pins = (const struct hda_pintbl[]) { 7344 + { 0x18, 0x02a11030 }, /* use as headset mic */ 7345 + { } 7346 + }, 7347 + .chained = true, 7348 + .chain_id = ALC269_FIXUP_HEADSET_MIC 7349 + }, 7350 + 
[ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE] = { 7351 + .type = HDA_FIXUP_PINS, 7352 + .v.pins = (const struct hda_pintbl[]) { 7353 + { 0x18, 0x01a11130 }, /* use as headset mic, without its own jack detect */ 7354 + { } 7355 + }, 7356 + .chained = true, 7357 + .chain_id = ALC269_FIXUP_HEADSET_MIC 7358 + }, 7359 + [ALC289_FIXUP_ASUS_G401] = { 7360 + .type = HDA_FIXUP_PINS, 7361 + .v.pins = (const struct hda_pintbl[]) { 7362 + { 0x19, 0x03a11020 }, /* headset mic with jack detect */ 7363 + { } 7364 + }, 7365 + }, 7366 + [ALC256_FIXUP_ACER_MIC_NO_PRESENCE] = { 7367 + .type = HDA_FIXUP_PINS, 7368 + .v.pins = (const struct hda_pintbl[]) { 7369 + { 0x19, 0x02a11120 }, /* use as headset mic, without its own jack detect */ 7370 + { } 7371 + }, 7372 + .chained = true, 7373 + .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE 7374 + }, 7337 7375 }; 7338 7376 7339 7377 static const struct snd_pci_quirk alc269_fixup_tbl[] = { ··· 7394 7342 SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572), 7395 7343 SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS), 7396 7344 SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE), 7345 + SND_PCI_QUIRK(0x1025, 0x1065, "Acer Aspire C20-820", ALC269VC_FIXUP_ACER_HEADSET_MIC), 7397 7346 SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK), 7398 7347 SND_PCI_QUIRK(0x1025, 0x1099, "Acer Aspire E5-523G", ALC255_FIXUP_ACER_MIC_NO_PRESENCE), 7399 7348 SND_PCI_QUIRK(0x1025, 0x110e, "Acer Aspire ES1-432", ALC255_FIXUP_ACER_MIC_NO_PRESENCE), 7400 7349 SND_PCI_QUIRK(0x1025, 0x1246, "Acer Predator Helios 500", ALC299_FIXUP_PREDATOR_SPK), 7350 + SND_PCI_QUIRK(0x1025, 0x1247, "Acer vCopperbox", ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS), 7351 + SND_PCI_QUIRK(0x1025, 0x1248, "Acer Veriton N4660G", ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE), 7401 7352 SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), 7402 7353 
SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), 7403 7354 SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), 7404 7355 SND_PCI_QUIRK(0x1025, 0x1308, "Acer Aspire Z24-890", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), 7405 7356 SND_PCI_QUIRK(0x1025, 0x132a, "Acer TravelMate B114-21", ALC233_FIXUP_ACER_HEADSET_MIC), 7406 7357 SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC), 7358 + SND_PCI_QUIRK(0x1025, 0x1430, "Acer TravelMate B311R-31", ALC256_FIXUP_ACER_MIC_NO_PRESENCE), 7407 7359 SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), 7408 7360 SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS), 7409 7361 SND_PCI_QUIRK(0x1028, 0x05bd, "Dell Latitude E6440", ALC292_FIXUP_DELL_E7X), ··· 7551 7495 SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK), 7552 7496 SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC), 7553 7497 SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC), 7498 + SND_PCI_QUIRK(0x1043, 0x194e, "ASUS UX563FD", ALC294_FIXUP_ASUS_HPE), 7554 7499 SND_PCI_QUIRK(0x1043, 0x19ce, "ASUS B9450FA", ALC294_FIXUP_ASUS_HPE), 7555 7500 SND_PCI_QUIRK(0x1043, 0x19e1, "ASUS UX581LV", ALC295_FIXUP_ASUS_MIC_NO_PRESENCE), 7556 7501 SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW), ··· 7561 7504 SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), 7562 7505 SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 7563 7506 SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC), 7507 + SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_G401), 7564 7508 SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2), 7565 7509 SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC), 7566 7510 SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", 
ALC269_FIXUP_STEREO_DMIC), ··· 7581 7523 SND_PCI_QUIRK(0x10cf, 0x1629, "Lifebook U7x7", ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC), 7582 7524 SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC), 7583 7525 SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE), 7526 + SND_PCI_QUIRK(0x10ec, 0x1230, "Intel Reference board", ALC225_FIXUP_HEADSET_JACK), 7584 7527 SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_HEADSET_MODE), 7585 7528 SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC), 7586 7529 SND_PCI_QUIRK(0x144d, 0xc169, "Samsung Notebook 9 Pen (NP930SBE-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), 7587 7530 SND_PCI_QUIRK(0x144d, 0xc176, "Samsung Notebook 9 Pro (NP930MBE-K04US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), 7588 7531 SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8), 7532 + SND_PCI_QUIRK(0x144d, 0xc812, "Samsung Notebook Pen S (NT950SBE-X58)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), 7589 7533 SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC), 7590 7534 SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC), 7591 7535 SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC), ··· 7631 7571 SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), 7632 7572 SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), 7633 7573 SND_PCI_QUIRK(0x17aa, 0x225d, "Thinkpad T480", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 7634 - SND_PCI_QUIRK(0x17aa, 0x2292, "Thinkpad X1 Yoga 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK), 7635 - SND_PCI_QUIRK(0x17aa, 0x2293, "Thinkpad X1 Carbon 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK), 7574 + SND_PCI_QUIRK(0x17aa, 0x2292, "Thinkpad X1 Carbon 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK), 7636 7575 SND_PCI_QUIRK(0x17aa, 0x22be, "Thinkpad X1 Carbon 8th", ALC285_FIXUP_THINKPAD_HEADSET_JACK), 7637 7576 
SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), 7638 7577 SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+1 -3
sound/soc/amd/raven/pci-acp3x.c
··· 232 232 } 233 233 pm_runtime_set_autosuspend_delay(&pci->dev, 2000); 234 234 pm_runtime_use_autosuspend(&pci->dev); 235 - pm_runtime_set_active(&pci->dev); 236 235 pm_runtime_put_noidle(&pci->dev); 237 - pm_runtime_enable(&pci->dev); 238 236 pm_runtime_allow(&pci->dev); 239 237 return 0; 240 238 ··· 301 303 ret = acp3x_deinit(adata->acp3x_base); 302 304 if (ret) 303 305 dev_err(&pci->dev, "ACP de-init failed\n"); 304 - pm_runtime_disable(&pci->dev); 306 + pm_runtime_forbid(&pci->dev); 305 307 pm_runtime_get_noresume(&pci->dev); 306 308 pci_disable_msi(pci); 307 309 pci_release_regions(pci);
+4 -3
sound/soc/amd/renoir/Makefile
··· 2 2 # Renoir platform Support 3 3 snd-rn-pci-acp3x-objs := rn-pci-acp3x.o 4 4 snd-acp3x-pdm-dma-objs := acp3x-pdm-dma.o 5 - obj-$(CONFIG_SND_SOC_AMD_RENOIR) += snd-rn-pci-acp3x.o 6 - obj-$(CONFIG_SND_SOC_AMD_RENOIR) += snd-acp3x-pdm-dma.o 7 - obj-$(CONFIG_SND_SOC_AMD_RENOIR_MACH) += acp3x-rn.o 5 + snd-acp3x-rn-objs := acp3x-rn.o 6 + obj-$(CONFIG_SND_SOC_AMD_RENOIR) += snd-rn-pci-acp3x.o 7 + obj-$(CONFIG_SND_SOC_AMD_RENOIR) += snd-acp3x-pdm-dma.o 8 + obj-$(CONFIG_SND_SOC_AMD_RENOIR_MACH) += snd-acp3x-rn.o
-8
sound/soc/codecs/max98373.c
··· 779 779 regmap_write(max98373->regmap, 780 780 MAX98373_R202A_PCM_TO_SPK_MONO_MIX_2, 781 781 0x1); 782 - /* Set inital volume (0dB) */ 783 - regmap_write(max98373->regmap, 784 - MAX98373_R203D_AMP_DIG_VOL_CTRL, 785 - 0x00); 786 - regmap_write(max98373->regmap, 787 - MAX98373_R203E_AMP_PATH_GAIN, 788 - 0x00); 789 782 /* Enable DC blocker */ 790 783 regmap_write(max98373->regmap, 791 784 MAX98373_R203F_AMP_DSP_CFG, ··· 862 869 .num_dapm_widgets = ARRAY_SIZE(max98373_dapm_widgets), 863 870 .dapm_routes = max98373_audio_map, 864 871 .num_dapm_routes = ARRAY_SIZE(max98373_audio_map), 865 - .idle_bias_on = 1, 866 872 .use_pmdown_time = 1, 867 873 .endianness = 1, 868 874 .non_legacy_dai_naming = 1,
+4 -4
sound/soc/codecs/rt286.c
··· 272 272 regmap_read(rt286->regmap, RT286_GET_MIC1_SENSE, &buf); 273 273 *mic = buf & 0x80000000; 274 274 } 275 - if (!*mic) { 275 + 276 + if (!*hp) { 276 277 snd_soc_dapm_disable_pin(dapm, "HV"); 277 278 snd_soc_dapm_disable_pin(dapm, "VREF"); 278 - } 279 - if (!*hp) 280 279 snd_soc_dapm_disable_pin(dapm, "LDO1"); 281 - snd_soc_dapm_sync(dapm); 280 + snd_soc_dapm_sync(dapm); 281 + } 282 282 283 283 return 0; 284 284 }
+58 -17
sound/soc/codecs/rt5670.c
··· 31 31 #include "rt5670.h" 32 32 #include "rt5670-dsp.h" 33 33 34 - #define RT5670_DEV_GPIO BIT(0) 35 - #define RT5670_IN2_DIFF BIT(1) 36 - #define RT5670_DMIC_EN BIT(2) 37 - #define RT5670_DMIC1_IN2P BIT(3) 38 - #define RT5670_DMIC1_GPIO6 BIT(4) 39 - #define RT5670_DMIC1_GPIO7 BIT(5) 40 - #define RT5670_DMIC2_INR BIT(6) 41 - #define RT5670_DMIC2_GPIO8 BIT(7) 42 - #define RT5670_DMIC3_GPIO5 BIT(8) 43 - #define RT5670_JD_MODE1 BIT(9) 44 - #define RT5670_JD_MODE2 BIT(10) 45 - #define RT5670_JD_MODE3 BIT(11) 34 + #define RT5670_DEV_GPIO BIT(0) 35 + #define RT5670_IN2_DIFF BIT(1) 36 + #define RT5670_DMIC_EN BIT(2) 37 + #define RT5670_DMIC1_IN2P BIT(3) 38 + #define RT5670_DMIC1_GPIO6 BIT(4) 39 + #define RT5670_DMIC1_GPIO7 BIT(5) 40 + #define RT5670_DMIC2_INR BIT(6) 41 + #define RT5670_DMIC2_GPIO8 BIT(7) 42 + #define RT5670_DMIC3_GPIO5 BIT(8) 43 + #define RT5670_JD_MODE1 BIT(9) 44 + #define RT5670_JD_MODE2 BIT(10) 45 + #define RT5670_JD_MODE3 BIT(11) 46 + #define RT5670_GPIO1_IS_EXT_SPK_EN BIT(12) 46 47 47 48 static unsigned long rt5670_quirk; 48 49 static unsigned int quirk_override; ··· 603 602 EXPORT_SYMBOL_GPL(rt5670_set_jack_detect); 604 603 605 604 static const DECLARE_TLV_DB_SCALE(out_vol_tlv, -4650, 150, 0); 606 - static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -65625, 375, 0); 605 + static const DECLARE_TLV_DB_MINMAX(dac_vol_tlv, -6562, 0); 607 606 static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -3450, 150, 0); 608 - static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -17625, 375, 0); 607 + static const DECLARE_TLV_DB_MINMAX(adc_vol_tlv, -1762, 3000); 609 608 static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0); 610 609 611 610 /* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */ ··· 1448 1447 return 0; 1449 1448 } 1450 1449 1450 + static int rt5670_spk_event(struct snd_soc_dapm_widget *w, 1451 + struct snd_kcontrol *kcontrol, int event) 1452 + { 1453 + struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); 1454 + struct rt5670_priv *rt5670 = 
snd_soc_component_get_drvdata(component); 1455 + 1456 + if (!rt5670->pdata.gpio1_is_ext_spk_en) 1457 + return 0; 1458 + 1459 + switch (event) { 1460 + case SND_SOC_DAPM_POST_PMU: 1461 + regmap_update_bits(rt5670->regmap, RT5670_GPIO_CTRL2, 1462 + RT5670_GP1_OUT_MASK, RT5670_GP1_OUT_HI); 1463 + break; 1464 + 1465 + case SND_SOC_DAPM_PRE_PMD: 1466 + regmap_update_bits(rt5670->regmap, RT5670_GPIO_CTRL2, 1467 + RT5670_GP1_OUT_MASK, RT5670_GP1_OUT_LO); 1468 + break; 1469 + 1470 + default: 1471 + return 0; 1472 + } 1473 + 1474 + return 0; 1475 + } 1476 + 1451 1477 static int rt5670_bst1_event(struct snd_soc_dapm_widget *w, 1452 1478 struct snd_kcontrol *kcontrol, int event) 1453 1479 { ··· 1888 1860 }; 1889 1861 1890 1862 static const struct snd_soc_dapm_widget rt5672_specific_dapm_widgets[] = { 1891 - SND_SOC_DAPM_PGA("SPO Amp", SND_SOC_NOPM, 0, 0, NULL, 0), 1863 + SND_SOC_DAPM_PGA_E("SPO Amp", SND_SOC_NOPM, 0, 0, NULL, 0, 1864 + rt5670_spk_event, SND_SOC_DAPM_PRE_PMD | 1865 + SND_SOC_DAPM_POST_PMU), 1892 1866 SND_SOC_DAPM_OUTPUT("SPOLP"), 1893 1867 SND_SOC_DAPM_OUTPUT("SPOLN"), 1894 1868 SND_SOC_DAPM_OUTPUT("SPORP"), ··· 2887 2857 }, 2888 2858 { 2889 2859 .callback = rt5670_quirk_cb, 2890 - .ident = "Lenovo Thinkpad Tablet 10", 2860 + .ident = "Lenovo Miix 2 10", 2891 2861 .matches = { 2892 2862 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 2893 2863 DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Miix 2 10"), 2894 2864 }, 2895 2865 .driver_data = (unsigned long *)(RT5670_DMIC_EN | 2896 2866 RT5670_DMIC1_IN2P | 2897 - RT5670_DEV_GPIO | 2867 + RT5670_GPIO1_IS_EXT_SPK_EN | 2898 2868 RT5670_JD_MODE2), 2899 2869 }, 2900 2870 { ··· 2953 2923 if (rt5670_quirk & RT5670_DEV_GPIO) { 2954 2924 rt5670->pdata.dev_gpio = true; 2955 2925 dev_info(&i2c->dev, "quirk dev_gpio\n"); 2926 + } 2927 + if (rt5670_quirk & RT5670_GPIO1_IS_EXT_SPK_EN) { 2928 + rt5670->pdata.gpio1_is_ext_spk_en = true; 2929 + dev_info(&i2c->dev, "quirk GPIO1 is external speaker enable\n"); 2956 2930 } 2957 2931 if (rt5670_quirk & 
RT5670_IN2_DIFF) { 2958 2932 rt5670->pdata.in2_diff = true; ··· 3053 3019 /* for irq */ 3054 3020 regmap_update_bits(rt5670->regmap, RT5670_GPIO_CTRL1, 3055 3021 RT5670_GP1_PIN_MASK, RT5670_GP1_PIN_IRQ); 3022 + regmap_update_bits(rt5670->regmap, RT5670_GPIO_CTRL2, 3023 + RT5670_GP1_PF_MASK, RT5670_GP1_PF_OUT); 3024 + } 3025 + 3026 + if (rt5670->pdata.gpio1_is_ext_spk_en) { 3027 + regmap_update_bits(rt5670->regmap, RT5670_GPIO_CTRL1, 3028 + RT5670_GP1_PIN_MASK, RT5670_GP1_PIN_GPIO1); 3056 3029 regmap_update_bits(rt5670->regmap, RT5670_GPIO_CTRL2, 3057 3030 RT5670_GP1_PF_MASK, RT5670_GP1_PF_OUT); 3058 3031 }
+1 -1
sound/soc/codecs/rt5670.h
··· 757 757 #define RT5670_PWR_VREF2_BIT 4 758 758 #define RT5670_PWR_FV2 (0x1 << 3) 759 759 #define RT5670_PWR_FV2_BIT 3 760 - #define RT5670_LDO_SEL_MASK (0x3) 760 + #define RT5670_LDO_SEL_MASK (0x7) 761 761 #define RT5670_LDO_SEL_SFT 0 762 762 763 763 /* Power Management for Analog 2 (0x64) */
+37 -18
sound/soc/codecs/rt5682.c
··· 932 932 RT5682_PWR_ANLG_1, RT5682_PWR_FV2, RT5682_PWR_FV2); 933 933 snd_soc_component_update_bits(component, RT5682_PWR_ANLG_3, 934 934 RT5682_PWR_CBJ, RT5682_PWR_CBJ); 935 - 935 + snd_soc_component_update_bits(component, 936 + RT5682_HP_CHARGE_PUMP_1, 937 + RT5682_OSW_L_MASK | RT5682_OSW_R_MASK, 0); 936 938 snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1, 937 939 RT5682_TRIG_JD_MASK, RT5682_TRIG_JD_HIGH); 938 940 ··· 958 956 rt5682->jack_type = SND_JACK_HEADPHONE; 959 957 break; 960 958 } 959 + 960 + snd_soc_component_update_bits(component, 961 + RT5682_HP_CHARGE_PUMP_1, 962 + RT5682_OSW_L_MASK | RT5682_OSW_R_MASK, 963 + RT5682_OSW_L_EN | RT5682_OSW_R_EN); 961 964 } else { 962 965 rt5682_enable_push_button_irq(component, false); 963 966 snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1, 964 967 RT5682_TRIG_JD_MASK, RT5682_TRIG_JD_LOW); 965 - if (snd_soc_dapm_get_pin_status(dapm, "MICBIAS")) 968 + if (!snd_soc_dapm_get_pin_status(dapm, "MICBIAS")) 969 + snd_soc_component_update_bits(component, 970 + RT5682_PWR_ANLG_1, RT5682_PWR_MB, 0); 971 + if (!snd_soc_dapm_get_pin_status(dapm, "Vref2")) 966 972 snd_soc_component_update_bits(component, 967 973 RT5682_PWR_ANLG_1, RT5682_PWR_VREF2, 0); 968 - else 969 - snd_soc_component_update_bits(component, 970 - RT5682_PWR_ANLG_1, 971 - RT5682_PWR_VREF2 | RT5682_PWR_MB, 0); 972 974 snd_soc_component_update_bits(component, RT5682_PWR_ANLG_3, 973 975 RT5682_PWR_CBJ, 0); 974 976 ··· 991 985 992 986 rt5682->hs_jack = hs_jack; 993 987 994 - if (!rt5682->is_sdw) { 995 - if (!hs_jack) { 996 - regmap_update_bits(rt5682->regmap, RT5682_IRQ_CTRL_2, 997 - RT5682_JD1_EN_MASK, RT5682_JD1_DIS); 998 - regmap_update_bits(rt5682->regmap, RT5682_RC_CLK_CTRL, 999 - RT5682_POW_JDH | RT5682_POW_JDL, 0); 1000 - cancel_delayed_work_sync(&rt5682->jack_detect_work); 1001 - return 0; 1002 - } 988 + if (!hs_jack) { 989 + regmap_update_bits(rt5682->regmap, RT5682_IRQ_CTRL_2, 990 + RT5682_JD1_EN_MASK, RT5682_JD1_DIS); 991 + 
regmap_update_bits(rt5682->regmap, RT5682_RC_CLK_CTRL, 992 + RT5682_POW_JDH | RT5682_POW_JDL, 0); 993 + cancel_delayed_work_sync(&rt5682->jack_detect_work); 1003 994 995 + return 0; 996 + } 997 + 998 + if (!rt5682->is_sdw) { 1004 999 switch (rt5682->pdata.jd_src) { 1005 1000 case RT5682_JD1: 1006 1001 snd_soc_component_update_bits(component, ··· 1082 1075 /* jack was out, report jack type */ 1083 1076 rt5682->jack_type = 1084 1077 rt5682_headset_detect(rt5682->component, 1); 1085 - } else { 1078 + } else if ((rt5682->jack_type & SND_JACK_HEADSET) == 1079 + SND_JACK_HEADSET) { 1086 1080 /* jack is already in, report button event */ 1087 1081 rt5682->jack_type = SND_JACK_HEADSET; 1088 1082 btn_type = rt5682_button_detect(rt5682->component); ··· 1609 1601 0, set_filter_clk, SND_SOC_DAPM_PRE_PMU), 1610 1602 SND_SOC_DAPM_SUPPLY("Vref1", RT5682_PWR_ANLG_1, RT5682_PWR_VREF1_BIT, 0, 1611 1603 rt5682_set_verf, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU), 1612 - SND_SOC_DAPM_SUPPLY("Vref2", RT5682_PWR_ANLG_1, RT5682_PWR_VREF2_BIT, 0, 1613 - NULL, 0), 1604 + SND_SOC_DAPM_SUPPLY("Vref2", SND_SOC_NOPM, 0, 0, NULL, 0), 1614 1605 SND_SOC_DAPM_SUPPLY("MICBIAS", SND_SOC_NOPM, 0, 0, NULL, 0), 1615 1606 1616 1607 /* ASRC */ ··· 2492 2485 snd_soc_dapm_force_enable_pin_unlocked(dapm, "MICBIAS"); 2493 2486 snd_soc_component_update_bits(component, RT5682_PWR_ANLG_1, 2494 2487 RT5682_PWR_MB, RT5682_PWR_MB); 2488 + 2489 + snd_soc_dapm_force_enable_pin_unlocked(dapm, "Vref2"); 2490 + snd_soc_component_update_bits(component, RT5682_PWR_ANLG_1, 2491 + RT5682_PWR_VREF2 | RT5682_PWR_FV2, 2492 + RT5682_PWR_VREF2); 2493 + usleep_range(55000, 60000); 2494 + snd_soc_component_update_bits(component, RT5682_PWR_ANLG_1, 2495 + RT5682_PWR_FV2, RT5682_PWR_FV2); 2496 + 2495 2497 snd_soc_dapm_force_enable_pin_unlocked(dapm, "I2S1"); 2496 2498 snd_soc_dapm_force_enable_pin_unlocked(dapm, "PLL2F"); 2497 2499 snd_soc_dapm_force_enable_pin_unlocked(dapm, "PLL2B"); ··· 2526 2510 
snd_soc_dapm_mutex_lock(dapm); 2527 2511 2528 2512 snd_soc_dapm_disable_pin_unlocked(dapm, "MICBIAS"); 2513 + snd_soc_dapm_disable_pin_unlocked(dapm, "Vref2"); 2529 2514 if (!rt5682->jack_type) 2530 2515 snd_soc_component_update_bits(component, RT5682_PWR_ANLG_1, 2516 + RT5682_PWR_VREF2 | RT5682_PWR_FV2 | 2531 2517 RT5682_PWR_MB, 0); 2518 + 2532 2519 snd_soc_dapm_disable_pin_unlocked(dapm, "I2S1"); 2533 2520 snd_soc_dapm_disable_pin_unlocked(dapm, "PLL2F"); 2534 2521 snd_soc_dapm_disable_pin_unlocked(dapm, "PLL2B");
+5 -1
sound/soc/codecs/wm8974.c
··· 186 186 187 187 /* Boost mixer */ 188 188 static const struct snd_kcontrol_new wm8974_boost_mixer[] = { 189 - SOC_DAPM_SINGLE("Aux Switch", WM8974_INPPGA, 6, 1, 0), 189 + SOC_DAPM_SINGLE("Aux Switch", WM8974_INPPGA, 6, 1, 1), 190 190 }; 191 191 192 192 /* Input PGA */ ··· 474 474 iface |= 0x0008; 475 475 break; 476 476 case SND_SOC_DAIFMT_DSP_A: 477 + if ((fmt & SND_SOC_DAIFMT_INV_MASK) == SND_SOC_DAIFMT_IB_IF || 478 + (fmt & SND_SOC_DAIFMT_INV_MASK) == SND_SOC_DAIFMT_NB_IF) { 479 + return -EINVAL; 480 + } 477 481 iface |= 0x00018; 478 482 break; 479 483 default:
+14 -9
sound/soc/fsl/fsl_mqs.c
··· 265 265 static int fsl_mqs_runtime_resume(struct device *dev) 266 266 { 267 267 struct fsl_mqs *mqs_priv = dev_get_drvdata(dev); 268 + int ret; 268 269 269 - if (mqs_priv->ipg) 270 - clk_prepare_enable(mqs_priv->ipg); 270 + ret = clk_prepare_enable(mqs_priv->ipg); 271 + if (ret) { 272 + dev_err(dev, "failed to enable ipg clock\n"); 273 + return ret; 274 + } 271 275 272 - if (mqs_priv->mclk) 273 - clk_prepare_enable(mqs_priv->mclk); 276 + ret = clk_prepare_enable(mqs_priv->mclk); 277 + if (ret) { 278 + dev_err(dev, "failed to enable mclk clock\n"); 279 + clk_disable_unprepare(mqs_priv->ipg); 280 + return ret; 281 + } 274 282 275 283 if (mqs_priv->use_gpr) 276 284 regmap_write(mqs_priv->regmap, IOMUXC_GPR2, ··· 300 292 regmap_read(mqs_priv->regmap, REG_MQS_CTRL, 301 293 &mqs_priv->reg_mqs_ctrl); 302 294 303 - if (mqs_priv->mclk) 304 - clk_disable_unprepare(mqs_priv->mclk); 305 - 306 - if (mqs_priv->ipg) 307 - clk_disable_unprepare(mqs_priv->ipg); 295 + clk_disable_unprepare(mqs_priv->mclk); 296 + clk_disable_unprepare(mqs_priv->ipg); 308 297 309 298 return 0; 310 299 }
+2 -2
sound/soc/generic/audio-graph-card.c
··· 317 317 if (ret < 0) 318 318 goto out_put_node; 319 319 320 - dai_link->dpcm_playback = 1; 321 - dai_link->dpcm_capture = 1; 320 + snd_soc_dai_link_set_capabilities(dai_link); 321 + 322 322 dai_link->ops = &graph_ops; 323 323 dai_link->init = asoc_simple_dai_init; 324 324
+2 -2
sound/soc/generic/simple-card.c
··· 231 231 if (ret < 0) 232 232 goto out_put_node; 233 233 234 - dai_link->dpcm_playback = 1; 235 - dai_link->dpcm_capture = 1; 234 + snd_soc_dai_link_set_capabilities(dai_link); 235 + 236 236 dai_link->ops = &simple_ops; 237 237 dai_link->init = asoc_simple_dai_init; 238 238
+1
sound/soc/intel/boards/bdw-rt5677.c
··· 354 354 { 355 355 .name = "Codec DSP", 356 356 .stream_name = "Wake on Voice", 357 + .capture_only = 1, 357 358 .ops = &bdw_rt5677_dsp_ops, 358 359 SND_SOC_DAILINK_REG(dsp), 359 360 },
+3 -1
sound/soc/intel/boards/bytcht_es8316.c
··· 543 543 544 544 if (cnt) { 545 545 ret = device_add_properties(codec_dev, props); 546 - if (ret) 546 + if (ret) { 547 + put_device(codec_dev); 547 548 return ret; 549 + } 548 550 } 549 551 550 552 devm_acpi_dev_add_driver_gpios(codec_dev, byt_cht_es8316_gpios);
+11 -12
sound/soc/intel/boards/cht_bsw_rt5672.c
··· 253 253 params_set_format(params, SNDRV_PCM_FORMAT_S24_LE); 254 254 255 255 /* 256 - * Default mode for SSP configuration is TDM 4 slot 256 + * Default mode for SSP configuration is TDM 4 slot. One board/design, 257 + * the Lenovo Miix 2 10 uses not 1 but 2 codecs connected to SSP2. The 258 + * second piggy-backed, output-only codec is inside the keyboard-dock 259 + * (which has extra speakers). Unlike the main rt5672 codec, we cannot 260 + * configure this codec, it is hard coded to use 2 channel 24 bit I2S. 261 + * Since we only support 2 channels anyways, there is no need for TDM 262 + * on any cht-bsw-rt5672 designs. So we simply use I2S 2ch everywhere. 257 263 */ 258 - ret = snd_soc_dai_set_fmt(asoc_rtd_to_codec(rtd, 0), 259 - SND_SOC_DAIFMT_DSP_B | 260 - SND_SOC_DAIFMT_IB_NF | 264 + ret = snd_soc_dai_set_fmt(asoc_rtd_to_cpu(rtd, 0), 265 + SND_SOC_DAIFMT_I2S | 266 + SND_SOC_DAIFMT_NB_NF | 261 267 SND_SOC_DAIFMT_CBS_CFS); 262 268 if (ret < 0) { 263 - dev_err(rtd->dev, "can't set format to TDM %d\n", ret); 264 - return ret; 265 - } 266 - 267 - /* TDM 4 slots 24 bit, set Rx & Tx bitmask to 4 active slots */ 268 - ret = snd_soc_dai_set_tdm_slot(asoc_rtd_to_codec(rtd, 0), 0xF, 0xF, 4, 24); 269 - if (ret < 0) { 270 - dev_err(rtd->dev, "can't set codec TDM slot %d\n", ret); 269 + dev_err(rtd->dev, "can't set format to I2S, err %d\n", ret); 271 270 return ret; 272 271 } 273 272
+1 -1
sound/soc/qcom/Kconfig
··· 72 72 73 73 config SND_SOC_QDSP6 74 74 tristate "SoC ALSA audio driver for QDSP6" 75 - depends on QCOM_APR && HAS_DMA 75 + depends on QCOM_APR 76 76 select SND_SOC_QDSP6_COMMON 77 77 select SND_SOC_QDSP6_CORE 78 78 select SND_SOC_QDSP6_AFE
+13
sound/soc/rockchip/rk3399_gru_sound.c
··· 219 219 return 0; 220 220 } 221 221 222 + static int rockchip_sound_startup(struct snd_pcm_substream *substream) 223 + { 224 + struct snd_pcm_runtime *runtime = substream->runtime; 225 + 226 + runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE; 227 + return snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_RATE, 228 + 8000, 96000); 229 + } 230 + 222 231 static const struct snd_soc_ops rockchip_sound_max98357a_ops = { 232 + .startup = rockchip_sound_startup, 223 233 .hw_params = rockchip_sound_max98357a_hw_params, 224 234 }; 225 235 226 236 static const struct snd_soc_ops rockchip_sound_rt5514_ops = { 237 + .startup = rockchip_sound_startup, 227 238 .hw_params = rockchip_sound_rt5514_hw_params, 228 239 }; 229 240 230 241 static const struct snd_soc_ops rockchip_sound_da7219_ops = { 242 + .startup = rockchip_sound_startup, 231 243 .hw_params = rockchip_sound_da7219_hw_params, 232 244 }; 233 245 234 246 static const struct snd_soc_ops rockchip_sound_dmic_ops = { 247 + .startup = rockchip_sound_startup, 235 248 .hw_params = rockchip_sound_dmic_hw_params, 236 249 }; 237 250
+27
sound/soc/soc-core.c
··· 2573 2573 EXPORT_SYMBOL_GPL(snd_soc_register_component); 2574 2574 2575 2575 /** 2576 + * snd_soc_unregister_component_by_driver - Unregister component using a given driver 2577 + * from the ASoC core 2578 + * 2579 + * @dev: The device to unregister 2580 + * @component_driver: The component driver to unregister 2581 + */ 2582 + void snd_soc_unregister_component_by_driver(struct device *dev, 2583 + const struct snd_soc_component_driver *component_driver) 2584 + { 2585 + struct snd_soc_component *component; 2586 + 2587 + if (!component_driver) 2588 + return; 2589 + 2590 + mutex_lock(&client_mutex); 2591 + component = snd_soc_lookup_component_nolocked(dev, component_driver->name); 2592 + if (!component) 2593 + goto out; 2594 + 2595 + snd_soc_del_component_unlocked(component); 2596 + 2597 + out: 2598 + mutex_unlock(&client_mutex); 2599 + } 2600 + EXPORT_SYMBOL_GPL(snd_soc_unregister_component_by_driver); 2601 + 2602 + /** 2576 2603 * snd_soc_unregister_component - Unregister all related component 2577 2604 * from the ASoC core 2578 2605 *
+38
sound/soc/soc-dai.c
··· 391 391 return stream->channels_min; 392 392 } 393 393 394 + /* 395 + * snd_soc_dai_link_set_capabilities() - set dai_link properties based on its DAIs 396 + */ 397 + void snd_soc_dai_link_set_capabilities(struct snd_soc_dai_link *dai_link) 398 + { 399 + struct snd_soc_dai_link_component *cpu; 400 + struct snd_soc_dai_link_component *codec; 401 + struct snd_soc_dai *dai; 402 + bool supported[SNDRV_PCM_STREAM_LAST + 1]; 403 + int direction; 404 + int i; 405 + 406 + for_each_pcm_streams(direction) { 407 + supported[direction] = true; 408 + 409 + for_each_link_cpus(dai_link, i, cpu) { 410 + dai = snd_soc_find_dai(cpu); 411 + if (!dai || !snd_soc_dai_stream_valid(dai, direction)) { 412 + supported[direction] = false; 413 + break; 414 + } 415 + } 416 + if (!supported[direction]) 417 + continue; 418 + for_each_link_codecs(dai_link, i, codec) { 419 + dai = snd_soc_find_dai(codec); 420 + if (!dai || !snd_soc_dai_stream_valid(dai, direction)) { 421 + supported[direction] = false; 422 + break; 423 + } 424 + } 425 + } 426 + 427 + dai_link->dpcm_playback = supported[SNDRV_PCM_STREAM_PLAYBACK]; 428 + dai_link->dpcm_capture = supported[SNDRV_PCM_STREAM_CAPTURE]; 429 + } 430 + EXPORT_SYMBOL_GPL(snd_soc_dai_link_set_capabilities); 431 + 394 432 void snd_soc_dai_action(struct snd_soc_dai *dai, 395 433 int stream, int action) 396 434 {
+5 -3
sound/soc/soc-devres.c
··· 48 48 49 49 static void devm_component_release(struct device *dev, void *res) 50 50 { 51 - snd_soc_unregister_component(*(struct device **)res); 51 + const struct snd_soc_component_driver **cmpnt_drv = res; 52 + 53 + snd_soc_unregister_component_by_driver(dev, *cmpnt_drv); 52 54 } 53 55 54 56 /** ··· 67 65 const struct snd_soc_component_driver *cmpnt_drv, 68 66 struct snd_soc_dai_driver *dai_drv, int num_dai) 69 67 { 70 - struct device **ptr; 68 + const struct snd_soc_component_driver **ptr; 71 69 int ret; 72 70 73 71 ptr = devres_alloc(devm_component_release, sizeof(*ptr), GFP_KERNEL); ··· 76 74 77 75 ret = snd_soc_register_component(dev, cmpnt_drv, dai_drv, num_dai); 78 76 if (ret == 0) { 79 - *ptr = dev; 77 + *ptr = cmpnt_drv; 80 78 devres_add(dev, ptr); 81 79 } else { 82 80 devres_free(ptr);
+1 -1
sound/soc/soc-generic-dmaengine-pcm.c
··· 478 478 479 479 pcm = soc_component_to_pcm(component); 480 480 481 - snd_soc_unregister_component(dev); 481 + snd_soc_unregister_component_by_driver(dev, component->driver); 482 482 dmaengine_pcm_release_chan(pcm); 483 483 kfree(pcm); 484 484 }
+18 -6
sound/soc/soc-topology.c
··· 1261 1261 list_add(&routes[i]->dobj.list, &tplg->comp->dobj_list); 1262 1262 1263 1263 ret = soc_tplg_add_route(tplg, routes[i]); 1264 - if (ret < 0) 1264 + if (ret < 0) { 1265 + /* 1266 + * this route was added to the list, it will 1267 + * be freed in remove_route() so increment the 1268 + * counter to skip it in the error handling 1269 + * below. 1270 + */ 1271 + i++; 1265 1272 break; 1273 + } 1266 1274 1267 1275 /* add route, but keep going if some fail */ 1268 1276 snd_soc_dapm_add_routes(dapm, routes[i], 1); 1269 1277 } 1270 1278 1271 - /* free memory allocated for all dapm routes in case of error */ 1272 - if (ret < 0) 1273 - for (i = 0; i < count ; i++) 1274 - kfree(routes[i]); 1279 + /* 1280 + * free memory allocated for all dapm routes not added to the 1281 + * list in case of error 1282 + */ 1283 + if (ret < 0) { 1284 + while (i < count) 1285 + kfree(routes[i++]); 1286 + } 1275 1287 1276 1288 /* 1277 1289 * free pointer to array of dapm routes as this is no longer needed. ··· 1371 1359 if (err < 0) { 1372 1360 dev_err(tplg->dev, "ASoC: failed to init %s\n", 1373 1361 mc->hdr.name); 1374 - soc_tplg_free_tlv(tplg, &kc[i]); 1375 1362 goto err_sm; 1376 1363 } 1377 1364 } ··· 1378 1367 1379 1368 err_sm: 1380 1369 for (; i >= 0; i--) { 1370 + soc_tplg_free_tlv(tplg, &kc[i]); 1381 1371 sm = (struct soc_mixer_control *)kc[i].private_value; 1382 1372 kfree(sm); 1383 1373 kfree(kc[i].name);
+5 -5
sound/soc/sof/core.c
··· 345 345 struct snd_sof_pdata *pdata = sdev->pdata; 346 346 int ret; 347 347 348 - ret = snd_sof_dsp_power_down_notify(sdev); 349 - if (ret < 0) 350 - dev_warn(dev, "error: %d failed to prepare DSP for device removal", 351 - ret); 352 - 353 348 if (IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE)) 354 349 cancel_work_sync(&sdev->probe_work); 355 350 356 351 if (sdev->fw_state > SOF_FW_BOOT_NOT_STARTED) { 352 + ret = snd_sof_dsp_power_down_notify(sdev); 353 + if (ret < 0) 354 + dev_warn(dev, "error: %d failed to prepare DSP for device removal", 355 + ret); 356 + 357 357 snd_sof_fw_unload(sdev); 358 358 snd_sof_ipc_free(sdev); 359 359 snd_sof_free_debug(sdev);
+8
sound/soc/sof/imx/imx8.c
··· 375 375 static struct snd_soc_dai_driver imx8_dai[] = { 376 376 { 377 377 .name = "esai-port", 378 + .playback = { 379 + .channels_min = 1, 380 + .channels_max = 8, 381 + }, 382 + .capture = { 383 + .channels_min = 1, 384 + .channels_max = 8, 385 + }, 378 386 }, 379 387 }; 380 388
+8
sound/soc/sof/imx/imx8m.c
··· 240 240 static struct snd_soc_dai_driver imx8m_dai[] = { 241 241 { 242 242 .name = "sai-port", 243 + .playback = { 244 + .channels_min = 1, 245 + .channels_max = 32, 246 + }, 247 + .capture = { 248 + .channels_min = 1, 249 + .channels_max = 32, 250 + }, 243 251 }, 244 252 }; 245 253
+3 -3
sound/usb/card.h
··· 84 84 dma_addr_t sync_dma; /* DMA address of syncbuf */ 85 85 86 86 unsigned int pipe; /* the data i/o pipe */ 87 - unsigned int framesize[2]; /* small/large frame sizes in samples */ 88 - unsigned int sample_rem; /* remainder from division fs/fps */ 87 + unsigned int packsize[2]; /* small/large packet sizes in samples */ 88 + unsigned int sample_rem; /* remainder from division fs/pps */ 89 89 unsigned int sample_accum; /* sample accumulator */ 90 - unsigned int fps; /* frames per second */ 90 + unsigned int pps; /* packets per second */ 91 91 unsigned int freqn; /* nominal sampling rate in fs/fps in Q16.16 format */ 92 92 unsigned int freqm; /* momentary sampling rate in fs/fps in Q16.16 format */ 93 93 int freqshift; /* how much to shift the feedback value to get Q16.16 */
+9 -9
sound/usb/endpoint.c
··· 159 159 return ep->maxframesize; 160 160 161 161 ep->sample_accum += ep->sample_rem; 162 - if (ep->sample_accum >= ep->fps) { 163 - ep->sample_accum -= ep->fps; 164 - ret = ep->framesize[1]; 162 + if (ep->sample_accum >= ep->pps) { 163 + ep->sample_accum -= ep->pps; 164 + ret = ep->packsize[1]; 165 165 } else { 166 - ret = ep->framesize[0]; 166 + ret = ep->packsize[0]; 167 167 } 168 168 169 169 return ret; ··· 1088 1088 1089 1089 if (snd_usb_get_speed(ep->chip->dev) == USB_SPEED_FULL) { 1090 1090 ep->freqn = get_usb_full_speed_rate(rate); 1091 - ep->fps = 1000; 1091 + ep->pps = 1000 >> ep->datainterval; 1092 1092 } else { 1093 1093 ep->freqn = get_usb_high_speed_rate(rate); 1094 - ep->fps = 8000; 1094 + ep->pps = 8000 >> ep->datainterval; 1095 1095 } 1096 1096 1097 - ep->sample_rem = rate % ep->fps; 1098 - ep->framesize[0] = rate / ep->fps; 1099 - ep->framesize[1] = (rate + (ep->fps - 1)) / ep->fps; 1097 + ep->sample_rem = rate % ep->pps; 1098 + ep->packsize[0] = rate / ep->pps; 1099 + ep->packsize[1] = (rate + (ep->pps - 1)) / ep->pps; 1100 1100 1101 1101 /* calculate the frequency in 16.16 format */ 1102 1102 ep->freqm = ep->freqn;
+2
sound/usb/line6/capture.c
··· 286 286 urb->interval = LINE6_ISO_INTERVAL; 287 287 urb->error_count = 0; 288 288 urb->complete = audio_in_callback; 289 + if (usb_urb_ep_type_check(urb)) 290 + return -EINVAL; 289 291 } 290 292 291 293 return 0;
+1 -1
sound/usb/line6/driver.c
··· 840 840 if (WARN_ON(usbdev != line6->usbdev)) 841 841 return; 842 842 843 - cancel_delayed_work(&line6->startup_work); 843 + cancel_delayed_work_sync(&line6->startup_work); 844 844 845 845 if (line6->urb_listen != NULL) 846 846 line6_stop_listen(line6);
+2
sound/usb/line6/playback.c
··· 431 431 urb->interval = LINE6_ISO_INTERVAL; 432 432 urb->error_count = 0; 433 433 urb->complete = audio_out_callback; 434 + if (usb_urb_ep_type_check(urb)) 435 + return -EINVAL; 434 436 } 435 437 436 438 return 0;
+12 -5
sound/usb/midi.c
··· 1499 1499 spin_unlock_irq(&umidi->disc_lock); 1500 1500 up_write(&umidi->disc_rwsem); 1501 1501 1502 + del_timer_sync(&umidi->error_timer); 1503 + 1502 1504 for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) { 1503 1505 struct snd_usb_midi_endpoint *ep = &umidi->endpoints[i]; 1504 1506 if (ep->out) ··· 1527 1525 ep->in = NULL; 1528 1526 } 1529 1527 } 1530 - del_timer_sync(&umidi->error_timer); 1531 1528 } 1532 1529 EXPORT_SYMBOL(snd_usbmidi_disconnect); 1533 1530 ··· 2302 2301 } 2303 2302 EXPORT_SYMBOL(snd_usbmidi_input_stop); 2304 2303 2305 - static void snd_usbmidi_input_start_ep(struct snd_usb_midi_in_endpoint *ep) 2304 + static void snd_usbmidi_input_start_ep(struct snd_usb_midi *umidi, 2305 + struct snd_usb_midi_in_endpoint *ep) 2306 2306 { 2307 2307 unsigned int i; 2308 + unsigned long flags; 2308 2309 2309 2310 if (!ep) 2310 2311 return; 2311 2312 for (i = 0; i < INPUT_URBS; ++i) { 2312 2313 struct urb *urb = ep->urbs[i]; 2313 - urb->dev = ep->umidi->dev; 2314 - snd_usbmidi_submit_urb(urb, GFP_KERNEL); 2314 + spin_lock_irqsave(&umidi->disc_lock, flags); 2315 + if (!atomic_read(&urb->use_count)) { 2316 + urb->dev = ep->umidi->dev; 2317 + snd_usbmidi_submit_urb(urb, GFP_ATOMIC); 2318 + } 2319 + spin_unlock_irqrestore(&umidi->disc_lock, flags); 2315 2320 } 2316 2321 } 2317 2322 ··· 2333 2326 if (umidi->input_running || !umidi->opened[1]) 2334 2327 return; 2335 2328 for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) 2336 - snd_usbmidi_input_start_ep(umidi->endpoints[i].in); 2329 + snd_usbmidi_input_start_ep(umidi, umidi->endpoints[i].in); 2337 2330 umidi->input_running = 1; 2338 2331 } 2339 2332 EXPORT_SYMBOL(snd_usbmidi_input_start);
+1
sound/usb/pcm.c
··· 368 368 goto add_sync_ep_from_ifnum; 369 369 case USB_ID(0x07fd, 0x0008): /* MOTU M Series */ 370 370 case USB_ID(0x31e9, 0x0002): /* Solid State Logic SSL2+ */ 371 + case USB_ID(0x0d9a, 0x00df): /* RTX6001 */ 371 372 ep = 0x81; 372 373 ifnum = 2; 373 374 goto add_sync_ep_from_ifnum;
+52
sound/usb/quirks-table.h
··· 3633 3633 } 3634 3634 }, 3635 3635 3636 + /* 3637 + * MacroSilicon MS2109 based HDMI capture cards 3638 + * 3639 + * These claim 96kHz 1ch in the descriptors, but are actually 48kHz 2ch. 3640 + * They also need QUIRK_AUDIO_ALIGN_TRANSFER, which makes one wonder if 3641 + * they pretend to be 96kHz mono as a workaround for stereo being broken 3642 + * by that... 3643 + * 3644 + * They also have swapped L-R channels, but that's for userspace to deal 3645 + * with. 3646 + */ 3647 + { 3648 + USB_DEVICE(0x534d, 0x2109), 3649 + .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { 3650 + .vendor_name = "MacroSilicon", 3651 + .product_name = "MS2109", 3652 + .ifnum = QUIRK_ANY_INTERFACE, 3653 + .type = QUIRK_COMPOSITE, 3654 + .data = &(const struct snd_usb_audio_quirk[]) { 3655 + { 3656 + .ifnum = 2, 3657 + .type = QUIRK_AUDIO_ALIGN_TRANSFER, 3658 + }, 3659 + { 3660 + .ifnum = 2, 3661 + .type = QUIRK_AUDIO_STANDARD_MIXER, 3662 + }, 3663 + { 3664 + .ifnum = 3, 3665 + .type = QUIRK_AUDIO_FIXED_ENDPOINT, 3666 + .data = &(const struct audioformat) { 3667 + .formats = SNDRV_PCM_FMTBIT_S16_LE, 3668 + .channels = 2, 3669 + .iface = 3, 3670 + .altsetting = 1, 3671 + .altset_idx = 1, 3672 + .attributes = 0, 3673 + .endpoint = 0x82, 3674 + .ep_attr = USB_ENDPOINT_XFER_ISOC | 3675 + USB_ENDPOINT_SYNC_ASYNC, 3676 + .rates = SNDRV_PCM_RATE_CONTINUOUS, 3677 + .rate_min = 48000, 3678 + .rate_max = 48000, 3679 + } 3680 + }, 3681 + { 3682 + .ifnum = -1 3683 + } 3684 + } 3685 + } 3686 + }, 3687 + 3636 3688 #undef USB_DEVICE_VENDOR_SPEC
+3 -2
tools/arch/x86/include/uapi/asm/kvm.h
··· 408 408 }; 409 409 410 410 struct kvm_vmx_nested_state_hdr { 411 - __u32 flags; 412 411 __u64 vmxon_pa; 413 412 __u64 vmcs12_pa; 414 - __u64 preemption_timer_deadline; 415 413 416 414 struct { 417 415 __u16 flags; 418 416 } smm; 417 + 418 + __u32 flags; 419 + __u64 preemption_timer_deadline; 419 420 }; 420 421 421 422 struct kvm_svm_nested_state_data {
+4
tools/arch/x86/lib/memcpy_64.S
··· 8 8 #include <asm/alternative-asm.h> 9 9 #include <asm/export.h> 10 10 11 + .pushsection .noinstr.text, "ax" 12 + 11 13 /* 12 14 * We build a jump to memcpy_orig by default which gets NOPped out on 13 15 * the majority of x86 CPUs which set REP_GOOD. In addition, CPUs which ··· 185 183 .Lend: 186 184 retq 187 185 SYM_FUNC_END(memcpy_orig) 186 + 187 + .popsection 188 188 189 189 #ifndef CONFIG_UML 190 190
+1 -2
tools/include/linux/bits.h
··· 18 18 * position @h. For example 19 19 * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000. 20 20 */ 21 - #if !defined(__ASSEMBLY__) && \ 22 - (!defined(CONFIG_CC_IS_GCC) || CONFIG_GCC_VERSION >= 49000) 21 + #if !defined(__ASSEMBLY__) 23 22 #include <linux/build_bug.h> 24 23 #define GENMASK_INPUT_CHECK(h, l) \ 25 24 (BUILD_BUG_ON_ZERO(__builtin_choose_expr( \
+21 -20
tools/include/uapi/linux/bpf.h
··· 3171 3171 * int bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags) 3172 3172 * Description 3173 3173 * Copy *size* bytes from *data* into a ring buffer *ringbuf*. 3174 - * If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of 3175 - * new data availability is sent. 3176 - * IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of 3177 - * new data availability is sent unconditionally. 3174 + * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification 3175 + * of new data availability is sent. 3176 + * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification 3177 + * of new data availability is sent unconditionally. 3178 3178 * Return 3179 - * 0, on success; 3180 - * < 0, on error. 3179 + * 0 on success, or a negative error in case of failure. 3181 3180 * 3182 3181 * void *bpf_ringbuf_reserve(void *ringbuf, u64 size, u64 flags) 3183 3182 * Description ··· 3188 3189 * void bpf_ringbuf_submit(void *data, u64 flags) 3189 3190 * Description 3190 3191 * Submit reserved ring buffer sample, pointed to by *data*. 3191 - * If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of 3192 - * new data availability is sent. 3193 - * IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of 3194 - * new data availability is sent unconditionally. 3192 + * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification 3193 + * of new data availability is sent. 3194 + * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification 3195 + * of new data availability is sent unconditionally. 3195 3196 * Return 3196 3197 * Nothing. Always succeeds. 3197 3198 * 3198 3199 * void bpf_ringbuf_discard(void *data, u64 flags) 3199 3200 * Description 3200 3201 * Discard reserved ring buffer sample, pointed to by *data*. 3201 - * If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of 3202 - * new data availability is sent. 
3203 - * IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of 3204 - * new data availability is sent unconditionally. 3202 + * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification 3203 + * of new data availability is sent. 3204 + * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification 3205 + * of new data availability is sent unconditionally. 3205 3206 * Return 3206 3207 * Nothing. Always succeeds. 3207 3208 * ··· 3209 3210 * Description 3210 3211 * Query various characteristics of provided ring buffer. What 3211 3212 * exactly is queries is determined by *flags*: 3212 - * - BPF_RB_AVAIL_DATA - amount of data not yet consumed; 3213 - * - BPF_RB_RING_SIZE - the size of ring buffer; 3214 - * - BPF_RB_CONS_POS - consumer position (can wrap around); 3215 - * - BPF_RB_PROD_POS - producer(s) position (can wrap around); 3216 - * Data returned is just a momentary snapshots of actual values 3213 + * 3214 + * * **BPF_RB_AVAIL_DATA**: Amount of data not yet consumed. 3215 + * * **BPF_RB_RING_SIZE**: The size of ring buffer. 3216 + * * **BPF_RB_CONS_POS**: Consumer position (can wrap around). 3217 + * * **BPF_RB_PROD_POS**: Producer(s) position (can wrap around). 3218 + * 3219 + * Data returned is just a momentary snapshot of actual values 3217 3220 * and could be inaccurate, so this facility should be used to 3218 3221 * power heuristics and for reporting, not to make 100% correct 3219 3222 * calculation. 3220 3223 * Return 3221 - * Requested value, or 0, if flags are not recognized. 3224 + * Requested value, or 0, if *flags* are not recognized. 3222 3225 * 3223 3226 * int bpf_csum_level(struct sk_buff *skb, u64 level) 3224 3227 * Description
+2
tools/lib/bpf/bpf.h
··· 233 233 LIBBPF_API int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, 234 234 __u32 *buf_len, __u32 *prog_id, __u32 *fd_type, 235 235 __u64 *probe_offset, __u64 *probe_addr); 236 + 237 + enum bpf_stats_type; /* defined in up-to-date linux/bpf.h */ 236 238 LIBBPF_API int bpf_enable_stats(enum bpf_stats_type type); 237 239 238 240 #ifdef __cplusplus
+8 -4
tools/lib/bpf/hashmap.h
··· 11 11 #include <stdbool.h> 12 12 #include <stddef.h> 13 13 #include <limits.h> 14 - #ifndef __WORDSIZE 15 - #define __WORDSIZE (__SIZEOF_LONG__ * 8) 16 - #endif 17 14 18 15 static inline size_t hash_bits(size_t h, int bits) 19 16 { 20 17 /* shuffle bits and return requested number of upper bits */ 21 - return (h * 11400714819323198485llu) >> (__WORDSIZE - bits); 18 + #if (__SIZEOF_SIZE_T__ == __SIZEOF_LONG_LONG__) 19 + /* LP64 case */ 20 + return (h * 11400714819323198485llu) >> (__SIZEOF_LONG_LONG__ * 8 - bits); 21 + #elif (__SIZEOF_SIZE_T__ <= __SIZEOF_LONG__) 22 + return (h * 2654435769lu) >> (__SIZEOF_LONG__ * 8 - bits); 23 + #else 24 + # error "Unsupported size_t size" 25 + #endif 22 26 } 23 27 24 28 typedef size_t (*hashmap_hash_fn)(const void *key, void *ctx);
+8 -2
tools/lib/bpf/libbpf.c
··· 4818 4818 err = -EINVAL; 4819 4819 goto out; 4820 4820 } 4821 - prog = bpf_object__find_program_by_title(obj, sec_name); 4821 + prog = NULL; 4822 + for (i = 0; i < obj->nr_programs; i++) { 4823 + if (!strcmp(obj->programs[i].section_name, sec_name)) { 4824 + prog = &obj->programs[i]; 4825 + break; 4826 + } 4827 + } 4822 4828 if (!prog) { 4823 4829 pr_warn("failed to find program '%s' for CO-RE offset relocation\n", 4824 4830 sec_name); ··· 6659 6653 .expected_attach_type = BPF_TRACE_ITER, 6660 6654 .is_attach_btf = true, 6661 6655 .attach_fn = attach_iter), 6662 - BPF_EAPROG_SEC("xdp_devmap", BPF_PROG_TYPE_XDP, 6656 + BPF_EAPROG_SEC("xdp_devmap/", BPF_PROG_TYPE_XDP, 6663 6657 BPF_XDP_DEVMAP), 6664 6658 BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP), 6665 6659 BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
+3
tools/lib/subcmd/parse-options.c
··· 237 237 return err; 238 238 239 239 case OPTION_CALLBACK: 240 + if (opt->set) 241 + *(bool *)opt->set = true; 242 + 240 243 if (unset) 241 244 return (*opt->callback)(opt, NULL, 1) ? (-1) : 0; 242 245 if (opt->flags & PARSE_OPT_NOARG)
+37 -6
tools/lib/traceevent/kbuffer-parse.c
··· 361 361 break; 362 362 363 363 case KBUFFER_TYPE_TIME_EXTEND: 364 + case KBUFFER_TYPE_TIME_STAMP: 364 365 extend = read_4(kbuf, data); 365 366 data += 4; 366 367 extend <<= TS_SHIFT; ··· 370 369 *length = 0; 371 370 break; 372 371 373 - case KBUFFER_TYPE_TIME_STAMP: 374 - data += 12; 375 - *length = 0; 376 - break; 377 372 case 0: 378 373 *length = read_4(kbuf, data) - 4; 379 374 *length = (*length + 3) & ~3; ··· 394 397 395 398 type_len = translate_data(kbuf, ptr, &ptr, &delta, &length); 396 399 397 - kbuf->timestamp += delta; 400 + if (type_len == KBUFFER_TYPE_TIME_STAMP) 401 + kbuf->timestamp = delta; 402 + else 403 + kbuf->timestamp += delta; 404 + 398 405 kbuf->index = calc_index(kbuf, ptr); 399 406 kbuf->next = kbuf->index + length; 400 407 ··· 455 454 if (kbuf->next >= kbuf->size) 456 455 return -1; 457 456 type = update_pointers(kbuf); 458 - } while (type == KBUFFER_TYPE_TIME_EXTEND || type == KBUFFER_TYPE_PADDING); 457 + } while (type == KBUFFER_TYPE_TIME_EXTEND || 458 + type == KBUFFER_TYPE_TIME_STAMP || 459 + type == KBUFFER_TYPE_PADDING); 459 460 460 461 return 0; 461 462 } ··· 548 545 549 546 return 0; 550 547 } 548 + 549 + /** 550 + * kbuffer_subbuf_timestamp - read the timestamp from a sub buffer 551 + * @kbuf: The kbuffer to load 552 + * @subbuf: The subbuffer to read from. 553 + * 554 + * Return the timestamp from a subbuffer. 555 + */ 556 + unsigned long long kbuffer_subbuf_timestamp(struct kbuffer *kbuf, void *subbuf) 557 + { 558 + return kbuf->read_8(subbuf); 559 + } 560 + 561 + /** 562 + * kbuffer_ptr_delta - read the delta field from a record 563 + * @kbuf: The kbuffer to load 564 + * @ptr: The record in the buffe. 
565 + * 566 + * Return the timestamp delta from a record 567 + */ 568 + unsigned int kbuffer_ptr_delta(struct kbuffer *kbuf, void *ptr) 569 + { 570 + unsigned int type_len_ts; 571 + 572 + type_len_ts = read_4(kbuf, ptr); 573 + return ts4host(kbuf, type_len_ts); 574 + } 575 + 551 576 552 577 /** 553 578 * kbuffer_read_event - read the next event in the kbuffer subbuffer
+2
tools/lib/traceevent/kbuffer.h
··· 49 49 void *kbuffer_read_event(struct kbuffer *kbuf, unsigned long long *ts); 50 50 void *kbuffer_next_event(struct kbuffer *kbuf, unsigned long long *ts); 51 51 unsigned long long kbuffer_timestamp(struct kbuffer *kbuf); 52 + unsigned long long kbuffer_subbuf_timestamp(struct kbuffer *kbuf, void *subbuf); 53 + unsigned int kbuffer_ptr_delta(struct kbuffer *kbuf, void *ptr); 52 54 53 55 void *kbuffer_translate_data(int swap, void *data, unsigned int *size); 54 56
+1
tools/perf/arch/x86/util/intel-pt.c
··· 641 641 } 642 642 evsel->core.attr.freq = 0; 643 643 evsel->core.attr.sample_period = 1; 644 + evsel->no_aux_samples = true; 644 645 intel_pt_evsel = evsel; 645 646 opts->full_auxtrace = true; 646 647 }
+9 -9
tools/perf/builtin-record.c
··· 852 852 * event synthesis. 853 853 */ 854 854 if (opts->initial_delay || target__has_cpu(&opts->target)) { 855 - if (perf_evlist__add_dummy(evlist)) 856 - return -ENOMEM; 855 + pos = perf_evlist__get_tracking_event(evlist); 856 + if (!evsel__is_dummy_event(pos)) { 857 + /* Set up dummy event. */ 858 + if (perf_evlist__add_dummy(evlist)) 859 + return -ENOMEM; 860 + pos = evlist__last(evlist); 861 + perf_evlist__set_tracking_event(evlist, pos); 862 + } 857 863 858 - /* Disable tracking of mmaps on lead event. */ 859 - pos = evlist__first(evlist); 860 - pos->tracking = 0; 861 - /* Set up dummy event. */ 862 - pos = evlist__last(evlist); 863 - pos->tracking = 1; 864 864 /* 865 865 * Enable the dummy event when the process is forked for 866 866 * initial_delay, immediately for system wide. 867 867 */ 868 - if (opts->initial_delay) 868 + if (opts->initial_delay && !pos->immediate) 869 869 pos->core.attr.enable_on_exec = 1; 870 870 else 871 871 pos->immediate = 1;
+1 -1
tools/perf/builtin-script.c
··· 462 462 return -EINVAL; 463 463 464 464 if (PRINT_FIELD(IREGS) && 465 - evsel__check_stype(evsel, PERF_SAMPLE_REGS_INTR, "IREGS", PERF_OUTPUT_IREGS)) 465 + evsel__do_check_stype(evsel, PERF_SAMPLE_REGS_INTR, "IREGS", PERF_OUTPUT_IREGS, allow_user_set)) 466 466 return -EINVAL; 467 467 468 468 if (PRINT_FIELD(UREGS) &&
+1 -1
tools/perf/pmu-events/arch/s390/cf_z15/extended.json
··· 380 380 { 381 381 "Unit": "CPU-M-CF", 382 382 "EventCode": "265", 383 - "EventName": "DFLT_CCERROR", 383 + "EventName": "DFLT_CCFINISH", 384 384 "BriefDescription": "Increments by one for every DEFLATE CONVERSION CALL instruction executed that ended in Condition Codes 0, 1 or 2", 385 385 "PublicDescription": "Increments by one for every DEFLATE CONVERSION CALL instruction executed that ended in Condition Codes 0, 1 or 2" 386 386 },
+1 -1
tools/perf/scripts/python/export-to-postgresql.py
··· 1055 1055 cbr = data[0] 1056 1056 MHz = (data[4] + 500) / 1000 1057 1057 percent = ((cbr * 1000 / data[2]) + 5) / 10 1058 - value = struct.pack("!hiqiiiiii", 4, 8, id, 4, cbr, 4, MHz, 4, percent) 1058 + value = struct.pack("!hiqiiiiii", 4, 8, id, 4, cbr, 4, int(MHz), 4, int(percent)) 1059 1059 cbr_file.write(value) 1060 1060 1061 1061 def mwait(id, raw_buf):
+9 -2
tools/perf/scripts/python/exported-sql-viewer.py
··· 768 768 " FROM calls" 769 769 " INNER JOIN call_paths ON calls.call_path_id = call_paths.id" 770 770 " INNER JOIN symbols ON call_paths.symbol_id = symbols.id" 771 - " WHERE symbols.name" + match + 771 + " WHERE calls.id <> 0" 772 + " AND symbols.name" + match + 772 773 " GROUP BY comm_id, thread_id, call_path_id" 773 774 " ORDER BY comm_id, thread_id, call_path_id") 774 775 ··· 964 963 " FROM calls" 965 964 " INNER JOIN call_paths ON calls.call_path_id = call_paths.id" 966 965 " INNER JOIN symbols ON call_paths.symbol_id = symbols.id" 967 - " WHERE symbols.name" + match + 966 + " WHERE calls.id <> 0" 967 + " AND symbols.name" + match + 968 968 " ORDER BY comm_id, thread_id, call_time, calls.id") 969 969 970 970 def FindPath(self, query): ··· 1052 1050 child = self.model.index(row, 0, parent) 1053 1051 if child.internalPointer().dbid == dbid: 1054 1052 found = True 1053 + self.view.setExpanded(parent, True) 1055 1054 self.view.setCurrentIndex(child) 1056 1055 parent = child 1057 1056 break ··· 1130 1127 child = self.model.index(row, 0, parent) 1131 1128 if child.internalPointer().dbid == dbid: 1132 1129 found = True 1130 + self.view.setExpanded(parent, True) 1133 1131 self.view.setCurrentIndex(child) 1134 1132 parent = child 1135 1133 break ··· 1143 1139 return 1144 1140 last_child = None 1145 1141 for row in xrange(n): 1142 + self.view.setExpanded(parent, True) 1146 1143 child = self.model.index(row, 0, parent) 1147 1144 child_call_time = child.internalPointer().call_time 1148 1145 if child_call_time < time: ··· 1156 1151 if not last_child: 1157 1152 if not found: 1158 1153 child = self.model.index(0, 0, parent) 1154 + self.view.setExpanded(parent, True) 1159 1155 self.view.setCurrentIndex(child) 1160 1156 return 1161 1157 found = True 1158 + self.view.setExpanded(parent, True) 1162 1159 self.view.setCurrentIndex(last_child) 1163 1160 parent = last_child 1164 1161
+5 -3
tools/perf/scripts/python/flamegraph.py
··· 17 17 from __future__ import print_function 18 18 import sys 19 19 import os 20 + import io 20 21 import argparse 21 22 import json 22 23 ··· 82 81 83 82 if self.args.format == "html": 84 83 try: 85 - with open(self.args.template) as f: 84 + with io.open(self.args.template, encoding="utf-8") as f: 86 85 output_str = f.read().replace("/** @flamegraph_json **/", 87 86 json_str) 88 87 except IOError as e: ··· 94 93 output_fn = self.args.output or "stacks.json" 95 94 96 95 if output_fn == "-": 97 - sys.stdout.write(output_str) 96 + with io.open(sys.stdout.fileno(), "w", encoding="utf-8", closefd=False) as out: 97 + out.write(output_str) 98 98 else: 99 99 print("dumping data to {}".format(output_fn)) 100 100 try: 101 - with open(output_fn, "w") as out: 101 + with io.open(output_fn, "w", encoding="utf-8") as out: 102 102 out.write(output_str) 103 103 except IOError as e: 104 104 print("Error writing output file: {}".format(e), file=sys.stderr)
+29 -7
tools/perf/ui/browsers/hists.c
··· 2288 2288 return browser->he_selection->thread; 2289 2289 } 2290 2290 2291 + static struct res_sample *hist_browser__selected_res_sample(struct hist_browser *browser) 2292 + { 2293 + return browser->he_selection ? browser->he_selection->res_samples : NULL; 2294 + } 2295 + 2291 2296 /* Check whether the browser is for 'top' or 'report' */ 2292 2297 static inline bool is_report_browser(void *timer) 2293 2298 { ··· 3362 3357 &options[nr_options], NULL, NULL, evsel); 3363 3358 nr_options += add_res_sample_opt(browser, &actions[nr_options], 3364 3359 &options[nr_options], 3365 - hist_browser__selected_entry(browser)->res_samples, 3366 - evsel, A_NORMAL); 3360 + hist_browser__selected_res_sample(browser), 3361 + evsel, A_NORMAL); 3367 3362 nr_options += add_res_sample_opt(browser, &actions[nr_options], 3368 3363 &options[nr_options], 3369 - hist_browser__selected_entry(browser)->res_samples, 3370 - evsel, A_ASM); 3364 + hist_browser__selected_res_sample(browser), 3365 + evsel, A_ASM); 3371 3366 nr_options += add_res_sample_opt(browser, &actions[nr_options], 3372 3367 &options[nr_options], 3373 - hist_browser__selected_entry(browser)->res_samples, 3374 - evsel, A_SOURCE); 3368 + hist_browser__selected_res_sample(browser), 3369 + evsel, A_SOURCE); 3375 3370 nr_options += add_switch_opt(browser, &actions[nr_options], 3376 3371 &options[nr_options]); 3377 3372 skip_scripting: ··· 3603 3598 hbt, warn_lost_event); 3604 3599 } 3605 3600 3601 + static bool perf_evlist__single_entry(struct evlist *evlist) 3602 + { 3603 + int nr_entries = evlist->core.nr_entries; 3604 + 3605 + if (nr_entries == 1) 3606 + return true; 3607 + 3608 + if (nr_entries == 2) { 3609 + struct evsel *last = evlist__last(evlist); 3610 + 3611 + if (evsel__is_dummy_event(last)) 3612 + return true; 3613 + } 3614 + 3615 + return false; 3616 + } 3617 + 3606 3618 int perf_evlist__tui_browse_hists(struct evlist *evlist, const char *help, 3607 3619 struct hist_browser_timer *hbt, 3608 3620 float min_pcnt, ··· 
3630 3608 int nr_entries = evlist->core.nr_entries; 3631 3609 3632 3610 single_entry: 3633 - if (nr_entries == 1) { 3611 + if (perf_evlist__single_entry(evlist)) { 3634 3612 struct evsel *first = evlist__first(evlist); 3635 3613 3636 3614 return perf_evsel__hists_browse(first, nr_entries, help,
+12
tools/perf/util/evlist.c
··· 1566 1566 list_splice(&move, &evlist->core.entries); 1567 1567 } 1568 1568 1569 + struct evsel *perf_evlist__get_tracking_event(struct evlist *evlist) 1570 + { 1571 + struct evsel *evsel; 1572 + 1573 + evlist__for_each_entry(evlist, evsel) { 1574 + if (evsel->tracking) 1575 + return evsel; 1576 + } 1577 + 1578 + return evlist__first(evlist); 1579 + } 1580 + 1569 1581 void perf_evlist__set_tracking_event(struct evlist *evlist, 1570 1582 struct evsel *tracking_evsel) 1571 1583 {
+1
tools/perf/util/evlist.h
··· 335 335 evlist__cpu_iter_start(evlist); \ 336 336 perf_cpu_map__for_each_cpu (cpu, index, (evlist)->core.all_cpus) 337 337 338 + struct evsel *perf_evlist__get_tracking_event(struct evlist *evlist); 338 339 void perf_evlist__set_tracking_event(struct evlist *evlist, 339 340 struct evsel *tracking_evsel); 340 341
+3 -9
tools/perf/util/evsel.c
··· 898 898 } 899 899 } 900 900 901 - static bool is_dummy_event(struct evsel *evsel) 902 - { 903 - return (evsel->core.attr.type == PERF_TYPE_SOFTWARE) && 904 - (evsel->core.attr.config == PERF_COUNT_SW_DUMMY); 905 - } 906 - 907 901 struct evsel_config_term *__evsel__get_config_term(struct evsel *evsel, enum evsel_term_type type) 908 902 { 909 903 struct evsel_config_term *term, *found_term = NULL; ··· 1014 1020 if (callchain && callchain->enabled && !evsel->no_aux_samples) 1015 1021 evsel__config_callchain(evsel, opts, callchain); 1016 1022 1017 - if (opts->sample_intr_regs) { 1023 + if (opts->sample_intr_regs && !evsel->no_aux_samples) { 1018 1024 attr->sample_regs_intr = opts->sample_intr_regs; 1019 1025 evsel__set_sample_bit(evsel, REGS_INTR); 1020 1026 } 1021 1027 1022 - if (opts->sample_user_regs) { 1028 + if (opts->sample_user_regs && !evsel->no_aux_samples) { 1023 1029 attr->sample_regs_user |= opts->sample_user_regs; 1024 1030 evsel__set_sample_bit(evsel, REGS_USER); 1025 1031 } ··· 1155 1161 * The software event will trigger -EOPNOTSUPP error out, 1156 1162 * if BRANCH_STACK bit is set. 1157 1163 */ 1158 - if (is_dummy_event(evsel)) 1164 + if (evsel__is_dummy_event(evsel)) 1159 1165 evsel__reset_sample_bit(evsel, BRANCH_STACK); 1160 1166 } 1161 1167
+6
tools/perf/util/evsel.h
··· 399 399 evsel->synth_sample_type & PERF_SAMPLE_BRANCH_STACK; 400 400 } 401 401 402 + static inline bool evsel__is_dummy_event(struct evsel *evsel) 403 + { 404 + return (evsel->core.attr.type == PERF_TYPE_SOFTWARE) && 405 + (evsel->core.attr.config == PERF_COUNT_SW_DUMMY); 406 + } 407 + 402 408 struct perf_env *evsel__env(struct evsel *evsel); 403 409 404 410 int evsel__store_ids(struct evsel *evsel, struct evlist *evlist);
+8 -4
tools/perf/util/hashmap.h
··· 11 11 #include <stdbool.h> 12 12 #include <stddef.h> 13 13 #include <limits.h> 14 - #ifndef __WORDSIZE 15 - #define __WORDSIZE (__SIZEOF_LONG__ * 8) 16 - #endif 17 14 18 15 static inline size_t hash_bits(size_t h, int bits) 19 16 { 20 17 /* shuffle bits and return requested number of upper bits */ 21 - return (h * 11400714819323198485llu) >> (__WORDSIZE - bits); 18 + #if (__SIZEOF_SIZE_T__ == __SIZEOF_LONG_LONG__) 19 + /* LP64 case */ 20 + return (h * 11400714819323198485llu) >> (__SIZEOF_LONG_LONG__ * 8 - bits); 21 + #elif (__SIZEOF_SIZE_T__ <= __SIZEOF_LONG__) 22 + return (h * 2654435769lu) >> (__SIZEOF_LONG__ * 8 - bits); 23 + #else 24 + # error "Unsupported size_t size" 25 + #endif 22 26 } 23 27 24 28 typedef size_t (*hashmap_hash_fn)(const void *key, void *ctx);
+3 -2
tools/perf/util/intel-pt.c
··· 1735 1735 u64 sample_type = evsel->core.attr.sample_type; 1736 1736 u64 id = evsel->core.id[0]; 1737 1737 u8 cpumode; 1738 + u64 regs[8 * sizeof(sample.intr_regs.mask)]; 1738 1739 1739 1740 if (intel_pt_skip_event(pt)) 1740 1741 return 0; ··· 1785 1784 } 1786 1785 1787 1786 if (sample_type & PERF_SAMPLE_REGS_INTR && 1788 - items->mask[INTEL_PT_GP_REGS_POS]) { 1789 - u64 regs[sizeof(sample.intr_regs.mask)]; 1787 + (items->mask[INTEL_PT_GP_REGS_POS] || 1788 + items->mask[INTEL_PT_XMM_POS])) { 1790 1789 u64 regs_mask = evsel->core.attr.sample_regs_intr; 1791 1790 u64 *pos; 1792 1791
+3 -1
tools/testing/kunit/kunit.py
··· 82 82 request.make_options) 83 83 build_end = time.time() 84 84 if not success: 85 - return KunitResult(KunitStatus.BUILD_FAILURE, 'could not build kernel') 85 + return KunitResult(KunitStatus.BUILD_FAILURE, 86 + 'could not build kernel', 87 + build_end - build_start) 86 88 if not success: 87 89 return KunitResult(KunitStatus.BUILD_FAILURE, 88 90 'could not build kernel',
+1 -1
tools/testing/kunit/kunit_config.py
··· 10 10 import re 11 11 12 12 CONFIG_IS_NOT_SET_PATTERN = r'^# CONFIG_(\w+) is not set$' 13 - CONFIG_PATTERN = r'^CONFIG_(\w+)=(\S+)$' 13 + CONFIG_PATTERN = r'^CONFIG_(\w+)=(\S+|".*")$' 14 14 15 15 KconfigEntryBase = collections.namedtuple('KconfigEntry', ['name', 'value']) 16 16
+4 -4
tools/testing/kunit/kunit_parser.py
··· 265 265 return bubble_up_errors(lambda x: x.status, test_suite_list) 266 266 267 267 def parse_test_result(lines: List[str]) -> TestResult: 268 - if not lines: 269 - return TestResult(TestStatus.NO_TESTS, [], lines) 270 268 consume_non_diagnositic(lines) 271 - if not parse_tap_header(lines): 272 - return None 269 + if not lines or not parse_tap_header(lines): 270 + return TestResult(TestStatus.NO_TESTS, [], lines) 273 271 test_suites = [] 274 272 test_suite = parse_test_suite(lines) 275 273 while test_suite: ··· 280 282 failed_tests = 0 281 283 crashed_tests = 0 282 284 test_result = parse_test_result(list(isolate_kunit_output(kernel_output))) 285 + if test_result.status == TestStatus.NO_TESTS: 286 + print_with_timestamp(red('[ERROR] ') + 'no kunit output detected') 283 287 for test_suite in test_result.suites: 284 288 if test_suite.status == TestStatus.SUCCESS: 285 289 print_suite_divider(green('[PASSED] ') + test_suite.name)
+11
tools/testing/kunit/kunit_tool_test.py
··· 170 170 result.status) 171 171 file.close() 172 172 173 + def test_no_kunit_output(self): 174 + crash_log = get_absolute_path( 175 + 'test_data/test_insufficient_memory.log') 176 + file = open(crash_log) 177 + print_mock = mock.patch('builtins.print').start() 178 + result = kunit_parser.parse_run_tests( 179 + kunit_parser.isolate_kunit_output(file.readlines())) 180 + print_mock.assert_any_call(StrContains("no kunit output detected")) 181 + print_mock.stop() 182 + file.close() 183 + 173 184 def test_crashed_test(self): 174 185 crashed_log = get_absolute_path( 175 186 'test_data/test_is_test_passed-crash.log')
tools/testing/kunit/test_data/test_insufficient_memory.log
+1 -1
tools/testing/selftests/bpf/prog_tests/fentry_fexit.c
··· 36 36 fentry_res = (__u64 *)fentry_skel->bss; 37 37 fexit_res = (__u64 *)fexit_skel->bss; 38 38 printf("%lld\n", fentry_skel->bss->test1_result); 39 - for (i = 0; i < 6; i++) { 39 + for (i = 0; i < 8; i++) { 40 40 CHECK(fentry_res[i] != 1, "result", 41 41 "fentry_test%d failed err %lld\n", i + 1, fentry_res[i]); 42 42 CHECK(fexit_res[i] != 1, "result",
+2 -2
tools/testing/selftests/bpf/prog_tests/flow_dissector.c
··· 527 527 528 528 run_tests_skb_less(tap_fd, skel->maps.last_dissection); 529 529 530 - err = bpf_prog_detach(prog_fd, BPF_FLOW_DISSECTOR); 531 - CHECK(err, "bpf_prog_detach", "err %d errno %d\n", err, errno); 530 + err = bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR); 531 + CHECK(err, "bpf_prog_detach2", "err %d errno %d\n", err, errno); 532 532 } 533 533 534 534 static void test_skb_less_link_create(struct bpf_flow *skel, int tap_fd)
+34 -10
tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /* 3 - * Test that the flow_dissector program can be updated with a single 4 - * syscall by attaching a new program that replaces the existing one. 5 - * 6 - * Corner case - the same program cannot be attached twice. 3 + * Tests for attaching, detaching, and replacing flow_dissector BPF program. 7 4 */ 8 5 9 6 #define _GNU_SOURCE ··· 113 116 CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog2)); 114 117 115 118 out_detach: 116 - err = bpf_prog_detach(0, BPF_FLOW_DISSECTOR); 119 + err = bpf_prog_detach2(prog2, 0, BPF_FLOW_DISSECTOR); 117 120 if (CHECK_FAIL(err)) 118 121 perror("bpf_prog_detach"); 119 122 CHECK_FAIL(prog_is_attached(netns)); ··· 149 152 DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts); 150 153 int err, link; 151 154 152 - err = bpf_prog_attach(prog1, -1, BPF_FLOW_DISSECTOR, 0); 155 + err = bpf_prog_attach(prog1, 0, BPF_FLOW_DISSECTOR, 0); 153 156 if (CHECK_FAIL(err)) { 154 157 perror("bpf_prog_attach(prog1)"); 155 158 return; ··· 165 168 close(link); 166 169 CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); 167 170 168 - err = bpf_prog_detach(-1, BPF_FLOW_DISSECTOR); 171 + err = bpf_prog_detach2(prog1, 0, BPF_FLOW_DISSECTOR); 169 172 if (CHECK_FAIL(err)) 170 173 perror("bpf_prog_detach"); 171 174 CHECK_FAIL(prog_is_attached(netns)); ··· 185 188 186 189 /* Expect failure attaching prog when link exists */ 187 190 errno = 0; 188 - err = bpf_prog_attach(prog2, -1, BPF_FLOW_DISSECTOR, 0); 191 + err = bpf_prog_attach(prog2, 0, BPF_FLOW_DISSECTOR, 0); 189 192 if (CHECK_FAIL(!err || errno != EEXIST)) 190 193 perror("bpf_prog_attach(prog2) expected EEXIST"); 191 194 CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); ··· 208 211 209 212 /* Expect failure detaching prog when link exists */ 210 213 errno = 0; 211 - err = bpf_prog_detach(-1, BPF_FLOW_DISSECTOR); 214 + err = bpf_prog_detach2(prog1, 0, BPF_FLOW_DISSECTOR); 212 215 if (CHECK_FAIL(!err || errno != EINVAL)) 
213 216 perror("bpf_prog_detach expected EINVAL"); 214 217 CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); ··· 228 231 } 229 232 CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); 230 233 231 - err = bpf_prog_detach(0, BPF_FLOW_DISSECTOR); 234 + err = bpf_prog_detach2(prog1, 0, BPF_FLOW_DISSECTOR); 232 235 if (CHECK_FAIL(err)) { 233 236 perror("bpf_prog_detach"); 234 237 return; ··· 300 303 if (CHECK_FAIL(err)) 301 304 perror("bpf_link_update"); 302 305 CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog2)); 306 + 307 + close(link); 308 + CHECK_FAIL(prog_is_attached(netns)); 309 + } 310 + 311 + static void test_link_update_same_prog(int netns, int prog1, int prog2) 312 + { 313 + DECLARE_LIBBPF_OPTS(bpf_link_create_opts, create_opts); 314 + DECLARE_LIBBPF_OPTS(bpf_link_update_opts, update_opts); 315 + int err, link; 316 + 317 + link = bpf_link_create(prog1, netns, BPF_FLOW_DISSECTOR, &create_opts); 318 + if (CHECK_FAIL(link < 0)) { 319 + perror("bpf_link_create(prog1)"); 320 + return; 321 + } 322 + CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); 323 + 324 + /* Expect success updating the prog with the same one */ 325 + update_opts.flags = 0; 326 + update_opts.old_prog_fd = 0; 327 + err = bpf_link_update(link, prog1, &update_opts); 328 + if (CHECK_FAIL(err)) 329 + perror("bpf_link_update"); 330 + CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); 303 331 304 332 close(link); 305 333 CHECK_FAIL(prog_is_attached(netns)); ··· 593 571 test_link_update_no_old_prog }, 594 572 { "link update with replace old prog", 595 573 test_link_update_replace_old_prog }, 574 + { "link update with same prog", 575 + test_link_update_same_prog }, 596 576 { "link update invalid opts", 597 577 test_link_update_invalid_opts }, 598 578 { "link update invalid prog",
+1 -1
tools/testing/selftests/bpf/progs/bpf_iter_netlink.c
··· 25 25 struct netlink_sock *sk; 26 26 } __attribute__((preserve_access_index)); 27 27 28 - static inline struct inode *SOCK_INODE(struct socket *socket) 28 + static __attribute__((noinline)) struct inode *SOCK_INODE(struct socket *socket) 29 29 { 30 30 return &container_of(socket, struct socket_alloc, socket)->vfs_inode; 31 31 }
+22
tools/testing/selftests/bpf/progs/fentry_test.c
··· 55 55 e == (void *)20 && f == 21; 56 56 return 0; 57 57 } 58 + 59 + struct bpf_fentry_test_t { 60 + struct bpf_fentry_test_t *a; 61 + }; 62 + 63 + __u64 test7_result = 0; 64 + SEC("fentry/bpf_fentry_test7") 65 + int BPF_PROG(test7, struct bpf_fentry_test_t *arg) 66 + { 67 + if (arg == 0) 68 + test7_result = 1; 69 + return 0; 70 + } 71 + 72 + __u64 test8_result = 0; 73 + SEC("fentry/bpf_fentry_test8") 74 + int BPF_PROG(test8, struct bpf_fentry_test_t *arg) 75 + { 76 + if (arg->a == 0) 77 + test8_result = 1; 78 + return 0; 79 + }
+22
tools/testing/selftests/bpf/progs/fexit_test.c
··· 56 56 e == (void *)20 && f == 21 && ret == 111; 57 57 return 0; 58 58 } 59 + 60 + struct bpf_fentry_test_t { 61 + struct bpf_fentry_test *a; 62 + }; 63 + 64 + __u64 test7_result = 0; 65 + SEC("fexit/bpf_fentry_test7") 66 + int BPF_PROG(test7, struct bpf_fentry_test_t *arg) 67 + { 68 + if (arg == 0) 69 + test7_result = 1; 70 + return 0; 71 + } 72 + 73 + __u64 test8_result = 0; 74 + SEC("fexit/bpf_fentry_test8") 75 + int BPF_PROG(test8, struct bpf_fentry_test_t *arg) 76 + { 77 + if (arg->a == 0) 78 + test8_result = 1; 79 + return 0; 80 + }
+7 -1
tools/testing/selftests/bpf/progs/test_sockmap_kern.h
··· 79 79 80 80 struct { 81 81 __uint(type, BPF_MAP_TYPE_ARRAY); 82 - __uint(max_entries, 2); 82 + __uint(max_entries, 3); 83 83 __type(key, int); 84 84 __type(value, int); 85 85 } sock_skb_opts SEC(".maps"); ··· 94 94 SEC("sk_skb1") 95 95 int bpf_prog1(struct __sk_buff *skb) 96 96 { 97 + int *f, two = 2; 98 + 99 + f = bpf_map_lookup_elem(&sock_skb_opts, &two); 100 + if (f && *f) { 101 + return *f; 102 + } 97 103 return skb->len; 98 104 } 99 105
+1 -1
tools/testing/selftests/bpf/progs/test_xdp_with_devmap_helpers.c
··· 27 27 /* valid program on DEVMAP entry via SEC name; 28 28 * has access to egress and ingress ifindex 29 29 */ 30 - SEC("xdp_devmap") 30 + SEC("xdp_devmap/map_prog") 31 31 int xdp_dummy_dm(struct xdp_md *ctx) 32 32 { 33 33 char fmt[] = "devmap redirect: dev %u -> dev %u len %u\n";
+6 -6
tools/testing/selftests/bpf/test_maps.c
··· 789 789 } 790 790 791 791 err = bpf_prog_detach(fd, BPF_SK_SKB_STREAM_PARSER); 792 - if (err) { 792 + if (!err) { 793 793 printf("Failed empty parser prog detach\n"); 794 794 goto out_sockmap; 795 795 } 796 796 797 797 err = bpf_prog_detach(fd, BPF_SK_SKB_STREAM_VERDICT); 798 - if (err) { 798 + if (!err) { 799 799 printf("Failed empty verdict prog detach\n"); 800 800 goto out_sockmap; 801 801 } 802 802 803 803 err = bpf_prog_detach(fd, BPF_SK_MSG_VERDICT); 804 - if (err) { 804 + if (!err) { 805 805 printf("Failed empty msg verdict prog detach\n"); 806 806 goto out_sockmap; 807 807 } ··· 1090 1090 assert(status == 0); 1091 1091 } 1092 1092 1093 - err = bpf_prog_detach(map_fd_rx, __MAX_BPF_ATTACH_TYPE); 1093 + err = bpf_prog_detach2(parse_prog, map_fd_rx, __MAX_BPF_ATTACH_TYPE); 1094 1094 if (!err) { 1095 1095 printf("Detached an invalid prog type.\n"); 1096 1096 goto out_sockmap; 1097 1097 } 1098 1098 1099 - err = bpf_prog_detach(map_fd_rx, BPF_SK_SKB_STREAM_PARSER); 1099 + err = bpf_prog_detach2(parse_prog, map_fd_rx, BPF_SK_SKB_STREAM_PARSER); 1100 1100 if (err) { 1101 1101 printf("Failed parser prog detach\n"); 1102 1102 goto out_sockmap; 1103 1103 } 1104 1104 1105 - err = bpf_prog_detach(map_fd_rx, BPF_SK_SKB_STREAM_VERDICT); 1105 + err = bpf_prog_detach2(verdict_prog, map_fd_rx, BPF_SK_SKB_STREAM_VERDICT); 1106 1106 if (err) { 1107 1107 printf("Failed parser prog detach\n"); 1108 1108 goto out_sockmap;
+18
tools/testing/selftests/bpf/test_sockmap.c
··· 85 85 int txmsg_ktls_skb_redir; 86 86 int ktls; 87 87 int peek_flag; 88 + int skb_use_parser; 88 89 89 90 static const struct option long_options[] = { 90 91 {"help", no_argument, NULL, 'h' }, ··· 175 174 txmsg_apply = txmsg_cork = 0; 176 175 txmsg_ingress = txmsg_redir_skb = 0; 177 176 txmsg_ktls_skb = txmsg_ktls_skb_drop = txmsg_ktls_skb_redir = 0; 177 + skb_use_parser = 0; 178 178 } 179 179 180 180 static int test_start_subtest(const struct _test *t, struct sockmap_options *o) ··· 1213 1211 } 1214 1212 } 1215 1213 1214 + if (skb_use_parser) { 1215 + i = 2; 1216 + err = bpf_map_update_elem(map_fd[7], &i, &skb_use_parser, BPF_ANY); 1217 + } 1218 + 1216 1219 if (txmsg_drop) 1217 1220 options->drop_expected = true; 1218 1221 ··· 1657 1650 test_send(opt, cgrp); 1658 1651 } 1659 1652 1653 + static void test_txmsg_ingress_parser(int cgrp, struct sockmap_options *opt) 1654 + { 1655 + txmsg_pass = 1; 1656 + skb_use_parser = 512; 1657 + opt->iov_length = 256; 1658 + opt->iov_count = 1; 1659 + opt->rate = 2; 1660 + test_exec(cgrp, opt); 1661 + } 1662 + 1660 1663 char *map_names[] = { 1661 1664 "sock_map", 1662 1665 "sock_map_txmsg", ··· 1765 1748 {"txmsg test pull-data", test_txmsg_pull}, 1766 1749 {"txmsg test pop-data", test_txmsg_pop}, 1767 1750 {"txmsg test push/pop data", test_txmsg_push_pop}, 1751 + {"txmsg text ingress parser", test_txmsg_ingress_parser}, 1768 1752 }; 1769 1753 1770 1754 static int check_whitelist(struct _test *t, struct sockmap_options *opt)
+36
tools/testing/selftests/kmod/kmod.sh
··· 63 63 ALL_TESTS="$ALL_TESTS 0009:150:1" 64 64 ALL_TESTS="$ALL_TESTS 0010:1:1" 65 65 ALL_TESTS="$ALL_TESTS 0011:1:1" 66 + ALL_TESTS="$ALL_TESTS 0012:1:1" 67 + ALL_TESTS="$ALL_TESTS 0013:1:1" 66 68 67 69 # Kselftest framework requirement - SKIP code is 4. 68 70 ksft_skip=4 ··· 472 470 echo "$MODPROBE" > /proc/sys/kernel/modprobe 473 471 } 474 472 473 + kmod_check_visibility() 474 + { 475 + local name="$1" 476 + local cmd="$2" 477 + 478 + modprobe $DEFAULT_KMOD_DRIVER 479 + 480 + local priv=$(eval $cmd) 481 + local unpriv=$(capsh --drop=CAP_SYSLOG -- -c "$cmd") 482 + 483 + if [ "$priv" = "$unpriv" ] || \ 484 + [ "${priv:0:3}" = "0x0" ] || \ 485 + [ "${unpriv:0:3}" != "0x0" ] ; then 486 + echo "${FUNCNAME[0]}: FAIL, $name visible to unpriv: '$priv' vs '$unpriv'" >&2 487 + exit 1 488 + else 489 + echo "${FUNCNAME[0]}: OK!" 490 + fi 491 + } 492 + 493 + kmod_test_0012() 494 + { 495 + kmod_check_visibility /proc/modules \ 496 + "grep '^${DEFAULT_KMOD_DRIVER}\b' /proc/modules | awk '{print \$NF}'" 497 + } 498 + 499 + kmod_test_0013() 500 + { 501 + kmod_check_visibility '/sys/module/*/sections/*' \ 502 + "cat /sys/module/${DEFAULT_KMOD_DRIVER}/sections/.*text | head -n1" 503 + } 504 + 475 505 list_tests() 476 506 { 477 507 echo "Test ID list:" ··· 523 489 echo "0009 x $(get_test_count 0009) - multithreaded - push kmod_concurrent over max_modprobes for get_fs_type()" 524 490 echo "0010 x $(get_test_count 0010) - test nonexistent modprobe path" 525 491 echo "0011 x $(get_test_count 0011) - test completely disabling module autoloading" 492 + echo "0012 x $(get_test_count 0012) - test /proc/modules address visibility under CAP_SYSLOG" 493 + echo "0013 x $(get_test_count 0013) - test /sys/module/*/sections/* visibility under CAP_SYSLOG" 526 494 } 527 495 528 496 usage()
+1 -1
tools/testing/selftests/kselftest.h
··· 36 36 static struct ksft_count ksft_cnt; 37 37 static unsigned int ksft_plan; 38 38 39 - static inline int ksft_test_num(void) 39 + static inline unsigned int ksft_test_num(void) 40 40 { 41 41 return ksft_cnt.ksft_pass + ksft_cnt.ksft_fail + 42 42 ksft_cnt.ksft_xfail + ksft_cnt.ksft_xpass +
+1 -1
tools/testing/selftests/net/fib_nexthop_multiprefix.sh
··· 144 144 145 145 cleanup() 146 146 { 147 - for n in h1 r1 h2 h3 h4 147 + for n in h0 r1 h1 h2 h3 148 148 do 149 149 ip netns del ${n} 2>/dev/null 150 150 done
+13
tools/testing/selftests/net/fib_nexthops.sh
··· 747 747 run_cmd "$IP nexthop add id 86 via 2001:db8:91::2 dev veth1" 748 748 run_cmd "$IP ro add 2001:db8:101::1/128 nhid 81" 749 749 750 + # rpfilter and default route 751 + $IP nexthop flush >/dev/null 2>&1 752 + run_cmd "ip netns exec me ip6tables -t mangle -I PREROUTING 1 -m rpfilter --invert -j DROP" 753 + run_cmd "$IP nexthop add id 91 via 2001:db8:91::2 dev veth1" 754 + run_cmd "$IP nexthop add id 92 via 2001:db8:92::2 dev veth3" 755 + run_cmd "$IP nexthop add id 93 group 91/92" 756 + run_cmd "$IP -6 ro add default nhid 91" 757 + run_cmd "ip netns exec me ping -c1 -w1 2001:db8:101::1" 758 + log_test $? 0 "Nexthop with default route and rpfilter" 759 + run_cmd "$IP -6 ro replace default nhid 93" 760 + run_cmd "ip netns exec me ping -c1 -w1 2001:db8:101::1" 761 + log_test $? 0 "Nexthop with multipath default route and rpfilter" 762 + 750 763 # TO-DO: 751 764 # existing route with old nexthop; append route with new nexthop 752 765 # existing route with old nexthop; replace route with new
+2
tools/testing/selftests/net/ip_defrag.sh
··· 6 6 set +x 7 7 set -e 8 8 9 + modprobe -q nf_defrag_ipv6 10 + 9 11 readonly NETNS="ns-$(mktemp -u XXXXXX)" 10 12 11 13 setup() {
+1 -1
tools/testing/selftests/net/txtimestamp.sh
··· 75 75 fi 76 76 } 77 77 78 - if [[ "$(ip netns identify)" == "root" ]]; then 78 + if [[ -z "$(ip netns identify)" ]]; then 79 79 ./in_netns.sh $0 $@ 80 80 else 81 81 main $@
+2 -2
tools/testing/selftests/powerpc/nx-gzip/gunz_test.c
··· 698 698 699 699 switch (cc) { 700 700 701 - case ERR_NX_TRANSLATION: 701 + case ERR_NX_AT_FAULT: 702 702 703 703 /* We touched the pages ahead of time. In the most common case 704 704 * we shouldn't be here. But may be some pages were paged out. 705 705 * Kernel should have placed the faulting address to fsaddr. 706 706 */ 707 - NXPRT(fprintf(stderr, "ERR_NX_TRANSLATION %p\n", 707 + NXPRT(fprintf(stderr, "ERR_NX_AT_FAULT %p\n", 708 708 (void *)cmdp->crb.csb.fsaddr)); 709 709 710 710 if (pgfault_retries == NX_MAX_FAULTS) {
+2 -2
tools/testing/selftests/powerpc/nx-gzip/gzfht_test.c
··· 306 306 lzcounts, cmdp, handle); 307 307 308 308 if (cc != ERR_NX_OK && cc != ERR_NX_TPBC_GT_SPBC && 309 - cc != ERR_NX_TRANSLATION) { 309 + cc != ERR_NX_AT_FAULT) { 310 310 fprintf(stderr, "nx error: cc= %d\n", cc); 311 311 exit(-1); 312 312 } 313 313 314 314 /* Page faults are handled by the user code */ 315 - if (cc == ERR_NX_TRANSLATION) { 315 + if (cc == ERR_NX_AT_FAULT) { 316 316 NXPRT(fprintf(stderr, "page fault: cc= %d, ", cc)); 317 317 NXPRT(fprintf(stderr, "try= %d, fsa= %08llx\n", 318 318 fault_tries,
+4 -9
tools/testing/selftests/tpm2/test_smoke.sh
··· 1 - #!/bin/bash 1 + #!/bin/sh 2 2 # SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) 3 3 4 4 # Kselftest framework requirement - SKIP code is 4. 5 5 ksft_skip=4 6 6 7 - [ -f /dev/tpm0 ] || exit $ksft_skip 7 + [ -e /dev/tpm0 ] || exit $ksft_skip 8 8 9 - python -m unittest -v tpm2_tests.SmokeTest 10 - python -m unittest -v tpm2_tests.AsyncTest 11 - 12 - CLEAR_CMD=$(which tpm2_clear) 13 - if [ -n $CLEAR_CMD ]; then 14 - tpm2_clear -T device 15 - fi 9 + python3 -m unittest -v tpm2_tests.SmokeTest 10 + python3 -m unittest -v tpm2_tests.AsyncTest
+3 -3
tools/testing/selftests/tpm2/test_space.sh
··· 1 - #!/bin/bash 1 + #!/bin/sh 2 2 # SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) 3 3 4 4 # Kselftest framework requirement - SKIP code is 4. 5 5 ksft_skip=4 6 6 7 - [ -f /dev/tpmrm0 ] || exit $ksft_skip 7 + [ -e /dev/tpmrm0 ] || exit $ksft_skip 8 8 9 - python -m unittest -v tpm2_tests.SpaceTest 9 + python3 -m unittest -v tpm2_tests.SpaceTest
+29 -27
tools/testing/selftests/tpm2/tpm2.py
··· 247 247 class AuthCommand(object): 248 248 """TPMS_AUTH_COMMAND""" 249 249 250 - def __init__(self, session_handle=TPM2_RS_PW, nonce='', session_attributes=0, 251 - hmac=''): 250 + def __init__(self, session_handle=TPM2_RS_PW, nonce=bytes(), 251 + session_attributes=0, hmac=bytes()): 252 252 self.session_handle = session_handle 253 253 self.nonce = nonce 254 254 self.session_attributes = session_attributes 255 255 self.hmac = hmac 256 256 257 - def __str__(self): 257 + def __bytes__(self): 258 258 fmt = '>I H%us B H%us' % (len(self.nonce), len(self.hmac)) 259 259 return struct.pack(fmt, self.session_handle, len(self.nonce), 260 260 self.nonce, self.session_attributes, len(self.hmac), ··· 268 268 class SensitiveCreate(object): 269 269 """TPMS_SENSITIVE_CREATE""" 270 270 271 - def __init__(self, user_auth='', data=''): 271 + def __init__(self, user_auth=bytes(), data=bytes()): 272 272 self.user_auth = user_auth 273 273 self.data = data 274 274 275 - def __str__(self): 275 + def __bytes__(self): 276 276 fmt = '>H%us H%us' % (len(self.user_auth), len(self.data)) 277 277 return struct.pack(fmt, len(self.user_auth), self.user_auth, 278 278 len(self.data), self.data) ··· 296 296 return '>HHIH%us%usH%us' % \ 297 297 (len(self.auth_policy), len(self.parameters), len(self.unique)) 298 298 299 - def __init__(self, object_type, name_alg, object_attributes, auth_policy='', 300 - parameters='', unique=''): 299 + def __init__(self, object_type, name_alg, object_attributes, 300 + auth_policy=bytes(), parameters=bytes(), 301 + unique=bytes()): 301 302 self.object_type = object_type 302 303 self.name_alg = name_alg 303 304 self.object_attributes = object_attributes ··· 306 305 self.parameters = parameters 307 306 self.unique = unique 308 307 309 - def __str__(self): 308 + def __bytes__(self): 310 309 return struct.pack(self.__fmt(), 311 310 self.object_type, 312 311 self.name_alg, ··· 344 343 345 344 def hex_dump(d): 346 345 d = [format(ord(x), '02x') for x in d] 347 - d = [d[i: 
i + 16] for i in xrange(0, len(d), 16)] 346 + d = [d[i: i + 16] for i in range(0, len(d), 16)] 348 347 d = [' '.join(x) for x in d] 349 348 d = os.linesep.join(d) 350 349 ··· 402 401 pcrsel_len = max((i >> 3) + 1, 3) 403 402 pcrsel = [0] * pcrsel_len 404 403 pcrsel[i >> 3] = 1 << (i & 7) 405 - pcrsel = ''.join(map(chr, pcrsel)) 404 + pcrsel = ''.join(map(chr, pcrsel)).encode() 406 405 407 406 fmt = '>HII IHB%us' % (pcrsel_len) 408 407 cmd = struct.pack(fmt, ··· 444 443 TPM2_CC_PCR_EXTEND, 445 444 i, 446 445 len(auth_cmd), 447 - str(auth_cmd), 446 + bytes(auth_cmd), 448 447 1, bank_alg, dig) 449 448 450 449 self.send_cmd(cmd) ··· 458 457 TPM2_RH_NULL, 459 458 TPM2_RH_NULL, 460 459 16, 461 - '\0' * 16, 460 + ('\0' * 16).encode(), 462 461 0, 463 462 session_type, 464 463 TPM2_ALG_NULL, ··· 473 472 474 473 for i in pcrs: 475 474 pcr = self.read_pcr(i, bank_alg) 476 - if pcr == None: 475 + if pcr is None: 477 476 return None 478 477 x += pcr 479 478 ··· 490 489 pcrsel = [0] * pcrsel_len 491 490 for i in pcrs: 492 491 pcrsel[i >> 3] |= 1 << (i & 7) 493 - pcrsel = ''.join(map(chr, pcrsel)) 492 + pcrsel = ''.join(map(chr, pcrsel)).encode() 494 493 495 494 fmt = '>HII IH%usIHB3s' % ds 496 495 cmd = struct.pack(fmt, ··· 498 497 struct.calcsize(fmt), 499 498 TPM2_CC_POLICY_PCR, 500 499 handle, 501 - len(dig), str(dig), 500 + len(dig), 501 + bytes(dig), 502 502 1, 503 503 bank_alg, 504 504 pcrsel_len, pcrsel) ··· 536 534 537 535 self.send_cmd(cmd) 538 536 539 - def create_root_key(self, auth_value = ''): 537 + def create_root_key(self, auth_value = bytes()): 540 538 attributes = \ 541 539 Public.FIXED_TPM | \ 542 540 Public.FIXED_PARENT | \ ··· 572 570 TPM2_CC_CREATE_PRIMARY, 573 571 TPM2_RH_OWNER, 574 572 len(auth_cmd), 575 - str(auth_cmd), 573 + bytes(auth_cmd), 576 574 len(sensitive), 577 - str(sensitive), 575 + bytes(sensitive), 578 576 len(public), 579 - str(public), 577 + bytes(public), 580 578 0, 0) 581 579 582 580 return struct.unpack('>I', 
self.send_cmd(cmd)[10:14])[0] ··· 589 587 attributes = 0 590 588 if not policy_dig: 591 589 attributes |= Public.USER_WITH_AUTH 592 - policy_dig = '' 590 + policy_dig = bytes() 593 591 594 592 auth_cmd = AuthCommand() 595 593 sensitive = SensitiveCreate(user_auth=auth_value, data=data) ··· 610 608 TPM2_CC_CREATE, 611 609 parent_key, 612 610 len(auth_cmd), 613 - str(auth_cmd), 611 + bytes(auth_cmd), 614 612 len(sensitive), 615 - str(sensitive), 613 + bytes(sensitive), 616 614 len(public), 617 - str(public), 615 + bytes(public), 618 616 0, 0) 619 617 620 618 rsp = self.send_cmd(cmd) ··· 637 635 TPM2_CC_LOAD, 638 636 parent_key, 639 637 len(auth_cmd), 640 - str(auth_cmd), 638 + bytes(auth_cmd), 641 639 blob) 642 640 643 641 data_handle = struct.unpack('>I', self.send_cmd(cmd)[10:14])[0] ··· 655 653 TPM2_CC_UNSEAL, 656 654 data_handle, 657 655 len(auth_cmd), 658 - str(auth_cmd)) 656 + bytes(auth_cmd)) 659 657 660 658 try: 661 659 rsp = self.send_cmd(cmd) ··· 677 675 TPM2_CC_DICTIONARY_ATTACK_LOCK_RESET, 678 676 TPM2_RH_LOCKOUT, 679 677 len(auth_cmd), 680 - str(auth_cmd)) 678 + bytes(auth_cmd)) 681 679 682 680 self.send_cmd(cmd) 683 681 ··· 695 693 more_data, cap, cnt = struct.unpack('>BII', rsp[:9]) 696 694 rsp = rsp[9:] 697 695 698 - for i in xrange(0, cnt): 696 + for i in range(0, cnt): 699 697 handle = struct.unpack('>I', rsp[:4])[0] 700 698 handles.append(handle) 701 699 rsp = rsp[4:]
+20 -19
tools/testing/selftests/tpm2/tpm2_tests.py
··· 20 20 self.client.close() 21 21 22 22 def test_seal_with_auth(self): 23 - data = 'X' * 64 24 - auth = 'A' * 15 23 + data = ('X' * 64).encode() 24 + auth = ('A' * 15).encode() 25 25 26 26 blob = self.client.seal(self.root_key, data, auth, None) 27 27 result = self.client.unseal(self.root_key, blob, auth, None) ··· 30 30 def test_seal_with_policy(self): 31 31 handle = self.client.start_auth_session(tpm2.TPM2_SE_TRIAL) 32 32 33 - data = 'X' * 64 34 - auth = 'A' * 15 33 + data = ('X' * 64).encode() 34 + auth = ('A' * 15).encode() 35 35 pcrs = [16] 36 36 37 37 try: ··· 58 58 self.assertEqual(data, result) 59 59 60 60 def test_unseal_with_wrong_auth(self): 61 - data = 'X' * 64 62 - auth = 'A' * 20 61 + data = ('X' * 64).encode() 62 + auth = ('A' * 20).encode() 63 63 rc = 0 64 64 65 65 blob = self.client.seal(self.root_key, data, auth, None) 66 66 try: 67 - result = self.client.unseal(self.root_key, blob, auth[:-1] + 'B', None) 68 - except ProtocolError, e: 67 + result = self.client.unseal(self.root_key, blob, 68 + auth[:-1] + 'B'.encode(), None) 69 + except ProtocolError as e: 69 70 rc = e.rc 70 71 71 72 self.assertEqual(rc, tpm2.TPM2_RC_AUTH_FAIL) ··· 74 73 def test_unseal_with_wrong_policy(self): 75 74 handle = self.client.start_auth_session(tpm2.TPM2_SE_TRIAL) 76 75 77 - data = 'X' * 64 78 - auth = 'A' * 17 76 + data = ('X' * 64).encode() 77 + auth = ('A' * 17).encode() 79 78 pcrs = [16] 80 79 81 80 try: ··· 92 91 # This should succeed. 93 92 94 93 ds = tpm2.get_digest_size(tpm2.TPM2_ALG_SHA1) 95 - self.client.extend_pcr(1, 'X' * ds) 94 + self.client.extend_pcr(1, ('X' * ds).encode()) 96 95 97 96 handle = self.client.start_auth_session(tpm2.TPM2_SE_POLICY) 98 97 ··· 109 108 110 109 # Then, extend a PCR that is part of the policy and try to unseal. 111 110 # This should fail. 
112 - self.client.extend_pcr(16, 'X' * ds) 111 + self.client.extend_pcr(16, ('X' * ds).encode()) 113 112 114 113 handle = self.client.start_auth_session(tpm2.TPM2_SE_POLICY) 115 114 ··· 120 119 self.client.policy_password(handle) 121 120 122 121 result = self.client.unseal(self.root_key, blob, auth, handle) 123 - except ProtocolError, e: 122 + except ProtocolError as e: 124 123 rc = e.rc 125 124 self.client.flush_context(handle) 126 125 except: ··· 131 130 132 131 def test_seal_with_too_long_auth(self): 133 132 ds = tpm2.get_digest_size(tpm2.TPM2_ALG_SHA1) 134 - data = 'X' * 64 135 - auth = 'A' * (ds + 1) 133 + data = ('X' * 64).encode() 134 + auth = ('A' * (ds + 1)).encode() 136 135 137 136 rc = 0 138 137 try: 139 138 blob = self.client.seal(self.root_key, data, auth, None) 140 - except ProtocolError, e: 139 + except ProtocolError as e: 141 140 rc = e.rc 142 141 143 142 self.assertEqual(rc, tpm2.TPM2_RC_SIZE) ··· 153 152 0xDEADBEEF) 154 153 155 154 self.client.send_cmd(cmd) 156 - except IOError, e: 155 + except IOError as e: 157 156 rejected = True 158 157 except: 159 158 pass ··· 213 212 self.client.tpm.write(cmd) 214 213 rsp = self.client.tpm.read() 215 214 216 - except IOError, e: 215 + except IOError as e: 217 216 # read the response 218 217 rsp = self.client.tpm.read() 219 218 rejected = True ··· 284 283 rc = 0 285 284 try: 286 285 space1.send_cmd(cmd) 287 - except ProtocolError, e: 286 + except ProtocolError as e: 288 287 rc = e.rc 289 288 290 289 self.assertEqual(rc, tpm2.TPM2_RC_COMMAND_CODE |
+2 -2
tools/testing/selftests/x86/Makefile
··· 70 70 71 71 EXTRA_CLEAN := $(BINARIES_32) $(BINARIES_64) 72 72 73 - $(BINARIES_32): $(OUTPUT)/%_32: %.c 73 + $(BINARIES_32): $(OUTPUT)/%_32: %.c helpers.h 74 74 $(CC) -m32 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl -lm 75 75 76 - $(BINARIES_64): $(OUTPUT)/%_64: %.c 76 + $(BINARIES_64): $(OUTPUT)/%_64: %.c helpers.h 77 77 $(CC) -m64 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl 78 78 79 79 # x86_64 users should be encouraged to install 32-bit libraries
+41
tools/testing/selftests/x86/helpers.h
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + #ifndef __SELFTESTS_X86_HELPERS_H 3 + #define __SELFTESTS_X86_HELPERS_H 4 + 5 + #include <asm/processor-flags.h> 6 + 7 + static inline unsigned long get_eflags(void) 8 + { 9 + unsigned long eflags; 10 + 11 + asm volatile ( 12 + #ifdef __x86_64__ 13 + "subq $128, %%rsp\n\t" 14 + "pushfq\n\t" 15 + "popq %0\n\t" 16 + "addq $128, %%rsp" 17 + #else 18 + "pushfl\n\t" 19 + "popl %0" 20 + #endif 21 + : "=r" (eflags) :: "memory"); 22 + 23 + return eflags; 24 + } 25 + 26 + static inline void set_eflags(unsigned long eflags) 27 + { 28 + asm volatile ( 29 + #ifdef __x86_64__ 30 + "subq $128, %%rsp\n\t" 31 + "pushq %0\n\t" 32 + "popfq\n\t" 33 + "addq $128, %%rsp" 34 + #else 35 + "pushl %0\n\t" 36 + "popfl" 37 + #endif 38 + :: "r" (eflags) : "flags", "memory"); 39 + } 40 + 41 + #endif /* __SELFTESTS_X86_HELPERS_H */
+2 -15
tools/testing/selftests/x86/single_step_syscall.c
··· 31 31 #include <sys/ptrace.h> 32 32 #include <sys/user.h> 33 33 34 + #include "helpers.h" 35 + 34 36 static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *), 35 37 int flags) 36 38 { ··· 68 66 # define WIDTH "l" 69 67 # define INT80_CLOBBERS 70 68 #endif 71 - 72 - static unsigned long get_eflags(void) 73 - { 74 - unsigned long eflags; 75 - asm volatile ("pushf" WIDTH "\n\tpop" WIDTH " %0" : "=rm" (eflags)); 76 - return eflags; 77 - } 78 - 79 - static void set_eflags(unsigned long eflags) 80 - { 81 - asm volatile ("push" WIDTH " %0\n\tpopf" WIDTH 82 - : : "rm" (eflags) : "flags"); 83 - } 84 - 85 - #define X86_EFLAGS_TF (1UL << 8) 86 69 87 70 static void sigtrap(int sig, siginfo_t *info, void *ctx_void) 88 71 {
+1 -20
tools/testing/selftests/x86/syscall_arg_fault.c
··· 15 15 #include <setjmp.h> 16 16 #include <errno.h> 17 17 18 - #ifdef __x86_64__ 19 - # define WIDTH "q" 20 - #else 21 - # define WIDTH "l" 22 - #endif 18 + #include "helpers.h" 23 19 24 20 /* Our sigaltstack scratch space. */ 25 21 static unsigned char altstack_data[SIGSTKSZ]; 26 - 27 - static unsigned long get_eflags(void) 28 - { 29 - unsigned long eflags; 30 - asm volatile ("pushf" WIDTH "\n\tpop" WIDTH " %0" : "=rm" (eflags)); 31 - return eflags; 32 - } 33 - 34 - static void set_eflags(unsigned long eflags) 35 - { 36 - asm volatile ("push" WIDTH " %0\n\tpopf" WIDTH 37 - : : "rm" (eflags) : "flags"); 38 - } 39 - 40 - #define X86_EFLAGS_TF (1UL << 8) 41 22 42 23 static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *), 43 24 int flags)
+28 -19
tools/testing/selftests/x86/syscall_nt.c
··· 13 13 #include <signal.h> 14 14 #include <err.h> 15 15 #include <sys/syscall.h> 16 - #include <asm/processor-flags.h> 17 16 18 - #ifdef __x86_64__ 19 - # define WIDTH "q" 20 - #else 21 - # define WIDTH "l" 22 - #endif 17 + #include "helpers.h" 23 18 24 19 static unsigned int nerrs; 25 - 26 - static unsigned long get_eflags(void) 27 - { 28 - unsigned long eflags; 29 - asm volatile ("pushf" WIDTH "\n\tpop" WIDTH " %0" : "=rm" (eflags)); 30 - return eflags; 31 - } 32 - 33 - static void set_eflags(unsigned long eflags) 34 - { 35 - asm volatile ("push" WIDTH " %0\n\tpopf" WIDTH 36 - : : "rm" (eflags) : "flags"); 37 - } 38 20 39 21 static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *), 40 22 int flags) ··· 41 59 set_eflags(get_eflags() | extraflags); 42 60 syscall(SYS_getpid); 43 61 flags = get_eflags(); 62 + set_eflags(X86_EFLAGS_IF | X86_EFLAGS_FIXED); 44 63 if ((flags & extraflags) == extraflags) { 45 64 printf("[OK]\tThe syscall worked and flags are still set\n"); 46 65 } else { ··· 56 73 printf("[RUN]\tSet NT and issue a syscall\n"); 57 74 do_it(X86_EFLAGS_NT); 58 75 76 + printf("[RUN]\tSet AC and issue a syscall\n"); 77 + do_it(X86_EFLAGS_AC); 78 + 79 + printf("[RUN]\tSet NT|AC and issue a syscall\n"); 80 + do_it(X86_EFLAGS_NT | X86_EFLAGS_AC); 81 + 59 82 /* 60 83 * Now try it again with TF set -- TF forces returns via IRET in all 61 84 * cases except non-ptregs-using 64-bit full fast path syscalls. ··· 69 80 70 81 sethandler(SIGTRAP, sigtrap, 0); 71 82 83 + printf("[RUN]\tSet TF and issue a syscall\n"); 84 + do_it(X86_EFLAGS_TF); 85 + 72 86 printf("[RUN]\tSet NT|TF and issue a syscall\n"); 73 87 do_it(X86_EFLAGS_NT | X86_EFLAGS_TF); 88 + 89 + printf("[RUN]\tSet AC|TF and issue a syscall\n"); 90 + do_it(X86_EFLAGS_AC | X86_EFLAGS_TF); 91 + 92 + printf("[RUN]\tSet NT|AC|TF and issue a syscall\n"); 93 + do_it(X86_EFLAGS_NT | X86_EFLAGS_AC | X86_EFLAGS_TF); 94 + 95 + /* 96 + * Now try DF. 
This is evil and it's plausible that we will crash 97 + * glibc, but glibc would have to do something rather surprising 98 + * for this to happen. 99 + */ 100 + printf("[RUN]\tSet DF and issue a syscall\n"); 101 + do_it(X86_EFLAGS_DF); 102 + 103 + printf("[RUN]\tSet TF|DF and issue a syscall\n"); 104 + do_it(X86_EFLAGS_TF | X86_EFLAGS_DF); 74 105 75 106 return nerrs == 0 ? 0 : 1; 76 107 }
+2 -13
tools/testing/selftests/x86/test_vsyscall.c
··· 20 20 #include <setjmp.h> 21 21 #include <sys/uio.h> 22 22 23 + #include "helpers.h" 24 + 23 25 #ifdef __x86_64__ 24 26 # define VSYS(x) (x) 25 27 #else ··· 495 493 } 496 494 497 495 #ifdef __x86_64__ 498 - #define X86_EFLAGS_TF (1UL << 8) 499 496 static volatile sig_atomic_t num_vsyscall_traps; 500 - 501 - static unsigned long get_eflags(void) 502 - { 503 - unsigned long eflags; 504 - asm volatile ("pushfq\n\tpopq %0" : "=rm" (eflags)); 505 - return eflags; 506 - } 507 - 508 - static void set_eflags(unsigned long eflags) 509 - { 510 - asm volatile ("pushq %0\n\tpopfq" : : "rm" (eflags) : "flags"); 511 - } 512 497 513 498 static void sigtrap(int sig, siginfo_t *info, void *ctx_void) 514 499 {
+2 -21
tools/testing/selftests/x86/unwind_vdso.c
··· 11 11 #include <features.h> 12 12 #include <stdio.h> 13 13 14 + #include "helpers.h" 15 + 14 16 #if defined(__GLIBC__) && __GLIBC__ == 2 && __GLIBC_MINOR__ < 16 15 17 16 18 int main() ··· 54 52 if (sigaction(sig, &sa, 0)) 55 53 err(1, "sigaction"); 56 54 } 57 - 58 - #ifdef __x86_64__ 59 - # define WIDTH "q" 60 - #else 61 - # define WIDTH "l" 62 - #endif 63 - 64 - static unsigned long get_eflags(void) 65 - { 66 - unsigned long eflags; 67 - asm volatile ("pushf" WIDTH "\n\tpop" WIDTH " %0" : "=rm" (eflags)); 68 - return eflags; 69 - } 70 - 71 - static void set_eflags(unsigned long eflags) 72 - { 73 - asm volatile ("push" WIDTH " %0\n\tpopf" WIDTH 74 - : : "rm" (eflags) : "flags"); 75 - } 76 - 77 - #define X86_EFLAGS_TF (1UL << 8) 78 55 79 56 static volatile sig_atomic_t nerrs; 80 57 static unsigned long sysinfo;
+2 -1
virt/kvm/kvm_main.c
··· 3350 3350 if (kvm_sigmask.len != sizeof(compat_sigset_t)) 3351 3351 goto out; 3352 3352 r = -EFAULT; 3353 - if (get_compat_sigset(&sigset, (void *)sigmask_arg->sigset)) 3353 + if (get_compat_sigset(&sigset, 3354 + (compat_sigset_t __user *)sigmask_arg->sigset)) 3354 3355 goto out; 3355 3356 r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset); 3356 3357 } else