Merge tag 'asoc-v6.18-2' of https://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-next

ASoC: Updates for v6.18 round 2

Some more updates for v6.18, mostly fixes for the earlier pull request
with some cleanups and more minor fixes for older code. We do have one
new driver, the TI TAS2783A, and some quirks for new platforms.

+4174 -1030
+3
Documentation/devicetree/bindings/regulator/qcom,sdm845-refgen-regulator.yaml
···
   - enum:
       - qcom,sc7180-refgen-regulator
       - qcom,sc8180x-refgen-regulator
+      - qcom,sdm670-refgen-regulator
       - qcom,sm8150-refgen-regulator
   - const: qcom,sdm845-refgen-regulator
 
   - items:
       - enum:
+          - qcom,qcs8300-refgen-regulator
+          - qcom,sa8775p-refgen-regulator
           - qcom,sc7280-refgen-regulator
           - qcom,sc8280xp-refgen-regulator
           - qcom,sm6350-refgen-regulator
+6
Documentation/devicetree/bindings/sound/cirrus,cs35l41.yaml
···
     minimum: 0
     maximum: 5
 
+  cirrus,subsystem-id:
+    $ref: /schemas/types.yaml#/definitions/string
+    description:
+      Subsystem ID. If this property is present, it sets the system name,
+      used to identify the firmware and tuning to load.
+
 required:
   - compatible
   - reg
+228
Documentation/devicetree/bindings/sound/mediatek,mt8183-audio.yaml
···
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/mediatek,mt8183-audio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Mediatek AFE PCM controller for mt8183
+
+maintainers:
+  - Julien Massot <jmassot@collabora.com>
+
+properties:
+  compatible:
+    const: mediatek,mt8183-audio
+
+  interrupts:
+    maxItems: 1
+
+  resets:
+    maxItems: 1
+
+  reset-names:
+    const: audiosys
+
+  power-domains:
+    maxItems: 1
+
+  memory-region:
+    maxItems: 1
+
+  clocks:
+    items:
+      - description: AFE clock
+      - description: ADDA DAC clock
+      - description: ADDA DAC pre-distortion clock
+      - description: ADDA ADC clock
+      - description: ADDA6 ADC clock
+      - description: Audio low-jitter 22.5792m clock
+      - description: Audio low-jitter 24.576m clock
+      - description: Audio PLL1 tuner clock
+      - description: Audio PLL2 tuner clock
+      - description: I2S1 bit clock
+      - description: I2S2 bit clock
+      - description: I2S3 bit clock
+      - description: I2S4 bit clock
+      - description: Audio Time-Division Multiplexing interface clock
+      - description: Powerdown Audio test model clock
+      - description: Audio infra sys clock
+      - description: Audio infra 26M clock
+      - description: Mux for audio clock
+      - description: Mux for audio internal bus clock
+      - description: Mux main divider by 4
+      - description: Primary audio mux
+      - description: Primary audio PLL
+      - description: Secondary audio mux
+      - description: Secondary audio PLL
+      - description: Primary audio en-generator clock
+      - description: Primary PLL divider by 4 for IEC
+      - description: Secondary audio en-generator clock
+      - description: Secondary PLL divider by 8 for IEC
+      - description: Mux selector for I2S port 0
+      - description: Mux selector for I2S port 1
+      - description: Mux selector for I2S port 2
+      - description: Mux selector for I2S port 3
+      - description: Mux selector for I2S port 4
+      - description: Mux selector for I2S port 5
+      - description: APLL1 and APLL2 divider for I2S port 0
+      - description: APLL1 and APLL2 divider for I2S port 1
+      - description: APLL1 and APLL2 divider for I2S port 2
+      - description: APLL1 and APLL2 divider for I2S port 3
+      - description: APLL1 and APLL2 divider for I2S port 4
+      - description: APLL1 and APLL2 divider for IEC
+      - description: 26MHz clock for audio subsystem
+
+  clock-names:
+    items:
+      - const: aud_afe_clk
+      - const: aud_dac_clk
+      - const: aud_dac_predis_clk
+      - const: aud_adc_clk
+      - const: aud_adc_adda6_clk
+      - const: aud_apll22m_clk
+      - const: aud_apll24m_clk
+      - const: aud_apll1_tuner_clk
+      - const: aud_apll2_tuner_clk
+      - const: aud_i2s1_bclk_sw
+      - const: aud_i2s2_bclk_sw
+      - const: aud_i2s3_bclk_sw
+      - const: aud_i2s4_bclk_sw
+      - const: aud_tdm_clk
+      - const: aud_tml_clk
+      - const: aud_infra_clk
+      - const: mtkaif_26m_clk
+      - const: top_mux_audio
+      - const: top_mux_aud_intbus
+      - const: top_syspll_d2_d4
+      - const: top_mux_aud_1
+      - const: top_apll1_ck
+      - const: top_mux_aud_2
+      - const: top_apll2_ck
+      - const: top_mux_aud_eng1
+      - const: top_apll1_d8
+      - const: top_mux_aud_eng2
+      - const: top_apll2_d8
+      - const: top_i2s0_m_sel
+      - const: top_i2s1_m_sel
+      - const: top_i2s2_m_sel
+      - const: top_i2s3_m_sel
+      - const: top_i2s4_m_sel
+      - const: top_i2s5_m_sel
+      - const: top_apll12_div0
+      - const: top_apll12_div1
+      - const: top_apll12_div2
+      - const: top_apll12_div3
+      - const: top_apll12_div4
+      - const: top_apll12_divb
+      - const: top_clk26m_clk
+
+required:
+  - compatible
+  - interrupts
+  - resets
+  - reset-names
+  - power-domains
+  - clocks
+  - clock-names
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/mt8183-clk.h>
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/interrupt-controller/irq.h>
+    #include <dt-bindings/power/mt8183-power.h>
+    #include <dt-bindings/reset/mt8183-resets.h>
+
+    audio-controller {
+        compatible = "mediatek,mt8183-audio";
+        interrupts = <GIC_SPI 161 IRQ_TYPE_LEVEL_LOW>;
+        resets = <&watchdog MT8183_TOPRGU_AUDIO_SW_RST>;
+        reset-names = "audiosys";
+        power-domains = <&spm MT8183_POWER_DOMAIN_AUDIO>;
+        clocks = <&audiosys CLK_AUDIO_AFE>,
+                 <&audiosys CLK_AUDIO_DAC>,
+                 <&audiosys CLK_AUDIO_DAC_PREDIS>,
+                 <&audiosys CLK_AUDIO_ADC>,
+                 <&audiosys CLK_AUDIO_PDN_ADDA6_ADC>,
+                 <&audiosys CLK_AUDIO_22M>,
+                 <&audiosys CLK_AUDIO_24M>,
+                 <&audiosys CLK_AUDIO_APLL_TUNER>,
+                 <&audiosys CLK_AUDIO_APLL2_TUNER>,
+                 <&audiosys CLK_AUDIO_I2S1>,
+                 <&audiosys CLK_AUDIO_I2S2>,
+                 <&audiosys CLK_AUDIO_I2S3>,
+                 <&audiosys CLK_AUDIO_I2S4>,
+                 <&audiosys CLK_AUDIO_TDM>,
+                 <&audiosys CLK_AUDIO_TML>,
+                 <&infracfg CLK_INFRA_AUDIO>,
+                 <&infracfg CLK_INFRA_AUDIO_26M_BCLK>,
+                 <&topckgen CLK_TOP_MUX_AUDIO>,
+                 <&topckgen CLK_TOP_MUX_AUD_INTBUS>,
+                 <&topckgen CLK_TOP_SYSPLL_D2_D4>,
+                 <&topckgen CLK_TOP_MUX_AUD_1>,
+                 <&topckgen CLK_TOP_APLL1_CK>,
+                 <&topckgen CLK_TOP_MUX_AUD_2>,
+                 <&topckgen CLK_TOP_APLL2_CK>,
+                 <&topckgen CLK_TOP_MUX_AUD_ENG1>,
+                 <&topckgen CLK_TOP_APLL1_D8>,
+                 <&topckgen CLK_TOP_MUX_AUD_ENG2>,
+                 <&topckgen CLK_TOP_APLL2_D8>,
+                 <&topckgen CLK_TOP_MUX_APLL_I2S0>,
+                 <&topckgen CLK_TOP_MUX_APLL_I2S1>,
+                 <&topckgen CLK_TOP_MUX_APLL_I2S2>,
+                 <&topckgen CLK_TOP_MUX_APLL_I2S3>,
+                 <&topckgen CLK_TOP_MUX_APLL_I2S4>,
+                 <&topckgen CLK_TOP_MUX_APLL_I2S5>,
+                 <&topckgen CLK_TOP_APLL12_DIV0>,
+                 <&topckgen CLK_TOP_APLL12_DIV1>,
+                 <&topckgen CLK_TOP_APLL12_DIV2>,
+                 <&topckgen CLK_TOP_APLL12_DIV3>,
+                 <&topckgen CLK_TOP_APLL12_DIV4>,
+                 <&topckgen CLK_TOP_APLL12_DIVB>,
+                 <&clk26m>;
+        clock-names = "aud_afe_clk",
+                      "aud_dac_clk",
+                      "aud_dac_predis_clk",
+                      "aud_adc_clk",
+                      "aud_adc_adda6_clk",
+                      "aud_apll22m_clk",
+                      "aud_apll24m_clk",
+                      "aud_apll1_tuner_clk",
+                      "aud_apll2_tuner_clk",
+                      "aud_i2s1_bclk_sw",
+                      "aud_i2s2_bclk_sw",
+                      "aud_i2s3_bclk_sw",
+                      "aud_i2s4_bclk_sw",
+                      "aud_tdm_clk",
+                      "aud_tml_clk",
+                      "aud_infra_clk",
+                      "mtkaif_26m_clk",
+                      "top_mux_audio",
+                      "top_mux_aud_intbus",
+                      "top_syspll_d2_d4",
+                      "top_mux_aud_1",
+                      "top_apll1_ck",
+                      "top_mux_aud_2",
+                      "top_apll2_ck",
+                      "top_mux_aud_eng1",
+                      "top_apll1_d8",
+                      "top_mux_aud_eng2",
+                      "top_apll2_d8",
+                      "top_i2s0_m_sel",
+                      "top_i2s1_m_sel",
+                      "top_i2s2_m_sel",
+                      "top_i2s3_m_sel",
+                      "top_i2s4_m_sel",
+                      "top_i2s5_m_sel",
+                      "top_apll12_div0",
+                      "top_apll12_div1",
+                      "top_apll12_div2",
+                      "top_apll12_div3",
+                      "top_apll12_div4",
+                      "top_apll12_divb",
+                      "top_clk26m_clk";
+    };
+
+...
+49
Documentation/devicetree/bindings/sound/mediatek,mt8183_da7219.yaml
···
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/mediatek,mt8183_da7219.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MediaTek MT8183 sound card with external codecs
+
+maintainers:
+  - Julien Massot <jmassot@collabora.com>
+
+description:
+  MediaTek MT8183 SoC-based sound cards with DA7219 as headset codec,
+  and MAX98357A, RT1015 or RT1015P as speaker amplifiers. Optionally includes HDMI codec.
+
+properties:
+  compatible:
+    enum:
+      - mediatek,mt8183_da7219_max98357
+      - mediatek,mt8183_da7219_rt1015
+      - mediatek,mt8183_da7219_rt1015p
+
+  mediatek,headset-codec:
+    $ref: /schemas/types.yaml#/definitions/phandle
+    description: Phandle to the DA7219 headset codec.
+
+  mediatek,platform:
+    $ref: /schemas/types.yaml#/definitions/phandle
+    description: Phandle to the MT8183 ASoC platform (e.g., AFE node).
+
+  mediatek,hdmi-codec:
+    $ref: /schemas/types.yaml#/definitions/phandle
+    description: Optional phandle to the HDMI codec (e.g., IT6505).
+
+required:
+  - compatible
+  - mediatek,headset-codec
+  - mediatek,platform
+
+additionalProperties: false
+
+examples:
+  - |
+    sound {
+        compatible = "mediatek,mt8183_da7219_max98357";
+        mediatek,headset-codec = <&da7219>;
+        mediatek,hdmi-codec = <&it6505dptx>;
+        mediatek,platform = <&afe>;
+    };
+59
Documentation/devicetree/bindings/sound/mediatek,mt8183_mt6358_ts3a227.yaml
···
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/mediatek,mt8183_mt6358_ts3a227.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MediaTek MT8183 sound card with MT6358, TS3A227, and MAX98357/RT1015 codecs
+
+maintainers:
+  - Julien Massot <julien.massot@collabora.com>
+
+description:
+  MediaTek MT8183 SoC-based sound cards using the MT6358 codec,
+  with optional TS3A227 headset codec, EC codec (via Chrome EC), and HDMI audio.
+  Speaker amplifier can be one of MAX98357A/B, RT1015, or RT1015P.
+
+properties:
+  compatible:
+    enum:
+      - mediatek,mt8183_mt6358_ts3a227_max98357
+      - mediatek,mt8183_mt6358_ts3a227_max98357b
+      - mediatek,mt8183_mt6358_ts3a227_rt1015
+      - mediatek,mt8183_mt6358_ts3a227_rt1015p
+
+  mediatek,platform:
+    $ref: /schemas/types.yaml#/definitions/phandle
+    description: Phandle to the MT8183 ASoC platform node (e.g., AFE).
+
+  mediatek,headset-codec:
+    $ref: /schemas/types.yaml#/definitions/phandle
+    description: Phandle to the TS3A227 headset codec.
+
+  mediatek,ec-codec:
+    $ref: /schemas/types.yaml#/definitions/phandle
+    description: |
+      Optional phandle to a ChromeOS EC codec node.
+      See bindings in google,cros-ec-codec.yaml.
+
+  mediatek,hdmi-codec:
+    $ref: /schemas/types.yaml#/definitions/phandle
+    description: Optional phandle to an HDMI audio codec node.
+
+required:
+  - compatible
+  - mediatek,platform
+
+additionalProperties: false
+
+examples:
+  - |
+    sound {
+        compatible = "mediatek,mt8183_mt6358_ts3a227_max98357";
+        mediatek,headset-codec = <&ts3a227>;
+        mediatek,ec-codec = <&ec_codec>;
+        mediatek,hdmi-codec = <&it6505dptx>;
+        mediatek,platform = <&afe>;
+    };
+
+...
-42
Documentation/devicetree/bindings/sound/mt8183-afe-pcm.txt
···
-Mediatek AFE PCM controller for mt8183
-
-Required properties:
-- compatible = "mediatek,mt68183-audio";
-- reg: register location and size
-- interrupts: should contain AFE interrupt
-- resets: Must contain an entry for each entry in reset-names
-  See ../reset/reset.txt for details.
-- reset-names: should have these reset names:
-  "audiosys";
-- power-domains: should define the power domain
-- clocks: Must contain an entry for each entry in clock-names
-- clock-names: should have these clock names:
-  "infra_sys_audio_clk",
-  "mtkaif_26m_clk",
-  "top_mux_audio",
-  "top_mux_aud_intbus",
-  "top_sys_pll3_d4",
-  "top_clk26m_clk";
-
-Example:
-
-afe: mt8183-afe-pcm@11220000 {
-	compatible = "mediatek,mt8183-audio";
-	reg = <0 0x11220000 0 0x1000>;
-	interrupts = <GIC_SPI 161 IRQ_TYPE_LEVEL_LOW>;
-	resets = <&watchdog MT8183_TOPRGU_AUDIO_SW_RST>;
-	reset-names = "audiosys";
-	power-domains = <&scpsys MT8183_POWER_DOMAIN_AUDIO>;
-	clocks = <&infrasys CLK_INFRA_AUDIO>,
-		 <&infrasys CLK_INFRA_AUDIO_26M_BCLK>,
-		 <&topckgen CLK_TOP_MUX_AUDIO>,
-		 <&topckgen CLK_TOP_MUX_AUD_INTBUS>,
-		 <&topckgen CLK_TOP_SYSPLL_D2_D4>,
-		 <&clk26m>;
-	clock-names = "infra_sys_audio_clk",
-		      "mtkaif_26m_clk",
-		      "top_mux_audio",
-		      "top_mux_aud_intbus",
-		      "top_sys_pll_d2_d4",
-		      "top_clk26m_clk";
-};
-21
Documentation/devicetree/bindings/sound/mt8183-da7219-max98357.txt
···
-MT8183 with MT6358, DA7219, MAX98357, and RT1015 CODECS
-
-Required properties:
-- compatible : "mediatek,mt8183_da7219_max98357" for MAX98357A codec
-	       "mediatek,mt8183_da7219_rt1015" for RT1015 codec
-	       "mediatek,mt8183_da7219_rt1015p" for RT1015P codec
-- mediatek,headset-codec: the phandles of da7219 codecs
-- mediatek,platform: the phandle of MT8183 ASoC platform
-
-Optional properties:
-- mediatek,hdmi-codec: the phandles of HDMI codec
-
-Example:
-
-sound {
-	compatible = "mediatek,mt8183_da7219_max98357";
-	mediatek,headset-codec = <&da7219>;
-	mediatek,hdmi-codec = <&it6505dptx>;
-	mediatek,platform = <&afe>;
-};
-
-25
Documentation/devicetree/bindings/sound/mt8183-mt6358-ts3a227-max98357.txt
···
-MT8183 with MT6358, TS3A227, MAX98357, and RT1015 CODECS
-
-Required properties:
-- compatible : "mediatek,mt8183_mt6358_ts3a227_max98357" for MAX98357A codec
-	       "mediatek,mt8183_mt6358_ts3a227_max98357b" for MAX98357B codec
-	       "mediatek,mt8183_mt6358_ts3a227_rt1015" for RT1015 codec
-	       "mediatek,mt8183_mt6358_ts3a227_rt1015p" for RT1015P codec
-- mediatek,platform: the phandle of MT8183 ASoC platform
-
-Optional properties:
-- mediatek,headset-codec: the phandles of ts3a227 codecs
-- mediatek,ec-codec: the phandle of EC codecs.
-  See google,cros-ec-codec.txt for more details.
-- mediatek,hdmi-codec: the phandles of HDMI codec
-
-Example:
-
-sound {
-	compatible = "mediatek,mt8183_mt6358_ts3a227_max98357";
-	mediatek,headset-codec = <&ts3a227>;
-	mediatek,ec-codec = <&ec_codec>;
-	mediatek,hdmi-codec = <&it6505dptx>;
-	mediatek,platform = <&afe>;
-};
-
+4 -5
Documentation/netlink/specs/conntrack.yaml
···
         - nat-dst
         - timeout
         - mark
-        - counter-orig
-        - counter-reply
+        - counters-orig
+        - counters-reply
         - use
         - id
         - nat-dst
···
       request:
         value: 0x101
         attributes:
-          - nfgen-family
           - mark
           - filter
           - status
···
         - nat-dst
         - timeout
         - mark
-        - counter-orig
-        - counter-reply
+        - counters-orig
+        - counters-reply
         - use
         - id
         - nat-dst
+2 -2
Documentation/netlink/specs/mptcp_pm.yaml
···
         traffic-patterns it can take a long time until the
         MPTCP_EVENT_ESTABLISHED is sent.
         Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6, sport,
-        dport, server-side.
+        dport, server-side, [flags].
     -
       name: established
       doc: >-
         A MPTCP connection is established (can start new subflows).
         Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6, sport,
-        dport, server-side.
+        dport, server-side, [flags].
     -
       name: closed
       doc: >-
+5 -2
MAINTAINERS
···
 F:	Documentation/devicetree/bindings/dpll/dpll-device.yaml
 F:	Documentation/devicetree/bindings/dpll/dpll-pin.yaml
 F:	Documentation/driver-api/dpll.rst
-F:	drivers/dpll/*
+F:	drivers/dpll/
 F:	include/linux/dpll.h
 F:	include/uapi/linux/dpll.h
···
 R:	Liam R. Howlett <Liam.Howlett@oracle.com>
 R:	Vlastimil Babka <vbabka@suse.cz>
 R:	Harry Yoo <harry.yoo@oracle.com>
+R:	Jann Horn <jannh@google.com>
 L:	linux-mm@kvack.org
 S:	Maintained
 F:	include/linux/rmap.h
···
 R:	Ryan Roberts <ryan.roberts@arm.com>
 R:	Dev Jain <dev.jain@arm.com>
 R:	Barry Song <baohua@kernel.org>
+R:	Lance Yang <lance.yang@linux.dev>
 L:	linux-mm@kvack.org
 S:	Maintained
 W:	http://www.linux-mm.org
···
 
 RUNTIME VERIFICATION (RV)
 M:	Steven Rostedt <rostedt@goodmis.org>
+M:	Gabriele Monaco <gmonaco@redhat.com>
 L:	linux-trace-kernel@vger.kernel.org
 S:	Maintained
 F:	Documentation/trace/rv/
···
 F:	drivers/input/keyboard/sun4i-lradc-keys.c
 
 SUNDANCE NETWORK DRIVER
-M:	Denis Kirjanov <dkirjanov@suse.de>
+M:	Denis Kirjanov <kirjanov@gmail.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	drivers/net/ethernet/dlink/sundance.c
+1 -1
Makefile
···
 VERSION = 6
 PATCHLEVEL = 17
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Baby Opossum Posse
 
 # *DOCUMENTATION*
+1
arch/arm64/include/asm/kvm_host.h
···
 }
 
 void kvm_init_host_debug_data(void);
+void kvm_debug_init_vhe(void);
 void kvm_vcpu_load_debug(struct kvm_vcpu *vcpu);
 void kvm_vcpu_put_debug(struct kvm_vcpu *vcpu);
 void kvm_debug_set_guest_ownership(struct kvm_vcpu *vcpu);
-30
arch/arm64/include/asm/kvm_pgtable.h
···
 	return pteref;
 }
 
-static inline kvm_pte_t *kvm_dereference_pteref_raw(kvm_pteref_t pteref)
-{
-	return pteref;
-}
-
 static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
 {
 	/*
···
 				       kvm_pteref_t pteref)
 {
 	return rcu_dereference_check(pteref, !(walker->flags & KVM_PGTABLE_WALK_SHARED));
-}
-
-static inline kvm_pte_t *kvm_dereference_pteref_raw(kvm_pteref_t pteref)
-{
-	return rcu_dereference_raw(pteref);
 }
 
 static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
···
  * to freeing and therefore no TLB invalidation is performed.
  */
 void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
-
-/**
- * kvm_pgtable_stage2_destroy_range() - Destroy the unlinked range of addresses.
- * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
- * @addr:	Intermediate physical address at which to place the mapping.
- * @size:	Size of the mapping.
- *
- * The page-table is assumed to be unreachable by any hardware walkers prior
- * to freeing and therefore no TLB invalidation is performed.
- */
-void kvm_pgtable_stage2_destroy_range(struct kvm_pgtable *pgt,
-				      u64 addr, u64 size);
-
-/**
- * kvm_pgtable_stage2_destroy_pgd() - Destroy the PGD of guest stage-2 page-table.
- * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
- *
- * It is assumed that the rest of the page-table is freed before this operation.
- */
-void kvm_pgtable_stage2_destroy_pgd(struct kvm_pgtable *pgt);
 
 /**
  * kvm_pgtable_stage2_free_unlinked() - Free an unlinked stage-2 paging structure.
+1 -3
arch/arm64/include/asm/kvm_pkvm.h
···
 
 int pkvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
 			     struct kvm_pgtable_mm_ops *mm_ops);
-void pkvm_pgtable_stage2_destroy_range(struct kvm_pgtable *pgt,
-				       u64 addr, u64 size);
-void pkvm_pgtable_stage2_destroy_pgd(struct kvm_pgtable *pgt);
+void pkvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
 int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
 			    enum kvm_pgtable_prot prot, void *mc,
 			    enum kvm_pgtable_walk_flags flags);
+3 -1
arch/arm64/kvm/arm.c
···
 {
 	cpu_set_hyp_vector();
 
-	if (is_kernel_in_hyp_mode())
+	if (is_kernel_in_hyp_mode()) {
 		kvm_timer_init_vhe();
+		kvm_debug_init_vhe();
+	}
 
 	if (vgic_present)
 		kvm_vgic_init_cpu_hardware();
+13
arch/arm64/kvm/debug.c
···
 	}
 }
 
+void kvm_debug_init_vhe(void)
+{
+	/* Clear PMSCR_EL1.E{0,1}SPE which reset to UNKNOWN values. */
+	if (SYS_FIELD_GET(ID_AA64DFR0_EL1, PMSVer, read_sysreg(id_aa64dfr0_el1)))
+		write_sysreg_el1(0, SYS_PMSCR);
+}
+
 /*
  * Configures the 'external' MDSCR_EL1 value for the guest, i.e. when the host
  * has taken over MDSCR_EL1.
···
 
 	/* Must be called before kvm_vcpu_load_vhe() */
 	KVM_BUG_ON(vcpu_get_flag(vcpu, SYSREGS_ON_CPU), vcpu->kvm);
+
+	if (has_vhe())
+		*host_data_ptr(host_debug_state.mdcr_el2) = read_sysreg(mdcr_el2);
 
 	/*
 	 * Determine which of the possible debug states we're in:
···
 
 void kvm_vcpu_put_debug(struct kvm_vcpu *vcpu)
 {
+	if (has_vhe())
+		write_sysreg(*host_data_ptr(host_debug_state.mdcr_el2), mdcr_el2);
+
 	if (likely(!(vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
 		return;
 
-5
arch/arm64/kvm/hyp/include/hyp/switch.h
···
 		vcpu_set_flag(vcpu, PMUSERENR_ON_CPU);
 	}
 
-	*host_data_ptr(host_debug_state.mdcr_el2) = read_sysreg(mdcr_el2);
-	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
-
 	if (cpus_have_final_cap(ARM64_HAS_HCX)) {
 		u64 hcrx = vcpu->arch.hcrx_el2;
 		if (is_nested_ctxt(vcpu)) {
···
 static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
-
-	write_sysreg(*host_data_ptr(host_debug_state.mdcr_el2), mdcr_el2);
 
 	write_sysreg(0, hstr_el2);
 	if (system_supports_pmuv3()) {
+6
arch/arm64/kvm/hyp/nvhe/switch.c
···
 static void __activate_traps(struct kvm_vcpu *vcpu)
 {
 	___activate_traps(vcpu, vcpu->arch.hcr_el2);
+
+	*host_data_ptr(host_debug_state.mdcr_el2) = read_sysreg(mdcr_el2);
+	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
+
 	__activate_traps_common(vcpu);
 	__activate_cptr_traps(vcpu);
 
···
 		write_sysreg_el1(val | SCTLR_ELx_M, SYS_SCTLR);
 		isb();
 	}
+
+	write_sysreg(*host_data_ptr(host_debug_state.mdcr_el2), mdcr_el2);
 
 	__deactivate_traps_common(vcpu);
 
+1 -1
arch/arm64/kvm/hyp/nvhe/sys_regs.c
···
 
 	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
 	*vcpu_cpsr(vcpu) = read_sysreg_el2(SYS_SPSR);
-	__vcpu_assign_sys_reg(vcpu, read_sysreg_el1(SYS_VBAR), VBAR_EL1);
+	__vcpu_assign_sys_reg(vcpu, VBAR_EL1, read_sysreg_el1(SYS_VBAR));
 
 	kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
 
+4 -21
arch/arm64/kvm/hyp/pgtable.c
···
 	return 0;
 }
 
-void kvm_pgtable_stage2_destroy_range(struct kvm_pgtable *pgt,
-				      u64 addr, u64 size)
+void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
 {
+	size_t pgd_sz;
 	struct kvm_pgtable_walker walker = {
 		.cb	= stage2_free_walker,
 		.flags	= KVM_PGTABLE_WALK_LEAF |
 			  KVM_PGTABLE_WALK_TABLE_POST,
 	};
 
-	WARN_ON(kvm_pgtable_walk(pgt, addr, size, &walker));
-}
-
-void kvm_pgtable_stage2_destroy_pgd(struct kvm_pgtable *pgt)
-{
-	size_t pgd_sz;
-
+	WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
 	pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level) * PAGE_SIZE;
-
-	/*
-	 * Since the pgtable is unlinked at this point, and not shared with
-	 * other walkers, safely deference pgd with kvm_dereference_pteref_raw()
-	 */
-	pgt->mm_ops->free_pages_exact(kvm_dereference_pteref_raw(pgt->pgd), pgd_sz);
+	pgt->mm_ops->free_pages_exact(kvm_dereference_pteref(&walker, pgt->pgd), pgd_sz);
 	pgt->pgd = NULL;
-}
-
-void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
-{
-	kvm_pgtable_stage2_destroy_range(pgt, 0, BIT(pgt->ia_bits));
-	kvm_pgtable_stage2_destroy_pgd(pgt);
 }
 
 void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, s8 level)
+6 -39
arch/arm64/kvm/mmu.c
···
 	return 0;
 }
 
-/*
- * Assume that @pgt is valid and unlinked from the KVM MMU to free the
- * page-table without taking the kvm_mmu_lock and without performing any
- * TLB invalidations.
- *
- * Also, the range of addresses can be large enough to cause need_resched
- * warnings, for instance on CONFIG_PREEMPT_NONE kernels. Hence, invoke
- * cond_resched() periodically to prevent hogging the CPU for a long time
- * and schedule something else, if required.
- */
-static void stage2_destroy_range(struct kvm_pgtable *pgt, phys_addr_t addr,
-				 phys_addr_t end)
-{
-	u64 next;
-
-	do {
-		next = stage2_range_addr_end(addr, end);
-		KVM_PGT_FN(kvm_pgtable_stage2_destroy_range)(pgt, addr,
-							     next - addr);
-		if (next != end)
-			cond_resched();
-	} while (addr = next, addr != end);
-}
-
-static void kvm_stage2_destroy(struct kvm_pgtable *pgt)
-{
-	unsigned int ia_bits = VTCR_EL2_IPA(pgt->mmu->vtcr);
-
-	stage2_destroy_range(pgt, 0, BIT(ia_bits));
-	KVM_PGT_FN(kvm_pgtable_stage2_destroy_pgd)(pgt);
-}
-
 /**
  * kvm_init_stage2_mmu - Initialise a S2 MMU structure
  * @kvm:	The pointer to the KVM structure
···
 	return 0;
 
 out_destroy_pgtable:
-	kvm_stage2_destroy(pgt);
+	KVM_PGT_FN(kvm_pgtable_stage2_destroy)(pgt);
 out_free_pgtable:
 	kfree(pgt);
 	return err;
···
 		mmu->pgt = NULL;
 		free_percpu(mmu->last_vcpu_ran);
 	}
+
+	if (kvm_is_nested_s2_mmu(kvm, mmu))
+		kvm_init_nested_s2_mmu(mmu);
+
 	write_unlock(&kvm->mmu_lock);
 
 	if (pgt) {
-		kvm_stage2_destroy(pgt);
+		KVM_PGT_FN(kvm_pgtable_stage2_destroy)(pgt);
 		kfree(pgt);
 	}
 }
···
 	write_fault = kvm_is_write_fault(vcpu);
 	exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
 	VM_BUG_ON(write_fault && exec_fault);
-
-	if (fault_is_perm && !write_fault && !exec_fault) {
-		kvm_err("Unexpected L2 read permission error\n");
-		return -EFAULT;
-	}
 
 	if (!is_protected_kvm_enabled())
 		memcache = &vcpu->arch.mmu_page_cache;
+3 -3
arch/arm64/kvm/nested.c
···
 
 	ipa_size = ttl_to_size(pgshift_level_to_ttl(vt->wi.pgshift,
 						    vt->wr.level));
-	ipa_start = vt->wr.pa & (ipa_size - 1);
+	ipa_start = vt->wr.pa & ~(ipa_size - 1);
 	ipa_end = ipa_start + ipa_size;
 
 	if (ipa_end <= start || ipa_start >= end)
···
 
 	va_size = ttl_to_size(pgshift_level_to_ttl(vt->wi.pgshift,
 						   vt->wr.level));
-	va_start = vt->gva & (va_size - 1);
+	va_start = vt->gva & ~(va_size - 1);
 	va_end = va_start + va_size;
 
 	switch (scope->type) {
···
 		    !(tcr & TCR_ASID16))
 			asid &= GENMASK(7, 0);
 
-		return asid != vt->wr.asid;
+		return asid == vt->wr.asid;
 	}
 
 	return true;
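The first two hunks above are one-character logic fixes built on the standard power-of-two alignment identity: for a power-of-two size, addr & ~(size - 1) is the aligned base of the block containing addr, while addr & (size - 1) is only the offset inside it; the old code computed the offset where the base was needed. A standalone C check of the identity (illustrative, not kernel code):

    /* Demonstrates base/offset extraction for a power-of-two block size. */
    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t size = 0x1000;               /* must be a power of two */
        uint64_t addr = 0x12345;

        uint64_t base = addr & ~(size - 1);   /* 0x12000: start of block */
        uint64_t off  = addr & (size - 1);    /* 0x00345: offset in block */

        assert(base + off == addr);
        assert(base % size == 0);
        return 0;
    }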
+2 -9
arch/arm64/kvm/pkvm.c
···
 	return 0;
 }
 
-void pkvm_pgtable_stage2_destroy_range(struct kvm_pgtable *pgt,
-				       u64 addr, u64 size)
+void pkvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
 {
-	__pkvm_pgtable_stage2_unmap(pgt, addr, addr + size);
-}
-
-void pkvm_pgtable_stage2_destroy_pgd(struct kvm_pgtable *pgt)
-{
-	/* Expected to be called after all pKVM mappings have been released. */
-	WARN_ON_ONCE(!RB_EMPTY_ROOT(&pgt->pkvm_mappings.rb_root));
+	__pkvm_pgtable_stage2_unmap(pgt, 0, ~(0ULL));
 }
 
 int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
+1 -1
arch/arm64/kvm/vgic/vgic-debug.c
···
 	int nr_lpis = 0;
 
 	xa_for_each(&dist->lpi_xa, intid, irq) {
-		if (!vgic_try_get_irq_kref(irq))
+		if (!vgic_try_get_irq_ref(irq))
 			continue;
 
 		xa_set_mark(&dist->lpi_xa, intid, LPI_XA_MARK_DEBUG_ITER);
+3 -3
arch/arm64/kvm/vgic/vgic-init.c
···
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
 
-	xa_init_flags(&dist->lpi_xa, XA_FLAGS_LOCK_IRQ);
+	xa_init(&dist->lpi_xa);
 }
 
 /* CREATION */
···
 		raw_spin_lock_init(&irq->irq_lock);
 		irq->vcpu = NULL;
 		irq->target_vcpu = vcpu0;
-		kref_init(&irq->refcount);
+		refcount_set(&irq->refcount, 0);
 		switch (dist->vgic_model) {
 		case KVM_DEV_TYPE_ARM_VGIC_V2:
 			irq->targets = 0;
···
 		irq->intid = i;
 		irq->vcpu = NULL;
 		irq->target_vcpu = vcpu;
-		kref_init(&irq->refcount);
+		refcount_set(&irq->refcount, 0);
 		if (vgic_irq_is_sgi(i)) {
 			/* SGIs */
 			irq->enabled = 1;
+7 -8
arch/arm64/kvm/vgic/vgic-its.c
···
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
 	struct vgic_irq *irq = vgic_get_irq(kvm, intid), *oldirq;
-	unsigned long flags;
 	int ret;
 
 	/* In this case there is no put, since we keep the reference. */
···
 	if (!irq)
 		return ERR_PTR(-ENOMEM);
 
-	ret = xa_reserve_irq(&dist->lpi_xa, intid, GFP_KERNEL_ACCOUNT);
+	ret = xa_reserve(&dist->lpi_xa, intid, GFP_KERNEL_ACCOUNT);
 	if (ret) {
 		kfree(irq);
 		return ERR_PTR(ret);
···
 	raw_spin_lock_init(&irq->irq_lock);
 
 	irq->config = VGIC_CONFIG_EDGE;
-	kref_init(&irq->refcount);
+	refcount_set(&irq->refcount, 1);
 	irq->intid = intid;
 	irq->target_vcpu = vcpu;
 	irq->group = 1;
 
-	xa_lock_irqsave(&dist->lpi_xa, flags);
+	xa_lock(&dist->lpi_xa);
 
 	/*
 	 * There could be a race with another vgic_add_lpi(), so we need to
 	 * check that we don't add a second list entry with the same LPI.
 	 */
 	oldirq = xa_load(&dist->lpi_xa, intid);
-	if (vgic_try_get_irq_kref(oldirq)) {
+	if (vgic_try_get_irq_ref(oldirq)) {
 		/* Someone was faster with adding this LPI, lets use that. */
 		kfree(irq);
 		irq = oldirq;
···
 	}
 
 out_unlock:
-	xa_unlock_irqrestore(&dist->lpi_xa, flags);
+	xa_unlock(&dist->lpi_xa);
 
 	if (ret)
 		return ERR_PTR(ret);
···
 	rcu_read_lock();
 
 	irq = xa_load(&its->translation_cache, cache_key);
-	if (!vgic_try_get_irq_kref(irq))
+	if (!vgic_try_get_irq_ref(irq))
 		irq = NULL;
 
 	rcu_read_unlock();
···
 	 * its_lock, as the ITE (and the reference it holds) cannot be freed.
 	 */
 	lockdep_assert_held(&its->its_lock);
-	vgic_get_irq_kref(irq);
+	vgic_get_irq_ref(irq);
 
 	old = xa_store(&its->translation_cache, cache_key, irq, GFP_KERNEL_ACCOUNT);
+1 -1
arch/arm64/kvm/vgic/vgic-v4.c
···
 		if (!irq->hw || irq->host_irq != host_irq)
 			continue;
 
-		if (!vgic_try_get_irq_kref(irq))
+		if (!vgic_try_get_irq_ref(irq))
 			return NULL;
 
 		return irq;
+58 -22
arch/arm64/kvm/vgic/vgic.c
···
 * kvm->arch.config_lock (mutex)
 *   its->cmd_lock (mutex)
 *     its->its_lock (mutex)
-*       vgic_cpu->ap_list_lock		must be taken with IRQs disabled
-*         vgic_dist->lpi_xa.xa_lock	must be taken with IRQs disabled
+*       vgic_dist->lpi_xa.xa_lock
+*         vgic_cpu->ap_list_lock	must be taken with IRQs disabled
 *           vgic_irq->irq_lock		must be taken with IRQs disabled
 *
 * As the ap_list_lock might be taken from the timer interrupt handler,
···
 	rcu_read_lock();
 
 	irq = xa_load(&dist->lpi_xa, intid);
-	if (!vgic_try_get_irq_kref(irq))
+	if (!vgic_try_get_irq_ref(irq))
 		irq = NULL;
 
 	rcu_read_unlock();
···
 	return vgic_get_irq(vcpu->kvm, intid);
 }
 
-/*
- * We can't do anything in here, because we lack the kvm pointer to
- * lock and remove the item from the lpi_list. So we keep this function
- * empty and use the return value of kref_put() to trigger the freeing.
- */
-static void vgic_irq_release(struct kref *ref)
+static void vgic_release_lpi_locked(struct vgic_dist *dist, struct vgic_irq *irq)
 {
+	lockdep_assert_held(&dist->lpi_xa.xa_lock);
+	__xa_erase(&dist->lpi_xa, irq->intid);
+	kfree_rcu(irq, rcu);
+}
+
+static __must_check bool __vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
+{
+	if (irq->intid < VGIC_MIN_LPI)
+		return false;
+
+	return refcount_dec_and_test(&irq->refcount);
+}
+
+static __must_check bool vgic_put_irq_norelease(struct kvm *kvm, struct vgic_irq *irq)
+{
+	if (!__vgic_put_irq(kvm, irq))
+		return false;
+
+	irq->pending_release = true;
+	return true;
 }
 
 void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
-	unsigned long flags;
 
-	if (irq->intid < VGIC_MIN_LPI)
+	if (irq->intid >= VGIC_MIN_LPI)
+		might_lock(&dist->lpi_xa.xa_lock);
+
+	if (!__vgic_put_irq(kvm, irq))
 		return;
 
-	if (!kref_put(&irq->refcount, vgic_irq_release))
-		return;
+	xa_lock(&dist->lpi_xa);
+	vgic_release_lpi_locked(dist, irq);
+	xa_unlock(&dist->lpi_xa);
+}
 
-	xa_lock_irqsave(&dist->lpi_xa, flags);
-	__xa_erase(&dist->lpi_xa, irq->intid);
-	xa_unlock_irqrestore(&dist->lpi_xa, flags);
+static void vgic_release_deleted_lpis(struct kvm *kvm)
+{
+	struct vgic_dist *dist = &kvm->arch.vgic;
+	unsigned long intid;
+	struct vgic_irq *irq;
 
-	kfree_rcu(irq, rcu);
+	xa_lock(&dist->lpi_xa);
+
+	xa_for_each(&dist->lpi_xa, intid, irq) {
+		if (irq->pending_release)
+			vgic_release_lpi_locked(dist, irq);
+	}
+
+	xa_unlock(&dist->lpi_xa);
 }
 
 void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	struct vgic_irq *irq, *tmp;
+	bool deleted = false;
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
···
 			list_del(&irq->ap_list);
 			irq->vcpu = NULL;
 			raw_spin_unlock(&irq->irq_lock);
-			vgic_put_irq(vcpu->kvm, irq);
+			deleted |= vgic_put_irq_norelease(vcpu->kvm, irq);
 		}
 	}
 
 	raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
+
+	if (deleted)
+		vgic_release_deleted_lpis(vcpu->kvm);
 }
 
 void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending)
···
 	 * now in the ap_list. This is safe as the caller must already hold a
 	 * reference on the irq.
 	 */
-	vgic_get_irq_kref(irq);
+	vgic_get_irq_ref(irq);
 	list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
 	irq->vcpu = vcpu;
···
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	struct vgic_irq *irq, *tmp;
+	bool deleted_lpis = false;
 
 	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
 
···
 
 			/*
 			 * This vgic_put_irq call matches the
-			 * vgic_get_irq_kref in vgic_queue_irq_unlock,
+			 * vgic_get_irq_ref in vgic_queue_irq_unlock,
 			 * where we added the LPI to the ap_list. As
 			 * we remove the irq from the list, we drop
 			 * also drop the refcount.
 			 */
-			vgic_put_irq(vcpu->kvm, irq);
+			deleted_lpis |= vgic_put_irq_norelease(vcpu->kvm, irq);
 			continue;
 		}
 
···
 	}
 
 	raw_spin_unlock(&vgic_cpu->ap_list_lock);
+
+	if (unlikely(deleted_lpis))
+		vgic_release_deleted_lpis(vcpu->kvm);
 }
 
 static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
···
 		 * the AP list has been sorted already.
 		 */
 		if (multi_sgi && irq->priority > prio) {
-			_raw_spin_unlock(&irq->irq_lock);
+			raw_spin_unlock(&irq->irq_lock);
 			break;
 		}
 
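The reworked put path above splits "drop the reference" from "free the object": contexts that cannot take the xarray lock (they already hold the ap_list lock with IRQs disabled) only mark the IRQ pending_release, and a later sweep erases all marked entries under the lock. A compact userspace sketch of the same two-phase release, with C11 atomics standing in for refcount_t, a mutex for the xarray lock, and a toy linked list for the xarray itself (all names illustrative, not the kernel API):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct obj {
        atomic_int refcount;
        bool pending_release;   /* set once the last reference is gone */
        struct obj *next;       /* toy stand-in for the xarray */
    };

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct obj *table;

    /* Phase 1: may run where table_lock must not be taken. */
    static bool put_norelease(struct obj *o)
    {
        if (atomic_fetch_sub(&o->refcount, 1) != 1)
            return false;           /* not the last reference */
        o->pending_release = true;  /* defer the actual freeing */
        return true;
    }

    /* Phase 2: runs later, in a context that may take the lock. */
    static void release_deleted(void)
    {
        struct obj **p;

        pthread_mutex_lock(&table_lock);
        for (p = &table; *p; ) {
            struct obj *o = *p;
            if (o->pending_release) {
                *p = o->next;       /* unlink, then free */
                free(o);
            } else {
                p = &o->next;
            }
        }
        pthread_mutex_unlock(&table_lock);
    }

A caller that drops references in a restricted context accumulates the boolean results of put_norelease() and, once back in a permissive context, invokes release_deleted() exactly as vgic_flush_pending_lpis() and vgic_prune_ap_list() do above.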
+4 -4
arch/arm64/kvm/vgic/vgic.h
···
 void vgic_v2_save_state(struct kvm_vcpu *vcpu);
 void vgic_v2_restore_state(struct kvm_vcpu *vcpu);
 
-static inline bool vgic_try_get_irq_kref(struct vgic_irq *irq)
+static inline bool vgic_try_get_irq_ref(struct vgic_irq *irq)
 {
 	if (!irq)
 		return false;
···
 	if (irq->intid < VGIC_MIN_LPI)
 		return true;
 
-	return kref_get_unless_zero(&irq->refcount);
+	return refcount_inc_not_zero(&irq->refcount);
 }
 
-static inline void vgic_get_irq_kref(struct vgic_irq *irq)
+static inline void vgic_get_irq_ref(struct vgic_irq *irq)
 {
-	WARN_ON_ONCE(!vgic_try_get_irq_kref(irq));
+	WARN_ON_ONCE(!vgic_try_get_irq_ref(irq));
 }
 
 void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu);
+10 -2
arch/loongarch/Kconfig
···
 config CC_HAS_ANNOTATE_TABLEJUMP
 	def_bool $(cc-option,-mannotate-tablejump)
 
+config RUSTC_HAS_ANNOTATE_TABLEJUMP
+	depends on RUST
+	def_bool $(rustc-option,-Cllvm-args=--loongarch-annotate-tablejump)
+
 menu "Kernel type and options"
 
 source "kernel/Kconfig.hz"
···
 	  -mstrict-align build parameter to prevent unaligned accesses.
 
 	  CPUs with h/w unaligned access support:
-	  Loongson-2K2000/2K3000/3A5000/3C5000/3D5000.
+	  Loongson-2K2000/2K3000 and all of Loongson-3 series processors
+	  based on LoongArch.
 
 	  CPUs without h/w unaligned access support:
-	  Loongson-2K500/2K1000.
+	  Loongson-2K0300/2K0500/2K1000.
+
+	  If you want to make sure whether to support unaligned memory access
+	  on your hardware, please read the bit 20 (UAL) of CPUCFG1 register.
 
 	  This option is enabled by default to make the kernel be able to run
 	  on all LoongArch systems. But you can disable it manually if you want
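The new help text tells users to consult CPUCFG1 bit 20 (UAL) to determine whether their part supports hardware unaligned access. A minimal userspace sketch of that probe, assuming a LoongArch toolchain; the cpucfg instruction is usable from user mode, and the word index (0x1) and bit position follow the LoongArch reference manual:

    /*
     * Sketch (LoongArch only): read CPUCFG word 1 and report the UAL bit.
     * The inline asm and bit layout are assumptions based on the LoongArch
     * reference manual, not taken from this patch.
     */
    #include <stdio.h>

    static unsigned int cpucfg(unsigned int reg)
    {
        unsigned int val;

        __asm__("cpucfg %0, %1" : "=r"(val) : "r"(reg));
        return val;
    }

    int main(void)
    {
        unsigned int cfg1 = cpucfg(0x1);    /* CPUCFG word 1 */

        printf("CPUCFG1 = 0x%08x, UAL = %u\n", cfg1, (cfg1 >> 20) & 1);
        return 0;
    }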
+10 -5
arch/loongarch/Makefile
···
 
 ifdef CONFIG_OBJTOOL
 ifdef CONFIG_CC_HAS_ANNOTATE_TABLEJUMP
+KBUILD_CFLAGS			+= -mannotate-tablejump
+else
+KBUILD_CFLAGS			+= -fno-jump-tables # keep compatibility with older compilers
+endif
+ifdef CONFIG_RUSTC_HAS_ANNOTATE_TABLEJUMP
+KBUILD_RUSTFLAGS		+= -Cllvm-args=--loongarch-annotate-tablejump
+else
+KBUILD_RUSTFLAGS		+= -Zno-jump-tables # keep compatibility with older compilers
+endif
+ifdef CONFIG_LTO_CLANG
 # The annotate-tablejump option can not be passed to LLVM backend when LTO is enabled.
 # Ensure it is aware of linker with LTO, '--loongarch-annotate-tablejump' also needs to
 # be passed via '-mllvm' to ld.lld.
-KBUILD_CFLAGS			+= -mannotate-tablejump
-ifdef CONFIG_LTO_CLANG
 KBUILD_LDFLAGS			+= -mllvm --loongarch-annotate-tablejump
-endif
-else
-KBUILD_CFLAGS			+= -fno-jump-tables # keep compatibility with older compilers
 endif
 endif
 
+3 -4
arch/loongarch/include/asm/acenv.h
···
 #ifndef _ASM_LOONGARCH_ACENV_H
 #define _ASM_LOONGARCH_ACENV_H
 
-/*
- * This header is required by ACPI core, but we have nothing to fill in
- * right now. Will be updated later when needed.
- */
+#ifdef CONFIG_ARCH_STRICT_ALIGN
+#define ACPI_MISALIGNMENT_NOT_SUPPORTED
+#endif /* CONFIG_ARCH_STRICT_ALIGN */
 
 #endif /* _ASM_LOONGARCH_ACENV_H */
+16 -4
arch/loongarch/include/asm/kvm_mmu.h
···
  */
 #define KVM_MMU_CACHE_MIN_PAGES	(CONFIG_PGTABLE_LEVELS - 1)
 
+/*
+ * _PAGE_MODIFIED is a SW pte bit, it records page ever written on host
+ * kernel, on secondary MMU it records the page writeable attribute, in
+ * order for fast path handling.
+ */
+#define KVM_PAGE_WRITEABLE	_PAGE_MODIFIED
+
 #define _KVM_FLUSH_PGTABLE	0x1
 #define _KVM_HAS_PGMASK		0x2
 #define kvm_pfn_pte(pfn, prot)	(((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
···
 	WRITE_ONCE(*ptep, val);
 }
 
-static inline int kvm_pte_write(kvm_pte_t pte) { return pte & _PAGE_WRITE; }
-static inline int kvm_pte_dirty(kvm_pte_t pte) { return pte & _PAGE_DIRTY; }
 static inline int kvm_pte_young(kvm_pte_t pte) { return pte & _PAGE_ACCESSED; }
 static inline int kvm_pte_huge(kvm_pte_t pte) { return pte & _PAGE_HUGE; }
+static inline int kvm_pte_dirty(kvm_pte_t pte) { return pte & __WRITEABLE; }
+static inline int kvm_pte_writeable(kvm_pte_t pte) { return pte & KVM_PAGE_WRITEABLE; }
 
 static inline kvm_pte_t kvm_pte_mkyoung(kvm_pte_t pte)
 {
···
 
 static inline kvm_pte_t kvm_pte_mkdirty(kvm_pte_t pte)
 {
-	return pte | _PAGE_DIRTY;
+	return pte | __WRITEABLE;
 }
 
 static inline kvm_pte_t kvm_pte_mkclean(kvm_pte_t pte)
 {
-	return pte & ~_PAGE_DIRTY;
+	return pte & ~__WRITEABLE;
 }
 
 static inline kvm_pte_t kvm_pte_mkhuge(kvm_pte_t pte)
···
 static inline kvm_pte_t kvm_pte_mksmall(kvm_pte_t pte)
 {
 	return pte & ~_PAGE_HUGE;
+}
+
+static inline kvm_pte_t kvm_pte_mkwriteable(kvm_pte_t pte)
+{
+	return pte | KVM_PAGE_WRITEABLE;
 }
 
 static inline int kvm_need_flush(kvm_ptw_ctx *ctx)
+3 -1
arch/loongarch/kernel/env.c
···
 static ssize_t boardinfo_show(struct kobject *kobj,
 			      struct kobj_attribute *attr, char *buf)
 {
-	return sprintf(buf,
+	return sysfs_emit(buf,
 		       "BIOS Information\n"
 		       "Vendor\t\t\t: %s\n"
 		       "Version\t\t\t: %s\n"
···
 	struct kobject *loongson_kobj;
 
 	loongson_kobj = kobject_create_and_add("loongson", firmware_kobj);
+	if (!loongson_kobj)
+		return -ENOMEM;
 
 	return sysfs_create_file(loongson_kobj, &boardinfo_attr.attr);
 }
+2 -1
arch/loongarch/kernel/stacktrace.c
···
 	if (task == current) {
 		regs->regs[3] = (unsigned long)__builtin_frame_address(0);
 		regs->csr_era = (unsigned long)__builtin_return_address(0);
+		regs->regs[22] = 0;
 	} else {
 		regs->regs[3] = thread_saved_fp(task);
 		regs->csr_era = thread_saved_ra(task);
+		regs->regs[22] = task->thread.reg22;
 	}
 	regs->regs[1] = 0;
-	regs->regs[22] = 0;
 
 	for (unwind_start(&state, task, regs);
 	     !unwind_done(&state) && !unwind_error(&state); unwind_next_frame(&state)) {
+3
arch/loongarch/kernel/vdso.c
···
 	vdso_info.code_mapping.pages =
 		kcalloc(vdso_info.size / PAGE_SIZE, sizeof(struct page *), GFP_KERNEL);
 
+	if (!vdso_info.code_mapping.pages)
+		return -ENOMEM;
+
 	pfn = __phys_to_pfn(__pa_symbol(vdso_info.vdso));
 	for (i = 0; i < vdso_info.size / PAGE_SIZE; i++)
 		vdso_info.code_mapping.pages[i] = pfn_to_page(pfn + i);
+2 -4
arch/loongarch/kvm/exit.c
···
 		return 0;
 	default:
 		return KVM_HCALL_INVALID_CODE;
-	};
-
-	return KVM_HCALL_INVALID_CODE;
-};
+	}
+}
 
 /*
  * kvm_handle_lsx_disabled() - Guest used LSX while disabled in root.
+53 -34
arch/loongarch/kvm/intc/eiointc.c
···
 	struct loongarch_eiointc *s = dev->kvm->arch.eiointc;
 
 	data = (void __user *)attr->addr;
+	switch (type) {
+	case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU:
+	case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE:
+		if (copy_from_user(&val, data, 4))
+			return -EFAULT;
+		break;
+	default:
+		break;
+	}
+
 	spin_lock_irqsave(&s->lock, flags);
 	switch (type) {
 	case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU:
-		if (copy_from_user(&val, data, 4))
-			ret = -EFAULT;
-		else {
-			if (val >= EIOINTC_ROUTE_MAX_VCPUS)
-				ret = -EINVAL;
-			else
-				s->num_cpu = val;
-		}
+		if (val >= EIOINTC_ROUTE_MAX_VCPUS)
+			ret = -EINVAL;
+		else
+			s->num_cpu = val;
 		break;
 	case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE:
-		if (copy_from_user(&s->features, data, 4))
-			ret = -EFAULT;
+		s->features = val;
 		if (!(s->features & BIT(EIOINTC_HAS_VIRT_EXTENSION)))
 			s->status |= BIT(EIOINTC_ENABLE);
 		break;
···
 
 static int kvm_eiointc_regs_access(struct kvm_device *dev,
 				   struct kvm_device_attr *attr,
-				   bool is_write)
+				   bool is_write, int *data)
 {
 	int addr, cpu, offset, ret = 0;
 	unsigned long flags;
 	void *p = NULL;
-	void __user *data;
 	struct loongarch_eiointc *s;
 
 	s = dev->kvm->arch.eiointc;
 	addr = attr->attr;
 	cpu = addr >> 16;
 	addr &= 0xffff;
-	data = (void __user *)attr->addr;
 	switch (addr) {
 	case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
 		offset = (addr - EIOINTC_NODETYPE_START) / 4;
···
 	}
 
 	spin_lock_irqsave(&s->lock, flags);
-	if (is_write) {
-		if (copy_from_user(p, data, 4))
-			ret = -EFAULT;
-	} else {
-		if (copy_to_user(data, p, 4))
-			ret = -EFAULT;
-	}
+	if (is_write)
+		memcpy(p, data, 4);
+	else
+		memcpy(data, p, 4);
 	spin_unlock_irqrestore(&s->lock, flags);
 
 	return ret;
···
 
 static int kvm_eiointc_sw_status_access(struct kvm_device *dev,
 					struct kvm_device_attr *attr,
-					bool is_write)
+					bool is_write, int *data)
 {
 	int addr, ret = 0;
 	unsigned long flags;
 	void *p = NULL;
-	void __user *data;
 	struct loongarch_eiointc *s;
 
 	s = dev->kvm->arch.eiointc;
 	addr = attr->attr;
 	addr &= 0xffff;
 
-	data = (void __user *)attr->addr;
 	switch (addr) {
 	case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_NUM_CPU:
 		if (is_write)
···
 		return -EINVAL;
 	}
 	spin_lock_irqsave(&s->lock, flags);
-	if (is_write) {
-		if (copy_from_user(p, data, 4))
-			ret = -EFAULT;
-	} else {
-		if (copy_to_user(data, p, 4))
-			ret = -EFAULT;
-	}
+	if (is_write)
+		memcpy(p, data, 4);
+	else
+		memcpy(data, p, 4);
 	spin_unlock_irqrestore(&s->lock, flags);
 
 	return ret;
···
 static int kvm_eiointc_get_attr(struct kvm_device *dev,
 				struct kvm_device_attr *attr)
 {
+	int ret, data;
+
 	switch (attr->group) {
 	case KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS:
-		return kvm_eiointc_regs_access(dev, attr, false);
+		ret = kvm_eiointc_regs_access(dev, attr, false, &data);
+		if (ret)
+			return ret;
+
+		if (copy_to_user((void __user *)attr->addr, &data, 4))
+			ret = -EFAULT;
+
+		return ret;
 	case KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS:
-		return kvm_eiointc_sw_status_access(dev, attr, false);
+		ret = kvm_eiointc_sw_status_access(dev, attr, false, &data);
+		if (ret)
+			return ret;
+
+		if (copy_to_user((void __user *)attr->addr, &data, 4))
+			ret = -EFAULT;
+
+		return ret;
 	default:
 		return -EINVAL;
 	}
···
 static int kvm_eiointc_set_attr(struct kvm_device *dev,
 				struct kvm_device_attr *attr)
 {
+	int data;
+
 	switch (attr->group) {
 	case KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL:
 		return kvm_eiointc_ctrl_access(dev, attr);
 	case KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS:
-		return kvm_eiointc_regs_access(dev, attr, true);
+		if (copy_from_user(&data, (void __user *)attr->addr, 4))
+			return -EFAULT;
+
+		return kvm_eiointc_regs_access(dev, attr, true, &data);
 	case KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS:
-		return kvm_eiointc_sw_status_access(dev, attr, true);
+		if (copy_from_user(&data, (void __user *)attr->addr, 4))
+			return -EFAULT;
+
+		return kvm_eiointc_sw_status_access(dev, attr, true, &data);
 	default:
 		return -EINVAL;
 	}
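The restructuring above moves every copy_from_user()/copy_to_user() outside the spinlock: those calls can fault and sleep, which is forbidden in atomic context, so the data is staged in a kernel-local variable and only plain memcpy() runs under the lock. The pch_pic.c change below applies the same fix. A minimal userspace analogue of the pattern (all names illustrative; a pthread mutex stands in for the spinlock and read() for the faultable user copy):

    #include <pthread.h>
    #include <string.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int shared_reg;          /* state the lock protects */

    int set_reg_from_fd(int fd)
    {
        int val;

        /* The potentially blocking operation runs before locking... */
        if (read(fd, &val, sizeof(val)) != sizeof(val))
            return -1;

        /* ...so the critical section is a short, non-blocking memcpy. */
        pthread_mutex_lock(&lock);
        memcpy(&shared_reg, &val, sizeof(val));
        pthread_mutex_unlock(&lock);
        return 0;
    }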
+14 -7
arch/loongarch/kvm/intc/pch_pic.c
···
 				      struct kvm_device_attr *attr,
 				      bool is_write)
 {
+	char buf[8];
 	int addr, offset, len = 8, ret = 0;
 	void __user *data;
 	void *p = NULL;
···
 		return -EINVAL;
 	}
 
-	spin_lock(&s->lock);
-	/* write or read value according to is_write */
 	if (is_write) {
-		if (copy_from_user(p, data, len))
-			ret = -EFAULT;
-	} else {
-		if (copy_to_user(data, p, len))
-			ret = -EFAULT;
+		if (copy_from_user(buf, data, len))
+			return -EFAULT;
 	}
+
+	spin_lock(&s->lock);
+	if (is_write)
+		memcpy(p, buf, len);
+	else
+		memcpy(buf, p, len);
 	spin_unlock(&s->lock);
+
+	if (!is_write) {
+		if (copy_to_user(data, buf, len))
+			return -EFAULT;
+	}
 
 	return ret;
 }
+4 -4
arch/loongarch/kvm/mmu.c
···
 	/* Track access to pages marked old */
 	new = kvm_pte_mkyoung(*ptep);
 	if (write && !kvm_pte_dirty(new)) {
-		if (!kvm_pte_write(new)) {
+		if (!kvm_pte_writeable(new)) {
 			ret = -EFAULT;
 			goto out;
 		}
···
 		prot_bits |= _CACHE_SUC;
 
 	if (writeable) {
-		prot_bits |= _PAGE_WRITE;
+		prot_bits = kvm_pte_mkwriteable(prot_bits);
 		if (write)
-			prot_bits |= __WRITEABLE;
+			prot_bits = kvm_pte_mkdirty(prot_bits);
 	}
 
 	/* Disable dirty logging on HugePages */
···
 	kvm_release_faultin_page(kvm, page, false, writeable);
 	spin_unlock(&kvm->mmu_lock);
 
-	if (prot_bits & _PAGE_DIRTY)
+	if (kvm_pte_dirty(prot_bits))
 		mark_page_dirty_in_slot(kvm, memslot, gfn);
 
 out:
+5 -5
arch/s390/include/asm/pci_insn.h
···
 #define ZPCI_PCI_ST_FUNC_NOT_AVAIL		40
 #define ZPCI_PCI_ST_ALREADY_IN_RQ_STATE		44
 
-/* Load/Store return codes */
-#define ZPCI_PCI_LS_OK				0
-#define ZPCI_PCI_LS_ERR				1
-#define ZPCI_PCI_LS_BUSY			2
-#define ZPCI_PCI_LS_INVAL_HANDLE		3
+/* PCI instruction condition codes */
+#define ZPCI_CC_OK				0
+#define ZPCI_CC_ERR				1
+#define ZPCI_CC_BUSY				2
+#define ZPCI_CC_INVAL_HANDLE			3
 
 /* Load/Store address space identifiers */
 #define ZPCI_PCIAS_MEMIO_0			0
+11 -4
arch/s390/kvm/interrupt.c
···
 
 static struct page *get_map_page(struct kvm *kvm, u64 uaddr)
 {
+	struct mm_struct *mm = kvm->mm;
 	struct page *page = NULL;
+	int locked = 1;
 
-	mmap_read_lock(kvm->mm);
-	get_user_pages_remote(kvm->mm, uaddr, 1, FOLL_WRITE,
-			      &page, NULL);
-	mmap_read_unlock(kvm->mm);
+	if (mmget_not_zero(mm)) {
+		mmap_read_lock(mm);
+		get_user_pages_remote(mm, uaddr, 1, FOLL_WRITE,
+				      &page, &locked);
+		if (locked)
+			mmap_read_unlock(mm);
+		mmput(mm);
+	}
+
 	return page;
 }
+12 -12
arch/s390/kvm/kvm-s390.c
···
  * @vcpu: the vCPU whose gmap is to be fixed up
  * @gfn: the guest frame number used for memslots (including fake memslots)
  * @gaddr: the gmap address, does not have to match @gfn for ucontrol gmaps
- * @flags: FOLL_* flags
+ * @foll: FOLL_* flags
  *
  * Return: 0 on success, < 0 in case of error.
  * Context: The mm lock must not be held before calling. May sleep.
  */
-int __kvm_s390_handle_dat_fault(struct kvm_vcpu *vcpu, gfn_t gfn, gpa_t gaddr, unsigned int flags)
+int __kvm_s390_handle_dat_fault(struct kvm_vcpu *vcpu, gfn_t gfn, gpa_t gaddr, unsigned int foll)
 {
 	struct kvm_memory_slot *slot;
 	unsigned int fault_flags;
···
 	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
 		return vcpu_post_run_addressing_exception(vcpu);
 
-	fault_flags = flags & FOLL_WRITE ? FAULT_FLAG_WRITE : 0;
+	fault_flags = foll & FOLL_WRITE ? FAULT_FLAG_WRITE : 0;
 	if (vcpu->arch.gmap->pfault_enabled)
-		flags |= FOLL_NOWAIT;
+		foll |= FOLL_NOWAIT;
 	vmaddr = __gfn_to_hva_memslot(slot, gfn);
 
 try_again:
-	pfn = __kvm_faultin_pfn(slot, gfn, flags, &writable, &page);
+	pfn = __kvm_faultin_pfn(slot, gfn, foll, &writable, &page);
 
 	/* Access outside memory, inject addressing exception */
 	if (is_noslot_pfn(pfn))
···
 			return 0;
 		vcpu->stat.pfault_sync++;
 		/* Could not setup async pfault, try again synchronously */
-		flags &= ~FOLL_NOWAIT;
+		foll &= ~FOLL_NOWAIT;
 		goto try_again;
 	}
 	/* Any other error */
···
 	return rc;
 }
 
-static int vcpu_dat_fault_handler(struct kvm_vcpu *vcpu, unsigned long gaddr, unsigned int flags)
+static int vcpu_dat_fault_handler(struct kvm_vcpu *vcpu, unsigned long gaddr, unsigned int foll)
 {
 	unsigned long gaddr_tmp;
 	gfn_t gfn;
···
 		}
 		gfn = gpa_to_gfn(gaddr_tmp);
 	}
-	return __kvm_s390_handle_dat_fault(vcpu, gfn, gaddr, flags);
+	return __kvm_s390_handle_dat_fault(vcpu, gfn, gaddr, foll);
 }
 
 static int vcpu_post_run_handle_fault(struct kvm_vcpu *vcpu)
 {
-	unsigned int flags = 0;
+	unsigned int foll = 0;
 	unsigned long gaddr;
 	int rc;
 
 	gaddr = current->thread.gmap_teid.addr * PAGE_SIZE;
 	if (kvm_s390_cur_gmap_fault_is_write())
-		flags = FAULT_FLAG_WRITE;
+		foll = FOLL_WRITE;
 
 	switch (current->thread.gmap_int_code & PGM_INT_CODE_MASK) {
 	case 0:
···
 		send_sig(SIGSEGV, current, 0);
 		if (rc != -ENXIO)
 			break;
-		flags = FAULT_FLAG_WRITE;
+		foll = FOLL_WRITE;
 		fallthrough;
 	case PGM_PROTECTION:
 	case PGM_SEGMENT_TRANSLATION:
···
 	case PGM_REGION_SECOND_TRANS:
 	case PGM_REGION_THIRD_TRANS:
 		kvm_s390_assert_primary_as(vcpu);
-		return vcpu_dat_fault_handler(vcpu, gaddr, flags);
+		return vcpu_dat_fault_handler(vcpu, gaddr, foll);
 	default:
 		KVM_BUG(1, vcpu->kvm, "Unexpected program interrupt 0x%x, TEID 0x%016lx",
 			current->thread.gmap_int_code, current->thread.gmap_teid.val);
+11 -5
arch/s390/kvm/pv.c
···
 	int cc, ret;
 	u16 dummy;
 
+	/* Add the notifier only once. No races because we hold kvm->lock */
+	if (kvm->arch.pv.mmu_notifier.ops != &kvm_s390_pv_mmu_notifier_ops) {
+		/* The notifier will be unregistered when the VM is destroyed */
+		kvm->arch.pv.mmu_notifier.ops = &kvm_s390_pv_mmu_notifier_ops;
+		ret = mmu_notifier_register(&kvm->arch.pv.mmu_notifier, kvm->mm);
+		if (ret) {
+			kvm->arch.pv.mmu_notifier.ops = NULL;
+			return ret;
+		}
+	}
+
 	ret = kvm_s390_pv_alloc_vm(kvm);
 	if (ret)
 		return ret;
···
 		return -EIO;
 	}
 	kvm->arch.gmap->guest_handle = uvcb.guest_handle;
-	/* Add the notifier only once. No races because we hold kvm->lock */
-	if (kvm->arch.pv.mmu_notifier.ops != &kvm_s390_pv_mmu_notifier_ops) {
-		kvm->arch.pv.mmu_notifier.ops = &kvm_s390_pv_mmu_notifier_ops;
-		mmu_notifier_register(&kvm->arch.pv.mmu_notifier, kvm->mm);
-	}
 	return 0;
 }
+4 -2
arch/um/drivers/virtio_uml.c
···
 	device_set_wakeup_capable(&vu_dev->vdev.dev, true);
 
 	rc = register_virtio_device(&vu_dev->vdev);
-	if (rc)
+	if (rc) {
 		put_device(&vu_dev->vdev.dev);
+		return rc;
+	}
 	vu_dev->registered = 1;
-	return rc;
+	return 0;
 
 error_init:
 	os_close_file(vu_dev->sock);
+1 -1
arch/um/os-Linux/file.c
··· 535 535 cmsg->cmsg_type != SCM_RIGHTS) 536 536 return n; 537 537 538 - memcpy(fds, CMSG_DATA(cmsg), cmsg->cmsg_len); 538 + memcpy(fds, CMSG_DATA(cmsg), cmsg->cmsg_len - CMSG_LEN(0)); 539 539 return n; 540 540 } 541 541
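The subtraction matters because cmsg_len counts the cmsghdr header as well as the payload, so copying cmsg_len bytes overruns the destination by the header size; CMSG_LEN(0) is exactly that header size. A userspace sketch of the corrected arithmetic (helper name illustrative):

#include <string.h>
#include <sys/socket.h>

/* Copy only the SCM_RIGHTS payload: cmsg_len - CMSG_LEN(0) bytes,
 * i.e. the file descriptors themselves, never the header. */
static int extract_fds(struct cmsghdr *cmsg, int *fds, size_t max_fds)
{
        size_t payload = cmsg->cmsg_len - CMSG_LEN(0);
        size_t n = payload / sizeof(int);

        if (n > max_fds)
                n = max_fds;
        memcpy(fds, CMSG_DATA(cmsg), n * sizeof(int));
        return (int)n;
}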
+1 -2
arch/um/os-Linux/util.c
··· 20 20 21 21 void stack_protections(unsigned long address) 22 22 { 23 - if (mprotect((void *) address, UM_THREAD_SIZE, 24 - PROT_READ | PROT_WRITE | PROT_EXEC) < 0) 23 + if (mprotect((void *) address, UM_THREAD_SIZE, PROT_READ | PROT_WRITE) < 0) 25 24 panic("protecting stack failed, errno = %d", errno); 26 25 } 27 26
+19 -19
arch/x86/include/asm/sev.h
··· 562 562 563 563 extern struct ghcb *boot_ghcb; 564 564 565 + static inline void sev_evict_cache(void *va, int npages) 566 + { 567 + volatile u8 val __always_unused; 568 + u8 *bytes = va; 569 + int page_idx; 570 + 571 + /* 572 + * For SEV guests, a read from the first/last cache-lines of a 4K page 573 + * using the guest key is sufficient to cause a flush of all cache-lines 574 + * associated with that 4K page without incurring all the overhead of a 575 + * full CLFLUSH sequence. 576 + */ 577 + for (page_idx = 0; page_idx < npages; page_idx++) { 578 + val = bytes[page_idx * PAGE_SIZE]; 579 + val = bytes[page_idx * PAGE_SIZE + PAGE_SIZE - 1]; 580 + } 581 + } 582 + 565 583 #else /* !CONFIG_AMD_MEM_ENCRYPT */ 566 584 567 585 #define snp_vmpl 0 ··· 623 605 static inline int snp_svsm_vtpm_send_command(u8 *buffer) { return -ENODEV; } 624 606 static inline void __init snp_secure_tsc_prepare(void) { } 625 607 static inline void __init snp_secure_tsc_init(void) { } 608 + static inline void sev_evict_cache(void *va, int npages) {} 626 609 627 610 #endif /* CONFIG_AMD_MEM_ENCRYPT */ 628 611 ··· 638 619 void snp_leak_pages(u64 pfn, unsigned int npages); 639 620 void kdump_sev_callback(void); 640 621 void snp_fixup_e820_tables(void); 641 - 642 - static inline void sev_evict_cache(void *va, int npages) 643 - { 644 - volatile u8 val __always_unused; 645 - u8 *bytes = va; 646 - int page_idx; 647 - 648 - /* 649 - * For SEV guests, a read from the first/last cache-lines of a 4K page 650 - * using the guest key is sufficient to cause a flush of all cache-lines 651 - * associated with that 4K page without incurring all the overhead of a 652 - * full CLFLUSH sequence. 653 - */ 654 - for (page_idx = 0; page_idx < npages; page_idx++) { 655 - val = bytes[page_idx * PAGE_SIZE]; 656 - val = bytes[page_idx * PAGE_SIZE + PAGE_SIZE - 1]; 657 - } 658 - } 659 622 #else 660 623 static inline bool snp_probe_rmptable_info(void) { return false; } 661 624 static inline int snp_rmptable_init(void) { return -ENOSYS; } ··· 653 652 static inline void snp_leak_pages(u64 pfn, unsigned int npages) {} 654 653 static inline void kdump_sev_callback(void) { } 655 654 static inline void snp_fixup_e820_tables(void) {} 656 - static inline void sev_evict_cache(void *va, int npages) {} 657 655 #endif 658 656 659 657 #endif
+1 -2
arch/x86/kvm/svm/svm.c
··· 4046 4046 struct vcpu_svm *svm = to_svm(vcpu); 4047 4047 u64 cr8; 4048 4048 4049 - if (nested_svm_virtualize_tpr(vcpu) || 4050 - kvm_vcpu_apicv_active(vcpu)) 4049 + if (nested_svm_virtualize_tpr(vcpu)) 4051 4050 return; 4052 4051 4053 4052 cr8 = kvm_get_cr8(vcpu);
+9 -1
crypto/af_alg.c
··· 970 970 } 971 971 972 972 lock_sock(sk); 973 + if (ctx->write) { 974 + release_sock(sk); 975 + return -EBUSY; 976 + } 977 + ctx->write = true; 978 + 973 979 if (ctx->init && !ctx->more) { 974 980 if (ctx->used) { 975 981 err = -EINVAL; ··· 1025 1019 continue; 1026 1020 } 1027 1021 1022 + ctx->merge = 0; 1023 + 1028 1024 if (!af_alg_writable(sk)) { 1029 1025 err = af_alg_wait_for_wmem(sk, msg->msg_flags); 1030 1026 if (err) ··· 1066 1058 ctx->used += plen; 1067 1059 copied += plen; 1068 1060 size -= plen; 1069 - ctx->merge = 0; 1070 1061 } else { 1071 1062 do { 1072 1063 struct page *pg; ··· 1111 1104 1112 1105 unlock: 1113 1106 af_alg_data_wakeup(sk); 1107 + ctx->write = false; 1114 1108 release_sock(sk); 1115 1109 1116 1110 return copied ?: err;
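The new ctx->write flag is needed because the sock lock is dropped and retaken while waiting for write memory, so a second sender could otherwise interleave with a half-built request. A pthread sketch of the guard shape (names illustrative, not the af_alg API):

#include <errno.h>
#include <pthread.h>

struct ctx {
        pthread_mutex_t lock;
        int write_busy;         /* mirrors ctx->write above */
};

static int sendmsg_guarded(struct ctx *c)
{
        pthread_mutex_lock(&c->lock);
        if (c->write_busy) {
                pthread_mutex_unlock(&c->lock);
                return -EBUSY;  /* a concurrent sender is already inside */
        }
        c->write_busy = 1;

        /* ... build the request; this phase may drop and retake the
         * lock to wait for memory, which is why the flag exists ... */

        c->write_busy = 0;
        pthread_mutex_unlock(&c->lock);
        return 0;
}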
+1
drivers/block/drbd/drbd_nl.c
··· 1330 1330 lim.max_write_zeroes_sectors = DRBD_MAX_BBIO_SECTORS; 1331 1331 else 1332 1332 lim.max_write_zeroes_sectors = 0; 1333 + lim.max_hw_wzeroes_unmap_sectors = 0; 1333 1334 1334 1335 if ((lim.discard_granularity >> SECTOR_SHIFT) > 1335 1336 lim.max_hw_discard_sectors) {
+3 -5
drivers/block/zram/zram_drv.c
··· 1795 1795 u32 index) 1796 1796 { 1797 1797 zram_slot_lock(zram, index); 1798 + zram_free_page(zram, index); 1798 1799 zram_set_flag(zram, index, ZRAM_SAME); 1799 1800 zram_set_handle(zram, index, fill); 1800 1801 zram_slot_unlock(zram, index); ··· 1833 1832 kunmap_local(src); 1834 1833 1835 1834 zram_slot_lock(zram, index); 1835 + zram_free_page(zram, index); 1836 1836 zram_set_flag(zram, index, ZRAM_HUGE); 1837 1837 zram_set_handle(zram, index, handle); 1838 1838 zram_set_obj_size(zram, index, PAGE_SIZE); ··· 1856 1854 struct zcomp_strm *zstrm; 1857 1855 unsigned long element; 1858 1856 bool same_filled; 1859 - 1860 - /* First, free memory allocated to this slot (if any) */ 1861 - zram_slot_lock(zram, index); 1862 - zram_free_page(zram, index); 1863 - zram_slot_unlock(zram, index); 1864 1857 1865 1858 mem = kmap_local_page(page); 1866 1859 same_filled = page_same_filled(mem, &element); ··· 1898 1901 zcomp_stream_put(zstrm); 1899 1902 1900 1903 zram_slot_lock(zram, index); 1904 + zram_free_page(zram, index); 1901 1905 zram_set_handle(zram, index, handle); 1902 1906 zram_set_obj_size(zram, index, comp_len); 1903 1907 zram_slot_unlock(zram, index);
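The common effect of these hunks is that freeing the old slot contents and installing the new ones now happen inside a single critical section, rather than in a separate lock/unlock pair at the top of the write path. A sketch of that locking shape with illustrative names:

#include <pthread.h>
#include <stdlib.h>

struct slot {
        pthread_mutex_t lock;
        void *handle;
        unsigned int flags;
};

/* Free-then-set under one lock acquisition, so no other path can
 * observe a slot that was freed but not yet repopulated. */
static void slot_replace(struct slot *s, void *new_handle,
                         unsigned int new_flags)
{
        pthread_mutex_lock(&s->lock);
        free(s->handle);        /* drop the old backing object */
        s->handle = new_handle;
        s->flags = new_flags;
        pthread_mutex_unlock(&s->lock);
}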
+19 -1
drivers/clk/renesas/clk-mstp.c
··· 303 303 pm_clk_destroy(dev); 304 304 } 305 305 306 + static struct device_node *cpg_mstp_pd_np __initdata = NULL; 307 + static struct generic_pm_domain *cpg_mstp_pd_genpd __initdata = NULL; 308 + 306 309 void __init cpg_mstp_add_clk_domain(struct device_node *np) 307 310 { 308 311 struct generic_pm_domain *pd; ··· 327 324 pd->detach_dev = cpg_mstp_detach_dev; 328 325 pm_genpd_init(pd, &pm_domain_always_on_gov, false); 329 326 330 - of_genpd_add_provider_simple(np, pd); 327 + cpg_mstp_pd_np = of_node_get(np); 328 + cpg_mstp_pd_genpd = pd; 331 329 } 330 + 331 + static int __init cpg_mstp_pd_init_provider(void) 332 + { 333 + int error; 334 + 335 + if (!cpg_mstp_pd_np) 336 + return -ENODEV; 337 + 338 + error = of_genpd_add_provider_simple(cpg_mstp_pd_np, cpg_mstp_pd_genpd); 339 + 340 + of_node_put(cpg_mstp_pd_np); 341 + return error; 342 + } 343 + postcore_initcall(cpg_mstp_pd_init_provider);
+1 -1
drivers/clk/sunxi-ng/ccu_mp.c
··· 185 185 p &= (1 << cmp->p.width) - 1; 186 186 187 187 if (cmp->common.features & CCU_FEATURE_DUAL_DIV) 188 - rate = (parent_rate / p) / m; 188 + rate = (parent_rate / (p + cmp->p.offset)) / m; 189 189 else 190 190 rate = (parent_rate >> p) / m; 191 191
+1 -1
drivers/crypto/ccp/sev-dev.c
··· 2430 2430 { 2431 2431 int error; 2432 2432 2433 - __sev_platform_shutdown_locked(NULL); 2433 + __sev_platform_shutdown_locked(&error); 2434 2434 2435 2435 if (sev_es_tmr) { 2436 2436 /*
+2 -2
drivers/dpll/dpll_netlink.c
··· 211 211 dpll_msg_add_clock_quality_level(struct sk_buff *msg, struct dpll_device *dpll, 212 212 struct netlink_ext_ack *extack) 213 213 { 214 + DECLARE_BITMAP(qls, DPLL_CLOCK_QUALITY_LEVEL_MAX + 1) = { 0 }; 214 215 const struct dpll_device_ops *ops = dpll_device_ops(dpll); 215 - DECLARE_BITMAP(qls, DPLL_CLOCK_QUALITY_LEVEL_MAX) = { 0 }; 216 216 enum dpll_clock_quality_level ql; 217 217 int ret; 218 218 ··· 221 221 ret = ops->clock_quality_level_get(dpll, dpll_priv(dpll), qls, extack); 222 222 if (ret) 223 223 return ret; 224 - for_each_set_bit(ql, qls, DPLL_CLOCK_QUALITY_LEVEL_MAX) 224 + for_each_set_bit(ql, qls, DPLL_CLOCK_QUALITY_LEVEL_MAX + 1) 225 225 if (nla_put_u32(msg, DPLL_A_CLOCK_QUALITY_LEVEL, ql)) 226 226 return -EMSGSIZE; 227 227
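This is the classic inclusive-maximum off-by-one: DPLL_CLOCK_QUALITY_LEVEL_MAX names the highest valid value, so the bitmap needs MAX + 1 bits and the exclusive bound passed to for_each_set_bit() must also be MAX + 1, otherwise the top level can never be stored or reported. A small userspace illustration with a hypothetical enum:

#include <stdint.h>
#include <stdio.h>

/* QL_MAX is the highest *valid* value, so valid values run
 * 0..QL_MAX inclusive -- that is QL_MAX + 1 of them. */
enum quality { QL_NONE, QL_PRC, QL_EEC1, QL_MAX = QL_EEC1 };

int main(void)
{
        uint32_t bits = 0;

        bits |= 1u << QL_MAX;   /* a QL_MAX-bit map could not hold this */
        for (int ql = 0; ql < QL_MAX + 1; ql++) /* exclusive bound */
                if (bits & (1u << ql))
                        printf("quality level %d set\n", ql);
        return 0;
}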
+1 -1
drivers/firewire/core-cdev.c
··· 41 41 /* 42 42 * ABI version history is documented in linux/firewire-cdev.h. 43 43 */ 44 - #define FW_CDEV_KERNEL_VERSION 5 44 + #define FW_CDEV_KERNEL_VERSION 6 45 45 #define FW_CDEV_VERSION_EVENT_REQUEST2 4 46 46 #define FW_CDEV_VERSION_ALLOCATE_REGION_END 4 47 47 #define FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW 5
+9 -2
drivers/gpio/gpiolib-acpi-core.c
··· 942 942 { 943 943 struct acpi_device *adev = to_acpi_device_node(fwnode); 944 944 bool can_fallback = acpi_can_fallback_to_crs(adev, con_id); 945 - struct acpi_gpio_info info; 945 + struct acpi_gpio_info info = {}; 946 946 struct gpio_desc *desc; 947 + int ret; 947 948 948 949 desc = __acpi_find_gpio(fwnode, con_id, idx, can_fallback, &info); 949 950 if (IS_ERR(desc)) ··· 958 957 959 958 acpi_gpio_update_gpiod_flags(dflags, &info); 960 959 acpi_gpio_update_gpiod_lookup_flags(lookupflags, &info); 960 + 961 + /* ACPI uses hundredths of milliseconds units */ 962 + ret = gpio_set_debounce_timeout(desc, info.debounce * 10); 963 + if (ret) 964 + return ERR_PTR(ret); 965 + 961 966 return desc; 962 967 } 963 968 ··· 999 992 int ret; 1000 993 1001 994 for (i = 0, idx = 0; idx <= index; i++) { 1002 - struct acpi_gpio_info info; 995 + struct acpi_gpio_info info = {}; 1003 996 struct gpio_desc *desc; 1004 997 1005 998 /* Ignore -EPROBE_DEFER, it only matters if idx matches */
+12
drivers/gpio/gpiolib-acpi-quirks.c
··· 319 319 }, 320 320 { 321 321 /* 322 + * Same as G1619-04. New model. 323 + */ 324 + .matches = { 325 + DMI_MATCH(DMI_SYS_VENDOR, "GPD"), 326 + DMI_MATCH(DMI_PRODUCT_NAME, "G1619-05"), 327 + }, 328 + .driver_data = &(struct acpi_gpiolib_dmi_quirk) { 329 + .ignore_wake = "PNP0C50:00@8", 330 + }, 331 + }, 332 + { 333 + /* 322 334 * Spurious wakeups from GPIO 11 323 335 * Found in BIOS 1.04 324 336 * https://gitlab.freedesktop.org/drm/amd/-/issues/3954
+12 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
··· 250 250 251 251 void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool suspend_proc) 252 252 { 253 - if (adev->kfd.dev) 254 - kgd2kfd_suspend(adev->kfd.dev, suspend_proc); 253 + if (adev->kfd.dev) { 254 + if (adev->in_s0ix) 255 + kgd2kfd_stop_sched_all_nodes(adev->kfd.dev); 256 + else 257 + kgd2kfd_suspend(adev->kfd.dev, suspend_proc); 258 + } 255 259 } 256 260 257 261 int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool resume_proc) 258 262 { 259 263 int r = 0; 260 264 261 - if (adev->kfd.dev) 262 - r = kgd2kfd_resume(adev->kfd.dev, resume_proc); 265 + if (adev->kfd.dev) { 266 + if (adev->in_s0ix) 267 + r = kgd2kfd_start_sched_all_nodes(adev->kfd.dev); 268 + else 269 + r = kgd2kfd_resume(adev->kfd.dev, resume_proc); 270 + } 263 271 264 272 return r; 265 273 }
+12
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
··· 426 426 int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd); 427 427 void kgd2kfd_unlock_kfd(struct kfd_dev *kfd); 428 428 int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id); 429 + int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd); 429 430 int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id); 431 + int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd); 430 432 bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id); 431 433 bool kgd2kfd_vmfault_fast_path(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry, 432 434 bool retry_fault); ··· 518 516 return 0; 519 517 } 520 518 519 + static inline int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd) 520 + { 521 + return 0; 522 + } 523 + 521 524 static inline int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id) 525 + { 526 + return 0; 527 + } 528 + 529 + static inline int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd) 522 530 { 523 531 return 0; 524 532 }
+10 -14
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 5136 5136 adev->in_suspend = true; 5137 5137 5138 5138 if (amdgpu_sriov_vf(adev)) { 5139 - if (!adev->in_s0ix && !adev->in_runpm) 5139 + if (!adev->in_runpm) 5140 5140 amdgpu_amdkfd_suspend_process(adev); 5141 5141 amdgpu_virt_fini_data_exchange(adev); 5142 5142 r = amdgpu_virt_request_full_gpu(adev, false); ··· 5156 5156 5157 5157 amdgpu_device_ip_suspend_phase1(adev); 5158 5158 5159 - if (!adev->in_s0ix) { 5160 - amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm); 5161 - amdgpu_userq_suspend(adev); 5162 - } 5159 + amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm); 5160 + amdgpu_userq_suspend(adev); 5163 5161 5164 5162 r = amdgpu_device_evict_resources(adev); 5165 5163 if (r) ··· 5252 5254 goto exit; 5253 5255 } 5254 5256 5255 - if (!adev->in_s0ix) { 5256 - r = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm); 5257 - if (r) 5258 - goto exit; 5257 + r = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm); 5258 + if (r) 5259 + goto exit; 5259 5260 5260 - r = amdgpu_userq_resume(adev); 5261 - if (r) 5262 - goto exit; 5263 - } 5261 + r = amdgpu_userq_resume(adev); 5262 + if (r) 5263 + goto exit; 5264 5264 5265 5265 r = amdgpu_device_ip_late_init(adev); 5266 5266 if (r) ··· 5271 5275 amdgpu_virt_init_data_exchange(adev); 5272 5276 amdgpu_virt_release_full_gpu(adev, true); 5273 5277 5274 - if (!adev->in_s0ix && !r && !adev->in_runpm) 5278 + if (!r && !adev->in_runpm) 5275 5279 r = amdgpu_amdkfd_resume_process(adev); 5276 5280 } 5277 5281
+15
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
··· 1654 1654 } 1655 1655 } 1656 1656 break; 1657 + case IP_VERSION(11, 0, 1): 1658 + case IP_VERSION(11, 0, 4): 1659 + adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex; 1660 + adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex); 1661 + if (adev->gfx.pfp_fw_version >= 102 && 1662 + adev->gfx.mec_fw_version >= 66 && 1663 + adev->mes.fw_version[0] >= 128) { 1664 + adev->gfx.enable_cleaner_shader = true; 1665 + r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size); 1666 + if (r) { 1667 + adev->gfx.enable_cleaner_shader = false; 1668 + dev_err(adev->dev, "Failed to initialize cleaner shader\n"); 1669 + } 1670 + } 1671 + break; 1657 1672 case IP_VERSION(11, 5, 0): 1658 1673 case IP_VERSION(11, 5, 1): 1659 1674 adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
+36
drivers/gpu/drm/amd/amdkfd/kfd_device.c
··· 1550 1550 return ret; 1551 1551 } 1552 1552 1553 + int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd) 1554 + { 1555 + struct kfd_node *node; 1556 + int i, r; 1557 + 1558 + if (!kfd->init_complete) 1559 + return 0; 1560 + 1561 + for (i = 0; i < kfd->num_nodes; i++) { 1562 + node = kfd->nodes[i]; 1563 + r = node->dqm->ops.unhalt(node->dqm); 1564 + if (r) { 1565 + dev_err(kfd_device, "Error in starting scheduler\n"); 1566 + return r; 1567 + } 1568 + } 1569 + return 0; 1570 + } 1571 + 1553 1572 int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id) 1554 1573 { 1555 1574 struct kfd_node *node; ··· 1584 1565 1585 1566 node = kfd->nodes[node_id]; 1586 1567 return node->dqm->ops.halt(node->dqm); 1568 + } 1569 + 1570 + int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd) 1571 + { 1572 + struct kfd_node *node; 1573 + int i, r; 1574 + 1575 + if (!kfd->init_complete) 1576 + return 0; 1577 + 1578 + for (i = 0; i < kfd->num_nodes; i++) { 1579 + node = kfd->nodes[i]; 1580 + r = node->dqm->ops.halt(node->dqm); 1581 + if (r) 1582 + return r; 1583 + } 1584 + return 0; 1587 1585 } 1588 1586 1589 1587 bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id)
+38 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 8717 8717 static void manage_dm_interrupts(struct amdgpu_device *adev, 8718 8718 struct amdgpu_crtc *acrtc, 8719 8719 struct dm_crtc_state *acrtc_state) 8720 - { 8720 + { /* 8721 + * We cannot be sure that the frontend index maps to the same 8722 + * backend index - some even map to more than one. 8723 + * So we have to go through the CRTC to find the right IRQ. 8724 + */ 8725 + int irq_type = amdgpu_display_crtc_idx_to_irq_type( 8726 + adev, 8727 + acrtc->crtc_id); 8728 + struct drm_device *dev = adev_to_drm(adev); 8729 + 8721 8730 struct drm_vblank_crtc_config config = {0}; 8722 8731 struct dc_crtc_timing *timing; 8723 8732 int offdelay; ··· 8779 8770 8780 8771 drm_crtc_vblank_on_config(&acrtc->base, 8781 8772 &config); 8773 + /* Allow RX6xxx, RX7700, RX7800 GPUs to call amdgpu_irq_get.*/ 8774 + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 8775 + case IP_VERSION(3, 0, 0): 8776 + case IP_VERSION(3, 0, 2): 8777 + case IP_VERSION(3, 0, 3): 8778 + case IP_VERSION(3, 2, 0): 8779 + if (amdgpu_irq_get(adev, &adev->pageflip_irq, irq_type)) 8780 + drm_err(dev, "DM_IRQ: Cannot get pageflip irq!\n"); 8781 + #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 8782 + if (amdgpu_irq_get(adev, &adev->vline0_irq, irq_type)) 8783 + drm_err(dev, "DM_IRQ: Cannot get vline0 irq!\n"); 8784 + #endif 8785 + } 8786 + 8782 8787 } else { 8788 + /* Allow RX6xxx, RX7700, RX7800 GPUs to call amdgpu_irq_put.*/ 8789 + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 8790 + case IP_VERSION(3, 0, 0): 8791 + case IP_VERSION(3, 0, 2): 8792 + case IP_VERSION(3, 0, 3): 8793 + case IP_VERSION(3, 2, 0): 8794 + #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 8795 + if (amdgpu_irq_put(adev, &adev->vline0_irq, irq_type)) 8796 + drm_err(dev, "DM_IRQ: Cannot put vline0 irq!\n"); 8797 + #endif 8798 + if (amdgpu_irq_put(adev, &adev->pageflip_irq, irq_type)) 8799 + drm_err(dev, "DM_IRQ: Cannot put pageflip irq!\n"); 8800 + } 8801 + 8783 8802 drm_crtc_vblank_off(&acrtc->base); 8784 8803 } 8785 8804 }
+1 -1
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
··· 2236 2236 return ret; 2237 2237 } 2238 2238 2239 - if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { 2239 + if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL && smu->od_enabled) { 2240 2240 ret = smu_od_edit_dpm_table(smu, PP_OD_COMMIT_DPM_TABLE, NULL, 0); 2241 2241 if (ret) 2242 2242 return ret;
+4 -2
drivers/gpu/drm/bridge/analogix/anx7625.c
··· 2677 2677 ret = devm_request_threaded_irq(dev, platform->pdata.intp_irq, 2678 2678 NULL, anx7625_intr_hpd_isr, 2679 2679 IRQF_TRIGGER_FALLING | 2680 - IRQF_ONESHOT, 2680 + IRQF_ONESHOT | IRQF_NO_AUTOEN, 2681 2681 "anx7625-intp", platform); 2682 2682 if (ret) { 2683 2683 DRM_DEV_ERROR(dev, "fail to request irq\n"); ··· 2746 2746 } 2747 2747 2748 2748 /* Add work function */ 2749 - if (platform->pdata.intp_irq) 2749 + if (platform->pdata.intp_irq) { 2750 + enable_irq(platform->pdata.intp_irq); 2750 2751 queue_work(platform->workqueue, &platform->work); 2752 + } 2751 2753 2752 2754 if (platform->pdata.audio_en) 2753 2755 anx7625_register_audio(dev, platform);
+4 -2
drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
··· 1984 1984 mhdp_state = to_cdns_mhdp_bridge_state(new_state); 1985 1985 1986 1986 mhdp_state->current_mode = drm_mode_duplicate(bridge->dev, mode); 1987 - if (!mhdp_state->current_mode) 1988 - return; 1987 + if (!mhdp_state->current_mode) { 1988 + ret = -EINVAL; 1989 + goto out; 1990 + } 1989 1991 1990 1992 drm_mode_set_name(mhdp_state->current_mode); 1991 1993
-2
drivers/gpu/drm/drm_gpuvm.c
··· 2432 2432 * 2433 2433 * The expected usage is:: 2434 2434 * 2435 - * .. code-block:: c 2436 - * 2437 2435 * vm_bind { 2438 2436 * struct drm_exec exec; 2439 2437 *
+1 -1
drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
··· 546 546 luminance_range->max_luminance, 547 547 panel->vbt.backlight.pwm_freq_hz, 548 548 intel_dp->edp_dpcd, &current_level, &current_mode, 549 - false); 549 + panel->backlight.edp.vesa.luminance_control_support); 550 550 if (ret < 0) 551 551 return ret; 552 552
+1
drivers/gpu/drm/xe/abi/guc_actions_abi.h
··· 117 117 XE_GUC_ACTION_ENTER_S_STATE = 0x501, 118 118 XE_GUC_ACTION_EXIT_S_STATE = 0x502, 119 119 XE_GUC_ACTION_GLOBAL_SCHED_POLICY_CHANGE = 0x506, 120 + XE_GUC_ACTION_UPDATE_SCHEDULING_POLICIES_KLV = 0x509, 120 121 XE_GUC_ACTION_SCHED_CONTEXT = 0x1000, 121 122 XE_GUC_ACTION_SCHED_CONTEXT_MODE_SET = 0x1001, 122 123 XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE = 0x1002,
+25
drivers/gpu/drm/xe/abi/guc_klvs_abi.h
··· 17 17 * | 0 | 31:16 | **KEY** - KLV key identifier | 18 18 * | | | - `GuC Self Config KLVs`_ | 19 19 * | | | - `GuC Opt In Feature KLVs`_ | 20 + * | | | - `GuC Scheduling Policies KLVs`_ | 20 21 * | | | - `GuC VGT Policy KLVs`_ | 21 22 * | | | - `GuC VF Configuration KLVs`_ | 22 23 * | | | | ··· 152 151 153 152 #define GUC_KLV_OPT_IN_FEATURE_DYNAMIC_INHIBIT_CONTEXT_SWITCH_KEY 0x4003 154 153 #define GUC_KLV_OPT_IN_FEATURE_DYNAMIC_INHIBIT_CONTEXT_SWITCH_LEN 0u 155 + 156 + /** 157 + * DOC: GuC Scheduling Policies KLVs 158 + * 159 + * `GuC KLV`_ keys available for use with UPDATE_SCHEDULING_POLICIES_KLV. 160 + * 161 + * _`GUC_KLV_SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD` : 0x1001 162 + * Some platforms do not allow concurrent execution of RCS and CCS 163 + * workloads from different address spaces. By default, the GuC prioritizes 164 + * RCS submissions over CCS ones, which can lead to CCS workloads being 165 + * significantly (or completely) starved of execution time. This KLV allows 166 + * the driver to specify a quantum (in ms) and a ratio (percentage value 167 + * between 0 and 100), and the GuC will prioritize the CCS for that 168 + * percentage of each quantum. For example, specifying 100ms and 30% will 169 + * make the GuC prioritize the CCS for 30ms of every 100ms. 170 + * Note that this does not necessarily mean that RCS and CCS engines will 171 + * only be active for their percentage of the quantum, as the restriction 172 + * only kicks in if both classes are fully busy with non-compatible address 173 + * spaces; i.e., if one engine is idle or running the same address space, 174 + * a pending job on the other engine will still be submitted to the HW no 175 + * matter what the ratio is. 176 + #define GUC_KLV_SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD_KEY 0x1001 177 + #define GUC_KLV_SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD_LEN 2u 155 178 156 179 /** 157 180 * DOC: GuC VGT Policy KLVs
+6 -2
drivers/gpu/drm/xe/xe_device_sysfs.c
··· 311 311 if (xe->info.platform == XE_BATTLEMAGE) { 312 312 ret = sysfs_create_files(&dev->kobj, auto_link_downgrade_attrs); 313 313 if (ret) 314 - return ret; 314 + goto cleanup; 315 315 316 316 ret = late_bind_create_files(dev); 317 317 if (ret) 318 - return ret; 318 + goto cleanup; 319 319 } 320 320 321 321 return devm_add_action_or_reset(dev, xe_device_sysfs_fini, xe); 322 + 323 + cleanup: 324 + xe_device_sysfs_fini(xe); 325 + return ret; 322 326 }
+15 -7
drivers/gpu/drm/xe/xe_exec_queue.c
··· 151 151 return err; 152 152 } 153 153 154 + static void __xe_exec_queue_fini(struct xe_exec_queue *q) 155 + { 156 + int i; 157 + 158 + q->ops->fini(q); 159 + 160 + for (i = 0; i < q->width; ++i) 161 + xe_lrc_put(q->lrc[i]); 162 + } 163 + 154 164 struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm, 155 165 u32 logical_mask, u16 width, 156 166 struct xe_hw_engine *hwe, u32 flags, ··· 191 181 if (xe_exec_queue_uses_pxp(q)) { 192 182 err = xe_pxp_exec_queue_add(xe->pxp, q); 193 183 if (err) 194 - goto err_post_alloc; 184 + goto err_post_init; 195 185 } 196 186 197 187 return q; 198 188 189 + err_post_init: 190 + __xe_exec_queue_fini(q); 199 191 err_post_alloc: 200 192 __xe_exec_queue_free(q); 201 193 return ERR_PTR(err); ··· 295 283 xe_exec_queue_put(eq); 296 284 } 297 285 298 - q->ops->fini(q); 286 + q->ops->destroy(q); 299 287 } 300 288 301 289 void xe_exec_queue_fini(struct xe_exec_queue *q) 302 290 { 303 - int i; 304 - 305 291 /* 306 292 * Before releasing our ref to lrc and xef, accumulate our run ticks 307 293 * and wakeup any waiters. ··· 308 298 if (q->xef && atomic_dec_and_test(&q->xef->exec_queue.pending_removal)) 309 299 wake_up_var(&q->xef->exec_queue.pending_removal); 310 300 311 - for (i = 0; i < q->width; ++i) 312 - xe_lrc_put(q->lrc[i]); 313 - 301 + __xe_exec_queue_fini(q); 314 302 __xe_exec_queue_free(q); 315 303 } 316 304
+7 -1
drivers/gpu/drm/xe/xe_exec_queue_types.h
··· 166 166 int (*init)(struct xe_exec_queue *q); 167 167 /** @kill: Kill inflight submissions for backend */ 168 168 void (*kill)(struct xe_exec_queue *q); 169 - /** @fini: Fini exec queue for submission backend */ 169 + /** @fini: Undoes the init() for submission backend */ 170 170 void (*fini)(struct xe_exec_queue *q); 171 + /** 172 + * @destroy: Destroy exec queue for submission backend. The backend 173 + * function must call xe_exec_queue_fini() (which will in turn call the 174 + * fini() backend function) to ensure the queue is properly cleaned up. 175 + */ 176 + void (*destroy)(struct xe_exec_queue *q); 171 177 /** @set_priority: Set priority for exec queue */ 172 178 int (*set_priority)(struct xe_exec_queue *q, 173 179 enum xe_exec_queue_priority priority);
+16 -9
drivers/gpu/drm/xe/xe_execlist.c
··· 385 385 return err; 386 386 } 387 387 388 - static void execlist_exec_queue_fini_async(struct work_struct *w) 388 + static void execlist_exec_queue_fini(struct xe_exec_queue *q) 389 + { 390 + struct xe_execlist_exec_queue *exl = q->execlist; 391 + 392 + drm_sched_entity_fini(&exl->entity); 393 + drm_sched_fini(&exl->sched); 394 + 395 + kfree(exl); 396 + } 397 + 398 + static void execlist_exec_queue_destroy_async(struct work_struct *w) 389 399 { 390 400 struct xe_execlist_exec_queue *ee = 391 - container_of(w, struct xe_execlist_exec_queue, fini_async); 401 + container_of(w, struct xe_execlist_exec_queue, destroy_async); 392 402 struct xe_exec_queue *q = ee->q; 393 403 struct xe_execlist_exec_queue *exl = q->execlist; 394 404 struct xe_device *xe = gt_to_xe(q->gt); ··· 411 401 list_del(&exl->active_link); 412 402 spin_unlock_irqrestore(&exl->port->lock, flags); 413 403 414 - drm_sched_entity_fini(&exl->entity); 415 - drm_sched_fini(&exl->sched); 416 - kfree(exl); 417 - 418 404 xe_exec_queue_fini(q); 419 405 } 420 406 ··· 419 413 /* NIY */ 420 414 } 421 415 422 - static void execlist_exec_queue_fini(struct xe_exec_queue *q) 416 + static void execlist_exec_queue_destroy(struct xe_exec_queue *q) 423 417 { 424 - INIT_WORK(&q->execlist->fini_async, execlist_exec_queue_fini_async); 425 - queue_work(system_unbound_wq, &q->execlist->fini_async); 418 + INIT_WORK(&q->execlist->destroy_async, execlist_exec_queue_destroy_async); 419 + queue_work(system_unbound_wq, &q->execlist->destroy_async); 426 420 } 427 421 428 422 static int execlist_exec_queue_set_priority(struct xe_exec_queue *q, ··· 473 467 .init = execlist_exec_queue_init, 474 468 .kill = execlist_exec_queue_kill, 475 469 .fini = execlist_exec_queue_fini, 470 + .destroy = execlist_exec_queue_destroy, 476 471 .set_priority = execlist_exec_queue_set_priority, 477 472 .set_timeslice = execlist_exec_queue_set_timeslice, 478 473 .set_preempt_timeout = execlist_exec_queue_set_preempt_timeout,
+1 -1
drivers/gpu/drm/xe/xe_execlist_types.h
··· 42 42 43 43 bool has_run; 44 44 45 - struct work_struct fini_async; 45 + struct work_struct destroy_async; 46 46 47 47 enum xe_exec_queue_priority active_priority; 48 48 struct list_head active_link;
+2 -1
drivers/gpu/drm/xe/xe_gt.c
··· 41 41 #include "xe_gt_topology.h" 42 42 #include "xe_guc_exec_queue_types.h" 43 43 #include "xe_guc_pc.h" 44 + #include "xe_guc_submit.h" 44 45 #include "xe_hw_fence.h" 45 46 #include "xe_hw_engine_class_sysfs.h" 46 47 #include "xe_irq.h" ··· 98 97 * FIXME: if xe_uc_sanitize is called here, on TGL driver will not 99 98 * reload 100 99 */ 101 - gt->uc.guc.submission_state.enabled = false; 100 + xe_guc_submit_disable(&gt->uc.guc); 102 101 } 103 102 104 103 static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)
-1
drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
··· 1632 1632 u64 fair; 1633 1633 1634 1634 fair = div_u64(available, num_vfs); 1635 - fair = rounddown_pow_of_two(fair); /* XXX: ttm_vram_mgr & drm_buddy limitation */ 1636 1635 fair = ALIGN_DOWN(fair, alignment); 1637 1636 #ifdef MAX_FAIR_LMEM 1638 1637 fair = min_t(u64, MAX_FAIR_LMEM, fair);
+2 -4
drivers/gpu/drm/xe/xe_guc.c
··· 880 880 return ret; 881 881 } 882 882 883 - guc->submission_state.enabled = true; 884 - 885 - return 0; 883 + return xe_guc_submit_enable(guc); 886 884 } 887 885 888 886 int xe_guc_reset(struct xe_guc *guc) ··· 1577 1579 { 1578 1580 xe_uc_fw_sanitize(&guc->fw); 1579 1581 xe_guc_ct_disable(&guc->ct); 1580 - guc->submission_state.enabled = false; 1582 + xe_guc_submit_disable(guc); 1581 1583 } 1582 1584 1583 1585 int xe_guc_reset_prepare(struct xe_guc *guc)
+2 -2
drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
··· 35 35 struct xe_sched_msg static_msgs[MAX_STATIC_MSG_TYPE]; 36 36 /** @lr_tdr: long running TDR worker */ 37 37 struct work_struct lr_tdr; 38 - /** @fini_async: do final fini async from this worker */ 39 - struct work_struct fini_async; 38 + /** @destroy_async: do final destroy async from this worker */ 39 + struct work_struct destroy_async; 40 40 /** @resume_time: time of last resume */ 41 41 u64 resume_time; 42 42 /** @state: GuC specific state for this xe_exec_queue */
+98 -22
drivers/gpu/drm/xe/xe_guc_submit.c
··· 32 32 #include "xe_guc_ct.h" 33 33 #include "xe_guc_exec_queue_types.h" 34 34 #include "xe_guc_id_mgr.h" 35 + #include "xe_guc_klv_helpers.h" 35 36 #include "xe_guc_submit_types.h" 36 37 #include "xe_hw_engine.h" 37 38 #include "xe_hw_fence.h" ··· 315 314 guc->submission_state.initialized = true; 316 315 317 316 return drmm_add_action_or_reset(&xe->drm, guc_submit_fini, guc); 317 + } 318 + 319 + /* 320 + * Given that we want to guarantee enough RCS throughput to avoid missing 321 + * frames, we set the yield policy to 20% of each 80ms interval. 322 + */ 323 + #define RC_YIELD_DURATION 80 /* in ms */ 324 + #define RC_YIELD_RATIO 20 /* in percent */ 325 + static u32 *emit_render_compute_yield_klv(u32 *emit) 326 + { 327 + *emit++ = PREP_GUC_KLV_TAG(SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD); 328 + *emit++ = RC_YIELD_DURATION; 329 + *emit++ = RC_YIELD_RATIO; 330 + 331 + return emit; 332 + } 333 + 334 + #define SCHEDULING_POLICY_MAX_DWORDS 16 335 + static int guc_init_global_schedule_policy(struct xe_guc *guc) 336 + { 337 + u32 data[SCHEDULING_POLICY_MAX_DWORDS]; 338 + u32 *emit = data; 339 + u32 count = 0; 340 + int ret; 341 + 342 + if (GUC_SUBMIT_VER(guc) < MAKE_GUC_VER(1, 1, 0)) 343 + return 0; 344 + 345 + *emit++ = XE_GUC_ACTION_UPDATE_SCHEDULING_POLICIES_KLV; 346 + 347 + if (CCS_MASK(guc_to_gt(guc))) 348 + emit = emit_render_compute_yield_klv(emit); 349 + 350 + count = emit - data; 351 + if (count > 1) { 352 + xe_assert(guc_to_xe(guc), count <= SCHEDULING_POLICY_MAX_DWORDS); 353 + 354 + ret = xe_guc_ct_send_block(&guc->ct, data, count); 355 + if (ret < 0) { 356 + xe_gt_err(guc_to_gt(guc), 357 + "failed to enable GuC scheduling policies: %pe\n", 358 + ERR_PTR(ret)); 359 + return ret; 360 + } 361 + } 362 + 363 + return 0; 364 + } 365 + 366 + int xe_guc_submit_enable(struct xe_guc *guc) 367 + { 368 + int ret; 369 + 370 + ret = guc_init_global_schedule_policy(guc); 371 + if (ret) 372 + return ret; 373 + 374 + guc->submission_state.enabled = true; 375 + 376 + return 0; 377 + } 378 + 379 + void xe_guc_submit_disable(struct xe_guc *guc) 380 + { 381 + guc->submission_state.enabled = false; 318 382 } 319 383 320 384 static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa_count) ··· 1343 1277 return DRM_GPU_SCHED_STAT_NO_HANG; 1344 1278 } 1345 1279 1346 - static void __guc_exec_queue_fini_async(struct work_struct *w) 1280 + static void guc_exec_queue_fini(struct xe_exec_queue *q) 1347 1281 { 1348 - struct xe_guc_exec_queue *ge = 1349 - container_of(w, struct xe_guc_exec_queue, fini_async); 1350 - struct xe_exec_queue *q = ge->q; 1282 + struct xe_guc_exec_queue *ge = q->guc; 1351 1283 struct xe_guc *guc = exec_queue_to_guc(q); 1352 1284 1353 - xe_pm_runtime_get(guc_to_xe(guc)); 1354 - trace_xe_exec_queue_destroy(q); 1355 - 1356 1285 release_guc_id(guc, q); 1357 - if (xe_exec_queue_is_lr(q)) 1358 - cancel_work_sync(&ge->lr_tdr); 1359 - /* Confirm no work left behind accessing device structures */ 1360 - cancel_delayed_work_sync(&ge->sched.base.work_tdr); 1361 1286 xe_sched_entity_fini(&ge->entity); 1362 1287 xe_sched_fini(&ge->sched); 1363 1288 ··· 1357 1300 * (timeline name). 
1358 1301 */ 1359 1302 kfree_rcu(ge, rcu); 1303 + } 1304 + 1305 + static void __guc_exec_queue_destroy_async(struct work_struct *w) 1306 + { 1307 + struct xe_guc_exec_queue *ge = 1308 + container_of(w, struct xe_guc_exec_queue, destroy_async); 1309 + struct xe_exec_queue *q = ge->q; 1310 + struct xe_guc *guc = exec_queue_to_guc(q); 1311 + 1312 + xe_pm_runtime_get(guc_to_xe(guc)); 1313 + trace_xe_exec_queue_destroy(q); 1314 + 1315 + if (xe_exec_queue_is_lr(q)) 1316 + cancel_work_sync(&ge->lr_tdr); 1317 + /* Confirm no work left behind accessing device structures */ 1318 + cancel_delayed_work_sync(&ge->sched.base.work_tdr); 1319 + 1360 1320 xe_exec_queue_fini(q); 1321 + 1361 1322 xe_pm_runtime_put(guc_to_xe(guc)); 1362 1323 } 1363 1324 1364 - static void guc_exec_queue_fini_async(struct xe_exec_queue *q) 1325 + static void guc_exec_queue_destroy_async(struct xe_exec_queue *q) 1365 1326 { 1366 1327 struct xe_guc *guc = exec_queue_to_guc(q); 1367 1328 struct xe_device *xe = guc_to_xe(guc); 1368 1329 1369 - INIT_WORK(&q->guc->fini_async, __guc_exec_queue_fini_async); 1330 + INIT_WORK(&q->guc->destroy_async, __guc_exec_queue_destroy_async); 1370 1331 1371 1332 /* We must block on kernel engines so slabs are empty on driver unload */ 1372 1333 if (q->flags & EXEC_QUEUE_FLAG_PERMANENT || exec_queue_wedged(q)) 1373 - __guc_exec_queue_fini_async(&q->guc->fini_async); 1334 + __guc_exec_queue_destroy_async(&q->guc->destroy_async); 1374 1335 else 1375 - queue_work(xe->destroy_wq, &q->guc->fini_async); 1336 + queue_work(xe->destroy_wq, &q->guc->destroy_async); 1376 1337 } 1377 1338 1378 - static void __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q) 1339 + static void __guc_exec_queue_destroy(struct xe_guc *guc, struct xe_exec_queue *q) 1379 1340 { 1380 1341 /* 1381 1342 * Might be done from within the GPU scheduler, need to do async as we ··· 1402 1327 * this we and don't really care when everything is fini'd, just that it 1403 1328 * is. 
1404 1329 */ 1405 - guc_exec_queue_fini_async(q); 1330 + guc_exec_queue_destroy_async(q); 1406 1331 } 1407 1332 1408 1333 static void __guc_exec_queue_process_msg_cleanup(struct xe_sched_msg *msg) ··· 1416 1341 if (exec_queue_registered(q)) 1417 1342 disable_scheduling_deregister(guc, q); 1418 1343 else 1419 - __guc_exec_queue_fini(guc, q); 1344 + __guc_exec_queue_destroy(guc, q); 1420 1345 } 1421 1346 1422 1347 static bool guc_exec_queue_allowed_to_change_state(struct xe_exec_queue *q) ··· 1649 1574 #define STATIC_MSG_CLEANUP 0 1650 1575 #define STATIC_MSG_SUSPEND 1 1651 1576 #define STATIC_MSG_RESUME 2 1652 - static void guc_exec_queue_fini(struct xe_exec_queue *q) 1577 + static void guc_exec_queue_destroy(struct xe_exec_queue *q) 1653 1578 { 1654 1579 struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_CLEANUP; 1655 1580 1656 1581 if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && !exec_queue_wedged(q)) 1657 1582 guc_exec_queue_add_msg(q, msg, CLEANUP); 1658 1583 else 1659 - __guc_exec_queue_fini(exec_queue_to_guc(q), q); 1584 + __guc_exec_queue_destroy(exec_queue_to_guc(q), q); 1660 1585 } 1661 1586 1662 1587 static int guc_exec_queue_set_priority(struct xe_exec_queue *q, ··· 1786 1711 .init = guc_exec_queue_init, 1787 1712 .kill = guc_exec_queue_kill, 1788 1713 .fini = guc_exec_queue_fini, 1714 + .destroy = guc_exec_queue_destroy, 1789 1715 .set_priority = guc_exec_queue_set_priority, 1790 1716 .set_timeslice = guc_exec_queue_set_timeslice, 1791 1717 .set_preempt_timeout = guc_exec_queue_set_preempt_timeout, ··· 1808 1732 if (exec_queue_extra_ref(q) || xe_exec_queue_is_lr(q)) 1809 1733 xe_exec_queue_put(q); 1810 1734 else if (exec_queue_destroyed(q)) 1811 - __guc_exec_queue_fini(guc, q); 1735 + __guc_exec_queue_destroy(guc, q); 1812 1736 } 1813 1737 if (q->guc->suspend_pending) { 1814 1738 set_exec_queue_suspended(q); ··· 2065 1989 if (exec_queue_extra_ref(q) || xe_exec_queue_is_lr(q)) 2066 1990 xe_exec_queue_put(q); 2067 1991 else 2068 - __guc_exec_queue_fini(guc, q); 1992 + __guc_exec_queue_destroy(guc, q); 2069 1993 } 2070 1994 2071 1995 int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
+2
drivers/gpu/drm/xe/xe_guc_submit.h
··· 13 13 struct xe_guc; 14 14 15 15 int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids); 16 + int xe_guc_submit_enable(struct xe_guc *guc); 17 + void xe_guc_submit_disable(struct xe_guc *guc); 16 18 17 19 int xe_guc_submit_reset_prepare(struct xe_guc *guc); 18 20 void xe_guc_submit_reset_wait(struct xe_guc *guc);
+19 -16
drivers/gpu/drm/xe/xe_hwmon.c
··· 286 286 */ 287 287 static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *value) 288 288 { 289 - u64 reg_val = 0, min, max; 289 + u32 reg_val = 0; 290 290 struct xe_device *xe = hwmon->xe; 291 291 struct xe_reg rapl_limit, pkg_power_sku; 292 292 struct xe_mmio *mmio = xe_root_tile_mmio(xe); ··· 294 294 mutex_lock(&hwmon->hwmon_lock); 295 295 296 296 if (hwmon->xe->info.has_mbx_power_limits) { 297 - xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, (u32 *)&reg_val); 297 + xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, &reg_val); 298 298 } else { 299 299 rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel); 300 300 pkg_power_sku = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel); ··· 304 304 /* Check if PL limits are disabled. */ 305 305 if (!(reg_val & PWR_LIM_EN)) { 306 306 *value = PL_DISABLE; 307 - drm_info(&hwmon->xe->drm, "%s disabled for channel %d, val 0x%016llx\n", 307 + drm_info(&hwmon->xe->drm, "%s disabled for channel %d, val 0x%08x\n", 308 308 PWR_ATTR_TO_STR(attr), channel, reg_val); 309 309 goto unlock; 310 310 } 311 311 312 312 reg_val = REG_FIELD_GET(PWR_LIM_VAL, reg_val); 313 - *value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power); 313 + *value = mul_u32_u32(reg_val, SF_POWER) >> hwmon->scl_shift_power; 314 314 315 315 /* For platforms with mailbox power limit support clamping would be done by pcode. */ 316 316 if (!hwmon->xe->info.has_mbx_power_limits) { 317 - reg_val = xe_mmio_read64_2x32(mmio, pkg_power_sku); 318 - min = REG_FIELD_GET(PKG_MIN_PWR, reg_val); 319 - max = REG_FIELD_GET(PKG_MAX_PWR, reg_val); 317 + u64 pkg_pwr, min, max; 318 + 319 + pkg_pwr = xe_mmio_read64_2x32(mmio, pkg_power_sku); 320 + min = REG_FIELD_GET(PKG_MIN_PWR, pkg_pwr); 321 + max = REG_FIELD_GET(PKG_MAX_PWR, pkg_pwr); 320 322 min = mul_u64_u32_shr(min, SF_POWER, hwmon->scl_shift_power); 321 323 max = mul_u64_u32_shr(max, SF_POWER, hwmon->scl_shift_power); 322 324 if (min && max) ··· 495 493 { 496 494 struct xe_hwmon *hwmon = dev_get_drvdata(dev); 497 495 struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe); 498 - u32 x, y, x_w = 2; /* 2 bits */ 499 - u64 r, tau4, out; 496 + u32 reg_val, x, y, x_w = 2; /* 2 bits */ 497 + u64 tau4, out; 500 498 int channel = (to_sensor_dev_attr(attr)->index % 2) ? CHANNEL_PKG : CHANNEL_CARD; 501 499 u32 power_attr = (to_sensor_dev_attr(attr)->index > 1) ? 
PL2_HWMON_ATTR : PL1_HWMON_ATTR; 502 500 ··· 507 505 mutex_lock(&hwmon->hwmon_lock); 508 506 509 507 if (hwmon->xe->info.has_mbx_power_limits) { 510 - ret = xe_hwmon_pcode_read_power_limit(hwmon, power_attr, channel, (u32 *)&r); 508 + ret = xe_hwmon_pcode_read_power_limit(hwmon, power_attr, channel, &reg_val); 511 509 if (ret) { 512 510 drm_err(&hwmon->xe->drm, 513 - "power interval read fail, ch %d, attr %d, r 0%llx, ret %d\n", 514 - channel, power_attr, r, ret); 515 - r = 0; 511 + "power interval read fail, ch %d, attr %d, val 0x%08x, ret %d\n", 512 + channel, power_attr, reg_val, ret); 513 + reg_val = 0; 516 514 } 517 515 } else { 518 - r = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel)); 516 + reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, 517 + channel)); 519 518 } 520 519 521 520 mutex_unlock(&hwmon->hwmon_lock); 522 521 523 522 xe_pm_runtime_put(hwmon->xe); 524 523 525 - x = REG_FIELD_GET(PWR_LIM_TIME_X, r); 526 - y = REG_FIELD_GET(PWR_LIM_TIME_Y, r); 524 + x = REG_FIELD_GET(PWR_LIM_TIME_X, reg_val); 525 + y = REG_FIELD_GET(PWR_LIM_TIME_Y, reg_val); 527 526 528 527 /* 529 528 * tau = (1 + (x / 4)) * power(2,y), x = bits(23:22), y = bits(21:17)
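The comment's encoding, tau = (1 + x/4) * 2^y, decodes cleanly in integer arithmetic by working in quarter units. A standalone sketch with a worked value (field positions taken from the comment; helper name illustrative):

#include <stdint.h>
#include <stdio.h>

/* x = bits 23:22 (2-bit mantissa fraction), y = bits 21:17 (exponent);
 * (4 + x) << y yields tau scaled by 4, avoiding floating point. */
static uint64_t decode_tau_quarters(uint32_t reg)
{
        uint32_t x = (reg >> 22) & 0x3;
        uint32_t y = (reg >> 17) & 0x1f;

        return (uint64_t)(4 + x) << y;
}

int main(void)
{
        uint32_t reg = (2u << 22) | (3u << 17); /* x = 2, y = 3 */

        /* (1 + 2/4) * 2^3 = 12 time units -> printed as 48 quarters */
        printf("tau*4 = %llu\n",
               (unsigned long long)decode_tau_quarters(reg));
        return 0;
}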
+4 -1
drivers/gpu/drm/xe/xe_nvm.c
··· 35 35 36 36 static void xe_nvm_release_dev(struct device *dev) 37 37 { 38 + struct auxiliary_device *aux = container_of(dev, struct auxiliary_device, dev); 39 + struct intel_dg_nvm_dev *nvm = container_of(aux, struct intel_dg_nvm_dev, aux_dev); 40 + 41 + kfree(nvm); 38 42 } 39 43 40 44 static bool xe_nvm_non_posted_erase(struct xe_device *xe) ··· 166 162 167 163 auxiliary_device_delete(&nvm->aux_dev); 168 164 auxiliary_device_uninit(&nvm->aux_dev); 169 - kfree(nvm); 170 165 xe->nvm = NULL; 171 166 }
+7 -5
drivers/gpu/drm/xe/xe_tile_sysfs.c
··· 44 44 kt->tile = tile; 45 45 46 46 err = kobject_add(&kt->base, &dev->kobj, "tile%d", tile->id); 47 - if (err) { 48 - kobject_put(&kt->base); 49 - return err; 50 - } 47 + if (err) 48 + goto err_object; 51 49 52 50 tile->sysfs = &kt->base; 53 51 54 52 err = xe_vram_freq_sysfs_init(tile); 55 53 if (err) 56 - return err; 54 + goto err_object; 57 55 58 56 return devm_add_action_or_reset(xe->drm.dev, tile_sysfs_fini, tile); 57 + 58 + err_object: 59 + kobject_put(&kt->base); 60 + return err; 59 61 }
+2 -2
drivers/gpu/drm/xe/xe_vm.c
··· 240 240 241 241 pfence = xe_preempt_fence_create(q, q->lr.context, 242 242 ++q->lr.seqno); 243 - if (!pfence) { 244 - err = -ENOMEM; 243 + if (IS_ERR(pfence)) { 244 + err = PTR_ERR(pfence); 245 245 goto out_fini; 246 246 } 247 247
+1
drivers/iommu/amd/amd_iommu_types.h
··· 555 555 }; 556 556 557 557 struct amd_io_pgtable { 558 + seqcount_t seqcount; /* Protects root/mode update */ 558 559 struct io_pgtable pgtbl; 559 560 int mode; 560 561 u64 *root;
+5 -4
drivers/iommu/amd/init.c
··· 1455 1455 PCI_FUNC(e->devid)); 1456 1456 1457 1457 devid = e->devid; 1458 - for (dev_i = devid_start; dev_i <= devid; ++dev_i) { 1459 - if (alias) 1458 + if (alias) { 1459 + for (dev_i = devid_start; dev_i <= devid; ++dev_i) 1460 1460 pci_seg->alias_table[dev_i] = devid_to; 1461 + set_dev_entry_from_acpi(iommu, devid_to, flags, ext_flags); 1461 1462 } 1462 1463 set_dev_entry_from_acpi_range(iommu, devid_start, devid, flags, ext_flags); 1463 - set_dev_entry_from_acpi(iommu, devid_to, flags, ext_flags); 1464 1464 break; 1465 1465 case IVHD_DEV_SPECIAL: { 1466 1466 u8 handle, type; ··· 3067 3067 3068 3068 if (!boot_cpu_has(X86_FEATURE_CX16)) { 3069 3069 pr_err("Failed to initialize. The CMPXCHG16B feature is required.\n"); 3070 - return -EINVAL; 3070 + ret = -EINVAL; 3071 + goto out; 3071 3072 } 3072 3073 3073 3074 /*
+21 -4
drivers/iommu/amd/io_pgtable.c
··· 17 17 #include <linux/slab.h> 18 18 #include <linux/types.h> 19 19 #include <linux/dma-mapping.h> 20 + #include <linux/seqlock.h> 20 21 21 22 #include <asm/barrier.h> 22 23 ··· 131 130 132 131 *pte = PM_LEVEL_PDE(pgtable->mode, iommu_virt_to_phys(pgtable->root)); 133 132 133 + write_seqcount_begin(&pgtable->seqcount); 134 134 pgtable->root = pte; 135 135 pgtable->mode += 1; 136 + write_seqcount_end(&pgtable->seqcount); 137 + 136 138 amd_iommu_update_and_flush_device_table(domain); 137 139 138 140 pte = NULL; ··· 157 153 { 158 154 unsigned long last_addr = address + (page_size - 1); 159 155 struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg; 156 + unsigned int seqcount; 160 157 int level, end_lvl; 161 158 u64 *pte, *page; 162 159 ··· 175 170 } 176 171 177 172 178 - level = pgtable->mode - 1; 179 - pte = &pgtable->root[PM_LEVEL_INDEX(level, address)]; 173 + do { 174 + seqcount = read_seqcount_begin(&pgtable->seqcount); 175 + 176 + level = pgtable->mode - 1; 177 + pte = &pgtable->root[PM_LEVEL_INDEX(level, address)]; 178 + } while (read_seqcount_retry(&pgtable->seqcount, seqcount)); 179 + 180 + 180 181 address = PAGE_SIZE_ALIGN(address, page_size); 181 182 end_lvl = PAGE_SIZE_LEVEL(page_size); 182 183 ··· 260 249 unsigned long *page_size) 261 250 { 262 251 int level; 252 + unsigned int seqcount; 263 253 u64 *pte; 264 254 265 255 *page_size = 0; ··· 268 256 if (address > PM_LEVEL_SIZE(pgtable->mode)) 269 257 return NULL; 270 258 271 - level = pgtable->mode - 1; 272 - pte = &pgtable->root[PM_LEVEL_INDEX(level, address)]; 259 + do { 260 + seqcount = read_seqcount_begin(&pgtable->seqcount); 261 + level = pgtable->mode - 1; 262 + pte = &pgtable->root[PM_LEVEL_INDEX(level, address)]; 263 + } while (read_seqcount_retry(&pgtable->seqcount, seqcount)); 264 + 273 265 *page_size = PTE_LEVEL_PAGE_SIZE(level); 274 266 275 267 while (level > 0) { ··· 557 541 if (!pgtable->root) 558 542 return NULL; 559 543 pgtable->mode = PAGE_MODE_3_LEVEL; 544 + seqcount_init(&pgtable->seqcount); 560 545 561 546 cfg->pgsize_bitmap = amd_iommu_pgsize_bitmap; 562 547 cfg->ias = IOMMU_IN_ADDR_BIT_SIZE;
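The seqcount closes a window where the grow path swaps root and bumps mode while a lockless walker reads them, which could pair the new mode with the old root. A minimal C11-atomics analogue of the write/read pairing (the kernel's seqcount_t adds the proper barriers; the data fields are left plain here for clarity, where a strict C11 version would make them relaxed atomics):

#include <stdatomic.h>

static _Atomic unsigned int seq;
static int mode;        /* stands in for pgtable->mode */
static void *root;      /* stands in for pgtable->root */

/* Writer: sequence goes odd before the update and even after. */
static void grow_table(void *new_root, int new_mode)
{
        atomic_fetch_add_explicit(&seq, 1, memory_order_release);
        root = new_root;
        mode = new_mode;
        atomic_fetch_add_explicit(&seq, 1, memory_order_release);
}

/* Reader: snapshot both fields, retry if the sequence was odd or
 * moved -- the same shape as the read_seqcount_retry() loops above. */
static void *snapshot(int *out_mode)
{
        unsigned int s;
        void *r;

        do {
                s = atomic_load_explicit(&seq, memory_order_acquire);
                r = root;
                *out_mode = mode;
        } while ((s & 1) ||
                 atomic_load_explicit(&seq, memory_order_acquire) != s);
        return r;
}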
+6 -1
drivers/iommu/intel/iommu.c
··· 1575 1575 unsigned long lvl_pages = lvl_to_nr_pages(level); 1576 1576 struct dma_pte *pte = NULL; 1577 1577 1578 + if (WARN_ON(!IS_ALIGNED(start_pfn, lvl_pages) || 1579 + !IS_ALIGNED(end_pfn + 1, lvl_pages))) 1580 + return; 1581 + 1578 1582 while (start_pfn <= end_pfn) { 1579 1583 if (!pte) 1580 1584 pte = pfn_to_dma_pte(domain, start_pfn, &level, ··· 1654 1650 unsigned long pages_to_remove; 1655 1651 1656 1652 pteval |= DMA_PTE_LARGE_PAGE; 1657 - pages_to_remove = min_t(unsigned long, nr_pages, 1653 + pages_to_remove = min_t(unsigned long, 1654 + round_down(nr_pages, lvl_pages), 1658 1655 nr_pte_to_next_page(pte) * lvl_pages); 1659 1656 end_pfn = iov_pfn + pages_to_remove - 1; 1660 1657 switch_to_super_page(domain, iov_pfn, end_pfn, largepage_lvl);
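The round_down() guarantees each iteration maps a whole number of superpages, which is exactly what the new WARN_ON in switch_to_super_page() checks; previously a tail shorter than lvl_pages could slip into the superpage path. A worked example of the arithmetic:

#include <stdio.h>

int main(void)
{
        unsigned long lvl_pages = 512;  /* one 2 MiB superpage in 4 KiB pages */
        unsigned long nr_pages = 700;   /* pages still to map */
        unsigned long step = nr_pages - (nr_pages % lvl_pages); /* round_down */

        /* Only 512 of the 700 pages form whole superpages this pass;
         * the remaining 188 are mapped on the next iteration at a
         * smaller page size, keeping the range lvl_pages-aligned. */
        printf("pages_to_remove = %lu\n", step);
        return 0;
}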
+21 -8
drivers/iommu/s390-iommu.c
··· 612 612 } 613 613 } 614 614 615 + static bool reg_ioat_propagate_error(int cc, u8 status) 616 + { 617 + /* 618 + * If the device is in the error state the reset routine 619 + * will register the IOAT of the newly set domain on re-enable 620 + */ 621 + if (cc == ZPCI_CC_ERR && status == ZPCI_PCI_ST_FUNC_NOT_AVAIL) 622 + return false; 623 + /* 624 + * If the device was removed treat registration as success 625 + * and let the subsequent error event trigger tear down. 626 + */ 627 + if (cc == ZPCI_CC_INVAL_HANDLE) 628 + return false; 629 + return cc != ZPCI_CC_OK; 630 + } 631 + 615 632 static int s390_iommu_domain_reg_ioat(struct zpci_dev *zdev, 616 633 struct iommu_domain *domain, u8 *status) 617 634 { ··· 713 696 714 697 /* If we fail now DMA remains blocked via blocking domain */ 715 698 cc = s390_iommu_domain_reg_ioat(zdev, domain, &status); 716 - if (cc && status != ZPCI_PCI_ST_FUNC_NOT_AVAIL) 699 + if (reg_ioat_propagate_error(cc, status)) 717 700 return -EIO; 718 701 zdev->dma_table = s390_domain->dma_table; 719 702 zdev_s390_domain_update(zdev, domain); ··· 1049 1032 1050 1033 lockdep_assert_held(&zdev->dom_lock); 1051 1034 1052 - if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED) 1035 + if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED || 1036 + zdev->s390_domain->type == IOMMU_DOMAIN_IDENTITY) 1053 1037 return NULL; 1054 1038 1055 1039 s390_domain = to_s390_domain(zdev->s390_domain); ··· 1141 1123 1142 1124 /* If we fail now DMA remains blocked via blocking domain */ 1143 1125 cc = s390_iommu_domain_reg_ioat(zdev, domain, &status); 1144 - 1145 - /* 1146 - * If the device is undergoing error recovery the reset code 1147 - * will re-establish the new domain. 1148 - */ 1149 - if (cc && status != ZPCI_PCI_ST_FUNC_NOT_AVAIL) 1126 + if (reg_ioat_propagate_error(cc, status)) 1150 1127 return -EIO; 1151 1128 1152 1129 zdev_s390_domain_update(zdev, domain);
+1 -1
drivers/md/dm-integrity.c
··· 133 133 commit_id_t commit_id; 134 134 }; 135 135 136 - #define MAX_TAG_SIZE (JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR - offsetof(struct journal_entry, last_bytes[MAX_SECTORS_PER_BLOCK])) 136 + #define MAX_TAG_SIZE 255 137 137 138 138 #define METADATA_PADDING_SECTORS 8 139 139
+4 -2
drivers/md/dm-raid.c
··· 3813 3813 struct raid_set *rs = ti->private; 3814 3814 unsigned int chunk_size_bytes = to_bytes(rs->md.chunk_sectors); 3815 3815 3816 - limits->io_min = chunk_size_bytes; 3817 - limits->io_opt = chunk_size_bytes * mddev_data_stripes(rs); 3816 + if (chunk_size_bytes) { 3817 + limits->io_min = chunk_size_bytes; 3818 + limits->io_opt = chunk_size_bytes * mddev_data_stripes(rs); 3819 + } 3818 3820 } 3819 3821 3820 3822 static void raid_presuspend(struct dm_target *ti)
+7 -3
drivers/md/dm-stripe.c
··· 456 456 struct queue_limits *limits) 457 457 { 458 458 struct stripe_c *sc = ti->private; 459 - unsigned int chunk_size = sc->chunk_size << SECTOR_SHIFT; 459 + unsigned int io_min, io_opt; 460 460 461 461 limits->chunk_sectors = sc->chunk_size; 462 - limits->io_min = chunk_size; 463 - limits->io_opt = chunk_size * sc->stripes; 462 + 463 + if (!check_shl_overflow(sc->chunk_size, SECTOR_SHIFT, &io_min) && 464 + !check_mul_overflow(io_min, sc->stripes, &io_opt)) { 465 + limits->io_min = io_min; 466 + limits->io_opt = io_opt; 467 + } 464 468 } 465 469 466 470 static struct target_type stripe_target = {
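With the checks in place, io_min and io_opt are only published when neither the shift nor the multiply wraps, leaving the stacked defaults intact for pathological chunk sizes. A userspace analogue using the compiler builtins that back check_shl_overflow()/check_mul_overflow():

#include <stdio.h>

int main(void)
{
        unsigned int chunk_size = 1u << 23;     /* sectors; deliberately huge */
        unsigned int stripes = 1024;
        unsigned int io_min, io_opt;

        /* chunk_size << 9 is chunk_size * 512 in bytes; publish the
         * limits only if neither step overflows unsigned int. */
        if (!__builtin_mul_overflow(chunk_size, 512u, &io_min) &&
            !__builtin_mul_overflow(io_min, stripes, &io_opt))
                printf("io_min=%u io_opt=%u\n", io_min, io_opt);
        else
                printf("limits left at defaults\n");
        return 0;
}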
+1
drivers/md/md-linear.c
··· 73 73 md_init_stacking_limits(&lim); 74 74 lim.max_hw_sectors = mddev->chunk_sectors; 75 75 lim.max_write_zeroes_sectors = mddev->chunk_sectors; 76 + lim.max_hw_wzeroes_unmap_sectors = mddev->chunk_sectors; 76 77 lim.io_min = mddev->chunk_sectors << 9; 77 78 err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY); 78 79 if (err)
+1
drivers/md/raid0.c
··· 382 382 md_init_stacking_limits(&lim); 383 383 lim.max_hw_sectors = mddev->chunk_sectors; 384 384 lim.max_write_zeroes_sectors = mddev->chunk_sectors; 385 + lim.max_hw_wzeroes_unmap_sectors = mddev->chunk_sectors; 385 386 lim.io_min = mddev->chunk_sectors << 9; 386 387 lim.io_opt = lim.io_min * mddev->raid_disks; 387 388 lim.chunk_sectors = mddev->chunk_sectors;
+1
drivers/md/raid1.c
··· 3211 3211 3212 3212 md_init_stacking_limits(&lim); 3213 3213 lim.max_write_zeroes_sectors = 0; 3214 + lim.max_hw_wzeroes_unmap_sectors = 0; 3214 3215 lim.features |= BLK_FEAT_ATOMIC_WRITES; 3215 3216 err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY); 3216 3217 if (err)
+1
drivers/md/raid10.c
··· 4008 4008 4009 4009 md_init_stacking_limits(&lim); 4010 4010 lim.max_write_zeroes_sectors = 0; 4011 + lim.max_hw_wzeroes_unmap_sectors = 0; 4011 4012 lim.io_min = mddev->chunk_sectors << 9; 4012 4013 lim.chunk_sectors = mddev->chunk_sectors; 4013 4014 lim.io_opt = lim.io_min * raid10_nr_stripes(conf);
+1
drivers/md/raid5.c
··· 7732 7732 lim.features |= BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE; 7733 7733 lim.discard_granularity = stripe; 7734 7734 lim.max_write_zeroes_sectors = 0; 7735 + lim.max_hw_wzeroes_unmap_sectors = 0; 7735 7736 mddev_stack_rdev_limits(mddev, &lim, 0); 7736 7737 rdev_for_each(rdev, mddev) 7737 7738 queue_limits_stack_bdev(&lim, rdev->bdev, rdev->new_data_offset,
+1 -1
drivers/mmc/host/mvsdio.c
··· 292 292 host->pio_ptr = NULL; 293 293 host->pio_size = 0; 294 294 } else { 295 - dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_frags, 295 + dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 296 296 mmc_get_dma_dir(data)); 297 297 } 298 298
+67 -1
drivers/mmc/host/sdhci-pci-gli.c
··· 283 283 #define PCIE_GLI_9767_UHS2_CTL2_ZC_VALUE 0xb 284 284 #define PCIE_GLI_9767_UHS2_CTL2_ZC_CTL BIT(6) 285 285 #define PCIE_GLI_9767_UHS2_CTL2_ZC_CTL_VALUE 0x1 286 + #define PCIE_GLI_9767_UHS2_CTL2_FORCE_PHY_RESETN BIT(13) 287 + #define PCIE_GLI_9767_UHS2_CTL2_FORCE_RESETN_VALUE BIT(14) 286 288 287 289 #define GLI_MAX_TUNING_LOOP 40 288 290 ··· 1181 1179 gl9767_vhs_read(pdev); 1182 1180 } 1183 1181 1182 + static void sdhci_gl9767_uhs2_phy_reset(struct sdhci_host *host, bool assert) 1183 + { 1184 + struct sdhci_pci_slot *slot = sdhci_priv(host); 1185 + struct pci_dev *pdev = slot->chip->pdev; 1186 + u32 value, set, clr; 1187 + 1188 + if (assert) { 1189 + /* Assert reset, set RESETN and clean RESETN_VALUE */ 1190 + set = PCIE_GLI_9767_UHS2_CTL2_FORCE_PHY_RESETN; 1191 + clr = PCIE_GLI_9767_UHS2_CTL2_FORCE_RESETN_VALUE; 1192 + } else { 1193 + /* De-assert reset, clean RESETN and set RESETN_VALUE */ 1194 + set = PCIE_GLI_9767_UHS2_CTL2_FORCE_RESETN_VALUE; 1195 + clr = PCIE_GLI_9767_UHS2_CTL2_FORCE_PHY_RESETN; 1196 + } 1197 + 1198 + gl9767_vhs_write(pdev); 1199 + pci_read_config_dword(pdev, PCIE_GLI_9767_UHS2_CTL2, &value); 1200 + value |= set; 1201 + pci_write_config_dword(pdev, PCIE_GLI_9767_UHS2_CTL2, value); 1202 + value &= ~clr; 1203 + pci_write_config_dword(pdev, PCIE_GLI_9767_UHS2_CTL2, value); 1204 + gl9767_vhs_read(pdev); 1205 + } 1206 + 1207 + static void __gl9767_uhs2_set_power(struct sdhci_host *host, unsigned char mode, unsigned short vdd) 1208 + { 1209 + u8 pwr = 0; 1210 + 1211 + if (mode != MMC_POWER_OFF) { 1212 + pwr = sdhci_get_vdd_value(vdd); 1213 + if (!pwr) 1214 + WARN(1, "%s: Invalid vdd %#x\n", 1215 + mmc_hostname(host->mmc), vdd); 1216 + pwr |= SDHCI_VDD2_POWER_180; 1217 + } 1218 + 1219 + if (host->pwr == pwr) 1220 + return; 1221 + 1222 + host->pwr = pwr; 1223 + 1224 + if (pwr == 0) { 1225 + sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 1226 + } else { 1227 + sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 1228 + 1229 + pwr |= SDHCI_POWER_ON; 1230 + sdhci_writeb(host, pwr & 0xf, SDHCI_POWER_CONTROL); 1231 + usleep_range(5000, 6250); 1232 + 1233 + /* Assert reset */ 1234 + sdhci_gl9767_uhs2_phy_reset(host, true); 1235 + pwr |= SDHCI_VDD2_POWER_ON; 1236 + sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); 1237 + usleep_range(5000, 6250); 1238 + } 1239 + } 1240 + 1184 1241 static void sdhci_gl9767_set_clock(struct sdhci_host *host, unsigned int clock) 1185 1242 { 1186 1243 struct sdhci_pci_slot *slot = sdhci_priv(host); ··· 1266 1205 } 1267 1206 1268 1207 sdhci_enable_clk(host, clk); 1208 + 1209 + if (mmc_card_uhs2(host->mmc)) 1210 + /* De-assert reset */ 1211 + sdhci_gl9767_uhs2_phy_reset(host, false); 1212 + 1269 1213 gl9767_set_low_power_negotiation(pdev, true); 1270 1214 } 1271 1215 ··· 1542 1476 gl9767_vhs_read(pdev); 1543 1477 1544 1478 sdhci_gli_overcurrent_event_enable(host, false); 1545 - sdhci_uhs2_set_power(host, mode, vdd); 1479 + __gl9767_uhs2_set_power(host, mode, vdd); 1546 1480 sdhci_gli_overcurrent_event_enable(host, true); 1547 1481 } else { 1548 1482 gl9767_vhs_write(pdev);
+2 -1
drivers/mmc/host/sdhci-uhs2.c
··· 295 295 else 296 296 sdhci_uhs2_set_power(host, ios->power_mode, ios->vdd); 297 297 298 - sdhci_set_clock(host, host->clock); 298 + host->ops->set_clock(host, ios->clock); 299 + host->clock = ios->clock; 299 300 } 300 301 301 302 static int sdhci_uhs2_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+17 -17
drivers/mmc/host/sdhci.c
··· 2367 2367 (ios->power_mode == MMC_POWER_UP) && 2368 2368 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) 2369 2369 sdhci_enable_preset_value(host, false); 2370 - 2371 - if (!ios->clock || ios->clock != host->clock) { 2372 - host->ops->set_clock(host, ios->clock); 2373 - host->clock = ios->clock; 2374 - 2375 - if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK && 2376 - host->clock) { 2377 - host->timeout_clk = mmc->actual_clock ? 2378 - mmc->actual_clock / 1000 : 2379 - host->clock / 1000; 2380 - mmc->max_busy_timeout = 2381 - host->ops->get_max_timeout_count ? 2382 - host->ops->get_max_timeout_count(host) : 2383 - 1 << 27; 2384 - mmc->max_busy_timeout /= host->timeout_clk; 2385 - } 2386 - } 2387 2370 } 2388 2371 EXPORT_SYMBOL_GPL(sdhci_set_ios_common); 2389 2372 ··· 2392 2409 turning_on_clk = ios->clock != host->clock && ios->clock && !host->clock; 2393 2410 2394 2411 sdhci_set_ios_common(mmc, ios); 2412 + 2413 + if (!ios->clock || ios->clock != host->clock) { 2414 + host->ops->set_clock(host, ios->clock); 2415 + host->clock = ios->clock; 2416 + 2417 + if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK && 2418 + host->clock) { 2419 + host->timeout_clk = mmc->actual_clock ? 2420 + mmc->actual_clock / 1000 : 2421 + host->clock / 1000; 2422 + mmc->max_busy_timeout = 2423 + host->ops->get_max_timeout_count ? 2424 + host->ops->get_max_timeout_count(host) : 2425 + 1 << 27; 2426 + mmc->max_busy_timeout /= host->timeout_clk; 2427 + } 2428 + } 2395 2429 2396 2430 if (host->ops->set_power) 2397 2431 host->ops->set_power(host, ios->power_mode, ios->vdd);
+1 -1
drivers/net/bonding/bond_main.c
··· 2132 2132 memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len); 2133 2133 } else if (bond->params.fail_over_mac == BOND_FOM_FOLLOW && 2134 2134 BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP && 2135 + bond_has_slaves(bond) && 2135 2136 memcmp(slave_dev->dev_addr, bond_dev->dev_addr, bond_dev->addr_len) == 0) { 2136 2137 /* Set slave to random address to avoid duplicate mac 2137 2138 * address in later fail over. ··· 3356 3355 /* Find out through which dev should the packet go */ 3357 3356 memset(&fl6, 0, sizeof(struct flowi6)); 3358 3357 fl6.daddr = targets[i]; 3359 - fl6.flowi6_oif = bond->dev->ifindex; 3360 3358 3361 3359 dst = ip6_route_output(dev_net(bond->dev), NULL, &fl6); 3362 3360 if (dst->error) {
+1 -2
drivers/net/ethernet/broadcom/cnic.c
··· 4230 4230 4231 4231 cnic_bnx2x_delete_wait(dev, 0); 4232 4232 4233 - cancel_delayed_work(&cp->delete_task); 4234 - flush_workqueue(cnic_wq); 4233 + cancel_delayed_work_sync(&cp->delete_task); 4235 4234 4236 4235 if (atomic_read(&cp->iscsi_conn) != 0) 4237 4236 netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
+1 -1
drivers/net/ethernet/cavium/liquidio/request_manager.c
··· 126 126 oct->io_qmask.iq |= BIT_ULL(iq_no); 127 127 128 128 /* Set the 32B/64B mode for each input queue */ 129 - oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no); 129 + oct->io_qmask.iq64B |= ((u64)(conf->instr_type == 64) << iq_no); 130 130 iq->iqcmd_64B = (conf->instr_type == 64); 131 131 132 132 oct->fn_list.setup_iq_regs(oct, iq_no);
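
Note: the cast matters because (conf->instr_type == 64) has type int, so shifting it by an iq_no of 32 or more is undefined behaviour and can never set the upper bits of the 64-bit mask. A standalone demonstration; the queue index 40 is an arbitrary example:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t mask = 0;
        int iq_no = 40;   /* any queue index >= 32 triggers the bug */
        int is_64b = 1;   /* what (conf->instr_type == 64) evaluates to */

        /* Buggy form: "mask |= is_64b << iq_no;" shifts a 32-bit int. */
        /* Fixed form widens first, exactly as the hunk does: */
        mask |= ((uint64_t)is_64b << iq_no);

        printf("mask = %#llx\n", (unsigned long long)mask);
        return 0;
    }
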
+1 -1
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
··· 2736 2736 dev_err(dev, "dpsw_ctrl_if_set_pools() failed\n"); 2737 2737 goto err_get_attr; 2738 2738 } 2739 - ethsw->bpid = dpbp_attrs.id; 2739 + ethsw->bpid = dpbp_attrs.bpid; 2740 2740 2741 2741 return 0; 2742 2742
-3
drivers/net/ethernet/intel/i40e/i40e_txrx.c
··· 948 948 if (!eop_desc) 949 949 break; 950 950 951 - /* prevent any other reads prior to eop_desc */ 952 - smp_rmb(); 953 - 954 951 i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); 955 952 /* we have caught up to head, no work left to do */ 956 953 if (tx_head == tx_desc)
+34 -46
drivers/net/ethernet/intel/ice/ice_txrx.c
··· 894 894 __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page, 895 895 rx_buf->page_offset, size); 896 896 sinfo->xdp_frags_size += size; 897 - /* remember frag count before XDP prog execution; bpf_xdp_adjust_tail() 898 - * can pop off frags but driver has to handle it on its own 899 - */ 900 - rx_ring->nr_frags = sinfo->nr_frags; 901 897 902 898 if (page_is_pfmemalloc(rx_buf->page)) 903 899 xdp_buff_set_frag_pfmemalloc(xdp); ··· 964 968 /** 965 969 * ice_get_pgcnts - grab page_count() for gathered fragments 966 970 * @rx_ring: Rx descriptor ring to store the page counts on 971 + * @ntc: the next to clean element (not included in this frame!) 967 972 * 968 973 * This function is intended to be called right before running XDP 969 974 * program so that the page recycling mechanism will be able to take 970 975 * a correct decision regarding underlying pages; this is done in such 971 976 * way as XDP program can change the refcount of page 972 977 */ 973 - static void ice_get_pgcnts(struct ice_rx_ring *rx_ring) 978 + static void ice_get_pgcnts(struct ice_rx_ring *rx_ring, unsigned int ntc) 974 979 { 975 - u32 nr_frags = rx_ring->nr_frags + 1; 976 980 u32 idx = rx_ring->first_desc; 977 981 struct ice_rx_buf *rx_buf; 978 982 u32 cnt = rx_ring->count; 979 983 980 - for (int i = 0; i < nr_frags; i++) { 984 + while (idx != ntc) { 981 985 rx_buf = &rx_ring->rx_buf[idx]; 982 986 rx_buf->pgcnt = page_count(rx_buf->page); 983 987 ··· 1150 1154 } 1151 1155 1152 1156 /** 1153 - * ice_put_rx_mbuf - ice_put_rx_buf() caller, for all frame frags 1157 + * ice_put_rx_mbuf - ice_put_rx_buf() caller, for all buffers in frame 1154 1158 * @rx_ring: Rx ring with all the auxiliary data 1155 1159 * @xdp: XDP buffer carrying linear + frags part 1156 - * @xdp_xmit: XDP_TX/XDP_REDIRECT verdict storage 1157 - * @ntc: a current next_to_clean value to be stored at rx_ring 1160 + * @ntc: the next to clean element (not included in this frame!) 1158 1161 * @verdict: return code from XDP program execution 1159 1162 * 1160 - * Walk through gathered fragments and satisfy internal page 1161 - * recycle mechanism; we take here an action related to verdict 1162 - * returned by XDP program; 1163 + * Called after XDP program is completed, or on error with verdict set to 1164 + * ICE_XDP_CONSUMED. 1165 + * 1166 + * Walk through buffers from first_desc to the end of the frame, releasing 1167 + * buffers and satisfying internal page recycle mechanism. The action depends 1168 + * on verdict from XDP program. 1163 1169 */ 1164 1170 static void ice_put_rx_mbuf(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, 1165 - u32 *xdp_xmit, u32 ntc, u32 verdict) 1171 + u32 ntc, u32 verdict) 1166 1172 { 1167 - u32 nr_frags = rx_ring->nr_frags + 1; 1168 1173 u32 idx = rx_ring->first_desc; 1169 1174 u32 cnt = rx_ring->count; 1170 - u32 post_xdp_frags = 1; 1171 1175 struct ice_rx_buf *buf; 1172 - int i; 1176 + u32 xdp_frags = 0; 1177 + int i = 0; 1173 1178 1174 1179 if (unlikely(xdp_buff_has_frags(xdp))) 1175 - post_xdp_frags += xdp_get_shared_info_from_buff(xdp)->nr_frags; 1180 + xdp_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags; 1176 1181 1177 - for (i = 0; i < post_xdp_frags; i++) { 1182 + while (idx != ntc) { 1178 1183 buf = &rx_ring->rx_buf[idx]; 1184 + if (++idx == cnt) 1185 + idx = 0; 1179 1186 1180 - if (verdict & (ICE_XDP_TX | ICE_XDP_REDIR)) { 1187 + /* An XDP program could release fragments from the end of the 1188 + * buffer. For these, we need to keep the pagecnt_bias as-is. 
1189 + * To do this, only adjust pagecnt_bias for fragments up to 1190 + * the total remaining after the XDP program has run. 1191 + */ 1192 + if (verdict != ICE_XDP_CONSUMED) 1181 1193 ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz); 1182 - *xdp_xmit |= verdict; 1183 - } else if (verdict & ICE_XDP_CONSUMED) { 1194 + else if (i++ <= xdp_frags) 1184 1195 buf->pagecnt_bias++; 1185 - } else if (verdict == ICE_XDP_PASS) { 1186 - ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz); 1187 - } 1188 1196 1189 1197 ice_put_rx_buf(rx_ring, buf); 1190 - 1191 - if (++idx == cnt) 1192 - idx = 0; 1193 - } 1194 - /* handle buffers that represented frags released by XDP prog; 1195 - * for these we keep pagecnt_bias as-is; refcount from struct page 1196 - * has been decremented within XDP prog and we do not have to increase 1197 - * the biased refcnt 1198 - */ 1199 - for (; i < nr_frags; i++) { 1200 - buf = &rx_ring->rx_buf[idx]; 1201 - ice_put_rx_buf(rx_ring, buf); 1202 - if (++idx == cnt) 1203 - idx = 0; 1204 1198 } 1205 1199 1206 1200 xdp->data = NULL; 1207 1201 rx_ring->first_desc = ntc; 1208 - rx_ring->nr_frags = 0; 1209 1202 } 1210 1203 1211 1204 /** ··· 1302 1317 /* retrieve a buffer from the ring */ 1303 1318 rx_buf = ice_get_rx_buf(rx_ring, size, ntc); 1304 1319 1320 + /* Increment ntc before calls to ice_put_rx_mbuf() */ 1321 + if (++ntc == cnt) 1322 + ntc = 0; 1323 + 1305 1324 if (!xdp->data) { 1306 1325 void *hard_start; 1307 1326 ··· 1314 1325 xdp_prepare_buff(xdp, hard_start, offset, size, !!offset); 1315 1326 xdp_buff_clear_frags_flag(xdp); 1316 1327 } else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) { 1317 - ice_put_rx_mbuf(rx_ring, xdp, NULL, ntc, ICE_XDP_CONSUMED); 1328 + ice_put_rx_mbuf(rx_ring, xdp, ntc, ICE_XDP_CONSUMED); 1318 1329 break; 1319 1330 } 1320 - if (++ntc == cnt) 1321 - ntc = 0; 1322 1331 1323 1332 /* skip if it is NOP desc */ 1324 1333 if (ice_is_non_eop(rx_ring, rx_desc)) 1325 1334 continue; 1326 1335 1327 - ice_get_pgcnts(rx_ring); 1336 + ice_get_pgcnts(rx_ring, ntc); 1328 1337 xdp_verdict = ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_desc); 1329 1338 if (xdp_verdict == ICE_XDP_PASS) 1330 1339 goto construct_skb; 1331 1340 total_rx_bytes += xdp_get_buff_len(xdp); 1332 1341 total_rx_pkts++; 1333 1342 1334 - ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict); 1343 + ice_put_rx_mbuf(rx_ring, xdp, ntc, xdp_verdict); 1344 + xdp_xmit |= xdp_verdict & (ICE_XDP_TX | ICE_XDP_REDIR); 1335 1345 1336 1346 continue; 1337 1347 construct_skb: ··· 1343 1355 rx_ring->ring_stats->rx_stats.alloc_buf_failed++; 1344 1356 xdp_verdict = ICE_XDP_CONSUMED; 1345 1357 } 1346 - ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict); 1358 + ice_put_rx_mbuf(rx_ring, xdp, ntc, xdp_verdict); 1347 1359 1348 1360 if (!skb) 1349 1361 break;
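
Note: with nr_frags gone, the frame's buffers are identified purely by ring position: everything from first_desc up to, but excluding, ntc belongs to the current frame. A standalone model of the wrap-around walk now used by ice_get_pgcnts() and ice_put_rx_mbuf(); the ring size and indices are example values:

    #include <stdio.h>

    #define RING_SIZE 8

    int main(void)
    {
        unsigned int first_desc = 6, ntc = 2; /* frame occupies 6, 7, 0, 1 */
        unsigned int idx = first_desc;

        while (idx != ntc) {                  /* ntc starts the next frame */
            printf("process rx_buf[%u]\n", idx);
            if (++idx == RING_SIZE)
                idx = 0;                      /* wrap around the ring */
        }
        return 0;
    }
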
-1
drivers/net/ethernet/intel/ice/ice_txrx.h
··· 358 358 struct ice_tx_ring *xdp_ring; 359 359 struct ice_rx_ring *next; /* pointer to next ring in q_vector */ 360 360 struct xsk_buff_pool *xsk_pool; 361 - u32 nr_frags; 362 361 u16 max_frame; 363 362 u16 rx_buf_len; 364 363 dma_addr_t dma; /* physical address of ring */
+1
drivers/net/ethernet/intel/igc/igc.h
··· 345 345 /* LEDs */ 346 346 struct mutex led_mutex; 347 347 struct igc_led_classdev *leds; 348 + bool leds_available; 348 349 }; 349 350 350 351 void igc_up(struct igc_adapter *adapter);
+9 -3
drivers/net/ethernet/intel/igc/igc_main.c
··· 7335 7335 7336 7336 if (IS_ENABLED(CONFIG_IGC_LEDS)) { 7337 7337 err = igc_led_setup(adapter); 7338 - if (err) 7339 - goto err_register; 7338 + if (err) { 7339 + netdev_warn_once(netdev, 7340 + "LED init failed (%d); continuing without LED support\n", 7341 + err); 7342 + adapter->leds_available = false; 7343 + } else { 7344 + adapter->leds_available = true; 7345 + } 7340 7346 } 7341 7347 7342 7348 return 0; ··· 7398 7392 cancel_work_sync(&adapter->watchdog_task); 7399 7393 hrtimer_cancel(&adapter->hrtimer); 7400 7394 7401 - if (IS_ENABLED(CONFIG_IGC_LEDS)) 7395 + if (IS_ENABLED(CONFIG_IGC_LEDS) && adapter->leds_available) 7402 7396 igc_led_free(adapter); 7403 7397 7404 7398 /* Release control of h/w to f/w. If f/w is AMT enabled, this
+12 -10
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
··· 6973 6973 break; 6974 6974 } 6975 6975 6976 + /* Make sure the SWFW semaphore is in a valid state */ 6977 + if (hw->mac.ops.init_swfw_sync) 6978 + hw->mac.ops.init_swfw_sync(hw); 6979 + 6980 + if (hw->mac.type == ixgbe_mac_e610) 6981 + mutex_init(&hw->aci.lock); 6982 + 6976 6983 #ifdef IXGBE_FCOE 6977 6984 /* FCoE support exists, always init the FCoE lock */ 6978 6985 spin_lock_init(&adapter->fcoe.lock); ··· 11650 11643 if (err) 11651 11644 goto err_sw_init; 11652 11645 11653 - /* Make sure the SWFW semaphore is in a valid state */ 11654 - if (hw->mac.ops.init_swfw_sync) 11655 - hw->mac.ops.init_swfw_sync(hw); 11656 - 11657 11646 if (ixgbe_check_fw_error(adapter)) 11658 11647 return ixgbe_recovery_probe(adapter); 11659 11648 ··· 11853 11850 ether_addr_copy(hw->mac.addr, hw->mac.perm_addr); 11854 11851 ixgbe_mac_set_default_filter(adapter); 11855 11852 11856 - if (hw->mac.type == ixgbe_mac_e610) 11857 - mutex_init(&hw->aci.lock); 11858 11853 timer_setup(&adapter->service_timer, ixgbe_service_timer, 0); 11859 11854 11860 11855 if (ixgbe_removed(hw->hw_addr)) { ··· 12008 12007 devl_unlock(adapter->devlink); 12009 12008 ixgbe_release_hw_control(adapter); 12010 12009 ixgbe_clear_interrupt_scheme(adapter); 12010 + err_sw_init: 12011 12011 if (hw->mac.type == ixgbe_mac_e610) 12012 12012 mutex_destroy(&adapter->hw.aci.lock); 12013 - err_sw_init: 12014 12013 ixgbe_disable_sriov(adapter); 12015 12014 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; 12016 12015 iounmap(adapter->io_addr); ··· 12061 12060 set_bit(__IXGBE_REMOVING, &adapter->state); 12062 12061 cancel_work_sync(&adapter->service_task); 12063 12062 12064 - if (adapter->hw.mac.type == ixgbe_mac_e610) { 12063 + if (adapter->hw.mac.type == ixgbe_mac_e610) 12065 12064 ixgbe_disable_link_status_events(adapter); 12066 - mutex_destroy(&adapter->hw.aci.lock); 12067 - } 12068 12065 12069 12066 if (adapter->mii_bus) 12070 12067 mdiobus_unregister(adapter->mii_bus); ··· 12121 12122 bitmap_free(adapter->af_xdp_zc_qps); 12122 12123 disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); 12123 12124 free_netdev(netdev); 12125 + 12126 + if (adapter->hw.mac.type == ixgbe_mac_e610) 12127 + mutex_destroy(&adapter->hw.aci.lock); 12124 12128 12125 12129 if (disable_dev) 12126 12130 pci_disable_device(pdev);
+16
drivers/net/ethernet/marvell/octeon_ep/octep_main.c
··· 1124 1124 return err; 1125 1125 } 1126 1126 1127 + static bool octep_is_vf_valid(struct octep_device *oct, int vf) 1128 + { 1129 + if (vf >= CFG_GET_ACTIVE_VFS(oct->conf)) { 1130 + netdev_err(oct->netdev, "Invalid VF ID %d\n", vf); 1131 + return false; 1132 + } 1133 + 1134 + return true; 1135 + } 1136 + 1127 1137 static int octep_get_vf_config(struct net_device *dev, int vf, 1128 1138 struct ifla_vf_info *ivi) 1129 1139 { 1130 1140 struct octep_device *oct = netdev_priv(dev); 1141 + 1142 + if (!octep_is_vf_valid(oct, vf)) 1143 + return -EINVAL; 1131 1144 1132 1145 ivi->vf = vf; 1133 1146 ether_addr_copy(ivi->mac, oct->vf_info[vf].mac_addr); ··· 1155 1142 { 1156 1143 struct octep_device *oct = netdev_priv(dev); 1157 1144 int err; 1145 + 1146 + if (!octep_is_vf_valid(oct, vf)) 1147 + return -EINVAL; 1158 1148 1159 1149 if (!is_valid_ether_addr(mac)) { 1160 1150 dev_err(&oct->pdev->dev, "Invalid MAC Address %pM\n", mac);
+3
drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
··· 196 196 vf_id); 197 197 return; 198 198 } 199 + ether_addr_copy(oct->vf_info[vf_id].mac_addr, rsp->s_set_mac.mac_addr); 199 200 rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK; 200 201 } 201 202 ··· 206 205 { 207 206 int err; 208 207 208 + /* Reset VF-specific information maintained by the PF */ 209 + memset(&oct->vf_info[vf_id], 0, sizeof(struct octep_pfvf_info)); 209 210 err = octep_ctrl_net_dev_remove(oct, vf_id); 210 211 if (err) { 211 212 rsp->s.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+1 -1
drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
··· 491 491 if (!ptp) 492 492 return; 493 493 494 - cancel_delayed_work(&pfvf->ptp->synctstamp_work); 494 + cancel_delayed_work_sync(&pfvf->ptp->synctstamp_work); 495 495 496 496 ptp_clock_unregister(ptp->ptp_clock); 497 497 kfree(ptp);
+1
drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
··· 92 92 MLX5E_ACCEL_FS_ESP_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1, 93 93 MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL, 94 94 MLX5E_ACCEL_FS_POL_FT_LEVEL, 95 + MLX5E_ACCEL_FS_POL_MISS_FT_LEVEL, 95 96 MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL, 96 97 #endif 97 98 };
+1
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
··· 185 185 u32 family; 186 186 int prio; 187 187 int pol_level; 188 + int pol_miss_level; 188 189 int sa_level; 189 190 int status_level; 190 191 enum mlx5_flow_namespace_type chains_ns;
+2 -1
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
··· 747 747 attr->family = family; 748 748 attr->prio = MLX5E_NIC_PRIO; 749 749 attr->pol_level = MLX5E_ACCEL_FS_POL_FT_LEVEL; 750 + attr->pol_miss_level = MLX5E_ACCEL_FS_POL_MISS_FT_LEVEL; 750 751 attr->sa_level = MLX5E_ACCEL_FS_ESP_FT_LEVEL; 751 752 attr->status_level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL; 752 753 attr->chains_ns = MLX5_FLOW_NAMESPACE_KERNEL; ··· 834 833 835 834 ft_attr.max_fte = 1; 836 835 ft_attr.autogroup.max_num_groups = 1; 837 - ft_attr.level = attr->pol_level; 836 + ft_attr.level = attr->pol_miss_level; 838 837 ft_attr.prio = attr->prio; 839 838 840 839 ft = mlx5_create_auto_grouped_flow_table(attr->ns, &ft_attr);
-2
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 139 139 if (up) { 140 140 netdev_info(priv->netdev, "Link up\n"); 141 141 netif_carrier_on(priv->netdev); 142 - mlx5e_port_manual_buffer_config(priv, 0, priv->netdev->mtu, 143 - NULL, NULL, NULL); 144 142 } else { 145 143 netdev_info(priv->netdev, "Link down\n"); 146 144 netif_carrier_off(priv->netdev);
+22 -5
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
··· 1506 1506 static int 1507 1507 mlx5e_vport_uplink_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) 1508 1508 { 1509 - struct mlx5e_priv *priv = netdev_priv(mlx5_uplink_netdev_get(dev)); 1510 1509 struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep); 1510 + struct net_device *netdev; 1511 + struct mlx5e_priv *priv; 1512 + int err; 1511 1513 1514 + netdev = mlx5_uplink_netdev_get(dev); 1515 + if (!netdev) 1516 + return 0; 1517 + 1518 + priv = netdev_priv(netdev); 1512 1519 rpriv->netdev = priv->netdev; 1513 - return mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile, 1514 - rpriv); 1520 + err = mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile, 1521 + rpriv); 1522 + mlx5_uplink_netdev_put(dev, netdev); 1523 + return err; 1515 1524 } 1516 1525 1517 1526 static void ··· 1647 1638 { 1648 1639 struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep); 1649 1640 struct net_device *netdev = rpriv->netdev; 1650 - struct mlx5e_priv *priv = netdev_priv(netdev); 1651 - void *ppriv = priv->ppriv; 1641 + struct mlx5e_priv *priv; 1642 + void *ppriv; 1643 + 1644 + if (!netdev) { 1645 + ppriv = rpriv; 1646 + goto free_ppriv; 1647 + } 1648 + 1649 + priv = netdev_priv(netdev); 1650 + ppriv = priv->ppriv; 1652 1651 1653 1652 if (rep->vport == MLX5_VPORT_UPLINK) { 1654 1653 mlx5e_vport_uplink_rep_unload(rpriv);
+1
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
··· 1515 1515 speed = lksettings.base.speed; 1516 1516 1517 1517 out: 1518 + mlx5_uplink_netdev_put(mdev, slave); 1518 1519 return speed; 1519 1520 } 1520 1521
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
··· 114 114 #define ETHTOOL_NUM_PRIOS 11 115 115 #define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS) 116 116 /* Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}}, IPsec policy, 117 - * {IPsec RoCE MPV,Alias table},IPsec RoCE policy 117 + * IPsec policy miss, {IPsec RoCE MPV,Alias table},IPsec RoCE policy 118 118 */ 119 - #define KERNEL_NIC_PRIO_NUM_LEVELS 10 119 + #define KERNEL_NIC_PRIO_NUM_LEVELS 11 120 120 #define KERNEL_NIC_NUM_PRIOS 1 121 121 /* One more level for tc, and one more for promisc */ 122 122 #define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 2)
+14 -1
drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
··· 47 47 48 48 static inline struct net_device *mlx5_uplink_netdev_get(struct mlx5_core_dev *mdev) 49 49 { 50 - return mdev->mlx5e_res.uplink_netdev; 50 + struct mlx5e_resources *mlx5e_res = &mdev->mlx5e_res; 51 + struct net_device *netdev; 52 + 53 + mutex_lock(&mlx5e_res->uplink_netdev_lock); 54 + netdev = mlx5e_res->uplink_netdev; 55 + netdev_hold(netdev, &mlx5e_res->tracker, GFP_KERNEL); 56 + mutex_unlock(&mlx5e_res->uplink_netdev_lock); 57 + return netdev; 58 + } 59 + 60 + static inline void mlx5_uplink_netdev_put(struct mlx5_core_dev *mdev, 61 + struct net_device *netdev) 62 + { 63 + netdev_put(netdev, &mdev->mlx5e_res.tracker); 51 64 } 52 65 53 66 struct mlx5_sd;
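
Note: since the getter now takes a tracked reference under uplink_netdev_lock, every caller must check for NULL and pair the call with mlx5_uplink_netdev_put(), as the en_rep.c and esw/qos.c hunks above do. The expected caller shape, sketched with error handling elided:

    /* Illustrative caller pattern: */
    struct net_device *netdev = mlx5_uplink_netdev_get(mdev);

    if (!netdev)
        return 0;                     /* uplink netdev already unregistered */
    /* ... use netdev ... */
    mlx5_uplink_netdev_put(mdev, netdev);
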
+5 -1
drivers/net/ethernet/mellanox/mlx5/core/port.c
··· 1170 1170 mlx5e_port_get_link_mode_info_arr(mdev, &table, &max_size,
1171 1171 force_legacy);
1172 1172 i = find_first_bit(&temp, max_size);
1173 - if (i < max_size)
1173 + 
1174 + /* The mlx5e_link_info table has holes; a zero
1175 + * speed marks such a hole, so skip those entries.
1176 + */
1177 + if (i < max_size && table[i].speed)
1174 1178 return &table[i];
1175 1179 
1176 1180 return NULL;
+6 -7
drivers/net/ethernet/natsemi/ns83820.c
··· 820 820 struct ns83820 *dev = PRIV(ndev); 821 821 struct rx_info *info = &dev->rx_info; 822 822 unsigned next_rx; 823 - int rx_rc, len; 823 + int len; 824 824 u32 cmdsts; 825 825 __le32 *desc; 826 826 unsigned long flags; ··· 881 881 if (likely(CMDSTS_OK & cmdsts)) { 882 882 #endif 883 883 skb_put(skb, len); 884 - if (unlikely(!skb)) 884 + if (unlikely(!skb)) { 885 + ndev->stats.rx_dropped++; 885 886 goto netdev_mangle_me_harder_failed; 887 + } 886 888 if (cmdsts & CMDSTS_DEST_MULTI) 887 889 ndev->stats.multicast++; 888 890 ndev->stats.rx_packets++; ··· 903 901 __vlan_hwaccel_put_tag(skb, htons(ETH_P_IPV6), tag); 904 902 } 905 903 #endif 906 - rx_rc = netif_rx(skb); 907 - if (NET_RX_DROP == rx_rc) { 908 - netdev_mangle_me_harder_failed: 909 - ndev->stats.rx_dropped++; 910 - } 904 + netif_rx(skb); 911 905 } else { 912 906 dev_kfree_skb_irq(skb); 913 907 } 914 908 909 + netdev_mangle_me_harder_failed: 915 910 nr++; 916 911 next_rx = info->next_rx; 917 912 desc = info->descs + (DESC_SIZE * next_rx);
+4 -3
drivers/net/ethernet/qlogic/qed/qed_debug.c
··· 4462 4462 goto out; 4463 4463 } 4464 4464 4465 - /* Add override window info to buffer */ 4465 + /* Add override window info to buffer, preventing buffer overflow */ 4466 4466 override_window_dwords = 4467 - qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) * 4468 - PROTECTION_OVERRIDE_ELEMENT_DWORDS; 4467 + min(qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) * 4468 + PROTECTION_OVERRIDE_ELEMENT_DWORDS, 4469 + PROTECTION_OVERRIDE_DEPTH_DWORDS); 4469 4470 if (override_window_dwords) { 4470 4471 addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW); 4471 4472 offset += qed_grc_dump_addr_range(p_hwfn,
+1 -1
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c
··· 2092 2092 break; 2093 2093 } 2094 2094 2095 - if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_9000 && 2095 + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_7000 && 2096 2096 trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210) 2097 2097 len = DIV_ROUND_UP(len, 4); 2098 2098
+8
drivers/platform/x86/amd/pmc/pmc-quirks.c
··· 239 239 DMI_MATCH(DMI_BOARD_NAME, "WUJIE14-GX4HRXL"), 240 240 } 241 241 }, 242 + { 243 + .ident = "MECHREVO Yilong15Pro Series GM5HG7A", 244 + .driver_data = &quirk_spurious_8042, 245 + .matches = { 246 + DMI_MATCH(DMI_SYS_VENDOR, "MECHREVO"), 247 + DMI_MATCH(DMI_PRODUCT_NAME, "Yilong15Pro Series GM5HG7A"), 248 + } 249 + }, 242 250 /* https://bugzilla.kernel.org/show_bug.cgi?id=220116 */ 243 251 { 244 252 .ident = "PCSpecialist Lafite Pro V 14M",
+1
drivers/platform/x86/amd/pmf/core.c
··· 403 403 {"AMDI0103", 0}, 404 404 {"AMDI0105", 0}, 405 405 {"AMDI0107", 0}, 406 + {"AMDI0108", 0}, 406 407 { } 407 408 }; 408 409 MODULE_DEVICE_TABLE(acpi, amd_pmf_acpi_ids);
+2
drivers/platform/x86/asus-nb-wmi.c
··· 673 673 if (atkbd_reports_vol_keys) 674 674 *code = ASUS_WMI_KEY_IGNORE; 675 675 break; 676 + case 0x5D: /* Wireless console Toggle */ 677 + case 0x5E: /* Wireless console Enable / Keyboard Attach, Detach */ 676 678 case 0x5F: /* Wireless console Disable / Special Key */ 677 679 if (quirks->key_wlan_event) 678 680 *code = quirks->key_wlan_event;
+14
drivers/platform/x86/oxpec.c
··· 126 126 }, 127 127 { 128 128 .matches = { 129 + DMI_MATCH(DMI_BOARD_VENDOR, "AOKZOE"), 130 + DMI_EXACT_MATCH(DMI_BOARD_NAME, "AOKZOE A1X"), 131 + }, 132 + .driver_data = (void *)oxp_fly, 133 + }, 134 + { 135 + .matches = { 129 136 DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"), 130 137 DMI_MATCH(DMI_BOARD_NAME, "AYANEO 2"), 131 138 }, ··· 310 303 .matches = { 311 304 DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"), 312 305 DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONEXPLAYER X1Pro"), 306 + }, 307 + .driver_data = (void *)oxp_x1, 308 + }, 309 + { 310 + .matches = { 311 + DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"), 312 + DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONEXPLAYER X1Pro EVA-02"), 313 313 }, 314 314 .driver_data = (void *)oxp_x1, 315 315 },
+14 -6
drivers/pmdomain/core.c
··· 187 187 #define genpd_is_opp_table_fw(genpd) (genpd->flags & GENPD_FLAG_OPP_TABLE_FW) 188 188 #define genpd_is_dev_name_fw(genpd) (genpd->flags & GENPD_FLAG_DEV_NAME_FW) 189 189 #define genpd_is_no_sync_state(genpd) (genpd->flags & GENPD_FLAG_NO_SYNC_STATE) 190 + #define genpd_is_no_stay_on(genpd) (genpd->flags & GENPD_FLAG_NO_STAY_ON) 190 191 191 192 static inline bool irq_safe_dev_in_sleep_domain(struct device *dev, 192 193 const struct generic_pm_domain *genpd) ··· 1358 1357 return ret; 1359 1358 } 1360 1359 1361 - #ifndef CONFIG_PM_GENERIC_DOMAINS_OF 1362 1360 static bool pd_ignore_unused; 1363 1361 static int __init pd_ignore_unused_setup(char *__unused) 1364 1362 { ··· 1382 1382 mutex_lock(&gpd_list_lock); 1383 1383 1384 1384 list_for_each_entry(genpd, &gpd_list, gpd_list_node) { 1385 - genpd_lock(genpd); 1386 - genpd->stay_on = false; 1387 - genpd_unlock(genpd); 1388 1385 genpd_queue_power_off_work(genpd); 1389 1386 } 1390 1387 ··· 1390 1393 return 0; 1391 1394 } 1392 1395 late_initcall_sync(genpd_power_off_unused); 1393 - #endif 1394 1396 1395 1397 #ifdef CONFIG_PM_SLEEP 1396 1398 ··· 2363 2367 } 2364 2368 } 2365 2369 2370 + #ifdef CONFIG_PM_GENERIC_DOMAINS_OF 2371 + static void genpd_set_stay_on(struct generic_pm_domain *genpd, bool is_off) 2372 + { 2373 + genpd->stay_on = !genpd_is_no_stay_on(genpd) && !is_off; 2374 + } 2375 + #else 2376 + static void genpd_set_stay_on(struct generic_pm_domain *genpd, bool is_off) 2377 + { 2378 + genpd->stay_on = false; 2379 + } 2380 + #endif 2381 + 2366 2382 /** 2367 2383 * pm_genpd_init - Initialize a generic I/O PM domain object. 2368 2384 * @genpd: PM domain object to initialize. ··· 2400 2392 INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn); 2401 2393 atomic_set(&genpd->sd_count, 0); 2402 2394 genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON; 2403 - genpd->stay_on = !is_off; 2395 + genpd_set_stay_on(genpd, is_off); 2404 2396 genpd->sync_state = GENPD_SYNC_STATE_OFF; 2405 2397 genpd->device_count = 0; 2406 2398 genpd->provider = NULL;
+1
drivers/pmdomain/renesas/rcar-gen4-sysc.c
··· 251 251 genpd->detach_dev = cpg_mssr_detach_dev; 252 252 } 253 253 254 + genpd->flags |= GENPD_FLAG_NO_STAY_ON; 254 255 genpd->power_off = rcar_gen4_sysc_pd_power_off; 255 256 genpd->power_on = rcar_gen4_sysc_pd_power_on; 256 257
+2 -1
drivers/pmdomain/renesas/rcar-sysc.c
··· 241 241 } 242 242 } 243 243 244 + genpd->flags |= GENPD_FLAG_NO_STAY_ON; 244 245 genpd->power_off = rcar_sysc_pd_power_off; 245 246 genpd->power_on = rcar_sysc_pd_power_on; 246 247 ··· 343 342 }; 344 343 345 344 static struct genpd_onecell_data *rcar_sysc_onecell_data; 346 - static struct device_node *rcar_sysc_onecell_np; 345 + static struct device_node *rcar_sysc_onecell_np __initdata = NULL; 347 346 348 347 static int __init rcar_sysc_pd_init(void) 349 348 {
+2 -1
drivers/pmdomain/renesas/rmobile-sysc.c
··· 100 100 struct generic_pm_domain *genpd = &rmobile_pd->genpd; 101 101 struct dev_power_governor *gov = rmobile_pd->gov; 102 102 103 - genpd->flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP; 103 + genpd->flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP | 104 + GENPD_FLAG_NO_STAY_ON; 104 105 genpd->attach_dev = cpg_mstp_attach_dev; 105 106 genpd->detach_dev = cpg_mstp_detach_dev; 106 107
+1 -1
drivers/pmdomain/rockchip/pm-domains.c
··· 865 865 pd->genpd.power_on = rockchip_pd_power_on; 866 866 pd->genpd.attach_dev = rockchip_pd_attach_dev; 867 867 pd->genpd.detach_dev = rockchip_pd_detach_dev; 868 - pd->genpd.flags = GENPD_FLAG_PM_CLK; 868 + pd->genpd.flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_NO_STAY_ON; 869 869 if (pd_info->active_wakeup) 870 870 pd->genpd.flags |= GENPD_FLAG_ACTIVE_WAKEUP; 871 871 pm_genpd_init(&pd->genpd, NULL,
+2 -2
drivers/power/supply/bq27xxx_battery.c
··· 1919 1919 bool has_singe_flag = di->opts & BQ27XXX_O_ZERO; 1920 1920 1921 1921 cache.flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, has_singe_flag); 1922 - if ((cache.flags & 0xff) == 0xff) 1923 - cache.flags = -1; /* read error */ 1922 + if (di->chip == BQ27000 && (cache.flags & 0xff) == 0xff) 1923 + cache.flags = -ENODEV; /* bq27000 hdq read error */ 1924 1924 if (cache.flags >= 0) { 1925 1925 cache.capacity = bq27xxx_battery_read_soc(di); 1926 1926
+2 -2
drivers/ufs/core/ufs-mcq.c
··· 243 243 hwq->sqe_base_addr = dmam_alloc_coherent(hba->dev, utrdl_size, 244 244 &hwq->sqe_dma_addr, 245 245 GFP_KERNEL); 246 - if (!hwq->sqe_dma_addr) { 246 + if (!hwq->sqe_base_addr) { 247 247 dev_err(hba->dev, "SQE allocation failed\n"); 248 248 return -ENOMEM; 249 249 } ··· 252 252 hwq->cqe_base_addr = dmam_alloc_coherent(hba->dev, cqe_size, 253 253 &hwq->cqe_dma_addr, 254 254 GFP_KERNEL); 255 - if (!hwq->cqe_dma_addr) { 255 + if (!hwq->cqe_base_addr) { 256 256 dev_err(hba->dev, "CQE allocation failed\n"); 257 257 return -ENOMEM; 258 258 }
+8 -1
fs/btrfs/block-group.c
··· 1795 1795 bg1 = list_entry(a, struct btrfs_block_group, bg_list); 1796 1796 bg2 = list_entry(b, struct btrfs_block_group, bg_list); 1797 1797 1798 - return bg1->used > bg2->used; 1798 + /* 1799 + * Some other task may be updating the ->used field concurrently, but it 1800 + * is not serious if we get a stale value or load/store tearing issues, 1801 + * as sorting the list of block groups to reclaim is not critical and an 1802 + * occasional imperfect order is ok. So silence KCSAN and avoid the 1803 + * overhead of locking or any other synchronization. 1804 + */ 1805 + return data_race(bg1->used > bg2->used); 1799 1806 } 1800 1807 1801 1808 static inline bool btrfs_should_reclaim(const struct btrfs_fs_info *fs_info)
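
Note: data_race() comes from <linux/compiler.h>; it evaluates its argument while telling KCSAN that this particular unsynchronized access is intentional, so only the deliberate racy read is exempted rather than the whole file. Sketched usage:

    /* Illustrative: a lockless read where a stale value is acceptable */
    u64 used = data_race(bg->used);
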
+13 -9
fs/btrfs/compression.c
··· 1616 1616 } 1617 1617 1618 1618 /* 1619 - * Convert the compression suffix (eg. after "zlib" starting with ":") to 1620 - * level, unrecognized string will set the default level. Negative level 1621 - * numbers are allowed. 1619 + * Convert the compression suffix (eg. after "zlib" starting with ":") to level. 1620 + * 1621 + * If the resulting level exceeds the algo's supported levels, it will be clamped. 1622 + * 1623 + * Return <0 if no valid string can be found. 1624 + * Return 0 if everything is fine. 1622 1625 */ 1623 - int btrfs_compress_str2level(unsigned int type, const char *str) 1626 + int btrfs_compress_str2level(unsigned int type, const char *str, int *level_ret) 1624 1627 { 1625 1628 int level = 0; 1626 1629 int ret; 1627 1630 1628 - if (!type) 1631 + if (!type) { 1632 + *level_ret = btrfs_compress_set_level(type, level); 1629 1633 return 0; 1634 + } 1630 1635 1631 1636 if (str[0] == ':') { 1632 1637 ret = kstrtoint(str + 1, 10, &level); 1633 1638 if (ret) 1634 - level = 0; 1639 + return ret; 1635 1640 } 1636 1641 1637 - level = btrfs_compress_set_level(type, level); 1638 - 1639 - return level; 1642 + *level_ret = btrfs_compress_set_level(type, level); 1643 + return 0; 1640 1644 }
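
Note: the new contract separates parse failure from the parsed level, so callers can refuse a bad mount option instead of silently falling back to the default level. A userspace analog of the new behaviour; the helper name and error handling are illustrative:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int str2level(const char *str, int *level_ret)
    {
        char *end;
        long level = 0;

        if (str[0] == ':') {
            level = strtol(str + 1, &end, 10);
            if (end == str + 1 || *end != '\0')
                return -EINVAL;        /* was: silently use level 0 */
        }
        *level_ret = (int)level;       /* caller clamps to the valid range */
        return 0;
    }

    int main(void)
    {
        int level;

        if (!str2level(":3", &level))
            printf("parsed level %d\n", level);
        if (str2level(":bogus", &level) < 0)
            printf("rejected: mount now fails instead of defaulting\n");
        return 0;
    }
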
+1 -1
fs/btrfs/compression.h
··· 102 102 bool writeback); 103 103 void btrfs_submit_compressed_read(struct btrfs_bio *bbio); 104 104 105 - int btrfs_compress_str2level(unsigned int type, const char *str); 105 + int btrfs_compress_str2level(unsigned int type, const char *str, int *level_ret); 106 106 107 107 struct folio *btrfs_alloc_compr_folio(void); 108 108 void btrfs_free_compr_folio(struct folio *folio);
-3
fs/btrfs/delayed-inode.c
··· 1843 1843 1844 1844 int btrfs_fill_inode(struct btrfs_inode *inode, u32 *rdev) 1845 1845 { 1846 - struct btrfs_fs_info *fs_info = inode->root->fs_info; 1847 1846 struct btrfs_delayed_node *delayed_node; 1848 1847 struct btrfs_inode_item *inode_item; 1849 1848 struct inode *vfs_inode = &inode->vfs_inode; ··· 1863 1864 i_uid_write(vfs_inode, btrfs_stack_inode_uid(inode_item)); 1864 1865 i_gid_write(vfs_inode, btrfs_stack_inode_gid(inode_item)); 1865 1866 btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item)); 1866 - btrfs_inode_set_file_extent_range(inode, 0, 1867 - round_up(i_size_read(vfs_inode), fs_info->sectorsize)); 1868 1867 vfs_inode->i_mode = btrfs_stack_inode_mode(inode_item); 1869 1868 set_nlink(vfs_inode, btrfs_stack_inode_nlink(inode_item)); 1870 1869 inode_set_bytes(vfs_inode, btrfs_stack_inode_nbytes(inode_item));
+5 -6
fs/btrfs/inode.c
··· 3885 3885 bool filled = false; 3886 3886 int first_xattr_slot; 3887 3887 3888 - ret = btrfs_init_file_extent_tree(inode); 3889 - if (ret) 3890 - goto out; 3891 - 3892 3888 ret = btrfs_fill_inode(inode, &rdev); 3893 3889 if (!ret) 3894 3890 filled = true; ··· 3916 3920 i_uid_write(vfs_inode, btrfs_inode_uid(leaf, inode_item)); 3917 3921 i_gid_write(vfs_inode, btrfs_inode_gid(leaf, inode_item)); 3918 3922 btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item)); 3919 - btrfs_inode_set_file_extent_range(inode, 0, 3920 - round_up(i_size_read(vfs_inode), fs_info->sectorsize)); 3921 3923 3922 3924 inode_set_atime(vfs_inode, btrfs_timespec_sec(leaf, &inode_item->atime), 3923 3925 btrfs_timespec_nsec(leaf, &inode_item->atime)); ··· 3947 3953 btrfs_set_inode_mapping_order(inode); 3948 3954 3949 3955 cache_index: 3956 + ret = btrfs_init_file_extent_tree(inode); 3957 + if (ret) 3958 + goto out; 3959 + btrfs_inode_set_file_extent_range(inode, 0, 3960 + round_up(i_size_read(vfs_inode), fs_info->sectorsize)); 3950 3961 /* 3951 3962 * If we were modified in the current generation and evicted from memory 3952 3963 * and then re-read we need to do a full sync since we don't have any
+8 -1
fs/btrfs/ref-verify.c
··· 980 980 if (!btrfs_test_opt(fs_info, REF_VERIFY)) 981 981 return 0; 982 982 983 + extent_root = btrfs_extent_root(fs_info, 0); 984 + /* If the extent tree is damaged we cannot ignore it (IGNOREBADROOTS). */ 985 + if (IS_ERR(extent_root)) { 986 + btrfs_warn(fs_info, "ref-verify: extent tree not available, disabling"); 987 + btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY); 988 + return 0; 989 + } 990 + 983 991 path = btrfs_alloc_path(); 984 992 if (!path) 985 993 return -ENOMEM; 986 994 987 - extent_root = btrfs_extent_root(fs_info, 0); 988 995 eb = btrfs_read_lock_root_node(extent_root); 989 996 level = btrfs_header_level(eb); 990 997 path->nodes[level] = eb;
+19 -8
fs/btrfs/super.c
··· 276 276 const struct fs_parameter *param, int opt) 277 277 { 278 278 const char *string = param->string; 279 + int ret; 279 280 280 281 /* 281 282 * Provide the same semantics as older kernels that don't use fs ··· 295 294 btrfs_clear_opt(ctx->mount_opt, NODATASUM); 296 295 } else if (btrfs_match_compress_type(string, "zlib", true)) { 297 296 ctx->compress_type = BTRFS_COMPRESS_ZLIB; 298 - ctx->compress_level = btrfs_compress_str2level(BTRFS_COMPRESS_ZLIB, 299 - string + 4); 297 + ret = btrfs_compress_str2level(BTRFS_COMPRESS_ZLIB, string + 4, 298 + &ctx->compress_level); 299 + if (ret < 0) 300 + goto error; 300 301 btrfs_set_opt(ctx->mount_opt, COMPRESS); 301 302 btrfs_clear_opt(ctx->mount_opt, NODATACOW); 302 303 btrfs_clear_opt(ctx->mount_opt, NODATASUM); 303 304 } else if (btrfs_match_compress_type(string, "lzo", true)) { 304 305 ctx->compress_type = BTRFS_COMPRESS_LZO; 305 - ctx->compress_level = btrfs_compress_str2level(BTRFS_COMPRESS_LZO, 306 - string + 3); 306 + ret = btrfs_compress_str2level(BTRFS_COMPRESS_LZO, string + 3, 307 + &ctx->compress_level); 308 + if (ret < 0) 309 + goto error; 307 310 if (string[3] == ':' && string[4]) 308 311 btrfs_warn(NULL, "Compression level ignored for LZO"); 309 312 btrfs_set_opt(ctx->mount_opt, COMPRESS); ··· 315 310 btrfs_clear_opt(ctx->mount_opt, NODATASUM); 316 311 } else if (btrfs_match_compress_type(string, "zstd", true)) { 317 312 ctx->compress_type = BTRFS_COMPRESS_ZSTD; 318 - ctx->compress_level = btrfs_compress_str2level(BTRFS_COMPRESS_ZSTD, 319 - string + 4); 313 + ret = btrfs_compress_str2level(BTRFS_COMPRESS_ZSTD, string + 4, 314 + &ctx->compress_level); 315 + if (ret < 0) 316 + goto error; 320 317 btrfs_set_opt(ctx->mount_opt, COMPRESS); 321 318 btrfs_clear_opt(ctx->mount_opt, NODATACOW); 322 319 btrfs_clear_opt(ctx->mount_opt, NODATASUM); ··· 329 322 btrfs_clear_opt(ctx->mount_opt, COMPRESS); 330 323 btrfs_clear_opt(ctx->mount_opt, FORCE_COMPRESS); 331 324 } else { 332 - btrfs_err(NULL, "unrecognized compression value %s", string); 333 - return -EINVAL; 325 + ret = -EINVAL; 326 + goto error; 334 327 } 335 328 return 0; 329 + error: 330 + btrfs_err(NULL, "failed to parse compression option '%s'", string); 331 + return ret; 332 + 336 333 } 337 334 338 335 static int btrfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
+2 -2
fs/btrfs/tree-checker.c
··· 1756 1756 while (ptr < end) { 1757 1757 u16 namelen; 1758 1758 1759 - if (unlikely(ptr + sizeof(iref) > end)) { 1759 + if (unlikely(ptr + sizeof(*iref) > end)) { 1760 1760 inode_ref_err(leaf, slot, 1761 1761 "inode ref overflow, ptr %lu end %lu inode_ref_size %zu", 1762 - ptr, end, sizeof(iref)); 1762 + ptr, end, sizeof(*iref)); 1763 1763 return -EUCLEAN; 1764 1764 } 1765 1765
+1 -1
fs/btrfs/tree-log.c
··· 1964 1964 1965 1965 search_key.objectid = log_key.objectid; 1966 1966 search_key.type = BTRFS_INODE_EXTREF_KEY; 1967 - search_key.offset = key->objectid; 1967 + search_key.offset = btrfs_extref_hash(key->objectid, name.name, name.len); 1968 1968 ret = backref_in_log(root->log_root, &search_key, key->objectid, &name); 1969 1969 if (ret < 0) { 1970 1970 goto out;
+1 -1
fs/btrfs/zoned.c
··· 2582 2582 spin_lock(&space_info->lock); 2583 2583 space_info->total_bytes -= bg->length; 2584 2584 space_info->disk_total -= bg->length * factor; 2585 + space_info->disk_total -= bg->zone_unusable; 2585 2586 /* There is no allocation ever happened. */ 2586 2587 ASSERT(bg->used == 0); 2587 - ASSERT(bg->zone_unusable == 0); 2588 2588 /* No super block in a block group on the zoned setup. */ 2589 2589 ASSERT(bg->bytes_super == 0); 2590 2590 spin_unlock(&space_info->lock);
+2 -2
fs/nilfs2/sysfs.c
··· 1075 1075 ************************************************************************/ 1076 1076 1077 1077 static ssize_t nilfs_feature_revision_show(struct kobject *kobj, 1078 - struct attribute *attr, char *buf) 1078 + struct kobj_attribute *attr, char *buf) 1079 1079 { 1080 1080 return sysfs_emit(buf, "%d.%d\n", 1081 1081 NILFS_CURRENT_REV, NILFS_MINOR_REV); ··· 1087 1087 "(1) revision\n\tshow current revision of NILFS file system driver.\n"; 1088 1088 1089 1089 static ssize_t nilfs_feature_README_show(struct kobject *kobj, 1090 - struct attribute *attr, 1090 + struct kobj_attribute *attr, 1091 1091 char *buf) 1092 1092 { 1093 1093 return sysfs_emit(buf, features_readme_str);
+4 -4
fs/nilfs2/sysfs.h
··· 50 50 struct completion sg_segments_kobj_unregister; 51 51 }; 52 52 53 - #define NILFS_COMMON_ATTR_STRUCT(name) \ 53 + #define NILFS_KOBJ_ATTR_STRUCT(name) \ 54 54 struct nilfs_##name##_attr { \ 55 55 struct attribute attr; \ 56 - ssize_t (*show)(struct kobject *, struct attribute *, \ 56 + ssize_t (*show)(struct kobject *, struct kobj_attribute *, \ 57 57 char *); \ 58 - ssize_t (*store)(struct kobject *, struct attribute *, \ 58 + ssize_t (*store)(struct kobject *, struct kobj_attribute *, \ 59 59 const char *, size_t); \ 60 60 } 61 61 62 - NILFS_COMMON_ATTR_STRUCT(feature); 62 + NILFS_KOBJ_ATTR_STRUCT(feature); 63 63 64 64 #define NILFS_DEV_ATTR_STRUCT(name) \ 65 65 struct nilfs_##name##_attr { \
+2 -2
fs/smb/client/cifsproto.h
··· 312 312 313 313 extern void cifs_close_all_deferred_files(struct cifs_tcon *cifs_tcon); 314 314 315 - extern void cifs_close_deferred_file_under_dentry(struct cifs_tcon *cifs_tcon, 316 - const char *path); 315 + void cifs_close_deferred_file_under_dentry(struct cifs_tcon *cifs_tcon, 316 + struct dentry *dentry); 317 317 318 318 extern void cifs_mark_open_handles_for_deleted_file(struct inode *inode, 319 319 const char *path);
+18 -5
fs/smb/client/inode.c
··· 1984 1984 } 1985 1985 1986 1986 netfs_wait_for_outstanding_io(inode); 1987 - cifs_close_deferred_file_under_dentry(tcon, full_path); 1987 + cifs_close_deferred_file_under_dentry(tcon, dentry); 1988 1988 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1989 1989 if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP & 1990 1990 le64_to_cpu(tcon->fsUnixInfo.Capability))) { ··· 2003 2003 goto psx_del_no_retry; 2004 2004 } 2005 2005 2006 - if (sillyrename || (server->vals->protocol_id > SMB10_PROT_ID && 2007 - d_is_positive(dentry) && d_count(dentry) > 2)) 2006 + /* For SMB2+, if the file is open, we always perform a silly rename. 2007 + * 2008 + * We check for d_count() right after calling 2009 + * cifs_close_deferred_file_under_dentry() to make sure that the 2010 + * dentry's refcount gets dropped in case the file had any deferred 2011 + * close. 2012 + */ 2013 + if (!sillyrename && server->vals->protocol_id > SMB10_PROT_ID) { 2014 + spin_lock(&dentry->d_lock); 2015 + if (d_count(dentry) > 1) 2016 + sillyrename = true; 2017 + spin_unlock(&dentry->d_lock); 2018 + } 2019 + 2020 + if (sillyrename) 2008 2021 rc = -EBUSY; 2009 2022 else 2010 2023 rc = server->ops->unlink(xid, tcon, full_path, cifs_sb, dentry); ··· 2551 2538 goto cifs_rename_exit; 2552 2539 } 2553 2540 2554 - cifs_close_deferred_file_under_dentry(tcon, from_name); 2541 + cifs_close_deferred_file_under_dentry(tcon, source_dentry); 2555 2542 if (d_inode(target_dentry) != NULL) { 2556 2543 netfs_wait_for_outstanding_io(d_inode(target_dentry)); 2557 - cifs_close_deferred_file_under_dentry(tcon, to_name); 2544 + cifs_close_deferred_file_under_dentry(tcon, target_dentry); 2558 2545 } 2559 2546 2560 2547 rc = cifs_do_rename(xid, source_dentry, from_name, target_dentry,
+15 -21
fs/smb/client/misc.c
··· 832 832 kfree(tmp_list); 833 833 } 834 834 } 835 - void 836 - cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, const char *path) 835 + 836 + void cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, 837 + struct dentry *dentry) 837 838 { 838 - struct cifsFileInfo *cfile; 839 839 struct file_list *tmp_list, *tmp_next_list; 840 - void *page; 841 - const char *full_path; 840 + struct cifsFileInfo *cfile; 842 841 LIST_HEAD(file_head); 843 842 844 - page = alloc_dentry_path(); 845 843 spin_lock(&tcon->open_file_lock); 846 844 list_for_each_entry(cfile, &tcon->openFileList, tlist) { 847 - full_path = build_path_from_dentry(cfile->dentry, page); 848 - if (strstr(full_path, path)) { 849 - if (delayed_work_pending(&cfile->deferred)) { 850 - if (cancel_delayed_work(&cfile->deferred)) { 851 - spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock); 852 - cifs_del_deferred_close(cfile); 853 - spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock); 845 + if ((cfile->dentry == dentry) && 846 + delayed_work_pending(&cfile->deferred) && 847 + cancel_delayed_work(&cfile->deferred)) { 848 + spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock); 849 + cifs_del_deferred_close(cfile); 850 + spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock); 854 851 855 - tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC); 856 - if (tmp_list == NULL) 857 - break; 858 - tmp_list->cfile = cfile; 859 - list_add_tail(&tmp_list->list, &file_head); 860 - } 861 - } 852 + tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC); 853 + if (tmp_list == NULL) 854 + break; 855 + tmp_list->cfile = cfile; 856 + list_add_tail(&tmp_list->list, &file_head); 862 857 } 863 858 } 864 859 spin_unlock(&tcon->open_file_lock); ··· 863 868 list_del(&tmp_list->list); 864 869 kfree(tmp_list); 865 870 } 866 - free_dentry_path(page); 867 871 } 868 872 869 873 /*
+28 -5
fs/smb/client/smbdirect.c
··· 453 453 struct smbdirect_recv_io *response = 454 454 container_of(wc->wr_cqe, struct smbdirect_recv_io, cqe); 455 455 struct smbdirect_socket *sc = response->socket; 456 + struct smbdirect_socket_parameters *sp = &sc->parameters; 456 457 struct smbd_connection *info = 457 458 container_of(sc, struct smbd_connection, socket); 458 - int data_length = 0; 459 + u32 data_offset = 0; 460 + u32 data_length = 0; 461 + u32 remaining_data_length = 0; 459 462 460 463 log_rdma_recv(INFO, "response=0x%p type=%d wc status=%d wc opcode %d byte_len=%d pkey_index=%u\n", 461 464 response, sc->recv_io.expected, wc->status, wc->opcode, ··· 490 487 /* SMBD data transfer packet */ 491 488 case SMBDIRECT_EXPECT_DATA_TRANSFER: 492 489 data_transfer = smbdirect_recv_io_payload(response); 490 + 491 + if (wc->byte_len < 492 + offsetof(struct smbdirect_data_transfer, padding)) 493 + goto error; 494 + 495 + remaining_data_length = le32_to_cpu(data_transfer->remaining_data_length); 496 + data_offset = le32_to_cpu(data_transfer->data_offset); 493 497 data_length = le32_to_cpu(data_transfer->data_length); 498 + if (wc->byte_len < data_offset || 499 + (u64)wc->byte_len < (u64)data_offset + data_length) 500 + goto error; 501 + 502 + if (remaining_data_length > sp->max_fragmented_recv_size || 503 + data_length > sp->max_fragmented_recv_size || 504 + (u64)remaining_data_length + (u64)data_length > (u64)sp->max_fragmented_recv_size) 505 + goto error; 494 506 495 507 if (data_length) { 496 508 if (sc->recv_io.reassembly.full_packet_received) ··· 1108 1090 log_rdma_event(INFO, "smbd_post_recv rc=%d iov.addr=0x%llx iov.length=%u iov.lkey=0x%x\n", 1109 1091 rc, response->sge.addr, 1110 1092 response->sge.length, response->sge.lkey); 1111 - if (rc) 1093 + if (rc) { 1094 + put_receive_buffer(info, response); 1112 1095 return rc; 1096 + } 1113 1097 1114 1098 init_completion(&info->negotiate_completion); 1115 1099 info->negotiate_done = false; ··· 1349 1329 sc->status == SMBDIRECT_SOCKET_DISCONNECTED); 1350 1330 } 1351 1331 1332 + log_rdma_event(INFO, "cancelling post_send_credits_work\n"); 1333 + disable_work_sync(&info->post_send_credits_work); 1334 + 1352 1335 log_rdma_event(INFO, "destroying qp\n"); 1353 1336 ib_drain_qp(sc->ib.qp); 1354 1337 rdma_destroy_qp(sc->rdma.cm_id); 1355 1338 sc->ib.qp = NULL; 1356 1339 1357 1340 log_rdma_event(INFO, "cancelling idle timer\n"); 1358 - cancel_delayed_work_sync(&info->idle_timer_work); 1341 + disable_delayed_work_sync(&info->idle_timer_work); 1359 1342 1360 1343 /* It's not possible for upper layer to get to reassembly */ 1361 1344 log_rdma_event(INFO, "drain the reassembly queue\n"); ··· 1731 1708 return NULL; 1732 1709 1733 1710 negotiation_failed: 1734 - cancel_delayed_work_sync(&info->idle_timer_work); 1711 + disable_delayed_work_sync(&info->idle_timer_work); 1735 1712 destroy_caches_and_workqueue(info); 1736 1713 sc->status = SMBDIRECT_SOCKET_NEGOTIATE_FAILED; 1737 1714 rdma_disconnect(sc->rdma.cm_id); ··· 2090 2067 struct smbdirect_socket *sc = &info->socket; 2091 2068 struct smbd_mr *mr, *tmp; 2092 2069 2093 - cancel_work_sync(&info->mr_recovery_work); 2070 + disable_work_sync(&info->mr_recovery_work); 2094 2071 list_for_each_entry_safe(mr, tmp, &info->mr_list, list) { 2095 2072 if (mr->state == MR_INVALIDATED) 2096 2073 ib_dma_unmap_sg(sc->ib.dev, mr->sgt.sgl,
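
Note: the client hunk above and the ksmbd server hunk below add the same header validation: widen each 32-bit field to u64 before adding, so a crafted data_offset + data_length cannot wrap, and bound both lengths by max_fragmented_recv_size. The checks condensed into a standalone helper; the function name is illustrative:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool smbd_payload_ok(uint32_t byte_len, uint32_t data_offset,
                                uint32_t data_length, uint32_t remaining,
                                uint32_t max_recv)
    {
        if (byte_len < data_offset ||
            (uint64_t)byte_len < (uint64_t)data_offset + data_length)
            return false;
        if (remaining > max_recv || data_length > max_recv ||
            (uint64_t)remaining + (uint64_t)data_length > (uint64_t)max_recv)
            return false;
        return true;
    }

    int main(void)
    {
        /* data_offset + data_length wraps past 32 bits: rejected */
        printf("%d\n", smbd_payload_ok(0x1000, 0x100, 0xffffff00u, 0, 0x100000));
        return 0;
    }
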
+126 -59
fs/smb/server/transport_rdma.c
··· 554 554 case SMB_DIRECT_MSG_DATA_TRANSFER: { 555 555 struct smb_direct_data_transfer *data_transfer = 556 556 (struct smb_direct_data_transfer *)recvmsg->packet; 557 - unsigned int data_length; 557 + u32 remaining_data_length, data_offset, data_length; 558 558 int avail_recvmsg_count, receive_credits; 559 559 560 560 if (wc->byte_len < ··· 564 564 return; 565 565 } 566 566 567 + remaining_data_length = le32_to_cpu(data_transfer->remaining_data_length); 567 568 data_length = le32_to_cpu(data_transfer->data_length); 568 - if (data_length) { 569 - if (wc->byte_len < sizeof(struct smb_direct_data_transfer) + 570 - (u64)data_length) { 571 - put_recvmsg(t, recvmsg); 572 - smb_direct_disconnect_rdma_connection(t); 573 - return; 574 - } 569 + data_offset = le32_to_cpu(data_transfer->data_offset); 570 + if (wc->byte_len < data_offset || 571 + wc->byte_len < (u64)data_offset + data_length) { 572 + put_recvmsg(t, recvmsg); 573 + smb_direct_disconnect_rdma_connection(t); 574 + return; 575 + } 576 + if (remaining_data_length > t->max_fragmented_recv_size || 577 + data_length > t->max_fragmented_recv_size || 578 + (u64)remaining_data_length + (u64)data_length > 579 + (u64)t->max_fragmented_recv_size) { 580 + put_recvmsg(t, recvmsg); 581 + smb_direct_disconnect_rdma_connection(t); 582 + return; 583 + } 575 584 585 + if (data_length) { 576 586 if (t->full_packet_received) 577 587 recvmsg->first_segment = true; 578 588 ··· 1219 1209 bool need_invalidate, unsigned int remote_key) 1220 1210 { 1221 1211 struct smb_direct_transport *st = smb_trans_direct_transfort(t); 1222 - int remaining_data_length; 1223 - int start, i, j; 1224 - int max_iov_size = st->max_send_size - 1212 + size_t remaining_data_length; 1213 + size_t iov_idx; 1214 + size_t iov_ofs; 1215 + size_t max_iov_size = st->max_send_size - 1225 1216 sizeof(struct smb_direct_data_transfer); 1226 1217 int ret; 1227 - struct kvec vec; 1228 1218 struct smb_direct_send_ctx send_ctx; 1219 + int error = 0; 1229 1220 1230 1221 if (st->status != SMB_DIRECT_CS_CONNECTED) 1231 1222 return -ENOTCONN; 1232 1223 1233 1224 //FIXME: skip RFC1002 header.. 
1225 + if (WARN_ON_ONCE(niovs <= 1 || iov[0].iov_len != 4))
1226 + return -EINVAL;
1234 1227 buflen -= 4;
1228 + iov_idx = 1;
1229 + iov_ofs = 0;
1235 1230 
1236 1231 remaining_data_length = buflen;
1237 1232 ksmbd_debug(RDMA, "Sending smb (RDMA): smb_len=%u\n", buflen);
1238 1233 
1239 1234 smb_direct_send_ctx_init(st, &send_ctx, need_invalidate, remote_key);
1240 - start = i = 1;
1241 - buflen = 0;
1242 - while (true) {
1243 - buflen += iov[i].iov_len;
1244 - if (buflen > max_iov_size) {
1245 - if (i > start) {
1246 - remaining_data_length -=
1247 - (buflen - iov[i].iov_len);
1248 - ret = smb_direct_post_send_data(st, &send_ctx,
1249 - &iov[start], i - start,
1250 - remaining_data_length);
1251 - if (ret)
1252 - goto done;
1253 - } else {
1254 - /* iov[start] is too big, break it */
1255 - int nvec = (buflen + max_iov_size - 1) /
1256 - max_iov_size;
1235 + while (remaining_data_length) {
1236 + struct kvec vecs[SMB_DIRECT_MAX_SEND_SGES - 1]; /* minus smbdirect hdr */
1237 + size_t possible_bytes = max_iov_size;
1238 + size_t possible_vecs;
1239 + size_t bytes = 0;
1240 + size_t nvecs = 0;
1257 1241 
1258 - for (j = 0; j < nvec; j++) {
1259 - vec.iov_base =
1260 - (char *)iov[start].iov_base +
1261 - j * max_iov_size;
1262 - vec.iov_len =
1263 - min_t(int, max_iov_size,
1264 - buflen - max_iov_size * j);
1265 - remaining_data_length -= vec.iov_len;
1266 - ret = smb_direct_post_send_data(st, &send_ctx, &vec, 1,
1267 - remaining_data_length);
1268 - if (ret)
1269 - goto done;
1270 - }
1271 - i++;
1272 - if (i == niovs)
1273 - break;
1274 - }
1275 - start = i;
1276 - buflen = 0;
1277 - } else {
1278 - i++;
1279 - if (i == niovs) {
1280 - /* send out all remaining vecs */
1281 - remaining_data_length -= buflen;
1282 - ret = smb_direct_post_send_data(st, &send_ctx,
1283 - &iov[start], i - start,
1284 - remaining_data_length);
1285 - if (ret)
1242 + /*
1243 + * For the last message remaining_data_length should
1244 + * have been 0 already!
1245 + */
1246 + if (WARN_ON_ONCE(iov_idx >= niovs)) {
1247 + error = -EINVAL;
1248 + goto done;
1249 + }
1250 + 
1251 + /*
1252 + * We have 2 factors which limit the arguments we pass
1253 + * to smb_direct_post_send_data():
1254 + *
1255 + * 1. The number of supported sges for the send,
1256 + * while one is reserved for the smbdirect header.
1257 + * And we currently need one SGE per page.
1258 + * 2. The number of negotiated payload bytes per send.
1259 + */
1260 + possible_vecs = min_t(size_t, ARRAY_SIZE(vecs), niovs - iov_idx);
1261 + 
1262 + while (iov_idx < niovs && possible_vecs && possible_bytes) {
1263 + struct kvec *v = &vecs[nvecs];
1264 + int page_count;
1265 + 
1266 + v->iov_base = ((u8 *)iov[iov_idx].iov_base) + iov_ofs;
1267 + v->iov_len = min_t(size_t,
1268 + iov[iov_idx].iov_len - iov_ofs,
1269 + possible_bytes);
1270 + page_count = get_buf_page_count(v->iov_base, v->iov_len);
1271 + if (page_count > possible_vecs) {
1272 + /*
1273 + * If the number of pages in the buffer
1274 + * is too large (because we currently require
1275 + * one SGE per page), we need to limit the
1276 + * length.
1277 + *
1278 + * We know possible_vecs is at least 1,
1279 + * so we always keep the first page.
1280 + *
1281 + * We need to calculate the number of extra
1282 + * pages (epages) we can also keep.
1283 + *
1284 + * We calculate the number of bytes in the
1285 + * first page (fplen); this should never be
1286 + * larger than v->iov_len because page_count is
1287 + * at least 2, but adding a limitation feels
1289 + * 1290 + * Then we calculate the number of bytes (elen) 1291 + * we can keep for the extra pages. 1292 + */ 1293 + size_t epages = possible_vecs - 1; 1294 + size_t fpofs = offset_in_page(v->iov_base); 1295 + size_t fplen = min_t(size_t, PAGE_SIZE - fpofs, v->iov_len); 1296 + size_t elen = min_t(size_t, v->iov_len - fplen, epages*PAGE_SIZE); 1297 + 1298 + v->iov_len = fplen + elen; 1299 + page_count = get_buf_page_count(v->iov_base, v->iov_len); 1300 + if (WARN_ON_ONCE(page_count > possible_vecs)) { 1301 + /* 1302 + * Something went wrong in the above 1303 + * logic... 1304 + */ 1305 + error = -EINVAL; 1286 1306 goto done; 1287 - break; 1307 + } 1288 1308 } 1309 + possible_vecs -= page_count; 1310 + nvecs += 1; 1311 + possible_bytes -= v->iov_len; 1312 + bytes += v->iov_len; 1313 + 1314 + iov_ofs += v->iov_len; 1315 + if (iov_ofs >= iov[iov_idx].iov_len) { 1316 + iov_idx += 1; 1317 + iov_ofs = 0; 1318 + } 1319 + } 1320 + 1321 + remaining_data_length -= bytes; 1322 + 1323 + ret = smb_direct_post_send_data(st, &send_ctx, 1324 + vecs, nvecs, 1325 + remaining_data_length); 1326 + if (unlikely(ret)) { 1327 + error = ret; 1328 + goto done; 1289 1329 } 1290 1330 } 1291 1331 1292 1332 done: 1293 1333 ret = smb_direct_flush_send_list(st, &send_ctx, true); 1334 + if (unlikely(!ret && error)) 1335 + ret = error; 1294 1336 1295 1337 /* 1296 1338 * As an optimization, we don't wait for individual I/O to finish ··· 1806 1744 return -EINVAL; 1807 1745 } 1808 1746 1747 + if (device->attrs.max_send_sge < SMB_DIRECT_MAX_SEND_SGES) { 1748 + pr_err("warning: device max_send_sge = %d too small\n", 1749 + device->attrs.max_send_sge); 1750 + return -EINVAL; 1751 + } 1809 1752 if (device->attrs.max_recv_sge < SMB_DIRECT_MAX_RECV_SGES) { 1810 1753 pr_err("warning: device max_recv_sge = %d too small\n", 1811 1754 device->attrs.max_recv_sge); ··· 1834 1767 1835 1768 cap->max_send_wr = max_send_wrs; 1836 1769 cap->max_recv_wr = t->recv_credit_max; 1837 - cap->max_send_sge = max_sge_per_wr; 1770 + cap->max_send_sge = SMB_DIRECT_MAX_SEND_SGES; 1838 1771 cap->max_recv_sge = SMB_DIRECT_MAX_RECV_SGES; 1839 1772 cap->max_inline_data = 0; 1840 1773 cap->max_rdma_ctxs = t->max_rw_credits;
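
Note: the rewritten smb_direct_writev() replaces the old start/i windowing with a resumable cursor: iov_idx names the current kvec and iov_ofs the offset within it, and each iteration posts at most the negotiated payload size. A standalone model of that cursor walk; it omits the one-SGE-per-page limit, and buffer and chunk sizes are example values:

    #include <stddef.h>
    #include <stdio.h>

    struct kvec { void *iov_base; size_t iov_len; };

    int main(void)
    {
        static char a[10000], b[3000];
        struct kvec iov[] = { { a, sizeof(a) }, { b, sizeof(b) } };
        size_t niovs = 2, iov_idx = 0, iov_ofs = 0;
        size_t max_bytes = 4096;  /* stand-in for the negotiated send size */

        while (iov_idx < niovs) {
            size_t budget = max_bytes, bytes = 0;

            while (iov_idx < niovs && budget) {
                size_t take = iov[iov_idx].iov_len - iov_ofs;

                if (take > budget)
                    take = budget;
                budget -= take;
                bytes += take;
                iov_ofs += take;
                if (iov_ofs == iov[iov_idx].iov_len) {
                    iov_idx++;   /* kvec fully consumed */
                    iov_ofs = 0;
                }
            }
            printf("post send of %zu bytes\n", bytes);
        }
        return 0;
    }
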
+6 -4
include/crypto/if_alg.h
··· 135 135 * SG? 136 136 * @enc: Cryptographic operation to be performed when 137 137 * recvmsg is invoked. 138 + * @write: True if we are in the middle of a write. 138 139 * @init: True if metadata has been sent. 139 140 * @len: Length of memory allocated for this data structure. 140 141 * @inflight: Non-zero when AIO requests are in flight. ··· 152 151 size_t used; 153 152 atomic_t rcvused; 154 153 155 - bool more; 156 - bool merge; 157 - bool enc; 158 - bool init; 154 + u32 more:1, 155 + merge:1, 156 + enc:1, 157 + write:1, 158 + init:1; 159 159 160 160 unsigned int len; 161 161
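
Note: converting the bools to one-bit bitfields makes room for the new write flag without growing the context. A standalone illustration of the packing difference; exact sizes are compiler-dependent:

    #include <stdbool.h>
    #include <stdio.h>

    struct as_bools { bool more, merge, enc, write, init; };
    struct as_bits { unsigned int more:1, merge:1, enc:1, write:1, init:1; };

    int main(void)
    {
        printf("bools: %zu bytes, bitfields: %zu bytes\n",
               sizeof(struct as_bools), sizeof(struct as_bits));
        return 0;
    }
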
+6 -3
include/kvm/arm_vgic.h
··· 8 8 #include <linux/bits.h> 9 9 #include <linux/kvm.h> 10 10 #include <linux/irqreturn.h> 11 - #include <linux/kref.h> 12 11 #include <linux/mutex.h> 12 + #include <linux/refcount.h> 13 13 #include <linux/spinlock.h> 14 14 #include <linux/static_key.h> 15 15 #include <linux/types.h> ··· 139 139 bool pending_latch; /* The pending latch state used to calculate 140 140 * the pending state for both level 141 141 * and edge triggered IRQs. */ 142 - bool active; /* not used for LPIs */ 142 + bool active; 143 + bool pending_release; /* Used for LPIs only, unreferenced IRQ 144 + * pending a release */ 145 + 143 146 bool enabled; 144 147 bool hw; /* Tied to HW IRQ */ 145 - struct kref refcount; /* Used for LPIs */ 148 + refcount_t refcount; /* Used for LPIs */ 146 149 u32 hwintid; /* HW INTID number */ 147 150 unsigned int host_irq; /* linux irq corresponding to hwintid */ 148 151 union {
+2
include/linux/damon.h
··· 636 636 * @data: Data that will be passed to @fn. 637 637 * @repeat: Repeat invocations. 638 638 * @return_code: Return code from @fn invocation. 639 + * @dealloc_on_cancel: De-allocate when canceled. 639 640 * 640 641 * Control damon_call(), which requests specific kdamond to invoke a given 641 642 * function. Refer to damon_call() for more details. ··· 646 645 void *data; 647 646 bool repeat; 648 647 int return_code; 648 + bool dealloc_on_cancel; 649 649 /* private: internal use only */ 650 650 /* informs if the kdamond finished handling of the request */ 651 651 struct completion completion;
-3
include/linux/io_uring_types.h
··· 420 420 struct list_head defer_list; 421 421 unsigned nr_drained; 422 422 423 - struct io_alloc_cache msg_cache; 424 - spinlock_t msg_lock; 425 - 426 423 #ifdef CONFIG_NET_RX_BUSY_POLL 427 424 struct list_head napi_list; /* track busy poll napi_id */ 428 425 spinlock_t napi_lock; /* napi_list lock */
+1
include/linux/mlx5/driver.h
··· 663 663 bool tisn_valid; 664 664 } hw_objs; 665 665 struct net_device *uplink_netdev; 666 + netdevice_tracker tracker; 666 667 struct mutex uplink_netdev_lock; 667 668 struct mlx5_crypto_dek_priv *dek_priv; 668 669 };
+7
include/linux/pm_domain.h
··· 115 115 * genpd provider specific way, likely through a 116 116 * parent device node. This flag makes genpd to 117 117 * skip its internal support for this. 118 + * 119 + * GENPD_FLAG_NO_STAY_ON: For genpd OF providers a powered-on PM domain at 120 + * initialization is prevented from being 121 + * powered-off until the ->sync_state() callback is 122 + * invoked. This flag informs genpd to allow a 123 + * power-off without waiting for ->sync_state(). 118 124 */ 119 125 #define GENPD_FLAG_PM_CLK (1U << 0) 120 126 #define GENPD_FLAG_IRQ_SAFE (1U << 1) ··· 132 126 #define GENPD_FLAG_OPP_TABLE_FW (1U << 7) 133 127 #define GENPD_FLAG_DEV_NAME_FW (1U << 8) 134 128 #define GENPD_FLAG_NO_SYNC_STATE (1U << 9) 129 + #define GENPD_FLAG_NO_STAY_ON (1U << 10) 135 130 136 131 enum gpd_status { 137 132 GENPD_STATE_ON = 0, /* PM domain is on */
+3 -5
include/linux/rv.h
··· 7 7 #ifndef _LINUX_RV_H 8 8 #define _LINUX_RV_H 9 9 10 - #include <linux/types.h> 11 - #include <linux/list.h> 12 - 13 10 #define MAX_DA_NAME_LEN 32 14 11 #define MAX_DA_RETRY_RACING_EVENTS 3 15 12 16 13 #ifdef CONFIG_RV 17 - #include <linux/bitops.h> 18 - #include <linux/types.h> 19 14 #include <linux/array_size.h> 15 + #include <linux/bitops.h> 16 + #include <linux/list.h> 17 + #include <linux/types.h> 20 18 21 19 /* 22 20 * Deterministic automaton per-object variables.
+10
include/linux/swap.h
··· 385 385 void mark_page_accessed(struct page *); 386 386 void folio_mark_accessed(struct folio *); 387 387 388 + static inline bool folio_may_be_lru_cached(struct folio *folio) 389 + { 390 + /* 391 + * Holding PMD-sized folios in per-CPU LRU cache unbalances accounting. 392 + * Holding small numbers of low-order mTHP folios in per-CPU LRU cache 393 + * will be sensible, but nobody has implemented and tested that yet. 394 + */ 395 + return !folio_test_large(folio); 396 + } 397 + 388 398 extern atomic_t lru_disable_count; 389 399 390 400 static inline bool lru_cache_disabled(void)
+9 -2
include/net/dst_metadata.h
··· 3 3 #define __NET_DST_METADATA_H 1 4 4 5 5 #include <linux/skbuff.h> 6 + #include <net/ip.h> 6 7 #include <net/ip_tunnels.h> 7 8 #include <net/macsec.h> 8 9 #include <net/dst.h> ··· 221 220 int md_size) 222 221 { 223 222 const struct iphdr *iph = ip_hdr(skb); 223 + struct metadata_dst *tun_dst; 224 224 225 - return __ip_tun_set_dst(iph->saddr, iph->daddr, iph->tos, iph->ttl, 226 - 0, flags, tunnel_id, md_size); 225 + tun_dst = __ip_tun_set_dst(iph->saddr, iph->daddr, iph->tos, iph->ttl, 226 + 0, flags, tunnel_id, md_size); 227 + 228 + if (tun_dst && (iph->frag_off & htons(IP_DF))) 229 + __set_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, 230 + tun_dst->u.tun_info.key.tun_flags); 231 + return tun_dst; 227 232 } 228 233 229 234 static inline struct metadata_dst *__ipv6_tun_set_dst(const struct in6_addr *saddr,
+3 -2
include/net/sock.h
··· 2061 2061 if (sock) { 2062 2062 WRITE_ONCE(sk->sk_uid, SOCK_INODE(sock)->i_uid); 2063 2063 WRITE_ONCE(sk->sk_ino, SOCK_INODE(sock)->i_ino); 2064 + } else { 2065 + /* Note: sk_uid is unchanged. */ 2066 + WRITE_ONCE(sk->sk_ino, 0); 2064 2067 } 2065 2068 } 2066 2069 ··· 2085 2082 sock_set_flag(sk, SOCK_DEAD); 2086 2083 sk_set_socket(sk, NULL); 2087 2084 sk->sk_wq = NULL; 2088 - /* Note: sk_uid is unchanged. */ 2089 - WRITE_ONCE(sk->sk_ino, 0); 2090 2085 write_unlock_bh(&sk->sk_callback_lock); 2091 2086 } 2092 2087
+8
include/sound/soc_sdw_utils.h
··· 248 248 int asoc_sdw_cs42l43_dmic_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai); 249 249 int asoc_sdw_cs_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai); 250 250 int asoc_sdw_maxim_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai); 251 + /* TI */ 252 + int asoc_sdw_ti_amp_init(struct snd_soc_card *card, 253 + struct snd_soc_dai_link *dai_links, 254 + struct asoc_sdw_codec_info *info, 255 + bool playback); 256 + int asoc_sdw_ti_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai); 257 + int asoc_sdw_ti_amp_initial_settings(struct snd_soc_card *card, 258 + const char *name_prefix); 251 259 252 260 #endif
+2
include/uapi/linux/mptcp.h
··· 31 31 #define MPTCP_INFO_FLAG_FALLBACK _BITUL(0) 32 32 #define MPTCP_INFO_FLAG_REMOTE_KEY_RECEIVED _BITUL(1) 33 33 34 + #define MPTCP_PM_EV_FLAG_DENY_JOIN_ID0 _BITUL(0) 35 + 34 36 #define MPTCP_PM_ADDR_FLAG_SIGNAL (1 << 0) 35 37 #define MPTCP_PM_ADDR_FLAG_SUBFLOW (1 << 1) 36 38 #define MPTCP_PM_ADDR_FLAG_BACKUP (1 << 2)
+2 -2
include/uapi/linux/mptcp_pm.h
··· 16 16 * good time to allocate memory and send ADD_ADDR if needed. Depending on the 17 17 * traffic-patterns it can take a long time until the MPTCP_EVENT_ESTABLISHED 18 18 * is sent. Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6, 19 - * sport, dport, server-side. 19 + * sport, dport, server-side, [flags]. 20 20 * @MPTCP_EVENT_ESTABLISHED: A MPTCP connection is established (can start new 21 21 * subflows). Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6, 22 - * sport, dport, server-side. 22 + * sport, dport, server-side, [flags]. 23 23 * @MPTCP_EVENT_CLOSED: A MPTCP connection has stopped. Attribute: token. 24 24 * @MPTCP_EVENT_ANNOUNCED: A new address has been announced by the peer. 25 25 * Attributes: token, rem_id, family, daddr4 | daddr6 [, dport].
+3 -3
io_uring/io-wq.c
··· 352 352 struct io_wq *wq; 353 353 354 354 struct io_wq_acct *acct; 355 - bool do_create = false; 355 + bool activated_free_worker, do_create = false; 356 356 357 357 worker = container_of(cb, struct io_worker, create_work); 358 358 wq = worker->wq; 359 359 acct = worker->acct; 360 360 361 361 rcu_read_lock(); 362 - do_create = !io_acct_activate_free_worker(acct); 362 + activated_free_worker = io_acct_activate_free_worker(acct); 363 363 rcu_read_unlock(); 364 - if (!do_create) 364 + if (activated_free_worker) 365 365 goto no_need_create; 366 366 367 367 raw_spin_lock(&acct->workers_lock);
+4 -6
io_uring/io_uring.c
··· 290 290 io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free); 291 291 io_alloc_cache_free(&ctx->rw_cache, io_rw_cache_free); 292 292 io_alloc_cache_free(&ctx->cmd_cache, io_cmd_cache_free); 293 - io_alloc_cache_free(&ctx->msg_cache, kfree); 294 293 io_futex_cache_free(ctx); 295 294 io_rsrc_cache_free(ctx); 296 295 } ··· 336 337 ret |= io_alloc_cache_init(&ctx->cmd_cache, IO_ALLOC_CACHE_MAX, 337 338 sizeof(struct io_async_cmd), 338 339 sizeof(struct io_async_cmd)); 339 - spin_lock_init(&ctx->msg_lock); 340 - ret |= io_alloc_cache_init(&ctx->msg_cache, IO_ALLOC_CACHE_MAX, 341 - sizeof(struct io_kiocb), 0); 342 340 ret |= io_futex_cache_init(ctx); 343 341 ret |= io_rsrc_cache_init(ctx); 344 342 if (ret) ··· 1402 1406 1403 1407 void io_req_task_submit(struct io_kiocb *req, io_tw_token_t tw) 1404 1408 { 1405 - io_tw_lock(req->ctx, tw); 1406 - if (unlikely(io_should_terminate_tw())) 1409 + struct io_ring_ctx *ctx = req->ctx; 1410 + 1411 + io_tw_lock(ctx, tw); 1412 + if (unlikely(io_should_terminate_tw(ctx))) 1407 1413 io_req_defer_failed(req, -EFAULT); 1408 1414 else if (req->flags & REQ_F_FORCE_ASYNC) 1409 1415 io_queue_iowq(req);
+2 -2
io_uring/io_uring.h
··· 476 476 * 2) PF_KTHREAD is set, in which case the invoker of the task_work is 477 477 * our fallback task_work. 478 478 */ 479 - static inline bool io_should_terminate_tw(void) 479 + static inline bool io_should_terminate_tw(struct io_ring_ctx *ctx) 480 480 { 481 - return current->flags & (PF_KTHREAD | PF_EXITING); 481 + return (current->flags & (PF_KTHREAD | PF_EXITING)) || percpu_ref_is_dying(&ctx->refs); 482 482 } 483 483 484 484 static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
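Callers now pass the ring context, so a dying ring terminates task_work as well as an exiting task. A handler sketch following the poll.c/timeout.c updates below (the handler name is hypothetical):

    static void my_tw_handler(struct io_kiocb *req, io_tw_token_t tw)
    {
            /* Bail out if the task is exiting or the ring is going away. */
            if (unlikely(io_should_terminate_tw(req->ctx))) {
                    io_req_defer_failed(req, -ECANCELED);
                    return;
            }
            /* ... normal completion path ... */
    }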
+2 -22
io_uring/msg_ring.c
··· 11 11 #include "io_uring.h" 12 12 #include "rsrc.h" 13 13 #include "filetable.h" 14 - #include "alloc_cache.h" 15 14 #include "msg_ring.h" 16 15 17 16 /* All valid masks for MSG_RING */ ··· 75 76 struct io_ring_ctx *ctx = req->ctx; 76 77 77 78 io_add_aux_cqe(ctx, req->cqe.user_data, req->cqe.res, req->cqe.flags); 78 - if (spin_trylock(&ctx->msg_lock)) { 79 - if (io_alloc_cache_put(&ctx->msg_cache, req)) 80 - req = NULL; 81 - spin_unlock(&ctx->msg_lock); 82 - } 83 - if (req) 84 - kfree_rcu(req, rcu_head); 79 + kfree_rcu(req, rcu_head); 85 80 percpu_ref_put(&ctx->refs); 86 81 } 87 82 ··· 97 104 return 0; 98 105 } 99 106 100 - static struct io_kiocb *io_msg_get_kiocb(struct io_ring_ctx *ctx) 101 - { 102 - struct io_kiocb *req = NULL; 103 - 104 - if (spin_trylock(&ctx->msg_lock)) { 105 - req = io_alloc_cache_get(&ctx->msg_cache); 106 - spin_unlock(&ctx->msg_lock); 107 - if (req) 108 - return req; 109 - } 110 - return kmem_cache_alloc(req_cachep, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO); 111 - } 112 - 113 107 static int io_msg_data_remote(struct io_ring_ctx *target_ctx, 114 108 struct io_msg *msg) 115 109 { 116 110 struct io_kiocb *target; 117 111 u32 flags = 0; 118 112 119 113 target = kmem_cache_alloc(req_cachep, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO); 120 114 if (unlikely(!target)) 121 115 return -ENOMEM; 122 116
+1 -1
io_uring/notif.c
··· 85 85 return -EEXIST; 86 86 87 87 prev_nd = container_of(prev_uarg, struct io_notif_data, uarg); 88 - prev_notif = cmd_to_io_kiocb(nd); 88 + prev_notif = cmd_to_io_kiocb(prev_nd); 89 89 90 90 /* make sure all notifications can be finished in the same task_work */ 91 91 if (unlikely(notif->ctx != prev_notif->ctx ||
+1 -1
io_uring/poll.c
··· 224 224 { 225 225 int v; 226 226 227 - if (unlikely(io_should_terminate_tw())) 227 + if (unlikely(io_should_terminate_tw(req->ctx))) 228 228 return -ECANCELED; 229 229 230 230 do {
+1 -1
io_uring/timeout.c
··· 324 324 int ret; 325 325 326 326 if (prev) { 327 - if (!io_should_terminate_tw()) { 327 + if (!io_should_terminate_tw(req->ctx)) { 328 328 struct io_cancel_data cd = { 329 329 .ctx = req->ctx, 330 330 .data = prev->cqe.user_data,
+1 -1
io_uring/uring_cmd.c
··· 118 118 struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd); 119 119 unsigned int flags = IO_URING_F_COMPLETE_DEFER; 120 120 121 - if (io_should_terminate_tw()) 121 + if (io_should_terminate_tw(req->ctx)) 122 122 flags |= IO_URING_F_TASK_DEAD; 123 123 124 124 /* task_work executor checks the deferred list completion */
+37 -7
kernel/cgroup/cgroup.c
··· 126 126 * of concurrent destructions. Use a separate workqueue so that cgroup 127 127 * destruction work items don't end up filling up max_active of system_wq 128 128 * which may lead to deadlock. 129 + * 130 + * A cgroup destruction should enqueue work sequentially to: 131 + * cgroup_offline_wq: use for css offline work 132 + * cgroup_release_wq: use for css release work 133 + * cgroup_free_wq: use for free work 134 + * 135 + * Rationale for using separate workqueues: 136 + * The cgroup root free work may depend on completion of other css offline 137 + * operations. If all tasks were enqueued to a single workqueue, this could 138 + * create a deadlock scenario where: 139 + * - Free work waits for other css offline work to complete. 140 + * - But other css offline work is queued after free work in the same queue. 141 + * 142 + * Example deadlock scenario with single workqueue (cgroup_destroy_wq): 143 + * 1. umount net_prio 144 + * 2. net_prio root destruction enqueues work to cgroup_destroy_wq (CPUx) 145 + * 3. perf_event CSS A offline enqueues work to same cgroup_destroy_wq (CPUx) 146 + * 4. net_prio cgroup_destroy_root->cgroup_lock_and_drain_offline. 147 + * 5. net_prio root destruction blocks waiting for perf_event CSS A offline, 148 + * which can never complete as it's behind in the same queue and 149 + * workqueue's max_active is 1. 129 150 */ 130 - static struct workqueue_struct *cgroup_destroy_wq; 151 + static struct workqueue_struct *cgroup_offline_wq; 152 + static struct workqueue_struct *cgroup_release_wq; 153 + static struct workqueue_struct *cgroup_free_wq; 131 154 132 155 /* generate an array of cgroup subsystem pointers */ 133 156 #define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys, ··· 4182 4159 cft->release(of); 4183 4160 put_cgroup_ns(ctx->ns); 4184 4161 kfree(ctx); 4162 + of->priv = NULL; 4185 4163 } 4186 4164 4187 4165 static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf, ··· 5582 5558 cgroup_unlock(); 5583 5559 5584 5560 INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn); 5585 - queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork); 5561 + queue_rcu_work(cgroup_free_wq, &css->destroy_rwork); 5586 5562 } 5587 5563 5588 5564 static void css_release(struct percpu_ref *ref) ··· 5591 5567 container_of(ref, struct cgroup_subsys_state, refcnt); 5592 5568 5593 5569 INIT_WORK(&css->destroy_work, css_release_work_fn); 5594 - queue_work(cgroup_destroy_wq, &css->destroy_work); 5570 + queue_work(cgroup_release_wq, &css->destroy_work); 5595 5571 } 5596 5572 5597 5573 static void init_and_link_css(struct cgroup_subsys_state *css, ··· 5725 5701 list_del_rcu(&css->sibling); 5726 5702 err_free_css: 5727 5703 INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn); 5728 - queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork); 5704 + queue_rcu_work(cgroup_free_wq, &css->destroy_rwork); 5729 5705 return ERR_PTR(err); 5730 5706 } 5731 5707 ··· 5963 5939 5964 5940 if (atomic_dec_and_test(&css->online_cnt)) { 5965 5941 INIT_WORK(&css->destroy_work, css_killed_work_fn); 5966 - queue_work(cgroup_destroy_wq, &css->destroy_work); 5942 + queue_work(cgroup_offline_wq, &css->destroy_work); 5967 5943 } 5968 5944 } 5969 5945 ··· 6349 6325 * We would prefer to do this in cgroup_init() above, but that 6350 6326 * is called before init_workqueues(): so leave this until after. 
6351 6327 */ 6352 - cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1); 6353 - BUG_ON(!cgroup_destroy_wq); 6328 + cgroup_offline_wq = alloc_workqueue("cgroup_offline", 0, 1); 6329 + BUG_ON(!cgroup_offline_wq); 6330 + 6331 + cgroup_release_wq = alloc_workqueue("cgroup_release", 0, 1); 6332 + BUG_ON(!cgroup_release_wq); 6333 + 6334 + cgroup_free_wq = alloc_workqueue("cgroup_free", 0, 1); 6335 + BUG_ON(!cgroup_free_wq); 6354 6336 return 0; 6355 6337 } 6356 6338 core_initcall(cgroup_wq_init);
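A self-contained illustration of the single-queue hazard the comment above describes (the work items here are hypothetical): with max_active == 1, a work item that waits on a later item in the same queue stalls forever, which is why the destruction stages now get their own queues.

    static DECLARE_COMPLETION(dep_done);

    static void waiter_fn(struct work_struct *work)
    {
            wait_for_completion(&dep_done);   /* blocks: dep_work never runs */
    }

    static void dep_fn(struct work_struct *work)
    {
            complete(&dep_done);
    }

    static DECLARE_WORK(waiter_work, waiter_fn);
    static DECLARE_WORK(dep_work, dep_fn);

    /* wq = alloc_workqueue("demo", 0, 1), i.e. max_active == 1 */
    queue_work(wq, &waiter_work);   /* runs first and blocks */
    queue_work(wq, &dep_work);      /* queued behind it: deadlock */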
+1 -1
kernel/sched/core.c
··· 9551 9551 #ifdef CONFIG_FAIR_GROUP_SCHED 9552 9552 return scale_load_down(tg->shares); 9553 9553 #else 9554 - return sched_weight_from_cgroup(tg->scx_weight); 9554 + return sched_weight_from_cgroup(tg->scx.weight); 9555 9555 #endif 9556 9556 } 9557 9557
+1 -5
kernel/sched/ext.c
··· 6788 6788 * CPUs disagree, they use %ENQUEUE_RESTORE which is bypassed to 6789 6789 * the current local DSQ for running tasks and thus are not 6790 6790 * visible to the BPF scheduler. 6791 - * 6792 - * Also skip re-enqueueing tasks that can only run on this 6793 - * CPU, as they would just be re-added to the same local 6794 - * DSQ without any benefit. 6795 6791 */ 6796 - if (p->migration_pending || is_migration_disabled(p) || p->nr_cpus_allowed == 1) 6792 + if (p->migration_pending) 6797 6793 continue; 6798 6794 6799 6795 dispatch_dequeue(rq, p);
+4
kernel/trace/rv/monitors/sleep/sleep.c
··· 127 127 mon = ltl_get_monitor(current); 128 128 129 129 switch (id) { 130 + #ifdef __NR_clock_nanosleep 130 131 case __NR_clock_nanosleep: 132 + #endif 131 133 #ifdef __NR_clock_nanosleep_time64 132 134 case __NR_clock_nanosleep_time64: 133 135 #endif ··· 140 138 ltl_atom_update(current, LTL_CLOCK_NANOSLEEP, true); 141 139 break; 142 140 141 + #ifdef __NR_futex 143 142 case __NR_futex: 143 + #endif 144 144 #ifdef __NR_futex_time64 145 145 case __NR_futex_time64: 146 146 #endif
+2 -2
kernel/trace/rv/rv.c
··· 495 495 */ 496 496 static void *enabled_monitors_next(struct seq_file *m, void *p, loff_t *pos) 497 497 { 498 - struct rv_monitor *mon = p; 498 + struct rv_monitor *mon = container_of(p, struct rv_monitor, list); 499 499 500 500 (*pos)++; 501 501 ··· 805 805 806 806 retval = create_monitor_dir(monitor, parent); 807 807 if (retval) 808 - return retval; 808 + goto out_unlock; 809 809 810 810 /* keep children close to the parent for easier visualisation */ 811 811 if (parent)
+2
kernel/trace/trace_kprobe.c
··· 908 908 return -EINVAL; 909 909 } 910 910 buf = kmemdup(&argv[0][1], len + 1, GFP_KERNEL); 911 + if (!buf) 912 + return -ENOMEM; 911 913 buf[len] = '\0'; 912 914 ret = kstrtouint(buf, 0, &maxactive); 913 915 if (ret || !maxactive) {
+6 -2
mm/damon/core.c
··· 2479 2479 mutex_lock(&ctx->call_controls_lock); 2480 2480 list_del(&control->list); 2481 2481 mutex_unlock(&ctx->call_controls_lock); 2482 - if (!control->repeat) 2482 + if (!control->repeat) { 2483 2483 complete(&control->completion); 2484 - else 2484 + } else if (control->canceled && control->dealloc_on_cancel) { 2485 + kfree(control); 2486 + continue; 2487 + } else { 2485 2488 list_add(&control->list, &repeat_controls); 2489 + } 2486 2490 } 2487 2491 control = list_first_entry_or_null(&repeat_controls, 2488 2492 struct damon_call_control, list);
+15 -8
mm/damon/sysfs.c
··· 1534 1534 return 0; 1535 1535 } 1536 1536 1537 - static struct damon_call_control damon_sysfs_repeat_call_control = { 1538 - .fn = damon_sysfs_repeat_call_fn, 1539 - .repeat = true, 1540 - }; 1541 - 1542 1537 static int damon_sysfs_turn_damon_on(struct damon_sysfs_kdamond *kdamond) 1543 1538 { 1544 1539 struct damon_ctx *ctx; 1540 + struct damon_call_control *repeat_call_control; 1545 1541 int err; 1546 1542 1547 1543 if (damon_sysfs_kdamond_running(kdamond)) ··· 1550 1554 damon_destroy_ctx(kdamond->damon_ctx); 1551 1555 kdamond->damon_ctx = NULL; 1552 1556 1557 + repeat_call_control = kmalloc(sizeof(*repeat_call_control), 1558 + GFP_KERNEL); 1559 + if (!repeat_call_control) 1560 + return -ENOMEM; 1561 + 1553 1562 ctx = damon_sysfs_build_ctx(kdamond->contexts->contexts_arr[0]); 1554 - if (IS_ERR(ctx)) 1563 + if (IS_ERR(ctx)) { 1564 + kfree(repeat_call_control); 1555 1565 return PTR_ERR(ctx); 1566 + } 1556 1567 err = damon_start(&ctx, 1, false); 1557 1568 if (err) { 1569 + kfree(repeat_call_control); 1558 1570 damon_destroy_ctx(ctx); 1559 1571 return err; 1560 1572 } 1561 1573 kdamond->damon_ctx = ctx; 1562 1574 1563 - damon_sysfs_repeat_call_control.data = kdamond; 1564 - damon_call(ctx, &damon_sysfs_repeat_call_control); 1575 + repeat_call_control->fn = damon_sysfs_repeat_call_fn; 1576 + repeat_call_control->data = kdamond; 1577 + repeat_call_control->repeat = true; 1578 + repeat_call_control->dealloc_on_cancel = true; 1579 + damon_call(ctx, repeat_call_control); 1565 1580 return err; 1566 1581 } 1567 1582
+11 -3
mm/gup.c
··· 2287 2287 struct pages_or_folios *pofs) 2288 2288 { 2289 2289 unsigned long collected = 0; 2290 - bool drain_allow = true; 2291 2290 struct folio *folio; 2291 + int drained = 0; 2292 2292 long i = 0; 2293 2293 2294 2294 for (folio = pofs_get_folio(pofs, i); folio; ··· 2307 2307 continue; 2308 2308 } 2309 2309 2310 - if (!folio_test_lru(folio) && drain_allow) { 2310 + if (drained == 0 && folio_may_be_lru_cached(folio) && 2311 + folio_ref_count(folio) != 2312 + folio_expected_ref_count(folio) + 1) { 2313 + lru_add_drain(); 2314 + drained = 1; 2315 + } 2316 + if (drained == 1 && folio_may_be_lru_cached(folio) && 2317 + folio_ref_count(folio) != 2318 + folio_expected_ref_count(folio) + 1) { 2311 2319 lru_add_drain_all(); 2312 - drain_allow = false; 2320 + drained = 2; 2313 2321 } 2314 2322 2315 2323 if (!folio_isolate_lru(folio))
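The escalation policy above, distilled: drain the local CPU first, and only fall back to the expensive all-CPU drain if the folio still carries an unexpected extra reference (a sign it may sit in another CPU's LRU batch). In this sketch, folio_has_extra_ref() is a hypothetical shorthand for the folio_ref_count() != folio_expected_ref_count() + 1 test used in the hunk:

    /* drained: 0 = nothing yet, 1 = local drain done, 2 = global drain done */
    if (drained == 0 && folio_has_extra_ref(folio)) {
            lru_add_drain();        /* cheap: this CPU only */
            drained = 1;
    }
    if (drained == 1 && folio_has_extra_ref(folio)) {
            lru_add_drain_all();    /* expensive: every CPU, at most once */
            drained = 2;
    }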
+3 -3
mm/mlock.c
··· 255 255 256 256 folio_get(folio); 257 257 if (!folio_batch_add(fbatch, mlock_lru(folio)) || 258 - folio_test_large(folio) || lru_cache_disabled()) 258 + !folio_may_be_lru_cached(folio) || lru_cache_disabled()) 259 259 mlock_folio_batch(fbatch); 260 260 local_unlock(&mlock_fbatch.lock); 261 261 } ··· 278 278 279 279 folio_get(folio); 280 280 if (!folio_batch_add(fbatch, mlock_new(folio)) || 281 - folio_test_large(folio) || lru_cache_disabled()) 281 + !folio_may_be_lru_cached(folio) || lru_cache_disabled()) 282 282 mlock_folio_batch(fbatch); 283 283 local_unlock(&mlock_fbatch.lock); 284 284 } ··· 299 299 */ 300 300 folio_get(folio); 301 301 if (!folio_batch_add(fbatch, folio) || 302 - folio_test_large(folio) || lru_cache_disabled()) 302 + !folio_may_be_lru_cached(folio) || lru_cache_disabled()) 303 303 mlock_folio_batch(fbatch); 304 304 local_unlock(&mlock_fbatch.lock); 305 305 }
+26 -24
mm/swap.c
··· 164 164 for (i = 0; i < folio_batch_count(fbatch); i++) { 165 165 struct folio *folio = fbatch->folios[i]; 166 166 167 + /* block memcg migration while the folio moves between lru */ 168 + if (move_fn != lru_add && !folio_test_clear_lru(folio)) 169 + continue; 170 + 167 171 folio_lruvec_relock_irqsave(folio, &lruvec, &flags); 168 172 move_fn(lruvec, folio); 169 173 ··· 180 176 } 181 177 182 178 static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch, 183 - struct folio *folio, move_fn_t move_fn, 184 - bool on_lru, bool disable_irq) 179 + struct folio *folio, move_fn_t move_fn, bool disable_irq) 185 180 { 186 181 unsigned long flags; 187 - 188 - if (on_lru && !folio_test_clear_lru(folio)) 189 - return; 190 182 191 183 folio_get(folio); 192 184 ··· 191 191 else 192 192 local_lock(&cpu_fbatches.lock); 193 193 194 - if (!folio_batch_add(this_cpu_ptr(fbatch), folio) || folio_test_large(folio) || 195 - lru_cache_disabled()) 194 + if (!folio_batch_add(this_cpu_ptr(fbatch), folio) || 195 + !folio_may_be_lru_cached(folio) || lru_cache_disabled()) 196 196 folio_batch_move_lru(this_cpu_ptr(fbatch), move_fn); 197 197 198 198 if (disable_irq) ··· 201 201 local_unlock(&cpu_fbatches.lock); 202 202 } 203 203 204 - #define folio_batch_add_and_move(folio, op, on_lru) \ 205 - __folio_batch_add_and_move( \ 206 - &cpu_fbatches.op, \ 207 - folio, \ 208 - op, \ 209 - on_lru, \ 210 - offsetof(struct cpu_fbatches, op) >= offsetof(struct cpu_fbatches, lock_irq) \ 204 + #define folio_batch_add_and_move(folio, op) \ 205 + __folio_batch_add_and_move( \ 206 + &cpu_fbatches.op, \ 207 + folio, \ 208 + op, \ 209 + offsetof(struct cpu_fbatches, op) >= \ 210 + offsetof(struct cpu_fbatches, lock_irq) \ 211 211 ) 212 212 213 213 static void lru_move_tail(struct lruvec *lruvec, struct folio *folio) ··· 231 231 void folio_rotate_reclaimable(struct folio *folio) 232 232 { 233 233 if (folio_test_locked(folio) || folio_test_dirty(folio) || 234 - folio_test_unevictable(folio)) 234 + folio_test_unevictable(folio) || !folio_test_lru(folio)) 235 235 return; 236 236 237 - folio_batch_add_and_move(folio, lru_move_tail, true); 237 + folio_batch_add_and_move(folio, lru_move_tail); 238 238 } 239 239 240 240 void lru_note_cost_unlock_irq(struct lruvec *lruvec, bool file, ··· 328 328 329 329 void folio_activate(struct folio *folio) 330 330 { 331 - if (folio_test_active(folio) || folio_test_unevictable(folio)) 331 + if (folio_test_active(folio) || folio_test_unevictable(folio) || 332 + !folio_test_lru(folio)) 332 333 return; 333 334 334 - folio_batch_add_and_move(folio, lru_activate, true); 335 + folio_batch_add_and_move(folio, lru_activate); 335 336 } 336 337 337 338 #else ··· 508 507 lru_gen_in_fault() && !(current->flags & PF_MEMALLOC)) 509 508 folio_set_active(folio); 510 509 511 - folio_batch_add_and_move(folio, lru_add, false); 510 + folio_batch_add_and_move(folio, lru_add); 512 511 } 513 512 EXPORT_SYMBOL(folio_add_lru); 514 513 ··· 686 685 void deactivate_file_folio(struct folio *folio) 687 686 { 688 687 /* Deactivating an unevictable folio will not accelerate reclaim */ 689 - if (folio_test_unevictable(folio)) 688 + if (folio_test_unevictable(folio) || !folio_test_lru(folio)) 690 689 return; 691 690 692 691 if (lru_gen_enabled() && lru_gen_clear_refs(folio)) 693 692 return; 694 693 695 - folio_batch_add_and_move(folio, lru_deactivate_file, true); 694 + folio_batch_add_and_move(folio, lru_deactivate_file); 696 695 } 697 696 698 697 /* ··· 705 704 */ 706 705 void folio_deactivate(struct folio *folio) 707 706 { 
708 - if (folio_test_unevictable(folio)) 707 + if (folio_test_unevictable(folio) || !folio_test_lru(folio)) 709 708 return; 710 709 711 710 if (lru_gen_enabled() ? lru_gen_clear_refs(folio) : !folio_test_active(folio)) 712 711 return; 713 712 714 - folio_batch_add_and_move(folio, lru_deactivate, true); 713 + folio_batch_add_and_move(folio, lru_deactivate); 715 714 } 716 715 717 716 /** ··· 724 723 void folio_mark_lazyfree(struct folio *folio) 725 724 { 726 725 if (!folio_test_anon(folio) || !folio_test_swapbacked(folio) || 726 + !folio_test_lru(folio) || 727 727 folio_test_swapcache(folio) || folio_test_unevictable(folio)) 728 728 return; 729 729 730 - folio_batch_add_and_move(folio, lru_lazyfree, true); 730 + folio_batch_add_and_move(folio, lru_lazyfree); 731 731 } 732 732 733 733 void lru_add_drain(void)
+1 -1
mm/vmscan.c
··· 4507 4507 } 4508 4508 4509 4509 /* ineligible */ 4510 - if (!folio_test_lru(folio) || zone > sc->reclaim_idx) { 4510 + if (zone > sc->reclaim_idx) { 4511 4511 gen = folio_inc_gen(lruvec, folio, false); 4512 4512 list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]); 4513 4513 return true;
+1 -1
net/core/dev.c
··· 6965 6965 * the kthread. 6966 6966 */ 6967 6967 while (true) { 6968 - if (!test_bit(NAPIF_STATE_SCHED_THREADED, &napi->state)) 6968 + if (!test_bit(NAPI_STATE_SCHED_THREADED, &napi->state)) 6969 6969 break; 6970 6970 6971 6971 msleep(20);
+2 -2
net/devlink/rate.c
··· 34 34 static struct devlink_rate * 35 35 devlink_rate_node_get_by_name(struct devlink *devlink, const char *node_name) 36 36 { 37 - static struct devlink_rate *devlink_rate; 37 + struct devlink_rate *devlink_rate; 38 38 39 39 list_for_each_entry(devlink_rate, &devlink->rate_list, list) { 40 40 if (devlink_rate_is_node(devlink_rate) && ··· 819 819 */ 820 820 void devl_rate_nodes_destroy(struct devlink *devlink) 821 821 { 822 - static struct devlink_rate *devlink_rate, *tmp; 823 822 const struct devlink_ops *ops = devlink->ops; 823 + struct devlink_rate *devlink_rate, *tmp; 824 824 825 825 devl_assert_locked(devlink); 826 826
+2 -2
net/ethtool/common.c
··· 905 905 int err; 906 906 907 907 if (!ops->get_ts_info) 908 - return -ENODEV; 908 + return -EOPNOTSUPP; 909 909 910 910 /* Does ptp come from netdev? */ 911 911 ethtool_init_tsinfo(info); ··· 973 973 int err; 974 974 975 975 err = ethtool_net_get_ts_info_by_phc(dev, info, hwprov_desc); 976 - if (err == -ENODEV) { 976 + if (err == -ENODEV || err == -EOPNOTSUPP) { 977 977 struct phy_device *phy; 978 978 979 979 phy = ethtool_phy_get_ts_info_by_phc(dev, info, hwprov_desc);
+5
net/ipv4/tcp.c
··· 3327 3327 struct inet_connection_sock *icsk = inet_csk(sk); 3328 3328 struct tcp_sock *tp = tcp_sk(sk); 3329 3329 int old_state = sk->sk_state; 3330 + struct request_sock *req; 3330 3331 u32 seq; 3331 3332 3332 3333 if (old_state != TCP_CLOSE) ··· 3443 3442 3444 3443 3445 3444 /* Clean up fastopen related fields */ 3445 + req = rcu_dereference_protected(tp->fastopen_rsk, 3446 + lockdep_sock_is_held(sk)); 3447 + if (req) 3448 + reqsk_fastopen_remove(sk, req, false); 3446 3449 tcp_free_fastopen_req(tp); 3447 3450 inet_clear_bit(DEFER_CONNECT, sk); 3448 3451 tp->fastopen_client_fail = 0;
+3 -1
net/ipv4/tcp_ao.c
··· 1178 1178 if (!ao) 1179 1179 return; 1180 1180 1181 - WRITE_ONCE(ao->risn, tcp_hdr(skb)->seq); 1181 + /* sk with TCP_REPAIR_ON does not have skb in tcp_finish_connect */ 1182 + if (skb) 1183 + WRITE_ONCE(ao->risn, tcp_hdr(skb)->seq); 1182 1184 ao->rcv_sne = 0; 1183 1185 1184 1186 hlist_for_each_entry_rcu(key, &ao->head, node, lockdep_sock_is_held(sk))
+3 -3
net/mptcp/options.c
··· 985 985 return false; 986 986 } 987 987 988 - if (mp_opt->deny_join_id0) 989 - WRITE_ONCE(msk->pm.remote_deny_join_id0, true); 990 - 991 988 if (unlikely(!READ_ONCE(msk->pm.server_side))) 992 989 pr_warn_once("bogus mpc option on established client sk"); 993 990 994 991 set_fully_established: 992 + if (mp_opt->deny_join_id0) 993 + WRITE_ONCE(msk->pm.remote_deny_join_id0, true); 994 + 995 995 mptcp_data_lock((struct sock *)msk); 996 996 __mptcp_subflow_fully_established(msk, subflow, mp_opt); 997 997 mptcp_data_unlock((struct sock *)msk);
+7
net/mptcp/pm_netlink.c
··· 408 408 const struct sock *ssk) 409 409 { 410 410 int err = nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token)); 411 + u16 flags = 0; 411 412 412 413 if (err) 413 414 return err; 414 415 415 416 if (nla_put_u8(skb, MPTCP_ATTR_SERVER_SIDE, READ_ONCE(msk->pm.server_side))) 417 + return -EMSGSIZE; 418 + 419 + if (READ_ONCE(msk->pm.remote_deny_join_id0)) 420 + flags |= MPTCP_PM_EV_FLAG_DENY_JOIN_ID0; 421 + 422 + if (flags && nla_put_u16(skb, MPTCP_ATTR_FLAGS, flags)) 416 423 return -EMSGSIZE; 417 424 418 425 return mptcp_event_add_subflow(skb, ssk);
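On the receiving side, a userspace listener that has already parsed the event's MPTCP_ATTR_FLAGS netlink attribute into a flags value can test the new bit; a minimal sketch:

    #include <linux/mptcp.h>
    #include <stdio.h>

    static void report_deny_join_id0(unsigned int flags)
    {
            /* flags: value of the event's MPTCP_ATTR_FLAGS attribute */
            if (flags & MPTCP_PM_EV_FLAG_DENY_JOIN_ID0)
                    printf("peer denies MP_JOIN to address id 0\n");
    }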
+16
net/mptcp/protocol.c
··· 371 371 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); 372 372 } 373 373 374 + static void mptcp_shutdown_subflows(struct mptcp_sock *msk) 375 + { 376 + struct mptcp_subflow_context *subflow; 377 + 378 + mptcp_for_each_subflow(msk, subflow) { 379 + struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 380 + bool slow; 381 + 382 + slow = lock_sock_fast(ssk); 383 + tcp_shutdown(ssk, SEND_SHUTDOWN); 384 + unlock_sock_fast(ssk, slow); 385 + } 386 + } 387 + 374 388 /* called under the msk socket lock */ 375 389 static bool mptcp_pending_data_fin_ack(struct sock *sk) 376 390 { ··· 409 395 break; 410 396 case TCP_CLOSING: 411 397 case TCP_LAST_ACK: 398 + mptcp_shutdown_subflows(msk); 412 399 mptcp_set_state(sk, TCP_CLOSE); 413 400 break; 414 401 } ··· 578 563 mptcp_set_state(sk, TCP_CLOSING); 579 564 break; 580 565 case TCP_FIN_WAIT2: 566 + mptcp_shutdown_subflows(msk); 581 567 mptcp_set_state(sk, TCP_CLOSE); 582 568 break; 583 569 default:
+4
net/mptcp/subflow.c
··· 883 883 884 884 ctx->subflow_id = 1; 885 885 owner = mptcp_sk(ctx->conn); 886 + 887 + if (mp_opt.deny_join_id0) 888 + WRITE_ONCE(owner->pm.remote_deny_join_id0, true); 889 + 886 890 mptcp_pm_new_connection(owner, child, 1); 887 891 888 892 /* with OoO packets we can reach here without ingress
+12 -8
net/rds/ib_frmr.c
··· 133 133 134 134 ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_dma_len, 135 135 &off, PAGE_SIZE); 136 - if (unlikely(ret != ibmr->sg_dma_len)) 137 - return ret < 0 ? ret : -EINVAL; 136 + if (unlikely(ret != ibmr->sg_dma_len)) { 137 + ret = ret < 0 ? ret : -EINVAL; 138 + goto out_inc; 139 + } 138 140 139 - if (cmpxchg(&frmr->fr_state, 140 - FRMR_IS_FREE, FRMR_IS_INUSE) != FRMR_IS_FREE) 141 - return -EBUSY; 141 + if (cmpxchg(&frmr->fr_state, FRMR_IS_FREE, FRMR_IS_INUSE) != FRMR_IS_FREE) { 142 + ret = -EBUSY; 143 + goto out_inc; 144 + } 142 145 143 146 atomic_inc(&ibmr->ic->i_fastreg_inuse_count); 144 147 ··· 169 166 /* Failure here can be because of -ENOMEM as well */ 170 167 rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE); 171 168 172 - atomic_inc(&ibmr->ic->i_fastreg_wrs); 173 169 if (printk_ratelimit()) 174 170 pr_warn("RDS/IB: %s returned error(%d)\n", 175 171 __func__, ret); 176 - goto out; 172 + goto out_inc; 177 173 } 178 174 179 175 /* Wait for the registration to complete in order to prevent an invalid ··· 181 179 */ 182 180 wait_event(frmr->fr_reg_done, !frmr->fr_reg); 183 181 184 - out: 182 + return ret; 185 183 184 + out_inc: 185 + atomic_inc(&ibmr->ic->i_fastreg_wrs); 186 186 return ret; 187 187 } 188 188
+2 -2
net/rfkill/rfkill-gpio.c
··· 94 94 static int rfkill_gpio_probe(struct platform_device *pdev) 95 95 { 96 96 struct rfkill_gpio_data *rfkill; 97 - struct gpio_desc *gpio; 97 + const char *type_name = NULL; 98 98 const char *name_property; 99 99 const char *type_property; 100 - const char *type_name; 100 + struct gpio_desc *gpio; 101 101 int ret; 102 102 103 103 if (dmi_check_system(rfkill_gpio_deny_table))
+10 -8
net/rxrpc/rxgk.c
··· 475 475 struct krb5_buffer metadata; 476 476 unsigned int offset = sp->offset, len = sp->len; 477 477 size_t data_offset = 0, data_len = len; 478 - u32 ac; 478 + u32 ac = 0; 479 479 int ret = -ENOMEM; 480 480 481 481 _enter(""); ··· 499 499 ret = rxgk_verify_mic_skb(gk->krb5, gk->rx_Kc, &metadata, 500 500 skb, &offset, &len, &ac); 501 501 kfree(hdr); 502 - if (ret == -EPROTO) { 503 - rxrpc_abort_eproto(call, skb, ac, 504 - rxgk_abort_1_verify_mic_eproto); 502 + if (ret < 0) { 503 + if (ret != -ENOMEM) 504 + rxrpc_abort_eproto(call, skb, ac, 505 + rxgk_abort_1_verify_mic_eproto); 505 506 } else { 506 507 sp->offset = offset; 507 508 sp->len = len; ··· 525 524 struct rxgk_header hdr; 526 525 unsigned int offset = sp->offset, len = sp->len; 527 526 int ret; 528 - u32 ac; 527 + u32 ac = 0; 529 528 530 529 _enter(""); 531 530 532 531 ret = rxgk_decrypt_skb(gk->krb5, gk->rx_enc, skb, &offset, &len, &ac); 533 - if (ret == -EPROTO) 534 - rxrpc_abort_eproto(call, skb, ac, rxgk_abort_2_decrypt_eproto); 535 - if (ret < 0) 532 + if (ret < 0) { 533 + if (ret != -ENOMEM) 534 + rxrpc_abort_eproto(call, skb, ac, rxgk_abort_2_decrypt_eproto); 536 535 goto error; 536 + } 537 537 538 538 if (len < sizeof(hdr)) { 539 539 ret = rxrpc_abort_eproto(call, skb, RXGK_PACKETSHORT,
+20 -9
net/rxrpc/rxgk_app.c
··· 54 54 55 55 _enter(""); 56 56 57 + if (ticket_len < 10 * sizeof(__be32)) 58 + return rxrpc_abort_conn(conn, skb, RXGK_INCONSISTENCY, -EPROTO, 59 + rxgk_abort_resp_short_yfs_tkt); 60 + 57 61 /* Get the session key length */ 58 62 ret = skb_copy_bits(skb, ticket_offset, tmp, sizeof(tmp)); 59 63 if (ret < 0) ··· 191 187 struct key *server_key; 192 188 unsigned int ticket_offset, ticket_len; 193 189 u32 kvno, enctype; 194 - int ret, ec; 190 + int ret, ec = 0; 195 191 196 192 struct { 197 193 __be32 kvno; ··· 199 195 __be32 token_len; 200 196 } container; 201 197 198 + if (token_len < sizeof(container)) 199 + goto short_packet; 200 + 202 201 /* Decode the RXGK_TokenContainer object. This tells us which server 203 202 * key we should be using. We can then fetch the key, get the secret 204 203 * and set up the crypto to extract the token. 205 204 */ 206 205 if (skb_copy_bits(skb, token_offset, &container, sizeof(container)) < 0) 207 - return rxrpc_abort_conn(conn, skb, RXGK_PACKETSHORT, -EPROTO, 208 - rxgk_abort_resp_tok_short); 206 + goto short_packet; 209 207 210 208 kvno = ntohl(container.kvno); 211 209 enctype = ntohl(container.enctype); 212 210 ticket_len = ntohl(container.token_len); 213 211 ticket_offset = token_offset + sizeof(container); 214 212 215 - if (xdr_round_up(ticket_len) > token_len - 3 * 4) 216 - return rxrpc_abort_conn(conn, skb, RXGK_PACKETSHORT, -EPROTO, 217 - rxgk_abort_resp_tok_short); 213 + if (xdr_round_up(ticket_len) > token_len - sizeof(container)) 214 + goto short_packet; 218 215 219 216 _debug("KVNO %u", kvno); 220 217 _debug("ENC %u", enctype); ··· 241 236 &ticket_offset, &ticket_len, &ec); 242 237 crypto_free_aead(token_enc); 243 238 token_enc = NULL; 244 - if (ret < 0) 245 - return rxrpc_abort_conn(conn, skb, ec, ret, 246 - rxgk_abort_resp_tok_dec); 239 + if (ret < 0) { 240 + if (ret != -ENOMEM) 241 + return rxrpc_abort_conn(conn, skb, ec, ret, 242 + rxgk_abort_resp_tok_dec); 243 + } 247 244 248 245 ret = conn->security->default_decode_ticket(conn, skb, ticket_offset, 249 246 ticket_len, _key); ··· 290 283 * also come out this way if the ticket decryption fails. 291 284 */ 292 285 return ret; 286 + 287 + short_packet: 288 + return rxrpc_abort_conn(conn, skb, RXGK_PACKETSHORT, -EPROTO, 289 + rxgk_abort_resp_tok_short); 293 290 }
+12 -2
net/rxrpc/rxgk_common.h
··· 88 88 *_offset += offset; 89 89 *_len = len; 90 90 break; 91 + case -EBADMSG: /* Checksum mismatch. */ 91 92 case -EPROTO: 92 - case -EBADMSG: 93 93 *_error_code = RXGK_SEALEDINCON; 94 94 break; 95 + case -EMSGSIZE: 96 + *_error_code = RXGK_PACKETSHORT; 97 + break; 98 + case -ENOPKG: /* Would prefer RXGK_BADETYPE, but not available for YFS. */ 95 99 default: 100 + *_error_code = RXGK_INCONSISTENCY; 96 101 break; 97 102 } 98 103 ··· 132 127 *_offset += offset; 133 128 *_len = len; 134 129 break; 130 + case -EBADMSG: /* Checksum mismatch */ 135 131 case -EPROTO: 136 - case -EBADMSG: 137 132 *_error_code = RXGK_SEALEDINCON; 138 133 break; 134 + case -EMSGSIZE: 135 + *_error_code = RXGK_PACKETSHORT; 136 + break; 137 + case -ENOPKG: /* Would prefer RXGK_BADETYPE, but not available for YFS. */ 139 138 default: 139 + *_error_code = RXGK_INCONSISTENCY; 140 140 break; 141 141 } 142 142
+1
net/tls/tls.h
··· 141 141 142 142 int wait_on_pending_writer(struct sock *sk, long *timeo); 143 143 void tls_err_abort(struct sock *sk, int err); 144 + void tls_strp_abort_strp(struct tls_strparser *strp, int err); 144 145 145 146 int init_prot_info(struct tls_prot_info *prot, 146 147 const struct tls_crypto_info *crypto_info,
+9 -5
net/tls/tls_strp.c
··· 13 13 14 14 static struct workqueue_struct *tls_strp_wq; 15 15 16 - static void tls_strp_abort_strp(struct tls_strparser *strp, int err) 16 + void tls_strp_abort_strp(struct tls_strparser *strp, int err) 17 17 { 18 18 if (strp->stopped) 19 19 return; ··· 211 211 struct sk_buff *in_skb, unsigned int offset, 212 212 size_t in_len) 213 213 { 214 + unsigned int nfrag = skb->len / PAGE_SIZE; 214 215 size_t len, chunk; 215 216 skb_frag_t *frag; 216 217 int sz; 217 218 218 - frag = &skb_shinfo(skb)->frags[skb->len / PAGE_SIZE]; 219 + if (unlikely(nfrag >= skb_shinfo(skb)->nr_frags)) { 220 + DEBUG_NET_WARN_ON_ONCE(1); 221 + return -EMSGSIZE; 222 + } 223 + 224 + frag = &skb_shinfo(skb)->frags[nfrag]; 219 225 220 226 len = in_len; 221 227 /* First make sure we got the header */ ··· 526 520 tls_strp_load_anchor_with_queue(strp, inq); 527 521 if (!strp->stm.full_len) { 528 522 sz = tls_rx_msg_size(strp, strp->anchor); 529 - if (sz < 0) { 530 - tls_strp_abort_strp(strp, sz); 523 + if (sz < 0) 531 524 return sz; 532 - } 533 525 534 526 strp->stm.full_len = sz; 535 527
+1 -2
net/tls/tls_sw.c
··· 2474 2474 return data_len + TLS_HEADER_SIZE; 2475 2475 2476 2476 read_failure: 2477 - tls_err_abort(strp->sk, ret); 2478 - 2477 + tls_strp_abort_strp(strp, ret); 2479 2478 return ret; 2480 2479 } 2481 2480
+3
samples/damon/mtier.c
··· 208 208 if (enabled == is_enabled) 209 209 return 0; 210 210 211 + if (!init_called) 212 + return 0; 213 + 211 214 if (enabled) { 212 215 err = damon_sample_mtier_start(); 213 216 if (err)
+3
samples/damon/prcl.c
··· 137 137 if (enabled == is_enabled) 138 138 return 0; 139 139 140 + if (!init_called) 141 + return 0; 142 + 140 143 if (enabled) { 141 144 err = damon_sample_prcl_start(); 142 145 if (err)
+3
samples/damon/wsse.c
··· 118 118 return 0; 119 119 120 120 if (enabled) { 121 + if (!init_called) 122 + return 0; 123 + 121 124 err = damon_sample_wsse_start(); 122 125 if (err) 123 126 enabled = false;
+14
sound/soc/codecs/Kconfig
··· 268 268 imply SND_SOC_TAS2770 269 269 imply SND_SOC_TAS2780 270 270 imply SND_SOC_TAS2781_I2C 271 + imply SND_SOC_TAS2783_SDW 271 272 imply SND_SOC_TAS5086 272 273 imply SND_SOC_TAS571X 273 274 imply SND_SOC_TAS5720 ··· 2097 2096 Note the TAS2781 driver implements a flexible and configurable 2098 2097 algo coefficient setting, for one, two or even multiple TAS2781 2099 2098 chips. 2099 + 2100 + config SND_SOC_TAS2783_SDW 2101 + tristate "Texas Instruments TAS2783 speaker amplifier (sdw)" 2102 + depends on SOUNDWIRE 2103 + depends on EFI 2104 + select REGMAP_SOUNDWIRE 2105 + select REGMAP_SOUNDWIRE_MBQ 2106 + select CRC32 2107 + help 2108 + Enable support for Texas Instruments TAS2783A digital input 2109 + mono Class-D and DSP-inside audio power amplifiers. The TAS2783 2110 + driver implements a flexible and configurable algorithm 2111 + coefficient setting, for one, two or multiple TAS2783 chips. 2100 2112 2101 2113 config SND_SOC_TAS5086 2102 2114 tristate "Texas Instruments TAS5086 speaker amplifier"
+2
sound/soc/codecs/Makefile
··· 319 319 snd-soc-tas2781-comlib-i2c-y := tas2781-comlib-i2c.o 320 320 snd-soc-tas2781-fmwlib-y := tas2781-fmwlib.o 321 321 snd-soc-tas2781-i2c-y := tas2781-i2c.o 322 + snd-soc-tas2783-sdw-y := tas2783-sdw.o 322 323 snd-soc-tfa9879-y := tfa9879.o 323 324 snd-soc-tfa989x-y := tfa989x.o 324 325 snd-soc-tlv320adc3xxx-y := tlv320adc3xxx.o ··· 744 743 obj-$(CONFIG_SND_SOC_TAS2781_COMLIB_I2C) += snd-soc-tas2781-comlib-i2c.o 745 744 obj-$(CONFIG_SND_SOC_TAS2781_FMWLIB) += snd-soc-tas2781-fmwlib.o 746 745 obj-$(CONFIG_SND_SOC_TAS2781_I2C) += snd-soc-tas2781-i2c.o 746 + obj-$(CONFIG_SND_SOC_TAS2783_SDW) += snd-soc-tas2783-sdw.o 747 747 obj-$(CONFIG_SND_SOC_TAS5086) += snd-soc-tas5086.o 748 748 obj-$(CONFIG_SND_SOC_TAS571X) += snd-soc-tas571x.o 749 749 obj-$(CONFIG_SND_SOC_TAS5720) += snd-soc-tas5720.o
+43 -32
sound/soc/codecs/cs35l41.c
··· 7 7 // Author: David Rhodes <david.rhodes@cirrus.com> 8 8 9 9 #include <linux/acpi.h> 10 + #include <acpi/acpi_bus.h> 10 11 #include <linux/delay.h> 11 12 #include <linux/err.h> 12 13 #include <linux/init.h> ··· 1148 1147 return ret; 1149 1148 } 1150 1149 1151 - #ifdef CONFIG_ACPI 1152 - static int cs35l41_acpi_get_name(struct cs35l41_private *cs35l41) 1150 + static int cs35l41_get_system_name(struct cs35l41_private *cs35l41) 1153 1151 { 1154 1152 struct acpi_device *adev = ACPI_COMPANION(cs35l41->dev); 1155 - acpi_handle handle = acpi_device_handle(adev); 1156 - const char *hid; 1157 - const char *sub; 1153 + const char *sub = NULL; 1154 + const char *tmp; 1155 + int ret = 0; 1158 1156 1159 - /* If there is no acpi_device, there is no ACPI for this system, return 0 */ 1160 - if (!adev) 1161 - return 0; 1157 + /* If there is no acpi_device, there is no ACPI for this system, skip checking ACPI */ 1158 + if (adev) { 1159 + acpi_handle handle = acpi_device_handle(adev); 1162 1160 1163 - sub = acpi_get_subsystem_id(handle); 1164 - if (IS_ERR(sub)) { 1165 - /* If no _SUB, fallback to _HID, otherwise fail */ 1166 - if (PTR_ERR(sub) == -ENODATA) { 1167 - hid = acpi_device_hid(adev); 1168 - /* If dummy hid, return 0 and fallback to legacy firmware path */ 1169 - if (!strcmp(hid, "device")) 1170 - return 0; 1171 - sub = kstrdup(hid, GFP_KERNEL); 1172 - if (!sub) 1173 - sub = ERR_PTR(-ENOMEM); 1174 - 1175 - } else 1176 - return PTR_ERR(sub); 1161 + sub = acpi_get_subsystem_id(handle); 1162 + ret = PTR_ERR_OR_ZERO(sub); 1163 + if (ret) { 1164 + sub = NULL; 1165 + /* If no _SUB, fallback to _HID, otherwise fail */ 1166 + if (ret == -ENODATA) { 1167 + tmp = acpi_device_hid(adev); 1168 + /* If dummy hid, return 0 and fallback to legacy firmware path */ 1169 + if (!strcmp(tmp, "device")) { 1170 + ret = 0; 1171 + goto err; 1172 + } 1173 + sub = kstrdup(tmp, GFP_KERNEL); 1174 + if (!sub) { 1175 + ret = -ENOMEM; 1176 + goto err; 1177 + } 1178 + } 1179 + } 1180 + } else { 1181 + if (!device_property_read_string(cs35l41->dev, "cirrus,subsystem-id", &tmp)) { 1182 + sub = kstrdup(tmp, GFP_KERNEL); 1183 + if (!sub) { 1184 + ret = -ENOMEM; 1185 + goto err; 1186 + } 1187 + } 1177 1188 } 1178 1189 1179 - cs35l41->dsp.system_name = sub; 1180 - dev_dbg(cs35l41->dev, "Subsystem ID: %s\n", cs35l41->dsp.system_name); 1190 + err: 1191 + if (sub) { 1192 + cs35l41->dsp.system_name = sub; 1193 + dev_info(cs35l41->dev, "Subsystem ID: %s\n", cs35l41->dsp.system_name); 1194 + } else 1195 + dev_warn(cs35l41->dev, "Subsystem ID not found\n"); 1181 1196 1182 - return 0; 1197 + return ret; 1183 1198 } 1184 - #else 1185 - static int cs35l41_acpi_get_name(struct cs35l41_private *cs35l41) 1186 - { 1187 - return 0; 1188 - } 1189 - #endif /* CONFIG_ACPI */ 1190 1199 1191 1200 int cs35l41_probe(struct cs35l41_private *cs35l41, const struct cs35l41_hw_cfg *hw_cfg) 1192 1201 { ··· 1328 1317 goto err; 1329 1318 } 1330 1319 1331 - ret = cs35l41_acpi_get_name(cs35l41); 1320 + ret = cs35l41_get_system_name(cs35l41); 1332 1321 if (ret < 0) 1333 1322 goto err; 1334 1323
+1331
sound/soc/codecs/tas2783-sdw.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + // 3 + // ALSA SoC Texas Instruments TAS2783 Audio Smart Amplifier 4 + // 5 + // Copyright (C) 2025 Texas Instruments Incorporated 6 + // https://www.ti.com 7 + // 8 + // The TAS2783 driver implements a flexible and configurable 9 + // algo coefficient setting for single TAS2783 chips. 10 + // 11 + // Author: Niranjan H Y <niranjanhy@ti.com> 12 + // Author: Baojun Xu <baojun.xu@ti.com> 13 + // Author: Kevin Lu <kevin-lu@ti.com> 14 + 15 + #include <linux/unaligned.h> 16 + #include <linux/crc32.h> 17 + #include <linux/efi.h> 18 + #include <linux/err.h> 19 + #include <linux/firmware.h> 20 + #include <linux/init.h> 21 + #include <linux/module.h> 22 + #include <sound/pcm_params.h> 23 + #include <linux/pm.h> 24 + #include <linux/pm_runtime.h> 25 + #include <linux/regmap.h> 26 + #include <linux/wait.h> 27 + #include <linux/soundwire/sdw.h> 28 + #include <linux/soundwire/sdw_registers.h> 29 + #include <linux/soundwire/sdw_type.h> 30 + #include <sound/sdw.h> 31 + #include <sound/soc.h> 32 + #include <sound/tlv.h> 33 + #include <sound/tas2781-tlv.h> 34 + 35 + #include "tas2783.h" 36 + 37 + #define TIMEOUT_FW_DL_MS (3000) 38 + #define FW_DL_OFFSET 36 39 + #define FW_FL_HDR 12 40 + #define TAS2783_PROBE_TIMEOUT 5000 41 + #define TAS2783_CALI_GUID EFI_GUID(0x1f52d2a1, 0xbb3a, 0x457d, 0xbc, \ 42 + 0x09, 0x43, 0xa3, 0xf4, 0x31, 0x0a, 0x92) 43 + 44 + static const u32 tas2783_cali_reg[] = { 45 + TAS2783_CAL_R0, 46 + TAS2783_CAL_INVR0, 47 + TAS2783_CAL_R0LOW, 48 + TAS2783_CAL_POWER, 49 + TAS2783_CAL_TLIM, 50 + }; 51 + 52 + struct bin_header_t { 53 + u16 vendor_id; 54 + u16 version; 55 + u32 file_id; 56 + u32 length; 57 + }; 58 + 59 + struct calibration_data { 60 + u32 is_valid; 61 + unsigned long read_sz; 62 + u8 data[TAS2783_CALIB_DATA_SZ]; 63 + }; 64 + 65 + struct tas2783_prv { 66 + struct snd_soc_component *component; 67 + struct calibration_data cali_data; 68 + struct sdw_slave *sdw_peripheral; 69 + enum sdw_slave_status status; 70 + /* calibration */ 71 + struct mutex calib_lock; 72 + /* pde and firmware download */ 73 + struct mutex pde_lock; 74 + struct regmap *regmap; 75 + struct device *dev; 76 + struct class *class; 77 + struct attribute_group *cal_attr_groups; 78 + struct tm tm; 79 + u8 rca_binaryname[64]; 80 + u8 dev_name[32]; 81 + bool hw_init; 82 + /* wq for firmware download */ 83 + wait_queue_head_t fw_wait; 84 + bool fw_dl_task_done; 85 + bool fw_dl_success; 86 + }; 87 + 88 + static const struct reg_default tas2783_reg_default[] = { 89 + {TAS2783_AMP_LEVEL, 0x28}, 90 + {TASDEV_REG_SDW(0, 0, 0x03), 0x28}, 91 + {TASDEV_REG_SDW(0, 0, 0x04), 0x21}, 92 + {TASDEV_REG_SDW(0, 0, 0x05), 0x41}, 93 + {TASDEV_REG_SDW(0, 0, 0x06), 0x00}, 94 + {TASDEV_REG_SDW(0, 0, 0x07), 0x20}, 95 + {TASDEV_REG_SDW(0, 0, 0x08), 0x09}, 96 + {TASDEV_REG_SDW(0, 0, 0x09), 0x02}, 97 + {TASDEV_REG_SDW(0, 0, 0x0a), 0x0a}, 98 + {TASDEV_REG_SDW(0, 0, 0x0c), 0x10}, 99 + {TASDEV_REG_SDW(0, 0, 0x0d), 0x13}, 100 + {TASDEV_REG_SDW(0, 0, 0x0e), 0xc2}, 101 + {TASDEV_REG_SDW(0, 0, 0x0f), 0x40}, 102 + {TASDEV_REG_SDW(0, 0, 0x10), 0x04}, 103 + {TASDEV_REG_SDW(0, 0, 0x13), 0x13}, 104 + {TASDEV_REG_SDW(0, 0, 0x14), 0x12}, 105 + {TASDEV_REG_SDW(0, 0, 0x15), 0x00}, 106 + {TASDEV_REG_SDW(0, 0, 0x16), 0x12}, 107 + {TASDEV_REG_SDW(0, 0, 0x17), 0x80}, 108 + {TAS2783_DVC_LVL, 0x00}, 109 + {TASDEV_REG_SDW(0, 0, 0x1b), 0x61}, 110 + {TASDEV_REG_SDW(0, 0, 0x1c), 0x36}, 111 + {TASDEV_REG_SDW(0, 0, 0x1d), 0x00}, 112 + {TASDEV_REG_SDW(0, 0, 0x1f), 0x01}, 113 + {TASDEV_REG_SDW(0, 0, 0x20), 0x2e}, 114 + 
{TASDEV_REG_SDW(0, 0, 0x21), 0x00}, 115 + {TASDEV_REG_SDW(0, 0, 0x34), 0x06}, 116 + {TASDEV_REG_SDW(0, 0, 0x35), 0xbd}, 117 + {TASDEV_REG_SDW(0, 0, 0x36), 0xad}, 118 + {TASDEV_REG_SDW(0, 0, 0x37), 0xa8}, 119 + {TASDEV_REG_SDW(0, 0, 0x38), 0x00}, 120 + {TASDEV_REG_SDW(0, 0, 0x3b), 0xfc}, 121 + {TASDEV_REG_SDW(0, 0, 0x3d), 0xdd}, 122 + {TASDEV_REG_SDW(0, 0, 0x40), 0xf6}, 123 + {TASDEV_REG_SDW(0, 0, 0x41), 0x14}, 124 + {TASDEV_REG_SDW(0, 0, 0x5c), 0x19}, 125 + {TASDEV_REG_SDW(0, 0, 0x5d), 0x80}, 126 + {TASDEV_REG_SDW(0, 0, 0x63), 0x48}, 127 + {TASDEV_REG_SDW(0, 0, 0x65), 0x08}, 128 + {TASDEV_REG_SDW(0, 0, 0x66), 0xb2}, 129 + {TASDEV_REG_SDW(0, 0, 0x67), 0x00}, 130 + {TASDEV_REG_SDW(0, 0, 0x6a), 0x12}, 131 + {TASDEV_REG_SDW(0, 0, 0x6b), 0xfb}, 132 + {TASDEV_REG_SDW(0, 0, 0x6c), 0x00}, 133 + {TASDEV_REG_SDW(0, 0, 0x6d), 0x00}, 134 + {TASDEV_REG_SDW(0, 0, 0x6e), 0x1a}, 135 + {TASDEV_REG_SDW(0, 0, 0x6f), 0x00}, 136 + {TASDEV_REG_SDW(0, 0, 0x70), 0x96}, 137 + {TASDEV_REG_SDW(0, 0, 0x71), 0x02}, 138 + {TASDEV_REG_SDW(0, 0, 0x73), 0x08}, 139 + {TASDEV_REG_SDW(0, 0, 0x75), 0xe0}, 140 + {TASDEV_REG_SDW(0, 0, 0x7a), 0x60}, 141 + {TASDEV_REG_SDW(0, 0, 0x60), 0x21}, 142 + {TASDEV_REG_SDW(0, 1, 0x02), 0x00}, 143 + {TASDEV_REG_SDW(0, 1, 0x17), 0xc0}, 144 + {TASDEV_REG_SDW(0, 1, 0x19), 0x60}, 145 + {TASDEV_REG_SDW(0, 1, 0x35), 0x75}, 146 + {TASDEV_REG_SDW(0, 1, 0x3d), 0x00}, 147 + {TASDEV_REG_SDW(0, 1, 0x3e), 0x00}, 148 + {TASDEV_REG_SDW(0, 1, 0x3f), 0x00}, 149 + {TASDEV_REG_SDW(0, 1, 0x40), 0x00}, 150 + {TASDEV_REG_SDW(0, 1, 0x41), 0x00}, 151 + {TASDEV_REG_SDW(0, 1, 0x42), 0x00}, 152 + {TASDEV_REG_SDW(0, 1, 0x43), 0x00}, 153 + {TASDEV_REG_SDW(0, 1, 0x44), 0x00}, 154 + {TASDEV_REG_SDW(0, 1, 0x45), 0x00}, 155 + {TASDEV_REG_SDW(0, 1, 0x47), 0xab}, 156 + {TASDEV_REG_SDW(0, 0xfd, 0x0d), 0x0d}, 157 + {TASDEV_REG_SDW(0, 0xfd, 0x39), 0x00}, 158 + {TASDEV_REG_SDW(0, 0xfd, 0x3e), 0x00}, 159 + {TASDEV_REG_SDW(0, 0xfd, 0x45), 0x00}, 160 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_CS21, 0x02, 0), 0x0}, 161 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_CS21, 0x10, 0), 0x0}, 162 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_CS24, 0x02, 0), 0x0}, 163 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_CS24, 0x10, 0), 0x0}, 164 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_CS26, 0x02, 0), 0x0}, 165 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_CS26, 0x10, 0), 0x0}, 166 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_CS28, 0x02, 0), 0x0}, 167 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_CS28, 0x10, 0), 0x0}, 168 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_CS127, 0x02, 0), 0x0}, 169 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_CS127, 0x10, 0), 0x0}, 170 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_FU21, 0x01, 1), 0x1}, 171 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_FU21, 0x02, 1), 0x9c00}, 172 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_FU23, 0x01, 0), 0x1}, 173 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_FU23, 0x01, 1), 0x1}, 174 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_FU23, 0x0b, 1), 0x0}, 175 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_FU23, 0x10, 0), 0x0}, 176 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_FU26, 0x01, 1), 0x1}, 177 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_FU26, 0x01, 0), 0x1}, 178 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_FU26, 0x0b, 1), 0x0}, 179 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_FU26, 0x10, 0), 0x0}, 180 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_FU127, 0x01, 0), 0x1}, 181 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_FU127, 0x01, 1), 0x1}, 182 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_FU127, 0x01, 2), 0x1}, 183 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_FU127, 0x0b, 0), 0x0}, 184 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_FU127, 0x0b, 1), 0x0}, 185 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_FU127, 0x0b, 2), 
0x0}, 186 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_FU127, 0x10, 0), 0x0}, 187 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_IT21, 0x04, 0), 0x0}, 188 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_IT21, 0x08, 0), 0x0}, 189 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_IT21, 0x10, 0), 0x0}, 190 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_IT21, 0x11, 0), 0x0}, 191 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_IT26, 0x04, 0), 0x0}, 192 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_IT26, 0x08, 0), 0x0}, 193 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_IT26, 0x10, 0), 0x0}, 194 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_IT26, 0x11, 0), 0x0}, 195 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_IT28, 0x04, 0), 0x0}, 196 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_IT28, 0x08, 0), 0x0}, 197 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_IT28, 0x10, 0), 0x0}, 198 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_IT28, 0x11, 0), 0x0}, 199 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_IT29, 0x04, 0), 0x0}, 200 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_IT29, 0x08, 0), 0x0}, 201 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_IT29, 0x10, 0), 0x0}, 202 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_IT29, 0x11, 0), 0x0}, 203 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MFPU21, 0x01, 0), 0x0}, 204 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MFPU21, 0x04, 0), 0x0}, 205 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MFPU21, 0x05, 0), 0x1}, 206 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MFPU21, 0x08, 0), 0x0}, 207 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MFPU21, 0x10, 0), 0x0}, 208 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MFPU21, 0x11, 0), 0x0}, 209 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MFPU21, 0x12, 0), 0x0}, 210 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MFPU26, 0x01, 0), 0x0}, 211 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MFPU26, 0x04, 0), 0x0}, 212 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MFPU26, 0x05, 0), 0x1}, 213 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MFPU26, 0x08, 0), 0x0}, 214 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MFPU26, 0x10, 0), 0x0}, 215 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MFPU26, 0x11, 0), 0x0}, 216 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MFPU26, 0x12, 0), 0x0}, 217 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MU26, 0x01, 0), 0x0}, 218 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MU26, 0x01, 1), 0x0}, 219 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MU26, 0x01, 2), 0x0}, 220 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MU26, 0x01, 3), 0x0}, 221 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MU26, 0x01, 4), 0x0}, 222 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MU26, 0x01, 5), 0x0}, 223 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MU26, 0x01, 6), 0x0}, 224 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MU26, 0x01, 7), 0x0}, 225 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MU26, 0x06, 0), 0x0}, 226 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT23, 0x04, 0), 0x0}, 227 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT23, 0x08, 0), 0x0}, 228 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT24, 0x04, 0), 0x0}, 229 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT24, 0x08, 0), 0x0}, 230 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT24, 0x11, 0), 0x0}, 231 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT25, 0x04, 0), 0x0}, 232 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT25, 0x08, 0), 0x0}, 233 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT25, 0x11, 0), 0x0}, 234 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT28, 0x04, 0), 0x0}, 235 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT28, 0x08, 0), 0x0}, 236 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT28, 0x11, 0), 0x0}, 237 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT127, 0x04, 0), 0x0}, 238 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT127, 0x08, 0), 0x0}, 239 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT127, 0x11, 0), 0x0}, 240 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT127, 0x12, 0), 0x0}, 241 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT127, 0x12, 1), 0x0}, 242 + {SDW_SDCA_CTL(1, 
TAS2783_SDCA_ENT_OT127, 0x12, 2), 0x0}, 243 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT127, 0x12, 3), 0x0}, 244 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT127, 0x12, 4), 0x0}, 245 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT127, 0x12, 5), 0x0}, 246 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT127, 0x12, 6), 0x0}, 247 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT127, 0x12, 7), 0x0}, 248 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT127, 0x12, 8), 0x0}, 249 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT127, 0x12, 9), 0x0}, 250 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT127, 0x12, 0xa), 0x0}, 251 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT127, 0x12, 0xb), 0x0}, 252 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT127, 0x12, 0xc), 0x0}, 253 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT127, 0x12, 0xd), 0x0}, 254 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT127, 0x12, 0xe), 0x0}, 255 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT127, 0x12, 0xf), 0x0}, 256 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_PDE23, 0x1, 0), 0x3}, 257 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_PDE23, 0x10, 0), 0x3}, 258 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_PPU21, 0x06, 0), 0x0}, 259 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_PPU21, 0x10, 0), 0x0}, 260 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_PPU21, 0x11, 0), 0x0}, 261 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_PPU21, 0x12, 0), 0x0}, 262 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_PPU21, 0x13, 0), 0x0}, 263 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_PPU26, 0x06, 0), 0x0}, 264 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_PPU26, 0x10, 0), 0x0}, 265 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_PPU26, 0x11, 0), 0x0}, 266 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_PPU26, 0x12, 0), 0x0}, 267 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_PPU26, 0x13, 0), 0x0}, 268 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_SAPU29, 0x05, 0), 0x0}, 269 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_SAPU29, 0x10, 0), 0x1}, 270 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_SAPU29, 0x11, 0), 0x0}, 271 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_SAPU29, 0x12, 0), 0x0}, 272 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_TG23, 0x10, 0), 0x0}, 273 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_XU22, 0x01, 0), 0x1}, 274 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_XU22, 0x06, 0), 0x0}, 275 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_XU22, 0x07, 0), 0x0}, 276 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_XU22, 0x08, 0), 0x0}, 277 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_XU22, 0x09, 0), 0x0}, 278 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_XU22, 0x0a, 0), 0x0}, 279 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_XU22, 0x10, 0), 0x1}, 280 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_XU22, 0x12, 0), 0x0}, 281 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_XU22, 0x13, 0), 0x0}, 282 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_XU22, 0x14, 0), 0x0}, 283 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_XU22, 0x15, 0), 0x0}, 284 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_XU22, 0x16, 0), 0x0}, 285 + {SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_UDMPU23, 0x10, 0), 0x0}, 286 + }; 287 + 288 + static const struct reg_sequence tas2783_init_seq[] = { 289 + REG_SEQ0(SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_PPU21, 0x10, 0x00), 0x04), 290 + REG_SEQ0(0x00800418, 0x00), 291 + REG_SEQ0(0x00800419, 0x00), 292 + REG_SEQ0(0x0080041a, 0x00), 293 + REG_SEQ0(0x0080041b, 0x00), 294 + REG_SEQ0(0x00800428, 0x40), 295 + REG_SEQ0(0x00800429, 0x00), 296 + REG_SEQ0(0x0080042a, 0x00), 297 + REG_SEQ0(0x0080042b, 0x00), 298 + REG_SEQ0(SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_FU23, 0x1, 0x00), 0x00), 299 + REG_SEQ0(0x0080005c, 0xD9), 300 + REG_SEQ0(0x00800082, 0x20), 301 + REG_SEQ0(0x008000a1, 0x00), 302 + REG_SEQ0(0x00800097, 0xc8), 303 + REG_SEQ0(0x00800099, 0x20), 304 + REG_SEQ0(0x008000c7, 0xaa), 305 + REG_SEQ0(0x008000b5, 0x74), 306 + REG_SEQ0(0x00800082, 0x20), 307 + REG_SEQ0(0x00807e8d, 
0x0d), 308 + REG_SEQ0(0x00807eb9, 0x53), 309 + REG_SEQ0(0x00807ebe, 0x42), 310 + REG_SEQ0(0x00807ec5, 0x37), 311 + REG_SEQ0(0x00800066, 0x92), 312 + REG_SEQ0(0x00800003, 0x28), 313 + REG_SEQ0(0x00800004, 0x21), 314 + REG_SEQ0(0x00800005, 0x41), 315 + REG_SEQ0(0x00800006, 0x00), 316 + REG_SEQ0(0x00800007, 0x20), 317 + REG_SEQ0(0x0080000c, 0x10), 318 + REG_SEQ0(0x00800013, 0x08), 319 + REG_SEQ0(0x00800015, 0x00), 320 + REG_SEQ0(0x00800017, 0x80), 321 + REG_SEQ0(0x0080001a, 0x00), 322 + REG_SEQ0(0x0080001b, 0x22), 323 + REG_SEQ0(0x0080001c, 0x36), 324 + REG_SEQ0(0x0080001d, 0x01), 325 + REG_SEQ0(0x0080001f, 0x00), 326 + REG_SEQ0(0x00800020, 0x2e), 327 + REG_SEQ0(0x00800034, 0x06), 328 + REG_SEQ0(0x00800035, 0xb9), 329 + REG_SEQ0(0x00800036, 0xad), 330 + REG_SEQ0(0x00800037, 0xa8), 331 + REG_SEQ0(0x00800038, 0x00), 332 + REG_SEQ0(0x0080003b, 0xfc), 333 + REG_SEQ0(0x0080003d, 0xdd), 334 + REG_SEQ0(0x00800040, 0xf6), 335 + REG_SEQ0(0x00800041, 0x14), 336 + REG_SEQ0(0x0080005c, 0x19), 337 + REG_SEQ0(0x0080005d, 0x80), 338 + REG_SEQ0(0x00800063, 0x48), 339 + REG_SEQ0(0x00800065, 0x08), 340 + REG_SEQ0(0x00800067, 0x00), 341 + REG_SEQ0(0x0080006a, 0x12), 342 + REG_SEQ0(0x0080006b, 0x7b), 343 + REG_SEQ0(0x0080006c, 0x00), 344 + REG_SEQ0(0x0080006d, 0x00), 345 + REG_SEQ0(0x0080006e, 0x1a), 346 + REG_SEQ0(0x0080006f, 0x00), 347 + REG_SEQ0(0x00800070, 0x96), 348 + REG_SEQ0(0x00800071, 0x02), 349 + REG_SEQ0(0x00800073, 0x08), 350 + REG_SEQ0(0x00800075, 0xe0), 351 + REG_SEQ0(0x0080007a, 0x60), 352 + REG_SEQ0(0x008000bd, 0x00), 353 + REG_SEQ0(0x008000be, 0x00), 354 + REG_SEQ0(0x008000bf, 0x00), 355 + REG_SEQ0(0x008000c0, 0x00), 356 + REG_SEQ0(0x008000c1, 0x00), 357 + REG_SEQ0(0x008000c2, 0x00), 358 + REG_SEQ0(0x008000c3, 0x00), 359 + REG_SEQ0(0x008000c4, 0x00), 360 + REG_SEQ0(0x008000c5, 0x00), 361 + REG_SEQ0(0x00800008, 0x49), 362 + REG_SEQ0(0x00800009, 0x02), 363 + REG_SEQ0(0x0080000a, 0x1a), 364 + REG_SEQ0(0x0080000d, 0x93), 365 + REG_SEQ0(0x0080000e, 0x82), 366 + REG_SEQ0(0x0080000f, 0x42), 367 + REG_SEQ0(0x00800010, 0x84), 368 + REG_SEQ0(0x00800014, 0x0a), 369 + REG_SEQ0(0x00800016, 0x00), 370 + REG_SEQ0(0x00800060, 0x21), 371 + }; 372 + 373 + static int tas2783_sdca_mbq_size(struct device *dev, u32 reg) 374 + { 375 + switch (reg) { 376 + case 0x000 ... 0x080: /* Data port 0. */ 377 + case 0x100 ... 0x140: /* Data port 1. */ 378 + case 0x200 ... 0x240: /* Data port 2. */ 379 + case 0x300 ... 0x340: /* Data port 3. */ 380 + case 0x400 ... 0x440: /* Data port 4. */ 381 + case 0x500 ... 0x540: /* Data port 5. */ 382 + case 0x800000 ... 0x803fff: /* Page 0 ~ 127. */ 383 + case 0x807e80 ... 0x807eff: /* Page 253. 
*/ 384 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_UDMPU23, 385 + TAS2783_SDCA_CTL_UDMPU_CLUSTER, 0): 386 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_FU21, TAS2783_SDCA_CTL_FU_MUTE, 387 + TAS2783_DEVICE_CHANNEL_LEFT): 388 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_PDE23, 0x1, 0): 389 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_PDE23, 0x10, 0): 390 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_IT21, 0x04, 0): 391 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_SAPU29, 0x10, 0): 392 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_SAPU29, 0x11, 0): 393 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_SAPU29, 0x12, 0): 394 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_PPU21, 0x10, 0): 395 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_PPU21, 0x11, 0): 396 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_PPU26, 0x10, 0): 397 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_PPU26, 0x11, 0): 398 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_TG23, 0x10, 0): 399 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_XU22, 0x01, 0): 400 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_XU22, 0x08, 0): 401 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_XU22, 0x0a, 0): 402 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_XU22, 0x10, 0): 403 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_XU22, 0x14, 0): 404 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_XU22, 0x15, 0): 405 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_XU22, 0x16, 0): 406 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_IT26, 0x04, 0): 407 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_IT28, 0x04, 0): 408 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_IT29, 0x04, 0): 409 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT23, 0x04, 0): 410 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT24, 0x04, 0): 411 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT28, 0x04, 0): 412 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT127, 0x04, 0): 413 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT127, 0x12, 0): 414 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT127, 0x12, 1): 415 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT127, 0x12, 2): 416 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT127, 0x12, 3): 417 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT127, 0x12, 4): 418 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT127, 0x12, 5): 419 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT127, 0x12, 6): 420 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT127, 0x12, 7): 421 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT127, 0x12, 8): 422 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT127, 0x12, 9): 423 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT127, 0x12, 0xa): 424 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT127, 0x12, 0xb): 425 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT127, 0x12, 0xc): 426 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT127, 0x12, 0xd): 427 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT127, 0x12, 0xe): 428 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT127, 0x12, 0xf): 429 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_CS21, 0x02, 0): 430 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_CS21, 0x10, 0): 431 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_CS24, 0x02, 0): 432 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_CS24, 0x10, 0): 433 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_CS25, 0x02, 0): 434 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_CS25, 0x10, 0): 435 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_CS127, 0x02, 0): 436 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_CS127, 0x10, 0): 437 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_CS26, 0x02, 0): 438 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_CS26, 0x10, 0): 439 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_CS28, 0x02, 0): 440 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_CS28, 0x10, 0): 441 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MFPU21, 0x01, 0): 442 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MFPU21, 0x04, 
0): 443 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MFPU21, 0x05, 0): 444 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MFPU21, 0x10, 0): 445 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MFPU21, 0x11, 0): 446 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_FU127, 0x01, 1): 447 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_FU127, 0x01, 2): 448 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_FU127, 0x01, 0): 449 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_FU26, 0x01, 1): 450 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_FU26, 0x01, 0): 451 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MFPU26, 0x01, 0): 452 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MFPU26, 0x04, 0): 453 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MFPU26, 0x05, 0): 454 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MFPU26, 0x10, 0): 455 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MFPU26, 0x11, 0): 456 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_FU23, 0x01, 0): 457 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_FU23, 0x01, 1): 458 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT25, 0x04, 0): 459 + return 1; 460 + 461 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_IT26, 0x10, 0): 462 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_IT26, 0x11, 0): 463 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_IT28, 0x10, 0): 464 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_IT28, 0x11, 0): 465 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_IT29, 0x10, 0): 466 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_IT29, 0x11, 0): 467 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT24, 0x11, 0): 468 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT25, 0x11, 0): 469 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT28, 0x11, 0): 470 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT127, 0x11, 0): 471 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MU26, 0x01, 0): 472 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MU26, 0x01, 1): 473 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MU26, 0x01, 2): 474 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MU26, 0x01, 3): 475 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MU26, 0x01, 4): 476 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MU26, 0x01, 5): 477 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MU26, 0x01, 6): 478 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MU26, 0x01, 7): 479 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_FU21, 0x02, 1): 480 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_FU23, 0x0b, 1): 481 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_FU127, 0x0b, 1): 482 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_FU127, 0x0b, 2): 483 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_FU127, 0x0b, 0): 484 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_FU26, 0x0b, 0): 485 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_FU26, 0x0b, 1): 486 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_XU22, 0x07, 0): 487 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_XU22, 0x09, 0): 488 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MFPU21, 0x12, 0): 489 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MFPU26, 0x12, 0): 490 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_PPU21, 0x12, 0): 491 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_PPU21, 0x13, 0): 492 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_PPU26, 0x12, 0): 493 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_PPU26, 0x13, 0): 494 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_IT21, 0x10, 0): 495 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_IT21, 0x11, 0): 496 + return 2; 497 + 498 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_FU23, 0x10, 0): 499 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_IT21, 0x08, 0): 500 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_IT26, 0x08, 0): 501 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_IT28, 0x08, 0): 502 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_IT29, 0x08, 0): 503 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT23, 0x08, 0): 504 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT24, 0x08, 
0): 505 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT25, 0x08, 0): 506 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT28, 0x08, 0): 507 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_OT127, 0x08, 0): 508 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MU26, 0x06, 0): 509 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_FU127, 0x10, 0): 510 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_FU26, 0x10, 0): 511 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_XU22, 0x06, 0): 512 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_XU22, 0x12, 0): 513 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_XU22, 0x13, 0): 514 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MFPU21, 0x08, 0): 515 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_MFPU26, 0x08, 0): 516 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_SAPU29, 0x05, 0): 517 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_PPU21, 0x06, 0): 518 + case SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_PPU26, 0x06, 0): 519 + return 4; 520 + 521 + default: 522 + return 0; 523 + } 524 + } 525 + 526 + static bool tas2783_readable_register(struct device *dev, unsigned int reg) 527 + { 528 + return tas2783_sdca_mbq_size(dev, reg) > 0; 529 + } 530 + 531 + static bool tas2783_volatile_register(struct device *dev, u32 reg) 532 + { 533 + switch (reg) { 534 + case 0x000 ... 0x080: /* Data port 0. */ 535 + case 0x100 ... 0x140: /* Data port 1. */ 536 + case 0x200 ... 0x240: /* Data port 2. */ 537 + case 0x300 ... 0x340: /* Data port 3. */ 538 + case 0x400 ... 0x440: /* Data port 4. */ 539 + case 0x500 ... 0x540: /* Data port 5. */ 540 + case 0x800001: 541 + return true; 542 + 543 + default: 544 + return false; 545 + } 546 + } 547 + 548 + static const struct regmap_config tas_regmap = { 549 + .reg_bits = 32, 550 + .val_bits = 8, 551 + .readable_reg = tas2783_readable_register, 552 + .volatile_reg = tas2783_volatile_register, 553 + .reg_defaults = tas2783_reg_default, 554 + .num_reg_defaults = ARRAY_SIZE(tas2783_reg_default), 555 + .max_register = 0x41008000 + TASDEV_REG_SDW(0xa1, 0x60, 0x7f), 556 + .cache_type = REGCACHE_MAPLE, 557 + .use_single_read = true, 558 + .use_single_write = true, 559 + }; 560 + 561 + static const struct regmap_sdw_mbq_cfg tas2783_mbq_cfg = { 562 + .mbq_size = tas2783_sdca_mbq_size, 563 + }; 564 + 565 + static s32 tas2783_digital_getvol(struct snd_kcontrol *kcontrol, 566 + struct snd_ctl_elem_value *ucontrol) 567 + { 568 + return snd_soc_get_volsw(kcontrol, ucontrol); 569 + } 570 + 571 + static s32 tas2783_digital_putvol(struct snd_kcontrol *kcontrol, 572 + struct snd_ctl_elem_value *ucontrol) 573 + { 574 + return snd_soc_put_volsw(kcontrol, ucontrol); 575 + } 576 + 577 + static s32 tas2783_amp_getvol(struct snd_kcontrol *kcontrol, 578 + struct snd_ctl_elem_value *ucontrol) 579 + { 580 + return snd_soc_get_volsw(kcontrol, ucontrol); 581 + } 582 + 583 + static s32 tas2783_amp_putvol(struct snd_kcontrol *kcontrol, 584 + struct snd_ctl_elem_value *ucontrol) 585 + { 586 + return snd_soc_put_volsw(kcontrol, ucontrol); 587 + } 588 + 589 + static const struct snd_kcontrol_new tas2783_snd_controls[] = { 590 + SOC_SINGLE_RANGE_EXT_TLV("Amp Volume", TAS2783_AMP_LEVEL, 591 + 1, 0, 20, 0, tas2783_amp_getvol, 592 + tas2783_amp_putvol, tas2781_amp_tlv), 593 + SOC_SINGLE_RANGE_EXT_TLV("Speaker Volume", TAS2783_DVC_LVL, 594 + 0, 0, 200, 1, tas2783_digital_getvol, 595 + tas2783_digital_putvol, tas2781_dvc_tlv), 596 + }; 597 + 598 + static s32 tas2783_validate_calibdata(struct tas2783_prv *tas_dev, 599 + u8 *data, u32 size) 600 + { 601 + u32 ts, spk_count, size_calculated; 602 + u32 crc_calculated, crc_read, i; 603 + u32 *tmp_val; 604 + struct tm tm; 605 + 606 + i = 0; 
607 + tmp_val = (u32 *)data; 608 + if (tmp_val[i++] != 2783) { 609 + dev_err(tas_dev->dev, "cal data magic number mismatch"); 610 + return -EINVAL; 611 + } 612 + 613 + spk_count = tmp_val[i++]; 614 + if (spk_count > TAS2783_CALIB_MAX_SPK_COUNT) { 615 + dev_err(tas_dev->dev, "cal data spk_count too large"); 616 + return -EINVAL; 617 + } 618 + 619 + ts = tmp_val[i++]; 620 + time64_to_tm(ts, 0, &tm); 621 + dev_dbg(tas_dev->dev, "cal data timestamp: %ld-%d-%d %d:%d:%d", 622 + tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, 623 + tm.tm_hour, tm.tm_min, tm.tm_sec); 624 + 625 + size_calculated = 626 + (spk_count * TAS2783_CALIB_PARAMS * sizeof(u32)) + 627 + TAS2783_CALIB_HDR_SZ + TAS2783_CALIB_CRC_SZ; 628 + if (size_calculated > TAS2783_CALIB_DATA_SZ) { 629 + dev_err(tas_dev->dev, "cali data sz too large"); 630 + return -EINVAL; 631 + } else if (size < size_calculated) { 632 + dev_err(tas_dev->dev, "cali data size mismatch calc=%u vs %d\n", 633 + size, size_calculated); 634 + return -EINVAL; 635 + } 636 + 637 + crc_calculated = crc32(~0, data, 638 + size_calculated - TAS2783_CALIB_CRC_SZ) ^ ~0; 639 + crc_read = tmp_val[(size_calculated - TAS2783_CALIB_CRC_SZ) / sizeof(u32)]; 640 + if (crc_calculated != crc_read) { 641 + dev_err(tas_dev->dev, 642 + "calib data integrity check fail, 0x%08x vs 0x%08x\n", 643 + crc_calculated, crc_read); 644 + return -EINVAL; 645 + } 646 + 647 + return 0; 648 + } 649 + 650 + static void tas2783_set_calib_params_to_device(struct tas2783_prv *tas_dev, u32 *cali_data) 651 + { 652 + u32 dev_count, offset, i, device_num; 653 + u32 reg_value; 654 + u8 buf[4]; 655 + 656 + dev_count = cali_data[1]; 657 + offset = 3; 658 + 659 + for (device_num = 0; device_num < dev_count; device_num++) { 660 + if (cali_data[offset] != tas_dev->sdw_peripheral->id.unique_id) { 661 + offset += TAS2783_CALIB_PARAMS; 662 + continue; 663 + } 664 + offset++; 665 + 666 + for (i = 0; i < ARRAY_SIZE(tas2783_cali_reg); i++) { 667 + reg_value = cali_data[offset + i]; 668 + buf[0] = reg_value >> 24; 669 + buf[1] = reg_value >> 16; 670 + buf[2] = reg_value >> 8; 671 + buf[3] = reg_value & 0xff; 672 + regmap_bulk_write(tas_dev->regmap, tas2783_cali_reg[i], 673 + buf, sizeof(u32)); 674 + } 675 + break; 676 + } 677 + 678 + if (device_num == dev_count) 679 + dev_err(tas_dev->dev, "device not found\n"); 680 + else 681 + dev_dbg(tas_dev->dev, "calib data update done\n"); 682 + } 683 + 684 + static s32 tas2783_update_calibdata(struct tas2783_prv *tas_dev) 685 + { 686 + efi_guid_t efi_guid = TAS2783_CALI_GUID; 687 + u32 attr, i, *tmp_val; 688 + unsigned long size; 689 + s32 ret; 690 + efi_status_t status; 691 + static efi_char16_t efi_names[][32] = { 692 + L"SmartAmpCalibrationData", L"CALI_DATA"}; 693 + 694 + tmp_val = (u32 *)tas_dev->cali_data.data; 695 + attr = 0; 696 + i = 0; 697 + 698 + /* 699 + * In some cases, the calibration is performed in Windows, 700 + * and data was saved in UEFI. Linux can access it. 
701 + */ 702 + for (i = 0; i < ARRAY_SIZE(efi_names); i++) { 703 + size = 0; 704 + status = efi.get_variable(efi_names[i], &efi_guid, &attr, 705 + &size, NULL); 706 + if (size > TAS2783_CALIB_DATA_SZ) { 707 + dev_err(tas_dev->dev, "cali data too large\n"); 708 + break; 709 + } 710 + 711 + tas_dev->cali_data.read_sz = size; 712 + if (status == EFI_BUFFER_TOO_SMALL) { 713 + status = efi.get_variable(efi_names[i], &efi_guid, &attr, 714 + &tas_dev->cali_data.read_sz, 715 + tas_dev->cali_data.data); 716 + dev_dbg(tas_dev->dev, "cali get %lu bytes result:%ld\n", 717 + tas_dev->cali_data.read_sz, status); 718 + } 719 + if (status == EFI_SUCCESS) 720 + break; 721 + } 722 + 723 + if (status != EFI_SUCCESS) { 724 + /* Failed got calibration data from EFI. */ 725 + dev_dbg(tas_dev->dev, "No calibration data in UEFI."); 726 + return 0; 727 + } 728 + 729 + mutex_lock(&tas_dev->calib_lock); 730 + ret = tas2783_validate_calibdata(tas_dev, tas_dev->cali_data.data, 731 + tas_dev->cali_data.read_sz); 732 + if (!ret) 733 + tas2783_set_calib_params_to_device(tas_dev, tmp_val); 734 + mutex_unlock(&tas_dev->calib_lock); 735 + 736 + return ret; 737 + } 738 + 739 + static s32 read_header(const u8 *data, struct bin_header_t *hdr) 740 + { 741 + hdr->vendor_id = get_unaligned_le16(&data[0]); 742 + hdr->file_id = get_unaligned_le32(&data[2]); 743 + hdr->version = get_unaligned_le16(&data[6]); 744 + hdr->length = get_unaligned_le32(&data[8]); 745 + return 12; 746 + } 747 + 748 + static void tas2783_fw_ready(const struct firmware *fmw, void *context) 749 + { 750 + struct tas2783_prv *tas_dev = 751 + (struct tas2783_prv *)context; 752 + const u8 *buf = NULL; 753 + s32 offset = 0, img_sz, file_blk_size, ret; 754 + struct bin_header_t hdr; 755 + 756 + if (!fmw || !fmw->data) { 757 + /* No firmware binary, devices will work in ROM mode. 
*/ 758 + dev_err(tas_dev->dev, 759 + "Failed to read %s, no side-effect on driver running\n", 760 + tas_dev->rca_binaryname); 761 + ret = -EINVAL; 762 + goto out; 763 + } 764 + 765 + mutex_lock(&tas_dev->pde_lock); 766 + img_sz = fmw->size; 767 + buf = fmw->data; 768 + offset += FW_DL_OFFSET; 769 + while (offset < (img_sz - FW_FL_HDR)) { 770 + memset(&hdr, 0, sizeof(hdr)); 771 + offset += read_header(&buf[offset], &hdr); 772 + dev_dbg(tas_dev->dev, 773 + "vndr=%d, file=%d, version=%d, len=%d, off=%d\n", 774 + hdr.vendor_id, hdr.file_id, hdr.version, 775 + hdr.length, offset); 776 + /* size also includes the header */ 777 + file_blk_size = hdr.length - FW_FL_HDR; 778 + 779 + switch (hdr.file_id) { 780 + case 0: 781 + ret = sdw_nwrite_no_pm(tas_dev->sdw_peripheral, 782 + PRAM_ADDR_START, file_blk_size, 783 + &buf[offset]); 784 + if (ret < 0) 785 + dev_err(tas_dev->dev, 786 + "PRAM update failed: %d", ret); 787 + break; 788 + 789 + case 1: 790 + ret = sdw_nwrite_no_pm(tas_dev->sdw_peripheral, 791 + YRAM_ADDR_START, file_blk_size, 792 + &buf[offset]); 793 + if (ret < 0) 794 + dev_err(tas_dev->dev, 795 + "YRAM update failed: %d", ret); 796 + 797 + break; 798 + 799 + default: 800 + ret = -EINVAL; 801 + dev_err(tas_dev->dev, "Unsupported file"); 802 + break; 803 + } 804 + 805 + if (ret == 0) 806 + offset += file_blk_size; 807 + else 808 + break; 809 + } 810 + mutex_unlock(&tas_dev->pde_lock); 811 + tas2783_update_calibdata(tas_dev); 812 + 813 + out: 814 + if (!ret) 815 + tas_dev->fw_dl_success = true; 816 + tas_dev->fw_dl_task_done = true; 817 + wake_up(&tas_dev->fw_wait); 818 + if (fmw) 819 + release_firmware(fmw); 820 + } 821 + 822 + static inline s32 tas_clear_latch(struct tas2783_prv *priv) 823 + { 824 + return regmap_update_bits(priv->regmap, 825 + TASDEV_REG_SDW(0, 0, 0x5c), 826 + 0x04, 0x04); 827 + } 828 + 829 + static s32 tas_fu21_event(struct snd_soc_dapm_widget *w, 830 + struct snd_kcontrol *k, s32 event) 831 + { 832 + struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); 833 + struct tas2783_prv *tas_dev = snd_soc_component_get_drvdata(component); 834 + s32 mute; 835 + 836 + switch (event) { 837 + case SND_SOC_DAPM_POST_PMU: 838 + mute = 0; 839 + break; 840 + 841 + case SND_SOC_DAPM_PRE_PMD: 842 + mute = 1; 843 + break; 844 + } 845 + 846 + return sdw_write_no_pm(tas_dev->sdw_peripheral, 847 + SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_FU21, 848 + TAS2783_SDCA_CTL_FU_MUTE, 1), mute); 849 + } 850 + 851 + static s32 tas_fu23_event(struct snd_soc_dapm_widget *w, 852 + struct snd_kcontrol *k, s32 event) 853 + { 854 + struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); 855 + struct tas2783_prv *tas_dev = snd_soc_component_get_drvdata(component); 856 + s32 mute; 857 + 858 + switch (event) { 859 + case SND_SOC_DAPM_POST_PMU: 860 + mute = 0; 861 + break; 862 + 863 + case SND_SOC_DAPM_PRE_PMD: 864 + mute = 1; 865 + break; 866 + } 867 + 868 + return sdw_write_no_pm(tas_dev->sdw_peripheral, 869 + SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_FU23, 870 + TAS2783_SDCA_CTL_FU_MUTE, 1), mute); 871 + } 872 + 873 + static const struct snd_soc_dapm_widget tas_dapm_widgets[] = { 874 + SND_SOC_DAPM_AIF_IN("ASI", "ASI Playback", 0, SND_SOC_NOPM, 0, 0), 875 + SND_SOC_DAPM_AIF_OUT("ASI OUT", "ASI Capture", 0, SND_SOC_NOPM, 876 + 0, 0), 877 + SND_SOC_DAPM_DAC_E("FU21", NULL, SND_SOC_NOPM, 0, 0, tas_fu21_event, 878 + SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD), 879 + SND_SOC_DAPM_DAC_E("FU23", NULL, SND_SOC_NOPM, 0, 0, tas_fu23_event, 880 + SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD), 881 
+ SND_SOC_DAPM_OUTPUT("SPK"), 882 + SND_SOC_DAPM_INPUT("DMIC"), 883 + }; 884 + 885 + static const struct snd_soc_dapm_route tas_audio_map[] = { 886 + {"FU21", NULL, "ASI"}, 887 + {"SPK", NULL, "FU21"}, 888 + {"FU23", NULL, "ASI"}, 889 + {"SPK", NULL, "FU23"}, 890 + {"ASI OUT", NULL, "DMIC"}, 891 + }; 892 + 893 + static s32 tas_set_sdw_stream(struct snd_soc_dai *dai, 894 + void *sdw_stream, s32 direction) 895 + { 896 + if (!sdw_stream) 897 + return 0; 898 + 899 + snd_soc_dai_dma_data_set(dai, direction, sdw_stream); 900 + 901 + return 0; 902 + } 903 + 904 + static void tas_sdw_shutdown(struct snd_pcm_substream *substream, 905 + struct snd_soc_dai *dai) 906 + { 907 + snd_soc_dai_set_dma_data(dai, substream, NULL); 908 + } 909 + 910 + static s32 tas_sdw_hw_params(struct snd_pcm_substream *substream, 911 + struct snd_pcm_hw_params *params, 912 + struct snd_soc_dai *dai) 913 + { 914 + struct snd_soc_component *component = dai->component; 915 + struct tas2783_prv *tas_dev = 916 + snd_soc_component_get_drvdata(component); 917 + struct sdw_stream_config stream_config = {0}; 918 + struct sdw_port_config port_config = {0}; 919 + struct sdw_stream_runtime *sdw_stream; 920 + struct sdw_slave *sdw_peripheral = tas_dev->sdw_peripheral; 921 + s32 ret, retry = 3; 922 + 923 + if (!tas_dev->fw_dl_success) { 924 + dev_err(tas_dev->dev, "error playback without fw download"); 925 + return -EINVAL; 926 + } 927 + 928 + sdw_stream = snd_soc_dai_get_dma_data(dai, substream); 929 + if (!sdw_stream) 930 + return -EINVAL; 931 + 932 + ret = tas_clear_latch(tas_dev); 933 + if (ret) 934 + dev_err(tas_dev->dev, 935 + "clear latch failed, err=%d", ret); 936 + 937 + mutex_lock(&tas_dev->pde_lock); 938 + /* 939 + * Sometimes, there is error returned during power on. 940 + * So added retry logic to ensure power on so that 941 + * port prepare succeeds 942 + */ 943 + do { 944 + ret = regmap_write(tas_dev->regmap, 945 + SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_PDE23, 946 + TAS2783_SDCA_CTL_REQ_POW_STATE, 0), 947 + TAS2783_SDCA_POW_STATE_ON); 948 + if (!ret) 949 + break; 950 + usleep_range(2000, 2200); 951 + } while (retry--); 952 + mutex_unlock(&tas_dev->pde_lock); 953 + if (ret) 954 + return ret; 955 + 956 + /* SoundWire specific configuration */ 957 + snd_sdw_params_to_config(substream, params, 958 + &stream_config, &port_config); 959 + /* port 1 for playback */ 960 + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) 961 + port_config.num = 1; 962 + else 963 + port_config.num = 2; 964 + 965 + ret = sdw_stream_add_slave(sdw_peripheral, 966 + &stream_config, &port_config, 1, sdw_stream); 967 + if (ret) 968 + dev_err(dai->dev, "Unable to configure port\n"); 969 + 970 + return ret; 971 + } 972 + 973 + static s32 tas_sdw_pcm_hw_free(struct snd_pcm_substream *substream, 974 + struct snd_soc_dai *dai) 975 + { 976 + s32 ret; 977 + struct snd_soc_component *component = dai->component; 978 + struct tas2783_prv *tas_dev = 979 + snd_soc_component_get_drvdata(component); 980 + struct sdw_stream_runtime *sdw_stream = 981 + snd_soc_dai_get_dma_data(dai, substream); 982 + 983 + sdw_stream_remove_slave(tas_dev->sdw_peripheral, sdw_stream); 984 + 985 + mutex_lock(&tas_dev->pde_lock); 986 + ret = regmap_write(tas_dev->regmap, 987 + SDW_SDCA_CTL(1, TAS2783_SDCA_ENT_PDE23, 988 + TAS2783_SDCA_CTL_REQ_POW_STATE, 0), 989 + TAS2783_SDCA_POW_STATE_OFF); 990 + mutex_unlock(&tas_dev->pde_lock); 991 + 992 + return ret; 993 + } 994 + 995 + static const struct snd_soc_dai_ops tas_dai_ops = { 996 + .hw_params = tas_sdw_hw_params, 997 + .hw_free = 
tas_sdw_pcm_hw_free, 998 + .set_stream = tas_set_sdw_stream, 999 + .shutdown = tas_sdw_shutdown, 1000 + }; 1001 + 1002 + static struct snd_soc_dai_driver tas_dai_driver[] = { 1003 + { 1004 + .name = "tas2783-codec", 1005 + .id = 0, 1006 + .playback = { 1007 + .stream_name = "Playback", 1008 + .channels_min = 1, 1009 + .channels_max = 4, 1010 + .rates = TAS2783_DEVICE_RATES, 1011 + .formats = TAS2783_DEVICE_FORMATS, 1012 + }, 1013 + .capture = { 1014 + .stream_name = "Capture", 1015 + .channels_min = 1, 1016 + .channels_max = 4, 1017 + .rates = TAS2783_DEVICE_RATES, 1018 + .formats = TAS2783_DEVICE_FORMATS, 1019 + }, 1020 + .ops = &tas_dai_ops, 1021 + .symmetric_rate = 1, 1022 + }, 1023 + }; 1024 + 1025 + static s32 tas_component_probe(struct snd_soc_component *component) 1026 + { 1027 + struct tas2783_prv *tas_dev = 1028 + snd_soc_component_get_drvdata(component); 1029 + 1030 + tas_dev->component = component; 1031 + tas25xx_register_misc(tas_dev->sdw_peripheral); 1032 + 1033 + return 0; 1034 + } 1035 + 1036 + static void tas_component_remove(struct snd_soc_component *codec) 1037 + { 1038 + struct tas2783_prv *tas_dev = 1039 + snd_soc_component_get_drvdata(codec); 1040 + tas25xx_deregister_misc(); 1041 + tas_dev->component = NULL; 1042 + } 1043 + 1044 + static const struct snd_soc_component_driver soc_codec_driver_tasdevice = { 1045 + .probe = tas_component_probe, 1046 + .remove = tas_component_remove, 1047 + .controls = tas2783_snd_controls, 1048 + .num_controls = ARRAY_SIZE(tas2783_snd_controls), 1049 + .dapm_widgets = tas_dapm_widgets, 1050 + .num_dapm_widgets = ARRAY_SIZE(tas_dapm_widgets), 1051 + .dapm_routes = tas_audio_map, 1052 + .num_dapm_routes = ARRAY_SIZE(tas_audio_map), 1053 + .idle_bias_on = 1, 1054 + .endianness = 1, 1055 + }; 1056 + 1057 + static s32 tas_init(struct tas2783_prv *tas_dev) 1058 + { 1059 + s32 ret; 1060 + 1061 + dev_set_drvdata(tas_dev->dev, tas_dev); 1062 + ret = devm_snd_soc_register_component(tas_dev->dev, 1063 + &soc_codec_driver_tasdevice, 1064 + tas_dai_driver, 1065 + ARRAY_SIZE(tas_dai_driver)); 1066 + if (ret) { 1067 + dev_err(tas_dev->dev, "%s: codec register error:%d.\n", 1068 + __func__, ret); 1069 + return ret; 1070 + } 1071 + 1072 + /* set autosuspend parameters */ 1073 + pm_runtime_set_autosuspend_delay(tas_dev->dev, 3000); 1074 + pm_runtime_use_autosuspend(tas_dev->dev); 1075 + /* make sure the device does not suspend immediately */ 1076 + pm_runtime_mark_last_busy(tas_dev->dev); 1077 + pm_runtime_enable(tas_dev->dev); 1078 + 1079 + return ret; 1080 + } 1081 + 1082 + static s32 tas_read_prop(struct sdw_slave *slave) 1083 + { 1084 + struct sdw_slave_prop *prop = &slave->prop; 1085 + s32 nval; 1086 + s32 i, j; 1087 + u32 bit; 1088 + unsigned long addr; 1089 + struct sdw_dpn_prop *dpn; 1090 + 1091 + prop->scp_int1_mask = 1092 + SDW_SCP_INT1_BUS_CLASH | SDW_SCP_INT1_PARITY; 1093 + prop->quirks = SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY; 1094 + 1095 + prop->paging_support = true; 1096 + 1097 + /* first we need to allocate memory for set bits in port lists */ 1098 + prop->source_ports = 0x04; /* BITMAP: 00000100 */ 1099 + prop->sink_ports = 0x2; /* BITMAP: 00000010 */ 1100 + 1101 + nval = hweight32(prop->source_ports); 1102 + prop->src_dpn_prop = devm_kcalloc(&slave->dev, nval, 1103 + sizeof(*prop->src_dpn_prop), GFP_KERNEL); 1104 + if (!prop->src_dpn_prop) 1105 + return -ENOMEM; 1106 + 1107 + i = 0; 1108 + dpn = prop->src_dpn_prop; 1109 + addr = prop->source_ports; 1110 + for_each_set_bit(bit, &addr, 32) { 1111 + dpn[i].num = bit; 1112 + dpn[i].type = 
SDW_DPN_FULL; 1113 + dpn[i].simple_ch_prep_sm = false; 1114 + dpn[i].ch_prep_timeout = 10; 1115 + i++; 1116 + } 1117 + 1118 + /* do this again for sink now */ 1119 + nval = hweight32(prop->sink_ports); 1120 + prop->sink_dpn_prop = devm_kcalloc(&slave->dev, nval, 1121 + sizeof(*prop->sink_dpn_prop), GFP_KERNEL); 1122 + if (!prop->sink_dpn_prop) 1123 + return -ENOMEM; 1124 + 1125 + j = 0; 1126 + dpn = prop->sink_dpn_prop; 1127 + addr = prop->sink_ports; 1128 + for_each_set_bit(bit, &addr, 32) { 1129 + dpn[j].num = bit; 1130 + dpn[j].type = SDW_DPN_FULL; 1131 + dpn[j].simple_ch_prep_sm = false; 1132 + dpn[j].ch_prep_timeout = 10; 1133 + j++; 1134 + } 1135 + 1136 + /* set the timeout values */ 1137 + prop->clk_stop_timeout = 200; 1138 + 1139 + return 0; 1140 + } 1141 + 1142 + static s32 tas2783_sdca_dev_suspend(struct device *dev) 1143 + { 1144 + struct tas2783_prv *tas_dev = dev_get_drvdata(dev); 1145 + 1146 + if (!tas_dev->hw_init) 1147 + return 0; 1148 + 1149 + regcache_cache_only(tas_dev->regmap, true); 1150 + return 0; 1151 + } 1152 + 1153 + static s32 tas2783_sdca_dev_system_suspend(struct device *dev) 1154 + { 1155 + return tas2783_sdca_dev_suspend(dev); 1156 + } 1157 + 1158 + static s32 tas2783_sdca_dev_resume(struct device *dev) 1159 + { 1160 + struct sdw_slave *slave = dev_to_sdw_dev(dev); 1161 + struct tas2783_prv *tas_dev = dev_get_drvdata(dev); 1162 + unsigned long t; 1163 + 1164 + if (!slave->unattach_request) 1165 + goto regmap_sync; 1166 + 1167 + t = wait_for_completion_timeout(&slave->initialization_complete, 1168 + msecs_to_jiffies(TAS2783_PROBE_TIMEOUT)); 1169 + if (!t) { 1170 + dev_err(&slave->dev, "resume: initialization timed out\n"); 1171 + sdw_show_ping_status(slave->bus, true); 1172 + return -ETIMEDOUT; 1173 + } 1174 + 1175 + slave->unattach_request = 0; 1176 + 1177 + regmap_sync: 1178 + regcache_cache_only(tas_dev->regmap, false); 1179 + regcache_sync(tas_dev->regmap); 1180 + return 0; 1181 + } 1182 + 1183 + static const struct dev_pm_ops tas2783_sdca_pm = { 1184 + SYSTEM_SLEEP_PM_OPS(tas2783_sdca_dev_system_suspend, tas2783_sdca_dev_resume) 1185 + RUNTIME_PM_OPS(tas2783_sdca_dev_suspend, tas2783_sdca_dev_resume, NULL) 1186 + }; 1187 + 1188 + static s32 tas_io_init(struct device *dev, struct sdw_slave *slave) 1189 + { 1190 + struct tas2783_prv *tas_dev = dev_get_drvdata(dev); 1191 + s32 ret; 1192 + u8 unique_id = tas_dev->sdw_peripheral->id.unique_id; 1193 + 1194 + if (tas_dev->hw_init) 1195 + return 0; 1196 + 1197 + tas_dev->fw_dl_task_done = false; 1198 + tas_dev->fw_dl_success = false; 1199 + scnprintf(tas_dev->rca_binaryname, sizeof(tas_dev->rca_binaryname), 1200 + "tas2783-%01x.bin", unique_id); 1201 + 1202 + ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT, 1203 + tas_dev->rca_binaryname, tas_dev->dev, 1204 + GFP_KERNEL, tas_dev, tas2783_fw_ready); 1205 + if (ret) { 1206 + dev_err(tas_dev->dev, 1207 + "firmware request failed for uid=%d, ret=%d\n", 1208 + unique_id, ret); 1209 + return ret; 1210 + } 1211 + 1212 + ret = wait_event_timeout(tas_dev->fw_wait, tas_dev->fw_dl_task_done, 1213 + msecs_to_jiffies(TIMEOUT_FW_DL_MS)); 1214 + if (!ret) { 1215 + dev_err(tas_dev->dev, "fw request, wait_event timeout\n"); 1216 + ret = -EAGAIN; 1217 + } else { 1218 + ret = regmap_multi_reg_write(tas_dev->regmap, tas2783_init_seq, 1219 + ARRAY_SIZE(tas2783_init_seq)); 1220 + tas_dev->hw_init = true; 1221 + } 1222 + 1223 + return ret; 1224 + } 1225 + 1226 + static s32 tas_update_status(struct sdw_slave *slave, 1227 + enum sdw_slave_status status) 1228 + { 1229 + struct 
tas2783_prv *tas_dev = dev_get_drvdata(&slave->dev); 1230 + struct device *dev = &slave->dev; 1231 + 1232 + dev_dbg(dev, "Peripheral status = %s", 1233 + status == SDW_SLAVE_UNATTACHED ? "unattached" : 1234 + status == SDW_SLAVE_ATTACHED ? "attached" : "alert"); 1235 + 1236 + tas_dev->status = status; 1237 + if (status == SDW_SLAVE_UNATTACHED) 1238 + tas_dev->hw_init = false; 1239 + 1240 + /* Perform initialization only if slave status 1241 + * is present and hw_init flag is false 1242 + */ 1243 + if (tas_dev->hw_init || tas_dev->status != SDW_SLAVE_ATTACHED) 1244 + return 0; 1245 + 1246 + /* updated the cache data to device */ 1247 + regcache_cache_only(tas_dev->regmap, false); 1248 + regcache_sync(tas_dev->regmap); 1249 + 1250 + /* perform I/O transfers required for Slave initialization */ 1251 + return tas_io_init(&slave->dev, slave); 1252 + } 1253 + 1254 + static const struct sdw_slave_ops tas_sdw_ops = { 1255 + .read_prop = tas_read_prop, 1256 + .update_status = tas_update_status, 1257 + }; 1258 + 1259 + static void tas_remove(struct tas2783_prv *tas_dev) 1260 + { 1261 + snd_soc_unregister_component(tas_dev->dev); 1262 + } 1263 + 1264 + static s32 tas_sdw_probe(struct sdw_slave *peripheral, 1265 + const struct sdw_device_id *id) 1266 + { 1267 + struct regmap *regmap; 1268 + struct device *dev = &peripheral->dev; 1269 + struct tas2783_prv *tas_dev; 1270 + 1271 + tas_dev = devm_kzalloc(dev, sizeof(*tas_dev), GFP_KERNEL); 1272 + if (!tas_dev) 1273 + return dev_err_probe(dev, -ENOMEM, 1274 + "Failed devm_kzalloc"); 1275 + 1276 + tas_dev->dev = dev; 1277 + tas_dev->sdw_peripheral = peripheral; 1278 + tas_dev->hw_init = false; 1279 + mutex_init(&tas_dev->calib_lock); 1280 + mutex_init(&tas_dev->pde_lock); 1281 + 1282 + init_waitqueue_head(&tas_dev->fw_wait); 1283 + dev_set_drvdata(dev, tas_dev); 1284 + regmap = devm_regmap_init_sdw_mbq_cfg(peripheral, 1285 + &tas_regmap, 1286 + &tas2783_mbq_cfg); 1287 + if (IS_ERR(regmap)) 1288 + return dev_err_probe(dev, PTR_ERR(regmap), 1289 + "Failed devm_regmap_init_sdw."); 1290 + 1291 + /* keep in cache until the device is fully initialized */ 1292 + regcache_cache_only(regmap, true); 1293 + tas_dev->regmap = regmap; 1294 + return tas_init(tas_dev); 1295 + } 1296 + 1297 + static s32 tas_sdw_remove(struct sdw_slave *peripheral) 1298 + { 1299 + struct tas2783_prv *tas_dev = dev_get_drvdata(&peripheral->dev); 1300 + 1301 + pm_runtime_disable(tas_dev->dev); 1302 + tas_remove(tas_dev); 1303 + mutex_destroy(&tas_dev->calib_lock); 1304 + mutex_destroy(&tas_dev->pde_lock); 1305 + dev_set_drvdata(&peripheral->dev, NULL); 1306 + 1307 + return 0; 1308 + } 1309 + 1310 + static const struct sdw_device_id tas_sdw_id[] = { 1311 + /* chipid for the TAS2783 is 0x0000 */ 1312 + SDW_SLAVE_ENTRY(0x0102, 0x0000, 0), 1313 + {}, 1314 + }; 1315 + MODULE_DEVICE_TABLE(sdw, tas_sdw_id); 1316 + 1317 + static struct sdw_driver tas_sdw_driver = { 1318 + .driver = { 1319 + .name = "slave-tas2783", 1320 + .pm = pm_ptr(&tas2783_sdca_pm), 1321 + }, 1322 + .probe = tas_sdw_probe, 1323 + .remove = tas_sdw_remove, 1324 + .ops = &tas_sdw_ops, 1325 + .id_table = tas_sdw_id, 1326 + }; 1327 + module_sdw_driver(tas_sdw_driver); 1328 + 1329 + MODULE_AUTHOR("Texas Instruments Inc."); 1330 + MODULE_DESCRIPTION("ASoC TAS2783 SoundWire Driver"); 1331 + MODULE_LICENSE("GPL");
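The calibration path above is self-contained enough to model on the host. The blob read from the UEFI variable is laid out as a 12-byte header (magic 2783, speaker count, epoch timestamp), then 6 u32 words per speaker (a SoundWire unique id followed by 5 parameters), then a CRC32 over everything before it. Below is a minimal host-side sketch of the same validation, assuming a little-endian host (the driver casts the buffer to u32 * directly) and using zlib's crc32(), which computes the same standard CRC-32 as the kernel's crc32(~0, ...) ^ ~0 idiom; build with -lz:

#include <stdint.h>
#include <stdio.h>
#include <zlib.h>

#define CALIB_PARAMS   6   /* 1 unique id + 5 values per speaker */
#define CALIB_HDR_SZ   12
#define CALIB_CRC_SZ   4

/* mirrors tas2783_validate_calibdata(): magic, count, size, then CRC */
static int validate(const uint8_t *data, size_t size)
{
	const uint32_t *w = (const uint32_t *)data;
	uint32_t spk_count, want, got;
	size_t need;

	if (w[0] != 2783)
		return -1;			/* bad magic */
	spk_count = w[1];
	need = CALIB_HDR_SZ + CALIB_CRC_SZ + spk_count * CALIB_PARAMS * 4;
	if (size < need)
		return -1;			/* truncated blob */
	want = crc32(0L, data, need - CALIB_CRC_SZ);
	got = w[(need - CALIB_CRC_SZ) / 4];
	return want == got ? 0 : -1;		/* integrity check */
}

int main(void)
{
	/* one-speaker blob: magic, count, timestamp, unique id, 5 zero params */
	uint32_t blob[3 + CALIB_PARAMS + 1] = { 2783, 1, 0, 0 };

	blob[3 + CALIB_PARAMS] = crc32(0L, (uint8_t *)blob,
				       sizeof(blob) - CALIB_CRC_SZ);
	printf("valid: %s\n", !validate((uint8_t *)blob, sizeof(blob)) ? "yes" : "no");
	return 0;
}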
+110
sound/soc/codecs/tas2783.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 2 + * 3 + * ALSA SoC Texas Instruments TAS2783 Audio Smart Amplifier 4 + * 5 + * Copyright (C) 2025 Texas Instruments Incorporated 6 + * https://www.ti.com 7 + * 8 + * The TAS2783 driver implements a flexible and configurable 9 + * algo coefficient setting for single TAS2783 chips. 10 + * 11 + * Author: Niranjan H Y <niranjanhy@ti.com> 12 + * Author: Baojun Xu <baojun.xu@ti.com> 13 + */ 14 + #include <linux/workqueue.h> 15 + 16 + #ifndef __TAS2783_H__ 17 + #define __TAS2783_H__ 18 + 19 + #define TAS2783_DEVICE_RATES (SNDRV_PCM_RATE_44100 | \ 20 + SNDRV_PCM_RATE_48000 | \ 21 + SNDRV_PCM_RATE_96000 | \ 22 + SNDRV_PCM_RATE_88200) 23 + #define TAS2783_DEVICE_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \ 24 + SNDRV_PCM_FMTBIT_S24_LE | \ 25 + SNDRV_PCM_FMTBIT_S32_LE) 26 + 27 + /* book, page, register */ 28 + #define TASDEV_REG_SDW(book, page, reg) (((book) * 256 * 128) + \ 29 + 0x800000 + ((page) * 128) + (reg)) 30 + 31 + /* Volume control */ 32 + #define TAS2783_DVC_LVL TASDEV_REG_SDW(0x0, 0x00, 0x1A) 33 + #define TAS2783_AMP_LEVEL TASDEV_REG_SDW(0x0, 0x00, 0x03) 34 + #define TAS2783_AMP_LEVEL_MASK GENMASK(5, 1) 35 + 36 + #define PRAM_ADDR_START TASDEV_REG_SDW(0x8c, 0x01, 0x8) 37 + #define PRAM_ADDR_END TASDEV_REG_SDW(0x8c, 0xff, 0x7f) 38 + #define YRAM_ADDR_START TASDEV_REG_SDW(0x00, 0x02, 0x8) 39 + #define YRAM_ADDR_END TASDEV_REG_SDW(0x00, 0x37, 0x7f) 40 + 41 + /* Calibration data */ 42 + #define TAS2783_CAL_R0 TASDEV_REG_SDW(0, 0x16, 0x4C) 43 + #define TAS2783_CAL_INVR0 TASDEV_REG_SDW(0, 0x16, 0x5C) 44 + #define TAS2783_CAL_R0LOW TASDEV_REG_SDW(0, 0x16, 0x64) 45 + #define TAS2783_CAL_POWER TASDEV_REG_SDW(0, 0x15, 0x44) 46 + #define TAS2783_CAL_TLIM TASDEV_REG_SDW(0, 0x17, 0x58) 47 + 48 + /* TAS2783 SDCA Control - function number */ 49 + #define FUNC_NUM_SMART_AMP 0x01 50 + 51 + /* TAS2783 SDCA entity */ 52 + 53 + #define TAS2783_SDCA_ENT_FU21 0x01 54 + #define TAS2783_SDCA_ENT_FU23 0x02 55 + #define TAS2783_SDCA_ENT_FU26 0x03 56 + #define TAS2783_SDCA_ENT_XU22 0x04 57 + #define TAS2783_SDCA_ENT_CS24 0x05 58 + #define TAS2783_SDCA_ENT_CS21 0x06 59 + #define TAS2783_SDCA_ENT_CS25 0x07 60 + #define TAS2783_SDCA_ENT_CS26 0x08 61 + #define TAS2783_SDCA_ENT_CS28 0x09 62 + #define TAS2783_SDCA_ENT_PDE23 0x0C 63 + #define TAS2783_SDCA_ENT_UDMPU23 0x0E 64 + #define TAS2783_SDCA_ENT_SAPU29 0x0F 65 + #define TAS2783_SDCA_ENT_PPU21 0x10 66 + #define TAS2783_SDCA_ENT_PPU26 0x11 67 + #define TAS2783_SDCA_ENT_TG23 0x12 68 + #define TAS2783_SDCA_ENT_IT21 0x13 69 + #define TAS2783_SDCA_ENT_IT29 0x14 70 + #define TAS2783_SDCA_ENT_IT26 0x15 71 + #define TAS2783_SDCA_ENT_IT28 0x16 72 + #define TAS2783_SDCA_ENT_OT24 0x17 73 + #define TAS2783_SDCA_ENT_OT23 0x18 74 + #define TAS2783_SDCA_ENT_OT25 0x19 75 + #define TAS2783_SDCA_ENT_OT28 0x1A 76 + #define TAS2783_SDCA_ENT_MU26 0x1b 77 + #define TAS2783_SDCA_ENT_OT127 0x1E 78 + #define TAS2783_SDCA_ENT_FU127 0x1F 79 + #define TAS2783_SDCA_ENT_CS127 0x20 80 + #define TAS2783_SDCA_ENT_MFPU21 0x22 81 + #define TAS2783_SDCA_ENT_MFPU26 0x23 82 + 83 + /* TAS2783 SDCA control */ 84 + #define TAS2783_SDCA_CTL_REQ_POW_STATE 0x01 85 + #define TAS2783_SDCA_CTL_FU_MUTE 0x01 86 + #define TAS2783_SDCA_CTL_UDMPU_CLUSTER 0x10 87 + 88 + #define TAS2783_DEVICE_CHANNEL_LEFT 1 89 + #define TAS2783_DEVICE_CHANNEL_RIGHT 2 90 + 91 + #define TAS2783_SDCA_POW_STATE_ON 0 92 + #define TAS2783_SDCA_POW_STATE_OFF 3 93 + 94 + /* calibration data */ 95 + #define TAS2783_CALIB_PARAMS 6 /* 5 + 1 unique id */ 96 + #define TAS2783_CALIB_MAX_SPK_COUNT 8 97 + #define 
TAS2783_CALIB_HDR_SZ 12 98 + #define TAS2783_CALIB_CRC_SZ 4 99 + #define TAS2783_CALIB_DATA_SZ ((TAS2783_CALIB_HDR_SZ) + TAS2783_CALIB_CRC_SZ + \ 100 + ((TAS2783_CALIB_PARAMS) * 4 * (TAS2783_CALIB_MAX_SPK_COUNT))) 101 + 102 + #if IS_ENABLED(CONFIG_SND_SOC_TAS2783_UTIL) 103 + int32_t tas25xx_register_misc(struct sdw_slave *peripheral); 104 + int32_t tas25xx_deregister_misc(void); 105 + #else 106 + static inline int32_t tas25xx_register_misc(struct sdw_slave *peripheral) { return 0; } 107 + static inline int32_t tas25xx_deregister_misc(void) { return 0; } 108 + #endif 109 + 110 + #endif /*__TAS2783_H__ */
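TASDEV_REG_SDW() flattens the device's book/page/register paging into one 32-bit SoundWire address: 128 registers per page, 256 pages per book, with the vendor register space starting at 0x800000. That is also why the regmap ranges in the driver read the way they do; book 0, pages 0-127 is exactly 0x800000..0x803fff. A tiny standalone check of the arithmetic:

#include <stdio.h>

/* book, page, register -> flat SoundWire address (mirrors TASDEV_REG_SDW) */
static unsigned int tasdev_reg_sdw(unsigned int book, unsigned int page,
				   unsigned int reg)
{
	return book * 256 * 128 + 0x800000 + page * 128 + reg;
}

int main(void)
{
	printf("DVC_LVL    = 0x%06x\n", tasdev_reg_sdw(0, 0, 0x1a));	/* 0x80001a */
	printf("AMP_LEVEL  = 0x%06x\n", tasdev_reg_sdw(0, 0, 0x03));	/* 0x800003 */
	printf("PRAM start = 0x%06x\n", tasdev_reg_sdw(0x8c, 0x01, 0x08)); /* 0xc60088 */
	printf("book0 end  = 0x%06x\n", tasdev_reg_sdw(0, 127, 0x7f));	/* 0x803fff */
	return 0;
}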
+5 -4
sound/soc/codecs/wcd-common.c
··· 62 62 63 63 int wcd_dt_parse_micbias_info(struct wcd_common *common) 64 64 { 65 - int i; 65 + int ret, i; 66 66 67 67 for (i = 0; i < common->max_bias; i++) { 68 - common->micb_vout[i] = wcd_get_micbias_val(common->dev, i + 1, &common->micb_mv[i]); 69 - if (common->micb_vout[i] < 0) 70 - return -EINVAL; 68 + ret = wcd_get_micbias_val(common->dev, i + 1, &common->micb_mv[i]); 69 + if (ret < 0) 70 + return ret; 71 + common->micb_vout[i] = ret; 71 72 } 72 73 73 74 return 0;
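The micbias fix is a common pattern: the getter's return value was previously stored straight into micb_vout[] and only then sign-checked, which both hides the sign when the destination isn't a plain int and collapses the real errno into -EINVAL. A generic sketch of the corrected shape (ctx, get_value() and the field names here are hypothetical stand-ins, not the WCD API):

#include <errno.h>

struct ctx { int n; unsigned int out[8]; };

/* hypothetical getter: returns a value >= 0 or a negative errno */
static int get_value(const struct ctx *c, int i)
{
	return i < c->n ? 1800 : -EINVAL;
}

static int parse_all(struct ctx *c)
{
	int ret, i;

	for (i = 0; i < c->n; i++) {
		ret = get_value(c, i);
		if (ret < 0)
			return ret;	/* propagate the real errno, don't flatten it */
		c->out[i] = ret;	/* 'out' may be unsigned; never sign-check it directly */
	}
	return 0;
}

int main(void)
{
	struct ctx c = { .n = 4 };

	return parse_all(&c) ? 1 : 0;
}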
+30
sound/soc/intel/common/soc-acpi-intel-mtl-match.c
··· 948 948 } 949 949 }; 950 950 951 + static const struct snd_soc_acpi_adr_device tas2783_0_adr[] = { 952 + { 953 + .adr = 0x0000380102000001ull, 954 + .num_endpoints = 1, 955 + .endpoints = &spk_l_endpoint, 956 + .name_prefix = "tas2783-1" 957 + }, 958 + { 959 + .adr = 0x0000390102000001ull, 960 + .num_endpoints = 1, 961 + .endpoints = &spk_r_endpoint, 962 + .name_prefix = "tas2783-2" 963 + } 964 + }; 965 + 966 + static const struct snd_soc_acpi_link_adr tas2783_link0[] = { 967 + { 968 + .mask = BIT(0), 969 + .num_adr = ARRAY_SIZE(tas2783_0_adr), 970 + .adr_d = tas2783_0_adr, 971 + }, 972 + {} 973 + }; 974 + 951 975 static const struct snd_soc_acpi_link_adr cs42l42_link0_max98363_link2[] = { 952 976 /* Expected order: jack -> amp */ 953 977 { ··· 1103 1079 .links = sdw_mockup_mic_headset_1amp, 1104 1080 .drv_name = "sof_sdw", 1105 1081 .sof_tplg_filename = "sof-mtl-rt715-rt711-rt1308-mono.tplg", 1082 + }, 1083 + { 1084 + .link_mask = BIT(0), 1085 + .links = tas2783_link0, 1086 + .drv_name = "sof_sdw", 1087 + .sof_tplg_filename = "sof-mtl-tas2783.tplg", 1106 1088 }, 1107 1089 { 1108 1090 .link_mask = GENMASK(3, 0),
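The two 64-bit .adr values follow the DisCo-for-SoundWire _ADR layout decoded by the kernel's SDW_* helper macros: link in bits 51:48, SoundWire version in 47:44, unique id in 43:40, manufacturer id in 39:24, part id in 23:8 and class in 7:0. Decoding them shows the left and right amps differ only in unique id (8 vs 9), both with TI's manufacturer id 0x0102 and part id 0x0000, matching the driver's SDW_SLAVE_ENTRY. A quick host-side decode (field offsets as described; verify against your tree's sdw.h):

#include <stdint.h>
#include <stdio.h>

static void decode_adr(uint64_t adr)
{
	unsigned int link = (adr >> 48) & 0xf;
	unsigned int uid  = (adr >> 40) & 0xf;
	unsigned int mfg  = (adr >> 24) & 0xffff;
	unsigned int part = (adr >> 8) & 0xffff;

	printf("link %u uid %u mfg 0x%04x part 0x%04x\n", link, uid, mfg, part);
}

int main(void)
{
	decode_adr(0x0000380102000001ull);	/* left amp:  uid 8, TI 0x0102 */
	decode_adr(0x0000390102000001ull);	/* right amp: uid 9 */
	return 0;
}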
+131 -30
sound/soc/renesas/rcar/msiof.c
··· 7 7 // 8 8 9 9 /* 10 - * [NOTE] 10 + * [NOTE-CLOCK-MODE] 11 11 * 12 12 * This driver doesn't support Clock/Frame Provider Mode 13 13 * ··· 24 24 * Clock/Frame Consumer Mode. 25 25 */ 26 26 27 + /* 28 + * [NOTE-RESET] 29 + * 30 + * MSIOF has TXRST/RXRST to reset the FIFO, but they shouldn't be used while the SYNC signal is 31 + * asserted, because doing so can cause a HW issue. 32 + * 33 + * When MSIOF is used as a sound driver, it is assumed to run in clock consumer mode 34 + * (= Codec is clock provider). This means it can't control the SYNC signal by itself. 35 + * 36 + * We need to use SW reset (= reset_control_xxx()) instead of TXRST/RXRST. 37 + */ 38 + 39 + /* 40 + * [NOTE-BOTH-SETTING] 41 + * 42 + * SITMDRn / SIRMDRn and some other registers should not be updated while the device is working, 43 + * even if the update is unrelated to the target direction (for example, applying TX settings 44 + * while RX is running), otherwise it causes a FSERR. 45 + * 46 + * Set up both directions (Playback/Capture) at the same time. 47 + */ 48 + 49 + /* 50 + * [NOTE-R/L] 51 + * 52 + * Captured data might have R/L swapped. 53 + * 54 + * This driver assumes MSIOF is used in Clock/Frame Consumer Mode, and there is a case where some 55 + * Codec (= Clock/Frame Provider) might output Clock/Frame before MSIOF is set up. It depends on 56 + * the Codec driver implementation. 57 + * 58 + * MSIOF will capture data without checking whether the SYNC signal is Hi/Low (= R/L). 59 + * 60 + * This means that if the MSIOF RXE bit is set to 1 while the SYNC signal is Hi (= R), it will 61 + * start capturing data from the next SYNC low signal (= L). Because Linux assumes sound data is 62 + * lined up as R->L->R->L->..., the data R/L will be swapped. 63 + * 64 + * The only solution in this case is to start CLK/SYNC *after* the MSIOF settings, but it depends 65 + * on when and how the Codec driver starts them. 66 + */ 67 + 68 + /* 69 + * [NOTE-FSERR] 70 + * 71 + * We can't remove all FSERR. 72 + * 73 + * Renesas has tried to minimize the occurrence of FSERR as much as possible, but unfortunately 74 + * they cannot be removed completely, because MSIOF might set up its registers while CLK/SYNC are 75 + * being input. This can happen because MSIOF works as a Clock/Frame Consumer. 76 + */ 77 + 27 78 #include <linux/module.h> 28 79 #include <linux/of.h> 29 80 #include <linux/of_dma.h> 30 81 #include <linux/of_graph.h> 31 82 #include <linux/platform_device.h> 32 83 #include <linux/pm_runtime.h> 84 + #include <linux/reset.h> 33 85 #include <linux/spi/sh_msiof.h> 34 86 #include <sound/dmaengine_pcm.h> 35 87 #include <sound/soc.h> ··· 112 60 struct msiof_priv { 113 61 struct device *dev; 114 62 struct snd_pcm_substream *substream[SNDRV_PCM_STREAM_LAST + 1]; 63 + struct reset_control *reset; 115 64 spinlock_t lock; 116 65 void __iomem *base; 117 66 resource_size_t phy_addr; 67 + 68 + int count; 118 69 119 70 /* for error */ 120 71 int err_syc[SNDRV_PCM_STREAM_LAST + 1]; ··· 176 121 177 122 /* 178 123 * see 179 - * [NOTE] on top of this driver 124 + * [NOTE-CLOCK-MODE] on top of this driver 180 125 */ 181 126 /* 182 127 * see ··· 186 131 * RX: Fig 109.15 187 132 */ 188 133 189 - /* reset errors */ 190 - priv->err_syc[substream->stream] = 134 + /* 135 + * Use reset_control_xx() instead of TXRST/RXRST. 136 + * see 137 + * [NOTE-RESET] 138 + */ 139 + if (!priv->count) 140 + reset_control_deassert(priv->reset); 141 + 142 + priv->count++; 143 + 144 + /* 145 + * Reset errors; 
ignore 1st FSERR 146 + * 147 + * see 148 + * [NOTE-FSERR] 149 + */ 150 + priv->err_syc[substream->stream] = -1; 191 151 priv->err_ovf[substream->stream] = 192 152 priv->err_udf[substream->stream] = 0; 193 153 194 154 /* Start DMAC */ 195 155 snd_dmaengine_pcm_trigger(substream, cmd); 196 156 157 + /* 158 + * setup both direction (Playback/Capture) in the same time. 159 + * see 160 + * above [NOTE-BOTH-SETTING] 161 + */ 162 + 197 163 /* SITMDRx */ 198 - if (is_play) { 199 - val = SITMDR1_PCON | 200 - FIELD_PREP(SIMDR1_SYNCMD, SIMDR1_SYNCMD_LR) | 201 - SIMDR1_SYNCAC | SIMDR1_XXSTP; 202 - if (msiof_flag_has(priv, MSIOF_FLAGS_NEED_DELAY)) 203 - val |= FIELD_PREP(SIMDR1_DTDL, 1); 164 + val = SITMDR1_PCON | SIMDR1_SYNCAC | SIMDR1_XXSTP | 165 + FIELD_PREP(SIMDR1_SYNCMD, SIMDR1_SYNCMD_LR); 166 + if (msiof_flag_has(priv, MSIOF_FLAGS_NEED_DELAY)) 167 + val |= FIELD_PREP(SIMDR1_DTDL, 1); 204 168 205 - msiof_write(priv, SITMDR1, val); 169 + msiof_write(priv, SITMDR1, val); 206 170 207 - val = FIELD_PREP(SIMDR2_BITLEN1, width - 1); 208 - msiof_write(priv, SITMDR2, val | FIELD_PREP(SIMDR2_GRP, 1)); 209 - msiof_write(priv, SITMDR3, val); 171 + val = FIELD_PREP(SIMDR2_BITLEN1, width - 1); 172 + msiof_write(priv, SITMDR2, val | FIELD_PREP(SIMDR2_GRP, 1)); 173 + msiof_write(priv, SITMDR3, val); 210 174 211 - } 212 175 /* SIRMDRx */ 213 - else { 214 - val = FIELD_PREP(SIMDR1_SYNCMD, SIMDR1_SYNCMD_LR) | 215 - SIMDR1_SYNCAC; 216 - if (msiof_flag_has(priv, MSIOF_FLAGS_NEED_DELAY)) 217 - val |= FIELD_PREP(SIMDR1_DTDL, 1); 176 + val = SIMDR1_SYNCAC | 177 + FIELD_PREP(SIMDR1_SYNCMD, SIMDR1_SYNCMD_LR); 178 + if (msiof_flag_has(priv, MSIOF_FLAGS_NEED_DELAY)) 179 + val |= FIELD_PREP(SIMDR1_DTDL, 1); 218 180 219 - msiof_write(priv, SIRMDR1, val); 181 + msiof_write(priv, SIRMDR1, val); 220 182 221 - val = FIELD_PREP(SIMDR2_BITLEN1, width - 1); 222 - msiof_write(priv, SIRMDR2, val | FIELD_PREP(SIMDR2_GRP, 1)); 223 - msiof_write(priv, SIRMDR3, val); 224 - } 183 + val = FIELD_PREP(SIMDR2_BITLEN1, width - 1); 184 + msiof_write(priv, SIRMDR2, val | FIELD_PREP(SIMDR2_GRP, 1)); 185 + msiof_write(priv, SIRMDR3, val); 186 + 187 + /* SIFCTR */ 188 + msiof_write(priv, SIFCTR, 189 + FIELD_PREP(SIFCTR_TFWM, SIFCTR_TFWM_1) | 190 + FIELD_PREP(SIFCTR_RFWM, SIFCTR_RFWM_1)); 225 191 226 192 /* SIIER */ 227 193 if (is_play) ··· 259 183 msiof_update(priv, SISTR, val, val); 260 184 261 185 /* SICTR */ 186 + val = SICTR_TEDG | SICTR_REDG; 262 187 if (is_play) 263 - val = SICTR_TXE | SICTR_TEDG; 188 + val |= SICTR_TXE; 264 189 else 265 - val = SICTR_RXE | SICTR_REDG; 190 + val |= SICTR_RXE; 266 191 msiof_update_and_wait(priv, SICTR, val, val, val); 267 192 268 193 return 0; ··· 284 207 val = SIIER_RDREQE | SIIER_RDMAE | SISTR_ERR_RX; 285 208 msiof_update(priv, SIIER, val, 0); 286 209 287 - /* Stop DMAC */ 288 - snd_dmaengine_pcm_trigger(substream, cmd); 289 - 290 210 /* SICTR */ 291 211 if (is_play) 292 212 val = SICTR_TXE; 293 213 else 294 214 val = SICTR_RXE; 295 215 msiof_update_and_wait(priv, SICTR, val, 0, 0); 216 + 217 + /* Stop DMAC */ 218 + snd_dmaengine_pcm_trigger(substream, cmd); 219 + 220 + /* 221 + * Ignore 1st FSERR 222 + * 223 + * see 224 + * [NOTE-FSERR] 225 + */ 226 + if (priv->err_syc[substream->stream] < 0) 227 + priv->err_syc[substream->stream] = 0; 296 228 297 229 /* indicate error status if exist */ 298 230 if (priv->err_syc[substream->stream] || ··· 312 226 priv->err_syc[substream->stream], 313 227 priv->err_ovf[substream->stream], 314 228 priv->err_udf[substream->stream]); 229 + 230 + priv->count--; 231 + 232 + if 
(!priv->count) 233 + reset_control_assert(priv->reset); 315 234 316 235 return 0; 317 236 } ··· 393 302 .channels_max = 2, 394 303 }, 395 304 .ops = &msiof_dai_ops, 305 + .symmetric_rate = 1, 306 + .symmetric_channels = 1, 307 + .symmetric_sample_bits = 1, 396 308 }; 397 309 398 310 static struct snd_pcm_hardware msiof_pcm_hardware = { ··· 584 490 if (IS_ERR(priv->base)) 585 491 return PTR_ERR(priv->base); 586 492 493 + priv->reset = devm_reset_control_get_exclusive(dev, NULL); 494 + if (IS_ERR(priv->reset)) 495 + return PTR_ERR(priv->reset); 496 + 497 + reset_control_assert(priv->reset); 498 + 587 499 ret = devm_request_irq(dev, irq, msiof_interrupt, 0, dev_name(dev), priv); 588 500 if (ret) 589 501 return ret; 590 502 591 503 priv->dev = dev; 592 504 priv->phy_addr = res->start; 505 + priv->count = 0; 593 506 594 507 spin_lock_init(&priv->lock); 595 508 platform_set_drvdata(pdev, priv);
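The new priv->count / reset_control pairing implements a simple shared-resource refcount: the first stream to start deasserts the SW reset and the last stream to stop asserts it again, so full-duplex playback plus capture share one reset line safely (per [NOTE-RESET], TXRST/RXRST can't be used here). A minimal model of that start/stop pairing, with the reset calls stubbed out:

#include <assert.h>
#include <stdio.h>

struct dev { int count; int in_reset; };

static void stream_start(struct dev *d)
{
	if (!d->count)
		d->in_reset = 0;	/* reset_control_deassert() in the driver */
	d->count++;
}

static void stream_stop(struct dev *d)
{
	d->count--;
	if (!d->count)
		d->in_reset = 1;	/* reset_control_assert() in the driver */
}

int main(void)
{
	struct dev d = { .count = 0, .in_reset = 1 };

	stream_start(&d);	/* playback starts: leaves reset */
	stream_start(&d);	/* capture starts: no state change */
	stream_stop(&d);	/* playback stops: capture still running */
	assert(!d.in_reset);
	stream_stop(&d);	/* last user gone: back in reset */
	assert(d.in_reset);
	printf("refcounted reset ok\n");
	return 0;
}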
+2 -1
sound/soc/sdw_utils/Makefile
··· 6 6 soc_sdw_bridge_cs35l56.o \ 7 7 soc_sdw_cs42l42.o soc_sdw_cs42l43.o \ 8 8 soc_sdw_cs_amp.o \ 9 - soc_sdw_maxim.o 9 + soc_sdw_maxim.o \ 10 + soc_sdw_ti_amp.o 10 11 obj-$(CONFIG_SND_SOC_SDW_UTILS) += snd-soc-sdw-utils.o
+92
sound/soc/sdw_utils/soc_sdw_ti_amp.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + // Copyright (c) 2025 Texas Instruments Inc. 3 + 4 + /* 5 + * soc_sdw_ti_amp - Helpers to handle TI's soundwire based codecs 6 + */ 7 + 8 + #include <linux/device.h> 9 + #include <linux/errno.h> 10 + #include <sound/soc.h> 11 + #include <sound/soc-acpi.h> 12 + #include <sound/soc-dai.h> 13 + #include <sound/soc_sdw_utils.h> 14 + 15 + #define TIAMP_SPK_VOLUME_0DB 200 16 + 17 + int asoc_sdw_ti_amp_initial_settings(struct snd_soc_card *card, 18 + const char *name_prefix) 19 + { 20 + char *volume_ctl_name; 21 + int ret; 22 + 23 + volume_ctl_name = kasprintf(GFP_KERNEL, "%s Speaker Volume", 24 + name_prefix); 25 + if (!volume_ctl_name) 26 + return -ENOMEM; 27 + 28 + ret = snd_soc_limit_volume(card, volume_ctl_name, 29 + TIAMP_SPK_VOLUME_0DB); 30 + if (ret) 31 + dev_err(card->dev, 32 + "%s update failed %d\n", 33 + volume_ctl_name, ret); 34 + 35 + kfree(volume_ctl_name); 36 + return 0; 37 + } 38 + EXPORT_SYMBOL_NS(asoc_sdw_ti_amp_initial_settings, "SND_SOC_SDW_UTILS"); 39 + 40 + int asoc_sdw_ti_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, 41 + struct snd_soc_dai *dai) 42 + { 43 + struct snd_soc_card *card = rtd->card; 44 + char widget_name[16]; 45 + char speaker[16]; 46 + struct snd_soc_dapm_route route = {speaker, NULL, widget_name}; 47 + struct snd_soc_dai *codec_dai; 48 + const char *prefix; 49 + int i, ret = 0; 50 + 51 + for_each_rtd_codec_dais(rtd, i, codec_dai) { 52 + if (!strstr(codec_dai->name, "tas2783")) 53 + continue; 54 + 55 + prefix = codec_dai->component->name_prefix; 56 + if (!strncmp(prefix, "tas2783-1", strlen("tas2783-1"))) { 57 + strscpy(speaker, "Left Spk", sizeof(speaker)); 58 + } else if (!strncmp(prefix, "tas2783-2", strlen("tas2783-2"))) { 59 + strscpy(speaker, "Right Spk", sizeof(speaker)); 60 + } else { 61 + ret = -EINVAL; 62 + dev_err(card->dev, "unhandled prefix %s", prefix); 63 + break; 64 + } 65 + 66 + snprintf(widget_name, sizeof(widget_name), "%s SPK", prefix); 67 + ret = asoc_sdw_ti_amp_initial_settings(card, prefix); 68 + if (ret) 69 + return ret; 70 + 71 + ret = snd_soc_dapm_add_routes(&card->dapm, &route, 1); 72 + if (ret) 73 + return ret; 74 + } 75 + 76 + return ret; 77 + } 78 + EXPORT_SYMBOL_NS(asoc_sdw_ti_spk_rtd_init, "SND_SOC_SDW_UTILS"); 79 + 80 + int asoc_sdw_ti_amp_init(struct snd_soc_card *card, 81 + struct snd_soc_dai_link *dai_links, 82 + struct asoc_sdw_codec_info *info, 83 + bool playback) 84 + { 85 + if (!playback) 86 + return 0; 87 + 88 + info->amp_num++; 89 + 90 + return 0; 91 + } 92 + EXPORT_SYMBOL_NS(asoc_sdw_ti_amp_init, "SND_SOC_SDW_UTILS");
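asoc_sdw_ti_spk_rtd_init() builds each DAPM route from the codec's name_prefix: the prefix set in the ACPI table ("tas2783-1" or "tas2783-2") selects the "Left Spk"/"Right Spk" sink and is prepended to the codec's "SPK" output widget. The 16-byte widget_name buffer is sufficient because "tas2783-1 SPK" is 13 characters plus the terminator. A standalone sketch of the name composition:

#include <stdio.h>

int main(void)
{
	char widget_name[16];
	const char *prefix = "tas2783-1";	/* name_prefix from the ACPI table above */

	/* mirrors the snprintf() in asoc_sdw_ti_spk_rtd_init(); "tas2783-1 SPK"
	 * is 13 chars + NUL, so the 16-byte buffer is large enough */
	snprintf(widget_name, sizeof(widget_name), "%s SPK", prefix);
	printf("route: Left Spk <- %s\n", widget_name);
	return 0;
}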
+28 -10
sound/soc/sdw_utils/soc_sdw_utils.c
··· 35 35 SOC_DAPM_PIN_SWITCH("Speaker"), 36 36 }; 37 37 38 - static const struct snd_soc_dapm_widget maxim_widgets[] = { 38 + static const struct snd_soc_dapm_widget lr_spk_widgets[] = { 39 39 SND_SOC_DAPM_SPK("Left Spk", NULL), 40 40 SND_SOC_DAPM_SPK("Right Spk", NULL), 41 41 }; 42 42 43 - static const struct snd_kcontrol_new maxim_controls[] = { 43 + static const struct snd_kcontrol_new lr_spk_controls[] = { 44 44 SOC_DAPM_PIN_SWITCH("Left Spk"), 45 45 SOC_DAPM_PIN_SWITCH("Right Spk"), 46 46 }; ··· 58 58 }; 59 59 60 60 struct asoc_sdw_codec_info codec_info_list[] = { 61 + { 62 + .part_id = 0x0000, /* TAS2783A */ 63 + .dais = { 64 + { 65 + .direction = {true, true}, 66 + .dai_name = "tas2783-codec", 67 + .dai_type = SOC_SDW_DAI_TYPE_AMP, 68 + .dailink = {SOC_SDW_AMP_OUT_DAI_ID, SOC_SDW_AMP_IN_DAI_ID}, 69 + .init = asoc_sdw_ti_amp_init, 70 + .rtd_init = asoc_sdw_ti_spk_rtd_init, 71 + .controls = lr_spk_controls, 72 + .num_controls = ARRAY_SIZE(lr_spk_controls), 73 + .widgets = lr_spk_widgets, 74 + .num_widgets = ARRAY_SIZE(lr_spk_widgets), 75 + }, 76 + }, 77 + .dai_num = 1, 78 + }, 61 79 { 62 80 .part_id = 0x700, 63 81 .dais = { ··· 468 450 .dailink = {SOC_SDW_AMP_OUT_DAI_ID, SOC_SDW_AMP_IN_DAI_ID}, 469 451 .init = asoc_sdw_maxim_init, 470 452 .rtd_init = asoc_sdw_maxim_spk_rtd_init, 471 - .controls = maxim_controls, 472 - .num_controls = ARRAY_SIZE(maxim_controls), 473 - .widgets = maxim_widgets, 474 - .num_widgets = ARRAY_SIZE(maxim_widgets), 453 + .controls = lr_spk_controls, 454 + .num_controls = ARRAY_SIZE(lr_spk_controls), 455 + .widgets = lr_spk_widgets, 456 + .num_widgets = ARRAY_SIZE(lr_spk_widgets), 475 457 }, 476 458 }, 477 459 .dai_num = 1, ··· 487 469 .dailink = {SOC_SDW_AMP_OUT_DAI_ID, SOC_SDW_UNUSED_DAI_ID}, 488 470 .init = asoc_sdw_maxim_init, 489 471 .rtd_init = asoc_sdw_maxim_spk_rtd_init, 490 - .controls = maxim_controls, 491 - .num_controls = ARRAY_SIZE(maxim_controls), 492 - .widgets = maxim_widgets, 493 - .num_widgets = ARRAY_SIZE(maxim_widgets), 472 + .controls = lr_spk_controls, 473 + .num_controls = ARRAY_SIZE(lr_spk_controls), 474 + .widgets = lr_spk_widgets, 475 + .num_widgets = ARRAY_SIZE(lr_spk_widgets), 494 476 }, 495 477 }, 496 478 .dai_num = 1,
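The new codec_info_list[] entry keys off .part_id = 0x0000, the part id carried in bits 23:8 of the _ADR values above and in the driver's SDW_SLAVE_ENTRY(0x0102, 0x0000, 0). A toy sketch of that part-id based table lookup (the second table entry and the helper name are illustrative placeholders, not the real sdw_utils API):

#include <stddef.h>
#include <stdio.h>

struct codec_info { unsigned int part_id; const char *dai_name; };

static const struct codec_info infos[] = {
	{ 0x0000, "tas2783-codec" },	/* the TAS2783A entry added above */
	{ 0x0700, "hypothetical-dai" },	/* placeholder for the next entry */
};

/* hedged stand-in for the machine driver's part-id lookup */
static const struct codec_info *find_info(unsigned int part_id)
{
	for (size_t i = 0; i < sizeof(infos) / sizeof(infos[0]); i++)
		if (infos[i].part_id == part_id)
			return &infos[i];
	return NULL;
}

int main(void)
{
	const struct codec_info *ci = find_info(0x0000);

	printf("%s\n", ci ? ci->dai_name : "unknown part");
	return 0;
}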
+1
sound/soc/soc-ops.c
··· 118 118 if (mc->sign_bit) 119 119 val = sign_extend32(val, mc->sign_bit); 120 120 121 + val = clamp(val, mc->min, mc->max); 121 122 val -= mc->min; 122 123 123 124 if (mc->invert)
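The one-line clamp() guards the subsequent val -= mc->min: a register value outside [mc->min, mc->max] (for instance a sign-extended readback below the declared minimum) would otherwise surface to userspace as a negative, out-of-range control value. A tiny demonstration of the difference:

#include <stdio.h>

static long clamp_l(long v, long lo, long hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	long min = -100, max = 100;
	long val = -120;	/* e.g. a sign-extended readback below the range */

	printf("without clamp: %ld\n", val - min);			/* -20: invalid */
	printf("with clamp:    %ld\n", clamp_l(val, min, max) - min);	/* 0 */
	return 0;
}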
+8
sound/soc/stm/stm32_sai_sub.c
··· 672 672 struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai); 673 673 int ret; 674 674 675 + /* 676 + * The mclk rate is determined at runtime from the audio stream rate. 677 + * Skip calls to the set_sysclk callback that are not relevant during the 678 + * initialization phase. 679 + */ 680 + if (!snd_soc_card_is_instantiated(cpu_dai->component->card)) 681 + return 0; 682 + 675 683 if (dir == SND_SOC_CLOCK_OUT && sai->sai_mclk) { 676 684 ret = stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX, 677 685 SAI_XCR1_NODIV,
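The guard simply turns early set_sysclk() calls into no-ops while the card is still probing, since the SAI derives its mclk from the stream rate once audio actually starts. The shape of the pattern, with hypothetical types standing in for the ASoC ones:

#include <stdbool.h>
#include <stdio.h>

struct card { bool instantiated; };
struct dai { struct card *card; };

/* hypothetical stand-ins for snd_soc_card_is_instantiated() and the
 * real mclk programming done by stm32_sai_sub_reg_up() */
static int set_sysclk(struct dai *dai, unsigned int freq)
{
	if (!dai->card->instantiated)
		return 0;	/* too early: the rate isn't known yet */
	printf("program mclk for %u Hz\n", freq);
	return 0;
}

int main(void)
{
	struct card card = { .instantiated = false };
	struct dai dai = { .card = &card };

	set_sysclk(&dai, 12288000);	/* ignored during probe */
	card.instantiated = true;
	set_sysclk(&dai, 12288000);	/* now takes effect */
	return 0;
}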
+12
tools/arch/loongarch/include/asm/inst.h
··· 51 51 bgeu_op = 0x1b, 52 52 }; 53 53 54 + enum reg3_op { 55 + amswapw_op = 0x70c0, 56 + }; 57 + 54 58 struct reg0i15_format { 55 59 unsigned int immediate : 15; 56 60 unsigned int opcode : 17; ··· 100 96 unsigned int opcode : 6; 101 97 }; 102 98 99 + struct reg3_format { 100 + unsigned int rd : 5; 101 + unsigned int rj : 5; 102 + unsigned int rk : 5; 103 + unsigned int opcode : 17; 104 + }; 105 + 103 106 union loongarch_instruction { 104 107 unsigned int word; 105 108 struct reg0i15_format reg0i15_format; ··· 116 105 struct reg2i12_format reg2i12_format; 117 106 struct reg2i14_format reg2i14_format; 118 107 struct reg2i16_format reg2i16_format; 108 + struct reg3_format reg3_format; 119 109 }; 120 110 121 111 #define LOONGARCH_INSN_SIZE sizeof(union loongarch_instruction)
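reg3_format carves a 32-bit LoongArch word into rd (bits 4:0), rj (9:5), rk (14:10) and a 17-bit opcode (31:15). With amswapw_op = 0x70c0, the "amswap.w $zero, $ra, $zero" pattern that objtool treats as a BUG below encodes as 0x70c0 << 15 | 1 << 10 = 0x38600400 ($zero is GPR 0, $ra is GPR 1). A host-side check, assuming the usual little-endian bitfield layout used by GCC/Clang:

#include <stdio.h>

/* mirrors struct reg3_format: rd[4:0], rj[9:5], rk[14:10], opcode[31:15] */
struct reg3 {
	unsigned int rd : 5;
	unsigned int rj : 5;
	unsigned int rk : 5;
	unsigned int opcode : 17;
};

union insn {
	unsigned int word;
	struct reg3 reg3;
};

int main(void)
{
	/* amswap.w $zero, $ra, $zero: rd = rj = $zero (0), rk = $ra (1) */
	union insn i = { .reg3 = { .rd = 0, .rj = 0, .rk = 1, .opcode = 0x70c0 } };

	printf("word = 0x%08x\n", i.word);	/* 0x38600400 on LE hosts */
	return 0;
}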
+3
tools/lib/subcmd/help.c
··· 75 75 size_t ci, cj, ei; 76 76 int cmp; 77 77 78 + if (!excludes->cnt) 79 + return; 80 + 78 81 ci = cj = ei = 0; 79 82 while (ci < cmds->cnt && ei < excludes->cnt) { 80 83 cmp = strcmp(cmds->names[ci]->name, excludes->names[ei]->name);
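exclude_cmds() is a sorted-merge walk over two name lists; the added guard makes the no-excludes case an explicit early return instead of running the compaction with nothing to drop. A self-contained sketch of the same merge logic over plain string arrays (a simplification of the real struct cmdnames version):

#include <stdio.h>
#include <string.h>

/* drop from 'cmds' every name that also appears in 'excl'; both sorted */
static int exclude(const char **cmds, int ncmds, const char **excl, int nexcl)
{
	int ci = 0, cj = 0, ei = 0;

	if (!nexcl)
		return ncmds;	/* the guard added above: nothing to do */

	while (ci < ncmds && ei < nexcl) {
		int cmp = strcmp(cmds[ci], excl[ei]);

		if (cmp < 0) {
			cmds[cj++] = cmds[ci++];	/* keep */
		} else if (cmp == 0) {
			ci++;				/* drop the match */
			ei++;
		} else {
			ei++;
		}
	}
	while (ci < ncmds)
		cmds[cj++] = cmds[ci++];		/* tail survives as-is */
	return cj;
}

int main(void)
{
	const char *cmds[] = { "annotate", "record", "report", "top" };
	const char *excl[] = { "record" };
	int n = exclude(cmds, 4, excl, 1);

	for (int i = 0; i < n; i++)
		printf("%s\n", cmds[i]);	/* annotate, report, top */
	return 0;
}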
+30 -3
tools/objtool/arch/loongarch/decode.c
··· 278 278 return true; 279 279 } 280 280 281 + static bool decode_insn_reg3_fomat(union loongarch_instruction inst, 282 + struct instruction *insn) 283 + { 284 + switch (inst.reg3_format.opcode) { 285 + case amswapw_op: 286 + if (inst.reg3_format.rd == LOONGARCH_GPR_ZERO && 287 + inst.reg3_format.rk == LOONGARCH_GPR_RA && 288 + inst.reg3_format.rj == LOONGARCH_GPR_ZERO) { 289 + /* amswap.w $zero, $ra, $zero */ 290 + insn->type = INSN_BUG; 291 + } 292 + break; 293 + default: 294 + return false; 295 + } 296 + 297 + return true; 298 + } 299 + 281 300 int arch_decode_instruction(struct objtool_file *file, const struct section *sec, 282 301 unsigned long offset, unsigned int maxlen, 283 302 struct instruction *insn) ··· 328 309 return 0; 329 310 if (decode_insn_reg2i16_fomat(inst, insn)) 330 311 return 0; 312 + if (decode_insn_reg3_fomat(inst, insn)) 313 + return 0; 331 314 332 - if (inst.word == 0) 315 + if (inst.word == 0) { 316 + /* andi $zero, $zero, 0x0 */ 333 317 insn->type = INSN_NOP; 334 - else if (inst.reg0i15_format.opcode == break_op) { 335 - /* break */ 318 + } else if (inst.reg0i15_format.opcode == break_op && 319 + inst.reg0i15_format.immediate == 0x0) { 320 + /* break 0x0 */ 321 + insn->type = INSN_TRAP; 322 + } else if (inst.reg0i15_format.opcode == break_op && 323 + inst.reg0i15_format.immediate == 0x1) { 324 + /* break 0x1 */ 336 325 insn->type = INSN_BUG; 337 326 } else if (inst.reg2_format.opcode == ertn_op) { 338 327 /* ertn */
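Summarizing the classification above: an all-zero word is "andi $zero, $zero, 0x0" and decodes as a NOP, "break 0x0" is now a trap while "break 0x1" is the BUG marker, and the new reg3 path catches "amswap.w $zero, $ra, $zero" as a BUG as well. A standalone sketch of that decision table (break_op taken as 0x54 per asm/inst.h; verify against your tree):

#include <stdint.h>
#include <stdio.h>

#define BREAK_OP	0x54u		/* assumed value of break_op */
#define AMSWAP_BUG	0x38600400u	/* amswap.w $zero, $ra, $zero */

static const char *classify(uint32_t word)
{
	uint32_t op17 = word >> 15;	/* 17-bit opcode field */
	uint32_t imm15 = word & 0x7fff;

	if (word == 0)
		return "INSN_NOP";	/* andi $zero, $zero, 0x0 */
	if (word == AMSWAP_BUG)
		return "INSN_BUG";
	if (op17 == BREAK_OP)
		return imm15 == 0 ? "INSN_TRAP" :
		       imm15 == 1 ? "INSN_BUG" : "other";
	return "other";
}

int main(void)
{
	printf("0x00000000 -> %s\n", classify(0x00000000));
	printf("break 0x0  -> %s\n", classify(BREAK_OP << 15));
	printf("break 0x1  -> %s\n", classify((BREAK_OP << 15) | 1));
	printf("amswap.w   -> %s\n", classify(AMSWAP_BUG));
	return 0;
}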
+6 -1
tools/perf/builtin-lock.c
··· 2009 2009 .owner = show_lock_owner, 2010 2010 .cgroups = RB_ROOT, 2011 2011 }; 2012 + struct perf_env host_env; 2012 2013 2013 2014 lockhash_table = calloc(LOCKHASH_SIZE, sizeof(*lockhash_table)); 2014 2015 if (!lockhash_table) ··· 2025 2024 eops.mmap = perf_event__process_mmap; 2026 2025 eops.tracing_data = perf_event__process_tracing_data; 2027 2026 2028 - session = perf_session__new(use_bpf ? NULL : &data, &eops); 2027 + perf_env__init(&host_env); 2028 + session = __perf_session__new(use_bpf ? NULL : &data, &eops, 2029 + /*trace_event_repipe=*/false, &host_env); 2030 + 2029 2031 if (IS_ERR(session)) { 2030 2032 pr_err("Initializing perf session failed\n"); 2031 2033 err = PTR_ERR(session); ··· 2146 2142 evlist__delete(con.evlist); 2147 2143 lock_contention_finish(&con); 2148 2144 perf_session__delete(session); 2145 + perf_env__exit(&host_env); 2149 2146 zfree(&lockhash_table); 2150 2147 return err; 2151 2148 }
+5 -4
tools/perf/util/maps.c
··· 477 477 } 478 478 /* Insert the value at the end. */ 479 479 maps_by_address[nr_maps] = map__get(new); 480 + map__set_kmap_maps(new, maps); 480 481 if (maps_by_name) 481 482 maps_by_name[nr_maps] = map__get(new); 482 483 ··· 502 501 } 503 502 if (map__end(new) < map__start(new)) 504 503 RC_CHK_ACCESS(maps)->ends_broken = true; 505 - 506 - map__set_kmap_maps(new, maps); 507 504 508 505 return 0; 509 506 } ··· 890 891 if (before) { 891 892 map__put(maps_by_address[i]); 892 893 maps_by_address[i] = before; 894 + map__set_kmap_maps(before, maps); 893 895 894 896 if (maps_by_name) { 895 897 map__put(maps_by_name[ni]); ··· 918 918 */ 919 919 map__put(maps_by_address[i]); 920 920 maps_by_address[i] = map__get(new); 921 + map__set_kmap_maps(new, maps); 921 922 922 923 if (maps_by_name) { 923 924 map__put(maps_by_name[ni]); ··· 943 942 */ 944 943 map__put(maps_by_address[i]); 945 944 maps_by_address[i] = map__get(new); 945 + map__set_kmap_maps(new, maps); 946 946 947 947 if (maps_by_name) { 948 948 map__put(maps_by_name[ni]); 949 949 maps_by_name[ni] = map__get(new); 950 950 } 951 - 952 - map__set_kmap_maps(new, maps); 953 951 954 952 check_invariants(maps); 955 953 return err; ··· 1019 1019 err = unwind__prepare_access(dest, new, NULL); 1020 1020 if (!err) { 1021 1021 dest_maps_by_address[i] = new; 1022 + map__set_kmap_maps(new, dest); 1022 1023 if (dest_maps_by_name) 1023 1024 dest_maps_by_name[i] = map__get(new); 1024 1025 RC_CHK_ACCESS(dest)->nr_maps = i + 1;
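All five hunks enforce the same invariant: whenever a maps_by_address[] slot is populated or replaced, map__set_kmap_maps() runs immediately, so a kernel map's back-pointer can never go stale on an early-return or error path that the old single late call could miss. The pattern, reduced to a toy container:

#include <stdio.h>

struct maps;
struct map { struct maps *kmaps; const char *name; };
struct maps { struct map *by_addr[4]; int n; };

/* the pattern enforced above: set the back-pointer at the moment the
 * entry lands in the container, not at some later point that an early
 * return might skip */
static void maps_insert(struct maps *m, struct map *new)
{
	m->by_addr[m->n++] = new;
	new->kmaps = m;		/* map__set_kmap_maps() analogue, done immediately */
}

int main(void)
{
	struct maps m = { .n = 0 };
	struct map k = { .name = "[kernel.kallsyms]" };

	maps_insert(&m, &k);
	printf("%s owner ok: %s\n", k.name, k.kmaps == &m ? "yes" : "no");
	return 0;
}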
+195 -2
tools/testing/selftests/drivers/net/bonding/bond_options.sh
··· 7 7 prio
8 8 arp_validate
9 9 num_grat_arp
10 + fail_over_mac
11 + vlan_over_bond
10 12 "
11 13
12 14 lib_dir=$(dirname "$0")
··· 354 352
355 353 exp_num=$(echo "${param}" | cut -f6 -d ' ')
356 354 active_slave=$(cmd_jq "ip -n ${s_ns} -d -j link show bond0" ".[].linkinfo.info_data.active_slave")
357 - slowwait_for_counter $((exp_num + 5)) $exp_num \
358 - tc_rule_handle_stats_get "dev s${active_slave#eth} ingress" 101 ".packets" "-n ${g_ns}"
355 + slowwait_for_counter $((exp_num + 5)) $exp_num tc_rule_handle_stats_get \
356 + "dev s${active_slave#eth} ingress" 101 ".packets" "-n ${g_ns}" &> /dev/null
359 357
360 358 # check result
361 359 real_num=$(tc_rule_handle_stats_get "dev s${active_slave#eth} ingress" 101 ".packets" "-n ${g_ns}")
··· 376 374 garp_test "mode active-backup miimon 10 num_grat_arp $val peer_notify_delay 100"
377 375 log_test "num_grat_arp" "active-backup miimon num_grat_arp $val"
378 376 done
377 + }
378 +
379 + check_all_mac_same()
380 + {
381 + RET=0
382 + # all slaves should have same mac address (with the first port's mac)
383 + local bond_mac=$(ip -n "$s_ns" -j link show bond0 | jq -r '.[]["address"]')
384 + local eth0_mac=$(ip -n "$s_ns" -j link show eth0 | jq -r '.[]["address"]')
385 + local eth1_mac=$(ip -n "$s_ns" -j link show eth1 | jq -r '.[]["address"]')
386 + local eth2_mac=$(ip -n "$s_ns" -j link show eth2 | jq -r '.[]["address"]')
387 + if [ "$bond_mac" != "${mac[0]}" ] || [ "$eth0_mac" != "$bond_mac" ] || \
388 + [ "$eth1_mac" != "$bond_mac" ] || [ "$eth2_mac" != "$bond_mac" ]; then
389 + RET=1
390 + fi
391 + }
392 +
393 + check_bond_mac_same_with_first()
394 + {
395 + RET=0
396 + # bond mac address should be same with the first added slave
397 + local bond_mac=$(ip -n "$s_ns" -j link show bond0 | jq -r '.[]["address"]')
398 + if [ "$bond_mac" != "${mac[0]}" ]; then
399 + RET=1
400 + fi
401 + }
402 +
403 + check_bond_mac_same_with_active()
404 + {
405 + RET=0
406 + # bond mac address should be same with active slave
407 + local bond_mac=$(ip -n "$s_ns" -j link show bond0 | jq -r '.[]["address"]')
408 + local active_slave=$(cmd_jq "ip -n ${s_ns} -d -j link show bond0" ".[].linkinfo.info_data.active_slave")
409 + local active_slave_mac=$(ip -n "$s_ns" -j link show "$active_slave" | jq -r '.[]["address"]')
410 + if [ "$bond_mac" != "$active_slave_mac" ]; then
411 + RET=1
412 + fi
413 + }
414 +
415 + check_backup_slave_mac_not_change()
416 + {
417 + RET=0
418 + # backup slave's mac address is not changed
419 + if ip -n "$s_ns" -d -j link show type bond_slave | jq -e '.[]
420 + | select(.linkinfo.info_slave_data.state=="BACKUP")
421 + | select(.address != .linkinfo.info_slave_data.perm_hwaddr)' &> /dev/null; then
422 + RET=1
423 + fi
424 + }
425 +
426 + check_backup_slave_mac_inherit()
427 + {
428 + local backup_mac
429 + RET=0
430 +
431 + # backup slaves should use mac[1] or mac[2]
432 + local backup_macs=$(ip -n "$s_ns" -d -j link show type bond_slave | \
433 + jq -r '.[] | select(.linkinfo.info_slave_data.state=="BACKUP") | .address')
434 + for backup_mac in $backup_macs; do
435 + if [ "$backup_mac" != "${mac[1]}" ] && [ "$backup_mac" != "${mac[2]}" ]; then
436 + RET=1
437 + fi
438 + done
439 + }
440 +
441 + check_first_slave_random_mac()
442 + {
443 + RET=0
444 + # remove the first added slave and added it back
445 + ip -n "$s_ns" link set eth0 nomaster
446 + ip -n "$s_ns" link set eth0 master bond0
447 +
448 + # the first slave should use random mac address
449 + eth0_mac=$(ip -n "$s_ns" -j link show eth0 | jq -r '.[]["address"]')
450 + [ "$eth0_mac" = "${mac[0]}" ] && RET=1
451 + log_test "bond fail_over_mac follow" "random first slave mac"
452 +
453 + # remove the first slave, the permanent MAC address should be restored back
454 + ip -n "$s_ns" link set eth0 nomaster
455 + eth0_mac=$(ip -n "$s_ns" -j link show eth0 | jq -r '.[]["address"]')
456 + [ "$eth0_mac" != "${mac[0]}" ] && RET=1
457 + }
458 +
459 + do_active_backup_failover()
460 + {
461 + local active_slave=$(cmd_jq "ip -n ${s_ns} -d -j link show bond0" ".[].linkinfo.info_data.active_slave")
462 + ip -n ${s_ns} link set ${active_slave} down
463 + slowwait 2 active_slave_changed $active_slave
464 + ip -n ${s_ns} link set ${active_slave} up
465 + }
466 +
467 + fail_over_mac()
468 + {
469 + # Bring down the first interface on the switch to force the bond to
470 + # select another active interface instead of the first one that joined.
471 + ip -n "$g_ns" link set s0 down
472 +
473 + # fail_over_mac none
474 + bond_reset "mode active-backup miimon 100 fail_over_mac 0"
475 + check_all_mac_same
476 + log_test "fail_over_mac 0" "all slaves have same mac"
477 + do_active_backup_failover
478 + check_all_mac_same
479 + log_test "fail_over_mac 0" "failover: all slaves have same mac"
480 +
481 + # fail_over_mac active
482 + bond_reset "mode active-backup miimon 100 fail_over_mac 1"
483 + check_bond_mac_same_with_active
484 + log_test "fail_over_mac 1" "bond mac is same with active slave mac"
485 + check_backup_slave_mac_not_change
486 + log_test "fail_over_mac 1" "backup slave mac is not changed"
487 + do_active_backup_failover
488 + check_bond_mac_same_with_active
489 + log_test "fail_over_mac 1" "failover: bond mac is same with active slave mac"
490 + check_backup_slave_mac_not_change
491 + log_test "fail_over_mac 1" "failover: backup slave mac is not changed"
492 +
493 + # fail_over_mac follow
494 + bond_reset "mode active-backup miimon 100 fail_over_mac 2"
495 + check_bond_mac_same_with_first
496 + log_test "fail_over_mac 2" "bond mac is same with first slave mac"
497 + check_bond_mac_same_with_active
498 + log_test "fail_over_mac 2" "bond mac is same with active slave mac"
499 + check_backup_slave_mac_inherit
500 + log_test "fail_over_mac 2" "backup slave mac inherit"
501 + do_active_backup_failover
502 + check_bond_mac_same_with_first
503 + log_test "fail_over_mac 2" "failover: bond mac is same with first slave mac"
504 + check_bond_mac_same_with_active
505 + log_test "fail_over_mac 2" "failover: bond mac is same with active slave mac"
506 + check_backup_slave_mac_inherit
507 + log_test "fail_over_mac 2" "failover: backup slave mac inherit"
508 + check_first_slave_random_mac
509 + log_test "fail_over_mac 2" "first slave mac random"
510 + }
511 +
512 + vlan_over_bond_arp()
513 + {
514 + local mode="$1"
515 + RET=0
516 +
517 + bond_reset "mode $mode arp_interval 100 arp_ip_target 192.0.3.10"
518 + ip -n "${s_ns}" link add bond0.3 link bond0 type vlan id 3
519 + ip -n "${s_ns}" link set bond0.3 up
520 + ip -n "${s_ns}" addr add 192.0.3.1/24 dev bond0.3
521 + ip -n "${s_ns}" addr add 2001:db8::3:1/64 dev bond0.3
522 +
523 + slowwait_for_counter 5 5 tc_rule_handle_stats_get \
524 + "dev eth0.3 ingress" 101 ".packets" "-n ${c_ns}" &> /dev/null || RET=1
525 + log_test "vlan over bond arp" "$mode"
526 + }
527 +
528 + vlan_over_bond_ns()
529 + {
530 + local mode="$1"
531 + RET=0
532 +
533 + if skip_ns; then
534 + log_test_skip "vlan_over_bond ns" "$mode"
535 + return 0
536 + fi
537 +
538 + bond_reset "mode $mode arp_interval 100 ns_ip6_target 2001:db8::3:10"
539 + ip -n "${s_ns}" link add bond0.3 link bond0 type vlan id 3
540 + ip -n "${s_ns}" link set bond0.3 up
541 + ip -n "${s_ns}" addr add 192.0.3.1/24 dev bond0.3
542 + ip -n "${s_ns}" addr add 2001:db8::3:1/64 dev bond0.3
543 +
544 + slowwait_for_counter 5 5 tc_rule_handle_stats_get \
545 + "dev eth0.3 ingress" 102 ".packets" "-n ${c_ns}" &> /dev/null || RET=1
546 + log_test "vlan over bond ns" "$mode"
547 + }
548 +
549 + vlan_over_bond()
550 + {
551 + # add vlan 3 for client
552 + ip -n "${c_ns}" link add eth0.3 link eth0 type vlan id 3
553 + ip -n "${c_ns}" link set eth0.3 up
554 + ip -n "${c_ns}" addr add 192.0.3.10/24 dev eth0.3
555 + ip -n "${c_ns}" addr add 2001:db8::3:10/64 dev eth0.3
556 +
557 + # Add tc rule to check the vlan pkts
558 + tc -n "${c_ns}" qdisc add dev eth0.3 clsact
559 + tc -n "${c_ns}" filter add dev eth0.3 ingress protocol arp \
560 + handle 101 flower skip_hw arp_op request \
561 + arp_sip 192.0.3.1 arp_tip 192.0.3.10 action pass
562 + tc -n "${c_ns}" filter add dev eth0.3 ingress protocol ipv6 \
563 + handle 102 flower skip_hw ip_proto icmpv6 \
564 + type 135 src_ip 2001:db8::3:1 action pass
565 +
566 + vlan_over_bond_arp "active-backup"
567 + vlan_over_bond_ns "active-backup"
379 568 }
380 569
381 570 trap cleanup EXIT
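For reference, the fail_over_mac values 0/1/2 passed to bond_reset above correspond to the bonding option's none/active/follow policies. A minimal hand-run sketch with plain iproute2, outside the selftest harness (device names here are placeholders):

    ip link add bond0 type bond mode active-backup miimon 100 fail_over_mac follow
    ip link set eth0 down; ip link set eth0 master bond0
    ip link set eth1 down; ip link set eth1 master bond0
    ip -d link show bond0    # the details line reports the fail_over_mac policy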
+3
tools/testing/selftests/drivers/net/bonding/bond_topo_2d1c.sh
··· 39 39 s_ip6="2001:db8::1"
40 40 c_ip6="2001:db8::10"
41 41 g_ip6="2001:db8::254"
42 + mac[0]="00:0a:0b:0c:0d:01"
43 + mac[1]="00:0a:0b:0c:0d:02"
42 44
43 45 gateway_create()
44 46 {
··· 64 62
65 63 for i in $(seq 0 1); do
66 64 ip -n ${s_ns} link add eth${i} type veth peer name s${i} netns ${g_ns}
65 + ip -n "${s_ns}" link set "eth${i}" addr "${mac[$i]}"
67 66
68 67 ip -n ${g_ns} link set s${i} up
69 68 ip -n ${g_ns} link set s${i} master br0
+2
tools/testing/selftests/drivers/net/bonding/bond_topo_3d1c.sh
··· 26 26 # +-------------------------------------+
27 27
28 28 source bond_topo_2d1c.sh
29 + mac[2]="00:0a:0b:0c:0d:03"
29 30
30 31 setup_prepare()
31 32 {
··· 37 36 # Add the extra device as we use 3 down links for bond0
38 37 local i=2
39 38 ip -n ${s_ns} link add eth${i} type veth peer name s${i} netns ${g_ns}
39 + ip -n "${s_ns}" link set "eth${i}" addr "${mac[$i]}"
40 40 ip -n ${g_ns} link set s${i} up
41 41 ip -n ${g_ns} link set s${i} master br0
42 42 ip -n ${s_ns} link set eth${i} master bond0
+1
tools/testing/selftests/drivers/net/bonding/config
··· 10 10 CONFIG_NET_SCH_INGRESS=y
11 11 CONFIG_NLMON=y
12 12 CONFIG_VETH=y
13 + CONFIG_VLAN_8021Q=m
+6 -5
tools/testing/selftests/net/mptcp/mptcp_connect.c
··· 1093 1093 struct pollfd polls;
1094 1094 socklen_t salen;
1095 1095 int remotesock;
1096 + int err = 0;
1096 1097 int fd = 0;
1097 1098
1098 1099 again:
··· 1126 1125 SOCK_TEST_TCPULP(remotesock, 0);
1127 1126
1128 1127 memset(&winfo, 0, sizeof(winfo));
1129 - copyfd_io(fd, remotesock, 1, true, &winfo);
1128 + err = copyfd_io(fd, remotesock, 1, true, &winfo);
1130 1129 } else {
1131 1130 perror("accept");
1132 1131 return 1;
··· 1135 1134 if (cfg_input)
1136 1135 close(fd);
1137 1136
1138 - if (--cfg_repeat > 0)
1137 + if (!err && --cfg_repeat > 0)
1139 1138 goto again;
1140 1139
1141 - return 0;
1140 + return err;
1142 1141 }
1143 1142
1144 1143 static void init_rng(void)
··· 1248 1247 else
1249 1248 xerror("bad family");
1250 1249
1251 - strcpy(cmd, "ss -M | grep -q ");
1250 + strcpy(cmd, "ss -Mnt | grep -q ");
1252 1251 cmdlen = strlen(cmd);
1253 1252 if (!inet_ntop(addr.ss_family, raw_addr, &cmd[cmdlen],
1254 1253 sizeof(cmd) - cmdlen))
··· 1258 1257
1259 1258 /*
1260 1259 * wait until the pending data is completely flushed and all
1261 - * the MPTCP sockets reached the closed status.
1260 + * the sockets reached the closed status.
1262 1261 * disconnect will bypass/ignore/drop any pending data.
1263 1262 */
1264 1263 for (i = 0; ; i += msec_sleep) {
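Note the tighter ss invocation: -M alone lists only MPTCP sockets, while -Mnt also matches the TCP subflows and prints addresses numerically, which is what the updated comment about waiting for all sockets to close relies on. A rough equivalent of the generated command, with a placeholder address:

    ss -Mnt | grep -q 10.0.1.1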
+5 -1
tools/testing/selftests/net/mptcp/mptcp_connect.sh
··· 211 211 done
212 212 fi
213 213
214 + if $capture; then
215 + rndh="${ns1:4}"
216 + mptcp_lib_pr_info "Packet capture files will have this prefix: ${rndh}-"
217 + fi
218 +
214 219 set_ethtool_flags() {
215 220 local ns="$1"
216 221 local dev="$2"
··· 366 361
367 362 if $capture; then
368 363 local capuser
369 - local rndh="${connector_ns:4}"
370 364 if [ -z $SUDO_USER ] ; then
371 365 capuser=""
372 366 else
+1 -1
tools/testing/selftests/net/mptcp/mptcp_lib.sh
··· 384 384 mptcp_lib_print_file_err() {
385 385 ls -l "${1}" 1>&2
386 386 echo "Trailing bytes are: "
387 - tail -c 27 "${1}"
387 + tail -c 32 "${1}" | od -x | head -n2
388 388 }
389 389
390 390 # $1: input file ; $2: output file ; $3: what kind of file
+10 -6
tools/testing/selftests/net/mptcp/mptcp_sockopt.c
··· 667 667
668 668 do_getsockopts(&s, fd, ret, ret2);
669 669 if (s.mptcpi_rcv_delta != (uint64_t)ret + 1)
670 - xerror("mptcpi_rcv_delta %" PRIu64 ", expect %" PRIu64, s.mptcpi_rcv_delta, ret + 1, s.mptcpi_rcv_delta - ret);
670 + xerror("mptcpi_rcv_delta %" PRIu64 ", expect %" PRIu64 ", diff %" PRId64,
671 + s.mptcpi_rcv_delta, ret + 1, s.mptcpi_rcv_delta - (ret + 1));
671 672
672 673 /* be nice when running on top of older kernel */
673 674 if (s.pkt_stats_avail) {
674 675 if (s.last_sample.mptcpi_bytes_sent != ret2)
675 - xerror("mptcpi_bytes_sent %" PRIu64 ", expect %" PRIu64,
676 + xerror("mptcpi_bytes_sent %" PRIu64 ", expect %" PRIu64
677 + ", diff %" PRId64,
676 678 s.last_sample.mptcpi_bytes_sent, ret2,
677 679 s.last_sample.mptcpi_bytes_sent - ret2);
678 680 if (s.last_sample.mptcpi_bytes_received != ret)
679 - xerror("mptcpi_bytes_received %" PRIu64 ", expect %" PRIu64,
681 + xerror("mptcpi_bytes_received %" PRIu64 ", expect %" PRIu64
682 + ", diff %" PRId64,
680 683 s.last_sample.mptcpi_bytes_received, ret,
681 684 s.last_sample.mptcpi_bytes_received - ret);
682 685 if (s.last_sample.mptcpi_bytes_acked != ret)
683 - xerror("mptcpi_bytes_acked %" PRIu64 ", expect %" PRIu64,
684 - s.last_sample.mptcpi_bytes_acked, ret2,
685 - s.last_sample.mptcpi_bytes_acked - ret2);
686 + xerror("mptcpi_bytes_acked %" PRIu64 ", expect %" PRIu64
687 + ", diff %" PRId64,
688 + s.last_sample.mptcpi_bytes_acked, ret,
689 + s.last_sample.mptcpi_bytes_acked - ret);
686 690
687 691 close(fd);
+7
tools/testing/selftests/net/mptcp/pm_nl_ctl.c
··· 188 188 fprintf(stderr, ",error:%u", *(__u8 *)RTA_DATA(attrs));
189 189 else if (attrs->rta_type == MPTCP_ATTR_SERVER_SIDE)
190 190 fprintf(stderr, ",server_side:%u", *(__u8 *)RTA_DATA(attrs));
191 + else if (attrs->rta_type == MPTCP_ATTR_FLAGS) {
192 + __u16 flags = *(__u16 *)RTA_DATA(attrs);
193 +
194 + /* only print when present, easier */
195 + if (flags & MPTCP_PM_EV_FLAG_DENY_JOIN_ID0)
196 + fprintf(stderr, ",deny_join_id0:1");
197 + }
191 198
192 199 attrs = RTA_NEXT(attrs, msg_len);
193 200 }
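Since the field is only printed when MPTCP_PM_EV_FLAG_DENY_JOIN_ID0 is set, scripts can simply look for the token in the event stream; a hedged sketch using the same helper, which writes events to stderr (the namespace name is a placeholder):

    ip netns exec ns1 ./pm_nl_ctl events 2>&1 | grep -m1 deny_join_id0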
+11 -3
tools/testing/selftests/net/mptcp/userspace_pm.sh
··· 201 201 is_v6="v4"
202 202 fi
203 203
204 + # set this on the client side only: will not affect the rest
205 + ip netns exec "$ns2" sysctl -q net.mptcp.allow_join_initial_addr_port=0
206 +
204 207 :>"$client_evts"
205 208 :>"$server_evts"
206 209
··· 226 223 local client_token
227 224 local client_port
228 225 local client_serverside
226 + local client_nojoin
229 227 local server_token
230 228 local server_serverside
229 + local server_nojoin
231 230
232 231 client_token=$(mptcp_lib_evts_get_info token "$client_evts")
233 232 client_port=$(mptcp_lib_evts_get_info sport "$client_evts")
234 233 client_serverside=$(mptcp_lib_evts_get_info server_side "$client_evts")
234 + client_nojoin=$(mptcp_lib_evts_get_info deny_join_id0 "$client_evts")
235 235 server_token=$(mptcp_lib_evts_get_info token "$server_evts")
236 236 server_serverside=$(mptcp_lib_evts_get_info server_side "$server_evts")
237 + server_nojoin=$(mptcp_lib_evts_get_info deny_join_id0 "$server_evts")
237 238
238 239 print_test "Established IP${is_v6} MPTCP Connection ns2 => ns1"
239 - if [ "$client_token" != "" ] && [ "$server_token" != "" ] && [ "$client_serverside" = 0 ] &&
240 - [ "$server_serverside" = 1 ]
240 + if [ "${client_token}" != "" ] && [ "${server_token}" != "" ] &&
241 + [ "${client_serverside}" = 0 ] && [ "${server_serverside}" = 1 ] &&
242 + [ "${client_nojoin:-0}" = 0 ] && [ "${server_nojoin:-0}" = 1 ]
241 243 then
242 244 test_pass
243 245 print_title "Connection info: ${client_addr}:${client_port} -> ${connect_addr}:${app_port}"
244 246 else
245 - test_fail "Expected tokens (c:${client_token} - s:${server_token}) and server (c:${client_serverside} - s:${server_serverside})"
247 + test_fail "Expected tokens (c:${client_token} - s:${server_token}), server (c:${client_serverside} - s:${server_serverside}), nojoin (c:${client_nojoin} - s:${server_nojoin})"
246 248 mptcp_lib_result_print_all_tap
247 249 exit ${KSFT_FAIL}
248 250 fi
+81 -7
tools/testing/selftests/net/openvswitch/openvswitch.sh
··· 25 25 nat_related_v4 ip4-nat-related: ICMP related matches work with SNAT
26 26 netlink_checks ovsnl: validate netlink attrs and settings
27 27 upcall_interfaces ovs: test the upcall interfaces
28 + tunnel_metadata ovs: test extraction of tunnel metadata
28 29 drop_reason drop: test drop reasons are emitted
29 30 psample psample: Sampling packets with psample"
30 31
··· 114 113 }
115 114
116 115 ovs_add_if () {
117 - info "Adding IF to DP: br:$2 if:$3"
118 - if [ "$4" != "-u" ]; then
119 - ovs_sbx "$1" python3 $ovs_base/ovs-dpctl.py add-if "$2" "$3" \
120 - || return 1
116 + info "Adding IF to DP: br:$3 if:$4 ($2)"
117 + if [ "$5" != "-u" ]; then
118 + ovs_sbx "$1" python3 $ovs_base/ovs-dpctl.py add-if \
119 + -t "$2" "$3" "$4" || return 1
121 120 else
122 121 python3 $ovs_base/ovs-dpctl.py add-if \
123 - -u "$2" "$3" >$ovs_dir/$3.out 2>$ovs_dir/$3.err &
122 + -u -t "$2" "$3" "$4" >$ovs_dir/$4.out 2>$ovs_dir/$4.err &
124 123 pid=$!
125 124 on_exit "ovs_sbx $1 kill -TERM $pid 2>/dev/null"
126 125 fi
··· 167 166 fi
168 167
169 168 if [ "$7" != "-u" ]; then
170 - ovs_add_if "$1" "$2" "$4" || return 1
169 + ovs_add_if "$1" "netdev" "$2" "$4" || return 1
171 170 else
172 - ovs_add_if "$1" "$2" "$4" -u || return 1
171 + ovs_add_if "$1" "netdev" "$2" "$4" -u || return 1
173 172 fi
174 173
175 174 if [ $TRACING -eq 1 ]; then
··· 754 753 >$ovs_dir/arping.stdout 2>$ovs_dir/arping.stderr
755 754
756 755 grep -E "MISS upcall\[0/yes\]: .*arp\(sip=172.31.110.1,tip=172.31.110.20,op=1,sha=" $ovs_dir/left0.out >/dev/null 2>&1 || return 1
756 + return 0
757 + }
758 +
759 + ovs_add_kernel_tunnel() {
760 + local sbxname=$1; shift
761 + local ns=$1; shift
762 + local tnl_type=$1; shift
763 + local name=$1; shift
764 + local addr=$1; shift
765 +
766 + info "setting up kernel ${tnl_type} tunnel ${name}"
767 + ovs_sbx "${sbxname}" ip -netns ${ns} link add dev ${name} type ${tnl_type} $* || return 1
768 + on_exit "ovs_sbx ${sbxname} ip -netns ${ns} link del ${name} >/dev/null 2>&1"
769 + ovs_sbx "${sbxname}" ip -netns ${ns} addr add dev ${name} ${addr} || return 1
770 + ovs_sbx "${sbxname}" ip -netns ${ns} link set dev ${name} mtu 1450 up || return 1
771 + }
772 +
773 + test_tunnel_metadata() {
774 + which arping >/dev/null 2>&1 || return $ksft_skip
775 +
776 + sbxname="test_tunnel_metadata"
777 + sbx_add "${sbxname}" || return 1
778 +
779 + info "setting up new DP"
780 + ovs_add_dp "${sbxname}" tdp0 -V 2:1 || return 1
781 +
782 + ovs_add_netns_and_veths "${sbxname}" tdp0 tns left0 l0 \
783 + 172.31.110.1/24 || return 1
784 +
785 + info "removing veth interface from openvswitch and setting IP"
786 + ovs_del_if "${sbxname}" tdp0 left0 || return 1
787 + ovs_sbx "${sbxname}" ip addr add 172.31.110.2/24 dev left0 || return 1
788 + ovs_sbx "${sbxname}" ip link set left0 up || return 1
789 +
790 + info "setting up tunnel port in openvswitch"
791 + ovs_add_if "${sbxname}" "vxlan" tdp0 ovs-vxlan0 -u || return 1
792 + on_exit "ovs_sbx ${sbxname} ip link del ovs-vxlan0"
793 + ovs_wait ip link show ovs-vxlan0 &>/dev/null || return 1
794 + ovs_sbx "${sbxname}" ip link set ovs-vxlan0 up || return 1
795 +
796 + configs=$(echo '
797 + 1 172.31.221.1/24 1155332 32 set udpcsum flags\(df\|csum\)
798 + 2 172.31.222.1/24 1234567 45 set noudpcsum flags\(df\)
799 + 3 172.31.223.1/24 1020304 23 unset udpcsum flags\(csum\)
800 + 4 172.31.224.1/24 1357986 15 unset noudpcsum' | sed '/^$/d')
801 +
802 + while read -r i addr id ttl df csum flags; do
803 + ovs_add_kernel_tunnel "${sbxname}" tns vxlan vxlan${i} ${addr} \
804 + remote 172.31.110.2 id ${id} dstport 4789 \
805 + ttl ${ttl} df ${df} ${csum} || return 1
806 + done <<< "${configs}"
807 +
808 + ovs_wait grep -q 'listening on upcall packet handler' \
809 + ${ovs_dir}/ovs-vxlan0.out || return 1
810 +
811 + info "sending arping"
812 + for i in 1 2 3 4; do
813 + ovs_sbx "${sbxname}" ip netns exec tns \
814 + arping -I vxlan${i} 172.31.22${i}.2 -c 1 \
815 + >${ovs_dir}/arping.stdout 2>${ovs_dir}/arping.stderr
816 + done
817 +
818 + info "checking that received decapsulated packets carry correct metadata"
819 + while read -r i addr id ttl df csum flags; do
820 + arp_hdr="arp\\(sip=172.31.22${i}.1,tip=172.31.22${i}.2,op=1,sha="
821 + addrs="src=172.31.110.1,dst=172.31.110.2"
822 + ports="tp_src=[0-9]*,tp_dst=4789"
823 + tnl_md="tunnel\\(tun_id=${id},${addrs},ttl=${ttl},${ports},${flags}\\)"
824 +
825 + ovs_sbx "${sbxname}" grep -qE "MISS upcall.*${tnl_md}.*${arp_hdr}" \
826 + ${ovs_dir}/ovs-vxlan0.out || return 1
827 + done <<< "${configs}"
828 +
757 829 return 0
758 830 }
759 831
+26
tools/testing/selftests/net/packetdrill/tcp_fastopen_server_reset-after-disconnect.pkt
··· 1 + // SPDX-License-Identifier: GPL-2.0
2 + `./defaults.sh
3 + ./set_sysctls.py /proc/sys/net/ipv4/tcp_fastopen=0x602 /proc/sys/net/ipv4/tcp_timestamps=0`
4 +
5 + 0 socket(..., SOCK_STREAM|SOCK_NONBLOCK, IPPROTO_TCP) = 3
6 + +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
7 + +0 bind(3, ..., ...) = 0
8 + +0 listen(3, 1) = 0
9 +
10 + +0 < S 0:10(10) win 32792 <mss 1460,nop,nop,sackOK>
11 + +0 > S. 0:0(0) ack 11 win 65535 <mss 1460,nop,nop,sackOK>
12 +
13 + // sk->sk_state is TCP_SYN_RECV
14 + +.1 accept(3, ..., ...) = 4
15 +
16 + // tcp_disconnect() sets sk->sk_state to TCP_CLOSE
17 + +0 connect(4, AF_UNSPEC, ...) = 0
18 + +0 > R. 1:1(0) ack 11 win 65535
19 +
20 + // connect() sets sk->sk_state to TCP_SYN_SENT
21 + +0 fcntl(4, F_SETFL, O_RDWR|O_NONBLOCK) = 0
22 + +0 connect(4, ..., ...) = -1 EINPROGRESS (Operation is now in progress)
23 + +0 > S 0:0(0) win 65535 <mss 1460,nop,nop,sackOK,nop,wscale 8>
24 +
25 + // tp->fastopen_rsk must be NULL
26 + +1 > S 0:0(0) win 65535 <mss 1460,nop,nop,sackOK,nop,wscale 8>
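The tcp_fastopen value in the preamble is a bitmask; going by the ip-sysctl documentation, 0x602 is 0x2 | 0x200 | 0x400, i.e. server-side TFO enabled, data-in-SYN accepted without a cookie, and TFO active on all listeners without the TCP_FASTOPEN socket option, which is why the script can send a 10-byte SYN payload with no cookie exchange:

    sysctl -w net.ipv4.tcp_fastopen=0x602    # 0x2 server | 0x200 no-cookie | 0x400 all listeners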
+16
tools/testing/selftests/net/tls.c
··· 2770 2770 }
2771 2771 }
2772 2772
2773 + /* Use OOB+large send to trigger copy mode due to memory pressure.
2774 + * OOB causes a short read.
2775 + */
2776 + TEST_F(tls_err, oob_pressure)
2777 + {
2778 + char buf[1<<16];
2779 + int i;
2780 +
2781 + memrnd(buf, sizeof(buf));
2782 +
2783 + EXPECT_EQ(send(self->fd2, buf, 5, MSG_OOB), 5);
2784 + EXPECT_EQ(send(self->fd2, buf, sizeof(buf), 0), sizeof(buf));
2785 + for (i = 0; i < 64; i++)
2786 + EXPECT_EQ(send(self->fd2, buf, 5, MSG_OOB), 5);
2787 + }
2788 +
2773 2789 TEST(non_established) {
2774 2790 struct tls12_crypto_info_aes_gcm_256 tls12;
2775 2791 struct sockaddr_in addr;