Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge 4.2-rc4 into usb-next

We want the USB fixes that went into that release in this branch as
well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

+2105 -1578
+3
Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt
··· 35 35 36 36 NOTE: this only applies to the SMMU itself, not 37 37 masters connected upstream of the SMMU. 38 + 39 + - hisilicon,broken-prefetch-cmd 40 + : Avoid sending CMD_PREFETCH_* commands to the SMMU.
-2
Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
··· 17 17 "fsl,imx6sx-usdhc" 18 18 19 19 Optional properties: 20 - - fsl,cd-controller : Indicate to use controller internal card detection 21 20 - fsl,wp-controller : Indicate to use controller internal write protection 22 21 - fsl,delay-line : Specify the number of delay cells for override mode. 23 22 This is used to set the clock delay for DLL(Delay Line) on override mode ··· 34 35 compatible = "fsl,imx51-esdhc"; 35 36 reg = <0x70004000 0x4000>; 36 37 interrupts = <1>; 37 - fsl,cd-controller; 38 38 fsl,wp-controller; 39 39 }; 40 40
+15 -1
MAINTAINERS
··· 5899 5899 F: Documentation/s390/kvm.txt 5900 5900 F: arch/s390/include/asm/kvm* 5901 5901 F: arch/s390/kvm/ 5902 - F: drivers/s390/kvm/ 5903 5902 5904 5903 KERNEL VIRTUAL MACHINE (KVM) FOR ARM 5905 5904 M: Christoffer Dall <christoffer.dall@linaro.org> ··· 6837 6838 T: git git://linuxtv.org/anttip/media_tree.git 6838 6839 S: Maintained 6839 6840 F: drivers/media/usb/msi2500/ 6841 + 6842 + MSYSTEMS DISKONCHIP G3 MTD DRIVER 6843 + M: Robert Jarzmik <robert.jarzmik@free.fr> 6844 + L: linux-mtd@lists.infradead.org 6845 + S: Maintained 6846 + F: drivers/mtd/devices/docg3* 6840 6847 6841 6848 MT9M032 APTINA SENSOR DRIVER 6842 6849 M: Laurent Pinchart <laurent.pinchart@ideasonboard.com> ··· 10900 10895 F: drivers/block/virtio_blk.c 10901 10896 F: include/linux/virtio_*.h 10902 10897 F: include/uapi/linux/virtio_*.h 10898 + 10899 + VIRTIO DRIVERS FOR S390 10900 + M: Christian Borntraeger <borntraeger@de.ibm.com> 10901 + M: Cornelia Huck <cornelia.huck@de.ibm.com> 10902 + L: linux-s390@vger.kernel.org 10903 + L: virtualization@lists.linux-foundation.org 10904 + L: kvm@vger.kernel.org 10905 + S: Supported 10906 + F: drivers/s390/virtio/ 10903 10907 10904 10908 VIRTIO GPU DRIVER 10905 10909 M: David Airlie <airlied@linux.ie>
+1 -1
Makefile
··· 1 1 VERSION = 4 2 2 PATCHLEVEL = 2 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc3 4 + EXTRAVERSION = -rc4 5 5 NAME = Hurr durr I'ma sheep 6 6 7 7 # *DOCUMENTATION*
+3 -2
arch/arm/boot/dts/imx25-pdk.dts
··· 10 10 */ 11 11 12 12 /dts-v1/; 13 + #include <dt-bindings/gpio/gpio.h> 13 14 #include <dt-bindings/input/input.h> 14 15 #include "imx25.dtsi" 15 16 ··· 115 114 &esdhc1 { 116 115 pinctrl-names = "default"; 117 116 pinctrl-0 = <&pinctrl_esdhc1>; 118 - cd-gpios = <&gpio2 1 0>; 119 - wp-gpios = <&gpio2 0 0>; 117 + cd-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>; 118 + wp-gpios = <&gpio2 0 GPIO_ACTIVE_HIGH>; 120 119 status = "okay"; 121 120 }; 122 121
+1 -1
arch/arm/boot/dts/imx51-apf51dev.dts
··· 98 98 &esdhc1 { 99 99 pinctrl-names = "default"; 100 100 pinctrl-0 = <&pinctrl_esdhc1>; 101 - cd-gpios = <&gpio2 29 GPIO_ACTIVE_HIGH>; 101 + cd-gpios = <&gpio2 29 GPIO_ACTIVE_LOW>; 102 102 bus-width = <4>; 103 103 status = "okay"; 104 104 };
+2 -2
arch/arm/boot/dts/imx53-ard.dts
··· 103 103 &esdhc1 { 104 104 pinctrl-names = "default"; 105 105 pinctrl-0 = <&pinctrl_esdhc1>; 106 - cd-gpios = <&gpio1 1 0>; 107 - wp-gpios = <&gpio1 9 0>; 106 + cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>; 107 + wp-gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>; 108 108 status = "okay"; 109 109 }; 110 110
+2 -2
arch/arm/boot/dts/imx53-m53evk.dts
··· 124 124 &esdhc1 { 125 125 pinctrl-names = "default"; 126 126 pinctrl-0 = <&pinctrl_esdhc1>; 127 - cd-gpios = <&gpio1 1 0>; 128 - wp-gpios = <&gpio1 9 0>; 127 + cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>; 128 + wp-gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>; 129 129 status = "okay"; 130 130 }; 131 131
+2 -2
arch/arm/boot/dts/imx53-qsb-common.dtsi
··· 147 147 &esdhc3 { 148 148 pinctrl-names = "default"; 149 149 pinctrl-0 = <&pinctrl_esdhc3>; 150 - cd-gpios = <&gpio3 11 0>; 151 - wp-gpios = <&gpio3 12 0>; 150 + cd-gpios = <&gpio3 11 GPIO_ACTIVE_LOW>; 151 + wp-gpios = <&gpio3 12 GPIO_ACTIVE_HIGH>; 152 152 bus-width = <8>; 153 153 status = "okay"; 154 154 };
+2 -2
arch/arm/boot/dts/imx53-smd.dts
··· 41 41 &esdhc1 { 42 42 pinctrl-names = "default"; 43 43 pinctrl-0 = <&pinctrl_esdhc1>; 44 - cd-gpios = <&gpio3 13 0>; 45 - wp-gpios = <&gpio4 11 0>; 44 + cd-gpios = <&gpio3 13 GPIO_ACTIVE_LOW>; 45 + wp-gpios = <&gpio4 11 GPIO_ACTIVE_HIGH>; 46 46 status = "okay"; 47 47 }; 48 48
+2 -2
arch/arm/boot/dts/imx53-tqma53.dtsi
··· 41 41 pinctrl-0 = <&pinctrl_esdhc2>, 42 42 <&pinctrl_esdhc2_cdwp>; 43 43 vmmc-supply = <&reg_3p3v>; 44 - wp-gpios = <&gpio1 2 0>; 45 - cd-gpios = <&gpio1 4 0>; 44 + wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>; 45 + cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>; 46 46 status = "disabled"; 47 47 }; 48 48
+2 -2
arch/arm/boot/dts/imx53-tx53.dtsi
··· 183 183 }; 184 184 185 185 &esdhc1 { 186 - cd-gpios = <&gpio3 24 GPIO_ACTIVE_HIGH>; 186 + cd-gpios = <&gpio3 24 GPIO_ACTIVE_LOW>; 187 187 fsl,wp-controller; 188 188 pinctrl-names = "default"; 189 189 pinctrl-0 = <&pinctrl_esdhc1>; ··· 191 191 }; 192 192 193 193 &esdhc2 { 194 - cd-gpios = <&gpio3 25 GPIO_ACTIVE_HIGH>; 194 + cd-gpios = <&gpio3 25 GPIO_ACTIVE_LOW>; 195 195 fsl,wp-controller; 196 196 pinctrl-names = "default"; 197 197 pinctrl-0 = <&pinctrl_esdhc2>;
+2 -2
arch/arm/boot/dts/imx53-voipac-bsb.dts
··· 119 119 &esdhc2 { 120 120 pinctrl-names = "default"; 121 121 pinctrl-0 = <&pinctrl_esdhc2>; 122 - cd-gpios = <&gpio3 25 0>; 123 - wp-gpios = <&gpio2 19 0>; 122 + cd-gpios = <&gpio3 25 GPIO_ACTIVE_LOW>; 123 + wp-gpios = <&gpio2 19 GPIO_ACTIVE_HIGH>; 124 124 vmmc-supply = <&reg_3p3v>; 125 125 status = "okay"; 126 126 };
+4 -4
arch/arm/boot/dts/imx6dl-riotboard.dts
··· 305 305 &usdhc2 { 306 306 pinctrl-names = "default"; 307 307 pinctrl-0 = <&pinctrl_usdhc2>; 308 - cd-gpios = <&gpio1 4 0>; 309 - wp-gpios = <&gpio1 2 0>; 308 + cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>; 309 + wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>; 310 310 vmmc-supply = <&reg_3p3v>; 311 311 status = "okay"; 312 312 }; ··· 314 314 &usdhc3 { 315 315 pinctrl-names = "default"; 316 316 pinctrl-0 = <&pinctrl_usdhc3>; 317 - cd-gpios = <&gpio7 0 0>; 318 - wp-gpios = <&gpio7 1 0>; 317 + cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>; 318 + wp-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>; 319 319 vmmc-supply = <&reg_3p3v>; 320 320 status = "okay"; 321 321 };
+3 -2
arch/arm/boot/dts/imx6q-arm2.dts
··· 11 11 */ 12 12 13 13 /dts-v1/; 14 + #include <dt-bindings/gpio/gpio.h> 14 15 #include "imx6q.dtsi" 15 16 16 17 / { ··· 197 196 }; 198 197 199 198 &usdhc3 { 200 - cd-gpios = <&gpio6 11 0>; 201 - wp-gpios = <&gpio6 14 0>; 199 + cd-gpios = <&gpio6 11 GPIO_ACTIVE_LOW>; 200 + wp-gpios = <&gpio6 14 GPIO_ACTIVE_HIGH>; 202 201 vmmc-supply = <&reg_3p3v>; 203 202 pinctrl-names = "default"; 204 203 pinctrl-0 = <&pinctrl_usdhc3
+2 -1
arch/arm/boot/dts/imx6q-gk802.dts
··· 7 7 */ 8 8 9 9 /dts-v1/; 10 + #include <dt-bindings/gpio/gpio.h> 10 11 #include "imx6q.dtsi" 11 12 12 13 / { ··· 162 161 pinctrl-names = "default"; 163 162 pinctrl-0 = <&pinctrl_usdhc3>; 164 163 bus-width = <4>; 165 - cd-gpios = <&gpio6 11 0>; 164 + cd-gpios = <&gpio6 11 GPIO_ACTIVE_LOW>; 166 165 vmmc-supply = <&reg_3p3v>; 167 166 status = "okay"; 168 167 };
+2 -2
arch/arm/boot/dts/imx6q-tbs2910.dts
··· 251 251 pinctrl-names = "default"; 252 252 pinctrl-0 = <&pinctrl_usdhc2>; 253 253 bus-width = <4>; 254 - cd-gpios = <&gpio2 2 GPIO_ACTIVE_HIGH>; 254 + cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>; 255 255 vmmc-supply = <&reg_3p3v>; 256 256 status = "okay"; 257 257 }; ··· 260 260 pinctrl-names = "default"; 261 261 pinctrl-0 = <&pinctrl_usdhc3>; 262 262 bus-width = <4>; 263 - cd-gpios = <&gpio2 0 GPIO_ACTIVE_HIGH>; 263 + cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>; 264 264 wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>; 265 265 vmmc-supply = <&reg_3p3v>; 266 266 status = "okay";
+2 -2
arch/arm/boot/dts/imx6qdl-aristainetos.dtsi
··· 173 173 pinctrl-names = "default"; 174 174 pinctrl-0 = <&pinctrl_usdhc1>; 175 175 vmmc-supply = <&reg_3p3v>; 176 - cd-gpios = <&gpio4 7 GPIO_ACTIVE_HIGH>; 176 + cd-gpios = <&gpio4 7 GPIO_ACTIVE_LOW>; 177 177 status = "okay"; 178 178 }; 179 179 ··· 181 181 pinctrl-names = "default"; 182 182 pinctrl-0 = <&pinctrl_usdhc2>; 183 183 vmmc-supply = <&reg_3p3v>; 184 - cd-gpios = <&gpio4 8 GPIO_ACTIVE_HIGH>; 184 + cd-gpios = <&gpio4 8 GPIO_ACTIVE_LOW>; 185 185 status = "okay"; 186 186 }; 187 187
+2 -2
arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi
··· 392 392 &usdhc1 { 393 393 pinctrl-names = "default"; 394 394 pinctrl-0 = <&pinctrl_usdhc1>; 395 - cd-gpios = <&gpio1 27 GPIO_ACTIVE_HIGH>; 395 + cd-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>; 396 396 no-1-8-v; 397 397 status = "okay"; 398 398 }; ··· 400 400 &usdhc2 { 401 401 pinctrl-names = "default"; 402 402 pinctrl-0 = <&pinctrl_usdhc2>; 403 - cd-gpios = <&gpio4 5 GPIO_ACTIVE_HIGH>; 403 + cd-gpios = <&gpio4 5 GPIO_ACTIVE_LOW>; 404 404 wp-gpios = <&gpio2 10 GPIO_ACTIVE_HIGH>; 405 405 no-1-8-v; 406 406 status = "okay";
+1 -1
arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
··· 258 258 pinctrl-names = "default"; 259 259 pinctrl-0 = <&pinctrl_cubox_i_usdhc2_aux &pinctrl_cubox_i_usdhc2>; 260 260 vmmc-supply = <&reg_3p3v>; 261 - cd-gpios = <&gpio1 4 0>; 261 + cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>; 262 262 status = "okay"; 263 263 };
+3 -1
arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi
··· 1 + #include <dt-bindings/gpio/gpio.h> 2 + 1 3 / { 2 4 regulators { 3 5 compatible = "simple-bus"; ··· 183 181 &usdhc2 { /* module slot */ 184 182 pinctrl-names = "default"; 185 183 pinctrl-0 = <&pinctrl_usdhc2>; 186 - cd-gpios = <&gpio2 2 0>; 184 + cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>; 187 185 status = "okay"; 188 186 }; 189 187
+1 -1
arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
··· 318 318 &usdhc3 { 319 319 pinctrl-names = "default"; 320 320 pinctrl-0 = <&pinctrl_usdhc3>; 321 - cd-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>; 321 + cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>; 322 322 vmmc-supply = <&reg_3p3v>; 323 323 status = "okay"; 324 324 };
+1 -1
arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
··· 324 324 &usdhc3 { 325 325 pinctrl-names = "default"; 326 326 pinctrl-0 = <&pinctrl_usdhc3>; 327 - cd-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>; 327 + cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>; 328 328 vmmc-supply = <&reg_3p3v>; 329 329 status = "okay"; 330 330 };
+1 -1
arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
··· 417 417 &usdhc3 { 418 418 pinctrl-names = "default"; 419 419 pinctrl-0 = <&pinctrl_usdhc3>; 420 - cd-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>; 420 + cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>; 421 421 vmmc-supply = <&reg_3p3v>; 422 422 status = "okay"; 423 423 };
+1 -1
arch/arm/boot/dts/imx6qdl-hummingboard.dtsi
··· 299 299 &pinctrl_hummingboard_usdhc2 300 300 >; 301 301 vmmc-supply = <&reg_3p3v>; 302 - cd-gpios = <&gpio1 4 0>; 302 + cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>; 303 303 status = "okay"; 304 304 };
+2 -2
arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
··· 453 453 &usdhc3 { 454 454 pinctrl-names = "default"; 455 455 pinctrl-0 = <&pinctrl_usdhc3>; 456 - cd-gpios = <&gpio7 0 0>; 456 + cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>; 457 457 vmmc-supply = <&reg_3p3v>; 458 458 status = "okay"; 459 459 }; ··· 461 461 &usdhc4 { 462 462 pinctrl-names = "default"; 463 463 pinctrl-0 = <&pinctrl_usdhc4>; 464 - cd-gpios = <&gpio2 6 0>; 464 + cd-gpios = <&gpio2 6 GPIO_ACTIVE_LOW>; 465 465 vmmc-supply = <&reg_3p3v>; 466 466 status = "okay"; 467 467 };
+4 -4
arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
··· 409 409 &usdhc2 { 410 410 pinctrl-names = "default"; 411 411 pinctrl-0 = <&pinctrl_usdhc2>; 412 - cd-gpios = <&gpio1 4 0>; 413 - wp-gpios = <&gpio1 2 0>; 412 + cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>; 413 + wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>; 414 414 status = "disabled"; 415 415 }; 416 416 ··· 418 418 pinctrl-names = "default"; 419 419 pinctrl-0 = <&pinctrl_usdhc3 420 420 &pinctrl_usdhc3_cdwp>; 421 - cd-gpios = <&gpio1 27 0>; 422 - wp-gpios = <&gpio1 29 0>; 421 + cd-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>; 422 + wp-gpios = <&gpio1 29 GPIO_ACTIVE_HIGH>; 423 423 status = "disabled"; 424 424 };
+2 -2
arch/arm/boot/dts/imx6qdl-rex.dtsi
··· 342 342 pinctrl-0 = <&pinctrl_usdhc2>; 343 343 bus-width = <4>; 344 344 cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>; 345 - wp-gpios = <&gpio2 3 GPIO_ACTIVE_LOW>; 345 + wp-gpios = <&gpio2 3 GPIO_ACTIVE_HIGH>; 346 346 status = "okay"; 347 347 }; 348 348 ··· 351 351 pinctrl-0 = <&pinctrl_usdhc3>; 352 352 bus-width = <4>; 353 353 cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>; 354 - wp-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>; 354 + wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>; 355 355 status = "okay"; 356 356 };
+2 -2
arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
··· 467 467 pinctrl-0 = <&pinctrl_usdhc3>; 468 468 pinctrl-1 = <&pinctrl_usdhc3_100mhz>; 469 469 pinctrl-2 = <&pinctrl_usdhc3_200mhz>; 470 - cd-gpios = <&gpio6 15 0>; 471 - wp-gpios = <&gpio1 13 0>; 470 + cd-gpios = <&gpio6 15 GPIO_ACTIVE_LOW>; 471 + wp-gpios = <&gpio1 13 GPIO_ACTIVE_HIGH>; 472 472 status = "okay"; 473 473 }; 474 474
+3 -3
arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
··· 448 448 &usdhc3 { 449 449 pinctrl-names = "default"; 450 450 pinctrl-0 = <&pinctrl_usdhc3>; 451 - cd-gpios = <&gpio7 0 0>; 452 - wp-gpios = <&gpio7 1 0>; 451 + cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>; 452 + wp-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>; 453 453 vmmc-supply = <&reg_3p3v>; 454 454 status = "okay"; 455 455 }; ··· 457 457 &usdhc4 { 458 458 pinctrl-names = "default"; 459 459 pinctrl-0 = <&pinctrl_usdhc4>; 460 - cd-gpios = <&gpio2 6 0>; 460 + cd-gpios = <&gpio2 6 GPIO_ACTIVE_LOW>; 461 461 vmmc-supply = <&reg_3p3v>; 462 462 status = "okay"; 463 463 };
+4 -4
arch/arm/boot/dts/imx6qdl-sabresd.dtsi
··· 562 562 pinctrl-names = "default"; 563 563 pinctrl-0 = <&pinctrl_usdhc2>; 564 564 bus-width = <8>; 565 - cd-gpios = <&gpio2 2 0>; 566 - wp-gpios = <&gpio2 3 0>; 565 + cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>; 566 + wp-gpios = <&gpio2 3 GPIO_ACTIVE_HIGH>; 567 567 status = "okay"; 568 568 }; 569 569 ··· 571 571 pinctrl-names = "default"; 572 572 pinctrl-0 = <&pinctrl_usdhc3>; 573 573 bus-width = <8>; 574 - cd-gpios = <&gpio2 0 0>; 575 - wp-gpios = <&gpio2 1 0>; 574 + cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>; 575 + wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>; 576 576 status = "okay"; 577 577 }; 578 578
+2 -2
arch/arm/boot/dts/imx6qdl-tx6.dtsi
··· 680 680 pinctrl-0 = <&pinctrl_usdhc1>; 681 681 bus-width = <4>; 682 682 no-1-8-v; 683 - cd-gpios = <&gpio7 2 0>; 683 + cd-gpios = <&gpio7 2 GPIO_ACTIVE_LOW>; 684 684 fsl,wp-controller; 685 685 status = "okay"; 686 686 }; ··· 690 690 pinctrl-0 = <&pinctrl_usdhc2>; 691 691 bus-width = <4>; 692 692 no-1-8-v; 693 - cd-gpios = <&gpio7 3 0>; 693 + cd-gpios = <&gpio7 3 GPIO_ACTIVE_LOW>; 694 694 fsl,wp-controller; 695 695 status = "okay"; 696 696 };
+4 -2
arch/arm/boot/dts/imx6qdl-wandboard.dtsi
··· 9 9 * 10 10 */ 11 11 12 + #include <dt-bindings/gpio/gpio.h> 13 + 12 14 / { 13 15 regulators { 14 16 compatible = "simple-bus"; ··· 252 250 &usdhc1 { 253 251 pinctrl-names = "default"; 254 252 pinctrl-0 = <&pinctrl_usdhc1>; 255 - cd-gpios = <&gpio1 2 0>; 253 + cd-gpios = <&gpio1 2 GPIO_ACTIVE_LOW>; 256 254 status = "okay"; 257 255 }; 258 256 259 257 &usdhc3 { 260 258 pinctrl-names = "default"; 261 259 pinctrl-0 = <&pinctrl_usdhc3>; 262 - cd-gpios = <&gpio3 9 0>; 260 + cd-gpios = <&gpio3 9 GPIO_ACTIVE_LOW>; 263 261 status = "okay"; 264 262 };
+5 -5
arch/arm/boot/dts/imx6sl-evk.dts
··· 617 617 pinctrl-1 = <&pinctrl_usdhc1_100mhz>; 618 618 pinctrl-2 = <&pinctrl_usdhc1_200mhz>; 619 619 bus-width = <8>; 620 - cd-gpios = <&gpio4 7 0>; 621 - wp-gpios = <&gpio4 6 0>; 620 + cd-gpios = <&gpio4 7 GPIO_ACTIVE_LOW>; 621 + wp-gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>; 622 622 status = "okay"; 623 623 }; 624 624 ··· 627 627 pinctrl-0 = <&pinctrl_usdhc2>; 628 628 pinctrl-1 = <&pinctrl_usdhc2_100mhz>; 629 629 pinctrl-2 = <&pinctrl_usdhc2_200mhz>; 630 - cd-gpios = <&gpio5 0 0>; 631 - wp-gpios = <&gpio4 29 0>; 630 + cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>; 631 + wp-gpios = <&gpio4 29 GPIO_ACTIVE_HIGH>; 632 632 status = "okay"; 633 633 }; 634 634 ··· 637 637 pinctrl-0 = <&pinctrl_usdhc3>; 638 638 pinctrl-1 = <&pinctrl_usdhc3_100mhz>; 639 639 pinctrl-2 = <&pinctrl_usdhc3_200mhz>; 640 - cd-gpios = <&gpio3 22 0>; 640 + cd-gpios = <&gpio3 22 GPIO_ACTIVE_LOW>; 641 641 status = "okay"; 642 642 };
+2 -2
arch/arm/boot/dts/imx6sx-sabreauto.dts
··· 49 49 pinctrl-1 = <&pinctrl_usdhc3_100mhz>; 50 50 pinctrl-2 = <&pinctrl_usdhc3_200mhz>; 51 51 bus-width = <8>; 52 - cd-gpios = <&gpio7 10 GPIO_ACTIVE_HIGH>; 52 + cd-gpios = <&gpio7 10 GPIO_ACTIVE_LOW>; 53 53 wp-gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>; 54 54 keep-power-in-suspend; 55 55 enable-sdio-wakeup; ··· 61 61 pinctrl-names = "default"; 62 62 pinctrl-0 = <&pinctrl_usdhc4>; 63 63 bus-width = <8>; 64 - cd-gpios = <&gpio7 11 GPIO_ACTIVE_HIGH>; 64 + cd-gpios = <&gpio7 11 GPIO_ACTIVE_LOW>; 65 65 no-1-8-v; 66 66 keep-power-in-suspend; 67 67 enable-sdio-wakup;
+2 -2
arch/arm/boot/dts/imx6sx-sdb.dtsi
··· 293 293 pinctrl-1 = <&pinctrl_usdhc3_100mhz>; 294 294 pinctrl-2 = <&pinctrl_usdhc3_200mhz>; 295 295 bus-width = <8>; 296 - cd-gpios = <&gpio2 10 GPIO_ACTIVE_HIGH>; 296 + cd-gpios = <&gpio2 10 GPIO_ACTIVE_LOW>; 297 297 wp-gpios = <&gpio2 15 GPIO_ACTIVE_HIGH>; 298 298 keep-power-in-suspend; 299 299 enable-sdio-wakeup; ··· 304 304 &usdhc4 { 305 305 pinctrl-names = "default"; 306 306 pinctrl-0 = <&pinctrl_usdhc4>; 307 - cd-gpios = <&gpio6 21 GPIO_ACTIVE_HIGH>; 307 + cd-gpios = <&gpio6 21 GPIO_ACTIVE_LOW>; 308 308 wp-gpios = <&gpio6 20 GPIO_ACTIVE_HIGH>; 309 309 status = "okay"; 310 310 };
+2 -2
arch/arm/boot/dts/imx7d-sdb.dts
··· 234 234 &usdhc1 { 235 235 pinctrl-names = "default"; 236 236 pinctrl-0 = <&pinctrl_usdhc1>; 237 - cd-gpios = <&gpio5 0 0>; 238 - wp-gpios = <&gpio5 1 0>; 237 + cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>; 238 + wp-gpios = <&gpio5 1 GPIO_ACTIVE_HIGH>; 239 239 enable-sdio-wakeup; 240 240 keep-power-in-suspend; 241 241 status = "okay";
+44 -13
arch/arm/net/bpf_jit_32.c
··· 74 74 75 75 int bpf_jit_enable __read_mostly; 76 76 77 - static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset) 77 + static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret, 78 + unsigned int size) 79 + { 80 + void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size); 81 + 82 + if (!ptr) 83 + return -EFAULT; 84 + memcpy(ret, ptr, size); 85 + return 0; 86 + } 87 + 88 + static u64 jit_get_skb_b(struct sk_buff *skb, int offset) 78 89 { 79 90 u8 ret; 80 91 int err; 81 92 82 - err = skb_copy_bits(skb, offset, &ret, 1); 93 + if (offset < 0) 94 + err = call_neg_helper(skb, offset, &ret, 1); 95 + else 96 + err = skb_copy_bits(skb, offset, &ret, 1); 83 97 84 98 return (u64)err << 32 | ret; 85 99 } 86 100 87 - static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset) 101 + static u64 jit_get_skb_h(struct sk_buff *skb, int offset) 88 102 { 89 103 u16 ret; 90 104 int err; 91 105 92 - err = skb_copy_bits(skb, offset, &ret, 2); 106 + if (offset < 0) 107 + err = call_neg_helper(skb, offset, &ret, 2); 108 + else 109 + err = skb_copy_bits(skb, offset, &ret, 2); 93 110 94 111 return (u64)err << 32 | ntohs(ret); 95 112 } 96 113 97 - static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset) 114 + static u64 jit_get_skb_w(struct sk_buff *skb, int offset) 98 115 { 99 116 u32 ret; 100 117 int err; 101 118 102 - err = skb_copy_bits(skb, offset, &ret, 4); 119 + if (offset < 0) 120 + err = call_neg_helper(skb, offset, &ret, 4); 121 + else 122 + err = skb_copy_bits(skb, offset, &ret, 4); 103 123 104 124 return (u64)err << 32 | ntohl(ret); 105 125 } ··· 556 536 case BPF_LD | BPF_B | BPF_ABS: 557 537 load_order = 0; 558 538 load: 559 - /* the interpreter will deal with the negative K */ 560 - if ((int)k < 0) 561 - return -ENOTSUPP; 562 539 emit_mov_i(r_off, k, ctx); 563 540 load_common: 564 541 ctx->seen |= SEEN_DATA | SEEN_CALL; ··· 564 547 emit(ARM_SUB_I(r_scratch, r_skb_hl, 565 548 1 << load_order), ctx); 566 549 emit(ARM_CMP_R(r_scratch, 
r_off), ctx); 567 - condt = ARM_COND_HS; 550 + condt = ARM_COND_GE; 568 551 } else { 569 552 emit(ARM_CMP_R(r_skb_hl, r_off), ctx); 570 553 condt = ARM_COND_HI; 571 554 } 555 + 556 + /* 557 + * test for negative offset, only if we are 558 + * currently scheduled to take the fast 559 + * path. this will update the flags so that 560 + * the slowpath instructions are ignored if the 561 + * offset is negative. 562 + * 563 + * for load_order == 0 the HI condition will 564 + * make loads at offset 0 take the slow path too. 565 + */ 566 + _emit(condt, ARM_CMP_I(r_off, 0), ctx); 572 567 573 568 _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data), 574 569 ctx); ··· 889 860 off = offsetof(struct sk_buff, vlan_tci); 890 861 emit(ARM_LDRH_I(r_A, r_skb, off), ctx); 891 862 if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) 892 - OP_IMM3(ARM_AND, r_A, r_A, VLAN_VID_MASK, ctx); 893 - else 894 - OP_IMM3(ARM_AND, r_A, r_A, VLAN_TAG_PRESENT, ctx); 863 + OP_IMM3(ARM_AND, r_A, r_A, ~VLAN_TAG_PRESENT, ctx); 864 + else { 865 + OP_IMM3(ARM_LSR, r_A, r_A, 12, ctx); 866 + OP_IMM3(ARM_AND, r_A, r_A, 0x1, ctx); 867 + } 895 868 break; 896 869 case BPF_ANC | SKF_AD_QUEUE: 897 870 ctx->seen |= SEEN_SKB;
+3 -2
arch/arm64/kernel/entry.S
··· 585 585 * 586 586 */ 587 587 ENTRY(cpu_switch_to) 588 - add x8, x0, #THREAD_CPU_CONTEXT 588 + mov x10, #THREAD_CPU_CONTEXT 589 + add x8, x0, x10 589 590 mov x9, sp 590 591 stp x19, x20, [x8], #16 // store callee-saved registers 591 592 stp x21, x22, [x8], #16 ··· 595 594 stp x27, x28, [x8], #16 596 595 stp x29, x9, [x8], #16 597 596 str lr, [x8] 598 - add x8, x1, #THREAD_CPU_CONTEXT 597 + add x8, x1, x10 599 598 ldp x19, x20, [x8], #16 // restore callee-saved registers 600 599 ldp x21, x22, [x8], #16 601 600 ldp x23, x24, [x8], #16
+2 -2
arch/arm64/kernel/irq.c
··· 61 61 static bool migrate_one_irq(struct irq_desc *desc) 62 62 { 63 63 struct irq_data *d = irq_desc_get_irq_data(desc); 64 - const struct cpumask *affinity = d->affinity; 64 + const struct cpumask *affinity = irq_data_get_affinity_mask(d); 65 65 struct irq_chip *c; 66 66 bool ret = false; 67 67 ··· 81 81 if (!c->irq_set_affinity) 82 82 pr_debug("IRQ%u: unable to set affinity\n", d->irq); 83 83 else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret) 84 - cpumask_copy(d->affinity, affinity); 84 + cpumask_copy(irq_data_get_affinity_mask(d), affinity); 85 85 86 86 return ret; 87 87 }
+33 -32
arch/avr32/kernel/time.c
··· 18 18 19 19 #include <mach/pm.h> 20 20 21 + static bool disable_cpu_idle_poll; 21 22 22 23 static cycle_t read_cycle_count(struct clocksource *cs) 23 24 { ··· 81 80 return 0; 82 81 } 83 82 84 - static void comparator_mode(enum clock_event_mode mode, 85 - struct clock_event_device *evdev) 83 + static int comparator_shutdown(struct clock_event_device *evdev) 86 84 { 87 - switch (mode) { 88 - case CLOCK_EVT_MODE_ONESHOT: 89 - pr_debug("%s: start\n", evdev->name); 90 - /* FALLTHROUGH */ 91 - case CLOCK_EVT_MODE_RESUME: 85 + pr_debug("%s: %s\n", __func__, evdev->name); 86 + sysreg_write(COMPARE, 0); 87 + 88 + if (disable_cpu_idle_poll) { 89 + disable_cpu_idle_poll = false; 92 90 /* 93 - * If we're using the COUNT and COMPARE registers we 94 - * need to force idle poll. 91 + * Only disable idle poll if we have forced that 92 + * in a previous call. 95 93 */ 96 - cpu_idle_poll_ctrl(true); 97 - break; 98 - case CLOCK_EVT_MODE_UNUSED: 99 - case CLOCK_EVT_MODE_SHUTDOWN: 100 - sysreg_write(COMPARE, 0); 101 - pr_debug("%s: stop\n", evdev->name); 102 - if (evdev->mode == CLOCK_EVT_MODE_ONESHOT || 103 - evdev->mode == CLOCK_EVT_MODE_RESUME) { 104 - /* 105 - * Only disable idle poll if we have forced that 106 - * in a previous call. 107 - */ 108 - cpu_idle_poll_ctrl(false); 109 - } 110 - break; 111 - default: 112 - BUG(); 94 + cpu_idle_poll_ctrl(false); 113 95 } 96 + return 0; 97 + } 98 + 99 + static int comparator_set_oneshot(struct clock_event_device *evdev) 100 + { 101 + pr_debug("%s: %s\n", __func__, evdev->name); 102 + 103 + disable_cpu_idle_poll = true; 104 + /* 105 + * If we're using the COUNT and COMPARE registers we 106 + * need to force idle poll. 
107 + */ 108 + cpu_idle_poll_ctrl(true); 109 + 110 + return 0; 114 111 } 115 112 116 113 static struct clock_event_device comparator = { 117 - .name = "avr32_comparator", 118 - .features = CLOCK_EVT_FEAT_ONESHOT, 119 - .shift = 16, 120 - .rating = 50, 121 - .set_next_event = comparator_next_event, 122 - .set_mode = comparator_mode, 114 + .name = "avr32_comparator", 115 + .features = CLOCK_EVT_FEAT_ONESHOT, 116 + .shift = 16, 117 + .rating = 50, 118 + .set_next_event = comparator_next_event, 119 + .set_state_shutdown = comparator_shutdown, 120 + .set_state_oneshot = comparator_set_oneshot, 121 + .tick_resume = comparator_set_oneshot, 123 122 }; 124 123 125 124 void read_persistent_clock(struct timespec *ts)
+5
arch/m32r/include/asm/io.h
··· 174 174 #define iowrite16 writew 175 175 #define iowrite32 writel 176 176 177 + #define ioread16be(addr) be16_to_cpu(readw(addr)) 178 + #define ioread32be(addr) be32_to_cpu(readl(addr)) 179 + #define iowrite16be(v, addr) writew(cpu_to_be16(v), (addr)) 180 + #define iowrite32be(v, addr) writel(cpu_to_be32(v), (addr)) 181 + 177 182 #define mmiowb() 178 183 179 184 #define flush_write_buffers() do { } while (0) /* M32R_FIXME */
+7 -8
arch/s390/kernel/asm-offsets.c
··· 23 23 24 24 int main(void) 25 25 { 26 - DEFINE(__THREAD_info, offsetof(struct task_struct, stack)); 27 - DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp)); 28 - DEFINE(__THREAD_mm_segment, offsetof(struct task_struct, thread.mm_segment)); 29 - BLANK(); 26 + DEFINE(__TASK_thread_info, offsetof(struct task_struct, stack)); 27 + DEFINE(__TASK_thread, offsetof(struct task_struct, thread)); 30 28 DEFINE(__TASK_pid, offsetof(struct task_struct, pid)); 31 29 BLANK(); 32 - DEFINE(__THREAD_per_cause, offsetof(struct task_struct, thread.per_event.cause)); 33 - DEFINE(__THREAD_per_address, offsetof(struct task_struct, thread.per_event.address)); 34 - DEFINE(__THREAD_per_paid, offsetof(struct task_struct, thread.per_event.paid)); 30 + DEFINE(__THREAD_ksp, offsetof(struct thread_struct, ksp)); 31 + DEFINE(__THREAD_per_cause, offsetof(struct thread_struct, per_event.cause)); 32 + DEFINE(__THREAD_per_address, offsetof(struct thread_struct, per_event.address)); 33 + DEFINE(__THREAD_per_paid, offsetof(struct thread_struct, per_event.paid)); 34 + DEFINE(__THREAD_trap_tdb, offsetof(struct thread_struct, trap_tdb)); 35 35 BLANK(); 36 36 DEFINE(__TI_task, offsetof(struct thread_info, task)); 37 37 DEFINE(__TI_flags, offsetof(struct thread_info, flags)); ··· 176 176 DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data)); 177 177 DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap)); 178 178 DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb)); 179 - DEFINE(__THREAD_trap_tdb, offsetof(struct task_struct, thread.trap_tdb)); 180 179 DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce)); 181 180 DEFINE(__SIE_PROG0C, offsetof(struct kvm_s390_sie_block, prog0c)); 182 181 DEFINE(__SIE_PROG20, offsetof(struct kvm_s390_sie_block, prog20));
+9 -4
arch/s390/kernel/entry.S
··· 178 178 */ 179 179 ENTRY(__switch_to) 180 180 stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task 181 - stg %r15,__THREAD_ksp(%r2) # store kernel stack of prev 182 - lg %r4,__THREAD_info(%r2) # get thread_info of prev 183 - lg %r5,__THREAD_info(%r3) # get thread_info of next 181 + lgr %r1,%r2 182 + aghi %r1,__TASK_thread # thread_struct of prev task 183 + lg %r4,__TASK_thread_info(%r2) # get thread_info of prev 184 + lg %r5,__TASK_thread_info(%r3) # get thread_info of next 185 + stg %r15,__THREAD_ksp(%r1) # store kernel stack of prev 186 + lgr %r1,%r3 187 + aghi %r1,__TASK_thread # thread_struct of next task 184 188 lgr %r15,%r5 185 189 aghi %r15,STACK_INIT # end of kernel stack of next 186 190 stg %r3,__LC_CURRENT # store task struct of next 187 191 stg %r5,__LC_THREAD_INFO # store thread info of next 188 192 stg %r15,__LC_KERNEL_STACK # store end of kernel stack 193 + lg %r15,__THREAD_ksp(%r1) # load kernel stack of next 189 194 lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 190 195 mvc __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next 191 - lg %r15,__THREAD_ksp(%r3) # load kernel stack of next 192 196 lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task 193 197 br %r14 194 198 ··· 421 417 LAST_BREAK %r14 422 418 lg %r15,__LC_KERNEL_STACK 423 419 lg %r14,__TI_task(%r12) 420 + aghi %r14,__TASK_thread # pointer to thread_struct 424 421 lghi %r13,__LC_PGM_TDB 425 422 tm __LC_PGM_ILC+2,0x02 # check for transaction abort 426 423 jz 2f
+2 -2
arch/s390/kernel/traps.c
··· 259 259 } 260 260 261 261 /* get vector interrupt code from fpc */ 262 - asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc)); 262 + asm volatile("stfpc %0" : "=Q" (current->thread.fp_regs.fpc)); 263 263 vic = (current->thread.fp_regs.fpc & 0xf00) >> 8; 264 264 switch (vic) { 265 265 case 1: /* invalid vector operation */ ··· 297 297 298 298 location = get_trap_ip(regs); 299 299 300 - asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc)); 300 + asm volatile("stfpc %0" : "=Q" (current->thread.fp_regs.fpc)); 301 301 /* Check for vector register enablement */ 302 302 if (MACHINE_HAS_VX && !current->thread.vxrs && 303 303 (current->thread.fp_regs.fpc & FPC_DXC_MASK) == 0xfe00) {
+1 -1
arch/tile/kernel/setup.c
··· 1139 1139 1140 1140 void __init free_initrd_mem(unsigned long begin, unsigned long end) 1141 1141 { 1142 - free_bootmem(__pa(begin), end - begin); 1142 + free_bootmem_late(__pa(begin), end - begin); 1143 1143 } 1144 1144 1145 1145 static int __init setup_initrd(char *str)
+9 -5
arch/x86/entry/entry_64_compat.S
··· 205 205 movl RDX(%rsp), %edx /* arg3 */ 206 206 movl RSI(%rsp), %ecx /* arg4 */ 207 207 movl RDI(%rsp), %r8d /* arg5 */ 208 - movl %ebp, %r9d /* arg6 */ 209 208 .endm 210 209 211 210 .macro auditsys_exit exit ··· 235 236 236 237 sysenter_auditsys: 237 238 auditsys_entry_common 239 + movl %ebp, %r9d /* reload 6th syscall arg */ 238 240 jmp sysenter_dispatch 239 241 240 242 sysexit_audit: ··· 336 336 * 32-bit zero extended: 337 337 */ 338 338 ASM_STAC 339 - 1: movl (%r8), %ebp 339 + 1: movl (%r8), %r9d 340 340 _ASM_EXTABLE(1b, ia32_badarg) 341 341 ASM_CLAC 342 342 orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS) ··· 346 346 cstar_do_call: 347 347 /* 32-bit syscall -> 64-bit C ABI argument conversion */ 348 348 movl %edi, %r8d /* arg5 */ 349 - movl %ebp, %r9d /* arg6 */ 349 + /* r9 already loaded */ /* arg6 */ 350 350 xchg %ecx, %esi /* rsi:arg2, rcx:arg4 */ 351 351 movl %ebx, %edi /* arg1 */ 352 352 movl %edx, %edx /* arg3 (zero extension) */ ··· 358 358 call *ia32_sys_call_table(, %rax, 8) 359 359 movq %rax, RAX(%rsp) 360 360 1: 361 - movl RCX(%rsp), %ebp 362 361 DISABLE_INTERRUPTS(CLBR_NONE) 363 362 TRACE_IRQS_OFF 364 363 testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) ··· 391 392 392 393 #ifdef CONFIG_AUDITSYSCALL 393 394 cstar_auditsys: 395 + movl %r9d, R9(%rsp) /* register to be clobbered by call */ 394 396 auditsys_entry_common 397 + movl R9(%rsp), %r9d /* reload 6th syscall arg */ 395 398 jmp cstar_dispatch 396 399 397 400 sysretl_audit: ··· 405 404 testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) 406 405 jz cstar_auditsys 407 406 #endif 407 + xchgl %r9d, %ebp 408 408 SAVE_EXTRA_REGS 409 409 xorl %eax, %eax /* Do not leak kernel information */ 410 410 movq %rax, R11(%rsp) 411 411 movq %rax, R10(%rsp) 412 - movq %rax, R9(%rsp) 412 + movq %r9, R9(%rsp) 413 413 movq %rax, R8(%rsp) 414 414 movq %rsp, %rdi /* &pt_regs -> arg1 */ 415 415 call syscall_trace_enter 416 + 
movl R9(%rsp), %r9d 416 417 417 418 /* Reload arg registers from stack. (see sysenter_tracesys) */ 418 419 movl RCX(%rsp), %ecx ··· 424 421 movl %eax, %eax /* zero extension */ 425 422 426 423 RESTORE_EXTRA_REGS 424 + xchgl %ebp, %r9d 427 425 jmp cstar_do_call 428 426 END(entry_SYSCALL_compat) 429 427
+2 -2
arch/x86/include/uapi/asm/kvm.h
··· 354 354 struct kvm_sync_regs { 355 355 }; 356 356 357 - #define KVM_QUIRK_LINT0_REENABLED (1 << 0) 358 - #define KVM_QUIRK_CD_NW_CLEARED (1 << 1) 357 + #define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0) 358 + #define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1) 359 359 360 360 #endif /* _ASM_X86_KVM_H */
+8
arch/x86/kernel/cpu/perf_event_intel_cqm.c
··· 952 952 return 0; 953 953 954 954 /* 955 + * Getting up-to-date values requires an SMP IPI which is not 956 + * possible if we're being called in interrupt context. Return 957 + * the cached values instead. 958 + */ 959 + if (unlikely(in_interrupt())) 960 + goto out; 961 + 962 + /* 955 963 * Notice that we don't perform the reading of an RMID 956 964 * atomically, because we can't hold a spin lock across the 957 965 * IPIs.
+6
arch/x86/kernel/fpu/init.c
··· 351 351 352 352 setup_clear_cpu_cap(X86_FEATURE_XSAVE); 353 353 setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT); 354 + setup_clear_cpu_cap(X86_FEATURE_XSAVEC); 354 355 setup_clear_cpu_cap(X86_FEATURE_XSAVES); 355 356 setup_clear_cpu_cap(X86_FEATURE_AVX); 356 357 setup_clear_cpu_cap(X86_FEATURE_AVX2); 358 + setup_clear_cpu_cap(X86_FEATURE_AVX512F); 359 + setup_clear_cpu_cap(X86_FEATURE_AVX512PF); 360 + setup_clear_cpu_cap(X86_FEATURE_AVX512ER); 361 + setup_clear_cpu_cap(X86_FEATURE_AVX512CD); 362 + setup_clear_cpu_cap(X86_FEATURE_MPX); 357 363 358 364 return 1; 359 365 }
+1 -1
arch/x86/kvm/lapic.c
··· 1595 1595 for (i = 0; i < APIC_LVT_NUM; i++) 1596 1596 apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED); 1597 1597 apic_update_lvtt(apic); 1598 - if (!(vcpu->kvm->arch.disabled_quirks & KVM_QUIRK_LINT0_REENABLED)) 1598 + if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED)) 1599 1599 apic_set_reg(apic, APIC_LVT0, 1600 1600 SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT)); 1601 1601 apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0));
+30 -10
arch/x86/kvm/mtrr.c
··· 120 120 return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK; 121 121 } 122 122 123 + static u8 mtrr_disabled_type(void) 124 + { 125 + /* 126 + * Intel SDM 11.11.2.2: all MTRRs are disabled when 127 + * IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC 128 + * memory type is applied to all of physical memory. 129 + */ 130 + return MTRR_TYPE_UNCACHABLE; 131 + } 132 + 123 133 /* 124 134 * Three terms are used in the following code: 125 135 * - segment, it indicates the address segments covered by fixed MTRRs. ··· 444 434 445 435 /* output fields. */ 446 436 int mem_type; 437 + /* mtrr is completely disabled? */ 438 + bool mtrr_disabled; 447 439 /* [start, end) is not fully covered in MTRRs? */ 448 440 bool partial_map; 449 441 ··· 561 549 static void mtrr_lookup_start(struct mtrr_iter *iter) 562 550 { 563 551 if (!mtrr_is_enabled(iter->mtrr_state)) { 564 - iter->partial_map = true; 552 + iter->mtrr_disabled = true; 565 553 return; 566 554 } 567 555 ··· 575 563 iter->mtrr_state = mtrr_state; 576 564 iter->start = start; 577 565 iter->end = end; 566 + iter->mtrr_disabled = false; 578 567 iter->partial_map = false; 579 568 iter->fixed = false; 580 569 iter->range = NULL; ··· 669 656 return MTRR_TYPE_WRBACK; 670 657 } 671 658 672 - /* It is not covered by MTRRs. */ 673 - if (iter.partial_map) { 674 - /* 675 - * We just check one page, partially covered by MTRRs is 676 - * impossible. 677 - */ 678 - WARN_ON(type != -1); 679 - type = mtrr_default_type(mtrr_state); 680 - } 659 + if (iter.mtrr_disabled) 660 + return mtrr_disabled_type(); 661 + 662 + /* 663 + * We just check one page, partially covered by MTRRs is 664 + * impossible. 665 + */ 666 + WARN_ON(iter.partial_map); 667 + 668 + /* not contained in any MTRRs. 
*/ 669 + if (type == -1) 670 + return mtrr_default_type(mtrr_state); 671 + 681 672 return type; 682 673 } 683 674 EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type); ··· 705 688 if (type != iter.mem_type) 706 689 return false; 707 690 } 691 + 692 + if (iter.mtrr_disabled) 693 + return true; 708 694 709 695 if (!iter.partial_map) 710 696 return true;
+1 -1
arch/x86/kvm/svm.c
··· 1672 1672 * does not do it - this results in some delay at 1673 1673 * reboot 1674 1674 */ 1675 - if (!(vcpu->kvm->arch.disabled_quirks & KVM_QUIRK_CD_NW_CLEARED)) 1675 + if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) 1676 1676 cr0 &= ~(X86_CR0_CD | X86_CR0_NW); 1677 1677 svm->vmcb->save.cr0 = cr0; 1678 1678 mark_dirty(svm->vmcb, VMCB_CR);
+4 -1
arch/x86/kvm/vmx.c
··· 8650 8650 8651 8651 if (kvm_read_cr0(vcpu) & X86_CR0_CD) { 8652 8652 ipat = VMX_EPT_IPAT_BIT; 8653 - cache = MTRR_TYPE_UNCACHABLE; 8653 + if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) 8654 + cache = MTRR_TYPE_WRBACK; 8655 + else 8656 + cache = MTRR_TYPE_UNCACHABLE; 8654 8657 goto exit; 8655 8658 } 8656 8659
+5
arch/x86/kvm/x86.h
··· 147 147 return kvm_register_write(vcpu, reg, val); 148 148 } 149 149 150 + static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk) 151 + { 152 + return !(kvm->arch.disabled_quirks & quirk); 153 + } 154 + 150 155 void kvm_before_handle_nmi(struct kvm_vcpu *vcpu); 151 156 void kvm_after_handle_nmi(struct kvm_vcpu *vcpu); 152 157 void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
+6 -17
arch/x86/mm/ioremap.c
··· 63 63 !PageReserved(pfn_to_page(start_pfn + i))) 64 64 return 1; 65 65 66 - WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn); 67 - 68 66 return 0; 69 67 } 70 68 ··· 92 94 pgprot_t prot; 93 95 int retval; 94 96 void __iomem *ret_addr; 95 - int ram_region; 96 97 97 98 /* Don't allow wraparound or zero size */ 98 99 last_addr = phys_addr + size - 1; ··· 114 117 /* 115 118 * Don't allow anybody to remap normal RAM that we're using.. 116 119 */ 117 - /* First check if whole region can be identified as RAM or not */ 118 - ram_region = region_is_ram(phys_addr, size); 119 - if (ram_region > 0) { 120 - WARN_ONCE(1, "ioremap on RAM at 0x%lx - 0x%lx\n", 121 - (unsigned long int)phys_addr, 122 - (unsigned long int)last_addr); 120 + pfn = phys_addr >> PAGE_SHIFT; 121 + last_pfn = last_addr >> PAGE_SHIFT; 122 + if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL, 123 + __ioremap_check_ram) == 1) { 124 + WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n", 125 + &phys_addr, &last_addr); 123 126 return NULL; 124 127 } 125 128 126 - /* If could not be identified(-1), check page by page */ 127 - if (ram_region < 0) { 128 - pfn = phys_addr >> PAGE_SHIFT; 129 - last_pfn = last_addr >> PAGE_SHIFT; 130 - if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL, 131 - __ioremap_check_ram) == 1) 132 - return NULL; 133 - } 134 129 /* 135 130 * Mappings have to be page-aligned 136 131 */
+7
arch/x86/mm/mmap.c
··· 126 126 mm->get_unmapped_area = arch_get_unmapped_area_topdown; 127 127 } 128 128 } 129 + 130 + const char *arch_vma_name(struct vm_area_struct *vma) 131 + { 132 + if (vma->vm_flags & VM_MPX) 133 + return "[mpx]"; 134 + return NULL; 135 + }
+3 -21
arch/x86/mm/mpx.c
··· 20 20 #define CREATE_TRACE_POINTS 21 21 #include <asm/trace/mpx.h> 22 22 23 - static const char *mpx_mapping_name(struct vm_area_struct *vma) 24 - { 25 - return "[mpx]"; 26 - } 27 - 28 - static struct vm_operations_struct mpx_vma_ops = { 29 - .name = mpx_mapping_name, 30 - }; 31 - 32 - static int is_mpx_vma(struct vm_area_struct *vma) 33 - { 34 - return (vma->vm_ops == &mpx_vma_ops); 35 - } 36 - 37 23 static inline unsigned long mpx_bd_size_bytes(struct mm_struct *mm) 38 24 { 39 25 if (is_64bit_mm(mm)) ··· 39 53 /* 40 54 * This is really a simplified "vm_mmap". it only handles MPX 41 55 * bounds tables (the bounds directory is user-allocated). 42 - * 43 - * Later on, we use the vma->vm_ops to uniquely identify these 44 - * VMAs. 45 56 */ 46 57 static unsigned long mpx_mmap(unsigned long len) 47 58 { ··· 84 101 ret = -ENOMEM; 85 102 goto out; 86 103 } 87 - vma->vm_ops = &mpx_vma_ops; 88 104 89 105 if (vm_flags & VM_LOCKED) { 90 106 up_write(&mm->mmap_sem); ··· 794 812 * so stop immediately and return an error. This 795 813 * probably results in a SIGSEGV. 796 814 */ 797 - if (!is_mpx_vma(vma)) 815 + if (!(vma->vm_flags & VM_MPX)) 798 816 return -EINVAL; 799 817 800 818 len = min(vma->vm_end, end) - addr; ··· 927 945 * lots of tables even though we have no actual table 928 946 * entries in use. 929 947 */ 930 - while (next && is_mpx_vma(next)) 948 + while (next && (next->vm_flags & VM_MPX)) 931 949 next = next->vm_next; 932 - while (prev && is_mpx_vma(prev)) 950 + while (prev && (prev->vm_flags & VM_MPX)) 933 951 prev = prev->vm_prev; 934 952 /* 935 953 * We know 'start' and 'end' lie within an area controlled
+1 -1
arch/x86/mm/tlb.c
··· 117 117 } else { 118 118 unsigned long addr; 119 119 unsigned long nr_pages = 120 - f->flush_end - f->flush_start / PAGE_SIZE; 120 + (f->flush_end - f->flush_start) / PAGE_SIZE; 121 121 addr = f->flush_start; 122 122 while (addr < f->flush_end) { 123 123 __flush_tlb_single(addr);
+14 -3
block/bio.c
··· 1831 1831 * Allocates and returns a new bio which represents @sectors from the start of 1832 1832 * @bio, and updates @bio to represent the remaining sectors. 1833 1833 * 1834 - * The newly allocated bio will point to @bio's bi_io_vec; it is the caller's 1835 - * responsibility to ensure that @bio is not freed before the split. 1834 + * Unless this is a discard request the newly allocated bio will point 1835 + * to @bio's bi_io_vec; it is the caller's responsibility to ensure that 1836 + * @bio is not freed before the split. 1836 1837 */ 1837 1838 struct bio *bio_split(struct bio *bio, int sectors, 1838 1839 gfp_t gfp, struct bio_set *bs) ··· 1843 1842 BUG_ON(sectors <= 0); 1844 1843 BUG_ON(sectors >= bio_sectors(bio)); 1845 1844 1846 - split = bio_clone_fast(bio, gfp, bs); 1845 + /* 1846 + * Discards need a mutable bio_vec to accommodate the payload 1847 + * required by the DSM TRIM and UNMAP commands. 1848 + */ 1849 + if (bio->bi_rw & REQ_DISCARD) 1850 + split = bio_clone_bioset(bio, gfp, bs); 1851 + else 1852 + split = bio_clone_fast(bio, gfp, bs); 1853 + 1847 1854 if (!split) 1848 1855 return NULL; 1849 1856 ··· 2018 2009 bio->bi_css = blkcg_css; 2019 2010 return 0; 2020 2011 } 2012 + EXPORT_SYMBOL_GPL(bio_associate_blkcg); 2021 2013 2022 2014 /** 2023 2015 * bio_associate_current - associate a bio with %current ··· 2049 2039 bio->bi_css = task_get_css(current, blkio_cgrp_id); 2050 2040 return 0; 2051 2041 } 2042 + EXPORT_SYMBOL_GPL(bio_associate_current); 2052 2043 2053 2044 /** 2054 2045 * bio_disassociate_task - undo bio_associate_current()
+5 -1
block/blk-cgroup.c
··· 718 718 return -EINVAL; 719 719 720 720 disk = get_gendisk(MKDEV(major, minor), &part); 721 - if (!disk || part) 721 + if (!disk) 722 722 return -EINVAL; 723 + if (part) { 724 + put_disk(disk); 725 + return -EINVAL; 726 + } 723 727 724 728 rcu_read_lock(); 725 729 spin_lock_irq(disk->queue->queue_lock);
+18 -3
drivers/ata/libata-core.c
··· 2478 2478 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, 2479 2479 dev->max_sectors); 2480 2480 2481 + if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024) 2482 + dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024, 2483 + dev->max_sectors); 2484 + 2481 2485 if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48) 2482 2486 dev->max_sectors = ATA_MAX_SECTORS_LBA48; 2483 2487 ··· 4150 4146 { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 }, 4151 4147 { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 }, 4152 4148 4149 + /* 4150 + * Causes silent data corruption with higher max sects. 4151 + * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com 4152 + */ 4153 + { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 }, 4154 + 4153 4155 /* Devices we expect to fail diagnostics */ 4154 4156 4155 4157 /* Devices where NCQ should be avoided */ ··· 4184 4174 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4185 4175 ATA_HORKAGE_FIRMWARE_WARN }, 4186 4176 4187 - /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */ 4177 + /* drives which fail FPDMA_AA activation (some may freeze afterwards) */ 4188 4178 { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA }, 4189 4179 { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA }, 4180 + { "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA }, 4190 4181 4191 4182 /* Blacklist entries taken from Silicon Image 3124/3132 4192 4183 Windows driver .inf file - also several Linux problem reports */ ··· 4240 4229 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4241 4230 { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4242 4231 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4243 - { "Micron_M5[15]0*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | 4232 + { "Micron_M5[15]0_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | 4244 4233 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4245 4234 { "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | 4246 4235 ATA_HORKAGE_ZERO_AFTER_TRIM, }, ··· 4248 4237 
ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4249 4238 { "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4250 4239 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4240 + 4241 + /* devices that don't properly handle TRIM commands */ 4242 + { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, }, 4251 4243 4252 4244 /* 4253 4245 * As defined, the DRAT (Deterministic Read After Trim) and RZAT ··· 4515 4501 else /* In the ancient relic department - skip all of this */ 4516 4502 return 0; 4517 4503 4518 - err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 4504 + /* On some disks, this command causes spin-up, so we need longer timeout */ 4505 + err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000); 4519 4506 4520 4507 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4521 4508 return err_mask;
+7
drivers/ata/libata-pmp.c
··· 460 460 ATA_LFLAG_NO_SRST | 461 461 ATA_LFLAG_ASSUME_ATA; 462 462 } 463 + } else if (vendor == 0x11ab && devid == 0x4140) { 464 + /* Marvell 4140 quirks */ 465 + ata_for_each_link(link, ap, EDGE) { 466 + /* port 4 is for SEMB device and it doesn't like SRST */ 467 + if (link->pmp == 4) 468 + link->flags |= ATA_LFLAG_DISABLED; 469 + } 463 470 } 464 471 } 465 472
+2 -1
drivers/ata/libata-scsi.c
··· 2568 2568 rbuf[14] = (lowest_aligned >> 8) & 0x3f; 2569 2569 rbuf[15] = lowest_aligned; 2570 2570 2571 - if (ata_id_has_trim(args->id)) { 2571 + if (ata_id_has_trim(args->id) && 2572 + !(dev->horkage & ATA_HORKAGE_NOTRIM)) { 2572 2573 rbuf[14] |= 0x80; /* LBPME */ 2573 2574 2574 2575 if (ata_id_has_zero_after_trim(args->id) &&
+2
drivers/ata/libata-transport.c
··· 569 569 570 570 if (!ata_id_has_trim(ata_dev->id)) 571 571 mode = "unsupported"; 572 + else if (ata_dev->horkage & ATA_HORKAGE_NOTRIM) 573 + mode = "forced_unsupported"; 572 574 else if (ata_dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) 573 575 mode = "forced_unqueued"; 574 576 else if (ata_fpdma_dsm_supported(ata_dev))
+9 -9
drivers/block/null_blk.c
··· 240 240 while ((entry = llist_del_all(&cq->list)) != NULL) { 241 241 entry = llist_reverse_order(entry); 242 242 do { 243 + struct request_queue *q = NULL; 244 + 243 245 cmd = container_of(entry, struct nullb_cmd, ll_list); 244 246 entry = entry->next; 247 + if (cmd->rq) 248 + q = cmd->rq->q; 245 249 end_cmd(cmd); 246 250 247 - if (cmd->rq) { 248 - struct request_queue *q = cmd->rq->q; 249 - 250 - if (!q->mq_ops && blk_queue_stopped(q)) { 251 - spin_lock(q->queue_lock); 252 - if (blk_queue_stopped(q)) 253 - blk_start_queue(q); 254 - spin_unlock(q->queue_lock); 255 - } 251 + if (q && !q->mq_ops && blk_queue_stopped(q)) { 252 + spin_lock(q->queue_lock); 253 + if (blk_queue_stopped(q)) 254 + blk_start_queue(q); 255 + spin_unlock(q->queue_lock); 256 256 } 257 257 } while (entry); 258 258 }
+5 -6
drivers/bluetooth/btbcm.c
··· 472 472 473 473 /* Read Verbose Config Version Info */ 474 474 skb = btbcm_read_verbose_config(hdev); 475 - if (IS_ERR(skb)) 476 - return PTR_ERR(skb); 477 - 478 - BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1], 479 - get_unaligned_le16(skb->data + 5)); 480 - kfree_skb(skb); 475 + if (!IS_ERR(skb)) { 476 + BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1], 477 + get_unaligned_le16(skb->data + 5)); 478 + kfree_skb(skb); 479 + } 481 480 482 481 set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); 483 482
+12 -3
drivers/firmware/efi/cper.c
··· 305 305 return ret; 306 306 } 307 307 308 - static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem) 308 + static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem, 309 + int len) 309 310 { 310 311 struct cper_mem_err_compact cmem; 311 312 313 + /* Don't trust UEFI 2.1/2.2 structure with bad validation bits */ 314 + if (len == sizeof(struct cper_sec_mem_err_old) && 315 + (mem->validation_bits & ~(CPER_MEM_VALID_RANK_NUMBER - 1))) { 316 + pr_err(FW_WARN "valid bits set for fields beyond structure\n"); 317 + return; 318 + } 312 319 if (mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS) 313 320 printk("%s""error_status: 0x%016llx\n", pfx, mem->error_status); 314 321 if (mem->validation_bits & CPER_MEM_VALID_PA) ··· 412 405 } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PLATFORM_MEM)) { 413 406 struct cper_sec_mem_err *mem_err = (void *)(gdata + 1); 414 407 printk("%s""section_type: memory error\n", newpfx); 415 - if (gdata->error_data_length >= sizeof(*mem_err)) 416 - cper_print_mem(newpfx, mem_err); 408 + if (gdata->error_data_length >= 409 + sizeof(struct cper_sec_mem_err_old)) 410 + cper_print_mem(newpfx, mem_err, 411 + gdata->error_data_length); 417 412 else 418 413 goto err_section_too_small; 419 414 } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PCIE)) {
+4
drivers/gpu/drm/amd/amdgpu/amdgpu.h
··· 1614 1614 #define AMDGPU_MAX_VCE_HANDLES 16 1615 1615 #define AMDGPU_VCE_FIRMWARE_OFFSET 256 1616 1616 1617 + #define AMDGPU_VCE_HARVEST_VCE0 (1 << 0) 1618 + #define AMDGPU_VCE_HARVEST_VCE1 (1 << 1) 1619 + 1617 1620 struct amdgpu_vce { 1618 1621 struct amdgpu_bo *vcpu_bo; 1619 1622 uint64_t gpu_addr; ··· 1629 1626 const struct firmware *fw; /* VCE firmware */ 1630 1627 struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS]; 1631 1628 struct amdgpu_irq_src irq; 1629 + unsigned harvest_config; 1632 1630 }; 1633 1631 1634 1632 /*
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
··· 459 459 memcpy(&dev_info.cu_bitmap[0], &cu_info.bitmap[0], sizeof(cu_info.bitmap)); 460 460 dev_info.vram_type = adev->mc.vram_type; 461 461 dev_info.vram_bit_width = adev->mc.vram_width; 462 + dev_info.vce_harvest_config = adev->vce.harvest_config; 462 463 463 464 return copy_to_user(out, &dev_info, 464 465 min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
+53 -15
drivers/gpu/drm/amd/amdgpu/cz_dpm.c
··· 494 494 amdgpu_free_extended_power_table(adev); 495 495 } 496 496 497 + #define ixSMUSVI_NB_CURRENTVID 0xD8230044 498 + #define CURRENT_NB_VID_MASK 0xff000000 499 + #define CURRENT_NB_VID__SHIFT 24 500 + #define ixSMUSVI_GFX_CURRENTVID 0xD8230048 501 + #define CURRENT_GFX_VID_MASK 0xff000000 502 + #define CURRENT_GFX_VID__SHIFT 24 503 + 497 504 static void 498 505 cz_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, 499 506 struct seq_file *m) 500 507 { 508 + struct cz_power_info *pi = cz_get_pi(adev); 501 509 struct amdgpu_clock_voltage_dependency_table *table = 502 510 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 503 - u32 current_index = 504 - (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) & 505 - TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >> 506 - TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT; 507 - u32 sclk, tmp; 508 - u16 vddc; 511 + struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table = 512 + &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; 513 + struct amdgpu_vce_clock_voltage_dependency_table *vce_table = 514 + &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; 515 + u32 sclk_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX), 516 + TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX); 517 + u32 uvd_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_2), 518 + TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX); 519 + u32 vce_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_2), 520 + TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX); 521 + u32 sclk, vclk, dclk, ecclk, tmp; 522 + u16 vddnb, vddgfx; 509 523 510 - if (current_index >= NUM_SCLK_LEVELS) { 511 - seq_printf(m, "invalid dpm profile %d\n", current_index); 524 + if (sclk_index >= NUM_SCLK_LEVELS) { 525 + seq_printf(m, "invalid sclk dpm profile %d\n", sclk_index); 512 526 } else { 513 - sclk = table->entries[current_index].clk; 514 - tmp = 
(RREG32_SMC(ixSMU_VOLTAGE_STATUS) & 515 - SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >> 516 - SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT; 517 - vddc = cz_convert_8bit_index_to_voltage(adev, (u16)tmp); 518 - seq_printf(m, "power level %d sclk: %u vddc: %u\n", 519 - current_index, sclk, vddc); 527 + sclk = table->entries[sclk_index].clk; 528 + seq_printf(m, "%u sclk: %u\n", sclk_index, sclk); 529 + } 530 + 531 + tmp = (RREG32_SMC(ixSMUSVI_NB_CURRENTVID) & 532 + CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT; 533 + vddnb = cz_convert_8bit_index_to_voltage(adev, (u16)tmp); 534 + tmp = (RREG32_SMC(ixSMUSVI_GFX_CURRENTVID) & 535 + CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT; 536 + vddgfx = cz_convert_8bit_index_to_voltage(adev, (u16)tmp); 537 + seq_printf(m, "vddnb: %u vddgfx: %u\n", vddnb, vddgfx); 538 + 539 + seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en"); 540 + if (!pi->uvd_power_gated) { 541 + if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) { 542 + seq_printf(m, "invalid uvd dpm level %d\n", uvd_index); 543 + } else { 544 + vclk = uvd_table->entries[uvd_index].vclk; 545 + dclk = uvd_table->entries[uvd_index].dclk; 546 + seq_printf(m, "%u uvd vclk: %u dclk: %u\n", uvd_index, vclk, dclk); 547 + } 548 + } 549 + 550 + seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en"); 551 + if (!pi->vce_power_gated) { 552 + if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) { 553 + seq_printf(m, "invalid vce dpm level %d\n", vce_index); 554 + } else { 555 + ecclk = vce_table->entries[vce_index].ecclk; 556 + seq_printf(m, "%u vce ecclk: %u\n", vce_index, ecclk); 557 + } 520 558 } 521 559 } 522 560
+4
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
··· 2632 2632 struct drm_device *dev = crtc->dev; 2633 2633 struct amdgpu_device *adev = dev->dev_private; 2634 2634 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2635 + unsigned type; 2635 2636 2636 2637 switch (mode) { 2637 2638 case DRM_MODE_DPMS_ON: ··· 2641 2640 dce_v10_0_vga_enable(crtc, true); 2642 2641 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); 2643 2642 dce_v10_0_vga_enable(crtc, false); 2643 + /* Make sure VBLANK interrupt is still enabled */ 2644 + type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); 2645 + amdgpu_irq_update(adev, &adev->crtc_irq, type); 2644 2646 drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); 2645 2647 dce_v10_0_crtc_load_lut(crtc); 2646 2648 break;
+4
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
··· 2631 2631 struct drm_device *dev = crtc->dev; 2632 2632 struct amdgpu_device *adev = dev->dev_private; 2633 2633 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2634 + unsigned type; 2634 2635 2635 2636 switch (mode) { 2636 2637 case DRM_MODE_DPMS_ON: ··· 2640 2639 dce_v11_0_vga_enable(crtc, true); 2641 2640 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); 2642 2641 dce_v11_0_vga_enable(crtc, false); 2642 + /* Make sure VBLANK interrupt is still enabled */ 2643 + type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); 2644 + amdgpu_irq_update(adev, &adev->crtc_irq, type); 2643 2645 drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); 2644 2646 dce_v11_0_crtc_load_lut(crtc); 2645 2647 break;
+48
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
··· 35 35 #include "oss/oss_2_0_d.h" 36 36 #include "oss/oss_2_0_sh_mask.h" 37 37 #include "gca/gfx_8_0_d.h" 38 + #include "smu/smu_7_1_2_d.h" 39 + #include "smu/smu_7_1_2_sh_mask.h" 38 40 39 41 #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04 40 42 #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10 ··· 114 112 115 113 mutex_lock(&adev->grbm_idx_mutex); 116 114 for (idx = 0; idx < 2; ++idx) { 115 + 116 + if (adev->vce.harvest_config & (1 << idx)) 117 + continue; 118 + 117 119 if(idx == 0) 118 120 WREG32_P(mmGRBM_GFX_INDEX, 0, 119 121 ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); ··· 196 190 return 0; 197 191 } 198 192 193 + #define ixVCE_HARVEST_FUSE_MACRO__ADDRESS 0xC0014074 194 + #define VCE_HARVEST_FUSE_MACRO__SHIFT 27 195 + #define VCE_HARVEST_FUSE_MACRO__MASK 0x18000000 196 + 197 + static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev) 198 + { 199 + u32 tmp; 200 + unsigned ret; 201 + 202 + if (adev->flags & AMDGPU_IS_APU) 203 + tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) & 204 + VCE_HARVEST_FUSE_MACRO__MASK) >> 205 + VCE_HARVEST_FUSE_MACRO__SHIFT; 206 + else 207 + tmp = (RREG32_SMC(ixCC_HARVEST_FUSES) & 208 + CC_HARVEST_FUSES__VCE_DISABLE_MASK) >> 209 + CC_HARVEST_FUSES__VCE_DISABLE__SHIFT; 210 + 211 + switch (tmp) { 212 + case 1: 213 + ret = AMDGPU_VCE_HARVEST_VCE0; 214 + break; 215 + case 2: 216 + ret = AMDGPU_VCE_HARVEST_VCE1; 217 + break; 218 + case 3: 219 + ret = AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1; 220 + break; 221 + default: 222 + ret = 0; 223 + } 224 + 225 + return ret; 226 + } 227 + 199 228 static int vce_v3_0_early_init(void *handle) 200 229 { 201 230 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 231 + 232 + adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev); 233 + 234 + if ((adev->vce.harvest_config & 235 + (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) == 236 + (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) 237 + return -ENOENT; 202 238 203 239 vce_v3_0_set_ring_funcs(adev); 204 240 
vce_v3_0_set_irq_funcs(adev);
+1
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
··· 355 355 planes->overlays[i]->base.possible_crtcs = 1 << crtc->id; 356 356 357 357 drm_crtc_helper_add(&crtc->base, &lcdc_crtc_helper_funcs); 358 + drm_crtc_vblank_reset(&crtc->base); 358 359 359 360 dc->crtc = &crtc->base; 360 361
+6 -6
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
··· 313 313 314 314 pm_runtime_enable(dev->dev); 315 315 316 + ret = drm_vblank_init(dev, 1); 317 + if (ret < 0) { 318 + dev_err(dev->dev, "failed to initialize vblank\n"); 319 + goto err_periph_clk_disable; 320 + } 321 + 316 322 ret = atmel_hlcdc_dc_modeset_init(dev); 317 323 if (ret < 0) { 318 324 dev_err(dev->dev, "failed to initialize mode setting\n"); ··· 326 320 } 327 321 328 322 drm_mode_config_reset(dev); 329 - 330 - ret = drm_vblank_init(dev, 1); 331 - if (ret < 0) { 332 - dev_err(dev->dev, "failed to initialize vblank\n"); 333 - goto err_periph_clk_disable; 334 - } 335 323 336 324 pm_runtime_get_sync(dev->dev); 337 325 ret = drm_irq_install(dev, dc->hlcdc->irq);
+1 -4
drivers/gpu/drm/drm_crtc.c
··· 5398 5398 if (encoder->funcs->reset) 5399 5399 encoder->funcs->reset(encoder); 5400 5400 5401 - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 5402 - connector->status = connector_status_unknown; 5403 - 5401 + list_for_each_entry(connector, &dev->mode_config.connector_list, head) 5404 5402 if (connector->funcs->reset) 5405 5403 connector->funcs->reset(connector); 5406 - } 5407 5404 } 5408 5405 EXPORT_SYMBOL(drm_mode_config_reset); 5409 5406
+19 -7
drivers/gpu/drm/i915/intel_uncore.c
··· 1274 1274 struct drm_i915_private *dev_priv = dev->dev_private; 1275 1275 struct drm_i915_reg_read *reg = data; 1276 1276 struct register_whitelist const *entry = whitelist; 1277 + unsigned size; 1278 + u64 offset; 1277 1279 int i, ret = 0; 1278 1280 1279 1281 for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) { 1280 - if (entry->offset == reg->offset && 1282 + if (entry->offset == (reg->offset & -entry->size) && 1281 1283 (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask)) 1282 1284 break; 1283 1285 } ··· 1287 1285 if (i == ARRAY_SIZE(whitelist)) 1288 1286 return -EINVAL; 1289 1287 1288 + /* We use the low bits to encode extra flags as the register should 1289 + * be naturally aligned (and those that are not so aligned merely 1290 + * limit the available flags for that register). 1291 + */ 1292 + offset = entry->offset; 1293 + size = entry->size; 1294 + size |= reg->offset ^ offset; 1295 + 1290 1296 intel_runtime_pm_get(dev_priv); 1291 1297 1292 - switch (entry->size) { 1298 + switch (size) { 1299 + case 8 | 1: 1300 + reg->val = I915_READ64_2x32(offset, offset+4); 1301 + break; 1293 1302 case 8: 1294 - reg->val = I915_READ64(reg->offset); 1303 + reg->val = I915_READ64(offset); 1295 1304 break; 1296 1305 case 4: 1297 - reg->val = I915_READ(reg->offset); 1306 + reg->val = I915_READ(offset); 1298 1307 break; 1299 1308 case 2: 1300 - reg->val = I915_READ16(reg->offset); 1309 + reg->val = I915_READ16(offset); 1301 1310 break; 1302 1311 case 1: 1303 - reg->val = I915_READ8(reg->offset); 1312 + reg->val = I915_READ8(offset); 1304 1313 break; 1305 1314 default: 1306 - MISSING_CASE(entry->size); 1307 1315 ret = -EINVAL; 1308 1316 goto out; 1309 1317 }
+2 -1
drivers/gpu/drm/ttm/ttm_bo_util.c
··· 490 490 else if (boot_cpu_data.x86 > 3) 491 491 tmp = pgprot_noncached(tmp); 492 492 #endif 493 - #if defined(__ia64__) || defined(__arm__) || defined(__powerpc__) 493 + #if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \ 494 + defined(__powerpc__) 494 495 if (caching_flags & TTM_PL_FLAG_WC) 495 496 tmp = pgprot_writecombine(tmp); 496 497 else
+2
drivers/hid/hid-cp2112.c
··· 356 356 struct cp2112_force_read_report report; 357 357 int ret; 358 358 359 + if (size > sizeof(dev->read_data)) 360 + size = sizeof(dev->read_data); 359 361 report.report = CP2112_DATA_READ_FORCE_SEND; 360 362 report.length = cpu_to_be16(size); 361 363
+7
drivers/hid/hid-multitouch.c
··· 778 778 /* 779 779 * some egalax touchscreens have "application == HID_DG_TOUCHSCREEN" 780 780 * for the stylus. 781 + * The check for mt_report_id ensures we don't process 782 + * HID_DG_CONTACTCOUNT from the pen report as it is outside the physical 783 + * collection, but within the report ID. 781 784 */ 782 785 if (field->physical == HID_DG_STYLUS) 786 + return 0; 787 + else if ((field->physical == 0) && 788 + (field->report->id != td->mt_report_id) && 789 + (td->mt_report_id != -1)) 783 790 return 0; 784 791 785 792 if (field->application == HID_DG_TOUCHSCREEN ||
+3
drivers/hid/usbhid/hid-quirks.c
··· 87 87 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A, HID_QUIRK_ALWAYS_POLL }, 88 88 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A, HID_QUIRK_ALWAYS_POLL }, 89 89 { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, 90 + { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2, HID_QUIRK_NO_INIT_REPORTS }, 91 + { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2, HID_QUIRK_NO_INIT_REPORTS }, 92 + { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2, HID_QUIRK_NO_INIT_REPORTS }, 90 93 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS }, 91 94 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP, HID_QUIRK_NO_INIT_REPORTS }, 92 95 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER, HID_QUIRK_NO_INIT_REPORTS },
+4 -2
drivers/hid/wacom_sys.c
··· 1271 1271 pad_input_dev = NULL; 1272 1272 wacom_wac->pad_registered = false; 1273 1273 fail_register_pad_input: 1274 - input_unregister_device(touch_input_dev); 1274 + if (touch_input_dev) 1275 + input_unregister_device(touch_input_dev); 1275 1276 wacom_wac->touch_input = NULL; 1276 1277 wacom_wac->touch_registered = false; 1277 1278 fail_register_touch_input: 1278 - input_unregister_device(pen_input_dev); 1279 + if (pen_input_dev) 1280 + input_unregister_device(pen_input_dev); 1279 1281 wacom_wac->pen_input = NULL; 1280 1282 wacom_wac->pen_registered = false; 1281 1283 fail_register_pen_input:
+3
drivers/hid/wacom_wac.c
··· 2213 2213 features->x_max = 4096; 2214 2214 features->y_max = 4096; 2215 2215 } 2216 + else if (features->pktlen == WACOM_PKGLEN_BBTOUCH) { 2217 + features->device_type |= WACOM_DEVICETYPE_PAD; 2218 + } 2216 2219 } 2217 2220 2218 2221 /*
+4 -4
drivers/iio/accel/mma8452.c
··· 557 557 if (src & MMA8452_TRANSIENT_SRC_XTRANSE) 558 558 iio_push_event(indio_dev, 559 559 IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_X, 560 - IIO_EV_TYPE_THRESH, 560 + IIO_EV_TYPE_MAG, 561 561 IIO_EV_DIR_RISING), 562 562 ts); 563 563 564 564 if (src & MMA8452_TRANSIENT_SRC_YTRANSE) 565 565 iio_push_event(indio_dev, 566 566 IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Y, 567 - IIO_EV_TYPE_THRESH, 567 + IIO_EV_TYPE_MAG, 568 568 IIO_EV_DIR_RISING), 569 569 ts); 570 570 571 571 if (src & MMA8452_TRANSIENT_SRC_ZTRANSE) 572 572 iio_push_event(indio_dev, 573 573 IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Z, 574 - IIO_EV_TYPE_THRESH, 574 + IIO_EV_TYPE_MAG, 575 575 IIO_EV_DIR_RISING), 576 576 ts); 577 577 } ··· 644 644 645 645 static const struct iio_event_spec mma8452_transient_event[] = { 646 646 { 647 - .type = IIO_EV_TYPE_THRESH, 647 + .type = IIO_EV_TYPE_MAG, 648 648 .dir = IIO_EV_DIR_RISING, 649 649 .mask_separate = BIT(IIO_EV_INFO_ENABLE), 650 650 .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) |
+2
drivers/iio/adc/mcp320x.c
··· 299 299 indio_dev->channels = chip_info->channels; 300 300 indio_dev->num_channels = chip_info->num_channels; 301 301 302 + adc->chip_info = chip_info; 303 + 302 304 adc->transfer[0].tx_buf = &adc->tx_buf; 303 305 adc->transfer[0].len = sizeof(adc->tx_buf); 304 306 adc->transfer[1].rx_buf = adc->rx_buf;
+1 -1
drivers/iio/adc/vf610_adc.c
··· 635 635 struct vf610_adc *info = iio_priv(indio_dev); 636 636 637 637 if ((readval == NULL) || 638 - (!(reg % 4) || (reg > VF610_REG_ADC_PCTL))) 638 + ((reg % 4) || (reg > VF610_REG_ADC_PCTL))) 639 639 return -EINVAL; 640 640 641 641 *readval = readl(info->regs + reg);
+13 -13
drivers/iio/light/stk3310.c
··· 200 200 int *val, int *val2) 201 201 { 202 202 u8 reg; 203 - u16 buf; 203 + __be16 buf; 204 204 int ret; 205 205 struct stk3310_data *data = iio_priv(indio_dev); 206 206 ··· 222 222 dev_err(&data->client->dev, "register read failed\n"); 223 223 return ret; 224 224 } 225 - *val = swab16(buf); 225 + *val = be16_to_cpu(buf); 226 226 227 227 return IIO_VAL_INT; 228 228 } ··· 235 235 int val, int val2) 236 236 { 237 237 u8 reg; 238 - u16 buf; 238 + __be16 buf; 239 239 int ret; 240 240 unsigned int index; 241 241 struct stk3310_data *data = iio_priv(indio_dev); ··· 252 252 else 253 253 return -EINVAL; 254 254 255 - buf = swab16(val); 255 + buf = cpu_to_be16(val); 256 256 ret = regmap_bulk_write(data->regmap, reg, &buf, 2); 257 257 if (ret < 0) 258 258 dev_err(&client->dev, "failed to set PS threshold!\n"); ··· 301 301 int *val, int *val2, long mask) 302 302 { 303 303 u8 reg; 304 - u16 buf; 304 + __be16 buf; 305 305 int ret; 306 306 unsigned int index; 307 307 struct stk3310_data *data = iio_priv(indio_dev); ··· 322 322 mutex_unlock(&data->lock); 323 323 return ret; 324 324 } 325 - *val = swab16(buf); 325 + *val = be16_to_cpu(buf); 326 326 mutex_unlock(&data->lock); 327 327 return IIO_VAL_INT; 328 328 case IIO_CHAN_INFO_INT_TIME: ··· 608 608 if (ret < 0) 609 609 return ret; 610 610 611 - ret = iio_device_register(indio_dev); 612 - if (ret < 0) { 613 - dev_err(&client->dev, "device_register failed\n"); 614 - stk3310_set_state(data, STK3310_STATE_STANDBY); 615 - } 616 - 617 - if (client->irq <= 0) 611 + if (client->irq < 0) 618 612 client->irq = stk3310_gpio_probe(client); 619 613 620 614 if (client->irq >= 0) { ··· 621 627 if (ret < 0) 622 628 dev_err(&client->dev, "request irq %d failed\n", 623 629 client->irq); 630 + } 631 + 632 + ret = iio_device_register(indio_dev); 633 + if (ret < 0) { 634 + dev_err(&client->dev, "device_register failed\n"); 635 + stk3310_set_state(data, STK3310_STATE_STANDBY); 624 636 } 625 637 626 638 return ret;
+1
drivers/iio/magnetometer/Kconfig
··· 90 90 config BMC150_MAGN 91 91 tristate "Bosch BMC150 Magnetometer Driver" 92 92 depends on I2C 93 + select REGMAP_I2C 93 94 select IIO_BUFFER 94 95 select IIO_TRIGGERED_BUFFER 95 96 help
+2 -2
drivers/iio/magnetometer/bmc150_magn.c
··· 706 706 goto err_poweroff; 707 707 } 708 708 if (chip_id != BMC150_MAGN_CHIP_ID_VAL) { 709 - dev_err(&data->client->dev, "Invalid chip id 0x%x\n", ret); 709 + dev_err(&data->client->dev, "Invalid chip id 0x%x\n", chip_id); 710 710 ret = -ENODEV; 711 711 goto err_poweroff; 712 712 } 713 - dev_dbg(&data->client->dev, "Chip id %x\n", ret); 713 + dev_dbg(&data->client->dev, "Chip id %x\n", chip_id); 714 714 715 715 preset = bmc150_magn_presets_table[BMC150_MAGN_DEFAULT_PRESET]; 716 716 ret = bmc150_magn_set_odr(data, preset.odr);
+7 -5
drivers/iio/magnetometer/mmc35240.c
··· 202 202 coil_bit = MMC35240_CTRL0_RESET_BIT; 203 203 204 204 return regmap_update_bits(data->regmap, MMC35240_REG_CTRL0, 205 - MMC35240_CTRL0_REFILL_BIT, 206 - coil_bit); 205 + coil_bit, coil_bit); 206 + 207 207 } 208 208 209 209 static int mmc35240_init(struct mmc35240_data *data) ··· 222 222 223 223 /* 224 224 * make sure we restore sensor characteristics, by doing 225 - * a RESET/SET sequence 225 + * a SET/RESET sequence, the axis polarity being naturally 226 + * aligned after RESET 226 227 */ 227 - ret = mmc35240_hw_set(data, false); 228 + ret = mmc35240_hw_set(data, true); 228 229 if (ret < 0) 229 230 return ret; 230 231 usleep_range(MMC53240_WAIT_SET_RESET, MMC53240_WAIT_SET_RESET + 1); 231 232 232 - ret = mmc35240_hw_set(data, true); 233 + ret = mmc35240_hw_set(data, false); 233 234 if (ret < 0) 234 235 return ret; 235 236 ··· 504 503 } 505 504 506 505 data = iio_priv(indio_dev); 506 + i2c_set_clientdata(client, indio_dev); 507 507 data->client = client; 508 508 data->regmap = regmap; 509 509 data->res = MMC35240_16_BITS_SLOW;
+1 -1
drivers/iio/temperature/mlx90614.c
··· 204 204 *val = ret; 205 205 return IIO_VAL_INT; 206 206 case IIO_CHAN_INFO_OFFSET: 207 - *val = 13657; 207 + *val = -13657; 208 208 *val2 = 500000; 209 209 return IIO_VAL_INT_PLUS_MICRO; 210 210 case IIO_CHAN_INFO_SCALE:
+4 -2
drivers/infiniband/hw/ipath/ipath_driver.c
··· 31 31 * SOFTWARE. 32 32 */ 33 33 34 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 35 + 34 36 #include <linux/sched.h> 35 37 #include <linux/spinlock.h> 36 38 #include <linux/idr.h> ··· 401 399 u32 bar0 = 0, bar1 = 0; 402 400 403 401 #ifdef CONFIG_X86_64 404 - if (WARN(pat_enabled(), 405 - "ipath needs PAT disabled, boot with nopat kernel parameter\n")) { 402 + if (pat_enabled()) { 403 + pr_warn("ipath needs PAT disabled, boot with nopat kernel parameter\n"); 406 404 ret = -ENODEV; 407 405 goto bail; 408 406 }
+14 -2
drivers/input/input-leds.c
··· 71 71 { 72 72 } 73 73 74 + static int input_leds_get_count(struct input_dev *dev) 75 + { 76 + unsigned int led_code; 77 + int count = 0; 78 + 79 + for_each_set_bit(led_code, dev->ledbit, LED_CNT) 80 + if (input_led_info[led_code].name) 81 + count++; 82 + 83 + return count; 84 + } 85 + 74 86 static int input_leds_connect(struct input_handler *handler, 75 87 struct input_dev *dev, 76 88 const struct input_device_id *id) ··· 93 81 int led_no; 94 82 int error; 95 83 96 - num_leds = bitmap_weight(dev->ledbit, LED_CNT); 84 + num_leds = input_leds_get_count(dev); 97 85 if (!num_leds) 98 86 return -ENXIO; 99 87 ··· 124 112 led->handle = &leds->handle; 125 113 led->code = led_code; 126 114 127 - if (WARN_ON(!input_led_info[led_code].name)) 115 + if (!input_led_info[led_code].name) 128 116 continue; 129 117 130 118 led->cdev.name = kasprintf(GFP_KERNEL, "%s::%s",
+8 -5
drivers/input/mouse/elantech.c
··· 1167 1167 struct input_dev *dev = psmouse->dev; 1168 1168 struct elantech_data *etd = psmouse->private; 1169 1169 unsigned int x_min = 0, y_min = 0, x_max = 0, y_max = 0, width = 0; 1170 - unsigned int x_res = 0, y_res = 0; 1170 + unsigned int x_res = 31, y_res = 31; 1171 1171 1172 1172 if (elantech_set_range(psmouse, &x_min, &y_min, &x_max, &y_max, &width)) 1173 1173 return -1; ··· 1232 1232 /* For X to recognize me as touchpad. */ 1233 1233 input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0); 1234 1234 input_set_abs_params(dev, ABS_Y, y_min, y_max, 0, 0); 1235 - input_abs_set_res(dev, ABS_X, x_res); 1236 - input_abs_set_res(dev, ABS_Y, y_res); 1237 1235 /* 1238 1236 * range of pressure and width is the same as v2, 1239 1237 * report ABS_PRESSURE, ABS_TOOL_WIDTH for compatibility. ··· 1244 1246 input_mt_init_slots(dev, ETP_MAX_FINGERS, 0); 1245 1247 input_set_abs_params(dev, ABS_MT_POSITION_X, x_min, x_max, 0, 0); 1246 1248 input_set_abs_params(dev, ABS_MT_POSITION_Y, y_min, y_max, 0, 0); 1247 - input_abs_set_res(dev, ABS_MT_POSITION_X, x_res); 1248 - input_abs_set_res(dev, ABS_MT_POSITION_Y, y_res); 1249 1249 input_set_abs_params(dev, ABS_MT_PRESSURE, ETP_PMIN_V2, 1250 1250 ETP_PMAX_V2, 0, 0); 1251 1251 /* ··· 1253 1257 input_set_abs_params(dev, ABS_MT_TOUCH_MAJOR, 0, 1254 1258 ETP_WMAX_V2 * width, 0, 0); 1255 1259 break; 1260 + } 1261 + 1262 + input_abs_set_res(dev, ABS_X, x_res); 1263 + input_abs_set_res(dev, ABS_Y, y_res); 1264 + if (etd->hw_version > 1) { 1265 + input_abs_set_res(dev, ABS_MT_POSITION_X, x_res); 1266 + input_abs_set_res(dev, ABS_MT_POSITION_Y, y_res); 1256 1267 } 1257 1268 1258 1269 etd->y_max = y_max;
+36
drivers/input/touchscreen/goodix.c
··· 15 15 */ 16 16 17 17 #include <linux/kernel.h> 18 + #include <linux/dmi.h> 18 19 #include <linux/i2c.h> 19 20 #include <linux/input.h> 20 21 #include <linux/input/mt.h> ··· 35 34 int abs_y_max; 36 35 unsigned int max_touch_num; 37 36 unsigned int int_trigger_type; 37 + bool rotated_screen; 38 38 }; 39 39 40 40 #define GOODIX_MAX_HEIGHT 4096 ··· 60 58 IRQ_TYPE_EDGE_FALLING, 61 59 IRQ_TYPE_LEVEL_LOW, 62 60 IRQ_TYPE_LEVEL_HIGH, 61 + }; 62 + 63 + /* 64 + * Those tablets have their coordinates origin at the bottom right 65 + * of the tablet, as if rotated 180 degrees 66 + */ 67 + static const struct dmi_system_id rotated_screen[] = { 68 + #if defined(CONFIG_DMI) && defined(CONFIG_X86) 69 + { 70 + .ident = "WinBook TW100", 71 + .matches = { 72 + DMI_MATCH(DMI_SYS_VENDOR, "WinBook"), 73 + DMI_MATCH(DMI_PRODUCT_NAME, "TW100") 74 + } 75 + }, 76 + { 77 + .ident = "WinBook TW700", 78 + .matches = { 79 + DMI_MATCH(DMI_SYS_VENDOR, "WinBook"), 80 + DMI_MATCH(DMI_PRODUCT_NAME, "TW700") 81 + }, 82 + }, 83 + #endif 84 + {} 63 85 }; 64 86 65 87 /** ··· 154 128 int input_x = get_unaligned_le16(&coor_data[1]); 155 129 int input_y = get_unaligned_le16(&coor_data[3]); 156 130 int input_w = get_unaligned_le16(&coor_data[5]); 131 + 132 + if (ts->rotated_screen) { 133 + input_x = ts->abs_x_max - input_x; 134 + input_y = ts->abs_y_max - input_y; 135 + } 157 136 158 137 input_mt_slot(ts->input_dev, id); 159 138 input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, true); ··· 254 223 ts->abs_y_max = GOODIX_MAX_HEIGHT; 255 224 ts->max_touch_num = GOODIX_MAX_CONTACTS; 256 225 } 226 + 227 + ts->rotated_screen = dmi_check_system(rotated_screen); 228 + if (ts->rotated_screen) 229 + dev_dbg(&ts->client->dev, 230 + "Applying '180 degrees rotated screen' quirk\n"); 257 231 } 258 232 259 233 /**
+3
drivers/input/touchscreen/usbtouchscreen.c
··· 627 627 goto err_out; 628 628 } 629 629 630 + /* TSC-25 data sheet specifies a delay after the RESET command */ 631 + msleep(150); 632 + 630 633 /* set coordinate output rate */ 631 634 buf[0] = buf[1] = 0xFF; 632 635 ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0),
+1 -1
drivers/input/touchscreen/zforce_ts.c
··· 429 429 goto unlock; 430 430 } 431 431 432 - if (buf[PAYLOAD_LENGTH] == 0) { 432 + if (buf[PAYLOAD_LENGTH] == 0 || buf[PAYLOAD_LENGTH] > FRAME_MAXSIZE) { 433 433 dev_err(&client->dev, "invalid payload length: %d\n", 434 434 buf[PAYLOAD_LENGTH]); 435 435 ret = -EIO;
+46 -14
drivers/iommu/arm-smmu-v3.c
··· 199 199 * Stream table. 200 200 * 201 201 * Linear: Enough to cover 1 << IDR1.SIDSIZE entries 202 - * 2lvl: 8k L1 entries, 256 lazy entries per table (each table covers a PCI bus) 202 + * 2lvl: 128k L1 entries, 203 + * 256 lazy entries per table (each table covers a PCI bus) 203 204 */ 204 - #define STRTAB_L1_SZ_SHIFT 16 205 + #define STRTAB_L1_SZ_SHIFT 20 205 206 #define STRTAB_SPLIT 8 206 207 207 208 #define STRTAB_L1_DESC_DWORDS 1 ··· 270 269 #define ARM64_TCR_TG0_SHIFT 14 271 270 #define ARM64_TCR_TG0_MASK 0x3UL 272 271 #define CTXDESC_CD_0_TCR_IRGN0_SHIFT 8 273 - #define ARM64_TCR_IRGN0_SHIFT 24 272 + #define ARM64_TCR_IRGN0_SHIFT 8 274 273 #define ARM64_TCR_IRGN0_MASK 0x3UL 275 274 #define CTXDESC_CD_0_TCR_ORGN0_SHIFT 10 276 - #define ARM64_TCR_ORGN0_SHIFT 26 275 + #define ARM64_TCR_ORGN0_SHIFT 10 277 276 #define ARM64_TCR_ORGN0_MASK 0x3UL 278 277 #define CTXDESC_CD_0_TCR_SH0_SHIFT 12 279 278 #define ARM64_TCR_SH0_SHIFT 12 ··· 543 542 #define ARM_SMMU_FEAT_HYP (1 << 12) 544 543 u32 features; 545 544 545 + #define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0) 546 + u32 options; 547 + 546 548 struct arm_smmu_cmdq cmdq; 547 549 struct arm_smmu_evtq evtq; 548 550 struct arm_smmu_priq priq; ··· 606 602 static DEFINE_SPINLOCK(arm_smmu_devices_lock); 607 603 static LIST_HEAD(arm_smmu_devices); 608 604 605 + struct arm_smmu_option_prop { 606 + u32 opt; 607 + const char *prop; 608 + }; 609 + 610 + static struct arm_smmu_option_prop arm_smmu_options[] = { 611 + { ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" }, 612 + { 0, NULL}, 613 + }; 614 + 609 615 static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom) 610 616 { 611 617 return container_of(dom, struct arm_smmu_domain, domain); 618 + } 619 + 620 + static void parse_driver_options(struct arm_smmu_device *smmu) 621 + { 622 + int i = 0; 623 + 624 + do { 625 + if (of_property_read_bool(smmu->dev->of_node, 626 + arm_smmu_options[i].prop)) { 627 + smmu->options |= arm_smmu_options[i].opt; 628 + 
dev_notice(smmu->dev, "option %s\n", 629 + arm_smmu_options[i].prop); 630 + } 631 + } while (arm_smmu_options[++i].opt); 612 632 } 613 633 614 634 /* Low-level queue manipulation functions */ ··· 1064 1036 arm_smmu_sync_ste_for_sid(smmu, sid); 1065 1037 1066 1038 /* It's likely that we'll want to use the new STE soon */ 1067 - arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd); 1039 + if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH)) 1040 + arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd); 1068 1041 } 1069 1042 1070 1043 static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent) ··· 1093 1064 return 0; 1094 1065 1095 1066 size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3); 1096 - strtab = &cfg->strtab[sid >> STRTAB_SPLIT << STRTAB_L1_DESC_DWORDS]; 1067 + strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS]; 1097 1068 1098 1069 desc->span = STRTAB_SPLIT + 1; 1099 1070 desc->l2ptr = dma_zalloc_coherent(smmu->dev, size, &desc->l2ptr_dma, ··· 2049 2020 { 2050 2021 void *strtab; 2051 2022 u64 reg; 2052 - u32 size; 2023 + u32 size, l1size; 2053 2024 int ret; 2054 2025 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; 2055 2026 2056 2027 /* Calculate the L1 size, capped to the SIDSIZE */ 2057 2028 size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3); 2058 2029 size = min(size, smmu->sid_bits - STRTAB_SPLIT); 2059 - if (size + STRTAB_SPLIT < smmu->sid_bits) 2030 + cfg->num_l1_ents = 1 << size; 2031 + 2032 + size += STRTAB_SPLIT; 2033 + if (size < smmu->sid_bits) 2060 2034 dev_warn(smmu->dev, 2061 2035 "2-level strtab only covers %u/%u bits of SID\n", 2062 - size + STRTAB_SPLIT, smmu->sid_bits); 2036 + size, smmu->sid_bits); 2063 2037 2064 - cfg->num_l1_ents = 1 << size; 2065 - size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3); 2066 - strtab = dma_zalloc_coherent(smmu->dev, size, &cfg->strtab_dma, 2038 + l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3); 2039 + strtab = dma_zalloc_coherent(smmu->dev, l1size, 
&cfg->strtab_dma, 2067 2040 GFP_KERNEL); 2068 2041 if (!strtab) { 2069 2042 dev_err(smmu->dev, ··· 2086 2055 ret = arm_smmu_init_l1_strtab(smmu); 2087 2056 if (ret) 2088 2057 dma_free_coherent(smmu->dev, 2089 - cfg->num_l1_ents * 2090 - (STRTAB_L1_DESC_DWORDS << 3), 2058 + l1size, 2091 2059 strtab, 2092 2060 cfg->strtab_dma); 2093 2061 return ret; ··· 2602 2572 irq = platform_get_irq_byname(pdev, "gerror"); 2603 2573 if (irq > 0) 2604 2574 smmu->gerr_irq = irq; 2575 + 2576 + parse_driver_options(smmu); 2605 2577 2606 2578 /* Probe the h/w */ 2607 2579 ret = arm_smmu_device_probe(smmu);
+6 -3
drivers/iommu/intel-iommu.c
··· 1830 1830 1831 1831 static void domain_exit(struct dmar_domain *domain) 1832 1832 { 1833 + struct dmar_drhd_unit *drhd; 1834 + struct intel_iommu *iommu; 1833 1835 struct page *freelist = NULL; 1834 - int i; 1835 1836 1836 1837 /* Domain 0 is reserved, so dont process it */ 1837 1838 if (!domain) ··· 1852 1851 1853 1852 /* clear attached or cached domains */ 1854 1853 rcu_read_lock(); 1855 - for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) 1856 - iommu_detach_domain(domain, g_iommus[i]); 1854 + for_each_active_iommu(iommu, drhd) 1855 + if (domain_type_is_vm(domain) || 1856 + test_bit(iommu->seq_id, domain->iommu_bmp)) 1857 + iommu_detach_domain(domain, iommu); 1857 1858 rcu_read_unlock(); 1858 1859 1859 1860 dma_free_pagelist(freelist);
+10 -25
drivers/isdn/gigaset/ser-gigaset.c
··· 524 524 cs->hw.ser->tty = tty; 525 525 atomic_set(&cs->hw.ser->refcnt, 1); 526 526 init_completion(&cs->hw.ser->dead_cmp); 527 - 528 527 tty->disc_data = cs; 528 + 529 + /* Set the amount of data we're willing to receive per call 530 + * from the hardware driver to half of the input buffer size 531 + * to leave some reserve. 532 + * Note: We don't do flow control towards the hardware driver. 533 + * If more data is received than will fit into the input buffer, 534 + * it will be dropped and an error will be logged. This should 535 + * never happen as the device is slow and the buffer size ample. 536 + */ 537 + tty->receive_room = RBUFSIZE/2; 529 538 530 539 /* OK.. Initialization of the datastructures and the HW is done.. Now 531 540 * startup system and notify the LL that we are ready to run ··· 604 595 { 605 596 gigaset_tty_close(tty); 606 597 return 0; 607 - } 608 - 609 - /* 610 - * Read on the tty. 611 - * Unused, received data goes only to the Gigaset driver. 612 - */ 613 - static ssize_t 614 - gigaset_tty_read(struct tty_struct *tty, struct file *file, 615 - unsigned char __user *buf, size_t count) 616 - { 617 - return -EAGAIN; 618 - } 619 - 620 - /* 621 - * Write on the tty. 622 - * Unused, transmit data comes only from the Gigaset driver. 623 - */ 624 - static ssize_t 625 - gigaset_tty_write(struct tty_struct *tty, struct file *file, 626 - const unsigned char *buf, size_t count) 627 - { 628 - return -EAGAIN; 629 598 } 630 599 631 600 /* ··· 739 752 .open = gigaset_tty_open, 740 753 .close = gigaset_tty_close, 741 754 .hangup = gigaset_tty_hangup, 742 - .read = gigaset_tty_read, 743 - .write = gigaset_tty_write, 744 755 .ioctl = gigaset_tty_ioctl, 745 756 .receive_buf = gigaset_tty_receive, 746 757 .write_wakeup = gigaset_tty_wakeup,
+17 -11
drivers/md/bitmap.c
··· 494 494 bitmap_super_t *sb; 495 495 unsigned long chunksize, daemon_sleep, write_behind; 496 496 497 - bitmap->storage.sb_page = alloc_page(GFP_KERNEL); 497 + bitmap->storage.sb_page = alloc_page(GFP_KERNEL | __GFP_ZERO); 498 498 if (bitmap->storage.sb_page == NULL) 499 499 return -ENOMEM; 500 500 bitmap->storage.sb_page->index = 0; ··· 541 541 sb->state = cpu_to_le32(bitmap->flags); 542 542 bitmap->events_cleared = bitmap->mddev->events; 543 543 sb->events_cleared = cpu_to_le64(bitmap->mddev->events); 544 + bitmap->mddev->bitmap_info.nodes = 0; 544 545 545 546 kunmap_atomic(sb); 546 547 ··· 559 558 unsigned long sectors_reserved = 0; 560 559 int err = -EINVAL; 561 560 struct page *sb_page; 561 + loff_t offset = bitmap->mddev->bitmap_info.offset; 562 562 563 563 if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) { 564 564 chunksize = 128 * 1024 * 1024; ··· 586 584 bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t); 587 585 /* to 4k blocks */ 588 586 bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096); 589 - bitmap->mddev->bitmap_info.offset += bitmap->cluster_slot * (bm_blocks << 3); 587 + offset = bitmap->mddev->bitmap_info.offset + (bitmap->cluster_slot * (bm_blocks << 3)); 590 588 pr_info("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__, 591 - bitmap->cluster_slot, (unsigned long long)bitmap->mddev->bitmap_info.offset); 589 + bitmap->cluster_slot, offset); 592 590 } 593 591 594 592 if (bitmap->storage.file) { ··· 599 597 bitmap, bytes, sb_page); 600 598 } else { 601 599 err = read_sb_page(bitmap->mddev, 602 - bitmap->mddev->bitmap_info.offset, 600 + offset, 603 601 sb_page, 604 602 0, sizeof(bitmap_super_t)); 605 603 } ··· 613 611 daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ; 614 612 write_behind = le32_to_cpu(sb->write_behind); 615 613 sectors_reserved = le32_to_cpu(sb->sectors_reserved); 616 - nodes = le32_to_cpu(sb->nodes); 617 - strlcpy(bitmap->mddev->bitmap_info.cluster_name, sb->cluster_name, 64); 614 + /* XXX: This is a 
hack to ensure that we don't use clustering 615 + * in case: 616 + * - dm-raid is in use and 617 + * - the nodes written in bitmap_sb is erroneous. 618 + */ 619 + if (!bitmap->mddev->sync_super) { 620 + nodes = le32_to_cpu(sb->nodes); 621 + strlcpy(bitmap->mddev->bitmap_info.cluster_name, 622 + sb->cluster_name, 64); 623 + } 618 624 619 625 /* verify that the bitmap-specific fields are valid */ 620 626 if (sb->magic != cpu_to_le32(BITMAP_MAGIC)) ··· 681 671 kunmap_atomic(sb); 682 672 /* Assiging chunksize is required for "re_read" */ 683 673 bitmap->mddev->bitmap_info.chunksize = chunksize; 684 - if (nodes && (bitmap->cluster_slot < 0)) { 674 + if (err == 0 && nodes && (bitmap->cluster_slot < 0)) { 685 675 err = md_setup_cluster(bitmap->mddev, nodes); 686 676 if (err) { 687 677 pr_err("%s: Could not setup cluster service (%d)\n", ··· 1875 1865 1876 1866 if (IS_ERR(bitmap)) 1877 1867 return PTR_ERR(bitmap); 1878 - 1879 - rv = bitmap_read_sb(bitmap); 1880 - if (rv) 1881 - goto err; 1882 1868 1883 1869 rv = bitmap_init_from_disk(bitmap, 0); 1884 1870 if (rv)
+11 -1
drivers/md/md-cluster.c
··· 44 44 45 45 /* md_cluster_info flags */ 46 46 #define MD_CLUSTER_WAITING_FOR_NEWDISK 1 47 + #define MD_CLUSTER_SUSPEND_READ_BALANCING 2 47 48 48 49 49 50 struct md_cluster_info { ··· 276 275 277 276 static void recover_prep(void *arg) 278 277 { 278 + struct mddev *mddev = arg; 279 + struct md_cluster_info *cinfo = mddev->cluster_info; 280 + set_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state); 279 281 } 280 282 281 283 static void recover_slot(void *arg, struct dlm_slot *slot) ··· 311 307 312 308 cinfo->slot_number = our_slot; 313 309 complete(&cinfo->completion); 310 + clear_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state); 314 311 } 315 312 316 313 static const struct dlm_lockspace_ops md_ls_ops = { ··· 821 816 resync_send(mddev, RESYNCING, 0, 0); 822 817 } 823 818 824 - static int area_resyncing(struct mddev *mddev, sector_t lo, sector_t hi) 819 + static int area_resyncing(struct mddev *mddev, int direction, 820 + sector_t lo, sector_t hi) 825 821 { 826 822 struct md_cluster_info *cinfo = mddev->cluster_info; 827 823 int ret = 0; 828 824 struct suspend_info *s; 825 + 826 + if ((direction == READ) && 827 + test_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state)) 828 + return 1; 829 829 830 830 spin_lock_irq(&cinfo->suspend_lock); 831 831 if (list_empty(&cinfo->suspend_list))
+1 -1
drivers/md/md-cluster.h
··· 18 18 int (*metadata_update_start)(struct mddev *mddev); 19 19 int (*metadata_update_finish)(struct mddev *mddev); 20 20 int (*metadata_update_cancel)(struct mddev *mddev); 21 - int (*area_resyncing)(struct mddev *mddev, sector_t lo, sector_t hi); 21 + int (*area_resyncing)(struct mddev *mddev, int direction, sector_t lo, sector_t hi); 22 22 int (*add_new_disk_start)(struct mddev *mddev, struct md_rdev *rdev); 23 23 int (*add_new_disk_finish)(struct mddev *mddev); 24 24 int (*new_disk_ack)(struct mddev *mddev, bool ack);
+3 -1
drivers/md/md.c
··· 5382 5382 { 5383 5383 struct md_personality *pers = mddev->pers; 5384 5384 mddev_detach(mddev); 5385 + /* Ensure ->event_work is done */ 5386 + flush_workqueue(md_misc_wq); 5385 5387 spin_lock(&mddev->lock); 5386 5388 mddev->ready = 0; 5387 5389 mddev->pers = NULL; ··· 7439 7437 err = request_module("md-cluster"); 7440 7438 if (err) { 7441 7439 pr_err("md-cluster module not found.\n"); 7442 - return err; 7440 + return -ENOENT; 7443 7441 } 7444 7442 7445 7443 spin_lock(&pers_lock);
+5 -4
drivers/md/raid1.c
··· 336 336 spin_lock_irqsave(&conf->device_lock, flags); 337 337 if (r1_bio->mddev->degraded == conf->raid_disks || 338 338 (r1_bio->mddev->degraded == conf->raid_disks-1 && 339 - !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags))) 339 + test_bit(In_sync, &conf->mirrors[mirror].rdev->flags))) 340 340 uptodate = 1; 341 341 spin_unlock_irqrestore(&conf->device_lock, flags); 342 342 } ··· 541 541 542 542 if ((conf->mddev->recovery_cp < this_sector + sectors) || 543 543 (mddev_is_clustered(conf->mddev) && 544 - md_cluster_ops->area_resyncing(conf->mddev, this_sector, 544 + md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector, 545 545 this_sector + sectors))) 546 546 choose_first = 1; 547 547 else ··· 1111 1111 ((bio_end_sector(bio) > mddev->suspend_lo && 1112 1112 bio->bi_iter.bi_sector < mddev->suspend_hi) || 1113 1113 (mddev_is_clustered(mddev) && 1114 - md_cluster_ops->area_resyncing(mddev, bio->bi_iter.bi_sector, bio_end_sector(bio))))) { 1114 + md_cluster_ops->area_resyncing(mddev, WRITE, 1115 + bio->bi_iter.bi_sector, bio_end_sector(bio))))) { 1115 1116 /* As the suspend_* range is controlled by 1116 1117 * userspace, we want an interruptible 1117 1118 * wait. ··· 1125 1124 if (bio_end_sector(bio) <= mddev->suspend_lo || 1126 1125 bio->bi_iter.bi_sector >= mddev->suspend_hi || 1127 1126 (mddev_is_clustered(mddev) && 1128 - !md_cluster_ops->area_resyncing(mddev, 1127 + !md_cluster_ops->area_resyncing(mddev, WRITE, 1129 1128 bio->bi_iter.bi_sector, bio_end_sector(bio)))) 1130 1129 break; 1131 1130 schedule();
+4 -1
drivers/md/raid10.c
··· 3556 3556 /* far_copies must be 1 */ 3557 3557 conf->prev.stride = conf->dev_sectors; 3558 3558 } 3559 + conf->reshape_safe = conf->reshape_progress; 3559 3560 spin_lock_init(&conf->device_lock); 3560 3561 INIT_LIST_HEAD(&conf->retry_list); 3561 3562 ··· 3761 3760 } 3762 3761 conf->offset_diff = min_offset_diff; 3763 3762 3764 - conf->reshape_safe = conf->reshape_progress; 3765 3763 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 3766 3764 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 3767 3765 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); ··· 4103 4103 conf->reshape_progress = size; 4104 4104 } else 4105 4105 conf->reshape_progress = 0; 4106 + conf->reshape_safe = conf->reshape_progress; 4106 4107 spin_unlock_irq(&conf->device_lock); 4107 4108 4108 4109 if (mddev->delta_disks && mddev->bitmap) { ··· 4171 4170 rdev->new_data_offset = rdev->data_offset; 4172 4171 smp_wmb(); 4173 4172 conf->reshape_progress = MaxSector; 4173 + conf->reshape_safe = MaxSector; 4174 4174 mddev->reshape_position = MaxSector; 4175 4175 spin_unlock_irq(&conf->device_lock); 4176 4176 return ret; ··· 4526 4524 md_finish_reshape(conf->mddev); 4527 4525 smp_wmb(); 4528 4526 conf->reshape_progress = MaxSector; 4527 + conf->reshape_safe = MaxSector; 4529 4528 spin_unlock_irq(&conf->device_lock); 4530 4529 4531 4530 /* read-ahead size must cover two whole stripes, which is
+28 -7
drivers/md/raid5.c
··· 2162 2162 if (!sc) 2163 2163 return -ENOMEM; 2164 2164 2165 + /* Need to ensure auto-resizing doesn't interfere */ 2166 + mutex_lock(&conf->cache_size_mutex); 2167 + 2165 2168 for (i = conf->max_nr_stripes; i; i--) { 2166 2169 nsh = alloc_stripe(sc, GFP_KERNEL); 2167 2170 if (!nsh) ··· 2181 2178 kmem_cache_free(sc, nsh); 2182 2179 } 2183 2180 kmem_cache_destroy(sc); 2181 + mutex_unlock(&conf->cache_size_mutex); 2184 2182 return -ENOMEM; 2185 2183 } 2186 2184 /* Step 2 - Must use GFP_NOIO now. ··· 2228 2224 } else 2229 2225 err = -ENOMEM; 2230 2226 2227 + mutex_unlock(&conf->cache_size_mutex); 2231 2228 /* Step 4, return new stripes to service */ 2232 2229 while(!list_empty(&newstripes)) { 2233 2230 nsh = list_entry(newstripes.next, struct stripe_head, lru); ··· 4066 4061 &first_bad, &bad_sectors)) 4067 4062 set_bit(R5_ReadRepl, &dev->flags); 4068 4063 else { 4069 - if (rdev) 4064 + if (rdev && !test_bit(Faulty, &rdev->flags)) 4070 4065 set_bit(R5_NeedReplace, &dev->flags); 4066 + else 4067 + clear_bit(R5_NeedReplace, &dev->flags); 4071 4068 rdev = rcu_dereference(conf->disks[i].rdev); 4072 4069 clear_bit(R5_ReadRepl, &dev->flags); 4073 4070 } ··· 5864 5857 pr_debug("%d stripes handled\n", handled); 5865 5858 5866 5859 spin_unlock_irq(&conf->device_lock); 5867 - if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state)) { 5860 + if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state) && 5861 + mutex_trylock(&conf->cache_size_mutex)) { 5868 5862 grow_one_stripe(conf, __GFP_NOWARN); 5869 5863 /* Set flag even if allocation failed. 
This helps 5870 5864 * slow down allocation requests when mem is short 5871 5865 */ 5872 5866 set_bit(R5_DID_ALLOC, &conf->cache_state); 5867 + mutex_unlock(&conf->cache_size_mutex); 5873 5868 } 5874 5869 5875 5870 async_tx_issue_pending_all(); ··· 5903 5894 return -EINVAL; 5904 5895 5905 5896 conf->min_nr_stripes = size; 5897 + mutex_lock(&conf->cache_size_mutex); 5906 5898 while (size < conf->max_nr_stripes && 5907 5899 drop_one_stripe(conf)) 5908 5900 ; 5901 + mutex_unlock(&conf->cache_size_mutex); 5909 5902 5910 5903 5911 5904 err = md_allow_write(mddev); 5912 5905 if (err) 5913 5906 return err; 5914 5907 5908 + mutex_lock(&conf->cache_size_mutex); 5915 5909 while (size > conf->max_nr_stripes) 5916 5910 if (!grow_one_stripe(conf, GFP_KERNEL)) 5917 5911 break; 5912 + mutex_unlock(&conf->cache_size_mutex); 5918 5913 5919 5914 return 0; 5920 5915 } ··· 6384 6371 struct shrink_control *sc) 6385 6372 { 6386 6373 struct r5conf *conf = container_of(shrink, struct r5conf, shrinker); 6387 - int ret = 0; 6388 - while (ret < sc->nr_to_scan) { 6389 - if (drop_one_stripe(conf) == 0) 6390 - return SHRINK_STOP; 6391 - ret++; 6374 + unsigned long ret = SHRINK_STOP; 6375 + 6376 + if (mutex_trylock(&conf->cache_size_mutex)) { 6377 + ret= 0; 6378 + while (ret < sc->nr_to_scan) { 6379 + if (drop_one_stripe(conf) == 0) { 6380 + ret = SHRINK_STOP; 6381 + break; 6382 + } 6383 + ret++; 6384 + } 6385 + mutex_unlock(&conf->cache_size_mutex); 6392 6386 } 6393 6387 return ret; 6394 6388 } ··· 6464 6444 goto abort; 6465 6445 spin_lock_init(&conf->device_lock); 6466 6446 seqcount_init(&conf->gen_lock); 6447 + mutex_init(&conf->cache_size_mutex); 6467 6448 init_waitqueue_head(&conf->wait_for_quiescent); 6468 6449 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) { 6469 6450 init_waitqueue_head(&conf->wait_for_stripe[i]);
+2 -1
drivers/md/raid5.h
··· 482 482 */ 483 483 int active_name; 484 484 char cache_name[2][32]; 485 - struct kmem_cache *slab_cache; /* for allocating stripes */ 485 + struct kmem_cache *slab_cache; /* for allocating stripes */ 486 + struct mutex cache_size_mutex; /* Protect changes to cache size */ 486 487 487 488 int seq_flush, seq_write; 488 489 int quiesce;
+9 -6
drivers/media/pci/ivtv/ivtvfb.c
··· 38 38 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 39 39 */ 40 40 41 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 42 + 41 43 #include <linux/module.h> 42 44 #include <linux/kernel.h> 43 45 #include <linux/fb.h> ··· 1173 1171 { 1174 1172 int rc; 1175 1173 1174 + #ifdef CONFIG_X86_64 1175 + if (pat_enabled()) { 1176 + pr_warn("ivtvfb needs PAT disabled, boot with nopat kernel parameter\n"); 1177 + return -ENODEV; 1178 + } 1179 + #endif 1180 + 1176 1181 if (itv->osd_info) { 1177 1182 IVTVFB_ERR("Card %d already initialised\n", ivtvfb_card_id); 1178 1183 return -EBUSY; ··· 1274 1265 int registered = 0; 1275 1266 int err; 1276 1267 1277 - #ifdef CONFIG_X86_64 1278 - if (WARN(pat_enabled(), 1279 - "ivtvfb needs PAT disabled, boot with nopat kernel parameter\n")) { 1280 - return -ENODEV; 1281 - } 1282 - #endif 1283 1268 1284 1269 if (ivtvfb_card_id < -1 || ivtvfb_card_id >= IVTV_MAX_CARDS) { 1285 1270 printk(KERN_ERR "ivtvfb: ivtvfb_card_id parameter is out of range (valid range: -1 - %d)\n",
+1 -1
drivers/misc/mei/main.c
··· 682 682 /* Fill in the data structures */ 683 683 devno = MKDEV(MAJOR(mei_devt), dev->minor); 684 684 cdev_init(&dev->cdev, &mei_fops); 685 - dev->cdev.owner = mei_fops.owner; 685 + dev->cdev.owner = parent->driver->owner; 686 686 687 687 /* Add the device */ 688 688 ret = cdev_add(&dev->cdev, devno, 1);
+5 -10
drivers/misc/mic/scif/scif_nodeqp.c
··· 357 357 } 358 358 359 359 static struct scatterlist * 360 - scif_p2p_setsg(void __iomem *va, int page_size, int page_cnt) 360 + scif_p2p_setsg(phys_addr_t pa, int page_size, int page_cnt) 361 361 { 362 362 struct scatterlist *sg; 363 363 struct page *page; ··· 368 368 return NULL; 369 369 sg_init_table(sg, page_cnt); 370 370 for (i = 0; i < page_cnt; i++) { 371 - page = vmalloc_to_page((void __force *)va); 372 - if (!page) 373 - goto p2p_sg_err; 371 + page = pfn_to_page(pa >> PAGE_SHIFT); 374 372 sg_set_page(&sg[i], page, page_size, 0); 375 - va += page_size; 373 + pa += page_size; 376 374 } 377 375 return sg; 378 - p2p_sg_err: 379 - kfree(sg); 380 - return NULL; 381 376 } 382 377 383 378 /* Init p2p mappings required to access peerdev from scifdev */ ··· 390 395 p2p = kzalloc(sizeof(*p2p), GFP_KERNEL); 391 396 if (!p2p) 392 397 return NULL; 393 - p2p->ppi_sg[SCIF_PPI_MMIO] = scif_p2p_setsg(psdev->mmio->va, 398 + p2p->ppi_sg[SCIF_PPI_MMIO] = scif_p2p_setsg(psdev->mmio->pa, 394 399 PAGE_SIZE, num_mmio_pages); 395 400 if (!p2p->ppi_sg[SCIF_PPI_MMIO]) 396 401 goto free_p2p; 397 402 p2p->sg_nentries[SCIF_PPI_MMIO] = num_mmio_pages; 398 403 sg_page_shift = get_order(min(psdev->aper->len, (u64)(1 << 30))); 399 404 num_aper_chunks = num_aper_pages >> (sg_page_shift - PAGE_SHIFT); 400 - p2p->ppi_sg[SCIF_PPI_APER] = scif_p2p_setsg(psdev->aper->va, 405 + p2p->ppi_sg[SCIF_PPI_APER] = scif_p2p_setsg(psdev->aper->pa, 401 406 1 << sg_page_shift, 402 407 num_aper_chunks); 403 408 p2p->sg_nentries[SCIF_PPI_APER] = num_aper_chunks;
+2
drivers/mmc/card/block.c
··· 208 208 209 209 ret = snprintf(buf, PAGE_SIZE, "%d\n", locked); 210 210 211 + mmc_blk_put(md); 212 + 211 213 return ret; 212 214 } 213 215
+1
drivers/mmc/host/Kconfig
··· 779 779 780 780 config MMC_MTK 781 781 tristate "MediaTek SD/MMC Card Interface support" 782 + depends on HAS_DMA 782 783 help 783 784 This selects the MediaTek(R) Secure digital and Multimedia card Interface. 784 785 If you have a machine with a integrated SD/MMC card reader, say Y or M here.
+6 -5
drivers/mmc/host/omap_hsmmc.c
··· 1062 1062 1063 1063 if (status & (CTO_EN | CCRC_EN)) 1064 1064 end_cmd = 1; 1065 + if (host->data || host->response_busy) { 1066 + end_trans = !end_cmd; 1067 + host->response_busy = 0; 1068 + } 1065 1069 if (status & (CTO_EN | DTO_EN)) 1066 1070 hsmmc_command_incomplete(host, -ETIMEDOUT, end_cmd); 1067 - else if (status & (CCRC_EN | DCRC_EN)) 1071 + else if (status & (CCRC_EN | DCRC_EN | DEB_EN | CEB_EN | 1072 + BADA_EN)) 1068 1073 hsmmc_command_incomplete(host, -EILSEQ, end_cmd); 1069 1074 1070 1075 if (status & ACE_EN) { ··· 1085 1080 hsmmc_command_incomplete(host, error, end_cmd); 1086 1081 } 1087 1082 dev_dbg(mmc_dev(host->mmc), "AC12 err: 0x%x\n", ac12); 1088 - } 1089 - if (host->data || host->response_busy) { 1090 - end_trans = !end_cmd; 1091 - host->response_busy = 0; 1092 1083 } 1093 1084 } 1094 1085
+104 -106
drivers/mmc/host/sdhci-esdhc-imx.c
··· 581 581 static unsigned int esdhc_pltfm_get_max_clock(struct sdhci_host *host) 582 582 { 583 583 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 584 - struct pltfm_imx_data *imx_data = pltfm_host->priv; 585 - struct esdhc_platform_data *boarddata = &imx_data->boarddata; 586 584 587 - if (boarddata->f_max && (boarddata->f_max < pltfm_host->clock)) 588 - return boarddata->f_max; 589 - else 590 - return pltfm_host->clock; 585 + return pltfm_host->clock; 591 586 } 592 587 593 588 static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host) ··· 873 878 static int 874 879 sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, 875 880 struct sdhci_host *host, 876 - struct esdhc_platform_data *boarddata) 881 + struct pltfm_imx_data *imx_data) 877 882 { 878 883 struct device_node *np = pdev->dev.of_node; 879 - 880 - if (!np) 881 - return -ENODEV; 882 - 883 - if (of_get_property(np, "non-removable", NULL)) 884 - boarddata->cd_type = ESDHC_CD_PERMANENT; 885 - 886 - if (of_get_property(np, "fsl,cd-controller", NULL)) 887 - boarddata->cd_type = ESDHC_CD_CONTROLLER; 884 + struct esdhc_platform_data *boarddata = &imx_data->boarddata; 885 + int ret; 888 886 889 887 if (of_get_property(np, "fsl,wp-controller", NULL)) 890 888 boarddata->wp_type = ESDHC_WP_CONTROLLER; 891 889 892 - boarddata->cd_gpio = of_get_named_gpio(np, "cd-gpios", 0); 893 - if (gpio_is_valid(boarddata->cd_gpio)) 894 - boarddata->cd_type = ESDHC_CD_GPIO; 895 - 896 890 boarddata->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0); 897 891 if (gpio_is_valid(boarddata->wp_gpio)) 898 892 boarddata->wp_type = ESDHC_WP_GPIO; 899 - 900 - of_property_read_u32(np, "bus-width", &boarddata->max_bus_width); 901 - 902 - of_property_read_u32(np, "max-frequency", &boarddata->f_max); 903 893 904 894 if (of_find_property(np, "no-1-8-v", NULL)) 905 895 boarddata->support_vsel = false; ··· 896 916 897 917 mmc_of_parse_voltage(np, &host->ocr_mask); 898 918 919 + /* sdr50 and sdr104 needs work on 1.8v signal voltage 
*/ 920 + if ((boarddata->support_vsel) && esdhc_is_usdhc(imx_data) && 921 + !IS_ERR(imx_data->pins_default)) { 922 + imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl, 923 + ESDHC_PINCTRL_STATE_100MHZ); 924 + imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl, 925 + ESDHC_PINCTRL_STATE_200MHZ); 926 + if (IS_ERR(imx_data->pins_100mhz) || 927 + IS_ERR(imx_data->pins_200mhz)) { 928 + dev_warn(mmc_dev(host->mmc), 929 + "could not get ultra high speed state, work on normal mode\n"); 930 + /* 931 + * fall back to not support uhs by specify no 1.8v quirk 932 + */ 933 + host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; 934 + } 935 + } else { 936 + host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; 937 + } 938 + 899 939 /* call to generic mmc_of_parse to support additional capabilities */ 900 - return mmc_of_parse(host->mmc); 940 + ret = mmc_of_parse(host->mmc); 941 + if (ret) 942 + return ret; 943 + 944 + if (!IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc))) 945 + host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; 946 + 947 + return 0; 901 948 } 902 949 #else 903 950 static inline int 904 951 sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, 905 952 struct sdhci_host *host, 906 - struct esdhc_platform_data *boarddata) 953 + struct pltfm_imx_data *imx_data) 907 954 { 908 955 return -ENODEV; 909 956 } 910 957 #endif 958 + 959 + static int sdhci_esdhc_imx_probe_nondt(struct platform_device *pdev, 960 + struct sdhci_host *host, 961 + struct pltfm_imx_data *imx_data) 962 + { 963 + struct esdhc_platform_data *boarddata = &imx_data->boarddata; 964 + int err; 965 + 966 + if (!host->mmc->parent->platform_data) { 967 + dev_err(mmc_dev(host->mmc), "no board data!\n"); 968 + return -EINVAL; 969 + } 970 + 971 + imx_data->boarddata = *((struct esdhc_platform_data *) 972 + host->mmc->parent->platform_data); 973 + /* write_protect */ 974 + if (boarddata->wp_type == ESDHC_WP_GPIO) { 975 + err = mmc_gpio_request_ro(host->mmc, boarddata->wp_gpio); 976 + if (err) { 977 + 
dev_err(mmc_dev(host->mmc), 978 + "failed to request write-protect gpio!\n"); 979 + return err; 980 + } 981 + host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; 982 + } 983 + 984 + /* card_detect */ 985 + switch (boarddata->cd_type) { 986 + case ESDHC_CD_GPIO: 987 + err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio, 0); 988 + if (err) { 989 + dev_err(mmc_dev(host->mmc), 990 + "failed to request card-detect gpio!\n"); 991 + return err; 992 + } 993 + /* fall through */ 994 + 995 + case ESDHC_CD_CONTROLLER: 996 + /* we have a working card_detect back */ 997 + host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; 998 + break; 999 + 1000 + case ESDHC_CD_PERMANENT: 1001 + host->mmc->caps |= MMC_CAP_NONREMOVABLE; 1002 + break; 1003 + 1004 + case ESDHC_CD_NONE: 1005 + break; 1006 + } 1007 + 1008 + switch (boarddata->max_bus_width) { 1009 + case 8: 1010 + host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA; 1011 + break; 1012 + case 4: 1013 + host->mmc->caps |= MMC_CAP_4_BIT_DATA; 1014 + break; 1015 + case 1: 1016 + default: 1017 + host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA; 1018 + break; 1019 + } 1020 + 1021 + return 0; 1022 + } 911 1023 912 1024 static int sdhci_esdhc_imx_probe(struct platform_device *pdev) 913 1025 { ··· 1007 935 of_match_device(imx_esdhc_dt_ids, &pdev->dev); 1008 936 struct sdhci_pltfm_host *pltfm_host; 1009 937 struct sdhci_host *host; 1010 - struct esdhc_platform_data *boarddata; 1011 938 int err; 1012 939 struct pltfm_imx_data *imx_data; 1013 - bool dt = true; 1014 940 1015 941 host = sdhci_pltfm_init(pdev, &sdhci_esdhc_imx_pdata, 0); 1016 942 if (IS_ERR(host)) ··· 1100 1030 if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536) 1101 1031 host->quirks |= SDHCI_QUIRK_BROKEN_ADMA; 1102 1032 1103 - boarddata = &imx_data->boarddata; 1104 - if (sdhci_esdhc_imx_probe_dt(pdev, host, boarddata) < 0) { 1105 - if (!host->mmc->parent->platform_data) { 1106 - dev_err(mmc_dev(host->mmc), "no board data!\n"); 1107 - err = -EINVAL; 1108 - goto disable_clk; 
1109 - } 1110 - imx_data->boarddata = *((struct esdhc_platform_data *) 1111 - host->mmc->parent->platform_data); 1112 - dt = false; 1113 - } 1114 - /* write_protect */ 1115 - if (boarddata->wp_type == ESDHC_WP_GPIO && !dt) { 1116 - err = mmc_gpio_request_ro(host->mmc, boarddata->wp_gpio); 1117 - if (err) { 1118 - dev_err(mmc_dev(host->mmc), 1119 - "failed to request write-protect gpio!\n"); 1120 - goto disable_clk; 1121 - } 1122 - host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; 1123 - } 1124 - 1125 - /* card_detect */ 1126 - switch (boarddata->cd_type) { 1127 - case ESDHC_CD_GPIO: 1128 - if (dt) 1129 - break; 1130 - err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio, 0); 1131 - if (err) { 1132 - dev_err(mmc_dev(host->mmc), 1133 - "failed to request card-detect gpio!\n"); 1134 - goto disable_clk; 1135 - } 1136 - /* fall through */ 1137 - 1138 - case ESDHC_CD_CONTROLLER: 1139 - /* we have a working card_detect back */ 1140 - host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; 1141 - break; 1142 - 1143 - case ESDHC_CD_PERMANENT: 1144 - host->mmc->caps |= MMC_CAP_NONREMOVABLE; 1145 - break; 1146 - 1147 - case ESDHC_CD_NONE: 1148 - break; 1149 - } 1150 - 1151 - switch (boarddata->max_bus_width) { 1152 - case 8: 1153 - host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA; 1154 - break; 1155 - case 4: 1156 - host->mmc->caps |= MMC_CAP_4_BIT_DATA; 1157 - break; 1158 - case 1: 1159 - default: 1160 - host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA; 1161 - break; 1162 - } 1163 - 1164 - /* sdr50 and sdr104 needs work on 1.8v signal voltage */ 1165 - if ((boarddata->support_vsel) && esdhc_is_usdhc(imx_data) && 1166 - !IS_ERR(imx_data->pins_default)) { 1167 - imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl, 1168 - ESDHC_PINCTRL_STATE_100MHZ); 1169 - imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl, 1170 - ESDHC_PINCTRL_STATE_200MHZ); 1171 - if (IS_ERR(imx_data->pins_100mhz) || 1172 - IS_ERR(imx_data->pins_200mhz)) { 1173 - 
dev_warn(mmc_dev(host->mmc), 1174 - "could not get ultra high speed state, work on normal mode\n"); 1175 - /* fall back to not support uhs by specify no 1.8v quirk */ 1176 - host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; 1177 - } 1178 - } else { 1179 - host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; 1180 - } 1033 + if (of_id) 1034 + err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data); 1035 + else 1036 + err = sdhci_esdhc_imx_probe_nondt(pdev, host, imx_data); 1037 + if (err) 1038 + goto disable_clk; 1181 1039 1182 1040 err = sdhci_add_host(host); 1183 1041 if (err)
+1 -1
drivers/mmc/host/sdhci-esdhc.h
··· 45 45 #define ESDHC_DMA_SYSCTL 0x40c 46 46 #define ESDHC_DMA_SNOOP 0x00000040 47 47 48 - #define ESDHC_HOST_CONTROL_RES 0x05 48 + #define ESDHC_HOST_CONTROL_RES 0x01 49 49 50 50 #endif /* _DRIVERS_MMC_SDHCI_ESDHC_H */
+1
drivers/mmc/host/sdhci-pxav3.c
··· 411 411 goto err_of_parse; 412 412 sdhci_get_of_property(pdev); 413 413 pdata = pxav3_get_mmc_pdata(dev); 414 + pdev->dev.platform_data = pdata; 414 415 } else if (pdata) { 415 416 /* on-chip device */ 416 417 if (pdata->flags & PXA_FLAG_CARD_PERMANENT)
+12 -4
drivers/mmc/host/sdhci.c
··· 2866 2866 u32 max_current_caps; 2867 2867 unsigned int ocr_avail; 2868 2868 unsigned int override_timeout_clk; 2869 + u32 max_clk; 2869 2870 int ret; 2870 2871 2871 2872 WARN_ON(host == NULL); ··· 2979 2978 GFP_KERNEL); 2980 2979 host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL); 2981 2980 if (!host->adma_table || !host->align_buffer) { 2982 - dma_free_coherent(mmc_dev(mmc), host->adma_table_sz, 2983 - host->adma_table, host->adma_addr); 2981 + if (host->adma_table) 2982 + dma_free_coherent(mmc_dev(mmc), 2983 + host->adma_table_sz, 2984 + host->adma_table, 2985 + host->adma_addr); 2984 2986 kfree(host->align_buffer); 2985 2987 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n", 2986 2988 mmc_hostname(mmc)); ··· 3051 3047 * Set host parameters. 3052 3048 */ 3053 3049 mmc->ops = &sdhci_ops; 3054 - mmc->f_max = host->max_clk; 3050 + max_clk = host->max_clk; 3051 + 3055 3052 if (host->ops->get_min_clock) 3056 3053 mmc->f_min = host->ops->get_min_clock(host); 3057 3054 else if (host->version >= SDHCI_SPEC_300) { 3058 3055 if (host->clk_mul) { 3059 3056 mmc->f_min = (host->max_clk * host->clk_mul) / 1024; 3060 - mmc->f_max = host->max_clk * host->clk_mul; 3057 + max_clk = host->max_clk * host->clk_mul; 3061 3058 } else 3062 3059 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; 3063 3060 } else 3064 3061 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; 3062 + 3063 + if (!mmc->f_max || (mmc->f_max && (mmc->f_max > max_clk))) 3064 + mmc->f_max = max_clk; 3065 3065 3066 3066 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) { 3067 3067 host->timeout_clk = (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >>
+31 -3
drivers/net/bonding/bond_main.c
··· 625 625 call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev); 626 626 } 627 627 628 + static struct slave *bond_get_old_active(struct bonding *bond, 629 + struct slave *new_active) 630 + { 631 + struct slave *slave; 632 + struct list_head *iter; 633 + 634 + bond_for_each_slave(bond, slave, iter) { 635 + if (slave == new_active) 636 + continue; 637 + 638 + if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr)) 639 + return slave; 640 + } 641 + 642 + return NULL; 643 + } 644 + 628 645 /* bond_do_fail_over_mac 629 646 * 630 647 * Perform special MAC address swapping for fail_over_mac settings ··· 668 651 */ 669 652 if (!new_active) 670 653 return; 654 + 655 + if (!old_active) 656 + old_active = bond_get_old_active(bond, new_active); 671 657 672 658 if (old_active) { 673 659 ether_addr_copy(tmp_mac, new_active->dev->dev_addr); ··· 1745 1725 1746 1726 err_undo_flags: 1747 1727 /* Enslave of first slave has failed and we need to fix master's mac */ 1748 - if (!bond_has_slaves(bond) && 1749 - ether_addr_equal_64bits(bond_dev->dev_addr, slave_dev->dev_addr)) 1750 - eth_hw_addr_random(bond_dev); 1728 + if (!bond_has_slaves(bond)) { 1729 + if (ether_addr_equal_64bits(bond_dev->dev_addr, 1730 + slave_dev->dev_addr)) 1731 + eth_hw_addr_random(bond_dev); 1732 + if (bond_dev->type != ARPHRD_ETHER) { 1733 + ether_setup(bond_dev); 1734 + bond_dev->flags |= IFF_MASTER; 1735 + bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING; 1736 + } 1737 + } 1751 1738 1752 1739 return res; 1753 1740 } ··· 1943 1916 bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; 1944 1917 netdev_info(bond_dev, "Destroying bond %s\n", 1945 1918 bond_dev->name); 1919 + bond_remove_proc_entry(bond); 1946 1920 unregister_netdevice(bond_dev); 1947 1921 } 1948 1922 return ret;
+4 -4
drivers/net/can/at91_can.c
··· 577 577 578 578 cf->can_id |= CAN_ERR_CRTL; 579 579 cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; 580 - netif_receive_skb(skb); 581 580 582 581 stats->rx_packets++; 583 582 stats->rx_bytes += cf->can_dlc; 583 + netif_receive_skb(skb); 584 584 } 585 585 586 586 /** ··· 642 642 } 643 643 644 644 at91_read_mb(dev, mb, cf); 645 - netif_receive_skb(skb); 646 645 647 646 stats->rx_packets++; 648 647 stats->rx_bytes += cf->can_dlc; 648 + netif_receive_skb(skb); 649 649 650 650 can_led_event(dev, CAN_LED_EVENT_RX); 651 651 } ··· 802 802 return 0; 803 803 804 804 at91_poll_err_frame(dev, cf, reg_sr); 805 - netif_receive_skb(skb); 806 805 807 806 dev->stats.rx_packets++; 808 807 dev->stats.rx_bytes += cf->can_dlc; 808 + netif_receive_skb(skb); 809 809 810 810 return 1; 811 811 } ··· 1067 1067 return; 1068 1068 1069 1069 at91_irq_err_state(dev, cf, new_state); 1070 - netif_rx(skb); 1071 1070 1072 1071 dev->stats.rx_packets++; 1073 1072 dev->stats.rx_bytes += cf->can_dlc; 1073 + netif_rx(skb); 1074 1074 1075 1075 priv->can.state = new_state; 1076 1076 }
+2 -4
drivers/net/can/bfin_can.c
··· 424 424 cf->data[6 - i] = (6 - i) < cf->can_dlc ? (val >> 8) : 0; 425 425 } 426 426 427 - netif_rx(skb); 428 - 429 427 stats->rx_packets++; 430 428 stats->rx_bytes += cf->can_dlc; 429 + netif_rx(skb); 431 430 } 432 431 433 432 static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status) ··· 507 508 508 509 priv->can.state = state; 509 510 510 - netif_rx(skb); 511 - 512 511 stats->rx_packets++; 513 512 stats->rx_bytes += cf->can_dlc; 513 + netif_rx(skb); 514 514 515 515 return 0; 516 516 }
+2 -2
drivers/net/can/cc770/cc770.c
··· 504 504 for (i = 0; i < cf->can_dlc; i++) 505 505 cf->data[i] = cc770_read_reg(priv, msgobj[mo].data[i]); 506 506 } 507 - netif_rx(skb); 508 507 509 508 stats->rx_packets++; 510 509 stats->rx_bytes += cf->can_dlc; 510 + netif_rx(skb); 511 511 } 512 512 513 513 static int cc770_err(struct net_device *dev, u8 status) ··· 584 584 } 585 585 } 586 586 587 - netif_rx(skb); 588 587 589 588 stats->rx_packets++; 590 589 stats->rx_bytes += cf->can_dlc; 590 + netif_rx(skb); 591 591 592 592 return 0; 593 593 }
+3 -4
drivers/net/can/flexcan.c
··· 577 577 return 0; 578 578 579 579 do_bus_err(dev, cf, reg_esr); 580 - netif_receive_skb(skb); 581 580 582 581 dev->stats.rx_packets++; 583 582 dev->stats.rx_bytes += cf->can_dlc; 583 + netif_receive_skb(skb); 584 584 585 585 return 1; 586 586 } ··· 622 622 if (unlikely(new_state == CAN_STATE_BUS_OFF)) 623 623 can_bus_off(dev); 624 624 625 - netif_receive_skb(skb); 626 - 627 625 dev->stats.rx_packets++; 628 626 dev->stats.rx_bytes += cf->can_dlc; 627 + netif_receive_skb(skb); 629 628 630 629 return 1; 631 630 } ··· 669 670 } 670 671 671 672 flexcan_read_fifo(dev, cf); 672 - netif_receive_skb(skb); 673 673 674 674 stats->rx_packets++; 675 675 stats->rx_bytes += cf->can_dlc; 676 + netif_receive_skb(skb); 676 677 677 678 can_led_event(dev, CAN_LED_EVENT_RX); 678 679
+2 -1
drivers/net/can/grcan.c
··· 1216 1216 cf->data[i] = (u8)(slot[j] >> shift); 1217 1217 } 1218 1218 } 1219 - netif_receive_skb(skb); 1220 1219 1221 1220 /* Update statistics and read pointer */ 1222 1221 stats->rx_packets++; 1223 1222 stats->rx_bytes += cf->can_dlc; 1223 + netif_receive_skb(skb); 1224 + 1224 1225 rd = grcan_ring_add(rd, GRCAN_MSG_SIZE, dma->rx.size); 1225 1226 } 1226 1227
+2 -4
drivers/net/can/sja1000/sja1000.c
··· 377 377 /* release receive buffer */ 378 378 sja1000_write_cmdreg(priv, CMD_RRB); 379 379 380 - netif_rx(skb); 381 - 382 380 stats->rx_packets++; 383 381 stats->rx_bytes += cf->can_dlc; 382 + netif_rx(skb); 384 383 385 384 can_led_event(dev, CAN_LED_EVENT_RX); 386 385 } ··· 483 484 can_bus_off(dev); 484 485 } 485 486 486 - netif_rx(skb); 487 - 488 487 stats->rx_packets++; 489 488 stats->rx_bytes += cf->can_dlc; 489 + netif_rx(skb); 490 490 491 491 return 0; 492 492 }
+1 -1
drivers/net/can/slcan.c
··· 218 218 219 219 memcpy(skb_put(skb, sizeof(struct can_frame)), 220 220 &cf, sizeof(struct can_frame)); 221 - netif_rx_ni(skb); 222 221 223 222 sl->dev->stats.rx_packets++; 224 223 sl->dev->stats.rx_bytes += cf.can_dlc; 224 + netif_rx_ni(skb); 225 225 } 226 226 227 227 /* parse tty input stream */
+8 -9
drivers/net/can/spi/mcp251x.c
··· 1086 1086 if (ret) 1087 1087 goto out_clk; 1088 1088 1089 - priv->power = devm_regulator_get(&spi->dev, "vdd"); 1090 - priv->transceiver = devm_regulator_get(&spi->dev, "xceiver"); 1089 + priv->power = devm_regulator_get_optional(&spi->dev, "vdd"); 1090 + priv->transceiver = devm_regulator_get_optional(&spi->dev, "xceiver"); 1091 1091 if ((PTR_ERR(priv->power) == -EPROBE_DEFER) || 1092 1092 (PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) { 1093 1093 ret = -EPROBE_DEFER; ··· 1222 1222 struct spi_device *spi = to_spi_device(dev); 1223 1223 struct mcp251x_priv *priv = spi_get_drvdata(spi); 1224 1224 1225 - if (priv->after_suspend & AFTER_SUSPEND_POWER) { 1225 + if (priv->after_suspend & AFTER_SUSPEND_POWER) 1226 1226 mcp251x_power_enable(priv->power, 1); 1227 + 1228 + if (priv->after_suspend & AFTER_SUSPEND_UP) { 1229 + mcp251x_power_enable(priv->transceiver, 1); 1227 1230 queue_work(priv->wq, &priv->restart_work); 1228 1231 } else { 1229 - if (priv->after_suspend & AFTER_SUSPEND_UP) { 1230 - mcp251x_power_enable(priv->transceiver, 1); 1231 - queue_work(priv->wq, &priv->restart_work); 1232 - } else { 1233 - priv->after_suspend = 0; 1234 - } 1232 + priv->after_suspend = 0; 1235 1233 } 1234 + 1236 1235 priv->force_quit = 0; 1237 1236 enable_irq(spi->irq); 1238 1237 return 0;
+1 -1
drivers/net/can/ti_hecc.c
··· 747 747 } 748 748 } 749 749 750 - netif_rx(skb); 751 750 stats->rx_packets++; 752 751 stats->rx_bytes += cf->can_dlc; 752 + netif_rx(skb); 753 753 754 754 return 0; 755 755 }
+2 -4
drivers/net/can/usb/ems_usb.c
··· 324 324 cf->data[i] = msg->msg.can_msg.msg[i]; 325 325 } 326 326 327 - netif_rx(skb); 328 - 329 327 stats->rx_packets++; 330 328 stats->rx_bytes += cf->can_dlc; 329 + netif_rx(skb); 331 330 } 332 331 333 332 static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg) ··· 399 400 stats->rx_errors++; 400 401 } 401 402 402 - netif_rx(skb); 403 - 404 403 stats->rx_packets++; 405 404 stats->rx_bytes += cf->can_dlc; 405 + netif_rx(skb); 406 406 } 407 407 408 408 /*
+2 -4
drivers/net/can/usb/esd_usb2.c
··· 301 301 cf->data[7] = rxerr; 302 302 } 303 303 304 - netif_rx(skb); 305 - 306 304 priv->bec.txerr = txerr; 307 305 priv->bec.rxerr = rxerr; 308 306 309 307 stats->rx_packets++; 310 308 stats->rx_bytes += cf->can_dlc; 309 + netif_rx(skb); 311 310 } 312 311 } 313 312 ··· 346 347 cf->data[i] = msg->msg.rx.data[i]; 347 348 } 348 349 349 - netif_rx(skb); 350 - 351 350 stats->rx_packets++; 352 351 stats->rx_bytes += cf->can_dlc; 352 + netif_rx(skb); 353 353 } 354 354 355 355 return;
+3 -4
drivers/net/can/usb/peak_usb/pcan_usb.c
··· 526 526 hwts->hwtstamp = timeval_to_ktime(tv); 527 527 } 528 528 529 - netif_rx(skb); 530 529 mc->netdev->stats.rx_packets++; 531 530 mc->netdev->stats.rx_bytes += cf->can_dlc; 531 + netif_rx(skb); 532 532 533 533 return 0; 534 534 } ··· 659 659 hwts = skb_hwtstamps(skb); 660 660 hwts->hwtstamp = timeval_to_ktime(tv); 661 661 662 - /* push the skb */ 663 - netif_rx(skb); 664 - 665 662 /* update statistics */ 666 663 mc->netdev->stats.rx_packets++; 667 664 mc->netdev->stats.rx_bytes += cf->can_dlc; 665 + /* push the skb */ 666 + netif_rx(skb); 668 667 669 668 return 0; 670 669
+2 -2
drivers/net/can/usb/peak_usb/pcan_usb_pro.c
··· 553 553 hwts = skb_hwtstamps(skb); 554 554 hwts->hwtstamp = timeval_to_ktime(tv); 555 555 556 - netif_rx(skb); 557 556 netdev->stats.rx_packets++; 558 557 netdev->stats.rx_bytes += can_frame->can_dlc; 558 + netif_rx(skb); 559 559 560 560 return 0; 561 561 } ··· 670 670 peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(er->ts32), &tv); 671 671 hwts = skb_hwtstamps(skb); 672 672 hwts->hwtstamp = timeval_to_ktime(tv); 673 - netif_rx(skb); 674 673 netdev->stats.rx_packets++; 675 674 netdev->stats.rx_bytes += can_frame->can_dlc; 675 + netif_rx(skb); 676 676 677 677 return 0; 678 678 }
+2 -4
drivers/net/can/usb/usb_8dev.c
··· 461 461 priv->bec.txerr = txerr; 462 462 priv->bec.rxerr = rxerr; 463 463 464 - netif_rx(skb); 465 - 466 464 stats->rx_packets++; 467 465 stats->rx_bytes += cf->can_dlc; 466 + netif_rx(skb); 468 467 } 469 468 470 469 /* Read data and status frames */ ··· 493 494 else 494 495 memcpy(cf->data, msg->data, cf->can_dlc); 495 496 496 - netif_rx(skb); 497 - 498 497 stats->rx_packets++; 499 498 stats->rx_bytes += cf->can_dlc; 499 + netif_rx(skb); 500 500 501 501 can_led_event(priv->netdev, CAN_LED_EVENT_RX); 502 502 } else {
+13 -2
drivers/net/dsa/bcm_sf2.c
··· 696 696 } 697 697 698 698 /* Include the pseudo-PHY address and the broadcast PHY address to 699 - * divert reads towards our workaround 699 + * divert reads towards our workaround. This is only required for 700 + * 7445D0, since 7445E0 disconnects the internal switch pseudo-PHY such 701 + * that we can use the regular SWITCH_MDIO master controller instead. 702 + * 703 + * By default, DSA initializes ds->phys_mii_mask to ds->phys_port_mask 704 + * to have a 1:1 mapping between Port address and PHY address in order 705 + * to utilize the slave_mii_bus instance to read from Port PHYs. This is 706 + * not what we want here, so we initialize phys_mii_mask 0 to always 707 + * utilize the "master" MDIO bus backed by the "mdio-unimac" driver. 700 708 */ 701 - ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0)); 709 + if (of_machine_is_compatible("brcm,bcm7445d0")) 710 + ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0)); 711 + else 712 + ds->phys_mii_mask = 0; 702 713 703 714 rev = reg_readl(priv, REG_SWITCH_REVISION); 704 715 priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
+1 -1
drivers/net/dsa/mv88e6xxx.c
··· 1163 1163 1164 1164 newfid = __ffs(ps->fid_mask); 1165 1165 ps->fid[port] = newfid; 1166 - ps->fid_mask &= (1 << newfid); 1166 + ps->fid_mask &= ~(1 << newfid); 1167 1167 ps->bridge_mask[fid] &= ~(1 << port); 1168 1168 ps->bridge_mask[newfid] = 1 << port; 1169 1169
+13 -75
drivers/net/ethernet/freescale/fec_main.c
··· 24 24 #include <linux/module.h> 25 25 #include <linux/kernel.h> 26 26 #include <linux/string.h> 27 - #include <linux/pm_runtime.h> 28 27 #include <linux/ptrace.h> 29 28 #include <linux/errno.h> 30 29 #include <linux/ioport.h> ··· 77 78 #define FEC_ENET_RAEM_V 0x8 78 79 #define FEC_ENET_RAFL_V 0x8 79 80 #define FEC_ENET_OPD_V 0xFFF0 80 - #define FEC_MDIO_PM_TIMEOUT 100 /* ms */ 81 81 82 82 static struct platform_device_id fec_devtype[] = { 83 83 { ··· 1767 1769 static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) 1768 1770 { 1769 1771 struct fec_enet_private *fep = bus->priv; 1770 - struct device *dev = &fep->pdev->dev; 1771 1772 unsigned long time_left; 1772 - int ret = 0; 1773 - 1774 - ret = pm_runtime_get_sync(dev); 1775 - if (IS_ERR_VALUE(ret)) 1776 - return ret; 1777 1773 1778 1774 fep->mii_timeout = 0; 1779 1775 init_completion(&fep->mdio_done); ··· 1783 1791 if (time_left == 0) { 1784 1792 fep->mii_timeout = 1; 1785 1793 netdev_err(fep->netdev, "MDIO read timeout\n"); 1786 - ret = -ETIMEDOUT; 1787 - goto out; 1794 + return -ETIMEDOUT; 1788 1795 } 1789 1796 1790 - ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); 1791 - 1792 - out: 1793 - pm_runtime_mark_last_busy(dev); 1794 - pm_runtime_put_autosuspend(dev); 1795 - 1796 - return ret; 1797 + /* return value */ 1798 + return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); 1797 1799 } 1798 1800 1799 1801 static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, 1800 1802 u16 value) 1801 1803 { 1802 1804 struct fec_enet_private *fep = bus->priv; 1803 - struct device *dev = &fep->pdev->dev; 1804 1805 unsigned long time_left; 1805 - int ret = 0; 1806 - 1807 - ret = pm_runtime_get_sync(dev); 1808 - if (IS_ERR_VALUE(ret)) 1809 - return ret; 1810 1806 1811 1807 fep->mii_timeout = 0; 1812 1808 init_completion(&fep->mdio_done); ··· 1811 1831 if (time_left == 0) { 1812 1832 fep->mii_timeout = 1; 1813 1833 netdev_err(fep->netdev, "MDIO write timeout\n"); 1814 - ret = 
-ETIMEDOUT; 1834 + return -ETIMEDOUT; 1815 1835 } 1816 1836 1817 - pm_runtime_mark_last_busy(dev); 1818 - pm_runtime_put_autosuspend(dev); 1819 - 1820 - return ret; 1837 + return 0; 1821 1838 } 1822 1839 1823 1840 static int fec_enet_clk_enable(struct net_device *ndev, bool enable) ··· 1826 1849 ret = clk_prepare_enable(fep->clk_ahb); 1827 1850 if (ret) 1828 1851 return ret; 1852 + ret = clk_prepare_enable(fep->clk_ipg); 1853 + if (ret) 1854 + goto failed_clk_ipg; 1829 1855 if (fep->clk_enet_out) { 1830 1856 ret = clk_prepare_enable(fep->clk_enet_out); 1831 1857 if (ret) ··· 1852 1872 } 1853 1873 } else { 1854 1874 clk_disable_unprepare(fep->clk_ahb); 1875 + clk_disable_unprepare(fep->clk_ipg); 1855 1876 if (fep->clk_enet_out) 1856 1877 clk_disable_unprepare(fep->clk_enet_out); 1857 1878 if (fep->clk_ptp) { ··· 1874 1893 if (fep->clk_enet_out) 1875 1894 clk_disable_unprepare(fep->clk_enet_out); 1876 1895 failed_clk_enet_out: 1896 + clk_disable_unprepare(fep->clk_ipg); 1897 + failed_clk_ipg: 1877 1898 clk_disable_unprepare(fep->clk_ahb); 1878 1899 1879 1900 return ret; ··· 2847 2864 struct fec_enet_private *fep = netdev_priv(ndev); 2848 2865 int ret; 2849 2866 2850 - ret = pm_runtime_get_sync(&fep->pdev->dev); 2851 - if (IS_ERR_VALUE(ret)) 2852 - return ret; 2853 - 2854 2867 pinctrl_pm_select_default_state(&fep->pdev->dev); 2855 2868 ret = fec_enet_clk_enable(ndev, true); 2856 2869 if (ret) 2857 - goto clk_enable; 2870 + return ret; 2858 2871 2859 2872 /* I should reset the ring buffers here, but I don't yet know 2860 2873 * a simple way to do that. 
··· 2881 2902 fec_enet_free_buffers(ndev); 2882 2903 err_enet_alloc: 2883 2904 fec_enet_clk_enable(ndev, false); 2884 - clk_enable: 2885 - pm_runtime_mark_last_busy(&fep->pdev->dev); 2886 - pm_runtime_put_autosuspend(&fep->pdev->dev); 2887 2905 pinctrl_pm_select_sleep_state(&fep->pdev->dev); 2888 2906 return ret; 2889 2907 } ··· 2903 2927 2904 2928 fec_enet_clk_enable(ndev, false); 2905 2929 pinctrl_pm_select_sleep_state(&fep->pdev->dev); 2906 - pm_runtime_mark_last_busy(&fep->pdev->dev); 2907 - pm_runtime_put_autosuspend(&fep->pdev->dev); 2908 - 2909 2930 fec_enet_free_buffers(ndev); 2910 2931 2911 2932 return 0; ··· 3388 3415 if (ret) 3389 3416 goto failed_clk; 3390 3417 3391 - ret = clk_prepare_enable(fep->clk_ipg); 3392 - if (ret) 3393 - goto failed_clk_ipg; 3394 - 3395 3418 fep->reg_phy = devm_regulator_get(&pdev->dev, "phy"); 3396 3419 if (!IS_ERR(fep->reg_phy)) { 3397 3420 ret = regulator_enable(fep->reg_phy); ··· 3434 3465 netif_carrier_off(ndev); 3435 3466 fec_enet_clk_enable(ndev, false); 3436 3467 pinctrl_pm_select_sleep_state(&pdev->dev); 3437 - pm_runtime_set_active(&pdev->dev); 3438 - pm_runtime_enable(&pdev->dev); 3439 3468 3440 3469 ret = register_netdev(ndev); 3441 3470 if (ret) ··· 3447 3480 3448 3481 fep->rx_copybreak = COPYBREAK_DEFAULT; 3449 3482 INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work); 3450 - 3451 - pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT); 3452 - pm_runtime_use_autosuspend(&pdev->dev); 3453 - pm_runtime_mark_last_busy(&pdev->dev); 3454 - pm_runtime_put_autosuspend(&pdev->dev); 3455 - 3456 3483 return 0; 3457 3484 3458 3485 failed_register: ··· 3457 3496 if (fep->reg_phy) 3458 3497 regulator_disable(fep->reg_phy); 3459 3498 failed_regulator: 3460 - clk_disable_unprepare(fep->clk_ipg); 3461 - failed_clk_ipg: 3462 3499 fec_enet_clk_enable(ndev, false); 3463 3500 failed_clk: 3464 3501 failed_phy: ··· 3568 3609 return ret; 3569 3610 } 3570 3611 3571 - static int __maybe_unused fec_runtime_suspend(struct 
device *dev) 3572 - { 3573 - struct net_device *ndev = dev_get_drvdata(dev); 3574 - struct fec_enet_private *fep = netdev_priv(ndev); 3575 - 3576 - clk_disable_unprepare(fep->clk_ipg); 3577 - 3578 - return 0; 3579 - } 3580 - 3581 - static int __maybe_unused fec_runtime_resume(struct device *dev) 3582 - { 3583 - struct net_device *ndev = dev_get_drvdata(dev); 3584 - struct fec_enet_private *fep = netdev_priv(ndev); 3585 - 3586 - return clk_prepare_enable(fep->clk_ipg); 3587 - } 3588 - 3589 - static const struct dev_pm_ops fec_pm_ops = { 3590 - SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume) 3591 - SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL) 3592 - }; 3612 + static SIMPLE_DEV_PM_OPS(fec_pm_ops, fec_suspend, fec_resume); 3593 3613 3594 3614 static struct platform_driver fec_driver = { 3595 3615 .driver = {
+10 -12
drivers/net/ethernet/marvell/mvneta.c
··· 1462 1462 struct mvneta_rx_queue *rxq) 1463 1463 { 1464 1464 struct net_device *dev = pp->dev; 1465 - int rx_done, rx_filled; 1465 + int rx_done; 1466 1466 u32 rcvd_pkts = 0; 1467 1467 u32 rcvd_bytes = 0; 1468 1468 ··· 1473 1473 rx_todo = rx_done; 1474 1474 1475 1475 rx_done = 0; 1476 - rx_filled = 0; 1477 1476 1478 1477 /* Fairness NAPI loop */ 1479 1478 while (rx_done < rx_todo) { ··· 1483 1484 int rx_bytes, err; 1484 1485 1485 1486 rx_done++; 1486 - rx_filled++; 1487 1487 rx_status = rx_desc->status; 1488 1488 rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); 1489 1489 data = (unsigned char *)rx_desc->buf_cookie; ··· 1522 1524 continue; 1523 1525 } 1524 1526 1527 + /* Refill processing */ 1528 + err = mvneta_rx_refill(pp, rx_desc); 1529 + if (err) { 1530 + netdev_err(dev, "Linux processing - Can't refill\n"); 1531 + rxq->missed++; 1532 + goto err_drop_frame; 1533 + } 1534 + 1525 1535 skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size); 1526 1536 if (!skb) 1527 1537 goto err_drop_frame; ··· 1549 1543 mvneta_rx_csum(pp, rx_status, skb); 1550 1544 1551 1545 napi_gro_receive(&pp->napi, skb); 1552 - 1553 - /* Refill processing */ 1554 - err = mvneta_rx_refill(pp, rx_desc); 1555 - if (err) { 1556 - netdev_err(dev, "Linux processing - Can't refill\n"); 1557 - rxq->missed++; 1558 - rx_filled--; 1559 - } 1560 1546 } 1561 1547 1562 1548 if (rcvd_pkts) { ··· 1561 1563 } 1562 1564 1563 1565 /* Update rxq management counters */ 1564 - mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled); 1566 + mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); 1565 1567 1566 1568 return rx_done; 1567 1569 }
+38 -36
drivers/net/ethernet/renesas/ravb_main.c
··· 228 228 struct ravb_desc *desc = NULL; 229 229 int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q]; 230 230 int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q]; 231 - struct sk_buff *skb; 232 231 dma_addr_t dma_addr; 233 - void *buffer; 234 232 int i; 235 233 236 234 priv->cur_rx[q] = 0; ··· 239 241 memset(priv->rx_ring[q], 0, rx_ring_size); 240 242 /* Build RX ring buffer */ 241 243 for (i = 0; i < priv->num_rx_ring[q]; i++) { 242 - priv->rx_skb[q][i] = NULL; 243 - skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1); 244 - if (!skb) 245 - break; 246 - ravb_set_buffer_align(skb); 247 244 /* RX descriptor */ 248 245 rx_desc = &priv->rx_ring[q][i]; 249 246 /* The size of the buffer should be on 16-byte boundary. */ 250 247 rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16)); 251 - dma_addr = dma_map_single(&ndev->dev, skb->data, 248 + dma_addr = dma_map_single(&ndev->dev, priv->rx_skb[q][i]->data, 252 249 ALIGN(PKT_BUF_SZ, 16), 253 250 DMA_FROM_DEVICE); 254 - if (dma_mapping_error(&ndev->dev, dma_addr)) { 255 - dev_kfree_skb(skb); 256 - break; 257 - } 258 - priv->rx_skb[q][i] = skb; 251 + /* We just set the data size to 0 for a failed mapping which 252 + * should prevent DMA from happening... 
253 + */ 254 + if (dma_mapping_error(&ndev->dev, dma_addr)) 255 + rx_desc->ds_cc = cpu_to_le16(0); 259 256 rx_desc->dptr = cpu_to_le32(dma_addr); 260 257 rx_desc->die_dt = DT_FEMPTY; 261 258 } 262 259 rx_desc = &priv->rx_ring[q][i]; 263 260 rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]); 264 261 rx_desc->die_dt = DT_LINKFIX; /* type */ 265 - priv->dirty_rx[q] = (u32)(i - priv->num_rx_ring[q]); 266 262 267 263 memset(priv->tx_ring[q], 0, tx_ring_size); 268 264 /* Build TX ring buffer */ 269 265 for (i = 0; i < priv->num_tx_ring[q]; i++) { 270 - priv->tx_skb[q][i] = NULL; 271 - priv->tx_buffers[q][i] = NULL; 272 - buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL); 273 - if (!buffer) 274 - break; 275 - /* Aligned TX buffer */ 276 - priv->tx_buffers[q][i] = buffer; 277 266 tx_desc = &priv->tx_ring[q][i]; 278 267 tx_desc->die_dt = DT_EEMPTY; 279 268 } ··· 283 298 static int ravb_ring_init(struct net_device *ndev, int q) 284 299 { 285 300 struct ravb_private *priv = netdev_priv(ndev); 301 + struct sk_buff *skb; 286 302 int ring_size; 303 + void *buffer; 304 + int i; 287 305 288 306 /* Allocate RX and TX skb rings */ 289 307 priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q], ··· 296 308 if (!priv->rx_skb[q] || !priv->tx_skb[q]) 297 309 goto error; 298 310 311 + for (i = 0; i < priv->num_rx_ring[q]; i++) { 312 + skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1); 313 + if (!skb) 314 + goto error; 315 + ravb_set_buffer_align(skb); 316 + priv->rx_skb[q][i] = skb; 317 + } 318 + 299 319 /* Allocate rings for the aligned buffers */ 300 320 priv->tx_buffers[q] = kcalloc(priv->num_tx_ring[q], 301 321 sizeof(*priv->tx_buffers[q]), GFP_KERNEL); 302 322 if (!priv->tx_buffers[q]) 303 323 goto error; 324 + 325 + for (i = 0; i < priv->num_tx_ring[q]; i++) { 326 + buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL); 327 + if (!buffer) 328 + goto error; 329 + /* Aligned TX buffer */ 330 + priv->tx_buffers[q][i] = buffer; 331 + } 304 332 305 333 /* Allocate all 
RX descriptors. */ 306 334 ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1); ··· 528 524 if (--boguscnt < 0) 529 525 break; 530 526 527 + /* We use 0-byte descriptors to mark the DMA mapping errors */ 528 + if (!pkt_len) 529 + continue; 530 + 531 531 if (desc_status & MSC_MC) 532 532 stats->multicast++; 533 533 ··· 551 543 552 544 skb = priv->rx_skb[q][entry]; 553 545 priv->rx_skb[q][entry] = NULL; 554 - dma_sync_single_for_cpu(&ndev->dev, 555 - le32_to_cpu(desc->dptr), 556 - ALIGN(PKT_BUF_SZ, 16), 557 - DMA_FROM_DEVICE); 546 + dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr), 547 + ALIGN(PKT_BUF_SZ, 16), 548 + DMA_FROM_DEVICE); 558 549 get_ts &= (q == RAVB_NC) ? 559 550 RAVB_RXTSTAMP_TYPE_V2_L2_EVENT : 560 551 ~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT; ··· 591 584 if (!skb) 592 585 break; /* Better luck next round. */ 593 586 ravb_set_buffer_align(skb); 594 - dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr), 595 - ALIGN(PKT_BUF_SZ, 16), 596 - DMA_FROM_DEVICE); 597 587 dma_addr = dma_map_single(&ndev->dev, skb->data, 598 588 le16_to_cpu(desc->ds_cc), 599 589 DMA_FROM_DEVICE); 600 590 skb_checksum_none_assert(skb); 601 - if (dma_mapping_error(&ndev->dev, dma_addr)) { 602 - dev_kfree_skb_any(skb); 603 - break; 604 - } 591 + /* We just set the data size to 0 for a failed mapping 592 + * which should prevent DMA from happening... 
593 + */ 594 + if (dma_mapping_error(&ndev->dev, dma_addr)) 595 + desc->ds_cc = cpu_to_le16(0); 605 596 desc->dptr = cpu_to_le32(dma_addr); 606 597 priv->rx_skb[q][entry] = skb; 607 598 } ··· 1284 1279 u32 dma_addr; 1285 1280 void *buffer; 1286 1281 u32 entry; 1287 - u32 tccr; 1288 1282 1289 1283 spin_lock_irqsave(&priv->lock, flags); 1290 1284 if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q]) { ··· 1332 1328 dma_wmb(); 1333 1329 desc->die_dt = DT_FSINGLE; 1334 1330 1335 - tccr = ravb_read(ndev, TCCR); 1336 - if (!(tccr & (TCCR_TSRQ0 << q))) 1337 - ravb_write(ndev, tccr | (TCCR_TSRQ0 << q), TCCR); 1331 + ravb_write(ndev, ravb_read(ndev, TCCR) | (TCCR_TSRQ0 << q), TCCR); 1338 1332 1339 1333 priv->cur_tx[q]++; 1340 1334 if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q] &&
+1 -1
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 2843 2843 if (res->mac) 2844 2844 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN); 2845 2845 2846 - dev_set_drvdata(device, priv); 2846 + dev_set_drvdata(device, priv->dev); 2847 2847 2848 2848 /* Verify driver arguments */ 2849 2849 stmmac_verify_args();
+3 -6
drivers/net/ethernet/ti/cpsw.c
··· 793 793 static int cpsw_poll(struct napi_struct *napi, int budget) 794 794 { 795 795 struct cpsw_priv *priv = napi_to_priv(napi); 796 - int num_tx, num_rx; 797 - 798 - num_tx = cpdma_chan_process(priv->txch, 128); 796 + int num_rx; 799 797 800 798 num_rx = cpdma_chan_process(priv->rxch, budget); 801 799 if (num_rx < budget) { ··· 808 810 } 809 811 } 810 812 811 - if (num_rx || num_tx) 812 - cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n", 813 - num_rx, num_tx); 813 + if (num_rx) 814 + cpsw_dbg(priv, intr, "poll %d rx pkts\n", num_rx); 814 815 815 816 return num_rx; 816 817 }
+1 -1
drivers/net/ethernet/ti/netcp_core.c
··· 1617 1617 } 1618 1618 mutex_unlock(&netcp_modules_lock); 1619 1619 1620 - netcp_rxpool_refill(netcp); 1621 1620 napi_enable(&netcp->rx_napi); 1622 1621 napi_enable(&netcp->tx_napi); 1623 1622 knav_queue_enable_notify(netcp->tx_compl_q); 1624 1623 knav_queue_enable_notify(netcp->rx_queue); 1624 + netcp_rxpool_refill(netcp); 1625 1625 netif_tx_wake_all_queues(ndev); 1626 1626 dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name); 1627 1627 return 0;
+6 -3
drivers/net/ipvlan/ipvlan.h
··· 67 67 struct ipvl_port *port; 68 68 struct net_device *phy_dev; 69 69 struct list_head addrs; 70 - int ipv4cnt; 71 - int ipv6cnt; 72 70 struct ipvl_pcpu_stats __percpu *pcpu_stats; 73 71 DECLARE_BITMAP(mac_filters, IPVLAN_MAC_FILTER_SIZE); 74 72 netdev_features_t sfeatures; ··· 104 106 return rcu_dereference(d->rx_handler_data); 105 107 } 106 108 109 + static inline struct ipvl_port *ipvlan_port_get_rcu_bh(const struct net_device *d) 110 + { 111 + return rcu_dereference_bh(d->rx_handler_data); 112 + } 113 + 107 114 static inline struct ipvl_port *ipvlan_port_get_rtnl(const struct net_device *d) 108 115 { 109 116 return rtnl_dereference(d->rx_handler_data); ··· 127 124 bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6); 128 125 struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port, 129 126 const void *iaddr, bool is_v6); 130 - void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync); 127 + void ipvlan_ht_addr_del(struct ipvl_addr *addr); 131 128 #endif /* __IPVLAN_H */
+2 -4
drivers/net/ipvlan/ipvlan_core.c
··· 85 85 hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]); 86 86 } 87 87 88 - void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync) 88 + void ipvlan_ht_addr_del(struct ipvl_addr *addr) 89 89 { 90 90 hlist_del_init_rcu(&addr->hlnode); 91 - if (sync) 92 - synchronize_rcu(); 93 91 } 94 92 95 93 struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan, ··· 529 531 int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) 530 532 { 531 533 struct ipvl_dev *ipvlan = netdev_priv(dev); 532 - struct ipvl_port *port = ipvlan_port_get_rcu(ipvlan->phy_dev); 534 + struct ipvl_port *port = ipvlan_port_get_rcu_bh(ipvlan->phy_dev); 533 535 534 536 if (!port) 535 537 goto out;
+19 -23
drivers/net/ipvlan/ipvlan_main.c
··· 153 153 else 154 154 dev->flags &= ~IFF_NOARP; 155 155 156 - if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) { 157 - list_for_each_entry(addr, &ipvlan->addrs, anode) 158 - ipvlan_ht_addr_add(ipvlan, addr); 159 - } 156 + list_for_each_entry(addr, &ipvlan->addrs, anode) 157 + ipvlan_ht_addr_add(ipvlan, addr); 158 + 160 159 return dev_uc_add(phy_dev, phy_dev->dev_addr); 161 160 } 162 161 ··· 170 171 171 172 dev_uc_del(phy_dev, phy_dev->dev_addr); 172 173 173 - if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) { 174 - list_for_each_entry(addr, &ipvlan->addrs, anode) 175 - ipvlan_ht_addr_del(addr, !dev->dismantle); 176 - } 174 + list_for_each_entry(addr, &ipvlan->addrs, anode) 175 + ipvlan_ht_addr_del(addr); 176 + 177 177 return 0; 178 178 } 179 179 ··· 469 471 ipvlan->port = port; 470 472 ipvlan->sfeatures = IPVLAN_FEATURES; 471 473 INIT_LIST_HEAD(&ipvlan->addrs); 472 - ipvlan->ipv4cnt = 0; 473 - ipvlan->ipv6cnt = 0; 474 474 475 475 /* TODO Probably put random address here to be presented to the 476 476 * world but keep using the physical-dev address for the outgoing ··· 504 508 struct ipvl_dev *ipvlan = netdev_priv(dev); 505 509 struct ipvl_addr *addr, *next; 506 510 507 - if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) { 508 - list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) { 509 - ipvlan_ht_addr_del(addr, !dev->dismantle); 510 - list_del(&addr->anode); 511 - } 511 + list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) { 512 + ipvlan_ht_addr_del(addr); 513 + list_del(&addr->anode); 514 + kfree_rcu(addr, rcu); 512 515 } 516 + 513 517 list_del_rcu(&ipvlan->pnode); 514 518 unregister_netdevice_queue(dev, head); 515 519 netdev_upper_dev_unlink(ipvlan->phy_dev, dev); ··· 623 627 memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr)); 624 628 addr->atype = IPVL_IPV6; 625 629 list_add_tail(&addr->anode, &ipvlan->addrs); 626 - ipvlan->ipv6cnt++; 630 + 627 631 /* If the interface is not up, the address will be added to the hash 628 632 * list by 
ipvlan_open. 629 633 */ ··· 641 645 if (!addr) 642 646 return; 643 647 644 - ipvlan_ht_addr_del(addr, true); 648 + ipvlan_ht_addr_del(addr); 645 649 list_del(&addr->anode); 646 - ipvlan->ipv6cnt--; 647 - WARN_ON(ipvlan->ipv6cnt < 0); 648 650 kfree_rcu(addr, rcu); 649 651 650 652 return; ··· 654 660 struct inet6_ifaddr *if6 = (struct inet6_ifaddr *)ptr; 655 661 struct net_device *dev = (struct net_device *)if6->idev->dev; 656 662 struct ipvl_dev *ipvlan = netdev_priv(dev); 663 + 664 + /* FIXME IPv6 autoconf calls us from bh without RTNL */ 665 + if (in_softirq()) 666 + return NOTIFY_DONE; 657 667 658 668 if (!netif_is_ipvlan(dev)) 659 669 return NOTIFY_DONE; ··· 697 699 memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr)); 698 700 addr->atype = IPVL_IPV4; 699 701 list_add_tail(&addr->anode, &ipvlan->addrs); 700 - ipvlan->ipv4cnt++; 702 + 701 703 /* If the interface is not up, the address will be added to the hash 702 704 * list by ipvlan_open. 703 705 */ ··· 715 717 if (!addr) 716 718 return; 717 719 718 - ipvlan_ht_addr_del(addr, true); 720 + ipvlan_ht_addr_del(addr); 719 721 list_del(&addr->anode); 720 - ipvlan->ipv4cnt--; 721 - WARN_ON(ipvlan->ipv4cnt < 0); 722 722 kfree_rcu(addr, rcu); 723 723 724 724 return;
+1 -1
drivers/net/phy/dp83867.c
··· 164 164 return ret; 165 165 } 166 166 167 - if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) || 167 + if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) && 168 168 (phydev->interface <= PHY_INTERFACE_MODE_RGMII_RXID)) { 169 169 val = phy_read_mmd_indirect(phydev, DP83867_RGMIICTL, 170 170 DP83867_DEVADDR, phydev->addr);
+17 -2
drivers/net/phy/mdio_bus.c
··· 421 421 { 422 422 struct phy_device *phydev = to_phy_device(dev); 423 423 struct phy_driver *phydrv = to_phy_driver(drv); 424 + const int num_ids = ARRAY_SIZE(phydev->c45_ids.device_ids); 425 + int i; 424 426 425 427 if (of_driver_match_device(dev, drv)) 426 428 return 1; ··· 430 428 if (phydrv->match_phy_device) 431 429 return phydrv->match_phy_device(phydev); 432 430 433 - return (phydrv->phy_id & phydrv->phy_id_mask) == 434 - (phydev->phy_id & phydrv->phy_id_mask); 431 + if (phydev->is_c45) { 432 + for (i = 1; i < num_ids; i++) { 433 + if (!(phydev->c45_ids.devices_in_package & (1 << i))) 434 + continue; 435 + 436 + if ((phydrv->phy_id & phydrv->phy_id_mask) == 437 + (phydev->c45_ids.device_ids[i] & 438 + phydrv->phy_id_mask)) 439 + return 1; 440 + } 441 + return 0; 442 + } else { 443 + return (phydrv->phy_id & phydrv->phy_id_mask) == 444 + (phydev->phy_id & phydrv->phy_id_mask); 445 + } 435 446 } 436 447 437 448 #ifdef CONFIG_PM
+1
drivers/net/usb/qmi_wwan.c
··· 757 757 {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ 758 758 {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */ 759 759 {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */ 760 + {QMI_FIXED_INTF(0x1199, 0x9041, 10)}, /* Sierra Wireless MC7305/MC7355 */ 760 761 {QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */ 761 762 {QMI_FIXED_INTF(0x1199, 0x9053, 8)}, /* Sierra Wireless Modem */ 762 763 {QMI_FIXED_INTF(0x1199, 0x9054, 8)}, /* Sierra Wireless Modem */
+2 -1
drivers/net/virtio_net.c
··· 1828 1828 else 1829 1829 vi->hdr_len = sizeof(struct virtio_net_hdr); 1830 1830 1831 - if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT)) 1831 + if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) || 1832 + virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) 1832 1833 vi->any_header_sg = true; 1833 1834 1834 1835 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
+1
drivers/net/wireless/ath/ath9k/hw.c
··· 279 279 return; 280 280 case AR9300_DEVID_QCA956X: 281 281 ah->hw_version.macVersion = AR_SREV_VERSION_9561; 282 + return; 282 283 } 283 284 284 285 val = REG_READ(ah, AR_SREV) & AR_SREV_ID;
+6
drivers/net/wireless/iwlwifi/iwl-fh.h
··· 438 438 #define RX_QUEUE_MASK 255 439 439 #define RX_QUEUE_SIZE_LOG 8 440 440 441 + /* 442 + * RX related structures and functions 443 + */ 444 + #define RX_FREE_BUFFERS 64 445 + #define RX_LOW_WATERMARK 8 446 + 441 447 /** 442 448 * struct iwl_rb_status - reserve buffer status 443 449 * host memory mapped FH registers
+5 -7
drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
··· 540 540 hw_addr = (const u8 *)(mac_override + 541 541 MAC_ADDRESS_OVERRIDE_FAMILY_8000); 542 542 543 - /* The byte order is little endian 16 bit, meaning 214365 */ 544 - data->hw_addr[0] = hw_addr[1]; 545 - data->hw_addr[1] = hw_addr[0]; 546 - data->hw_addr[2] = hw_addr[3]; 547 - data->hw_addr[3] = hw_addr[2]; 548 - data->hw_addr[4] = hw_addr[5]; 549 - data->hw_addr[5] = hw_addr[4]; 543 + /* 544 + * Store the MAC address from MAO section. 545 + * No byte swapping is required in MAO section 546 + */ 547 + memcpy(data->hw_addr, hw_addr, ETH_ALEN); 550 548 551 549 /* 552 550 * Force the use of the OTP MAC address in case of reserved MAC
+2 -1
drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
··· 660 660 * iwl_umac_scan_flags 661 661 *@IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request 662 662 * can be preempted by other scan requests with higher priority. 663 - * The low priority scan is aborted. 663 + * The low priority scan will be resumed when the higher proirity scan is 664 + * completed. 664 665 *@IWL_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver 665 666 * when scan starts. 666 667 */
+3
drivers/net/wireless/iwlwifi/mvm/scan.c
··· 1109 1109 cmd->uid = cpu_to_le32(uid); 1110 1110 cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params)); 1111 1111 1112 + if (type == IWL_MVM_SCAN_SCHED) 1113 + cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE); 1114 + 1112 1115 if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations)) 1113 1116 cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS | 1114 1117 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+2 -1
drivers/net/wireless/iwlwifi/mvm/sta.c
··· 1401 1401 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); 1402 1402 u8 sta_id; 1403 1403 int ret; 1404 + static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0}; 1404 1405 1405 1406 lockdep_assert_held(&mvm->mutex); 1406 1407 ··· 1468 1467 end: 1469 1468 IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n", 1470 1469 keyconf->cipher, keyconf->keylen, keyconf->keyidx, 1471 - sta->addr, ret); 1470 + sta ? sta->addr : zero_addr, ret); 1472 1471 return ret; 1473 1472 } 1474 1473
+1 -1
drivers/net/wireless/iwlwifi/mvm/time-event.c
··· 86 86 { 87 87 lockdep_assert_held(&mvm->time_event_lock); 88 88 89 - if (te_data->id == TE_MAX) 89 + if (!te_data->vif) 90 90 return; 91 91 92 92 list_del(&te_data->list);
+1 -1
drivers/net/wireless/iwlwifi/mvm/tx.c
··· 252 252 253 253 if (info->band == IEEE80211_BAND_2GHZ && 254 254 !iwl_mvm_bt_coex_is_shared_ant_avail(mvm)) 255 - rate_flags = BIT(mvm->cfg->non_shared_ant) << RATE_MCS_ANT_POS; 255 + rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS; 256 256 else 257 257 rate_flags = 258 258 BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
+3 -2
drivers/net/wireless/iwlwifi/pcie/drv.c
··· 368 368 /* 3165 Series */ 369 369 {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)}, 370 370 {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)}, 371 + {IWL_PCI_DEVICE(0x3166, 0x4212, iwl3165_2ac_cfg)}, 371 372 {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)}, 372 373 {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)}, 373 374 {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)}, 374 375 {IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)}, 375 376 {IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)}, 376 377 {IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)}, 378 + {IWL_PCI_DEVICE(0x3165, 0x8110, iwl3165_2ac_cfg)}, 377 379 378 380 /* 7265 Series */ 379 381 {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, ··· 428 426 {IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)}, 429 427 {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)}, 430 428 {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)}, 429 + {IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)}, 431 430 {IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)}, 432 - {IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8260_2ac_cfg)}, 433 - {IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)}, 434 431 {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)}, 435 432 {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)}, 436 433 {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)},
+8 -43
drivers/net/wireless/iwlwifi/pcie/internal.h
··· 44 44 #include "iwl-io.h" 45 45 #include "iwl-op-mode.h" 46 46 47 - /* 48 - * RX related structures and functions 49 - */ 50 - #define RX_NUM_QUEUES 1 51 - #define RX_POST_REQ_ALLOC 2 52 - #define RX_CLAIM_REQ_ALLOC 8 53 - #define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES) 54 - #define RX_LOW_WATERMARK 8 55 - 56 47 struct iwl_host_cmd; 57 48 58 49 /*This file includes the declaration that are internal to the ··· 77 86 * struct iwl_rxq - Rx queue 78 87 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd) 79 88 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd) 89 + * @pool: 90 + * @queue: 80 91 * @read: Shared index to newest available Rx buffer 81 92 * @write: Shared index to oldest written Rx packet 82 93 * @free_count: Number of pre-allocated buffers in rx_free 83 - * @used_count: Number of RBDs handled to allocator to use for allocation 84 94 * @write_actual: 85 - * @rx_free: list of RBDs with allocated RB ready for use 86 - * @rx_used: list of RBDs with no RB attached 95 + * @rx_free: list of free SKBs for use 96 + * @rx_used: List of Rx buffers with no SKB 87 97 * @need_update: flag to indicate we need to update read/write index 88 98 * @rb_stts: driver's pointer to receive buffer status 89 99 * @rb_stts_dma: bus address of receive buffer status 90 100 * @lock: 91 - * @pool: initial pool of iwl_rx_mem_buffer for the queue 92 - * @queue: actual rx queue 93 101 * 94 102 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers 95 103 */ 96 104 struct iwl_rxq { 97 105 __le32 *bd; 98 106 dma_addr_t bd_dma; 107 + struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS]; 108 + struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; 99 109 u32 read; 100 110 u32 write; 101 111 u32 free_count; 102 - u32 used_count; 103 112 u32 write_actual; 104 113 struct list_head rx_free; 105 114 struct list_head rx_used; ··· 107 116 struct iwl_rb_status *rb_stts; 108 117 dma_addr_t rb_stts_dma; 109 118 
spinlock_t lock; 110 - struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE]; 111 - struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; 112 - }; 113 - 114 - /** 115 - * struct iwl_rb_allocator - Rx allocator 116 - * @pool: initial pool of allocator 117 - * @req_pending: number of requests the allcator had not processed yet 118 - * @req_ready: number of requests honored and ready for claiming 119 - * @rbd_allocated: RBDs with pages allocated and ready to be handled to 120 - * the queue. This is a list of &struct iwl_rx_mem_buffer 121 - * @rbd_empty: RBDs with no page attached for allocator use. This is a list 122 - * of &struct iwl_rx_mem_buffer 123 - * @lock: protects the rbd_allocated and rbd_empty lists 124 - * @alloc_wq: work queue for background calls 125 - * @rx_alloc: work struct for background calls 126 - */ 127 - struct iwl_rb_allocator { 128 - struct iwl_rx_mem_buffer pool[RX_POOL_SIZE]; 129 - atomic_t req_pending; 130 - atomic_t req_ready; 131 - struct list_head rbd_allocated; 132 - struct list_head rbd_empty; 133 - spinlock_t lock; 134 - struct workqueue_struct *alloc_wq; 135 - struct work_struct rx_alloc; 136 119 }; 137 120 138 121 struct iwl_dma_ptr { ··· 250 285 /** 251 286 * struct iwl_trans_pcie - PCIe transport specific data 252 287 * @rxq: all the RX queue data 253 - * @rba: allocator for RX replenishing 288 + * @rx_replenish: work that will be called when buffers need to be allocated 254 289 * @drv - pointer to iwl_drv 255 290 * @trans: pointer to the generic transport area 256 291 * @scd_base_addr: scheduler sram base address in SRAM ··· 273 308 */ 274 309 struct iwl_trans_pcie { 275 310 struct iwl_rxq rxq; 276 - struct iwl_rb_allocator rba; 311 + struct work_struct rx_replenish; 277 312 struct iwl_trans *trans; 278 313 struct iwl_drv *drv; 279 314
+83 -331
drivers/net/wireless/iwlwifi/pcie/rx.c
··· 1 1 /****************************************************************************** 2 2 * 3 3 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. 4 - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 4 + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 5 5 * 6 6 * Portions of this file are derived from the ipw3945 project, as well 7 7 * as portions of the ieee80211 subsystem header files. ··· 74 74 * resets the Rx queue buffers with new memory. 75 75 * 76 76 * The management in the driver is as follows: 77 - * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free. 78 - * When the interrupt handler is called, the request is processed. 79 - * The page is either stolen - transferred to the upper layer 80 - * or reused - added immediately to the iwl->rxq->rx_free list. 81 - * + When the page is stolen - the driver updates the matching queue's used 82 - * count, detaches the RBD and transfers it to the queue used list. 83 - * When there are two used RBDs - they are transferred to the allocator empty 84 - * list. Work is then scheduled for the allocator to start allocating 85 - * eight buffers. 86 - * When there are another 6 used RBDs - they are transferred to the allocator 87 - * empty list and the driver tries to claim the pre-allocated buffers and 88 - * add them to iwl->rxq->rx_free. If it fails - it continues to claim them 89 - * until ready. 90 - * When there are 8+ buffers in the free list - either from allocation or from 91 - * 8 reused unstolen pages - restock is called to update the FW and indexes. 92 - * + In order to make sure the allocator always has RBDs to use for allocation 93 - * the allocator has initial pool in the size of num_queues*(8-2) - the 94 - * maximum missing RBDs per allocation request (request posted with 2 95 - * empty RBDs, there is no guarantee when the other 6 RBDs are supplied). 96 - * The queues supplies the recycle of the rest of the RBDs. 
77 + * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When 78 + * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled 79 + * to replenish the iwl->rxq->rx_free. 80 + * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the 81 + * iwl->rxq is replenished and the READ INDEX is updated (updating the 82 + * 'processed' and 'read' driver indexes as well) 97 83 * + A received packet is processed and handed to the kernel network stack, 98 84 * detached from the iwl->rxq. The driver 'processed' index is updated. 99 - * + If there are no allocated buffers in iwl->rxq->rx_free, 85 + * + The Host/Firmware iwl->rxq is replenished at irq thread time from the 86 + * rx_free list. If there are no allocated buffers in iwl->rxq->rx_free, 100 87 * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set. 101 88 * If there were enough free buffers and RX_STALLED is set it is cleared. 102 89 * ··· 92 105 * 93 106 * iwl_rxq_alloc() Allocates rx_free 94 107 * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls 95 - * iwl_pcie_rxq_restock. 96 - * Used only during initialization. 108 + * iwl_pcie_rxq_restock 97 109 * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx 98 110 * queue, updates firmware pointers, and updates 99 - * the WRITE index. 100 - * iwl_pcie_rx_allocator() Background work for allocating pages. 111 + * the WRITE index. If insufficient rx_free buffers 112 + * are available, schedules iwl_pcie_rx_replenish 101 113 * 102 114 * -- enable interrupts -- 103 115 * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the 104 116 * READ INDEX, detaching the SKB from the pool. 105 117 * Moves the packet buffer from queue to rx_used. 106 - * Posts and claims requests to the allocator. 107 118 * Calls iwl_pcie_rxq_restock to refill any empty 108 119 * slots. 
109 - * 110 - * RBD life-cycle: 111 - * 112 - * Init: 113 - * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue 114 - * 115 - * Regular Receive interrupt: 116 - * Page Stolen: 117 - * rxq.queue -> rxq.rx_used -> allocator.rbd_empty -> 118 - * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue 119 - * Page not Stolen: 120 - * rxq.queue -> rxq.rx_free -> rxq.queue 121 120 * ... 122 121 * 123 122 */ ··· 240 267 rxq->free_count--; 241 268 } 242 269 spin_unlock(&rxq->lock); 270 + /* If the pre-allocated buffer pool is dropping low, schedule to 271 + * refill it */ 272 + if (rxq->free_count <= RX_LOW_WATERMARK) 273 + schedule_work(&trans_pcie->rx_replenish); 243 274 244 275 /* If we've added more space for the firmware to place data, tell it. 245 276 * Increment device's write pointer in multiples of 8. */ ··· 255 278 } 256 279 257 280 /* 258 - * iwl_pcie_rx_alloc_page - allocates and returns a page. 259 - * 260 - */ 261 - static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans) 262 - { 263 - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 264 - struct iwl_rxq *rxq = &trans_pcie->rxq; 265 - struct page *page; 266 - gfp_t gfp_mask = GFP_KERNEL; 267 - 268 - if (rxq->free_count > RX_LOW_WATERMARK) 269 - gfp_mask |= __GFP_NOWARN; 270 - 271 - if (trans_pcie->rx_page_order > 0) 272 - gfp_mask |= __GFP_COMP; 273 - 274 - /* Alloc a new receive buffer */ 275 - page = alloc_pages(gfp_mask, trans_pcie->rx_page_order); 276 - if (!page) { 277 - if (net_ratelimit()) 278 - IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n", 279 - trans_pcie->rx_page_order); 280 - /* Issue an error if the hardware has consumed more than half 281 - * of its free buffer list and we don't have enough 282 - * pre-allocated buffers. 283 - ` */ 284 - if (rxq->free_count <= RX_LOW_WATERMARK && 285 - iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) && 286 - net_ratelimit()) 287 - IWL_CRIT(trans, 288 - "Failed to alloc_pages with GFP_KERNEL. 
Only %u free buffers remaining.\n", 289 - rxq->free_count); 290 - return NULL; 291 - } 292 - return page; 293 - } 294 - 295 - /* 296 281 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD 297 282 * 298 283 * A used RBD is an Rx buffer that has been given to the stack. To use it again ··· 263 324 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly 264 325 * allocated buffers. 265 326 */ 266 - static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans) 327 + static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority) 267 328 { 268 329 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 269 330 struct iwl_rxq *rxq = &trans_pcie->rxq; 270 331 struct iwl_rx_mem_buffer *rxb; 271 332 struct page *page; 333 + gfp_t gfp_mask = priority; 272 334 273 335 while (1) { 274 336 spin_lock(&rxq->lock); ··· 279 339 } 280 340 spin_unlock(&rxq->lock); 281 341 342 + if (rxq->free_count > RX_LOW_WATERMARK) 343 + gfp_mask |= __GFP_NOWARN; 344 + 345 + if (trans_pcie->rx_page_order > 0) 346 + gfp_mask |= __GFP_COMP; 347 + 282 348 /* Alloc a new receive buffer */ 283 - page = iwl_pcie_rx_alloc_page(trans); 284 - if (!page) 349 + page = alloc_pages(gfp_mask, trans_pcie->rx_page_order); 350 + if (!page) { 351 + if (net_ratelimit()) 352 + IWL_DEBUG_INFO(trans, "alloc_pages failed, " 353 + "order: %d\n", 354 + trans_pcie->rx_page_order); 355 + 356 + if ((rxq->free_count <= RX_LOW_WATERMARK) && 357 + net_ratelimit()) 358 + IWL_CRIT(trans, "Failed to alloc_pages with %s." 359 + "Only %u free buffers remaining.\n", 360 + priority == GFP_ATOMIC ? 
361 + "GFP_ATOMIC" : "GFP_KERNEL", 362 + rxq->free_count); 363 + /* We don't reschedule replenish work here -- we will 364 + * call the restock method and if it still needs 365 + * more buffers it will schedule replenish */ 285 366 return; 367 + } 286 368 287 369 spin_lock(&rxq->lock); 288 370 ··· 355 393 356 394 lockdep_assert_held(&rxq->lock); 357 395 358 - for (i = 0; i < RX_QUEUE_SIZE; i++) { 396 + for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { 359 397 if (!rxq->pool[i].page) 360 398 continue; 361 399 dma_unmap_page(trans->dev, rxq->pool[i].page_dma, ··· 372 410 * When moving to rx_free an page is allocated for the slot. 373 411 * 374 412 * Also restock the Rx queue via iwl_pcie_rxq_restock. 375 - * This is called only during initialization 413 + * This is called as a scheduled work item (except for during initialization) 376 414 */ 377 - static void iwl_pcie_rx_replenish(struct iwl_trans *trans) 415 + static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp) 378 416 { 379 - iwl_pcie_rxq_alloc_rbs(trans); 417 + iwl_pcie_rxq_alloc_rbs(trans, gfp); 380 418 381 419 iwl_pcie_rxq_restock(trans); 382 420 } 383 421 384 - /* 385 - * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues 386 - * 387 - * Allocates for each received request 8 pages 388 - * Called as a scheduled work item. 
389 - */ 390 - static void iwl_pcie_rx_allocator(struct iwl_trans *trans) 422 + static void iwl_pcie_rx_replenish_work(struct work_struct *data) 391 423 { 392 - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 393 - struct iwl_rb_allocator *rba = &trans_pcie->rba; 394 - 395 - while (atomic_read(&rba->req_pending)) { 396 - int i; 397 - struct list_head local_empty; 398 - struct list_head local_allocated; 399 - 400 - INIT_LIST_HEAD(&local_allocated); 401 - spin_lock(&rba->lock); 402 - /* swap out the entire rba->rbd_empty to a local list */ 403 - list_replace_init(&rba->rbd_empty, &local_empty); 404 - spin_unlock(&rba->lock); 405 - 406 - for (i = 0; i < RX_CLAIM_REQ_ALLOC;) { 407 - struct iwl_rx_mem_buffer *rxb; 408 - struct page *page; 409 - 410 - /* List should never be empty - each reused RBD is 411 - * returned to the list, and initial pool covers any 412 - * possible gap between the time the page is allocated 413 - * to the time the RBD is added. 414 - */ 415 - BUG_ON(list_empty(&local_empty)); 416 - /* Get the first rxb from the rbd list */ 417 - rxb = list_first_entry(&local_empty, 418 - struct iwl_rx_mem_buffer, list); 419 - BUG_ON(rxb->page); 420 - 421 - /* Alloc a new receive buffer */ 422 - page = iwl_pcie_rx_alloc_page(trans); 423 - if (!page) 424 - continue; 425 - rxb->page = page; 426 - 427 - /* Get physical address of the RB */ 428 - rxb->page_dma = dma_map_page(trans->dev, page, 0, 429 - PAGE_SIZE << trans_pcie->rx_page_order, 430 - DMA_FROM_DEVICE); 431 - if (dma_mapping_error(trans->dev, rxb->page_dma)) { 432 - rxb->page = NULL; 433 - __free_pages(page, trans_pcie->rx_page_order); 434 - continue; 435 - } 436 - /* dma address must be no more than 36 bits */ 437 - BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); 438 - /* and also 256 byte aligned! 
*/ 439 - BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)); 440 - 441 - /* move the allocated entry to the out list */ 442 - list_move(&rxb->list, &local_allocated); 443 - i++; 444 - } 445 - 446 - spin_lock(&rba->lock); 447 - /* add the allocated rbds to the allocator allocated list */ 448 - list_splice_tail(&local_allocated, &rba->rbd_allocated); 449 - /* add the unused rbds back to the allocator empty list */ 450 - list_splice_tail(&local_empty, &rba->rbd_empty); 451 - spin_unlock(&rba->lock); 452 - 453 - atomic_dec(&rba->req_pending); 454 - atomic_inc(&rba->req_ready); 455 - } 456 - } 457 - 458 - /* 459 - * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages 460 - .* 461 - .* Called by queue when the queue posted allocation request and 462 - * has freed 8 RBDs in order to restock itself. 463 - */ 464 - static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans, 465 - struct iwl_rx_mem_buffer 466 - *out[RX_CLAIM_REQ_ALLOC]) 467 - { 468 - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 469 - struct iwl_rb_allocator *rba = &trans_pcie->rba; 470 - int i; 471 - 472 - if (atomic_dec_return(&rba->req_ready) < 0) { 473 - atomic_inc(&rba->req_ready); 474 - IWL_DEBUG_RX(trans, 475 - "Allocation request not ready, pending requests = %d\n", 476 - atomic_read(&rba->req_pending)); 477 - return -ENOMEM; 478 - } 479 - 480 - spin_lock(&rba->lock); 481 - for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) { 482 - /* Get next free Rx buffer, remove it from free list */ 483 - out[i] = list_first_entry(&rba->rbd_allocated, 484 - struct iwl_rx_mem_buffer, list); 485 - list_del(&out[i]->list); 486 - } 487 - spin_unlock(&rba->lock); 488 - 489 - return 0; 490 - } 491 - 492 - static void iwl_pcie_rx_allocator_work(struct work_struct *data) 493 - { 494 - struct iwl_rb_allocator *rba_p = 495 - container_of(data, struct iwl_rb_allocator, rx_alloc); 496 424 struct iwl_trans_pcie *trans_pcie = 497 - container_of(rba_p, struct iwl_trans_pcie, rba); 425 + container_of(data, struct 
iwl_trans_pcie, rx_replenish); 498 426 499 - iwl_pcie_rx_allocator(trans_pcie->trans); 427 + iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL); 500 428 } 501 429 502 430 static int iwl_pcie_rx_alloc(struct iwl_trans *trans) 503 431 { 504 432 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 505 433 struct iwl_rxq *rxq = &trans_pcie->rxq; 506 - struct iwl_rb_allocator *rba = &trans_pcie->rba; 507 434 struct device *dev = trans->dev; 508 435 509 436 memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq)); 510 437 511 438 spin_lock_init(&rxq->lock); 512 - spin_lock_init(&rba->lock); 513 439 514 440 if (WARN_ON(rxq->bd || rxq->rb_stts)) 515 441 return -EINVAL; ··· 487 637 INIT_LIST_HEAD(&rxq->rx_free); 488 638 INIT_LIST_HEAD(&rxq->rx_used); 489 639 rxq->free_count = 0; 490 - rxq->used_count = 0; 491 640 492 - for (i = 0; i < RX_QUEUE_SIZE; i++) 641 + for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) 493 642 list_add(&rxq->pool[i].list, &rxq->rx_used); 494 - } 495 - 496 - static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba) 497 - { 498 - int i; 499 - 500 - lockdep_assert_held(&rba->lock); 501 - 502 - INIT_LIST_HEAD(&rba->rbd_allocated); 503 - INIT_LIST_HEAD(&rba->rbd_empty); 504 - 505 - for (i = 0; i < RX_POOL_SIZE; i++) 506 - list_add(&rba->pool[i].list, &rba->rbd_empty); 507 - } 508 - 509 - static void iwl_pcie_rx_free_rba(struct iwl_trans *trans) 510 - { 511 - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 512 - struct iwl_rb_allocator *rba = &trans_pcie->rba; 513 - int i; 514 - 515 - lockdep_assert_held(&rba->lock); 516 - 517 - for (i = 0; i < RX_POOL_SIZE; i++) { 518 - if (!rba->pool[i].page) 519 - continue; 520 - dma_unmap_page(trans->dev, rba->pool[i].page_dma, 521 - PAGE_SIZE << trans_pcie->rx_page_order, 522 - DMA_FROM_DEVICE); 523 - __free_pages(rba->pool[i].page, trans_pcie->rx_page_order); 524 - rba->pool[i].page = NULL; 525 - } 526 643 } 527 644 528 645 int iwl_pcie_rx_init(struct iwl_trans *trans) 529 
646 { 530 647 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 531 648 struct iwl_rxq *rxq = &trans_pcie->rxq; 532 - struct iwl_rb_allocator *rba = &trans_pcie->rba; 533 649 int i, err; 534 650 535 651 if (!rxq->bd) { ··· 503 687 if (err) 504 688 return err; 505 689 } 506 - if (!rba->alloc_wq) 507 - rba->alloc_wq = alloc_workqueue("rb_allocator", 508 - WQ_HIGHPRI | WQ_UNBOUND, 1); 509 - INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work); 510 - 511 - spin_lock(&rba->lock); 512 - atomic_set(&rba->req_pending, 0); 513 - atomic_set(&rba->req_ready, 0); 514 - /* free all first - we might be reconfigured for a different size */ 515 - iwl_pcie_rx_free_rba(trans); 516 - iwl_pcie_rx_init_rba(rba); 517 - spin_unlock(&rba->lock); 518 690 519 691 spin_lock(&rxq->lock); 692 + 693 + INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work); 520 694 521 695 /* free all first - we might be reconfigured for a different size */ 522 696 iwl_pcie_rxq_free_rbs(trans); ··· 522 716 memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts)); 523 717 spin_unlock(&rxq->lock); 524 718 525 - iwl_pcie_rx_replenish(trans); 719 + iwl_pcie_rx_replenish(trans, GFP_KERNEL); 526 720 527 721 iwl_pcie_rx_hw_init(trans, rxq); 528 722 ··· 537 731 { 538 732 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 539 733 struct iwl_rxq *rxq = &trans_pcie->rxq; 540 - struct iwl_rb_allocator *rba = &trans_pcie->rba; 541 734 542 735 /*if rxq->bd is NULL, it means that nothing has been allocated, 543 736 * exit now */ ··· 545 740 return; 546 741 } 547 742 548 - cancel_work_sync(&rba->rx_alloc); 549 - if (rba->alloc_wq) { 550 - destroy_workqueue(rba->alloc_wq); 551 - rba->alloc_wq = NULL; 552 - } 553 - 554 - spin_lock(&rba->lock); 555 - iwl_pcie_rx_free_rba(trans); 556 - spin_unlock(&rba->lock); 743 + cancel_work_sync(&trans_pcie->rx_replenish); 557 744 558 745 spin_lock(&rxq->lock); 559 746 iwl_pcie_rxq_free_rbs(trans); ··· 564 767 IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which 
is NULL\n"); 565 768 rxq->rb_stts_dma = 0; 566 769 rxq->rb_stts = NULL; 567 - } 568 - 569 - /* 570 - * iwl_pcie_rx_reuse_rbd - Recycle used RBDs 571 - * 572 - * Called when a RBD can be reused. The RBD is transferred to the allocator. 573 - * When there are 2 empty RBDs - a request for allocation is posted 574 - */ 575 - static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans, 576 - struct iwl_rx_mem_buffer *rxb, 577 - struct iwl_rxq *rxq) 578 - { 579 - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 580 - struct iwl_rb_allocator *rba = &trans_pcie->rba; 581 - 582 - /* Count the used RBDs */ 583 - rxq->used_count++; 584 - 585 - /* Move the RBD to the used list, will be moved to allocator in batches 586 - * before claiming or posting a request*/ 587 - list_add_tail(&rxb->list, &rxq->rx_used); 588 - 589 - /* If we have RX_POST_REQ_ALLOC new released rx buffers - 590 - * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is 591 - * used for the case we failed to claim RX_CLAIM_REQ_ALLOC, 592 - * after but we still need to post another request. 593 - */ 594 - if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) { 595 - /* Move the 2 RBDs to the allocator ownership. 
596 - Allocator has another 6 from pool for the request completion*/ 597 - spin_lock(&rba->lock); 598 - list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); 599 - spin_unlock(&rba->lock); 600 - 601 - atomic_inc(&rba->req_pending); 602 - queue_work(rba->alloc_wq, &rba->rx_alloc); 603 - } 604 770 } 605 771 606 772 static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, ··· 688 928 */ 689 929 __free_pages(rxb->page, trans_pcie->rx_page_order); 690 930 rxb->page = NULL; 691 - iwl_pcie_rx_reuse_rbd(trans, rxb, rxq); 931 + list_add_tail(&rxb->list, &rxq->rx_used); 692 932 } else { 693 933 list_add_tail(&rxb->list, &rxq->rx_free); 694 934 rxq->free_count++; 695 935 } 696 936 } else 697 - iwl_pcie_rx_reuse_rbd(trans, rxb, rxq); 937 + list_add_tail(&rxb->list, &rxq->rx_used); 698 938 } 699 939 700 940 /* ··· 704 944 { 705 945 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 706 946 struct iwl_rxq *rxq = &trans_pcie->rxq; 707 - u32 r, i, j; 947 + u32 r, i; 948 + u8 fill_rx = 0; 949 + u32 count = 8; 950 + int total_empty; 708 951 709 952 restart: 710 953 spin_lock(&rxq->lock); ··· 720 957 if (i == r) 721 958 IWL_DEBUG_RX(trans, "HW = SW = %d\n", r); 722 959 960 + /* calculate total frames need to be restock after handling RX */ 961 + total_empty = r - rxq->write_actual; 962 + if (total_empty < 0) 963 + total_empty += RX_QUEUE_SIZE; 964 + 965 + if (total_empty > (RX_QUEUE_SIZE / 2)) 966 + fill_rx = 1; 967 + 723 968 while (i != r) { 724 969 struct iwl_rx_mem_buffer *rxb; 725 970 ··· 739 968 iwl_pcie_rx_handle_rb(trans, rxb); 740 969 741 970 i = (i + 1) & RX_QUEUE_MASK; 742 - 743 - /* If we have RX_CLAIM_REQ_ALLOC released rx buffers - 744 - * try to claim the pre-allocated buffers from the allocator */ 745 - if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) { 746 - struct iwl_rb_allocator *rba = &trans_pcie->rba; 747 - struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC]; 748 - 749 - /* Add the remaining 6 empty RBDs for allocator use */ 750 - 
spin_lock(&rba->lock); 751 - list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); 752 - spin_unlock(&rba->lock); 753 - 754 - /* If not ready - continue, will try to reclaim later. 755 - * No need to reschedule work - allocator exits only on 756 - * success */ 757 - if (!iwl_pcie_rx_allocator_get(trans, out)) { 758 - /* If success - then RX_CLAIM_REQ_ALLOC 759 - * buffers were retrieved and should be added 760 - * to free list */ 761 - rxq->used_count -= RX_CLAIM_REQ_ALLOC; 762 - for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) { 763 - list_add_tail(&out[j]->list, 764 - &rxq->rx_free); 765 - rxq->free_count++; 766 - } 971 + /* If there are a lot of unused frames, 972 + * restock the Rx queue so ucode wont assert. */ 973 + if (fill_rx) { 974 + count++; 975 + if (count >= 8) { 976 + rxq->read = i; 977 + spin_unlock(&rxq->lock); 978 + iwl_pcie_rx_replenish(trans, GFP_ATOMIC); 979 + count = 0; 980 + goto restart; 767 981 } 768 - } 769 - /* handle restock for two cases: 770 - * - we just pulled buffers from the allocator 771 - * - we have 8+ unstolen pages accumulated */ 772 - if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) { 773 - rxq->read = i; 774 - spin_unlock(&rxq->lock); 775 - iwl_pcie_rxq_restock(trans); 776 - goto restart; 777 982 } 778 983 } 779 984 780 985 /* Backtrack one entry */ 781 986 rxq->read = i; 782 987 spin_unlock(&rxq->lock); 988 + 989 + if (fill_rx) 990 + iwl_pcie_rx_replenish(trans, GFP_ATOMIC); 991 + else 992 + iwl_pcie_rxq_restock(trans); 783 993 784 994 if (trans_pcie->napi.poll) 785 995 napi_gro_flush(&trans_pcie->napi, false);
+29 -23
drivers/net/wireless/iwlwifi/pcie/trans.c
··· 182 182 183 183 static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux) 184 184 { 185 - if (!trans->cfg->apmg_not_supported) 185 + if (trans->cfg->apmg_not_supported) 186 186 return; 187 187 188 188 if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold)) ··· 2459 2459 struct iwl_trans_pcie *trans_pcie; 2460 2460 struct iwl_trans *trans; 2461 2461 u16 pci_cmd; 2462 - int err; 2462 + int ret; 2463 2463 2464 2464 trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), 2465 2465 &pdev->dev, cfg, &trans_ops_pcie, 0); ··· 2474 2474 spin_lock_init(&trans_pcie->ref_lock); 2475 2475 init_waitqueue_head(&trans_pcie->ucode_write_waitq); 2476 2476 2477 - err = pci_enable_device(pdev); 2478 - if (err) 2477 + ret = pci_enable_device(pdev); 2478 + if (ret) 2479 2479 goto out_no_pci; 2480 2480 2481 2481 if (!cfg->base_params->pcie_l1_allowed) { ··· 2491 2491 2492 2492 pci_set_master(pdev); 2493 2493 2494 - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); 2495 - if (!err) 2496 - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); 2497 - if (err) { 2498 - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2499 - if (!err) 2500 - err = pci_set_consistent_dma_mask(pdev, 2494 + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); 2495 + if (!ret) 2496 + ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); 2497 + if (ret) { 2498 + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2499 + if (!ret) 2500 + ret = pci_set_consistent_dma_mask(pdev, 2501 2501 DMA_BIT_MASK(32)); 2502 2502 /* both attempts failed: */ 2503 - if (err) { 2503 + if (ret) { 2504 2504 dev_err(&pdev->dev, "No suitable DMA available\n"); 2505 2505 goto out_pci_disable_device; 2506 2506 } 2507 2507 } 2508 2508 2509 - err = pci_request_regions(pdev, DRV_NAME); 2510 - if (err) { 2509 + ret = pci_request_regions(pdev, DRV_NAME); 2510 + if (ret) { 2511 2511 dev_err(&pdev->dev, "pci_request_regions failed\n"); 2512 2512 goto out_pci_disable_device; 2513 2513 } ··· 2515 2515 trans_pcie->hw_base = 
pci_ioremap_bar(pdev, 0); 2516 2516 if (!trans_pcie->hw_base) { 2517 2517 dev_err(&pdev->dev, "pci_ioremap_bar failed\n"); 2518 - err = -ENODEV; 2518 + ret = -ENODEV; 2519 2519 goto out_pci_release_regions; 2520 2520 } 2521 2521 ··· 2527 2527 trans_pcie->pci_dev = pdev; 2528 2528 iwl_disable_interrupts(trans); 2529 2529 2530 - err = pci_enable_msi(pdev); 2531 - if (err) { 2532 - dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err); 2530 + ret = pci_enable_msi(pdev); 2531 + if (ret) { 2532 + dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret); 2533 2533 /* enable rfkill interrupt: hw bug w/a */ 2534 2534 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); 2535 2535 if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { ··· 2547 2547 */ 2548 2548 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) { 2549 2549 unsigned long flags; 2550 - int ret; 2551 2550 2552 2551 trans->hw_rev = (trans->hw_rev & 0xfff0) | 2553 2552 (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2); 2553 + 2554 + ret = iwl_pcie_prepare_card_hw(trans); 2555 + if (ret) { 2556 + IWL_WARN(trans, "Exit HW not ready\n"); 2557 + goto out_pci_disable_msi; 2558 + } 2554 2559 2555 2560 /* 2556 2561 * in-order to recognize C step driver should read chip version ··· 2596 2591 /* Initialize the wait queue for commands */ 2597 2592 init_waitqueue_head(&trans_pcie->wait_command_queue); 2598 2593 2599 - if (iwl_pcie_alloc_ict(trans)) 2594 + ret = iwl_pcie_alloc_ict(trans); 2595 + if (ret) 2600 2596 goto out_pci_disable_msi; 2601 2597 2602 - err = request_threaded_irq(pdev->irq, iwl_pcie_isr, 2598 + ret = request_threaded_irq(pdev->irq, iwl_pcie_isr, 2603 2599 iwl_pcie_irq_handler, 2604 2600 IRQF_SHARED, DRV_NAME, trans); 2605 - if (err) { 2601 + if (ret) { 2606 2602 IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq); 2607 2603 goto out_free_ict; 2608 2604 } ··· 2623 2617 pci_disable_device(pdev); 2624 2618 out_no_pci: 2625 2619 iwl_trans_free(trans); 2626 - return ERR_PTR(err); 2620 + return ERR_PTR(ret); 2627 2621 }
+3 -3
drivers/net/xen-netback/netback.c
··· 1566 1566 smp_rmb(); 1567 1567 1568 1568 while (dc != dp) { 1569 - BUG_ON(gop - queue->tx_unmap_ops > MAX_PENDING_REQS); 1569 + BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS); 1570 1570 pending_idx = 1571 1571 queue->dealloc_ring[pending_index(dc++)]; 1572 1572 1573 - pending_idx_release[gop-queue->tx_unmap_ops] = 1573 + pending_idx_release[gop - queue->tx_unmap_ops] = 1574 1574 pending_idx; 1575 - queue->pages_to_unmap[gop-queue->tx_unmap_ops] = 1575 + queue->pages_to_unmap[gop - queue->tx_unmap_ops] = 1576 1576 queue->mmap_pages[pending_idx]; 1577 1577 gnttab_set_unmap_op(gop, 1578 1578 idx_to_kaddr(queue, pending_idx),
+5
drivers/nvdimm/region_devs.c
··· 458 458 nvdimm_bus_unlock(dev); 459 459 } 460 460 if (is_nd_btt(dev) && probe) { 461 + struct nd_btt *nd_btt = to_nd_btt(dev); 462 + 461 463 nd_region = to_nd_region(dev->parent); 462 464 nvdimm_bus_lock(dev); 463 465 if (nd_region->btt_seed == dev) 464 466 nd_region_create_btt_seed(nd_region); 467 + if (nd_region->ns_seed == &nd_btt->ndns->dev && 468 + is_nd_blk(dev->parent)) 469 + nd_region_create_blk_seed(nd_region); 465 470 nvdimm_bus_unlock(dev); 466 471 } 467 472 }
+6 -5
drivers/parport/share.c
··· 891 891 par_dev->dev.release = free_pardevice; 892 892 par_dev->devmodel = true; 893 893 ret = device_register(&par_dev->dev); 894 - if (ret) 895 - goto err_put_dev; 894 + if (ret) { 895 + put_device(&par_dev->dev); 896 + goto err_put_port; 897 + } 896 898 897 899 /* Chain this onto the list */ 898 900 par_dev->prev = NULL; ··· 909 907 spin_unlock(&port->physport->pardevice_lock); 910 908 pr_debug("%s: cannot grant exclusive access for device %s\n", 911 909 port->name, name); 912 - goto err_put_dev; 910 + device_unregister(&par_dev->dev); 911 + goto err_put_port; 913 912 } 914 913 port->flags |= PARPORT_FLAG_EXCL; 915 914 } ··· 941 938 942 939 return par_dev; 943 940 944 - err_put_dev: 945 - put_device(&par_dev->dev); 946 941 err_free_devname: 947 942 kfree(devname); 948 943 err_free_par_dev:
+2
drivers/phy/Kconfig
··· 56 56 57 57 config PHY_PXA_28NM_HSIC 58 58 tristate "Marvell USB HSIC 28nm PHY Driver" 59 + depends on HAS_IOMEM 59 60 select GENERIC_PHY 60 61 help 61 62 Enable this to support Marvell USB HSIC PHY driver for Marvell ··· 67 66 68 67 config PHY_PXA_28NM_USB2 69 68 tristate "Marvell USB 2.0 28nm PHY Driver" 69 + depends on HAS_IOMEM 70 70 select GENERIC_PHY 71 71 help 72 72 Enable this to support Marvell USB 2.0 PHY driver for Marvell
+2 -2
drivers/phy/phy-berlin-usb.c
··· 105 105 106 106 static const u32 phy_berlin_pll_dividers[] = { 107 107 /* Berlin 2 */ 108 - CLK_REF_DIV(0xc) | FEEDBACK_CLK_DIV(0x54), 109 - /* Berlin 2CD */ 110 108 CLK_REF_DIV(0x6) | FEEDBACK_CLK_DIV(0x55), 109 + /* Berlin 2CD/Q */ 110 + CLK_REF_DIV(0xc) | FEEDBACK_CLK_DIV(0x54), 111 111 }; 112 112 113 113 struct phy_berlin_usb_priv {
+40 -130
drivers/phy/phy-ti-pipe3.c
··· 28 28 #include <linux/delay.h> 29 29 #include <linux/phy/omap_control_phy.h> 30 30 #include <linux/of_platform.h> 31 - #include <linux/spinlock.h> 32 31 33 32 #define PLL_STATUS 0x00000004 34 33 #define PLL_GO 0x00000008 ··· 82 83 struct clk *refclk; 83 84 struct clk *div_clk; 84 85 struct pipe3_dpll_map *dpll_map; 85 - bool enabled; 86 - spinlock_t lock; /* serialize clock enable/disable */ 87 - /* the below flag is needed specifically for SATA */ 88 - bool refclk_enabled; 89 86 }; 90 87 91 88 static struct pipe3_dpll_map dpll_map_usb[] = { ··· 131 136 132 137 return NULL; 133 138 } 139 + 140 + static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy); 141 + static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy); 134 142 135 143 static int ti_pipe3_power_off(struct phy *x) 136 144 { ··· 215 217 u32 val; 216 218 int ret = 0; 217 219 220 + ti_pipe3_enable_clocks(phy); 218 221 /* 219 222 * Set pcie_pcs register to 0x96 for proper functioning of phy 220 223 * as recommended in AM572x TRM SPRUHZ6, section 18.5.2.2, table ··· 249 250 u32 val; 250 251 unsigned long timeout; 251 252 252 - /* SATA DPLL can't be powered down due to Errata i783 and PCIe 253 - * does not have internal DPLL 254 - */ 255 - if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata") || 256 - of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-pcie")) 253 + /* SATA DPLL can't be powered down due to Errata i783 */ 254 + if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata")) 257 255 return 0; 258 256 259 - /* Put DPLL in IDLE mode */ 260 - val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2); 261 - val |= PLL_IDLE; 262 - ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val); 257 + /* PCIe doesn't have internal DPLL */ 258 + if (!of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-pcie")) { 259 + /* Put DPLL in IDLE mode */ 260 + val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2); 261 + val |= PLL_IDLE; 262 + 
ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val); 263 263 264 - /* wait for LDO and Oscillator to power down */ 265 - timeout = jiffies + msecs_to_jiffies(PLL_IDLE_TIME); 266 - do { 267 - cpu_relax(); 268 - val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS); 269 - if ((val & PLL_TICOPWDN) && (val & PLL_LDOPWDN)) 270 - break; 271 - } while (!time_after(jiffies, timeout)); 264 + /* wait for LDO and Oscillator to power down */ 265 + timeout = jiffies + msecs_to_jiffies(PLL_IDLE_TIME); 266 + do { 267 + cpu_relax(); 268 + val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS); 269 + if ((val & PLL_TICOPWDN) && (val & PLL_LDOPWDN)) 270 + break; 271 + } while (!time_after(jiffies, timeout)); 272 272 273 - if (!(val & PLL_TICOPWDN) || !(val & PLL_LDOPWDN)) { 274 - dev_err(phy->dev, "Failed to power down: PLL_STATUS 0x%x\n", 275 - val); 276 - return -EBUSY; 273 + if (!(val & PLL_TICOPWDN) || !(val & PLL_LDOPWDN)) { 274 + dev_err(phy->dev, "Failed to power down: PLL_STATUS 0x%x\n", 275 + val); 276 + return -EBUSY; 277 + } 277 278 } 279 + 280 + ti_pipe3_disable_clocks(phy); 278 281 279 282 return 0; 280 283 } ··· 307 306 return -ENOMEM; 308 307 309 308 phy->dev = &pdev->dev; 310 - spin_lock_init(&phy->lock); 311 309 312 310 if (!of_device_is_compatible(node, "ti,phy-pipe3-pcie")) { 313 311 match = of_match_device(ti_pipe3_id_table, &pdev->dev); ··· 402 402 403 403 platform_set_drvdata(pdev, phy); 404 404 pm_runtime_enable(phy->dev); 405 + /* Prevent auto-disable of refclk for SATA PHY due to Errata i783 */ 406 + if (of_device_is_compatible(node, "ti,phy-pipe3-sata")) 407 + if (!IS_ERR(phy->refclk)) 408 + clk_prepare_enable(phy->refclk); 405 409 406 410 generic_phy = devm_phy_create(phy->dev, NULL, &ops); 407 411 if (IS_ERR(generic_phy)) ··· 417 413 if (IS_ERR(phy_provider)) 418 414 return PTR_ERR(phy_provider); 419 415 420 - pm_runtime_get(&pdev->dev); 421 - 422 416 return 0; 423 417 } 424 418 425 419 static int ti_pipe3_remove(struct platform_device *pdev) 426 
420 { 427 - if (!pm_runtime_suspended(&pdev->dev)) 428 - pm_runtime_put(&pdev->dev); 429 421 pm_runtime_disable(&pdev->dev); 430 422 431 423 return 0; 432 424 } 433 425 434 - #ifdef CONFIG_PM 435 - static int ti_pipe3_enable_refclk(struct ti_pipe3 *phy) 426 + static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy) 436 427 { 437 - if (!IS_ERR(phy->refclk) && !phy->refclk_enabled) { 438 - int ret; 428 + int ret = 0; 439 429 430 + if (!IS_ERR(phy->refclk)) { 440 431 ret = clk_prepare_enable(phy->refclk); 441 432 if (ret) { 442 433 dev_err(phy->dev, "Failed to enable refclk %d\n", ret); 443 434 return ret; 444 435 } 445 - phy->refclk_enabled = true; 446 436 } 447 - 448 - return 0; 449 - } 450 - 451 - static void ti_pipe3_disable_refclk(struct ti_pipe3 *phy) 452 - { 453 - if (!IS_ERR(phy->refclk)) 454 - clk_disable_unprepare(phy->refclk); 455 - 456 - phy->refclk_enabled = false; 457 - } 458 - 459 - static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy) 460 - { 461 - int ret = 0; 462 - unsigned long flags; 463 - 464 - spin_lock_irqsave(&phy->lock, flags); 465 - if (phy->enabled) 466 - goto err1; 467 - 468 - ret = ti_pipe3_enable_refclk(phy); 469 - if (ret) 470 - goto err1; 471 437 472 438 if (!IS_ERR(phy->wkupclk)) { 473 439 ret = clk_prepare_enable(phy->wkupclk); 474 440 if (ret) { 475 441 dev_err(phy->dev, "Failed to enable wkupclk %d\n", ret); 476 - goto err2; 442 + goto disable_refclk; 477 443 } 478 444 } 479 445 ··· 451 477 ret = clk_prepare_enable(phy->div_clk); 452 478 if (ret) { 453 479 dev_err(phy->dev, "Failed to enable div_clk %d\n", ret); 454 - goto err3; 480 + goto disable_wkupclk; 455 481 } 456 482 } 457 483 458 - phy->enabled = true; 459 - spin_unlock_irqrestore(&phy->lock, flags); 460 484 return 0; 461 485 462 - err3: 486 + disable_wkupclk: 463 487 if (!IS_ERR(phy->wkupclk)) 464 488 clk_disable_unprepare(phy->wkupclk); 465 489 466 - err2: 490 + disable_refclk: 467 491 if (!IS_ERR(phy->refclk)) 468 492 clk_disable_unprepare(phy->refclk); 469 493 470 - 
ti_pipe3_disable_refclk(phy); 471 - err1: 472 - spin_unlock_irqrestore(&phy->lock, flags); 473 494 return ret; 474 495 } 475 496 476 497 static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy) 477 498 { 478 - unsigned long flags; 479 - 480 - spin_lock_irqsave(&phy->lock, flags); 481 - if (!phy->enabled) { 482 - spin_unlock_irqrestore(&phy->lock, flags); 483 - return; 484 - } 485 - 486 499 if (!IS_ERR(phy->wkupclk)) 487 500 clk_disable_unprepare(phy->wkupclk); 488 - /* Don't disable refclk for SATA PHY due to Errata i783 */ 489 - if (!of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata")) 490 - ti_pipe3_disable_refclk(phy); 501 + if (!IS_ERR(phy->refclk)) 502 + clk_disable_unprepare(phy->refclk); 491 503 if (!IS_ERR(phy->div_clk)) 492 504 clk_disable_unprepare(phy->div_clk); 493 - phy->enabled = false; 494 - spin_unlock_irqrestore(&phy->lock, flags); 495 505 } 496 - 497 - static int ti_pipe3_runtime_suspend(struct device *dev) 498 - { 499 - struct ti_pipe3 *phy = dev_get_drvdata(dev); 500 - 501 - ti_pipe3_disable_clocks(phy); 502 - return 0; 503 - } 504 - 505 - static int ti_pipe3_runtime_resume(struct device *dev) 506 - { 507 - struct ti_pipe3 *phy = dev_get_drvdata(dev); 508 - int ret = 0; 509 - 510 - ret = ti_pipe3_enable_clocks(phy); 511 - return ret; 512 - } 513 - 514 - static int ti_pipe3_suspend(struct device *dev) 515 - { 516 - struct ti_pipe3 *phy = dev_get_drvdata(dev); 517 - 518 - ti_pipe3_disable_clocks(phy); 519 - return 0; 520 - } 521 - 522 - static int ti_pipe3_resume(struct device *dev) 523 - { 524 - struct ti_pipe3 *phy = dev_get_drvdata(dev); 525 - int ret; 526 - 527 - ret = ti_pipe3_enable_clocks(phy); 528 - if (ret) 529 - return ret; 530 - 531 - pm_runtime_disable(dev); 532 - pm_runtime_set_active(dev); 533 - pm_runtime_enable(dev); 534 - return 0; 535 - } 536 - #endif 537 - 538 - static const struct dev_pm_ops ti_pipe3_pm_ops = { 539 - SET_RUNTIME_PM_OPS(ti_pipe3_runtime_suspend, 540 - ti_pipe3_runtime_resume, NULL) 541 - 
SET_SYSTEM_SLEEP_PM_OPS(ti_pipe3_suspend, ti_pipe3_resume) 542 - }; 543 506 544 507 static const struct of_device_id ti_pipe3_id_table[] = { 545 508 { ··· 503 592 .remove = ti_pipe3_remove, 504 593 .driver = { 505 594 .name = "ti-pipe3", 506 - .pm = &ti_pipe3_pm_ops, 507 595 .of_match_table = ti_pipe3_id_table, 508 596 }, 509 597 };
+2
drivers/pinctrl/bcm/pinctrl-bcm2835.c
··· 473 473 474 474 spin_lock_irqsave(&pc->irq_lock[bank], flags); 475 475 bcm2835_gpio_irq_config(pc, gpio, false); 476 + /* Clear events that were latched prior to clearing event sources */ 477 + bcm2835_gpio_set_bit(pc, GPEDS0, gpio); 476 478 clear_bit(offset, &pc->enabled_irq_map[bank]); 477 479 spin_unlock_irqrestore(&pc->irq_lock[bank], flags); 478 480 }
+1 -2
drivers/pinctrl/freescale/pinctrl-imx1-core.c
··· 403 403 unsigned num_configs) 404 404 { 405 405 struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev); 406 - const struct imx1_pinctrl_soc_info *info = ipctl->info; 407 406 int i; 408 407 409 408 for (i = 0; i != num_configs; ++i) { 410 409 imx1_write_bit(ipctl, pin_id, configs[i] & 0x01, MX1_PUEN); 411 410 412 411 dev_dbg(ipctl->dev, "pinconf set pullup pin %s\n", 413 - info->pins[pin_id].name); 412 + pin_desc_get(pctldev, pin_id)->name); 414 413 } 415 414 416 415 return 0;
-1
drivers/pinctrl/nomadik/pinctrl-abx500.c
··· 787 787 .set_mux = abx500_pmx_set, 788 788 .gpio_request_enable = abx500_gpio_request_enable, 789 789 .gpio_disable_free = abx500_gpio_disable_free, 790 - .strict = true, 791 790 }; 792 791 793 792 static int abx500_get_groups_cnt(struct pinctrl_dev *pctldev)
+2 -2
drivers/pinctrl/pinctrl-lpc18xx.c
··· 823 823 break; 824 824 825 825 case PIN_CONFIG_INPUT_SCHMITT_ENABLE: 826 - if (param) 826 + if (param_val) 827 827 *reg &= ~(LPC18XX_SCU_I2C0_ZIF << shift); 828 828 else 829 829 *reg |= (LPC18XX_SCU_I2C0_ZIF << shift); ··· 876 876 break; 877 877 878 878 case PIN_CONFIG_INPUT_SCHMITT_ENABLE: 879 - if (param) 879 + if (param_val) 880 880 *reg &= ~LPC18XX_SCU_PIN_ZIF; 881 881 else 882 882 *reg |= LPC18XX_SCU_PIN_ZIF;
+2 -1
drivers/pinctrl/pinctrl-single.c
··· 1760 1760 int res; 1761 1761 1762 1762 res = request_irq(pcs_soc->irq, pcs_irq_handler, 1763 - IRQF_SHARED | IRQF_NO_SUSPEND, 1763 + IRQF_SHARED | IRQF_NO_SUSPEND | 1764 + IRQF_NO_THREAD, 1764 1765 name, pcs_soc); 1765 1766 if (res) { 1766 1767 pcs_soc->irq = -1;
-5
drivers/pinctrl/samsung/pinctrl-samsung.c
··· 33 33 #include "../core.h" 34 34 #include "pinctrl-samsung.h" 35 35 36 - #define GROUP_SUFFIX "-grp" 37 - #define GSUFFIX_LEN sizeof(GROUP_SUFFIX) 38 - #define FUNCTION_SUFFIX "-mux" 39 - #define FSUFFIX_LEN sizeof(FUNCTION_SUFFIX) 40 - 41 36 /* list of all possible config options supported */ 42 37 static struct pin_config { 43 38 const char *property;
+1 -1
drivers/pinctrl/sh-pfc/sh_pfc.h
··· 224 224 225 225 /* PINMUX_GPIO_GP_ALL - Expand to a list of sh_pfc_pin entries */ 226 226 #define _GP_GPIO(bank, _pin, _name, sfx) \ 227 - [(bank * 32) + _pin] = { \ 227 + { \ 228 228 .pin = (bank * 32) + _pin, \ 229 229 .name = __stringify(_name), \ 230 230 .enum_id = _name##_DATA, \
+1 -1
drivers/regulator/88pm800.c
··· 130 130 .owner = THIS_MODULE, \ 131 131 .n_voltages = ARRAY_SIZE(ldo_volt_table), \ 132 132 .vsel_reg = PM800_##vreg##_VOUT, \ 133 - .vsel_mask = 0x1f, \ 133 + .vsel_mask = 0xf, \ 134 134 .enable_reg = PM800_##ereg, \ 135 135 .enable_mask = 1 << (ebit), \ 136 136 .volt_table = ldo_volt_table, \
+15 -4
drivers/regulator/core.c
··· 109 109 static struct regulator *create_regulator(struct regulator_dev *rdev, 110 110 struct device *dev, 111 111 const char *supply_name); 112 + static void _regulator_put(struct regulator *regulator); 112 113 113 114 static const char *rdev_get_name(struct regulator_dev *rdev) 114 115 { ··· 1106 1105 1107 1106 rdev_info(rdev, "supplied by %s\n", rdev_get_name(supply_rdev)); 1108 1107 1108 + if (!try_module_get(supply_rdev->owner)) 1109 + return -ENODEV; 1110 + 1109 1111 rdev->supply = create_regulator(supply_rdev, &rdev->dev, "SUPPLY"); 1110 1112 if (rdev->supply == NULL) { 1111 1113 err = -ENOMEM; ··· 1385 1381 } 1386 1382 1387 1383 if (!r) { 1388 - dev_err(dev, "Failed to resolve %s-supply for %s\n", 1389 - rdev->supply_name, rdev->desc->name); 1390 - return -EPROBE_DEFER; 1384 + if (have_full_constraints()) { 1385 + r = dummy_regulator_rdev; 1386 + } else { 1387 + dev_err(dev, "Failed to resolve %s-supply for %s\n", 1388 + rdev->supply_name, rdev->desc->name); 1389 + return -EPROBE_DEFER; 1390 + } 1391 1391 } 1392 1392 1393 1393 /* Recursively resolve the supply of the supply */ ··· 1406 1398 /* Cascade always-on state to supply */ 1407 1399 if (_regulator_is_enabled(rdev)) { 1408 1400 ret = regulator_enable(rdev->supply); 1409 - if (ret < 0) 1401 + if (ret < 0) { 1402 + if (rdev->supply) 1403 + _regulator_put(rdev->supply); 1410 1404 return ret; 1405 + } 1411 1406 } 1412 1407 1413 1408 return 0;
+1 -1
drivers/regulator/max8973-regulator.c
··· 450 450 pdata->control_flags |= MAX8973_CONTROL_FREQ_SHIFT_9PER_ENABLE; 451 451 452 452 if (of_property_read_bool(np, "maxim,enable-bias-control")) 453 - pdata->control_flags |= MAX8973_BIAS_ENABLE; 453 + pdata->control_flags |= MAX8973_CONTROL_BIAS_ENABLE; 454 454 455 455 return pdata; 456 456 }
+10 -4
drivers/regulator/s2mps11.c
··· 34 34 #include <linux/mfd/samsung/s2mps14.h> 35 35 #include <linux/mfd/samsung/s2mpu02.h> 36 36 37 + /* The highest number of possible regulators for supported devices. */ 38 + #define S2MPS_REGULATOR_MAX S2MPS13_REGULATOR_MAX 37 39 struct s2mps11_info { 38 40 unsigned int rdev_num; 39 41 int ramp_delay2; ··· 51 49 * One bit for each S2MPS13/S2MPS14/S2MPU02 regulator whether 52 50 * the suspend mode was enabled. 53 51 */ 54 - unsigned long long s2mps14_suspend_state:50; 52 + DECLARE_BITMAP(suspend_state, S2MPS_REGULATOR_MAX); 55 53 56 54 /* Array of size rdev_num with GPIO-s for external sleep control */ 57 55 int *ext_control_gpio; ··· 502 500 switch (s2mps11->dev_type) { 503 501 case S2MPS13X: 504 502 case S2MPS14X: 505 - if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev))) 503 + if (test_bit(rdev_get_id(rdev), s2mps11->suspend_state)) 506 504 val = S2MPS14_ENABLE_SUSPEND; 507 505 else if (gpio_is_valid(s2mps11->ext_control_gpio[rdev_get_id(rdev)])) 508 506 val = S2MPS14_ENABLE_EXT_CONTROL; ··· 510 508 val = rdev->desc->enable_mask; 511 509 break; 512 510 case S2MPU02: 513 - if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev))) 511 + if (test_bit(rdev_get_id(rdev), s2mps11->suspend_state)) 514 512 val = S2MPU02_ENABLE_SUSPEND; 515 513 else 516 514 val = rdev->desc->enable_mask; ··· 564 562 if (ret < 0) 565 563 return ret; 566 564 567 - s2mps11->s2mps14_suspend_state |= (1 << rdev_get_id(rdev)); 565 + set_bit(rdev_get_id(rdev), s2mps11->suspend_state); 568 566 /* 569 567 * Don't enable suspend mode if regulator is already disabled because 570 568 * this would effectively for a short time turn on the regulator after ··· 962 960 case S2MPS11X: 963 961 s2mps11->rdev_num = ARRAY_SIZE(s2mps11_regulators); 964 962 regulators = s2mps11_regulators; 963 + BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num); 965 964 break; 966 965 case S2MPS13X: 967 966 s2mps11->rdev_num = ARRAY_SIZE(s2mps13_regulators); 968 967 regulators = s2mps13_regulators; 968 
+ BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num); 969 969 break; 970 970 case S2MPS14X: 971 971 s2mps11->rdev_num = ARRAY_SIZE(s2mps14_regulators); 972 972 regulators = s2mps14_regulators; 973 + BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num); 973 974 break; 974 975 case S2MPU02: 975 976 s2mps11->rdev_num = ARRAY_SIZE(s2mpu02_regulators); 976 977 regulators = s2mpu02_regulators; 978 + BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num); 977 979 break; 978 980 default: 979 981 dev_err(&pdev->dev, "Invalid device type: %u\n",
+1 -1
drivers/s390/Makefile
··· 2 2 # Makefile for the S/390 specific device drivers 3 3 # 4 4 5 - obj-y += cio/ block/ char/ crypto/ net/ scsi/ kvm/ 5 + obj-y += cio/ block/ char/ crypto/ net/ scsi/ virtio/ 6 6 7 7 drivers-y += drivers/s390/built-in.o 8 8
drivers/s390/kvm/Makefile drivers/s390/virtio/Makefile
drivers/s390/kvm/kvm_virtio.c drivers/s390/virtio/kvm_virtio.c
drivers/s390/kvm/virtio_ccw.c drivers/s390/virtio/virtio_ccw.c
+3 -1
drivers/scsi/virtio_scsi.c
··· 949 949 { 950 950 struct Scsi_Host *shost; 951 951 struct virtio_scsi *vscsi; 952 - int err, host_prot; 952 + int err; 953 953 u32 sg_elems, num_targets; 954 954 u32 cmd_per_lun; 955 955 u32 num_queues; ··· 1009 1009 1010 1010 #ifdef CONFIG_BLK_DEV_INTEGRITY 1011 1011 if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) { 1012 + int host_prot; 1013 + 1012 1014 host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | 1013 1015 SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION | 1014 1016 SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
+1 -1
drivers/spi/Kconfig
··· 612 612 613 613 config SPI_ZYNQMP_GQSPI 614 614 tristate "Xilinx ZynqMP GQSPI controller" 615 - depends on SPI_MASTER 615 + depends on SPI_MASTER && HAS_DMA 616 616 help 617 617 Enables Xilinx GQSPI controller driver for Zynq UltraScale+ MPSoC. 618 618
+2
drivers/spi/spi-img-spfi.c
··· 40 40 #define SPFI_CONTROL_SOFT_RESET BIT(11) 41 41 #define SPFI_CONTROL_SEND_DMA BIT(10) 42 42 #define SPFI_CONTROL_GET_DMA BIT(9) 43 + #define SPFI_CONTROL_SE BIT(8) 43 44 #define SPFI_CONTROL_TMODE_SHIFT 5 44 45 #define SPFI_CONTROL_TMODE_MASK 0x7 45 46 #define SPFI_CONTROL_TMODE_SINGLE 0 ··· 492 491 else if (xfer->tx_nbits == SPI_NBITS_QUAD && 493 492 xfer->rx_nbits == SPI_NBITS_QUAD) 494 493 val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT; 494 + val |= SPFI_CONTROL_SE; 495 495 spfi_writel(spfi, val, SPFI_CONTROL); 496 496 } 497 497
+3 -2
drivers/spi/spi-imx.c
··· 201 201 { 202 202 struct spi_imx_data *spi_imx = spi_master_get_devdata(master); 203 203 204 - if (spi_imx->dma_is_inited && (transfer->len > spi_imx->rx_wml) 205 - && (transfer->len > spi_imx->tx_wml)) 204 + if (spi_imx->dma_is_inited 205 + && transfer->len > spi_imx->rx_wml * sizeof(u32) 206 + && transfer->len > spi_imx->tx_wml * sizeof(u32)) 206 207 return true; 207 208 return false; 208 209 }
+1
drivers/spi/spi-zynqmp-gqspi.c
··· 214 214 case GQSPI_SELECT_FLASH_CS_BOTH: 215 215 instanceptr->genfifocs = GQSPI_GENFIFO_CS_LOWER | 216 216 GQSPI_GENFIFO_CS_UPPER; 217 + break; 217 218 case GQSPI_SELECT_FLASH_CS_UPPER: 218 219 instanceptr->genfifocs = GQSPI_GENFIFO_CS_UPPER; 219 220 break;
+1
drivers/spi/spidev.c
··· 693 693 #ifdef CONFIG_OF 694 694 static const struct of_device_id spidev_dt_ids[] = { 695 695 { .compatible = "rohm,dh2228fv" }, 696 + { .compatible = "lineartechnology,ltc2488" }, 696 697 {}, 697 698 }; 698 699 MODULE_DEVICE_TABLE(of, spidev_dt_ids);
+13 -3
drivers/tty/n_tty.c
··· 1108 1108 * Locking: ctrl_lock 1109 1109 */ 1110 1110 1111 - static void isig(int sig, struct tty_struct *tty) 1111 + static void __isig(int sig, struct tty_struct *tty) 1112 1112 { 1113 - struct n_tty_data *ldata = tty->disc_data; 1114 1113 struct pid *tty_pgrp = tty_get_pgrp(tty); 1115 1114 if (tty_pgrp) { 1116 1115 kill_pgrp(tty_pgrp, sig, 1); 1117 1116 put_pid(tty_pgrp); 1118 1117 } 1118 + } 1119 1119 1120 - if (!L_NOFLSH(tty)) { 1120 + static void isig(int sig, struct tty_struct *tty) 1121 + { 1122 + struct n_tty_data *ldata = tty->disc_data; 1123 + 1124 + if (L_NOFLSH(tty)) { 1125 + /* signal only */ 1126 + __isig(sig, tty); 1127 + 1128 + } else { /* signal and flush */ 1121 1129 up_read(&tty->termios_rwsem); 1122 1130 down_write(&tty->termios_rwsem); 1131 + 1132 + __isig(sig, tty); 1123 1133 1124 1134 /* clear echo buffer */ 1125 1135 mutex_lock(&ldata->output_lock);
+1 -1
drivers/tty/serial/Kconfig
··· 1185 1185 config SERIAL_SC16IS7XX 1186 1186 tristate "SC16IS7xx serial support" 1187 1187 select SERIAL_CORE 1188 - depends on I2C || SPI_MASTER 1188 + depends on (SPI_MASTER && !I2C) || I2C 1189 1189 help 1190 1190 This selects support for SC16IS7xx serial ports. 1191 1191 Supported ICs are SC16IS740, SC16IS741, SC16IS750, SC16IS752,
+2 -2
drivers/tty/serial/amba-pl011.c
··· 2310 2310 void __iomem *base; 2311 2311 2312 2312 base = devm_ioremap_resource(dev, mmiobase); 2313 - if (!base) 2314 - return -ENOMEM; 2313 + if (IS_ERR(base)) 2314 + return PTR_ERR(base); 2315 2315 2316 2316 index = pl011_probe_dt_alias(index, dev); 2317 2317
+1 -1
drivers/tty/serial/etraxfs-uart.c
··· 950 950 951 951 port = platform_get_drvdata(pdev); 952 952 uart_remove_one_port(&etraxfs_uart_driver, port); 953 - etraxfs_uart_ports[pdev->id] = NULL; 953 + etraxfs_uart_ports[port->line] = NULL; 954 954 955 955 return 0; 956 956 }
+7 -8
drivers/tty/serial/imx.c
··· 1121 1121 1122 1122 writel(temp & ~UCR4_DREN, sport->port.membase + UCR4); 1123 1123 1124 - /* Can we enable the DMA support? */ 1125 - if (is_imx6q_uart(sport) && !uart_console(port) && 1126 - !sport->dma_is_inited) 1127 - imx_uart_dma_init(sport); 1128 - 1129 1124 spin_lock_irqsave(&sport->port.lock, flags); 1130 1125 /* Reset fifo's and state machines */ 1131 1126 i = 100; ··· 1137 1142 */ 1138 1143 writel(USR1_RTSD, sport->port.membase + USR1); 1139 1144 writel(USR2_ORE, sport->port.membase + USR2); 1140 - 1141 - if (sport->dma_is_inited && !sport->dma_is_enabled) 1142 - imx_enable_dma(sport); 1143 1145 1144 1146 temp = readl(sport->port.membase + UCR1); 1145 1147 temp |= UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN; ··· 1308 1316 } else { 1309 1317 ucr2 |= UCR2_CTSC; 1310 1318 } 1319 + 1320 + /* Can we enable the DMA support? */ 1321 + if (is_imx6q_uart(sport) && !uart_console(port) 1322 + && !sport->dma_is_inited) 1323 + imx_uart_dma_init(sport); 1311 1324 } else { 1312 1325 termios->c_cflag &= ~CRTSCTS; 1313 1326 } ··· 1429 1432 if (UART_ENABLE_MS(&sport->port, termios->c_cflag)) 1430 1433 imx_enable_ms(&sport->port); 1431 1434 1435 + if (sport->dma_is_inited && !sport->dma_is_enabled) 1436 + imx_enable_dma(sport); 1432 1437 spin_unlock_irqrestore(&sport->port.lock, flags); 1433 1438 } 1434 1439
+23 -7
drivers/tty/serial/sc16is7xx.c
··· 354 354 (reg << SC16IS7XX_REG_SHIFT) | port->line, val); 355 355 } 356 356 357 + static void sc16is7xx_fifo_read(struct uart_port *port, unsigned int rxlen) 358 + { 359 + struct sc16is7xx_port *s = dev_get_drvdata(port->dev); 360 + u8 addr = (SC16IS7XX_RHR_REG << SC16IS7XX_REG_SHIFT) | port->line; 361 + 362 + regcache_cache_bypass(s->regmap, true); 363 + regmap_raw_read(s->regmap, addr, s->buf, rxlen); 364 + regcache_cache_bypass(s->regmap, false); 365 + } 366 + 367 + static void sc16is7xx_fifo_write(struct uart_port *port, u8 to_send) 368 + { 369 + struct sc16is7xx_port *s = dev_get_drvdata(port->dev); 370 + u8 addr = (SC16IS7XX_THR_REG << SC16IS7XX_REG_SHIFT) | port->line; 371 + 372 + regcache_cache_bypass(s->regmap, true); 373 + regmap_raw_write(s->regmap, addr, s->buf, to_send); 374 + regcache_cache_bypass(s->regmap, false); 375 + } 376 + 357 377 static void sc16is7xx_port_update(struct uart_port *port, u8 reg, 358 378 u8 mask, u8 val) 359 379 { ··· 528 508 s->buf[0] = sc16is7xx_port_read(port, SC16IS7XX_RHR_REG); 529 509 bytes_read = 1; 530 510 } else { 531 - regcache_cache_bypass(s->regmap, true); 532 - regmap_raw_read(s->regmap, SC16IS7XX_RHR_REG, 533 - s->buf, rxlen); 534 - regcache_cache_bypass(s->regmap, false); 511 + sc16is7xx_fifo_read(port, rxlen); 535 512 bytes_read = rxlen; 536 513 } 537 514 ··· 608 591 s->buf[i] = xmit->buf[xmit->tail]; 609 592 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); 610 593 } 611 - regcache_cache_bypass(s->regmap, true); 612 - regmap_raw_write(s->regmap, SC16IS7XX_THR_REG, s->buf, to_send); 613 - regcache_cache_bypass(s->regmap, false); 594 + 595 + sc16is7xx_fifo_write(port, to_send); 614 596 } 615 597 616 598 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+2 -1
drivers/tty/serial/serial_core.c
··· 1418 1418 mutex_lock(&port->mutex); 1419 1419 uart_shutdown(tty, state); 1420 1420 tty_port_tty_set(port, NULL); 1421 - tty->closing = 0; 1421 + 1422 1422 spin_lock_irqsave(&port->lock, flags); 1423 1423 1424 1424 if (port->blocked_open) { ··· 1444 1444 mutex_unlock(&port->mutex); 1445 1445 1446 1446 tty_ldisc_flush(tty); 1447 + tty->closing = 0; 1447 1448 } 1448 1449 1449 1450 static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
+1
drivers/tty/vt/selection.c
··· 356 356 schedule(); 357 357 continue; 358 358 } 359 + __set_current_state(TASK_RUNNING); 359 360 count = sel_buffer_lth - pasted; 360 361 count = tty_ldisc_receive_buf(ld, sel_buffer + pasted, NULL, 361 362 count);
+2
drivers/tty/vt/vt.c
··· 742 742 __module_get(vc->vc_sw->owner); 743 743 vc->vc_num = num; 744 744 vc->vc_display_fg = &master_display_fg; 745 + if (vc->vc_uni_pagedir_loc) 746 + con_free_unimap(vc); 745 747 vc->vc_uni_pagedir_loc = &vc->vc_uni_pagedir; 746 748 vc->vc_uni_pagedir = NULL; 747 749 vc->vc_hi_font_mask = 0;
+1
drivers/usb/class/cdc-acm.c
··· 1944 1944 usb_deregister(&acm_driver); 1945 1945 tty_unregister_driver(acm_tty_driver); 1946 1946 put_tty_driver(acm_tty_driver); 1947 + idr_destroy(&acm_minors); 1947 1948 } 1948 1949 1949 1950 module_init(acm_init);
+1 -1
drivers/usb/common/ulpi.c
··· 242 242 { 243 243 return bus_register(&ulpi_bus); 244 244 } 245 - module_init(ulpi_init); 245 + subsys_initcall(ulpi_init); 246 246 247 247 static void __exit ulpi_exit(void) 248 248 {
+5 -2
drivers/usb/core/hcd.c
··· 1022 1022 dev_name(&usb_dev->dev), retval); 1023 1023 return (retval < 0) ? retval : -EMSGSIZE; 1024 1024 } 1025 - if (usb_dev->speed == USB_SPEED_SUPER) { 1025 + 1026 + if (le16_to_cpu(usb_dev->descriptor.bcdUSB) >= 0x0201) { 1026 1027 retval = usb_get_bos_descriptor(usb_dev); 1027 - if (retval < 0) { 1028 + if (!retval) { 1029 + usb_dev->lpm_capable = usb_device_supports_lpm(usb_dev); 1030 + } else if (usb_dev->speed == USB_SPEED_SUPER) { 1028 1031 mutex_unlock(&usb_bus_list_lock); 1029 1032 dev_dbg(parent_dev, "can't read %s bos descriptor %d\n", 1030 1033 dev_name(&usb_dev->dev), retval);
+1 -1
drivers/usb/core/hub.c
··· 122 122 return usb_get_intfdata(hdev->actconfig->interface[0]); 123 123 } 124 124 125 - static int usb_device_supports_lpm(struct usb_device *udev) 125 + int usb_device_supports_lpm(struct usb_device *udev) 126 126 { 127 127 /* USB 2.1 (and greater) devices indicate LPM support through 128 128 * their USB 2.0 Extended Capabilities BOS descriptor.
+1
drivers/usb/core/usb.h
··· 65 65 extern void usb_hub_cleanup(void); 66 66 extern int usb_major_init(void); 67 67 extern void usb_major_cleanup(void); 68 + extern int usb_device_supports_lpm(struct usb_device *udev); 68 69 69 70 #ifdef CONFIG_PM 70 71
+4
drivers/usb/dwc3/ep0.c
··· 727 727 dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY"); 728 728 ret = dwc3_ep0_set_isoch_delay(dwc, ctrl); 729 729 break; 730 + case USB_REQ_SET_INTERFACE: 731 + dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_INTERFACE"); 732 + dwc->start_config_issued = false; 733 + /* Fall through */ 730 734 default: 731 735 dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver"); 732 736 ret = dwc3_ep0_delegate_req(dwc, ctrl);
+1 -1
drivers/usb/gadget/udc/mv_udc_core.c
··· 2167 2167 return -ENODEV; 2168 2168 } 2169 2169 2170 - udc->phy_regs = ioremap(r->start, resource_size(r)); 2170 + udc->phy_regs = devm_ioremap(&pdev->dev, r->start, resource_size(r)); 2171 2171 if (udc->phy_regs == NULL) { 2172 2172 dev_err(&pdev->dev, "failed to map phy I/O memory\n"); 2173 2173 return -EBUSY;
+8 -6
drivers/usb/gadget/udc/udc-core.c
··· 60 60 int usb_gadget_map_request(struct usb_gadget *gadget, 61 61 struct usb_request *req, int is_in) 62 62 { 63 + struct device *dev = gadget->dev.parent; 64 + 63 65 if (req->length == 0) 64 66 return 0; 65 67 66 68 if (req->num_sgs) { 67 69 int mapped; 68 70 69 - mapped = dma_map_sg(&gadget->dev, req->sg, req->num_sgs, 71 + mapped = dma_map_sg(dev, req->sg, req->num_sgs, 70 72 is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 71 73 if (mapped == 0) { 72 74 dev_err(&gadget->dev, "failed to map SGs\n"); ··· 77 75 78 76 req->num_mapped_sgs = mapped; 79 77 } else { 80 - req->dma = dma_map_single(&gadget->dev, req->buf, req->length, 78 + req->dma = dma_map_single(dev, req->buf, req->length, 81 79 is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 82 80 83 - if (dma_mapping_error(&gadget->dev, req->dma)) { 84 - dev_err(&gadget->dev, "failed to map buffer\n"); 81 + if (dma_mapping_error(dev, req->dma)) { 82 + dev_err(dev, "failed to map buffer\n"); 85 83 return -EFAULT; 86 84 } 87 85 } ··· 97 95 return; 98 96 99 97 if (req->num_mapped_sgs) { 100 - dma_unmap_sg(&gadget->dev, req->sg, req->num_mapped_sgs, 98 + dma_unmap_sg(gadget->dev.parent, req->sg, req->num_mapped_sgs, 101 99 is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 102 100 103 101 req->num_mapped_sgs = 0; 104 102 } else { 105 - dma_unmap_single(&gadget->dev, req->dma, req->length, 103 + dma_unmap_single(gadget->dev.parent, req->dma, req->length, 106 104 is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 107 105 } 108 106 }
+1 -6
drivers/usb/host/ohci-q.c
··· 981 981 int completed, modified; 982 982 __hc32 *prev; 983 983 984 - /* Is this ED already invisible to the hardware? */ 985 - if (ed->state == ED_IDLE) 986 - goto ed_idle; 987 - 988 984 /* only take off EDs that the HC isn't using, accounting for 989 985 * frame counter wraps and EDs with partially retired TDs 990 986 */ ··· 1008 1012 } 1009 1013 1010 1014 /* ED's now officially unlinked, hc doesn't see */ 1011 - ed->state = ED_IDLE; 1012 1015 ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H); 1013 1016 ed->hwNextED = 0; 1014 1017 wmb(); 1015 1018 ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE); 1016 - ed_idle: 1017 1019 1018 1020 /* reentrancy: if we drop the schedule lock, someone might 1019 1021 * have modified this list. normally it's just prepending ··· 1082 1088 if (list_empty(&ed->td_list)) { 1083 1089 *last = ed->ed_next; 1084 1090 ed->ed_next = NULL; 1091 + ed->state = ED_IDLE; 1085 1092 list_del(&ed->in_use_list); 1086 1093 } else if (ohci->rh_state == OHCI_RH_RUNNING) { 1087 1094 *last = ed->ed_next;
+1 -1
drivers/usb/host/ohci-tmio.c
··· 58 58 #define CCR_PM_CKRNEN 0x0002 59 59 #define CCR_PM_USBPW1 0x0004 60 60 #define CCR_PM_USBPW2 0x0008 61 - #define CCR_PM_USBPW3 0x0008 61 + #define CCR_PM_USBPW3 0x0010 62 62 #define CCR_PM_PMEE 0x0100 63 63 #define CCR_PM_PMES 0x8000 64 64
+16 -6
drivers/usb/host/xhci-hub.c
··· 484 484 u32 pls = status_reg & PORT_PLS_MASK; 485 485 486 486 /* resume state is a xHCI internal state. 487 - * Do not report it to usb core. 487 + * Do not report it to usb core, instead, pretend to be U3, 488 + * thus usb core knows it's not ready for transfer 488 489 */ 489 - if (pls == XDEV_RESUME) 490 + if (pls == XDEV_RESUME) { 491 + *status |= USB_SS_PORT_LS_U3; 490 492 return; 493 + } 491 494 492 495 /* When the CAS bit is set then warm reset 493 496 * should be performed on port ··· 591 588 status |= USB_PORT_STAT_C_RESET << 16; 592 589 /* USB3.0 only */ 593 590 if (hcd->speed == HCD_USB3) { 594 - if ((raw_port_status & PORT_PLC)) 591 + /* Port link change with port in resume state should not be 592 + * reported to usbcore, as this is an internal state to be 593 + * handled by xhci driver. Reporting PLC to usbcore may 594 + * cause usbcore clearing PLC first and port change event 595 + * irq won't be generated. 596 + */ 597 + if ((raw_port_status & PORT_PLC) && 598 + (raw_port_status & PORT_PLS_MASK) != XDEV_RESUME) 595 599 status |= USB_PORT_STAT_C_LINK_STATE << 16; 596 600 if ((raw_port_status & PORT_WRC)) 597 601 status |= USB_PORT_STAT_C_BH_RESET << 16; ··· 1130 1120 spin_lock_irqsave(&xhci->lock, flags); 1131 1121 1132 1122 if (hcd->self.root_hub->do_remote_wakeup) { 1133 - if (bus_state->resuming_ports) { 1123 + if (bus_state->resuming_ports || /* USB2 */ 1124 + bus_state->port_remote_wakeup) { /* USB3 */ 1134 1125 spin_unlock_irqrestore(&xhci->lock, flags); 1135 - xhci_dbg(xhci, "suspend failed because " 1136 - "a port is resuming\n"); 1126 + xhci_dbg(xhci, "suspend failed because a port is resuming\n"); 1137 1127 return -EBUSY; 1138 1128 } 1139 1129 }
+1 -1
drivers/usb/host/xhci-mem.c
··· 1427 1427 /* Attempt to use the ring cache */ 1428 1428 if (virt_dev->num_rings_cached == 0) 1429 1429 return -ENOMEM; 1430 + virt_dev->num_rings_cached--; 1430 1431 virt_dev->eps[ep_index].new_ring = 1431 1432 virt_dev->ring_cache[virt_dev->num_rings_cached]; 1432 1433 virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL; 1433 - virt_dev->num_rings_cached--; 1434 1434 xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring, 1435 1435 1, type); 1436 1436 }
+54 -3
drivers/usb/host/xhci-pci.c
··· 23 23 #include <linux/pci.h> 24 24 #include <linux/slab.h> 25 25 #include <linux/module.h> 26 + #include <linux/acpi.h> 26 27 27 28 #include "xhci.h" 28 29 #include "xhci-trace.h" 30 + 31 + #define PORT2_SSIC_CONFIG_REG2 0x883c 32 + #define PROG_DONE (1 << 30) 33 + #define SSIC_PORT_UNUSED (1 << 31) 29 34 30 35 /* Device for a quirk */ 31 36 #define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73 ··· 181 176 } 182 177 183 178 /* 179 + * In some Intel xHCI controllers, in order to get D3 working, 180 + * through a vendor specific SSIC CONFIG register at offset 0x883c, 181 + * SSIC PORT need to be marked as "unused" before putting xHCI 182 + * into D3. After D3 exit, the SSIC port need to be marked as "used". 183 + * Without this change, xHCI might not enter D3 state. 184 184 * Make sure PME works on some Intel xHCI controllers by writing 1 to clear 185 185 * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4 186 186 */ 187 - static void xhci_pme_quirk(struct xhci_hcd *xhci) 187 + static void xhci_pme_quirk(struct usb_hcd *hcd, bool suspend) 188 188 { 189 + struct xhci_hcd *xhci = hcd_to_xhci(hcd); 190 + struct pci_dev *pdev = to_pci_dev(hcd->self.controller); 189 191 u32 val; 190 192 void __iomem *reg; 193 + 194 + if (pdev->vendor == PCI_VENDOR_ID_INTEL && 195 + pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) { 196 + 197 + reg = (void __iomem *) xhci->cap_regs + PORT2_SSIC_CONFIG_REG2; 198 + 199 + /* Notify SSIC that SSIC profile programming is not done */ 200 + val = readl(reg) & ~PROG_DONE; 201 + writel(val, reg); 202 + 203 + /* Mark SSIC port as unused(suspend) or used(resume) */ 204 + val = readl(reg); 205 + if (suspend) 206 + val |= SSIC_PORT_UNUSED; 207 + else 208 + val &= ~SSIC_PORT_UNUSED; 209 + writel(val, reg); 210 + 211 + /* Notify SSIC that SSIC profile programming is done */ 212 + val = readl(reg) | PROG_DONE; 213 + writel(val, reg); 214 + readl(reg); 215 + } 191 216 192 217 reg = (void __iomem *) xhci->cap_regs + 0x80a4; 193 218 
val = readl(reg); 194 219 writel(val | BIT(28), reg); 195 220 readl(reg); 196 221 } 222 + 223 + #ifdef CONFIG_ACPI 224 + static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) 225 + { 226 + static const u8 intel_dsm_uuid[] = { 227 + 0xb7, 0x0c, 0x34, 0xac, 0x01, 0xe9, 0xbf, 0x45, 228 + 0xb7, 0xe6, 0x2b, 0x34, 0xec, 0x93, 0x1e, 0x23, 229 + }; 230 + acpi_evaluate_dsm(ACPI_HANDLE(&dev->dev), intel_dsm_uuid, 3, 1, NULL); 231 + } 232 + #else 233 + static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) { } 234 + #endif /* CONFIG_ACPI */ 197 235 198 236 /* called during probe() after chip reset completes */ 199 237 static int xhci_pci_setup(struct usb_hcd *hcd) ··· 311 263 HCC_MAX_PSA(xhci->hcc_params) >= 4) 312 264 xhci->shared_hcd->can_do_streams = 1; 313 265 266 + if (xhci->quirks & XHCI_PME_STUCK_QUIRK) 267 + xhci_pme_acpi_rtd3_enable(dev); 268 + 314 269 /* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */ 315 270 pm_runtime_put_noidle(&dev->dev); 316 271 ··· 358 307 pdev->no_d3cold = true; 359 308 360 309 if (xhci->quirks & XHCI_PME_STUCK_QUIRK) 361 - xhci_pme_quirk(xhci); 310 + xhci_pme_quirk(hcd, true); 362 311 363 312 return xhci_suspend(xhci, do_wakeup); 364 313 } ··· 391 340 usb_enable_intel_xhci_ports(pdev); 392 341 393 342 if (xhci->quirks & XHCI_PME_STUCK_QUIRK) 394 - xhci_pme_quirk(xhci); 343 + xhci_pme_quirk(hcd, false); 395 344 396 345 retval = xhci_resume(xhci, hibernated); 397 346 return retval;
+3
drivers/usb/host/xhci-ring.c
··· 1546 1546 usb_hcd_resume_root_hub(hcd); 1547 1547 } 1548 1548 1549 + if (hcd->speed == HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE) 1550 + bus_state->port_remote_wakeup &= ~(1 << faked_port_index); 1551 + 1549 1552 if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) { 1550 1553 xhci_dbg(xhci, "port resume event for port %d\n", port_id); 1551 1554
+3
drivers/usb/host/xhci.c
··· 3453 3453 return -EINVAL; 3454 3454 } 3455 3455 3456 + if (virt_dev->tt_info) 3457 + old_active_eps = virt_dev->tt_info->active_eps; 3458 + 3456 3459 if (virt_dev->udev != udev) { 3457 3460 /* If the virt_dev and the udev does not match, this virt_dev 3458 3461 * may belong to another udev.
+1
drivers/usb/host/xhci.h
··· 285 285 #define XDEV_U0 (0x0 << 5) 286 286 #define XDEV_U2 (0x2 << 5) 287 287 #define XDEV_U3 (0x3 << 5) 288 + #define XDEV_INACTIVE (0x6 << 5) 288 289 #define XDEV_RESUME (0xf << 5) 289 290 /* true: port has power (see HCC_PPC) */ 290 291 #define PORT_POWER (1 << 9)
+23
drivers/usb/storage/unusual_devs.h
··· 2065 2065 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 2066 2066 US_FL_NO_READ_DISC_INFO ), 2067 2067 2068 + /* Reported by Oliver Neukum <oneukum@suse.com> 2069 + * This device morphes spontaneously into another device if the access 2070 + * pattern of Windows isn't followed. Thus writable media would be dirty 2071 + * if the initial instance is used. So the device is limited to its 2072 + * virtual CD. 2073 + * And yes, the concept that BCD goes up to 9 is not heeded */ 2074 + UNUSUAL_DEV( 0x19d2, 0x1225, 0x0000, 0xffff, 2075 + "ZTE,Incorporated", 2076 + "ZTE WCDMA Technologies MSM", 2077 + USB_SC_DEVICE, USB_PR_DEVICE, NULL, 2078 + US_FL_SINGLE_LUN ), 2079 + 2068 2080 /* Reported by Sven Geggus <sven-usbst@geggus.net> 2069 2081 * This encrypted pen drive returns bogus data for the initial READ(10). 2070 2082 */ ··· 2085 2073 "Padlock v2", 2086 2074 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 2087 2075 US_FL_INITIAL_READ10 ), 2076 + 2077 + /* Reported by Hans de Goede <hdegoede@redhat.com> 2078 + * These are mini projectors using USB for both power and video data transport 2079 + * The usb-storage interface is a virtual windows driver CD, which the gm12u320 2080 + * driver automatically converts into framebuffer & kms dri device nodes. 2081 + */ 2082 + UNUSUAL_DEV( 0x1de1, 0xc102, 0x0000, 0xffff, 2083 + "Grain-media Technology Corp.", 2084 + "USB3.0 Device GM12U320", 2085 + USB_SC_DEVICE, USB_PR_DEVICE, NULL, 2086 + US_FL_IGNORE_DEVICE ), 2088 2087 2089 2088 /* Patch by Richard Schütz <r.schtz@t-online.de> 2090 2089 * This external hard drive enclosure uses a JMicron chip which
+51 -16
drivers/vhost/vhost.c
··· 22 22 #include <linux/file.h> 23 23 #include <linux/highmem.h> 24 24 #include <linux/slab.h> 25 + #include <linux/vmalloc.h> 25 26 #include <linux/kthread.h> 26 27 #include <linux/cgroup.h> 27 28 #include <linux/module.h> 29 + #include <linux/sort.h> 28 30 29 31 #include "vhost.h" 30 32 33 + static ushort max_mem_regions = 64; 34 + module_param(max_mem_regions, ushort, 0444); 35 + MODULE_PARM_DESC(max_mem_regions, 36 + "Maximum number of memory regions in memory map. (default: 64)"); 37 + 31 38 enum { 32 - VHOST_MEMORY_MAX_NREGIONS = 64, 33 39 VHOST_MEMORY_F_LOG = 0x1, 34 40 }; 35 41 ··· 549 543 fput(dev->log_file); 550 544 dev->log_file = NULL; 551 545 /* No one will access memory at this point */ 552 - kfree(dev->memory); 546 + kvfree(dev->memory); 553 547 dev->memory = NULL; 554 548 WARN_ON(!list_empty(&dev->work_list)); 555 549 if (dev->worker) { ··· 669 663 } 670 664 EXPORT_SYMBOL_GPL(vhost_vq_access_ok); 671 665 666 + static int vhost_memory_reg_sort_cmp(const void *p1, const void *p2) 667 + { 668 + const struct vhost_memory_region *r1 = p1, *r2 = p2; 669 + if (r1->guest_phys_addr < r2->guest_phys_addr) 670 + return 1; 671 + if (r1->guest_phys_addr > r2->guest_phys_addr) 672 + return -1; 673 + return 0; 674 + } 675 + 676 + static void *vhost_kvzalloc(unsigned long size) 677 + { 678 + void *n = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); 679 + 680 + if (!n) { 681 + n = vzalloc(size); 682 + if (!n) 683 + return ERR_PTR(-ENOMEM); 684 + } 685 + return n; 686 + } 687 + 672 688 static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) 673 689 { 674 690 struct vhost_memory mem, *newmem, *oldmem; ··· 701 673 return -EFAULT; 702 674 if (mem.padding) 703 675 return -EOPNOTSUPP; 704 - if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS) 676 + if (mem.nregions > max_mem_regions) 705 677 return -E2BIG; 706 - newmem = kmalloc(size + mem.nregions * sizeof *m->regions, GFP_KERNEL); 678 + newmem = vhost_kvzalloc(size + mem.nregions * 
sizeof(*m->regions)); 707 679 if (!newmem) 708 680 return -ENOMEM; 709 681 710 682 memcpy(newmem, &mem, size); 711 683 if (copy_from_user(newmem->regions, m->regions, 712 684 mem.nregions * sizeof *m->regions)) { 713 - kfree(newmem); 685 + kvfree(newmem); 714 686 return -EFAULT; 715 687 } 688 + sort(newmem->regions, newmem->nregions, sizeof(*newmem->regions), 689 + vhost_memory_reg_sort_cmp, NULL); 716 690 717 691 if (!memory_access_ok(d, newmem, 0)) { 718 - kfree(newmem); 692 + kvfree(newmem); 719 693 return -EFAULT; 720 694 } 721 695 oldmem = d->memory; ··· 729 699 d->vqs[i]->memory = newmem; 730 700 mutex_unlock(&d->vqs[i]->mutex); 731 701 } 732 - kfree(oldmem); 702 + kvfree(oldmem); 733 703 return 0; 734 704 } 735 705 ··· 1022 992 static const struct vhost_memory_region *find_region(struct vhost_memory *mem, 1023 993 __u64 addr, __u32 len) 1024 994 { 1025 - struct vhost_memory_region *reg; 1026 - int i; 995 + const struct vhost_memory_region *reg; 996 + int start = 0, end = mem->nregions; 1027 997 1028 - /* linear search is not brilliant, but we really have on the order of 6 1029 - * regions in practice */ 1030 - for (i = 0; i < mem->nregions; ++i) { 1031 - reg = mem->regions + i; 1032 - if (reg->guest_phys_addr <= addr && 1033 - reg->guest_phys_addr + reg->memory_size - 1 >= addr) 1034 - return reg; 998 + while (start < end) { 999 + int slot = start + (end - start) / 2; 1000 + reg = mem->regions + slot; 1001 + if (addr >= reg->guest_phys_addr) 1002 + end = slot; 1003 + else 1004 + start = slot + 1; 1035 1005 } 1006 + 1007 + reg = mem->regions + start; 1008 + if (addr >= reg->guest_phys_addr && 1009 + reg->guest_phys_addr + reg->memory_size > addr) 1010 + return reg; 1036 1011 return NULL; 1037 1012 } 1038 1013
+1
fs/fs-writeback.c
··· 702 702 else 703 703 wbc->wb_tcand_bytes -= min(bytes, wbc->wb_tcand_bytes); 704 704 } 705 + EXPORT_SYMBOL_GPL(wbc_account_io); 705 706 706 707 /** 707 708 * inode_congested - test whether an inode is congested
+33 -9
fs/namespace.c
··· 1361 1361 UMOUNT_PROPAGATE = 2, 1362 1362 UMOUNT_CONNECTED = 4, 1363 1363 }; 1364 + 1365 + static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how) 1366 + { 1367 + /* Leaving mounts connected is only valid for lazy umounts */ 1368 + if (how & UMOUNT_SYNC) 1369 + return true; 1370 + 1371 + /* A mount without a parent has nothing to be connected to */ 1372 + if (!mnt_has_parent(mnt)) 1373 + return true; 1374 + 1375 + /* Because the reference counting rules change when mounts are 1376 + * unmounted and connected, umounted mounts may not be 1377 + * connected to mounted mounts. 1378 + */ 1379 + if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT)) 1380 + return true; 1381 + 1382 + /* Has it been requested that the mount remain connected? */ 1383 + if (how & UMOUNT_CONNECTED) 1384 + return false; 1385 + 1386 + /* Is the mount locked such that it needs to remain connected? */ 1387 + if (IS_MNT_LOCKED(mnt)) 1388 + return false; 1389 + 1390 + /* By default disconnect the mount */ 1391 + return true; 1392 + } 1393 + 1364 1394 /* 1365 1395 * mount_lock must be held 1366 1396 * namespace_sem must be held for write ··· 1428 1398 if (how & UMOUNT_SYNC) 1429 1399 p->mnt.mnt_flags |= MNT_SYNC_UMOUNT; 1430 1400 1431 - disconnect = !(((how & UMOUNT_CONNECTED) && 1432 - mnt_has_parent(p) && 1433 - (p->mnt_parent->mnt.mnt_flags & MNT_UMOUNT)) || 1434 - IS_MNT_LOCKED_AND_LAZY(p)); 1401 + disconnect = disconnect_mount(p, how); 1435 1402 1436 1403 pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt, 1437 1404 disconnect ? 
&unmounted : NULL); ··· 1565 1538 while (!hlist_empty(&mp->m_list)) { 1566 1539 mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list); 1567 1540 if (mnt->mnt.mnt_flags & MNT_UMOUNT) { 1568 - struct mount *p, *tmp; 1569 - list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) { 1570 - hlist_add_head(&p->mnt_umount.s_list, &unmounted); 1571 - umount_mnt(p); 1572 - } 1541 + hlist_add_head(&mnt->mnt_umount.s_list, &unmounted); 1542 + umount_mnt(mnt); 1573 1543 } 1574 1544 else umount_tree(mnt, UMOUNT_CONNECTED); 1575 1545 }
+20 -14
fs/notify/mark.c
··· 152 152 BUG(); 153 153 154 154 list_del_init(&mark->g_list); 155 + 155 156 spin_unlock(&mark->lock); 156 157 157 158 if (inode && (mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED)) 158 159 iput(inode); 160 + /* release lock temporarily */ 161 + mutex_unlock(&group->mark_mutex); 159 162 160 163 spin_lock(&destroy_lock); 161 164 list_add(&mark->g_list, &destroy_list); 162 165 spin_unlock(&destroy_lock); 163 166 wake_up(&destroy_waitq); 167 + /* 168 + * We don't necessarily have a ref on mark from caller so the above destroy 169 + * may have actually freed it, unless this group provides a 'freeing_mark' 170 + * function which must be holding a reference. 171 + */ 172 + 173 + /* 174 + * Some groups like to know that marks are being freed. This is a 175 + * callback to the group function to let it know that this mark 176 + * is being freed. 177 + */ 178 + if (group->ops->freeing_mark) 179 + group->ops->freeing_mark(mark, group); 164 180 165 181 /* 166 182 * __fsnotify_update_child_dentry_flags(inode); ··· 191 175 */ 192 176 193 177 atomic_dec(&group->num_marks); 178 + 179 + mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING); 194 180 } 195 181 196 182 void fsnotify_destroy_mark(struct fsnotify_mark *mark, ··· 205 187 206 188 /* 207 189 * Destroy all marks in the given list. The marks must be already detached from 208 - * the original inode / vfsmount. Note that we can race with 209 - * fsnotify_clear_marks_by_group_flags(). However we hold a reference to each 210 - * mark so they won't get freed from under us and nobody else touches our 211 - * free_list list_head. 190 + * the original inode / vfsmount. 212 191 */ 213 192 void fsnotify_destroy_marks(struct list_head *to_free) 214 193 { ··· 406 391 } 407 392 408 393 /* 409 - * Clear any marks in a group in which mark->flags & flags is true. 
394 + * clear any marks in a group in which mark->flags & flags is true 410 395 */ 411 396 void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group, 412 397 unsigned int flags) ··· 460 445 { 461 446 struct fsnotify_mark *mark, *next; 462 447 struct list_head private_destroy_list; 463 - struct fsnotify_group *group; 464 448 465 449 for (;;) { 466 450 spin_lock(&destroy_lock); ··· 471 457 472 458 list_for_each_entry_safe(mark, next, &private_destroy_list, g_list) { 473 459 list_del_init(&mark->g_list); 474 - group = mark->group; 475 - /* 476 - * Some groups like to know that marks are being freed. 477 - * This is a callback to the group function to let it 478 - * know that this mark is being freed. 479 - */ 480 - if (group && group->ops->freeing_mark) 481 - group->ops->freeing_mark(mark, group); 482 460 fsnotify_put_mark(mark); 483 461 } 484 462
-2
fs/pnode.h
··· 20 20 #define SET_MNT_MARK(m) ((m)->mnt.mnt_flags |= MNT_MARKED) 21 21 #define CLEAR_MNT_MARK(m) ((m)->mnt.mnt_flags &= ~MNT_MARKED) 22 22 #define IS_MNT_LOCKED(m) ((m)->mnt.mnt_flags & MNT_LOCKED) 23 - #define IS_MNT_LOCKED_AND_LAZY(m) \ 24 - (((m)->mnt.mnt_flags & (MNT_LOCKED|MNT_SYNC_UMOUNT)) == MNT_LOCKED) 25 23 26 24 #define CL_EXPIRE 0x01 27 25 #define CL_SLAVE 0x02
+7 -12
fs/udf/inode.c
··· 1652 1652 iinfo->i_ext.i_data, inode->i_sb->s_blocksize - 1653 1653 sizeof(struct unallocSpaceEntry)); 1654 1654 use->descTag.tagIdent = cpu_to_le16(TAG_IDENT_USE); 1655 - use->descTag.tagLocation = 1656 - cpu_to_le32(iinfo->i_location.logicalBlockNum); 1657 - crclen = sizeof(struct unallocSpaceEntry) + 1658 - iinfo->i_lenAlloc - sizeof(struct tag); 1659 - use->descTag.descCRCLength = cpu_to_le16(crclen); 1660 - use->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)use + 1661 - sizeof(struct tag), 1662 - crclen)); 1663 - use->descTag.tagChecksum = udf_tag_checksum(&use->descTag); 1655 + crclen = sizeof(struct unallocSpaceEntry); 1664 1656 1665 - goto out; 1657 + goto finish; 1666 1658 } 1667 1659 1668 1660 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET)) ··· 1774 1782 efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE); 1775 1783 crclen = sizeof(struct extendedFileEntry); 1776 1784 } 1785 + 1786 + finish: 1777 1787 if (iinfo->i_strat4096) { 1778 1788 fe->icbTag.strategyType = cpu_to_le16(4096); 1779 1789 fe->icbTag.strategyParameter = cpu_to_le16(1); ··· 1785 1791 fe->icbTag.numEntries = cpu_to_le16(1); 1786 1792 } 1787 1793 1788 - if (S_ISDIR(inode->i_mode)) 1794 + if (iinfo->i_use) 1795 + fe->icbTag.fileType = ICBTAG_FILE_TYPE_USE; 1796 + else if (S_ISDIR(inode->i_mode)) 1789 1797 fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY; 1790 1798 else if (S_ISREG(inode->i_mode)) 1791 1799 fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR; ··· 1824 1828 crclen)); 1825 1829 fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag); 1826 1830 1827 - out: 1828 1831 set_buffer_uptodate(bh); 1829 1832 unlock_buffer(bh); 1830 1833
+1
include/linux/ata.h
··· 45 45 ATA_SECT_SIZE = 512, 46 46 ATA_MAX_SECTORS_128 = 128, 47 47 ATA_MAX_SECTORS = 256, 48 + ATA_MAX_SECTORS_1024 = 1024, 48 49 ATA_MAX_SECTORS_LBA48 = 65535,/* TODO: 65536? */ 49 50 ATA_MAX_SECTORS_TAPE = 65535, 50 51
+21 -1
include/linux/cper.h
··· 340 340 __u64 mm_reg_addr; 341 341 }; 342 342 343 - /* Memory Error Section */ 343 + /* Old Memory Error Section UEFI 2.1, 2.2 */ 344 + struct cper_sec_mem_err_old { 345 + __u64 validation_bits; 346 + __u64 error_status; 347 + __u64 physical_addr; 348 + __u64 physical_addr_mask; 349 + __u16 node; 350 + __u16 card; 351 + __u16 module; 352 + __u16 bank; 353 + __u16 device; 354 + __u16 row; 355 + __u16 column; 356 + __u16 bit_pos; 357 + __u64 requestor_id; 358 + __u64 responder_id; 359 + __u64 target_id; 360 + __u8 error_type; 361 + }; 362 + 363 + /* Memory Error Section UEFI >= 2.3 */ 344 364 struct cper_sec_mem_err { 345 365 __u64 validation_bits; 346 366 __u64 error_status;
+3
include/linux/ftrace.h
··· 116 116 * SAVE_REGS. If another ops with this flag set is already registered 117 117 * for any of the functions that this ops will be registered for, then 118 118 * this ops will fail to register or set_filter_ip. 119 + * PID - Is affected by set_ftrace_pid (allows filtering on those pids) 119 120 */ 120 121 enum { 121 122 FTRACE_OPS_FL_ENABLED = 1 << 0, ··· 133 132 FTRACE_OPS_FL_MODIFYING = 1 << 11, 134 133 FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12, 135 134 FTRACE_OPS_FL_IPMODIFY = 1 << 13, 135 + FTRACE_OPS_FL_PID = 1 << 14, 136 136 }; 137 137 138 138 #ifdef CONFIG_DYNAMIC_FTRACE ··· 161 159 struct ftrace_ops *next; 162 160 unsigned long flags; 163 161 void *private; 162 + ftrace_func_t saved_func; 164 163 int __percpu *disabled; 165 164 #ifdef CONFIG_DYNAMIC_FTRACE 166 165 int nr_trampolines;
+2
include/linux/libata.h
··· 431 431 ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */ 432 432 ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */ 433 433 ATA_HORKAGE_NO_NCQ_LOG = (1 << 23), /* don't use NCQ for log read */ 434 + ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */ 435 + ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */ 434 436 435 437 /* DMA mask for user DMA control: User visible values; DO NOT 436 438 renumber */
+5 -5
include/linux/mtd/nand.h
··· 178 178 /* Chip may not exist, so silence any errors in scan */ 179 179 #define NAND_SCAN_SILENT_NODEV 0x00040000 180 180 /* 181 - * This option could be defined by controller drivers to protect against 182 - * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers 183 - */ 184 - #define NAND_USE_BOUNCE_BUFFER 0x00080000 185 - /* 186 181 * Autodetect nand buswidth with readid/onfi. 187 182 * This suppose the driver will configure the hardware in 8 bits mode 188 183 * when calling nand_scan_ident, and update its configuration 189 184 * before calling nand_scan_tail. 190 185 */ 191 186 #define NAND_BUSWIDTH_AUTO 0x00080000 187 + /* 188 + * This option could be defined by controller drivers to protect against 189 + * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers 190 + */ 191 + #define NAND_USE_BOUNCE_BUFFER 0x00100000 192 192 193 193 /* Options set by nand scan */ 194 194 /* Nand scan has allocated controller struct */
-1
include/linux/platform_data/mmc-esdhc-imx.h
··· 43 43 enum wp_types wp_type; 44 44 enum cd_types cd_type; 45 45 int max_bus_width; 46 - unsigned int f_max; 47 46 bool support_vsel; 48 47 unsigned int delay_line; 49 48 };
+17
include/net/cfg80211.h
··· 4868 4868 struct cfg80211_chan_def *chandef, 4869 4869 enum nl80211_iftype iftype); 4870 4870 4871 + /** 4872 + * cfg80211_reg_can_beacon_relax - check if beaconing is allowed with relaxation 4873 + * @wiphy: the wiphy 4874 + * @chandef: the channel definition 4875 + * @iftype: interface type 4876 + * 4877 + * Return: %true if there is no secondary channel or the secondary channel(s) 4878 + * can be used for beaconing (i.e. is not a radar channel etc.). This version 4879 + * also checks if IR-relaxation conditions apply, to allow beaconing under 4880 + * more permissive conditions. 4881 + * 4882 + * Requires the RTNL to be held. 4883 + */ 4884 + bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy, 4885 + struct cfg80211_chan_def *chandef, 4886 + enum nl80211_iftype iftype); 4887 + 4871 4888 /* 4872 4889 * cfg80211_ch_switch_notify - update wdev channel and notify userspace 4873 4890 * @dev: the device which switched channels
+1
include/net/ip.h
··· 161 161 } 162 162 163 163 /* datagram.c */ 164 + int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); 164 165 int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); 165 166 166 167 void ip4_datagram_release_cb(struct sock *sk);
+2
include/uapi/drm/amdgpu_drm.h
··· 614 614 uint32_t vram_type; 615 615 /** video memory bit width*/ 616 616 uint32_t vram_bit_width; 617 + /* vce harvesting instance */ 618 + uint32_t vce_harvest_config; 617 619 }; 618 620 619 621 struct drm_amdgpu_info_hw_ip {
+8
include/uapi/drm/i915_drm.h
··· 1070 1070 __u64 offset; 1071 1071 __u64 val; /* Return value */ 1072 1072 }; 1073 + /* Known registers: 1074 + * 1075 + * Render engine timestamp - 0x2358 + 64bit - gen7+ 1076 + * - Note this register returns an invalid value if using the default 1077 + * single instruction 8byte read, in order to workaround that use 1078 + * offset (0x2538 | 1) instead. 1079 + * 1080 + */ 1073 1081 1074 1082 struct drm_i915_reset_stats { 1075 1083 __u32 ctx_id;
+16
include/uapi/linux/virtio_net.h
··· 34 34 /* The feature bitmap for virtio net */ 35 35 #define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */ 36 36 #define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */ 37 + #define VIRTIO_NET_F_CTRL_GUEST_OFFLOADS 2 /* Dynamic offload configuration. */ 37 38 #define VIRTIO_NET_F_MAC 5 /* Host has given MAC address. */ 38 39 #define VIRTIO_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in. */ 39 40 #define VIRTIO_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in. */ ··· 226 225 #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET 0 227 226 #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN 1 228 227 #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX 0x8000 228 + 229 + /* 230 + * Control network offloads 231 + * 232 + * Reconfigures the network offloads that Guest can handle. 233 + * 234 + * Available with the VIRTIO_NET_F_CTRL_GUEST_OFFLOADS feature bit. 235 + * 236 + * Command data format matches the feature bit mask exactly. 237 + * 238 + * See VIRTIO_NET_F_GUEST_* for the list of offloads 239 + * that can be enabled/disabled. 240 + */ 241 + #define VIRTIO_NET_CTRL_GUEST_OFFLOADS 5 242 + #define VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET 0 229 243 230 244 #endif /* _LINUX_VIRTIO_NET_H */
+6
include/uapi/linux/virtio_pci.h
··· 157 157 __le32 queue_used_hi; /* read-write */ 158 158 }; 159 159 160 + /* Fields in VIRTIO_PCI_CAP_PCI_CFG: */ 161 + struct virtio_pci_cfg_cap { 162 + struct virtio_pci_cap cap; 163 + __u8 pci_cfg_data[4]; /* Data for BAR access. */ 164 + }; 165 + 160 166 /* Macro versions of offsets for the Old Timers! */ 161 167 #define VIRTIO_PCI_CAP_VNDR 0 162 168 #define VIRTIO_PCI_CAP_NEXT 1
+4 -1
include/uapi/linux/virtio_ring.h
··· 31 31 * SUCH DAMAGE. 32 32 * 33 33 * Copyright Rusty Russell IBM Corporation 2007. */ 34 + #ifndef __KERNEL__ 35 + #include <stdint.h> 36 + #endif 34 37 #include <linux/types.h> 35 38 #include <linux/virtio_types.h> 36 39 ··· 146 143 vr->num = num; 147 144 vr->desc = p; 148 145 vr->avail = p + num*sizeof(struct vring_desc); 149 - vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + sizeof(__virtio16) 146 + vr->used = (void *)(((uintptr_t)&vr->avail->ring[num] + sizeof(__virtio16) 150 147 + align-1) & ~(align - 1)); 151 148 } 152 149
+3 -3
kernel/resource.c
··· 504 504 { 505 505 struct resource *p; 506 506 resource_size_t end = start + size - 1; 507 - int flags = IORESOURCE_MEM | IORESOURCE_BUSY; 507 + unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY; 508 508 const char *name = "System RAM"; 509 509 int ret = -1; 510 510 511 511 read_lock(&resource_lock); 512 512 for (p = iomem_resource.child; p ; p = p->sibling) { 513 - if (end < p->start) 513 + if (p->end < start) 514 514 continue; 515 515 516 516 if (p->start <= start && end <= p->end) { ··· 521 521 ret = 1; 522 522 break; 523 523 } 524 - if (p->end < start) 524 + if (end < p->start) 525 525 break; /* not found */ 526 526 } 527 527 read_unlock(&resource_lock);
+34 -18
kernel/trace/ftrace.c
··· 98 98 struct pid *pid; 99 99 }; 100 100 101 + static bool ftrace_pids_enabled(void) 102 + { 103 + return !list_empty(&ftrace_pids); 104 + } 105 + 106 + static void ftrace_update_trampoline(struct ftrace_ops *ops); 107 + 101 108 /* 102 109 * ftrace_disabled is set when an anomaly is discovered. 103 110 * ftrace_disabled is much stronger than ftrace_enabled. ··· 116 109 static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end; 117 110 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end; 118 111 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; 119 - ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub; 120 112 static struct ftrace_ops global_ops; 121 113 static struct ftrace_ops control_ops; 122 114 ··· 189 183 if (!test_tsk_trace_trace(current)) 190 184 return; 191 185 192 - ftrace_pid_function(ip, parent_ip, op, regs); 193 - } 194 - 195 - static void set_ftrace_pid_function(ftrace_func_t func) 196 - { 197 - /* do not set ftrace_pid_function to itself! 
*/ 198 - if (func != ftrace_pid_func) 199 - ftrace_pid_function = func; 186 + op->saved_func(ip, parent_ip, op, regs); 200 187 } 201 188 202 189 /** ··· 201 202 void clear_ftrace_function(void) 202 203 { 203 204 ftrace_trace_function = ftrace_stub; 204 - ftrace_pid_function = ftrace_stub; 205 205 } 206 206 207 207 static void control_ops_disable_all(struct ftrace_ops *ops) ··· 434 436 } else 435 437 add_ftrace_ops(&ftrace_ops_list, ops); 436 438 439 + /* Always save the function, and reset at unregistering */ 440 + ops->saved_func = ops->func; 441 + 442 + if (ops->flags & FTRACE_OPS_FL_PID && ftrace_pids_enabled()) 443 + ops->func = ftrace_pid_func; 444 + 437 445 ftrace_update_trampoline(ops); 438 446 439 447 if (ftrace_enabled) ··· 467 463 if (ftrace_enabled) 468 464 update_ftrace_function(); 469 465 466 + ops->func = ops->saved_func; 467 + 470 468 return 0; 471 469 } 472 470 473 471 static void ftrace_update_pid_func(void) 474 472 { 473 + bool enabled = ftrace_pids_enabled(); 474 + struct ftrace_ops *op; 475 + 475 476 /* Only do something if we are tracing something */ 476 477 if (ftrace_trace_function == ftrace_stub) 477 478 return; 479 + 480 + do_for_each_ftrace_op(op, ftrace_ops_list) { 481 + if (op->flags & FTRACE_OPS_FL_PID) { 482 + op->func = enabled ? 
ftrace_pid_func : 483 + op->saved_func; 484 + ftrace_update_trampoline(op); 485 + } 486 + } while_for_each_ftrace_op(op); 478 487 479 488 update_ftrace_function(); 480 489 } ··· 1150 1133 .local_hash.filter_hash = EMPTY_HASH, 1151 1134 INIT_OPS_HASH(global_ops) 1152 1135 .flags = FTRACE_OPS_FL_RECURSION_SAFE | 1153 - FTRACE_OPS_FL_INITIALIZED, 1136 + FTRACE_OPS_FL_INITIALIZED | 1137 + FTRACE_OPS_FL_PID, 1154 1138 }; 1155 1139 1156 1140 /* ··· 5041 5023 5042 5024 static struct ftrace_ops global_ops = { 5043 5025 .func = ftrace_stub, 5044 - .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, 5026 + .flags = FTRACE_OPS_FL_RECURSION_SAFE | 5027 + FTRACE_OPS_FL_INITIALIZED | 5028 + FTRACE_OPS_FL_PID, 5045 5029 }; 5046 5030 5047 5031 static int __init ftrace_nodyn_init(void) ··· 5100 5080 if (WARN_ON(tr->ops->func != ftrace_stub)) 5101 5081 printk("ftrace ops had %pS for function\n", 5102 5082 tr->ops->func); 5103 - /* Only the top level instance does pid tracing */ 5104 - if (!list_empty(&ftrace_pids)) { 5105 - set_ftrace_pid_function(func); 5106 - func = ftrace_pid_func; 5107 - } 5108 5083 } 5109 5084 tr->ops->func = func; 5110 5085 tr->ops->private = tr; ··· 5386 5371 { 5387 5372 mutex_lock(&ftrace_lock); 5388 5373 5389 - if (list_empty(&ftrace_pids) && (!*pos)) 5374 + if (!ftrace_pids_enabled() && (!*pos)) 5390 5375 return (void *) 1; 5391 5376 5392 5377 return seq_list_start(&ftrace_pids, *pos); ··· 5625 5610 .func = ftrace_stub, 5626 5611 .flags = FTRACE_OPS_FL_RECURSION_SAFE | 5627 5612 FTRACE_OPS_FL_INITIALIZED | 5613 + FTRACE_OPS_FL_PID | 5628 5614 FTRACE_OPS_FL_STUB, 5629 5615 #ifdef FTRACE_GRAPH_TRAMP_ADDR 5630 5616 .trampoline = FTRACE_GRAPH_TRAMP_ADDR,
+1
net/9p/trans_virtio.c
··· 704 704 705 705 mutex_unlock(&virtio_9p_lock); 706 706 707 + vdev->config->reset(vdev); 707 708 vdev->config->del_vqs(vdev); 708 709 709 710 sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
+1
net/ax25/ax25_subr.c
··· 264 264 { 265 265 ax25_clear_queues(ax25); 266 266 267 + ax25_stop_heartbeat(ax25); 267 268 ax25_stop_t1timer(ax25); 268 269 ax25_stop_t2timer(ax25); 269 270 ax25_stop_t3timer(ax25);
-1
net/bridge/br_mdb.c
··· 351 351 if (state == MDB_TEMPORARY) 352 352 mod_timer(&p->timer, now + br->multicast_membership_interval); 353 353 354 - br_mdb_notify(br->dev, port, group, RTM_NEWMDB); 355 354 return 0; 356 355 } 357 356
+30 -7
net/bridge/br_multicast.c
··· 39 39 struct bridge_mcast_own_query *query); 40 40 static void br_multicast_add_router(struct net_bridge *br, 41 41 struct net_bridge_port *port); 42 + static void br_ip4_multicast_leave_group(struct net_bridge *br, 43 + struct net_bridge_port *port, 44 + __be32 group, 45 + __u16 vid); 46 + #if IS_ENABLED(CONFIG_IPV6) 47 + static void br_ip6_multicast_leave_group(struct net_bridge *br, 48 + struct net_bridge_port *port, 49 + const struct in6_addr *group, 50 + __u16 vid); 51 + #endif 42 52 unsigned int br_mdb_rehash_seq; 43 53 44 54 static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b) ··· 1020 1010 continue; 1021 1011 } 1022 1012 1023 - err = br_ip4_multicast_add_group(br, port, group, vid); 1024 - if (err) 1025 - break; 1013 + if ((type == IGMPV3_CHANGE_TO_INCLUDE || 1014 + type == IGMPV3_MODE_IS_INCLUDE) && 1015 + ntohs(grec->grec_nsrcs) == 0) { 1016 + br_ip4_multicast_leave_group(br, port, group, vid); 1017 + } else { 1018 + err = br_ip4_multicast_add_group(br, port, group, vid); 1019 + if (err) 1020 + break; 1021 + } 1026 1022 } 1027 1023 1028 1024 return err; ··· 1087 1071 continue; 1088 1072 } 1089 1073 1090 - err = br_ip6_multicast_add_group(br, port, &grec->grec_mca, 1091 - vid); 1092 - if (err) 1093 - break; 1074 + if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE || 1075 + grec->grec_type == MLD2_MODE_IS_INCLUDE) && 1076 + ntohs(*nsrcs) == 0) { 1077 + br_ip6_multicast_leave_group(br, port, &grec->grec_mca, 1078 + vid); 1079 + } else { 1080 + err = br_ip6_multicast_add_group(br, port, 1081 + &grec->grec_mca, vid); 1082 + if (!err) 1083 + break; 1084 + } 1094 1085 } 1095 1086 1096 1087 return err;
+8 -11
net/caif/caif_socket.c
··· 121 121 * Copied from sock.c:sock_queue_rcv_skb(), but changed so packets are 122 122 * not dropped, but CAIF is sending flow off instead. 123 123 */ 124 - static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 124 + static void caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 125 125 { 126 126 int err; 127 127 unsigned long flags; 128 128 struct sk_buff_head *list = &sk->sk_receive_queue; 129 129 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); 130 + bool queued = false; 130 131 131 132 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 132 133 (unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) { ··· 140 139 141 140 err = sk_filter(sk, skb); 142 141 if (err) 143 - return err; 142 + goto out; 143 + 144 144 if (!sk_rmem_schedule(sk, skb, skb->truesize) && rx_flow_is_on(cf_sk)) { 145 145 set_rx_flow_off(cf_sk); 146 146 net_dbg_ratelimited("sending flow OFF due to rmem_schedule\n"); ··· 149 147 } 150 148 skb->dev = NULL; 151 149 skb_set_owner_r(skb, sk); 152 - /* Cache the SKB length before we tack it onto the receive 153 - * queue. Once it is added it no longer belongs to us and 154 - * may be freed by other threads of control pulling packets 155 - * from the queue. 156 - */ 157 150 spin_lock_irqsave(&list->lock, flags); 158 - if (!sock_flag(sk, SOCK_DEAD)) 151 + queued = !sock_flag(sk, SOCK_DEAD); 152 + if (queued) 159 153 __skb_queue_tail(list, skb); 160 154 spin_unlock_irqrestore(&list->lock, flags); 161 - 162 - if (!sock_flag(sk, SOCK_DEAD)) 155 + out: 156 + if (queued) 163 157 sk->sk_data_ready(sk); 164 158 else 165 159 kfree_skb(skb); 166 - return 0; 167 160 } 168 161 169 162 /* Packet Receive Callback function called from CAIF Stack */
+47 -9
net/core/datagram.c
··· 131 131 goto out; 132 132 } 133 133 134 + static int skb_set_peeked(struct sk_buff *skb) 135 + { 136 + struct sk_buff *nskb; 137 + 138 + if (skb->peeked) 139 + return 0; 140 + 141 + /* We have to unshare an skb before modifying it. */ 142 + if (!skb_shared(skb)) 143 + goto done; 144 + 145 + nskb = skb_clone(skb, GFP_ATOMIC); 146 + if (!nskb) 147 + return -ENOMEM; 148 + 149 + skb->prev->next = nskb; 150 + skb->next->prev = nskb; 151 + nskb->prev = skb->prev; 152 + nskb->next = skb->next; 153 + 154 + consume_skb(skb); 155 + skb = nskb; 156 + 157 + done: 158 + skb->peeked = 1; 159 + 160 + return 0; 161 + } 162 + 134 163 /** 135 164 * __skb_recv_datagram - Receive a datagram skbuff 136 165 * @sk: socket ··· 194 165 struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags, 195 166 int *peeked, int *off, int *err) 196 167 { 168 + struct sk_buff_head *queue = &sk->sk_receive_queue; 197 169 struct sk_buff *skb, *last; 170 + unsigned long cpu_flags; 198 171 long timeo; 199 172 /* 200 173 * Caller is allowed not to check sk->sk_err before skb_recv_datagram() ··· 215 184 * Look at current nfs client by the way... 216 185 * However, this function was correct in any case. 
8) 217 186 */ 218 - unsigned long cpu_flags; 219 - struct sk_buff_head *queue = &sk->sk_receive_queue; 220 187 int _off = *off; 221 188 222 189 last = (struct sk_buff *)queue; ··· 228 199 _off -= skb->len; 229 200 continue; 230 201 } 231 - skb->peeked = 1; 202 + 203 + error = skb_set_peeked(skb); 204 + if (error) 205 + goto unlock_err; 206 + 232 207 atomic_inc(&skb->users); 233 208 } else 234 209 __skb_unlink(skb, queue); ··· 256 223 257 224 return NULL; 258 225 226 + unlock_err: 227 + spin_unlock_irqrestore(&queue->lock, cpu_flags); 259 228 no_packet: 260 229 *err = error; 261 230 return NULL; ··· 657 622 !skb->csum_complete_sw) 658 623 netdev_rx_csum_fault(skb->dev); 659 624 } 660 - skb->csum_valid = !sum; 625 + if (!skb_shared(skb)) 626 + skb->csum_valid = !sum; 661 627 return sum; 662 628 } 663 629 EXPORT_SYMBOL(__skb_checksum_complete_head); ··· 678 642 netdev_rx_csum_fault(skb->dev); 679 643 } 680 644 681 - /* Save full packet checksum */ 682 - skb->csum = csum; 683 - skb->ip_summed = CHECKSUM_COMPLETE; 684 - skb->csum_complete_sw = 1; 685 - skb->csum_valid = !sum; 645 + if (!skb_shared(skb)) { 646 + /* Save full packet checksum */ 647 + skb->csum = csum; 648 + skb->ip_summed = CHECKSUM_COMPLETE; 649 + skb->csum_complete_sw = 1; 650 + skb->csum_valid = !sum; 651 + } 686 652 687 653 return sum; 688 654 }
+3 -1
net/core/dst.c
··· 284 284 int newrefcnt; 285 285 286 286 newrefcnt = atomic_dec_return(&dst->__refcnt); 287 - WARN_ON(newrefcnt < 0); 287 + if (unlikely(newrefcnt < 0)) 288 + net_warn_ratelimited("%s: dst:%p refcnt:%d\n", 289 + __func__, dst, newrefcnt); 288 290 if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt) 289 291 call_rcu(&dst->rcu_head, dst_destroy_rcu); 290 292 }
+7 -4
net/core/rtnetlink.c
··· 1804 1804 goto errout; 1805 1805 1806 1806 nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) { 1807 - if (nla_type(attr) != IFLA_VF_PORT) 1808 - continue; 1809 - err = nla_parse_nested(port, IFLA_PORT_MAX, 1810 - attr, ifla_port_policy); 1807 + if (nla_type(attr) != IFLA_VF_PORT || 1808 + nla_len(attr) < NLA_HDRLEN) { 1809 + err = -EINVAL; 1810 + goto errout; 1811 + } 1812 + err = nla_parse_nested(port, IFLA_PORT_MAX, attr, 1813 + ifla_port_policy); 1811 1814 if (err < 0) 1812 1815 goto errout; 1813 1816 if (!port[IFLA_PORT_VF]) {
+12 -4
net/ipv4/datagram.c
··· 20 20 #include <net/route.h> 21 21 #include <net/tcp_states.h> 22 22 23 - int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 23 + int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 24 24 { 25 25 struct inet_sock *inet = inet_sk(sk); 26 26 struct sockaddr_in *usin = (struct sockaddr_in *) uaddr; ··· 38 38 return -EAFNOSUPPORT; 39 39 40 40 sk_dst_reset(sk); 41 - 42 - lock_sock(sk); 43 41 44 42 oif = sk->sk_bound_dev_if; 45 43 saddr = inet->inet_saddr; ··· 80 82 sk_dst_set(sk, &rt->dst); 81 83 err = 0; 82 84 out: 83 - release_sock(sk); 84 85 return err; 86 + } 87 + EXPORT_SYMBOL(__ip4_datagram_connect); 88 + 89 + int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 90 + { 91 + int res; 92 + 93 + lock_sock(sk); 94 + res = __ip4_datagram_connect(sk, uaddr, addr_len); 95 + release_sock(sk); 96 + return res; 85 97 } 86 98 EXPORT_SYMBOL(ip4_datagram_connect); 87 99
+5 -6
net/ipv4/inet_hashtables.c
··· 624 624 625 625 int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo) 626 626 { 627 + unsigned int locksz = sizeof(spinlock_t); 627 628 unsigned int i, nblocks = 1; 628 629 629 - if (sizeof(spinlock_t) != 0) { 630 + if (locksz != 0) { 630 631 /* allocate 2 cache lines or at least one spinlock per cpu */ 631 - nblocks = max_t(unsigned int, 632 - 2 * L1_CACHE_BYTES / sizeof(spinlock_t), 633 - 1); 632 + nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U); 634 633 nblocks = roundup_pow_of_two(nblocks * num_possible_cpus()); 635 634 636 635 /* no more locks than number of hash buckets */ 637 636 nblocks = min(nblocks, hashinfo->ehash_mask + 1); 638 637 639 - hashinfo->ehash_locks = kmalloc_array(nblocks, sizeof(spinlock_t), 638 + hashinfo->ehash_locks = kmalloc_array(nblocks, locksz, 640 639 GFP_KERNEL | __GFP_NOWARN); 641 640 if (!hashinfo->ehash_locks) 642 - hashinfo->ehash_locks = vmalloc(nblocks * sizeof(spinlock_t)); 641 + hashinfo->ehash_locks = vmalloc(nblocks * locksz); 643 642 644 643 if (!hashinfo->ehash_locks) 645 644 return -ENOMEM;
+4 -2
net/ipv4/ip_fragment.c
··· 351 351 ihl = ip_hdrlen(skb); 352 352 353 353 /* Determine the position of this fragment. */ 354 - end = offset + skb->len - ihl; 354 + end = offset + skb->len - skb_network_offset(skb) - ihl; 355 355 err = -EINVAL; 356 356 357 357 /* Is this the final fragment? */ ··· 381 381 goto err; 382 382 383 383 err = -ENOMEM; 384 - if (!pskb_pull(skb, ihl)) 384 + if (!pskb_pull(skb, skb_network_offset(skb) + ihl)) 385 385 goto err; 386 386 387 387 err = pskb_trim_rcsum(skb, end - offset); ··· 640 640 } else { 641 641 iph->frag_off = 0; 642 642 } 643 + 644 + ip_send_check(iph); 643 645 644 646 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS); 645 647 qp->q.fragments = NULL;
+1 -2
net/ipv4/tcp_input.c
··· 1917 1917 const struct inet_connection_sock *icsk = inet_csk(sk); 1918 1918 struct tcp_sock *tp = tcp_sk(sk); 1919 1919 struct sk_buff *skb; 1920 - bool new_recovery = false; 1920 + bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery; 1921 1921 bool is_reneg; /* is receiver reneging on SACKs? */ 1922 1922 1923 1923 /* Reduce ssthresh if it has not yet been made inside this window. */ 1924 1924 if (icsk->icsk_ca_state <= TCP_CA_Disorder || 1925 1925 !after(tp->high_seq, tp->snd_una) || 1926 1926 (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) { 1927 - new_recovery = true; 1928 1927 tp->prior_ssthresh = tcp_current_ssthresh(sk); 1929 1928 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); 1930 1929 tcp_ca_event(sk, CA_EVENT_LOSS);
+15 -5
net/ipv6/datagram.c
··· 40 40 return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0); 41 41 } 42 42 43 - int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 43 + static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 44 44 { 45 45 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; 46 46 struct inet_sock *inet = inet_sk(sk); ··· 56 56 if (usin->sin6_family == AF_INET) { 57 57 if (__ipv6_only_sock(sk)) 58 58 return -EAFNOSUPPORT; 59 - err = ip4_datagram_connect(sk, uaddr, addr_len); 59 + err = __ip4_datagram_connect(sk, uaddr, addr_len); 60 60 goto ipv4_connected; 61 61 } 62 62 ··· 98 98 sin.sin_addr.s_addr = daddr->s6_addr32[3]; 99 99 sin.sin_port = usin->sin6_port; 100 100 101 - err = ip4_datagram_connect(sk, 102 - (struct sockaddr *) &sin, 103 - sizeof(sin)); 101 + err = __ip4_datagram_connect(sk, 102 + (struct sockaddr *) &sin, 103 + sizeof(sin)); 104 104 105 105 ipv4_connected: 106 106 if (err) ··· 203 203 out: 204 204 fl6_sock_release(flowlabel); 205 205 return err; 206 + } 207 + 208 + int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 209 + { 210 + int res; 211 + 212 + lock_sock(sk); 213 + res = __ip6_datagram_connect(sk, uaddr, addr_len); 214 + release_sock(sk); 215 + return res; 206 216 } 207 217 EXPORT_SYMBOL_GPL(ip6_datagram_connect); 208 218
-2
net/ipv6/ip6_offload.c
··· 292 292 static const struct net_offload sit_offload = { 293 293 .callbacks = { 294 294 .gso_segment = ipv6_gso_segment, 295 - .gro_receive = ipv6_gro_receive, 296 - .gro_complete = ipv6_gro_complete, 297 295 }, 298 296 }; 299 297
+1
net/mac80211/debugfs_netdev.c
··· 723 723 724 724 debugfs_remove_recursive(sdata->vif.debugfs_dir); 725 725 sdata->vif.debugfs_dir = NULL; 726 + sdata->debugfs.subdir_stations = NULL; 726 727 } 727 728 728 729 void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata)
+14 -11
net/mac80211/iface.c
··· 1863 1863 ieee80211_teardown_sdata(sdata); 1864 1864 } 1865 1865 1866 - /* 1867 - * Remove all interfaces, may only be called at hardware unregistration 1868 - * time because it doesn't do RCU-safe list removals. 1869 - */ 1870 1866 void ieee80211_remove_interfaces(struct ieee80211_local *local) 1871 1867 { 1872 1868 struct ieee80211_sub_if_data *sdata, *tmp; ··· 1871 1875 1872 1876 ASSERT_RTNL(); 1873 1877 1874 - /* 1875 - * Close all AP_VLAN interfaces first, as otherwise they 1876 - * might be closed while the AP interface they belong to 1877 - * is closed, causing unregister_netdevice_many() to crash. 1878 + /* Before destroying the interfaces, make sure they're all stopped so 1879 + * that the hardware is stopped. Otherwise, the driver might still be 1880 + * iterating the interfaces during the shutdown, e.g. from a worker 1881 + * or from RX processing or similar, and if it does so (using atomic 1882 + * iteration) while we're manipulating the list, the iteration will 1883 + * crash. 1884 + * 1885 + * After this, the hardware should be stopped and the driver should 1886 + * have stopped all of its activities, so that we can do RCU-unaware 1887 + * manipulations of the interface list below. 1878 1888 */ 1879 - list_for_each_entry(sdata, &local->interfaces, list) 1880 - if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 1881 - dev_close(sdata->dev); 1889 + cfg80211_shutdown_all_interfaces(local->hw.wiphy); 1890 + 1891 + WARN(local->open_count, "%s: open count remains %d\n", 1892 + wiphy_name(local->hw.wiphy), local->open_count); 1882 1893 1883 1894 mutex_lock(&local->iflist_mtx); 1884 1895 list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
+4 -1
net/mac80211/mesh_plink.c
··· 306 306 if (action == WLAN_SP_MESH_PEERING_CONFIRM) { 307 307 /* AID */ 308 308 pos = skb_put(skb, 2); 309 - put_unaligned_le16(plid, pos + 2); 309 + put_unaligned_le16(plid, pos); 310 310 } 311 311 if (ieee80211_add_srates_ie(sdata, skb, true, band) || 312 312 ieee80211_add_ext_srates_ie(sdata, skb, true, band) || ··· 1122 1122 WLAN_SP_MESH_PEERING_CONFIRM) { 1123 1123 baseaddr += 4; 1124 1124 baselen += 4; 1125 + 1126 + if (baselen > len) 1127 + return; 1125 1128 } 1126 1129 ieee802_11_parse_elems(baseaddr, len - baselen, true, &elems); 1127 1130 mesh_process_plink_frame(sdata, mgmt, &elems);
+16
net/mac80211/pm.c
··· 76 76 if (sdata->vif.type != NL80211_IFTYPE_STATION) 77 77 continue; 78 78 ieee80211_mgd_quiesce(sdata); 79 + /* If suspended during TX in progress, and wowlan 80 + * is enabled (connection will be active) there 81 + * can be a race where the driver is put out 82 + * of power-save due to TX and during suspend 83 + * dynamic_ps_timer is cancelled and TX packet 84 + * is flushed, leaving the driver in ACTIVE even 85 + * after resuming until dynamic_ps_timer puts 86 + * driver back in DOZE. 87 + */ 88 + if (sdata->u.mgd.associated && 89 + sdata->u.mgd.powersave && 90 + !(local->hw.conf.flags & IEEE80211_CONF_PS)) { 91 + local->hw.conf.flags |= IEEE80211_CONF_PS; 92 + ieee80211_hw_config(local, 93 + IEEE80211_CONF_CHANGE_PS); 94 + } 79 95 } 80 96 81 97 err = drv_suspend(local, wowlan);
+3 -3
net/mac80211/tdls.c
··· 60 60 struct ieee80211_channel *ch; 61 61 struct cfg80211_chan_def chandef; 62 62 int i, subband_start; 63 + struct wiphy *wiphy = sdata->local->hw.wiphy; 63 64 64 65 for (i = start; i <= end; i += spacing) { 65 66 if (!ch_cnt) ··· 71 70 /* we will be active on the channel */ 72 71 cfg80211_chandef_create(&chandef, ch, 73 72 NL80211_CHAN_NO_HT); 74 - if (cfg80211_reg_can_beacon(sdata->local->hw.wiphy, 75 - &chandef, 76 - sdata->wdev.iftype)) { 73 + if (cfg80211_reg_can_beacon_relax(wiphy, &chandef, 74 + sdata->wdev.iftype)) { 77 75 ch_cnt++; 78 76 /* 79 77 * check if the next channel is also part of
+3 -1
net/mac80211/tx.c
··· 1117 1117 queued = true; 1118 1118 info->control.vif = &tx->sdata->vif; 1119 1119 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; 1120 - info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS; 1120 + info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS | 1121 + IEEE80211_TX_CTL_NO_PS_BUFFER | 1122 + IEEE80211_TX_STATUS_EOSP; 1121 1123 __skb_queue_tail(&tid_tx->pending, skb); 1122 1124 if (skb_queue_len(&tid_tx->pending) > STA_MAX_TX_BUFFER) 1123 1125 purge_skb = __skb_dequeue(&tid_tx->pending);
+47 -32
net/netlink/af_netlink.c
··· 357 357 return NULL; 358 358 } 359 359 360 + 361 + static void 362 + __netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, bool tx_ring, void **pg_vec, 363 + unsigned int order) 364 + { 365 + struct netlink_sock *nlk = nlk_sk(sk); 366 + struct sk_buff_head *queue; 367 + struct netlink_ring *ring; 368 + 369 + queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue; 370 + ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring; 371 + 372 + spin_lock_bh(&queue->lock); 373 + 374 + ring->frame_max = req->nm_frame_nr - 1; 375 + ring->head = 0; 376 + ring->frame_size = req->nm_frame_size; 377 + ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE; 378 + 379 + swap(ring->pg_vec_len, req->nm_block_nr); 380 + swap(ring->pg_vec_order, order); 381 + swap(ring->pg_vec, pg_vec); 382 + 383 + __skb_queue_purge(queue); 384 + spin_unlock_bh(&queue->lock); 385 + 386 + WARN_ON(atomic_read(&nlk->mapped)); 387 + 388 + if (pg_vec) 389 + free_pg_vec(pg_vec, order, req->nm_block_nr); 390 + } 391 + 360 392 static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, 361 - bool closing, bool tx_ring) 393 + bool tx_ring) 362 394 { 363 395 struct netlink_sock *nlk = nlk_sk(sk); 364 396 struct netlink_ring *ring; 365 - struct sk_buff_head *queue; 366 397 void **pg_vec = NULL; 367 398 unsigned int order = 0; 368 - int err; 369 399 370 400 ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring; 371 - queue = tx_ring ? 
&sk->sk_write_queue : &sk->sk_receive_queue; 372 401 373 - if (!closing) { 374 - if (atomic_read(&nlk->mapped)) 375 - return -EBUSY; 376 - if (atomic_read(&ring->pending)) 377 - return -EBUSY; 378 - } 402 + if (atomic_read(&nlk->mapped)) 403 + return -EBUSY; 404 + if (atomic_read(&ring->pending)) 405 + return -EBUSY; 379 406 380 407 if (req->nm_block_nr) { 381 408 if (ring->pg_vec != NULL) ··· 434 407 return -EINVAL; 435 408 } 436 409 437 - err = -EBUSY; 438 410 mutex_lock(&nlk->pg_vec_lock); 439 - if (closing || atomic_read(&nlk->mapped) == 0) { 440 - err = 0; 441 - spin_lock_bh(&queue->lock); 442 - 443 - ring->frame_max = req->nm_frame_nr - 1; 444 - ring->head = 0; 445 - ring->frame_size = req->nm_frame_size; 446 - ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE; 447 - 448 - swap(ring->pg_vec_len, req->nm_block_nr); 449 - swap(ring->pg_vec_order, order); 450 - swap(ring->pg_vec, pg_vec); 451 - 452 - __skb_queue_purge(queue); 453 - spin_unlock_bh(&queue->lock); 454 - 455 - WARN_ON(atomic_read(&nlk->mapped)); 411 + if (atomic_read(&nlk->mapped) == 0) { 412 + __netlink_set_ring(sk, req, tx_ring, pg_vec, order); 413 + mutex_unlock(&nlk->pg_vec_lock); 414 + return 0; 456 415 } 416 + 457 417 mutex_unlock(&nlk->pg_vec_lock); 458 418 459 419 if (pg_vec) 460 420 free_pg_vec(pg_vec, order, req->nm_block_nr); 461 - return err; 421 + 422 + return -EBUSY; 462 423 } 463 424 464 425 static void netlink_mm_open(struct vm_area_struct *vma) ··· 915 900 916 901 memset(&req, 0, sizeof(req)); 917 902 if (nlk->rx_ring.pg_vec) 918 - netlink_set_ring(sk, &req, true, false); 903 + __netlink_set_ring(sk, &req, false, NULL, 0); 919 904 memset(&req, 0, sizeof(req)); 920 905 if (nlk->tx_ring.pg_vec) 921 - netlink_set_ring(sk, &req, true, true); 906 + __netlink_set_ring(sk, &req, true, NULL, 0); 922 907 } 923 908 #endif /* CONFIG_NETLINK_MMAP */ 924 909 ··· 2238 2223 return -EINVAL; 2239 2224 if (copy_from_user(&req, optval, sizeof(req))) 2240 2225 return -EFAULT; 2241 - err = 
netlink_set_ring(sk, &req, false, 2226 + err = netlink_set_ring(sk, &req, 2242 2227 optname == NETLINK_TX_RING); 2243 2228 break; 2244 2229 }
+1 -1
net/openvswitch/flow_table.c
··· 752 752 BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long)); 753 753 754 754 flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow) 755 - + (num_possible_nodes() 755 + + (nr_node_ids 756 756 * sizeof(struct flow_stats *)), 757 757 0, 0, NULL); 758 758 if (flow_cache == NULL)
+3
net/sched/act_bpf.c
··· 339 339 bpf_prog_put(prog->filter); 340 340 else 341 341 bpf_prog_destroy(prog->filter); 342 + 343 + kfree(prog->bpf_ops); 344 + kfree(prog->bpf_name); 342 345 } 343 346 344 347 static struct tc_action_ops act_bpf_ops __read_mostly = {
+1 -1
net/sched/cls_bpf.c
··· 378 378 goto errout; 379 379 380 380 if (oldprog) { 381 - list_replace_rcu(&prog->link, &oldprog->link); 381 + list_replace_rcu(&oldprog->link, &prog->link); 382 382 tcf_unbind_filter(tp, &oldprog->res); 383 383 call_rcu(&oldprog->rcu, __cls_bpf_delete_prog); 384 384 } else {
+3 -2
net/sched/cls_flow.c
··· 425 425 if (!fnew) 426 426 goto err2; 427 427 428 + tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE); 429 + 428 430 fold = (struct flow_filter *)*arg; 429 431 if (fold) { 430 432 err = -EINVAL; ··· 488 486 fnew->mask = ~0U; 489 487 fnew->tp = tp; 490 488 get_random_bytes(&fnew->hashrnd, 4); 491 - tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE); 492 489 } 493 490 494 491 fnew->perturb_timer.function = flow_perturbation; ··· 527 526 if (*arg == 0) 528 527 list_add_tail_rcu(&fnew->list, &head->filters); 529 528 else 530 - list_replace_rcu(&fnew->list, &fold->list); 529 + list_replace_rcu(&fold->list, &fnew->list); 531 530 532 531 *arg = (unsigned long)fnew; 533 532
+1 -1
net/sched/cls_flower.c
··· 499 499 *arg = (unsigned long) fnew; 500 500 501 501 if (fold) { 502 - list_replace_rcu(&fnew->list, &fold->list); 502 + list_replace_rcu(&fold->list, &fnew->list); 503 503 tcf_unbind_filter(tp, &fold->res); 504 504 call_rcu(&fold->rcu, fl_destroy_filter); 505 505 } else {
+11 -2
net/sched/sch_fq_codel.c
··· 155 155 skb = dequeue_head(flow); 156 156 len = qdisc_pkt_len(skb); 157 157 q->backlogs[idx] -= len; 158 - kfree_skb(skb); 159 158 sch->q.qlen--; 160 159 qdisc_qstats_drop(sch); 161 160 qdisc_qstats_backlog_dec(sch, skb); 161 + kfree_skb(skb); 162 162 flow->dropped++; 163 163 return idx; 164 + } 165 + 166 + static unsigned int fq_codel_qdisc_drop(struct Qdisc *sch) 167 + { 168 + unsigned int prev_backlog; 169 + 170 + prev_backlog = sch->qstats.backlog; 171 + fq_codel_drop(sch); 172 + return prev_backlog - sch->qstats.backlog; 164 173 } 165 174 166 175 static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) ··· 613 604 .enqueue = fq_codel_enqueue, 614 605 .dequeue = fq_codel_dequeue, 615 606 .peek = qdisc_peek_dequeued, 616 - .drop = fq_codel_drop, 607 + .drop = fq_codel_qdisc_drop, 617 608 .init = fq_codel_init, 618 609 .reset = fq_codel_reset, 619 610 .destroy = fq_codel_destroy,
+1 -1
net/sched/sch_sfq.c
··· 306 306 len = qdisc_pkt_len(skb); 307 307 slot->backlog -= len; 308 308 sfq_dec(q, x); 309 - kfree_skb(skb); 310 309 sch->q.qlen--; 311 310 qdisc_qstats_drop(sch); 312 311 qdisc_qstats_backlog_dec(sch, skb); 312 + kfree_skb(skb); 313 313 return len; 314 314 } 315 315
+34 -11
net/wireless/chan.c
··· 797 797 return false; 798 798 } 799 799 800 - bool cfg80211_reg_can_beacon(struct wiphy *wiphy, 801 - struct cfg80211_chan_def *chandef, 802 - enum nl80211_iftype iftype) 800 + static bool _cfg80211_reg_can_beacon(struct wiphy *wiphy, 801 + struct cfg80211_chan_def *chandef, 802 + enum nl80211_iftype iftype, 803 + bool check_no_ir) 803 804 { 804 805 bool res; 805 806 u32 prohibited_flags = IEEE80211_CHAN_DISABLED | 806 807 IEEE80211_CHAN_RADAR; 807 808 808 - trace_cfg80211_reg_can_beacon(wiphy, chandef, iftype); 809 + trace_cfg80211_reg_can_beacon(wiphy, chandef, iftype, check_no_ir); 809 810 810 - /* 811 - * Under certain conditions suggested by some regulatory bodies a 812 - * GO/STA can IR on channels marked with IEEE80211_NO_IR. Set this flag 813 - * only if such relaxations are not enabled and the conditions are not 814 - * met. 815 - */ 816 - if (!cfg80211_ir_permissive_chan(wiphy, iftype, chandef->chan)) 811 + if (check_no_ir) 817 812 prohibited_flags |= IEEE80211_CHAN_NO_IR; 818 813 819 814 if (cfg80211_chandef_dfs_required(wiphy, chandef, iftype) > 0 && ··· 822 827 trace_cfg80211_return_bool(res); 823 828 return res; 824 829 } 830 + 831 + bool cfg80211_reg_can_beacon(struct wiphy *wiphy, 832 + struct cfg80211_chan_def *chandef, 833 + enum nl80211_iftype iftype) 834 + { 835 + return _cfg80211_reg_can_beacon(wiphy, chandef, iftype, true); 836 + } 825 837 EXPORT_SYMBOL(cfg80211_reg_can_beacon); 838 + 839 + bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy, 840 + struct cfg80211_chan_def *chandef, 841 + enum nl80211_iftype iftype) 842 + { 843 + bool check_no_ir; 844 + 845 + ASSERT_RTNL(); 846 + 847 + /* 848 + * Under certain conditions suggested by some regulatory bodies a 849 + * GO/STA can IR on channels marked with IEEE80211_NO_IR. Set this flag 850 + * only if such relaxations are not enabled and the conditions are not 851 + * met. 
852 + */ 853 + check_no_ir = !cfg80211_ir_permissive_chan(wiphy, iftype, 854 + chandef->chan); 855 + 856 + return _cfg80211_reg_can_beacon(wiphy, chandef, iftype, check_no_ir); 857 + } 858 + EXPORT_SYMBOL(cfg80211_reg_can_beacon_relax); 826 859 827 860 int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev, 828 861 struct cfg80211_chan_def *chandef)
+8 -6
net/wireless/nl80211.c
··· 2003 2003 switch (iftype) { 2004 2004 case NL80211_IFTYPE_AP: 2005 2005 case NL80211_IFTYPE_P2P_GO: 2006 - if (!cfg80211_reg_can_beacon(&rdev->wiphy, &chandef, iftype)) { 2006 + if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &chandef, 2007 + iftype)) { 2007 2008 result = -EINVAL; 2008 2009 break; 2009 2010 } ··· 3404 3403 } else if (!nl80211_get_ap_channel(rdev, &params)) 3405 3404 return -EINVAL; 3406 3405 3407 - if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef, 3408 - wdev->iftype)) 3406 + if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &params.chandef, 3407 + wdev->iftype)) 3409 3408 return -EINVAL; 3410 3409 3411 3410 if (info->attrs[NL80211_ATTR_ACL_POLICY]) { ··· 6493 6492 if (err) 6494 6493 return err; 6495 6494 6496 - if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef, 6497 - wdev->iftype)) 6495 + if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &params.chandef, 6496 + wdev->iftype)) 6498 6497 return -EINVAL; 6499 6498 6500 6499 err = cfg80211_chandef_dfs_required(wdev->wiphy, ··· 10171 10170 return -EINVAL; 10172 10171 10173 10172 /* we will be active on the TDLS link */ 10174 - if (!cfg80211_reg_can_beacon(&rdev->wiphy, &chandef, wdev->iftype)) 10173 + if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &chandef, 10174 + wdev->iftype)) 10175 10175 return -EINVAL; 10176 10176 10177 10177 /* don't allow switching to DFS channels */
+4 -4
net/wireless/reg.c
··· 544 544 reg_regdb_query(alpha2); 545 545 546 546 if (reg_crda_timeouts > REG_MAX_CRDA_TIMEOUTS) { 547 - pr_info("Exceeded CRDA call max attempts. Not calling CRDA\n"); 547 + pr_debug("Exceeded CRDA call max attempts. Not calling CRDA\n"); 548 548 return -EINVAL; 549 549 } 550 550 551 551 if (!is_world_regdom((char *) alpha2)) 552 - pr_info("Calling CRDA for country: %c%c\n", 552 + pr_debug("Calling CRDA for country: %c%c\n", 553 553 alpha2[0], alpha2[1]); 554 554 else 555 - pr_info("Calling CRDA to update world regulatory domain\n"); 555 + pr_debug("Calling CRDA to update world regulatory domain\n"); 556 556 557 557 return kobject_uevent_env(&reg_pdev->dev.kobj, KOBJ_CHANGE, env); 558 558 } ··· 1589 1589 case NL80211_IFTYPE_AP: 1590 1590 case NL80211_IFTYPE_P2P_GO: 1591 1591 case NL80211_IFTYPE_ADHOC: 1592 - return cfg80211_reg_can_beacon(wiphy, &chandef, iftype); 1592 + return cfg80211_reg_can_beacon_relax(wiphy, &chandef, iftype); 1593 1593 case NL80211_IFTYPE_STATION: 1594 1594 case NL80211_IFTYPE_P2P_CLIENT: 1595 1595 return cfg80211_chandef_usable(wiphy, &chandef,
+7 -4
net/wireless/trace.h
··· 2358 2358 2359 2359 TRACE_EVENT(cfg80211_reg_can_beacon, 2360 2360 TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef, 2361 - enum nl80211_iftype iftype), 2362 - TP_ARGS(wiphy, chandef, iftype), 2361 + enum nl80211_iftype iftype, bool check_no_ir), 2362 + TP_ARGS(wiphy, chandef, iftype, check_no_ir), 2363 2363 TP_STRUCT__entry( 2364 2364 WIPHY_ENTRY 2365 2365 CHAN_DEF_ENTRY 2366 2366 __field(enum nl80211_iftype, iftype) 2367 + __field(bool, check_no_ir) 2367 2368 ), 2368 2369 TP_fast_assign( 2369 2370 WIPHY_ASSIGN; 2370 2371 CHAN_DEF_ASSIGN(chandef); 2371 2372 __entry->iftype = iftype; 2373 + __entry->check_no_ir = check_no_ir; 2372 2374 ), 2373 - TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT ", iftype=%d", 2374 - WIPHY_PR_ARG, CHAN_DEF_PR_ARG, __entry->iftype) 2375 + TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT ", iftype=%d check_no_ir=%s", 2376 + WIPHY_PR_ARG, CHAN_DEF_PR_ARG, __entry->iftype, 2377 + BOOL_TO_STR(__entry->check_no_ir)) 2375 2378 ); 2376 2379 2377 2380 TRACE_EVENT(cfg80211_chandef_dfs_required,
+5 -2
samples/trace_events/trace-events-sample.h
··· 168 168 * 169 169 * For __dynamic_array(int, foo, bar) use __get_dynamic_array(foo) 170 170 * Use __get_dynamic_array_len(foo) to get the length of the array 171 - * saved. 171 + * saved. Note, __get_dynamic_array_len() returns the total allocated 172 + * length of the dynamic array; __print_array() expects the second 173 + * parameter to be the number of elements. To get that, the array length 174 + * needs to be divided by the element size. 172 175 * 173 176 * For __string(foo, bar) use __get_str(foo) 174 177 * ··· 291 288 * This prints out the array that is defined by __array in a nice format. 292 289 */ 293 290 __print_array(__get_dynamic_array(list), 294 - __get_dynamic_array_len(list), 291 + __get_dynamic_array_len(list) / sizeof(int), 295 292 sizeof(int)), 296 293 __get_str(str), __get_bitmask(cpus)) 297 294 );
+1 -1
sound/core/pcm_native.c
··· 85 85 void snd_pcm_stream_lock(struct snd_pcm_substream *substream) 86 86 { 87 87 if (substream->pcm->nonatomic) { 88 - down_read(&snd_pcm_link_rwsem); 88 + down_read_nested(&snd_pcm_link_rwsem, SINGLE_DEPTH_NESTING); 89 89 mutex_lock(&substream->self_group.mutex); 90 90 } else { 91 91 read_lock(&snd_pcm_link_rwlock);
+4 -1
sound/hda/hdac_i915.c
··· 56 56 enable ? "enable" : "disable"); 57 57 58 58 if (enable) { 59 - if (!bus->i915_power_refcount++) 59 + if (!bus->i915_power_refcount++) { 60 60 acomp->ops->get_power(acomp->dev); 61 + snd_hdac_set_codec_wakeup(bus, true); 62 + snd_hdac_set_codec_wakeup(bus, false); 63 + } 61 64 } else { 62 65 WARN_ON(!bus->i915_power_refcount); 63 66 if (!--bus->i915_power_refcount)
+18 -8
sound/pci/hda/hda_intel.c
··· 979 979 if (!azx_has_pm_runtime(chip)) 980 980 return 0; 981 981 982 - if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL 983 - && hda->need_i915_power) { 984 - bus = azx_bus(chip); 985 - snd_hdac_display_power(bus, true); 986 - haswell_set_bclk(hda); 987 - /* toggle codec wakeup bit for STATESTS read */ 988 - snd_hdac_set_codec_wakeup(bus, true); 989 - snd_hdac_set_codec_wakeup(bus, false); 982 + if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) { 983 + bus = azx_bus(chip); 984 + if (hda->need_i915_power) { 985 + snd_hdac_display_power(bus, true); 986 + haswell_set_bclk(hda); 987 + } else { 988 + /* toggle codec wakeup bit for STATESTS read */ 989 + snd_hdac_set_codec_wakeup(bus, true); 990 + snd_hdac_set_codec_wakeup(bus, false); 991 + } 990 992 } 991 993 992 994 /* Read STATESTS before controller reset */ ··· 2184 2182 /* ATI HDMI */ 2185 2183 { PCI_DEVICE(0x1002, 0x1308), 2186 2184 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, 2185 + { PCI_DEVICE(0x1002, 0x157a), 2186 + .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, 2187 2187 { PCI_DEVICE(0x1002, 0x793b), 2188 2188 .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI }, 2189 2189 { PCI_DEVICE(0x1002, 0x7919), ··· 2240 2236 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, 2241 2237 { PCI_DEVICE(0x1002, 0xaab0), 2242 2238 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, 2239 + { PCI_DEVICE(0x1002, 0xaac0), 2240 + .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, 2243 2241 { PCI_DEVICE(0x1002, 0xaac8), 2242 + .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, 2243 + { PCI_DEVICE(0x1002, 0xaad8), 2244 + .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, 2245 + { PCI_DEVICE(0x1002, 0xaae8), 2244 2246 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, 2245 2247 /* VIA VT8251/VT8237A */ 2246 2248 { PCI_DEVICE(0x1106, 0x3288),
+2
sound/pci/hda/patch_hdmi.c
··· 3512 3512 { .id = 0x10de0070, .name = "GPU 70 HDMI/DP", .patch = patch_nvhdmi }, 3513 3513 { .id = 0x10de0071, .name = "GPU 71 HDMI/DP", .patch = patch_nvhdmi }, 3514 3514 { .id = 0x10de0072, .name = "GPU 72 HDMI/DP", .patch = patch_nvhdmi }, 3515 + { .id = 0x10de007d, .name = "GPU 7d HDMI/DP", .patch = patch_nvhdmi }, 3515 3516 { .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch }, 3516 3517 { .id = 0x11069f80, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi }, 3517 3518 { .id = 0x11069f81, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi }, ··· 3577 3576 MODULE_ALIAS("snd-hda-codec-id:10de0070"); 3578 3577 MODULE_ALIAS("snd-hda-codec-id:10de0071"); 3579 3578 MODULE_ALIAS("snd-hda-codec-id:10de0072"); 3579 + MODULE_ALIAS("snd-hda-codec-id:10de007d"); 3580 3580 MODULE_ALIAS("snd-hda-codec-id:10de8001"); 3581 3581 MODULE_ALIAS("snd-hda-codec-id:11069f80"); 3582 3582 MODULE_ALIAS("snd-hda-codec-id:11069f81");
+31 -8
sound/pci/hda/patch_realtek.c
··· 5061 5061 { 0x14, 0x90170110 }, 5062 5062 { 0x17, 0x40000008 }, 5063 5063 { 0x18, 0x411111f0 }, 5064 - { 0x19, 0x411111f0 }, 5064 + { 0x19, 0x01a1913c }, 5065 5065 { 0x1a, 0x411111f0 }, 5066 5066 { 0x1b, 0x411111f0 }, 5067 5067 { 0x1d, 0x40f89b2d }, ··· 5430 5430 {0x15, 0x0221401f}, \ 5431 5431 {0x1a, 0x411111f0}, \ 5432 5432 {0x1b, 0x411111f0}, \ 5433 - {0x1d, 0x40700001}, \ 5434 - {0x1e, 0x411111f0} 5433 + {0x1d, 0x40700001} 5435 5434 5436 5435 #define ALC298_STANDARD_PINS \ 5437 5436 {0x18, 0x411111f0}, \ ··· 5461 5462 {0x17, 0x40000000}, 5462 5463 {0x1d, 0x40700001}, 5463 5464 {0x21, 0x02211030}), 5465 + SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, 5466 + {0x12, 0x40000000}, 5467 + {0x14, 0x90170130}, 5468 + {0x17, 0x411111f0}, 5469 + {0x18, 0x411111f0}, 5470 + {0x19, 0x411111f0}, 5471 + {0x1a, 0x411111f0}, 5472 + {0x1b, 0x01014020}, 5473 + {0x1d, 0x4054c029}, 5474 + {0x1e, 0x411111f0}, 5475 + {0x21, 0x0221103f}), 5464 5476 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, 5465 5477 {0x12, 0x90a60160}, 5466 5478 {0x14, 0x90170120}, ··· 5700 5690 {0x13, 0x411111f0}, 5701 5691 {0x16, 0x01014020}, 5702 5692 {0x18, 0x411111f0}, 5703 - {0x19, 0x01a19030}), 5693 + {0x19, 0x01a19030}, 5694 + {0x1e, 0x411111f0}), 5704 5695 SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, 5705 5696 ALC292_STANDARD_PINS, 5706 5697 {0x12, 0x90a60140}, 5707 5698 {0x13, 0x411111f0}, 5708 5699 {0x16, 0x01014020}, 5709 5700 {0x18, 0x02a19031}, 5710 - {0x19, 0x01a1903e}), 5701 + {0x19, 0x01a1903e}, 5702 + {0x1e, 0x411111f0}), 5711 5703 SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL3_MIC_NO_PRESENCE, 5712 5704 ALC292_STANDARD_PINS, 5713 5705 {0x12, 0x90a60140}, 5714 5706 {0x13, 0x411111f0}, 5715 5707 {0x16, 0x411111f0}, 5716 5708 {0x18, 0x411111f0}, 5717 - {0x19, 0x411111f0}), 5709 + {0x19, 0x411111f0}, 5710 + {0x1e, 0x411111f0}), 5718 5711 SND_HDA_PIN_QUIRK(0x10ec0293, 
0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE, 5719 5712 ALC292_STANDARD_PINS, 5720 5713 {0x12, 0x40000000}, 5721 5714 {0x13, 0x90a60140}, 5722 5715 {0x16, 0x21014020}, 5723 5716 {0x18, 0x411111f0}, 5724 - {0x19, 0x21a19030}), 5717 + {0x19, 0x21a19030}, 5718 + {0x1e, 0x411111f0}), 5725 5719 SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE, 5726 5720 ALC292_STANDARD_PINS, 5727 5721 {0x12, 0x40000000}, 5728 5722 {0x13, 0x90a60140}, 5729 5723 {0x16, 0x411111f0}, 5730 5724 {0x18, 0x411111f0}, 5731 - {0x19, 0x411111f0}), 5725 + {0x19, 0x411111f0}, 5726 + {0x1e, 0x411111f0}), 5727 + SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE, 5728 + ALC292_STANDARD_PINS, 5729 + {0x12, 0x40000000}, 5730 + {0x13, 0x90a60140}, 5731 + {0x16, 0x21014020}, 5732 + {0x18, 0x411111f0}, 5733 + {0x19, 0x21a19030}, 5734 + {0x1e, 0x411111ff}), 5732 5735 SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, 5733 5736 ALC298_STANDARD_PINS, 5734 5737 {0x12, 0x90a60130},
+1
sound/sparc/amd7930.c
··· 956 956 if (!amd->regs) { 957 957 snd_printk(KERN_ERR 958 958 "amd7930-%d: Unable to map chip registers.\n", dev); 959 + kfree(amd); 959 960 return -EIO; 960 961 } 961 962